diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..7975a2a --- /dev/null +++ b/.gitattributes @@ -0,0 +1,2 @@ +*.json.pbz2 filter=lfs diff=lfs merge=lfs -text +*.csv filter=lfs diff=lfs merge=lfs -text diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..7f33e3c --- /dev/null +++ b/.gitignore @@ -0,0 +1,162 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. 
+#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/#use-with-ide +.pdm.toml + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. 
+#.idea/ + +own_results \ No newline at end of file diff --git a/AUTHORS b/AUTHORS new file mode 100644 index 0000000..5483ed0 --- /dev/null +++ b/AUTHORS @@ -0,0 +1,2 @@ +Malena Schmidt +Bismark Singh diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..0d0ce16 --- /dev/null +++ b/LICENSE @@ -0,0 +1,19 @@ +Copyright 2025, Malena Schmidt and Bismark Singh + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/README.md b/README.md index 4d497e1..e582d14 100644 --- a/README.md +++ b/README.md @@ -1 +1,74 @@ -# 2024.0693 \ No newline at end of file +[![INFORMS Journal on Computing Logo](https://INFORMSJoC.github.io/logos/INFORMS_Journal_on_Computing_Header.jpg)](https://pubsonline.informs.org/journal/ijoc) + +## The Balanced Facility Location Problem: Complexity and Heuristics + +This archive is distributed in association with the [INFORMS Journal on Computing](https://pubsonline.informs.org/journal/ijoc) under the [MIT License](LICENSE). 
+ +The software and data in this repository are a snapshot of the software and data that were used in the research reported on in the paper [The Balanced Facility Location Problem: Complexity and Heuristics](https://doi.org/10.1287/ijoc.2024.0693) by Malena Schmidt and Bismark Singh. + +**Important: This code is being developed on an on-going basis at our two GitHub pages: [Malena205/heuristics_quadratic_facility_location](https://github.com/Malena205/heuristics_quadratic_facility_location) +and [schmitt-hub/preferential_access_and_fairness_in_waste_management](https://github.com/schmitt-hub/preferential_access_and_fairness_in_waste_management). Please go there if you would like to get the earlier versions, a more recent version, or would like support.** + +## Cite + +To cite the contents of this repository, please cite both the paper and this repo, using their respective DOIs. + +https://doi.org/10.1287/ijoc.2024.0693 + +https://doi.org/10.1287/ijoc.2024.0693.cd + +Below is the BibTex for citing this snapshot of the repository. + +``` +@misc{Schmidt2025, + author = {Schmidt, Malena and Singh, Bismark}, + publisher = {INFORMS Journal on Computing}, + title = {The balanced facility location problem: Complexity and heuristics}, + year = {2025}, + doi = {10.1287/ijoc.2024.0693.cd}, + url = {https://github.com/INFORMSJoC/2024.0693}, + note = {Available for download at https://github.com/INFORMSJoC/2024.0693}, +} +``` + +## Requirements for running the code +We provide the implementations of all the algorithms and heuristics discussed in the article as well as the the instances we use to test our heuristics on. Before cloning the code, make sure you have [Git Large File Storage (Git LFS)](https://docs.github.com/en/repositories/working-with-files/managing-large-files/installing-git-large-file-storage) installed. Please see instructions for downloading and installing Git LFS on the linked website. 
Note that the data files will not function properly, due to their size, if you just download the zipped file. After installing Git LFS, +(i) clone the repository as usual (with `git clone https://github.com/Malena205/heuristics_quadratic_facility_location.git`) to get the actual data files. Then, (ii) navigate into the folder as usual (with `cd`) and (iii) pull the large data files with Git LFS (`git lfs pull` and then `git lfs install`). Finally, (iv) copy the `Data` folder into the `heuristics_and_mips` folder. + +Further, before running the code, make sure you have the below installed and linked with their respective paths (see instructions on installation on their respective pages): +- python3 +- numpy +- pandas +- geopy +- pyomo +- gurobi + + +## Repository content +Please refer to the preprint for the terminology. The repository is structured as follows: +- ```data```: This folder contains the data for the four instances. For each instance, there is a file containing the travel probabilities $P_{ij}$ (e.g., ```instance_1_travel_dict.json.pbz2```) and a file containing the rest of the input data in a dataframe (e.g., ```instance_1_users_and_facs.csv```). For each instance, we also provide a corresponding instance where the capacity has been scaled to $C_j = \sum_{i \in I} U_i P_{ij}$ (```instance_1_suff_users_and_facs.csv```). +To load the data, we provide the function ```load_an_instance```. See an example of loading Instance 1 without sufficient capacity just below. ```users``` is the set $I$ and ```facs``` is the set $J$. Note that in our use of the instances we scale the input capacities by a factor. If ```cap_factor``` is provided as an input to the function, then set it: to 1.0 for any of the sufficient capacity instances, to 1.5 for Instance 1 or Instance 2, and to 0.8 for Instance 3 or Instance 4. 
+```python +users_and_facs_df, travel_dict, users, facs = load_an_instance(1, False) +``` +- ```heuristics_and_mips```: This folder contains the main part of the code. We briefly describe the purpose of each of the files: + - ```BFLP_heuristics.py```: This file contains the code for all the BFLP heuristics. + - ```BFLP_MIP.py```: This file contains the code for running the BFLP MIP. + - ```BUAP_heuristics.py```: This file contains all the BUAP heuristics and also implementations of those that are just needed for use within the BFLP heuristics; e.g., the optimization of `relaxation rounding` when used in the first version of `close greedy`. + - ```BUAP_MIP.py```: This file contains the code for running the BUAP MIP and also for editing the model, as needed for `relaxation rouding` with the first version of `close greedy`. + - ```results_heuristics.py```: This file contains functions for running all the heuristics across multiple budget factors and input parameters. For each BFLP heuristic, we provide a function for running the heuristics with multiple inputs (see the function name starting with `get_`). Results are written in JSON files; we also provide a function for converting these results into more easily readable Excel tables (see the function name starting with `write_`). All results created are saved in the `own_results` folder, which is created if it does not exist yet. For the BUAP, there is a function that runs all the heuristics on a single instance and the corresponding function which writes the results into a table. + - ```utils.py```: This file contains a few subroutines that are useful for reading and writing data, and some subroutines that are used in multiple heuristics. + - ```main.py```: This file contains a concrete example of how to run the functions in the repository. The line `results = ` may be changed to run the relevant heuristic (with the appropriate data input existing). 
+ +## Example +We provide a short concrete example of how to run this code in the ```main.py``` file. To run the file, make sure you have navigated to the appropriate repository folder in your terminal. Then, run the file from the terminal using ```python3 ./heuristics_and_mips/main.py``` or directly from a python interface (such as, Spyder). In this example, by default, we run the `open greedy` algorithm on Instance 1. The algorithm tests out how different values for the parameters $n_c$ and $d$ perform across different budgets. The results for the BFLP MIP for this instance are saved in the file ```instance_1_BFLP_MIP.json``` in the `own_results` folder. + +Similarly, to run the `close greedy` algorithm, or the BFLP and BUAP models, just copy-paste the corresponding get_ command as we explained above into the main file and run it. + +## Ongoing Development + +This code is being developed on an on-going basis at [Schmidt_Singh](https://github.com/Malena205/heuristics_quadratic_facility_location) and [Schmitt_Singh](https://github.com/schmitt-hub/preferential_access_and_fairness_in_waste_management). + +## Support + +For support in using this software, contact the authors. 
diff --git a/data/instance_1_suff_users_and_facs.csv b/data/instance_1_suff_users_and_facs.csv new file mode 100644 index 0000000..3389f01 --- /dev/null +++ b/data/instance_1_suff_users_and_facs.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:95817627fed24c5bc39422072f707da3cfcefc0aa25a32de774e6a3a4adca1a2 +size 157511 diff --git a/data/instance_1_travel_dict.json.pbz2 b/data/instance_1_travel_dict.json.pbz2 new file mode 100644 index 0000000..03fb440 --- /dev/null +++ b/data/instance_1_travel_dict.json.pbz2 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6bc0d7d4c93e8f8cd9289946dda926dd7a4b95408a3673610a91999cce0bbc3a +size 26152779 diff --git a/data/instance_1_users_and_facs.csv b/data/instance_1_users_and_facs.csv new file mode 100644 index 0000000..08df917 --- /dev/null +++ b/data/instance_1_users_and_facs.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fef699df6d373114e5e6b91411e66fdbe61ae1c24b9deb888f59486a38abcb8d +size 188173 diff --git a/data/instance_3_suff_users_and_facs.csv b/data/instance_3_suff_users_and_facs.csv new file mode 100644 index 0000000..f28e4dd --- /dev/null +++ b/data/instance_3_suff_users_and_facs.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8637861fcb6fc3c070ce37b849e4af3d476f45b361867942733f240350254cb6 +size 467081 diff --git a/data/instance_3_travel_dict.json.pbz2 b/data/instance_3_travel_dict.json.pbz2 new file mode 100644 index 0000000..d93b255 --- /dev/null +++ b/data/instance_3_travel_dict.json.pbz2 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eae2cf0c6aff710b39906d33bf5a7f5cb6704cfea5c32ee0aaf6785e3f9126de +size 118257598 diff --git a/data/instance_3_users_and_facs.csv b/data/instance_3_users_and_facs.csv new file mode 100644 index 0000000..4ee7857 --- /dev/null +++ b/data/instance_3_users_and_facs.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:831915f40915d49f2feee2cb8910ed6d1d91e8d36f90ed58ec3a44cd2c896b54 +size 467111 diff --git a/data/instance_4_suff_users_and_facs.csv b/data/instance_4_suff_users_and_facs.csv new file mode 100644 index 0000000..0e1e52c --- /dev/null +++ b/data/instance_4_suff_users_and_facs.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1aefddc9817074ec62b63393ee6540cd749916fb482ec09524fc6f858455f6de +size 137911 diff --git a/data/instance_4_travel_dict.json.pbz2 b/data/instance_4_travel_dict.json.pbz2 new file mode 100644 index 0000000..edbeeb2 --- /dev/null +++ b/data/instance_4_travel_dict.json.pbz2 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1410310e6324b5f4a1724553af334500d72d9cf66978bacb06aebc0feffb28b2 +size 5759798 diff --git a/data/instance_4_users_and_facs.csv b/data/instance_4_users_and_facs.csv new file mode 100644 index 0000000..fc501e5 --- /dev/null +++ b/data/instance_4_users_and_facs.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dce0e0063232288661683de2e92c09d0b8a6203165679f3d17d76276a4c415b0 +size 138150 diff --git a/heuristics_and_mips/BFLP_MIP.py b/heuristics_and_mips/BFLP_MIP.py new file mode 100644 index 0000000..ab00ae1 --- /dev/null +++ b/heuristics_and_mips/BFLP_MIP.py @@ -0,0 +1,207 @@ +""" +Function for solving the BFLP MIP +""" +import pyomo.environ as pyo +from pyomo.opt import SolverStatus, TerminationCondition +from pyomo.core.base.constraint import Constraint +import time +from utils import * + +def cap_init(m, j): + return m.users_and_facs_df.at[j, 'capacity'] * m.cap_factor.value + +def exp_travelers_init(m, i, j): + return m.users_and_facs_df.at[i, 'population'] * m.travel_dict[i][j] + +def obj_expression(m): + return sum(m.cap[j] * (1 - m.u[j]) ** 2 for j in m.facs) + +def define_utilization(m,j): + return m.u[j] == sum(m.exp_travelers[(i, j)] * m.x[(i, j)] for (i, j2) in m.travel_pairs if j2 == j) / m.cap[j] + + +def assign_to_one_cstr(m, i): + eligible_js 
= [j for (i2, j) in m.travel_pairs if i2 == i] + if not eligible_js: + return Constraint.Skip + + expr = sum(m.x[i, j] for j in eligible_js) + + if m.strict_assign_to_one: + return expr == 1 + return expr <= 1 + +def assign_to_open_cstr(m, i, j): + return m.y[j] >= m.x[(i, j)] + +def budget_cstr(m): + return pyo.summation(m.y) <= m.budget + +def build_BFLP_model(users_and_facs_df, travel_dict, users, facs, budget_factor = 1.0, + cap_factor = 1.5, cutoff = 0.2, strict_assign_to_one = True, + cap_dict = None, continous = False, num_consider = -1): + """ + Build the BFLP model + :param users_and_facs_df: dataframe of the user and facility related input data + :param travel_dict: dictionary of the travel probabilities from each user to each facility + :param users: list of the users used in the instance + :param facs: list of the facilities used in the instance + :param budget_factor: proportion of facilities to open + :param cap_factor: factor by which all capacities are scaled + :param cutoff: minimum preference required for facility user pair to be considered; + only used if num_consider is -1 + :param strict_assign_to_one: boolean indicating whether each user has to be assigned; + i.e. whether sum_j x_ij == 1 (true) or sum_j x_ij <= 1 is a constraint + :param cap_dict: optional dictionary of all the capacities; if not provided (i.e. 
None) this is computed + from the other input data + :param continous: boolean indicating whether to solve this model continously + :param num_consider: number of closest facilities to consider for a user out of the open facilities, + same as n_r in paper; setting this to -1 means we use a cutoff instead + :return: the BFLP built model + """ + print('Build BFLP model...') + start = time.time() + m = pyo.ConcreteModel() + m.users_and_facs_df = users_and_facs_df + m.travel_dict = travel_dict + + # declare sets, variables and parameters + m.users = pyo.Set(initialize=users) + m.facs = pyo.Set(initialize=facs) + m.cutoff = pyo.Param(initialize=cutoff) + m.num_consider = pyo.Param(initialize=num_consider) + m.budget_factor = pyo.Param(initialize=budget_factor) + m.budget = pyo.Param(initialize=round(m.budget_factor * len(list(m.facs)))) + if num_consider == -1: + m.travel_pairs = pyo.Set(initialize=[(i, j) for i in m.users for j in m.facs + if m.travel_dict[i][j] > m.cutoff.value]) + else: + travel_pairs = [(i, j) for i in m.users for j in sorted(m.facs, key=lambda j_1: m.travel_dict[i][j_1], reverse=True)[0:num_consider]] + travel_pairs = list(set(travel_pairs)) + m.travel_pairs = pyo.Set(initialize=travel_pairs) + if cap_dict == None: + m.cap_factor = pyo.Param(initialize=cap_factor) + m.cap = pyo.Param(m.facs, initialize=cap_init) + else: + m.cap_factor = pyo.Param(initialize=cap_dict[facs[0]]/m.users_and_facs_df.at[facs[0], 'capacity'] ) + m.cap = pyo.Param(m.facs, initialize=lambda m, j: cap_dict[j]) + m.exp_travelers = pyo.Param(m.travel_pairs, initialize=exp_travelers_init) + m.strict_assign_to_one = pyo.Param(initialize=strict_assign_to_one) + m.continous = pyo.Param(initialize=continous) + + # variables for facilities open + m.y = pyo.Var(m.facs, bounds=(0, 1)) + # variables for assignment + if m.continous: + m.x = pyo.Var(m.travel_pairs, bounds=(0,1)) + else: + m.x = pyo.Var(m.travel_pairs, within=pyo.Binary) + + + m.u = pyo.Var(m.facs, bounds=(0, 1)) + + # 
constraints and objective + m.define_utilization = pyo.Constraint(m.facs, rule=define_utilization) + m.assign_to_one_cstr = pyo.Constraint(m.users, rule=assign_to_one_cstr) + m.assign_to_open_cstr = pyo.Constraint(m.travel_pairs, rule=assign_to_open_cstr) + m.budget_cstr = pyo.Constraint(rule=budget_cstr) + m.obj = pyo.Objective(rule=obj_expression, sense=pyo.minimize) + print('setup complete in ', time.time() - start, 'seconds') + + return m + + +def optimize_BFLP_model(m, threads=1, tolerance=0.0,time_limit = 20000): + """ + Solve the BFLP model + :param m: the model to be optimized + :param threads: number of threads + :param tolerance: tolerance on the optimality of the solution + :param time_limit: how long the model is allowed to run for, + this does not include the time to build the model + :return: boolean indicating whether the model is feasible and results dictionary + """ + print('Optimize model...') + opt = pyo.SolverFactory("gurobi") + opt.options["threads"] = threads + opt.options["MIPGap"] = tolerance + opt.options["NodefileStart"] = 0.5 + opt.options["Time_limit"] = time_limit + opt.options["NodeMethod"] = 2 + opt.options["OptimalityTol"] = 1e-4 + opt.options["FeasibilityTol"] = 1e-4 + # solve with enabling logging of solver + res = opt.solve(m, tee=True) + + if res.solver.termination_condition == TerminationCondition.infeasible: + return False, {} + + # create dictionary for the results after postprocessing + assignment = {} + results_x = {i : {} for i in m.users} + open_facs = [] + results_y = {j: 0 for j in m.facs} + for j in m.facs: + if m.y[j].value > 1e-4: + open_facs.append(j) + results_y[j] = 1 + # assign each user to facility with highest x_i,j + if m.continous: + for (i,j) in m.travel_pairs: + results_x[i][j] = min(m.x[(i,j)].value,1.0) + assignment = {i : max(results_x[i], key=results_x[i].get) for i in m.users} + else: + for (i,j) in m.travel_pairs: + results_x[i][j] = round(m.x[(i,j)].value,2) + for i in m.users: + for j in m.facs: + if 
(i, j) in m.travel_pairs and m.x[(i, j)].value > 1e-4: + assignment[i] = j + results = {"solution_details": + {"assignment": assignment, "open_facs": open_facs, "objective_value": pyo.value(m.obj), + "lower_bound": res['Problem'][0]['Lower bound'], "solving_time": res.Solver[0]['Time']}, + "model_details": + {"users": list(m.users), "facs": list(m.facs), "cap_factor": m.cap_factor.value, + "budget_factor": m.budget_factor.value, "cutoff": m.cutoff.value, + "num_consider_in_model": m.num_consider.value,"strict_assign_to_one": m.strict_assign_to_one.value, + "tolerance": tolerance, "time_limit": time_limit} + } + return True, results + +def run_BFLP_model(users_and_facs_df, travel_dict, users, facs, budget_factor = 1.0, + cap_factor = 1.5, cutoff = 0.2, strict_assign_to_one = True, + cap_dict = None, continous = False, num_consider = -1, + threads=1, tolerance=0.0,time_limit = 20000, output_filename = "basic_model.json"): + """ + Build and solve the BFLP model + :param users_and_facs_df: dataframe of the user and facility related input data + :param travel_dict: dictionary of the travel probabilities from each user to each facility + :param users: list of the users used in the instance + :param facs: list of the facilities used in the instance + :param budget_factor: proportion of facilities to open + :param cap_factor: factor by which all capacities are scaled + :param cutoff: minimum preference required for facility user pair to be considered; + only used if num_consider is -1 + :param strict_assign_to_one: boolean indicating whether each user has to be assigned; + i.e. whether sum_j x_ij == 1 (true) or sum_j x_ij <= 1 is a constraint + :param cap_dict: optional dictionary of all the capacities; if not provided (i.e. 
None) this is computed + from the other input data + :param continous: boolean indicating whether to solve this model continously + :param num_consider: number of closest facilities to consider for a user out of the open facilities, + same as n_r in paper; setting this to -1 means we use a cutoff instead + :param threads: number of threads + :param tolerance: tolerance on the optimality of the solution + :param time_limit: how long the model is allowed to run for, + this does not include the time to build the model + :param output_filename: the file name of where the results of this model should be stored + within the folder own_results + :return: boolean indicating whether the model is feasible and results dictionary + """ + start_time = time.time() + model = build_BFLP_model(users_and_facs_df, travel_dict, users, facs, budget_factor, cap_factor, + cutoff, strict_assign_to_one, cap_dict, continous, num_consider) + is_feasible, results = optimize_BFLP_model(model, threads, tolerance,time_limit) + results["solution_details"]["solving_time"] = time.time() - start_time + write_results_list([results], output_filename) + return is_feasible, results + \ No newline at end of file diff --git a/heuristics_and_mips/BFLP_heuristics.py b/heuristics_and_mips/BFLP_heuristics.py new file mode 100644 index 0000000..b62fceb --- /dev/null +++ b/heuristics_and_mips/BFLP_heuristics.py @@ -0,0 +1,1187 @@ +""" +Functions for running different BFLP heuristics +""" + +import time +from utils import * +import numpy as np +from BUAP_heuristics import greedily_assign_users, run_assignment_method, greedy_reassign_open +from BUAP_MIP import * +from math import inf +import random + +def Schmitt_Singh_localsearch(users_and_facs_df, travel_dict, users, facs, lb=0.0, budget_factor=1.0, cap_factor=1.5, + turnover_factor=0.02, tolerance=5e-3, time_limit=20000, iteration_limit=20): + """ + Schmitt and Singh local search heuristic, code copied from + 
https://github.com/schmitt-hub/preferential_access_and_fairness_in_waste_management + :param users_and_facs_df: dataframe of the user and facility related input data + :param travel_dict: dictionary of the travel probabilities from each user to each facility + :param users: list of the users used in the instance + :param facs: list of the facilities used in the instance + :param lb: a lower bound on the optimal objective value + :param budget_factor: ratio of facilities that are allowed to be opened + :param cap_factor: factor by which all capacities are scaled + :param turnover_factor: maximal fraction of facilities that are swapped in each iteration + :param tolerance: tolerance on the optimality of the solution + :param time_limit: time limit in seconds + :param iteration_limit: limit for the number of consecutive iterations without improvement + :return: dictionary of results + """ + print('Running Schmitt and Singh heurisitic...') + start_time = time.time() + # filter the data frame by the used facilities and sort it by their capacities + sorted_users_and_facs_df = users_and_facs_df.iloc[facs].sort_values(by=['capacity'], ascending=False) + + # initialize + nr_of_users = len(users) + nr_of_facs = len(facs) + budget = round(budget_factor * nr_of_facs) + cap_dict = {j: cap_factor * users_and_facs_df.at[j, 'capacity'] for j in facs} + best_open_facs = sorted_users_and_facs_df.index[:budget] + best_obj = cap_factor * sum(users_and_facs_df['capacity']) + best_gap = (best_obj-lb)/best_obj + best_assignment = {} + it_ctr = 0 + open_facs = best_open_facs.copy() + turnover = round(turnover_factor * budget) + is_feasible = False + + # create dictionary of expected travelers + travel_matrix = np.zeros((nr_of_users, nr_of_facs)) + for i in range(nr_of_users): + for j in range(nr_of_facs): + travel_matrix[i][j] = travel_dict[users[i]][facs[j]] + exp_travelers_matrix = np.array(users_and_facs_df.loc[users]['population']).reshape(-1, 1) * travel_matrix + exp_travelers_dict = 
{users[i]: {facs[j]: exp_travelers_matrix[i][j] for j in range(nr_of_facs)} + for i in range(nr_of_users)} + print(best_obj) + while time.time()-start_time <= time_limit: + # assign users + is_feasible_assignment, assignment_results = greedily_assign_users(users_and_facs_df, travel_dict, users, facs, + open_facs, exp_travelers_dict, cap_dict) + print(assignment_results['obj']) + # update best solution + if is_feasible_assignment and assignment_results['obj'] < best_obj: + is_feasible = True + best_obj = assignment_results['obj'] + best_gap = (best_obj-lb)/best_obj + best_open_facs = open_facs.copy() + best_assignment = assignment_results['assignment'] + it_ctr = 0 + else: + it_ctr += 1 + + # check stop criteria + if it_ctr == iteration_limit or best_gap < tolerance: + break + # update open facilities + sorted_facs = dict(sorted(assignment_results['utilization'].items(), key=lambda item: item[1])) + ditched_facs = list(sorted_facs.keys())[:turnover] + ditched_zipcodes = [area for (area, fac) in assignment_results['assignment'].items() if fac in ditched_facs] + open_facs = list(sorted_facs.keys())[turnover:] + access = {j: sum(exp_travelers_dict[i][j] for i in ditched_zipcodes) for j in facs if j not in open_facs} + sorted_access = dict(sorted(access.items(), key=lambda item: -item[1])) + open_facs += list(sorted_access.keys())[:turnover] + open_facs = pd.Index(open_facs) + + solving_time = time.time()-start_time + if not is_feasible: + print('no feasible solution could be constructed.') + return is_feasible, {} + + # write dictionary with results + results = {"solution_details": + {"assignment": best_assignment, "open_facs": list(best_open_facs), "objective_value": best_obj, + "lower_bound": lb, "solving_time": solving_time}, + "model_details": + {"users": users, "facs": facs, "cap_factor": cap_factor, "budget_factor": budget_factor, + "turnover_factor": turnover_factor, "tolerance": tolerance, "time_limit": time_limit, + "iteration_limit": iteration_limit} + } + 
return is_feasible, results + +def close_greedy_basic(users_and_facs_df, travel_dict, users, facs, budget_factor = 0.7, starting_open_facs = None, + cap_factor=1.5, threads=1, tolerance=0.0,cutoff_localsearch = 0.2, num_consider_in_relaxation = -1, + num_consider_per_iteration = 5,time_limit_relaxation=1000, assignment_method ="greedy_assign", + local_search_method = "None", time_limit_while_localsearch = 1, + final_assignment_method = "relaxation_rounding",output_filename = "test.json"): + """ + Basic implementation of the close greedy algorithm for the BFLP based on the DROP procedure. + :param users_and_facs_df: dataframe of the user and facility related input data + :param travel_dict: dictionary of the travel probabilities from each user to each facility + :param users: list of the users used in the instance + :param facs: list of the facilities used in the instance + :param budget_factor: proportion of facilities we want to remain open + :param starting_open_facs: if this is none, we start with all facilities open, + otherwise we start with the ones that are given + :param cap_factor: factor by which all capacities are scaled + :param threads: number of threads used in the relaxation + :param tolerance: tolerance on the optimality of the solution used in the relaxation + :param cutoff_localsearch: parameter for localsearch for minimum preference required to reassing/swap to that facility + :param num_consider_in_relaxation: number of most preferred facilities to consider in relaxation, + if -1 all facilities are considered in relaxation + :param num_consider_per_iteration: number of facilities to consider closing in each iteration + :param time_limit_relaxation: time limit in seconds for the optimization used in the relaxation + :param assignment_method: which method of assignment used when deciding which facilities are best to close + options are "relaxation_rounding" or "greedy_assign" + :param local_search_method: BUAP local search method to run on the 
assignment resulting from assignment_method + and final_assignment_method: options are "None", "local_random_reassign", "local_random_swap" + :param: time_limit_while_localsearch: the time limit in seconds used in the while loop in the BUAP localsearch + :param final_assignment_method: assignment method to run after open facilities decided + if this is different to the one used in each iteration, currently only support relaxation rounding + :param output_filename: name of the output file + :return: boolean indicating feasibility and dictionary containing the results + """ + start_time = time.time() + # dictionary of capacities of facilities after scaling + cap_dict = {j: cap_factor * users_and_facs_df.at[j, 'capacity'] for j in facs} + nr_of_users = len(users) + nr_of_facs = len(facs) + # create dictionary of how many users are expected to travel to each facility + # using numpy here since it is 10 times faster + travel_matrix = np.zeros((nr_of_users, nr_of_facs)) + for i in range(nr_of_users): + for j in range(nr_of_facs): + travel_matrix[i][j] = travel_dict[users[i]][facs[j]] + exp_travelers_matrix = np.array(users_and_facs_df.loc[users]['population']).reshape(-1, 1) * travel_matrix + exp_travelers_dict = {users[i]: {facs[j]: exp_travelers_matrix[i][j] for j in range(nr_of_facs)} + for i in range(nr_of_users)} + + # number of facilities that should remain open + budget = round(budget_factor*len(facs)) + # the facilities we start with being open, defaults to all facilities + open_facs = [] + if starting_open_facs == None or len(starting_open_facs) < budget: + open_facs = facs.copy() + else: + open_facs = starting_open_facs.copy() + # the number of facilities we need to close + num_facs_to_close = len(open_facs) - budget + print("Number to close: " + str(num_facs_to_close)) + # initialise options for relaxation rounding + options_relaxation = {"model": None, "new_open_facs": [], "new_closed_facs": [], + "time_limit_relaxation": time_limit_relaxation, "tolerance": 
tolerance, "threads": threads, + "num_consider_in_relaxation": num_consider_in_relaxation } + options_localsearch = {"cutoff": cutoff_localsearch, "time_limit_while_localsearch": time_limit_while_localsearch} + is_feasible_assignment, assignment_results = run_assignment_method(assignment_method, local_search_method, + users_and_facs_df, travel_dict, users, + facs, open_facs.copy(), exp_travelers_dict, + cap_dict, + options_localsearch = options_localsearch, + options_relaxation=options_relaxation) + if not is_feasible_assignment: + return False, {} + for k in range(num_facs_to_close): + print("Facility to close " + str(k)) + underused_facs = sorted(open_facs.copy(), + key=lambda j: assignment_results["utilization"][j])[:min(num_consider_per_iteration, len(open_facs))] + is_feasible_assignment_best, assignment_results_best, j_best = False, {"obj": inf}, None + options_relaxation["new_open_facs"] = [] + for j in underused_facs: + current_open_facs = open_facs.copy() + current_open_facs.remove(j) + options_relaxation["new_closed_facs"] = [j] + is_feasible_assignment_current, assignment_results_current = run_assignment_method(assignment_method, + local_search_method, + users_and_facs_df, + travel_dict, users, + facs, current_open_facs, + exp_travelers_dict, cap_dict, + options_localsearch = options_localsearch, + options_relaxation=options_relaxation) + if is_feasible_assignment_current and assignment_results_current["obj"] < assignment_results_best["obj"]: + is_feasible_assignment_best = is_feasible_assignment_current + assignment_results_best = assignment_results_current + j_best = j + options_relaxation["new_open_facs"] = [j] + if assignment_method == "relaxation_rounding": + options_relaxation["model"] = assignment_results_current["other"]["model"] + if is_feasible_assignment_best == False: + return False, {} + open_facs.remove(j_best) + if assignment_method == "relaxation_rounding" and options_relaxation["new_open_facs"][0] != j_best: + 
change_open_facs(options_relaxation["model"], options_relaxation["new_open_facs"], [j_best]) + is_feasible_assignment = is_feasible_assignment_best + assignment_results = assignment_results_best + if assignment_method != "relaxation_rounding" and final_assignment_method == "relaxation_rounding": + options_relaxation["new_closed_facs"] = [] + options_relaxation["new_open_facs"] = [] + options_relaxation["model"] = None + is_feasible_assignment_current, assignment_results_current = run_assignment_method(final_assignment_method, + local_search_method, + users_and_facs_df, + travel_dict, users, + facs, open_facs, + exp_travelers_dict, + cap_dict, + options_localsearch = options_localsearch, + options_relaxation=options_relaxation) + if is_feasible_assignment_current and assignment_results_current["obj"]< assignment_results["obj"]: + print("Better final assignment") + is_feasible_assignment = is_feasible_assignment_current + assignment_results = assignment_results_current + + end_time = time.time() + if not is_feasible_assignment: + return False, {} + result = {"solution_details": + {"assignment": assignment_results["assignment"], "open_facs": open_facs, + "objective_value": assignment_results["obj"], + "solving_time": end_time-start_time}, + "model_details": + {"users": users, "facs": facs, "cap_factor": cap_factor, + "budget_factor": budget_factor, "starting_open_facs": starting_open_facs + }, + "algorithm_details": { + "overall_algorithm": "close_greedily", "assignment_method": assignment_method, + "local_search_assignment_method": local_search_method, + "num_consider_per_iteration": num_consider_per_iteration, + "cutoff_localsearch": cutoff_localsearch,"time_limit_while_localsearch": time_limit_while_localsearch, + "final_assignment_method": final_assignment_method, "num_consider_in_relaxaton": num_consider_in_relaxation + } + } + write_results_list([result], output_filename) + return True, result + +def close_greedy_reuse_assignment(users_and_facs_df, travel_dict, 
                                  users, facs, budget_factor = 0.7,
                                  starting_open_facs = None, cap_factor=1.5, threads=1, tolerance=0.0,
                                  cutoff_localsearch = 0.2, num_consider_in_relaxation = -1,
                                  num_consider_per_iteration = 5, time_limit_relaxation=1000,
                                  assignment_method ="greedy_assign", local_search_method = "None",
                                  time_limit_while_localsearch = 1, num_fix_assignment_iteration = 50,
                                  final_assignment_method = "relaxation_rounding", output_filename = "test.json"):
    """
    First improvement to the basic implementation of the close greedy algorithm for the BFLP based on
    the DROP procedure; this reuses the previous iteration's assignment and fixes it using the greedy
    reassign algorithm.
    :param users_and_facs_df: dataframe of the user and facility related input data
    :param travel_dict: dictionary of the travel probabilities from each user to each facility
    :param users: list of the users used in the instance
    :param facs: list of the facilities used in the instance
    :param budget_factor: proportion of facilities we want to remain open
    :param starting_open_facs: if this is None, we start with all facilities open,
        otherwise we start with the ones that are given
    :param cap_factor: factor by which all capacities are scaled
    :param threads: number of threads used in the relaxation
    :param tolerance: tolerance on the optimality of the solution used in the relaxation
    :param cutoff_localsearch: parameter for BUAP local search for minimum preference required to
        reassign to the facility
    :param num_consider_in_relaxation: number of most preferred facilities to consider in relaxation;
        if -1 all facilities are considered in relaxation
    :param num_consider_per_iteration: number of facilities to consider closing in each iteration
    :param time_limit_relaxation: time limit in seconds for the optimization used in the relaxation
    :param assignment_method: which method of assignment is used to create the first assignment;
        options are "relaxation_rounding" or "greedy_assign"
    :param local_search_method: BUAP local search method to run on the assignment resulting from
        assignment_method and final_assignment_method; options are "None", "local_random_reassign",
        "local_random_swap"
    :param time_limit_while_localsearch: the time limit in seconds used in the while loop in the local search
    :param num_fix_assignment_iteration: number of iterations after which to recompute the assignment
        from scratch using assignment_method; no saving of model for relaxation rounding implemented,
        so this is only for when assignment_method is "greedy_assign"
    :param final_assignment_method: assignment method to run after open facilities decided;
        currently only relaxation rounding is supported
    :param output_filename: name of the output file
    :return: boolean indicating feasibility and dictionary containing the results
    """
    start_time = time.time()
    # dictionary of capacities of facilities after scaling
    cap_dict = {j: cap_factor * users_and_facs_df.at[j, 'capacity'] for j in facs}
    nr_of_users = len(users)
    nr_of_facs = len(facs)
    # create dictionary of how many users are expected to travel to each facility
    # using numpy here since it is 10 times faster
    travel_matrix = np.zeros((nr_of_users, nr_of_facs))
    for i in range(nr_of_users):
        for j in range(nr_of_facs):
            travel_matrix[i][j] = travel_dict[users[i]][facs[j]]
    exp_travelers_matrix = np.array(users_and_facs_df.loc[users]['population']).reshape(-1, 1) * travel_matrix
    exp_travelers_dict = {users[i]: {facs[j]: exp_travelers_matrix[i][j] for j in range(nr_of_facs)}
                          for i in range(nr_of_users)}

    # number of facilities that should remain open
    budget = round(budget_factor*len(facs))
    # the facilities we start with being open, defaults to all facilities
    open_facs = []
    if starting_open_facs == None or len(starting_open_facs) < budget:
        open_facs = facs.copy()
    else:
        open_facs = starting_open_facs.copy()
    # the number of facilities we need to close
    num_facs_to_close = len(open_facs) - budget
    print("Number to close: " + str(num_facs_to_close))
    # initialise options for relaxation rounding
    options_relaxation = {"model": None, "new_open_facs": [], "new_closed_facs": [],
                          "time_limit_relaxation": time_limit_relaxation, "tolerance": tolerance, "threads": threads,
                          "num_consider_in_relaxation": num_consider_in_relaxation }
    options_localsearch = {"cutoff": cutoff_localsearch, "time_limit_while_localsearch": time_limit_while_localsearch}
    # compute the starting assignment once; later iterations only repair it greedily
    is_feasible_assignment, assignment_results = run_assignment_method(assignment_method, local_search_method,
                                                                       users_and_facs_df, travel_dict, users,
                                                                       facs, open_facs.copy(), exp_travelers_dict,
                                                                       cap_dict, options_localsearch = options_localsearch,
                                                                       options_relaxation=options_relaxation)
    if not is_feasible_assignment:
        return False, {}
    for i in range(num_facs_to_close):
        print("closing facility " + str(i))
        # candidates: the num_consider_per_iteration least utilized open facilities
        if len(open_facs) > num_consider_per_iteration:
            underused_facs = sorted(open_facs.copy(),
                                    key=lambda j: assignment_results["utilization"][j])[0:min(num_consider_per_iteration, len(open_facs))]
        else:
            underused_facs = open_facs.copy()
        is_feasible_assignment_best, assignment_results_best, j_best = False, {"obj": inf}, None
        for j in underused_facs:
            # repair the current assignment after tentatively closing j
            # NOTE(review): assignment is passed without .copy() here, unlike similar calls elsewhere —
            # assumes greedily_assign_users does not mutate its input; confirm against its implementation
            is_feasible_assignment_current, assignment_results_current = greedily_assign_users(users_and_facs_df,
                                                                                               travel_dict, users, facs,
                                                                                               open_facs,
                                                                                               exp_travelers_dict,
                                                                                               cap_dict,
                                                                                               assignment = assignment_results["assignment"],
                                                                                               fac_to_close = j)
            if is_feasible_assignment_current and assignment_results_current["obj"] < assignment_results_best["obj"]:
                is_feasible_assignment_best = is_feasible_assignment_current
                assignment_results_best = assignment_results_current.copy()
                j_best = j
        if is_feasible_assignment_best == False:
            return False, {}
        open_facs.remove(j_best)
        is_feasible_assignment = is_feasible_assignment_best
        assignment_results = assignment_results_best.copy()
        print("objective this round " + str( assignment_results["obj"]))
        # periodically rebuild the assignment from scratch to limit drift from repeated greedy repairs
        if i % num_fix_assignment_iteration == 0 and i> 0:
            print("Running assignment method")
            is_feasible_assignment_rerun, assignment_results_rerun = run_assignment_method(assignment_method,
                                                                                           local_search_method,
                                                                                           users_and_facs_df,
                                                                                           travel_dict, users, facs,
                                                                                           open_facs.copy(),
                                                                                           exp_travelers_dict, cap_dict,
                                                                                           options_localsearch = options_localsearch,
                                                                                           options_relaxation=options_relaxation)
            if is_feasible_assignment_rerun and assignment_results_rerun["obj"] < assignment_results["obj"]:
                print("Used new assignment")
                is_feasible_assignment = is_feasible_assignment_rerun
                assignment_results = assignment_results_rerun

    # optionally recompute the final assignment with relaxation rounding; only kept if it improves
    if final_assignment_method == "relaxation_rounding":
        options_relaxation["new_closed_facs"] = []
        options_relaxation["new_open_facs"] = []
        is_feasible_assignment_current, assignment_results_current = run_assignment_method(final_assignment_method,
                                                                                           local_search_method,
                                                                                           users_and_facs_df,
                                                                                           travel_dict, users,
                                                                                           facs, open_facs,
                                                                                           exp_travelers_dict,
                                                                                           cap_dict,
                                                                                           options_localsearch = options_localsearch,
                                                                                           options_relaxation=options_relaxation)
        if is_feasible_assignment_current and assignment_results_current["obj"]< assignment_results["obj"]:
            print("Better final assignment")
            is_feasible_assignment = is_feasible_assignment_current
            assignment_results = assignment_results_current

    end_time = time.time()
    if not is_feasible_assignment:
        return False, {}
    result = {"solution_details":
                  {"assignment": assignment_results["assignment"], "open_facs": open_facs,
                   "objective_value": assignment_results["obj"], "solving_time": end_time-start_time},
              "model_details":
                  {"users": users, "facs": facs, "cap_factor": cap_factor,
                   "budget_factor": budget_factor, "starting_open_facs": starting_open_facs
                   },
              "algorithm_details": {
                  "overall_algorithm": "close_greedy_reuse_assignment", "assignment_method": assignment_method,
                  "local_search_assignment_method": local_search_method,
                  "num_consider_per_iteration": num_consider_per_iteration,
                  "cutoff_localsearch": cutoff_localsearch,"time_limit_while_localsearch": time_limit_while_localsearch,
                  "final_assignment_method": final_assignment_method, "num_consider_in_relaxaton": num_consider_in_relaxation,
                  "num_fix_assignment_iteration": num_fix_assignment_iteration, "fixing_assignment_method": "greedy_reassign"
                  }
              }
    write_results_list([result], output_filename)
    return True, result

def close_greedy_final(users_and_facs_df, travel_dict, users, facs, budget_factor = 0.7,
                       starting_open_facs = None,cap_factor=1.5, threads=1, tolerance=0.0,
                       cutoff_localsearch = 0.2, num_consider_in_relaxation = -1,
                       num_consider_per_iteration = 50,time_limit_relaxation=1000,
                       assignment_method ="greedy_assign",local_search_method = "None",
                       time_limit_while_localsearch = 1, num_fix_assignment_iteration = 5,
                       final_assignment_method = "relaxation_rounding",output_filename = "test.json",
                       write_to_file = True):
    """
    Final implementation of the close greedy algorithm for the BFLP with both improvements based on the DROP procedure.
    :param users_and_facs_df: dataframe of the user and facility related input data
    :param travel_dict: dictionary of the travel probabilities from each user to each facility
    :param users: list of the users used in the instance
    :param facs: list of the facilities used in the instance
    :param budget_factor: proportion of facilities we want to remain open
    :param starting_open_facs: if this is None, we start with all facilities open,
        otherwise we start with the ones that are given
    :param cap_factor: factor by which all capacities are scaled
    :param threads: number of threads used in the relaxation
    :param tolerance: tolerance on the optimality of the solution used in the relaxation
    :param cutoff_localsearch: parameter for BUAP local search for minimum preference required to
        reassign to the facility
    :param num_consider_in_relaxation: number of most preferred facilities to consider in relaxation;
        if -1 all facilities are considered in relaxation
    :param num_consider_per_iteration: number of facilities to consider closing in each iteration
    :param time_limit_relaxation: time limit in seconds for the optimization used in the relaxation
    :param assignment_method: which method of assignment is used to create the first assignment;
        options are "relaxation_rounding" or "greedy_assign"
    :param local_search_method: BUAP local search method to run on the assignment resulting from
        assignment_method and final_assignment_method; options are "None", "local_random_reassign",
        "local_random_swap"
    :param time_limit_while_localsearch: the time limit in seconds used in the while loop in the local search
    :param num_fix_assignment_iteration: number of iterations after which to recompute the assignment from scratch
        using assignment_method; no saving of model for relaxation rounding implemented, so this is only for
        when assignment_method is "greedy_assign"
    :param final_assignment_method: assignment method to run after open facilities decided;
        currently only relaxation rounding is supported
    :param output_filename: name of the output file
    :param write_to_file: boolean indicating whether the result should be written to file
    :return: boolean indicating feasibility and dictionary containing the results
    """
    start_time = time.time()
    # dictionary of capacities of facilities after scaling
    cap_dict = {j: cap_factor * users_and_facs_df.at[j, 'capacity'] for j in facs}
    nr_of_users = len(users)
    nr_of_facs = len(facs)
    # create dictionary of how many users are expected to travel to each facility
    # using numpy here since it is 10 times faster
    travel_matrix = np.zeros((nr_of_users, nr_of_facs))
    for i in range(nr_of_users):
        for j in range(nr_of_facs):
            travel_matrix[i][j] = travel_dict[users[i]][facs[j]]
    exp_travelers_matrix = np.array(users_and_facs_df.loc[users]['population']).reshape(-1, 1) * travel_matrix
    exp_travelers_dict = {users[i]: {facs[j]: exp_travelers_matrix[i][j] for j in range(nr_of_facs)}
                          for i in range(nr_of_users)}

    # number of facilities that should remain open
    budget = round(budget_factor*len(facs))
    open_facs = []
    # the facilities we start with being open, defaults to all facilities
    if starting_open_facs == None or len(starting_open_facs) < budget:
        open_facs = facs.copy()
    else:
        open_facs = starting_open_facs.copy()
    # the number of facilities we need to close
    num_facs_to_close = len(open_facs) - budget
    print("Number to close: " + str(num_facs_to_close))
    # initialise options for relaxation rounding
    options_relaxation = {"model": None, "new_open_facs": [], "new_closed_facs": [],
                          "time_limit_relaxation": time_limit_relaxation, "tolerance": tolerance,
                          "threads": threads, "num_consider_in_relaxation": num_consider_in_relaxation }
    options_localsearch = {"cutoff": cutoff_localsearch, "time_limit_while_localsearch": time_limit_while_localsearch}
    is_feasible_assignment, assignment_results = run_assignment_method(assignment_method, local_search_method,
                                                                       users_and_facs_df, travel_dict, users,
                                                                       facs, open_facs.copy(), exp_travelers_dict,
                                                                       cap_dict, options_localsearch = options_localsearch,
                                                                       options_relaxation=options_relaxation)
    if not is_feasible_assignment:
        return False, {}
    # second improvement over the basic version: candidates are ranked by the objective change their
    # tentative closure caused in the previous round, instead of by utilization
    facs_to_check = open_facs.copy()
    change_in_objective = {j: 0 for j in open_facs}
    for i in range(num_facs_to_close):
        print("closing facility " + str(i))
        is_feasible_assignment_best, assignment_results_best, j_best = False, {"obj": inf}, None
        for j in facs_to_check:
            # repair the current assignment after tentatively closing j
            # NOTE(review): assignment is passed without .copy() — assumes greedily_assign_users
            # does not mutate its input; confirm against its implementation
            is_feasible_assignment_current, assignment_results_current = greedily_assign_users(users_and_facs_df,
                                                                                               travel_dict, users, facs,
                                                                                               open_facs,
                                                                                               exp_travelers_dict,
                                                                                               cap_dict,
                                                                                               assignment = assignment_results["assignment"],
                                                                                               fac_to_close = j)
            change_in_objective[j] = assignment_results_current['obj'] - assignment_results['obj']
            if is_feasible_assignment_current and assignment_results_current["obj"] < assignment_results_best["obj"]:
                is_feasible_assignment_best = is_feasible_assignment_current
                assignment_results_best = assignment_results_current.copy()
                j_best = j
        if is_feasible_assignment_best == False:
            return False, {}
        open_facs.remove(j_best)
        is_feasible_assignment = is_feasible_assignment_best
        assignment_results = assignment_results_best.copy()
        print("objective this round " + str( assignment_results["obj"]))
        # re-rank candidates for the next round by the objective change recorded above
        facs_to_check = sorted(open_facs,
                               key=lambda j: change_in_objective[j])[0:min(len(open_facs), num_consider_per_iteration)]
        # periodically rebuild the assignment from scratch to limit drift from repeated greedy repairs
        if i % num_fix_assignment_iteration == 0 and i> 0:
            print("Running assignment method")
            is_feasible_assignment_rerun, assignment_results_rerun = run_assignment_method(assignment_method,
                                                                                           local_search_method,
                                                                                           users_and_facs_df,
                                                                                           travel_dict, users, facs,
                                                                                           open_facs.copy(),
                                                                                           exp_travelers_dict, cap_dict,
                                                                                           options_localsearch = options_localsearch,
                                                                                           options_relaxation=options_relaxation)
            if is_feasible_assignment_rerun and assignment_results_rerun["obj"] < assignment_results["obj"]:
                print("Used new assignment")
                is_feasible_assignment = is_feasible_assignment_rerun
                assignment_results = assignment_results_rerun

    # optionally recompute the final assignment with relaxation rounding; only kept if it improves
    if final_assignment_method == "relaxation_rounding":
        options_relaxation["new_closed_facs"] = []
        options_relaxation["new_open_facs"] = []
        is_feasible_assignment_current, assignment_results_current = run_assignment_method(final_assignment_method,
                                                                                           local_search_method,
                                                                                           users_and_facs_df, travel_dict,
                                                                                           users, facs, open_facs,
                                                                                           exp_travelers_dict, cap_dict,
                                                                                           options_localsearch = options_localsearch,
                                                                                           options_relaxation=options_relaxation)
        if is_feasible_assignment_current and assignment_results_current["obj"]< assignment_results["obj"]:
            print("Better final assignment")
            is_feasible_assignment = is_feasible_assignment_current
            assignment_results = assignment_results_current

    end_time = time.time()
    if not is_feasible_assignment:
        return False, {}
    result = {"solution_details":
                  {"assignment": assignment_results["assignment"], "open_facs": open_facs,
                   "objective_value": assignment_results["obj"], "solving_time": end_time-start_time},
              "model_details":
                  {"users": users, "facs": facs, "cap_factor": cap_factor,
                   "budget_factor": budget_factor, "starting_open_facs": starting_open_facs
                   },
              "algorithm_details": {
                  "overall_algorithm": "close_greedy_final", "assignment_method": assignment_method,
                  "local_search_assignment_method": local_search_method,
                  "num_consider_per_iteration": num_consider_per_iteration, "cutoff_localsearch": cutoff_localsearch,
                  "time_limit_while_localsearch": time_limit_while_localsearch,
                  "final_assignment_method": final_assignment_method, "num_consider_in_relaxaton": num_consider_in_relaxation, "num_fix_assignment_iteration": num_fix_assignment_iteration,
                  "fixing_assignment_method": "greedy_reassign"
                  }
              }
    if write_to_file:
        write_results_list([result], output_filename)
    return True, result

def open_greedy(users_and_facs_df, travel_dict, users, facs, budget_factor = 0.7,
                cap_factor=1.5,
                threads=1, tolerance=0.0,cutoff_localsearch = 0.2, num_consider_in_relaxation = -1,
                num_consider_per_iteration = 50, depth_greedy_reassignment = 1,time_limit_relaxation=1000,
                assignment_method ="greedy_assign", local_search_method = "None",
                time_limit_while_localsearch = 1, num_fix_assignment_iteration = 5,
                final_assignment_method = "relaxation_rounding",output_filename = "test.json", write_to_file = True):
    """
    Implementation of the open greedy algorithm for the BFLP based on the ADD procedure.
    :param users_and_facs_df: dataframe of the user and facility related input data
    :param travel_dict: dictionary of the travel probabilities from each user to each facility
    :param users: list of the users used in the instance
    :param facs: list of the facilities used in the instance
    :param budget_factor: proportion of facilities we want to remain open
    :param cap_factor: factor by which all capacities are scaled
    :param threads: number of threads used in the relaxation
    :param tolerance: tolerance on the optimality of the solution used in the relaxation
    :param cutoff_localsearch: parameter for BUAP local search for minimum preference required to
        reassign to the facility
    :param num_consider_in_relaxation: number of most preferred facilities to consider in relaxation;
        if -1 all facilities are considered in relaxation
    :param num_consider_per_iteration: number of facilities to consider opening in each iteration
    :param depth_greedy_reassignment: depth of the greedy reassignment within the greedy reassign
        open algorithm
    :param time_limit_relaxation: time limit in seconds for the optimization used in the relaxation
    :param assignment_method: which method of assignment is used to create the first assignment;
        options are "relaxation_rounding" or "greedy_assign"
    :param local_search_method: BUAP local search method to run on the assignment resulting from
        assignment_method and final_assignment_method; options are "None", "local_random_reassign",
        "local_random_swap"
    :param time_limit_while_localsearch: the time limit in seconds used in the while loop in the local search
    :param num_fix_assignment_iteration: number of iterations after which to recompute the assignment from scratch
        using assignment_method; no saving of model for relaxation rounding implemented, so this is only for
        when assignment_method is "greedy_assign"
    :param final_assignment_method: assignment method to run after open facilities decided;
        currently only relaxation rounding is supported
    :param output_filename: name of the output file
    :param write_to_file: boolean indicating whether results should be written to file
    :return: boolean indicating feasibility and dictionary containing the results
    """
    start_time = time.time()
    # dictionary of capacities of facilities after scaling
    cap_dict = {j: cap_factor * users_and_facs_df.at[j, 'capacity'] for j in facs}
    nr_of_users = len(users)
    nr_of_facs = len(facs)
    # create dictionary of how many users are expected to travel to each facility
    # using numpy here since it is 10 times faster
    travel_matrix = np.zeros((nr_of_users, nr_of_facs))
    for i in range(nr_of_users):
        for j in range(nr_of_facs):
            travel_matrix[i][j] = travel_dict[users[i]][facs[j]]
    exp_travelers_matrix = np.array(users_and_facs_df.loc[users]['population']).reshape(-1, 1) * travel_matrix
    exp_travelers_dict = {users[i]: {facs[j]: exp_travelers_matrix[i][j] for j in range(nr_of_facs)}
                          for i in range(nr_of_users)}

    # number of facilities that should be open
    num_facs_to_open = round(budget_factor*len(facs))
    # ADD procedure: start with every facility closed and open one per iteration
    closed_facs = facs.copy()
    open_facs = []
    print("Number to open: " + str(num_facs_to_open))
    # initialise options for relaxation rounding
    options_relaxation = {"model": None, "new_open_facs": [], "new_closed_facs": [],
                          "time_limit_relaxation": time_limit_relaxation, "tolerance": tolerance,
                          "threads": threads, "num_consider_in_relaxation": num_consider_in_relaxation }
    options_localsearch = {"cutoff": cutoff_localsearch, "time_limit_while_localsearch": time_limit_while_localsearch}
    sum_capacities = sum(cap_dict[j] for j in facs)
    # start from the all-unassigned "assignment" with worst-case objective sum_capacities
    # NOTE(review): key "uassigned_users" (sic) kept — likely meant "unassigned_users", but other
    # code may read this exact spelling
    is_feasible_assignment, assignment_results = False, {'assignment': {i: "unassigned" for i in users},
                                                         'utilization': {j: 0 for j in facs},
                                                         'obj': sum_capacities, 'other': {'uassigned_users': users.copy()}}
    facs_to_check = closed_facs.copy()
    change_in_objective = {j: 0 for j in open_facs}
    for i in range(num_facs_to_open):
        print("opening facility " + str(i))
        is_feasible_assignment_best, assignment_results_best, j_best = False, {"obj": sum_capacities}, None
        for j in facs_to_check:
            # repair the current assignment after tentatively opening j
            is_feasible_assignment_current, assignment_results_current = greedy_reassign_open(users_and_facs_df,
                                                                                             travel_dict, users,
                                                                                             facs, open_facs.copy(),
                                                                                             exp_travelers_dict,
                                                                                             cap_dict,
                                                                                             assignment = assignment_results["assignment"].copy(),
                                                                                             fac_to_open = j,
                                                                                             depth_greedy_reassignment = depth_greedy_reassignment)
            change_in_objective[j] = assignment_results_current['obj'] - assignment_results['obj']
            # only accept this if better than previous best one
            # also only accept either if it is feasible or if our previous assignment is also not feasible
            if (is_feasible_assignment_current or (is_feasible_assignment_current == is_feasible_assignment)) and assignment_results_current["obj"] < assignment_results_best["obj"]:
                is_feasible_assignment_best = is_feasible_assignment_current
                assignment_results_best = assignment_results_current.copy()
                j_best = j
        # NOTE(review): if no candidate was accepted above, j_best is still None and
        # closed_facs.remove(None) raises ValueError — confirm this case is unreachable
        open_facs.append(j_best)
        closed_facs.remove(j_best)
        is_feasible_assignment = is_feasible_assignment_best
        assignment_results = assignment_results_best.copy()
        print("objective this round " + str( assignment_results["obj"]))
        # re-rank candidates for the next round by the objective change recorded above
        facs_to_check = sorted(closed_facs,
                               key=lambda j: change_in_objective[j])[0:min(len(closed_facs), num_consider_per_iteration)]
        # periodically rebuild the assignment from scratch to limit drift from repeated greedy repairs
        if i % num_fix_assignment_iteration == 0 and i> 0:
            print("Running assignment method")
            is_feasible_assignment_rerun, assignment_results_rerun = run_assignment_method(assignment_method,
                                                                                           local_search_method,
                                                                                           users_and_facs_df,
                                                                                           travel_dict, users, facs,
                                                                                           open_facs.copy(),
                                                                                           exp_travelers_dict, cap_dict,
                                                                                           options_localsearch = options_localsearch,
                                                                                           options_relaxation=options_relaxation)
            if is_feasible_assignment_rerun and assignment_results_rerun["obj"] < assignment_results["obj"]:
                print("Used new assignment")
                is_feasible_assignment = is_feasible_assignment_rerun
                assignment_results = assignment_results_rerun

    # optionally recompute the final assignment with relaxation rounding; only kept if it improves
    if final_assignment_method == "relaxation_rounding":
        options_relaxation["new_closed_facs"] = []
        options_relaxation["new_open_facs"] = []
        is_feasible_assignment_current, assignment_results_current = run_assignment_method(final_assignment_method,
                                                                                           local_search_method,
                                                                                           users_and_facs_df,
                                                                                           travel_dict, users, facs,
                                                                                           open_facs, exp_travelers_dict,
                                                                                           cap_dict,
                                                                                           options_localsearch = options_localsearch,
                                                                                           options_relaxation=options_relaxation)
        if is_feasible_assignment_current and assignment_results_current["obj"]< assignment_results["obj"]:
            print("Better final assignment")
            is_feasible_assignment = is_feasible_assignment_current
            assignment_results = assignment_results_current

    end_time = time.time()
    if not is_feasible_assignment:
        return False, {}
    result = {"solution_details":
                  {"assignment": assignment_results["assignment"], "open_facs": open_facs,
                   "objective_value": assignment_results["obj"], "solving_time": end_time-start_time},
              "model_details":
                  {"users": users, "facs": facs, "cap_factor": cap_factor,
                   "budget_factor": budget_factor,
                   },
              "algorithm_details": {
                  "overall_algorithm": "open_greedy", "assignment_method": assignment_method,
                  "local_search_assignment_method": local_search_method,
                  "num_consider_per_iteration": num_consider_per_iteration, "cutoff_localsearch": cutoff_localsearch,
                  "time_limit_while_localsearch": time_limit_while_localsearch,
                  "final_assignment_method": final_assignment_method, "num_consider_in_relaxaton": num_consider_in_relaxation,
                  "num_fix_assignment_iteration": num_fix_assignment_iteration,
                  "fixing_assignment_method": "greedy_reassign_open", "depth_greedy_reassignment": depth_greedy_reassignment
                  }
              }
    if write_to_file:
        write_results_list([result], output_filename)
    return True, result

def localsearch_without_change(users_and_facs_df, travel_dict, users, facs, starting_assignment,
                               starting_open_facs, starting_obj, budget_factor, cap_factor = 1.5,
                               num_consider_per_iteration = 50, time_limit_while_loop = 120,
                               iteration_limit_while_loop = 300, assignment_method="greedy_assign",
                               final_assignment_method = "relaxation_rounding",
                               localsearch_method = "local_random_reassign", num_consider_in_relaxation = 50,
                               tolerance_relaxation = 2e-3, time_limit_relaxation = 1000,
                               threads_relaxation= 1,
                               cutoff_BUAP_localsearch = 0.2, time_limit_while_BUAP_localsearch = 1,
                               depth_greedy_reassignment = 1, fix_assignment = False,
                               output_filename = "test_without_change.json", write_to_file = True):
    """
    Local search based on ADD / DROP for overall problem with random choices of which facilities to consider.
+ :param users_and_facs_df: dataframe of the user and facility related input data + :param travel_dict: dictionary of the travel probabilities from each user to each facility + :param users: list of the users used in the instance + :param facs: list of the facilities used in the instance + :param starting_assignment: the assignment of users on which the local search starts its search + :param starting_open_facs: the open faciltiies of the solution on which the local search starts its search + :param starting_obj: the objective function value of the solution on which the local search starts its search + :param budget_factor: proportion of facilities to open + :param cap_factor: factor by which all capacities are scaled + :param num_consider_per_iteration: how many facilties should be considered for the chosen facility + to be swapped out with + :param time_limit_while_loop: the maximum time that the main while loop of the local search should run for + :param iteration_limit_while_loop: the maximum number of iterations the main while loop of + the local search should run for + :param assignment_method: which method of assignment is used to fix the assignment after change is made, + options are "relaxation_rounding" or "greedy_assign" + :param final_assignment_method: assignment method to run after open facilities decided, + currently only support relaxation rounding + :param local_search_method: BUAP local search method to run on the assignment resulting from assignment_method + and final_assignment_method; options are "None", "local_random_reassign", "local_random_swap" + :param num_consider_in_relaxation: number of facilities to consider for each user in BUAP relaxation, + if -1 all facilities are considered in relaxation + :param tolerance_relaxation: tolerance of the relaxation of the BUAP + :param time_limit_relaxation: time limit of running the BUAP relaxation + :param threads_relaxation: number of threads used when solving the relaxation of the BUAP + :param: 
cutoff_BUAP_localsearch: parameter for BUAP localsearch for minimum preference required to + reassign to the facility + :param time_limit_while_BUAP_localsearch: the time limit in seconds used in the while loop in the BUAP localsearch + :param fix_assignment: if true, fixes assignment with assignment_method after change is made + :param output_filename: name of the output file + :param write_to_file: boolean indicating whether results should be written to file + :return: boolean indicating feasibility and dictionary containing the results + """ + start_time = time.time() + # dictionary of capacities of facilities after scaling + cap_dict = {j: cap_factor * users_and_facs_df.at[j, 'capacity'] for j in facs} + nr_of_users = len(users) + nr_of_facs = len(facs) + # create dictionary of how many users are expected to travel to each facility + # using numpy here since it is 10 times faster + travel_matrix = np.zeros((nr_of_users, nr_of_facs)) + for i in range(nr_of_users): + for j in range(nr_of_facs): + travel_matrix[i][j] = travel_dict[users[i]][facs[j]] + exp_travelers_matrix = np.array(users_and_facs_df.loc[users]['population']).reshape(-1, 1) * travel_matrix + exp_travelers_dict = {users[i]: {facs[j]: exp_travelers_matrix[i][j] for j in range(nr_of_facs)} + for i in range(nr_of_users)} + facs_to_consider = facs.copy() + open_facs = starting_open_facs.copy() + closed_facs = [x for x in facs if x not in open_facs] + if len(open_facs) == 0 or len(closed_facs) == 0: + return False, {} + + assignment_results = { 'assignment': starting_assignment, 'obj': starting_obj } + print("Starting objective" + str(assignment_results['obj'])) + start_while_loop = time.time() + counter = 0 + while (len(facs_to_consider) > 0 and time.time() - start_while_loop < time_limit_while_loop and counter < iteration_limit_while_loop): + print("Counter: " + str(counter)) + counter += 1 + j = random.choice(facs_to_consider) + facs_to_consider.remove(j) + # close j and open another facility close 
to it + if j in open_facs: + best_closed_facs = random.sample(closed_facs, num_consider_per_iteration) + is_feasible_assignment_best, assignment_results_best, j_best = False, {"obj": inf}, None + # first close the facility and run re-assignment + is_feasible_assignment_current_after_close, assignment_results_current_after_close = greedily_assign_users(users_and_facs_df, + travel_dict, users, + facs, + open_facs.copy(), + exp_travelers_dict, + cap_dict, + assignment = assignment_results["assignment"].copy(), + fac_to_close = j) + temp_open_facs = open_facs.copy() + temp_open_facs.remove(j) + for j_to_open in best_closed_facs: + # then open facility and run reassignment + is_feasible_assignment_current, assignment_results_current = greedy_reassign_open(users_and_facs_df, + travel_dict, users, + facs, + temp_open_facs.copy(), + exp_travelers_dict, + cap_dict, + assignment = assignment_results_current_after_close["assignment"].copy(), + fac_to_open = j_to_open, + depth_greedy_reassignment=depth_greedy_reassignment) + if is_feasible_assignment_current and assignment_results_current["obj"] < assignment_results_best["obj"]: + is_feasible_assignment_best = is_feasible_assignment_current + assignment_results_best = assignment_results_current.copy() + j_best = j_to_open + # if best found better than current + print("Difference " + str(assignment_results_best["obj"] - assignment_results['obj'])) + if is_feasible_assignment_best and assignment_results_best["obj"] < assignment_results['obj']: + print("j " + str(j)) + print("j_best " + str(j_best)) + open_facs.append(j_best) + open_facs.remove(j) + closed_facs.append(j) + closed_facs.remove(j_best) + assignment_results = assignment_results_best.copy() + facs_to_consider = facs.copy() + print("Better objective " + str( assignment_results["obj"])) + + # open j and close another facility close to it + else: + best_open_facs = random.sample(open_facs, num_consider_per_iteration) + is_feasible_assignment_best, 
assignment_results_best, j_best = False, {"obj": inf}, None + #first open facility + is_feasible_assignment_current_after_open, assignment_results_current_after_open = greedy_reassign_open(users_and_facs_df, + travel_dict, users, + facs, open_facs.copy(), + exp_travelers_dict, + cap_dict, assignment = assignment_results["assignment"].copy(), + fac_to_open = j, + depth_greedy_reassignment=depth_greedy_reassignment) + temp_open_facs = open_facs.copy() + temp_open_facs.append(j) + for j_to_close in best_open_facs: + # then close facility and run reassignment + is_feasible_assignment_current, assignment_results_current = greedily_assign_users(users_and_facs_df, + travel_dict, users, facs, + temp_open_facs.copy(), + exp_travelers_dict, cap_dict, + assignment = assignment_results_current_after_open["assignment"].copy(), + fac_to_close = j_to_close) + if is_feasible_assignment_current and assignment_results_current["obj"] < assignment_results_best["obj"]: + is_feasible_assignment_best = is_feasible_assignment_current + assignment_results_best = assignment_results_current.copy() + j_best = j_to_close + # if best found better than current + print("Difference " + str(assignment_results_best["obj"] - assignment_results['obj'])) + if is_feasible_assignment_best and assignment_results_best["obj"] < assignment_results['obj']: + print("j " + str(j)) + print("j_best " + str(j_best)) + open_facs.append(j) + open_facs.remove(j_best) + closed_facs.append(j_best) + closed_facs.remove(j) + assignment_results = assignment_results_best.copy() + facs_to_consider = facs.copy() + print("Better objective" + str( assignment_results["obj"])) + if fix_assignment and len(facs) == len(facs_to_consider): + print("Running assignment method") + options_relaxation = {"model": None, "new_open_facs": [], "new_closed_facs": [], + "time_limit_relaxation": time_limit_relaxation, "tolerance": tolerance_relaxation, + "threads": threads_relaxation, + "num_consider_in_relaxation": num_consider_in_relaxation } 
+ options_localsearch = {"cutoff": cutoff_BUAP_localsearch, "time_limit_while_localsearch": time_limit_while_BUAP_localsearch} + is_feasible_assignment_rerun, assignment_results_rerun = run_assignment_method(assignment_method, + localsearch_method, + users_and_facs_df, + travel_dict, users, + facs, open_facs.copy(), + exp_travelers_dict, cap_dict, + options_localsearch = options_localsearch, + options_relaxation=options_relaxation) + if is_feasible_assignment_rerun and assignment_results_rerun["obj"] < assignment_results["obj"]: + print("Used new assignment") + is_feasible_assignment = is_feasible_assignment_rerun + assignment_results = assignment_results_rerun + is_feasible_assignment = True + if final_assignment_method == "relaxation_rounding": + options_relaxation = {"model": None, "new_open_facs": [], "new_closed_facs": [], + "time_limit_relaxation": time_limit_relaxation, + "tolerance": tolerance_relaxation, "threads": threads_relaxation, + "num_consider_in_relaxation": num_consider_in_relaxation } + options_localsearch = {"cutoff": cutoff_BUAP_localsearch, + "time_limit_while_localsearch": time_limit_while_BUAP_localsearch} + is_feasible_assignment_current, assignment_results_current = run_assignment_method(final_assignment_method, + localsearch_method, + users_and_facs_df, + travel_dict, users, facs, + open_facs, exp_travelers_dict, + cap_dict, + options_localsearch = options_localsearch, + options_relaxation=options_relaxation) + if is_feasible_assignment_current and assignment_results_current["obj"]< assignment_results["obj"]: + print("Better final assignment") + is_feasible_assignment = is_feasible_assignment_current + assignment_results = assignment_results_current + if not is_feasible_assignment: + return False, {} + end_time = time.time() + result = {"solution_details": + {"assignment": assignment_results["assignment"], "open_facs": open_facs, + "objective_value": assignment_results["obj"],"solving_time": end_time-start_time}, + "model_details": + 
{"users": users, "facs": facs, "cap_factor": cap_factor, + "budget_factor": budget_factor, "starting_open_facs": starting_open_facs + }, + "algorithm_details": { + "overall_algorithm": "localsearch_without_change", + "local_search_assignment_method": localsearch_method, + "assignment_method": assignment_method, + "num_consider_per_iteration": num_consider_per_iteration, "cutoff_localsearch": cutoff_BUAP_localsearch, + "time_limit_while_localsearch": time_limit_while_BUAP_localsearch, + "final_assignment_method": final_assignment_method, + "num_consider_in_relaxation": num_consider_in_relaxation, + "fixing_assignment_method": "greedy_reassign and greedy_reassign_open", + "iteration_limit_while_loop": iteration_limit_while_loop, + "depth_greedy_reassignment":depth_greedy_reassignment + } + } + if write_to_file: + write_results_list([result], output_filename) + return True, result + +def localsearch_with_change(users_and_facs_df, travel_dict, users, facs, starting_assignment, + starting_open_facs, starting_obj, budget_factor, cap_factor = 1.5, + num_consider_per_iteration = 50, time_limit_while_loop = 120, + iteration_limit_while_loop = 300, assignment_method="greedy_assign", + final_assignment_method = "relaxation_rounding", + localsearch_method = "local_random_reassign", num_consider_in_relaxation = 50, + tolerance_relaxation = 2e-3, time_limit_relaxation = 1000, + threads_relaxation= 1, + cutoff_BUAP_localsearch = 0.2, time_limit_while_BUAP_localsearch = 1, + depth_greedy_reassignment = 1, fix_assignment = False, + output_filename = "test_without_change.json", write_to_file = True): + """ + Local search based on ADD / DROP for overall problem with choices of which facilities to consider + based on how good they appeared to close / open in previous iterations. 
+ :param users_and_facs_df: dataframe of the user and facility related input data + :param travel_dict: dictionary of the travel probabilities from each user to each facility + :param users: list of the users used in the instance + :param facs: list of the facilities used in the instance + :param starting_assignment: the assignment of users on which the local search starts its search + :param starting_open_facs: the open faciltiies of the solution on which the local search starts its search + :param starting_obj: the objective function value of the solution on which the local search starts its search + :param budget_factor: proportion of facilities to open + :param cap_factor: factor by which all capacities are scaled + :param num_consider_per_iteration: how many facilties should be considered for the chosen facility + to be swapped out with + :param time_limit_while_loop: the maximum time that the main while loop of the local search should run for + :param iteration_limit_while_loop: the maximum number of iterations the main while loop of + the local search should run for + :param assignment_method: which method of assignment is used to fix the assignment after change is made, + options are "relaxation_rounding" or "greedy_assign" + :param final_assignment_method: assignment method to run after open facilities decided, + currently only support relaxation rounding + :param local_search_method: BUAP local search method to run on the assignment resulting from assignment_method + and final_assignment_method; options are "None", "local_random_reassign", "local_random_swap" + :param num_consider_in_relaxation: number of facilities to consider for each user in BUAP relaxation, + if -1 all facilities are considered in relaxation + :param tolerance_relaxation: tolerance of the relaxation of the BUAP + :param time_limit_relaxation: time limit of running the BUAP relaxation + :param threads_relaxation: number of threads used when solving the relaxation of the BUAP + :param: 
cutoff_BUAP_localsearch: parameter for BUAP localsearch for minimum preference required to + reassign to the facility + :param time_limit_while_BUAP_localsearch: the time limit in seconds used in the while loop in the BUAP localsearch + :param fix_assignment: if true, fixes assignment with assignment_method after change is made + :param output_filename: name of the output file + :param write_to_file: boolean indicating whether results should be written to file + :return: boolean indicating feasibility and dictionary containing the results + """ + start_time = time.time() + # dictionary of capacities of facilities after scaling + cap_dict = {j: cap_factor * users_and_facs_df.at[j, 'capacity'] for j in facs} + nr_of_users = len(users) + nr_of_facs = len(facs) + # create dictionary of how many users are expected to travel to each facility + # using numpy here since it is 10 times faster + travel_matrix = np.zeros((nr_of_users, nr_of_facs)) + for i in range(nr_of_users): + for j in range(nr_of_facs): + travel_matrix[i][j] = travel_dict[users[i]][facs[j]] + exp_travelers_matrix = np.array(users_and_facs_df.loc[users]['population']).reshape(-1, 1) * travel_matrix + exp_travelers_dict = {users[i]: {facs[j]: exp_travelers_matrix[i][j] for j in range(nr_of_facs)} + for i in range(nr_of_users)} + facs_to_consider = facs.copy() + open_facs = starting_open_facs.copy() + closed_facs = [x for x in facs if x not in open_facs] + if len(open_facs) == 0 or len(closed_facs) == 0: + return False, {} + + assignment_results = { 'assignment': starting_assignment, 'obj': starting_obj } + print("Starting objective" + str(assignment_results['obj'])) + # initilialise change: note if facility currently close, this is change of opening + # while if currently open its the change of closing + Change = {j: inf for j in facs} + for j in open_facs: + is_feasible_assignment_current, assignment_results_current = greedily_assign_users(users_and_facs_df, + travel_dict, users, facs, + open_facs.copy(), 
+ exp_travelers_dict, cap_dict, + assignment = assignment_results["assignment"].copy(), + fac_to_close = j) + if is_feasible_assignment_current: + Change[j] = assignment_results_current["obj"] - assignment_results["obj"] + for j in closed_facs: + is_feasible_assignment_current, assignment_results_current = greedy_reassign_open(users_and_facs_df, + travel_dict, users, + facs, open_facs.copy(), + exp_travelers_dict, cap_dict, + assignment = assignment_results["assignment"].copy(), + fac_to_open = j, + depth_greedy_reassignment=depth_greedy_reassignment) + if is_feasible_assignment_current: + Change[j] = assignment_results_current["obj"] - assignment_results["obj"] + start_while_loop = time.time() + counter = 0 + while (len(facs_to_consider) > 0 and time.time() - start_while_loop < time_limit_while_loop and counter < iteration_limit_while_loop): + print("Counter: " + str(counter)) + counter += 1 + j = random.choice(facs_to_consider) + facs_to_choose_from_closed = [j1 for j1 in facs_to_consider if j1 in closed_facs] + facs_to_choose_from_open = [j1 for j1 in facs_to_consider if j1 in open_facs] + rand = random.random() + if (rand < len(open_facs) / len(facs) and len(facs_to_choose_from_open) > 0) or len(facs_to_choose_from_closed) == 0: + j = min(facs_to_choose_from_open, key = lambda j1:Change[j1]) + else: + j = min(facs_to_choose_from_closed, key = lambda j1:Change[j1]) + facs_to_consider.remove(j) + # close j and open another facility + if j in open_facs: + best_closed_facs = sorted(closed_facs, + key = lambda j1:Change[j1])[0:min(len(closed_facs), num_consider_per_iteration)] + is_feasible_assignment_best, assignment_results_best, j_best = False, {"obj": inf}, None + # first close the facility and run re-assignment + is_feasible_assignment_current_after_close, assignment_results_current_after_close = greedily_assign_users(users_and_facs_df, + travel_dict, users, facs, + open_facs.copy(), + exp_travelers_dict, + cap_dict, + assignment = 
assignment_results["assignment"].copy(), + fac_to_close = j) + Change[j] = assignment_results_current_after_close["obj"] - assignment_results["obj"] + temp_open_facs = open_facs.copy() + temp_open_facs.remove(j) + for j_to_open in best_closed_facs: + # then open facility and run reassignment + is_feasible_assignment_current, assignment_results_current = greedy_reassign_open(users_and_facs_df, + travel_dict, users, + facs, temp_open_facs.copy(), + exp_travelers_dict, + cap_dict, assignment = assignment_results_current_after_close["assignment"].copy(), + fac_to_open = j_to_open, + depth_greedy_reassignment=depth_greedy_reassignment) + Change[j_to_open] = assignment_results_current["obj"] - assignment_results["obj"] + if is_feasible_assignment_current and assignment_results_current["obj"] < assignment_results_best["obj"]: + is_feasible_assignment_best = is_feasible_assignment_current + assignment_results_best = assignment_results_current.copy() + j_best = j_to_open + # if best found better than current + print("Difference " + str(assignment_results_best["obj"] - assignment_results['obj'])) + if is_feasible_assignment_best and assignment_results_best["obj"] < assignment_results['obj']: + print("j " + str(j)) + print("j_best " + str(j_best)) + open_facs.append(j_best) + open_facs.remove(j) + closed_facs.append(j) + closed_facs.remove(j_best) + assignment_results = assignment_results_best.copy() + facs_to_consider = facs.copy() + print("Better objective " + str( assignment_results["obj"])) + # update change values (swap signs since now instead of opening consider closing and vice versa) + Change[j] = - Change[j] + Change[j_best] = - Change[j_best] + + # open j and close another facility close to it + else: + best_open_facs = sorted(open_facs, key = lambda j1:Change[j1])[0:min(len(open_facs), num_consider_per_iteration)] + is_feasible_assignment_best, assignment_results_best, j_best = False, {"obj": inf}, None + #first open facility + 
is_feasible_assignment_current_after_open, assignment_results_current_after_open = greedy_reassign_open(users_and_facs_df, + travel_dict, users, + facs, open_facs.copy(), + exp_travelers_dict, + cap_dict, assignment = assignment_results["assignment"].copy(), + fac_to_open = j, + depth_greedy_reassignment=depth_greedy_reassignment) + Change[j] = assignment_results_current_after_open["obj"] - assignment_results["obj"] + temp_open_facs = open_facs.copy() + temp_open_facs.append(j) + for j_to_close in best_open_facs: + # then open facility and run reassignment + is_feasible_assignment_current, assignment_results_current = greedily_assign_users(users_and_facs_df, + travel_dict, users, facs, + temp_open_facs.copy(), + exp_travelers_dict, cap_dict, + assignment = assignment_results_current_after_open["assignment"].copy(), + fac_to_close = j_to_close) + Change[j_to_close] = assignment_results_current["obj"] - assignment_results["obj"] + if is_feasible_assignment_current and assignment_results_current["obj"] < assignment_results_best["obj"]: + is_feasible_assignment_best = is_feasible_assignment_current + assignment_results_best = assignment_results_current.copy() + j_best = j_to_close + # if best found better than current + print("Difference " + str(assignment_results_best["obj"] - assignment_results['obj'])) + if is_feasible_assignment_best and assignment_results_best["obj"] < assignment_results['obj']: + print("j " + str(j)) + print("j_best " + str(j_best)) + open_facs.append(j) + open_facs.remove(j_best) + closed_facs.append(j_best) + closed_facs.remove(j) + assignment_results = assignment_results_best.copy() + facs_to_consider = facs.copy() + print("Better objective" + str( assignment_results["obj"])) + # update change values (swap signs since now instead of opening consider closing and vice versa) + Change[j] = - Change[j] + Change[j_best] = - Change[j_best] + if fix_assignment and len(facs) == len(facs_to_consider): + print("Running assignment method") + 
options_relaxation = {"model": None, "new_open_facs": [], "new_closed_facs": [], + "time_limit_relaxation": time_limit_relaxation, "tolerance": tolerance_relaxation, + "threads": threads_relaxation, "num_consider_in_relaxation": num_consider_in_relaxation } + options_localsearch = {"cutoff": cutoff_BUAP_localsearch, + "time_limit_while_localsearch": time_limit_while_BUAP_localsearch} + is_feasible_assignment_rerun, assignment_results_rerun = run_assignment_method(assignment_method, + localsearch_method, + users_and_facs_df, + travel_dict, users, facs, + open_facs.copy(), + exp_travelers_dict, + cap_dict, + options_localsearch = options_localsearch, + options_relaxation=options_relaxation) + if is_feasible_assignment_rerun and assignment_results_rerun["obj"] < assignment_results["obj"]: + print("Used new assignment") + is_feasible_assignment = is_feasible_assignment_rerun + assignment_results = assignment_results_rerun + is_feasible_assignment = True + if final_assignment_method == "relaxation_rounding": + print("In relaxation rounding if") + options_relaxation = {"model": None, "new_open_facs": [], "new_closed_facs": [], + "time_limit_relaxation": time_limit_relaxation, "tolerance": tolerance_relaxation, + "threads": threads_relaxation, "num_consider_in_relaxation": num_consider_in_relaxation } + options_localsearch = {"cutoff": cutoff_BUAP_localsearch, + "time_limit_while_localsearch": time_limit_while_BUAP_localsearch} + is_feasible_assignment_current, assignment_results_current = run_assignment_method(final_assignment_method, + localsearch_method, + users_and_facs_df, + travel_dict, users, facs, + open_facs, exp_travelers_dict, + cap_dict, + options_localsearch = options_localsearch, + options_relaxation=options_relaxation) + if is_feasible_assignment_current and assignment_results_current["obj"]< assignment_results["obj"]: + print("Better final assignment") + is_feasible_assignment = is_feasible_assignment_current + assignment_results = 
assignment_results_current + if not is_feasible_assignment: + return False, {} + end_time = time.time() + result = {"solution_details": + {"assignment": assignment_results["assignment"], "open_facs": open_facs, + "objective_value": assignment_results["obj"], "solving_time": end_time-start_time}, + "model_details": + {"users": users, "facs": facs, "cap_factor": cap_factor, + "budget_factor": budget_factor, "starting_open_facs": starting_open_facs + }, + "algorithm_details": { + "overall_algorithm": "localsearch", "local_search_assignment_method": localsearch_method, + "assignment_method": assignment_method, + "num_consider_per_iteration": num_consider_per_iteration, "cutoff_localsearch": cutoff_BUAP_localsearch, + "time_limit_while_localsearch": time_limit_while_BUAP_localsearch, + "final_assignment_method": final_assignment_method, + "num_consider_in_relaxation": num_consider_in_relaxation, + "fixing_assignment_method": "greedy_assign", "iteration_limit_while_loop": iteration_limit_while_loop, + "depth_greedy_reassignment":depth_greedy_reassignment + } + } + if write_to_file: + write_results_list([result], output_filename) + return True, result \ No newline at end of file diff --git a/heuristics_and_mips/BUAP_MIP.py b/heuristics_and_mips/BUAP_MIP.py new file mode 100644 index 0000000..dcd80d1 --- /dev/null +++ b/heuristics_and_mips/BUAP_MIP.py @@ -0,0 +1,237 @@ +""" +Functions for solving the BUAP MIP and its relaxation +The model can be edited so that similar models can be solved without having to rebuild the whole model. 
+""" +import pyomo.environ as pyo +from pyomo.opt import SolverStatus, TerminationCondition +import time +from utils import * +from pyomo.core.base.constraint import Constraint + + +def cap_init(m, j): + return m.users_and_facs_df.at[j, 'capacity'] * m.cap_factor.value + +def exp_travelers_init(m, i, j): + return m.users_and_facs_df.at[i, 'population'] * m.travel_dict[i][j] + +def obj_expression(m): + if m.define_u: + return sum(m.cap[j] * (1 - m.u[j]) ** 2 for j in m.facs) + else: + return sum(m.cap[j] * ( + 1 - (sum(m.exp_travelers[(i, j)] * m.x[(i, j)] for (i, j2) in m.travel_pairs if j2 == j) + / m.cap[j])) ** 2 for j in m.facs) + +def define_utilization(m,j): + return m.u[j] == sum(m.exp_travelers[(i, j)] * m.x[(i, j)] for (i, j2) in m.travel_pairs if j2 == j) / m.cap[j] + +def utilization_cstr(m, j): + if m.define_u: + return m.u[j] <= m.y[j] + else: + return sum(m.exp_travelers[(i, j)] * m.x[(i, j)] for (i, j2) in m.travel_pairs if j2 == j) / m.cap[ + j] <= m.y[j] + +def assign_to_one_cstr(m, i): + eligible_js = [j for (i2, j) in m.travel_pairs if i2 == i] + if not eligible_js: + return Constraint.Skip + + expr = sum(m.x[i, j] for j in eligible_js) + + if m.strict_assign_to_one: + return expr == 1 + return expr <= 1 + +def y_init(m,j): + if j in m.open_facs: + return 1 + else: + return 0 + + +def build_BUAP_model_editable(users_and_facs_df, travel_dict, users, facs, + cap_factor, cutoff, open_facs, cap_dict = None, continous = False, + define_u = True, num_consider = -1): + """ + Build the BUAP but make the open facilites editable in the future + :param users_and_facs_df: dataframe of the user and facility related input data + :param travel_dict: dictionary of the travel probabilities from each user to each facility + :param users: list of the users used in the instance + :param facs: list of the facilities used in the instance + :param cap_factor: factor by which all capacities are scaled + ::param cutoff: minimum preference required for facility user pair 
to be considered; + only used if num_consider is -1 + :param continous: whether to solve this model continously + :param open_facs: open facilities + :param define_u: whether to explicitely define u + :param num_consider: number of closest facilities to consider for a user out of the open facilities, + same as n_r in paper; setting this to -1 means we use a cutoff instead + :return: the built model + """ + print('Build BUAP model...') + start = time.time() + m = pyo.ConcreteModel() + m.users_and_facs_df = users_and_facs_df + m.travel_dict = travel_dict + + # declare sets, variables and parameters + m.users = pyo.Set(initialize=users) + m.facs = pyo.Set(initialize=facs) + m.open_facs = pyo.Set(initialize=open_facs, ordered = False) + m.closed_facs = pyo.Set(initialize = list(set(facs) - set(open_facs)), ordered = False) + m.cutoff = pyo.Param(initialize=cutoff) + m.num_consider = pyo.Param(initialize=num_consider) + if num_consider == -1: + m.travel_pairs = pyo.Set(initialize=[(i, j) for i in m.users for j in m.facs + if m.travel_dict[i][j] > m.cutoff.value]) + else: + travel_pairs = [(i, j) for i in m.users for j in sorted(m.open_facs, key=lambda j_1: travel_dict[i][j_1], reverse=True)[0:num_consider]] + travel_pairs = list(set(travel_pairs)) + m.travel_pairs = pyo.Set(initialize=travel_pairs) + if cap_dict == None: + m.cap_factor = pyo.Param(initialize=cap_factor) + m.cap = pyo.Param(m.facs, initialize=cap_init) + else: + m.cap_factor = pyo.Param(initialize=cap_dict[facs[0]]/m.users_and_facs_df.at[facs[0], 'capacity'] ) + m.cap = pyo.Param(m.facs, initialize=lambda m, j: cap_dict[j]) + m.exp_travelers = pyo.Param(m.travel_pairs, initialize=exp_travelers_init) + m.strict_assign_to_one = pyo.Param(initialize=True) + m.continous = pyo.Param(initialize=continous) + m.define_u = pyo.Param(initialize=define_u) + m.y = pyo.Param(m.facs, initialize=y_init, mutable = True) + + if m.continous: + m.x = pyo.Var(m.travel_pairs, bounds=(0,1)) + else: + m.x = 
pyo.Var(m.travel_pairs, within=pyo.Binary) + + # fix pairs of closed facilities to be zero + for (i,j) in m.travel_pairs: + if j in m.closed_facs: + m.x[(i,j)].fix(0) + + if m.define_u: + m.u = pyo.Var(m.facs, bounds=(0, 1)) + + # constraints and objective + if m.define_u: + m.define_utilization = pyo.Constraint(m.facs, rule=define_utilization) + if not m.define_u: + m.utilization_cstr = pyo.Constraint(m.facs, rule=utilization_cstr) + m.assign_to_one_cstr = pyo.Constraint(m.users, rule=assign_to_one_cstr) + m.obj = pyo.Objective(rule=obj_expression, sense=pyo.minimize) + print('setup complete in ', time.time() - start, 'seconds') + + return m + + +def change_open_facs(m, new_open_facs, new_closed_facs): + """ + Edit the input model m with the adapted open and closed facilities + :param m: model that needs editing, created with build_BUAP_model_editable + :param new_open_facs: facilities to be added to being open + :param new_closed_facs: facilities that need to be closed now + :return: the edited model + """ + m.open_facs.update(set(new_open_facs)) + for j in new_closed_facs: + m.open_facs.remove(j) + m.closed_facs.update(set(new_closed_facs)) + for j in new_open_facs: + m.closed_facs.remove(j) + # update y variables + for j in new_open_facs: + m.y[j] = 1 + for j in new_closed_facs: + m.y[j] = 0 + + # fix pairs of closed facilities to be zero + for (i,j) in m.travel_pairs: + if j in new_closed_facs: + m.x[(i,j)].fix(0) + + # unfix the ones that are not closed anymore + for (i,j) in m.travel_pairs: + if j in new_open_facs: + m.x[(i,j)].unfix() + return m + +def optimize_BUAP_model_editable(m, threads=1, tolerance=0.0,time_limit = 10000): + """ + Solve the BUAP + the objective value is the objective value of the full model + :param m: the model to be optimized + :param threads: number of threads + :param tolerance: tolerance on the optimality of the solution + :param print_sol: boolean indicating whether the solution should be printed + :param log_file: name of the 
log file. If "None", no log file will be produced + :param time_limit: how long the model is allowed to run for + :return: boolean indicating whether the model is feasible; updated results dictionary + """ + print('Optimize BUAP model...') + opt = pyo.SolverFactory("gurobi") + opt.options["threads"] = threads + opt.options["MIPGap"] = tolerance + opt.options["NodefileStart"] = 0.5 + opt.options["Time_limit"] = time_limit + res = opt.solve(m, tee=True) + + if res.solver.termination_condition == TerminationCondition.infeasible or res.solver.termination_condition == TerminationCondition.infeasibleOrUnbounded: + return False, {} + + # create dictionary for the results after postprocessing + assignment = {} + results_x = {i : {} for i in m.users} + for (i,j) in m.travel_pairs: + results_x[i][j] = round(min(m.x[(i,j)].value,1.0), 5) + # assign each user to facility with highest x_i,j + if m.continous: + assignment = {i : max(results_x[i], key=results_x[i].get) for i in m.users} + else: + for i in m.users: + for j in m.open_facs: + if (i, j) in m.travel_pairs and m.x[(i, j)].value > 1e-4: + assignment[i] = j + results = {"solution_details": + {"assignment": assignment, "open_facs": list(m.open_facs), "objective_value": pyo.value(m.obj), + "x": results_x, "u": { j: m.u[j].value for j in m.facs }, + "lower_bound": res['Problem'][0]['Lower bound'], "solving_time": res.Solver[0]['Time']}, + "model_details": + {"users": list(m.users), "facs": list(m.facs), "cap_factor": m.cap_factor.value, + "budget_factor": len(m.open_facs)/len(m.facs), "cutoff": m.cutoff.value, + "num_consider_relaxation": m.num_consider.value,"strict_assign_to_one": m.strict_assign_to_one.value, "tolerance": tolerance, + "time_limit": time_limit, "define_u": m.define_u.value} + } + return True, results + +def solve_BUAP_model_editable(users_and_facs_df, travel_dict, users, facs, + cap_factor, cutoff, open_facs, cap_dict = None, continous = False, + define_u = True, num_consider = -1, + threads=1, 
tolerance=0.0,time_limit = 10000): + """ + Build and solve the BUAP model + :param users_and_facs_df: dataframe of the user and facility related input data + :param travel_dict: dictionary of the travel probabilities from each user to each facility + :param users: list of the users used in the instance + :param facs: list of the facilities used in the instance + :param cap_factor: factor by which all capacities are scaled + ::param cutoff: minimum preference required for facility user pair to be considered; + only used if num_consider is -1 + :param continous: whether to solve this model continously + :param open_facs: open facilities + :param define_u: whether to explicitely define u + :param num_consider: number of closest facilities to consider for a user out of the open facilities, + same as n_r in paper; setting this to -1 means we use a cutoff instead + :param threads: number of threads + :param tolerance: tolerance on the optimality of the solution + :param print_sol: boolean indicating whether the solution should be printed + :param log_file: name of the log file. 
If "None", no log file will be produced + :param time_limit: how long the model is allowed to run for + :return: boolean indicating whether the model is feasible and results dictionary + """ + m = build_BUAP_model_editable(users_and_facs_df, travel_dict, users, facs, + cap_factor, cutoff, open_facs, cap_dict, continous, define_u, num_consider) + is_feasible, result = optimize_BUAP_model_editable(m, threads, tolerance, time_limit) + return is_feasible, result diff --git a/heuristics_and_mips/BUAP_heuristics.py b/heuristics_and_mips/BUAP_heuristics.py new file mode 100644 index 0000000..c8e213e --- /dev/null +++ b/heuristics_and_mips/BUAP_heuristics.py @@ -0,0 +1,588 @@ +""" +Functions for running different BUAP heuristics and the subroutines used within these heuristics +""" +import time +from utils import * +import numpy as np +from BUAP_MIP import * +import math + + +def make_assignment_satisfy_capacity_constraints(users_and_facs_df, travel_dict, cap_dict, users, facs, open_facs, assignment): + """ + Given an assignment that (potentially) does not satisfy capacity constraints, try and fix the assignment + to satisfy capacity constraints by reassigning users from the over capacity facilities to other facilities. + This routine is used in the relaxation rounding algorithm. 
+ :param users_and_facs_df: dataframe of the user and facility related input data + :param travel_dict: dictionary of the travel probabilities from each user to each facility + :param cap_dict: dictionary of capacities of each facility + :param users: list of the users used in the instance + :param faacs: list of facilities used in the instance + :param open_facs: list of the open facilities used in the instance + :param assignment: assignment of users to facilities that might not satisfy constraints + :return: in the case of success the function returns the objective function value of the assignment, the assignment + and a dictionary of 1-u_j; if the function fails as fixing the assignment it returns 0, {},{} + """ + slacks, not_satisfied, assigned_users = capacity_constraints_slack(users_and_facs_df, travel_dict, cap_dict, + open_facs, assignment) + failed_to_reassign = False + for j in not_satisfied: + capacity = cap_dict[j] + reassignable_users = assigned_users[j] + reassignable_users.sort(key=lambda i: travel_dict[i][j]) + too_large_users = [] + for i in reassignable_users: + if cap_dict[j] > capacity: + too_large_users.append(i) + while slacks[j] < 0 and len(reassignable_users) > 0: + is_too_large = False + if len(too_large_users) > 0: + chosen_user = too_large_users[0] + is_too_large = True + else: + chosen_user = reassignable_users[0] + other_facs = open_facs.copy() + other_facs.remove(j) + other_facs.sort( key=lambda k: travel_dict[chosen_user][k], reverse=True) + for k in other_facs: + if slacks[k] - travel_dict[chosen_user][k]*users_and_facs_df.at[chosen_user, 'population'] >= 0: + assignment[chosen_user] = k + slacks[k] = slacks[k] - travel_dict[chosen_user][k]*users_and_facs_df.at[chosen_user, 'population'] + slacks[j] = slacks[j] + travel_dict[chosen_user][j]*users_and_facs_df.at[chosen_user, 'population'] + break + reassignable_users.remove(chosen_user) + if is_too_large: + too_large_users.remove(chosen_user) + if slacks[j] < 0: + failed_to_reassign 
= True + + if failed_to_reassign: + print("failed to reassign") + return 0, {}, {} + + not_utilised = {j: 1 for j in facs} + not_utilised.update({j:(slacks[j] / cap_dict[j]) for j in open_facs}) + objective = sum(cap_dict[j] * (not_utilised[j]) ** 2 for j in facs) + return objective, assignment, not_utilised + +def reassign_user_better(slacks, user, original_fac, new_fac, cap_dict, exp_travelers_dict): + """ + Checks if areassigning user from original_fac to new_fac leads to a better objective function + This is a subroutine of the local search reassign algorithm. + :param slacks: how much slack is in curren assignment for each facility as a dictionary, + i.e. C_j - sum_i U_i P_ij x_ij + :param user: the user we want to reassign + :param original_fac: the facility the user is currently assigned to + :param new_fac: the facility we want to assign it to + :param cap_dict: dictionary of the facilities' capacities + :param exp_traverlers_dict: number of people we expect to travel + :return: false if assignment worse or not feasible, true otherwise and the difference in objective function + """ + if slacks[new_fac] - exp_travelers_dict[user][new_fac] < 0: + return False, 0 + old_objective_part = cap_dict[original_fac]*(slacks[original_fac]/cap_dict[original_fac])**2 + cap_dict[new_fac]*(slacks[new_fac]/cap_dict[new_fac])**2 + new_objective_part = cap_dict[original_fac]*((slacks[original_fac] + exp_travelers_dict[user][original_fac])/cap_dict[original_fac])**2 + cap_dict[new_fac]*((slacks[new_fac] - exp_travelers_dict[user][new_fac])/cap_dict[new_fac])**2 + difference = new_objective_part - old_objective_part + if difference < 0: + return True, difference + else: + return False, difference + +def swap_user_better(slacks, user_1, user_2, fac_1, fac_2, cap_dict, exp_travelers_dict): + """ + Checks if reassigning user from original_fac to new_fac leads to a better objective function + This is a subroutine of the local search swap algorithm. 
+ :param slacks: how much slack is in curren assignment for each facility as a dictionary, + i.e. C_j - sum_i U_i P_ij x_ij + :param user_1: the user currently assigned to fac_1 + :param user_2: the user currently assigned to fac_2 + :param fac_1: the facility the user_1 is currently assigned to + :param fac_2: the facility user_2 is currently assigned to + :param cap_dict: dictionary of the facilities' capacities + :param exp_traverlers_dict: number of people we expect to travel + :return: false if assignment worse or not feasible, true otherwise and the the new slacks and difference in objective + """ + new_slack_1 = slacks[fac_1] - exp_travelers_dict[user_2][fac_1] + exp_travelers_dict[user_1][fac_1] + new_slack_2 = slacks[fac_2] - exp_travelers_dict[user_1][fac_2] + exp_travelers_dict[user_2][fac_2] + if new_slack_1 < 0 or new_slack_2 < 0: + return False, 0, 0, 0 + old_objective_part = cap_dict[fac_1]*(slacks[fac_1]/cap_dict[fac_1])**2 + cap_dict[fac_2]*(slacks[fac_2]/cap_dict[fac_2])**2 + new_objective_part = cap_dict[fac_1]*(new_slack_1/cap_dict[fac_1])**2 + cap_dict[fac_2]*(new_slack_2/cap_dict[fac_2])**2 + difference = new_objective_part - old_objective_part + if difference < -1: + return True, new_slack_1, new_slack_2, difference + else: + return False, new_slack_1, new_slack_2, difference + +def local_search_reassign(users_and_facs_df, travel_dict, users, facs, open_facs, assignment, + exp_travelers_dict, cap_dict, cutoff = 0.2, time_limit_while = 1): + """ + Local search for the BUAP that given an assignment tries to reassign users and implements the reassignment + if it leads to a better objective function value. 
+ :param users_and_facs_df: dataframe of the user and facility related input data + :param travel_dict: dictionary of the travel probabilities from each user to each facility + :param users: list of the users used in the instance + :param facs: list of the facilities used in the instance + :param open_facs: list of open facilities + :param assignment: where to start the local search + :param exp_travelers_dict: dictionary stating the population expected to travel from each user to each facility + :param cap_dict: dictionary of the facilities' capacities + :param cutoff: minimum preference to consider reassinging a user + :param time_limit_while: time limit of the while loop + :return: Boolean indicating that the assignment is feasible and a dictionary containing assignments, + facility utilizations and the objective value. + If the input assignment is infeasible, the function simply returns False and an empty dictionary. + """ + slacks, not_satisfied, assigned_users = capacity_constraints_slack(users_and_facs_df, travel_dict,cap_dict, + open_facs, assignment) + if len(not_satisfied)> 0: + return False, {} + users_preferred_facs = {i: [j for j in open_facs if travel_dict[i][j]>= cutoff ] for i in users} + not_utilised = {j:(slacks[j] / cap_dict[j]) for j in open_facs} + partial_objective = sum(cap_dict[j] * (not_utilised[j]) ** 2 for j in open_facs) + start_time = time.time() + improvement = -20 + counter = 0 + while time.time() - start_time < time_limit_while and improvement < -10: + improvement = 0 + random_facs = open_facs.copy() + for j in random_facs: + if(len(assigned_users[j]) <= 1): + continue + for i in assigned_users[j]: + for j_2 in users_preferred_facs[i]: + is_better, change_objective = reassign_user_better(slacks, i, j, j_2, cap_dict, exp_travelers_dict) + if is_better: + assignment[i] = j_2 + slacks[j] += exp_travelers_dict[i][j] + slacks[j_2] -= exp_travelers_dict[i][j_2] + assigned_users[j].remove(i) + assigned_users[j_2].append(i) + 
partial_objective += change_objective + improvement += change_objective + break + counter += 1 + print(improvement) + closed_facs = [j for j in facs if j not in open_facs] + objective = partial_objective + sum(cap_dict[j] for j in closed_facs) + return True, {'assignment': assignment, 'utilization': {j: 1-(slacks[j]/cap_dict[j]) for j in open_facs}, + 'obj': objective, 'other': {}} + +def local_search_swap(users_and_facs_df, travel_dict, users, facs, open_facs, assignment, + exp_travelers_dict, cap_dict, cutoff = 0.2, time_limit_while = 1): + """ + Local search for the BUAP that given an assignment tries to swap user assignments and implements the swap + if it leads to a better objective function value. + :param users_and_facs_df: dataframe of the user and facility related input data + :param travel_dict: dictionary of the travel probabilities from each user to each facility + :param users: list of the users used in the instance + :param facs: list of the facilities used in the instance + :param open_facs: list of open facilities + :param assignment: where to start the local search + :param exp_travelers_dict: dictionary stating the population expected to travel from each user to each facility + :param cap_dict: dictionary of the facilities' capacities + :param cutoff: minimum preference to consider reassinging a user + :param time_limit_while: time limit of the while loop + :return: Boolean indicating that the assignment is feasible and a dictionary containing assignments, + facility utilizations and the objective value. + If the input assignment is infeasible, the function simply returns False and an empty dictionary. 
+ """ + slacks, not_satisfied, assigned_users = capacity_constraints_slack(users_and_facs_df, travel_dict,cap_dict, + open_facs, assignment) + if len(not_satisfied)> 0: + return False, {} + users_preferred_facs = {i: [j for j in open_facs if travel_dict[i][j]>= cutoff ] for i in users} + not_utilised = {j:(slacks[j] / cap_dict[j]) for j in open_facs} + partial_objective = sum(cap_dict[j] * (not_utilised[j]) ** 2 for j in open_facs) + start_time = time.time() + improvement = -20 + counter = 0 + while time.time() - start_time < time_limit_while and improvement < -10: + improvement = 0 + random_facs = open_facs.copy() + for j in random_facs: + swapped = False + for i in assigned_users[j]: + for j_2 in users_preferred_facs[i]: + for i_2 in assigned_users[j_2]: + if j not in users_preferred_facs[i_2]: + continue + is_better, new_slack_1, new_slack_2, change_objective = swap_user_better(slacks, i, i_2, j, j_2, + cap_dict, + exp_travelers_dict) + if is_better: + assignment[i] = j_2 + assignment[i_2] = j + slacks[j] = new_slack_1 + slacks[j_2] = new_slack_2 + assigned_users[j].remove(i) + assigned_users[j_2].append(i) + assigned_users[j_2].remove(i_2) + assigned_users[j].append(i_2) + partial_objective += change_objective + improvement += change_objective + swapped = True + break + if swapped: + break + if swapped: + break + counter += 1 + print(improvement) + closed_facs = [j for j in facs if j not in open_facs] + objective = partial_objective + sum(cap_dict[j] for j in closed_facs) + return True, {'assignment': assignment, 'utilization': {j: 1-(slacks[j]/cap_dict[j]) for j in open_facs}, + 'obj': objective, 'other': {}} + +def greedily_assign_users(users_and_facs_df, travel_dict, users, facs, open_facs_input, + exp_travelers_dict, cap_dict, assignment = None, fac_to_close = None): + """ + Heuristic for the BUAP. + This is the greedy assign and greedy reassign procedures combined into one procedure for the BUAP. 
+ If no assignment and fac_to_close is given, this simply computes the assignment with the greedy assign procedure. + If an aissignment and fac_to_close are given it reassign all the users assigned to fac_to_close greedily, + as in the greedy reassign procedure. + Code adapted from https://github.com/schmitt-hub/preferential_access_and_fairness_in_waste_management + :param users_and_facs_df: dataframe of the user and facility related input data + :param travel_dict: dictionary of the travel probabilities from each user to each facility + :param users: list of the users used in the instance + :param facs: list of the facilities used in the instance + :param open_facs: list of open facilities + :param exp_travelers_dict: dictionary stating the population expected to travel from each user to each facility + :param cap_dict: dictionary of the facilities' capacities + :param assignment: a previous assignment + :param fac_to_close: facility to close and reassign users from, if this and assignment given we do not + recompute the assignnment but just fix what needs changing based on fac_to_close closing + :return: boolean indicating whether a feasible assignment could be constructed; + a dictionary of assignments, facility utilizations and the objective value + """ + + # initialize + open_facs = open_facs_input.copy() + if assignment == None and fac_to_close == None: + cap_left = cap_dict.copy() + user_assignment = {} + unassigned_users = users.copy() + assigned_users = {j: [] for j in open_facs} + else: + user_assignment = assignment.copy() + cap_left = cap_dict.copy() + assigned_users = {j: [] for j in open_facs} + for i, j in user_assignment.items(): + assigned_users[j].append(i) + cap_left[j] = cap_left[j] - exp_travelers_dict[i][j] + unassigned_users = assigned_users[fac_to_close] + open_facs.remove(fac_to_close) + cap_left[fac_to_close] = cap_dict[fac_to_close] + assigned_users[fac_to_close] = {} + while unassigned_users: + users_not_assignable = True + 
most_preferred_users = {j: [] for j in open_facs} + # match every user with their most preferred facility + for i in unassigned_users: + possible_facs = [j for j in open_facs if cap_left[j] >= exp_travelers_dict[i][j]] + if not possible_facs: + continue + most_preferred_fac = possible_facs[np.argmax([travel_dict[i][j] for j in possible_facs])] + most_preferred_prob = travel_dict[i][most_preferred_fac] + most_preferred_users[most_preferred_fac].append((i, most_preferred_prob)) + users_not_assignable = False + # if no user could be assigned in this iteration, return without a new feasible assignment + if users_not_assignable: + return 0, {} + # assign users to their most preferred facility in decreasing rank of their preference to this facility + for j in most_preferred_users: + sorted_users = sorted(most_preferred_users[j], key=lambda x: -x[1]) + for (i, prob) in sorted_users: + if cap_left[j] >= exp_travelers_dict[i][j]: + unassigned_users.remove(i) + user_assignment[i] = j + cap_left[j] -= exp_travelers_dict[i][j] + assigned_users[j].append(i) + + utilization = {j: sum(exp_travelers_dict[i][j] for i in assigned_users[j]) / + cap_dict[j] for j in open_facs} + closed_facs = [j for j in facs if j not in open_facs] + objective = sum(cap_dict[j] * (1 - utilization[j]) ** 2 for j in open_facs) + sum(cap_dict[j] for j in closed_facs) + return 1, {'assignment': user_assignment, 'utilization': utilization, 'obj': objective, 'other': {}} + +def greedy_reassign_open(users_and_facs_df, travel_dict, users, facs, open_facs_input, + exp_travelers_dict, cap_dict, assignment = None, fac_to_open = None, + depth_greedy_reassignment = 1): + """ + Greedy assignment adapted to work within open greedy algorithm, + given an assignment opens a facility, reassigns any unassigned users to it + and runs a focused local searchs + :param users_and_facs_df: dataframe of the user and facility related input data + :param travel_dict: dictionary of the travel probabilities from each user to each 
facility + :param users: list of the users used in the instance + :param facs: list of the facilities used in the instance + :param open_facs: list of open facilities + :param exp_travelers_dict: dictionary stating the population expected to travel from each user to each facility + :param cap_dict: dictionary of the facilities' capacities + :param assignment: a previous assignment + :param fac_to_open: facility to open and reassign users to + :param depth_greedy_reassignment: depth of the greeddy reassignment within the algorithm; i.e. if it is 1 + we only try reassigning users to fac_to_open, if its 2 we addtionally also try to reassign users to the + facilities that the users that are now assigned to fac_to_open were assigned to, ... + :return: boolean indicating whether a feasible assignment could be constructed; + a dictionary of assignments, facility utilizations and the objective value + """ + + # initialize + open_facs = open_facs_input.copy() + cap_left = cap_dict.copy() + if assignment == None and fac_to_open == None: + user_assignment = {i: "unassigned" for i in users} + unassigned_users = users.copy() + else: + open_facs.append(fac_to_open) + user_assignment = assignment.copy() + for i, j in user_assignment.items(): + if j != "unassigned": + cap_left[j] = cap_left[j] - exp_travelers_dict[i][j] + unassigned_users = [i for i in users if assignment[i] == "unassigned"] + + while unassigned_users: + users_not_assignable = True + most_preferred_users = {j: [] for j in open_facs} + # match every user with their most preferred facility + for i in unassigned_users: + possible_facs = [j for j in open_facs if cap_left[j] >= exp_travelers_dict[i][j]] + if not possible_facs: + continue + most_preferred_fac = possible_facs[np.argmax([travel_dict[i][j] for j in possible_facs])] + most_preferred_prob = travel_dict[i][most_preferred_fac] + most_preferred_users[most_preferred_fac].append((i, most_preferred_prob)) + users_not_assignable = False + # if no user could be 
assigned in this iteration, break and return the partial assignment + if users_not_assignable: + break + # assign users to their most preferred facility in decreasing rank of their preference to this facility + for j in most_preferred_users: + sorted_users = sorted(most_preferred_users[j], key=lambda x: -x[1]) + for (i, prob) in sorted_users: + if cap_left[j] >= exp_travelers_dict[i][j]: + unassigned_users.remove(i) + user_assignment[i] = j + cap_left[j] -= exp_travelers_dict[i][j] + utilization = {j: 1- (cap_left[j] / cap_dict[j]) for j in open_facs} + closed_facs = [j for j in facs if j not in open_facs] + objective = sum(cap_dict[j] * (1 - utilization[j]) ** 2 for j in open_facs) + sum(cap_dict[j] for j in closed_facs) + # improve assignment if we are opening facility that still has space, start with opened facility + # facilities that had users reassigned in previous iteration are considered if they can take up other users + if fac_to_open != None and cap_left[fac_to_open] > 0: + facs_to_consider = [fac_to_open] + counter = 0 + while counter < depth_greedy_reassignment and len(facs_to_consider) > 0: + new_facs_to_consider = [] + for fac_to_reassign_to in facs_to_consider: + users_to_test_reassign = [i for i in users if + cap_left[fac_to_reassign_to] >= exp_travelers_dict[i][fac_to_reassign_to] + and user_assignment[i] != fac_to_reassign_to] + users_to_test_reassign.sort(key=lambda i: travel_dict[i][fac_to_reassign_to], reverse=True) + for i in users_to_test_reassign: + original_fac = user_assignment[i] + if original_fac == "unassigned" and cap_left[fac_to_reassign_to] >= exp_travelers_dict[i][fac_to_reassign_to]: + user_assignment[i] = fac_to_reassign_to + cap_left[fac_to_reassign_to] -= exp_travelers_dict[i][fac_to_reassign_to] + else: + is_better, change_objective = reassign_user_better(cap_left, i, original_fac,fac_to_reassign_to, + cap_dict, exp_travelers_dict) + if is_better: + user_assignment[i] = fac_to_reassign_to + cap_left[original_fac] += 
exp_travelers_dict[i][original_fac] + cap_left[fac_to_reassign_to] -= exp_travelers_dict[i][fac_to_reassign_to] + new_facs_to_consider.append(original_fac) + if original_fac not in open_facs: + print("FAIL: Original facility not in open facilities - something went wrong.") + facs_to_consider = list(set(new_facs_to_consider.copy())) + counter += 1 + utilization = {j: 1- (cap_left[j] / cap_dict[j]) for j in open_facs} + objective = sum(cap_dict[j] * (1 - utilization[j]) ** 2 for j in open_facs) + sum(cap_dict[j] for j in closed_facs) + + return len(unassigned_users) == 0, {'assignment': user_assignment, 'utilization': utilization, 'obj': objective, + 'other': {'uassigned_users': unassigned_users}} + +def relaxation_rounding(users_and_facs_df, travel_dict, users, facs, + cap_factor=1.5, threads=1, tolerance=0.0,cutoff = 0.1, + time_limit=20000, open_facs = [], budget_factor = 1.0, + define_u = True, num_consider = -1): + """ + Heuristic for the BUAP. + This implements the relaxation rounding algorithm: It solves the relaxation and assigns + a user to the facility for which they have the highest x_ij in the relaxation. + Then this assignment is fixed if any capacity constraints are exceeded. 
+ :param users_and_facs_df: dataframe of the user and facility related input data + :param travel_dict: dictionary of the travel probabilities from each user to each facility + :param users: list of the users used in the instance + :param facs: list of the facilities used in the instance + :param cap_factor: factor by which all capacities are scaled + :param threads: number of threads used in the relaxation + :param tolerance: tolerance on the optimality of the solution used in the relaxation + :param cutoff: preference cutoff used when craeting the relaxation model + only used if num_consider is -1 + :param time_limit: time limit in seconds for the optimization used in the relaxation + :param open_facs: list of open facilities for this BUAP instance + :param budget_factor: the proportion of facilties that are open in this BUAP instance + :param define_u: boolean indicating whether to explicitely define u in the model + :param num_consider: number of closest facilities to consider for a user (alternative to cutoff value) + same as n_r in paper; setting this to -1 means we use a cutoff instead + :return: Boolean indicating whether a feasible solution was achieved; dictionary containing the results. 
+ """ + start_time = time.time() + is_feasible, results = solve_BUAP_model_editable(users_and_facs_df, travel_dict, users, facs, + cap_factor, cutoff, open_facs, cap_dict = None, continous = True, + define_u = define_u, num_consider = num_consider, + threads=threads, tolerance=tolerance,time_limit = time_limit) + if not is_feasible: + print('The model is infeasible') + return is_feasible, {} + assignment = results["solution_details"]["assignment"] + cap_dict = {j: cap_factor * users_and_facs_df.at[j, 'capacity'] for j in facs} + + objective, assignment, not_utilised= make_assignment_satisfy_capacity_constraints(users_and_facs_df, travel_dict, cap_dict, users, facs, open_facs, assignment) + + + if len(assignment) == 0: + print("Failed to fix capacity constraints") + return False, results + + end_time = time.time() + results = {"solution_details": + {"assignment": assignment, "objective_value": objective, "solving_time": end_time - start_time}, + "model_details": + {"users": users,"open_facs": open_facs, "facs": facs, "cap_factor": cap_factor, "budget_factor": budget_factor, "cutoff": cutoff, + "heuristic": "Relaxation rounding", "define_u": define_u, "num_consider": num_consider + } + } + + return True, results + +def run_one_iteration_relaxation_rounding_simple_output(m, users_and_facs_df, travel_dict, cap_dict, users, facs, + open_facs = [], new_open_facs = [], new_closed_facs = [], + threads=1, tolerance=0.0,cutoff = 0.0, num_consider = -1, + time_limit=1000): + """ + Relaxation rounding implementation used within the close greedy basic algorithm. + Finds a solution the BUAP given a relaxed model that just needs swapping some facilities from open to closed + and/or the reverse. If no model is given it creates the model, before solving the relaxation. + From the relaxed solution, using rounding and some reassignments a BUAP solution is created. 
+ :param m: relaxed model, if None this function creates the model + :param users_and_facs_df: dataframe of the user and facility related input data + :param travel_dict: dictionary of the travel probabilities from each user to each facility + :param cap_dict: dictionary of capacities of the facilities + :param users: list of the users used in the instance + :param facs: list of the facilities used in the instance + :param open_facs: open facilities currently + :param new_open_facs: facilities to now open + :param new_closed_facs: facilities to now close + :param threads: number of threads used in the relaxation + :param tolerance: tolerance on the optimality of the solution used in the relaxation + :param cutoff: preference cutoff used in relaxation model, only used if num_consider is -1 + :param num_consider: number of most preferred facilities to consider for each user, + same as n_r in paper; setting this to -1 means we use a cutoff instead + :param time_limit: time limit in seconds for the optimization used in the relaxation + :return: boolean indicating if feasible, dictionary of assignment, utilization, objective and new model + """ + if m == None: + m = build_BUAP_model_editable(users_and_facs_df, travel_dict, users, facs, + cap_factor = None, cutoff = cutoff, open_facs = open_facs.copy(), cap_dict = cap_dict, + continous = True, define_u = True, num_consider = num_consider) + if len(new_open_facs) > 0 or len(new_closed_facs) > 0: + m = change_open_facs(m, new_open_facs, new_closed_facs) + is_feasible, result = optimize_BUAP_model_editable(m, threads = threads, tolerance=tolerance, + time_limit = time_limit) + m_new = None + if not is_feasible: + print("NOT feasible!") + print("Rebuilding and resolving model") + # build with all these as this model is going to be used in the next iteration! 
+ open_facs_all = open_facs.copy() + open_facs_all.extend(new_closed_facs) + m_new = build_BUAP_model_editable(users_and_facs_df, travel_dict, users, facs,cap_factor=None, + cutoff=cutoff, open_facs=open_facs_all.copy(), cap_dict = cap_dict, continous = True, + define_u = True, num_consider = num_consider) + m_new = change_open_facs(m_new, [], new_closed_facs) + is_feasible, result = optimize_BUAP_model_editable(m_new, threads = threads, tolerance=tolerance, + time_limit = time_limit) + # if still not feasible return this + if not is_feasible: + return is_feasible, {'assignment': {}, 'utilization': {}, 'obj': inf, + 'other': {"model": m }} + objective,assignment,not_utilised = make_assignment_satisfy_capacity_constraints(users_and_facs_df, travel_dict, + cap_dict, users, facs, + result["solution_details"]["open_facs"], + result["solution_details"]["assignment"]) + if len(assignment) == 0: + return 0, {"other": {"model": m}} + assignment_results = {'assignment': assignment, 'utilization': {j: 1 - not_utilised[j] for j in facs}, 'obj': objective, + 'other': {"model": m if m_new == None else m_new }} + return True, assignment_results + +def run_assignment_method(assignment_method, local_search_method, users_and_facs_df, travel_dict, + users, facs, open_facs, exp_travelers_dict, cap_dict, + options_localsearch = {"cutoff": 0.2, "time_limit_while_localsearch": 1}, + options_relaxation = {}): + """ + Runs the mentioned BUAP method on the input instance and returns the assignment + :param assignment_method: the method we use to assign, possible inputs: + greedy_assign, relaxation_rounding + :param local_search_method: local search method used after assignment is use, + possible inputs: None,local_random_reassign, local_random_swap + :param users_and_facs_df: dataframe of the user and facility related input data + :param travel_dict: dictionary of the travel probabilities from each user to each facility + :param users: list of the users used in the instance + :param facs: 
list of the facilities used in the instance + :param open_facs: list of open facilities + :param exp_travelers_dict: dictionary stating the population expected to travel from each user to each facility, U_iP_ij + :param cap_dict: dictionary of the facilities' capacities + :param options_localsearch:options for localsearch with fields: cutoff_localsearch and time_limit_while_localsearch + :param options_relaxation: input needed for the relaxation, has fields: + model, new_open_facs, new_closed_facs, threads, tolerance, time_limit_relaxation, num_consider_in_relaxation + :return: boolean indicating whether a feasible assignment could be constructed; + a dictionary of assignments, facility utilizations, the objective value, + and other required for that specific method + """ + is_feasible_assignment, assignment_results = False, {} + if assignment_method == "greedy_assign": + is_feasible_assignment, assignment_results = greedily_assign_users(users_and_facs_df, travel_dict, users, + facs, open_facs, exp_travelers_dict, cap_dict) + elif assignment_method == "relaxation_rounding": + open_facs_before = open_facs.copy() + print("right before run one iteration") + is_feasible_assignment, assignment_results = run_one_iteration_relaxation_rounding_simple_output( + options_relaxation["model"], + users_and_facs_df, travel_dict, + cap_dict, users, facs, + open_facs = open_facs_before, + new_open_facs = options_relaxation["new_open_facs"], + new_closed_facs = options_relaxation["new_closed_facs"], + threads=options_relaxation["threads"], + tolerance=options_relaxation["tolerance"], + cutoff = 0.0, + num_consider = options_relaxation["num_consider_in_relaxation"], + time_limit=options_relaxation["time_limit_relaxation"]) + options_relaxation["model"] = assignment_results["other"]["model"] + else: + print("Invalid assignment method") + return False, {} + + if local_search_method == "local_random_reassign": + is_feasible_assignment, assignment_results = 
local_search_reassign(users_and_facs_df, travel_dict, users, + facs, open_facs, + assignment_results["assignment"].copy(), + exp_travelers_dict, cap_dict, + cutoff = options_localsearch["cutoff"], + time_limit_while = options_localsearch["time_limit_while_localsearch"]) + elif local_search_method == "local_random_swap": + is_feasible_assignment, assignment_results = local_search_swap(users_and_facs_df, travel_dict, users, + facs, open_facs, + assignment_results["assignment"].copy(), + exp_travelers_dict, cap_dict, + cutoff = options_localsearch["cutoff"], + time_limit_while = options_localsearch["time_limit_while_localsearch"]) + elif local_search_method != "None": + print("Invald local search method") + return False, {} + return is_feasible_assignment, assignment_results \ No newline at end of file diff --git a/heuristics_and_mips/main.py b/heuristics_and_mips/main.py new file mode 100644 index 0000000..9f16887 --- /dev/null +++ b/heuristics_and_mips/main.py @@ -0,0 +1,35 @@ +from utils import * +from results_heuristics import * +from BFLP_MIP import * + +users_and_facs_df, travel_dict, users, facs = load_an_instance(1) + +# the code below runs the open greedy heuristic with the given parameters +results = get_results_open_greedy(users_and_facs_df, travel_dict, users, facs, + budget_factors = [0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1], + cap_factor=1.5, threads=1, tolerance=0.0, + cutoff_localsearch = 0.2, num_consider_in_relaxation = 50, + num_consider_per_iterations = [5,50], time_limit_relaxation=1000, + assignment_methods =["greedy_assign"],local_search_methods = ["local_random_reassign"], + time_limit_while_localsearch = 1, num_fix_assignment_iterations = [len(facs)], + depths_greedy_reassign = [1,2], + final_assignment_methods = ["relaxation_rounding"],output_filename = "open_greedy_instance_1.json") + +# similarly the code below (after uncommenting) runs the BFLP model with the given parameters +# is_feasible, results = run_BFLP_model(users_and_facs_df, 
travel_dict, users, facs, budget_factor = 1.0, +# cap_factor = 1.5, cutoff = 0.0, strict_assign_to_one = True, +# cap_dict = None, continous = False, num_consider = -1, +# threads=1, tolerance=0.0,time_limit = 20000, output_filename = "basic_model.json") + +# and, again, similarly the code below (afet uncommenting) runs the BUAP model with the given parameters; +# this model additionally requires a set of open facilities as example of which we give below +#open_facs = [1024, 1027, 1049, 1051, 1066, 1076, 1079, 1081, 1087, 1091, 1094, 1098, 1100, 1128, 1132, 1137, 1139, 1141, 1145, 1146, 1149, 1151, 1153, 1154, 1155, 1156, 1157, 1158, 1159, 1160, 1162, 1189, 1192, 1194, 1195, 1196, 1197, 1198, 1200, 1201, 1204, 1207, 1209, 1210, 1212, 1213, 1214, 1215, 1216, 1218, 1219, 1220, 1221, 1223, 1224, 1231, 1232, 1233, 1234, 1238, 1239, 1240, 1243, 1244, 1245, 1246, 1247, 1249, 1250, 1251, 1252, 1253, 1254, 1255, 1258, 1259, 1261, 1263, 1264, 1265, 1266, 1267, 1268, 1270, 1271, 1273, 1274, 1276, 1277, 1278, 1279, 1280, 1281, 1282, 1283, 1284, 1285, 1286, 1287, 1288, 1290, 1291, 1292, 1295, 1296, 1299, 1300, 1301, 1302, 1303, 1304, 1306, 1311, 1314, 1317, 1318, 1319, 1320, 1321, 1322, 1323, 1324, 1325, 1326, 1327, 1328, 1329, 1330, 1332, 1333, 1334, 1335, 1336, 1337, 1338, 1339, 1344, 1348, 1350, 1352, 1387] +results = get_results_all_BUAP_methods(users_and_facs_df, travel_dict, users, facs, open_facs, + cap_factor=1.5,define_u = True,cutoff_relaxation = 0.0, + num_consider_relaxation = 20, + cutoff_localsearch = 0.2, time_limit_while_localsearch = 1, + output_filename = "BUAP_heuristics.json") + + +# to write the results in a file, do: +#write_results_open_greedy_table("open_greedy_instance_1.json", "open_greedy_instance_1.xlsx","instance_1_BFLP_MIP.json") diff --git a/heuristics_and_mips/results_heuristics.py b/heuristics_and_mips/results_heuristics.py new file mode 100644 index 0000000..486802b --- /dev/null +++ b/heuristics_and_mips/results_heuristics.py @@ -0,0 +1,739 
# --- heuristics_and_mips/results_heuristics.py ---
'''
Functions for running the heuristics with multiple inputs (get_...) resulting in a json
and functions for converting these results into more easily readable tables (write_...)
'''
from BUAP_heuristics import *
from BFLP_heuristics import *
import math
from utils import *
from random import sample
from BUAP_MIP import *
import os
import time
from pathlib import Path


def get_results_close_greedy_final(users_and_facs_df, travel_dict, users, facs,
                                   budget_factors = [0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1],
                                   cap_factor=1.5, threads=1, tolerance=0.0,
                                   cutoff_localsearch = 0.2, num_consider_in_relaxation = 50,
                                   num_consider_per_iterations = [5,50], time_limit_relaxation=1000,
                                   assignment_methods =["greedy_assign"], local_search_methods = ["None"],
                                   time_limit_while_localsearch = 1, num_fix_assignment_iterations = [100],
                                   final_assignment_methods = ["relaxation_rounding"], output_filename = "test.json"):
    '''
    Use the final version of close greedy to get results for the input instances at different input parameters.
    Whenever an input to this function is a list, all possible combinations of the input parameters are run.
    :param users_and_facs_df: dataframe of the user and facility related input data
    :param travel_dict: dictionary of the travel probabilities from each user to each facility
    :param users: list of the users used in the instance
    :param facs: list of the facilities used in the instance
    :param budget_factors: list of proportions of facilities we want to remain open
    :param cap_factor: factor by which all capacities are scaled
    :param threads: number of threads used in the relaxation
    :param tolerance: tolerance on the optimality of the solution used in the relaxation
    :param cutoff_localsearch: parameter for BUAP localsearch for minimum preference required to reassign
        to the facility
    :param num_consider_in_relaxation: number of most preferred facilities to consider in relaxation;
        if -1 all facilities are considered in relaxation
    :param num_consider_per_iterations: list of numbers of facilities to consider closing in each iteration
    :param time_limit_relaxation: time limit in seconds for the optimization used in the relaxation
    :param assignment_methods: list of methods which are used to create the first assignment,
        options are "relaxation_rounding" or "greedy_assign"
    :param local_search_methods: list of BUAP local search methods to run on the assignment resulting from
        assignment_method and final_assignment_method; options are "None", "local_random_reassign",
        "local_random_swap"
    :param time_limit_while_localsearch: the time limit in seconds used in the while loop in the localsearch
    :param num_fix_assignment_iterations: list of numbers of iterations after which to recompute the
        assignment from scratch using assignment_method; no saving of model for relaxation rounding is
        implemented, so this is only for when assignment_method is "greedy_assign"
    :param final_assignment_methods: list of assignment methods to run after the open facilities are decided,
        currently only "relaxation_rounding" is supported
    :param output_filename: name of the output file
    :return: list of results
    '''
    results_list = []
    # run one close greedy per combination of the list-valued parameters
    for n_c in num_consider_per_iterations:
        for assignment_method in assignment_methods:
            for local_search_method in local_search_methods:
                for final_assignment_method in final_assignment_methods:
                    for n_f in num_fix_assignment_iterations:
                        for budget_factor in budget_factors:
                            is_feasible, result = close_greedy_final(users_and_facs_df, travel_dict, users, facs,
                                budget_factor = budget_factor, starting_open_facs = None, cap_factor = cap_factor,
                                threads = threads, tolerance = tolerance, cutoff_localsearch = cutoff_localsearch,
                                num_consider_in_relaxation = num_consider_in_relaxation,
                                num_consider_per_iteration = n_c, time_limit_relaxation = time_limit_relaxation,
                                assignment_method = assignment_method, local_search_method = local_search_method,
                                time_limit_while_localsearch = time_limit_while_localsearch,
                                num_fix_assignment_iteration = n_f, final_assignment_method = final_assignment_method,
                                output_filename = "test.json", write_to_file = False)
                            results_list.append(result)

    write_results_list(results_list, output_filename)
    return results_list


def get_results_open_greedy(users_and_facs_df, travel_dict, users, facs,
                            budget_factors = [0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1],
                            cap_factor=1.5, threads=1, tolerance=0.0,
                            cutoff_localsearch = 0.2, num_consider_in_relaxation = 50,
                            num_consider_per_iterations = [5,50], time_limit_relaxation=1000,
                            assignment_methods =["greedy_assign"], local_search_methods = ["None"],
                            time_limit_while_localsearch = 1, num_fix_assignment_iterations = [100],
                            depths_greedy_reassign = [1,2],
                            final_assignment_methods = ["relaxation_rounding"], output_filename = "test.json"):
    '''
    Use the open greedy heuristic to get results for the input instances at different input parameters.
    Whenever an input to this function is a list, all possible combinations of the input parameters are run.
    :param users_and_facs_df: dataframe of the user and facility related input data
    :param travel_dict: dictionary of the travel probabilities from each user to each facility
    :param users: list of the users used in the instance
    :param facs: list of the facilities used in the instance
    :param budget_factors: list of proportions of facilities we want to remain open
    :param cap_factor: factor by which all capacities are scaled
    :param threads: number of threads used in the relaxation
    :param tolerance: tolerance on the optimality of the solution used in the relaxation
    :param cutoff_localsearch: parameter for BUAP localsearch for minimum preference required to reassign
        to the facility
    :param num_consider_in_relaxation: number of most preferred facilities to consider in relaxation;
        if -1 all facilities are considered in relaxation
    :param num_consider_per_iterations: list of numbers of facilities to consider opening in each iteration
    :param time_limit_relaxation: time limit in seconds for the optimization used in the relaxation
    :param assignment_methods: list of methods which are used to create the first assignment,
        options are "relaxation_rounding" or "greedy_assign"
    :param local_search_methods: list of BUAP local search methods to run on the assignment resulting from
        assignment_method and final_assignment_method; options are "None", "local_random_reassign",
        "local_random_swap"
    :param time_limit_while_localsearch: the time limit in seconds used in the while loop in the localsearch
    :param num_fix_assignment_iterations: list of numbers of iterations after which to recompute the
        assignment from scratch using assignment_method; no saving of model for relaxation rounding is
        implemented, so this is only for when assignment_method is "greedy_assign"
    :param depths_greedy_reassign: list of depths of the greedy reassignment within the greedy reassign
        open algorithm
    :param final_assignment_methods: list of assignment methods to run after the open facilities are decided,
        currently only "relaxation_rounding" is supported
    :param output_filename: name of the output file
    :return: list of results
    '''
    results_list = []
    # run one open greedy per combination of the list-valued parameters
    for n_c in num_consider_per_iterations:
        for n_f in num_fix_assignment_iterations:
            for d in depths_greedy_reassign:
                for assignment_method in assignment_methods:
                    for local_search_method in local_search_methods:
                        for final_assignment_method in final_assignment_methods:
                            for budget_factor in budget_factors:
                                is_feasible, result = open_greedy(users_and_facs_df, travel_dict, users, facs,
                                    budget_factor = budget_factor, cap_factor = cap_factor, threads = threads,
                                    tolerance = tolerance, cutoff_localsearch = cutoff_localsearch,
                                    num_consider_in_relaxation = num_consider_in_relaxation,
                                    num_consider_per_iteration = n_c, depth_greedy_reassignment = d,
                                    time_limit_relaxation = time_limit_relaxation,
                                    assignment_method = assignment_method,
                                    local_search_method = local_search_method,
                                    time_limit_while_localsearch = time_limit_while_localsearch,
                                    num_fix_assignment_iteration = n_f,
                                    final_assignment_method = final_assignment_method,
                                    output_filename = "test.json", write_to_file = False)
                                results_list.append(result)

    write_results_list(results_list, output_filename)
    return results_list


def get_results_Schmitt_Singh(users_and_facs_df, travel_dict, users, facs, lb = 0.0,
                              budget_factors = [0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1], cap_factor = 1.5,
                              turnover_factor = 0.02, tolerance = 5e-3, time_limit = 20000, iteration_limit = 100,
                              output_filename = "test.json"):
    '''
    Runs the Schmitt Singh heuristic for all the input budget factors.
    :param users_and_facs_df: dataframe of the user and facility related input data
    :param travel_dict: dictionary of the travel probabilities from each user to each facility
    :param users: list of the users used in the instance
    :param facs: list of the facilities used in the instance
    :param lb: a lower bound on the optimal objective value
    :param budget_factors: list of ratios of facilities that are allowed to be opened
    :param cap_factor: factor by which all capacities are scaled
    :param turnover_factor: maximal fraction of facilities that are swapped in each iteration
    :param tolerance: tolerance on the optimality of the solution
    :param time_limit: time limit in seconds
    :param iteration_limit: maximal number of iterations of the heuristic
    :param output_filename: name of the output file
    :return: list of results of the Schmitt Singh heuristic
    '''
    results_list = []
    for budget_factor in budget_factors:
        is_feasible, results = Schmitt_Singh_localsearch(users_and_facs_df, travel_dict, users, facs, lb = lb,
                                                         budget_factor = budget_factor, cap_factor = cap_factor,
                                                         turnover_factor = turnover_factor, tolerance = tolerance,
                                                         time_limit = time_limit, iteration_limit = iteration_limit)
        results_list.append(results)
    write_results_list(results_list, output_filename)
    return results_list
def _read_MIP_objectives(BLFP_MIP_filename):
    '''Map each budget factor in a BFLP MIP results file to its objective value.'''
    return {result["model_details"]["budget_factor"]: result["solution_details"]["objective_value"]
            for result in read_results_list(BLFP_MIP_filename)}


def _closest_MIP_objective(mip_objectives, budget_factor):
    '''
    Return the MIP objective stored for the budget factor closest to budget_factor,
    or None if no stored budget factor is within 1e-4 (matching tolerance).
    :param mip_objectives: dict mapping budget factors to MIP objective values
    :param budget_factor: budget factor to look up
    '''
    closest_b = None
    closest_dist = 1.0
    for b in mip_objectives:
        if abs(budget_factor - b) < closest_dist:
            closest_dist = abs(budget_factor - b)
            closest_b = b
    if closest_b is None or closest_dist > 1e-4:
        return None
    return mip_objectives[closest_b]


def _save_results_table(df, output_filename, output_abs_path):
    '''Save df as an excel file, defaulting to the own_results folder when no path is given.'''
    if not output_abs_path:
        output_abs_path = os.getcwd() + "/own_results"
    if not os.path.exists(output_abs_path):
        os.makedirs(output_abs_path)
    with pd.ExcelWriter(Path(output_abs_path + "/" + output_filename)) as writer:
        df.to_excel(writer, index=False)


def write_results_close_greedy_table(results_list_filename, output_filename, BLFP_MIP_filename, output_abs_path = None):
    '''
    Given a json format of close greedy results, saves the relevant results as an excel table.
    :param results_list_filename: file name of the close greedy results list, expected to be in own_results folder
    :param output_filename: file name of the output excel file
    :param BLFP_MIP_filename: file name of the corresponding results for the BFLP MIP in order to compare the
        results of the heuristics to what the MIP achieves, expected to be in own_results folder
    :param output_abs_path: file path for the output file; if not given the resulting excel file will be put
        into the own_results folder
    '''
    results_list = read_results_list(results_list_filename)
    mip_objectives = _read_MIP_objectives(BLFP_MIP_filename)
    assignment_method = []
    local_search_method = []
    final_assignment_method = []
    num_consider_per_iterations = []
    num_fix_assignments_iterations = []
    budget_factors = []
    running_time = []
    objective_value = []
    MIP_objectives = []
    Delta_MIPs = []
    for result in results_list:
        budget_factor = result["model_details"]["budget_factor"]
        MIP_objective = _closest_MIP_objective(mip_objectives, budget_factor)
        if MIP_objective is None:
            print("Failed to find MIP results. Creating table failed.")
            return
        assignment_method.append(result["algorithm_details"]["assignment_method"])
        local_search_method.append(result["algorithm_details"]["local_search_assignment_method"])
        final_assignment_method.append(result["algorithm_details"]["final_assignment_method"])
        budget_factors.append(budget_factor)
        running_time.append(result["solution_details"]["solving_time"])
        objective_value.append(result["solution_details"]["objective_value"])
        num_consider_per_iterations.append(result["algorithm_details"]["num_consider_per_iteration"])
        num_fix_assignments_iterations.append(result["algorithm_details"]["num_fix_assignment_iteration"])
        MIP_objectives.append(MIP_objective)
        # relative gap of the heuristic to the MIP objective, in percent
        Delta_MIPs.append(100*(MIP_objective-result["solution_details"]["objective_value"])/ MIP_objective)

    data = {"Budget factor": budget_factors,
            "Number of facilities considered per iteration (n_c)": num_consider_per_iterations,
            "Number of iterations after which to fix assignment (n_f)": num_fix_assignments_iterations,
            "Assignment method": assignment_method,
            "Final assignment method": final_assignment_method,
            "Local search assignment method": local_search_method,
            "Run time [s]": running_time, "Objective value close greedy": objective_value,
            "Objective value MIP": MIP_objectives,
            "Delta_MIP [%]": Delta_MIPs}
    df = pd.DataFrame(data=data)
    _save_results_table(df, output_filename, output_abs_path)


def write_results_open_greedy_table(results_list_filename, output_filename, BLFP_MIP_filename, output_abs_path = None):
    '''
    Given a json format of open greedy results, saves the relevant results as an excel table.
    :param results_list_filename: file name of the open greedy results list, expected to be in own_results folder
    :param output_filename: file name of the output excel file
    :param BLFP_MIP_filename: file name of the corresponding results for the BFLP MIP in order to compare the
        results of the heuristics to what the MIP achieves, expected to be in own_results folder
    :param output_abs_path: file path for the output file; if not given the resulting excel file will be put
        into the own_results folder
    '''
    results_list = read_results_list(results_list_filename)
    mip_objectives = _read_MIP_objectives(BLFP_MIP_filename)
    assignment_method = []
    local_search_method = []
    final_assignment_method = []
    num_consider_per_iterations = []
    num_fix_assignments_iterations = []
    depths = []
    budget_factors = []
    running_time = []
    objective_value = []
    MIP_objectives = []
    Delta_MIPs = []
    for result in results_list:
        budget_factor = result["model_details"]["budget_factor"]
        MIP_objective = _closest_MIP_objective(mip_objectives, budget_factor)
        if MIP_objective is None:
            print("Failed to find MIP results. Creating table failed.")
            return
        assignment_method.append(result["algorithm_details"]["assignment_method"])
        local_search_method.append(result["algorithm_details"]["local_search_assignment_method"])
        final_assignment_method.append(result["algorithm_details"]["final_assignment_method"])
        depths.append(result["algorithm_details"]["depth_greedy_reassignment"])
        budget_factors.append(budget_factor)
        running_time.append(result["solution_details"]["solving_time"])
        objective_value.append(result["solution_details"]["objective_value"])
        num_consider_per_iterations.append(result["algorithm_details"]["num_consider_per_iteration"])
        num_fix_assignments_iterations.append(result["algorithm_details"]["num_fix_assignment_iteration"])
        MIP_objectives.append(MIP_objective)
        # relative gap of the heuristic to the MIP objective, in percent
        Delta_MIPs.append(100*(MIP_objective-result["solution_details"]["objective_value"])/ MIP_objective)

    data = {"Budget factor": budget_factors,
            "Number of facilities considered per iteration (n_c)": num_consider_per_iterations,
            "Depth greedy reassignment (d)": depths,
            "Number of iterations after which to fix assignment (n_f)": num_fix_assignments_iterations,
            "Assignment method": assignment_method,
            "Final assignment method": final_assignment_method,
            "Local search assignment method": local_search_method,
            "Run time [s]": running_time, "Objective value open greedy": objective_value,
            "Objective value MIP": MIP_objectives,
            "Delta_MIP [%]": Delta_MIPs}
    df = pd.DataFrame(data=data)
    _save_results_table(df, output_filename, output_abs_path)
def write_results_Schmitt_Singh_table(results_list_filename, output_filename, BLFP_MIP_filename, output_abs_path = None):
    '''
    Given a json format of the Schmitt Singh heuristics results, saves the relevant results as an excel table.
    :param results_list_filename: file name of the Schmitt Singh heuristic results list, expected to be in
        own_results folder
    :param output_filename: file name of the output excel file
    :param BLFP_MIP_filename: file name of the corresponding results for the BFLP MIP in order to compare
        the results of the heuristics to what the MIP achieves, expected to be in own_results folder
    :param output_abs_path: file path for the output file; if not given the resulting excel file will be
        put into the own_results folder
    '''
    results_list = read_results_list(results_list_filename)
    BFLP_MIP_list = read_results_list(BLFP_MIP_filename)
    # map each MIP budget factor to its objective value for comparison below
    dictionary_BFLP_MIP = {}
    for result in BFLP_MIP_list:
        dictionary_BFLP_MIP[result["model_details"]["budget_factor"]] = result["solution_details"]["objective_value"]
    assignment_method = []
    budget_factors = []
    running_time = []
    objective_value = []
    # NOTE(review): iteration limits and turnover factors are collected but not written to the table
    iteration_limits = []
    turnover_factors = []
    MIP_objectives = []
    Delta_MIPs = []
    for result in results_list:
        budget_factor = result["model_details"]["budget_factor"]
        assignment_method.append("greedy_assign")
        budget_factors.append(budget_factor)
        running_time.append(result["solution_details"]["solving_time"])
        objective_value.append(result["solution_details"]["objective_value"])
        iteration_limits.append(result["model_details"]["iteration_limit"])
        turnover_factors.append(result["model_details"]["turnover_factor"])
        # find the MIP result whose budget factor matches up to rounding noise (1e-4)
        closest_budget_factor = 0.0
        closest_budget_factor_distance = 1.0
        for b in dictionary_BFLP_MIP.keys():
            if abs(budget_factor - b) < closest_budget_factor_distance:
                closest_budget_factor_distance = abs(budget_factor - b)
                closest_budget_factor = b
        if closest_budget_factor_distance > 1e-4:
            print("Failed to find MIP results. Creating table failed.")
            return
        MIP_objective = dictionary_BFLP_MIP[closest_budget_factor]
        MIP_objectives.append(MIP_objective)
        # relative gap of the heuristic to the MIP objective, in percent
        Delta_MIPs.append(100*(MIP_objective-result["solution_details"]["objective_value"])/ MIP_objective)

    data = {"Budget factor": budget_factors, "Assignment method": assignment_method,
            "Run time [s]": running_time, "Objective value Schmitt Singh heuristic": objective_value,
            "Objective value MIP": MIP_objectives,
            "Delta_MIP [%]": Delta_MIPs}
    df = pd.DataFrame(data=data)

    # save the table; the path is wrapped in Path for consistency with the other write_* functions
    if not output_abs_path:
        output_abs_path = os.getcwd() + "/own_results"
    if not os.path.exists(output_abs_path):
        os.makedirs(output_abs_path)
    with pd.ExcelWriter(Path(output_abs_path + "/" + output_filename)) as writer:
        df.to_excel(writer, index=False)
def get_results_all_BUAP_methods(users_and_facs_df, travel_dict, users, facs, open_facs,
                                 cap_factor=1.5, define_u = True, cutoff_relaxation = 0.0,
                                 num_consider_relaxation = 20,
                                 cutoff_localsearch = 0.2, time_limit_while_localsearch = 1,
                                 output_filename = "BUAP_heuristics.json"):
    '''
    Runs all the BUAP methods on a single instance: relaxation rounding, greedy assignment,
    and both local searches (reassign / swap) started from each of the two assignments.
    :param users_and_facs_df: dataframe of the user and facility related input data
    :param travel_dict: dictionary of the travel probabilities from each user to each facility
    :param users: list of the users used in the instance
    :param facs: list of the facilities used in the instance
    :param open_facs: list of the open facilities for the BUAP instance
    :param cap_factor: factor by which all capacities are scaled
    :param define_u: boolean indicating whether to explicitly define u in the model
    :param cutoff_relaxation: preference cutoff used when creating the relaxation model,
        only used if num_consider_relaxation is -1
    :param num_consider_relaxation: number of closest facilities to consider for a user (alternative to
        cutoff value), same as n_r in paper; setting this to -1 means we use a cutoff instead
    :param cutoff_localsearch: minimum preference to consider reassigning a user in local search approaches
    :param time_limit_while_localsearch: time limit of the while loop of the local search approaches
    :param output_filename: name of the file where all the results of the heuristics are saved
    :return: list of results of all the heuristics on the input instance
    '''
    cap_dict = {j: cap_factor * users_and_facs_df.at[j, 'capacity'] for j in facs}
    nr_of_users = len(users)
    nr_of_facs = len(open_facs)
    budget_factor = len(open_facs) / len(facs)
    # create dictionary of how many users are expected to travel to each facility
    # using numpy here since it is 10 times faster
    travel_matrix = np.zeros((nr_of_users, nr_of_facs))
    for i in range(nr_of_users):
        for j in range(nr_of_facs):
            travel_matrix[i][j] = travel_dict[users[i]][open_facs[j]]
    exp_travelers_matrix = np.array(users_and_facs_df.loc[users]['population']).reshape(-1, 1) * travel_matrix
    exp_travelers_dict = {users[i]: {open_facs[j]: exp_travelers_matrix[i][j] for j in range(nr_of_facs)}
                          for i in range(nr_of_users)}
    results = []

    def _package(short_result, heuristic, solving_time, with_localsearch_params):
        # Wrap a short heuristic result ({"assignment", "obj"}) in the common result format.
        model_details = {"users": users, "open_facs": open_facs, "facs": facs, "cap_factor": cap_factor,
                         "budget_factor": budget_factor}
        if with_localsearch_params:
            model_details["cutoff"] = cutoff_localsearch
        model_details["heuristic"] = heuristic
        if with_localsearch_params:
            model_details["time_limit_while"] = time_limit_while_localsearch
        return {"solution_details": {"assignment": short_result["assignment"],
                                     "objective_value": short_result["obj"],
                                     "solving_time": solving_time},
                "model_details": model_details}

    def _run_local_search(search_fn, start_assignment, heuristic):
        # Run one BUAP local search from a copy of start_assignment and append the packaged
        # result; returns False if the search did not find a feasible assignment.
        t0 = time.time()
        feasible, short_result = search_fn(users_and_facs_df, travel_dict, users, facs,
                                           open_facs, start_assignment.copy(),
                                           exp_travelers_dict, cap_dict, cutoff = cutoff_localsearch,
                                           time_limit_while = time_limit_while_localsearch)
        t1 = time.time()
        if not feasible:
            return False
        results.append(_package(short_result, heuristic, t1 - t0, True))
        return True

    # relaxation rounding
    is_feasible, result_relaxation_rounding = relaxation_rounding(users_and_facs_df, travel_dict, users, facs,
                                                                  cap_factor=cap_factor, threads=1, tolerance=0.0,
                                                                  cutoff = cutoff_relaxation,
                                                                  time_limit=1000, open_facs = open_facs,
                                                                  budget_factor = budget_factor,
                                                                  define_u = define_u,
                                                                  num_consider = num_consider_relaxation)
    result_relaxation_rounding["model_details"]["local_search"] = "None"
    results.append(result_relaxation_rounding)
    relaxation_assignment = result_relaxation_rounding["solution_details"]["assignment"]

    # reassign / swap local searches started from the relaxation rounding assignment
    if not _run_local_search(local_search_reassign, relaxation_assignment,
                             "Localsearch reassign with relaxation start"):
        print("local search failed")
        return results
    if not _run_local_search(local_search_swap, relaxation_assignment,
                             "Localsearch swap with relaxation start"):
        print("local search failed")
        return results

    # greedy assignment
    start_time = time.time()
    is_feasible, result_greedy_short = greedily_assign_users(users_and_facs_df, travel_dict, users, facs,
                                                             open_facs, exp_travelers_dict, cap_dict,
                                                             assignment = None, fac_to_close = None)
    end_time = time.time()
    results.append(_package(result_greedy_short, "Greedy assignment", end_time - start_time, False))

    # reassign / swap local searches started from the greedy assignment
    if not _run_local_search(local_search_reassign, result_greedy_short["assignment"],
                             "Localsearch reassign with greedy start"):
        print("local search failed")
        return results
    if not _run_local_search(local_search_swap, result_greedy_short["assignment"],
                             "Localsearch swap with greedy start"):
        print("local search failed")
        return results

    write_results_list(results, output_filename)
    return results
results.append(result_greedy_localsearch) + write_results_list(results, output_filename) + return results + +def write_BUAP_results_table(results_list_filename, MIP_objective, output_filename, output_abs_path = None): + ''' + Given a json format of the BUAP heuristics results, saves the relevant results as an excel table. + :param results_list_filename: file name of the BUAP heuristic results list, expected to be in own_results folder + :param output_filename: file name of the output excel file + :param MIP_objective: objective value of the MIP in order to compare the heuristic performance with this + :param output_abs_path: file path for the output file, if not given the resulting excel file will be put + into the own_results folder + ''' + results_list = read_results_list(results_list_filename) + method = [] + objective = [] + runtime = [] + MIP_objectives = [] + Delta_MIPs = [] + for result in results_list: + method.append(result["model_details"]["heuristic"]) + objective.append(result["solution_details"]["objective_value"]) + runtime.append(result["solution_details"]["solving_time"]) + MIP_objectives.append(MIP_objective) + Delta_MIPs.append(100*(MIP_objective-result["solution_details"]["objective_value"])/MIP_objective) + data = {"BUAP method": method,"Run time [s]": runtime, "Objective value BUAP heuristic": objective, + "MIP objective value": MIP_objectives, "Delta_MIP [%]": Delta_MIPs} + df = pd.DataFrame(data=data) + + # save the table + if not output_abs_path: + output_abs_path = os.getcwd() + "/own_results" + if not os.path.exists(output_abs_path): + os.makedirs(output_abs_path) + with pd.ExcelWriter(Path(output_abs_path + "/" + output_filename)) as writer: + df.to_excel(writer, index=False) + + +def get_results_localsearch(users_and_facs_df, travel_dict, input_filename, output_filename, + iteration_limits = [50, 100, 200], num_consider_per_iterations = [10,50,100], + depths_greedy_reassign = [1,2], time_limit_while_loop = 3600, 
assignment_method="greedy_assign", + final_assignment_method = "relaxation_rounding", + localsearch_method = "local_random_reassign", num_consider_in_relaxation = 50, + cutoff_BUAP_localsearch = 0.2, time_limit_while_BUAP_localsearch = 1, + fix_assignment = False): + ''' + Runs the local search (with delta) with the given input parameters on the starting instances given in the input + :param users_and_facs_df: dataframe of the user and facility related input data + :param travel_dict: dictionary of the travel probabilities from each user to each facility + :param input_filename: name of the input file (i.e. results of open or close greedy) on which the local search + should be run + :param output_filename: name of the output file + :param iteration_limits: list of iteration limits to be used within the local search + :param num_consider_per_iterations: list of number of facilities to consider opening / closing in each iteration + :param depths_greedy_reassign: list of depths of the greedy reassignment within the greedy reassign open algorithm + :param: time_limit_while_loop: the time limit in seconds used in the while loop in the BFLP local search + :param assignment_method: method which is used to recompute the assignment if a change is made + param final_assignment_method: assignment method to run after open facilities decided, + currently only support relaxation rounding + :param local_search_methods: BUAP local search method to run on the assignment resulting from assignment_method + and final_assignment_method; options are "None", "local_random_reassign", "local_random_swap" + :param num_consider_in_relaxation: number of most preferred facilities to consider in relaxation, + if -1 all facilities are considered in relaxation + :param cutoff_BUAP_localsearch: parameter for BUAP local search for minimum preference required to reassign to the facility + :param time_limit_while_BUAP_localsearch: time limit in the BUAP local search + :param fix_assignment: boolean 
indicating whether the assignment should be recomputed from scratch + using assignment_method every time the open facilities are changed + :return: list of results of the local search + ''' + input_results_list = read_results_list(input_filename) + output_results_list = [] + for result in input_results_list: + for num_consider in num_consider_per_iterations: + for depth in depths_greedy_reassign: + for iteration_limit in iteration_limits: + starting_assignment = result["solution_details"]["assignment"] + starting_open_facs = result["solution_details"]["open_facs"] + starting_obj = result["solution_details"]["objective_value"] + budget_factor = result["model_details"]["budget_factor"] + users = result["model_details"]["users"] + facs = result["model_details"]["facs"] + cap_factor = result["model_details"]["cap_factor"] + print("Budget factor: " + str(budget_factor)) + print("Num consider per iteration: " + str(num_consider)) + print("Depth: " + str(depth)) + print("Iteration limit: " + str(iteration_limit)) + is_feasible, localsearch_result = localsearch_with_change(users_and_facs_df, travel_dict, users, + facs, starting_assignment, + starting_open_facs, starting_obj, budget_factor, cap_factor = cap_factor, + num_consider_per_iteration = num_consider, time_limit_while_loop = time_limit_while_loop, + iteration_limit_while_loop = iteration_limit, assignment_method=assignment_method, + final_assignment_method = final_assignment_method, + localsearch_method = localsearch_method, num_consider_in_relaxation = num_consider_in_relaxation, + tolerance_relaxation = 2e-3, time_limit_relaxation = 1000, + threads_relaxation= 1, + cutoff_BUAP_localsearch = cutoff_BUAP_localsearch, + time_limit_while_BUAP_localsearch = time_limit_while_BUAP_localsearch, + depth_greedy_reassignment = depth, fix_assignment = fix_assignment, + output_filename = "test.json", write_to_file = False) + overall_result = localsearch_result.copy() + overall_result["starting_results"] = result.copy() + 
                    output_results_list.append(overall_result)
                    # save incrementally so partial results survive an interruption
                    write_results_list(output_results_list, output_filename)
    return output_results_list

def get_results_localsearch_without_change(users_and_facs_df, travel_dict, input_filename, output_filename,
                                           iteration_limits = [50, 100, 200], num_consider_per_iterations = [10,50,100],
                                           depths_greedy_reassign = [1,2], time_limit_while_loop = 3600, assignment_method="greedy_assign",
                                           final_assignment_method = "relaxation_rounding",
                                           localsearch_method = "local_random_reassign", num_consider_in_relaxation = 50,
                                           cutoff_BUAP_localsearch = 0.2, time_limit_while_BUAP_localsearch = 1,
                                           fix_assignment = False):
    # NOTE(review): the list defaults (iteration_limits, num_consider_per_iterations,
    # depths_greedy_reassign) are mutable default arguments; they are only read here,
    # but replacing them with None-sentinels would be safer.
    '''
    Runs the local search (with random choices instead of delta)
    with the given input parameters on the starting instances given in the input.
    One local search run is performed per combination of
    (input result, num_consider, depth, iteration limit).
    :param users_and_facs_df: dataframe of the user and facility related input data
    :param travel_dict: dictionary of the travel probabilities from each user to each facility
    :param input_filename: name of the input file (i.e. results of open or close greedy) on which the local search
        should be run
    :param output_filename: name of the output file
    :param iteration_limits: list of iteration limits to be used within the local search
    :param num_consider_per_iterations: list of numbers of facilities to consider opening / closing in each iteration
    :param depths_greedy_reassign: list of depths of the greedy reassignment within the greedy reassign open algorithm
    :param time_limit_while_loop: the time limit in seconds used in the while loop in the BFLP local search
    :param assignment_method: method which is used to recompute the assignment if a change is made
    :param final_assignment_method: assignment method to run after open facilities decided,
        currently only relaxation rounding is supported
    :param localsearch_method: BUAP local search method to run on the assignment resulting from assignment_method
        and final_assignment_method; options are "None", "local_random_reassign", "local_random_swap"
    :param num_consider_in_relaxation: number of most preferred facilities to consider in relaxation,
        if -1 all facilities are considered in relaxation
    :param cutoff_BUAP_localsearch: parameter for BUAP local search for minimum preference required to reassign to the facility
    :param time_limit_while_BUAP_localsearch: time limit in the BUAP local search
    :param fix_assignment: boolean indicating whether the assignment should be recomputed from scratch
        using assignment_method every time the open facilities are changed
    :return: list of results of the local search
    '''
    input_results_list = read_results_list(input_filename)
    output_results_list = []
    for result in input_results_list:
        for num_consider in num_consider_per_iterations:
            for depth in depths_greedy_reassign:
                for iteration_limit in iteration_limits:
                    # starting point for the local search: the solution of the input (greedy) run
                    starting_assignment = result["solution_details"]["assignment"]
                    starting_open_facs = result["solution_details"]["open_facs"]
                    starting_obj = result["solution_details"]["objective_value"]
                    budget_factor = result["model_details"]["budget_factor"]
                    users = result["model_details"]["users"]
                    facs = result["model_details"]["facs"]
                    cap_factor = result["model_details"]["cap_factor"]
                    print("Budget factor: " + str(budget_factor))
                    print("Num consider per iteration: " + str(num_consider))
                    print("Depth: " + str(depth))
                    print("Iteration limit: " + str(iteration_limit))
                    is_feasible, localsearch_result = localsearch_without_change(users_and_facs_df, travel_dict, users,
                        facs, starting_assignment,
                        starting_open_facs, starting_obj, budget_factor, cap_factor = cap_factor,
                        num_consider_per_iteration = num_consider, time_limit_while_loop = time_limit_while_loop,
                        iteration_limit_while_loop = iteration_limit, assignment_method = assignment_method,
                        final_assignment_method = final_assignment_method,
                        localsearch_method = localsearch_method, num_consider_in_relaxation = num_consider_in_relaxation,
                        tolerance_relaxation = 2e-3, time_limit_relaxation = 1000,
                        threads_relaxation= 1,
                        cutoff_BUAP_localsearch = cutoff_BUAP_localsearch,
                        time_limit_while_BUAP_localsearch = time_limit_while_BUAP_localsearch,
                        depth_greedy_reassignment = depth, fix_assignment = fix_assignment,
                        output_filename = "test_without_change.json")

                    # attach the starting (input) result so tables can report the improvement
                    overall_result = localsearch_result.copy()
                    overall_result["starting_results"] = result.copy()
                    output_results_list.append(overall_result)
                    # save incrementally so partial results survive an interruption
                    write_results_list(output_results_list, output_filename)
    return output_results_list

def write_results_localsearch_table(results_list_filename, output_filename,BLFP_MIP_filename, output_abs_path = None):
    '''
    Given a json format of BFLP local search results, saves the relevant results as an excel table.
    :param results_list_filename: file name of the BFLP local search results list, expected to be in own_results folder
    :param BLFP_MIP_filename: file name of the corresponding results for the BFLP MIP in order to compare the results
        of the heuristics to what the MIP achieves, expected to be in own_results folder
        (NOTE(review): the parameter name transposes "BFLP"; kept for backward compatibility)
    :param output_filename: file name of the output excel file
    :param output_abs_path: file path for the output file, if not given the resulting excel file will be put
        into the own_results folder
    '''
    results_list = read_results_list(results_list_filename)
    BFLP_MIP_list = read_results_list(BLFP_MIP_filename)
    # map budget factor -> MIP objective, used as the comparison baseline
    dictionary_BFLP_MIP = {}
    for result in BFLP_MIP_list:
        dictionary_BFLP_MIP[result["model_details"]["budget_factor"]] = result["solution_details"]["objective_value"]
    # one column list per output column of the table
    assignment_method = []
    local_search_method = []
    final_assignment_method = []
    consider_per_iterations = []
    number_of_iterations = []
    depths = []
    budget_factors = []
    running_time = []
    objective_value = []
    starting_objective_value = []
    MIP_objectives = []
    Delta_MIPs = []
    Delta_MIPs_starting = []
    for result in results_list:
        budget_factor =result["model_details"]["budget_factor"]
        consider_per_iterations.append(result["algorithm_details"]["num_consider_per_iteration"])
        number_of_iterations.append(result["algorithm_details"]["iteration_limit_while_loop"])
        depths.append(result["algorithm_details"]["depth_greedy_reassignment"])
        budget_factors.append(budget_factor)
        running_time.append(result["solution_details"]["solving_time"])
        assignment_method.append(result["algorithm_details"]["assignment_method"])
        local_search_method.append(result["algorithm_details"]["local_search_assignment_method"])
        final_assignment_method.append(result["algorithm_details"]["final_assignment_method"])
        objective_value.append(result["solution_details"]["objective_value"])
        starting_objective_value.append(result["starting_results"]["solution_details"]["objective_value"])
        # match the budget factor against the MIP results, tolerating float noise
        closest_budget_factor = 0.0
        closest_budget_factor_distance = 1.0
        for b in dictionary_BFLP_MIP.keys():
            if abs(budget_factor - b) < closest_budget_factor_distance:
                closest_budget_factor_distance = abs(budget_factor - b)
                closest_budget_factor = b
        if closest_budget_factor_distance > 1e-4:
            # no sufficiently close MIP run found -> cannot build the comparison table
            print("Failed to find MIP results. Creating table failed.")
            return
        MIP_objective = dictionary_BFLP_MIP[closest_budget_factor]
        MIP_objectives.append(MIP_objective)
        # relative gap to the MIP objective, in percent
        Delta_MIPs.append(100*(MIP_objective-result["solution_details"]["objective_value"])/ MIP_objective)
        Delta_MIPs_starting.append(100*(MIP_objective-result["starting_results"]["solution_details"]["objective_value"])/ MIP_objective)


    # NOTE(review): "metod" in the two column headers below is a typo, but it is part of
    # the emitted table schema; changing it would break downstream consumers.
    data = {"Budget factor": budget_factors, "Number of facilities considered per iteration (n_c)": consider_per_iterations,
            "Depth of localsearch within greedy reassign":depths, "number_of_iterations": number_of_iterations,
            "Assignment method": assignment_method, "Final assignment metod": final_assignment_method,
            "Local search assignment metod": local_search_method,
            "Run time [s]": running_time, "Starting objective value": starting_objective_value,
            "Objective value after local search": objective_value, "MIP objective value": MIP_objectives,
            "Delta_MIP of starting solution [%]": Delta_MIPs_starting, "Delta_MIP after BFLP local search [%]": Delta_MIPs
            }
    df = pd.DataFrame(data=data)

    # save the table
    if not output_abs_path:
        output_abs_path = os.getcwd() + "/own_results"
    if not os.path.exists(output_abs_path):
        os.makedirs(output_abs_path)
    with pd.ExcelWriter(output_abs_path + "/" + output_filename) as writer:
        df.to_excel(writer, index=False)
"""
Utility and help functions
Note that all function until geodesic distance are from or adapted from
https://github.com/schmitt-hub/preferential_access_and_fairness_in_waste_management
"""

import os
from math import exp
import numpy as np
import pandas as pd
from geopy import distance
import json
import bz2
import _pickle as cPickle
import random
from typing import List
from pathlib import Path

# functions for computing travel probabilities
def f_urban(d):
    """
    Travel probability for an urban user at distance d (in km).
    Piecewise exponential decay with a break at 5 km; the numeric coefficients
    are presumably fitted parameters from the referenced upstream project
    (TODO confirm their provenance).
    """
    if d < 5:
        return exp(-0.2550891696011455 * d ** 0.8674531576586394)
    else:
        return 4.639450774188538 * exp(-1.4989521421856289 * d ** 0.3288777336829004)


def f_rural(d):
    """
    Travel probability for a rural user at distance d (in km).
    Same piecewise-exponential shape as f_urban, with rural-specific coefficients.
    """
    if d < 5:
        return exp(-0.24990116894290326 * d ** 0.8201058149904008)
    else:
        return 1.6114912595353221 * exp(-0.6887217475464711 * d ** 0.43652329253292316)


def create_travel_dict(users_and_facs_df, users, facs, output_filename = 'travel_dict'):
    """
    Create the travel dictionary that specifies the probabilities for each travel combination
    and save it as a compressed pickle (<output_filename>.json.pbz2).
    :param users_and_facs_df: dataframe of the user and facility related input data
    :param users: list of the users used in the instance
    :param facs: list of the facilities used in the instance
    :param output_filename: prefix of the file the dictionary is saved to
    :return: travel dictionary that specifies the probabilities for each travel combination
    """
    travel_dict = {i: {} for i in users}
    for i in users:
        # progress output; distance computation over all pairs can be slow
        print('user', i)
        regiotype = users_and_facs_df.at[i, 'regional spatial type']
        lat_1 = users_and_facs_df.at[i, 'centroid_lat']
        lon_1 = users_and_facs_df.at[i, 'centroid_lon']
        for j in facs:
            lat_2 = users_and_facs_df.at[j, 'rc_centroid_lat']
            lon_2 = users_and_facs_df.at[j, 'rc_centroid_lon']
            dist = distance.distance((lat_1, lon_1), (lat_2, lon_2)).km
            if regiotype == "urban":
                travel_dict[i][j] = f_urban(dist)
            else:
                travel_dict[i][j] = f_rural(dist)
    #with open(output_filename + '.json', 'w') as outfile:
    #    json.dump(travel_dict, outfile)
    save_travel_dict(travel_dict, travel_dict_filename=output_filename+ ".json.pbz2")


# functions for loading and saving data files

def save_travel_dict(travel_dict, travel_dict_filename, abs_path=None):
    """Pickle travel_dict into a bz2-compressed file under abs_path (default: ./data)."""
    if not abs_path:
        abs_path = os.getcwd() + "/data"
    if not os.path.exists(abs_path):
        os.makedirs(abs_path)
    with bz2.BZ2File(Path(abs_path + "/" + travel_dict_filename), "w") as f:
        cPickle.dump(travel_dict, f)


def load_users_and_facs(users_and_facs_filename="users_and_facilities.csv", abs_path=None):
    """Load the users-and-facilities CSV from abs_path (default: ./data) as a dataframe."""
    if not abs_path:
        abs_path = os.getcwd() + "/data"
    users_and_facs_df = pd.read_csv(Path(abs_path + "/" + users_and_facs_filename))
    return users_and_facs_df


def load_travel_dict(travel_dict_filename="travel_dict.json.pbz2", abs_path=None):
    """
    Load a bz2-pickled travel dictionary from abs_path (default: ./data).
    Keys are converted back to int, since earlier JSON round-trips stored them as strings.
    """
    if not abs_path:
        abs_path = os.getcwd() + "/data"
    data = bz2.BZ2File(Path(abs_path + "/" + travel_dict_filename), 'rb')
    travel_dict = cPickle.load(data)
    travel_dict = {int(i): {int(j): travel_dict[i][j] for j in travel_dict[i]} for i in travel_dict}
    return travel_dict

def load_facility_distance_dict(facility_distance_dict_filename = "facility_distance_dict.json.pbz2", abs_path=None):
    """
    Load a bz2-pickled facility-distance dictionary from abs_path (default: ./data).
    Same int-key fix-up as load_travel_dict (the local name 'travel_dict' is a
    copy-paste leftover; it holds the facility distance dictionary here).
    """
    if not abs_path:
        abs_path = os.getcwd() + "/data"
    data = bz2.BZ2File(Path(abs_path + "/" + facility_distance_dict_filename), 'rb')
    travel_dict = cPickle.load(data)
    travel_dict = {int(i): {int(j): travel_dict[i][j] for j in travel_dict[i]} for i in travel_dict}
    return travel_dict

def load_input_data(
    users_and_facilities_filename="users_and_facilities.csv",
    travel_dict_filename="travel_dict.json.pbz2",
    abs_path=None
):
    """Convenience wrapper: load both the users/facilities dataframe and the travel dictionary."""
    users_and_facs_df = load_users_and_facs(users_and_facilities_filename, abs_path)
    travel_dict = load_travel_dict(travel_dict_filename, abs_path)
    return users_and_facs_df, travel_dict

def write_results_list(results, output_filename, output_abs_path = None):
    """Write a results list as JSON into output_abs_path (default: ./own_results), creating the folder if needed."""
    # save the table
    if not output_abs_path:
        output_abs_path = os.getcwd() + "/own_results"
    if not os.path.exists(output_abs_path):
        os.makedirs(output_abs_path)
    with open(Path(output_abs_path + "/" + output_filename), 'w') as f:
        json.dump(results, f)

def read_results_list(filename, abs_path = None):
    """
    Read a results list and fix keys being string instead of int
    (JSON serialization turns the int user ids of the assignment into strings).
    :param filename: filename, if no abs_path given in own_results folder
    :param abs_path: optional folder to read from instead of ./own_results
    :return: results list with fixed keys being int
    """
    if not abs_path:
        abs_path = os.getcwd() + "/own_results"
    with open(Path(abs_path+"/"+filename), 'r') as infile:
        results_list = json.load(infile)
    for results in results_list:
        results['solution_details']['assignment'] = {int(i): j for (i, j) in
                                                     results['solution_details']['assignment'].items()}
    return results_list

def geodesic_distance(users_and_facs_df, i, j):
    """
    Compute the geodesic distance from user i to facility j
    :param users_and_facs_df: dataframe of the user and facility related input data
    :param i: index of the user
    :param j: index of the facility
    :return: distance in kilometers from user i to facility j
    """
    return distance.distance(
        (users_and_facs_df.at[i, 'centroid_lat'], users_and_facs_df.at[i, 'centroid_lon']),
        (users_and_facs_df.at[j, 'rc_centroid_lat'],
         users_and_facs_df.at[j, 'rc_centroid_lon'])).km


def capacity_constraints_slack(users_and_facs_df, travel_dict,cap_dict, facs, assignment):
    """
    Compute how much capacity is left for each facility given an assignment.
    :param users_and_facs_df: dataframe of the user and facility related input data
    :param travel_dict: dictionary of the travel probabilities from each user to each facility
    :param cap_dict: dictionary of capacities of the facilities (not modified; a shallow copy is taken)
    :param facs: list of the facilities used in the instance
    :param assignment: dictionary of key user, value facility
    :return: return a dictionary of slack for each facility,
        array of facilities who do not have their capacity constraint satisfied,
        dictionary with key facilities and value list of users assigned to that facility in assignment
    """
    slacks = cap_dict.copy()
    assigned_users = {j: [] for j in facs}
    for i, j in assignment.items():
        assigned_users[j].append(i)
        # expected demand of user i at facility j: population weighted by travel probability
        slacks[j] = slacks[j] - users_and_facs_df.at[i, 'population'] * travel_dict[i][j]
    # NOTE(review): 'not_satisfed' is a typo for 'not_satisfied' (local name, kept as is)
    not_satisfed = [j for j in facs if slacks[j] < 0]
    return slacks, not_satisfed, assigned_users


def check_valid_solution(users_and_facs_df, travel_dict, facs, open_facs, users, assignment,
                         budget_factor, cap_factor = 1.5):
    """"
    Check if assignment is valid solution to a BFLP / BUAP instance
    :param users_and_facs_df: dataframe of the user and facility related input data
    :param travel_dict: dictionary of the travel probabilities from each user to each facility
    :param facs: list of all facilities used in the instance
    :param open_facs: list of the open facilities
    :param users: list of users
    :param assignment: dictionary of key user, value facility
    :param budget_factor: proportion of facilities that should be open at a maximum
    :param cap_factor: factor by which all capacities are scaled
    :return: true if assignment is valid, false otherwise.
    """
    # check each user assigned to a facility
    if(not all(i in assignment.keys() for i in users)):
        print("Some user is not assigned a facility")
        return False
    # check only open facilities used
    if(not all(j in open_facs for j in assignment.values())):
        print("Some user is assigned to a closed facility")
        return False
    # check capacity constraints satisfied
    slacks = {j: cap_factor * users_and_facs_df.at[j, 'capacity'] for j in facs}
    for i, j in assignment.items():
        slacks[j] = slacks[j] - users_and_facs_df.at[i, 'population'] * travel_dict[i][j]
    not_satisfed = [j for j in facs if slacks[j] < 0]
    if len(not_satisfed) > 0:
        print("Capacity constraints not satisfied")
        print(not_satisfed)
        return False
    # check correct number of open facilities
    if len(open_facs) > round(budget_factor*len(facs)):
        print("Too many facilities open ")
        return False
    # objective: capacity-weighted sum of squared relative slacks (printed for information only)
    objective = sum(cap_factor*users_and_facs_df.at[j, "capacity"]*(slacks[j]/(cap_factor*users_and_facs_df.at[j, "capacity"]))**2 for j in facs)
    print("Objective: " + str(objective))
    return True


def check_valid_solution_with_unassigned(users_and_facs_df, travel_dict, facs, open_facs, users,
                                         assignment, budget_factor, cap_factor = 1.5):
    """"
    Check if assignment is valid solution, apart from allowing some users being unassigned
    (users mapped to the sentinel value "unassigned" are skipped in the capacity check).
    :param users_and_facs_df: dataframe of the user and facility related input data
    :param travel_dict: dictionary of the travel probabilities from each user to each facility
    :param facs: list of all facilities used in the instance
    :param open_facs: list of the open facilities
    :param users: list of users
    :param assignment: dictionary of key user, value facility (or "unassigned")
    :param budget_factor: proportion of open facilities
    :param cap_factor: factor by which all capacities are scaled
    :return: true if assignment is valid, false otherwise
    """
    # check each user assigned to a facility
    if(not all(i in assignment.keys() for i in users)):
        print("Some user is not assigned a facility")
        return False
    # check only open facilities used
    if(not all(j in open_facs or j=="unassigned" for j in assignment.values())):
        print("Some user is assigned to a closed facility")
        return False
    # check capacity constraints satisfied
    slacks = {j: cap_factor * users_and_facs_df.at[j, 'capacity'] for j in facs}
    for i, j in assignment.items():
        if j != "unassigned":
            slacks[j] = slacks[j] - users_and_facs_df.at[i, 'population'] * travel_dict[i][j]
    not_satisfed = [j for j in facs if slacks[j] < 0]
    if len(not_satisfed) > 0:
        print("Capacity constraints not satisfied")
        print(not_satisfed)
        return False
    # check correct number of open facilities
    if len(open_facs) > round(budget_factor*len(facs)):
        print("Too many facilities open ")
        return False
    # objective: capacity-weighted sum of squared relative slacks (printed for information only)
    objective = sum(cap_factor*users_and_facs_df.at[j, "capacity"]*(slacks[j]/(cap_factor*users_and_facs_df.at[j, "capacity"]))**2 for j in facs)
    print("Objective: " + str(objective))
    return True

def generate_data(n_users: int, p_fac: float, output_file_name_prefix = "large_random_instance"):
    '''
    Generates random BFLP instance and saves a users_and_facs_df and a dictionary of P_ij (travel_dict) for this instance.
    Output goes to data/<prefix>_users_and_facs.csv and data/<prefix>_travel_dict.json.pbz2.
    :param n_users: number of users of generated instance
    :param p_fac: probability each ZIP code has a facility
    :param output_file_name_prefix: First part of the name for the files in which the instance is saved
    '''
    # Longitude and latitude range for Germany
    #lon_min, lon_max = 5.8663153, 15.0419319
    #lat_min, lat_max = 47.2701114, 55.099161
    # Longitude and latitude range for Bavaria
    #lon_min, lon_max = 9.01111334936577, 13.778347475506
    #lat_min, lat_max = 47.3625560497034, 50.5248689558092

    # Quarter of Bavaria size
    lon_min, lon_max = 9.0, 11.5
    lat_min, lat_max = 47.3, 49.0

    data = []
    users = list(range(n_users))
    facs = []
    for i in range(n_users):
        # NOTE(review): 'zip' shadows the builtin zip() inside this loop; harmless
        # here since zip() is not used, but worth renaming to zip_code.
        zip = i
        lon = random.uniform(lon_min, lon_max)
        lat = random.uniform(lat_min, lat_max)
        population = random.randint(0, 20000)
        # a ZIP code hosts a facility with probability p_fac; capacity 0 means no facility
        capacity = random.randint(0, 80000) if random.random() < p_fac else 0
        if capacity > 0:
            facs.append(i)
        spatial_type = "rural" if random.random() < 0.5 else "urban"
        # rc_centroid gets a small Gaussian offset so user and facility locations differ
        data.append({'zipcode': zip,'centroid_lat': lat,'centroid_lon': lon,
                     'regional spatial type': spatial_type,'population': population,
                     'capacity': capacity,'rc_centroid_lat': lat + random.gauss(0.0, 0.01),
                     'rc_centroid_lon': lon + random.gauss(0.0, 0.01)})
    users_and_facs_df = pd.DataFrame(data)
    users_and_facs_df.to_csv("data/"+ output_file_name_prefix+'_users_and_facs.csv', index=False)
    create_travel_dict(users_and_facs_df, users, facs, output_filename=output_file_name_prefix+'_travel_dict')

def create_users_facs_by_zip_code_division(users_and_facs_df, users,facs):
    """
    Splits up instance 1 into small instances
    :param users_and_facs_df: dataframe of the user and facility related input data
    :param users: list of the users used in the instance
    :param facs: list of the facilities used in the instance
    :return: two dictionaries (users and facs) keyed by the first two digits of the ZIP codes,
        each value being the list of users / facilities whose ZIP code starts with that key
    """
    zip_codes_starts = []
    for i in users:
        if str(users_and_facs_df.at[i, "zipcode"])[0:2] not in zip_codes_starts:
            zip_codes_starts.append(str(users_and_facs_df.at[i, "zipcode"])[0:2])

    split_users = {start: [] for start in zip_codes_starts}
    split_facs = {start: [] for start in zip_codes_starts}
    for i in users:
        split_users[str(users_and_facs_df.at[i, "zipcode"])[0:2]].append(i)

    for j in facs:
        split_facs[str(users_and_facs_df.at[j, "zipcode"])[0:2]].append(j)
    return split_users, split_facs

def create_larger_sets_by_combining_zip_code_starts(users_and_facs_df, users, facs):
    """
    Splits up instance 1 into medium sized instances
    :param users_and_facs_df: dataframe of the user and facility related input data
    :param users: list of the users used in the instance
    :param facs: list of the facilities used in the instance
    :return: two dictionaries (users and facs) keyed by groups of two-digit ZIP code prefixes,
        i.e. the per-prefix splits combined into larger sets
    """
    split_users, split_facs = create_users_facs_by_zip_code_division(users_and_facs_df, users,facs)
    # hard-coded grouping of Bavarian two-digit ZIP prefixes into regions
    combined_zip_codes = {"63, 97, 96, 95" : [63, 97, 96, 95], "90, 91, 92": [90, 91, 92],
                          "93, 94": [93, 94], "80, 81, 82, 83, 84, 85" : [80, 81, 82, 83, 84, 85],
                          "86, 87,88, 89": [86, 87, 88, 89]}
    split_users_combined = {start: [] for start in combined_zip_codes.keys()}
    split_facs_combined = {start: [] for start in combined_zip_codes.keys()}
    for key, value in split_users.items():
        for key_combined,value_combined in combined_zip_codes.items():
            if int(key) in value_combined:
                split_users_combined[key_combined].extend(value)
    for key, value in split_facs.items():
        for key_combined,value_combined in combined_zip_codes.items():
            if int(key) in value_combined:
                split_facs_combined[key_combined].extend(value)
    return split_users_combined, split_facs_combined
def get_instance_2_users_facs(users_and_facs_df, users, facs):
    '''
    Creates and returns the user and facility sets for Instance 2 from Instance 1 input.
    Instance 2 is the "90, 91, 92" ZIP-prefix region of the combined split.
    :param users_and_facs_df: dataframe of user and facility related input data
    :param users: set of users
    :param facs: set of facilities
    :return: the users and facilities of instance 2
    '''
    combined_users, combined_facs = create_larger_sets_by_combining_zip_code_starts(users_and_facs_df, users, facs)
    region_key = "90, 91, 92"
    return combined_users[region_key], combined_facs[region_key]

def load_an_instance(instance_number, sufficient_cap = False):
    '''
    Helper function for getting all the data needed for a specific instance.
    :param instance_number: the number of the instance; possible inputs are 1,2,3,4
    :param sufficient_cap: boolean indicating whether the version of the instance where
        sum_{i in I} U_i P_ij <= C_j should be loaded
    :return: users and facilities dataframe, dictionary of P_ij, list of users, list of facilities
    '''
    if instance_number not in (1, 2, 3, 4):
        # warn but keep going: a custom instance with this number may still exist on disk
        print("The instance number is not in the allowed range of 1,2,3 or 4.")
        print("This function may fail if you have not created an instance with your input instance number.")

    # instance 2 is carved out of instance 1's data files, so load those instead
    actual_instance_number = "1" if instance_number == 2 else str(instance_number)

    suff_postfix = "suff_" if sufficient_cap else ""
    uaf_filename = "instance_" + actual_instance_number + "_" + suff_postfix + "users_and_facs.csv"
    td_filename = "instance_" + actual_instance_number + "_travel_dict.json.pbz2"

    users_and_facs_df, travel_dict = load_input_data(users_and_facilities_filename=uaf_filename,
                                                     travel_dict_filename=td_filename)

    # every row is a user; rows with positive capacity are additionally facilities
    users = [int(i) for i in users_and_facs_df.index]
    facs = [i for i in users_and_facs_df.index if users_and_facs_df.at[i, 'capacity'] > 0]

    if instance_number == 2:
        users, facs = get_instance_2_users_facs(users_and_facs_df, users, facs)

    return users_and_facs_df, travel_dict, users, facs