Skip to content

Commit

Permalink
Simplify setup, publish to PyPI, disable LSTM predictor (#211)
Browse files Browse the repository at this point in the history
* Remove unnecessary dependencies

* Add MIT license

* Use Python 3.7-3.11 for CI

* Remove Py 3.10 which is interpreted as Py 3.1

* Copy common-utils into coordsim and disable lstm prediction

* Add publish workflow

* Add pyyaml dependency

* Remove Py 3.11 pipeline
  • Loading branch information
stefanbschneider committed May 4, 2023
1 parent d1ce7f1 commit 4386a56
Show file tree
Hide file tree
Showing 14 changed files with 597 additions and 18 deletions.
4 changes: 2 additions & 2 deletions .github/workflows/python-package.yml
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
python-version: [3.6]
python-version: [3.7, 3.8, 3.9]

steps:
- uses: actions/checkout@v2
Expand All @@ -27,7 +27,7 @@ jobs:
run: |
python -m pip install --upgrade pip setuptools
pip install flake8 nose2
if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
pip install .
- name: Lint with flake8
run: |
flake8 src --count --exit-zero --show-source --statistics
Expand Down
41 changes: 41 additions & 0 deletions .github/workflows/python-publish.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
# This workflow will upload a Python Package using Twine when a release is created
# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python#publishing-to-package-registries

# This workflow uses actions that are not certified by GitHub.
# They are provided by a third-party and are governed by
# separate terms of service, privacy policy, and support
# documentation.

name: Upload Python Package

on:
release:
types: [published]
workflow_dispatch:

permissions:
contents: read

jobs:
deploy:

runs-on: ubuntu-latest

steps:
- uses: actions/checkout@v3
- name: Set up Python
uses: actions/setup-python@v3
with:
python-version: '3.x'
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install build
- name: Build package
run: python -m build
- name: Publish package
uses: pypa/gh-action-pypi-publish@27b31702a0e7fc50959f5ad993c78deac1bdfc29
with:
user: __token__
password: ${{ secrets.PYPI_API_TOKEN }}

5 changes: 3 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -34,10 +34,11 @@ Feel free to open a pull request and add your own project if you use coord-sim!

## Setup

Requires **Python 3.6** (newer versions do not support the required TF 1.14). Install with (ideally using [virtualenv](https://virtualenv.pypa.io/en/stable/)):
Install with (ideally using [virtualenv](https://virtualenv.pypa.io/en/stable/)):

```bash
pip install -r requirements.txt
pip install .
# For dev install: pip install -e .
```


Expand Down
2 changes: 0 additions & 2 deletions requirements.txt

This file was deleted.

31 changes: 23 additions & 8 deletions setup.py
Original file line number Diff line number Diff line change
@@ -1,34 +1,43 @@
import os

from setuptools import setup, find_packages

# read the contents of the README file
this_directory = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(this_directory, "README.md"), encoding="utf-8") as f:
long_description = f.read()

requirements = [
'scipy==1.5.4',
'simpy>=4',
'networkx==2.4',
'geopy',
'pyyaml>=5.1',
'numpy>=1.16.5,<=1.19.5',
'common-utils',
'scikit-learn',
'pandas==1.1.5',
'tensorflow==1.14.0',
'keras==2.2.5',
'matplotlib',
'pyyaml'
]
# extra requirements for the lstm_predictor (usually not needed)
lstm_extra_requirements = [
"scikit-learn",
'keras==2.2.5',
]

test_requirements = [
'flake8',
'nose2'
]

setup(
name='coord-sim',
version='2.1.1',
version='2.2.0',
description='Simulate flow-level, inter-node network coordination including scaling and placement of services and '
'scheduling/balancing traffic between them.',
url='https://github.com/RealVNF/coord-sim',
author='Stefan Schneider',
package_dir={'': 'src'},
packages=find_packages('src'),
python_requires=">=3.6.0",
install_requires=requirements,
extras_require={"lstm": lstm_extra_requirements},
tests_require=test_requirements,
zip_safe=False,
entry_points={
Expand All @@ -38,4 +47,10 @@
'lstm-predict=coordsim.traffic_predictor.lstm_predictor:main'
],
},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
license="MIT",
)
21 changes: 21 additions & 0 deletions src/LICENSE
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2021 Stefan Schneider

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Empty file added src/animations/__init__.py
Empty file.
Empty file added src/common/__init__.py
Empty file.
106 changes: 106 additions & 0 deletions src/common/common_functionalities.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,106 @@
import numpy as np
import os
import yaml
import networkx as nx
from shutil import copyfile

# url = 'https://github.com/numpy/numpy/blob/master/numpy/random/mtrand.pyx#L778'
# a threshold for floating point arithmetic error handling
accuracy = np.sqrt(np.finfo(np.float64).eps)


def normalize_scheduling_probabilities(input_list: list) -> list:
    """Return a rounded copy of *input_list* whose elements sum to 1.0.

    Handles three cases:
    1) All elements are 0 -> the probability mass is distributed uniformly.
    2) sum(input_list) deviates from 1.0 by more than a small tolerance ->
       each value is divided by the total, and any residual left after
       rounding is absorbed by the first element that can take it.
    3) An empty list -> an empty list is returned unchanged.
    Because of floating-point arithmetic (e.g. .59 + .33 + .08 == .99999999)
    an error of roughly +-1.49e-8 in the sum has to be tolerated; see
    https://stackoverflow.com/questions/588004/is-floating-point-math-broken
    """
    # Floating-point tolerance: sqrt of the float64 machine epsilon
    # (same threshold numpy's random module uses for probability checks).
    tolerance = np.sqrt(np.finfo(np.float64).eps)

    # Empty input: nothing to normalize.
    if not input_list:
        return []

    total = sum(input_list)
    if total == 0:
        # All-zero list (probabilities are never negative here, so a zero
        # total means every entry is zero): distribute uniformly.
        normalized = [round(1 / len(input_list), 10)] * len(input_list)
    elif abs(1 - total) > tolerance:
        # Sum is genuinely off from 1.0: rescale every entry by the total
        # so the distribution sums to approximately 1.
        normalized = [round(value / total, 10) for value in input_list]
    else:
        # Already within tolerance of 1.0: keep values as they are.
        normalized = input_list.copy()

    # Rounding may leave a tiny residual (positive or negative); push it
    # onto the first element that stays non-negative after the adjustment.
    residual = 1 - sum(normalized)
    if residual != 0:
        idx = 0
        while normalized[idx] + residual < 0:
            idx += 1
        normalized[idx] = normalized[idx] + residual

    assert abs(1 - sum(normalized)) < tolerance, "Sum of list not equal to 1.0"
    return normalized


def create_input_file(target_dir, num_ingress, algo):
    """Write an input.yaml into *target_dir* recording the run parameters.

    The file holds the number of ingress nodes and the algorithm name;
    the directory is created first if it does not exist yet.
    """
    destination = f"{target_dir}/input.yaml"
    os.makedirs(f"{target_dir}", exist_ok=True)
    params = {"num_ingress": num_ingress, "algorithm": algo}
    with open(destination, "w") as outfile:
        yaml.dump(params, outfile, default_flow_style=False)


def num_ingress(network_path):
    """Count the nodes marked "Ingress" in the graphml file at *network_path*."""
    graph = nx.read_graphml(network_path, node_type=int)
    # node is a (node_id, attribute_dict) pair when data=True
    return sum(1 for _, attrs in graph.nodes(data=True) if attrs["NodeType"] == "Ingress")


def copy_input_files(target_dir, network_path, service_path, sim_config_path):
    """Create the results directory and copy the three input files into it.

    Each file keeps its original basename inside *target_dir*.
    """
    os.makedirs(target_dir, exist_ok=True)
    # Preserve the original copy order: network, service, sim config.
    for source in (network_path, service_path, sim_config_path):
        copyfile(source, f"{target_dir}/{os.path.basename(source)}")


def get_ingress_nodes_and_cap(network, cap=False):
    """Collect the ingress nodes of *network* and, optionally, all node capacities.

    Parameters:
        network: a NetworkX DiGraph (anything exposing nodes(data=True))
        cap: when True, also return the per-node capacity mapping
    Returns:
        A list of node ids whose "type" attribute is "Ingress"; when *cap*
        is True, a (ingress_list, capacity_dict) tuple where capacity_dict
        maps every node id to its "cap" attribute.
    """
    ingress = []
    capacities = {}
    for node_id, attrs in network.nodes(data=True):
        capacities[node_id] = attrs['cap']
        if attrs["type"] == "Ingress":
            ingress.append(node_id)
    return (ingress, capacities) if cap else ingress
5 changes: 5 additions & 0 deletions src/dummy_env/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
""" Package to containing a dummy implementation for the environment.
"""
from .dummy_simulator import DummySimulator

__all__ = ['DummySimulator']
Loading

0 comments on commit 4386a56

Please sign in to comment.