diff --git a/.github/ISSUE_TEMPLATE/---bug-report.md b/.github/ISSUE_TEMPLATE/---bug-report.md index 4d25a843c..5c3c41a55 100644 --- a/.github/ISSUE_TEMPLATE/---bug-report.md +++ b/.github/ISSUE_TEMPLATE/---bug-report.md @@ -32,12 +32,12 @@ Steps to reproduce the behavior: ## Environment - MARO version (e.g., v0.1.1a1): -- MARO scenario (`CIM`, `Citi Bike`): +- MARO scenario (`CIM`, `Citi Bike`, `VM Scheduling`): - MARO component (`Simulation`, `RL`, `Distributed Training`): - Orchestration platform (`GraSS on Azure`, `AKS on Azure`): - How you installed MARO (`pip`, `source`): - OS (`Linux`, `Windows`, `macOS`): -- Python version (`3.6`, `3.7`): +- Python version (`3.7`, `3.8`, `3.9`): - Docker image (e.g., maro2020/maro:latest): - CPU/GPU: - Any other relevant information: diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 312429796..2be883cbe 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -31,8 +31,9 @@ Please also add other related information/contexts/dependencies here. - [ ] Mac OS - [ ] Linux - Python version: - - [ ] 3.6 - [ ] 3.7 + - [ ] 3.8 + - [ ] 3.9 - Key information snapshot(s): ## Needs Follow Up Actions diff --git a/.github/workflows/build_wheel.yml b/.github/workflows/build_wheel.yml index 1cbcb0ce2..66ae53ae0 100644 --- a/.github/workflows/build_wheel.yml +++ b/.github/workflows/build_wheel.yml @@ -13,7 +13,7 @@ jobs: strategy: matrix: os: [ubuntu-latest, windows-latest, macos-latest] - python-version: [3.6, 3.7] + python-version: [3.7, 3.8, 3.9] steps: - uses: actions/checkout@v2 diff --git a/.github/workflows/deploy_docker_image.yml b/.github/workflows/deploy_docker_image.yml index a876abf93..7d35e204a 100644 --- a/.github/workflows/deploy_docker_image.yml +++ b/.github/workflows/deploy_docker_image.yml @@ -21,10 +21,10 @@ jobs: steps: # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it - uses: actions/checkout@v2 - - name: Set up Python 3.6 + - name: Set up Python 3.7 uses: actions/setup-python@v2 with: - python-version: 3.6 + python-version: 3.7 - name: Build image run: | diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 302ae9279..4493fa1f5 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -1,4 +1,3 @@ - name: test on: @@ -14,7 +13,7 @@ jobs: strategy: matrix: os: [ubuntu-18.04, windows-latest, macos-latest] - python-version: [3.6, 3.7, 3.8, 3.9] + python-version: [3.7, 3.8, 3.9] steps: - uses: actions/checkout@v2 diff --git a/.github/workflows/test_with_cli.yml b/.github/workflows/test_with_cli.yml index c17d95c3b..8a766a7d1 100644 --- a/.github/workflows/test_with_cli.yml +++ b/.github/workflows/test_with_cli.yml @@ -2,6 +2,7 @@ name: test_with_cli on: pull_request_review: + branches: [ master, v0.1, v0.2, v0.3 ] types: [ submitted ] workflow_dispatch: diff --git a/.gitignore b/.gitignore index 4ec4f4bd6..2d5f5ea48 100644 --- a/.gitignore +++ b/.gitignore @@ -28,3 +28,4 @@ htmlcov/ .coverage .coveragerc .tmp/ +.xmake/ diff --git a/docs/source/apidoc/maro.rl.rst b/docs/source/apidoc/maro.rl.rst index 93bdd5688..aa3bc47df 100644 --- a/docs/source/apidoc/maro.rl.rst +++ b/docs/source/apidoc/maro.rl.rst @@ -39,6 +39,14 @@ maro.rl.exploration.strategies Model ================================================================================ +maro.rl.model.algorithm_nets +-------------------------------------------------------------------------------- + +.. 
automodule:: maro.rl.model.algorithm_nets + :members: + :undoc-members: + :show-inheritance: + maro.rl.model.abs_net -------------------------------------------------------------------------------- @@ -114,6 +122,17 @@ maro.rl.policy.discrete_rl_policy :undoc-members: :show-inheritance: +RL Component +================================================================================ + +maro.rl.rl_component.rl_component_bundle +-------------------------------------------------------------------------------- + +.. automodule:: maro.rl.rl_component.rl_component_bundle + :members: + :undoc-members: + :show-inheritance: + Rollout ================================================================================ diff --git a/docs/source/examples/citi_bike.rst b/docs/source/examples/citi_bike.rst deleted file mode 100644 index b9aae2e3d..000000000 --- a/docs/source/examples/citi_bike.rst +++ /dev/null @@ -1,75 +0,0 @@ -Example Scenario: Bike Repositioning (Citi Bike) -================================================ - -In this example we demonstrate using a simple greedy policy for `Citi Bike `_, -a real-world bike repositioning scenario. - -Greedy Policy -------------- - -Our greedy policy is simple: if the event type is supply, the policy will make -the current station send as many bikes as possible to one of k stations with the most empty docks. If the event type is -demand, the policy will make the current station request as many bikes as possible from one of k stations with the most -bikes. We use a heap data structure to find the top k supply/demand candidates from the action scope associated with -each decision event. - -.. code-block:: python - - class GreedyPolicy: - ... - def choose_action(self, decision_event: DecisionEvent): - if decision_event.type == DecisionType.Supply: - """ - Find k target stations with the most empty slots, randomly choose one of them and send as many bikes to - it as allowed by the action scope - """ - top_k_demands = [] - for demand_candidate, available_docks in decision_event.action_scope.items(): - if demand_candidate == decision_event.station_idx: - continue - - heapq.heappush(top_k_demands, (available_docks, demand_candidate)) - if len(top_k_demands) > self._demand_top_k: - heapq.heappop(top_k_demands) - - max_reposition, target_station_idx = random.choice(top_k_demands) - action = Action(decision_event.station_idx, target_station_idx, max_reposition) - else: - """ - Find k source stations with the most bikes, randomly choose one of them and request as many bikes from - it as allowed by the action scope. - """ - top_k_supplies = [] - for supply_candidate, available_bikes in decision_event.action_scope.items(): - if supply_candidate == decision_event.station_idx: - continue - - heapq.heappush(top_k_supplies, (available_bikes, supply_candidate)) - if len(top_k_supplies) > self._supply_top_k: - heapq.heappop(top_k_supplies) - - max_reposition, source_idx = random.choice(top_k_supplies) - action = Action(source_idx, decision_event.station_idx, max_reposition) - - return action - - -Interaction with the Greedy Policy ----------------------------------- - -This environment is driven by `real trip history data `_ from Citi Bike. - -.. 
code-block:: python - - env = Env(scenario=config.env.scenario, topology=config.env.topology, start_tick=config.env.start_tick, - durations=config.env.durations, snapshot_resolution=config.env.resolution) - - if config.env.seed is not None: - env.set_seed(config.env.seed) - - policy = GreedyPolicy(config.agent.supply_top_k, config.agent.demand_top_k) - metrics, decision_event, done = env.step(None) - while not done: - metrics, decision_event, done = env.step(policy.choose_action(decision_event)) - - env.reset() \ No newline at end of file diff --git a/docs/source/index.rst b/docs/source/index.rst index 2b3fe29b4..2564203d1 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -89,7 +89,6 @@ Contents :maxdepth: 2 :caption: Examples - examples/multi_agent_dqn_cim.rst examples/greedy_policy_citi_bike.rst .. toctree:: diff --git a/docs/source/installation/grass_on_premises_cluster_provisioning.rst b/docs/source/installation/grass_on_premises_cluster_provisioning.rst index 4a5867153..1e42314f1 100644 --- a/docs/source/installation/grass_on_premises_cluster_provisioning.rst +++ b/docs/source/installation/grass_on_premises_cluster_provisioning.rst @@ -10,7 +10,7 @@ in local private network and run your training job in On-Premises distributed en Prerequisites ------------- -* Linux with Python 3.6+ +* Linux with Python 3.7+ * `Install Powershell `_ if you are using Windows Server * For master node vm, need install flask, gunicorn, and redis. @@ -96,4 +96,4 @@ grass-on-premises-join-cluster config: install_node_runtime: true - install_node_gpu_support: false \ No newline at end of file + install_node_gpu_support: false diff --git a/docs/source/installation/multi_processes_localhost_provisioning.rst b/docs/source/installation/multi_processes_localhost_provisioning.rst index 187c34053..f6e4b3492 100644 --- a/docs/source/installation/multi_processes_localhost_provisioning.rst +++ b/docs/source/installation/multi_processes_localhost_provisioning.rst @@ -5,7 +5,7 @@ the multi-processes in the localhost environment. Prerequisites ------------- -* Linux with Python 3.6+ +* Linux with Python 3.7+ * Redis Cluster Management diff --git a/docs/source/installation/pip_install.rst b/docs/source/installation/pip_install.rst index 3616a9e86..35b21afaa 100644 --- a/docs/source/installation/pip_install.rst +++ b/docs/source/installation/pip_install.rst @@ -25,7 +25,7 @@ Install MARO from Source (\ `Editable Mode = 3.6, < 3.8 `_ + * `Python >= 3.7 `_ * C++ Compiler * Linux or Mac OS X: ``gcc`` diff --git a/docs/source/key_components/data_model.rst b/docs/source/key_components/data_model.rst index 89a36735d..28b9e910c 100644 --- a/docs/source/key_components/data_model.rst +++ b/docs/source/key_components/data_model.rst @@ -265,7 +265,7 @@ For better data access, we also provide some advanced features, including: States in built-in scenarios' snapshot list ------------------------------------------- -TODO: move to environment part? +.. TODO: move to environment part? Currently there are 3 ways to expose states in built-in scenarios: diff --git a/docs/source/key_components/geographic_visualization.rst b/docs/source/key_components/geographic_visualization.rst index a5dfd72eb..406e0bded 100644 --- a/docs/source/key_components/geographic_visualization.rst +++ b/docs/source/key_components/geographic_visualization.rst @@ -12,7 +12,7 @@ local mode for the finished epoch under real-time mode. Dependency ---------- -Env-geographic's startup depends on **docker** and **docker-compose**. 
+Env-geographic's startup depends on **docker** and **docker-compose**. Therefore, users need to install docker on the machine and ensure that it can run normally. User could get docker through `Docker installation `_. @@ -44,7 +44,7 @@ Firstly, user need to start the local database with command: ---- After the command is executed successfully, user -could view the local data with localhost:9000 by default. +could view the local data with localhost:9000 by default. If the default port is occupied, user could obtain the access port of each container through the following command: @@ -91,7 +91,7 @@ To send data to database, there are 2 compulsory steps: User needs to set the value of the environment variable "MARO_STREAMIT_ENABLED" to "true". If user wants to specify the experiment name, -set the environment variable "MARO_STREAMIT_EXPERIMENT_NAME". If user does not +set the environment variable "MARO_STREAMIT_EXPERIMENT_NAME". If user does not set this value, a unique experiment name would be processed automatically. User could check the experiment name through database. It should be noted that when selecting a topology, user must select a topology with specific geographic @@ -118,7 +118,7 @@ package **streamit** with following code before environment initialization: # Initialize environment and send basic information of experiment to database. env = Env(scenario="cim", topology="global_trade.22p_l0.1", start_tick=0, durations=100) - + for ep in range(EPISODE_NUMBER): # Send experimental data to database by episode. streamit.episode(ep) @@ -144,8 +144,8 @@ command: ---- -Generally, the backend service runs on a local machine, occupying port 5000. -If port 5000 is already occupied, the program would find another free port starting from 5000. +Generally, the backend service runs on a local machine, occupying port 5000. +If port 5000 is already occupied, the program would find another free port starting from 5000. To be specific, if user would like to run the backend service in docker rather on a local machine, please run the shell script run_docker.sh under the folder maro\maro\cli\maro_real_time_vis. It should be noted that diff --git a/examples/vm_scheduling/offline_lp/launcher.py b/examples/vm_scheduling/offline_lp/launcher.py index 2e5f21870..7255d42f4 100644 --- a/examples/vm_scheduling/offline_lp/launcher.py +++ b/examples/vm_scheduling/offline_lp/launcher.py @@ -22,8 +22,8 @@ config = convert_dottable(raw_config) LOG_PATH = os.path.join(FILE_PATH, "log", config.experiment_name) -simulation_logger = Logger(tag="simulation", format_=LogFormat.none, dump_path=LOG_PATH, dump_mode="w") -ilp_logger = Logger(tag="ilp", format_=LogFormat.none, dump_path=LOG_PATH, dump_mode="w") +simulation_logger = Logger(tag="simulation", format_=LogFormat.none, dump_folder=LOG_PATH, dump_mode="w") +ilp_logger = Logger(tag="ilp", format_=LogFormat.none, dump_folder=LOG_PATH, dump_mode="w") if __name__ == "__main__": start_time = timeit.default_timer() diff --git a/maro/README.rst b/maro/README.rst index a7cdee728..abd6ede7c 100644 --- a/maro/README.rst +++ b/maro/README.rst @@ -310,8 +310,8 @@ Install MARO from Source # If your environment is not clean, create a virtual environment firstly. python -m venv maro_venv - # You may need this for SecurityError in PowerShell. - Set-ExecutionPolicy -Scope CurrentUser -ExecutionPolicy Unrestricted + # You may need this for SecurityError in PowerShell. + Set-ExecutionPolicy -Scope CurrentUser -ExecutionPolicy Unrestricted # Activate the virtual environment. 
.\maro_venv\Scripts\activate diff --git a/maro/backends/backend.pxd b/maro/backends/backend.pxd index 04fc3c4c9..2af22eff7 100644 --- a/maro/backends/backend.pxd +++ b/maro/backends/backend.pxd @@ -27,7 +27,7 @@ ctypedef float ATTR_FLOAT ctypedef double ATTR_DOUBLE # Type for snapshot querying. -ctypedef float QUERY_FLOAT +ctypedef double QUERY_FLOAT # TYPE of node and attribute ctypedef unsigned short NODE_TYPE diff --git a/maro/backends/np_backend.pyx b/maro/backends/np_backend.pyx index 52e7452f6..c23caeddf 100644 --- a/maro/backends/np_backend.pyx +++ b/maro/backends/np_backend.pyx @@ -30,6 +30,17 @@ attribute_type_mapping = { AttributeType.Double: "d" } +attribute_type_range = { + "b": ("AttributeType.Byte", -128, 127), + "B": ("AttributeType.UByte", 0, 255), + "h": ("AttributeType.Short", -32768, 32767), + "H": ("AttributeType.UShort", 0, 65535), + "i": ("AttributeType.Int", -2147483648, 2147483647), + "I": ("AttributeType.UInt", 0, 4294967295), + "q": ("AttributeType.Long", -9223372036854775808, 9223372036854775807), + "Q": ("AttributeType.ULong", 0, 18446744073709551615), +} + IF NODES_MEMORY_LAYOUT == "ONE_BLOCK": # with this flag, we will allocate a big enough memory for all node types, then use this block construct numpy array @@ -167,6 +178,13 @@ cdef class NumpyBackend(BackendAbc): cdef AttrInfo attr = self._attrs_list[attr_type] + cdef bytes dtype = attr.dtype.encode() + if dtype in attribute_type_range: + assert value >= attribute_type_range[dtype][1] and value <= attribute_type_range[dtype][2], ( + f"Value {value} out of range ({attribute_type_range[dtype][0]}: " + f"[{attribute_type_range[dtype][1]}, {attribute_type_range[dtype][2]}])" + ) + if attr.node_type >= len(self._nodes_list): raise Exception("Invalid node type.") @@ -208,9 +226,22 @@ cdef class NumpyBackend(BackendAbc): cdef AttrInfo attr = self._attrs_list[attr_type] cdef np.ndarray attr_array = self._node_data_dict[attr.node_type][attr.name] + cdef bytes dtype = attr.dtype.encode() + if attr.slot_number == 1: + if dtype in attribute_type_range: + assert value[0] >= attribute_type_range[dtype][1] and value[0] <= attribute_type_range[dtype][2], ( + f"Value {value[0]} out of range ({attribute_type_range[dtype][0]}: " + f"[{attribute_type_range[dtype][1]}, {attribute_type_range[dtype][2]}])" + ) attr_array[0][node_index, slot_index[0]] = value[0] else: + if dtype in attribute_type_range: + for val in value: + assert val >= attribute_type_range[dtype][1] and val <= attribute_type_range[dtype][2], ( + f"Value {val} out of range ({attribute_type_range[dtype][0]}: " + f"[{attribute_type_range[dtype][1]}, {attribute_type_range[dtype][2]}])" + ) attr_array[0][node_index, slot_index] = value cdef list get_attr_values(self, NODE_INDEX node_index, ATTR_TYPE attr_type, SLOT_INDEX[:] slot_indices) except +: @@ -500,10 +531,10 @@ cdef class NPSnapshotList(SnapshotListAbc): # since we have a clear tick to index mapping, do not need additional checking here if tick in self._tick2index_dict: - retq.append(data_arr[attr.name][self._tick2index_dict[tick], node_index].astype("f").flatten()) + retq.append(data_arr[attr.name][self._tick2index_dict[tick], node_index].astype(np.double).flatten()) else: # padding for tick which not exist - retq.append(np.zeros(attr.slot_number, dtype='f')) + retq.append(np.zeros(attr.slot_number, dtype=np.double)) return np.concatenate(retq) diff --git a/maro/backends/raw/attribute.cpp b/maro/backends/raw/attribute.cpp index f8dc1a3a1..5dd8f8afe 100644 --- a/maro/backends/raw/attribute.cpp +++ 
b/maro/backends/raw/attribute.cpp @@ -61,7 +61,8 @@ namespace maro bool Attribute::is_nan() const noexcept { - return _type == AttrDataType::AFLOAT && isnan(get_value()); + return (_type == AttrDataType::AFLOAT && isnan(get_value())) + || (_type == AttrDataType::ADOUBLE && isnan(get_value())); } template diff --git a/maro/backends/raw/common.h b/maro/backends/raw/common.h index 93d90f17a..89c975c4e 100644 --- a/maro/backends/raw/common.h +++ b/maro/backends/raw/common.h @@ -29,7 +29,7 @@ namespace maro using NODE_INDEX = uint32_t; using SLOT_INDEX = uint32_t; - using QUERY_FLOAT = float; + using QUERY_FLOAT = double; // TODO: Precision issue for Long data type. using ATTR_CHAR = char; using ATTR_UCHAR = unsigned char; diff --git a/maro/backends/raw/test/README.md b/maro/backends/raw/test/README.md new file mode 100644 index 000000000..341c5a5d3 --- /dev/null +++ b/maro/backends/raw/test/README.md @@ -0,0 +1,5 @@ +# How to run + +1. Install xmake according to +2. Go to directory: maro/backends/raw +3. Run commands: `xmake; xmake run` diff --git a/maro/backends/raw/test/main.cpp b/maro/backends/raw/test/main.cpp new file mode 100644 index 000000000..f18d5de02 --- /dev/null +++ b/maro/backends/raw/test/main.cpp @@ -0,0 +1,7 @@ +#include + + +int main(int argc, char** argv) { + testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/maro/backends/raw/test/test_attribute.cpp b/maro/backends/raw/test/test_attribute.cpp new file mode 100644 index 000000000..a403fa498 --- /dev/null +++ b/maro/backends/raw/test/test_attribute.cpp @@ -0,0 +1,34 @@ +#include + +#include "../attribute.h" + +using namespace maro::backends::raw; + + +// test attribute creation +TEST(Attribute, Creation) { + Attribute attr; + + EXPECT_EQ(attr.get_type(), AttrDataType::ACHAR); + EXPECT_FALSE(attr.is_nan()); + EXPECT_EQ(attr.slot_number, 0); + +} + +// test create attribute with other type value. 
+TEST(Attribute, CreateWithTypedValue) { + Attribute attr{ ATTR_UINT(12)}; + + EXPECT_EQ(attr.get_type(), AttrDataType::AUINT); + EXPECT_EQ(attr.get_value(), 12); + EXPECT_EQ(attr.slot_number, 0); + EXPECT_FALSE(attr.is_nan()); +} + +// test is nan case +TEST(Attribute, CreateWithNan) { + Attribute attr{ nan("nan")}; + + EXPECT_TRUE(attr.is_nan()); +} + diff --git a/maro/backends/raw/test/test_frame.cpp b/maro/backends/raw/test/test_frame.cpp new file mode 100644 index 000000000..f981a5c44 --- /dev/null +++ b/maro/backends/raw/test/test_frame.cpp @@ -0,0 +1,84 @@ +#include +#include + +#include + +#include "../common.h" +#include "../frame.h" +#include "../snapshotlist.h" + +using namespace maro::backends::raw; + + +TEST(test, correct) { + EXPECT_EQ(1, 1); +} + +// show how to use frame and snapshot at c++ end +TEST(test, show_case) { + // a new frame + Frame frame; + + // add a new node with a name + auto node_type = frame.add_node("test_node", 1); + + // add an attribute to this node, this is a list attribute, it has different value to change the value + // NOTE: list means is it dynamic array, that the size can be changed even after setting up + auto attr_type_1 = frame.add_attr(node_type, "a1", AttrDataType::AUINT, 10, false, true); + + // this is a normal attribute + // NOTE: list == false means it is a fixed array that cannot change the size after setting up + auto attr_type_2 = frame.add_attr(node_type, "a2", AttrDataType::AUINT, 2); + + // setup means initialize the frame with node definitions (allocate memory) + // NOTE: call this method before accessing the attributes + frame.setup(); + + // list and normal attribute have different method to set value + frame.set_value(0, attr_type_2, 0, 33554441); + frame.insert_to_list(0, attr_type_1, 0, 33554442); + + // but they have same get method + auto v1 = frame.get_value(0, attr_type_1, 0); + auto v2 = frame.get_value(0, attr_type_2, 0); + + // test with true type + EXPECT_EQ(v2, 33554441); + EXPECT_EQ(v1, 33554442); + + // test with query result type + EXPECT_EQ(QUERY_FLOAT(v2), 3.3554441e+07); + EXPECT_EQ(QUERY_FLOAT(v1), 3.3554442e+07); + + // snapshot instance + SnapshotList ss; + + // NOTE: we need following 2 method to initialize the snapshot instance, or accessing will cause exception + // which frame we will use to copy the values + ss.setup(&frame); + // max snapshot it will keep, oldeat one will be delete when reading the limitation + ss.set_max_size(10); + + // take a snapshot for a tick + ss.take_snapshot(0); + + // query parameters + std::array ticks{ 0 }; + std::array indices{ 0 }; + std::array< ATTR_TYPE, 1> attributes{attr_type_1}; + + // we need use the parameter to get how many items we need to hold the results + auto shape = ss.prepare(node_type, &(ticks[0]), ticks.size(), &(indices[0]), indices.size(), &(attributes[0]), attributes.size()); + + auto total = shape.tick_number * shape.max_node_number * shape.max_slot_number * shape.attr_number; + + // then query (the snapshot instance will remember the latest query parameters, so just pass the result array + QUERY_FLOAT* results = new QUERY_FLOAT[total]; + + ss.query(results); + + // 1st slot value of first node + EXPECT_EQ(results[0], 3.3554442e+07); + + delete[] results; +} diff --git a/maro/backends/raw/xmake.lua b/maro/backends/raw/xmake.lua new file mode 100644 index 000000000..f8643f509 --- /dev/null +++ b/maro/backends/raw/xmake.lua @@ -0,0 +1,7 @@ +add_requires("gtest") + +target("test") + set_kind("binary") + add_files("test/*.cpp") + add_files("./*.cpp") + 
add_packages("gtest") diff --git a/maro/backends/raw_backend.pyx b/maro/backends/raw_backend.pyx index 584fa0569..0d7195f64 100644 --- a/maro/backends/raw_backend.pyx +++ b/maro/backends/raw_backend.pyx @@ -5,6 +5,8 @@ #distutils: language = c++ #distutils: define_macros=NPY_NO_DEPRECATED_API=NPY_1_7_API_VERSION +import warnings + import numpy as np cimport numpy as np cimport cython @@ -288,7 +290,7 @@ cdef class RawSnapshotList(SnapshotListAbc): return None # Result holder - cdef QUERY_FLOAT[:, :, :, :] result = view.array(shape=(shape.tick_number, shape.max_node_number, shape.attr_number, shape.max_slot_number), itemsize=sizeof(QUERY_FLOAT), format="f") + cdef QUERY_FLOAT[:, :, :, :] result = view.array(shape=(shape.tick_number, shape.max_node_number, shape.attr_number, shape.max_slot_number), itemsize=sizeof(QUERY_FLOAT), format="d") # Default result value result[:, :, :, :] = 0 @@ -331,139 +333,217 @@ cdef class RawSnapshotList(SnapshotListAbc): cdef class AttributeCharAccessor(AttributeAccessor): cdef void set_value(self, NODE_INDEX node_index, SLOT_INDEX slot_index, object value) except +: + assert value >= -128 and value <= 127, f"Value {value} out of range (AttributeType.Byte: [-127, 128])" self._backend._frame.set_value[ATTR_CHAR](node_index, self._attr_type, slot_index, value) cdef object get_value(self, NODE_INDEX node_index, SLOT_INDEX slot_index) except +: return self._backend._frame.get_value[ATTR_CHAR](node_index, self._attr_type, slot_index) cdef void append_value(self, NODE_INDEX node_index, object value) except +: + assert value >= -128 and value <= 127, f"Value {value} out of range (AttributeType.Byte: [-127, 128])" self._backend._frame.append_to_list[ATTR_CHAR](node_index, self._attr_type, value) cdef void insert_value(self, NODE_INDEX node_index, SLOT_INDEX slot_index, object value) except +: + assert value >= -128 and value <= 127, f"Value {value} out of range (AttributeType.Byte: [-127, 128])" self._backend._frame.insert_to_list[ATTR_CHAR](node_index, self._attr_type, slot_index, value) cdef class AttributeUCharAccessor(AttributeAccessor): cdef void set_value(self, NODE_INDEX node_index, SLOT_INDEX slot_index, object value) except +: + assert value >= 0 and value <= 255, f"Value {value} out of range (AttributeType.UByte: [0, 255])" self._backend._frame.set_value[ATTR_UCHAR](node_index, self._attr_type, slot_index, value) cdef object get_value(self, NODE_INDEX node_index, SLOT_INDEX slot_index) except +: return self._backend._frame.get_value[ATTR_UCHAR](node_index, self._attr_type, slot_index) cdef void append_value(self, NODE_INDEX node_index, object value) except +: + assert value >= 0 and value <= 255, f"Value {value} out of range (AttributeType.UByte: [0, 255])" self._backend._frame.append_to_list[ATTR_UCHAR](node_index, self._attr_type, value) cdef void insert_value(self, NODE_INDEX node_index, SLOT_INDEX slot_index, object value) except +: + assert value >= 0 and value <= 255, f"Value {value} out of range (AttributeType.UByte: [0, 255])" self._backend._frame.insert_to_list[ATTR_UCHAR](node_index, self._attr_type, slot_index, value) cdef class AttributeShortAccessor(AttributeAccessor): cdef void set_value(self, NODE_INDEX node_index, SLOT_INDEX slot_index, object value) except +: + assert value >= -32768 and value <= 32767, ( + f"Value {value} out of range (AttributeType.Short: [-32,768, 32,767])" + ) self._backend._frame.set_value[ATTR_SHORT](node_index, self._attr_type, slot_index, value) cdef object get_value(self, NODE_INDEX node_index, SLOT_INDEX 
slot_index) except +: return self._backend._frame.get_value[ATTR_SHORT](node_index, self._attr_type, slot_index) cdef void append_value(self, NODE_INDEX node_index, object value) except +: + assert value >= -32768 and value <= 32767, ( + f"Value {value} out of range (AttributeType.Short: [-32,768, 32,767])" + ) self._backend._frame.append_to_list[ATTR_SHORT](node_index, self._attr_type, value) cdef void insert_value(self, NODE_INDEX node_index, SLOT_INDEX slot_index, object value) except +: + assert value >= -32768 and value <= 32767, ( + f"Value {value} out of range (AttributeType.Short: [-32,768, 32,767])" + ) self._backend._frame.insert_to_list[ATTR_SHORT](node_index, self._attr_type, slot_index, value) cdef class AttributeUShortAccessor(AttributeAccessor): cdef void set_value(self, NODE_INDEX node_index, SLOT_INDEX slot_index, object value) except +: + assert value >= 0 and value <= 65535, f"Value {value} out of range (AttributeType.UShort: [0, 65,535])" self._backend._frame.set_value[ATTR_USHORT](node_index, self._attr_type, slot_index, value) cdef object get_value(self, NODE_INDEX node_index, SLOT_INDEX slot_index) except +: return self._backend._frame.get_value[ATTR_USHORT](node_index, self._attr_type, slot_index) cdef void append_value(self, NODE_INDEX node_index, object value) except +: + assert value >= 0 and value <= 65535, f"Value {value} out of range (AttributeType.UShort: [0, 65,535])" self._backend._frame.append_to_list[ATTR_USHORT](node_index, self._attr_type, value) cdef void insert_value(self, NODE_INDEX node_index, SLOT_INDEX slot_index, object value) except +: + assert value >= 0 and value <= 65535, f"Value {value} out of range (AttributeType.UShort: [0, 65,535])" self._backend._frame.insert_to_list[ATTR_USHORT](node_index, self._attr_type, slot_index, value) cdef class AttributeIntAccessor(AttributeAccessor): cdef void set_value(self, NODE_INDEX node_index, SLOT_INDEX slot_index, object value) except +: + assert value >= -2147483648 and value <= 2147483647, ( + f"Value {value} out of range (AttributeType.Int: [-2,147,483,648, 2,147,483,647])" + ) self._backend._frame.set_value[ATTR_INT](node_index, self._attr_type, slot_index, value) cdef object get_value(self, NODE_INDEX node_index, SLOT_INDEX slot_index) except +: return self._backend._frame.get_value[ATTR_INT](node_index, self._attr_type, slot_index) cdef void append_value(self, NODE_INDEX node_index, object value) except +: + assert value >= -2147483648 and value <= 2147483647, ( + f"Value {value} out of range (AttributeType.Int: [-2,147,483,648, 2,147,483,647])" + ) self._backend._frame.append_to_list[ATTR_INT](node_index, self._attr_type, value) cdef void insert_value(self, NODE_INDEX node_index, SLOT_INDEX slot_index, object value) except +: + assert value >= -2147483648 and value <= 2147483647, ( + f"Value {value} out of range (AttributeType.Int: [-2,147,483,648, 2,147,483,647])" + ) self._backend._frame.insert_to_list[ATTR_INT](node_index, self._attr_type, slot_index, value) cdef class AttributeUIntAccessor(AttributeAccessor): cdef void set_value(self, NODE_INDEX node_index, SLOT_INDEX slot_index, object value) except +: + assert value >= 0 and value <= 4294967295, ( + f"Value {value} out of range (AttributeType.UInt: [0, 4,294,967,295])" + ) self._backend._frame.set_value[ATTR_UINT](node_index, self._attr_type, slot_index, value) cdef object get_value(self, NODE_INDEX node_index, SLOT_INDEX slot_index) except +: return self._backend._frame.get_value[ATTR_UINT](node_index, self._attr_type, slot_index) cdef 
void append_value(self, NODE_INDEX node_index, object value) except +: + assert value >= 0 and value <= 4294967295, ( + f"Value {value} out of range (AttributeType.UInt: [0, 4,294,967,295])" + ) self._backend._frame.append_to_list[ATTR_UINT](node_index, self._attr_type, value) cdef void insert_value(self, NODE_INDEX node_index, SLOT_INDEX slot_index, object value) except +: + assert value >= 0 and value <= 4294967295, ( + f"Value {value} out of range (AttributeType.UInt: [0, 4,294,967,295])" + ) self._backend._frame.insert_to_list[ATTR_UINT](node_index, self._attr_type, slot_index, value) cdef class AttributeLongAccessor(AttributeAccessor): cdef void set_value(self, NODE_INDEX node_index, SLOT_INDEX slot_index, object value) except +: + assert value >= -9223372036854775808 and value <= 9223372036854775807, ( + f"Value {value} out of range (AttributeType.Long: [-9,223,372,036,854,775,808, 9,223,372,036,854,775,807])" + ) self._backend._frame.set_value[ATTR_LONG](node_index, self._attr_type, slot_index, value) cdef object get_value(self, NODE_INDEX node_index, SLOT_INDEX slot_index) except +: return self._backend._frame.get_value[ATTR_LONG](node_index, self._attr_type, slot_index) cdef void append_value(self, NODE_INDEX node_index, object value) except +: + assert value >= -9223372036854775808 and value <= 9223372036854775807, ( + f"Value {value} out of range (AttributeType.Long: [-9,223,372,036,854,775,808, 9,223,372,036,854,775,807])" + ) self._backend._frame.append_to_list[ATTR_LONG](node_index, self._attr_type, value) cdef void insert_value(self, NODE_INDEX node_index, SLOT_INDEX slot_index, object value) except +: + assert value >= -9223372036854775808 and value <= 9223372036854775807, ( + f"Value {value} out of range (AttributeType.Long: [-9,223,372,036,854,775,808, 9,223,372,036,854,775,807])" + ) self._backend._frame.insert_to_list[ATTR_LONG](node_index, self._attr_type, slot_index, value) cdef class AttributeULongAccessor(AttributeAccessor): cdef void set_value(self, NODE_INDEX node_index, SLOT_INDEX slot_index, object value) except +: + assert value >= 0 and value <= 18446744073709551615, ( + f"Value {value} out of range (AttributeType.ULong: [0, 18,446,744,073,709,551,615])" + ) self._backend._frame.set_value[ATTR_ULONG](node_index, self._attr_type, slot_index, value) cdef object get_value(self, NODE_INDEX node_index, SLOT_INDEX slot_index) except +: return self._backend._frame.get_value[ATTR_ULONG](node_index, self._attr_type, slot_index) cdef void append_value(self, NODE_INDEX node_index, object value) except +: + assert value >= 0 and value <= 18446744073709551615, ( + f"Value {value} out of range (AttributeType.ULong: [0, 18,446,744,073,709,551,615])" + ) self._backend._frame.append_to_list[ATTR_ULONG](node_index, self._attr_type, value) cdef void insert_value(self, NODE_INDEX node_index, SLOT_INDEX slot_index, object value) except +: + assert value >= 0 and value <= 18446744073709551615, ( + f"Value {value} out of range (AttributeType.ULong: [0, 18,446,744,073,709,551,615])" + ) self._backend._frame.insert_to_list[ATTR_ULONG](node_index, self._attr_type, slot_index, value) cdef class AttributeFloatAccessor(AttributeAccessor): cdef void set_value(self, NODE_INDEX node_index, SLOT_INDEX slot_index, object value) except +: + n_val = float(f"{value:e}") + assert abs(n_val - value) < 1, f"Value {value} out of range (AttributeType.Float)" + if n_val != value: + warnings.warn(f"[Precision lost] Value {value} would be converted to {n_val}") 
self._backend._frame.set_value[ATTR_FLOAT](node_index, self._attr_type, slot_index, value) cdef object get_value(self, NODE_INDEX node_index, SLOT_INDEX slot_index) except +: return self._backend._frame.get_value[ATTR_FLOAT](node_index, self._attr_type, slot_index) cdef void append_value(self, NODE_INDEX node_index, object value) except +: + n_val = float(f"{value:e}") + assert abs(n_val - value) < 1, f"Value {value} out of range (AttributeType.Float)" + if n_val != value: + warnings.warn(f"[Precision lost] Value {value} would be converted to {n_val}") self._backend._frame.append_to_list[ATTR_FLOAT](node_index, self._attr_type, value) cdef void insert_value(self, NODE_INDEX node_index, SLOT_INDEX slot_index, object value) except +: + n_val = float(f"{value:e}") + assert abs(n_val - value) < 1, f"Value {value} out of range (AttributeType.Float)" + if n_val != value: + warnings.warn(f"[Precision lost] Value {value} would be converted to {n_val}") self._backend._frame.insert_to_list[ATTR_FLOAT](node_index, self._attr_type, slot_index, value) cdef class AttributeDoubleAccessor(AttributeAccessor): cdef void set_value(self, NODE_INDEX node_index, SLOT_INDEX slot_index, object value) except +: + n_val = float(f"{value:.15e}") + assert abs(n_val - value) < 1, f"Value {value} out of range (AttributeType.Double)" + if n_val != value: + warnings.warn(f"[Precision lost] Value {value} would be converted to {n_val}") self._backend._frame.set_value[ATTR_DOUBLE](node_index, self._attr_type, slot_index, value) cdef object get_value(self, NODE_INDEX node_index, SLOT_INDEX slot_index) except +: return self._backend._frame.get_value[ATTR_DOUBLE](node_index, self._attr_type, slot_index) cdef void append_value(self, NODE_INDEX node_index, object value) except +: + n_val = float(f"{value:.15e}") + assert abs(n_val - value) < 1, f"Value {value} out of range (AttributeType.Double)" + if n_val != value: + warnings.warn(f"[Precision lost] Value {value} would be converted to {n_val}") self._backend._frame.append_to_list[ATTR_DOUBLE](node_index, self._attr_type, value) cdef void insert_value(self, NODE_INDEX node_index, SLOT_INDEX slot_index, object value) except +: + n_val = float(f"{value:.15e}") + assert abs(n_val - value) < 1, f"Value {value} out of range (AttributeType.Double)" + if n_val != value: + warnings.warn(f"[Precision lost] Value {value} would be converted to {n_val}") self._backend._frame.insert_to_list[ATTR_DOUBLE](node_index, self._attr_type, slot_index, value) diff --git a/maro/cli/inspector/env_data_process.py b/maro/cli/inspector/env_data_process.py index d0f914b4e..97aee0f98 100644 --- a/maro/cli/inspector/env_data_process.py +++ b/maro/cli/inspector/env_data_process.py @@ -210,13 +210,23 @@ def _get_index_index_name_conversion(scenario: GlobalScenarios, source_path: str if os.path.exists(os.path.join(source_path, GlobalFileNames.name_convert)): os.remove(os.path.join(source_path, GlobalFileNames.name_convert)) if scenario == GlobalScenarios.CITI_BIKE: - with open(conversion_path, "r", encoding="utf8")as mapping_file: - mapping_json_data = json.load(mapping_file) - name_list = [] - for item in mapping_json_data["data"]["stations"]: - name_list.append(item["name"]) - df = pd.DataFrame({"name": name_list}) - df.to_csv(os.path.join(source_path, GlobalFileNames.name_convert), index=False) + # TODO: the commented out code are older version which will cause errors. + # TODO: the updated code could work but the fix is temporary. 
+ # TODO: we need to refactor the dump logic in citi bike scenario and make a stable solution later. + + # with open(conversion_path, "r", encoding="utf8")as mapping_file: + # mapping_json_data = json.load(mapping_file) + # name_list = [] + # for item in mapping_json_data["data"]["stations"]: + # name_list.append(item["name"]) + # df = pd.DataFrame({"name": name_list}) + # df.to_csv(os.path.join(source_path, GlobalFileNames.name_convert), index=False) + + df_station = pd.read_csv(os.path.join(source_path, "epoch_0", "stations.csv")) + name_list = df_station["name"].unique() + df = pd.DataFrame({"name": name_list}) + df.to_csv(os.path.join(source_path, GlobalFileNames.name_convert), index=False) + elif scenario == GlobalScenarios.CIM: cim_information = yaml.load( open(conversion_path, "r").read(), diff --git a/maro/cli/k8s/utils/params.py b/maro/cli/k8s/utils/params.py new file mode 100644 index 000000000..4ac8c999b --- /dev/null +++ b/maro/cli/k8s/utils/params.py @@ -0,0 +1,10 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + + +import os + + +class K8sPaths: + MARO_K8S_LIB = "~/.maro/lib/k8s" + ABS_MARO_K8S_LIB = os.path.expanduser(MARO_K8S_LIB) diff --git a/maro/utils/logger.py b/maro/utils/logger.py index 37dcc0936..ac1e546f7 100644 --- a/maro/utils/logger.py +++ b/maro/utils/logger.py @@ -220,12 +220,12 @@ def __init__(self): if self.log_level == logging.DEBUG: super().__init__( tag='cli', - format_=LogFormat.cli_debug, dump_path=dump_path, dump_mode='a', stdout_level=self.log_level + format_=LogFormat.cli_debug, dump_folder=dump_path, dump_mode='a', stdout_level=self.log_level ) elif self.log_level >= logging.INFO: super().__init__( tag='cli', - format_=LogFormat.cli_info, dump_path=dump_path, dump_mode='a', stdout_level=self.log_level + format_=LogFormat.cli_info, dump_folder=dump_path, dump_mode='a', stdout_level=self.log_level ) _logger = None diff --git a/scripts/build_manylinux.sh b/scripts/build_manylinux.sh index f8e0892f8..fe1187499 100644 --- a/scripts/build_manylinux.sh +++ b/scripts/build_manylinux.sh @@ -1,6 +1,6 @@ #!/bin/bash -# this script used build maro packages for linux +# this script used build maro packages for linux cd "$(dirname $(readlink -f $0))/.." @@ -8,4 +8,4 @@ bash ./scripts/compile_cython.sh # NOTE: currently we only support python3.6 and 3.7, need to be clearfy the python and packages version # about manylinux: https://github.com/pypa/manylinux -docker run --rm -v "$PWD":/maro quay.io/pypa/manylinux2010_x86_64 bash /maro/scripts/build_wheel.sh \ No newline at end of file +docker run --rm -v "$PWD":/maro quay.io/pypa/manylinux2010_x86_64 bash /maro/scripts/build_wheel.sh diff --git a/scripts/random_config.py b/scripts/random_config.py deleted file mode 100644 index 1bcce4e52..000000000 --- a/scripts/random_config.py +++ /dev/null @@ -1,400 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT license. - -""" -A simple script that used to generate random configurations. -""" - -import argparse -import os -import random -from typing import Optional - -import numpy as np -from flloat.parser.ltlf import LTLfParser -from yaml import safe_load, safe_dump - -# Definition of warehouse. -warehouse_def = """ -class: "WarehouseFacility" -children: - storage: - class: "StorageUnit" - distribution: - class: "DistributionUnit" - products: - class: "ProductUnit" - is_template: true - config: - agent_type: 4 - consumer: - class: "ConsumerUnit" -config: - agent_type: 1 -""" - -# Definition of supplier. 
-supplier_def = """ -class: "SupplierFacility" -children: - storage: - class: "StorageUnit" - distribution: - class: "DistributionUnit" - products: - class: "ProductUnit" - is_template: true - config: - agent_type: 3 - consumer: - class: "ConsumerUnit" - manufacture: - class: "ManufactureUnit" -config: - agent_type: 0 -""" - -# Definition of retailer. -retailer_def = """ -class: "RetailerFacility" -children: - storage: - class: "StorageUnit" - products: - class: "StoreProductUnit" - is_template: true - config: - agent_type: 5 - consumer: - class: "ConsumerUnit" - seller: - class: "SellerUnit" - config: - sale_hist_len: 4 -config: - agent_type: 2 -""" - -# Template to generate a supplier facility. -# Properties to change: -# . name -# . skus -# . vehicles -# . config (optional) -supplier_template = """ -name: "Supplier_001" -definition_ref: "SupplierFacility" -skus: {} -children: - storage: - config: - capacity: 10000 - unit_storage_cost: 1 - distribution: - children: - vehicles: [] - config: - unit_price: 1 -config: {} -""" - -# Template to generate warehouse facility. -# Property to change: -# . name -# . skus -# . vehicles -# . config (optional) -warehouse_template = """ -name: "Warehouse_001" -definition_ref: "WarehouseFacility" -skus: {} -children: - storage: - config: - capacity: 10000 - unit_storage_cost: 1 - distribution: - children: - vehicles: [] - config: - unit_price: 1 -config: {} -""" - -# Template to generate retailer. -# Property to change: -# . name -# . skus -# . config (optional) -retailer_template = """ -name: "Retailer_001" -definition_ref: "RetailerFacility" -skus: {} -children: - storage: - config: - capacity: 10000 - unit_storage_cost: 1 -config: {} -""" - - -def generate_config(sku_num: int, supplier_num: int, warehouse_num: int, retailer_num: int, grid_width: int, - grid_height: int, output_path: Optional[str] = None): - constraints = ['G(stock_constraint)', - 'G(is_replenish_constraint -> ((X!is_replenish_constraint)&(XX!is_replenish_constraint)))', - 'G(low_profit -> low_stock_constraint)'] - - # constraints = ['G(is_replenish_constraint -> ((X!is_replenish_constraint)&(XX!is_replenish_constraint)))'] - - def construct_formula(constraint): - parser = LTLfParser() - formula = parser(constraint) - return formula - - constraint_formulas = {constraint: construct_formula(constraint) for constraint in constraints} - constraint_automata = {constraint: constraint_formulas[constraint].to_automaton().determinize() for constraint in - constraints} - - max_constraint_states = int(np.max([len(a.states) for a in constraint_automata.values()])) - - # Base configuration of vehicle used in all facility. - vehicle_conf = { - "class": "VehicleUnit", - "config": { - "patient": 100, - "unit_transport_cost": 1 - } - } - - # Save the vehicle definition in the config, so later distribution will reference to it. - config = { - "normal_vehicle": vehicle_conf, - "facility_definitions": {}, - "settings": { - "global_reward_weight_producer": 0.50, - "global_reward_weight_consumer": 0.50, - "downsampling_rate": 1, - "episod_duration": 21, - "initial_balance": 100000, - "consumption_hist_len": 4, - "sale_hist_len": 4, - "pending_order_len": 4, - "constraint_state_hist_len": max_constraint_states, - "total_echelons": 3, - "replenishment_discount": 0.9, - "reward_normalization": 1e7, - "constraint_violate_reward": -1e6, - "gamma": 0.99, - "tail_timesteps": 7, - "heading_timesteps": 7, - } - } - - # Add the facility definitions. 
- config["facility_definitions"]["SupplierFacility"] = safe_load(supplier_def) - config["facility_definitions"]["WarehouseFacility"] = safe_load(warehouse_def) - config["facility_definitions"]["RetailerFacility"] = safe_load(retailer_def) - - # Generate settings first. - world_conf = {} - - sku_names = [f'SKU{i}' for i in range(sku_num)] - - sku_list = [] - for sku_index, sku_name in enumerate(sku_names): - sku_list.append({ - "id": sku_index, - "name": sku_name - }) - - # Add the sku list to the world configuration. - world_conf["skus"] = sku_list - - # Generate sku information. - sku_cost = {f'SKU{i}': random.randint(10, 500) for i in range(sku_num)} - sku_product_cost = {f'SKU{i}': int(sku_cost[f'SKU{i}'] * 0.9) for i in range(sku_num)} - sku_price = {f'SKU{i}': int(sku_cost[f'SKU{i}'] * (1 + random.randint(10, 100) / 100)) for i in range(sku_num)} - sku_gamma = {f'SKU{i}': random.randint(5, 100) for i in range(sku_num)} - total_gamma = sum(list(sku_gamma.values())) - sku_vlt = {f'SKU{i}': random.randint(1, 3) for i in range(sku_num)} - - # Generate suppliers. - supplier_facilities = [] - - for i in range(supplier_num): - facility = safe_load(supplier_template) - - facility["name"] = f"SUPPLIER{i}" - facility["children"]["storage"]["config"]["capacity"] = total_gamma * 100 - - for _ in range(10 * sku_num): - # this will save as a reference in the final yaml file - facility["children"]["distribution"]["children"]["vehicles"].append(vehicle_conf) - - # Facility config. - facility["config"] = {} - facility["config"]["order_cost"] = 200 - facility["config"]["delay_order_penalty"] = 1000 - - # Sku list of this facility. - sku_list = {} - - for j in range(sku_num): - sku_name = f"SKU{j}" - sku_list[sku_name] = { - "price": sku_cost[sku_name], - "cost": sku_product_cost[sku_name], - "service_level": .95, - "vlt": 3, - "init_stock": int(sku_gamma[sku_name] * 50), - # Why this configuration, as manufacture is controlled by action? - "production_rate": int(sku_gamma[sku_name] * 50), - # For this script, all sku is a production that produced by suppliers, no bom. - "type": "production", - "product_unit_cost": 1, - } - - facility["skus"] = sku_list - - supplier_facilities.append(facility) - - # Warehouses. - warehouse_list = [] - for i in range(warehouse_num): - facility = safe_load(warehouse_template) - - facility["name"] = f"WAREHOUSE{i}" - facility["children"]["storage"]["config"]["capacity"] = total_gamma * 100 - - for _ in range(10 * sku_num): - facility["children"]["distribution"]["children"]["vehicles"].append(vehicle_conf) - - facility["config"] = {} - facility["config"]["order_cost"] = 500 - facility["config"]["delay_order_penalty"] = 1000 - - sku_list = {} - - for j in range(sku_num): - sku_name = f"SKU{j}" - sku_list[sku_name] = { - "price": sku_cost[sku_name], - "cost": sku_cost[sku_name], - "vlt": sku_vlt[sku_name], - "init_stock": int(sku_gamma[sku_name] * 20), - "service_level": .96 - } - - facility["skus"] = sku_list - - warehouse_list.append(facility) - - sku_constraints = {} - for i in range(sku_num): - if random.random() <= 0.5: - continue - sku_constraints[f"SKU{i}"] = constraints[random.randint(0, len(constraints) - 1)] - - # Retailers. 
- retailer_list = [] - for i in range(retailer_num): - facility = safe_load(retailer_template) - - facility["name"] = f"STORE{i}" - facility["children"]["storage"]["config"]["capacity"] = total_gamma * 20 - - facility["config"] = {} - facility["config"]["order_cost"] = 500 - - sku_list = {} - - for j in range(sku_num): - sku_name = f"SKU{j}" - sku_list[sku_name] = { - "price": sku_price[sku_name], - "service_level": 0.95, - "cost": sku_cost[sku_name], - "init_stock": sku_gamma[sku_name] * (sku_vlt[sku_name] + random.randint(1, 5)), - "sale_gamma": sku_gamma[sku_name], - 'max_stock': 1000, - "constraint": sku_constraints.get(sku_name, None) - } - - facility["skus"] = sku_list - - retailer_list.append(facility) - - world_conf["facilities"] = supplier_facilities + warehouse_list + retailer_list - - # According to original code, the upstream relationship is like following: - # supplier <- warehouse <- retailer - # as current configuration supplier and warehouse contain all the sku, so we can just random pick. - world_conf["topology"] = {} - - # Random pick upstreams for retailers from warehouses. - for store in retailer_list: - store_upstream = {} - - for i in range(sku_num): - sku_name = f"SKU{i}" - store_upstream[sku_name] = [warehouse_list[random.randint(0, warehouse_num - 1)]["name"], ] - - world_conf["topology"][store["name"]] = store_upstream - - # Random pick upstreams for warehouses from suppliers. - for warehouse in warehouse_list: - warehouse_upstream = {} - - for i in range(sku_num): - sku_name = f"SKU{i}" - warehouse_upstream[sku_name] = [supplier_facilities[random.randint(0, supplier_num) - 1]["name"], ] - - world_conf["topology"][warehouse["name"]] = warehouse_upstream - - # Grid settings. - world_conf["grid"] = {} - world_conf["grid"]["size"] = [grid_width, grid_height] - - # Random pick location. - available_cells = [(x, y) for x in range(grid_width) for y in range(grid_height)] - - world_conf["grid"]["facilities"] = {} - for facility in world_conf["facilities"]: - cell = random.randint(0, len(available_cells) - 1) - - world_conf["grid"]["facilities"][facility["name"]] = available_cells[cell] - - del available_cells[cell] - - config["world"] = world_conf - - if output_path is None: - output_path = "." 
- - with open(os.path.join(output_path, "config.yml"), "wt+") as fp: - safe_dump(config, fp) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - - parser.add_argument("--sku_num", type=int, default=random.randint(4, 5)) - parser.add_argument("--supplier_num", type=int, default=1) - parser.add_argument("--warehouse_num", type=int, default=1) - parser.add_argument("--retailer_num", type=int, default=1) - parser.add_argument("--grid_width", type=int, default=20) - parser.add_argument("--grid_height", type=int, default=20) - parser.add_argument("--output_path", type=str, default=".") - - arg = parser.parse_args() - - generate_config( - arg.sku_num, - arg.supplier_num, - arg.warehouse_num, - arg.retailer_num, - arg.grid_width, - arg.grid_height, - arg.output_path - ) diff --git a/setup.py b/setup.py index 04a426de2..6d472c72c 100644 --- a/setup.py +++ b/setup.py @@ -117,12 +117,11 @@ 'Operating System :: Unix', "Programming Language :: Python", "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Topic :: Scientific/Engineering :: Artificial Intelligence"], - python_requires=">=3.6", + python_requires=">=3.7", setup_requires=[ "numpy<1.20.0", ], diff --git a/tests/dummy/dummy_business_engine.py b/tests/dummy/dummy_business_engine.py index fd635863b..c0ada59ff 100644 --- a/tests/dummy/dummy_business_engine.py +++ b/tests/dummy/dummy_business_engine.py @@ -54,3 +54,6 @@ def get_node_info(self): def get_agent_idx_list(self): return [node.index for node in self._dummy_list] + + def set_seed(self, seed: int) -> None: + pass diff --git a/tests/test_trajectory_utils.py b/tests/test_trajectory_utils.py deleted file mode 100644 index df05d8032..000000000 --- a/tests/test_trajectory_utils.py +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT license. - -import unittest - -import numpy as np - -from maro.rl.utils.trajectory_utils import get_k_step_returns, get_lambda_returns - - -class TestTrajectoryUtils(unittest.TestCase): - def setUp(self) -> None: - self.rewards = np.asarray([3, 2, 4, 1, 5]) - self.values = np.asarray([4, 7, 1, 3, 6]) - self.lam = 0.6 - self.discount = 0.8 - self.k = 4 - - def test_k_step_return(self): - returns = get_k_step_returns(self.rewards, self.values, self.discount, k=self.k) - expected = np.asarray([10.1296, 8.912, 8.64, 5.8, 6.0]) - np.testing.assert_allclose(returns, expected, rtol=1e-4) - - def test_lambda_return(self): - returns = get_lambda_returns(self.rewards, self.values, self.discount, self.lam, k=self.k) - expected = np.asarray([8.1378176, 6.03712, 7.744, 5.8, 6.0]) - np.testing.assert_allclose(returns, expected, rtol=1e-4) - - -if __name__ == "__main__": - unittest.main()
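
The range checks this patch adds to `np_backend.pyx` and the `Attribute*Accessor` classes in `raw_backend.pyx` all follow one pattern: look up the minimum/maximum of the attribute's storage type and assert the incoming value fits before writing it to the frame. Below is a plain-Python illustration of that pattern; the `attribute_type_range` table is copied from the diff above, while `check_range` and the example calls are hypothetical helpers for illustration, not part of MARO's API.

```python
# Illustration only: mirrors the range-check pattern added in np_backend.pyx and
# raw_backend.pyx. `check_range` is a hypothetical helper, not a MARO API.
attribute_type_range = {
    "b": ("AttributeType.Byte", -128, 127),
    "B": ("AttributeType.UByte", 0, 255),
    "h": ("AttributeType.Short", -32768, 32767),
    "H": ("AttributeType.UShort", 0, 65535),
    "i": ("AttributeType.Int", -2147483648, 2147483647),
    "I": ("AttributeType.UInt", 0, 4294967295),
    "q": ("AttributeType.Long", -9223372036854775808, 9223372036854775807),
    "Q": ("AttributeType.ULong", 0, 18446744073709551615),
}


def check_range(dtype: str, value: int) -> None:
    """Raise AssertionError if `value` does not fit the given storage type."""
    type_name, low, high = attribute_type_range[dtype]
    assert low <= value <= high, (
        f"Value {value} out of range ({type_name}: [{low}, {high}])"
    )


check_range("h", 32767)       # fits a signed short, passes silently
try:
    check_range("B", 256)     # 256 does not fit an unsigned byte
except AssertionError as err:
    print(err)                # Value 256 out of range (AttributeType.UByte: [0, 255])
```

The companion change of `QUERY_FLOAT` from `float` to `double` (in `backend.pxd`, `common.h`, and the `format="d"` result buffer in `raw_backend.pyx`) gives snapshot query results double precision; this is presumably why the equality checks on large integers such as `33554442` in `test_frame.cpp` hold, since values above 2^24 cannot all be represented exactly in a 32-bit float.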