diff --git a/examples/vm_scheduling/offline_lp/launcher.py b/examples/vm_scheduling/offline_lp/launcher.py
index 2e5f21870..7255d42f4 100644
--- a/examples/vm_scheduling/offline_lp/launcher.py
+++ b/examples/vm_scheduling/offline_lp/launcher.py
@@ -22,8 +22,8 @@
 config = convert_dottable(raw_config)
 
 LOG_PATH = os.path.join(FILE_PATH, "log", config.experiment_name)
-simulation_logger = Logger(tag="simulation", format_=LogFormat.none, dump_path=LOG_PATH, dump_mode="w")
-ilp_logger = Logger(tag="ilp", format_=LogFormat.none, dump_path=LOG_PATH, dump_mode="w")
+simulation_logger = Logger(tag="simulation", format_=LogFormat.none, dump_folder=LOG_PATH, dump_mode="w")
+ilp_logger = Logger(tag="ilp", format_=LogFormat.none, dump_folder=LOG_PATH, dump_mode="w")
 
 if __name__ == "__main__":
     start_time = timeit.default_timer()
diff --git a/maro/cli/inspector/env_data_process.py b/maro/cli/inspector/env_data_process.py
index d0f914b4e..97aee0f98 100644
--- a/maro/cli/inspector/env_data_process.py
+++ b/maro/cli/inspector/env_data_process.py
@@ -210,13 +210,23 @@ def _get_index_index_name_conversion(scenario: GlobalScenarios, source_path: str
     if os.path.exists(os.path.join(source_path, GlobalFileNames.name_convert)):
         os.remove(os.path.join(source_path, GlobalFileNames.name_convert))
     if scenario == GlobalScenarios.CITI_BIKE:
-        with open(conversion_path, "r", encoding="utf8")as mapping_file:
-            mapping_json_data = json.load(mapping_file)
-            name_list = []
-            for item in mapping_json_data["data"]["stations"]:
-                name_list.append(item["name"])
-            df = pd.DataFrame({"name": name_list})
-            df.to_csv(os.path.join(source_path, GlobalFileNames.name_convert), index=False)
+        # TODO: the commented out code are older version which will cause errors.
+        # TODO: the updated code could work but the fix is temporary.
+        # TODO: we need to refactor the dump logic in citi bike scenario and make a stable solution later.
+
+        # with open(conversion_path, "r", encoding="utf8")as mapping_file:
+        #     mapping_json_data = json.load(mapping_file)
+        #     name_list = []
+        #     for item in mapping_json_data["data"]["stations"]:
+        #         name_list.append(item["name"])
+        #     df = pd.DataFrame({"name": name_list})
+        #     df.to_csv(os.path.join(source_path, GlobalFileNames.name_convert), index=False)
+
+        df_station = pd.read_csv(os.path.join(source_path, "epoch_0", "stations.csv"))
+        name_list = df_station["name"].unique()
+        df = pd.DataFrame({"name": name_list})
+        df.to_csv(os.path.join(source_path, GlobalFileNames.name_convert), index=False)
+
     elif scenario == GlobalScenarios.CIM:
         cim_information = yaml.load(
             open(conversion_path, "r").read(),
diff --git a/maro/utils/logger.py b/maro/utils/logger.py
index 37dcc0936..ac1e546f7 100644
--- a/maro/utils/logger.py
+++ b/maro/utils/logger.py
@@ -220,12 +220,12 @@ def __init__(self):
         if self.log_level == logging.DEBUG:
             super().__init__(
                 tag='cli',
-                format_=LogFormat.cli_debug, dump_path=dump_path, dump_mode='a', stdout_level=self.log_level
+                format_=LogFormat.cli_debug, dump_folder=dump_path, dump_mode='a', stdout_level=self.log_level
             )
         elif self.log_level >= logging.INFO:
             super().__init__(
                 tag='cli',
-                format_=LogFormat.cli_info, dump_path=dump_path, dump_mode='a', stdout_level=self.log_level
+                format_=LogFormat.cli_info, dump_folder=dump_path, dump_mode='a', stdout_level=self.log_level
             )
 
 _logger = None