diff --git a/pyproject.toml b/pyproject.toml index f1a4a3d..4bc4a6d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -35,6 +35,7 @@ dependencies = [ "toml>=0.10.2", "pyarrow>=14.0.0", "tqdm>=4.66.0", + "pyproj==3.6.1", ] [project.optional-dependencies] diff --git a/src/__init__.py b/src/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/prep_disolv/common/config.py b/src/prep_disolv/common/config.py index a05047b..6320767 100644 --- a/src/prep_disolv/common/config.py +++ b/src/prep_disolv/common/config.py @@ -2,7 +2,7 @@ import toml -from src.prep_disolv.common.columns import ( +from prep_disolv.common.columns import ( ACTIVATIONS_FOLDER, POSITIONS_FOLDER, ) diff --git a/src/prep_disolv/core/core.py b/src/prep_disolv/core/core.py index b7479a4..a363a1e 100644 --- a/src/prep_disolv/core/core.py +++ b/src/prep_disolv/core/core.py @@ -2,14 +2,14 @@ import logging -from src.prep_disolv.controller.controller import ControllerConverter -from src.prep_disolv.rsu.rsu import RsuConverter -from src.prep_disolv.common.config import ( +from prep_disolv.controller.controller import ControllerConverter +from prep_disolv.rsu.rsu import RsuConverter +from prep_disolv.common.config import ( LOG_SETTINGS, Config, ) -from src.prep_disolv.common.logger import setup_logging -from src.prep_disolv.vehicle.vehicle import VehicleConverter +from prep_disolv.common.logger import setup_logging +from prep_disolv.vehicle.vehicle import VehicleConverter logger = logging.getLogger(__name__) diff --git a/src/prep_disolv/rsu/given.py b/src/prep_disolv/rsu/given.py index 7b968d9..44184c1 100644 --- a/src/prep_disolv/rsu/given.py +++ b/src/prep_disolv/rsu/given.py @@ -3,8 +3,8 @@ import numpy as np import pandas as pd -from src.prep_disolv.common.columns import POSITIONS_FOLDER, ACTIVATIONS_FOLDER, ACTIVATION_COLUMNS -from src.prep_disolv.common.config import Config, RSU_SETTINGS, START_TIME, SIMULATION_SETTINGS, DURATION, RSU_FILENAME +from prep_disolv.common.columns import 
POSITIONS_FOLDER, ACTIVATIONS_FOLDER, ACTIVATION_COLUMNS +from prep_disolv.common.config import Config, RSU_SETTINGS, START_TIME, SIMULATION_SETTINGS, DURATION, RSU_FILENAME class RSUData: diff --git a/src/prep_disolv/rsu/junction.py b/src/prep_disolv/rsu/junction.py index 19a537a..48d48d0 100644 --- a/src/prep_disolv/rsu/junction.py +++ b/src/prep_disolv/rsu/junction.py @@ -7,7 +7,7 @@ import numpy as np import pandas as pd -from src.prep_disolv.common.columns import ( +from prep_disolv.common.columns import ( POSITIONS_FOLDER, ACTIVATIONS_FOLDER, RSU_COLUMNS, @@ -15,8 +15,8 @@ COORD_X, COORD_Y, ) -from src.prep_disolv.common.utils import get_offsets, get_projection -from src.prep_disolv.common.config import ID_INIT, NETWORK_FILE, START_TIME, \ +from prep_disolv.common.utils import get_offsets, get_projection +from prep_disolv.common.config import ID_INIT, NETWORK_FILE, START_TIME, \ RSU_SETTINGS, TRAFFIC_SETTINGS, SIMULATION_SETTINGS, Config, DURATION JUNCTION = "junction" diff --git a/src/prep_disolv/rsu/rsu.py b/src/prep_disolv/rsu/rsu.py index 2e34052..b404395 100644 --- a/src/prep_disolv/rsu/rsu.py +++ b/src/prep_disolv/rsu/rsu.py @@ -1,14 +1,14 @@ from __future__ import annotations -from src.prep_disolv.common.config import ( +from prep_disolv.common.config import ( OUTPUT_PATH, OUTPUT_SETTINGS, PLACEMENT, RSU_SETTINGS, Config, ) -from src.prep_disolv.rsu.junction import JunctionPlacement -from src.prep_disolv.rsu.given import InputPlacement +from prep_disolv.rsu.junction import JunctionPlacement +from prep_disolv.rsu.given import InputPlacement class RsuConverter: diff --git a/src/prep_disolv/scripts/ns3_input_fix.py b/src/prep_disolv/scripts/ns3_input_fix.py new file mode 100644 index 0000000..e397376 --- /dev/null +++ b/src/prep_disolv/scripts/ns3_input_fix.py @@ -0,0 +1,103 @@ +import argparse +from pathlib import Path + +import pandas as pd + + +def prepare_veh_id_map(veh_actions: pd.DataFrame) -> dict[str, str]: + agent_ids = veh_actions["agent_id"] + 
ns3_ids = range(0, len(agent_ids)) + id_map = dict(zip(agent_ids, ns3_ids)) + return id_map + + +def prepare_rsu_id_map(rsu_actions: pd.DataFrame, veh_count: int) -> dict[str, str]: + agent_ids = rsu_actions["agent_id"] + ns3_ids = range(veh_count, len(agent_ids) + veh_count) + id_map = dict(zip(agent_ids, ns3_ids)) + return id_map + + +def modify_agent_ids(input_df: pd.DataFrame, veh_id_map: dict[str, str]) -> pd.DataFrame: + new_df = input_df.copy() + new_df["agent_id"] = new_df["agent_id"].map(veh_id_map) + return new_df + + +if __name__ == "__main__": + args = argparse.ArgumentParser() + args.add_argument("--scenario", type=str, required=True) + scenario = args.parse_args().scenario + + input_path = Path("/Users/charan/workspace/disolv/input/" + scenario) + output_path = Path("/Users/charan/workspace/disolv/input/" + scenario + "_ns3") + + # Create directories + output_path.mkdir(exist_ok=True) + act_path = output_path / "activations" + act_path.mkdir(exist_ok=True) + links_path = output_path / "links" + links_path.mkdir(exist_ok=True) + pos_path = output_path / "positions" + pos_path.mkdir(exist_ok=True) + + # Read vehicle activations and create veh ID map + veh_activations = input_path / "activations" / "vehicle_activations.parquet" + veh_ac_df = pd.read_parquet(veh_activations) + veh_id_map = prepare_veh_id_map(veh_ac_df) + + # Read RSU activations and create RSU id map + rsu_activations = input_path / "activations" / "rsu_activations.parquet" + rsu_ac_df = pd.read_parquet(rsu_activations) + rsu_id_map = prepare_rsu_id_map(rsu_ac_df, len(veh_id_map)) + + modified_df = modify_agent_ids(veh_ac_df, veh_id_map) + new_veh_activations = output_path / "activations" / "vehicle_activations.parquet" + modified_df.to_parquet(new_veh_activations) + + modified_df = modify_agent_ids(rsu_ac_df, rsu_id_map) + new_rsu_activations = output_path / "activations" / "rsu_activations.parquet" + modified_df.to_parquet(new_rsu_activations) + + # Modify the links files + links_file = 
input_path / "links" / "v2r_links.parquet" + links_df = pd.read_parquet(links_file) + links_df["agent_id"] = links_df["agent_id"].map(veh_id_map) + links_df["target_id"] = links_df["target_id"].map(rsu_id_map) + new_links = output_path / "links" / "v2r_links.parquet" + links_df.to_parquet(new_links) + + links_file = input_path / "links" / "v2v_links.parquet" + links_df = pd.read_parquet(links_file) + links_df["agent_id"] = links_df["agent_id"].map(veh_id_map) + links_df["target_id"] = links_df["target_id"].map(veh_id_map) + new_links = output_path / "links" / "v2v_links.parquet" + links_df.to_parquet(new_links) + + links_file = input_path / "links" / "r2v_links.parquet" + links_df = pd.read_parquet(links_file) + links_df["agent_id"] = links_df["agent_id"].map(rsu_id_map) + links_df["target_id"] = links_df["target_id"].map(veh_id_map) + new_links = output_path / "links" / "r2v_links.parquet" + links_df.to_parquet(new_links) + + links_file = input_path / "links" / "r2r_links.parquet" + links_df = pd.read_parquet(links_file) + links_df["agent_id"] = links_df["agent_id"].map(rsu_id_map) + links_df["target_id"] = links_df["target_id"].map(rsu_id_map) + new_links = output_path / "links" / "r2r_links.parquet" + links_df.to_parquet(new_links) + + veh_positions = input_path / "positions" / (scenario + "_fcd.parquet") + veh_positions_df = pd.read_parquet(veh_positions) + modified_df = modify_agent_ids(veh_positions_df, veh_id_map) + new_positions = output_path / "positions" / (scenario + "_fcd.parquet") + modified_df.to_parquet(new_positions) + + rsu_positions = input_path / "positions" / "rsu_deployment.parquet" + rsu_positions_df = pd.read_parquet(rsu_positions) + modified_df = modify_agent_ids(rsu_positions_df, rsu_id_map) + new_positions = output_path / "positions" / "rsu_deployment.parquet" + modified_df.to_parquet(new_positions) + + # Save the ID mappings to the output. 
diff --git a/src/prep_disolv/scripts/v2r_transitions.py b/src/prep_disolv/scripts/v2r_transitions.py index 7bda337..33e5e85 100644 --- a/src/prep_disolv/scripts/v2r_transitions.py +++ b/src/prep_disolv/scripts/v2r_transitions.py @@ -26,15 +26,15 @@ def _read_node_mapping(self): rsu_df = pd.read_parquet(self.rsu_file) # Create a mapping between node ID and NS3 ID - self.mapping_with_offset = dict(zip(rsu_df["agent_id"], rsu_df["ns3_id"])) + self.mapping_with_offset = dict(zip(rsu_df["agent_id"], rsu_df["agent_id"])) # Get offset based on first RSU NS3 ID and subtract from all IDs. - offset = rsu_df.iloc[0]["ns3_id"] - rsu_df["ns3_id"] = rsu_df["ns3_id"] - offset - rsu_df["ns3_id"] = rsu_df["ns3_id"].astype(int) + offset = rsu_df.iloc[0]["agent_id"] + rsu_df["agent_id"] = rsu_df["agent_id"] - offset + rsu_df["agent_id"] = rsu_df["agent_id"].astype(int) # Create a mapping between node ID and NS3 ID - self.mapping_without_offset = dict(zip(rsu_df["agent_id"], rsu_df["ns3_id"])) + self.mapping_without_offset = dict(zip(rsu_df["agent_id"], rsu_df["agent_id"])) def get_mapping_with_offset(self) -> dict: return self.mapping_with_offset @@ -48,8 +48,8 @@ def get_mapping_without_offset(self) -> dict: args.add_argument("--scenario", type=str, required=True) scenario = args.parse_args().scenario - input_path = Path("/mnt/hdd/workspace/disolv/input/" + scenario) - rsu_pos_file = input_path / "positions" / "roadside_units.parquet" + input_path = Path("/Users/charan/workspace/disolv/input/" + scenario) + rsu_pos_file = input_path / "positions" / "rsu_deployment.parquet" links_file = input_path / "links" / "v2r_links.parquet" print("Reading RSU data from", rsu_pos_file) print("Reading links data from", links_file) @@ -77,17 +77,9 @@ def get_mapping_without_offset(self) -> dict: # Convert node IDs to NS3 IDs ns3_df = filtered_df.copy() - node_mapping = transitions.get_mapping_with_offset() - ns3_df["target_id"] = ns3_df["target_id"].replace(node_mapping) + # node_mapping = 
transitions.get_mapping_with_offset() + # ns3_df["target_id"] = ns3_df["target_id"].replace(node_mapping) output_file = input_path / "links" / "v2r_transitions.parquet" print("Writing links data for NS3 to", output_file) ns3_df.to_parquet(output_file, index=False) - - mosaic_df = filtered_df.copy() - node_mapping = transitions.get_mapping_without_offset() - mosaic_df["target_id"] = mosaic_df["target_id"].replace(node_mapping) - - link_transition_file = input_path / "links" / "v2r_transitions.csv" - print("Writing links data for Mosaic to", link_transition_file) - mosaic_df.to_csv(link_transition_file, index=False, header=False) diff --git a/src/prep_disolv/vehicle/sumo.py b/src/prep_disolv/vehicle/sumo.py index a891ac2..db049e9 100644 --- a/src/prep_disolv/vehicle/sumo.py +++ b/src/prep_disolv/vehicle/sumo.py @@ -11,13 +11,11 @@ import pyarrow.parquet as pq import tqdm -from typing import NamedTuple - -from src.prep_disolv.common.columns import POSITIONS_FOLDER -from src.prep_disolv.common.utils import get_offsets -from src.prep_disolv.common.config import NETWORK_FILE, TRACE_FILE, Config, \ +from prep_disolv.common.columns import POSITIONS_FOLDER +from prep_disolv.common.utils import get_offsets +from prep_disolv.common.config import NETWORK_FILE, TRACE_FILE, Config, \ TRAFFIC_SETTINGS, SIMULATION_SETTINGS, DURATION -from src.prep_disolv.vehicle.veh_activations import VehicleActivation +from prep_disolv.vehicle.veh_activations import VehicleActivation logger = logging.getLogger(__name__)