Skip to content

Commit

Permalink
fixed: minor changes finalized for paper two
Browse files Browse the repository at this point in the history
  • Loading branch information
nagacharan-tangirala committed Dec 5, 2024
1 parent 3c755d5 commit 0c267ef
Show file tree
Hide file tree
Showing 10 changed files with 131 additions and 37 deletions.
1 change: 1 addition & 0 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,7 @@ dependencies = [
"toml>=0.10.2",
"pyarrow>=14.0.0",
"tqdm>=4.66.0",
"pyproj>=3.6.1",
]

[project.optional-dependencies]
Expand Down
Empty file added src/__init__.py
Empty file.
2 changes: 1 addition & 1 deletion src/prep_disolv/common/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@

import toml

from src.prep_disolv.common.columns import (
from prep_disolv.common.columns import (
ACTIVATIONS_FOLDER,
POSITIONS_FOLDER,
)
Expand Down
10 changes: 5 additions & 5 deletions src/prep_disolv/core/core.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,14 +2,14 @@

import logging

from src.prep_disolv.controller.controller import ControllerConverter
from src.prep_disolv.rsu.rsu import RsuConverter
from src.prep_disolv.common.config import (
from prep_disolv.controller.controller import ControllerConverter
from prep_disolv.rsu.rsu import RsuConverter
from prep_disolv.common.config import (
LOG_SETTINGS,
Config,
)
from src.prep_disolv.common.logger import setup_logging
from src.prep_disolv.vehicle.vehicle import VehicleConverter
from prep_disolv.common.logger import setup_logging
from prep_disolv.vehicle.vehicle import VehicleConverter

logger = logging.getLogger(__name__)

Expand Down
4 changes: 2 additions & 2 deletions src/prep_disolv/rsu/given.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,8 +3,8 @@
import numpy as np
import pandas as pd

from src.prep_disolv.common.columns import POSITIONS_FOLDER, ACTIVATIONS_FOLDER, ACTIVATION_COLUMNS
from src.prep_disolv.common.config import Config, RSU_SETTINGS, START_TIME, SIMULATION_SETTINGS, DURATION, RSU_FILENAME
from prep_disolv.common.columns import POSITIONS_FOLDER, ACTIVATIONS_FOLDER, ACTIVATION_COLUMNS
from prep_disolv.common.config import Config, RSU_SETTINGS, START_TIME, SIMULATION_SETTINGS, DURATION, RSU_FILENAME


class RSUData:
Expand Down
6 changes: 3 additions & 3 deletions src/prep_disolv/rsu/junction.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,16 +7,16 @@
import numpy as np
import pandas as pd

from src.prep_disolv.common.columns import (
from prep_disolv.common.columns import (
POSITIONS_FOLDER,
ACTIVATIONS_FOLDER,
RSU_COLUMNS,
ACTIVATION_COLUMNS,
COORD_X,
COORD_Y,
)
from src.prep_disolv.common.utils import get_offsets, get_projection
from src.prep_disolv.common.config import ID_INIT, NETWORK_FILE, START_TIME, \
from prep_disolv.common.utils import get_offsets, get_projection
from prep_disolv.common.config import ID_INIT, NETWORK_FILE, START_TIME, \
RSU_SETTINGS, TRAFFIC_SETTINGS, SIMULATION_SETTINGS, Config, DURATION

JUNCTION = "junction"
Expand Down
6 changes: 3 additions & 3 deletions src/prep_disolv/rsu/rsu.py
Original file line number Diff line number Diff line change
@@ -1,14 +1,14 @@
from __future__ import annotations

from src.prep_disolv.common.config import (
from prep_disolv.common.config import (
OUTPUT_PATH,
OUTPUT_SETTINGS,
PLACEMENT,
RSU_SETTINGS,
Config,
)
from src.prep_disolv.rsu.junction import JunctionPlacement
from src.prep_disolv.rsu.given import InputPlacement
from prep_disolv.rsu.junction import JunctionPlacement
from prep_disolv.rsu.given import InputPlacement


class RsuConverter:
Expand Down
103 changes: 103 additions & 0 deletions src/prep_disolv/scripts/ns3_input_fix.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,103 @@
import argparse
from pathlib import Path

import pandas as pd


def prepare_veh_id_map(veh_actions: pd.DataFrame) -> dict:
    """Map each vehicle agent ID to a contiguous ns-3 node ID starting at 0.

    Args:
        veh_actions: Activation table with an ``agent_id`` column; row order
            determines the ns-3 ID assignment.

    Returns:
        Mapping from original agent ID to its integer ns-3 ID.
        (Original annotation claimed ``dict[str, str]``; values are ints.)

    NOTE(review): duplicate agent IDs would silently collapse to the last
    assigned ns-3 ID — assumes agent IDs are unique, confirm upstream.
    """
    agent_ids = veh_actions["agent_id"]
    return dict(zip(agent_ids, range(len(agent_ids))))


def prepare_rsu_id_map(rsu_actions: pd.DataFrame, veh_count: int) -> dict:
    """Map each RSU agent ID to an ns-3 node ID offset past the vehicles.

    RSU ns-3 IDs continue where the vehicle IDs end, i.e. they run from
    ``veh_count`` to ``veh_count + len(rsu_actions) - 1``.

    Args:
        rsu_actions: Activation table with an ``agent_id`` column; row order
            determines the ns-3 ID assignment.
        veh_count: Number of vehicle IDs already assigned (the offset).

    Returns:
        Mapping from original RSU agent ID to its integer ns-3 ID.
        (Original annotation claimed ``dict[str, str]``; values are ints.)
    """
    agent_ids = rsu_actions["agent_id"]
    return dict(zip(agent_ids, range(veh_count, veh_count + len(agent_ids))))


def modify_agent_ids(input_df: pd.DataFrame, veh_id_map: dict) -> pd.DataFrame:
    """Return a copy of *input_df* with ``agent_id`` translated via *veh_id_map*.

    The input frame is not mutated. Per ``pandas.Series.map`` semantics, any
    agent ID missing from the map becomes NaN in the result — callers should
    build the map from the same data they translate.

    Args:
        input_df: Frame containing an ``agent_id`` column.
        veh_id_map: Mapping from original agent ID to integer ns-3 ID.
            (Original annotation claimed ``dict[str, str]``; values are ints.)

    Returns:
        A new frame with ``agent_id`` replaced by the mapped ns-3 IDs.
    """
    translated = input_df.copy()
    translated["agent_id"] = translated["agent_id"].map(veh_id_map)
    return translated


if __name__ == "__main__":
    # Rewrite a disolv scenario's input files with contiguous integer ns-3
    # node IDs: vehicles take 0..V-1, RSUs take V..V+R-1.
    parser = argparse.ArgumentParser()
    parser.add_argument("--scenario", type=str, required=True)
    scenario = parser.parse_args().scenario

    input_path = Path("/Users/charan/workspace/disolv/input/" + scenario)
    output_path = Path("/Users/charan/workspace/disolv/input/" + scenario + "_ns3")

    # Create the output directory tree.
    output_path.mkdir(exist_ok=True)
    for subdir in ("activations", "links", "positions"):
        (output_path / subdir).mkdir(exist_ok=True)

    # Read vehicle activations and build the vehicle ID map.
    veh_ac_df = pd.read_parquet(input_path / "activations" / "vehicle_activations.parquet")
    veh_id_map = prepare_veh_id_map(veh_ac_df)

    # Read RSU activations and build the RSU ID map, offset past the vehicles.
    rsu_ac_df = pd.read_parquet(input_path / "activations" / "rsu_activations.parquet")
    rsu_id_map = prepare_rsu_id_map(rsu_ac_df, len(veh_id_map))

    # Write translated activation files.
    modify_agent_ids(veh_ac_df, veh_id_map).to_parquet(
        output_path / "activations" / "vehicle_activations.parquet"
    )
    modify_agent_ids(rsu_ac_df, rsu_id_map).to_parquet(
        output_path / "activations" / "rsu_activations.parquet"
    )

    # Translate both endpoint columns of every links file.
    # BUG FIX: the original wrote the stale ``modified_df`` (RSU activations)
    # to the v2v, r2v, and r2r files instead of the translated links frame;
    # only the v2r file was written correctly.
    link_maps = {
        "v2r_links.parquet": (veh_id_map, rsu_id_map),
        "v2v_links.parquet": (veh_id_map, veh_id_map),
        "r2v_links.parquet": (rsu_id_map, veh_id_map),
        "r2r_links.parquet": (rsu_id_map, rsu_id_map),
    }
    for link_file, (source_map, target_map) in link_maps.items():
        links_df = pd.read_parquet(input_path / "links" / link_file)
        links_df["agent_id"] = links_df["agent_id"].map(source_map)
        links_df["target_id"] = links_df["target_id"].map(target_map)
        links_df.to_parquet(output_path / "links" / link_file)

    # Translate the vehicle position trace (SUMO FCD export).
    fcd_name = scenario + "_fcd.parquet"
    veh_positions_df = pd.read_parquet(input_path / "positions" / fcd_name)
    modify_agent_ids(veh_positions_df, veh_id_map).to_parquet(
        output_path / "positions" / fcd_name
    )

    # Translate the RSU deployment positions.
    rsu_positions_df = pd.read_parquet(input_path / "positions" / "rsu_deployment.parquet")
    modify_agent_ids(rsu_positions_df, rsu_id_map).to_parquet(
        output_path / "positions" / "rsu_deployment.parquet"
    )

    # TODO: persist veh_id_map / rsu_id_map alongside the output so the
    # original-to-ns3 ID mapping is traceable after conversion.
26 changes: 9 additions & 17 deletions src/prep_disolv/scripts/v2r_transitions.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,15 +26,15 @@ def _read_node_mapping(self):
rsu_df = pd.read_parquet(self.rsu_file)

# Create a mapping between node ID and NS3 ID
self.mapping_with_offset = dict(zip(rsu_df["agent_id"], rsu_df["ns3_id"]))
self.mapping_with_offset = dict(zip(rsu_df["agent_id"], rsu_df["agent_id"]))

# Get offset based on first RSU NS3 ID and subtract from all IDs.
offset = rsu_df.iloc[0]["ns3_id"]
rsu_df["ns3_id"] = rsu_df["ns3_id"] - offset
rsu_df["ns3_id"] = rsu_df["ns3_id"].astype(int)
offset = rsu_df.iloc[0]["agent_id"]
rsu_df["agent_id"] = rsu_df["agent_id"] - offset
rsu_df["agent_id"] = rsu_df["agent_id"].astype(int)

# Create a mapping between node ID and NS3 ID
self.mapping_without_offset = dict(zip(rsu_df["agent_id"], rsu_df["ns3_id"]))
self.mapping_without_offset = dict(zip(rsu_df["agent_id"], rsu_df["agent_id"]))

def get_mapping_with_offset(self) -> dict:
return self.mapping_with_offset
Expand All @@ -48,8 +48,8 @@ def get_mapping_without_offset(self) -> dict:
args.add_argument("--scenario", type=str, required=True)
scenario = args.parse_args().scenario

input_path = Path("/mnt/hdd/workspace/disolv/input/" + scenario)
rsu_pos_file = input_path / "positions" / "roadside_units.parquet"
input_path = Path("/Users/charan/workspace/disolv/input/" + scenario)
rsu_pos_file = input_path / "positions" / "rsu_deployment.parquet"
links_file = input_path / "links" / "v2r_links.parquet"
print("Reading RSU data from", rsu_pos_file)
print("Reading links data from", links_file)
Expand Down Expand Up @@ -77,17 +77,9 @@ def get_mapping_without_offset(self) -> dict:

# Convert node IDs to NS3 IDs
ns3_df = filtered_df.copy()
node_mapping = transitions.get_mapping_with_offset()
ns3_df["target_id"] = ns3_df["target_id"].replace(node_mapping)
# node_mapping = transitions.get_mapping_with_offset()
# ns3_df["target_id"] = ns3_df["target_id"].replace(node_mapping)

output_file = input_path / "links" / "v2r_transitions.parquet"
print("Writing links data for NS3 to", output_file)
ns3_df.to_parquet(output_file, index=False)

mosaic_df = filtered_df.copy()
node_mapping = transitions.get_mapping_without_offset()
mosaic_df["target_id"] = mosaic_df["target_id"].replace(node_mapping)

link_transition_file = input_path / "links" / "v2r_transitions.csv"
print("Writing links data for Mosaic to", link_transition_file)
mosaic_df.to_csv(link_transition_file, index=False, header=False)
10 changes: 4 additions & 6 deletions src/prep_disolv/vehicle/sumo.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,13 +11,11 @@
import pyarrow.parquet as pq
import tqdm

from typing import NamedTuple

from src.prep_disolv.common.columns import POSITIONS_FOLDER
from src.prep_disolv.common.utils import get_offsets
from src.prep_disolv.common.config import NETWORK_FILE, TRACE_FILE, Config, \
from prep_disolv.common.columns import POSITIONS_FOLDER
from prep_disolv.common.utils import get_offsets
from prep_disolv.common.config import NETWORK_FILE, TRACE_FILE, Config, \
TRAFFIC_SETTINGS, SIMULATION_SETTINGS, DURATION
from src.prep_disolv.vehicle.veh_activations import VehicleActivation
from prep_disolv.vehicle.veh_activations import VehicleActivation

logger = logging.getLogger(__name__)

Expand Down

0 comments on commit 0c267ef

Please sign in to comment.