Create version 4.3.3 (#5)
Signed-off-by: David P. Chassin <dchassin@slac.stanford.edu>
Signed-off-by: David P. Chassin <david.chassin@me.com>
Signed-off-by: Mitchell Victoriano <mitchell.victoriano@gmail.com>
Signed-off-by: Mitchell Victoriano <47313912+MitchellAV@users.noreply.github.com>
Signed-off-by: Duncan Ragsdale <88173870+Thistleman@users.noreply.github.com>
Co-authored-by: Mitchell Victoriano <47313912+MitchellAV@users.noreply.github.com>
Co-authored-by: aivanova5 <ivanova.alyona5@gmail.com>
Co-authored-by: Duncan Ragsdale <88173870+Thistleman@users.noreply.github.com>
Signed-off-by: David P. Chassin <david.chassin@me.com>
4 people authored and dchassin committed Jan 27, 2024
1 parent 2291174 commit ad8e25f
Showing 2 changed files with 23 additions and 41 deletions.
55 changes: 18 additions & 37 deletions converters/csv-ami2glm-player.py
@@ -4,7 +4,7 @@
Shell:
$ gridlabd convert -i ami:AMI.csv,ami_key:AMI_KEYS.csv, network:NETWORK.csv
-o PLAYERS.csv -f csv-ami -t glm-player [OPTIONS ...]
-o PLAYERS.csv -f xlsx-spida -t csv-geodata [OPTIONS ...]
GLM:
#convert ami:AMI.csv,ami_key:AMI_KEYS.csv
@@ -29,11 +29,10 @@
import re
import numpy as np
import os
import csv


default_options = {
"folder_name" : "./player/",
# "include_network" : None,
}

def string_clean(input_str):
@@ -46,6 +45,7 @@ def string_clean(input_str):
ami_key = False

def convert(input_files, output_file, options={}):
print('test')

if type(input_files) is dict:
for key in input_files:
@@ -88,37 +88,18 @@ def convert(input_files, output_file, options={}):

node_ID_set = set(df_ami['transformer_structure'])

df = pd.DataFrame({'class': ['player']*len(node_ID_set), 'parent' : list(node_ID_set), 'file' : ['player_' + str(node) + '.csv' for node in node_ID_set]})

if not os.path.exists(folder_name):
os.makedirs(folder_name)

if os.path.splitext(output_file)[1]=='.csv' :
df.to_csv(os.path.join(folder_name,os.path.basename(output_file)), index=False)
elif os.path.splitext(output_file)[1]=='.glm' :
with open(output_file, mode='w') as file :
file.write('module tape;\n')

for node_ID in node_ID_set :
file.write('object player {\n')
file.write('\tparent "' + str(node_ID) + '";\n')
file.write('\tfile "' + os.path.join(folder_name,str(node_ID)) + '.csv";\n')
file.write('}\n')

new_column_names = {
'reading_dttm': 'timestamp',
'net_usage': 'power[kW]',
'transformer_structure': 'customer_id'
}
df_ami.rename(columns=new_column_names,inplace=True)
df_ami.drop(['interval_pcfc_date','interval_pcfc_hour'],axis=1,inplace=True)
df_ami.sort_index(inplace=True)

# Iterate over unique customer IDs
for customer_id in df_ami['customer_id'].unique():
# Create a new DataFrame for each customer ID
customer_df = df_ami[df_ami['customer_id'] == customer_id].drop(columns='customer_id')
customer_df = customer_df.sort_values(by='timestamp')
# Save the DataFrame to a CSV file
output_file = f"{folder_name}/{customer_id}.csv"
customer_df.to_csv(output_file, index=False, header=False)
with open(output_file, mode='w', newline='') as file :
writer = csv.writer(file)
writer.writerow(['module tape;'])

for node_ID in node_ID_set :
writer.writerow(['\n'])
writer.writerow(['object player {\n'])
writer.writerow(['\tproperty measured_real_energy;\n'])
writer.writerow(['\tparent ' + node_ID + '\n'])
writer.writerow(['\tfile ./player/' + node_ID + '.csv\n'])
writer.writerow(['}\n'])




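For orientation, here is a minimal standalone sketch of what the converter's output stage amounts to around this change: one player CSV per transformer and a matching set of GLM player objects declaring measured_real_energy. This is not the committed implementation (which builds the GLM text through csv.writer and omits quotes and semicolons on the parent and file lines); the sample readings, the PLAYERS.glm file name, and the quoted, semicolon-terminated syntax are assumptions for illustration.

import os
import pandas as pd

# Hypothetical AMI readings; the converter loads these from the AMI CSV input.
df_ami = pd.DataFrame({
    "reading_dttm": ["2023-01-01 00:00", "2023-01-01 00:00"],
    "net_usage": [1.2, 0.8],
    "transformer_structure": ["T1001", "T1002"],
})

folder_name = "./player/"
os.makedirs(folder_name, exist_ok=True)

# Mirror the converter's column renames: one timestamp/power series per node.
df_ami = df_ami.rename(columns={
    "reading_dttm": "timestamp",
    "net_usage": "power[kW]",
    "transformer_structure": "customer_id",
})

node_ID_set = set(df_ami["customer_id"])

# Write one player CSV per transformer; the GLM player objects reference these files.
for node_ID in node_ID_set:
    per_node = df_ami[df_ami["customer_id"] == node_ID].drop(columns="customer_id")
    per_node.sort_values(by="timestamp").to_csv(
        os.path.join(folder_name, f"{node_ID}.csv"), index=False, header=False)

# Emit the player objects with plain writes (the committed code uses csv.writer).
with open("PLAYERS.glm", "w") as glm:
    glm.write("module tape;\n")
    for node_ID in sorted(node_ID_set):
        glm.write("object player {\n")
        glm.write("\tproperty measured_real_energy;\n")
        glm.write(f'\tparent "{node_ID}";\n')
        glm.write(f'\tfile "{os.path.join(folder_name, node_ID)}.csv";\n')
        glm.write("}\n")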
9 changes: 5 additions & 4 deletions converters/xlsx-spida2csv-geodata.py
@@ -61,7 +61,6 @@
"include_dummy_network" : None,
"include_weather" : None,
"include_mount" : None,
"include_network" : None
}

def string_clean(input_str):
@@ -74,6 +73,7 @@ def string_clean(input_str):
extract_equipment = False

def convert(input_files, output_file, options={}):

if type(input_files) is dict:
for key in input_files:
if not key in ["poles","equipment","network"]:
@@ -92,10 +92,11 @@ def convert(input_files, output_file, options={}):
extract_equipment = True

if "network" in input_files:
global network_name
network_name = input_files["network"]
global include_network
include_network = input_files["network"]
global include_mount
include_mount = True

elif type(input_files) is str:
input_pole_file = input_files
else:
@@ -134,7 +135,7 @@ def convert(input_files, output_file, options={}):
'AS-IS Effective Stress Adjustment', 'AS-IS GPS Point'])

# Read the overhead lines
df_lines = pd.read_csv(network_name) if include_network else pd.DataFrame()
df_lines = pd.read_csv(include_network) if include_network else pd.DataFrame()
overheadline_names = []
for index, row in df_lines.iterrows():

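For context on the second file, a minimal sketch of the pattern it now follows: convert() promotes the optional network input to module-level flags, and the later overhead-line read is guarded on that flag so the row loop is a no-op when no network CSV is supplied. The invocation in the trailing comment and its file names are assumptions, not part of the commit.

import pandas as pd

include_network = None   # path to the optional network CSV, set by convert()
include_mount = None     # switches on pole-mount generation when a network is given

def convert(input_files, output_file, options={}):
    global include_network, include_mount
    if "network" in input_files:
        # Keep the network CSV path and turn on mount generation.
        include_network = input_files["network"]
        include_mount = True

    # Read the overhead lines only when a network file was supplied;
    # otherwise fall back to an empty frame.
    df_lines = pd.read_csv(include_network) if include_network else pd.DataFrame()
    for index, row in df_lines.iterrows():
        pass  # ... build overhead-line geodata from each row ...

# Hypothetical invocation with assumed file names:
# convert({"poles": "POLES.xlsx", "network": "NETWORK.csv"}, "GEODATA.csv")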
