# gen_climate_csv.py
# Converts a NOAA climate station CSV into per-label CSVs plus Cypher
# load/post-processing scripts for import into Neo4j.
import csv
import datetime
from nbiHeaderMapper import *
from nbi_prop_converter import *
import sys
import os
# --- command-line handling ------------------------------------------------
# argv[1]: path to the NOAA climate CSV file (required)
# argv[2]: optional 'true'/'false' — append to existing output CSVs
append_rows = False
if len(sys.argv) <= 1:
    print('please input NOAA climate file path.')
    # Bug fix: bare exit() relies on the `site` module being loaded;
    # sys.exit() is the explicit, always-available form.
    sys.exit()
else:
    file_path = sys.argv[1]
    if len(sys.argv) > 2:
        append_rows = sys.argv[2].lower() == 'true'

# Output directory for the generated CSVs and Cypher scripts.
root = './output/'
# exist_ok=True replaces the racy exists()-then-makedirs() pattern.
os.makedirs(root, exist_ok=True)
def gen_prop(row,prop_name,format=True):
    """Return the cleaned value of *prop_name* from a CSV *row*.

    A matching pair of wrapping single quotes is stripped, then the
    first remaining embedded single quote (if any) is replaced with a
    space.  When a converter is registered in ``prop_converters`` for
    the property, its result is rendered: numeric results are returned
    as plain strings; datetimes are formatted as ``YYYY-MM-DD``; other
    results are stringified.  Non-numeric results (and unconverted
    values) are wrapped in single quotes when *format* is True.
    Empty values yield ''.
    """
    value = row[prop_name]
    # Drop a matching pair of surrounding single quotes.
    if len(value) > 2 and value[0] == '\'' and value[-1] == '\'':
        value = value[1:-1]
    # Replace the first remaining quote with a space so it cannot break
    # the single-quoting applied below.
    quote_at = value.find('\'')
    if quote_at != -1:
        value = value[:quote_at] + ' ' + value[quote_at + 1:]
    if not value:
        return ''
    if prop_name in prop_converters:
        res = prop_converters[prop_name](value)
        if isinstance(res, (int, float)):
            return str(res)
        if isinstance(res, datetime.datetime):
            text = res.strftime('%Y-%m-%d')
        else:
            text = res if isinstance(res, str) else str(res)
        return '\'' + text + '\'' if format else text
    return '\'' + value + '\'' if format else value
def gen_prop_post_script(label,prop_name,prop_type,is_node=True):
    """Build a Cypher post-processing statement that casts a string
    property to its real type after a LOAD CSV import.

    label     -- node label or relationship type to match
    prop_name -- property to convert in place
    prop_type -- 'datetime', 'int' or 'float' produce a statement;
                 anything else (including 'string') yields just '\\n'
    is_node   -- match nodes when True, relationships when False
    """
    leading_part = 'MATCH (n:' + label + ')' if is_node else 'MATCH p=()-[n:' + label + ']-()'
    if prop_type == 'datetime':
        # Split the ISO 'YYYY-MM-DD' string and rebuild a datetime value.
        return (leading_part + ' WHERE EXISTS(n.' + prop_name
                + ') WITH n,[x IN split(toString(n.' + prop_name
                + '),"-") | toInteger(x)] AS parts SET n.' + prop_name
                + '=datetime({day: parts[2], month: parts[1], year: parts[0]})\n')
    # int and float shared two duplicated branches; only the cast differs.
    conv_funcs = {'int': 'toInt', 'float': 'toFloat'}
    if prop_type in conv_funcs:
        return (leading_part + ' WHERE EXISTS(n.' + prop_name + ') SET n.'
                + prop_name + '=' + conv_funcs[prop_type] + '(n.' + prop_name + ')\n')
    # 'string' and unknown types need no conversion.
    return '\n'
def gen_node_item(row,node_def,writer):
    """Write one node CSV row for *node_def* built from *row*.

    Rows failing the mapper's constraint, or whose composite id is
    empty, are skipped silently.
    """
    # Constraint values are either predicates or literal values to match.
    if node_def.constraint:
        for col, expected in node_def.constraint.items():
            if callable(expected):
                if not expected(row[col]):
                    return
            elif row[col] != expected:
                return
    # Composite id: the id-prop values joined by '_'.
    node_id = '_'.join(gen_prop(row, p, False) for p in node_def.id_props)
    record = [node_id]
    for _, src_col in sorted(node_def.props.items()):
        record.append(gen_prop(row, src_col, False))
    # TODO: take all default_props as string now, update it later
    if node_def.default_props:
        for _, default_val in sorted(node_def.default_props.items()):
            record.append(default_val)
    if not node_id.rstrip('_'):
        return  # skip row with bad id
    writer.writerow(record)
def gen_edge_item(row,edge_def,writer):
    """Write one relationship CSV row for *edge_def* built from *row*.

    Rows failing the mapper's constraint, or with an empty source or
    destination id, are skipped silently.
    """
    # Constraint values are either predicates or literal values to match.
    if edge_def.constraint:
        for k, v in edge_def.constraint.items():
            if callable(v):
                if not v(row[k]):
                    return
            elif row[k] != v:
                return
    item = []
    # Composite source/destination ids: id-prop values joined by '_'.
    src_id = ''
    for prop in edge_def.src_id_props:
        src_id += gen_prop(row, prop, False) + '_'
    item.append(src_id[:-1])
    dst_id = ''
    for prop in edge_def.dst_id_props:
        dst_id += gen_prop(row, prop, False) + '_'
    item.append(dst_id[:-1])
    # Bug fix: removed the unused local `rel_str = edge_def.label`.
    if edge_def.props:
        for k, v in sorted(edge_def.props.items()):
            item.append(gen_prop(row, v, False))
    if len(src_id.rstrip('_')) == 0 or len(dst_id.rstrip('_')) == 0:
        return  # skip row with bad id
    writer.writerow(item)
def gen_inventory_item(row,writers):
    """Feed one inventory CSV *row* through every station node and edge
    mapper, writing into the matching csv.writer from *writers*.

    Bug fix: the parameter was misspelled `writters` while the body
    referenced `writers`, so the function silently used the
    module-level global instead of its argument.  Callers pass the
    argument positionally, so the rename is safe.
    """
    for mapper in station_node_mappers:
        gen_node_item(row, mapper, writers[mapper.label])
    for mapper in station_edge_mappers:
        gen_edge_item(row, mapper, writers[mapper.rel_id])
def prepare(append_rows):
    """Create the per-label/per-relationship CSV writers plus the Cypher
    load script and the post-processing script.

    append_rows -- when True, open the CSVs in append mode and skip
                   writing header rows.
    Returns a dict mapping node label / edge rel_id -> csv.writer.

    Bug fix: the two script files were never closed; they are now
    managed with `with`.  NOTE(review): the per-CSV file handles are
    deliberately left open because the returned writers need them; they
    are only flushed/closed at interpreter exit — a caller-side close
    would require an interface change.
    """
    writers = {}
    file_mode = 'a+' if append_rows else 'w'
    with open(root + 'load_climate_data.script', 'w') as script_file, \
         open(root + 'post_climate_data.script', 'w') as post_file:
        # --- node definitions ---
        for mapper in station_node_mappers:
            f = open(root + 'climate_' + mapper.label + '.csv', file_mode, newline='')
            writer = csv.writer(f)
            header = ['id']
            for k, v in sorted(mapper.props.items()):
                header.append(k)
                # `is not None` replaces the unidiomatic `not ...==None`.
                if mapper.prop_types is not None:
                    post_file.write(gen_prop_post_script(mapper.label, k, mapper.prop_types[k]))
            if mapper.default_props:
                for k, v in sorted(mapper.default_props.items()):
                    header.append(k)
            if not append_rows:
                writer.writerow(header)
            writers[mapper.label] = writer
            script_file.write('CREATE INDEX ON :' + mapper.label + '(id)\r\n')
            script_file.write('using periodic commit 500 LOAD CSV WITH HEADERS FROM "file:///climate_' + mapper.label + '.csv" AS row merge (n:' + mapper.label + '{id:row.id}) on match set n+=row on create set n=row;\r\n')
        # --- edge definitions ---
        for mapper in station_edge_mappers:
            f = open(root + 'climate_' + mapper.rel_id + '.csv', file_mode, newline='')
            writer = csv.writer(f)
            header = ['src_id', 'dst_id']
            # prop_map becomes the Cypher map literal used to SET the
            # relationship's properties from the CSV row.
            prop_map = '{'
            if mapper.props:
                for k, v in sorted(mapper.props.items()):
                    header.append(k)
                    prop_map += k + ':row.' + k + ','
                    if mapper.prop_types is not None:
                        post_file.write(gen_prop_post_script(mapper.label, k, mapper.prop_types[k], is_node=False))
            prop_map = prop_map.rstrip(',') + '}'
            if not append_rows:
                writer.writerow(header)
            writers[mapper.rel_id] = writer
            if len(prop_map) > 2:
                script_file.write('using periodic commit 500 LOAD CSV WITH HEADERS FROM "file:///climate_' + mapper.rel_id + '.csv" AS row match (s:' + mapper.src_label + '{id:row.src_id}),(d:' + mapper.dst_label + '{id:row.dst_id}) merge (s)-[r:' + mapper.label + ']->(d) on match set r+=' + prop_map + ' on create set r=' + prop_map + ';\r\n')
            else:
                script_file.write('using periodic commit 500 LOAD CSV WITH HEADERS FROM "file:///climate_' + mapper.rel_id + '.csv" AS row match (s:' + mapper.src_label + '{id:row.src_id}),(d:' + mapper.dst_label + '{id:row.dst_id}) merge (s)-[r:' + mapper.label + ']->(d);\r\n')
        # Derived spatial property for stations, computed once per import.
        post_file.write('match (n:Station) set n.loc_point=point({longitude:n.longitude,latitude:n.latitude})')
    return writers
# --- main: stream the NOAA CSV through the mappers ------------------------
with open(file_path) as csv_file:
    writers = prepare(append_rows)
    # DictReader consumes the header line itself, so every yielded row is
    # a data row.  Bug fix: the old code treated the first yielded row as
    # a header and silently dropped the first data record.
    csv_reader = csv.DictReader(csv_file, delimiter=',')
    line_count = 0
    for row in csv_reader:
        gen_inventory_item(row, writers)
        line_count += 1
        if line_count % 100 == 0:
            print(str(line_count) + ' rows processed...')
print('file {' + file_path + '} converted.')
# Bug fix: the message pointed users at 'load_nbi_data.script', but this
# script generates 'load_climate_data.script'.
print('copy all csv files in ' + root + ' folder to import folder in neo4j installation path, and exec scripts in load_climate_data.script file one by one')