#!/usr/bin/env python
""" Convert remote U.S. Census 2010 data to local tab-separated text files.
Run with --help flag for usage instructions.
"""
from sys import stdout, stderr, argv
from os import SEEK_SET, SEEK_CUR, SEEK_END
from time import time
from csv import reader, DictWriter, DictReader
from os.path import basename, dirname, join
from datetime import timedelta
from optparse import OptionParser
from urlparse import urlparse
from cStringIO import StringIO
from httplib import HTTPConnection
from zipfile import ZipFile
import re
class RemoteFileObject:
    """ Implement enough of this to be useful:
        http://docs.python.org/release/2.5.2/lib/bltin-file-objects.html

        Pull data from a remote URL with HTTP range headers.
    """
    def __init__(self, url, verbose=False, block_size=(16 * 1024)):
        self.verbose = verbose

        # scheme://host/path;parameters?query#fragment
        (scheme, host, path, parameters, query, fragment) = urlparse(url)

        self.host = host
        self.rest = path + (query and ('?' + query) or '')

        self.offset = 0
        self.length = self.get_length()
        self.chunks = {}
        self.block_size = block_size
        self.start_time = time()

    def get_length(self):
        """ Return the total length of the remote resource, in bytes.
        """
        conn = HTTPConnection(self.host)
        # "0-1" is not a valid Range value ("bytes=0-1" would be), so the server
        # ignores it and reports the full resource size in Content-Length;
        # the response body itself is never read.
        conn.request('GET', self.rest, headers={'Range': '0-1'})
        length = int(conn.getresponse().getheader('content-length'))

        if self.verbose:
            print >> stderr, length, 'bytes in', basename(self.rest)

        return length
    def get_range(self, start, end):
        """ Return bytes /start/ through /end/ (inclusive) of the remote resource.
        """
        headers = {'Range': 'bytes=%(start)d-%(end)d' % locals()}

        conn = HTTPConnection(self.host)
        conn.request('GET', self.rest, headers=headers)

        return conn.getresponse().read()
    def read(self, count=None):
        """ Read /count/ bytes from the resource at the current offset.
        """
        if count is None:
            # to the end
            count = self.length - self.offset

        out = StringIO()

        while count:
            # Which block-aligned chunk does the current offset fall in?
            chunk_offset = self.block_size * (self.offset / self.block_size)

            if chunk_offset not in self.chunks:
                # Fetch and cache one whole block starting at the chunk boundary
                byte_range = chunk_offset, min(self.length, chunk_offset + self.block_size) - 1
                self.chunks[chunk_offset] = StringIO(self.get_range(*byte_range))

                if self.verbose:
                    loaded = float(self.block_size) * len(self.chunks) / self.length
                    expect = (time() - self.start_time) / loaded
                    remain = max(0, int(expect * (1 - loaded)))
                    print >> stderr, '%.1f%%' % min(100, 100 * loaded),
                    print >> stderr, 'of', basename(self.rest),
                    print >> stderr, 'with', timedelta(seconds=remain), 'to go'

            chunk = self.chunks[chunk_offset]
            in_chunk_offset = self.offset % self.block_size
            in_chunk_count = min(count, self.block_size - in_chunk_offset)

            chunk.seek(in_chunk_offset, SEEK_SET)
            out.write(chunk.read(in_chunk_count))

            count -= in_chunk_count
            self.offset += in_chunk_count

        out.seek(0)
        return out.read()
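
    # Worked example of read()'s chunk arithmetic with the default 16 KB block
    # size: offset 40000 falls in the chunk starting at 16384 * (40000 / 16384)
    # = 32768 and maps to in-chunk offset 40000 % 16384 = 7232.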
    def seek(self, offset, whence=SEEK_SET):
        """ Seek to the specified offset.
            /whence/ behaves as with other file-like objects:
            http://docs.python.org/lib/bltin-file-objects.html
        """
        if whence == SEEK_SET:
            self.offset = offset
        elif whence == SEEK_CUR:
            self.offset += offset
        elif whence == SEEK_END:
            self.offset = self.length + offset

    def tell(self):
        return self.offset
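
# A minimal usage sketch for RemoteFileObject (the URL is hypothetical): it can
# stand in for a local file anywhere that only read(), seek() and tell() are
# needed, fetching just the blocks that cover the requested bytes.
#
#   remote = RemoteFileObject('http://example.com/big.zip', verbose=True)
#   remote.seek(-22, SEEK_END)   # e.g. a ZIP end-of-central-directory record
#   trailer = remote.read(22)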
def file_choice(tables, verbose):
    """ Choose the right summary file component for each given Census table.
    """
    # code originally in readcsv.py by Peter Gao
    datareader = DictReader(open(join(dirname(argv[0]), "sf1_data_field_descriptors_2010.csv")))
    data = []
    entry = None
    prevCol = None
    current_table = ""

    for line in datareader:
        new_table_number = line['TABLE NUMBER']
        if new_table_number != current_table:
            # save the old one
            if entry is not None:
                data.append(entry)
            entry = {}
            current_table = new_table_number
            entry['Matrix Number'] = line['TABLE NUMBER']
            entry['File Name'] = line['SEGMENT']
            next_line = datareader.next()
            entry['Universe'] = (next_line['FIELD NAME'][9:].lstrip())
            entry['Name'] = line['FIELD NAME'][:line['FIELD NAME'].index('[')-1]
            entry['Cell Count'] = 0
            entry['Field Names'] = []

        # Increment the cell count iff there's actually data, rather than this
        # being a descriptive row, and save the column name
        if len(line['FIELD CODE']) > 0:
            entry['Cell Count'] += 1
            entry['Field Names'].append(line['FIELD CODE'])

            # sanity check: ensure the columns are stored in order
            if entry['Cell Count'] == 1:
                assert int(re.sub('[A-Z]', '', line['FIELD CODE'][-4:])) == 1,\
                    'Field names not stored in order for matrix %s: first column is %s' % (entry['Matrix Number'], line['FIELD CODE'])
            else:
                assert int(re.sub('[A-Z]', '', line['FIELD CODE'][-4:])) == int(re.sub('[A-Z]', '', prevCol[-4:])) + 1,\
                    'Field names are not stored in order for matrix %s: column %s follows column %s' %\
                    (entry['Matrix Number'], line['FIELD CODE'], prevCol)

            prevCol = line['FIELD CODE']

    # append the final entry, which the loop above never saves
    if entry is not None:
        data.append(entry)

    files = []
    for table in tables:
        file_name, column_offset = None, 5

        for row in data:
            curr_file, curr_table, cell_count = row.get('File Name'), row.get('Matrix Number'), int(row.get('Cell Count'))

            if curr_file != file_name:
                # data columns start after five bookkeeping columns in each file
                file_name, column_offset = curr_file, 5

            if curr_table == table:
                if verbose:
                    print >> stderr, table, '-', row.get('Name'), 'in', row.get('Universe')
                files.append((table, file_name, column_offset, cell_count, row.get('Field Names')))
                break

            column_offset += cell_count

    return files
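
# Sketch of file_choice's return shape: a list of (table, file name, column
# offset, cell count, field names) tuples, one per requested table. The values
# below are illustrative only, not checked against the descriptors file:
#
#   file_choice(['P12'], verbose=False)
#   # -> [('P12', '04', 5, 49, ['P0120001', ..., 'P0120049'])]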
def file_paths(state, files):
    """ Convert file numbers and a state into .sf1 file names within the zip,
        e.g. file 3 for California becomes ca000032010.sf1.
    """
    return '%sgeo2010.sf1' % states[state].lower(), dict([(f, '%s%05d2010.sf1' % (states[state].lower(), int(f))) for f in files])
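
# For example, file_paths('California', ['3']) returns
# ('cageo2010.sf1', {'3': 'ca000032010.sf1'}): the geographic header name plus
# a map from file number to data file name.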
def column_names(wide):
    """ Column names for geographic header file
    """
    if wide is True:
        return ['Summary Level', 'Geographic Component', 'State FIPS', 'Place FIPS', 'County FIPS', 'Tract', 'Zip', 'Block Group', 'Block', 'Name', 'Latitude', 'Longitude', 'Land Area', 'Water Area', 'Population', 'Housing Units']
    elif wide is False:
        return ['State FIPS', 'Place FIPS', 'County FIPS', 'Tract', 'Zip', 'Block Group', 'Block']
    else:
        return ['Summary Level', 'Geographic Component', 'State FIPS', 'Place FIPS', 'County FIPS', 'Tract', 'Zip', 'Block Group', 'Block', 'Name', 'Latitude', 'Longitude']

def key_names(wide):
    """ Key names for geographic header file
    """
    if wide is True:
        return ('SUMLEV', 'GEOCOMP', 'STATE', 'PLACE', 'COUNTY', 'TRACT', 'ZCTA5', 'BLOCKGROUP', 'BLOCK', 'NAME', 'LATITUDE', 'LONGITUDE', 'AREALAND', 'AREAWATER', 'POP100', 'HU100')
    elif wide is False:
        return ('STATE', 'PLACE', 'COUNTY', 'TRACT', 'ZCTA5', 'BLOCKGROUP', 'BLOCK')
    else:
        return ('SUMLEV', 'GEOCOMP', 'STATE', 'PLACE', 'COUNTY', 'TRACT', 'ZCTA5', 'BLOCKGROUP', 'BLOCK', 'NAME', 'LATITUDE', 'LONGITUDE')
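
# column_names and key_names must stay index-aligned for each /wide/ setting:
# the output header comes from column_names while values are pulled out of the
# geo record by key_names. A quick sanity sketch:
#
#   for w in (True, False, None):
#       assert len(column_names(w)) == len(key_names(w))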
def get_file_in_zipfile(url, fname, verbose):
    """ Return a file-like object for a file in a remote zipfile
    """
    f = RemoteFileObject(url, verbose, 256 * 1024)
    z = ZipFile(f)
    assert fname in z.namelist(), 'Filename %s not found in ZIP %s' % (fname, url)
    return z.open(fname)
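
# ZipFile can operate directly on a RemoteFileObject because it only needs
# read(), seek() and tell(): locating the central directory and extracting one
# member touches a few 256 KB blocks instead of downloading the whole archive.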
def geo_lines(url, fname, verbose):
    """ Get the appropriate geographic header
    """
    # make sure it is a geographic header
    assert fname[2:] == 'geo2010.sf1', 'Not a geographic header file: %s' % fname

    inp = get_file_in_zipfile(url, fname, verbose)

    # The column offsets and widths recorded here are for the 2010 geographic header.
    # Offsets are one-based, matching page 19 of the SF1 documentation.
    # Note that AREAWATER is called AREAWATR in the docs; despite dropping penultimate
    # e's being all the rage in cool web 2.0 apps (e.g. Flickr), we're going to restore it.
    cols = [('LATITUDE', 337, 11), ('LONGITUDE', 348, 12),
            ('LOGRECNO', 19, 7), ('SUMLEV', 9, 3), ('GEOCOMP', 12, 2),
            ('STATE', 28, 2), ('PLACE', 46, 5), ('COUNTY', 30, 3), ('TRACT', 55, 6),
            ('BLOCKGROUP', 61, 1), ('BLOCK', 62, 4), ('NAME', 227, 90), ('ZCTA5', 172, 5),
            ('AREALAND', 199, 14), ('AREAWATER', 213, 14),
            ('POP100', 319, 9), ('HU100', 328, 9)]

    for line in inp:
        data = dict( [(key, line[s-1:s-1+l].strip()) for (key, s, l) in cols] )

        # The Census Bureau represents positive latitude and longitude as +number;
        # get rid of the plus. (There is positive longitude in the US: see
        # Attu Station CDP, Alaska.)
        for key in ('LATITUDE', 'LONGITUDE'):
            data[key] = data[key].lstrip('+')

        yield data
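
# Sketch of the one-based fixed-width slicing above: for ('STATE', 28, 2) the
# slice is line[28-1:28-1+2], i.e. line[27:29], so a header line carrying '06'
# in columns 28-29 yields California's state FIPS code:
#
#   line = ' ' * 27 + '06' + ' ' * 400
#   line[28-1:28-1+2]   # -> '06'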
def data_lines(url, fname, verbose):
    """ Get all the lines in data file fname from the zip file at url
    """
    data = get_file_in_zipfile(url, fname, verbose)

    for row in reader(data):
        yield row
# Updated for 2010 census
summary_levels = {'state': '040', 'county': '050', 'tract': '140', 'zip': '871', 'blockgroup': '150', 'block': '101', 'place': '160'}

states = {'Alabama': 'AL', 'Alaska': 'AK', 'American Samoa': 'AS', 'Arizona': 'AZ',
          'Arkansas': 'AR', 'California': 'CA', 'Colorado': 'CO', 'Connecticut': 'CT', 'Delaware': 'DE',
          'District of Columbia': 'DC', 'Florida': 'FL', 'Georgia': 'GA', 'Hawaii': 'HI', 'Idaho': 'ID',
          'Illinois': 'IL', 'Indiana': 'IN', 'Iowa': 'IA', 'Kansas': 'KS', 'Kentucky': 'KY',
          'Louisiana': 'LA', 'Maine': 'ME', 'Marshall Islands': 'MH', 'Maryland': 'MD',
          'Massachusetts': 'MA', 'Michigan': 'MI', 'Minnesota': 'MN', 'Mississippi': 'MS',
          'Missouri': 'MO', 'Montana': 'MT', 'Nebraska': 'NE', 'Nevada': 'NV', 'New Hampshire': 'NH',
          'New Jersey': 'NJ', 'New Mexico': 'NM', 'New York': 'NY', 'North Carolina': 'NC',
          'North Dakota': 'ND', 'Ohio': 'OH', 'Oklahoma': 'OK', 'Oregon': 'OR', 'Pennsylvania': 'PA',
          'Puerto Rico': 'PR', 'Rhode Island': 'RI', 'South Carolina': 'SC', 'South Dakota': 'SD',
          'Tennessee': 'TN', 'Texas': 'TX', 'Utah': 'UT', 'Vermont': 'VT', 'Virginia': 'VA',
          'Washington': 'WA', 'West Virginia': 'WV', 'Wisconsin': 'WI', 'Wyoming': 'WY'}
parser = OptionParser(usage="""%%prog [options] [list of table IDs]

Convert remote U.S. Census 2010 data to local tab-separated text files.

Examples:

  Housing basics for counties in Rhode Island:
    census2text2010.py --state 'Rhode Island' H1 H3 H4

  Age breakdowns for census tracts around Oakland, CA:
    census2text2010.py --state California --bbox 37.86 -122.35 37.70 -122.10 --geography tract P12

Complete documentation of Summary File data is dense but helpful:
  http://www.census.gov/prod/cen2010/doc/sf1.pdf

Column descriptions start on page 183.

Available summary files: SF1.
Available summary levels: %s.

See also the numeric summary levels in the SF1 documentation, page 107.
""".rstrip() % ', '.join(summary_levels.keys()))
parser.set_defaults(summary_level='county', table='P1', verbose=None, wide=None)

parser.add_option('-s', '--state', dest='state',
                  help='State, e.g. "Alaska", "District of Columbia". Required.',
                  type='choice', choices=states.keys())

parser.add_option('-o', '--output', dest='output',
                  help='Optional output filename, stdout if omitted.')

parser.add_option('-g', '--geography', dest='summary_level',
                  help='Geographic summary level, e.g. "state", "040". Some available summary levels are %s.' % ', '.join(summary_levels.keys()),
                  type='choice', choices=summary_levels.keys() + summary_levels.values())

parser.add_option('-c', '--county', dest='county',
                  help='County FIPS code (3 digits), e.g. --state California --county 083 would yield data for Santa Barbara County, CA.',
                  type='string')

parser.add_option('-b', '--bbox', dest='bbox',
                  help='Optional geographic bounds: north west south east.',
                  type='float', nargs=4)

parser.add_option('-n', '--narrow', dest='wide',
                  help='Output fewer columns than normal.',
                  action='store_false')

parser.add_option('-w', '--wide', dest='wide',
                  help='Output more columns than normal.',
                  action='store_true')

parser.add_option('-q', '--quiet', dest='verbose',
                  help='Be quieter than normal.',
                  action='store_false')

parser.add_option('-v', '--verbose', dest='verbose',
                  help='Be louder than normal.',
                  action='store_true')
if __name__ == '__main__':

    options, tables = parser.parse_args()

    if options.state is None:
        parser.error('Please specify a state; the 2010 Census no longer provides nation-level files')

    if options.summary_level in summary_levels:
        options.summary_level = summary_levels[options.summary_level]

    # There may be multiple summary levels; if not, fix up so the membership test below works
    if type(options.summary_level) is not tuple:
        options.summary_level = (options.summary_level, )
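
    # e.g. --geography tract becomes ('140',), while a numeric level such as
    # '040' passes through unchanged (as ('040',)).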
    # Figure out what files we need to fetch
    files = file_choice(tables, options.verbose is not False)

    # set up the path to the zipfile
    src_file = 'http://www2.census.gov/census_2010/04-Summary_File_1/%s/%s2010.sf1.zip' % (options.state.replace(' ', '_'), states[options.state].lower())

    if options.verbose is not False:
        print >> stderr, 'Fetching from %s' % src_file
        print >> stderr, ', '.join(options.summary_level), options.state, '-',
        print >> stderr, ', '.join(['%s: file %s (%d @%d)' % (tbl, fn, cc, co) for (tbl, fn, co, cc, flds) in files])
        print >> stderr, '-' * 32

    file_names = set([file_name for (tbl, file_name, co, cc, flds) in files])
    geo_path, data_paths = file_paths(options.state, file_names)
    # Be forgiving about the bounding box: accept the corners in either order
    if options.bbox is not None:
        north = max(options.bbox[0], options.bbox[2])
        south = min(options.bbox[0], options.bbox[2])
        east = max(options.bbox[1], options.bbox[3])
        west = min(options.bbox[1], options.bbox[3])
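
        # e.g. --bbox 37.86 -122.35 37.70 -122.10 normalizes to north=37.86,
        # south=37.70, west=-122.35, east=-122.10 regardless of corner order.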
    # Get the header for the geo columns
    row = column_names(options.wide)

    pat = re.compile(r'^([A-Z]+)(\d+)([A-Z]*)$')

    # Write the header for the data columns
    for (table, fn, co, cell_count, field_names) in files:
        row += field_names

    out = options.output and open(options.output, 'w') or stdout
    out = DictWriter(out, dialect='excel-tab', fieldnames=row)
    out.writeheader()

    # Get iterables for all of the files
    file_iters = {}
    for (tbl, file_name, co, cc, flds) in files:
        if file_name not in file_iters:
            file_iters[file_name] = data_lines(src_file, data_paths[file_name], options.verbose)

    file_names = sorted(file_iters.keys())

    # get rows from geographic header
    geo_iter = geo_lines(src_file, geo_path, options.verbose)
    for geo in geo_iter:
        if geo['SUMLEV'] not in options.summary_level:
            # This is not the summary level you're looking for.
            continue

        if geo['GEOCOMP'] != '00':
            # Geographic Component "00" means the whole thing,
            # not e.g. "01" for urban or "43" for rural parts.
            continue

        if options.county is not None and geo['COUNTY'] != options.county:
            # This is not the county you're looking for
            continue

        if options.bbox is not None:
            lat, lon = float(geo['LATITUDE']), float(geo['LONGITUDE'])
            if lat < south or north < lat or lon < west or east < lon:
                # This geography is outside the bounding box
                continue

        vals = [geo[key] for key in key_names(options.wide)]

        # name the columns appropriately
        row = dict(zip(column_names(options.wide), vals))

        # Iterate over every line in each of the necessary files.
        # It is possible that there won't be an entry for some variable in some file,
        # so we can't iterate over them all at once as was done in the 2000 version
        # of this script.
        for fname in file_iters.keys():
            for line in file_iters[fname]:
                # LOGRECNO is the fifth field of each data file record
                if line[4] == geo['LOGRECNO']:
                    # We found a match; grab every matrix in this file at once.
                    # A matrix is in the form (matrix/table name, file, offset, cell count, field names).
                    for matrix in [i for i in files if i[1] == fname]:
                        names = matrix[4]
                        values = line[matrix[2]:matrix[2]+matrix[3]]
                        row.update(zip(names, values))
                    # done
                    break

        out.writerow(row)
        stdout.flush()
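
# Example invocation, mirroring the usage text above; the default summary level
# is county, and without -o the tab-separated rows go to stdout (the output
# file name here is just illustrative):
#
#   python census2text2010.py --state 'Rhode Island' H1 H3 H4 > ri_housing.txt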