diff --git a/makefile b/makefile
index 36543d4..11a9c5c 100644
--- a/makefile
+++ b/makefile
@@ -1,7 +1,24 @@
-DB_NAME='census'
-DB_USER='postgres'
-TIGER_DATA_URL='http://www2.census.gov/geo/tiger/TIGER2009/24_MARYLAND/tl_2009_24_tabblock00.zip'
-TIGER_SRID='4269'
+DB_NAME=census
+DB_USER=postgres
+TIGER_SRID=4269
+SHAPELY_URL=http://pypi.python.org/packages/source/S/Shapely/Shapely-1.0.14.tar.gz
+
+# Root folder for the shapefiles.
+TIGER_ROOT=http://www2.census.gov/geo/tiger/TIGER2009
+
+# State-specific folders.
+DC_ROOT=$(TIGER_ROOT)/11_DISTRICT_OF_COLUMBIA
+MD_ROOT=$(TIGER_ROOT)/24_MARYLAND
+VA_ROOT=$(TIGER_ROOT)/51_VIRGINIA
+PA_ROOT=$(TIGER_ROOT)/42_PENNSYLVANIA
+NY_ROOT=$(TIGER_ROOT)/36_NEW_YORK
+
+# URLs for the TIGER/Line block-level shapefiles.
+DC_BLOCKS_URL=$(DC_ROOT)/tl_2009_11_tabblock00.zip
+MD_BLOCKS_URL=$(MD_ROOT)/tl_2009_24_tabblock00.zip
+VA_BLOCKS_URL=$(VA_ROOT)/tl_2009_51_tabblock00.zip
+PA_BLOCKS_URL=$(PA_ROOT)/tl_2009_42_tabblock00.zip
+NY_BLOCKS_URL=$(NY_ROOT)/tl_2009_36_tabblock00.zip
 
 # Starting with PostGIS 1.4.0, these paths are calculated at install
 # time using the pg_config utility. Rather than try to guess where
@@ -12,18 +29,28 @@ PG_SHAREDIR=`pg_config --sharedir`
 # Necessary to run test/data without prerequisites.
 #
-.PHONY : test data
+.PHONY : test data lib
 
 # The default task, since it comes first in the list.
 #
-all: clean test
+all: clean lib test
 
 test:
 	./bin/run_tests
 
+# Download or check out any third-party libraries.
+lib:
+	if [ ! -d lib/Shapely ]; then \
+		wget -O shapely.tar.gz $(SHAPELY_URL); \
+		tar -xvzf shapely.tar.gz -C lib/; \
+		rm shapely.tar.gz; \
+		mv lib/Shapely* lib/Shapely; \
+	fi;
+
+
 # Remove byte-compiled python code.
 #
 clean:
@@ -31,59 +58,166 @@ clean:
 
 
 # Download the shapefiles from Tiger if they don't already exist.
-# For now, we're only dealing with the Census 2000 Maryland Block
-# data, so the filenames are hard-coded. Easy enough to change.
-#
-data:
+data: tiger_blocks tiger_lines
+
+tiger_blocks: dc_blocks md_blocks va_blocks pa_blocks ny_blocks
+
+dc_blocks:
+	mkdir -p data/census2000/district_of_columbia/block
+	if [ ! -f data/census2000/district_of_columbia/block/tl_2009_11_tabblock00.shp ]; \
+	then \
+		wget -O dcblocks.zip $(DC_BLOCKS_URL); \
+		unzip dcblocks.zip -d ./data/census2000/district_of_columbia/block; \
+		rm dcblocks.zip; \
+	fi;
+
+md_blocks:
 	mkdir -p data/census2000/maryland/block
-	if [ ! -f data/census2000/maryland/block/tl_2009_24_tabblock00.shp ]; then \
-		wget -O tmp.zip $(TIGER_DATA_URL); \
-		unzip tmp.zip -d ./data/census2000/maryland/block; \
-		rm tmp.zip; \
+	if [ ! -f data/census2000/maryland/block/tl_2009_24_tabblock00.shp ]; \
+	then \
+		wget -O mdblocks.zip $(MD_BLOCKS_URL); \
+		unzip mdblocks.zip -d ./data/census2000/maryland/block; \
+		rm mdblocks.zip; \
 	fi;
 
+va_blocks:
+	mkdir -p data/census2000/virginia/block
+	if [ ! -f data/census2000/virginia/block/tl_2009_51_tabblock00.shp ]; \
+	then \
+		wget -O vablocks.zip $(VA_BLOCKS_URL); \
+		unzip vablocks.zip -d ./data/census2000/virginia/block; \
+		rm vablocks.zip; \
+	fi;
 
-# This task does a couple of things. First, it drops and re-creates
-# the DB_NAME database (or schema, whatever). Then, it adds PL/pgSQL
-# support to the database.
+pa_blocks:
+	mkdir -p data/census2000/pennsylvania/block
+	if [ ! -f data/census2000/pennsylvania/block/tl_2009_42_tabblock00.shp ]; \
+	then \
+		wget -O pablocks.zip $(PA_BLOCKS_URL); \
+		unzip pablocks.zip -d ./data/census2000/pennsylvania/block; \
+		rm pablocks.zip; \
+	fi;
+
+ny_blocks:
+	mkdir -p data/census2000/new_york/block
+	if [ ! -f data/census2000/new_york/block/tl_2009_36_tabblock00.shp ]; \
+	then \
+		wget -O nyblocks.zip $(NY_BLOCKS_URL); \
+		unzip nyblocks.zip -d ./data/census2000/new_york/block; \
+		rm nyblocks.zip; \
+	fi;
+
+
+tiger_lines:
+	bin/download_data
+
+
+# This imports the TIGER data using shp2pgsql. The shapefiles
+# should exist, since this task depends on the "data" task, which
+# downloads said shapefiles.
+#
+# After the TIGER import is done, we use the sf1blocks2sql script to
+# parse and import the geographic header record information.
+#
+db: data newdb tiger_blocks_table tiger_lines_table sf1_blocks_table
+# All Blocks
+#
+# The table already exists, so we can append to it, and we don't have
+# to create the GiST index.
+	for state in data/census2000/*; do \
+		$(PG_BINDIR)/shp2pgsql \
+			-a \
+			-s $(TIGER_SRID) \
+			-D \
+			$$state/block/*.shp \
+			tiger_blocks \
+		| psql -U $(DB_USER) -d $(DB_NAME); \
+	done;
+
+# All Lines
+#
+# Since the table and index already exist, we can use -a
+# and leave -I out.
+	for state in data/census2000/*; do \
+		for shapefile in $$state/lines/*.shp; do \
+			echo "Importing $$shapefile."; \
+			$(PG_BINDIR)/shp2pgsql \
+				-a \
+				-s $(TIGER_SRID) \
+				$$shapefile \
+				tiger_lines \
+			| bin/filter-transactions \
+			| psql -U $(DB_USER) -d $(DB_NAME) \
+			> /dev/null; \
+		done; \
+	done;
+
+	bin/sf1blocks2sql src/Tests/Fixtures/SummaryFile1/mdgeo.uf1 sf1_blocks \
+	| psql -U $(DB_USER) -d $(DB_NAME) \
+	> /dev/null
+
+
+
+# First, we drop and re-create the DB_NAME database (or schema,
+# whatever). Then, we add PL/pgSQL support to the database.
 #
 # At that point, we import the two PostGIS files, postgis.sql and
 # spatial_ref_sys.sql. The postgis.sql file contains the geometry
 # functions, while spatial_ref_sys.sql contains a table of SRIDs, and
 # their associated properties. PostGIS requires both.
 #
-# Then, we import the Tiger data using shp2pgsql. The shapefiles
-# should exist, since this task depends on the "data" task, which
-# downloads said shapefiles.
-#
-# Finally, we create the table for the demographic data (obtained from
-# the geographic header records), and populate that table with the output
-# of the sf1blocks2sql script.
-#
-db: data
-	# Ignore the result of dropdb when it fails.
+newdb:
+# Ignore the result of dropdb when it fails.
 	dropdb -U $(DB_USER) $(DB_NAME) || true
 	createdb -U $(DB_USER) $(DB_NAME)
 	createlang -U $(DB_USER) plpgsql $(DB_NAME)
-	psql -d $(DB_NAME) \
-	     -U $(DB_USER) \
-	     -f $(PG_SHAREDIR)/contrib/postgis.sql
-
-	psql -d $(DB_NAME) \
-	     -U $(DB_USER) \
-	     -f $(PG_SHAREDIR)/contrib/spatial_ref_sys.sql
-
-	$(PG_BINDIR)/shp2pgsql -I \
-	                       -s $(TIGER_SRID) \
-	                       data/census2000/maryland/block/tl_2009_24_tabblock00.shp \
-	                       tiger \
-	| psql -U $(DB_USER) -d $(DB_NAME)
-
-	psql -d $(DB_NAME) \
-	     -U $(DB_USER) \
-	     -f sql/create-sf1_blocks-table.sql
-
-	bin/sf1blocks2sql src/Tests/Fixtures/SummaryFile1/mdgeo.uf1 sf1_blocks \
-	| psql -U postgres -d $(DB_NAME)
-
+	psql -d $(DB_NAME) \
+	     -U $(DB_USER) \
+	     -f $(PG_SHAREDIR)/contrib/postgis.sql \
+	     > /dev/null
+
+	psql -d $(DB_NAME) \
+	     -U $(DB_USER) \
+	     -f $(PG_SHAREDIR)/contrib/spatial_ref_sys.sql \
+	     > /dev/null
+
+
+# This runs the SQL script that creates the sf1_blocks table.
+sf1_blocks_table:
+	psql -d $(DB_NAME) \
+	     -U $(DB_USER) \
+	     -f sql/create-sf1_blocks-table.sql \
+	     > /dev/null
+
+
+# Create the tiger_blocks table and its GiST index. Having the
+# table already exist makes importing via shp2pgsql much easier.
+# Any blocks file will work as an argument.
+tiger_blocks_table:
+	$(PG_BINDIR)/shp2pgsql \
+	-p \
+	-I \
+	-s $(TIGER_SRID) \
+	data/census2000/maryland/block/tl_2009_24_tabblock00.shp \
+	tiger_blocks \
+	| psql -U $(DB_USER) -d $(DB_NAME) \
+	> /dev/null
+
+
+# Prepare the tiger_lines table, and create the GiST index on its
+# geometry column. Any lines shapefile will do here.
+tiger_lines_table:
+	$(PG_BINDIR)/shp2pgsql \
+	-p \
+	-I \
+	-s $(TIGER_SRID) \
+	data/census2000/maryland/lines/tl_2009_24510_edges.shp \
+	tiger_lines \
+	| psql -U $(DB_USER) -d $(DB_NAME) \
+	> /dev/null
+
+# Add a unique index on the "tlid" column.
+	psql -U $(DB_USER) \
+	     -d $(DB_NAME) \
+	     -f sql/create_tlid_unique_index.sql
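
A minimal sketch of the intended workflow with the targets above, assuming
GNU make and that wget, tar, unzip, and the PostgreSQL/PostGIS client tools
(psql, createdb, dropdb, createlang, shp2pgsql) are all on the PATH:

    make lib    # fetch and unpack Shapely into lib/Shapely
    make data   # download the TIGER block and line shapefiles
    make db     # recreate $(DB_NAME), prepare the tables, and import

Since "db" already lists "data" as a prerequisite, the middle step is only
needed to pre-fetch the shapefiles on their own.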
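The prepare/append split in the table and import targets follows shp2pgsql's
modes: -p emits only the CREATE TABLE statement (plus the GiST index when -I
is given), -a emits inserts into an existing table, and -D switches those
inserts to PostgreSQL dump format for faster loading. A two-step sketch
against a hypothetical blocks.shp, using the default credentials from the
top of the makefile:

    shp2pgsql -p -I -s 4269 blocks.shp tiger_blocks | psql -U postgres -d census
    shp2pgsql -a -D -s 4269 blocks.shp tiger_blocks | psql -U postgres -d census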
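A quick sanity check after "make db", again assuming the default
DB_NAME=census and DB_USER=postgres (the row counts themselves depend on
which states were imported):

    psql -U postgres -d census -c "SELECT PostGIS_full_version();"
    psql -U postgres -d census -c "SELECT COUNT(*) FROM tiger_blocks;"
    psql -U postgres -d census -c "SELECT COUNT(*) FROM tiger_lines;"
    psql -U postgres -d census -c "SELECT COUNT(*) FROM sf1_blocks;"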