DB_NAME=census
DB_USER=postgres
TIGER_SRID=4269
SHAPELY_URL=http://pypi.python.org/packages/source/S/Shapely/Shapely-1.0.14.tar.gz

# Root folder for the shapefiles.
TIGER_ROOT=http://www2.census.gov/geo/tiger/TIGER2009

# State-specific folders.
DC_ROOT=$(TIGER_ROOT)/11_DISTRICT_OF_COLUMBIA
MD_ROOT=$(TIGER_ROOT)/24_MARYLAND
VA_ROOT=$(TIGER_ROOT)/51_VIRGINIA
PA_ROOT=$(TIGER_ROOT)/42_PENNSYLVANIA
NY_ROOT=$(TIGER_ROOT)/36_NEW_YORK

# URLs for the TIGER/Line block-level shapefiles.
DC_BLOCKS_URL=$(DC_ROOT)/tl_2009_11_tabblock00.zip
MD_BLOCKS_URL=$(MD_ROOT)/tl_2009_24_tabblock00.zip
VA_BLOCKS_URL=$(VA_ROOT)/tl_2009_51_tabblock00.zip
PA_BLOCKS_URL=$(PA_ROOT)/tl_2009_42_tabblock00.zip
NY_BLOCKS_URL=$(NY_ROOT)/tl_2009_36_tabblock00.zip

# Starting with PostGIS 1.4.0, these paths are calculated at install
# time using the pg_config utility. Rather than try to guess where
# PostGIS will wind up installed, we can just check the output of
# pg_config ourselves.
#
# Simple (:=) assignment with $(shell ...) runs pg_config once at
# parse time instead of once per recipe that references the variable.
PG_BINDIR := $(shell pg_config --bindir)
PG_SHAREDIR := $(shell pg_config --sharedir)

# Necessary to run test/data without prerequisites. None of these
# targets produces a file of its own name, so they are all phony.
#
.PHONY : all test data lib clean db newdb tiger_blocks tiger_lines \
	dc_blocks md_blocks va_blocks pa_blocks ny_blocks \
	tiger_blocks_table tiger_lines_table sf1_blocks_table

# The default task, since it comes first in the list.
#
all: clean lib test

test:
	./bin/run_tests

# Download or check out any third-party libraries.
lib:
	if [ ! -d lib/Shapely ]; then \
	  wget -O shapely.tar.gz $(SHAPELY_URL); \
	  tar -xvzf shapely.tar.gz -C lib/ ; \
	  rm shapely.tar.gz; \
	  mv lib/Shapely* lib/Shapely; \
	fi;

# Remove byte-compiled python code.
#
clean:
	find ./ -name '*.pyc' -print0 | xargs -0 rm -f

# Download the shapefiles from Tiger if they don't already exist.
data: tiger_blocks tiger_lines

tiger_blocks: dc_blocks md_blocks va_blocks pa_blocks ny_blocks

dc_blocks:
	mkdir -p data/census2000/district_of_columbia/block
	if [ ! -f data/census2000/district_of_columbia/block/tl_2009_11_tabblock00.shp ]; \
	then \
	  wget -O dcblocks.zip $(DC_BLOCKS_URL); \
	  unzip dcblocks.zip -d ./data/census2000/district_of_columbia/block; \
	  rm dcblocks.zip; \
	fi;

md_blocks:
	mkdir -p data/census2000/maryland/block
	if [ ! -f data/census2000/maryland/block/tl_2009_24_tabblock00.shp ]; \
	then \
	  wget -O mdblocks.zip $(MD_BLOCKS_URL); \
	  unzip mdblocks.zip -d ./data/census2000/maryland/block; \
	  rm mdblocks.zip; \
	fi;

va_blocks:
	mkdir -p data/census2000/virginia/block
	if [ ! -f data/census2000/virginia/block/tl_2009_51_tabblock00.shp ]; \
	then \
	  wget -O vablocks.zip $(VA_BLOCKS_URL); \
	  unzip vablocks.zip -d ./data/census2000/virginia/block; \
	  rm vablocks.zip; \
	fi;

pa_blocks:
	mkdir -p data/census2000/pennsylvania/block
	if [ ! -f data/census2000/pennsylvania/block/tl_2009_42_tabblock00.shp ]; \
	then \
	  wget -O pablocks.zip $(PA_BLOCKS_URL); \
	  unzip pablocks.zip -d ./data/census2000/pennsylvania/block; \
	  rm pablocks.zip; \
	fi;

ny_blocks:
	mkdir -p data/census2000/new_york/block
	if [ ! -f data/census2000/new_york/block/tl_2009_36_tabblock00.shp ]; \
	then \
	  wget -O nyblocks.zip $(NY_BLOCKS_URL); \
	  unzip nyblocks.zip -d ./data/census2000/new_york/block; \
	  rm nyblocks.zip; \
	fi;

tiger_lines:
	bin/download_data

# This imports the Tiger data using shp2pgsql. The shapefiles
# should exist, since this task depends on the "data" task, which
# downloads said shapefiles.
#
# After the TIGER import is done, we use the sf1blocks2sql script to
# parse and import the geographic header record information.
#
db: data newdb tiger_blocks_table tiger_lines_table sf1_blocks_table
# All Blocks
#
# The table already exists, so we can append to it, and we don't have
# to create the GiST index.
	for state in data/census2000/*; do \
	  $(PG_BINDIR)/shp2pgsql \
	    -a \
	    -s $(TIGER_SRID) \
	    -D \
	    $$state/block/*.shp \
	    tiger_blocks \
	  | psql -U $(DB_USER) -d $(DB_NAME); \
	done;
# All Lines
#
# Since the table and index already exist, we can utilize -a,
# and leave -I out.
	for state in data/census2000/*; do \
	  for shapefile in $$state/lines/*.shp; do \
	    echo "Importing $$shapefile."; \
	    $(PG_BINDIR)/shp2pgsql \
	      -a \
	      -s $(TIGER_SRID) \
	      $$shapefile \
	      tiger_lines \
	    | bin/filter-transactions \
	    | psql -U $(DB_USER) -d $(DB_NAME) \
	    > /dev/null; \
	  done; \
	done;
	bin/sf1blocks2sql src/Tests/Fixtures/SummaryFile1/mdgeo.uf1 sf1_blocks \
	  | psql -U $(DB_USER) -d $(DB_NAME) \
	  > /dev/null

# First, we drop and re-create the DB_NAME database (or schema,
# whatever). Then, we add PL/pgSQL support to the database.
#
# At that point, we import the two PostGIS files, postgis.sql and
# spatial_ref_sys.sql. The postgis.sql file contains the geometry
# functions, while spatial_ref_sys.sql contains a table of SRIDs, and
# their associated properties. PostGIS requires both.
#
newdb:
# Ignore the result of dropdb when it fails (e.g. the database does
# not exist yet on a fresh checkout).
	dropdb -U $(DB_USER) $(DB_NAME) || true
	createdb -U $(DB_USER) $(DB_NAME)
	createlang -U $(DB_USER) plpgsql $(DB_NAME)
	psql -d $(DB_NAME) \
	  -U $(DB_USER) \
	  -f $(PG_SHAREDIR)/contrib/postgis.sql \
	  > /dev/null
	psql -d $(DB_NAME) \
	  -U $(DB_USER) \
	  -f $(PG_SHAREDIR)/contrib/spatial_ref_sys.sql \
	  > /dev/null

# This just runs the SQL script to create the sf1_blocks table.
sf1_blocks_table:
	psql -d $(DB_NAME) \
	  -U $(DB_USER) \
	  -f sql/create-sf1_blocks-table.sql \
	  > /dev/null

# Create the tiger_blocks table, and create its GiST index. Having the
# table already exist makes importing via shp2pgsql much easier.
# Any blocks file will work as an argument.
tiger_blocks_table:
	$(PG_BINDIR)/shp2pgsql \
	  -p \
	  -I \
	  -s $(TIGER_SRID) \
	  data/census2000/maryland/block/tl_2009_24_tabblock00.shp \
	  tiger_blocks \
	| psql -U $(DB_USER) -d $(DB_NAME) \
	> /dev/null

# Prepare the tiger_lines table, and create the GiST index on its
# geometry column. Any lines shapefile will do here.
tiger_lines_table:
	$(PG_BINDIR)/shp2pgsql \
	  -p \
	  -I \
	  -s $(TIGER_SRID) \
	  data/census2000/maryland/lines/tl_2009_24510_edges.shp \
	  tiger_lines \
	| psql -U $(DB_USER) -d $(DB_NAME) \
	> /dev/null
# Add a unique index on the "tlid" column.
	psql -U $(DB_USER) \
	  -d $(DB_NAME) \
	  -f sql/create_tlid_unique_index.sql