index 13032e9a18ef6b031dc251820dfd49186f8dd26e..11a9c5c60c82671876ab45d006a8770c4a213538 100644 (file)
--- a/makefile
+++ b/makefile
@@ -1,22 +1,56 @@
-DB_NAME='census2000'
-DB_USER='postgres'
-TIGER_DATA_URL='http://www2.census.gov/cgi-bin/shapefiles/multi-file-download?files=24_MARYLAND%2Ftl_2008_24_tabblock00.zip'
-TIGER_SRID='4269'
+DB_NAME=census
+DB_USER=postgres
+TIGER_SRID=4269
+SHAPELY_URL=http://pypi.python.org/packages/source/S/Shapely/Shapely-1.0.14.tar.gz
+
+# Root folder for the shapefiles.
+TIGER_ROOT=http://www2.census.gov/geo/tiger/TIGER2009
+
+# State-specific folders.
+DC_ROOT=$(TIGER_ROOT)/11_DISTRICT_OF_COLUMBIA
+MD_ROOT=$(TIGER_ROOT)/24_MARYLAND
+VA_ROOT=$(TIGER_ROOT)/51_VIRGINIA
+PA_ROOT=$(TIGER_ROOT)/42_PENNSYLVANIA
+NY_ROOT=$(TIGER_ROOT)/36_NEW_YORK
+
+# URLs for the TIGER/Line block-level shapefiles.
+DC_BLOCKS_URL=$(DC_ROOT)/tl_2009_11_tabblock00.zip
+MD_BLOCKS_URL=$(MD_ROOT)/tl_2009_24_tabblock00.zip
+VA_BLOCKS_URL=$(VA_ROOT)/tl_2009_51_tabblock00.zip
+PA_BLOCKS_URL=$(PA_ROOT)/tl_2009_42_tabblock00.zip
+NY_BLOCKS_URL=$(NY_ROOT)/tl_2009_36_tabblock00.zip
+
+# Starting with PostGIS 1.4.0, these paths are calculated at install
+# time using the pg_config utility. Rather than try to guess where
+# PostGIS will wind up installed, we can just check the output of
+# pg_config ourselves.
+PG_BINDIR=`pg_config --bindir`
+PG_SHAREDIR=`pg_config --sharedir`
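+#
+# Note that the backticks are left in the value and expanded by the
+# shell each time a recipe runs, not by make itself. To see what
+# you'll get (output varies by installation):
+#
+#   $ pg_config --sharedir
+#   /usr/share/postgresql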
 
 # Declared .PHONY so that these targets run even when files or
 # directories with the same names already exist.
 #
-.PHONY : test data
+.PHONY : test data lib
 
 
 # The default task, since it comes first in the list.
 #
-all: clean test
+all: clean lib test
 
 
 test:
        ./bin/run_tests
 
 
+# Download or check out any third-party libraries.
+lib:
+       if [ ! -d lib/Shapely ]; then                   \
+               wget -O shapely.tar.gz $(SHAPELY_URL);  \
+		tar -xvzf shapely.tar.gz -C lib/;       \
+               rm shapely.tar.gz;                      \
+               mv lib/Shapely* lib/Shapely;            \
+       fi;
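+# Note that tar's -C option does not create lib/; the recipe assumes
+# the lib/ directory is already part of the checkout. Once
+# lib/Shapely exists, a second "make lib" is a no-op.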
+
+
 # Remove byte-compiled python code.
 #
 clean:
@@ -24,60 +58,166 @@ clean:
 
 
 # Download the shapefiles from Tiger if they don't already exist.
-# For now, we're only dealing with the Census 2000 Maryland Block
-# data, so the filenames are hard-coded. Easy enough to change.
-#
-data:
-       mkdir -p data/census-2000-block/maryland/
-       if [ ! -f data/census-2000-block/maryland/tl_2008_24_tabblock00.shp ]; then \
-               wget -O tiger.zip $(TIGER_DATA_URL); \
-               unzip tiger.zip; \
-               rm tiger.zip; \
-               unzip srv/ftp/geo/tiger/TIGER2008/24_MARYLAND/tl_2008_24_tabblock00.zip \
-                      -d ./data/census-2000-block/maryland/; \
-               rm -rf srv; \
+data: tiger_blocks tiger_lines
+
+tiger_blocks: dc_blocks md_blocks va_blocks pa_blocks ny_blocks
+
+dc_blocks:
+       mkdir -p data/census2000/district_of_columbia/block
+       if [ ! -f data/census2000/district_of_columbia/block/tl_2009_11_tabblock00.shp ]; \
+       then                                                            \
+               wget -O dcblocks.zip $(DC_BLOCKS_URL);                  \
+               unzip dcblocks.zip -d ./data/census2000/district_of_columbia/block;     \
+               rm dcblocks.zip;                                        \
        fi;
 
+md_blocks:
+       mkdir -p data/census2000/maryland/block
+       if [ ! -f data/census2000/maryland/block/tl_2009_24_tabblock00.shp ]; \
+       then                                                                  \
+               wget -O mdblocks.zip $(MD_BLOCKS_URL);                        \
+               unzip mdblocks.zip -d ./data/census2000/maryland/block;       \
+               rm mdblocks.zip;                                              \
+       fi;
 
-# This task does a couple of things. First, it drops and re-creates
-# the DB_NAME database (or schema, whatever). Then, it adds PL/pgSQL
-# support to the database.
-#
-# At that point, we import the two PostGIS files, lwpostgis.sql and
-# spatial_ref_sys.sql. These are magic as far as I'm concerned, but
-# PostGIS requires them.
-#
-# Then, we import the Tiger data using shp2pgsql. The shapefiles
+va_blocks:
+       mkdir -p data/census2000/virginia/block
+       if [ ! -f data/census2000/virginia/block/tl_2009_51_tabblock00.shp ]; \
+       then                                                                  \
+               wget -O vablocks.zip $(VA_BLOCKS_URL);                        \
+               unzip vablocks.zip -d ./data/census2000/virginia/block;       \
+               rm vablocks.zip;                                              \
+       fi;
+
+pa_blocks:
+       mkdir -p data/census2000/pennsylvania/block
+       if [ ! -f data/census2000/pennsylvania/block/tl_2009_42_tabblock00.shp ]; \
+       then                                                                      \
+               wget -O pablocks.zip $(PA_BLOCKS_URL);                            \
+               unzip pablocks.zip -d ./data/census2000/pennsylvania/block;       \
+               rm pablocks.zip;                                                  \
+       fi;
+
+ny_blocks:
+       mkdir -p data/census2000/new_york/block
+       if [ ! -f data/census2000/new_york/block/tl_2009_36_tabblock00.shp ]; \
+       then                                                                  \
+               wget -O nyblocks.zip $(NY_BLOCKS_URL);                        \
+               unzip nyblocks.zip -d ./data/census2000/new_york/block;       \
+               rm nyblocks.zip;                                              \
+       fi;
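+
+# The five *_blocks targets above differ only in the state name and
+# FIPS code; a single template could generate them all. A
+# hypothetical sketch (this makefile keeps the targets explicit for
+# readability):
+#
+#   define blocks_rule
+#   $(1)_blocks:
+#           mkdir -p data/census2000/$(2)/block
+#           wget -O $(1)blocks.zip $(3); ...
+#   endef
+#
+#   $(eval $(call blocks_rule,dc,district_of_columbia,$(DC_BLOCKS_URL)))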
+
+
+tiger_lines:
+       bin/download_data
+
+
+# This imports the Tiger data using shp2pgsql. The shapefiles
 # should exist, since this task depends on the "data" task, which
 # downloads said shapefiles.
 #
-# Finally, we create the table for the demographic data (obtained from
-# the geographic header records), and populate that table with the output
-# of the sf1blocks2sql script.
+# After the TIGER import is done, we use the sf1blocks2sql script to
+# parse and import the geographic header record information.
 #
-db: data
-       dropdb -U $(DB_USER) $(DB_NAME)
-       createdb -U $(DB_USER) $(DB_NAME)
-       createlang -U $(DB_USER) plpgsql $(DB_NAME)
-
-       psql -d $(DB_NAME) \
-             -U $(DB_USER) \
-             -f /usr/share/postgresql/contrib/lwpostgis.sql
+db: data newdb tiger_blocks_table tiger_lines_table sf1_blocks_table
+# All Blocks
+#
+# The table already exists, so we can append to it, and we don't have
+# to create the GiST index.
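+# (-D writes PostgreSQL dump/COPY format rather than one INSERT per
+# row, which loads much faster.)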
+       for state in data/census2000/*; do                  \
+               $(PG_BINDIR)/shp2pgsql                      \
+                       -a                                  \
+                       -s $(TIGER_SRID)                    \
+                       -D                                  \
+                       $$state/block/*.shp                 \
+                       tiger_blocks                        \
+                       | psql -U $(DB_USER) -d $(DB_NAME); \
+       done;
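+# (The doubled dollar signs are make-escaping: the shell sees $state
+# and, below, $shapefile.)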
+
+# All Lines
+#
+# Since the table and index already exist, we can use -a (append
+# mode) and leave -I (index creation) out.
+       for state in data/census2000/*; do                          \
+               for shapefile in $$state/lines/*.shp; do            \
+                       echo "Importing $$shapefile.";              \
+                       $(PG_BINDIR)/shp2pgsql                      \
+                               -a                                  \
+                               -s $(TIGER_SRID)                    \
+                               $$shapefile                         \
+                               tiger_lines                         \
+                               | bin/filter-transactions           \
+                               | psql -U $(DB_USER) -d $(DB_NAME)  \
+                               > /dev/null;                        \
+               done;                                               \
+       done;
 
-       psql -d $(DB_NAME) \
-             -U $(DB_USER) \
-             -f /usr/share/postgresql/contrib/spatial_ref_sys.sql
+       bin/sf1blocks2sql src/Tests/Fixtures/SummaryFile1/mdgeo.uf1 sf1_blocks \
+                          | psql -U $(DB_USER) -d $(DB_NAME)                   \
+                          > /dev/null
 
-       shp2pgsql -I                                                        \
-                 -s $(TIGER_SRID)                                          \
-                 data/census-2000-block/maryland/tl_2008_24_tabblock00.shp \
-                 tiger                                                     \
-                  | psql -U $(DB_USER) -d $(DB_NAME)
 
-       psql -d $(DB_NAME) \
-             -U $(DB_USER) \
-             -f sql/create-sf1_blocks-table.sql
 
-       bin/sf1blocks2sql src/Tests/Fixtures/SummaryFile1/mdgeo.uf1 sf1_blocks \
-                          | psql -U postgres -d $(DB_NAME)
+# First, we drop and re-create the DB_NAME database (or schema,
+# whatever). Then, we add PL/pgSQL support to the database.
+#
+# At that point, we import the two PostGIS files, postgis.sql and
+# spatial_ref_sys.sql. The postgis.sql file contains the geometry
+# functions, while spatial_ref_sys.sql contains a table of SRIDs and
+# their associated properties. PostGIS requires both.
+#
+newdb:
+# Ignore the result of dropdb when it fails.
+       dropdb -U $(DB_USER) $(DB_NAME) || true
+       createdb -U $(DB_USER) $(DB_NAME)
+       createlang -U $(DB_USER) plpgsql $(DB_NAME)
 
+       psql -d $(DB_NAME)                         \
+             -U $(DB_USER)                         \
+             -f $(PG_SHAREDIR)/contrib/postgis.sql \
+             > /dev/null
+
+       psql -d $(DB_NAME)                                 \
+             -U $(DB_USER)                                 \
+             -f $(PG_SHAREDIR)/contrib/spatial_ref_sys.sql \
+            > /dev/null
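+
+# A quick sanity check once this target has run (hypothetical; any
+# PostGIS function would do):
+#
+#   $ psql -U postgres -d census -c 'SELECT postgis_version();'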
+
+
+# This just runs the SQL script to create the sf1_blocks table.
+sf1_blocks_table:
+       psql -d $(DB_NAME)                      \
+             -U $(DB_USER)                      \
+             -f sql/create-sf1_blocks-table.sql \
+            > /dev/null
+
+
+# Create the tiger_blocks table, and create its GiST index. Having the
+# table already exist makes importing via shp2pgsql much easier.
+# Any blocks file will work as an argument.
+tiger_blocks_table:
+       $(PG_BINDIR)/shp2pgsql                                            \
+               -p                                                        \
+               -I                                                        \
+               -s $(TIGER_SRID)                                          \
+               data/census2000/maryland/block/tl_2009_24_tabblock00.shp  \
+               tiger_blocks                                              \
+               | psql -U $(DB_USER) -d $(DB_NAME)                        \
+                > /dev/null
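+# (-p is shp2pgsql's "prepare" mode: it emits the CREATE TABLE, and
+# the GiST index thanks to -I, but no row data.)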
+
+
+# Prepare the tiger_lines table, and create the GiST index on its
+# geometry column. Any lines shapefile will do here.
+tiger_lines_table:
+       $(PG_BINDIR)/shp2pgsql                                         \
+               -p                                                     \
+               -I                                                     \
+               -s $(TIGER_SRID)                                       \
+               data/census2000/maryland/lines/tl_2009_24510_edges.shp \
+               tiger_lines                                            \
+               | psql -U $(DB_USER) -d $(DB_NAME)                     \
+                > /dev/null
+
+# Add a unique index on the "tlid" column.
+       psql -U $(DB_USER) \
+             -d $(DB_NAME) \
+             -f sql/create_tlid_unique_index.sql
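+
+# The statement in that script is presumably something along these
+# lines (the actual SQL lives in sql/create_tlid_unique_index.sql):
+#
+#   CREATE UNIQUE INDEX tiger_lines_tlid_idx ON tiger_lines (tlid);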