DB_USER=postgres
TIGER_SRID=4269
-# Root folder for the shapefiles.
-TIGER_ROOT=http://www2.census.gov/geo/tiger/TIGER2009
-
-# State-specific folders.
-DC_ROOT=$(TIGER_ROOT)/11_DISTRICT_OF_COLUMBIA
-MD_ROOT=$(TIGER_ROOT)/24_MARYLAND
-VA_ROOT=$(TIGER_ROOT)/51_VIRGINIA
-PA_ROOT=$(TIGER_ROOT)/42_PENNSYLVANIA
-NY_ROOT=$(TIGER_ROOT)/36_NEW_YORK
-
-# URLs for the TIGER/Line block-level shapefiles.
-DC_BLOCKS_URL=$(DC_ROOT)/tl_2009_11_tabblock00.zip
-MD_BLOCKS_URL=$(MD_ROOT)/tl_2009_24_tabblock00.zip
-VA_BLOCKS_URL=$(VA_ROOT)/tl_2009_51_tabblock00.zip
-PA_BLOCKS_URL=$(PA_ROOT)/tl_2009_42_tabblock00.zip
-NY_BLOCKS_URL=$(NY_ROOT)/tl_2009_36_tabblock00.zip
-
-# Starting with PostGIS 1.4.0, these paths are calculated at install
-# time using the pg_config utility. Rather than try to guess where
-# PostGIS will wind up installed, we can just check the output of
-# pg_config ourselves.
-PG_BINDIR=`pg_config --bindir`
-PG_SHAREDIR=`pg_config --sharedir`
+
+# Dark magic. We set these makefile variables to be the result of the
+# 'shell' function. The shell function, in turn, executes a Python
+# script which determines the locations of these files.
+SHP2PGSQL := $(shell bin/find_file_paths --root /usr --single shp2pgsql)
+POSTGIS_SQL := $(shell bin/find_file_paths --root /usr lwpostgis.sql postgis.sql)
+SPATIAL_REF_SYS_SQL := $(shell bin/find_file_paths --root /usr spatial_ref_sys.sql)
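+#
+# As a rough illustration only (the exact locations depend on the local
+# PostgreSQL/PostGIS packages), these tend to come out looking
+# something like:
+#
+#   SHP2PGSQL           = /usr/bin/shp2pgsql
+#   POSTGIS_SQL         = /usr/share/postgresql/contrib/postgis.sql
+#   SPATIAL_REF_SYS_SQL = /usr/share/postgresql/contrib/spatial_ref_sys.sql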
# Necessary to run test/data without prerequisites.
#
-.PHONY : test data
+.PHONY : test data lib
# The default task, since it comes first in the list.
#
-all: clean test
+all: clean lib test
test:
./bin/run_tests
+# Download or check out any third-party libraries.
+lib:
	$(MAKE) -C lib/
+
+
# Remove byte-compiled python code.
#
clean:
find ./ -name '*.pyc' -print0 | xargs -0 rm -f
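# (-print0 and xargs -0 keep the pipeline safe for filenames that
# contain spaces or other unusual characters.)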
-# Download the shapefiles from Tiger if they don't already exist.
-# For now, we're only dealing with the Census 2000 Maryland Block
-# data, so the filenames are hard-coded. Easy enough to change.
-#
-data: tiger_blocks tiger_lines
-
-tiger_blocks: dc_blocks md_blocks va_blocks pa_blocks ny_blocks
-
-dc_blocks:
- mkdir -p data/census2000/dc/block
- if [ ! -f data/census2000/dc/block/tl_2009_11_tabblock00.shp ]; \
- then \
- wget -O dcblocks.zip $(DC_BLOCKS_URL); \
- unzip dcblocks.zip -d ./data/census2000/dc/block; \
- rm dcblocks.zip; \
- fi;
-
-md_blocks:
- mkdir -p data/census2000/maryland/block
- if [ ! -f data/census2000/maryland/block/tl_2009_24_tabblock00.shp ]; \
- then \
- wget -O mdblocks.zip $(MD_BLOCKS_URL); \
- unzip mdblocks.zip -d ./data/census2000/maryland/block; \
- rm mdblocks.zip; \
- fi;
-
-va_blocks:
- mkdir -p data/census2000/virginia/block
- if [ ! -f data/census2000/virginia/block/tl_2009_51_tabblock00.shp ]; \
- then \
- wget -O vablocks.zip $(VA_BLOCKS_URL); \
- unzip vablocks.zip -d ./data/census2000/virginia/block; \
- rm vablocks.zip; \
- fi;
-
-pa_blocks:
- mkdir -p data/census2000/pennsylvania/block
- if [ ! -f data/census2000/pennsylvania/block/tl_2009_42_tabblock00.shp ]; \
- then \
- wget -O pablocks.zip $(PA_BLOCKS_URL); \
- unzip pablocks.zip -d ./data/census2000/pennsylvania/block; \
- rm pablocks.zip; \
- fi;
-
-ny_blocks:
- mkdir -p data/census2000/new_york/block
- if [ ! -f data/census2000/new_york/block/tl_2009_36_tabblock00.shp ]; \
- then \
- wget -O nyblocks.zip $(NY_BLOCKS_URL); \
- unzip nyblocks.zip -d ./data/census2000/new_york/block; \
- rm nyblocks.zip; \
- fi;
-
-
-tiger_lines:
+data:
bin/download_data
-# This imports the Tiger data using shp2pgsql. The shapefiles
-# should exist, since this task depends on the "data" task, which
-# downloads said shapefiles.
+# There is a small issue here with the blocks_db and lines_db
+# targets. Each of them requires that the database exists, and might
+# therefore depend on the newdb target. However, if /each/ of them
+# depended on newdb, the database would be dropped and re-created
+# twice (e.g. when the two targets are run as separate make
+# invocations), and the data from whichever of {blocks, lines} was
+# imported first would be lost.
#
-# After the TIGER import is done, we use the sf1blocks2sql script to
-# parse and import the geographic header record information.
-#
-db: data newdb tiger_blocks_table tiger_lines_table sf1_blocks_table
+# We therefore assume that the database already exists when blocks_db
+# or lines_db is invoked.
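+#
+# In practice that means creating the database first, e.g. by running
+# "make db", whose prerequisite list puts newdb ahead of the two import
+# targets (at least under a non-parallel make), rather than invoking
+# blocks_db or lines_db by themselves on a fresh checkout.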
+blocks_db: data blocks_table
# All Blocks
#
# The table already exists, so we can append to it, and we don't have
# to create the GiST index.
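# (For shp2pgsql, -a appends to an existing table rather than creating
# a new one, and -D writes PostgreSQL "dump" format, which loads much
# faster than individual INSERT statements.)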
for state in data/census2000/*; do \
- $(PG_BINDIR)/shp2pgsql \
+ $(SHP2PGSQL) \
-a \
-s $(TIGER_SRID) \
-D \
- $$state/block/*.shp \
+ $$state/blocks/*.shp \
tiger_blocks \
| psql -U $(DB_USER) -d $(DB_NAME); \
done;
-# MD Lines
+# Summary File 1
+#
+# Run all of the geo (uf1) files through the import script. This has
+# to happen after the blocks import, since we impose a foreign key
+# constraint.
+ for state in data/census2000/*; do \
+ bin/sf1blocks2sql $$state/sf1/*.uf1 sf1_blocks \
+ | psql -U $(DB_USER) -d $(DB_NAME) \
+ > /dev/null; \
+ done;
+
+# Run the query to combine the two blocks tables, and drop the
+# constituents.
+ psql -U $(DB_USER) \
+ -d $(DB_NAME) \
+ -f sql/combine-block-tables.sql
+
+
+lines_db: data tiger_lines_table
+# All Lines
#
# Since the table and index already exist, we can utilize -a,
# and leave -I out.
- for x in data/census2000/maryland/lines/*.shp; do \
- $(PG_BINDIR)/shp2pgsql \
- -a \
- -s $(TIGER_SRID) \
- -D \
- $$x \
- tiger_lines \
- | psql -U $(DB_USER) -d $(DB_NAME); \
+ for state in data/census2000/*; do \
+ for shapefile in $$state/lines/*.shp; do \
+ echo "Importing $$shapefile."; \
+ $(SHP2PGSQL) \
+ -a \
+ -s $(TIGER_SRID) \
+ $$shapefile \
+ tiger_lines \
+ | bin/filter-transactions \
+ | psql -U $(DB_USER) -d $(DB_NAME) \
+ > /dev/null; \
+ done; \
done;
- bin/sf1blocks2sql src/Tests/Fixtures/SummaryFile1/mdgeo.uf1 sf1_blocks \
- | psql -U postgres -d $(DB_NAME) \
- > /dev/null
+
+
+# This imports the TIGER data using shp2pgsql. The shapefiles should
+# exist, since blocks_db and lines_db each depend on the "data" task,
+# which downloads said shapefiles.
+#
+# After the TIGER import is done, we use the sf1blocks2sql script to
+# parse and import the geographic header record information.
+#
+db: newdb blocks_db lines_db
+ # Do nothing except fulfill our prerequisites.
createdb -U $(DB_USER) $(DB_NAME)
createlang -U $(DB_USER) plpgsql $(DB_NAME)
- psql -d $(DB_NAME) \
- -U $(DB_USER) \
- -f $(PG_SHAREDIR)/contrib/postgis.sql \
+ psql -d $(DB_NAME) \
+ -U $(DB_USER) \
+ -f $(POSTGIS_SQL) \
> /dev/null
- psql -d $(DB_NAME) \
- -U $(DB_USER) \
- -f $(PG_SHAREDIR)/contrib/spatial_ref_sys.sql \
+ psql -d $(DB_NAME) \
+ -U $(DB_USER) \
+ -f $(SPATIAL_REF_SYS_SQL) \
> /dev/null
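+# (postgis.sql creates the PostGIS functions and geometry types in the
+# new database, and spatial_ref_sys.sql populates the spatial_ref_sys
+# table with the well-known SRIDs, including the NAD83 SRID 4269 used
+# above.)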
# table already exist makes importing via shp2pgsql much easier.
# Any blocks file will work as an argument.
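# (With shp2pgsql, -p emits only the CREATE TABLE statement, no row
# data, and -I builds a GiST index on the geometry column.)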
tiger_blocks_table:
- $(PG_BINDIR)/shp2pgsql \
+ $(SHP2PGSQL) \
-p \
-I \
-s $(TIGER_SRID) \
- data/census2000/maryland/block/tl_2009_24_tabblock00.shp \
+ data/census2000/maryland/blocks/tl_2009_24_tabblock00.shp \
tiger_blocks \
- | psql -U postgres -d $(DB_NAME) \
+ | psql -U $(DB_USER) -d $(DB_NAME) \
> /dev/null
+# Create the "blocks" table, which is the result of joining
+# the tiger_blocks and sf1_blocks tables.
+blocks_table: tiger_blocks_table sf1_blocks_table
+ psql -U $(DB_USER) \
+ -d $(DB_NAME) \
+ -f sql/create-blocks-table.sql
+
# Prepare the tiger_lines table, and create the GiST index on its
# geometry column. Any lines shapefile will do here.
tiger_lines_table:
- $(PG_BINDIR)/shp2pgsql \
+ $(SHP2PGSQL) \
-p \
-I \
-s $(TIGER_SRID) \
data/census2000/maryland/lines/tl_2009_24510_edges.shp \
tiger_lines \
- | psql -U postgres -d $(DB_NAME) \
+ | psql -U $(DB_USER) -d $(DB_NAME) \
> /dev/null
+
+# Add a unique index on the "tlid" column.
+ psql -U $(DB_USER) \
+ -d $(DB_NAME) \
+ -f sql/create_tlid_unique_index.sql
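+#
+# (The SQL in that file is presumably something along the lines of
+# "CREATE UNIQUE INDEX tiger_lines_tlid_idx ON tiger_lines (tlid);",
+# where the exact index name is whatever the script actually chooses.)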