DB_NAME=census
DB_USER=postgres
TIGER_SRID=4269
-SHAPELY_URL=http://pypi.python.org/packages/source/S/Shapely/Shapely-1.0.14.tar.gz
-# Starting with PostGIS 1.4.0, these paths are calculated at install
-# time using the pg_config utility. Rather than try to guess where
-# PostGIS will wind up installed, we can just check the output of
-# pg_config ourselves.
-PG_BINDIR=`pg_config --bindir`
-PG_SHAREDIR=`pg_config --sharedir`
+# Dark magic. We set these makefile variables to be the result of the
+# 'shell' function. The shell function, in turn, executes a Python
+# script which determines the locations of these files.
+SHP2PGSQL := $(shell bin/find_file_paths --root /usr --single shp2pgsql)
+POSTGIS_SQL := $(shell bin/find_file_paths --root /usr lwpostgis.sql postgis.sql)
+SPATIAL_REF_SYS_SQL := $(shell bin/find_file_paths --root /usr spatial_ref_sys.sql)
# Necessary to run test/data without prerequisites.
#
# Download or check out any third-party libraries.
lib:
- if [ ! -d lib/Shapely ]; then \
- wget -O shapely.tar.gz $(SHAPELY_URL); \
- tar -xvzf shapely.tar.gz -C lib/ ; \
- rm shapely.tar.gz; \
- mv lib/Shapely* lib/Shapely; \
- fi;
+	$(MAKE) -C lib/
# Remove byte-compiled python code.
bin/download_data
-# This imports the Tiger data using shp2pgsql. The shapefiles
-# should exist, since this task depends on the "data" task, which
-# downloads said shapefiles.
-#
-# After the TIGER import is done, we use the sf1blocks2sql script to
-# parse and import the geographic header record information.
+# There is a small issue here with the blocks_db and lines_db
+# targets. Each of these requires that the database exists, and might
+# therefore depend on the newdb target. However, if /each/ of them
+# depends on newdb, the database will be dropped twice and the data
+# from one of {blocks, lines} would be lost.
#
-db: data newdb tiger_blocks_table tiger_lines_table sf1_blocks_table
+# We therefore assume that the database already exists when blocks_db
+# or lines_db are initiated.
+blocks_db: data blocks_table
# All Blocks
#
# The table already exists, so we can append to it, and we don't have
# to create the GiST index.
for state in data/census2000/*; do \
- $(PG_BINDIR)/shp2pgsql \
+ $(SHP2PGSQL) \
-a \
-s $(TIGER_SRID) \
-D \
- $$state/blocks/*.shp \
+ $$state/blocks/*.shp \
tiger_blocks \
| psql -U $(DB_USER) -d $(DB_NAME); \
done;
+# Summary File 1
+#
+# Run all of the geo (uf1) files through the import script. This has
+# to happen after the blocks import since we impose a foreign key
+# restriction.
+ for state in data/census2000/*; do \
+ bin/sf1blocks2sql $$state/sf1/*.uf1 sf1_blocks \
+ | psql -U $(DB_USER) -d $(DB_NAME) \
+ > /dev/null; \
+ done;
+
+# Run the query to combine the two blocks tables, and drop the
+# constituents.
+ psql -U $(DB_USER) \
+ -d $(DB_NAME) \
+ -f sql/combine-block-tables.sql
+
+
+lines_db: data tiger_lines_table
# All Lines
#
# Since the table and index already exist, we can utilize -a,
for state in data/census2000/*; do \
for shapefile in $$state/lines/*.shp; do \
echo "Importing $$shapefile."; \
- $(PG_BINDIR)/shp2pgsql \
+ $(SHP2PGSQL) \
-a \
-s $(TIGER_SRID) \
$$shapefile \
done; \
done;
- bin/sf1blocks2sql src/Tests/Fixtures/SummaryFile1/mdgeo.uf1 sf1_blocks \
- | psql -U postgres -d $(DB_NAME) \
- > /dev/null
+
+
+# This imports the Tiger data using shp2pgsql. The shapefiles
+# should exist, since this task depends on the "data" task, which
+# downloads said shapefiles.
+#
+# After the TIGER import is done, we use the sf1blocks2sql script to
+# parse and import the geographic header record information.
+#
+db: newdb blocks_db lines_db
+ # Do nothing except fulfill our prerequisites.
createdb -U $(DB_USER) $(DB_NAME)
createlang -U $(DB_USER) plpgsql $(DB_NAME)
- psql -d $(DB_NAME) \
- -U $(DB_USER) \
- -f $(PG_SHAREDIR)/contrib/postgis.sql \
+ psql -d $(DB_NAME) \
+ -U $(DB_USER) \
+ -f $(POSTGIS_SQL) \
> /dev/null
- psql -d $(DB_NAME) \
- -U $(DB_USER) \
- -f $(PG_SHAREDIR)/contrib/spatial_ref_sys.sql \
+ psql -d $(DB_NAME) \
+ -U $(DB_USER) \
+ -f $(SPATIAL_REF_SYS_SQL) \
> /dev/null
# table already exist makes importing via shp2pgsql much easier.
# Any blocks file will work as an argument.
tiger_blocks_table:
- $(PG_BINDIR)/shp2pgsql \
+ $(SHP2PGSQL) \
-p \
-I \
-s $(TIGER_SRID) \
- data/census2000/maryland/blocks/tl_2009_24_tabblock00.shp \
+ data/census2000/maryland/blocks/tl_2009_24_tabblock00.shp \
tiger_blocks \
- | psql -U postgres -d $(DB_NAME) \
+ | psql -U $(DB_USER) -d $(DB_NAME) \
> /dev/null
+# Create the "blocks" table, which is the result of joining
+# the tiger_blocks and sf1_blocks tables.
+blocks_table: tiger_blocks_table sf1_blocks_table
+ psql -U $(DB_USER) \
+ -d $(DB_NAME) \
+ -f sql/create-blocks-table.sql
+
# Prepare the tiger_lines table, and create the GiST index on its
# geometry column. Any lines shapefile will do here.
tiger_lines_table:
- $(PG_BINDIR)/shp2pgsql \
+ $(SHP2PGSQL) \
-p \
-I \
-s $(TIGER_SRID) \
data/census2000/maryland/lines/tl_2009_24510_edges.shp \
tiger_lines \
- | psql -U postgres -d $(DB_NAME) \
+ | psql -U $(DB_USER) -d $(DB_NAME) \
> /dev/null
# Add a unique index on the "tlid" column.
- psql -U postgres \
- -d census \
+ psql -U $(DB_USER) \
+ -d $(DB_NAME) \
-f sql/create_tlid_unique_index.sql