bin/download_data
-# This imports the Tiger data using shp2pgsql. The shapefiles
-# should exist, since this task depends on the "data" task, which
-# downloads said shapefiles.
+# There is a small issue here with the blocks_db and lines_db
+# targets. Each of them requires that the database exist, and so each
+# would naturally depend on the newdb target. However, if /each/ of
+# them depends on newdb and they are built in separate make runs, the
+# database will be dropped and recreated twice, and the data from one
+# of {blocks, lines} will be lost.
#
-# After the TIGER import is done, we use the sf1blocks2sql script to
-# parse and import the geographic header record information.
-#
-db: data newdb tiger_blocks_table tiger_lines_table sf1_blocks_table
+# We therefore assume that the database already exists when blocks_db
+# or lines_db is invoked.
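+#
+# In other words, create the database first; using only targets
+# defined in this file, that amounts to something like:
+#
+#   make newdb && make blocks_db lines_db
+#
+# (which is what the "db" target below encodes).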
+blocks_db: data blocks_table
# All Blocks
#
# The table already exists, so we can append to it, and we don't have
| psql -U $(DB_USER) -d $(DB_NAME); \
done;
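+# (For reference: per the comment above, the append is assumed to be
+# done with shp2pgsql's -a flag, along the lines of
+# "shp2pgsql -a -s $(TIGER_SRID) <blocks shapefile> tiger_blocks",
+# piped into psql as shown.)
+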
+# Summary File 1
+#
+# Run all of the geo (uf1) files through the import script. This has
+# to happen after the blocks import since we impose a foreign key
+# restriction.
+ for state in data/census2000/*; do \
+ bin/sf1blocks2sql $$state/sf1/*.uf1 sf1_blocks \
+ | psql -U $(DB_USER) -d $(DB_NAME) \
+ > /dev/null; \
+ done;
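+# (The restriction referred to is assumed to be a foreign key from
+# sf1_blocks into tiger_blocks, e.g.
+#
+#   ALTER TABLE sf1_blocks
+#     ADD FOREIGN KEY (blkidfp00) REFERENCES tiger_blocks (blkidfp00);
+#
+# where blkidfp00 is a hypothetical shared block-id column that would
+# have to be unique in tiger_blocks.)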
+
+# Run the query to combine the two blocks tables, and drop the
+# constituents.
+ psql -U $(DB_USER) \
+ -d $(DB_NAME) \
+ -f sql/combine-block-tables.sql
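+
+# (A sketch of what sql/combine-block-tables.sql is assumed to do,
+# reusing the hypothetical blkidfp00 join column; the .sql file
+# itself is authoritative:
+#
+#   INSERT INTO blocks
+#     SELECT * FROM tiger_blocks JOIN sf1_blocks USING (blkidfp00);
+#   DROP TABLE sf1_blocks, tiger_blocks;
+# )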
+
+
+lines_db: data tiger_lines_table
# All Lines
#
# Since the table and index already exist, we can utilize -a,
done; \
done;
-# Summary File 1
+
+
+# This imports the TIGER data using shp2pgsql. The shapefiles should
+# exist, since the blocks_db and lines_db tasks depend on the "data"
+# task, which downloads said shapefiles.
#
-# Run all of the geo (uf1) files through the import script. This has
-# to happen after the blocks import since we impose a foreign key
-# restriction.
- for state in data/census2000/*; do \
- bin/sf1blocks2sql $$state/sf1/*.uf1 sf1_blocks \
- | psql -U postgres -d $(DB_NAME) \
- > /dev/null; \
- done;
+# After the TIGER import is done, we use the sf1blocks2sql script to
+# parse and import the geographic header record information.
+#
+db: newdb blocks_db lines_db
+ # Do nothing except fulfill our prerequisites.
-s $(TIGER_SRID) \
data/census2000/maryland/blocks/tl_2009_24_tabblock00.shp \
tiger_blocks \
- | psql -U postgres -d $(DB_NAME) \
+ | psql -U $(DB_USER) -d $(DB_NAME) \
> /dev/null
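+# (The command above is assumed to be a shp2pgsql prepare-mode (-p)
+# invocation, which emits the CREATE TABLE for tiger_blocks without
+# loading any rows; the rows themselves are appended later by
+# blocks_db.)
+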
+# Create the "blocks" table, which is the result of joining
+# the tiger_blocks and sf1_blocks tables.
+blocks_table: tiger_blocks_table sf1_blocks_table
+ psql -U $(DB_USER) \
+ -d $(DB_NAME) \
+ -f sql/create-blocks-table.sql
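+# (Assuming tiger_blocks and sf1_blocks are still empty at this
+# point, the join can be materialized directly; a hypothetical
+# sketch, with blkidfp00 again standing in for the real join column:
+#
+#   CREATE TABLE blocks AS
+#     SELECT * FROM tiger_blocks JOIN sf1_blocks USING (blkidfp00);
+# )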
+
# Prepare the tiger_lines table, and create the GiST index on its
# geometry column. Any lines shapefile will do here.
-s $(TIGER_SRID) \
data/census2000/maryland/lines/tl_2009_24510_edges.shp \
tiger_lines \
- | psql -U postgres -d $(DB_NAME) \
+ | psql -U $(DB_USER) -d $(DB_NAME) \
> /dev/null
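+# (The GiST index is assumed to come from shp2pgsql's -I flag; the
+# hand-rolled equivalent would be something like
+#
+#   CREATE INDEX tiger_lines_geom_idx
+#     ON tiger_lines USING GIST (the_geom);
+#
+# where the_geom is shp2pgsql's conventional geometry column name and
+# the index name is a guess.)
+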
# Add a unique index on the "tlid" column.
- psql -U postgres \
- -d census \
+ psql -U $(DB_USER) \
+ -d $(DB_NAME) \
-f sql/create_tlid_unique_index.sql
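+
+# (sql/create_tlid_unique_index.sql presumably amounts to something
+# like
+#
+#   CREATE UNIQUE INDEX tiger_lines_tlid_idx ON tiger_lines (tlid);
+#
+# with the index name being a guess.)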