- mkdir -p data/census-2000-block/maryland/
- if [ ! -f data/census-2000-block/maryland/tl_2008_24_tabblock00.shp ]; then \
- wget -O tiger.zip $(TIGER_DATA_URL); \
- unzip tiger.zip; \
- rm tiger.zip; \
- unzip srv/ftp/geo/tiger/TIGER2008/24_MARYLAND/tl_2008_24_tabblock00.zip \
- -d ./data/census-2000-block/maryland/; \
- rm -rf srv; \
- fi;
+# The inline wget/unzip steps above are replaced by a helper script
+# that encapsulates the download logic (bin/download_data is not
+# visible here -- presumably it fetches the same TIGER shapefiles;
+# verify against the script itself).
+ bin/download_data
+
+
+# This imports the Tiger data using shp2pgsql. The shapefiles
+# should exist, since this task depends on the "data" task, which
+# downloads said shapefiles.
+#
+# After the TIGER import is done, we use the sf1blocks2sql script to
+# parse and import the geographic header record information.
+#
+db: data newdb tiger_blocks_table tiger_lines_table sf1_blocks_table
+# All Blocks
+#
+# The table already exists, so we can append to it, and we don't have
+# to create the GiST index.
+#
+# Flags: -a appends to the existing table, -s tags geometries with
+# $(TIGER_SRID), and -D emits PostgreSQL dump (COPY) format, which
+# bulk-loads faster than individual INSERT statements.
+ for state in data/census2000/*; do \
+ $(SHP2PGSQL) \
+ -a \
+ -s $(TIGER_SRID) \
+ -D \
+ $$state/blocks/*.shp \
+ tiger_blocks \
+ | psql -U $(DB_USER) -d $(DB_NAME); \
+ done;
+
+# All Lines
+#
+# Since the table and index already exist, we can utilize -a,
+# and leave -I out.
+#
+# Each shapefile's generated SQL is piped through
+# bin/filter-transactions before reaching psql; the script is not
+# visible here -- presumably it rewrites or strips the transaction
+# statements shp2pgsql emits (verify against the script). psql's
+# stdout is discarded to keep the build log quiet; errors still
+# reach stderr.
+ for state in data/census2000/*; do \
+ for shapefile in $$state/lines/*.shp; do \
+ echo "Importing $$shapefile."; \
+ $(SHP2PGSQL) \
+ -a \
+ -s $(TIGER_SRID) \
+ $$shapefile \
+ tiger_lines \
+ | bin/filter-transactions \
+ | psql -U $(DB_USER) -d $(DB_NAME) \
+ > /dev/null; \
+ done; \
+ done;
+
+# Summary File 1
+#
+# Run all of the geo (uf1) files through the import script. This has
+# to happen after the blocks import since we impose a foreign key
+# restriction.
+#
+# Connect as $(DB_USER), matching the blocks and lines imports
+# above, instead of hard-coding the "postgres" superuser -- the
+# configured user already owns the tables being populated.
+ for state in data/census2000/*; do \
+ bin/sf1blocks2sql $$state/sf1/*.uf1 sf1_blocks \
+ | psql -U $(DB_USER) -d $(DB_NAME) \
+ > /dev/null; \
+ done;
+