+harford_lines:
+	mkdir -p data/census2000/maryland/lines
+	if [ ! -f data/census2000/maryland/lines/tl_2009_24025_edges.shp ]; \
+	then \
+		wget -O harfordlines.zip $(HARFORD_LINES_URL) && \
+		unzip harfordlines.zip -d ./data/census2000/maryland/lines && \
+		rm harfordlines.zip; \
+	fi;
+
+howard_lines:
+	mkdir -p data/census2000/maryland/lines
+	if [ ! -f data/census2000/maryland/lines/tl_2009_24027_edges.shp ]; \
+	then \
+		wget -O howardlines.zip $(HOWARD_LINES_URL) && \
+		unzip howardlines.zip -d ./data/census2000/maryland/lines && \
+		rm howardlines.zip; \
+	fi;
+
+kent_lines:
+	mkdir -p data/census2000/maryland/lines
+	if [ ! -f data/census2000/maryland/lines/tl_2009_24029_edges.shp ]; \
+	then \
+		wget -O kentlines.zip $(KENT_LINES_URL) && \
+		unzip kentlines.zip -d ./data/census2000/maryland/lines && \
+		rm kentlines.zip; \
+	fi;
+
+montgomery_lines:
+	mkdir -p data/census2000/maryland/lines
+	if [ ! -f data/census2000/maryland/lines/tl_2009_24031_edges.shp ]; \
+	then \
+		wget -O montgomerylines.zip $(MONTGOMERY_LINES_URL) && \
+		unzip montgomerylines.zip -d ./data/census2000/maryland/lines && \
+		rm montgomerylines.zip; \
+	fi;
+
+prince_georges_lines:
+	mkdir -p data/census2000/maryland/lines
+	if [ ! -f data/census2000/maryland/lines/tl_2009_24033_edges.shp ]; \
+	then \
+		wget -O pglines.zip $(PRINCE_GEORGES_LINES_URL) && \
+		unzip pglines.zip -d ./data/census2000/maryland/lines && \
+		rm pglines.zip; \
+	fi;
+
+queen_annes_lines:
+	mkdir -p data/census2000/maryland/lines
+	if [ ! -f data/census2000/maryland/lines/tl_2009_24035_edges.shp ]; \
+	then \
+		wget -O qalines.zip $(QUEEN_ANNES_LINES_URL) && \
+		unzip qalines.zip -d ./data/census2000/maryland/lines && \
+		rm qalines.zip; \
+	fi;
+
+st_marys_lines:
+	mkdir -p data/census2000/maryland/lines
+	if [ ! -f data/census2000/maryland/lines/tl_2009_24037_edges.shp ]; \
+	then \
+		wget -O smlines.zip $(ST_MARYS_LINES_URL) && \
+		unzip smlines.zip -d ./data/census2000/maryland/lines && \
+		rm smlines.zip; \
+	fi;
+
+somerset_lines:
+	mkdir -p data/census2000/maryland/lines
+	if [ ! -f data/census2000/maryland/lines/tl_2009_24039_edges.shp ]; \
+	then \
+		wget -O somersetlines.zip $(SOMERSET_LINES_URL) && \
+		unzip somersetlines.zip -d ./data/census2000/maryland/lines && \
+		rm somersetlines.zip; \
+	fi;
+
+talbot_lines:
+	mkdir -p data/census2000/maryland/lines
+	if [ ! -f data/census2000/maryland/lines/tl_2009_24041_edges.shp ]; \
+	then \
+		wget -O talbotlines.zip $(TALBOT_LINES_URL) && \
+		unzip talbotlines.zip -d ./data/census2000/maryland/lines && \
+		rm talbotlines.zip; \
+	fi;
+
+washington_lines:
+	mkdir -p data/census2000/maryland/lines
+	if [ ! -f data/census2000/maryland/lines/tl_2009_24043_edges.shp ]; \
+	then \
+		wget -O washingtonlines.zip $(WASHINGTON_LINES_URL) && \
+		unzip washingtonlines.zip -d ./data/census2000/maryland/lines && \
+		rm washingtonlines.zip; \
+	fi;
+
+wicomico_lines:
+	mkdir -p data/census2000/maryland/lines
+	if [ ! -f data/census2000/maryland/lines/tl_2009_24045_edges.shp ]; \
+	then \
+		wget -O wicomicolines.zip $(WICOMICO_LINES_URL) && \
+		unzip wicomicolines.zip -d ./data/census2000/maryland/lines && \
+		rm wicomicolines.zip; \
+	fi;
+
+worcester_lines:
+	mkdir -p data/census2000/maryland/lines
+	if [ ! -f data/census2000/maryland/lines/tl_2009_24047_edges.shp ]; \
+	then \
+		wget -O worcesterlines.zip $(WORCESTER_LINES_URL) && \
+		unzip worcesterlines.zip -d ./data/census2000/maryland/lines && \
+		rm worcesterlines.zip; \
+	fi;
+
+
+# This imports the Tiger data using shp2pgsql. The shapefiles
+# should exist, since this task depends on the "data" task, which
+# downloads said shapefiles.
+#
+# After the TIGER import is done, we use the sf1blocks2sql script to
+# parse and import the geographic header record information.
+#
+db: data newdb tiger_blocks_table tiger_lines_table sf1_blocks_table
+# All Blocks
+#
+# The table already exists, so we can append to it, and we don't have
+# to create the GiST index.
+	for state in data/census2000/*; do \
+	  $(PG_BINDIR)/shp2pgsql \
+	    -a \
+	    -s $(TIGER_SRID) \
+	    -D \
+	    $$state/block/*.shp \
+	    tiger_blocks \
+	  | psql -U $(DB_USER) -d $(DB_NAME) || exit 1; \
+	done;
+
+# MD Lines
+#
+# Since the table and index already exist, we can utilize -a,
+# and leave -I out.
+	for x in data/census2000/maryland/lines/*.shp; do \
+	  $(PG_BINDIR)/shp2pgsql \
+	    -a \
+	    -s $(TIGER_SRID) \
+	    -D \
+	    $$x \
+	    tiger_lines \
+	  | psql -U $(DB_USER) -d $(DB_NAME) || exit 1; \
+	done;
+
+	bin/sf1blocks2sql src/Tests/Fixtures/SummaryFile1/mdgeo.uf1 sf1_blocks \
+	  | psql -U $(DB_USER) -d $(DB_NAME) \
+	  > /dev/null
+
+
+
+# First, we drop and re-create the DB_NAME database (or schema,
+# whatever). Then, we add PL/pgSQL support to the database.