X-Git-Url: http://gitweb.michael.orlitzky.com/?p=dead%2Fcensus-tools.git;a=blobdiff_plain;f=makefile;h=2631b6b11a8ed60dde2a492ca0f2db83bb2879b4;hp=7d765c6cdaab32bf2a280020097a7455e5e91183;hb=HEAD;hpb=f38573f5ab9115c2b4f315cfb7cc2be14e025f54

diff --git a/makefile b/makefile
index 7d765c6..2631b6b 100644
--- a/makefile
+++ b/makefile
@@ -1,7 +1,6 @@
 DB_NAME=census
 DB_USER=postgres
 TIGER_SRID=4269
-SHAPELY_URL=http://pypi.python.org/packages/source/S/Shapely/Shapely-1.0.14.tar.gz
 
 
 # Dark magic. We set these makefile variables to be the result of the
@@ -27,12 +26,7 @@ test:
 
 # Download or check out any third-party libraries.
 lib:
-	if [ ! -d lib/Shapely ]; then \
-	  wget -O shapely.tar.gz $(SHAPELY_URL); \
-	  tar -xvzf shapely.tar.gz -C lib/ ; \
-	  rm shapely.tar.gz; \
-	  mv lib/Shapely* lib/Shapely; \
-	fi;
+	make -C lib/
 
 
 # Remove byte-compiled python code.
@@ -45,14 +39,15 @@ data:
 	bin/download_data
 
 
-# This imports the Tiger data using shp2pgsql. The shapefiles
-# should exist, since this task depends on the "data" task, which
-# downloads said shapefiles.
+# There is a small issue here with the blocks_db and lines_db
+# targets. Each of these requires that the database exists, and might
+# therefore depend on the newdb target. However, if /each/ of them
+# depends on newdb, the database will be dropped twice and the data
+# from one of {blocks, lines} would be lost.
 #
-# After the TIGER import is done, we use the sf1blocks2sql script to
-# parse and import the geographic header record information.
-#
-db: data newdb tiger_blocks_table tiger_lines_table sf1_blocks_table
+# We therefore assume that the database already exists when blocks_db
+# or lines_db are initiated.
+blocks_db: data blocks_table
 # All Blocks
 #
 # The table already exists, so we can append to it, and we don't have
@@ -67,6 +62,25 @@ db: data newdb tiger_blocks_table tiger_lines_table sf1_blocks_table
 	    | psql -U $(DB_USER) -d $(DB_NAME); \
 	done;
 
+# Summary File 1
+#
+# Run all of the geo (uf1) files through the import script. This has
+# to happen after the blocks import since we impose a foreign key
+# restriction.
+	for state in data/census2000/*; do \
+	  bin/sf1blocks2sql $$state/sf1/*.uf1 sf1_blocks \
+	    | psql -U $(DB_USER) -d $(DB_NAME) \
+	    > /dev/null; \
+	done;
+
+# Run the query to combine the two blocks tables, and drop the
+# constituents.
+	psql -U $(DB_USER) \
+	     -d $(DB_NAME) \
+	     -f sql/combine-block-tables.sql
+
+
+lines_db: data tiger_lines_table
 # All Lines
 #
 # Since the table and index already exist, we can utilize -a,
@@ -85,16 +99,17 @@ db: data newdb tiger_blocks_table tiger_lines_table sf1_blocks_table
 		done; \
 	done;
 
-# Summary File 1
+
+
+# This imports the Tiger data using shp2pgsql. The shapefiles
+# should exist, since this task depends on the "data" task, which
+# downloads said shapefiles.
 #
-# Run all of the geo (uf1) files through the import script. This has
-# to happen after the blocks import since we impose a foreign key
-# restriction.
-	for state in data/census2000/*; do \
-	  bin/sf1blocks2sql $$state/sf1/*.uf1 sf1_blocks \
-	    | psql -U postgres -d $(DB_NAME) \
-	    > /dev/null; \
-	done;
+# After the TIGER import is done, we use the sf1blocks2sql script to
+# parse and import the geographic header record information.
+#
+db: newdb blocks_data lines_data
+	# Do nothing except fulfill our prerequisites.
 
 
 
@@ -141,9 +156,16 @@ tiger_blocks_table:
 	            -s $(TIGER_SRID) \
 	            data/census2000/maryland/blocks/tl_2009_24_tabblock00.shp \
 	            tiger_blocks \
-	| psql -U postgres -d $(DB_NAME) \
+	| psql -U $(DB_USER) -d $(DB_NAME) \
 	> /dev/null
 
+# Create the "blocks" table, which is the result of joining
+# the tiger_blocks and sf1_blocks tables.
+blocks_table: tiger_blocks_table sf1_blocks_table
+	psql -U $(DB_USER) \
+	     -d $(DB_NAME) \
+	     -f sql/create-blocks-table.sql
+
 
 # Prepare the tiger_lines table, and create the GiST index on its
 # geometry column. Any lines shapefile will do here.
@@ -154,10 +176,10 @@ tiger_lines_table:
 	            -s $(TIGER_SRID) \
 	            data/census2000/maryland/lines/tl_2009_24510_edges.shp \
 	            tiger_lines \
-	| psql -U postgres -d $(DB_NAME) \
+	| psql -U $(DB_USER) -d $(DB_NAME) \
 	> /dev/null
 
 # Add a unique index on the "tlid" column.
-	psql -U postgres \
-	     -d census \
+	psql -U $(DB_USER) \
+	     -d $(DB_NAME) \
 	     -f sql/create_tlid_unique_index.sql
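
Note: the blocks_db recipe and the blocks_table target above both defer to SQL files (sql/combine-block-tables.sql and sql/create-blocks-table.sql) that are not included in this diff. The following is a minimal sketch of the kind of join-and-drop query the combine step could run; the column names are assumptions (blkidfp00 is the block identifier in the 2009 TIGER/Line tabblock00 schema, the_geom is shp2pgsql's default geometry column, and the matching sf1_blocks identifier is guessed), so the repository's actual scripts may differ.

    -- Hypothetical sketch only; NOT the repository's combine-block-tables.sql.
    -- Assumes "blocks" was already created (by create-blocks-table.sql) with
    -- the sf1_blocks columns followed by the TIGER geometry column, and that
    -- both constituent tables share a blkidfp00 block identifier.
    BEGIN;

    -- Join the SF1 geographic header records to the TIGER block geometries.
    INSERT INTO blocks
      SELECT sf1.*, tiger.the_geom
        FROM sf1_blocks AS sf1
        JOIN tiger_blocks AS tiger
          ON tiger.blkidfp00 = sf1.blkidfp00;

    -- Drop the constituents; sf1_blocks goes first since it carries the
    -- foreign key mentioned in the Summary File 1 comment above.
    DROP TABLE sf1_blocks;
    DROP TABLE tiger_blocks;

    COMMIT;

With DB_USER=postgres and DB_NAME=census from the top of the makefile, the recipe's combine step expands to roughly: psql -U postgres -d census -f sql/combine-block-tables.sql.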