# Locate the PostGIS support files at Makefile-parse time.  Each variable
# is the output of the 'shell' function, which runs the bin/find_file_paths
# Python script to search for the named file(s) under /usr.  Because the
# assignments use ':=' (simple expansion), each search runs exactly once,
# when the Makefile is read, rather than on every reference.
# POSTGIS_SQL is given two candidate names — presumably lwpostgis.sql vs.
# postgis.sql depending on the installed PostGIS version; the script picks
# whichever exists.
SHP2PGSQL := $(shell bin/find_file_paths --root /usr --single shp2pgsql)
POSTGIS_SQL := $(shell bin/find_file_paths --root /usr lwpostgis.sql postgis.sql)
SPATIAL_REF_SYS_SQL := $(shell bin/find_file_paths --root /usr spatial_ref_sys.sql)
# .PHONY marks targets as commands rather than files: they run even when a
# file or directory of the same name exists and looks "up to date" (the
# repository contains a ./data directory, so 'make data' would otherwise be
# a no-op).  Every database-loading target below belongs here too, since
# none of them produces a file named after itself.
.PHONY : test data lib db newdb blocks_db lines_db blocks_data lines_data blocks_table tiger_blocks_table sf1_blocks_table tiger_lines_table
# The default task: when no target is named on the command line, make
# runs the first target that appears in the file.
27 # Download or check out any third-party libraries.
32 # Remove byte-compiled python code.
35 find ./ -name '*.pyc' -print0 | xargs -0 rm -f
42 # There is a small issue here with the blocks_db and lines_db
43 # targets. Each of these requires that the database exists, and might
44 # therefore depend on the newdb target. However, if /each/ of them
45 # depends on newdb, the database will be dropped twice and the data
46 # from one of {blocks, lines} would be lost.
48 # We therefore assume that the database already exists when blocks_db
49 # or lines_db are initiated.
50 blocks_db: data blocks_table
53 # The table already exists, so we can append to it, and we don't have
54 # to create the GiST index.
55 for state in data/census2000/*; do \
60 $$state/blocks/*.shp \
62 | psql -U $(DB_USER) -d $(DB_NAME); \
67 # Run all of the geo (uf1) files through the import script. This has
68 # to happen after the blocks import since we impose a foreign key
70 for state in data/census2000/*; do \
71 bin/sf1blocks2sql $$state/sf1/*.uf1 sf1_blocks \
72 | psql -U $(DB_USER) -d $(DB_NAME) \
76 # Run the query to combine the two blocks tables, and drop the
80 -f sql/combine-block-tables.sql
83 lines_db: data tiger_lines_table
86 # Since the table and index already exist, we can utilize -a,
88 for state in data/census2000/*; do \
89 for shapefile in $$state/lines/*.shp; do \
90 echo "Importing $$shapefile."; \
96 | bin/filter-transactions \
97 | psql -U $(DB_USER) -d $(DB_NAME) \
104 # This imports the Tiger data using shp2pgsql. The shapefiles
105 # should exist, since this task depends on the "data" task, which
106 # downloads said shapefiles.
108 # After the TIGER import is done, we use the sf1blocks2sql script to
109 # parse and import the geographic header record information.
# Aggregate target: build the whole database in one shot.
#
# NOTE(review): listing newdb first only works for a *serial* make.  Under
# 'make -j' prerequisites may run in any order, so the data loads could
# race the drop/create performed by newdb (the comments above state the
# load targets assume the database already exists).  Confirm before
# enabling parallel builds.
db: newdb blocks_data lines_data
# Do nothing except fulfill our prerequisites.
# First, we drop and re-create the DB_NAME database (or schema,
# whatever). Then, we add PL/pgSQL support to the database.
#
# At that point, we import the two PostGIS files, postgis.sql and
# spatial_ref_sys.sql. The postgis.sql file contains the geometry
# functions, while spatial_ref_sys.sql contains a table of SRIDs, and
# their associated properties. PostGIS requires both.
# Ignore the result of dropdb when it fails — it fails when the database
# does not exist yet (e.g. on the very first run), which is harmless here.
dropdb -U $(DB_USER) $(DB_NAME) || true
createdb -U $(DB_USER) $(DB_NAME)
# NOTE(review): createlang was deprecated and then removed in
# PostgreSQL 10, where PL/pgSQL is installed by default; on newer systems
# this line would need to become a 'CREATE EXTENSION IF NOT EXISTS plpgsql'
# via psql. Left as-is for the PostgreSQL version this file targets.
createlang -U $(DB_USER) plpgsql $(DB_NAME)
137 -f $(SPATIAL_REF_SYS_SQL) \
141 # This just runs the SQL script to create the sf1_blocks table.
145 -f sql/create-sf1_blocks-table.sql \
149 # Create the tiger_blocks table, and create its GiST index. Having the
150 # table already exist makes importing via shp2pgsql much easier.
151 # Any blocks file will work as an argument.
157 data/census2000/maryland/blocks/tl_2009_24_tabblock00.shp \
159 | psql -U $(DB_USER) -d $(DB_NAME) \
162 # Create the "blocks" table, which is the result of joining
163 # the tiger_blocks and sf1_blocks tables.
164 blocks_table: tiger_blocks_table sf1_blocks_table
167 -f sql/create-blocks-table.sql
170 # Prepare the tiger_lines table, and create the GiST index on its
171 # geometry column. Any lines shapefile will do here.
177 data/census2000/maryland/lines/tl_2009_24510_edges.shp \
179 | psql -U $(DB_USER) -d $(DB_NAME) \
182 # Add a unique index on the "tlid" column.
185 -f sql/create_tlid_unique_index.sql