]> gitweb.michael.orlitzky.com - dead/census-tools.git/blob - makefile
Added the linear program solving the midatlantic region.
[dead/census-tools.git] / makefile
1 DB_NAME=census
2 DB_USER=postgres
3 TIGER_SRID=4269
4
5
6 # Dark magic. We set these makefile variables to be the result of the
7 # 'shell' function. The shell function, in turn, executes a Python
8 # script which determines the locations of these files.
9 SHP2PGSQL := $(shell bin/find_file_paths --root /usr --single shp2pgsql)
10 POSTGIS_SQL := $(shell bin/find_file_paths --root /usr lwpostgis.sql postgis.sql)
11 SPATIAL_REF_SYS_SQL := $(shell bin/find_file_paths --root /usr spatial_ref_sys.sql)
12
13 # Necessary to run test/data without prerequisites.
14 #
15 .PHONY : test data lib
16
17
18 # The default task, since it comes first in the list.
19 #
20 all: clean lib test
21
22
23 test:
24 ./bin/run_tests
25
26
27 # Download or check out any third-party libraries.
28 lib:
29 make -C lib/
30
31
32 # Remove byte-compiled python code.
33 #
34 clean:
35 find ./ -name '*.pyc' -print0 | xargs -0 rm -f
36
37
38 data:
39 bin/download_data
40
41
42 # There is a small issue here with the blocks_db and lines_db
43 # targets. Each of these requires that the database exists, and might
44 # therefore depend on the newdb target. However, if /each/ of them
45 # depends on newdb, the database will be dropped twice and the data
46 # from one of {blocks, lines} would be lost.
47 #
48 # We therefore assume that the database already exists when blocks_db
49 # or lines_db are initiated.
50 blocks_db: data blocks_table
51 # All Blocks
52 #
53 # The table already exists, so we can append to it, and we don't have
54 # to create the GiST index.
55 for state in data/census2000/*; do \
56 $(SHP2PGSQL) \
57 -a \
58 -s $(TIGER_SRID) \
59 -D \
60 $$state/blocks/*.shp \
61 tiger_blocks \
62 | psql -U $(DB_USER) -d $(DB_NAME); \
63 done;
64
65 # Summary File 1
66 #
67 # Run all of the geo (uf1) files through the import script. This has
68 # to happen after the blocks import since we impose a foreign key
69 # restriction.
70 for state in data/census2000/*; do \
71 bin/sf1blocks2sql $$state/sf1/*.uf1 sf1_blocks \
72 | psql -U $(DB_USER) -d $(DB_NAME) \
73 > /dev/null; \
74 done;
75
76 # Run the query to combine the two blocks tables, and drop the
77 # constituents.
78 psql -U $(DB_USER) \
79 -d $(DB_NAME) \
80 -f sql/combine-block-tables.sql
81
82
83 lines_db: data tiger_lines_table
84 # All Lines
85 #
86 # Since the table and index already exist, we can utilize -a,
87 # and leave -I out.
88 for state in data/census2000/*; do \
89 for shapefile in $$state/lines/*.shp; do \
90 echo "Importing $$shapefile."; \
91 $(SHP2PGSQL) \
92 -a \
93 -s $(TIGER_SRID) \
94 $$shapefile \
95 tiger_lines \
96 | bin/filter-transactions \
97 | psql -U $(DB_USER) -d $(DB_NAME) \
98 > /dev/null; \
99 done; \
100 done;
101
102
103
104 # This imports the Tiger data using shp2pgsql. The shapefiles
105 # should exist, since this task depends on the "data" task, which
106 # downloads said shapefiles.
107 #
108 # After the TIGER import is done, we use the sf1blocks2sql script to
109 # parse and import the geographic header record information.
110 #
111 db: newdb blocks_data lines_data
112 # Do nothing except fulfill our prerequisites.
113
114
115
116 # First, we drop and re-create the DB_NAME database (or schema,
117 # whatever). Then, we add PL/pgSQL support to the database.
118 #
119 # At that point, we import the two PostGIS files, postgis.sql and
120 # spatial_ref_sys.sql. The postgis.sql file contains the geometry
121 # functions, while spatial_ref_sys.sql contains a table of SRIDs, and
122 # their associated properties. PostGIS requires both.
123 #
124 newdb:
125 # Ignore the result of dropdb when it fails.
126 dropdb -U $(DB_USER) $(DB_NAME) || true
127 createdb -U $(DB_USER) $(DB_NAME)
128 createlang -U $(DB_USER) plpgsql $(DB_NAME)
129
130 psql -d $(DB_NAME) \
131 -U $(DB_USER) \
132 -f $(POSTGIS_SQL) \
133 > /dev/null
134
135 psql -d $(DB_NAME) \
136 -U $(DB_USER) \
137 -f $(SPATIAL_REF_SYS_SQL) \
138 > /dev/null
139
140
141 # This just runs the SQL script to create the sf1_blocks table.
142 sf1_blocks_table:
143 psql -d $(DB_NAME) \
144 -U $(DB_USER) \
145 -f sql/create-sf1_blocks-table.sql \
146 > /dev/null
147
148
149 # Create the tiger_blocks table, and create its GiST index. Having the
150 # table already exist makes importing via shp2pgsql much easier.
151 # Any blocks file will work as an argument.
152 tiger_blocks_table:
153 $(SHP2PGSQL) \
154 -p \
155 -I \
156 -s $(TIGER_SRID) \
157 data/census2000/maryland/blocks/tl_2009_24_tabblock00.shp \
158 tiger_blocks \
159 | psql -U $(DB_USER) -d $(DB_NAME) \
160 > /dev/null
161
162 # Create the "blocks" table, which is the result of joining
163 # the tiger_blocks and sf1_blocks tables.
164 blocks_table: tiger_blocks_table sf1_blocks_table
165 psql -U $(DB_USER) \
166 -d $(DB_NAME) \
167 -f sql/create-blocks-table.sql
168
169
170 # Prepare the tiger_lines table, and create the GiST index on its
171 # geometry column. Any lines shapefile will do here.
172 tiger_lines_table:
173 $(SHP2PGSQL) \
174 -p \
175 -I \
176 -s $(TIGER_SRID) \
177 data/census2000/maryland/lines/tl_2009_24510_edges.shp \
178 tiger_lines \
179 | psql -U $(DB_USER) -d $(DB_NAME) \
180 > /dev/null
181
182 # Add a unique index on the "tlid" column.
183 psql -U $(DB_USER) \
184 -d $(DB_NAME) \
185 -f sql/create_tlid_unique_index.sql