DB_NAME=census
DB_USER=postgres
TIGER_SRID=4269

# Root folder for the shapefiles.
TIGER_ROOT=http://www2.census.gov/geo/tiger/TIGER2009

# State-specific folders.
DC_ROOT=$(TIGER_ROOT)/11_DISTRICT_OF_COLUMBIA
MD_ROOT=$(TIGER_ROOT)/24_MARYLAND
VA_ROOT=$(TIGER_ROOT)/51_VIRGINIA
PA_ROOT=$(TIGER_ROOT)/42_PENNSYLVANIA
NY_ROOT=$(TIGER_ROOT)/36_NEW_YORK

# URLs for the TIGER/Line block-level shapefiles.
DC_BLOCKS_URL=$(DC_ROOT)/tl_2009_11_tabblock00.zip
MD_BLOCKS_URL=$(MD_ROOT)/tl_2009_24_tabblock00.zip
VA_BLOCKS_URL=$(VA_ROOT)/tl_2009_51_tabblock00.zip
PA_BLOCKS_URL=$(PA_ROOT)/tl_2009_42_tabblock00.zip
NY_BLOCKS_URL=$(NY_ROOT)/tl_2009_36_tabblock00.zip

# Starting with PostGIS 1.4.0, these paths are calculated at install
# time using the pg_config utility. Rather than try to guess where
# PostGIS will wind up installed, we can just check the output of
# pg_config ourselves.
PG_BINDIR=`pg_config --bindir`
PG_SHAREDIR=`pg_config --sharedir`

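# Note: the backtick form above is re-evaluated by the shell in every
# recipe line that uses it. A GNU-make-only alternative (shown here for
# reference, not used below) would run pg_config once, at parse time:
#
#   PG_BINDIR   := $(shell pg_config --bindir)
#   PG_SHAREDIR := $(shell pg_config --sharedir)
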
# Mark "test" and "data" as phony so that make always runs their
# recipes; otherwise, an existing file or directory with one of those
# names (the data/ directory, for example, once the shapefiles have
# been downloaded) would be considered up to date.
#
.PHONY : test data


# The default task, since it comes first in the list.
#
all: clean test


test:
	./bin/run_tests


# Remove byte-compiled python code.
#
clean:
	find ./ -name '*.pyc' -print0 | xargs -0 rm -f


# Download the shapefiles from TIGER if they don't already exist.
data: tiger_blocks tiger_lines

tiger_blocks: dc_blocks md_blocks va_blocks pa_blocks ny_blocks

dc_blocks:
	mkdir -p data/census2000/dc/block
	if [ ! -f data/census2000/dc/block/tl_2009_11_tabblock00.shp ]; \
	then \
		wget -O dcblocks.zip $(DC_BLOCKS_URL); \
		unzip dcblocks.zip -d ./data/census2000/dc/block; \
		rm dcblocks.zip; \
	fi;

md_blocks:
	mkdir -p data/census2000/maryland/block
	if [ ! -f data/census2000/maryland/block/tl_2009_24_tabblock00.shp ]; \
	then \
		wget -O mdblocks.zip $(MD_BLOCKS_URL); \
		unzip mdblocks.zip -d ./data/census2000/maryland/block; \
		rm mdblocks.zip; \
	fi;

va_blocks:
	mkdir -p data/census2000/virginia/block
	if [ ! -f data/census2000/virginia/block/tl_2009_51_tabblock00.shp ]; \
	then \
		wget -O vablocks.zip $(VA_BLOCKS_URL); \
		unzip vablocks.zip -d ./data/census2000/virginia/block; \
		rm vablocks.zip; \
	fi;

pa_blocks:
	mkdir -p data/census2000/pennsylvania/block
	if [ ! -f data/census2000/pennsylvania/block/tl_2009_42_tabblock00.shp ]; \
	then \
		wget -O pablocks.zip $(PA_BLOCKS_URL); \
		unzip pablocks.zip -d ./data/census2000/pennsylvania/block; \
		rm pablocks.zip; \
	fi;

ny_blocks:
	mkdir -p data/census2000/new_york/block
	if [ ! -f data/census2000/new_york/block/tl_2009_36_tabblock00.shp ]; \
	then \
		wget -O nyblocks.zip $(NY_BLOCKS_URL); \
		unzip nyblocks.zip -d ./data/census2000/new_york/block; \
		rm nyblocks.zip; \
	fi;
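
# The five *_blocks rules above differ only in the state abbreviation,
# the directory name, and the download URL. As a sketch (ours, not
# something this makefile relies on), they could be generated from a
# single GNU make canned rule like the one below. Nothing eval's it,
# so the explicit rules above are still what actually run.
define state_blocks_rule
$(1)_blocks:
	mkdir -p data/census2000/$(2)/block
	if [ ! -f data/census2000/$(2)/block/$(notdir $(subst .zip,.shp,$(3))) ]; \
	then \
		wget -O $(1)blocks.zip $(3); \
		unzip $(1)blocks.zip -d ./data/census2000/$(2)/block; \
		rm $(1)blocks.zip; \
	fi;
endef
# Hypothetical usage, for a state and URL variable that this makefile
# does not define:
#
#   $(eval $(call state_blocks_rule,wv,west_virginia,$(WV_BLOCKS_URL)))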


tiger_lines:
	bin/download_data


# This imports the TIGER data using shp2pgsql. The shapefiles should
# already exist, since this task depends on the "data" task, which
# downloads them.
#
# After the TIGER import is done, we use the sf1blocks2sql script to
# parse and import the geographic header record information.
#
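# shp2pgsql flags used here and below, for reference: -p prepares
# (creates) the target table without loading any rows, -a appends to
# an existing table, -I builds a GiST index on the geometry column,
# -s sets the SRID, and -D emits PostgreSQL dump (COPY) format, which
# loads much faster than plain INSERT statements.
#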
db: data newdb tiger_blocks_table tiger_lines_table sf1_blocks_table
	# All Blocks
	#
	# The table already exists, so we can append to it, and we don't have
	# to create the GiST index.
	for state in data/census2000/*; do \
	  $(PG_BINDIR)/shp2pgsql \
	    -a \
	    -s $(TIGER_SRID) \
	    -D \
	    $$state/block/*.shp \
	    tiger_blocks \
	  | psql -U $(DB_USER) -d $(DB_NAME); \
	done;

	# MD Lines
	#
	# Since the table and index already exist, we can use -a and leave
	# -I out.
	for x in data/census2000/maryland/lines/*.shp; do \
	  $(PG_BINDIR)/shp2pgsql \
	    -a \
	    -s $(TIGER_SRID) \
	    -D \
	    $$x \
	    tiger_lines \
	  | psql -U $(DB_USER) -d $(DB_NAME); \
	done;

	bin/sf1blocks2sql src/Tests/Fixtures/SummaryFile1/mdgeo.uf1 sf1_blocks \
	  | psql -U $(DB_USER) -d $(DB_NAME) \
	  > /dev/null


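# A hypothetical convenience target (ours; nothing else references it):
# spot-check the row counts once "make db" has finished. psql's -c flag
# runs a single command and exits.
count_rows:
	psql -U $(DB_USER) -d $(DB_NAME) -c 'SELECT count(*) FROM tiger_blocks;'
	psql -U $(DB_USER) -d $(DB_NAME) -c 'SELECT count(*) FROM tiger_lines;'
	psql -U $(DB_USER) -d $(DB_NAME) -c 'SELECT count(*) FROM sf1_blocks;'
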

# First, we drop and re-create the $(DB_NAME) database (or schema, if
# you prefer that term). Then, we add PL/pgSQL support to it.
#
# At that point, we import the two PostGIS files, postgis.sql and
# spatial_ref_sys.sql. The postgis.sql file contains the geometry
# functions, while spatial_ref_sys.sql contains a table of SRIDs and
# their associated properties. PostGIS requires both.
#
newdb:
	# Ignore dropdb's exit status; it fails if the database doesn't
	# exist yet.
	dropdb -U $(DB_USER) $(DB_NAME) || true
	createdb -U $(DB_USER) $(DB_NAME)
	createlang -U $(DB_USER) plpgsql $(DB_NAME)

	psql -d $(DB_NAME) \
	  -U $(DB_USER) \
	  -f $(PG_SHAREDIR)/contrib/postgis.sql \
	  > /dev/null

	psql -d $(DB_NAME) \
	  -U $(DB_USER) \
	  -f $(PG_SHAREDIR)/contrib/spatial_ref_sys.sql \
	  > /dev/null
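
# To confirm that the PostGIS functions actually loaded, something like
# the following works (shown for reference only; no target runs it):
#
#   psql -U $(DB_USER) -d $(DB_NAME) -c 'SELECT postgis_version();'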


# This just runs the SQL script to create the sf1_blocks table.
sf1_blocks_table:
	psql -d $(DB_NAME) \
	  -U $(DB_USER) \
	  -f sql/create-sf1_blocks-table.sql \
	  > /dev/null


# Create the tiger_blocks table and its GiST index. Having the table
# exist ahead of time makes importing via shp2pgsql much easier. Any
# blocks shapefile will work as the argument.
tiger_blocks_table:
	$(PG_BINDIR)/shp2pgsql \
	  -p \
	  -I \
	  -s $(TIGER_SRID) \
	  data/census2000/maryland/block/tl_2009_24_tabblock00.shp \
	  tiger_blocks \
	| psql -U $(DB_USER) -d $(DB_NAME) \
	> /dev/null


# Prepare the tiger_lines table, and create the GiST index on its
# geometry column. Any lines shapefile will do here.
tiger_lines_table:
	$(PG_BINDIR)/shp2pgsql \
	  -p \
	  -I \
	  -s $(TIGER_SRID) \
	  data/census2000/maryland/lines/tl_2009_24510_edges.shp \
	  tiger_lines \
	| psql -U $(DB_USER) -d $(DB_NAME) \
	> /dev/null
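

# Typical usage, as a summary (ours): "make db" downloads the TIGER
# shapefiles via its "data" prerequisite, rebuilds the $(DB_NAME)
# database, and imports everything; "make" by itself runs the default
# "all" target, which cleans up and runs the test suite.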