-- | The Roots.Simple module contains root-finding algorithms. That
-- is, procedures to (numerically) find solutions to the equation,
--
-- > f(x) = 0
--
-- where f is assumed to be continuous on the interval of interest.
--

module Roots.Simple
where

import Data.List (find)

import Vector

import qualified Roots.Fast as F

-- | Does the (continuous) function @f@ have a root on the interval
-- [a,b]? If f(a) <= 0 and f(b) >= 0, we know that there's a root in
-- [a,b] by the intermediate value theorem. Likewise when f(a) >= 0
-- and f(b) <= 0.
--
-- Examples:
--
-- >>> let f x = x**3
-- >>> has_root f (-1) 1 Nothing
-- True
--
-- This check fails if we don't specify an @epsilon@: cos(-2) and
-- cos(2) have the same sign, so the endpoint values alone don't
-- tell us that there's a root on [-2,2].
--
-- >>> has_root cos (-2) 2 Nothing
-- False
-- >>> has_root cos (-2) 2 (Just 0.001)
-- True
--
has_root :: (Fractional a, Ord a, Ord b, Num b)
         => (a -> b)  -- ^ The function @f@
         -> a         -- ^ The \"left\" endpoint, @a@
         -> a         -- ^ The \"right\" endpoint, @b@
         -> Maybe a   -- ^ The size of the smallest subinterval
                      --   we'll examine, @epsilon@
         -> Bool
has_root f a b epsilon =
  F.has_root f a b epsilon Nothing Nothing



-- | We are given a function @f@ and an interval [a,b]. The bisection
-- method finds a root of @f@ by splitting [a,b] in half repeatedly.
--
-- If one is found within some prescribed tolerance @epsilon@, it is
-- returned. Otherwise, the interval [a,b] is split into two
-- subintervals [a,c] and [c,b] of equal length which are then both
-- checked via the same process.
--
-- Returns 'Just' the value x for which f(x) == 0 if one is found,
-- or 'Nothing' if one of the preconditions is violated.
--
-- Examples:
--
-- >>> bisect cos 1 2 0.001
-- Just 1.5712890625
--
-- >>> bisect sin (-1) 1 0.001
-- Just 0.0
--
bisect :: (Fractional a, Ord a, Num b, Ord b)
       => (a -> b)  -- ^ The function @f@ whose root we seek
       -> a         -- ^ The \"left\" endpoint of the interval, @a@
       -> a         -- ^ The \"right\" endpoint of the interval, @b@
       -> a         -- ^ The tolerance, @epsilon@
       -> Maybe a
bisect f a b epsilon =
  F.bisect f a b epsilon Nothing Nothing


-- | The sequence x_{n} of values obtained by applying Newton's method
-- on the function @f@ and initial guess @x0@.
--
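-- Each new value is produced from the previous one by the Newton
-- update (this is exactly the @next@ step in the code below),
--
-- > x_{n+1} = x_{n} - f(x_{n}) / f'(x_{n})
--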
-- Examples:
--
-- Atkinson, p. 60.
--
-- >>> let f x = x^6 - x - 1
-- >>> let f' x = 6*x^5 - 1
-- >>> tail $ take 4 $ newton_iterations f f' 2
-- [1.6806282722513088,1.4307389882390624,1.2549709561094362]
--
newton_iterations :: (Fractional a, Ord a)
                  => (a -> a)  -- ^ The function @f@ whose root we seek
                  -> (a -> a)  -- ^ The derivative of @f@
                  -> a         -- ^ Initial guess, x-naught
                  -> [a]
newton_iterations f f' x0 =
  iterate next x0
  where
    next xn =
      xn - ( (f xn) / (f' xn) )


-- | Use Newton's method to find a root of @f@ near the initial guess
-- @x0@. If your guess is bad, this will recurse forever!
--
-- Examples:
--
-- Atkinson, p. 60.
--
-- >>> let f x = x^6 - x - 1
-- >>> let f' x = 6*x^5 - 1
-- >>> let Just root = newtons_method f f' (1/1000000) 2
-- >>> root
-- 1.1347241385002211
-- >>> abs (f root) < 1/100000
-- True
--
-- >>> import Data.Number.BigFloat
-- >>> let eps = 1/(10^20) :: BigFloat Prec50
-- >>> let Just root = newtons_method f f' eps 2
-- >>> root
-- 1.13472413840151949260544605450647284028100785303643e0
-- >>> abs (f root) < eps
-- True
--
newtons_method :: (Fractional a, Ord a)
               => (a -> a)  -- ^ The function @f@ whose root we seek
               -> (a -> a)  -- ^ The derivative of @f@
               -> a         -- ^ The tolerance epsilon
               -> a         -- ^ Initial guess, x-naught
               -> Maybe a
newtons_method f f' epsilon x0 =
  find (\x -> abs (f x) < epsilon) x_n
  where
    x_n = newton_iterations f f' x0


-- | Takes a function @f@ of two arguments and repeatedly applies @f@
-- to the previous two values. Returns a list that begins with @x0@ and
-- @x1@ and continues with the generated values f(x0, x1), f(x1, x2),
-- f(x2, x3)...
--
-- Examples:
--
-- >>> let fibs = iterate2 (+) 0 1
-- >>> take 15 fibs
-- [0,1,1,2,3,5,8,13,21,34,55,89,144,233,377]
--
iterate2 :: (a -> a -> a) -- ^ The function @f@
         -> a             -- ^ The initial value @x0@
         -> a             -- ^ The second value, @x1@
         -> [a]           -- ^ The result list, [x0, x1, ...]
iterate2 f x0 x1 =
  x0 : x1 : (go x0 x1)
  where
    go prev2 prev1 =
      let next = f prev2 prev1 in
        next : go prev1 next

-- | The sequence x_{n} of values obtained by applying the secant
-- method on the function @f@ and initial guesses @x0@, @x1@.
--
-- The recursion is expressed with 'iterate2', a two-parameter analogue
-- of 'iterate': each new element of the list is computed from the
-- previous two, starting from @x0@ and @x1@.
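--
-- The update applied at each step (the function @g@ in the code
-- below) is
--
-- > x_{n+1} = x_{n} - f(x_{n}) * (x_{n} - x_{n-1}) / (f(x_{n}) - f(x_{n-1}))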
--
-- Examples:
--
-- Atkinson, p. 67.
--
-- >>> let f x = x^6 - x - 1
-- >>> take 4 $ secant_iterations f 2 1
-- [2.0,1.0,1.0161290322580645,1.190577768676638]
--
secant_iterations :: (Fractional a, Ord a)
                  => (a -> a)  -- ^ The function @f@ whose root we seek
                  -> a         -- ^ Initial guess, x-naught
                  -> a         -- ^ Second initial guess, x-one
                  -> [a]
secant_iterations f x0 x1 =
  iterate2 g x0 x1
  where
    g prev2 prev1 =
      let x_change = prev1 - prev2
          y_change = (f prev1) - (f prev2)
      in
        (prev1 - (f prev1 * (x_change / y_change)))

-- | Use the secant method to find a root of @f@ near the initial guesses
-- @x0@ and @x1@. If your guesses are bad, this will recurse forever!
--
-- Examples:
--
-- Atkinson, p. 67.
--
-- >>> let f x = x^6 - x - 1
-- >>> let Just root = secant_method f (1/10^9) 2 1
-- >>> root
-- 1.1347241384015196
-- >>> abs (f root) < (1/10^9)
-- True
--
secant_method :: (Fractional a, Ord a)
              => (a -> a)  -- ^ The function @f@ whose root we seek
              -> a         -- ^ The tolerance epsilon
              -> a         -- ^ Initial guess, x-naught
              -> a         -- ^ Second initial guess, x-one
              -> Maybe a
secant_method f epsilon x0 x1
  = find (\x -> abs (f x) < epsilon) x_n
  where
    x_n = secant_iterations f x0 x1


-- | Find a fixed point of the function @f@ with the search starting
-- at @x0@. We delegate to the version that also returns the number of
-- iterations, and simply discard the iteration count.
--
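-- For example, assuming a 'Vector' instance for 'Double' is in scope
-- (so that @norm_2@ behaves like 'abs' on scalars), iterating 'cos'
-- from 1.0 should converge to the Dottie number, roughly 0.7390851:
--
-- > fixed_point cos (1/10^6) (1.0 :: Double)  -- roughly 0.7390851
--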
fixed_point :: (Vector a, RealFrac b)
            => (a -> a) -- ^ The function @f@ to iterate.
            -> b        -- ^ The tolerance, @epsilon@.
            -> a        -- ^ The initial value @x0@.
            -> a        -- ^ The fixed point.
fixed_point f epsilon x0 =
  snd $ F.fixed_point_with_iterations f epsilon x0


-- | Return the number of iterations required to find a fixed point of
-- the function @f@ with the search starting at @x0@ and tolerance
-- @epsilon@. We delegate to the version that returns both the
-- iteration count and the fixed point, and simply discard the latter.
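--
-- For example, assuming a 'Vector' instance for 'Double' is in scope,
-- the number of iterations needed to locate the fixed point of 'cos'
-- to within @1/10^6@ would be obtained with
--
-- > fixed_point_iteration_count cos (1/10^6) (1.0 :: Double)
--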
fixed_point_iteration_count :: (Vector a, RealFrac b)
                            => (a -> a) -- ^ The function @f@ to iterate.
                            -> b        -- ^ The tolerance, @epsilon@.
                            -> a        -- ^ The initial value @x0@.
                            -> Int      -- ^ The number of iterations.
fixed_point_iteration_count f epsilon x0 =
  fst $ F.fixed_point_with_iterations f epsilon x0


-- | Returns a list of ratios,
--
-- > ||x^{*} - x_{n+1}|| / ||x^{*} - x_{n}||^{p}
--
-- of fixed point iterations for the function @f@ with initial guess
-- @x0@ and @p@ some positive power.
--
-- This is used to determine the rate of convergence: if the iteration
-- converges with order @p@, these ratios tend to a finite, nonzero
-- limit (the asymptotic error constant).
--
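-- For example, assuming a 'Vector' instance for 'Double' (with
-- @norm_2@ acting as 'abs'), iterating 'cos' toward its fixed point
-- @x_star@, roughly 0.7390851, with @p = 1@ should give ratios
-- approaching sin(x_star), roughly 0.6736, i.e. linear convergence:
--
-- > fixed_point_error_ratios cos 1.0 0.7390851332151607 1
--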
fixed_point_error_ratios :: (Vector a, RealFrac b)
                         => (a -> a) -- ^ The function @f@ to iterate.
                         -> a        -- ^ The initial value @x0@.
                         -> a        -- ^ The true solution, @x_star@.
                         -> Integer  -- ^ The power @p@.
                         -> [b]      -- ^ The resulting sequence of error ratios.
fixed_point_error_ratios f x0 x_star p =
  zipWith (/) en_plus_one en_exp
  where
    xn = F.fixed_point_iterations f x0
    en = map (\x -> norm_2 (x_star - x)) xn
    en_plus_one = tail en
    en_exp = map (^p) en