from __future__ import division, print_function, absolute_import

import warnings
from collections import namedtuple

import numpy as np

from . import _zeros

_iter = 100
_xtol = 2e-12
_rtol = 4 * np.finfo(float).eps

__all__ = ['newton', 'bisect', 'ridder', 'brentq', 'brenth', 'toms748',
           'RootResults']

_ECONVERGED = 0
_ESIGNERR = -1
_ECONVERR = -2
_EVALUEERR = -3
_EINPROGRESS = 1

flag_map = {_ECONVERGED: 'converged',
            _ESIGNERR: 'sign error',
            _ECONVERR: 'convergence error',
            _EVALUEERR: 'value error',
            _EINPROGRESS: 'in progress'}


class RootResults(object):
    """Represents the root finding result.

    Attributes
    ----------
    root : float
        Estimated root location.
    iterations : int
        Number of iterations needed to find the root.
    function_calls : int
        Number of times the function was called.
    converged : bool
        True if the routine converged.
    flag : str
        Description of the cause of termination.
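
    Examples
    --------
    A minimal illustration; any of the bracketing solvers in this module
    can produce one of these via ``full_output=True``:

    >>> from scipy import optimize
    >>> root, results = optimize.brentq(lambda x: x**2 - 1, 0, 2,
    ...                                 full_output=True)
    >>> results.converged
    True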
  34. """
  35. def __init__(self, root, iterations, function_calls, flag):
  36. self.root = root
  37. self.iterations = iterations
  38. self.function_calls = function_calls
  39. self.converged = flag == _ECONVERGED
  40. self.flag = None
  41. try:
  42. self.flag = flag_map[flag]
  43. except KeyError:
  44. self.flag = 'unknown error %d' % (flag,)
  45. def __repr__(self):
  46. attrs = ['converged', 'flag', 'function_calls',
  47. 'iterations', 'root']
  48. m = max(map(len, attrs)) + 1
  49. return '\n'.join([a.rjust(m) + ': ' + repr(getattr(self, a))
  50. for a in attrs])


def results_c(full_output, r):
    if full_output:
        x, funcalls, iterations, flag = r
        results = RootResults(root=x,
                              iterations=iterations,
                              function_calls=funcalls,
                              flag=flag)
        return x, results
    else:
        return r


def _results_select(full_output, r):
    """Select from a tuple of (root, funcalls, iterations, flag)"""
    x, funcalls, iterations, flag = r
    if full_output:
        results = RootResults(root=x,
                              iterations=iterations,
                              function_calls=funcalls,
                              flag=flag)
        return x, results
    return x


def newton(func, x0, fprime=None, args=(), tol=1.48e-8, maxiter=50,
           fprime2=None, x1=None, rtol=0.0,
           full_output=False, disp=True):
    """
    Find a zero of a real or complex function using the Newton-Raphson
    (or secant or Halley's) method.

    Find a zero of the function `func` given a nearby starting point `x0`.
    The Newton-Raphson method is used if the derivative `fprime` of `func`
    is provided, otherwise the secant method is used. If the second order
    derivative `fprime2` of `func` is also provided, then Halley's method
    is used.

    If `x0` is a sequence with more than one item, then `newton` returns an
    array, and `func` must be vectorized and return a sequence or array of
    the same shape as its first argument. If `fprime` or `fprime2` is given,
    then its return must also have the same shape.

    Parameters
    ----------
    func : callable
        The function whose zero is wanted. It must be a function of a
        single variable of the form ``f(x, a, b, c...)``, where ``a, b, c...``
        are extra arguments that can be passed in the `args` parameter.
    x0 : float, sequence, or ndarray
        An initial estimate of the zero that should be somewhere near the
        actual zero. If not scalar, then `func` must be vectorized and return
        a sequence or array of the same shape as its first argument.
    fprime : callable, optional
        The derivative of the function when available and convenient. If it
        is None (default), then the secant method is used.
    args : tuple, optional
        Extra arguments to be used in the function call.
    tol : float, optional
        The allowable error of the zero value. If `func` is complex-valued,
        a larger `tol` is recommended as both the real and imaginary parts
        of `x` contribute to ``|x - x0|``.
    maxiter : int, optional
        Maximum number of iterations.
    fprime2 : callable, optional
        The second order derivative of the function when available and
        convenient. If it is None (default), then the normal Newton-Raphson
        or the secant method is used. If it is not None, then Halley's
        method is used.
    x1 : float, optional
        Another estimate of the zero that should be somewhere near the
        actual zero. Used if `fprime` is not provided.
    rtol : float, optional
        Tolerance (relative) for termination.
    full_output : bool, optional
        If `full_output` is False (default), the root is returned.
        If True and `x0` is scalar, the return value is ``(x, r)``, where
        ``x`` is the root and ``r`` is a `RootResults` object.
        If True and `x0` is non-scalar, the return value is ``(x, converged,
        zero_der)`` (see Returns section for details).
    disp : bool, optional
        If True, raise a RuntimeError if the algorithm didn't converge, with
        the error message containing the number of iterations and current
        function value. Otherwise the convergence status is recorded in a
        `RootResults` return object.
        Ignored if `x0` is not scalar.
        *Note: this has little to do with displaying, however
        the `disp` keyword cannot be renamed for backwards compatibility.*

    Returns
    -------
    root : float, sequence, or ndarray
        Estimated location where function is zero.
    r : `RootResults`, optional
        Present if ``full_output=True`` and `x0` is scalar.
        Object containing information about the convergence. In particular,
        ``r.converged`` is True if the routine converged.
    converged : ndarray of bool, optional
        Present if ``full_output=True`` and `x0` is non-scalar.
        For vector functions, indicates which elements converged
        successfully.
    zero_der : ndarray of bool, optional
        Present if ``full_output=True`` and `x0` is non-scalar.
        For vector functions, indicates which elements had a zero
        derivative.

    See Also
    --------
    brentq, brenth, ridder, bisect
    fsolve : find zeros in n dimensions.

    Notes
    -----
    The convergence rate of the Newton-Raphson method is quadratic,
    the Halley method is cubic, and the secant method is
    sub-quadratic. This means that if the function is well behaved
    the actual error in the estimated zero after the n-th iteration
    is approximately the square (cube for Halley) of the error
    after the (n-1)-th step. However, the stopping criterion used
    here is the step size and there is no guarantee that a zero
    has been found. Consequently, the result should be verified.
    Safer algorithms are `brentq`, `brenth`, `ridder`, and `bisect`,
    but they all require that the root first be bracketed in an
    interval where the function changes sign. The `brentq` algorithm
    is recommended for general use in one dimensional problems
    when such an interval has been found.

    When `newton` is used with arrays, it is best suited for the following
    types of problems:

    * The initial guesses, `x0`, are all relatively the same distance from
      the roots.
    * Some or all of the extra arguments, `args`, are also arrays so that a
      class of similar problems can be solved together.
    * The size of the initial guesses, `x0`, is larger than O(100) elements.
      Otherwise, a naive loop may perform as well as or better than the
      vectorized version.

    Examples
    --------
    >>> from scipy import optimize
    >>> import matplotlib.pyplot as plt

    >>> def f(x):
    ...     return (x**3 - 1)  # only one real root at x = 1

    ``fprime`` is not provided, use the secant method:

    >>> root = optimize.newton(f, 1.5)
    >>> root
    1.0000000000000016
    >>> root = optimize.newton(f, 1.5, fprime2=lambda x: 6 * x)
    >>> root
    1.0000000000000016

    Only ``fprime`` is provided, use the Newton-Raphson method:

    >>> root = optimize.newton(f, 1.5, fprime=lambda x: 3 * x**2)
    >>> root
    1.0

    Both ``fprime2`` and ``fprime`` are provided, use Halley's method:

    >>> root = optimize.newton(f, 1.5, fprime=lambda x: 3 * x**2,
    ...                        fprime2=lambda x: 6 * x)
    >>> root
    1.0

    When we want to find zeros for a set of related starting values and/or
    function parameters, we can provide both of those as an array of inputs:

    >>> f = lambda x, a: x**3 - a
    >>> fder = lambda x, a: 3 * x**2
    >>> x = np.random.randn(100)
    >>> a = np.arange(-50, 50)
    >>> vec_res = optimize.newton(f, x, fprime=fder, args=(a, ))

    The above is the equivalent of solving for each value in ``(x, a)``
    separately in a for-loop, just faster:

    >>> loop_res = [optimize.newton(f, x0, fprime=fder, args=(a0,))
    ...             for x0, a0 in zip(x, a)]
    >>> np.allclose(vec_res, loop_res)
    True

    Plot the results found for all values of ``a``:

    >>> analytical_result = np.sign(a) * np.abs(a)**(1/3)
    >>> fig = plt.figure()
    >>> ax = fig.add_subplot(111)
    >>> ax.plot(a, analytical_result, 'o')
    >>> ax.plot(a, vec_res, '.')
    >>> ax.set_xlabel('$a$')
    >>> ax.set_ylabel('$x$ where $f(x, a)=0$')
    >>> plt.show()
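
    If we need to know whether the iteration actually converged,
    ``full_output=True`` also returns a `RootResults` object (a minimal
    sketch; the starting point is arbitrary):

    >>> root, r = optimize.newton(lambda x: x**2 - 2, 1.5, full_output=True)
    >>> r.converged
    True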
  216. """
    if tol <= 0:
        raise ValueError("tol too small (%g <= 0)" % tol)
    if maxiter < 1:
        raise ValueError("maxiter must be greater than 0")
    if np.size(x0) > 1:
        return _array_newton(func, x0, fprime, args, tol, maxiter, fprime2,
                             full_output)

    # Convert to float (don't use float(x0); this works also for complex x0)
    p0 = 1.0 * x0
    funcalls = 0
    if fprime is not None:
        # Newton-Raphson method
        for itr in range(maxiter):
            # first evaluate fval
            fval = func(p0, *args)
            funcalls += 1
            # If fval is 0, a root has been found, then terminate
            if fval == 0:
                return _results_select(
                    full_output, (p0, funcalls, itr, _ECONVERGED))
            fder = fprime(p0, *args)
            funcalls += 1
            if fder == 0:
                msg = "derivative was zero."
                warnings.warn(msg, RuntimeWarning)
                return _results_select(
                    full_output, (p0, funcalls, itr + 1, _ECONVERR))
            newton_step = fval / fder
            if fprime2:
                fder2 = fprime2(p0, *args)
                funcalls += 1
                # Halley's method:
                #   newton_step /= (1.0 - 0.5 * newton_step * fder2 / fder)
                # Only do it if denominator stays close enough to 1
                # Rationale: If 1-adj < 0, then Halley sends x in the
                # opposite direction to Newton. Doesn't happen if x is close
                # enough to root.
                adj = newton_step * fder2 / fder / 2
                if np.abs(adj) < 1:
                    newton_step /= 1.0 - adj
            p = p0 - newton_step
            if np.isclose(p, p0, rtol=rtol, atol=tol):
                return _results_select(
                    full_output, (p, funcalls, itr + 1, _ECONVERGED))
            p0 = p
    else:
        # Secant method
        if x1 is not None:
            if x1 == x0:
                raise ValueError("x1 and x0 must be different")
            p1 = x1
        else:
            eps = 1e-4
            p1 = x0 * (1 + eps)
            p1 += (eps if p1 >= 0 else -eps)
        q0 = func(p0, *args)
        funcalls += 1
        q1 = func(p1, *args)
        funcalls += 1
        if abs(q1) < abs(q0):
            p0, p1, q0, q1 = p1, p0, q1, q0
        for itr in range(maxiter):
            if q1 == q0:
                if p1 != p0:
                    msg = "Tolerance of %s reached" % (p1 - p0)
                    warnings.warn(msg, RuntimeWarning)
                p = (p1 + p0) / 2.0
                return _results_select(
                    full_output, (p, funcalls, itr + 1, _ECONVERGED))
            else:
                if abs(q1) > abs(q0):
                    p = (-q0 / q1 * p1 + p0) / (1 - q0 / q1)
                else:
                    p = (-q1 / q0 * p0 + p1) / (1 - q1 / q0)
            if np.isclose(p, p1, rtol=rtol, atol=tol):
                return _results_select(
                    full_output, (p, funcalls, itr + 1, _ECONVERGED))
            p0, q0 = p1, q1
            p1 = p
            q1 = func(p1, *args)
            funcalls += 1

    if disp:
        msg = ("Failed to converge after %d iterations, value is %s"
               % (itr + 1, p))
        raise RuntimeError(msg)

    return _results_select(full_output, (p, funcalls, itr + 1, _ECONVERR))


def _array_newton(func, x0, fprime, args, tol, maxiter, fprime2, full_output):
    """
    A vectorized version of Newton, Halley, and secant methods for arrays.

    Do not use this method directly. This method is called from `newton`
    when ``np.size(x0) > 1`` is ``True``. For docstring, see `newton`.
    """
    # Explicitly copy `x0` as `p` will be modified inplace, but the
    # user's array should not be altered.
    try:
        p = np.array(x0, copy=True, dtype=float)
    except TypeError:
        # can't convert complex to float
        p = np.array(x0, copy=True)

    failures = np.ones_like(p, dtype=bool)
    nz_der = np.ones_like(failures)
    if fprime is not None:
        # Newton-Raphson method
        for iteration in range(maxiter):
            # first evaluate fval
            fval = np.asarray(func(p, *args))
            # If all fval are 0, all roots have been found, then terminate
            if not fval.any():
                failures = fval.astype(bool)
                break
            fder = np.asarray(fprime(p, *args))
            nz_der = (fder != 0)
            # stop iterating if all derivatives are zero
            if not nz_der.any():
                break
            # Newton step
            dp = fval[nz_der] / fder[nz_der]
            if fprime2 is not None:
                fder2 = np.asarray(fprime2(p, *args))
                dp = dp / (1.0 - 0.5 * dp * fder2[nz_der] / fder[nz_der])
            # only update nonzero derivatives
            p[nz_der] -= dp
            failures[nz_der] = np.abs(dp) >= tol  # items not yet converged
            # stop iterating if there aren't any failures, not incl zero der
            if not failures[nz_der].any():
                break
    else:
        # Secant method
        dx = np.finfo(float).eps**0.33
        p1 = p * (1 + dx) + np.where(p >= 0, dx, -dx)
        q0 = np.asarray(func(p, *args))
        q1 = np.asarray(func(p1, *args))
        active = np.ones_like(p, dtype=bool)
        for iteration in range(maxiter):
            nz_der = (q1 != q0)
            # stop iterating if all derivatives are zero
            if not nz_der.any():
                p = (p1 + p) / 2.0
                break
            # Secant Step
            dp = (q1 * (p1 - p))[nz_der] / (q1 - q0)[nz_der]
            # only update nonzero derivatives
            p[nz_der] = p1[nz_der] - dp
            active_zero_der = ~nz_der & active
            p[active_zero_der] = (p1 + p)[active_zero_der] / 2.0
            active &= nz_der  # don't assign zero derivatives again
            failures[nz_der] = np.abs(dp) >= tol  # not yet converged
            # stop iterating if there aren't any failures, not incl zero der
            if not failures[nz_der].any():
                break
            p1, p = p, p1
            q0 = q1
            q1 = np.asarray(func(p1, *args))

    zero_der = ~nz_der & failures  # don't include converged with zero-ders
    if zero_der.any():
        # Secant warnings
        if fprime is None:
            nonzero_dp = (p1 != p)
            # non-zero dp, but infinite newton step
            zero_der_nz_dp = (zero_der & nonzero_dp)
            if zero_der_nz_dp.any():
                rms = np.sqrt(
                    sum((p1[zero_der_nz_dp] - p[zero_der_nz_dp]) ** 2)
                )
                warnings.warn(
                    'RMS of {:g} reached'.format(rms), RuntimeWarning)
        # Newton or Halley warnings
        else:
            all_or_some = 'all' if zero_der.all() else 'some'
            msg = '{:s} derivatives were zero'.format(all_or_some)
            warnings.warn(msg, RuntimeWarning)
    elif failures.any():
        all_or_some = 'all' if failures.all() else 'some'
        msg = '{0:s} failed to converge after {1:d} iterations'.format(
            all_or_some, maxiter
        )
        if failures.all():
            raise RuntimeError(msg)
        warnings.warn(msg, RuntimeWarning)

    if full_output:
        result = namedtuple('result', ('root', 'converged', 'zero_der'))
        p = result(p, ~failures, zero_der)

    return p


def bisect(f, a, b, args=(),
           xtol=_xtol, rtol=_rtol, maxiter=_iter,
           full_output=False, disp=True):
    """
    Find root of a function within an interval using bisection.

    Basic bisection routine to find a zero of the function `f` between the
    arguments `a` and `b`. `f(a)` and `f(b)` cannot have the same signs.
    Slow but sure.

    Parameters
    ----------
    f : function
        Python function returning a number. `f` must be continuous, and
        f(a) and f(b) must have opposite signs.
    a : scalar
        One end of the bracketing interval [a,b].
    b : scalar
        The other end of the bracketing interval [a,b].
    xtol : number, optional
        The computed root ``x0`` will satisfy ``np.allclose(x, x0,
        atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
        parameter must be nonnegative.
    rtol : number, optional
        The computed root ``x0`` will satisfy ``np.allclose(x, x0,
        atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
        parameter cannot be smaller than its default value of
        ``4*np.finfo(float).eps``.
    maxiter : int, optional
        If convergence is not achieved in `maxiter` iterations, an error is
        raised. Must be >= 0.
    args : tuple, optional
        Containing extra arguments for the function `f`.
        `f` is called by ``f(x, *args)``.
    full_output : bool, optional
        If `full_output` is False, the root is returned. If `full_output`
        is True, the return value is ``(x, r)``, where `x` is the root, and
        `r` is a `RootResults` object.
    disp : bool, optional
        If True, raise RuntimeError if the algorithm didn't converge.
        Otherwise the convergence status is recorded in a `RootResults`
        return object.

    Returns
    -------
    x0 : float
        Zero of `f` between `a` and `b`.
    r : `RootResults` (present if ``full_output = True``)
        Object containing information about the convergence. In particular,
        ``r.converged`` is True if the routine converged.

    Examples
    --------
    >>> def f(x):
    ...     return (x**2 - 1)

    >>> from scipy import optimize

    >>> root = optimize.bisect(f, 0, 2)
    >>> root
    1.0

    >>> root = optimize.bisect(f, -2, 0)
    >>> root
    -1.0
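
    The convergence status can be inspected via ``full_output`` (a minimal
    sketch):

    >>> root, results = optimize.bisect(f, 0, 2, full_output=True)
    >>> results.converged
    True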

    See Also
    --------
    brentq, brenth, ridder, newton : one-dimensional root-finding
    fixed_point : scalar fixed-point finder
    fsolve : n-dimensional root-finding
    """
    if not isinstance(args, tuple):
        args = (args,)
    if xtol <= 0:
        raise ValueError("xtol too small (%g <= 0)" % xtol)
    if rtol < _rtol:
        raise ValueError("rtol too small (%g < %g)" % (rtol, _rtol))
    r = _zeros._bisect(f, a, b, xtol, rtol, maxiter, args, full_output, disp)
    return results_c(full_output, r)


def ridder(f, a, b, args=(),
           xtol=_xtol, rtol=_rtol, maxiter=_iter,
           full_output=False, disp=True):
    """
    Find a root of a function in an interval using Ridder's method.

    Parameters
    ----------
    f : function
        Python function returning a number. `f` must be continuous, and
        f(a) and f(b) must have opposite signs.
    a : scalar
        One end of the bracketing interval [a,b].
    b : scalar
        The other end of the bracketing interval [a,b].
    xtol : number, optional
        The computed root ``x0`` will satisfy ``np.allclose(x, x0,
        atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
        parameter must be nonnegative.
    rtol : number, optional
        The computed root ``x0`` will satisfy ``np.allclose(x, x0,
        atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
        parameter cannot be smaller than its default value of
        ``4*np.finfo(float).eps``.
    maxiter : int, optional
        If convergence is not achieved in `maxiter` iterations, an error is
        raised. Must be >= 0.
    args : tuple, optional
        Containing extra arguments for the function `f`.
        `f` is called by ``f(x, *args)``.
    full_output : bool, optional
        If `full_output` is False, the root is returned. If `full_output`
        is True, the return value is ``(x, r)``, where `x` is the root, and
        `r` is a `RootResults` object.
    disp : bool, optional
        If True, raise RuntimeError if the algorithm didn't converge.
        Otherwise the convergence status is recorded in the `RootResults`
        return object.

    Returns
    -------
    x0 : float
        Zero of `f` between `a` and `b`.
    r : `RootResults` (present if ``full_output = True``)
        Object containing information about the convergence.
        In particular, ``r.converged`` is True if the routine converged.

    See Also
    --------
    brentq, brenth, bisect, newton : one-dimensional root-finding
    fixed_point : scalar fixed-point finder

    Notes
    -----
    Uses [Ridders1979]_ method to find a zero of the function `f` between
    the arguments `a` and `b`. Ridders' method is faster than bisection,
    but not generally as fast as the Brent routines. [Ridders1979]_
    provides the classic description and source of the algorithm. A
    description can also be found in any recent edition of Numerical
    Recipes.

    The routine used here diverges slightly from standard presentations in
    order to be a bit more careful of tolerance.

    References
    ----------
    .. [Ridders1979]
       Ridders, C. F. J. "A New Algorithm for Computing a
       Single Root of a Real Continuous Function."
       IEEE Trans. Circuits Systems 26, 979-980, 1979.

    Examples
    --------
    >>> def f(x):
    ...     return (x**2 - 1)

    >>> from scipy import optimize

    >>> root = optimize.ridder(f, 0, 2)
    >>> root
    1.0

    >>> root = optimize.ridder(f, -2, 0)
    >>> root
    -1.0
    """
    if not isinstance(args, tuple):
        args = (args,)
    if xtol <= 0:
        raise ValueError("xtol too small (%g <= 0)" % xtol)
    if rtol < _rtol:
        raise ValueError("rtol too small (%g < %g)" % (rtol, _rtol))
    r = _zeros._ridder(f, a, b, xtol, rtol, maxiter, args, full_output, disp)
    return results_c(full_output, r)


def brentq(f, a, b, args=(),
           xtol=_xtol, rtol=_rtol, maxiter=_iter,
           full_output=False, disp=True):
    """
    Find a root of a function in a bracketing interval using Brent's method.

    Uses the classic Brent's method to find a zero of the function `f` on
    the sign changing interval [a, b]. Generally considered the best of the
    rootfinding routines here. It is a safe version of the secant method
    that uses inverse quadratic extrapolation. Brent's method combines root
    bracketing, interval bisection, and inverse quadratic interpolation. It
    is sometimes known as the van Wijngaarden-Dekker-Brent method. Brent
    (1973) claims convergence is guaranteed for functions computable within
    [a, b].

    [Brent1973]_ provides the classic description of the algorithm. Another
    description can be found in a recent edition of Numerical Recipes
    [PressEtal1992]_, and another is at
    http://mathworld.wolfram.com/BrentsMethod.html. It should be easy to
    understand the algorithm just by reading our code. Our code diverges a
    bit from standard presentations: we choose a different formula for the
    extrapolation step.

    Parameters
    ----------
    f : function
        Python function returning a number. The function :math:`f`
        must be continuous, and :math:`f(a)` and :math:`f(b)` must
        have opposite signs.
    a : scalar
        One end of the bracketing interval :math:`[a, b]`.
    b : scalar
        The other end of the bracketing interval :math:`[a, b]`.
    xtol : number, optional
        The computed root ``x0`` will satisfy ``np.allclose(x, x0,
        atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
        parameter must be nonnegative. For nice functions, Brent's
        method will often satisfy the above condition with ``xtol/2``
        and ``rtol/2``. [Brent1973]_
    rtol : number, optional
        The computed root ``x0`` will satisfy ``np.allclose(x, x0,
        atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
        parameter cannot be smaller than its default value of
        ``4*np.finfo(float).eps``. For nice functions, Brent's
        method will often satisfy the above condition with ``xtol/2``
        and ``rtol/2``. [Brent1973]_
    maxiter : int, optional
        If convergence is not achieved in `maxiter` iterations, an error is
        raised. Must be >= 0.
    args : tuple, optional
        Containing extra arguments for the function `f`.
        `f` is called by ``f(x, *args)``.
    full_output : bool, optional
        If `full_output` is False, the root is returned. If `full_output`
        is True, the return value is ``(x, r)``, where `x` is the root, and
        `r` is a `RootResults` object.
    disp : bool, optional
        If True, raise RuntimeError if the algorithm didn't converge.
        Otherwise the convergence status is recorded in the `RootResults`
        return object.

    Returns
    -------
    x0 : float
        Zero of `f` between `a` and `b`.
    r : `RootResults` (present if ``full_output = True``)
        Object containing information about the convergence. In particular,
        ``r.converged`` is True if the routine converged.

    Notes
    -----
    `f` must be continuous. f(a) and f(b) must have opposite signs.

    Related functions fall into several classes:

    multivariate local optimizers
      `fmin`, `fmin_powell`, `fmin_cg`, `fmin_bfgs`, `fmin_ncg`
    nonlinear least squares minimizer
      `leastsq`
    constrained multivariate optimizers
      `fmin_l_bfgs_b`, `fmin_tnc`, `fmin_cobyla`
    global optimizers
      `basinhopping`, `brute`, `differential_evolution`
    local scalar minimizers
      `fminbound`, `brent`, `golden`, `bracket`
    n-dimensional root-finding
      `fsolve`
    one-dimensional root-finding
      `brenth`, `ridder`, `bisect`, `newton`
    scalar fixed-point finder
      `fixed_point`

    References
    ----------
    .. [Brent1973]
       Brent, R. P.,
       *Algorithms for Minimization Without Derivatives*.
       Englewood Cliffs, NJ: Prentice-Hall, 1973. Ch. 3-4.
    .. [PressEtal1992]
       Press, W. H.; Flannery, B. P.; Teukolsky, S. A.; and Vetterling, W. T.
       *Numerical Recipes in FORTRAN: The Art of Scientific Computing*, 2nd
       ed. Cambridge, England: Cambridge University Press, pp. 352-355, 1992.
       Section 9.3: "Van Wijngaarden-Dekker-Brent Method."

    Examples
    --------
    >>> def f(x):
    ...     return (x**2 - 1)

    >>> from scipy import optimize

    >>> root = optimize.brentq(f, -2, 0)
    >>> root
    -1.0

    >>> root = optimize.brentq(f, 0, 2)
    >>> root
    1.0
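
    Extra arguments are forwarded to `f` via ``args`` (a minimal sketch,
    solving the same equation ``x**2 - a = 0`` with ``a = 1``):

    >>> f2 = lambda x, a: x**2 - a
    >>> root = optimize.brentq(f2, 0, 2, args=(1,))
    >>> root
    1.0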
  658. """
  659. if not isinstance(args, tuple):
  660. args = (args,)
  661. if xtol <= 0:
  662. raise ValueError("xtol too small (%g <= 0)" % xtol)
  663. if rtol < _rtol:
  664. raise ValueError("rtol too small (%g < %g)" % (rtol, _rtol))
  665. r = _zeros._brentq(f, a, b, xtol, rtol, maxiter, args, full_output, disp)
  666. return results_c(full_output, r)


def brenth(f, a, b, args=(),
           xtol=_xtol, rtol=_rtol, maxiter=_iter,
           full_output=False, disp=True):
    """Find a root of a function in a bracketing interval using Brent's
    method with hyperbolic extrapolation.

    A variation on the classic Brent routine to find a zero of the function
    f between the arguments a and b that uses hyperbolic extrapolation
    instead of inverse quadratic extrapolation. There was a paper back in
    the 1980's ...
    f(a) and f(b) cannot have the same signs. Generally on a par with the
    brent routine, but not as heavily tested. It is a safe version of the
    secant method that uses hyperbolic extrapolation. The version here is
    by Chuck Harris.

    Parameters
    ----------
    f : function
        Python function returning a number. `f` must be continuous, and
        f(a) and f(b) must have opposite signs.
    a : scalar
        One end of the bracketing interval [a,b].
    b : scalar
        The other end of the bracketing interval [a,b].
    xtol : number, optional
        The computed root ``x0`` will satisfy ``np.allclose(x, x0,
        atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
        parameter must be nonnegative. As with `brentq`, for nice
        functions the method will often satisfy the above condition
        with ``xtol/2`` and ``rtol/2``.
    rtol : number, optional
        The computed root ``x0`` will satisfy ``np.allclose(x, x0,
        atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
        parameter cannot be smaller than its default value of
        ``4*np.finfo(float).eps``. As with `brentq`, for nice functions
        the method will often satisfy the above condition with
        ``xtol/2`` and ``rtol/2``.
    maxiter : int, optional
        If convergence is not achieved in `maxiter` iterations, an error is
        raised. Must be >= 0.
    args : tuple, optional
        Containing extra arguments for the function `f`.
        `f` is called by ``f(x, *args)``.
    full_output : bool, optional
        If `full_output` is False, the root is returned. If `full_output`
        is True, the return value is ``(x, r)``, where `x` is the root, and
        `r` is a `RootResults` object.
    disp : bool, optional
        If True, raise RuntimeError if the algorithm didn't converge.
        Otherwise the convergence status is recorded in the `RootResults`
        return object.

    Returns
    -------
    x0 : float
        Zero of `f` between `a` and `b`.
    r : `RootResults` (present if ``full_output = True``)
        Object containing information about the convergence. In particular,
        ``r.converged`` is True if the routine converged.

    Examples
    --------
    >>> def f(x):
    ...     return (x**2 - 1)

    >>> from scipy import optimize

    >>> root = optimize.brenth(f, -2, 0)
    >>> root
    -1.0

    >>> root = optimize.brenth(f, 0, 2)
    >>> root
    1.0

    See Also
    --------
    fmin, fmin_powell, fmin_cg,
    fmin_bfgs, fmin_ncg : multivariate local optimizers
    leastsq : nonlinear least squares minimizer
    fmin_l_bfgs_b, fmin_tnc, fmin_cobyla : constrained multivariate optimizers
    basinhopping, differential_evolution, brute : global optimizers
    fminbound, brent, golden, bracket : local scalar minimizers
    fsolve : n-dimensional root-finding
    brentq, ridder, bisect, newton : one-dimensional root-finding
    fixed_point : scalar fixed-point finder
    """
    if not isinstance(args, tuple):
        args = (args,)
    if xtol <= 0:
        raise ValueError("xtol too small (%g <= 0)" % xtol)
    if rtol < _rtol:
        raise ValueError("rtol too small (%g < %g)" % (rtol, _rtol))
    r = _zeros._brenth(f, a, b, xtol, rtol, maxiter, args, full_output, disp)
    return results_c(full_output, r)


################################
# TOMS "Algorithm 748: Enclosing Zeros of Continuous Functions", by
# Alefeld, G. E. and Potra, F. A. and Shi, Yixun,
# See [APS1995] in the `toms748` docstring below.


def _within_tolerance(x, y, rtol, atol):
    diff = np.abs(x - y)
    z = np.abs(y)
    result = (diff <= (atol + rtol * z))
    return result
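
# Illustrative note: with rtol=0 and atol=1e-12, values within 1e-12 of each
# other count as "within tolerance", e.g.
#     _within_tolerance(1.0, 1.0 + 5e-13, rtol=0.0, atol=1e-12)  # -> True
# TOMS748Solver.get_status() below uses this with its configured rtol/xtol.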


def _notclose(fs, rtol=_rtol, atol=_xtol):
    # Ensure not None, not 0, all finite, and not very close to each other
    notclosefvals = all(fs) and all(np.isfinite(fs)) and \
        not any(any(np.isclose(_f, fs[i + 1:], rtol=rtol, atol=atol))
                for i, _f in enumerate(fs[:-1]))
    return notclosefvals


def _secant(xvals, fvals):
    """Perform a secant step, taking a little care.
    """
    # Secant has many "mathematically" equivalent formulations
    # x2 = x0 - (x1 - x0)/(f1 - f0) * f0
    #    = x1 - (x1 - x0)/(f1 - f0) * f1
    #    = (-x1 * f0 + x0 * f1) / (f1 - f0)
    #    = (-f0 / f1 * x1 + x0) / (1 - f0 / f1)
    #    = (-f1 / f0 * x0 + x1) / (1 - f1 / f0)
    x0, x1 = xvals[:2]
    f0, f1 = fvals[:2]
    if f0 == f1:
        return np.nan
    if np.abs(f1) > np.abs(f0):
        x2 = (-f0 / f1 * x1 + x0) / (1 - f0 / f1)
    else:
        x2 = (-f1 / f0 * x0 + x1) / (1 - f1 / f0)
    return x2


def _update_bracket(ab, fab, c, fc):
    """Update a bracket given (c, fc), with a < c < b. Return the discarded
    endpoint.
    """
    fa, fb = fab
    idx = (0 if np.sign(fa) * np.sign(fc) > 0 else 1)
    rx, rfx = ab[idx], fab[idx]
    fab[idx] = fc
    ab[idx] = c
    return rx, rfx


def _compute_divided_differences(xvals, fvals, N=None, full=True,
                                 forward=True):
    """Return a matrix of divided differences for the xvals, fvals pairs.

    DD[i, j] = f[x_{i-j}, ..., x_i] for 0 <= j <= i

    If full is False, just return the main diagonal (or last row):
    f[a], f[a, b] and f[a, b, c].
    If forward is False, return f[c], f[b, c], f[a, b, c].
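
    For example (an illustrative doctest), for ``f(x) = x**2 + x + 1``
    sampled at ``x = 0, 1, 2``:

    >>> _compute_divided_differences([0.0, 1.0, 2.0], [1.0, 3.0, 7.0],
    ...                              full=False).tolist()
    [1.0, 2.0, 1.0]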
    """
    if full:
        if forward:
            xvals = np.asarray(xvals)
        else:
            xvals = np.array(xvals)[::-1]
        M = len(xvals)
        N = M if N is None else min(N, M)
        DD = np.zeros([M, N])
        DD[:, 0] = fvals[:]
        for i in range(1, N):
            DD[i:, i] = (np.diff(DD[i - 1:, i - 1]) /
                         (xvals[i:] - xvals[:M - i]))
        return DD

    xvals = np.asarray(xvals)
    dd = np.array(fvals)
    row = np.array(fvals)
    idx2Use = (0 if forward else -1)
    dd[0] = fvals[idx2Use]
    for i in range(1, len(xvals)):
        denom = xvals[i:i + len(row) - 1] - xvals[:len(row) - 1]
        row = np.diff(row)[:] / denom
        dd[i] = row[idx2Use]
    return dd


def _interpolated_poly(xvals, fvals, x):
    """Compute p(x) for the polynomial passing through the specified locations.

    Use Neville's algorithm to compute p(x) where p is the minimal degree
    polynomial passing through the points xvals, fvals.
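
    For example (an illustrative doctest), the quadratic through
    ``(0, 0), (1, 1), (2, 4)`` is ``x**2``, so ``p(3) = 9``:

    >>> float(_interpolated_poly([0.0, 1.0, 2.0], [0.0, 1.0, 4.0], 3.0))
    9.0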
    """
    xvals = np.asarray(xvals)
    N = len(xvals)
    Q = np.zeros([N, N])
    D = np.zeros([N, N])
    Q[:, 0] = fvals[:]
    D[:, 0] = fvals[:]
    for k in range(1, N):
        alpha = D[k:, k - 1] - Q[k - 1:N - 1, k - 1]
        diffik = xvals[0:N - k] - xvals[k:N]
        Q[k:, k] = (xvals[k:] - x) / diffik * alpha
        D[k:, k] = (xvals[:N - k] - x) / diffik * alpha
    # Expect Q[-1, 1:] to be small relative to Q[-1, 0] as x approaches a root
    return np.sum(Q[-1, 1:]) + Q[-1, 0]


def _inverse_poly_zero(a, b, c, d, fa, fb, fc, fd):
    """Inverse cubic interpolation f-values -> x-values.

    Given four points (fa, a), (fb, b), (fc, c), (fd, d) with
    fa, fb, fc, fd all distinct, find poly IP(y) through the 4 points
    and compute x=IP(0).
    """
    return _interpolated_poly([fa, fb, fc, fd], [a, b, c, d], 0)


def _newton_quadratic(ab, fab, d, fd, k):
    """Apply Newton-Raphson like steps, using divided differences to
    approximate f'.

    ab is a real interval [a, b] containing a root,
    fab holds the real values of f(a), f(b),
    d is a real number outside [a, b],
    k is the number of steps to apply.
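
    For example (an illustrative doctest), with a linear ``f``
    (``f(0) = -1``, ``f(1) = 1``, and ``d=2, fd=3``) a single step lands on
    the root at 0.5:

    >>> float(_newton_quadratic([0.0, 1.0], [-1.0, 1.0], 2.0, 3.0, 1))
    0.5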
  852. """
  853. a, b = ab
  854. fa, fb = fab
  855. _, B, A = _compute_divided_differences([a, b, d], [fa, fb, fd],
  856. forward=True, full=False)
  857. # _P is the quadratic polynomial through the 3 points
  858. def _P(x):
  859. # Horner evaluation of fa + B * (x - a) + A * (x - a) * (x - b)
  860. return (A * (x - b) + B) * (x - a) + fa
  861. if A == 0:
  862. r = a - fa / B
  863. else:
  864. r = (a if np.sign(A) * np.sign(fa) > 0 else b)
  865. # Apply k Newton-Raphson steps to _P(x), starting from x=r
  866. for i in range(k):
  867. r1 = r - _P(r) / (B + A * (2 * r - a - b))
  868. if not (ab[0] < r1 < ab[1]):
  869. if (ab[0] < r < ab[1]):
  870. return r
  871. r = sum(ab) / 2.0
  872. break
  873. r = r1
  874. return r


class TOMS748Solver(object):
    """Solve f(x, *args) == 0 using Algorithm 748 of Alefeld, Potra & Shi.
    """
    _MU = 0.5
    _K_MIN = 1
    _K_MAX = 100  # A very high value for real usage. Expect 1, 2, maybe 3.

    def __init__(self):
        self.f = None
        self.args = None
        self.function_calls = 0
        self.iterations = 0
        self.k = 2
        self.ab = [np.nan, np.nan]  # ab=[a,b] is a global interval containing a root
        self.fab = [np.nan, np.nan]  # fab is function values at a, b
        self.d = None
        self.fd = None
        self.e = None
        self.fe = None
        self.disp = False
        self.xtol = _xtol
        self.rtol = _rtol
        self.maxiter = _iter

    def configure(self, xtol, rtol, maxiter, disp, k):
        self.disp = disp
        self.xtol = xtol
        self.rtol = rtol
        self.maxiter = maxiter
        # Silently replace a low value of k with 1
        self.k = max(k, self._K_MIN)
        # Noisily replace a high value of k with self._K_MAX
        if self.k > self._K_MAX:
            msg = "toms748: Overriding k: ->%d" % self._K_MAX
            warnings.warn(msg, RuntimeWarning)
            self.k = self._K_MAX

    def _callf(self, x, error=True):
        """Call the user-supplied function, update book-keeping."""
        fx = self.f(x, *self.args)
        self.function_calls += 1
        if not np.isfinite(fx) and error:
            raise ValueError("Invalid function value: f(%f) -> %s " % (x, fx))
        return fx

    def get_result(self, x, flag=_ECONVERGED):
        r"""Package the result and statistics into a tuple."""
        return (x, self.function_calls, self.iterations, flag)

    def _update_bracket(self, c, fc):
        return _update_bracket(self.ab, self.fab, c, fc)

    def start(self, f, a, b, args=()):
        r"""Prepare for the iterations."""
        self.function_calls = 0
        self.iterations = 0

        self.f = f
        self.args = args
        self.ab[:] = [a, b]
        if not np.isfinite(a) or np.imag(a) != 0:
            raise ValueError("Invalid x value: %s " % (a))
        if not np.isfinite(b) or np.imag(b) != 0:
            raise ValueError("Invalid x value: %s " % (b))

        fa = self._callf(a)
        if not np.isfinite(fa) or np.imag(fa) != 0:
            raise ValueError("Invalid function value: f(%f) -> %s " % (a, fa))
        if fa == 0:
            return _ECONVERGED, a
        fb = self._callf(b)
        if not np.isfinite(fb) or np.imag(fb) != 0:
            raise ValueError("Invalid function value: f(%f) -> %s " % (b, fb))
        if fb == 0:
            return _ECONVERGED, b

        if np.sign(fb) * np.sign(fa) > 0:
            raise ValueError("a, b must bracket a root f(%e)=%e, f(%e)=%e " %
                             (a, fa, b, fb))
        self.fab[:] = [fa, fb]

        return _EINPROGRESS, sum(self.ab) / 2.0

    def get_status(self):
        """Determine the current status."""
        a, b = self.ab[:2]
        if _within_tolerance(a, b, self.rtol, self.xtol):
            return _ECONVERGED, sum(self.ab) / 2.0
        if self.iterations >= self.maxiter:
            return _ECONVERR, sum(self.ab) / 2.0
        return _EINPROGRESS, sum(self.ab) / 2.0

    def iterate(self):
        """Perform one step in the algorithm.

        Implements Algorithm 4.1(k=1) or 4.2(k=2) in [APS1995]
        """
        self.iterations += 1
        eps = np.finfo(float).eps
        d, fd, e, fe = self.d, self.fd, self.e, self.fe
        ab_width = self.ab[1] - self.ab[0]  # Need the start width below
        c = None

        for nsteps in range(2, self.k + 2):
            # If the f-values are sufficiently separated, perform an inverse
            # polynomial interpolation step. Otherwise nsteps repeats of
            # an approximate Newton-Raphson step.
            if _notclose(self.fab + [fd, fe], rtol=0, atol=32*eps):
                c0 = _inverse_poly_zero(self.ab[0], self.ab[1], d, e,
                                        self.fab[0], self.fab[1], fd, fe)
                if self.ab[0] < c0 < self.ab[1]:
                    c = c0
            if c is None:
                c = _newton_quadratic(self.ab, self.fab, d, fd, nsteps)

            fc = self._callf(c)
            if fc == 0:
                return _ECONVERGED, c

            # re-bracket
            e, fe = d, fd
            d, fd = self._update_bracket(c, fc)

        # u is the endpoint with the smallest f-value
        uix = (0 if np.abs(self.fab[0]) < np.abs(self.fab[1]) else 1)
        u, fu = self.ab[uix], self.fab[uix]

        _, A = _compute_divided_differences(self.ab, self.fab,
                                            forward=(uix == 0), full=False)
        c = u - 2 * fu / A
        if np.abs(c - u) > 0.5 * (self.ab[1] - self.ab[0]):
            c = sum(self.ab) / 2.0
        else:
            if np.isclose(c, u, rtol=eps, atol=0):
                # c didn't change (much).
                # Either because the f-values at the endpoints have vastly
                # differing magnitudes, or because the root is very close to
                # that endpoint
                frs = np.frexp(self.fab)[1]
                if frs[uix] < frs[1 - uix] - 50:  # Differ by more than 2**50
                    c = (31 * self.ab[uix] + self.ab[1 - uix]) / 32
                else:
                    # Make a bigger adjustment, about the
                    # size of the requested tolerance.
                    mm = (1 if uix == 0 else -1)
                    adj = mm * np.abs(c) * self.rtol + mm * self.xtol
                    c = u + adj
                if not self.ab[0] < c < self.ab[1]:
                    c = sum(self.ab) / 2.0

        fc = self._callf(c)
        if fc == 0:
            return _ECONVERGED, c

        e, fe = d, fd
        d, fd = self._update_bracket(c, fc)

        # If the width of the new interval did not decrease enough, bisect
        if self.ab[1] - self.ab[0] > self._MU * ab_width:
            e, fe = d, fd
            z = sum(self.ab) / 2.0
            fz = self._callf(z)
            if fz == 0:
                return _ECONVERGED, z
            d, fd = self._update_bracket(z, fz)

        # Record d and e for next iteration
        self.d, self.fd = d, fd
        self.e, self.fe = e, fe

        status, xn = self.get_status()
        return status, xn

    def solve(self, f, a, b, args=(),
              xtol=_xtol, rtol=_rtol, k=2, maxiter=_iter, disp=True):
        r"""Solve f(x) = 0 given an interval containing a zero."""
        self.configure(xtol=xtol, rtol=rtol, maxiter=maxiter, disp=disp, k=k)
        status, xn = self.start(f, a, b, args)
        if status == _ECONVERGED:
            return self.get_result(xn)

        # The first step only has two x-values.
        c = _secant(self.ab, self.fab)
        if not self.ab[0] < c < self.ab[1]:
            c = sum(self.ab) / 2.0
        fc = self._callf(c)
        if fc == 0:
            return self.get_result(c)

        self.d, self.fd = self._update_bracket(c, fc)
        self.e, self.fe = None, None
        self.iterations += 1

        while True:
            status, xn = self.iterate()
            if status == _ECONVERGED:
                return self.get_result(xn)
            if status == _ECONVERR:
                fmt = "Failed to converge after %d iterations, bracket is %s"
                if disp:
                    msg = fmt % (self.iterations + 1, self.ab)
                    raise RuntimeError(msg)
                return self.get_result(xn, _ECONVERR)
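
# The public entry point `toms748` below wraps TOMS748Solver. The solver
# object can also be driven directly (an illustrative sketch):
#     solver = TOMS748Solver()
#     x, function_calls, iterations, flag = solver.solve(
#         lambda x: x**2 - 2, 0, 2)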


def toms748(f, a, b, args=(), k=1,
            xtol=_xtol, rtol=_rtol, maxiter=_iter,
            full_output=False, disp=True):
    """
    Find a zero using TOMS Algorithm 748 method.

    Implements the Algorithm 748 method of Alefeld, Potra and Shi to find a
    zero of the function `f` on the interval ``[a, b]``, where ``f(a)`` and
    ``f(b)`` must have opposite signs.

    It uses a mixture of inverse cubic interpolation and
    "Newton-quadratic" steps [APS1995]_.

    Parameters
    ----------
    f : function
        Python function returning a scalar. The function :math:`f`
        must be continuous, and :math:`f(a)` and :math:`f(b)` must
        have opposite signs.
    a : scalar
        Lower boundary of the search interval.
    b : scalar
        Upper boundary of the search interval.
    args : tuple, optional
        Containing extra arguments for the function `f`.
        `f` is called by ``f(x, *args)``.
    k : int, optional
        The number of Newton quadratic steps to perform each iteration.
        ``k>=1``.
    xtol : scalar, optional
        The computed root ``x0`` will satisfy ``np.allclose(x, x0,
        atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
        parameter must be nonnegative.
    rtol : scalar, optional
        The computed root ``x0`` will satisfy ``np.allclose(x, x0,
        atol=xtol, rtol=rtol)``, where ``x`` is the exact root.
    maxiter : int, optional
        If convergence is not achieved in `maxiter` iterations, an error is
        raised. Must be >= 0.
    full_output : bool, optional
        If `full_output` is False, the root is returned. If `full_output`
        is True, the return value is ``(x, r)``, where `x` is the root, and
        `r` is a `RootResults` object.
    disp : bool, optional
        If True, raise RuntimeError if the algorithm didn't converge.
        Otherwise the convergence status is recorded in the `RootResults`
        return object.

    Returns
    -------
    x0 : float
        Approximate zero of `f`.
    r : `RootResults` (present if ``full_output = True``)
        Object containing information about the convergence. In particular,
        ``r.converged`` is True if the routine converged.

    See Also
    --------
    brentq, brenth, ridder, bisect, newton
    fsolve : find zeros in n dimensions.

    Notes
    -----
    `f` must be continuous.
    Algorithm 748 with ``k=2`` is asymptotically the most efficient
    algorithm known for finding roots of a four times continuously
    differentiable function.
    In contrast with Brent's algorithm, which may only decrease the length
    of the enclosing bracket on the last step, Algorithm 748 decreases it
    each iteration with the same asymptotic efficiency as it finds the root.

    For easy statement of efficiency indices, assume that `f` has 4
    continuous derivatives.
    For ``k=1``, the convergence order is at least 2.7, and with about
    asymptotically 2 function evaluations per iteration, the efficiency
    index is approximately 1.65.
    For ``k=2``, the order is about 4.6 with asymptotically 3 function
    evaluations per iteration, and the efficiency index 1.66.
    For higher values of `k`, the efficiency index approaches
    the `k`-th root of ``(3k-2)``, hence ``k=1`` or ``k=2`` are
    usually appropriate.

    References
    ----------
    .. [APS1995]
       Alefeld, G. E. and Potra, F. A. and Shi, Yixun,
       *Algorithm 748: Enclosing Zeros of Continuous Functions*,
       ACM Trans. Math. Softw. Volume 21, Issue 3 (1995),
       doi:10.1145/210089.210111

    Examples
    --------
    >>> def f(x):
    ...     return (x**3 - 1)  # only one real root at x = 1

    >>> from scipy import optimize
    >>> root, results = optimize.toms748(f, 0, 2, full_output=True)
    >>> root
    1.0
    >>> results
          converged: True
               flag: 'converged'
     function_calls: 11
         iterations: 5
               root: 1.0
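
    A second Newton-quadratic step per iteration can be requested with
    ``k=2``; per the Notes above, each iteration then costs one more
    function evaluation but converges at a higher order:

    >>> root = optimize.toms748(f, 0, 2, k=2)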
  1145. """
    if xtol <= 0:
        raise ValueError("xtol too small (%g <= 0)" % xtol)
    if rtol < _rtol / 4:
        raise ValueError("rtol too small (%g < %g)" % (rtol, _rtol / 4))
    if maxiter < 1:
        raise ValueError("maxiter must be greater than 0")
    if not np.isfinite(a):
        raise ValueError("a is not finite %s" % a)
    if not np.isfinite(b):
        raise ValueError("b is not finite %s" % b)
    if a >= b:
        raise ValueError("a and b are not an interval [%g, %g]" % (a, b))
    if not k >= 1:
        raise ValueError("k too small (%s < 1)" % k)

    if not isinstance(args, tuple):
        args = (args,)
    solver = TOMS748Solver()
    result = solver.solve(f, a, b, args=args, k=k, xtol=xtol, rtol=rtol,
                          maxiter=maxiter, disp=disp)
    x, function_calls, iterations, flag = result
    return _results_select(full_output, (x, function_calls, iterations, flag))