from __future__ import division, print_function, absolute_import

import threading
import warnings

from . import _minpack
import numpy as np
from numpy import (atleast_1d, dot, take, triu, shape, eye,
                   transpose, zeros, product, greater, array,
                   all, where, isscalar, asarray, inf, abs,
                   finfo, inexact, issubdtype, dtype)
from scipy.linalg import svd, cholesky, solve_triangular, LinAlgError
from scipy._lib._util import _asarray_validated, _lazywhere
from .optimize import OptimizeResult, _check_unknown_options, OptimizeWarning
from ._lsq import least_squares
from ._lsq.common import make_strictly_feasible
from ._lsq.least_squares import prepare_bounds

error = _minpack.error

__all__ = ['fsolve', 'leastsq', 'fixed_point', 'curve_fit']


def _check_func(checker, argname, thefunc, x0, args, numinputs,
                output_shape=None):
    res = atleast_1d(thefunc(*((x0[:numinputs],) + args)))
    if (output_shape is not None) and (shape(res) != output_shape):
        if (output_shape[0] != 1):
            if len(output_shape) > 1:
                if output_shape[1] == 1:
                    return shape(res)
            msg = "%s: there is a mismatch between the input and output " \
                  "shape of the '%s' argument" % (checker, argname)
            func_name = getattr(thefunc, '__name__', None)
            if func_name:
                msg += " '%s'." % func_name
            else:
                msg += "."
            msg += ' Shape should be %s but it is %s.' % (output_shape,
                                                          shape(res))
            raise TypeError(msg)
    if issubdtype(res.dtype, inexact):
        dt = res.dtype
    else:
        dt = dtype(float)
    return shape(res), dt


def fsolve(func, x0, args=(), fprime=None, full_output=0,
           col_deriv=0, xtol=1.49012e-8, maxfev=0, band=None,
           epsfcn=None, factor=100, diag=None):
    """
    Find the roots of a function.

    Return the roots of the (non-linear) equations defined by
    ``func(x) = 0`` given a starting estimate.

    Parameters
    ----------
    func : callable ``f(x, *args)``
        A function that takes at least one (possibly vector) argument,
        and returns a value of the same length.
    x0 : ndarray
        The starting estimate for the roots of ``func(x) = 0``.
    args : tuple, optional
        Any extra arguments to `func`.
    fprime : callable ``f(x, *args)``, optional
        A function to compute the Jacobian of `func` with derivatives
        across the rows. By default, the Jacobian will be estimated.
    full_output : bool, optional
        If True, return optional outputs.
    col_deriv : bool, optional
        Specify whether the Jacobian function computes derivatives down
        the columns (faster, because there is no transpose operation).
    xtol : float, optional
        The calculation will terminate if the relative error between two
        consecutive iterates is at most `xtol`.
    maxfev : int, optional
        The maximum number of calls to the function. If zero, then
        ``100*(N+1)`` is the maximum where N is the number of elements
        in `x0`.
    band : tuple, optional
        If set to a two-sequence containing the number of sub- and
        super-diagonals within the band of the Jacobi matrix, the
        Jacobi matrix is considered banded (only for ``fprime=None``).
    epsfcn : float, optional
        A suitable step length for the forward-difference
        approximation of the Jacobian (for ``fprime=None``). If
        `epsfcn` is less than the machine precision, it is assumed
        that the relative errors in the functions are of the order of
        the machine precision.
    factor : float, optional
        A parameter determining the initial step bound
        (``factor * || diag * x||``). Should be in the interval
        ``(0.1, 100)``.
    diag : sequence, optional
        N positive entries that serve as scale factors for the
        variables.

    Returns
    -------
    x : ndarray
        The solution (or the result of the last iteration for
        an unsuccessful call).
    infodict : dict
        A dictionary of optional outputs with the keys:

        ``nfev``
            number of function calls
        ``njev``
            number of Jacobian calls
        ``fvec``
            function evaluated at the output
        ``fjac``
            the orthogonal matrix, q, produced by the QR
            factorization of the final approximate Jacobian
            matrix, stored column wise
        ``r``
            upper triangular matrix produced by QR factorization
            of the same matrix
        ``qtf``
            the vector ``(transpose(q) * fvec)``
    ier : int
        An integer flag. Set to 1 if a solution was found, otherwise refer
        to `mesg` for more information.
    mesg : str
        If no solution is found, `mesg` details the cause of failure.

    See Also
    --------
    root : Interface to root finding algorithms for multivariate
           functions. See the ``method=='hybr'`` in particular.

    Notes
    -----
    ``fsolve`` is a wrapper around MINPACK's hybrd and hybrj algorithms.
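
    Examples
    --------
    A minimal sketch: solve the illustrative system
    ``x0*cos(x1) = 4``, ``x1*x0 - x1 = 5`` (this system and the helper
    name ``equations`` are chosen here for demonstration only):

    >>> import numpy as np
    >>> from scipy.optimize import fsolve
    >>> def equations(x):
    ...     return [x[0] * np.cos(x[1]) - 4,
    ...             x[1] * x[0] - x[1] - 5]
    >>> root = fsolve(equations, [1, 1])
    >>> np.isclose(equations(root), [0.0, 0.0])  # both equations hold
    array([ True,  True])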
  122. """
  123. options = {'col_deriv': col_deriv,
  124. 'xtol': xtol,
  125. 'maxfev': maxfev,
  126. 'band': band,
  127. 'eps': epsfcn,
  128. 'factor': factor,
  129. 'diag': diag}
  130. res = _root_hybr(func, x0, args, jac=fprime, **options)
  131. if full_output:
  132. x = res['x']
  133. info = dict((k, res.get(k))
  134. for k in ('nfev', 'njev', 'fjac', 'r', 'qtf') if k in res)
  135. info['fvec'] = res['fun']
  136. return x, info, res['status'], res['message']
  137. else:
  138. status = res['status']
  139. msg = res['message']
  140. if status == 0:
  141. raise TypeError(msg)
  142. elif status == 1:
  143. pass
  144. elif status in [2, 3, 4, 5]:
  145. warnings.warn(msg, RuntimeWarning)
  146. else:
  147. raise TypeError(msg)
  148. return res['x']
def _root_hybr(func, x0, args=(), jac=None,
               col_deriv=0, xtol=1.49012e-08, maxfev=0, band=None, eps=None,
               factor=100, diag=None, **unknown_options):
    """
    Find the roots of a multivariate function using MINPACK's hybrd and
    hybrj routines (modified Powell method).

    Options
    -------
    col_deriv : bool
        Specify whether the Jacobian function computes derivatives down
        the columns (faster, because there is no transpose operation).
    xtol : float
        The calculation will terminate if the relative error between two
        consecutive iterates is at most `xtol`.
    maxfev : int
        The maximum number of calls to the function. If zero, then
        ``100*(N+1)`` is the maximum where N is the number of elements
        in `x0`.
    band : tuple
        If set to a two-sequence containing the number of sub- and
        super-diagonals within the band of the Jacobi matrix, the
        Jacobi matrix is considered banded (only for ``fprime=None``).
    eps : float
        A suitable step length for the forward-difference
        approximation of the Jacobian (for ``fprime=None``). If
        `eps` is less than the machine precision, it is assumed
        that the relative errors in the functions are of the order of
        the machine precision.
    factor : float
        A parameter determining the initial step bound
        (``factor * || diag * x||``). Should be in the interval
        ``(0.1, 100)``.
    diag : sequence
        N positive entries that serve as scale factors for the
        variables.

    """
    _check_unknown_options(unknown_options)
    epsfcn = eps

    x0 = asarray(x0).flatten()
    n = len(x0)
    if not isinstance(args, tuple):
        args = (args,)
    shape, dtype = _check_func('fsolve', 'func', func, x0, args, n, (n,))
    if epsfcn is None:
        epsfcn = finfo(dtype).eps
    Dfun = jac
    if Dfun is None:
        if band is None:
            ml, mu = -10, -10
        else:
            ml, mu = band[:2]
        if maxfev == 0:
            maxfev = 200 * (n + 1)
        retval = _minpack._hybrd(func, x0, args, 1, xtol, maxfev,
                                 ml, mu, epsfcn, factor, diag)
    else:
        _check_func('fsolve', 'fprime', Dfun, x0, args, n, (n, n))
        if (maxfev == 0):
            maxfev = 100 * (n + 1)
        retval = _minpack._hybrj(func, Dfun, x0, args, 1,
                                 col_deriv, xtol, maxfev, factor, diag)

    x, status = retval[0], retval[-1]

    errors = {0: "Improper input parameters were entered.",
              1: "The solution converged.",
              2: "The number of calls to function has "
                 "reached maxfev = %d." % maxfev,
              3: "xtol=%f is too small, no further improvement "
                 "in the approximate\n  solution "
                 "is possible." % xtol,
              4: "The iteration is not making good progress, as measured "
                 "by the \n  improvement from the last five "
                 "Jacobian evaluations.",
              5: "The iteration is not making good progress, "
                 "as measured by the \n  improvement from the last "
                 "ten iterations.",
              'unknown': "An error occurred."}

    info = retval[1]
    info['fun'] = info.pop('fvec')
    sol = OptimizeResult(x=x, success=(status == 1), status=status)
    sol.update(info)
    try:
        sol['message'] = errors[status]
    except KeyError:
        sol['message'] = errors['unknown']

    return sol


LEASTSQ_SUCCESS = [1, 2, 3, 4]
LEASTSQ_FAILURE = [5, 6, 7, 8]


def leastsq(func, x0, args=(), Dfun=None, full_output=0,
            col_deriv=0, ftol=1.49012e-8, xtol=1.49012e-8,
            gtol=0.0, maxfev=0, epsfcn=None, factor=100, diag=None):
    """
    Minimize the sum of squares of a set of equations.

    ::

        x = arg min(sum(func(y)**2, axis=0))
                 y

    Parameters
    ----------
    func : callable
        Should take at least one (possibly length N vector) argument and
        return M floating point numbers. It must not return NaNs or
        fitting might fail.
    x0 : ndarray
        The starting estimate for the minimization.
    args : tuple, optional
        Any extra arguments to func are placed in this tuple.
    Dfun : callable, optional
        A function or method to compute the Jacobian of func with derivatives
        across the rows. If this is None, the Jacobian will be estimated.
    full_output : bool, optional
        non-zero to return all optional outputs.
    col_deriv : bool, optional
        non-zero to specify that the Jacobian function computes derivatives
        down the columns (faster, because there is no transpose operation).
    ftol : float, optional
        Relative error desired in the sum of squares.
    xtol : float, optional
        Relative error desired in the approximate solution.
    gtol : float, optional
        Orthogonality desired between the function vector and the columns of
        the Jacobian.
    maxfev : int, optional
        The maximum number of calls to the function. If `Dfun` is provided,
        then the default `maxfev` is 100*(N+1) where N is the number of
        elements in x0, otherwise the default `maxfev` is 200*(N+1).
    epsfcn : float, optional
        A variable used in determining a suitable step length for the forward-
        difference approximation of the Jacobian (for Dfun=None).
        Normally the actual step length will be sqrt(epsfcn)*x.
        If epsfcn is less than the machine precision, it is assumed that the
        relative errors are of the order of the machine precision.
    factor : float, optional
        A parameter determining the initial step bound
        (``factor * || diag * x||``). Should be in the interval ``(0.1, 100)``.
    diag : sequence, optional
        N positive entries that serve as scale factors for the variables.

    Returns
    -------
    x : ndarray
        The solution (or the result of the last iteration for an unsuccessful
        call).
    cov_x : ndarray
        Uses the fjac and ipvt optional outputs to construct an
        estimate of the Jacobian around the solution. None if a
        singular matrix was encountered (indicates very flat curvature in
        some direction). This matrix must be multiplied by the
        residual variance to get the covariance of the
        parameter estimates -- see curve_fit.
    infodict : dict
        a dictionary of optional outputs with the keys:

        ``nfev``
            The number of function calls
        ``fvec``
            The function evaluated at the output
        ``fjac``
            A permutation of the R matrix of a QR
            factorization of the final approximate
            Jacobian matrix, stored column wise.
            Together with ipvt, the covariance of the
            estimate can be approximated.
        ``ipvt``
            An integer array of length N which defines
            a permutation matrix, p, such that
            fjac*p = q*r, where r is upper triangular
            with diagonal elements of nonincreasing
            magnitude. Column j of p is column ipvt(j)
            of the identity matrix.
        ``qtf``
            The vector (transpose(q) * fvec).
    mesg : str
        A string message giving information about the cause of failure.
    ier : int
        An integer flag. If it is equal to 1, 2, 3 or 4, the solution was
        found. Otherwise, the solution was not found. In either case, the
        optional output variable 'mesg' gives more information.

    Notes
    -----
    "leastsq" is a wrapper around MINPACK's lmdif and lmder algorithms.

    cov_x is a Jacobian approximation to the Hessian of the least squares
    objective function.
    This approximation assumes that the objective function is based on the
    difference between some observed target data (ydata) and a (non-linear)
    function of the parameters `f(xdata, params)` ::

        func(params) = ydata - f(xdata, params)

    so that the objective function is ::

        min   sum((ydata - f(xdata, params))**2, axis=0)
        params

    The solution, `x`, is always a 1D array, regardless of the shape of `x0`,
    or whether `x0` is a scalar.
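
    Examples
    --------
    A minimal sketch: recover the slope and intercept of a line from exact
    data (the residual function and data here are illustrative):

    >>> import numpy as np
    >>> from scipy.optimize import leastsq
    >>> def residuals(p, x, y):
    ...     return y - (p[0] * x + p[1])
    >>> x = np.array([0., 1., 2., 3.])
    >>> y = 2. * x + 1.
    >>> p_opt, ier = leastsq(residuals, [1., 0.], args=(x, y))
    >>> np.allclose(p_opt, [2., 1.])
    True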
  337. """
  338. x0 = asarray(x0).flatten()
  339. n = len(x0)
  340. if not isinstance(args, tuple):
  341. args = (args,)
  342. shape, dtype = _check_func('leastsq', 'func', func, x0, args, n)
  343. m = shape[0]
  344. if n > m:
  345. raise TypeError('Improper input: N=%s must not exceed M=%s' % (n, m))
  346. if epsfcn is None:
  347. epsfcn = finfo(dtype).eps
  348. if Dfun is None:
  349. if maxfev == 0:
  350. maxfev = 200*(n + 1)
  351. retval = _minpack._lmdif(func, x0, args, full_output, ftol, xtol,
  352. gtol, maxfev, epsfcn, factor, diag)
  353. else:
  354. if col_deriv:
  355. _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (n, m))
  356. else:
  357. _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (m, n))
  358. if maxfev == 0:
  359. maxfev = 100 * (n + 1)
  360. retval = _minpack._lmder(func, Dfun, x0, args, full_output,
  361. col_deriv, ftol, xtol, gtol, maxfev,
  362. factor, diag)
  363. errors = {0: ["Improper input parameters.", TypeError],
  364. 1: ["Both actual and predicted relative reductions "
  365. "in the sum of squares\n are at most %f" % ftol, None],
  366. 2: ["The relative error between two consecutive "
  367. "iterates is at most %f" % xtol, None],
  368. 3: ["Both actual and predicted relative reductions in "
  369. "the sum of squares\n are at most %f and the "
  370. "relative error between two consecutive "
  371. "iterates is at \n most %f" % (ftol, xtol), None],
  372. 4: ["The cosine of the angle between func(x) and any "
  373. "column of the\n Jacobian is at most %f in "
  374. "absolute value" % gtol, None],
  375. 5: ["Number of calls to function has reached "
  376. "maxfev = %d." % maxfev, ValueError],
  377. 6: ["ftol=%f is too small, no further reduction "
  378. "in the sum of squares\n is possible.""" % ftol,
  379. ValueError],
  380. 7: ["xtol=%f is too small, no further improvement in "
  381. "the approximate\n solution is possible." % xtol,
  382. ValueError],
  383. 8: ["gtol=%f is too small, func(x) is orthogonal to the "
  384. "columns of\n the Jacobian to machine "
  385. "precision." % gtol, ValueError]}
  386. # The FORTRAN return value (possible return values are >= 0 and <= 8)
  387. info = retval[-1]
  388. if full_output:
  389. cov_x = None
  390. if info in LEASTSQ_SUCCESS:
  391. from numpy.dual import inv
  392. perm = take(eye(n), retval[1]['ipvt'] - 1, 0)
  393. r = triu(transpose(retval[1]['fjac'])[:n, :])
  394. R = dot(r, perm)
  395. try:
  396. cov_x = inv(dot(transpose(R), R))
  397. except (LinAlgError, ValueError):
  398. pass
  399. return (retval[0], cov_x) + retval[1:-1] + (errors[info][0], info)
  400. else:
  401. if info in LEASTSQ_FAILURE:
  402. warnings.warn(errors[info][0], RuntimeWarning)
  403. elif info == 0:
  404. raise errors[info][1](errors[info][0])
  405. return retval[0], info
def _wrap_func(func, xdata, ydata, transform):
    if transform is None:
        def func_wrapped(params):
            return func(xdata, *params) - ydata
    elif transform.ndim == 1:
        def func_wrapped(params):
            return transform * (func(xdata, *params) - ydata)
    else:
        # Chisq = (y - yd)^T C^{-1} (y-yd)
        # transform = L such that C = L L^T
        # C^{-1} = L^{-T} L^{-1}
        # Chisq = (y - yd)^T L^{-T} L^{-1} (y-yd)
        # Define (y-yd)' = L^{-1} (y-yd)
        # by solving
        # L (y-yd)' = (y-yd)
        # and minimize (y-yd)'^T (y-yd)'
        def func_wrapped(params):
            return solve_triangular(transform,
                                    func(xdata, *params) - ydata,
                                    lower=True)
    return func_wrapped
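
# A minimal sanity check of the 2-d `sigma` whitening above (illustrative,
# not part of the module): with L = cholesky(C, lower=True) and
# r = y - f(xdata, *params),
#
#     r_white = solve_triangular(L, r, lower=True)
#
# satisfies r_white @ r_white == r @ inv(C) @ r up to round-off, so
# minimizing the plain sum of squares of the transformed residuals
# minimizes the generalized chi-square.
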
def _wrap_jac(jac, xdata, transform):
    if transform is None:
        def jac_wrapped(params):
            return jac(xdata, *params)
    elif transform.ndim == 1:
        def jac_wrapped(params):
            return transform[:, np.newaxis] * np.asarray(jac(xdata, *params))
    else:
        def jac_wrapped(params):
            return solve_triangular(transform,
                                    np.asarray(jac(xdata, *params)),
                                    lower=True)
    return jac_wrapped


def _initialize_feasible(lb, ub):
    p0 = np.ones_like(lb)
    lb_finite = np.isfinite(lb)
    ub_finite = np.isfinite(ub)

    mask = lb_finite & ub_finite
    p0[mask] = 0.5 * (lb[mask] + ub[mask])

    mask = lb_finite & ~ub_finite
    p0[mask] = lb[mask] + 1

    mask = ~lb_finite & ub_finite
    p0[mask] = ub[mask] - 1

    return p0
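
# For example (illustrative values): lb = [0, -np.inf, 1, -np.inf] and
# ub = [np.inf, 0, 3, np.inf] give p0 = [1., -1., 2., 1.], i.e. lb + 1,
# ub - 1, the midpoint of the finite interval, and the default 1,
# respectively.
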
def curve_fit(f, xdata, ydata, p0=None, sigma=None, absolute_sigma=False,
              check_finite=True, bounds=(-np.inf, np.inf), method=None,
              jac=None, **kwargs):
    """
    Use non-linear least squares to fit a function, f, to data.

    Assumes ``ydata = f(xdata, *params) + eps``

    Parameters
    ----------
    f : callable
        The model function, f(x, ...). It must take the independent
        variable as the first argument and the parameters to fit as
        separate remaining arguments.
    xdata : An M-length sequence or a (k,M)-shaped array for functions
        with k predictors.
        The independent variable where the data is measured.
    ydata : M-length sequence
        The dependent data --- nominally f(xdata, ...)
    p0 : None, scalar, or N-length sequence, optional
        Initial guess for the parameters. If None, then the initial
        values will all be 1 (if the number of parameters for the function
        can be determined using introspection, otherwise a ValueError
        is raised).
    sigma : None or M-length sequence or MxM array, optional
        Determines the uncertainty in `ydata`. If we define residuals as
        ``r = ydata - f(xdata, *popt)``, then the interpretation of `sigma`
        depends on its number of dimensions:

        - A 1-d `sigma` should contain values of standard deviations of
          errors in `ydata`. In this case, the optimized function is
          ``chisq = sum((r / sigma) ** 2)``.

        - A 2-d `sigma` should contain the covariance matrix of
          errors in `ydata`. In this case, the optimized function is
          ``chisq = r.T @ inv(sigma) @ r``.

          .. versionadded:: 0.19

        None (default) is equivalent to a 1-d `sigma` filled with ones.
    absolute_sigma : bool, optional
        If True, `sigma` is used in an absolute sense and the estimated
        parameter covariance `pcov` reflects these absolute values.

        If False, only the relative magnitudes of the `sigma` values matter.
        The returned parameter covariance matrix `pcov` is based on scaling
        `sigma` by a constant factor. This constant is set by demanding that
        the reduced `chisq` for the optimal parameters `popt` when using the
        *scaled* `sigma` equals unity. In other words, `sigma` is scaled to
        match the sample variance of the residuals after the fit.
        Mathematically,
        ``pcov(absolute_sigma=False) = pcov(absolute_sigma=True) * chisq(popt)/(M-N)``
    check_finite : bool, optional
        If True, check that the input arrays do not contain NaNs or infs,
        and raise a ValueError if they do. Setting this parameter to
        False may silently produce nonsensical results if the input arrays
        do contain NaNs. Default is True.
    bounds : 2-tuple of array_like, optional
        Lower and upper bounds on parameters. Defaults to no bounds.
        Each element of the tuple must be either an array with the length
        equal to the number of parameters, or a scalar (in which case the
        bound is taken to be the same for all parameters). Use ``np.inf``
        with an appropriate sign to disable bounds on all or some parameters.

        .. versionadded:: 0.17
    method : {'lm', 'trf', 'dogbox'}, optional
        Method to use for optimization. See `least_squares` for more details.
        Default is 'lm' for unconstrained problems and 'trf' if `bounds` are
        provided. The method 'lm' won't work when the number of observations
        is less than the number of variables; use 'trf' or 'dogbox' in this
        case.

        .. versionadded:: 0.17
    jac : callable, string or None, optional
        Function with signature ``jac(x, ...)`` which computes the Jacobian
        matrix of the model function with respect to parameters as a dense
        array_like structure. It will be scaled according to provided `sigma`.
        If None (default), the Jacobian will be estimated numerically.
        String keywords for 'trf' and 'dogbox' methods can be used to select
        a finite difference scheme, see `least_squares`.

        .. versionadded:: 0.18
    kwargs
        Keyword arguments passed to `leastsq` for ``method='lm'`` or
        `least_squares` otherwise.

    Returns
    -------
    popt : array
        Optimal values for the parameters so that the sum of the squared
        residuals of ``f(xdata, *popt) - ydata`` is minimized.
    pcov : 2d array
        The estimated covariance of popt. The diagonals provide the variance
        of the parameter estimate. To compute one standard deviation errors
        on the parameters use ``perr = np.sqrt(np.diag(pcov))``.

        How the `sigma` parameter affects the estimated covariance
        depends on `absolute_sigma` argument, as described above.

        If the Jacobian matrix at the solution doesn't have full rank, then
        the 'lm' method returns a matrix filled with ``np.inf``; the 'trf'
        and 'dogbox' methods, on the other hand, use the Moore-Penrose
        pseudoinverse to compute the covariance matrix.

    Raises
    ------
    ValueError
        if either `ydata` or `xdata` contain NaNs, or if incompatible options
        are used.
    RuntimeError
        if the least-squares minimization fails.
    OptimizeWarning
        if covariance of the parameters cannot be estimated.

    See Also
    --------
    least_squares : Minimize the sum of squares of nonlinear functions.
    scipy.stats.linregress : Calculate a linear least squares regression for
                             two sets of measurements.

    Notes
    -----
    With ``method='lm'``, the algorithm uses the Levenberg-Marquardt
    algorithm through `leastsq`. Note that this algorithm can only deal with
    unconstrained problems.

    Box constraints can be handled by methods 'trf' and 'dogbox'. Refer to
    the docstring of `least_squares` for more information.

    Examples
    --------
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> from scipy.optimize import curve_fit

    >>> def func(x, a, b, c):
    ...     return a * np.exp(-b * x) + c

    Define the data to be fit with some noise:

    >>> xdata = np.linspace(0, 4, 50)
    >>> y = func(xdata, 2.5, 1.3, 0.5)
    >>> np.random.seed(1729)
    >>> y_noise = 0.2 * np.random.normal(size=xdata.size)
    >>> ydata = y + y_noise
    >>> plt.plot(xdata, ydata, 'b-', label='data')

    Fit for the parameters a, b, c of the function `func`:

    >>> popt, pcov = curve_fit(func, xdata, ydata)
    >>> popt
    array([ 2.55423706,  1.35190947,  0.47450618])
    >>> plt.plot(xdata, func(xdata, *popt), 'r-',
    ...          label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt))

    Constrain the optimization to the region of ``0 <= a <= 3``,
    ``0 <= b <= 1`` and ``0 <= c <= 0.5``:

    >>> popt, pcov = curve_fit(func, xdata, ydata, bounds=(0, [3., 1., 0.5]))
    >>> popt
    array([ 2.43708906,  1.        ,  0.35015434])
    >>> plt.plot(xdata, func(xdata, *popt), 'g--',
    ...          label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt))

    >>> plt.xlabel('x')
    >>> plt.ylabel('y')
    >>> plt.legend()
    >>> plt.show()
  588. """
  589. if p0 is None:
  590. # determine number of parameters by inspecting the function
  591. from scipy._lib._util import getargspec_no_self as _getargspec
  592. args, varargs, varkw, defaults = _getargspec(f)
  593. if len(args) < 2:
  594. raise ValueError("Unable to determine number of fit parameters.")
  595. n = len(args) - 1
  596. else:
  597. p0 = np.atleast_1d(p0)
  598. n = p0.size
  599. lb, ub = prepare_bounds(bounds, n)
  600. if p0 is None:
  601. p0 = _initialize_feasible(lb, ub)
  602. bounded_problem = np.any((lb > -np.inf) | (ub < np.inf))
  603. if method is None:
  604. if bounded_problem:
  605. method = 'trf'
  606. else:
  607. method = 'lm'
  608. if method == 'lm' and bounded_problem:
  609. raise ValueError("Method 'lm' only works for unconstrained problems. "
  610. "Use 'trf' or 'dogbox' instead.")
  611. # NaNs can not be handled
  612. if check_finite:
  613. ydata = np.asarray_chkfinite(ydata)
  614. else:
  615. ydata = np.asarray(ydata)
  616. if isinstance(xdata, (list, tuple, np.ndarray)):
  617. # `xdata` is passed straight to the user-defined `f`, so allow
  618. # non-array_like `xdata`.
  619. if check_finite:
  620. xdata = np.asarray_chkfinite(xdata)
  621. else:
  622. xdata = np.asarray(xdata)
  623. # optimization may produce garbage for float32 inputs, cast them to float64
  624. xdata = xdata.astype(float)
  625. ydata = ydata.astype(float)
  626. # Determine type of sigma
  627. if sigma is not None:
  628. sigma = np.asarray(sigma)
  629. # if 1-d, sigma are errors, define transform = 1/sigma
  630. if sigma.shape == (ydata.size, ):
  631. transform = 1.0 / sigma
  632. # if 2-d, sigma is the covariance matrix,
  633. # define transform = L such that L L^T = C
  634. elif sigma.shape == (ydata.size, ydata.size):
  635. try:
  636. # scipy.linalg.cholesky requires lower=True to return L L^T = A
  637. transform = cholesky(sigma, lower=True)
  638. except LinAlgError:
  639. raise ValueError("`sigma` must be positive definite.")
  640. else:
  641. raise ValueError("`sigma` has incorrect shape.")
  642. else:
  643. transform = None
  644. func = _wrap_func(f, xdata, ydata, transform)
  645. if callable(jac):
  646. jac = _wrap_jac(jac, xdata, transform)
  647. elif jac is None and method != 'lm':
  648. jac = '2-point'
  649. if method == 'lm':
  650. # Remove full_output from kwargs, otherwise we're passing it in twice.
  651. return_full = kwargs.pop('full_output', False)
  652. res = leastsq(func, p0, Dfun=jac, full_output=1, **kwargs)
  653. popt, pcov, infodict, errmsg, ier = res
  654. cost = np.sum(infodict['fvec'] ** 2)
  655. if ier not in [1, 2, 3, 4]:
  656. raise RuntimeError("Optimal parameters not found: " + errmsg)
  657. else:
  658. # Rename maxfev (leastsq) to max_nfev (least_squares), if specified.
  659. if 'max_nfev' not in kwargs:
  660. kwargs['max_nfev'] = kwargs.pop('maxfev', None)
  661. res = least_squares(func, p0, jac=jac, bounds=bounds, method=method,
  662. **kwargs)
  663. if not res.success:
  664. raise RuntimeError("Optimal parameters not found: " + res.message)
  665. cost = 2 * res.cost # res.cost is half sum of squares!
  666. popt = res.x
  667. # Do Moore-Penrose inverse discarding zero singular values.
  668. _, s, VT = svd(res.jac, full_matrices=False)
  669. threshold = np.finfo(float).eps * max(res.jac.shape) * s[0]
  670. s = s[s > threshold]
  671. VT = VT[:s.size]
  672. pcov = np.dot(VT.T / s**2, VT)
  673. return_full = False
  674. warn_cov = False
  675. if pcov is None:
  676. # indeterminate covariance
  677. pcov = zeros((len(popt), len(popt)), dtype=float)
  678. pcov.fill(inf)
  679. warn_cov = True
  680. elif not absolute_sigma:
  681. if ydata.size > p0.size:
  682. s_sq = cost / (ydata.size - p0.size)
  683. pcov = pcov * s_sq
  684. else:
  685. pcov.fill(inf)
  686. warn_cov = True
  687. if warn_cov:
  688. warnings.warn('Covariance of the parameters could not be estimated',
  689. category=OptimizeWarning)
  690. if return_full:
  691. return popt, pcov, infodict, errmsg, ier
  692. else:
  693. return popt, pcov
def check_gradient(fcn, Dfcn, x0, args=(), col_deriv=0):
    """Perform a simple check on the gradient for correctness.
    """
    x = atleast_1d(x0)
    n = len(x)
    x = x.reshape((n,))
    fvec = atleast_1d(fcn(x, *args))
    m = len(fvec)
    fvec = fvec.reshape((m,))
    ldfjac = m
    fjac = atleast_1d(Dfcn(x, *args))
    fjac = fjac.reshape((m, n))
    if col_deriv == 0:
        fjac = transpose(fjac)

    xp = zeros((n,), float)
    err = zeros((m,), float)
    fvecp = None
    _minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 1, err)

    fvecp = atleast_1d(fcn(xp, *args))
    fvecp = fvecp.reshape((m,))
    _minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 2, err)

    good = (product(greater(err, 0.5), axis=0))

    return (good, err)
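
# Illustrative use of check_gradient (not part of the public API; the
# lambdas below are hypothetical inputs):
#
#     good, err = check_gradient(lambda x: [x[0]**2 - 1.0],
#                                lambda x: [[2.0 * x[0]]], [2.0])
#
# entries of `err` close to 1.0 indicate a correct gradient, and `good`
# is nonzero when every entry exceeds 0.5.
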
def _del2(p0, p1, d):
    return p0 - np.square(p1 - p0) / d


def _relerr(actual, desired):
    return (actual - desired) / desired
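
# In _fixed_point_helper below, _del2 applies Aitken's delta-squared
# extrapolation to the iterates p0, p1 = func(p0), p2 = func(p1):
#
#     p = p0 - (p1 - p0)**2 / (p2 - 2*p1 + p0)
#
# which is the update used by Steffensen's method.
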
def _fixed_point_helper(func, x0, args, xtol, maxiter, use_accel):
    p0 = x0
    for i in range(maxiter):
        p1 = func(p0, *args)
        if use_accel:
            p2 = func(p1, *args)
            d = p2 - 2.0 * p1 + p0
            p = _lazywhere(d != 0, (p0, p1, d), f=_del2, fillvalue=p2)
        else:
            p = p1
        relerr = _lazywhere(p0 != 0, (p, p0), f=_relerr, fillvalue=p)
        if np.all(np.abs(relerr) < xtol):
            return p
        p0 = p
    msg = "Failed to converge after %d iterations, value is %s" % (maxiter, p)
    raise RuntimeError(msg)


def fixed_point(func, x0, args=(), xtol=1e-8, maxiter=500, method='del2'):
    """
    Find a fixed point of the function.

    Given a function of one or more variables and a starting point, find a
    fixed point of the function: i.e., where ``func(x0) == x0``.

    Parameters
    ----------
    func : function
        Function to evaluate.
    x0 : array_like
        Initial guess for the fixed point of the function.
    args : tuple, optional
        Extra arguments to `func`.
    xtol : float, optional
        Convergence tolerance, defaults to 1e-08.
    maxiter : int, optional
        Maximum number of iterations, defaults to 500.
    method : {"del2", "iteration"}, optional
        Method of finding the fixed-point, defaults to "del2",
        which uses Steffensen's Method with Aitken's ``Del^2``
        convergence acceleration [1]_. The "iteration" method simply iterates
        the function until convergence is detected, without attempting to
        accelerate the convergence.

    References
    ----------
    .. [1] Burden, Faires, "Numerical Analysis", 5th edition, pg. 80

    Examples
    --------
    >>> import numpy as np
    >>> from scipy import optimize
    >>> def func(x, c1, c2):
    ...     return np.sqrt(c1/(x+c2))
    >>> c1 = np.array([10, 12.])
    >>> c2 = np.array([3, 5.])
    >>> optimize.fixed_point(func, [1.2, 1.3], args=(c1, c2))
    array([ 1.4920333 ,  1.37228132])

    """
    use_accel = {'del2': True, 'iteration': False}[method]
    x0 = _asarray_validated(x0, as_inexact=True)
    return _fixed_point_helper(func, x0, args, xtol, maxiter, use_accel)