from __future__ import division, print_function, absolute_import

import threading
import warnings

from . import _minpack

import numpy as np
from numpy import (atleast_1d, dot, take, triu, shape, eye,
                   transpose, zeros, product, greater, array,
                   all, where, isscalar, asarray, inf, abs,
                   finfo, inexact, issubdtype, dtype)
from scipy.linalg import svd, cholesky, solve_triangular, LinAlgError
from scipy._lib._util import _asarray_validated, _lazywhere
from .optimize import OptimizeResult, _check_unknown_options, OptimizeWarning
from ._lsq import least_squares
from ._lsq.common import make_strictly_feasible
from ._lsq.least_squares import prepare_bounds


error = _minpack.error

__all__ = ['fsolve', 'leastsq', 'fixed_point', 'curve_fit']


def _check_func(checker, argname, thefunc, x0, args, numinputs,
                output_shape=None):
    res = atleast_1d(thefunc(*((x0[:numinputs],) + args)))
    if (output_shape is not None) and (shape(res) != output_shape):
        if (output_shape[0] != 1):
            if len(output_shape) > 1:
                if output_shape[1] == 1:
                    return shape(res)
            msg = "%s: there is a mismatch between the input and output " \
                  "shape of the '%s' argument" % (checker, argname)
            func_name = getattr(thefunc, '__name__', None)
            if func_name:
                msg += " '%s'." % func_name
            else:
                msg += "."
            msg += ' Shape should be %s but it is %s.' % (output_shape,
                                                          shape(res))
            raise TypeError(msg)
    if issubdtype(res.dtype, inexact):
        dt = res.dtype
    else:
        dt = dtype(float)
    return shape(res), dt


def fsolve(func, x0, args=(), fprime=None, full_output=0,
           col_deriv=0, xtol=1.49012e-8, maxfev=0, band=None,
           epsfcn=None, factor=100, diag=None):
    """
    Find the roots of a function.

    Return the roots of the (non-linear) equations defined by
    ``func(x) = 0`` given a starting estimate.

    Parameters
    ----------
    func : callable ``f(x, *args)``
        A function that takes at least one (possibly vector) argument,
        and returns a value of the same length.
    x0 : ndarray
        The starting estimate for the roots of ``func(x) = 0``.
    args : tuple, optional
        Any extra arguments to `func`.
    fprime : callable ``f(x, *args)``, optional
        A function to compute the Jacobian of `func` with derivatives
        across the rows. By default, the Jacobian will be estimated.
    full_output : bool, optional
        If True, return optional outputs.
    col_deriv : bool, optional
        Specify whether the Jacobian function computes derivatives down
        the columns (faster, because there is no transpose operation).
    xtol : float, optional
        The calculation will terminate if the relative error between two
        consecutive iterates is at most `xtol`.
    maxfev : int, optional
        The maximum number of calls to the function. If zero, then
        ``100*(N+1)`` is the maximum where N is the number of elements
        in `x0`.
    band : tuple, optional
        If set to a two-sequence containing the number of sub- and
        super-diagonals within the band of the Jacobi matrix, the
        Jacobi matrix is considered banded (only for ``fprime=None``).
    epsfcn : float, optional
        A suitable step length for the forward-difference
        approximation of the Jacobian (for ``fprime=None``). If
        `epsfcn` is less than the machine precision, it is assumed
        that the relative errors in the functions are of the order of
        the machine precision.
    factor : float, optional
        A parameter determining the initial step bound
        (``factor * || diag * x||``). Should be in the interval
        ``(0.1, 100)``.
    diag : sequence, optional
        N positive entries that serve as scale factors for the
        variables.

    Returns
    -------
    x : ndarray
        The solution (or the result of the last iteration for
        an unsuccessful call).
    infodict : dict
        A dictionary of optional outputs with the keys:

        ``nfev``
            number of function calls
        ``njev``
            number of Jacobian calls
        ``fvec``
            function evaluated at the output
        ``fjac``
            the orthogonal matrix, q, produced by the QR
            factorization of the final approximate Jacobian
            matrix, stored column wise
        ``r``
            upper triangular matrix produced by QR factorization
            of the same matrix
        ``qtf``
            the vector ``(transpose(q) * fvec)``

    ier : int
        An integer flag. Set to 1 if a solution was found, otherwise refer
        to `mesg` for more information.
    mesg : str
        If no solution is found, `mesg` details the cause of failure.

    See Also
    --------
    root : Interface to root finding algorithms for multivariate
           functions. See ``method='hybr'`` in particular.

    Notes
    -----
    ``fsolve`` is a wrapper around MINPACK's hybrd and hybrj algorithms.
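
    Examples
    --------
    A minimal illustration (the system and the starting point below are
    arbitrary choices): find a root of ``x0*cos(x1) - 4 = 0`` and
    ``x1*x0 - x1 - 5 = 0``.

    >>> import numpy as np
    >>> from scipy.optimize import fsolve
    >>> def equations(x):
    ...     return [x[0] * np.cos(x[1]) - 4,
    ...             x[1] * x[0] - x[1] - 5]
    >>> root = fsolve(equations, [1, 1])
    >>> np.allclose(equations(root), 0.0)  # both residuals should be ~0
    True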
- """
    options = {'col_deriv': col_deriv,
               'xtol': xtol,
               'maxfev': maxfev,
               'band': band,
               'eps': epsfcn,
               'factor': factor,
               'diag': diag}

    res = _root_hybr(func, x0, args, jac=fprime, **options)
    if full_output:
        x = res['x']
        info = dict((k, res.get(k))
                    for k in ('nfev', 'njev', 'fjac', 'r', 'qtf') if k in res)
        info['fvec'] = res['fun']
        return x, info, res['status'], res['message']
    else:
        status = res['status']
        msg = res['message']
        if status == 0:
            raise TypeError(msg)
        elif status == 1:
            pass
        elif status in [2, 3, 4, 5]:
            warnings.warn(msg, RuntimeWarning)
        else:
            raise TypeError(msg)
        return res['x']


def _root_hybr(func, x0, args=(), jac=None,
               col_deriv=0, xtol=1.49012e-08, maxfev=0, band=None, eps=None,
               factor=100, diag=None, **unknown_options):
    """
    Find the roots of a multivariate function using MINPACK's hybrd and
    hybrj routines (modified Powell method).

    Options
    -------
    col_deriv : bool
        Specify whether the Jacobian function computes derivatives down
        the columns (faster, because there is no transpose operation).
    xtol : float
        The calculation will terminate if the relative error between two
        consecutive iterates is at most `xtol`.
    maxfev : int
        The maximum number of calls to the function. If zero, then
        ``100*(N+1)`` is the maximum where N is the number of elements
        in `x0`.
    band : tuple
        If set to a two-sequence containing the number of sub- and
        super-diagonals within the band of the Jacobi matrix, the
        Jacobi matrix is considered banded (only for ``fprime=None``).
    eps : float
        A suitable step length for the forward-difference
        approximation of the Jacobian (for ``fprime=None``). If
        `eps` is less than the machine precision, it is assumed
        that the relative errors in the functions are of the order of
        the machine precision.
    factor : float
        A parameter determining the initial step bound
        (``factor * || diag * x||``). Should be in the interval
        ``(0.1, 100)``.
    diag : sequence
        N positive entries that serve as scale factors for the
        variables.

    """
    _check_unknown_options(unknown_options)
    epsfcn = eps

    x0 = asarray(x0).flatten()
    n = len(x0)
    if not isinstance(args, tuple):
        args = (args,)
    shape, dtype = _check_func('fsolve', 'func', func, x0, args, n, (n,))
    if epsfcn is None:
        epsfcn = finfo(dtype).eps
    Dfun = jac
    if Dfun is None:
        if band is None:
            ml, mu = -10, -10
        else:
            ml, mu = band[:2]
        if maxfev == 0:
            maxfev = 200 * (n + 1)
        retval = _minpack._hybrd(func, x0, args, 1, xtol, maxfev,
                                 ml, mu, epsfcn, factor, diag)
    else:
        _check_func('fsolve', 'fprime', Dfun, x0, args, n, (n, n))
        if (maxfev == 0):
            maxfev = 100 * (n + 1)
        retval = _minpack._hybrj(func, Dfun, x0, args, 1,
                                 col_deriv, xtol, maxfev, factor, diag)

    x, status = retval[0], retval[-1]

    errors = {0: "Improper input parameters were entered.",
              1: "The solution converged.",
              2: "The number of calls to function has "
                 "reached maxfev = %d." % maxfev,
              3: "xtol=%f is too small, no further improvement "
                 "in the approximate\n  solution "
                 "is possible." % xtol,
              4: "The iteration is not making good progress, as measured "
                 "by the \n  improvement from the last five "
                 "Jacobian evaluations.",
              5: "The iteration is not making good progress, "
                 "as measured by the \n  improvement from the last "
                 "ten iterations.",
              'unknown': "An error occurred."}

    info = retval[1]
    info['fun'] = info.pop('fvec')
    sol = OptimizeResult(x=x, success=(status == 1), status=status)
    sol.update(info)
    try:
        sol['message'] = errors[status]
    except KeyError:
        sol['message'] = errors['unknown']

    return sol


LEASTSQ_SUCCESS = [1, 2, 3, 4]
LEASTSQ_FAILURE = [5, 6, 7, 8]


def leastsq(func, x0, args=(), Dfun=None, full_output=0,
            col_deriv=0, ftol=1.49012e-8, xtol=1.49012e-8,
            gtol=0.0, maxfev=0, epsfcn=None, factor=100, diag=None):
    """
    Minimize the sum of squares of a set of equations.

    ::

        x = arg min(sum(func(y)**2,axis=0))
                 y

    Parameters
    ----------
    func : callable
        Should take at least one (possibly length N vector) argument and
        return M floating point numbers. It must not return NaNs or
        fitting might fail.
    x0 : ndarray
        The starting estimate for the minimization.
    args : tuple, optional
        Any extra arguments to func are placed in this tuple.
    Dfun : callable, optional
        A function or method to compute the Jacobian of func with derivatives
        across the rows. If this is None, the Jacobian will be estimated.
    full_output : bool, optional
        non-zero to return all optional outputs.
    col_deriv : bool, optional
        non-zero to specify that the Jacobian function computes derivatives
        down the columns (faster, because there is no transpose operation).
    ftol : float, optional
        Relative error desired in the sum of squares.
    xtol : float, optional
        Relative error desired in the approximate solution.
    gtol : float, optional
        Orthogonality desired between the function vector and the columns of
        the Jacobian.
    maxfev : int, optional
        The maximum number of calls to the function. If `Dfun` is provided,
        then the default `maxfev` is 100*(N+1) where N is the number of
        elements in x0, otherwise the default `maxfev` is 200*(N+1).
    epsfcn : float, optional
        A variable used in determining a suitable step length for the forward-
        difference approximation of the Jacobian (for Dfun=None).
        Normally the actual step length will be sqrt(epsfcn)*x.
        If epsfcn is less than the machine precision, it is assumed that the
        relative errors are of the order of the machine precision.
    factor : float, optional
        A parameter determining the initial step bound
        (``factor * || diag * x||``). Should be in interval ``(0.1, 100)``.
    diag : sequence, optional
        N positive entries that serve as scale factors for the variables.

    Returns
    -------
    x : ndarray
        The solution (or the result of the last iteration for an unsuccessful
        call).
    cov_x : ndarray
        Uses the ``fjac`` and ``ipvt`` optional outputs to construct an
        estimate of the inverse of the Hessian at the solution. None if a
        singular matrix was encountered (indicates very flat curvature in
        some direction). This matrix must be multiplied by the
        residual variance to get the covariance of the
        parameter estimates -- see curve_fit.
    infodict : dict
        a dictionary of optional outputs with the keys:

        ``nfev``
            The number of function calls
        ``fvec``
            The function evaluated at the output
        ``fjac``
            A permutation of the R matrix of a QR
            factorization of the final approximate
            Jacobian matrix, stored column wise.
            Together with ipvt, the covariance of the
            estimate can be approximated.
        ``ipvt``
            An integer array of length N which defines
            a permutation matrix, p, such that
            fjac*p = q*r, where r is upper triangular
            with diagonal elements of nonincreasing
            magnitude. Column j of p is column ipvt(j)
            of the identity matrix.
        ``qtf``
            The vector (transpose(q) * fvec).

    mesg : str
        A string message giving information about the cause of failure.
    ier : int
        An integer flag. If it is equal to 1, 2, 3 or 4, the solution was
        found. Otherwise, the solution was not found. In either case, the
        optional output variable 'mesg' gives more information.

    Notes
    -----
    "leastsq" is a wrapper around MINPACK's lmdif and lmder algorithms.

    cov_x is a Jacobian approximation to the Hessian of the least squares
    objective function.
    This approximation assumes that the objective function is based on the
    difference between some observed target data (ydata) and a (non-linear)
    function of the parameters `f(xdata, params)` ::

        func(params) = ydata - f(xdata, params)

    so that the objective function is ::

        min   sum((ydata - f(xdata, params))**2, axis=0)
       params

    The solution, `x`, is always a 1D array, regardless of the shape of `x0`,
    or whether `x0` is a scalar.
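
    Examples
    --------
    A minimal sketch: fit a straight line ``y = m*x + b``. The data are
    constructed without noise, so the exact solution is ``[2.0, 1.0]``.

    >>> import numpy as np
    >>> from scipy.optimize import leastsq
    >>> def residuals(p, x, y):
    ...     return y - (p[0] * x + p[1])
    >>> x = np.array([0., 1., 2., 3.])
    >>> y = 2. * x + 1.
    >>> popt, ier = leastsq(residuals, [1., 0.], args=(x, y))
    >>> np.allclose(popt, [2., 1.])
    True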
- """
    x0 = asarray(x0).flatten()
    n = len(x0)
    if not isinstance(args, tuple):
        args = (args,)
    shape, dtype = _check_func('leastsq', 'func', func, x0, args, n)
    m = shape[0]

    if n > m:
        raise TypeError('Improper input: N=%s must not exceed M=%s' % (n, m))

    if epsfcn is None:
        epsfcn = finfo(dtype).eps

    if Dfun is None:
        if maxfev == 0:
            maxfev = 200*(n + 1)
        retval = _minpack._lmdif(func, x0, args, full_output, ftol, xtol,
                                 gtol, maxfev, epsfcn, factor, diag)
    else:
        if col_deriv:
            _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (n, m))
        else:
            _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (m, n))
        if maxfev == 0:
            maxfev = 100 * (n + 1)
        retval = _minpack._lmder(func, Dfun, x0, args, full_output,
                                 col_deriv, ftol, xtol, gtol, maxfev,
                                 factor, diag)

    errors = {0: ["Improper input parameters.", TypeError],
              1: ["Both actual and predicted relative reductions "
                  "in the sum of squares\n  are at most %f" % ftol, None],
              2: ["The relative error between two consecutive "
                  "iterates is at most %f" % xtol, None],
              3: ["Both actual and predicted relative reductions in "
                  "the sum of squares\n  are at most %f and the "
                  "relative error between two consecutive "
                  "iterates is at \n  most %f" % (ftol, xtol), None],
              4: ["The cosine of the angle between func(x) and any "
                  "column of the\n  Jacobian is at most %f in "
                  "absolute value" % gtol, None],
              5: ["Number of calls to function has reached "
                  "maxfev = %d." % maxfev, ValueError],
              6: ["ftol=%f is too small, no further reduction "
                  "in the sum of squares\n  is possible." % ftol,
                  ValueError],
              7: ["xtol=%f is too small, no further improvement in "
                  "the approximate\n  solution is possible." % xtol,
                  ValueError],
              8: ["gtol=%f is too small, func(x) is orthogonal to the "
                  "columns of\n  the Jacobian to machine "
                  "precision." % gtol, ValueError]}

    # The FORTRAN return value (possible return values are >= 0 and <= 8)
    info = retval[-1]

    if full_output:
        cov_x = None
        if info in LEASTSQ_SUCCESS:
            from numpy.dual import inv
            perm = take(eye(n), retval[1]['ipvt'] - 1, 0)
            r = triu(transpose(retval[1]['fjac'])[:n, :])
            R = dot(r, perm)
            try:
                cov_x = inv(dot(transpose(R), R))
            except (LinAlgError, ValueError):
                pass
        return (retval[0], cov_x) + retval[1:-1] + (errors[info][0], info)
    else:
        if info in LEASTSQ_FAILURE:
            warnings.warn(errors[info][0], RuntimeWarning)
        elif info == 0:
            raise errors[info][1](errors[info][0])
        return retval[0], info


def _wrap_func(func, xdata, ydata, transform):
    if transform is None:
        def func_wrapped(params):
            return func(xdata, *params) - ydata
    elif transform.ndim == 1:
        def func_wrapped(params):
            return transform * (func(xdata, *params) - ydata)
    else:
        # Chisq = (y - yd)^T C^{-1} (y-yd)
        # transform = L such that C = L L^T
        # C^{-1} = L^{-T} L^{-1}
        # Chisq = (y - yd)^T L^{-T} L^{-1} (y-yd)
        # Define (y-yd)' = L^{-1} (y-yd)
        # by solving
        # L (y-yd)' = (y-yd)
        # and minimize (y-yd)'^T (y-yd)'
        def func_wrapped(params):
            return solve_triangular(transform, func(xdata, *params) - ydata,
                                    lower=True)
    return func_wrapped


def _wrap_jac(jac, xdata, transform):
    if transform is None:
        def jac_wrapped(params):
            return jac(xdata, *params)
    elif transform.ndim == 1:
        def jac_wrapped(params):
            return transform[:, np.newaxis] * np.asarray(jac(xdata, *params))
    else:
        def jac_wrapped(params):
            return solve_triangular(transform,
                                    np.asarray(jac(xdata, *params)),
                                    lower=True)
    return jac_wrapped


def _initialize_feasible(lb, ub):
    # Pick a starting point inside the feasible region: the midpoint of
    # finite bounds, or a unit offset from a single finite bound.
    p0 = np.ones_like(lb)
    lb_finite = np.isfinite(lb)
    ub_finite = np.isfinite(ub)

    mask = lb_finite & ub_finite
    p0[mask] = 0.5 * (lb[mask] + ub[mask])

    mask = lb_finite & ~ub_finite
    p0[mask] = lb[mask] + 1

    mask = ~lb_finite & ub_finite
    p0[mask] = ub[mask] - 1

    return p0


def curve_fit(f, xdata, ydata, p0=None, sigma=None, absolute_sigma=False,
              check_finite=True, bounds=(-np.inf, np.inf), method=None,
              jac=None, **kwargs):
    """
    Use non-linear least squares to fit a function, f, to data.

    Assumes ``ydata = f(xdata, *params) + eps``.

    Parameters
    ----------
    f : callable
        The model function, f(x, ...). It must take the independent
        variable as the first argument and the parameters to fit as
        separate remaining arguments.
    xdata : An M-length sequence or a (k,M)-shaped array for functions with k predictors
        The independent variable where the data is measured.
    ydata : M-length sequence
        The dependent data --- nominally ``f(xdata, ...)``.
    p0 : None, scalar, or N-length sequence, optional
        Initial guess for the parameters. If None, then the initial
        values will all be 1 (if the number of parameters for the function
        can be determined using introspection, otherwise a ValueError
        is raised).
    sigma : None or M-length sequence or MxM array, optional
        Determines the uncertainty in `ydata`. If we define residuals as
        ``r = ydata - f(xdata, *popt)``, then the interpretation of `sigma`
        depends on its number of dimensions:

        - A 1-d `sigma` should contain values of standard deviations of
          errors in `ydata`. In this case, the optimized function is
          ``chisq = sum((r / sigma) ** 2)``.
        - A 2-d `sigma` should contain the covariance matrix of
          errors in `ydata`. In this case, the optimized function is
          ``chisq = r.T @ inv(sigma) @ r``.

          .. versionadded:: 0.19

        None (default) is equivalent to a 1-d `sigma` filled with ones.
    absolute_sigma : bool, optional
        If True, `sigma` is used in an absolute sense and the estimated
        parameter covariance `pcov` reflects these absolute values.

        If False, only the relative magnitudes of the `sigma` values matter.
        The returned parameter covariance matrix `pcov` is based on scaling
        `sigma` by a constant factor. This constant is set by demanding that
        the reduced `chisq` for the optimal parameters `popt` when using the
        *scaled* `sigma` equals unity. In other words, `sigma` is scaled to
        match the sample variance of the residuals after the fit.
        Mathematically,
        ``pcov(absolute_sigma=False) = pcov(absolute_sigma=True) * chisq(popt)/(M-N)``
    check_finite : bool, optional
        If True, check that the input arrays do not contain nans or infs,
        and raise a ValueError if they do. Setting this parameter to
        False may silently produce nonsensical results if the input arrays
        do contain nans. Default is True.
    bounds : 2-tuple of array_like, optional
        Lower and upper bounds on parameters. Defaults to no bounds.
        Each element of the tuple must be either an array with the length
        equal to the number of parameters, or a scalar (in which case the
        bound is taken to be the same for all parameters). Use ``np.inf``
        with an appropriate sign to disable bounds on all or some parameters.

        .. versionadded:: 0.17
    method : {'lm', 'trf', 'dogbox'}, optional
        Method to use for optimization. See `least_squares` for more details.
        Default is 'lm' for unconstrained problems and 'trf' if `bounds` are
        provided. The method 'lm' won't work when the number of observations
        is less than the number of variables; use 'trf' or 'dogbox' in this
        case.

        .. versionadded:: 0.17
    jac : callable, string or None, optional
        Function with signature ``jac(x, ...)`` which computes the Jacobian
        matrix of the model function with respect to parameters as a dense
        array_like structure. It will be scaled according to provided `sigma`.
        If None (default), the Jacobian will be estimated numerically.
        String keywords for 'trf' and 'dogbox' methods can be used to select
        a finite difference scheme, see `least_squares`.

        .. versionadded:: 0.18
    kwargs
        Keyword arguments passed to `leastsq` for ``method='lm'`` or
        `least_squares` otherwise.

    Returns
    -------
    popt : array
        Optimal values for the parameters so that the sum of the squared
        residuals of ``f(xdata, *popt) - ydata`` is minimized.
    pcov : 2d array
        The estimated covariance of popt. The diagonals provide the variance
        of the parameter estimate. To compute one standard deviation errors
        on the parameters use ``perr = np.sqrt(np.diag(pcov))``.

        How the `sigma` parameter affects the estimated covariance
        depends on `absolute_sigma` argument, as described above.

        If the Jacobian matrix at the solution doesn't have a full rank, then
        the 'lm' method returns a matrix filled with ``np.inf``; on the other
        hand, the 'trf' and 'dogbox' methods use the Moore-Penrose
        pseudoinverse to compute the covariance matrix.

    Raises
    ------
    ValueError
        if either `ydata` or `xdata` contain NaNs, or if incompatible options
        are used.
    RuntimeError
        if the least-squares minimization fails.
    OptimizeWarning
        if covariance of the parameters can not be estimated.

    See Also
    --------
    least_squares : Minimize the sum of squares of nonlinear functions.
    scipy.stats.linregress : Calculate a linear least squares regression for
                             two sets of measurements.

    Notes
    -----
    With ``method='lm'``, the algorithm uses the Levenberg-Marquardt algorithm
    through `leastsq`. Note that this algorithm can only deal with
    unconstrained problems.

    Box constraints can be handled by methods 'trf' and 'dogbox'. Refer to
    the docstring of `least_squares` for more information.

    Examples
    --------
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> from scipy.optimize import curve_fit

    >>> def func(x, a, b, c):
    ...     return a * np.exp(-b * x) + c

    Define the data to be fit with some noise:

    >>> xdata = np.linspace(0, 4, 50)
    >>> y = func(xdata, 2.5, 1.3, 0.5)
    >>> np.random.seed(1729)
    >>> y_noise = 0.2 * np.random.normal(size=xdata.size)
    >>> ydata = y + y_noise
    >>> plt.plot(xdata, ydata, 'b-', label='data')

    Fit for the parameters a, b, c of the function `func`:

    >>> popt, pcov = curve_fit(func, xdata, ydata)
    >>> popt
    array([ 2.55423706,  1.35190947,  0.47450618])
    >>> plt.plot(xdata, func(xdata, *popt), 'r-',
    ...          label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt))

    Constrain the optimization to the region of ``0 <= a <= 3``,
    ``0 <= b <= 1`` and ``0 <= c <= 0.5``:

    >>> popt, pcov = curve_fit(func, xdata, ydata, bounds=(0, [3., 1., 0.5]))
    >>> popt
    array([ 2.43708906,  1.        ,  0.35015434])
    >>> plt.plot(xdata, func(xdata, *popt), 'g--',
    ...          label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt))

    >>> plt.xlabel('x')
    >>> plt.ylabel('y')
    >>> plt.legend()
    >>> plt.show()
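
    Weight the fit with per-point uncertainties (a minimal sketch; the 0.2
    standard deviation matches the noise scale used above and is an
    illustrative assumption, not a measured value):

    >>> sigma_y = np.full(xdata.size, 0.2)
    >>> popt, pcov = curve_fit(func, xdata, ydata, sigma=sigma_y,
    ...                        absolute_sigma=True)
    >>> perr = np.sqrt(np.diag(pcov))  # one standard deviation errors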
- """
    if p0 is None:
        # determine number of parameters by inspecting the function
        from scipy._lib._util import getargspec_no_self as _getargspec
        args, varargs, varkw, defaults = _getargspec(f)
        if len(args) < 2:
            raise ValueError("Unable to determine number of fit parameters.")
        n = len(args) - 1
    else:
        p0 = np.atleast_1d(p0)
        n = p0.size

    lb, ub = prepare_bounds(bounds, n)
    if p0 is None:
        p0 = _initialize_feasible(lb, ub)

    bounded_problem = np.any((lb > -np.inf) | (ub < np.inf))
    if method is None:
        if bounded_problem:
            method = 'trf'
        else:
            method = 'lm'

    if method == 'lm' and bounded_problem:
        raise ValueError("Method 'lm' only works for unconstrained problems. "
                         "Use 'trf' or 'dogbox' instead.")

    # NaNs cannot be handled
    if check_finite:
        ydata = np.asarray_chkfinite(ydata)
    else:
        ydata = np.asarray(ydata)

    if isinstance(xdata, (list, tuple, np.ndarray)):
        # `xdata` is passed straight to the user-defined `f`, so allow
        # non-array_like `xdata`.
        if check_finite:
            xdata = np.asarray_chkfinite(xdata)
        else:
            xdata = np.asarray(xdata)
        # optimization may produce garbage for float32 inputs,
        # so cast to float64 (only when `xdata` is an array we created).
        xdata = xdata.astype(float)

    ydata = ydata.astype(float)

    # Determine type of sigma
    if sigma is not None:
        sigma = np.asarray(sigma)

        # if 1-d, sigma are errors, define transform = 1/sigma
        if sigma.shape == (ydata.size, ):
            transform = 1.0 / sigma
        # if 2-d, sigma is the covariance matrix,
        # define transform = L such that L L^T = C
        elif sigma.shape == (ydata.size, ydata.size):
            try:
                # scipy.linalg.cholesky requires lower=True to return L L^T = A
                transform = cholesky(sigma, lower=True)
            except LinAlgError:
                raise ValueError("`sigma` must be positive definite.")
        else:
            raise ValueError("`sigma` has incorrect shape.")
    else:
        transform = None

    func = _wrap_func(f, xdata, ydata, transform)
    if callable(jac):
        jac = _wrap_jac(jac, xdata, transform)
    elif jac is None and method != 'lm':
        jac = '2-point'

    if method == 'lm':
        # Remove full_output from kwargs, otherwise we're passing it in twice.
        return_full = kwargs.pop('full_output', False)
        res = leastsq(func, p0, Dfun=jac, full_output=1, **kwargs)
        popt, pcov, infodict, errmsg, ier = res
        cost = np.sum(infodict['fvec'] ** 2)
        if ier not in [1, 2, 3, 4]:
            raise RuntimeError("Optimal parameters not found: " + errmsg)
    else:
        # Rename maxfev (leastsq) to max_nfev (least_squares), if specified.
        if 'max_nfev' not in kwargs:
            kwargs['max_nfev'] = kwargs.pop('maxfev', None)

        res = least_squares(func, p0, jac=jac, bounds=bounds, method=method,
                            **kwargs)

        if not res.success:
            raise RuntimeError("Optimal parameters not found: " + res.message)

        cost = 2 * res.cost  # res.cost is half sum of squares!
        popt = res.x

        # Do Moore-Penrose inverse discarding zero singular values.
        _, s, VT = svd(res.jac, full_matrices=False)
        threshold = np.finfo(float).eps * max(res.jac.shape) * s[0]
        s = s[s > threshold]
        VT = VT[:s.size]
        pcov = np.dot(VT.T / s**2, VT)
        return_full = False

    warn_cov = False
    if pcov is None:
        # indeterminate covariance
        pcov = zeros((len(popt), len(popt)), dtype=float)
        pcov.fill(inf)
        warn_cov = True
    elif not absolute_sigma:
        if ydata.size > p0.size:
            s_sq = cost / (ydata.size - p0.size)
            pcov = pcov * s_sq
        else:
            pcov.fill(inf)
            warn_cov = True

    if warn_cov:
        warnings.warn('Covariance of the parameters could not be estimated',
                      category=OptimizeWarning)

    if return_full:
        return popt, pcov, infodict, errmsg, ier
    else:
        return popt, pcov


def check_gradient(fcn, Dfcn, x0, args=(), col_deriv=0):
    """Perform a simple check on the gradient for correctness.
- """
    x = atleast_1d(x0)
    n = len(x)
    x = x.reshape((n,))
    fvec = atleast_1d(fcn(x, *args))
    m = len(fvec)
    fvec = fvec.reshape((m,))
    ldfjac = m
    fjac = atleast_1d(Dfcn(x, *args))
    fjac = fjac.reshape((m, n))
    if col_deriv == 0:
        fjac = transpose(fjac)

    xp = zeros((n,), float)
    err = zeros((m,), float)
    fvecp = None
    _minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 1, err)

    fvecp = atleast_1d(fcn(xp, *args))
    fvecp = fvecp.reshape((m,))
    _minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 2, err)

    good = (product(greater(err, 0.5), axis=0))

    return (good, err)


def _del2(p0, p1, d):
    # Aitken's Del^2 extrapolation: p0 - (p1 - p0)**2 / (p2 - 2*p1 + p0),
    # where the caller supplies d = p2 - 2*p1 + p0.
    return p0 - np.square(p1 - p0) / d


def _relerr(actual, desired):
    return (actual - desired) / desired


def _fixed_point_helper(func, x0, args, xtol, maxiter, use_accel):
    p0 = x0
    for i in range(maxiter):
        p1 = func(p0, *args)
        if use_accel:
            p2 = func(p1, *args)
            d = p2 - 2.0 * p1 + p0
            p = _lazywhere(d != 0, (p0, p1, d), f=_del2, fillvalue=p2)
        else:
            p = p1
        relerr = _lazywhere(p0 != 0, (p, p0), f=_relerr, fillvalue=p)
        if np.all(np.abs(relerr) < xtol):
            return p
        p0 = p
    msg = "Failed to converge after %d iterations, value is %s" % (maxiter, p)
    raise RuntimeError(msg)


def fixed_point(func, x0, args=(), xtol=1e-8, maxiter=500, method='del2'):
    """
    Find a fixed point of the function.

    Given a function of one or more variables and a starting point, find a
    fixed point of the function: i.e., where ``func(x0) == x0``.

    Parameters
    ----------
    func : function
        Function to evaluate.
    x0 : array_like
        Initial guess for the fixed point of the function.
    args : tuple, optional
        Extra arguments to `func`.
    xtol : float, optional
        Convergence tolerance, defaults to 1e-08.
    maxiter : int, optional
        Maximum number of iterations, defaults to 500.
    method : {"del2", "iteration"}, optional
        Method of finding the fixed point, defaults to "del2",
        which uses Steffensen's Method with Aitken's ``Del^2``
        convergence acceleration [1]_. The "iteration" method simply iterates
        the function until convergence is detected, without attempting to
        accelerate the convergence.

    References
    ----------
    .. [1] Burden, Faires, "Numerical Analysis", 5th edition, pg. 80

    Examples
    --------
    >>> import numpy as np
    >>> from scipy import optimize
    >>> def func(x, c1, c2):
    ...     return np.sqrt(c1/(x+c2))
    >>> c1 = np.array([10, 12.])
    >>> c2 = np.array([3, 5.])
    >>> optimize.fixed_point(func, [1.2, 1.3], args=(c1, c2))
    array([ 1.4920333 ,  1.37228132])
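
    Repeat without Aitken acceleration (an illustrative variant; plain
    iteration converges to the same fixed point here):

    >>> sol = optimize.fixed_point(func, [1.2, 1.3], args=(c1, c2),
    ...                            method='iteration')
    >>> np.allclose(sol, [1.4920333, 1.37228132])
    True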
- """
    use_accel = {'del2': True, 'iteration': False}[method]
    x0 = _asarray_validated(x0, as_inexact=True)
    return _fixed_point_helper(func, x0, args, xtol, maxiter, use_accel)