"""Generic interface for least-square minimization."""
from __future__ import division, print_function, absolute_import

from warnings import warn

import numpy as np
from numpy.linalg import norm

from scipy.sparse import issparse, csr_matrix
from scipy.sparse.linalg import LinearOperator
from scipy.optimize import _minpack, OptimizeResult
from scipy.optimize._numdiff import approx_derivative, group_columns
from scipy._lib.six import string_types

from .trf import trf
from .dogbox import dogbox
from .common import EPS, in_bounds, make_strictly_feasible


TERMINATION_MESSAGES = {
    -1: "Improper input parameters status returned from `leastsq`",
    0: "The maximum number of function evaluations is exceeded.",
    1: "`gtol` termination condition is satisfied.",
    2: "`ftol` termination condition is satisfied.",
    3: "`xtol` termination condition is satisfied.",
    4: "Both `ftol` and `xtol` termination conditions are satisfied."
}


FROM_MINPACK_TO_COMMON = {
    0: -1,  # Improper input parameters from MINPACK.
    1: 2,
    2: 3,
    3: 4,
    4: 1,
    5: 0
    # There are 6, 7, 8 for too small tolerance parameters,
    # but we guard against it by checking ftol, xtol, gtol beforehand.
}


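# Wrapper around the MINPACK routines used by method='lm': it maps our options
# (ftol, xtol, gtol, max_nfev, x_scale, diff_step) onto MINPACK's conventions,
# calls _lmdif (numerical Jacobian) or _lmder (analytic Jacobian), and
# repackages the raw output as an OptimizeResult with the same fields as the
# other solvers.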
def call_minpack(fun, x0, jac, ftol, xtol, gtol, max_nfev, x_scale, diff_step):
    n = x0.size

    if diff_step is None:
        epsfcn = EPS
    else:
        epsfcn = diff_step**2

    # Compute MINPACK's `diag`, which is inverse of our `x_scale` and
    # ``x_scale='jac'`` corresponds to ``diag=None``.
    if isinstance(x_scale, string_types) and x_scale == 'jac':
        diag = None
    else:
        diag = 1 / x_scale

    full_output = True
    col_deriv = False
    factor = 100.0

    if jac is None:
        if max_nfev is None:
            # n squared to account for Jacobian evaluations.
            max_nfev = 100 * n * (n + 1)
        x, info, status = _minpack._lmdif(
            fun, x0, (), full_output, ftol, xtol, gtol,
            max_nfev, epsfcn, factor, diag)
    else:
        if max_nfev is None:
            max_nfev = 100 * n
        x, info, status = _minpack._lmder(
            fun, jac, x0, (), full_output, col_deriv,
            ftol, xtol, gtol, max_nfev, factor, diag)

    f = info['fvec']

    if callable(jac):
        J = jac(x)
    else:
        J = np.atleast_2d(approx_derivative(fun, x))

    cost = 0.5 * np.dot(f, f)
    g = J.T.dot(f)
    g_norm = norm(g, ord=np.inf)

    nfev = info['nfev']
    njev = info.get('njev', None)

    status = FROM_MINPACK_TO_COMMON[status]
    active_mask = np.zeros_like(x0, dtype=int)

    return OptimizeResult(
        x=x, cost=cost, fun=f, jac=J, grad=g, optimality=g_norm,
        active_mask=active_mask, nfev=nfev, njev=njev, status=status)


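# Broadcast scalar bounds to 1-d arrays of length n so that downstream code
# can always assume lb and ub have the same shape as x0.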
def prepare_bounds(bounds, n):
    lb, ub = [np.asarray(b, dtype=float) for b in bounds]
    if lb.ndim == 0:
        lb = np.resize(lb, n)

    if ub.ndim == 0:
        ub = np.resize(ub, n)

    return lb, ub


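# Tolerances below machine epsilon are not meaningful; clip them to EPS and
# warn the user.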
def check_tolerance(ftol, xtol, gtol):
    message = "{} is too low, setting to machine epsilon {}."
    if ftol < EPS:
        warn(message.format("`ftol`", EPS))
        ftol = EPS
    if xtol < EPS:
        warn(message.format("`xtol`", EPS))
        xtol = EPS
    if gtol < EPS:
        warn(message.format("`gtol`", EPS))
        gtol = EPS

    return ftol, xtol, gtol


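# `x_scale` must be either the string 'jac' or an array_like of finite,
# strictly positive numbers broadcastable to the shape of x0.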
def check_x_scale(x_scale, x0):
    if isinstance(x_scale, string_types) and x_scale == 'jac':
        return x_scale

    try:
        x_scale = np.asarray(x_scale, dtype=float)
        valid = np.all(np.isfinite(x_scale)) and np.all(x_scale > 0)
    except (ValueError, TypeError):
        valid = False

    if not valid:
        raise ValueError("`x_scale` must be 'jac' or array_like with "
                         "positive numbers.")

    if x_scale.ndim == 0:
        x_scale = np.resize(x_scale, x0.shape)

    if x_scale.shape != x0.shape:
        raise ValueError("Inconsistent shapes between `x_scale` and `x0`.")

    return x_scale


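# Validate the user-supplied sparsity pattern and precompute column groups
# (Curtis-Powell-Reid style grouping), which lets approx_derivative estimate
# several Jacobian columns per function evaluation.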
def check_jac_sparsity(jac_sparsity, m, n):
    if jac_sparsity is None:
        return None

    if not issparse(jac_sparsity):
        jac_sparsity = np.atleast_2d(jac_sparsity)

    if jac_sparsity.shape != (m, n):
        raise ValueError("`jac_sparsity` has wrong shape.")

    return jac_sparsity, group_columns(jac_sparsity)


# Loss functions.
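# Each built-in loss fills a preallocated array `rho` of shape (3, m) in
# place: rho[0] = rho(z), rho[1] = rho'(z), rho[2] = rho''(z). When
# `cost_only` is True only rho[0] is computed.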
def huber(z, rho, cost_only):
    mask = z <= 1
    rho[0, mask] = z[mask]
    rho[0, ~mask] = 2 * z[~mask]**0.5 - 1
    if cost_only:
        return
    rho[1, mask] = 1
    rho[1, ~mask] = z[~mask]**-0.5
    rho[2, mask] = 0
    rho[2, ~mask] = -0.5 * z[~mask]**-1.5


def soft_l1(z, rho, cost_only):
    t = 1 + z
    rho[0] = 2 * (t**0.5 - 1)
    if cost_only:
        return
    rho[1] = t**-0.5
    rho[2] = -0.5 * t**-1.5


def cauchy(z, rho, cost_only):
    rho[0] = np.log1p(z)
    if cost_only:
        return
    t = 1 + z
    rho[1] = 1 / t
    rho[2] = -1 / t**2


def arctan(z, rho, cost_only):
    rho[0] = np.arctan(z)
    if cost_only:
        return
    t = 1 + z**2
    rho[1] = 1 / t
    rho[2] = -2 * z / t**2


IMPLEMENTED_LOSSES = dict(linear=None, huber=huber, soft_l1=soft_l1,
                          cauchy=cauchy, arctan=arctan)


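# Returns a closure that evaluates the selected loss on the residual vector,
# applying the f_scale scaling described in the `least_squares` docstring:
# rho_(f**2) = C**2 * rho(f**2 / C**2) with C = f_scale. For loss='linear' it
# returns None and the plain sum of squares is used. A user-supplied callable
# receives the 1-d array z = (f / f_scale)**2 and must return an array of
# shape (3, m); a minimal sketch of such a callable (illustrative only, not
# part of this module):
#
#     def my_cauchy_like_loss(z):
#         rho = np.empty((3, z.size))
#         rho[0] = np.log1p(z)          # rho(z)
#         rho[1] = 1 / (1 + z)          # rho'(z)
#         rho[2] = -1 / (1 + z)**2      # rho''(z)
#         return rho
#
#     least_squares(fun, x0, loss=my_cauchy_like_loss)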
def construct_loss_function(m, loss, f_scale):
    if loss == 'linear':
        return None

    if not callable(loss):
        loss = IMPLEMENTED_LOSSES[loss]
        rho = np.empty((3, m))

        def loss_function(f, cost_only=False):
            z = (f / f_scale) ** 2
            loss(z, rho, cost_only=cost_only)
            if cost_only:
                return 0.5 * f_scale ** 2 * np.sum(rho[0])
            rho[0] *= f_scale ** 2
            rho[2] /= f_scale ** 2
            return rho
    else:
        def loss_function(f, cost_only=False):
            z = (f / f_scale) ** 2
            rho = loss(z)
            if cost_only:
                return 0.5 * f_scale ** 2 * np.sum(rho[0])
            rho[0] *= f_scale ** 2
            rho[2] /= f_scale ** 2
            return rho

    return loss_function


def least_squares(
        fun, x0, jac='2-point', bounds=(-np.inf, np.inf), method='trf',
        ftol=1e-8, xtol=1e-8, gtol=1e-8, x_scale=1.0, loss='linear',
        f_scale=1.0, diff_step=None, tr_solver=None, tr_options={},
        jac_sparsity=None, max_nfev=None, verbose=0, args=(), kwargs={}):
    """Solve a nonlinear least-squares problem with bounds on the variables.

    Given the residuals f(x) (an m-dimensional real function of n real
    variables) and the loss function rho(s) (a scalar function), `least_squares`
    finds a local minimum of the cost function F(x)::

        minimize F(x) = 0.5 * sum(rho(f_i(x)**2), i = 0, ..., m - 1)
        subject to lb <= x <= ub

    The purpose of the loss function rho(s) is to reduce the influence of
    outliers on the solution.
    Parameters
    ----------
    fun : callable
        Function which computes the vector of residuals, with the signature
        ``fun(x, *args, **kwargs)``, i.e., the minimization proceeds with
        respect to its first argument. The argument ``x`` passed to this
        function is an ndarray of shape (n,) (never a scalar, even for n=1).
        It must return a 1-d array_like of shape (m,) or a scalar. If the
        argument ``x`` is complex or the function ``fun`` returns complex
        residuals, it must be wrapped in a real function of real arguments,
        as shown at the end of the Examples section.
    x0 : array_like with shape (n,) or float
        Initial guess on independent variables. If float, it will be treated
        as a 1-d array with one element.
    jac : {'2-point', '3-point', 'cs', callable}, optional
        Method of computing the Jacobian matrix (an m-by-n matrix, where
        element (i, j) is the partial derivative of f[i] with respect to
        x[j]). The keywords select a finite difference scheme for numerical
        estimation. The scheme '3-point' is more accurate, but requires
        twice as many operations as '2-point' (default). The scheme 'cs'
        uses complex steps, and while potentially the most accurate, it is
        applicable only when `fun` correctly handles complex inputs and
        can be analytically continued to the complex plane. Method 'lm'
        always uses the '2-point' scheme. If callable, it is used as
        ``jac(x, *args, **kwargs)`` and should return a good approximation
        (or the exact value) for the Jacobian as an array_like (np.atleast_2d
        is applied), a sparse matrix or a `scipy.sparse.linalg.LinearOperator`.
    bounds : 2-tuple of array_like, optional
        Lower and upper bounds on independent variables. Defaults to no bounds.
        Each array must match the size of `x0` or be a scalar, in the latter
        case a bound will be the same for all variables. Use ``np.inf`` with
        an appropriate sign to disable bounds on all or some variables.
    method : {'trf', 'dogbox', 'lm'}, optional
        Algorithm to perform minimization.

            * 'trf' : Trust Region Reflective algorithm, particularly suitable
              for large sparse problems with bounds. Generally robust method.
            * 'dogbox' : dogleg algorithm with rectangular trust regions,
              typical use case is small problems with bounds. Not recommended
              for problems with rank-deficient Jacobian.
            * 'lm' : Levenberg-Marquardt algorithm as implemented in MINPACK.
              Doesn't handle bounds and sparse Jacobians. Usually the most
              efficient method for small unconstrained problems.

        Default is 'trf'. See Notes for more information.
    ftol : float, optional
        Tolerance for termination by the change of the cost function. Default
        is 1e-8. The optimization process is stopped when ``dF < ftol * F``,
        and there was an adequate agreement between a local quadratic model and
        the true model in the last step.
    xtol : float, optional
        Tolerance for termination by the change of the independent variables.
        Default is 1e-8. The exact condition depends on the `method` used:

            * For 'trf' and 'dogbox' : ``norm(dx) < xtol * (xtol + norm(x))``.
            * For 'lm' : ``Delta < xtol * norm(xs)``, where ``Delta`` is
              a trust-region radius and ``xs`` is the value of ``x``
              scaled according to `x_scale` parameter (see below).

    gtol : float, optional
        Tolerance for termination by the norm of the gradient. Default is 1e-8.
        The exact condition depends on the `method` used:

            * For 'trf' : ``norm(g_scaled, ord=np.inf) < gtol``, where
              ``g_scaled`` is the value of the gradient scaled to account for
              the presence of the bounds [STIR]_.
            * For 'dogbox' : ``norm(g_free, ord=np.inf) < gtol``, where
              ``g_free`` is the gradient with respect to the variables which
              are not in the optimal state on the boundary.
            * For 'lm' : the maximum absolute value of the cosine of angles
              between columns of the Jacobian and the residual vector is less
              than `gtol`, or the residual vector is zero.

    x_scale : array_like or 'jac', optional
        Characteristic scale of each variable. Setting `x_scale` is equivalent
        to reformulating the problem in scaled variables ``xs = x / x_scale``.
        An alternative view is that the size of a trust region along j-th
        dimension is proportional to ``x_scale[j]``. Improved convergence may
        be achieved by setting `x_scale` such that a step of a given size
        along any of the scaled variables has a similar effect on the cost
        function. If set to 'jac', the scale is iteratively updated using the
        inverse norms of the columns of the Jacobian matrix (as described in
        [JJMore]_).
    loss : str or callable, optional
        Determines the loss function. The following keyword values are allowed:

            * 'linear' (default) : ``rho(z) = z``. Gives a standard
              least-squares problem.
            * 'soft_l1' : ``rho(z) = 2 * ((1 + z)**0.5 - 1)``. The smooth
              approximation of l1 (absolute value) loss. Usually a good
              choice for robust least squares.
            * 'huber' : ``rho(z) = z if z <= 1 else 2*z**0.5 - 1``. Works
              similarly to 'soft_l1'.
            * 'cauchy' : ``rho(z) = ln(1 + z)``. Severely weakens outliers'
              influence, but may cause difficulties in the optimization
              process.
            * 'arctan' : ``rho(z) = arctan(z)``. Limits a maximum loss on
              a single residual, has properties similar to 'cauchy'.

        If callable, it must take a 1-d ndarray ``z=f**2`` and return an
        array_like with shape (3, m) where row 0 contains function values,
        row 1 contains first derivatives and row 2 contains second
        derivatives. Method 'lm' supports only 'linear' loss.
    f_scale : float, optional
        Value of soft margin between inlier and outlier residuals, default
        is 1.0. The loss function is evaluated as follows
        ``rho_(f**2) = C**2 * rho(f**2 / C**2)``, where ``C`` is `f_scale`,
        and ``rho`` is determined by `loss` parameter. This parameter has
        no effect with ``loss='linear'``, but for other `loss` values it is
        of crucial importance.
    max_nfev : None or int, optional
        Maximum number of function evaluations before the termination.
        If None (default), the value is chosen automatically:

            * For 'trf' and 'dogbox' : 100 * n.
            * For 'lm' : 100 * n if `jac` is callable and 100 * n * (n + 1)
              otherwise (because 'lm' counts function calls in Jacobian
              estimation).
    diff_step : None or array_like, optional
        Determines the relative step size for the finite difference
        approximation of the Jacobian. The actual step is computed as
        ``x * diff_step``. If None (default), then `diff_step` is taken to be
        a conventional "optimal" power of machine epsilon for the finite
        difference scheme used [NR]_.
    tr_solver : {None, 'exact', 'lsmr'}, optional
        Method for solving trust-region subproblems, relevant only for 'trf'
        and 'dogbox' methods.

            * 'exact' is suitable for not very large problems with dense
              Jacobian matrices. The computational complexity per iteration is
              comparable to a singular value decomposition of the Jacobian
              matrix.
            * 'lsmr' is suitable for problems with sparse and large Jacobian
              matrices. It uses the iterative procedure
              `scipy.sparse.linalg.lsmr` for finding a solution of a linear
              least-squares problem and only requires matrix-vector product
              evaluations.

        If None (default) the solver is chosen based on the type of Jacobian
        returned on the first iteration.
    tr_options : dict, optional
        Keyword options passed to trust-region solver.

            * ``tr_solver='exact'``: `tr_options` are ignored.
            * ``tr_solver='lsmr'``: options for `scipy.sparse.linalg.lsmr`.
              Additionally ``method='trf'`` supports 'regularize' option
              (bool, default is True) which adds a regularization term to the
              normal equation, which improves convergence if the Jacobian is
              rank-deficient [Byrd]_ (eq. 3.4).

    jac_sparsity : {None, array_like, sparse matrix}, optional
        Defines the sparsity structure of the Jacobian matrix for finite
        difference estimation, its shape must be (m, n). If the Jacobian has
        only few non-zero elements in *each* row, providing the sparsity
        structure will greatly speed up the computations [Curtis]_. A zero
        entry means that a corresponding element in the Jacobian is identically
        zero. If provided, forces the use of 'lsmr' trust-region solver.
        If None (default) then dense differencing will be used. Has no effect
        for 'lm' method.
    verbose : {0, 1, 2}, optional
        Level of algorithm's verbosity:

            * 0 (default) : work silently.
            * 1 : display a termination report.
            * 2 : display progress during iterations (not supported by 'lm'
              method).

    args, kwargs : tuple and dict, optional
        Additional arguments passed to `fun` and `jac`. Both empty by default.
        The calling signature is ``fun(x, *args, **kwargs)`` and the same for
        `jac`.
    Returns
    -------
    `OptimizeResult` with the following fields defined:

    x : ndarray, shape (n,)
        Solution found.
    cost : float
        Value of the cost function at the solution.
    fun : ndarray, shape (m,)
        Vector of residuals at the solution.
    jac : ndarray, sparse matrix or LinearOperator, shape (m, n)
        Modified Jacobian matrix at the solution, in the sense that J^T J
        is a Gauss-Newton approximation of the Hessian of the cost function.
        The type is the same as the one used by the algorithm.
    grad : ndarray, shape (n,)
        Gradient of the cost function at the solution.
    optimality : float
        First-order optimality measure. In unconstrained problems, it is always
        the uniform norm of the gradient. In constrained problems, it is the
        quantity which was compared with `gtol` during iterations.
    active_mask : ndarray of int, shape (n,)
        Each component shows whether a corresponding constraint is active
        (that is, whether a variable is at the bound):

            * 0 : a constraint is not active.
            * -1 : a lower bound is active.
            * 1 : an upper bound is active.

        Might be somewhat arbitrary for 'trf' method as it generates a sequence
        of strictly feasible iterates and `active_mask` is determined within a
        tolerance threshold.
    nfev : int
        Number of function evaluations done. Methods 'trf' and 'dogbox' do not
        count function calls for numerical Jacobian approximation, as opposed
        to 'lm' method.
    njev : int or None
        Number of Jacobian evaluations done. If numerical Jacobian
        approximation is used in 'lm' method, it is set to None.
    status : int
        The reason for algorithm termination:

            * -1 : improper input parameters status returned from MINPACK.
            * 0 : the maximum number of function evaluations is exceeded.
            * 1 : `gtol` termination condition is satisfied.
            * 2 : `ftol` termination condition is satisfied.
            * 3 : `xtol` termination condition is satisfied.
            * 4 : both `ftol` and `xtol` termination conditions are satisfied.

    message : str
        Verbal description of the termination reason.
    success : bool
        True if one of the convergence criteria is satisfied (`status` > 0).
    See Also
    --------
    leastsq : A legacy wrapper for the MINPACK implementation of the
        Levenberg-Marquardt algorithm.
    curve_fit : Least-squares minimization applied to a curve fitting problem.
    Notes
    -----
    Method 'lm' (Levenberg-Marquardt) calls a wrapper over least-squares
    algorithms implemented in MINPACK (lmder, lmdif). It runs the
    Levenberg-Marquardt algorithm formulated as a trust-region type algorithm.
    The implementation is based on paper [JJMore]_; it is very robust and
    efficient with a lot of smart tricks. It should be your first choice
    for unconstrained problems. Note that it doesn't support bounds. Also
    it doesn't work when m < n.

    Method 'trf' (Trust Region Reflective) is motivated by the process of
    solving a system of equations, which constitute the first-order optimality
    condition for a bound-constrained minimization problem as formulated in
    [STIR]_. The algorithm iteratively solves trust-region subproblems
    augmented by a special diagonal quadratic term and with trust-region shape
    determined by the distance from the bounds and the direction of the
    gradient. These enhancements help to avoid making steps directly into
    bounds and efficiently explore the whole space of variables. To further
    improve convergence, the algorithm considers search directions reflected
    from the bounds. To obey theoretical requirements, the algorithm keeps
    iterates strictly feasible. With dense Jacobians trust-region subproblems
    are solved by an exact method very similar to the one described in
    [JJMore]_ (and implemented in MINPACK). The difference from the MINPACK
    implementation is that a singular value decomposition of a Jacobian
    matrix is done once per iteration, instead of a QR decomposition and series
    of Givens rotation eliminations. For large sparse Jacobians a 2-d subspace
    approach of solving trust-region subproblems is used [STIR]_, [Byrd]_.
    The subspace is spanned by a scaled gradient and an approximate
    Gauss-Newton solution delivered by `scipy.sparse.linalg.lsmr`. When no
    constraints are imposed the algorithm is very similar to MINPACK and has
    generally comparable performance. The algorithm works quite robustly in
    unbounded and bounded problems, thus it is chosen as a default algorithm.

    Method 'dogbox' operates in a trust-region framework, but considers
    rectangular trust regions as opposed to conventional ellipsoids [Voglis]_.
    The intersection of a current trust region and initial bounds is again
    rectangular, so on each iteration a quadratic minimization problem subject
    to bound constraints is solved approximately by Powell's dogleg method
    [NumOpt]_. The required Gauss-Newton step can be computed exactly for
    dense Jacobians or approximately by `scipy.sparse.linalg.lsmr` for large
    sparse Jacobians. The algorithm is likely to exhibit slow convergence when
    the rank of Jacobian is less than the number of variables. The algorithm
    often outperforms 'trf' in bounded problems with a small number of
    variables.

    Robust loss functions are implemented as described in [BA]_. The idea
    is to modify a residual vector and a Jacobian matrix on each iteration
    such that computed gradient and Gauss-Newton Hessian approximation match
    the true gradient and Hessian approximation of the cost function. Then
    the algorithm proceeds in a normal way, i.e. robust loss functions are
    implemented as a simple wrapper over standard least-squares algorithms.

    .. versionadded:: 0.17.0
    References
    ----------
    .. [STIR] M. A. Branch, T. F. Coleman, and Y. Li, "A Subspace, Interior,
              and Conjugate Gradient Method for Large-Scale Bound-Constrained
              Minimization Problems," SIAM Journal on Scientific Computing,
              Vol. 21, Number 1, pp 1-23, 1999.
    .. [NR] William H. Press et. al., "Numerical Recipes. The Art of Scientific
            Computing. 3rd edition", Sec. 5.7.
    .. [Byrd] R. H. Byrd, R. B. Schnabel and G. A. Shultz, "Approximate
              solution of the trust region problem by minimization over
              two-dimensional subspaces", Math. Programming, 40, pp. 247-263,
              1988.
    .. [Curtis] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
                sparse Jacobian matrices", Journal of the Institute of
                Mathematics and its Applications, 13, pp. 117-120, 1974.
    .. [JJMore] J. J. More, "The Levenberg-Marquardt Algorithm: Implementation
                and Theory," Numerical Analysis, ed. G. A. Watson, Lecture
                Notes in Mathematics 630, Springer Verlag, pp. 105-116, 1977.
    .. [Voglis] C. Voglis and I. E. Lagaris, "A Rectangular Trust Region
                Dogleg Approach for Unconstrained and Bound Constrained
                Nonlinear Optimization", WSEAS International Conference on
                Applied Mathematics, Corfu, Greece, 2004.
    .. [NumOpt] J. Nocedal and S. J. Wright, "Numerical optimization,
                2nd edition", Chapter 4.
    .. [BA] B. Triggs et. al., "Bundle Adjustment - A Modern Synthesis",
            Proceedings of the International Workshop on Vision Algorithms:
            Theory and Practice, pp. 298-372, 1999.
    Examples
    --------
    In this example we find a minimum of the Rosenbrock function without bounds
    on independent variables.

    >>> def fun_rosenbrock(x):
    ...     return np.array([10 * (x[1] - x[0]**2), (1 - x[0])])

    Notice that we only provide the vector of the residuals. The algorithm
    constructs the cost function as a sum of squares of the residuals, which
    gives the Rosenbrock function. The exact minimum is at ``x = [1.0, 1.0]``.

    >>> from scipy.optimize import least_squares
    >>> x0_rosenbrock = np.array([2, 2])
    >>> res_1 = least_squares(fun_rosenbrock, x0_rosenbrock)
    >>> res_1.x
    array([ 1.,  1.])
    >>> res_1.cost
    9.8669242910846867e-30
    >>> res_1.optimality
    8.8928864934219529e-14

    We now constrain the variables, in such a way that the previous solution
    becomes infeasible. Specifically, we require that ``x[1] >= 1.5``, and
    leave ``x[0]`` unconstrained. To this end, we specify the `bounds`
    parameter to `least_squares` in the form ``bounds=([-np.inf, 1.5], np.inf)``.

    We also provide the analytic Jacobian:

    >>> def jac_rosenbrock(x):
    ...     return np.array([
    ...         [-20 * x[0], 10],
    ...         [-1, 0]])

    Putting this all together, we see that the new solution lies on the bound:

    >>> res_2 = least_squares(fun_rosenbrock, x0_rosenbrock, jac_rosenbrock,
    ...                       bounds=([-np.inf, 1.5], np.inf))
    >>> res_2.x
    array([ 1.22437075,  1.5       ])
    >>> res_2.cost
    0.025213093946805685
    >>> res_2.optimality
    1.5885401433157753e-07

    Now we solve a system of equations (i.e., the cost function should be zero
    at a minimum) for a Broyden tridiagonal vector-valued function of 100000
    variables:

    >>> def fun_broyden(x):
    ...     f = (3 - x) * x + 1
    ...     f[1:] -= x[:-1]
    ...     f[:-1] -= 2 * x[1:]
    ...     return f

    The corresponding Jacobian matrix is sparse. We tell the algorithm to
    estimate it by finite differences and provide the sparsity structure of
    Jacobian to significantly speed up this process.

    >>> from scipy.sparse import lil_matrix
    >>> def sparsity_broyden(n):
    ...     sparsity = lil_matrix((n, n), dtype=int)
    ...     i = np.arange(n)
    ...     sparsity[i, i] = 1
    ...     i = np.arange(1, n)
    ...     sparsity[i, i - 1] = 1
    ...     i = np.arange(n - 1)
    ...     sparsity[i, i + 1] = 1
    ...     return sparsity
    ...
    >>> n = 100000
    >>> x0_broyden = -np.ones(n)
    ...
    >>> res_3 = least_squares(fun_broyden, x0_broyden,
    ...                       jac_sparsity=sparsity_broyden(n))
    >>> res_3.cost
    4.5687069299604613e-23
    >>> res_3.optimality
    1.1650454296851518e-11
    Let's also solve a curve fitting problem using a robust loss function to
    take care of outliers in the data. Define the model function as
    ``y = a + b * exp(c * t)``, where t is a predictor variable, y is an
    observation and a, b, c are parameters to estimate.

    First, define the function which generates the data with noise and
    outliers, define the model parameters, and generate data:

    >>> def gen_data(t, a, b, c, noise=0, n_outliers=0, random_state=0):
    ...     y = a + b * np.exp(t * c)
    ...
    ...     rnd = np.random.RandomState(random_state)
    ...     error = noise * rnd.randn(t.size)
    ...     outliers = rnd.randint(0, t.size, n_outliers)
    ...     error[outliers] *= 10
    ...
    ...     return y + error
    ...
    >>> a = 0.5
    >>> b = 2.0
    >>> c = -1
    >>> t_min = 0
    >>> t_max = 10
    >>> n_points = 15
    ...
    >>> t_train = np.linspace(t_min, t_max, n_points)
    >>> y_train = gen_data(t_train, a, b, c, noise=0.1, n_outliers=3)

    Define the function for computing residuals and the initial estimate of
    the parameters:

    >>> def fun(x, t, y):
    ...     return x[0] + x[1] * np.exp(x[2] * t) - y
    ...
    >>> x0 = np.array([1.0, 1.0, 0.0])

    Compute a standard least-squares solution:

    >>> res_lsq = least_squares(fun, x0, args=(t_train, y_train))

    Now compute two solutions with two different robust loss functions. The
    parameter `f_scale` is set to 0.1, meaning that inlier residuals should
    not significantly exceed 0.1 (the noise level used).

    >>> res_soft_l1 = least_squares(fun, x0, loss='soft_l1', f_scale=0.1,
    ...                             args=(t_train, y_train))
    >>> res_log = least_squares(fun, x0, loss='cauchy', f_scale=0.1,
    ...                         args=(t_train, y_train))

    And finally plot all the curves. We see that by selecting an appropriate
    `loss` we can get estimates close to optimal even in the presence of
    strong outliers. But keep in mind that generally it is recommended to try
    'soft_l1' or 'huber' losses first (if at all necessary) as the other two
    options may cause difficulties in the optimization process.

    >>> t_test = np.linspace(t_min, t_max, n_points * 10)
    >>> y_true = gen_data(t_test, a, b, c)
    >>> y_lsq = gen_data(t_test, *res_lsq.x)
    >>> y_soft_l1 = gen_data(t_test, *res_soft_l1.x)
    >>> y_log = gen_data(t_test, *res_log.x)
    ...
    >>> import matplotlib.pyplot as plt
    >>> plt.plot(t_train, y_train, 'o')
    >>> plt.plot(t_test, y_true, 'k', linewidth=2, label='true')
    >>> plt.plot(t_test, y_lsq, label='linear loss')
    >>> plt.plot(t_test, y_soft_l1, label='soft_l1 loss')
    >>> plt.plot(t_test, y_log, label='cauchy loss')
    >>> plt.xlabel("t")
    >>> plt.ylabel("y")
    >>> plt.legend()
    >>> plt.show()
    In the next example, we show how complex-valued residual functions of
    complex variables can be optimized with ``least_squares()``. Consider the
    following function:

    >>> def f(z):
    ...     return z - (0.5 + 0.5j)

    We wrap it into a function of real variables that returns real residuals
    by simply handling the real and imaginary parts as independent variables:

    >>> def f_wrap(x):
    ...     fx = f(x[0] + 1j*x[1])
    ...     return np.array([fx.real, fx.imag])

    Thus, instead of the original m-dimensional complex function of n complex
    variables we optimize a 2m-dimensional real function of 2n real variables:

    >>> from scipy.optimize import least_squares
    >>> res_wrapped = least_squares(f_wrap, (0.1, 0.1), bounds=([0, 0], [1, 1]))
    >>> z = res_wrapped.x[0] + res_wrapped.x[1]*1j
    >>> z
    (0.49999999999925893+0.49999999999925893j)
    """
    if method not in ['trf', 'dogbox', 'lm']:
        raise ValueError("`method` must be 'trf', 'dogbox' or 'lm'.")

    if jac not in ['2-point', '3-point', 'cs'] and not callable(jac):
        raise ValueError("`jac` must be '2-point', '3-point', 'cs' or "
                         "callable.")

    if tr_solver not in [None, 'exact', 'lsmr']:
        raise ValueError("`tr_solver` must be None, 'exact' or 'lsmr'.")

    if loss not in IMPLEMENTED_LOSSES and not callable(loss):
        raise ValueError("`loss` must be one of {0} or a callable."
                         .format(IMPLEMENTED_LOSSES.keys()))

    if method == 'lm' and loss != 'linear':
        raise ValueError("method='lm' supports only 'linear' loss function.")

    if verbose not in [0, 1, 2]:
        raise ValueError("`verbose` must be in [0, 1, 2].")

    if len(bounds) != 2:
        raise ValueError("`bounds` must contain 2 elements.")

    if max_nfev is not None and max_nfev <= 0:
        raise ValueError("`max_nfev` must be None or positive integer.")

    if np.iscomplexobj(x0):
        raise ValueError("`x0` must be real.")

    x0 = np.atleast_1d(x0).astype(float)

    if x0.ndim > 1:
        raise ValueError("`x0` must have at most 1 dimension.")

    lb, ub = prepare_bounds(bounds, x0.shape[0])

    if method == 'lm' and not np.all((lb == -np.inf) & (ub == np.inf)):
        raise ValueError("Method 'lm' doesn't support bounds.")

    if lb.shape != x0.shape or ub.shape != x0.shape:
        raise ValueError("Inconsistent shapes between bounds and `x0`.")

    if np.any(lb >= ub):
        raise ValueError("Each lower bound must be strictly less than each "
                         "upper bound.")

    if not in_bounds(x0, lb, ub):
        raise ValueError("`x0` is infeasible.")

    x_scale = check_x_scale(x_scale, x0)
    ftol, xtol, gtol = check_tolerance(ftol, xtol, gtol)

    def fun_wrapped(x):
        return np.atleast_1d(fun(x, *args, **kwargs))

    if method == 'trf':
        x0 = make_strictly_feasible(x0, lb, ub)

    f0 = fun_wrapped(x0)

    if f0.ndim != 1:
        raise ValueError("`fun` must return at most 1-d array_like.")

    if not np.all(np.isfinite(f0)):
        raise ValueError("Residuals are not finite in the initial point.")

    n = x0.size
    m = f0.size

    if method == 'lm' and m < n:
        raise ValueError("Method 'lm' doesn't work when the number of "
                         "residuals is less than the number of variables.")

    loss_function = construct_loss_function(m, loss, f_scale)
    if callable(loss):
        rho = loss_function(f0)
        if rho.shape != (3, m):
            raise ValueError("The return value of `loss` callable has wrong "
                             "shape.")
        initial_cost = 0.5 * np.sum(rho[0])
    elif loss_function is not None:
        initial_cost = loss_function(f0, cost_only=True)
    else:
        initial_cost = 0.5 * np.dot(f0, f0)

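    # Set up jac_wrapped so that the solvers always receive the Jacobian in a
    # consistent form: when `jac` is callable it is passed through as a dense
    # ndarray, csr_matrix or LinearOperator (matching the type returned at x0);
    # otherwise it is estimated by finite differences via approx_derivative
    # ('lm' lets MINPACK do its own differencing and gets jac_wrapped=None).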
    if callable(jac):
        J0 = jac(x0, *args, **kwargs)

        if issparse(J0):
            J0 = csr_matrix(J0)

            def jac_wrapped(x, _=None):
                return csr_matrix(jac(x, *args, **kwargs))

        elif isinstance(J0, LinearOperator):
            def jac_wrapped(x, _=None):
                return jac(x, *args, **kwargs)

        else:
            J0 = np.atleast_2d(J0)

            def jac_wrapped(x, _=None):
                return np.atleast_2d(jac(x, *args, **kwargs))

    else:  # Estimate Jacobian by finite differences.
        if method == 'lm':
            if jac_sparsity is not None:
                raise ValueError("method='lm' does not support "
                                 "`jac_sparsity`.")

            if jac != '2-point':
                warn("jac='{0}' works equivalently to '2-point' "
                     "for method='lm'.".format(jac))

            J0 = jac_wrapped = None
        else:
            if jac_sparsity is not None and tr_solver == 'exact':
                raise ValueError("tr_solver='exact' is incompatible "
                                 "with `jac_sparsity`.")

            jac_sparsity = check_jac_sparsity(jac_sparsity, m, n)

            def jac_wrapped(x, f):
                J = approx_derivative(fun, x, rel_step=diff_step, method=jac,
                                      f0=f, bounds=bounds, args=args,
                                      kwargs=kwargs, sparsity=jac_sparsity)
                if J.ndim != 2:  # J is guaranteed not sparse.
                    J = np.atleast_2d(J)

                return J

            J0 = jac_wrapped(x0, f0)

    if J0 is not None:
        if J0.shape != (m, n):
            raise ValueError(
                "The return value of `jac` has wrong shape: expected {0}, "
                "actual {1}.".format((m, n), J0.shape))

        if not isinstance(J0, np.ndarray):
            if method == 'lm':
                raise ValueError("method='lm' works only with dense "
                                 "Jacobian matrices.")

            if tr_solver == 'exact':
                raise ValueError(
                    "tr_solver='exact' works only with dense "
                    "Jacobian matrices.")

        jac_scale = isinstance(x_scale, string_types) and x_scale == 'jac'
        if isinstance(J0, LinearOperator) and jac_scale:
            raise ValueError("x_scale='jac' can't be used when `jac` "
                             "returns LinearOperator.")

        if tr_solver is None:
            if isinstance(J0, np.ndarray):
                tr_solver = 'exact'
            else:
                tr_solver = 'lsmr'

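    # Dispatch to the selected backend. Each returns an OptimizeResult whose
    # `message` and `success` fields are filled in below from `status`.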
    if method == 'lm':
        result = call_minpack(fun_wrapped, x0, jac_wrapped, ftol, xtol, gtol,
                              max_nfev, x_scale, diff_step)

    elif method == 'trf':
        result = trf(fun_wrapped, jac_wrapped, x0, f0, J0, lb, ub, ftol, xtol,
                     gtol, max_nfev, x_scale, loss_function, tr_solver,
                     tr_options.copy(), verbose)

    elif method == 'dogbox':
        if tr_solver == 'lsmr' and 'regularize' in tr_options:
            warn("The keyword 'regularize' in `tr_options` is not relevant "
                 "for 'dogbox' method.")
            tr_options = tr_options.copy()
            del tr_options['regularize']

        result = dogbox(fun_wrapped, jac_wrapped, x0, f0, J0, lb, ub, ftol,
                        xtol, gtol, max_nfev, x_scale, loss_function,
                        tr_solver, tr_options, verbose)

    result.message = TERMINATION_MESSAGES[result.status]
    result.success = result.status > 0

    if verbose >= 1:
        print(result.message)
        print("Function evaluations {0}, initial cost {1:.4e}, final cost "
              "{2:.4e}, first-order optimality {3:.2e}."
              .format(result.nfev, initial_cost, result.cost,
                      result.optimality))

    return result