  1. """
  2. dogleg algorithm with rectangular trust regions for least-squares minimization.
  3. The description of the algorithm can be found in [Voglis]_. The algorithm does
  4. trust-region iterations, but the shape of trust regions is rectangular as
  5. opposed to conventional elliptical. The intersection of a trust region and
  6. an initial feasible region is again some rectangle. Thus on each iteration a
  7. bound-constrained quadratic optimization problem is solved.
  8. A quadratic problem is solved by well-known dogleg approach, where the
  9. function is minimized along piecewise-linear "dogleg" path [NumOpt]_,
  10. Chapter 4. If Jacobian is not rank-deficient then the function is decreasing
  11. along this path, and optimization amounts to simply following along this
  12. path as long as a point stays within the bounds. A constrained Cauchy step
  13. (along the anti-gradient) is considered for safety in rank deficient cases,
  14. in this situations the convergence might be slow.
  15. If during iterations some variable hit the initial bound and the component
  16. of anti-gradient points outside the feasible region, then a next dogleg step
  17. won't make any progress. At this state such variables satisfy first-order
  18. optimality conditions and they are excluded before computing a next dogleg
  19. step.
  20. Gauss-Newton step can be computed exactly by `numpy.linalg.lstsq` (for dense
  21. Jacobian matrices) or by iterative procedure `scipy.sparse.linalg.lsmr` (for
  22. dense and sparse matrices, or Jacobian being LinearOperator). The second
  23. option allows to solve very large problems (up to couple of millions of
  24. residuals on a regular PC), provided the Jacobian matrix is sufficiently
  25. sparse. But note that dogbox is not very good for solving problems with
  26. large number of constraints, because of variables exclusion-inclusion on each
  27. iteration (a required number of function evaluations might be high or accuracy
  28. of a solution will be poor), thus its large-scale usage is probably limited
  29. to unconstrained problems.
  30. References
  31. ----------
  32. .. [Voglis] C. Voglis and I. E. Lagaris, "A Rectangular Trust Region Dogleg
  33. Approach for Unconstrained and Bound Constrained Nonlinear
  34. Optimization", WSEAS International Conference on Applied
  35. Mathematics, Corfu, Greece, 2004.
  36. .. [NumOpt] J. Nocedal and S. J. Wright, "Numerical optimization, 2nd edition".
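
Examples
--------
In SciPy, `dogbox` is normally driven through `scipy.optimize.least_squares`
with ``method='dogbox'`` rather than called directly. A minimal sketch with
hypothetical data::

    import numpy as np
    from scipy.optimize import least_squares

    def residuals(p, t, y):
        # Residuals of an exponential-decay fit (toy example).
        return p[0] * np.exp(-p[1] * t) - y

    t = np.linspace(0, 1, 20)
    y = 2.0 * np.exp(-1.3 * t)
    res = least_squares(residuals, x0=[1.0, 1.0], args=(t, y),
                        bounds=([0.0, 0.0], [10.0, 10.0]), method='dogbox')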
  37. """
from __future__ import division, print_function, absolute_import

import numpy as np
from numpy.linalg import lstsq, norm

from scipy.sparse.linalg import LinearOperator, aslinearoperator, lsmr
from scipy.optimize import OptimizeResult
from scipy._lib.six import string_types

from .common import (
    step_size_to_bound, in_bounds, update_tr_radius, evaluate_quadratic,
    build_quadratic_1d, minimize_quadratic_1d, compute_grad,
    compute_jac_scale, check_termination, scale_for_robust_loss_function,
    print_header_nonlinear, print_iteration_nonlinear)


def lsmr_operator(Jop, d, active_set):
    """Compute LinearOperator to use in LSMR by dogbox algorithm.

    `active_set` mask is used to exclude active variables from computations
    of matrix-vector products.
    """
    m, n = Jop.shape

    def matvec(x):
        # Zero out active variables so they don't contribute to J @ x.
        x_free = x.ravel().copy()
        x_free[active_set] = 0
        return Jop.matvec(x_free * d)

    def rmatvec(x):
        # Zero out active components of the scaled J.T @ x.
        r = d * Jop.rmatvec(x)
        r[active_set] = 0
        return r

    return LinearOperator((m, n), matvec=matvec, rmatvec=rmatvec, dtype=float)
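
# A quick sketch of what `lsmr_operator` computes (hypothetical toy data):
# with J = [[1, 2], [3, 4]], d = [1, 1] and active_set = [False, True],
# matvec([u, v]) returns J @ [u, 0] (the active column is masked out) and
# rmatvec(x) zeroes the active component of d * (J.T @ x), so LSMR
# effectively works with the free columns of J only.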


def find_intersection(x, tr_bounds, lb, ub):
    """Find intersection of trust-region bounds and initial bounds.

    Returns
    -------
    lb_total, ub_total : ndarray with shape of x
        Lower and upper bounds of the intersection region.
    orig_l, orig_u : ndarray of bool with shape of x
        True means that an original bound is taken as a corresponding bound
        in the intersection region.
    tr_l, tr_u : ndarray of bool with shape of x
        True means that a trust-region bound is taken as a corresponding bound
        in the intersection region.
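
    Examples
    --------
    A hypothetical illustration: for ``x = [0.5, 1.5]``,
    ``tr_bounds = [1.0, 1.0]``, ``lb = [0.0, 0.0]`` and ``ub = [2.0, 2.0]``,
    the trust-region box around ``x`` is [-1, 1] in both (shifted)
    components, while the shifted original bounds are [-0.5, 1.5] and
    [-1.5, 0.5]. The intersection is ``lb_total = [-0.5, -1.0]`` and
    ``ub_total = [1.0, 0.5]``, with ``orig_l = [True, False]``,
    ``orig_u = [False, True]``, ``tr_l = [False, True]`` and
    ``tr_u = [True, False]``.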
  76. """
    lb_centered = lb - x
    ub_centered = ub - x

    lb_total = np.maximum(lb_centered, -tr_bounds)
    ub_total = np.minimum(ub_centered, tr_bounds)

    orig_l = np.equal(lb_total, lb_centered)
    orig_u = np.equal(ub_total, ub_centered)

    tr_l = np.equal(lb_total, -tr_bounds)
    tr_u = np.equal(ub_total, tr_bounds)

    return lb_total, ub_total, orig_l, orig_u, tr_l, tr_u


def dogleg_step(x, newton_step, g, a, b, tr_bounds, lb, ub):
    """Find dogleg step in a rectangular region.

    Returns
    -------
    step : ndarray, shape (n,)
        Computed dogleg step.
    bound_hits : ndarray of int, shape (n,)
        Each component shows whether a corresponding variable hits the
        initial bound after the step is taken:

            * 0 - a variable doesn't hit the bound.
            * -1 - lower bound is hit.
            * 1 - upper bound is hit.
    tr_hit : bool
        Whether the step hit the boundary of the trust region.
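
    Examples
    --------
    A hypothetical one-dimensional illustration: take ``x = [0.0]``,
    ``g = [3.0]``, ``newton_step = [-3.0]``, ``tr_bounds = [1.0]``,
    ``lb = [-2.0]`` and ``ub = [2.0]``, with ``a = 9.0`` and ``b = -9.0``
    as the quadratic coefficients along ``-g``. The intersection region is
    [-1, 1], so the Newton step is infeasible; the constrained Cauchy step
    is clipped by the trust region at ``[-1.0]``, and the function returns
    ``step = [-1.0]``, ``bound_hits = [0]`` and ``tr_hit = True``.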
  100. """
    lb_total, ub_total, orig_l, orig_u, tr_l, tr_u = find_intersection(
        x, tr_bounds, lb, ub
    )
    bound_hits = np.zeros_like(x, dtype=int)

    if in_bounds(newton_step, lb_total, ub_total):
        return newton_step, bound_hits, False

    to_bounds, _ = step_size_to_bound(np.zeros_like(x), -g, lb_total, ub_total)

    # The classical dogleg algorithm would check if the Cauchy step fits into
    # the bounds, and just return its constrained version if not. But in a
    # rectangular trust region it makes sense to try to improve the
    # constrained Cauchy step too. Thus we don't distinguish these two cases.
    cauchy_step = -minimize_quadratic_1d(a, b, 0, to_bounds)[0] * g

    step_diff = newton_step - cauchy_step
    step_size, hits = step_size_to_bound(cauchy_step, step_diff,
                                         lb_total, ub_total)
    bound_hits[(hits < 0) & orig_l] = -1
    bound_hits[(hits > 0) & orig_u] = 1
    tr_hit = np.any((hits < 0) & tr_l | (hits > 0) & tr_u)

    return cauchy_step + step_size * step_diff, bound_hits, tr_hit


def dogbox(fun, jac, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev, x_scale,
           loss_function, tr_solver, tr_options, verbose):
    f = f0
    f_true = f.copy()
    nfev = 1

    J = J0
    njev = 1

    if loss_function is not None:
        rho = loss_function(f)
        cost = 0.5 * np.sum(rho[0])
        J, f = scale_for_robust_loss_function(J, f, rho)
    else:
        cost = 0.5 * np.dot(f, f)

    g = compute_grad(J, f)

    jac_scale = isinstance(x_scale, string_types) and x_scale == 'jac'
    if jac_scale:
        scale, scale_inv = compute_jac_scale(J)
    else:
        scale, scale_inv = x_scale, 1 / x_scale

    # Initial trust-region radius in scaled variables.
    Delta = norm(x0 * scale_inv, ord=np.inf)
    if Delta == 0:
        Delta = 1.0

    # Mark variables that start exactly on a bound: -1 for lower, 1 for upper.
    on_bound = np.zeros_like(x0, dtype=int)
    on_bound[np.equal(x0, lb)] = -1
    on_bound[np.equal(x0, ub)] = 1

    x = x0
    step = np.empty_like(x0)

    if max_nfev is None:
        max_nfev = x0.size * 100

    termination_status = None
    iteration = 0
    step_norm = None
    actual_reduction = None

    if verbose == 2:
        print_header_nonlinear()
    while True:
        # A variable is active if it sits on a bound and the anti-gradient
        # points outside the feasible region.
        active_set = on_bound * g < 0
        free_set = ~active_set

        g_free = g[free_set]
        g_full = g.copy()
        g[active_set] = 0
        g_norm = norm(g, ord=np.inf)

        if g_norm < gtol:
            termination_status = 1

        if verbose == 2:
            print_iteration_nonlinear(iteration, nfev, cost, actual_reduction,
                                      step_norm, g_norm)

        if termination_status is not None or nfev == max_nfev:
            break

        x_free = x[free_set]
        lb_free = lb[free_set]
        ub_free = ub[free_set]
        scale_free = scale[free_set]

        # Compute the (Gauss-)Newton step and build a quadratic model for
        # the Cauchy step.
        if tr_solver == 'exact':
            J_free = J[:, free_set]
            newton_step = lstsq(J_free, -f, rcond=-1)[0]

            # Coefficients for the quadratic model along the anti-gradient.
            a, b = build_quadratic_1d(J_free, g_free, -g_free)
        elif tr_solver == 'lsmr':
            Jop = aslinearoperator(J)

            # We compute the lsmr step in scaled variables and then transform
            # it back to normal variables. If lsmr gave the exact
            # least-squares solution, this would be equivalent to not doing
            # any transformations, but from experience it's better this way.

            # We pass active_set to make computations as if we selected
            # the free subset of J columns, but without actually doing any
            # slicing, which is expensive for sparse matrices and impossible
            # for LinearOperator.
            lsmr_op = lsmr_operator(Jop, scale, active_set)
            newton_step = -lsmr(lsmr_op, f, **tr_options)[0][free_set]
            newton_step *= scale_free

            # Components of g for active variables were zeroed, so this call
            # is correct and equivalent to using J_free and g_free.
            a, b = build_quadratic_1d(Jop, g, -g)
        actual_reduction = -1.0
        while actual_reduction <= 0 and nfev < max_nfev:
            tr_bounds = Delta * scale_free

            step_free, on_bound_free, tr_hit = dogleg_step(
                x_free, newton_step, g_free, a, b, tr_bounds, lb_free, ub_free)

            step.fill(0.0)
            step[free_set] = step_free

            if tr_solver == 'exact':
                predicted_reduction = -evaluate_quadratic(J_free, g_free,
                                                          step_free)
            elif tr_solver == 'lsmr':
                predicted_reduction = -evaluate_quadratic(Jop, g, step)

            x_new = x + step
            f_new = fun(x_new)
            nfev += 1

            step_h_norm = norm(step * scale_inv, ord=np.inf)

            if not np.all(np.isfinite(f_new)):
                Delta = 0.25 * step_h_norm
                continue

            # Usual trust-region step quality estimation.
            if loss_function is not None:
                cost_new = loss_function(f_new, cost_only=True)
            else:
                cost_new = 0.5 * np.dot(f_new, f_new)
            actual_reduction = cost - cost_new

            Delta, ratio = update_tr_radius(
                Delta, actual_reduction, predicted_reduction,
                step_h_norm, tr_hit
            )

            step_norm = norm(step)
            termination_status = check_termination(
                actual_reduction, cost, step_norm, norm(x), ratio, ftol, xtol)

            if termination_status is not None:
                break

        if actual_reduction > 0:
            on_bound[free_set] = on_bound_free

            x = x_new
            # Set variables exactly at the boundary.
            mask = on_bound == -1
            x[mask] = lb[mask]
            mask = on_bound == 1
            x[mask] = ub[mask]

            f = f_new
            f_true = f.copy()

            cost = cost_new

            J = jac(x, f)
            njev += 1

            if loss_function is not None:
                rho = loss_function(f)
                J, f = scale_for_robust_loss_function(J, f, rho)

            g = compute_grad(J, f)

            if jac_scale:
                scale, scale_inv = compute_jac_scale(J, scale_inv)
        else:
            step_norm = 0
            actual_reduction = 0

        iteration += 1

    if termination_status is None:
        termination_status = 0

    return OptimizeResult(
        x=x, cost=cost, fun=f_true, jac=J, grad=g_full, optimality=g_norm,
        active_mask=on_bound, nfev=nfev, njev=njev, status=termination_status)