trf_linear.py

  1. """The adaptation of Trust Region Reflective algorithm for a linear
  2. least-squares problem."""
  3. from __future__ import division, print_function, absolute_import
  4. import numpy as np
  5. from numpy.linalg import norm
  6. from scipy.linalg import qr, solve_triangular
  7. from scipy.sparse.linalg import lsmr
  8. from scipy.optimize import OptimizeResult
  9. from .givens_elimination import givens_elimination
  10. from .common import (
  11. EPS, step_size_to_bound, find_active_constraints, in_bounds,
  12. make_strictly_feasible, build_quadratic_1d, evaluate_quadratic,
  13. minimize_quadratic_1d, CL_scaling_vector, reflective_transformation,
  14. print_header_linear, print_iteration_linear, compute_grad,
  15. regularized_lsq_operator, right_multiplied_operator)
def regularized_lsq_with_qr(m, n, R, QTb, perm, diag, copy_R=True):
    """Solve regularized least squares using information from QR-decomposition.

    The initial problem is to solve the following system in a least-squares
    sense::

        A x = b
        D x = 0

    where D is a diagonal matrix. The method is based on a QR decomposition
    of the form A P = Q R, where P is a column permutation matrix, Q is an
    orthogonal matrix and R is an upper triangular matrix.

    Parameters
    ----------
    m, n : int
        Initial shape of A.
    R : ndarray, shape (n, n)
        Upper triangular matrix from QR decomposition of A.
    QTb : ndarray, shape (n,)
        First n components of Q^T b.
    perm : ndarray, shape (n,)
        Array defining column permutation of A, such that the i-th column of
        P is the perm[i]-th column of the identity matrix.
    diag : ndarray, shape (n,)
        Array containing diagonal elements of D.
    copy_R : bool, optional
        If True (default), work on a copy of R, otherwise R is modified
        in place.

    Returns
    -------
    x : ndarray, shape (n,)
        Found least-squares solution.
    """
    if copy_R:
        R = R.copy()
    v = QTb.copy()

    givens_elimination(R, v, diag[perm])

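    # After the Givens elimination, R and v correspond to the triangular
    # factor and transformed right-hand side of the augmented system [A; D]
    # (with columns permuted by P), so the regularization is already folded
    # in.

    # Drop rows/columns whose diagonal entry of R is numerically negligible
    # (a simple rank detection) before the triangular solve.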
    abs_diag_R = np.abs(np.diag(R))
    threshold = EPS * max(m, n) * np.max(abs_diag_R)
    nns, = np.nonzero(abs_diag_R > threshold)

    R = R[np.ix_(nns, nns)]
    v = v[nns]

    x = np.zeros(n)
    x[perm[nns]] = solve_triangular(R, v)

    return x


def backtracking(A, g, x, p, theta, p_dot_g, lb, ub):
    """Find an appropriate step size using backtracking line search."""
    alpha = 1
    while True:
        x_new, _ = reflective_transformation(x + alpha * p, lb, ub)
        step = x_new - x
        cost_change = -evaluate_quadratic(A, g, step)
        if cost_change > -0.1 * alpha * p_dot_g:
            break
        alpha *= 0.5

    active = find_active_constraints(x_new, lb, ub)
    if np.any(active != 0):
        x_new, _ = reflective_transformation(x + theta * alpha * p, lb, ub)
        x_new = make_strictly_feasible(x_new, lb, ub, rstep=0)
        step = x_new - x
        cost_change = -evaluate_quadratic(A, g, step)

    return x_new, step, cost_change


def select_step(x, A_h, g_h, c_h, p, p_h, d, lb, ub, theta):
    """Select the best step according to Trust Region Reflective algorithm."""
    if in_bounds(x + p, lb, ub):
        return p

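    # The full step p leaves the feasible box. Build and compare three
    # candidates: the step truncated at the bound, its reflection off that
    # bound, and a step along the anti-gradient, all evaluated on the
    # quadratic model in the scaled ("hat") variables.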
    p_stride, hits = step_size_to_bound(x, p, lb, ub)
    r_h = np.copy(p_h)
    r_h[hits.astype(bool)] *= -1
    r = d * r_h

    # Restrict the step so that it hits the bound.
    p *= p_stride
    p_h *= p_stride
    x_on_bound = x + p

    # Find the step size along the reflected direction.
    r_stride_u, _ = step_size_to_bound(x_on_bound, r, lb, ub)

    # Stay interior.
    r_stride_l = (1 - theta) * r_stride_u
    r_stride_u *= theta

    if r_stride_u > 0:
        a, b, c = build_quadratic_1d(A_h, g_h, r_h, s0=p_h, diag=c_h)
        r_stride, r_value = minimize_quadratic_1d(
            a, b, r_stride_l, r_stride_u, c=c)
        r_h = p_h + r_h * r_stride
        r = d * r_h
    else:
        r_value = np.inf

    # Now correct p_h to make it strictly interior.
    p_h *= theta
    p *= theta
    p_value = evaluate_quadratic(A_h, g_h, p_h, diag=c_h)

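    # Candidate along the anti-gradient (scaled steepest descent), limited so
    # that it stays strictly inside the bounds.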
    ag_h = -g_h
    ag = d * ag_h
    ag_stride_u, _ = step_size_to_bound(x, ag, lb, ub)
    ag_stride_u *= theta
    a, b = build_quadratic_1d(A_h, g_h, ag_h, diag=c_h)
    ag_stride, ag_value = minimize_quadratic_1d(a, b, 0, ag_stride_u)
    ag *= ag_stride

    if p_value < r_value and p_value < ag_value:
        return p
    elif r_value < p_value and r_value < ag_value:
        return r
    else:
        return ag


def trf_linear(A, b, x_lsq, lb, ub, tol, lsq_solver, lsmr_tol, max_iter,
               verbose):
    m, n = A.shape
    x, _ = reflective_transformation(x_lsq, lb, ub)
    x = make_strictly_feasible(x, lb, ub, rstep=0.1)

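    # Prepare the inner least-squares solver: a pivoted QR factorization of A
    # for the 'exact' solver, or preallocated buffers and tolerances for
    # 'lsmr'.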
    if lsq_solver == 'exact':
        QT, R, perm = qr(A, mode='economic', pivoting=True)
        QT = QT.T

        if m < n:
            R = np.vstack((R, np.zeros((n - m, n))))

        QTr = np.zeros(n)
        k = min(m, n)
    elif lsq_solver == 'lsmr':
        r_aug = np.zeros(m + n)
        auto_lsmr_tol = False
        if lsmr_tol is None:
            lsmr_tol = 1e-2 * tol
        elif lsmr_tol == 'auto':
            auto_lsmr_tol = True

    r = A.dot(x) - b
    g = compute_grad(A, r)
    cost = 0.5 * np.dot(r, r)
    initial_cost = cost

    termination_status = None
    step_norm = None
    cost_change = None

    if max_iter is None:
        max_iter = 100

    if verbose == 2:
        print_header_linear()

    for iteration in range(max_iter):
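        # Coleman-Li scaling vector v and its derivative dv; the infinity
        # norm of the scaled gradient serves as the first-order optimality
        # measure.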
        v, dv = CL_scaling_vector(x, g, lb, ub)
        g_scaled = g * v
        g_norm = norm(g_scaled, ord=np.inf)
        if g_norm < tol:
            termination_status = 1

        if verbose == 2:
            print_iteration_linear(iteration, cost, cost_change,
                                   step_norm, g_norm)

        if termination_status is not None:
            break

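        # Build the scaled ("hat") quantities: d defines the change of
        # variables x = d * x_h, g_h is the gradient in the scaled variables,
        # and diag_h = g * dv enters the quadratic model as a diagonal term.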
        diag_h = g * dv
        diag_root_h = diag_h ** 0.5
        d = v ** 0.5
        g_h = d * g

        A_h = right_multiplied_operator(A, d)
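        # Solve the regularized least-squares subproblem for the step p_h in
        # the scaled variables, using either the precomputed QR factorization
        # or LSMR on the augmented operator.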
        if lsq_solver == 'exact':
            QTr[:k] = QT.dot(r)
            p_h = -regularized_lsq_with_qr(m, n, R * d[perm], QTr, perm,
                                           diag_root_h, copy_R=False)
        elif lsq_solver == 'lsmr':
            lsmr_op = regularized_lsq_operator(A_h, diag_root_h)
            r_aug[:m] = r
            if auto_lsmr_tol:
                eta = 1e-2 * min(0.5, g_norm)
                lsmr_tol = max(EPS, min(0.1, eta * g_norm))
            p_h = -lsmr(lsmr_op, r_aug, atol=lsmr_tol, btol=lsmr_tol)[0]

        p = d * p_h

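        # p should be a descent direction; a positive dot product with the
        # gradient indicates numerical trouble and triggers termination
        # (status -1) at the top of the next iteration.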
        p_dot_g = np.dot(p, g)
        if p_dot_g > 0:
            termination_status = -1

        theta = 1 - min(0.005, g_norm)
        step = select_step(x, A_h, g_h, diag_h, p, p_h, d, lb, ub, theta)
        cost_change = -evaluate_quadratic(A, g, step)

        # Perhaps almost never executed: the idea is that `p` is a descent
        # direction, so we must be able to find an acceptable cost decrease
        # using simple "backtracking", otherwise the algorithm's logic would
        # break.
        if cost_change < 0:
            x, step, cost_change = backtracking(
                A, g, x, p, theta, p_dot_g, lb, ub)
        else:
            x = make_strictly_feasible(x + step, lb, ub, rstep=0)

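        # Re-evaluate the residual and gradient at the new iterate; declare
        # convergence (status 2) when the cost decrease is small relative to
        # the current cost.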
        step_norm = norm(step)
        r = A.dot(x) - b
        g = compute_grad(A, r)

        if cost_change < tol * cost:
            termination_status = 2

        cost = 0.5 * np.dot(r, r)

    if termination_status is None:
        termination_status = 0

    active_mask = find_active_constraints(x, lb, ub, rtol=tol)

    return OptimizeResult(
        x=x, fun=r, cost=cost, optimality=g_norm, active_mask=active_mask,
        nit=iteration + 1, status=termination_status,
        initial_cost=initial_cost)