bdf.py

from __future__ import division, print_function, absolute_import

import numpy as np
from scipy.linalg import lu_factor, lu_solve
from scipy.sparse import issparse, csc_matrix, eye
from scipy.sparse.linalg import splu
from scipy.optimize._numdiff import group_columns

from .common import (validate_max_step, validate_tol, select_initial_step,
                     norm, EPS, num_jac, validate_first_step,
                     warn_extraneous)
from .base import OdeSolver, DenseOutput


MAX_ORDER = 5
NEWTON_MAXITER = 4
MIN_FACTOR = 0.2
MAX_FACTOR = 10


def compute_R(order, factor):
    """Compute the matrix for changing the differences array."""
    I = np.arange(1, order + 1)[:, None]
    J = np.arange(1, order + 1)
    M = np.zeros((order + 1, order + 1))
    M[1:, 1:] = (I - 1 - factor * J) / I
    M[0] = 1
    return np.cumprod(M, axis=0)
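

# For i, j >= 1 the matrix returned by compute_R satisfies
# R[i, j] = prod_{m=1..i} (m - 1 - factor * j) / m, with R[0, :] = 1.
# Together with U = compute_R(order, 1) it rescales the array of backward
# differences D when the step size is multiplied by `factor`; see change_D
# below and reference [2] of the BDF docstring.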
def change_D(D, order, factor):
    """Change differences array in-place when step size is changed."""
    R = compute_R(order, factor)
    U = compute_R(order, 1)
    RU = R.dot(U)
    D[:order + 1] = np.dot(RU.T, D[:order + 1])
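

# solve_bdf_system runs a simplified Newton iteration, with the factorization
# LU of (I - c * J) held fixed, on the corrector equation of the BDF/NDF
# formula written in terms of the correction d = y - y_predict:
#     d + psi - c * fun(t_new, y_predict + d) = 0.
# The iteration is abandoned early when the observed geometric convergence
# rate suggests that the tolerance cannot be reached within NEWTON_MAXITER
# iterations.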
def solve_bdf_system(fun, t_new, y_predict, c, psi, LU, solve_lu, scale, tol):
    """Solve the algebraic system resulting from BDF method."""
    d = 0
    y = y_predict.copy()
    dy_norm_old = None
    converged = False
    for k in range(NEWTON_MAXITER):
        f = fun(t_new, y)
        if not np.all(np.isfinite(f)):
            break

        dy = solve_lu(LU, c * f - psi - d)
        dy_norm = norm(dy / scale)

        if dy_norm_old is None:
            rate = None
        else:
            rate = dy_norm / dy_norm_old

        if (rate is not None and (rate >= 1 or
                rate ** (NEWTON_MAXITER - k) / (1 - rate) * dy_norm > tol)):
            break

        y += dy
        d += dy

        if (dy_norm == 0 or
                rate is not None and rate / (1 - rate) * dy_norm < tol):
            converged = True
            break

        dy_norm_old = dy_norm

    return converged, k + 1, y, d


class BDF(OdeSolver):
    """Implicit method based on backward-differentiation formulas.

    This is a variable order method with the order varying automatically from
    1 to 5. The general framework of the BDF algorithm is described in [1]_.
    This class implements a quasi-constant step size as explained in [2]_.
    The error estimation strategy for the constant-step BDF is derived in
    [3]_. An accuracy enhancement using modified formulas (NDF) [2]_ is also
    implemented.

    Can be applied in the complex domain.

    Parameters
    ----------
    fun : callable
        Right-hand side of the system. The calling signature is ``fun(t, y)``.
        Here ``t`` is a scalar, and there are two options for the ndarray
        ``y``: It can either have shape (n,); then ``fun`` must return
        array_like with shape (n,). Alternatively it can have shape (n, k);
        then ``fun`` must return an array_like with shape (n, k), i.e., each
        column corresponds to a single column in ``y``. The choice between the
        two options is determined by the `vectorized` argument (see below).
        The vectorized implementation allows a faster approximation of the
        Jacobian by finite differences (required for this solver).
    t0 : float
        Initial time.
    y0 : array_like, shape (n,)
        Initial state.
    t_bound : float
        Boundary time - the integration won't continue beyond it. It also
        determines the direction of the integration.
    first_step : float or None, optional
        Initial step size. Default is ``None``, which means that the algorithm
        should choose.
    max_step : float, optional
        Maximum allowed step size. Default is np.inf, i.e., the step size is
        not bounded and determined solely by the solver.
    rtol, atol : float and array_like, optional
        Relative and absolute tolerances. The solver keeps the local error
        estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
        relative accuracy (number of correct digits). But if a component of
        `y` is approximately below `atol`, the error only needs to fall within
        the same `atol` threshold, and the number of correct digits is not
        guaranteed. If components of y have different scales, it might be
        beneficial to set different `atol` values for different components by
        passing array_like with shape (n,) for `atol`. Default values are
        1e-3 for `rtol` and 1e-6 for `atol`.
    jac : {None, array_like, sparse_matrix, callable}, optional
        Jacobian matrix of the right-hand side of the system with respect to
        y, required by this method. The Jacobian matrix has shape (n, n) and
        its element (i, j) is equal to ``d f_i / d y_j``.
        There are three ways to define the Jacobian:

            * If array_like or sparse_matrix, the Jacobian is assumed to
              be constant.
            * If callable, the Jacobian is assumed to depend on both
              t and y; it will be called as ``jac(t, y)`` as necessary.
              For the 'Radau' and 'BDF' methods, the return value might be a
              sparse matrix.
            * If None (default), the Jacobian will be approximated by
              finite differences.

        It is generally recommended to provide the Jacobian rather than
        relying on a finite-difference approximation.
    jac_sparsity : {None, array_like, sparse matrix}, optional
        Defines a sparsity structure of the Jacobian matrix for a
        finite-difference approximation. Its shape must be (n, n). This
        argument is ignored if `jac` is not `None`. If the Jacobian has only
        a few non-zero elements in *each* row, providing the sparsity
        structure will greatly speed up the computations [4]_. A zero entry
        means that a corresponding element in the Jacobian is always zero. If
        None (default), the Jacobian is assumed to be dense.
    vectorized : bool, optional
        Whether `fun` is implemented in a vectorized fashion. Default is
        False.

    Attributes
    ----------
    n : int
        Number of equations.
    status : string
        Current status of the solver: 'running', 'finished' or 'failed'.
    t_bound : float
        Boundary time.
    direction : float
        Integration direction: +1 or -1.
    t : float
        Current time.
    y : ndarray
        Current state.
    t_old : float
        Previous time. None if no steps were made yet.
    step_size : float
        Size of the last successful step. None if no steps were made yet.
    nfev : int
        Number of evaluations of the right-hand side.
    njev : int
        Number of evaluations of the Jacobian.
    nlu : int
        Number of LU decompositions.

    References
    ----------
    .. [1] G. D. Byrne, A. C. Hindmarsh, "A Polyalgorithm for the Numerical
           Solution of Ordinary Differential Equations", ACM Transactions on
           Mathematical Software, Vol. 1, No. 1, pp. 71-96, March 1975.
    .. [2] L. F. Shampine, M. W. Reichelt, "The MATLAB ODE Suite", SIAM J.
           Sci. Comput., Vol. 18, No. 1, pp. 1-22, January 1997.
    .. [3] E. Hairer, G. Wanner, "Solving Ordinary Differential Equations I:
           Nonstiff Problems", Sec. III.2.
    .. [4] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
           sparse Jacobian matrices", Journal of the Institute of Mathematics
           and its Applications, 13, pp. 117-120, 1974.
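
    Examples
    --------
    A minimal sketch of driving the solver step by step, assuming the class
    is exposed as ``scipy.integrate.BDF`` (in typical use it is selected
    through ``scipy.integrate.solve_ivp(..., method='BDF')``):

    >>> from scipy.integrate import BDF
    >>> def exponential_decay(t, y):
    ...     return -0.5 * y
    >>> solver = BDF(exponential_decay, t0=0, y0=[10.0], t_bound=10)
    >>> while solver.status == 'running':
    ...     message = solver.step()
    >>> solver.status
    'finished'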
    """

    def __init__(self, fun, t0, y0, t_bound, max_step=np.inf,
                 rtol=1e-3, atol=1e-6, jac=None, jac_sparsity=None,
                 vectorized=False, first_step=None, **extraneous):
        warn_extraneous(extraneous)
        super(BDF, self).__init__(fun, t0, y0, t_bound, vectorized,
                                  support_complex=True)
        self.max_step = validate_max_step(max_step)
        self.rtol, self.atol = validate_tol(rtol, atol, self.n)
        f = self.fun(self.t, self.y)
        if first_step is None:
            self.h_abs = select_initial_step(self.fun, self.t, self.y, f,
                                             self.direction, 1,
                                             self.rtol, self.atol)
        else:
            self.h_abs = validate_first_step(first_step, t0, t_bound)
        self.h_abs_old = None
        self.error_norm_old = None

        self.newton_tol = max(10 * EPS / rtol, min(0.03, rtol ** 0.5))

        self.jac_factor = None
        self.jac, self.J = self._validate_jac(jac, jac_sparsity)
        if issparse(self.J):
            def lu(A):
                self.nlu += 1
                return splu(A)

            def solve_lu(LU, b):
                return LU.solve(b)

            I = eye(self.n, format='csc', dtype=self.y.dtype)
        else:
            def lu(A):
                self.nlu += 1
                return lu_factor(A, overwrite_a=True)

            def solve_lu(LU, b):
                return lu_solve(LU, b, overwrite_b=True)

            I = np.identity(self.n, dtype=self.y.dtype)

        self.lu = lu
        self.solve_lu = solve_lu
        self.I = I
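
        # NDF modification coefficients kappa from Shampine & Reichelt [2]
        # (index 0 is a placeholder; the zero at order 5 leaves that formula
        # as plain BDF).  gamma[k] = sum_{m=1..k} 1/m, alpha = (1 - kappa) *
        # gamma, and error_const[k] = kappa[k] * gamma[k] + 1 / (k + 1)
        # multiplies the (k + 1)-th backward difference in the local error
        # estimate.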
        kappa = np.array([0, -0.1850, -1/9, -0.0823, -0.0415, 0])
        self.gamma = np.hstack((0, np.cumsum(1 / np.arange(1, MAX_ORDER + 1))))
        self.alpha = (1 - kappa) * self.gamma
        self.error_const = kappa * self.gamma + 1 / np.arange(1, MAX_ORDER + 2)

        D = np.empty((MAX_ORDER + 3, self.n), dtype=self.y.dtype)
        D[0] = self.y
        D[1] = f * self.h_abs * self.direction
        self.D = D

        self.order = 1
        self.n_equal_steps = 0
        self.LU = None

    def _validate_jac(self, jac, sparsity):
        t0 = self.t
        y0 = self.y

        if jac is None:
            if sparsity is not None:
                if issparse(sparsity):
                    sparsity = csc_matrix(sparsity)
                groups = group_columns(sparsity)
                sparsity = (sparsity, groups)

            def jac_wrapped(t, y):
                self.njev += 1
                f = self.fun_single(t, y)
                J, self.jac_factor = num_jac(self.fun_vectorized, t, y, f,
                                             self.atol, self.jac_factor,
                                             sparsity)
                return J
            J = jac_wrapped(t0, y0)
        elif callable(jac):
            J = jac(t0, y0)
            self.njev += 1
            if issparse(J):
                J = csc_matrix(J, dtype=y0.dtype)

                def jac_wrapped(t, y):
                    self.njev += 1
                    return csc_matrix(jac(t, y), dtype=y0.dtype)
            else:
                J = np.asarray(J, dtype=y0.dtype)

                def jac_wrapped(t, y):
                    self.njev += 1
                    return np.asarray(jac(t, y), dtype=y0.dtype)

            if J.shape != (self.n, self.n):
                raise ValueError("`jac` is expected to have shape {}, but "
                                 "actually has {}."
                                 .format((self.n, self.n), J.shape))
        else:
            if issparse(jac):
                J = csc_matrix(jac, dtype=y0.dtype)
            else:
                J = np.asarray(jac, dtype=y0.dtype)

            if J.shape != (self.n, self.n):
                raise ValueError("`jac` is expected to have shape {}, but "
                                 "actually has {}."
                                 .format((self.n, self.n), J.shape))
            jac_wrapped = None

        return jac_wrapped, J

    def _step_impl(self):
        t = self.t
        D = self.D

        max_step = self.max_step
        min_step = 10 * np.abs(np.nextafter(t, self.direction * np.inf) - t)
        if self.h_abs > max_step:
            h_abs = max_step
            change_D(D, self.order, max_step / self.h_abs)
            self.n_equal_steps = 0
        elif self.h_abs < min_step:
            h_abs = min_step
            change_D(D, self.order, min_step / self.h_abs)
            self.n_equal_steps = 0
        else:
            h_abs = self.h_abs

        atol = self.atol
        rtol = self.rtol
        order = self.order

        alpha = self.alpha
        gamma = self.gamma
        error_const = self.error_const

        J = self.J
        LU = self.LU
        current_jac = self.jac is None

        step_accepted = False
        while not step_accepted:
            if h_abs < min_step:
                return False, self.TOO_SMALL_STEP

            h = h_abs * self.direction
            t_new = t + h

            if self.direction * (t_new - self.t_bound) > 0:
                t_new = self.t_bound
                change_D(D, order, np.abs(t_new - t) / h_abs)
                self.n_equal_steps = 0
                LU = None

            h = t_new - t
            h_abs = np.abs(h)
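
            # The predictor is the previous interpolating polynomial evaluated
            # at t_new, i.e. the sum of its backward differences, and psi
            # collects the history terms of the NDF formula, so the corrector
            # equation for d = y - y_predict is d + psi - c * f(t_new, y) = 0
            # with c = h / alpha[order].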
            y_predict = np.sum(D[:order + 1], axis=0)

            scale = atol + rtol * np.abs(y_predict)
            psi = np.dot(D[1: order + 1].T, gamma[1: order + 1]) / alpha[order]

            converged = False
            c = h / alpha[order]
            while not converged:
                if LU is None:
                    LU = self.lu(self.I - c * J)

                converged, n_iter, y_new, d = solve_bdf_system(
                    self.fun, t_new, y_predict, c, psi, LU, self.solve_lu,
                    scale, self.newton_tol)

                if not converged:
                    if current_jac:
                        break
                    J = self.jac(t_new, y_predict)
                    LU = None
                    current_jac = True

            if not converged:
                factor = 0.5
                h_abs *= factor
                change_D(D, order, factor)
                self.n_equal_steps = 0
                LU = None
                continue

            safety = 0.9 * (2 * NEWTON_MAXITER + 1) / (2 * NEWTON_MAXITER
                                                       + n_iter)

            scale = atol + rtol * np.abs(y_new)
            error = error_const[order] * d
            error_norm = norm(error / scale)

            if error_norm > 1:
                factor = max(MIN_FACTOR,
                             safety * error_norm ** (-1 / (order + 1)))
                h_abs *= factor
                change_D(D, order, factor)
                self.n_equal_steps = 0
                # As we didn't have problems with convergence, we don't
                # reset LU here.
            else:
                step_accepted = True

        self.n_equal_steps += 1

        self.t = t_new
        self.y = y_new

        self.h_abs = h_abs
        self.J = J
        self.LU = LU

        # Update differences. The principal relation here is
        # D^{j + 1} y_n = D^{j} y_n - D^{j} y_{n - 1}. Keep in mind that D
        # contained the differences for the previous interpolating polynomial
        # and d = D^{k + 1} y_n, from which this update follows.
        D[order + 2] = d - D[order + 1]
        D[order + 1] = d
        for i in reversed(range(order + 1)):
            D[i] += D[i + 1]
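
        # The order is reconsidered only after order + 1 steps of equal size,
        # so that D[order + 2] is a meaningful estimate of the next-higher
        # difference.  The candidate orders order - 1, order and order + 1 are
        # compared through the step-size factors their error estimates would
        # allow, and the order giving the largest factor wins.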
        if self.n_equal_steps < order + 1:
            return True, None

        if order > 1:
            error_m = error_const[order - 1] * D[order]
            error_m_norm = norm(error_m / scale)
        else:
            error_m_norm = np.inf

        if order < MAX_ORDER:
            error_p = error_const[order + 1] * D[order + 2]
            error_p_norm = norm(error_p / scale)
        else:
            error_p_norm = np.inf

        error_norms = np.array([error_m_norm, error_norm, error_p_norm])
        factors = error_norms ** (-1 / np.arange(order, order + 3))

        delta_order = np.argmax(factors) - 1
        order += delta_order
        self.order = order

        factor = min(MAX_FACTOR, safety * np.max(factors))
        self.h_abs *= factor
        change_D(D, order, factor)
        self.n_equal_steps = 0
        self.LU = None

        return True, None

    def _dense_output_impl(self):
        return BdfDenseOutput(self.t_old, self.t, self.h_abs * self.direction,
                              self.order, self.D[:self.order + 1].copy())


class BdfDenseOutput(DenseOutput):
    def __init__(self, t_old, t, h, order, D):
        super(BdfDenseOutput, self).__init__(t_old, t)
        self.order = order
        self.t_shift = self.t - h * np.arange(self.order)
        self.denom = h * (1 + np.arange(self.order))
        self.D = D
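
    # _call_impl evaluates the solver's interpolating polynomial in Newton
    # backward-difference form,
    #     y(t) = D[0] + sum_{j=1..order} D[j] * prod_{m=0..j-1}
    #            (t - (t_n - m*h)) / ((m + 1) * h),
    # where t_n is the right endpoint of the last accepted step.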
    def _call_impl(self, t):
        if t.ndim == 0:
            x = (t - self.t_shift) / self.denom
            p = np.cumprod(x)
        else:
            x = (t - self.t_shift[:, None]) / self.denom[:, None]
            p = np.cumprod(x, axis=0)

        y = np.dot(self.D[1:].T, p)
        if y.ndim == 1:
            y += self.D[0]
        else:
            y += self.D[0, :, None]

        return y