# _expm_multiply.py

  1. """Compute the action of the matrix exponential.
  2. """
  3. from __future__ import division, print_function, absolute_import
  4. import numpy as np
  5. import scipy.linalg
  6. import scipy.sparse.linalg
  7. from scipy.sparse.linalg import aslinearoperator
  8. __all__ = ['expm_multiply']
  9. def _exact_inf_norm(A):
  10. # A compatibility function which should eventually disappear.
  11. if scipy.sparse.isspmatrix(A):
  12. return max(abs(A).sum(axis=1).flat)
  13. else:
  14. return np.linalg.norm(A, np.inf)
  15. def _exact_1_norm(A):
  16. # A compatibility function which should eventually disappear.
  17. if scipy.sparse.isspmatrix(A):
  18. return max(abs(A).sum(axis=0).flat)
  19. else:
  20. return np.linalg.norm(A, 1)
  21. def _trace(A):
  22. # A compatibility function which should eventually disappear.
  23. if scipy.sparse.isspmatrix(A):
  24. return A.diagonal().sum()
  25. else:
  26. return np.trace(A)
  27. def _ident_like(A):
  28. # A compatibility function which should eventually disappear.
  29. if scipy.sparse.isspmatrix(A):
  30. return scipy.sparse.construct.eye(A.shape[0], A.shape[1],
  31. dtype=A.dtype, format=A.format)
  32. else:
  33. return np.eye(A.shape[0], A.shape[1], dtype=A.dtype)
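

# A small illustrative sketch of the helpers above (added for exposition; it
# is not called anywhere in this module): the sparse and dense branches agree,
# with the 1-norm being the maximum absolute column sum and the infinity norm
# the maximum absolute row sum.
def _norm_helpers_sketch():
    M = np.array([[1., -2.],
                  [3., 4.]])
    S = scipy.sparse.csr_matrix(M)
    assert _exact_1_norm(M) == _exact_1_norm(S) == 6.0      # max column abs sum
    assert _exact_inf_norm(M) == _exact_inf_norm(S) == 7.0  # max row abs sum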


def expm_multiply(A, B, start=None, stop=None, num=None, endpoint=None):
    """
    Compute the action of the matrix exponential of A on B.

    Parameters
    ----------
    A : transposable linear operator
        The operator whose exponential is of interest.
    B : ndarray
        The matrix or vector to be multiplied by the matrix exponential of A.
    start : scalar, optional
        The starting time point of the sequence.
    stop : scalar, optional
        The end time point of the sequence, unless `endpoint` is set to False.
        In that case, the sequence consists of all but the last of ``num + 1``
        evenly spaced time points, so that `stop` is excluded.
        Note that the step size changes when `endpoint` is False.
    num : int, optional
        Number of time points to use.
    endpoint : bool, optional
        If True, `stop` is the last time point. Otherwise, it is not included.

    Returns
    -------
    expm_A_B : ndarray
        The result of the action :math:`e^{t_k A} B`.

    Notes
    -----
    The optional arguments defining the sequence of evenly spaced time points
    are compatible with the arguments of `numpy.linspace`.

    The output ndarray shape is somewhat complicated, so it is explained here.
    The ndim of the output could be either 1, 2, or 3.
    It would be 1 if you are computing the expm action on a single vector
    at a single time point.
    It would be 2 if you are computing the expm action on a vector
    at multiple time points, or if you are computing the expm action
    on a matrix at a single time point.
    It would be 3 if you want the action on a matrix with multiple
    columns at multiple time points.
    If multiple time points are requested, expm_A_B[0] will always
    be the action of the expm at the first time point,
    regardless of whether the action is on a vector or a matrix.

    References
    ----------
    .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2011)
           "Computing the Action of the Matrix Exponential,
           with an Application to Exponential Integrators."
           SIAM Journal on Scientific Computing,
           33 (2). pp. 488-511. ISSN 1064-8275
           http://eprints.ma.man.ac.uk/1591/

    .. [2] Nicholas J. Higham and Awad H. Al-Mohy (2010)
           "Computing Matrix Functions."
           Acta Numerica,
           19. 159-208. ISSN 0962-4929
           http://eprints.ma.man.ac.uk/1451/

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse import csc_matrix
    >>> from scipy.sparse.linalg import expm, expm_multiply
    >>> A = csc_matrix([[1, 0], [0, 1]])
    >>> A.todense()
    matrix([[1, 0],
            [0, 1]], dtype=int64)
    >>> B = np.array([np.exp(-1.), np.exp(-2.)])
    >>> B
    array([ 0.36787944,  0.13533528])
    >>> expm_multiply(A, B, start=1, stop=2, num=3, endpoint=True)
    array([[ 1.        ,  0.36787944],
           [ 1.64872127,  0.60653066],
           [ 2.71828183,  1.        ]])
    >>> expm(A).dot(B)  # Verify 1st timestep
    array([ 1.        ,  0.36787944])
    >>> expm(1.5*A).dot(B)  # Verify 2nd timestep
    array([ 1.64872127,  0.60653066])
    >>> expm(2*A).dot(B)  # Verify 3rd timestep
    array([ 2.71828183,  1.        ])
    """
    if all(arg is None for arg in (start, stop, num, endpoint)):
        X = _expm_multiply_simple(A, B)
    else:
        X, status = _expm_multiply_interval(A, B, start, stop, num, endpoint)
    return X
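

# An illustrative sketch of the output-shape rules described in the docstring
# above (added for exposition; not used by the module): 1-D for a vector at a
# single time point, 2-D for a vector at several time points, 3-D for a
# multi-column matrix at several time points.
def _output_shape_sketch():
    A = scipy.sparse.identity(3, format='csc')
    v = np.ones(3)             # a single vector
    M = np.ones((3, 2))        # a matrix with two columns
    assert expm_multiply(A, v).shape == (3,)
    assert expm_multiply(A, v, start=0, stop=1, num=4,
                         endpoint=True).shape == (4, 3)
    assert expm_multiply(A, M, start=0, stop=1, num=4,
                         endpoint=True).shape == (4, 3, 2)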


def _expm_multiply_simple(A, B, t=1.0, balance=False):
    """
    Compute the action of the matrix exponential at a single time point.

    Parameters
    ----------
    A : transposable linear operator
        The operator whose exponential is of interest.
    B : ndarray
        The matrix to be multiplied by the matrix exponential of A.
    t : float
        A time point.
    balance : bool
        Indicates whether or not to apply balancing.

    Returns
    -------
    F : ndarray
        :math:`e^{t A} B`

    Notes
    -----
    This is algorithm (3.2) in Al-Mohy and Higham (2011).

    """
    if balance:
        raise NotImplementedError
    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected A to be like a square matrix')
    if A.shape[1] != B.shape[0]:
        raise ValueError('the matrices A and B have incompatible shapes')
    ident = _ident_like(A)
    n = A.shape[0]
    if len(B.shape) == 1:
        n0 = 1
    elif len(B.shape) == 2:
        n0 = B.shape[1]
    else:
        raise ValueError('expected B to be like a matrix or a vector')
    u_d = 2**-53
    tol = u_d
    mu = _trace(A) / float(n)
    A = A - mu * ident
    A_1_norm = _exact_1_norm(A)
    if t*A_1_norm == 0:
        m_star, s = 0, 1
    else:
        ell = 2
        norm_info = LazyOperatorNormInfo(t*A, A_1_norm=t*A_1_norm, ell=ell)
        m_star, s = _fragment_3_1(norm_info, n0, tol, ell=ell)
    return _expm_multiply_simple_core(A, B, t, mu, m_star, s, tol, balance)


def _expm_multiply_simple_core(A, B, t, mu, m_star, s, tol=None, balance=False):
    """
    A helper function.
    """
    if balance:
        raise NotImplementedError
    if tol is None:
        u_d = 2 ** -53
        tol = u_d
    F = B
    eta = np.exp(t*mu / float(s))
    for i in range(s):
        c1 = _exact_inf_norm(B)
        for j in range(m_star):
            coeff = t / float(s*(j+1))
            B = coeff * A.dot(B)
            c2 = _exact_inf_norm(B)
            F = F + B
            if c1 + c2 <= tol * _exact_inf_norm(F):
                break
            c1 = c2
        F = eta * F
        B = F
    return F
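

# A minimal sketch checking _expm_multiply_simple against the dense matrix
# exponential (added for exposition; not used by the module).  Any small
# random matrix works here; the sizes and seed below are arbitrary.
def _single_time_point_sketch():
    rng = np.random.RandomState(0)
    A = rng.randn(4, 4)
    b = rng.randn(4)
    # e^A b computed directly via the dense matrix exponential ...
    direct = scipy.linalg.expm(A).dot(b)
    # ... and via the scaled, truncated Taylor series above.
    via_action = _expm_multiply_simple(A, b, t=1.0)
    assert np.allclose(direct, via_action)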


# This table helps to compute bounds.
# They seem to have been difficult to calculate, involving symbolic
# manipulation of equations, followed by numerical root finding.
_theta = {
        # The first 30 values are from table A.3 of Computing Matrix Functions.
        1: 2.29e-16,
        2: 2.58e-8,
        3: 1.39e-5,
        4: 3.40e-4,
        5: 2.40e-3,
        6: 9.07e-3,
        7: 2.38e-2,
        8: 5.00e-2,
        9: 8.96e-2,
        10: 1.44e-1,
        # 11
        11: 2.14e-1,
        12: 3.00e-1,
        13: 4.00e-1,
        14: 5.14e-1,
        15: 6.41e-1,
        16: 7.81e-1,
        17: 9.31e-1,
        18: 1.09,
        19: 1.26,
        20: 1.44,
        # 21
        21: 1.62,
        22: 1.82,
        23: 2.01,
        24: 2.22,
        25: 2.43,
        26: 2.64,
        27: 2.86,
        28: 3.08,
        29: 3.31,
        30: 3.54,
        # The rest are from table 3.1 of
        # Computing the Action of the Matrix Exponential.
        35: 4.7,
        40: 6.0,
        45: 7.2,
        50: 8.5,
        55: 9.9,
        }
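

# A sketch of how the table above is consulted (added for exposition; not used
# by the module): when condition (3.13) applies, the scaling parameter for a
# given Taylor degree m is s = ceil(||A||_1 / theta_m), as in _fragment_3_1.
def _theta_usage_sketch():
    A_1_norm = 3.2
    m = 20                                  # theta_20 == 1.44
    s = int(np.ceil(A_1_norm / _theta[m]))
    assert s == 3                           # ceil(3.2 / 1.44) == ceil(2.22...)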


def _onenormest_matrix_power(A, p,
        t=2, itmax=5, compute_v=False, compute_w=False):
    """
    Efficiently estimate the 1-norm of A^p.

    Parameters
    ----------
    A : ndarray
        Matrix whose 1-norm of a power is to be computed.
    p : int
        Non-negative integer power.
    t : int, optional
        A positive parameter controlling the tradeoff between
        accuracy versus time and memory usage.
        Larger values take longer and use more memory
        but give more accurate output.
    itmax : int, optional
        Use at most this many iterations.
    compute_v : bool, optional
        Request a norm-maximizing linear operator input vector if True.
    compute_w : bool, optional
        Request a norm-maximizing linear operator output vector if True.

    Returns
    -------
    est : float
        An underestimate of the 1-norm of the sparse matrix.
    v : ndarray, optional
        The vector such that ||Av||_1 == est*||v||_1.
        It can be thought of as an input to the linear operator
        that gives an output with particularly large norm.
    w : ndarray, optional
        The vector Av which has relatively large 1-norm.
        It can be thought of as an output of the linear operator
        that is relatively large in norm compared to the input.

    """
    #XXX Eventually turn this into an API function in the _onenormest module,
    #XXX and remove its underscore,
    #XXX but wait until expm_multiply goes into scipy.
    return scipy.sparse.linalg.onenormest(aslinearoperator(A) ** p)
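

# A sketch of what the wrapper above returns (added for exposition; not used
# by the module): the estimate is a lower bound on the exact 1-norm of A^p
# (cf. the "underestimate" note above), computed here densely for comparison.
def _onenormest_matrix_power_sketch():
    rng = np.random.RandomState(1234)
    A = rng.randn(30, 30)
    p = 3
    est = _onenormest_matrix_power(A, p)
    exact = np.linalg.norm(np.linalg.matrix_power(A, p), 1)
    assert est <= exact * (1 + 1e-12)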


class LazyOperatorNormInfo:
    """
    Information about an operator is lazily computed.

    The information includes the exact 1-norm of the operator,
    in addition to estimates of 1-norms of powers of the operator.
    This uses the notation of Computing the Action (2011).
    This class is specialized enough to probably not be of general interest
    outside of this module.

    """
    def __init__(self, A, A_1_norm=None, ell=2, scale=1):
        """
        Provide the operator and some norm-related information.

        Parameters
        ----------
        A : linear operator
            The operator of interest.
        A_1_norm : float, optional
            The exact 1-norm of A.
        ell : int, optional
            A technical parameter controlling norm estimation quality.
        scale : int, optional
            If specified, return the norms of scale*A instead of A.

        """
        self._A = A
        self._A_1_norm = A_1_norm
        self._ell = ell
        self._d = {}
        self._scale = scale

    def set_scale(self, scale):
        """
        Set the scale parameter.
        """
        self._scale = scale

    def onenorm(self):
        """
        Compute the exact 1-norm.
        """
        if self._A_1_norm is None:
            self._A_1_norm = _exact_1_norm(self._A)
        return self._scale*self._A_1_norm

    def d(self, p):
        """
        Lazily estimate d_p(A) ~= || A^p ||^(1/p) where ||.|| is the 1-norm.
        """
        if p not in self._d:
            est = _onenormest_matrix_power(self._A, p, self._ell)
            self._d[p] = est ** (1.0 / p)
        return self._scale*self._d[p]

    def alpha(self, p):
        """
        Lazily compute max(d(p), d(p+1)).
        """
        return max(self.d(p), self.d(p+1))
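

# A sketch of how the class above behaves (added for exposition; not used by
# the module): d(p) is estimated once and then cached, and set_scale() rescales
# the cached values without recomputing them.
def _lazy_norm_info_sketch():
    rng = np.random.RandomState(0)
    A = rng.randn(10, 10)
    info = LazyOperatorNormInfo(A, ell=2)
    d2_first = info.d(2)      # triggers a 1-norm estimate of A^2
    d2_cached = info.d(2)     # served from the cache, no new estimate
    assert d2_first == d2_cached
    info.set_scale(0.5)
    assert np.isclose(info.d(2), 0.5 * d2_first)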


def _compute_cost_div_m(m, p, norm_info):
    """
    A helper function for computing bounds.

    This is equation (3.10).
    It measures cost in terms of the number of required matrix products.

    Parameters
    ----------
    m : int
        A valid key of _theta.
    p : int
        A matrix power.
    norm_info : LazyOperatorNormInfo
        Information about 1-norms of related operators.

    Returns
    -------
    cost_div_m : int
        Required number of matrix products divided by m.

    """
    return int(np.ceil(norm_info.alpha(p) / _theta[m]))
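

# A sketch of equation (3.10) as implemented above (added for exposition; not
# used by the module): for A = 2*I we have d_p(A) = 2 for every p, so with
# m = 20 (theta_20 == 1.44) the cost per unit m is ceil(2 / 1.44) == 2.
def _compute_cost_div_m_sketch():
    norm_info = LazyOperatorNormInfo(2 * np.eye(4))
    assert _compute_cost_div_m(20, 2, norm_info) == 2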


def _compute_p_max(m_max):
    """
    Compute the largest positive integer p such that p*(p-1) <= m_max + 1.

    Do this in a slightly dumb way, but safe and not too slow.

    Parameters
    ----------
    m_max : int
        A count related to bounds.

    """
    sqrt_m_max = np.sqrt(m_max)
    p_low = int(np.floor(sqrt_m_max))
    p_high = int(np.ceil(sqrt_m_max + 1))
    return max(p for p in range(p_low, p_high+1) if p*(p-1) <= m_max + 1)
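

# A sketch of the helper above (added for exposition; not used by the module):
# for the default m_max = 55 used by _fragment_3_1, the largest p with
# p*(p-1) <= m_max + 1 = 56 is p = 8, since 8*7 == 56 while 9*8 == 72.
def _compute_p_max_sketch():
    assert _compute_p_max(55) == 8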


def _fragment_3_1(norm_info, n0, tol, m_max=55, ell=2):
    """
    A helper function for the _expm_multiply_* functions.

    Parameters
    ----------
    norm_info : LazyOperatorNormInfo
        Information about norms of certain linear operators of interest.
    n0 : int
        Number of columns in the _expm_multiply_* B matrix.
    tol : float
        Expected to be
        :math:`2^{-24}` for single precision or
        :math:`2^{-53}` for double precision.
    m_max : int
        A value related to a bound.
    ell : int
        The number of columns used in the 1-norm approximation.
        This is usually taken to be small, maybe between 1 and 5.

    Returns
    -------
    best_m : int
        Related to bounds for error control.
    best_s : int
        Amount of scaling.

    Notes
    -----
    This is code fragment (3.1) in Al-Mohy and Higham (2011).
    The discussion of default values for m_max and ell
    is given between the definitions of equation (3.11)
    and the definition of equation (3.12).

    """
    if ell < 1:
        raise ValueError('expected ell to be a positive integer')
    best_m = None
    best_s = None
    if _condition_3_13(norm_info.onenorm(), n0, m_max, ell):
        for m, theta in _theta.items():
            s = int(np.ceil(norm_info.onenorm() / theta))
            if best_m is None or m * s < best_m * best_s:
                best_m = m
                best_s = s
    else:
        # Equation (3.11).
        for p in range(2, _compute_p_max(m_max) + 1):
            for m in range(p*(p-1)-1, m_max+1):
                if m in _theta:
                    s = _compute_cost_div_m(m, p, norm_info)
                    if best_m is None or m * s < best_m * best_s:
                        best_m = m
                        best_s = s
    best_s = max(best_s, 1)
    return best_m, best_s


def _condition_3_13(A_1_norm, n0, m_max, ell):
    """
    A helper function for the _expm_multiply_* functions.

    Parameters
    ----------
    A_1_norm : float
        The precomputed 1-norm of A.
    n0 : int
        Number of columns in the _expm_multiply_* B matrix.
    m_max : int
        A value related to a bound.
    ell : int
        The number of columns used in the 1-norm approximation.
        This is usually taken to be small, maybe between 1 and 5.

    Returns
    -------
    value : bool
        Indicates whether or not the condition has been met.

    Notes
    -----
    This is condition (3.13) in Al-Mohy and Higham (2011).
    """
    # This is the rhs of equation (3.12).
    p_max = _compute_p_max(m_max)
    a = 2 * ell * p_max * (p_max + 3)

    # Evaluate the condition (3.13).
    b = _theta[m_max] / float(n0 * m_max)
    return A_1_norm <= a * b
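

# A sketch of the parameter selection done by _fragment_3_1 when condition
# (3.13) above holds (added for exposition; not used by the module).  The
# operator itself is never touched once its 1-norm is supplied and the
# condition holds, so None is passed for A here.  With ||A||_1 = 0.1 the
# product m*s is minimised at m_star = 10 with s = 1, since theta_10 ~= 0.144
# is the smallest tabulated theta that is at least 0.1.
def _fragment_3_1_sketch():
    norm_info = LazyOperatorNormInfo(None, A_1_norm=0.1, ell=2)
    m_star, s = _fragment_3_1(norm_info, n0=1, tol=2**-53)
    assert (m_star, s) == (10, 1)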


def _expm_multiply_interval(A, B, start=None, stop=None,
        num=None, endpoint=None, balance=False, status_only=False):
    """
    Compute the action of the matrix exponential at multiple time points.

    Parameters
    ----------
    A : transposable linear operator
        The operator whose exponential is of interest.
    B : ndarray
        The matrix to be multiplied by the matrix exponential of A.
    start : scalar, optional
        The starting time point of the sequence.
    stop : scalar, optional
        The end time point of the sequence, unless `endpoint` is set to False.
        In that case, the sequence consists of all but the last of ``num + 1``
        evenly spaced time points, so that `stop` is excluded.
        Note that the step size changes when `endpoint` is False.
    num : int, optional
        Number of time points to use.
    endpoint : bool, optional
        If True, `stop` is the last time point. Otherwise, it is not included.
    balance : bool
        Indicates whether or not to apply balancing.
    status_only : bool
        A flag that is set to True for some debugging and testing operations.

    Returns
    -------
    F : ndarray
        :math:`e^{t_k A} B`
    status : int
        An integer status for testing and debugging.

    Notes
    -----
    This is algorithm (5.2) in Al-Mohy and Higham (2011).

    There seems to be a typo, where line 15 of the algorithm should be
    moved to line 6.5 (between lines 6 and 7).

    """
    if balance:
        raise NotImplementedError
    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected A to be like a square matrix')
    if A.shape[1] != B.shape[0]:
        raise ValueError('the matrices A and B have incompatible shapes')
    ident = _ident_like(A)
    n = A.shape[0]
    if len(B.shape) == 1:
        n0 = 1
    elif len(B.shape) == 2:
        n0 = B.shape[1]
    else:
        raise ValueError('expected B to be like a matrix or a vector')
    u_d = 2**-53
    tol = u_d
    mu = _trace(A) / float(n)

    # Get the linspace samples, attempting to preserve the linspace defaults.
    linspace_kwargs = {'retstep': True}
    if num is not None:
        linspace_kwargs['num'] = num
    if endpoint is not None:
        linspace_kwargs['endpoint'] = endpoint
    samples, step = np.linspace(start, stop, **linspace_kwargs)

    # Convert the linspace output to the notation used by the publication.
    nsamples = len(samples)
    if nsamples < 2:
        raise ValueError('at least two time points are required')
    q = nsamples - 1
    h = step
    t_0 = samples[0]
    t_q = samples[q]

    # Define the output ndarray.
    # Use an ndim=3 shape, such that the last two indices
    # are the ones that may be involved in level 3 BLAS operations.
    X_shape = (nsamples,) + B.shape
    X = np.empty(X_shape, dtype=np.result_type(A.dtype, B.dtype, float))
    t = t_q - t_0
    A = A - mu * ident
    A_1_norm = _exact_1_norm(A)
    ell = 2
    norm_info = LazyOperatorNormInfo(t*A, A_1_norm=t*A_1_norm, ell=ell)
    if t*A_1_norm == 0:
        m_star, s = 0, 1
    else:
        m_star, s = _fragment_3_1(norm_info, n0, tol, ell=ell)

    # Compute the expm action up to the initial time point.
    X[0] = _expm_multiply_simple_core(A, B, t_0, mu, m_star, s)

    # Compute the expm action at the rest of the time points.
    if q <= s:
        if status_only:
            return 0
        else:
            return _expm_multiply_interval_core_0(A, X,
                    h, mu, q, norm_info, tol, ell, n0)
    elif not (q % s):
        if status_only:
            return 1
        else:
            return _expm_multiply_interval_core_1(A, X,
                    h, mu, m_star, s, q, tol)
    elif (q % s):
        if status_only:
            return 2
        else:
            return _expm_multiply_interval_core_2(A, X,
                    h, mu, m_star, s, q, tol)
    else:
        raise Exception('internal error')


def _expm_multiply_interval_core_0(A, X, h, mu, q, norm_info, tol, ell, n0):
    """
    A helper function, for the case q <= s.
    """
    # Compute the new values of m_star and s which should be applied
    # over intervals of size t/q.
    if norm_info.onenorm() == 0:
        m_star, s = 0, 1
    else:
        norm_info.set_scale(1./q)
        m_star, s = _fragment_3_1(norm_info, n0, tol, ell=ell)
        norm_info.set_scale(1)

    for k in range(q):
        X[k+1] = _expm_multiply_simple_core(A, X[k], h, mu, m_star, s)
    return X, 0


def _expm_multiply_interval_core_1(A, X, h, mu, m_star, s, q, tol):
    """
    A helper function, for the case q > s and q % s == 0.
    """
    d = q // s
    input_shape = X.shape[1:]
    K_shape = (m_star + 1, ) + input_shape
    K = np.empty(K_shape, dtype=X.dtype)
    for i in range(s):
        Z = X[i*d]
        K[0] = Z
        high_p = 0
        for k in range(1, d+1):
            F = K[0]
            c1 = _exact_inf_norm(F)
            for p in range(1, m_star+1):
                if p > high_p:
                    K[p] = h * A.dot(K[p-1]) / float(p)
                coeff = float(pow(k, p))
                F = F + coeff * K[p]
                inf_norm_K_p_1 = _exact_inf_norm(K[p])
                c2 = coeff * inf_norm_K_p_1
                if c1 + c2 <= tol * _exact_inf_norm(F):
                    break
                c1 = c2
            X[k + i*d] = np.exp(k*h*mu) * F
    return X, 1
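

# A sketch of the recurrence used above (added for exposition; not used by the
# module): K[p] holds (h*A)**p Z / p!, so sum_p k**p * K[p] is the truncated
# Taylor series for expm(k*h*A) @ Z, and a single set of K's serves several
# consecutive time points k.  The sizes and truncation degree below are
# arbitrary and kept small.
def _interval_recurrence_sketch():
    rng = np.random.RandomState(0)
    A = 0.1 * rng.randn(4, 4)
    Z = rng.randn(4)
    h, m_star = 0.2, 12
    K = [Z]
    for p in range(1, m_star + 1):
        K.append(h * A.dot(K[p - 1]) / float(p))
    for k in (1, 2, 3):
        F = sum(float(k)**p * K[p] for p in range(m_star + 1))
        assert np.allclose(F, scipy.linalg.expm(k * h * A).dot(Z))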


def _expm_multiply_interval_core_2(A, X, h, mu, m_star, s, q, tol):
    """
    A helper function, for the case q > s and q % s > 0.
    """
    d = q // s
    j = q // d
    r = q - d * j
    input_shape = X.shape[1:]
    K_shape = (m_star + 1, ) + input_shape
    K = np.empty(K_shape, dtype=X.dtype)
    for i in range(j + 1):
        Z = X[i*d]
        K[0] = Z
        high_p = 0
        if i < j:
            effective_d = d
        else:
            effective_d = r
        for k in range(1, effective_d+1):
            F = K[0]
            c1 = _exact_inf_norm(F)
            for p in range(1, m_star+1):
                if p == high_p + 1:
                    K[p] = h * A.dot(K[p-1]) / float(p)
                    high_p = p
                coeff = float(pow(k, p))
                F = F + coeff * K[p]
                inf_norm_K_p_1 = _exact_inf_norm(K[p])
                c2 = coeff * inf_norm_K_p_1
                if c1 + c2 <= tol * _exact_inf_norm(F):
                    break
                c1 = c2
            X[k + i*d] = np.exp(k*h*mu) * F
    return X, 2
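

# A final sketch checking the multi-time-point path against independent dense
# evaluations at each sampled time (added for exposition; not used by the
# module).
def _interval_path_sketch():
    rng = np.random.RandomState(0)
    A = rng.randn(5, 5)
    B = rng.randn(5, 2)
    samples = np.linspace(1.0, 2.0, 4)
    X, status = _expm_multiply_interval(A, B, start=1.0, stop=2.0,
                                        num=4, endpoint=True)
    for t_k, X_k in zip(samples, X):
        assert np.allclose(X_k, scipy.linalg.expm(t_k * A).dot(B))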