# test_minpack.py
  1. """
  2. Unit tests for optimization routines from minpack.py.
  3. """
  4. from __future__ import division, print_function, absolute_import
  5. import warnings
  6. from numpy.testing import (assert_, assert_almost_equal, assert_array_equal,
  7. assert_array_almost_equal, assert_allclose)
  8. from pytest import raises as assert_raises
  9. import numpy as np
  10. from numpy import array, float64, matrix
  11. from multiprocessing.pool import ThreadPool
  12. from scipy import optimize
  13. from scipy.special import lambertw
  14. from scipy.optimize.minpack import leastsq, curve_fit, fixed_point
  15. from scipy._lib._numpy_compat import _assert_warns, suppress_warnings
  16. from scipy.optimize import OptimizeWarning
  17. class ReturnShape(object):
  18. """This class exists to create a callable that does not have a '__name__' attribute.
  19. __init__ takes the argument 'shape', which should be a tuple of ints. When an instance
  20. it called with a single argument 'x', it returns numpy.ones(shape).
  21. """
  22. def __init__(self, shape):
  23. self.shape = shape
  24. def __call__(self, x):
  25. return np.ones(self.shape)
  26. def dummy_func(x, shape):
  27. """A function that returns an array of ones of the given shape.
  28. `x` is ignored.
  29. """
  30. return np.ones(shape)
  31. def sequence_parallel(fs):
  32. pool = ThreadPool(len(fs))
  33. try:
  34. return pool.map(lambda f: f(), fs)
  35. finally:
  36. pool.terminate()
  37. # Function and jacobian for tests of solvers for systems of nonlinear
  38. # equations
  39. def pressure_network(flow_rates, Qtot, k):
  40. """Evaluate non-linear equation system representing
  41. the pressures and flows in a system of n parallel pipes::
  42. f_i = P_i - P_0, for i = 1..n
  43. f_0 = sum(Q_i) - Qtot
  44. Where Q_i is the flow rate in pipe i and P_i the pressure in that pipe.
  45. Pressure is modeled as a P=kQ**2 where k is a valve coefficient and
  46. Q is the flow rate.
  47. Parameters
  48. ----------
  49. flow_rates : float
  50. A 1D array of n flow rates [kg/s].
  51. k : float
  52. A 1D array of n valve coefficients [1/kg m].
  53. Qtot : float
  54. A scalar, the total input flow rate [kg/s].
  55. Returns
  56. -------
  57. F : float
  58. A 1D array, F[i] == f_i.
  59. """
  60. P = k * flow_rates**2
  61. F = np.hstack((P[1:] - P[0], flow_rates.sum() - Qtot))
  62. return F
  63. def pressure_network_jacobian(flow_rates, Qtot, k):
  64. """Return the jacobian of the equation system F(flow_rates)
  65. computed by `pressure_network` with respect to
  66. *flow_rates*. See `pressure_network` for the detailed
  67. description of parrameters.
  68. Returns
  69. -------
  70. jac : float
  71. *n* by *n* matrix ``df_i/dQ_i`` where ``n = len(flow_rates)``
  72. and *f_i* and *Q_i* are described in the doc for `pressure_network`
  73. """
  74. n = len(flow_rates)
  75. pdiff = np.diag(flow_rates[1:] * 2 * k[1:] - 2 * flow_rates[0] * k[0])
  76. jac = np.empty((n, n))
  77. jac[:n-1, :n-1] = pdiff * 0
  78. jac[:n-1, n-1] = 0
  79. jac[n-1, :] = np.ones(n)
  80. return jac
  81. def pressure_network_fun_and_grad(flow_rates, Qtot, k):
  82. return (pressure_network(flow_rates, Qtot, k),
  83. pressure_network_jacobian(flow_rates, Qtot, k))
class TestFSolve(object):
    # Tests for optimize.fsolve on the parallel-pipe pressure network
    # defined above, plus shape/error propagation, dtype handling,
    # reentrancy and thread-safety checks.

    def test_pressure_network_no_gradient(self):
        # fsolve without gradient, equal pipes -> equal flows.
        k = np.ones(4) * 0.5
        Qtot = 4
        initial_guess = array([2., 0., 2., 0.])
        final_flows, info, ier, mesg = optimize.fsolve(
            pressure_network, initial_guess, args=(Qtot, k),
            full_output=True)
        assert_array_almost_equal(final_flows, np.ones(4))
        assert_(ier == 1, mesg)

    def test_pressure_network_with_gradient(self):
        # fsolve with gradient, equal pipes -> equal flows
        k = np.ones(4) * 0.5
        Qtot = 4
        initial_guess = array([2., 0., 2., 0.])
        final_flows = optimize.fsolve(
            pressure_network, initial_guess, args=(Qtot, k),
            fprime=pressure_network_jacobian)
        assert_array_almost_equal(final_flows, np.ones(4))

    def test_wrong_shape_func_callable(self):
        func = ReturnShape(1)
        # x0 is a list of two elements, but func will return an array with
        # length 1, so this should result in a TypeError.
        x0 = [1.5, 2.0]
        assert_raises(TypeError, optimize.fsolve, func, x0)

    def test_wrong_shape_func_function(self):
        # x0 is a list of two elements, but func will return an array with
        # length 1, so this should result in a TypeError.
        x0 = [1.5, 2.0]
        assert_raises(TypeError, optimize.fsolve, dummy_func, x0, args=((1,),))

    def test_wrong_shape_fprime_callable(self):
        # Jacobian of shape (2, 2) is inconsistent with func output length 1.
        func = ReturnShape(1)
        deriv_func = ReturnShape((2,2))
        assert_raises(TypeError, optimize.fsolve, func, x0=[0,1], fprime=deriv_func)

    def test_wrong_shape_fprime_function(self):
        # Jacobian of shape (3, 3) is inconsistent with func output length 2.
        func = lambda x: dummy_func(x, (2,))
        deriv_func = lambda x: dummy_func(x, (3,3))
        assert_raises(TypeError, optimize.fsolve, func, x0=[0,1], fprime=deriv_func)

    def test_func_can_raise(self):
        # An exception raised inside func must propagate to the caller.
        def func(*args):
            raise ValueError('I raised')

        with assert_raises(ValueError, match='I raised'):
            optimize.fsolve(func, x0=[0])

    def test_Dfun_can_raise(self):
        # An exception raised inside the user jacobian must propagate.
        func = lambda x: x - np.array([10])

        def deriv_func(*args):
            raise ValueError('I raised')

        with assert_raises(ValueError, match='I raised'):
            optimize.fsolve(func, x0=[0], fprime=deriv_func)

    def test_float32(self):
        # float32 input/output should still yield an accurate root.
        func = lambda x: np.array([x[0] - 100, x[1] - 1000], dtype=np.float32)**2
        p = optimize.fsolve(func, np.array([1, 1], np.float32))
        assert_allclose(func(p), [0, 0], atol=1e-3)

    def test_reentrant_func(self):
        # fsolve must be safe to call again from inside the user func.
        def func(*args):
            self.test_pressure_network_no_gradient()
            return pressure_network(*args)

        # fsolve without gradient, equal pipes -> equal flows.
        k = np.ones(4) * 0.5
        Qtot = 4
        initial_guess = array([2., 0., 2., 0.])
        final_flows, info, ier, mesg = optimize.fsolve(
            func, initial_guess, args=(Qtot, k),
            full_output=True)
        assert_array_almost_equal(final_flows, np.ones(4))
        assert_(ier == 1, mesg)

    def test_reentrant_Dfunc(self):
        # fsolve must be safe to call again from inside the user jacobian.
        def deriv_func(*args):
            self.test_pressure_network_with_gradient()
            return pressure_network_jacobian(*args)

        # fsolve with gradient, equal pipes -> equal flows
        k = np.ones(4) * 0.5
        Qtot = 4
        initial_guess = array([2., 0., 2., 0.])
        final_flows = optimize.fsolve(
            pressure_network, initial_guess, args=(Qtot, k),
            fprime=deriv_func)
        assert_array_almost_equal(final_flows, np.ones(4))

    def test_concurrent_no_gradient(self):
        # Run the basic solve on many threads at once: fsolve must not
        # rely on mutable global state.
        return sequence_parallel([self.test_pressure_network_no_gradient] * 10)

    def test_concurrent_with_gradient(self):
        # Same thread-safety check, with a user-supplied jacobian.
        return sequence_parallel([self.test_pressure_network_with_gradient] * 10)
  167. class TestRootHybr(object):
  168. def test_pressure_network_no_gradient(self):
  169. # root/hybr without gradient, equal pipes -> equal flows
  170. k = np.ones(4) * 0.5
  171. Qtot = 4
  172. initial_guess = array([2., 0., 2., 0.])
  173. final_flows = optimize.root(pressure_network, initial_guess,
  174. method='hybr', args=(Qtot, k)).x
  175. assert_array_almost_equal(final_flows, np.ones(4))
  176. def test_pressure_network_with_gradient(self):
  177. # root/hybr with gradient, equal pipes -> equal flows
  178. k = np.ones(4) * 0.5
  179. Qtot = 4
  180. initial_guess = matrix([2., 0., 2., 0.])
  181. final_flows = optimize.root(pressure_network, initial_guess,
  182. args=(Qtot, k), method='hybr',
  183. jac=pressure_network_jacobian).x
  184. assert_array_almost_equal(final_flows, np.ones(4))
  185. def test_pressure_network_with_gradient_combined(self):
  186. # root/hybr with gradient and function combined, equal pipes -> equal
  187. # flows
  188. k = np.ones(4) * 0.5
  189. Qtot = 4
  190. initial_guess = array([2., 0., 2., 0.])
  191. final_flows = optimize.root(pressure_network_fun_and_grad,
  192. initial_guess, args=(Qtot, k),
  193. method='hybr', jac=True).x
  194. assert_array_almost_equal(final_flows, np.ones(4))
  195. class TestRootLM(object):
  196. def test_pressure_network_no_gradient(self):
  197. # root/lm without gradient, equal pipes -> equal flows
  198. k = np.ones(4) * 0.5
  199. Qtot = 4
  200. initial_guess = array([2., 0., 2., 0.])
  201. final_flows = optimize.root(pressure_network, initial_guess,
  202. method='lm', args=(Qtot, k)).x
  203. assert_array_almost_equal(final_flows, np.ones(4))
class TestLeastSq(object):
    # Tests for optimize.leastsq: fit a noisy quadratic
    # y = a*x**2 + b*x + c and check shape/error handling,
    # reentrancy and thread-safety.

    def setup_method(self):
        # Build the synthetic data set once per test with a fixed seed so
        # the expected fit parameters are reproducible.
        x = np.linspace(0, 10, 40)
        a,b,c = 3.1, 42, -304.2
        self.x = x
        self.abc = a,b,c
        y_true = a*x**2 + b*x + c
        np.random.seed(0)
        self.y_meas = y_true + 0.01*np.random.standard_normal(y_true.shape)

    def residuals(self, p, y, x):
        # Residual of the quadratic model for parameter vector p = (a, b, c).
        a,b,c = p
        err = y-(a*x**2 + b*x + c)
        return err

    def residuals_jacobian(self, _p, _y, x):
        # Analytic jacobian of `residuals`; constant in p since the model
        # is linear in its parameters.
        return -np.vstack([x**2, x, np.ones_like(x)]).T

    def test_basic(self):
        p0 = array([0,0,0])
        params_fit, ier = leastsq(self.residuals, p0,
                                  args=(self.y_meas, self.x))
        assert_(ier in (1,2,3,4), 'solution not found (ier=%d)' % ier)
        # low precision due to random
        assert_array_almost_equal(params_fit, self.abc, decimal=2)

    def test_basic_with_gradient(self):
        p0 = array([0,0,0])
        params_fit, ier = leastsq(self.residuals, p0,
                                  args=(self.y_meas, self.x),
                                  Dfun=self.residuals_jacobian)
        assert_(ier in (1,2,3,4), 'solution not found (ier=%d)' % ier)
        # low precision due to random
        assert_array_almost_equal(params_fit, self.abc, decimal=2)

    def test_full_output(self):
        # np.matrix input is intentional: leastsq must accept it and
        # return the five-element full_output tuple.
        p0 = matrix([0,0,0])
        full_output = leastsq(self.residuals, p0,
                              args=(self.y_meas, self.x),
                              full_output=True)
        params_fit, cov_x, infodict, mesg, ier = full_output
        assert_(ier in (1,2,3,4), 'solution not found: %s' % mesg)

    def test_input_untouched(self):
        # leastsq must not modify the caller's initial-guess array in place.
        p0 = array([0,0,0],dtype=float64)
        p0_copy = array(p0, copy=True)
        full_output = leastsq(self.residuals, p0,
                              args=(self.y_meas, self.x),
                              full_output=True)
        params_fit, cov_x, infodict, mesg, ier = full_output
        assert_(ier in (1,2,3,4), 'solution not found: %s' % mesg)
        assert_array_equal(p0, p0_copy)

    def test_wrong_shape_func_callable(self):
        func = ReturnShape(1)
        # x0 is a list of two elements, but func will return an array with
        # length 1, so this should result in a TypeError.
        x0 = [1.5, 2.0]
        assert_raises(TypeError, optimize.leastsq, func, x0)

    def test_wrong_shape_func_function(self):
        # x0 is a list of two elements, but func will return an array with
        # length 1, so this should result in a TypeError.
        x0 = [1.5, 2.0]
        assert_raises(TypeError, optimize.leastsq, dummy_func, x0, args=((1,),))

    def test_wrong_shape_Dfun_callable(self):
        # Jacobian of shape (2, 2) is inconsistent with func output length 1.
        func = ReturnShape(1)
        deriv_func = ReturnShape((2,2))
        assert_raises(TypeError, optimize.leastsq, func, x0=[0,1], Dfun=deriv_func)

    def test_wrong_shape_Dfun_function(self):
        # Jacobian of shape (3, 3) is inconsistent with func output length 2.
        func = lambda x: dummy_func(x, (2,))
        deriv_func = lambda x: dummy_func(x, (3,3))
        assert_raises(TypeError, optimize.leastsq, func, x0=[0,1], Dfun=deriv_func)

    def test_float32(self):
        # Regression test for gh-1447
        def func(p,x,y):
            q = p[0]*np.exp(-(x-p[1])**2/(2.0*p[2]**2))+p[3]
            return q - y

        x = np.array([1.475,1.429,1.409,1.419,1.455,1.519,1.472, 1.368,1.286,
                      1.231], dtype=np.float32)
        y = np.array([0.0168,0.0193,0.0211,0.0202,0.0171,0.0151,0.0185,0.0258,
                      0.034,0.0396], dtype=np.float32)
        p0 = np.array([1.0,1.0,1.0,1.0])
        p1, success = optimize.leastsq(func, p0, args=(x,y))
        assert_(success in [1,2,3,4])
        assert_((func(p1,x,y)**2).sum() < 1e-4 * (func(p0,x,y)**2).sum())

    def test_func_can_raise(self):
        # An exception raised inside func must propagate to the caller.
        def func(*args):
            raise ValueError('I raised')

        with assert_raises(ValueError, match='I raised'):
            optimize.leastsq(func, x0=[0])

    def test_Dfun_can_raise(self):
        # An exception raised inside the user jacobian must propagate.
        func = lambda x: x - np.array([10])

        def deriv_func(*args):
            raise ValueError('I raised')

        with assert_raises(ValueError, match='I raised'):
            optimize.leastsq(func, x0=[0], Dfun=deriv_func)

    def test_reentrant_func(self):
        # leastsq must tolerate re-entrant calls made from within func.
        def func(*args):
            self.test_basic()
            return self.residuals(*args)

        p0 = array([0,0,0])
        params_fit, ier = leastsq(func, p0,
                                  args=(self.y_meas, self.x))
        assert_(ier in (1,2,3,4), 'solution not found (ier=%d)' % ier)
        # low precision due to random
        assert_array_almost_equal(params_fit, self.abc, decimal=2)

    def test_reentrant_Dfun(self):
        # leastsq must tolerate re-entrant calls made from the jacobian.
        def deriv_func(*args):
            self.test_basic()
            return self.residuals_jacobian(*args)

        p0 = array([0,0,0])
        params_fit, ier = leastsq(self.residuals, p0,
                                  args=(self.y_meas, self.x),
                                  Dfun=deriv_func)
        assert_(ier in (1,2,3,4), 'solution not found (ier=%d)' % ier)
        # low precision due to random
        assert_array_almost_equal(params_fit, self.abc, decimal=2)

    def test_concurrent_no_gradient(self):
        # leastsq must be thread-safe (no shared global state).
        return sequence_parallel([self.test_basic] * 10)

    def test_concurrent_with_gradient(self):
        return sequence_parallel([self.test_basic_with_gradient] * 10)
class TestCurveFit(object):
    # Tests for optimize.curve_fit across the 'lm', 'trf' and 'dogbox'
    # backends: fitted parameter/covariance values, input validation,
    # bounds, jacobian handling, and dtype robustness.

    def setup_method(self):
        self.y = array([1.0, 3.2, 9.5, 13.7])
        self.x = array([1.0, 2.0, 3.0, 4.0])

    def test_one_argument(self):
        # Single-parameter model: popt/pcov shapes and values.
        def func(x,a):
            return x**a
        popt, pcov = curve_fit(func, self.x, self.y)
        assert_(len(popt) == 1)
        assert_(pcov.shape == (1,1))
        assert_almost_equal(popt[0], 1.9149, decimal=4)
        assert_almost_equal(pcov[0,0], 0.0016, decimal=4)

        # Test if we get the same with full_output. Regression test for #1415.
        res = curve_fit(func, self.x, self.y, full_output=1)
        (popt2, pcov2, infodict, errmsg, ier) = res
        assert_array_almost_equal(popt, popt2)

    def test_two_argument(self):
        # Two-parameter model: popt/pcov shapes and values.
        def func(x, a, b):
            return b*x**a
        popt, pcov = curve_fit(func, self.x, self.y)
        assert_(len(popt) == 2)
        assert_(pcov.shape == (2,2))
        assert_array_almost_equal(popt, [1.7989, 1.1642], decimal=4)
        assert_array_almost_equal(pcov, [[0.0852, -0.1260], [-0.1260, 0.1912]],
                                  decimal=4)

    def test_func_is_classmethod(self):
        class test_self(object):
            """This class tests if curve_fit passes the correct number of
            arguments when the model function is a class instance method.
            """
            def func(self, x, a, b):
                return b * x**a

        test_self_inst = test_self()
        popt, pcov = curve_fit(test_self_inst.func, self.x, self.y)
        assert_(pcov.shape == (2,2))
        assert_array_almost_equal(popt, [1.7989, 1.1642], decimal=4)
        assert_array_almost_equal(pcov, [[0.0852, -0.1260], [-0.1260, 0.1912]],
                                  decimal=4)

    def test_regression_2639(self):
        # This test fails if epsfcn in leastsq is too large.
        x = [574.14200000000005, 574.154, 574.16499999999996,
             574.17700000000002, 574.18799999999999, 574.19899999999996,
             574.21100000000001, 574.22199999999998, 574.23400000000004,
             574.245]
        y = [859.0, 997.0, 1699.0, 2604.0, 2013.0, 1964.0, 2435.0,
             1550.0, 949.0, 841.0]
        guess = [574.1861428571428, 574.2155714285715, 1302.0, 1302.0,
                 0.0035019999999983615, 859.0]
        good = [5.74177150e+02, 5.74209188e+02, 1.74187044e+03, 1.58646166e+03,
                1.0068462e-02, 8.57450661e+02]

        def f_double_gauss(x, x0, x1, A0, A1, sigma, c):
            return (A0*np.exp(-(x-x0)**2/(2.*sigma**2))
                    + A1*np.exp(-(x-x1)**2/(2.*sigma**2)) + c)
        popt, pcov = curve_fit(f_double_gauss, x, y, guess, maxfev=10000)
        assert_allclose(popt, good, rtol=1e-5)

    def test_pcov(self):
        # Covariance scaling: relative sigma rescales to the same perr;
        # absolute_sigma=True keeps the user's scale.
        xdata = np.array([0, 1, 2, 3, 4, 5])
        ydata = np.array([1, 1, 5, 7, 8, 12])
        sigma = np.array([1, 2, 1, 2, 1, 2])

        def f(x, a, b):
            return a*x + b

        for method in ['lm', 'trf', 'dogbox']:
            popt, pcov = curve_fit(f, xdata, ydata, p0=[2, 0], sigma=sigma,
                                   method=method)
            perr_scaled = np.sqrt(np.diag(pcov))
            assert_allclose(perr_scaled, [0.20659803, 0.57204404], rtol=1e-3)

            # Scaling sigma by a constant must not change the scaled perr.
            popt, pcov = curve_fit(f, xdata, ydata, p0=[2, 0], sigma=3*sigma,
                                   method=method)
            perr_scaled = np.sqrt(np.diag(pcov))
            assert_allclose(perr_scaled, [0.20659803, 0.57204404], rtol=1e-3)

            popt, pcov = curve_fit(f, xdata, ydata, p0=[2, 0], sigma=sigma,
                                   absolute_sigma=True, method=method)
            perr = np.sqrt(np.diag(pcov))
            assert_allclose(perr, [0.30714756, 0.85045308], rtol=1e-3)

            # With absolute_sigma, scaling sigma scales perr linearly.
            popt, pcov = curve_fit(f, xdata, ydata, p0=[2, 0], sigma=3*sigma,
                                   absolute_sigma=True, method=method)
            perr = np.sqrt(np.diag(pcov))
            assert_allclose(perr, [3*0.30714756, 3*0.85045308], rtol=1e-3)

        # infinite variances

        def f_flat(x, a, b):
            return a*x

        pcov_expected = np.array([np.inf]*4).reshape(2, 2)

        with suppress_warnings() as sup:
            sup.filter(OptimizeWarning,
                       "Covariance of the parameters could not be estimated")
            # A model that ignores a parameter, and an underdetermined fit
            # (2 points, 2 params) both yield an indeterminate covariance.
            popt, pcov = curve_fit(f_flat, xdata, ydata, p0=[2, 0], sigma=sigma)
            popt1, pcov1 = curve_fit(f, xdata[:2], ydata[:2], p0=[2, 0])

        assert_(pcov.shape == (2, 2))
        assert_array_equal(pcov, pcov_expected)

        assert_(pcov1.shape == (2, 2))
        assert_array_equal(pcov1, pcov_expected)

    def test_array_like(self):
        # Test sequence input. Regression test for gh-3037.
        def f_linear(x, a, b):
            return a*x + b

        x = [1, 2, 3, 4]
        y = [3, 5, 7, 9]
        assert_allclose(curve_fit(f_linear, x, y)[0], [2, 1], atol=1e-10)

    def test_indeterminate_covariance(self):
        # Test that a warning is returned when pcov is indeterminate
        xdata = np.array([1, 2, 3, 4, 5, 6])
        ydata = np.array([1, 2, 3, 4, 5.5, 6])
        _assert_warns(OptimizeWarning, curve_fit,
                      lambda x, a, b: a*x, xdata, ydata)

    def test_NaN_handling(self):
        # Test for correct handling of NaNs in input data: gh-3422
        # create input with NaNs
        xdata = np.array([1, np.nan, 3])
        ydata = np.array([1, 2, 3])

        assert_raises(ValueError, curve_fit,
                      lambda x, a, b: a*x + b, xdata, ydata)
        assert_raises(ValueError, curve_fit,
                      lambda x, a, b: a*x + b, ydata, xdata)
        assert_raises(ValueError, curve_fit, lambda x, a, b: a*x + b,
                      xdata, ydata, **{"check_finite": True})

    def test_method_argument(self):
        # All valid method names (and None) fit; unknown names raise.
        def f(x, a, b):
            return a * np.exp(-b*x)

        xdata = np.linspace(0, 1, 11)
        ydata = f(xdata, 2., 2.)

        for method in ['trf', 'dogbox', 'lm', None]:
            popt, pcov = curve_fit(f, xdata, ydata, method=method)
            assert_allclose(popt, [2., 2.])

        assert_raises(ValueError, curve_fit, f, xdata, ydata, method='unknown')

    def test_bounds(self):
        def f(x, a, b):
            return a * np.exp(-b*x)

        xdata = np.linspace(0, 1, 11)
        ydata = f(xdata, 2., 2.)

        # The minimum w/out bounds is at [2., 2.],
        # and with bounds it's at [1.5, smth].
        bounds = ([1., 0], [1.5, 3.])
        for method in [None, 'trf', 'dogbox']:
            popt, pcov = curve_fit(f, xdata, ydata, bounds=bounds,
                                   method=method)
            assert_allclose(popt[0], 1.5)

        # With bounds, the starting estimate is feasible.
        popt, pcov = curve_fit(f, xdata, ydata, method='trf',
                               bounds=([0., 0], [0.6, np.inf]))
        assert_allclose(popt[0], 0.6)

        # method='lm' doesn't support bounds.
        assert_raises(ValueError, curve_fit, f, xdata, ydata, bounds=bounds,
                      method='lm')

    def test_bounds_p0(self):
        # This test is for issue #5719. The problem was that an initial guess
        # was ignored when 'trf' or 'dogbox' methods were invoked.
        def f(x, a):
            return np.sin(x + a)

        xdata = np.linspace(-2*np.pi, 2*np.pi, 40)
        ydata = np.sin(xdata)
        bounds = (-3 * np.pi, 3 * np.pi)
        for method in ['trf', 'dogbox']:
            popt_1, _ = curve_fit(f, xdata, ydata, p0=2.1*np.pi)
            popt_2, _ = curve_fit(f, xdata, ydata, p0=2.1*np.pi,
                                  bounds=bounds, method=method)

            # If the initial guess is ignored, then popt_2 would be close 0.
            assert_allclose(popt_1, popt_2)

    def test_jac(self):
        # Test that Jacobian callable is handled correctly and
        # weighted if sigma is provided.
        def f(x, a, b):
            return a * np.exp(-b*x)

        def jac(x, a, b):
            e = np.exp(-b*x)
            return np.vstack((e, -a * x * e)).T

        xdata = np.linspace(0, 1, 11)
        ydata = f(xdata, 2., 2.)

        # Test numerical options for least_squares backend.
        for method in ['trf', 'dogbox']:
            for scheme in ['2-point', '3-point', 'cs']:
                popt, pcov = curve_fit(f, xdata, ydata, jac=scheme,
                                       method=method)
                assert_allclose(popt, [2, 2])

        # Test the analytic option.
        for method in ['lm', 'trf', 'dogbox']:
            popt, pcov = curve_fit(f, xdata, ydata, method=method, jac=jac)
            assert_allclose(popt, [2, 2])

        # Now add an outlier and provide sigma.
        ydata[5] = 100
        sigma = np.ones(xdata.shape[0])
        sigma[5] = 200
        for method in ['lm', 'trf', 'dogbox']:
            popt, pcov = curve_fit(f, xdata, ydata, sigma=sigma, method=method,
                                   jac=jac)
            # Still the optimization process is influenced somehow,
            # have to set rtol=1e-3.
            assert_allclose(popt, [2, 2], rtol=1e-3)

    def test_maxfev_and_bounds(self):
        # gh-6340: with no bounds, curve_fit accepts parameter maxfev (via leastsq)
        # but with bounds, the parameter is `max_nfev` (via least_squares)
        x = np.arange(0, 10)
        y = 2*x
        popt1, _ = curve_fit(lambda x,p: p*x, x, y, bounds=(0, 3), maxfev=100)
        popt2, _ = curve_fit(lambda x,p: p*x, x, y, bounds=(0, 3), max_nfev=100)

        assert_allclose(popt1, 2, atol=1e-14)
        assert_allclose(popt2, 2, atol=1e-14)

    def test_curvefit_simplecovariance(self):
        # A 1-D sigma and the equivalent diagonal covariance matrix must
        # produce identical fits and covariances.

        def func(x, a, b):
            return a * np.exp(-b*x)

        def jac(x, a, b):
            e = np.exp(-b*x)
            return np.vstack((e, -a * x * e)).T

        np.random.seed(0)
        xdata = np.linspace(0, 4, 50)
        y = func(xdata, 2.5, 1.3)
        ydata = y + 0.2 * np.random.normal(size=len(xdata))

        sigma = np.zeros(len(xdata)) + 0.2
        covar = np.diag(sigma**2)

        for jac1, jac2 in [(jac, jac), (None, None)]:
            for absolute_sigma in [False, True]:
                popt1, pcov1 = curve_fit(func, xdata, ydata, sigma=sigma,
                                         jac=jac1, absolute_sigma=absolute_sigma)
                popt2, pcov2 = curve_fit(func, xdata, ydata, sigma=covar,
                                         jac=jac2, absolute_sigma=absolute_sigma)

                assert_allclose(popt1, popt2, atol=1e-14)
                assert_allclose(pcov1, pcov2, atol=1e-14)

    def test_curvefit_covariance(self):
        # A rotated problem with a full (non-diagonal) covariance matrix
        # must yield the same fit as the original diagonal problem.

        def funcp(x, a, b):
            rotn = np.array([[1./np.sqrt(2), -1./np.sqrt(2), 0], [1./np.sqrt(2), 1./np.sqrt(2), 0], [0, 0, 1.0]])
            return rotn.dot(a * np.exp(-b*x))

        def jacp(x, a, b):
            rotn = np.array([[1./np.sqrt(2), -1./np.sqrt(2), 0], [1./np.sqrt(2), 1./np.sqrt(2), 0], [0, 0, 1.0]])
            e = np.exp(-b*x)
            return rotn.dot(np.vstack((e, -a * x * e)).T)

        def func(x, a, b):
            return a * np.exp(-b*x)

        def jac(x, a, b):
            e = np.exp(-b*x)
            return np.vstack((e, -a * x * e)).T

        np.random.seed(0)
        xdata = np.arange(1, 4)
        y = func(xdata, 2.5, 1.0)
        ydata = y + 0.2 * np.random.normal(size=len(xdata))

        sigma = np.zeros(len(xdata)) + 0.2
        covar = np.diag(sigma**2)
        # Get a rotation matrix, and obtain ydatap = R ydata
        # Chisq = ydata^T C^{-1} ydata
        #       = ydata^T R^T R C^{-1} R^T R ydata
        #       = ydatap^T Cp^{-1} ydatap
        # Cp^{-1} = R C^{-1} R^T
        # Cp      = R C R^T, since R^-1 = R^T
        rotn = np.array([[1./np.sqrt(2), -1./np.sqrt(2), 0], [1./np.sqrt(2), 1./np.sqrt(2), 0], [0, 0, 1.0]])
        ydatap = rotn.dot(ydata)
        covarp = rotn.dot(covar).dot(rotn.T)

        for jac1, jac2 in [(jac, jacp), (None, None)]:
            for absolute_sigma in [False, True]:
                popt1, pcov1 = curve_fit(func, xdata, ydata, sigma=sigma,
                                         jac=jac1, absolute_sigma=absolute_sigma)
                popt2, pcov2 = curve_fit(funcp, xdata, ydatap, sigma=covarp,
                                         jac=jac2, absolute_sigma=absolute_sigma)

                assert_allclose(popt1, popt2, atol=1e-14)
                assert_allclose(pcov1, pcov2, atol=1e-14)

    def test_dtypes(self):
        # regression test for gh-9581: curve_fit fails if x and y dtypes differ
        x = np.arange(-3, 5)
        y = 1.5*x + 3.0 + 0.5*np.sin(x)

        def func(x, a, b):
            return a*x + b

        for method in ['lm', 'trf', 'dogbox']:
            for dtx in [np.float32, np.float64]:
                for dty in [np.float32, np.float64]:
                    x = x.astype(dtx)
                    y = y.astype(dty)

                    with warnings.catch_warnings():
                        warnings.simplefilter("error", OptimizeWarning)

                        p, cov = curve_fit(func, x, y, method=method)

                        assert np.isfinite(cov).all()
                        assert not np.allclose(p, 1)  # curve_fit's initial value

    def test_dtypes2(self):
        # regression test for gh-7117: curve_fit fails if
        # both inputs are float32
        def hyperbola(x, s_1, s_2, o_x, o_y, c):
            b_2 = (s_1 + s_2) / 2
            b_1 = (s_2 - s_1) / 2
            return o_y + b_1*(x-o_x) + b_2*np.sqrt((x-o_x)**2 + c**2/4)

        min_fit = np.array([-3.0, 0.0, -2.0, -10.0, 0.0])
        max_fit = np.array([0.0, 3.0, 3.0, 0.0, 10.0])
        guess = np.array([-2.5/3.0, 4/3.0, 1.0, -4.0, 0.5])

        params = [-2, .4, -1, -5, 9.5]
        xdata = np.array([-32, -16, -8, 4, 4, 8, 16, 32])
        ydata = hyperbola(xdata, *params)

        # run optimization twice, with xdata being float32 and float64
        popt_64, _ = curve_fit(f=hyperbola, xdata=xdata, ydata=ydata, p0=guess,
                               bounds=(min_fit, max_fit))

        xdata = xdata.astype(np.float32)
        ydata = hyperbola(xdata, *params)

        popt_32, _ = curve_fit(f=hyperbola, xdata=xdata, ydata=ydata, p0=guess,
                               bounds=(min_fit, max_fit))

        assert_allclose(popt_32, popt_64, atol=2e-5)
  607. class TestFixedPoint(object):
  608. def test_scalar_trivial(self):
  609. # f(x) = 2x; fixed point should be x=0
  610. def func(x):
  611. return 2.0*x
  612. x0 = 1.0
  613. x = fixed_point(func, x0)
  614. assert_almost_equal(x, 0.0)
  615. def test_scalar_basic1(self):
  616. # f(x) = x**2; x0=1.05; fixed point should be x=1
  617. def func(x):
  618. return x**2
  619. x0 = 1.05
  620. x = fixed_point(func, x0)
  621. assert_almost_equal(x, 1.0)
  622. def test_scalar_basic2(self):
  623. # f(x) = x**0.5; x0=1.05; fixed point should be x=1
  624. def func(x):
  625. return x**0.5
  626. x0 = 1.05
  627. x = fixed_point(func, x0)
  628. assert_almost_equal(x, 1.0)
  629. def test_array_trivial(self):
  630. def func(x):
  631. return 2.0*x
  632. x0 = [0.3, 0.15]
  633. olderr = np.seterr(all='ignore')
  634. try:
  635. x = fixed_point(func, x0)
  636. finally:
  637. np.seterr(**olderr)
  638. assert_almost_equal(x, [0.0, 0.0])
  639. def test_array_basic1(self):
  640. # f(x) = c * x**2; fixed point should be x=1/c
  641. def func(x, c):
  642. return c * x**2
  643. c = array([0.75, 1.0, 1.25])
  644. x0 = [1.1, 1.15, 0.9]
  645. olderr = np.seterr(all='ignore')
  646. try:
  647. x = fixed_point(func, x0, args=(c,))
  648. finally:
  649. np.seterr(**olderr)
  650. assert_almost_equal(x, 1.0/c)
  651. def test_array_basic2(self):
  652. # f(x) = c * x**0.5; fixed point should be x=c**2
  653. def func(x, c):
  654. return c * x**0.5
  655. c = array([0.75, 1.0, 1.25])
  656. x0 = [0.8, 1.1, 1.1]
  657. x = fixed_point(func, x0, args=(c,))
  658. assert_almost_equal(x, c**2)
  659. def test_lambertw(self):
  660. # python-list/2010-December/594592.html
  661. xxroot = fixed_point(lambda xx: np.exp(-2.0*xx)/2.0, 1.0,
  662. args=(), xtol=1e-12, maxiter=500)
  663. assert_allclose(xxroot, np.exp(-2.0*xxroot)/2.0)
  664. assert_allclose(xxroot, lambertw(1)/2)
  665. def test_no_acceleration(self):
  666. # github issue 5460
  667. ks = 2
  668. kl = 6
  669. m = 1.3
  670. n0 = 1.001
  671. i0 = ((m-1)/m)*(kl/ks/m)**(1/(m-1))
  672. def func(n):
  673. return np.log(kl/ks/n) / np.log((i0*n/(n - 1))) + 1
  674. n = fixed_point(func, n0, method='iteration')
  675. assert_allclose(n, m)