"""
Unit tests for optimization routines from optimize.py

Authors:
   Ed Schofield, Nov 2005
   Andrew Straw, April 2008

To run it in its simplest form::
  nosetests test_optimize.py

"""
from __future__ import division, print_function, absolute_import

import itertools

import numpy as np
from numpy.testing import (assert_allclose, assert_equal,
                           assert_,
                           assert_almost_equal, assert_warns,
                           assert_array_less)
import pytest
from pytest import raises as assert_raises

from scipy._lib._numpy_compat import suppress_warnings
from scipy import optimize


def test_check_grad():
    # Verify if check_grad is able to estimate the derivative of the
    # logistic function.

    def logit(x):
        return 1 / (1 + np.exp(-x))

    def der_logit(x):
        return np.exp(-x) / (1 + np.exp(-x))**2

    x0 = np.array([1.5])

    r = optimize.check_grad(logit, der_logit, x0)
    assert_almost_equal(r, 0)

    r = optimize.check_grad(logit, der_logit, x0, epsilon=1e-6)
    assert_almost_equal(r, 0)

    # Check if the epsilon parameter is being considered.
    r = abs(optimize.check_grad(logit, der_logit, x0, epsilon=1e-1) - 0)
    assert_(r > 1e-7)


class CheckOptimize(object):
    """ Base test case for a simple constrained entropy maximization problem
    (the machine translation example of Berger et al in
    Computational Linguistics, vol 22, num 1, pp 39--72, 1996.)
    """

    def setup_method(self):
        self.F = np.array([[1,1,1],[1,1,0],[1,0,1],[1,0,0],[1,0,0]])
        self.K = np.array([1., 0.3, 0.5])
        self.startparams = np.zeros(3, np.float64)
        self.solution = np.array([0., -0.524869316, 0.487525860])
        self.maxiter = 1000
        self.funccalls = 0
        self.gradcalls = 0
        self.trace = []
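
    # The methods below implement the objective for the maxent problem above:
    # with log_pdot = F.dot(x) and logZ = log(sum(exp(log_pdot))), func returns
    # f(x) = logZ - K.dot(x).  grad returns F.T.dot(p) - K, where
    # p = exp(log_pdot - logZ) is the implied probability distribution, and
    # hess/hessp return the matching Hessian
    # F.T.dot(diag(p)).dot(F - F.T.dot(p)) and Hessian-vector product.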
    def func(self, x):
        self.funccalls += 1
        if self.funccalls > 6000:
            raise RuntimeError("too many iterations in optimization routine")
        log_pdot = np.dot(self.F, x)
        logZ = np.log(sum(np.exp(log_pdot)))
        f = logZ - np.dot(self.K, x)
        self.trace.append(x)
        return f

    def grad(self, x):
        self.gradcalls += 1
        log_pdot = np.dot(self.F, x)
        logZ = np.log(sum(np.exp(log_pdot)))
        p = np.exp(log_pdot - logZ)
        return np.dot(self.F.transpose(), p) - self.K

    def hess(self, x):
        log_pdot = np.dot(self.F, x)
        logZ = np.log(sum(np.exp(log_pdot)))
        p = np.exp(log_pdot - logZ)
        return np.dot(self.F.T,
                      np.dot(np.diag(p), self.F - np.dot(self.F.T, p)))

    def hessp(self, x, p):
        return np.dot(self.hess(x), p)


class CheckOptimizeParameterized(CheckOptimize):

    def test_cg(self):
        # conjugate gradient optimization routine
        if self.use_wrapper:
            opts = {'maxiter': self.maxiter, 'disp': self.disp,
                    'return_all': False}
            res = optimize.minimize(self.func, self.startparams, args=(),
                                    method='CG', jac=self.grad,
                                    options=opts)
            params, fopt, func_calls, grad_calls, warnflag = \
                res['x'], res['fun'], res['nfev'], res['njev'], res['status']
        else:
            retval = optimize.fmin_cg(self.func, self.startparams,
                                      self.grad, (), maxiter=self.maxiter,
                                      full_output=True, disp=self.disp,
                                      retall=False)
            (params, fopt, func_calls, grad_calls, warnflag) = retval

        assert_allclose(self.func(params), self.func(self.solution),
                        atol=1e-6)

        # Ensure that function call counts are 'known good'; these are from
        # Scipy 0.7.0. Don't allow them to increase.
        assert_(self.funccalls == 9, self.funccalls)
        assert_(self.gradcalls == 7, self.gradcalls)

        # Ensure that the function behaves the same; this is from Scipy 0.7.0
        assert_allclose(self.trace[2:4],
                        [[0, -0.5, 0.5],
                         [0, -5.05700028e-01, 4.95985862e-01]],
                        atol=1e-14, rtol=1e-7)

    def test_cg_cornercase(self):
        def f(r):
            return 2.5 * (1 - np.exp(-1.5*(r - 0.5)))**2

        # Check several initial guesses. (Too far away from the
        # minimum, the function ends up in the flat region of exp.)
        for x0 in np.linspace(-0.75, 3, 71):
            sol = optimize.minimize(f, [x0], method='CG')
            assert_(sol.success)
            assert_allclose(sol.x, [0.5], rtol=1e-5)

    def test_bfgs(self):
        # Broyden-Fletcher-Goldfarb-Shanno optimization routine
        if self.use_wrapper:
            opts = {'maxiter': self.maxiter, 'disp': self.disp,
                    'return_all': False}
            res = optimize.minimize(self.func, self.startparams,
                                    jac=self.grad, method='BFGS', args=(),
                                    options=opts)
            params, fopt, gopt, Hopt, func_calls, grad_calls, warnflag = (
                res['x'], res['fun'], res['jac'], res['hess_inv'],
                res['nfev'], res['njev'], res['status'])
        else:
            retval = optimize.fmin_bfgs(self.func, self.startparams, self.grad,
                                        args=(), maxiter=self.maxiter,
                                        full_output=True, disp=self.disp,
                                        retall=False)
            (params, fopt, gopt, Hopt,
             func_calls, grad_calls, warnflag) = retval

        assert_allclose(self.func(params), self.func(self.solution),
                        atol=1e-6)

        # Ensure that function call counts are 'known good'; these are from
        # Scipy 0.7.0. Don't allow them to increase.
        assert_(self.funccalls == 10, self.funccalls)
        assert_(self.gradcalls == 8, self.gradcalls)

        # Ensure that the function behaves the same; this is from Scipy 0.7.0
        assert_allclose(self.trace[6:8],
                        [[0, -5.25060743e-01, 4.87748473e-01],
                         [0, -5.24885582e-01, 4.87530347e-01]],
                        atol=1e-14, rtol=1e-7)

    def test_bfgs_infinite(self):
        # Test corner case where -Inf is the minimum.  See gh-2019.
        func = lambda x: -np.e**-x
        fprime = lambda x: -func(x)
        x0 = [0]
        olderr = np.seterr(over='ignore')
        try:
            if self.use_wrapper:
                opts = {'disp': self.disp}
                x = optimize.minimize(func, x0, jac=fprime, method='BFGS',
                                      args=(), options=opts)['x']
            else:
                x = optimize.fmin_bfgs(func, x0, fprime, disp=self.disp)
            assert_(not np.isfinite(func(x)))
        finally:
            np.seterr(**olderr)

    def test_powell(self):
        # Powell (direction set) optimization routine
        if self.use_wrapper:
            opts = {'maxiter': self.maxiter, 'disp': self.disp,
                    'return_all': False}
            res = optimize.minimize(self.func, self.startparams, args=(),
                                    method='Powell', options=opts)
            params, fopt, direc, numiter, func_calls, warnflag = (
                res['x'], res['fun'], res['direc'], res['nit'],
                res['nfev'], res['status'])
        else:
            retval = optimize.fmin_powell(self.func, self.startparams,
                                          args=(), maxiter=self.maxiter,
                                          full_output=True, disp=self.disp,
                                          retall=False)
            (params, fopt, direc, numiter, func_calls, warnflag) = retval

        assert_allclose(self.func(params), self.func(self.solution),
                        atol=1e-6)

        # Ensure that function call counts are 'known good'; these are from
        # Scipy 0.7.0. Don't allow them to increase.
        #
        # However, some leeway must be added: the exact evaluation
        # count is sensitive to numerical error, and floating-point
        # computations are not bit-for-bit reproducible across
        # machines, and when using e.g. MKL, data alignment
        # etc. affect the rounding error.
        #
        assert_(self.funccalls <= 116 + 20, self.funccalls)
        assert_(self.gradcalls == 0, self.gradcalls)

        # Ensure that the function behaves the same; this is from Scipy 0.7.0
        assert_allclose(self.trace[34:39],
                        [[0.72949016, -0.44156936, 0.47100962],
                         [0.72949016, -0.44156936, 0.48052496],
                         [1.45898031, -0.88313872, 0.95153458],
                         [0.72949016, -0.44156936, 0.47576729],
                         [1.72949016, -0.44156936, 0.47576729]],
                        atol=1e-14, rtol=1e-7)

    def test_neldermead(self):
        # Nelder-Mead simplex algorithm
        if self.use_wrapper:
            opts = {'maxiter': self.maxiter, 'disp': self.disp,
                    'return_all': False}
            res = optimize.minimize(self.func, self.startparams, args=(),
                                    method='Nelder-mead', options=opts)
            params, fopt, numiter, func_calls, warnflag, final_simplex = (
                res['x'], res['fun'], res['nit'], res['nfev'],
                res['status'], res['final_simplex'])
        else:
            retval = optimize.fmin(self.func, self.startparams,
                                   args=(), maxiter=self.maxiter,
                                   full_output=True, disp=self.disp,
                                   retall=False)
            (params, fopt, numiter, func_calls, warnflag) = retval

        assert_allclose(self.func(params), self.func(self.solution),
                        atol=1e-6)

        # Ensure that function call counts are 'known good'; these are from
        # Scipy 0.7.0. Don't allow them to increase.
        assert_(self.funccalls == 167, self.funccalls)
        assert_(self.gradcalls == 0, self.gradcalls)

        # Ensure that the function behaves the same; this is from Scipy 0.7.0
        assert_allclose(self.trace[76:78],
                        [[0.1928968, -0.62780447, 0.35166118],
                         [0.19572515, -0.63648426, 0.35838135]],
                        atol=1e-14, rtol=1e-7)

    def test_neldermead_initial_simplex(self):
        # Nelder-Mead simplex algorithm
        simplex = np.zeros((4, 3))
        simplex[...] = self.startparams
        for j in range(3):
            simplex[j+1, j] += 0.1

        if self.use_wrapper:
            opts = {'maxiter': self.maxiter, 'disp': False,
                    'return_all': True, 'initial_simplex': simplex}
            res = optimize.minimize(self.func, self.startparams, args=(),
                                    method='Nelder-mead', options=opts)
            params, fopt, numiter, func_calls, warnflag = \
                res['x'], res['fun'], res['nit'], res['nfev'], \
                res['status']
            assert_allclose(res['allvecs'][0], simplex[0])
        else:
            retval = optimize.fmin(self.func, self.startparams,
                                   args=(), maxiter=self.maxiter,
                                   full_output=True, disp=False, retall=False,
                                   initial_simplex=simplex)
            (params, fopt, numiter, func_calls, warnflag) = retval

        assert_allclose(self.func(params), self.func(self.solution),
                        atol=1e-6)

        # Ensure that function call counts are 'known good'; these are from
        # Scipy 0.17.0. Don't allow them to increase.
        assert_(self.funccalls == 100, self.funccalls)
        assert_(self.gradcalls == 0, self.gradcalls)

        # Ensure that the function behaves the same; this is from Scipy 0.15.0
        assert_allclose(self.trace[50:52],
                        [[0.14687474, -0.5103282, 0.48252111],
                         [0.14474003, -0.5282084, 0.48743951]],
                        atol=1e-14, rtol=1e-7)

    def test_neldermead_initial_simplex_bad(self):
        # Check that it fails with bad simplices
        bad_simplices = []

        simplex = np.zeros((3, 2))
        simplex[...] = self.startparams[:2]
        for j in range(2):
            simplex[j+1, j] += 0.1
        bad_simplices.append(simplex)

        simplex = np.zeros((3, 3))
        bad_simplices.append(simplex)

        for simplex in bad_simplices:
            if self.use_wrapper:
                opts = {'maxiter': self.maxiter, 'disp': False,
                        'return_all': False, 'initial_simplex': simplex}
                assert_raises(ValueError,
                              optimize.minimize, self.func, self.startparams,
                              args=(), method='Nelder-mead', options=opts)
            else:
                assert_raises(ValueError, optimize.fmin, self.func,
                              self.startparams, args=(),
                              maxiter=self.maxiter, full_output=True,
                              disp=False, retall=False,
                              initial_simplex=simplex)

    def test_ncg_negative_maxiter(self):
        # Regression test for gh-8241
        opts = {'maxiter': -1}
        result = optimize.minimize(self.func, self.startparams,
                                   method='Newton-CG', jac=self.grad,
                                   args=(), options=opts)
        assert_(result.status == 1)

    def test_ncg(self):
        # line-search Newton conjugate gradient optimization routine
        if self.use_wrapper:
            opts = {'maxiter': self.maxiter, 'disp': self.disp,
                    'return_all': False}
            retval = optimize.minimize(self.func, self.startparams,
                                       method='Newton-CG', jac=self.grad,
                                       args=(), options=opts)['x']
        else:
            retval = optimize.fmin_ncg(self.func, self.startparams, self.grad,
                                       args=(), maxiter=self.maxiter,
                                       full_output=False, disp=self.disp,
                                       retall=False)

        params = retval
        assert_allclose(self.func(params), self.func(self.solution),
                        atol=1e-6)

        # Ensure that function call counts are 'known good'; these are from
        # Scipy 0.7.0. Don't allow them to increase.
        assert_(self.funccalls == 7, self.funccalls)
        assert_(self.gradcalls <= 22, self.gradcalls)  # 0.13.0
        # assert_(self.gradcalls <= 18, self.gradcalls)  # 0.9.0
        # assert_(self.gradcalls == 18, self.gradcalls)  # 0.8.0
        # assert_(self.gradcalls == 22, self.gradcalls)  # 0.7.0

        # Ensure that the function behaves the same; this is from Scipy 0.7.0
        assert_allclose(self.trace[3:5],
                        [[-4.35700753e-07, -5.24869435e-01, 4.87527480e-01],
                         [-4.35700753e-07, -5.24869401e-01, 4.87527774e-01]],
                        atol=1e-6, rtol=1e-7)

    def test_ncg_hess(self):
        # Newton conjugate gradient with Hessian
        if self.use_wrapper:
            opts = {'maxiter': self.maxiter, 'disp': self.disp,
                    'return_all': False}
            retval = optimize.minimize(self.func, self.startparams,
                                       method='Newton-CG', jac=self.grad,
                                       hess=self.hess,
                                       args=(), options=opts)['x']
        else:
            retval = optimize.fmin_ncg(self.func, self.startparams, self.grad,
                                       fhess=self.hess,
                                       args=(), maxiter=self.maxiter,
                                       full_output=False, disp=self.disp,
                                       retall=False)

        params = retval
        assert_allclose(self.func(params), self.func(self.solution),
                        atol=1e-6)

        # Ensure that function call counts are 'known good'; these are from
        # Scipy 0.7.0. Don't allow them to increase.
        assert_(self.funccalls == 7, self.funccalls)
        assert_(self.gradcalls <= 18, self.gradcalls)  # 0.9.0
        # assert_(self.gradcalls == 18, self.gradcalls)  # 0.8.0
        # assert_(self.gradcalls == 22, self.gradcalls)  # 0.7.0

        # Ensure that the function behaves the same; this is from Scipy 0.7.0
        assert_allclose(self.trace[3:5],
                        [[-4.35700753e-07, -5.24869435e-01, 4.87527480e-01],
                         [-4.35700753e-07, -5.24869401e-01, 4.87527774e-01]],
                        atol=1e-6, rtol=1e-7)

    def test_ncg_hessp(self):
        # Newton conjugate gradient with Hessian times a vector p.
        if self.use_wrapper:
            opts = {'maxiter': self.maxiter, 'disp': self.disp,
                    'return_all': False}
            retval = optimize.minimize(self.func, self.startparams,
                                       method='Newton-CG', jac=self.grad,
                                       hessp=self.hessp,
                                       args=(), options=opts)['x']
        else:
            retval = optimize.fmin_ncg(self.func, self.startparams, self.grad,
                                       fhess_p=self.hessp,
                                       args=(), maxiter=self.maxiter,
                                       full_output=False, disp=self.disp,
                                       retall=False)

        params = retval
        assert_allclose(self.func(params), self.func(self.solution),
                        atol=1e-6)

        # Ensure that function call counts are 'known good'; these are from
        # Scipy 0.7.0. Don't allow them to increase.
        assert_(self.funccalls == 7, self.funccalls)
        assert_(self.gradcalls <= 18, self.gradcalls)  # 0.9.0
        # assert_(self.gradcalls == 18, self.gradcalls)  # 0.8.0
        # assert_(self.gradcalls == 22, self.gradcalls)  # 0.7.0

        # Ensure that the function behaves the same; this is from Scipy 0.7.0
        assert_allclose(self.trace[3:5],
                        [[-4.35700753e-07, -5.24869435e-01, 4.87527480e-01],
                         [-4.35700753e-07, -5.24869401e-01, 4.87527774e-01]],
                        atol=1e-6, rtol=1e-7)


def test_neldermead_xatol_fatol():
    # gh4484
    # test we can call with fatol, xatol specified
    func = lambda x: x[0]**2 + x[1]**2

    optimize._minimize._minimize_neldermead(func, [1, 1], maxiter=2,
                                            xatol=1e-3, fatol=1e-3)
    assert_warns(DeprecationWarning,
                 optimize._minimize._minimize_neldermead,
                 func, [1, 1], xtol=1e-3, ftol=1e-3, maxiter=2)


def test_neldermead_adaptive():
    func = lambda x: np.sum(x**2)
    p0 = [0.15746215, 0.48087031, 0.44519198, 0.4223638, 0.61505159, 0.32308456,
          0.9692297, 0.4471682, 0.77411992, 0.80441652, 0.35994957, 0.75487856,
          0.99973421, 0.65063887, 0.09626474]

    res = optimize.minimize(func, p0, method='Nelder-Mead')
    assert_equal(res.success, False)

    res = optimize.minimize(func, p0, method='Nelder-Mead',
                            options={'adaptive': True})
    assert_equal(res.success, True)


class TestOptimizeWrapperDisp(CheckOptimizeParameterized):
    use_wrapper = True
    disp = True


class TestOptimizeWrapperNoDisp(CheckOptimizeParameterized):
    use_wrapper = True
    disp = False


class TestOptimizeNoWrapperDisp(CheckOptimizeParameterized):
    use_wrapper = False
    disp = True


class TestOptimizeNoWrapperNoDisp(CheckOptimizeParameterized):
    use_wrapper = False
    disp = False


class TestOptimizeSimple(CheckOptimize):

    def test_bfgs_nan(self):
        # Test corner case where nan is fed to optimizer.  See gh-2067.
        func = lambda x: x
        fprime = lambda x: np.ones_like(x)
        x0 = [np.nan]
        with np.errstate(over='ignore', invalid='ignore'):
            x = optimize.fmin_bfgs(func, x0, fprime, disp=False)
            assert_(np.isnan(func(x)))

    def test_bfgs_nan_return(self):
        # Test corner cases where fun returns NaN. See gh-4793.

        # First case: NaN from first call.
        func = lambda x: np.nan
        with np.errstate(invalid='ignore'):
            result = optimize.minimize(func, 0)

        assert_(np.isnan(result['fun']))
        assert_(result['success'] is False)

        # Second case: NaN from second call.
        func = lambda x: 0 if x == 0 else np.nan
        fprime = lambda x: np.ones_like(x)  # Steer away from zero.
        with np.errstate(invalid='ignore'):
            result = optimize.minimize(func, 0, jac=fprime)

        assert_(np.isnan(result['fun']))
        assert_(result['success'] is False)

    def test_bfgs_numerical_jacobian(self):
        # BFGS with numerical jacobian and a vector epsilon parameter.
        # define the epsilon parameter using a random vector
        epsilon = np.sqrt(np.finfo(float).eps) * np.random.rand(len(self.solution))

        params = optimize.fmin_bfgs(self.func, self.startparams,
                                    epsilon=epsilon, args=(),
                                    maxiter=self.maxiter, disp=False)

        assert_allclose(self.func(params), self.func(self.solution),
                        atol=1e-6)

    def test_bfgs_gh_2169(self):
        def f(x):
            if x < 0:
                return 1.79769313e+308
            else:
                return x + 1./x
        xs = optimize.fmin_bfgs(f, [10.], disp=False)
        assert_allclose(xs, 1.0, rtol=1e-4, atol=1e-4)

    def test_l_bfgs_b(self):
        # limited-memory bound-constrained BFGS algorithm
        retval = optimize.fmin_l_bfgs_b(self.func, self.startparams,
                                        self.grad, args=(),
                                        maxiter=self.maxiter)

        (params, fopt, d) = retval

        assert_allclose(self.func(params), self.func(self.solution),
                        atol=1e-6)

        # Ensure that function call counts are 'known good'; these are from
        # Scipy 0.7.0. Don't allow them to increase.
        assert_(self.funccalls == 7, self.funccalls)
        assert_(self.gradcalls == 5, self.gradcalls)

        # Ensure that the function behaves the same; this is from Scipy 0.7.0
        assert_allclose(self.trace[3:5],
                        [[0., -0.52489628, 0.48753042],
                         [0., -0.52489628, 0.48753042]],
                        atol=1e-14, rtol=1e-7)

    def test_l_bfgs_b_numjac(self):
        # L-BFGS-B with numerical jacobian
        retval = optimize.fmin_l_bfgs_b(self.func, self.startparams,
                                        approx_grad=True,
                                        maxiter=self.maxiter)

        (params, fopt, d) = retval

        assert_allclose(self.func(params), self.func(self.solution),
                        atol=1e-6)

    def test_l_bfgs_b_funjac(self):
        # L-BFGS-B with combined objective function and jacobian
        def fun(x):
            return self.func(x), self.grad(x)

        retval = optimize.fmin_l_bfgs_b(fun, self.startparams,
                                        maxiter=self.maxiter)

        (params, fopt, d) = retval

        assert_allclose(self.func(params), self.func(self.solution),
                        atol=1e-6)

    def test_l_bfgs_b_maxiter(self):
        # gh7854
        # Ensure that not more than maxiters are ever run.
        class Callback(object):
            def __init__(self):
                self.nit = 0
                self.fun = None
                self.x = None

            def __call__(self, x):
                self.x = x
                self.fun = optimize.rosen(x)
                self.nit += 1

        c = Callback()
        res = optimize.minimize(optimize.rosen, [0., 0.], method='l-bfgs-b',
                                callback=c, options={'maxiter': 5})

        assert_equal(res.nit, 5)
        assert_almost_equal(res.x, c.x)
        assert_almost_equal(res.fun, c.fun)
        assert_equal(res.status, 1)
        assert_(res.success is False)
        assert_equal(res.message.decode(),
                     'STOP: TOTAL NO. of ITERATIONS REACHED LIMIT')

    def test_minimize_l_bfgs_b(self):
        # Minimize with L-BFGS-B method
        opts = {'disp': False, 'maxiter': self.maxiter}
        r = optimize.minimize(self.func, self.startparams,
                              method='L-BFGS-B', jac=self.grad,
                              options=opts)
        assert_allclose(self.func(r.x), self.func(self.solution),
                        atol=1e-6)

        # approximate jacobian
        ra = optimize.minimize(self.func, self.startparams,
                               method='L-BFGS-B', options=opts)
        assert_allclose(self.func(ra.x), self.func(self.solution),
                        atol=1e-6)

        # check that function evaluations in approximate jacobian are counted
        assert_(ra.nfev > r.nfev)

    def test_minimize_l_bfgs_b_ftol(self):
        # Check that the `ftol` parameter in l_bfgs_b works as expected
        v0 = None
        for tol in [1e-1, 1e-4, 1e-7, 1e-10]:
            opts = {'disp': False, 'maxiter': self.maxiter, 'ftol': tol}
            sol = optimize.minimize(self.func, self.startparams,
                                    method='L-BFGS-B', jac=self.grad,
                                    options=opts)
            v = self.func(sol.x)

            if v0 is None:
                v0 = v
            else:
                assert_(v < v0)

            assert_allclose(v, self.func(self.solution), rtol=tol)

    def test_minimize_l_bfgs_maxls(self):
        # check that the maxls is passed down to the Fortran routine
        sol = optimize.minimize(optimize.rosen, np.array([-1.2, 1.0]),
                                method='L-BFGS-B', jac=optimize.rosen_der,
                                options={'disp': False, 'maxls': 1})
        assert_(not sol.success)

    def test_minimize_l_bfgs_b_maxfun_interruption(self):
        # gh-6162
        f = optimize.rosen
        g = optimize.rosen_der
        values = []
        x0 = np.ones(7) * 1000

        def objfun(x):
            value = f(x)
            values.append(value)
            return value

        # Look for an interesting test case.
        # Request a maxfun that stops at a particularly bad function
        # evaluation somewhere between 100 and 300 evaluations.
        low, medium, high = 30, 100, 300
        optimize.fmin_l_bfgs_b(objfun, x0, fprime=g, maxfun=high)
        v, k = max((y, i) for i, y in enumerate(values[medium:]))
        maxfun = medium + k

        # If the minimization strategy is reasonable,
        # the minimize() result should not be worse than the best
        # of the first 30 function evaluations.
        target = min(values[:low])
        xmin, fmin, d = optimize.fmin_l_bfgs_b(f, x0, fprime=g, maxfun=maxfun)
        assert_array_less(fmin, target)

    def test_custom(self):
        # This function comes from the documentation example.
        def custmin(fun, x0, args=(), maxfev=None, stepsize=0.1,
                    maxiter=100, callback=None, **options):
            bestx = x0
            besty = fun(x0)
            funcalls = 1
            niter = 0
            improved = True
            stop = False

            while improved and not stop and niter < maxiter:
                improved = False
                niter += 1
                for dim in range(np.size(x0)):
                    for s in [bestx[dim] - stepsize, bestx[dim] + stepsize]:
                        testx = np.copy(bestx)
                        testx[dim] = s
                        testy = fun(testx, *args)
                        funcalls += 1
                        if testy < besty:
                            besty = testy
                            bestx = testx
                            improved = True
                if callback is not None:
                    callback(bestx)
                if maxfev is not None and funcalls >= maxfev:
                    stop = True
                    break

            return optimize.OptimizeResult(fun=besty, x=bestx, nit=niter,
                                           nfev=funcalls,
                                           success=(niter > 1))

        x0 = [1.35, 0.9, 0.8, 1.1, 1.2]
        res = optimize.minimize(optimize.rosen, x0, method=custmin,
                                options=dict(stepsize=0.05))
        assert_allclose(res.x, 1.0, rtol=1e-4, atol=1e-4)

    def test_minimize_tol_parameter(self):
        # Check that the minimize() tol= argument does something
        def func(z):
            x, y = z
            return x**2*y**2 + x**4 + 1

        def dfunc(z):
            x, y = z
            return np.array([2*x*y**2 + 4*x**3, 2*x**2*y])

        for method in ['nelder-mead', 'powell', 'cg', 'bfgs',
                       'newton-cg', 'l-bfgs-b', 'tnc',
                       'cobyla', 'slsqp']:
            if method in ('nelder-mead', 'powell', 'cobyla'):
                jac = None
            else:
                jac = dfunc

            sol1 = optimize.minimize(func, [1, 1], jac=jac, tol=1e-10,
                                     method=method)
            sol2 = optimize.minimize(func, [1, 1], jac=jac, tol=1.0,
                                     method=method)
            assert_(func(sol1.x) < func(sol2.x),
                    "%s: %s vs. %s" % (method, func(sol1.x), func(sol2.x)))

    @pytest.mark.parametrize('method',
                             ['fmin', 'fmin_powell', 'fmin_cg', 'fmin_bfgs',
                              'fmin_ncg', 'fmin_l_bfgs_b', 'fmin_tnc',
                              'fmin_slsqp',
                              'Nelder-Mead', 'Powell', 'CG', 'BFGS',
                              'Newton-CG', 'L-BFGS-B', 'TNC', 'SLSQP',
                              'trust-constr', 'dogleg', 'trust-ncg',
                              'trust-exact', 'trust-krylov'])
    def test_minimize_callback_copies_array(self, method):
        # Check that arrays passed to callbacks are not modified
        # inplace by the optimizer afterward
        if method in ('fmin_tnc', 'fmin_l_bfgs_b'):
            func = lambda x: (optimize.rosen(x), optimize.rosen_der(x))
        else:
            func = optimize.rosen
            jac = optimize.rosen_der
            hess = optimize.rosen_hess

        x0 = np.zeros(10)

        # Set options
        kwargs = {}
        if method.startswith('fmin'):
            routine = getattr(optimize, method)
            if method == 'fmin_slsqp':
                kwargs['iter'] = 5
            elif method == 'fmin_tnc':
                kwargs['maxfun'] = 100
            else:
                kwargs['maxiter'] = 5
        else:
            def routine(*a, **kw):
                kw['method'] = method
                return optimize.minimize(*a, **kw)

            if method == 'TNC':
                kwargs['options'] = dict(maxiter=100)
            else:
                kwargs['options'] = dict(maxiter=5)

        if method in ('fmin_ncg',):
            kwargs['fprime'] = jac
        elif method in ('Newton-CG',):
            kwargs['jac'] = jac
        elif method in ('trust-krylov', 'trust-exact', 'trust-ncg', 'dogleg',
                        'trust-constr'):
            kwargs['jac'] = jac
            kwargs['hess'] = hess

        # Run with callback
        results = []

        def callback(x, *args, **kwargs):
            results.append((x, np.copy(x)))

        sol = routine(func, x0, callback=callback, **kwargs)

        # Check returned arrays coincide with their copies and have no memory
        # overlap
        assert_(len(results) > 2)
        assert_(all(np.all(x == y) for x, y in results))
        assert_(not any(np.may_share_memory(x[0], y[0])
                        for x, y in itertools.combinations(results, 2)))

    @pytest.mark.parametrize('method', ['nelder-mead', 'powell', 'cg', 'bfgs',
                                        'newton-cg', 'l-bfgs-b', 'tnc',
                                        'cobyla', 'slsqp'])
    def test_no_increase(self, method):
        # Check that the solver doesn't return a value worse than the
        # initial point.
        def func(x):
            return (x - 1)**2

        def bad_grad(x):
            # purposefully invalid gradient function, simulates a case
            # where line searches start failing
            return 2*(x - 1) * (-1) - 2

        x0 = np.array([2.0])
        f0 = func(x0)
        jac = bad_grad
        if method in ['nelder-mead', 'powell', 'cobyla']:
            jac = None
        sol = optimize.minimize(func, x0, jac=jac, method=method,
                                options=dict(maxiter=20))
        assert_equal(func(sol.x), sol.fun)

        if method == 'slsqp':
            pytest.xfail("SLSQP returns slightly worse")
        assert_(func(sol.x) <= f0)

    def test_slsqp_respect_bounds(self):
        # Regression test for gh-3108
        def f(x):
            return sum((x - np.array([1., 2., 3., 4.]))**2)

        def cons(x):
            a = np.array([[-1, -1, -1, -1], [-3, -3, -2, -1]])
            return np.concatenate([np.dot(a, x) + np.array([5, 10]), x])

        x0 = np.array([0.5, 1., 1.5, 2.])
        res = optimize.minimize(f, x0, method='slsqp',
                                constraints={'type': 'ineq', 'fun': cons})
        assert_allclose(res.x, np.array([0., 2, 5, 8])/3, atol=1e-12)

    def test_minimize_automethod(self):
        def f(x):
            return x**2

        def cons(x):
            return x - 2

        x0 = np.array([10.])
        sol_0 = optimize.minimize(f, x0)
        sol_1 = optimize.minimize(f, x0,
                                  constraints=[{'type': 'ineq', 'fun': cons}])
        sol_2 = optimize.minimize(f, x0, bounds=[(5, 10)])
        sol_3 = optimize.minimize(f, x0,
                                  constraints=[{'type': 'ineq', 'fun': cons}],
                                  bounds=[(5, 10)])
        sol_4 = optimize.minimize(f, x0,
                                  constraints=[{'type': 'ineq', 'fun': cons}],
                                  bounds=[(1, 10)])
        for sol in [sol_0, sol_1, sol_2, sol_3, sol_4]:
            assert_(sol.success)
        assert_allclose(sol_0.x, 0, atol=1e-7)
        assert_allclose(sol_1.x, 2, atol=1e-7)
        assert_allclose(sol_2.x, 5, atol=1e-7)
        assert_allclose(sol_3.x, 5, atol=1e-7)
        assert_allclose(sol_4.x, 2, atol=1e-7)

    def test_minimize_coerce_args_param(self):
        # Regression test for gh-3503
        def Y(x, c):
            return np.sum((x-c)**2)

        def dY_dx(x, c=None):
            return 2*(x-c)

        c = np.array([3, 1, 4, 1, 5, 9, 2, 6, 5, 3, 5])
        xinit = np.random.randn(len(c))
        optimize.minimize(Y, xinit, jac=dY_dx, args=(c), method="BFGS")

    def test_initial_step_scaling(self):
        # Check that optimizer initial step is not huge even if the
        # function and gradients are

        scales = [1e-50, 1, 1e50]
        methods = ['CG', 'BFGS', 'L-BFGS-B', 'Newton-CG']

        def f(x):
            if first_step_size[0] is None and x[0] != x0[0]:
                first_step_size[0] = abs(x[0] - x0[0])
            if abs(x).max() > 1e4:
                raise AssertionError("Optimization stepped far away!")
            return scale*(x[0] - 1)**2

        def g(x):
            return np.array([scale*(x[0] - 1)])

        for scale, method in itertools.product(scales, methods):
            if method in ('CG', 'BFGS'):
                options = dict(gtol=scale*1e-8)
            else:
                options = dict()

            if scale < 1e-10 and method in ('L-BFGS-B', 'Newton-CG'):
                # XXX: return initial point if they see small gradient
                continue

            x0 = [-1.0]
            first_step_size = [None]
            res = optimize.minimize(f, x0, jac=g, method=method,
                                    options=options)

            err_msg = "{0} {1}: {2}: {3}".format(method, scale,
                                                 first_step_size, res)

            assert_(res.success, err_msg)
            assert_allclose(res.x, [1.0], err_msg=err_msg)
            assert_(res.nit <= 3, err_msg)

            if scale > 1e-10:
                if method in ('CG', 'BFGS'):
                    assert_allclose(first_step_size[0], 1.01, err_msg=err_msg)
                else:
                    # Newton-CG and L-BFGS-B use different logic for the first
                    # step, but are both scaling invariant with step sizes ~ 1
                    assert_(first_step_size[0] > 0.5 and
                            first_step_size[0] < 3, err_msg)
            else:
                # step size has upper bound of ||grad||, so line
                # search makes many small steps
                pass


class TestLBFGSBBounds(object):
    def setup_method(self):
        self.bounds = ((1, None), (None, None))
        self.solution = (1, 0)

    def fun(self, x, p=2.0):
        return 1.0 / p * (x[0]**p + x[1]**p)

    def jac(self, x, p=2.0):
        return x**(p - 1)

    def fj(self, x, p=2.0):
        return self.fun(x, p), self.jac(x, p)

    def test_l_bfgs_b_bounds(self):
        x, f, d = optimize.fmin_l_bfgs_b(self.fun, [0, -1],
                                         fprime=self.jac,
                                         bounds=self.bounds)
        assert_(d['warnflag'] == 0, d['task'])
        assert_allclose(x, self.solution, atol=1e-6)

    def test_l_bfgs_b_funjac(self):
        # L-BFGS-B with fun and jac combined and extra arguments
        x, f, d = optimize.fmin_l_bfgs_b(self.fj, [0, -1], args=(2.0, ),
                                         bounds=self.bounds)
        assert_(d['warnflag'] == 0, d['task'])
        assert_allclose(x, self.solution, atol=1e-6)

    def test_minimize_l_bfgs_b_bounds(self):
        # Minimize with method='L-BFGS-B' with bounds
        res = optimize.minimize(self.fun, [0, -1], method='L-BFGS-B',
                                jac=self.jac, bounds=self.bounds)
        assert_(res['success'], res['message'])
        assert_allclose(res.x, self.solution, atol=1e-6)


class TestOptimizeScalar(object):
    def setup_method(self):
        self.solution = 1.5

    def fun(self, x, a=1.5):
        """Objective function"""
        return (x - a)**2 - 0.8

    def test_brent(self):
        x = optimize.brent(self.fun)
        assert_allclose(x, self.solution, atol=1e-6)

        x = optimize.brent(self.fun, brack=(-3, -2))
        assert_allclose(x, self.solution, atol=1e-6)

        x = optimize.brent(self.fun, full_output=True)
        assert_allclose(x[0], self.solution, atol=1e-6)

        x = optimize.brent(self.fun, brack=(-15, -1, 15))
        assert_allclose(x, self.solution, atol=1e-6)

    def test_golden(self):
        x = optimize.golden(self.fun)
        assert_allclose(x, self.solution, atol=1e-6)

        x = optimize.golden(self.fun, brack=(-3, -2))
        assert_allclose(x, self.solution, atol=1e-6)

        x = optimize.golden(self.fun, full_output=True)
        assert_allclose(x[0], self.solution, atol=1e-6)

        x = optimize.golden(self.fun, brack=(-15, -1, 15))
        assert_allclose(x, self.solution, atol=1e-6)

        x = optimize.golden(self.fun, tol=0)
        assert_allclose(x, self.solution)

        maxiter_test_cases = [0, 1, 5]
        for maxiter in maxiter_test_cases:
            x0 = optimize.golden(self.fun, maxiter=0, full_output=True)
            x = optimize.golden(self.fun, maxiter=maxiter, full_output=True)
            nfev0, nfev = x0[2], x[2]
            assert_equal(nfev - nfev0, maxiter)

    def test_fminbound(self):
        x = optimize.fminbound(self.fun, 0, 1)
        assert_allclose(x, 1, atol=1e-4)

        x = optimize.fminbound(self.fun, 1, 5)
        assert_allclose(x, self.solution, atol=1e-6)

        x = optimize.fminbound(self.fun, np.array([1]), np.array([5]))
        assert_allclose(x, self.solution, atol=1e-6)
        assert_raises(ValueError, optimize.fminbound, self.fun, 5, 1)

    def test_fminbound_scalar(self):
        with pytest.raises(ValueError, match='.*must be scalar.*'):
            optimize.fminbound(self.fun, np.zeros((1, 2)), 1)

        x = optimize.fminbound(self.fun, 1, np.array(5))
        assert_allclose(x, self.solution, atol=1e-6)

    def test_minimize_scalar(self):
        # combine all tests above for the minimize_scalar wrapper
        x = optimize.minimize_scalar(self.fun).x
        assert_allclose(x, self.solution, atol=1e-6)

        x = optimize.minimize_scalar(self.fun, method='Brent')
        assert_(x.success)

        x = optimize.minimize_scalar(self.fun, method='Brent',
                                     options=dict(maxiter=3))
        assert_(not x.success)

        x = optimize.minimize_scalar(self.fun, bracket=(-3, -2),
                                     args=(1.5, ), method='Brent').x
        assert_allclose(x, self.solution, atol=1e-6)

        x = optimize.minimize_scalar(self.fun, method='Brent',
                                     args=(1.5,)).x
        assert_allclose(x, self.solution, atol=1e-6)

        x = optimize.minimize_scalar(self.fun, bracket=(-15, -1, 15),
                                     args=(1.5, ), method='Brent').x
        assert_allclose(x, self.solution, atol=1e-6)

        x = optimize.minimize_scalar(self.fun, bracket=(-3, -2),
                                     args=(1.5, ), method='golden').x
        assert_allclose(x, self.solution, atol=1e-6)

        x = optimize.minimize_scalar(self.fun, method='golden',
                                     args=(1.5,)).x
        assert_allclose(x, self.solution, atol=1e-6)

        x = optimize.minimize_scalar(self.fun, bracket=(-15, -1, 15),
                                     args=(1.5, ), method='golden').x
        assert_allclose(x, self.solution, atol=1e-6)

        x = optimize.minimize_scalar(self.fun, bounds=(0, 1), args=(1.5,),
                                     method='Bounded').x
        assert_allclose(x, 1, atol=1e-4)

        x = optimize.minimize_scalar(self.fun, bounds=(1, 5), args=(1.5, ),
                                     method='bounded').x
        assert_allclose(x, self.solution, atol=1e-6)

        x = optimize.minimize_scalar(self.fun, bounds=(np.array([1]),
                                                       np.array([5])),
                                     args=(np.array([1.5]), ),
                                     method='bounded').x
        assert_allclose(x, self.solution, atol=1e-6)

        assert_raises(ValueError, optimize.minimize_scalar, self.fun,
                      bounds=(5, 1), method='bounded', args=(1.5, ))

        assert_raises(ValueError, optimize.minimize_scalar, self.fun,
                      bounds=(np.zeros(2), 1), method='bounded',
                      args=(1.5, ))

        x = optimize.minimize_scalar(self.fun, bounds=(1, np.array(5)),
                                     method='bounded').x
        assert_allclose(x, self.solution, atol=1e-6)

    def test_minimize_scalar_custom(self):
        # This function comes from the documentation example.
        def custmin(fun, bracket, args=(), maxfev=None, stepsize=0.1,
                    maxiter=100, callback=None, **options):
            bestx = (bracket[1] + bracket[0]) / 2.0
            besty = fun(bestx)
            funcalls = 1
            niter = 0
            improved = True
            stop = False

            while improved and not stop and niter < maxiter:
                improved = False
                niter += 1
                for testx in [bestx - stepsize, bestx + stepsize]:
                    testy = fun(testx, *args)
                    funcalls += 1
                    if testy < besty:
                        besty = testy
                        bestx = testx
                        improved = True
                if callback is not None:
                    callback(bestx)
                if maxfev is not None and funcalls >= maxfev:
                    stop = True
                    break

            return optimize.OptimizeResult(fun=besty, x=bestx, nit=niter,
                                           nfev=funcalls,
                                           success=(niter > 1))

        res = optimize.minimize_scalar(self.fun, bracket=(0, 4),
                                       method=custmin,
                                       options=dict(stepsize=0.05))
        assert_allclose(res.x, self.solution, atol=1e-6)

    def test_minimize_scalar_coerce_args_param(self):
        # Regression test for gh-3503
        optimize.minimize_scalar(self.fun, args=1.5)


def test_brent_negative_tolerance():
    assert_raises(ValueError, optimize.brent, np.cos, tol=-.01)


class TestNewtonCg(object):
    def test_rosenbrock(self):
        x0 = np.array([-1.2, 1.0])
        sol = optimize.minimize(optimize.rosen, x0,
                                jac=optimize.rosen_der,
                                hess=optimize.rosen_hess,
                                tol=1e-5,
                                method='Newton-CG')
        assert_(sol.success, sol.message)
        assert_allclose(sol.x, np.array([1, 1]), rtol=1e-4)

    def test_himmelblau(self):
        x0 = np.array(himmelblau_x0)
        sol = optimize.minimize(himmelblau,
                                x0,
                                jac=himmelblau_grad,
                                hess=himmelblau_hess,
                                method='Newton-CG',
                                tol=1e-6)
        assert_(sol.success, sol.message)
        assert_allclose(sol.x, himmelblau_xopt, rtol=1e-4)
        assert_allclose(sol.fun, himmelblau_min, atol=1e-4)


class TestRosen(object):

    def test_hess(self):
        # Compare rosen_hess(x) times p with rosen_hess_prod(x,p).  See gh-1775
        x = np.array([3, 4, 5])
        p = np.array([2, 2, 2])
        hp = optimize.rosen_hess_prod(x, p)
        dothp = np.dot(optimize.rosen_hess(x), p)
        assert_equal(hp, dothp)


def himmelblau(p):
    """
    R^2 -> R^1 test function for optimization.  The function has four local
    minima where himmelblau(xopt) == 0.
    """
    x, y = p
    a = x*x + y - 11
    b = x + y*y - 7
    return a*a + b*b
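

# himmelblau_grad and himmelblau_hess below are the analytic first and second
# derivatives of himmelblau: with f = (x**2 + y - 11)**2 + (x + y**2 - 7)**2,
#   df/dx = 4*x**3 + 4*x*y - 42*x + 2*y**2 - 14
#   df/dy = 2*x**2 + 4*x*y + 4*y**3 - 26*y - 22
# which is exactly what the functions return, term for term.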
def himmelblau_grad(p):
    x, y = p
    return np.array([4*x**3 + 4*x*y - 42*x + 2*y**2 - 14,
                     2*x**2 + 4*x*y + 4*y**3 - 26*y - 22])


def himmelblau_hess(p):
    x, y = p
    return np.array([[12*x**2 + 4*y - 42, 4*x + 4*y],
                     [4*x + 4*y, 4*x + 12*y**2 - 26]])


himmelblau_x0 = [-0.27, -0.9]
himmelblau_xopt = [3, 2]
himmelblau_min = 0.0


def test_minimize_multiple_constraints():
    # Regression test for gh-4240.
    def func(x):
        return np.array([25 - 0.2 * x[0] - 0.4 * x[1] - 0.33 * x[2]])

    def func1(x):
        return np.array([x[1]])

    def func2(x):
        return np.array([x[2]])

    cons = ({'type': 'ineq', 'fun': func},
            {'type': 'ineq', 'fun': func1},
            {'type': 'ineq', 'fun': func2})

    f = lambda x: -1 * (x[0] + x[1] + x[2])

    res = optimize.minimize(f, [0, 0, 0], method='SLSQP', constraints=cons)
    assert_allclose(res.x, [125, 0, 0], atol=1e-10)


class TestOptimizeResultAttributes(object):
    # Test that all minimizers return an OptimizeResult containing
    # all the OptimizeResult attributes
    def setup_method(self):
        self.x0 = [5, 5]
        self.func = optimize.rosen
        self.jac = optimize.rosen_der
        self.hess = optimize.rosen_hess
        self.hessp = optimize.rosen_hess_prod
        self.bounds = [(0., 10.), (0., 10.)]

    def test_attributes_present(self):
        methods = ['Nelder-Mead', 'Powell', 'CG', 'BFGS', 'Newton-CG',
                   'L-BFGS-B', 'TNC', 'COBYLA', 'SLSQP', 'dogleg',
                   'trust-ncg']
        attributes = ['nit', 'nfev', 'x', 'success', 'status', 'fun',
                      'message']
        skip = {'COBYLA': ['nit']}
        for method in methods:
            with suppress_warnings() as sup:
                sup.filter(RuntimeWarning,
                           "Method .+ does not use (gradient|Hessian.*) information")
                res = optimize.minimize(self.func, self.x0, method=method,
                                        jac=self.jac, hess=self.hess,
                                        hessp=self.hessp)
            for attribute in attributes:
                if method in skip and attribute in skip[method]:
                    continue

                assert_(hasattr(res, attribute))
                assert_(attribute in dir(res))


class TestBrute:
    # Test the "brute force" method
    def setup_method(self):
        self.params = (2, 3, 7, 8, 9, 10, 44, -1, 2, 26, 1, -2, 0.5)
        self.rranges = (slice(-4, 4, 0.25), slice(-4, 4, 0.25))
        self.solution = np.array([-1.05665192, 1.80834843])

    def f1(self, z, *params):
        x, y = z
        a, b, c, d, e, f, g, h, i, j, k, l, scale = params
        return (a * x**2 + b * x * y + c * y**2 + d*x + e*y + f)

    def f2(self, z, *params):
        x, y = z
        a, b, c, d, e, f, g, h, i, j, k, l, scale = params
        return (-g*np.exp(-((x-h)**2 + (y-i)**2) / scale))

    def f3(self, z, *params):
        x, y = z
        a, b, c, d, e, f, g, h, i, j, k, l, scale = params
        return (-j*np.exp(-((x-k)**2 + (y-l)**2) / scale))
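
    # func below combines the pieces: the quadratic surface f1 plus the two
    # negative Gaussian wells f2 and f3, so the objective has more than one
    # local minimum within self.rranges; self.solution is the minimizer the
    # brute-force tests below expect brute() to find.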
    def func(self, z, *params):
        return self.f1(z, *params) + self.f2(z, *params) + self.f3(z, *params)

    def test_brute(self):
        # test fmin
        resbrute = optimize.brute(self.func, self.rranges, args=self.params,
                                  full_output=True, finish=optimize.fmin)
        assert_allclose(resbrute[0], self.solution, atol=1e-3)
        assert_allclose(resbrute[1], self.func(self.solution, *self.params),
                        atol=1e-3)

        # test minimize
        resbrute = optimize.brute(self.func, self.rranges, args=self.params,
                                  full_output=True,
                                  finish=optimize.minimize)
        assert_allclose(resbrute[0], self.solution, atol=1e-3)
        assert_allclose(resbrute[1], self.func(self.solution, *self.params),
                        atol=1e-3)

    def test_1D(self):
        # test that for a 1D problem the test function is passed an array,
        # not a scalar.
        def f(x):
            assert_(len(x.shape) == 1)
            assert_(x.shape[0] == 1)
            return x ** 2

        optimize.brute(f, [(-1, 1)], Ns=3, finish=None)


class TestIterationLimits(object):
    # Tests that optimisation does not give up before trying the requested
    # number of iterations or evaluations, and that it does not succeed
    # by exceeding the limits.
    def setup_method(self):
        self.funcalls = 0

    def slow_func(self, v):
        self.funcalls += 1
        r, t = np.sqrt(v[0]**2+v[1]**2), np.arctan2(v[0], v[1])
        return np.sin(r*20 + t)+r*0.5

    def test_neldermead_limit(self):
        self.check_limits("Nelder-Mead", 200)

    def test_powell_limit(self):
        self.check_limits("powell", 1000)

    def check_limits(self, method, default_iters):
        for start_v in [[0.1, 0.1], [1, 1], [2, 2]]:
            for mfev in [50, 500, 5000]:
                self.funcalls = 0
                res = optimize.minimize(self.slow_func, start_v,
                                        method=method,
                                        options={"maxfev": mfev})
                assert_(self.funcalls == res["nfev"])
                if res["success"]:
                    assert_(res["nfev"] < mfev)
                else:
                    assert_(res["nfev"] >= mfev)
            for mit in [50, 500, 5000]:
                res = optimize.minimize(self.slow_func, start_v,
                                        method=method,
                                        options={"maxiter": mit})
                if res["success"]:
                    assert_(res["nit"] <= mit)
                else:
                    assert_(res["nit"] >= mit)
            for mfev, mit in [[50, 50], [5000, 5000], [5000, np.inf]]:
                self.funcalls = 0
                res = optimize.minimize(self.slow_func, start_v,
                                        method=method,
                                        options={"maxiter": mit,
                                                 "maxfev": mfev})
                assert_(self.funcalls == res["nfev"])
                if res["success"]:
                    assert_(res["nfev"] < mfev and res["nit"] <= mit)
                else:
                    assert_(res["nfev"] >= mfev or res["nit"] >= mit)
            for mfev, mit in [[np.inf, None], [None, np.inf]]:
                self.funcalls = 0
                res = optimize.minimize(self.slow_func, start_v,
                                        method=method,
                                        options={"maxiter": mit,
                                                 "maxfev": mfev})
                assert_(self.funcalls == res["nfev"])
                if res["success"]:
                    if mfev is None:
                        assert_(res["nfev"] < default_iters*2)
                    else:
                        assert_(res["nit"] <= default_iters*2)
                else:
                    assert_(res["nfev"] >= default_iters*2 or
                            res["nit"] >= default_iters*2)