from __future__ import division, absolute_import, print_function

__all__ = ['atleast_1d', 'atleast_2d', 'atleast_3d', 'block', 'hstack',
           'stack', 'vstack']

import functools
import operator
import types
import warnings

from . import numeric as _nx
from . import overrides
from .numeric import array, asanyarray, newaxis
from .multiarray import normalize_axis_index

array_function_dispatch = functools.partial(
    overrides.array_function_dispatch, module='numpy')


def _atleast_1d_dispatcher(*arys):
    return arys


@array_function_dispatch(_atleast_1d_dispatcher)
def atleast_1d(*arys):
    """
    Convert inputs to arrays with at least one dimension.

    Scalar inputs are converted to 1-dimensional arrays, whilst
    higher-dimensional inputs are preserved.

    Parameters
    ----------
    arys1, arys2, ... : array_like
        One or more input arrays.

    Returns
    -------
    ret : ndarray
        An array, or list of arrays, each with ``a.ndim >= 1``.
        Copies are made only if necessary.

    See Also
    --------
    atleast_2d, atleast_3d

    Examples
    --------
    >>> np.atleast_1d(1.0)
    array([ 1.])

    >>> x = np.arange(9.0).reshape(3,3)
    >>> np.atleast_1d(x)
    array([[ 0.,  1.,  2.],
           [ 3.,  4.,  5.],
           [ 6.,  7.,  8.]])
    >>> np.atleast_1d(x) is x
    True

    >>> np.atleast_1d(1, [3, 4])
    [array([1]), array([3, 4])]

    """
    res = []
    for ary in arys:
        ary = asanyarray(ary)
        if ary.ndim == 0:
            result = ary.reshape(1)
        else:
            result = ary
        res.append(result)
    if len(res) == 1:
        return res[0]
    else:
        return res


def _atleast_2d_dispatcher(*arys):
    return arys


@array_function_dispatch(_atleast_2d_dispatcher)
def atleast_2d(*arys):
    """
    View inputs as arrays with at least two dimensions.

    Parameters
    ----------
    arys1, arys2, ... : array_like
        One or more array-like sequences. Non-array inputs are converted
        to arrays. Arrays that already have two or more dimensions are
        preserved.

    Returns
    -------
    res, res2, ... : ndarray
        An array, or list of arrays, each with ``a.ndim >= 2``.
        Copies are avoided where possible, and views with two or more
        dimensions are returned.

    See Also
    --------
    atleast_1d, atleast_3d

    Examples
    --------
    >>> np.atleast_2d(3.0)
    array([[ 3.]])

    >>> x = np.arange(3.0)
    >>> np.atleast_2d(x)
    array([[ 0.,  1.,  2.]])
    >>> np.atleast_2d(x).base is x
    True

    >>> np.atleast_2d(1, [1, 2], [[1, 2]])
    [array([[1]]), array([[1, 2]]), array([[1, 2]])]

    """
    res = []
    for ary in arys:
        ary = asanyarray(ary)
        if ary.ndim == 0:
            result = ary.reshape(1, 1)
        elif ary.ndim == 1:
            result = ary[newaxis, :]
        else:
            result = ary
        res.append(result)
    if len(res) == 1:
        return res[0]
    else:
        return res


def _atleast_3d_dispatcher(*arys):
    return arys


@array_function_dispatch(_atleast_3d_dispatcher)
def atleast_3d(*arys):
    """
    View inputs as arrays with at least three dimensions.

    Parameters
    ----------
    arys1, arys2, ... : array_like
        One or more array-like sequences. Non-array inputs are converted to
        arrays. Arrays that already have three or more dimensions are
        preserved.

    Returns
    -------
    res1, res2, ... : ndarray
        An array, or list of arrays, each with ``a.ndim >= 3``. Copies are
        avoided where possible, and views with three or more dimensions are
        returned. For example, a 1-D array of shape ``(N,)`` becomes a view
        of shape ``(1, N, 1)``, and a 2-D array of shape ``(M, N)`` becomes a
        view of shape ``(M, N, 1)``.

    See Also
    --------
    atleast_1d, atleast_2d

    Examples
    --------
    >>> np.atleast_3d(3.0)
    array([[[ 3.]]])

    >>> x = np.arange(3.0)
    >>> np.atleast_3d(x).shape
    (1, 3, 1)

    >>> x = np.arange(12.0).reshape(4,3)
    >>> np.atleast_3d(x).shape
    (4, 3, 1)
    >>> np.atleast_3d(x).base is x.base  # x is a reshape, so not base itself
    True

    >>> for arr in np.atleast_3d([1, 2], [[1, 2]], [[[1, 2]]]):
    ...     print(arr, arr.shape)
    ...
    [[[1]
      [2]]] (1, 2, 1)
    [[[1]
      [2]]] (1, 2, 1)
    [[[1 2]]] (1, 1, 2)

    """
    res = []
    for ary in arys:
        ary = asanyarray(ary)
        if ary.ndim == 0:
            result = ary.reshape(1, 1, 1)
        elif ary.ndim == 1:
            result = ary[newaxis, :, newaxis]
        elif ary.ndim == 2:
            result = ary[:, :, newaxis]
        else:
            result = ary
        res.append(result)
    if len(res) == 1:
        return res[0]
    else:
        return res


def _arrays_for_stack_dispatcher(arrays, stacklevel=4):
    if not hasattr(arrays, '__getitem__') and hasattr(arrays, '__iter__'):
        warnings.warn('arrays to stack must be passed as a "sequence" type '
                      'such as list or tuple. Support for non-sequence '
                      'iterables such as generators is deprecated as of '
                      'NumPy 1.16 and will raise an error in the future.',
                      FutureWarning, stacklevel=stacklevel)
        return ()
    return arrays


def _warn_for_nonsequence(arrays):
    if not overrides.ENABLE_ARRAY_FUNCTION:
        _arrays_for_stack_dispatcher(arrays, stacklevel=4)
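
# Illustration (hypothetical call): a generator argument such as
# ``np.hstack(np.ones(3) for _ in range(2))`` lacks ``__getitem__`` and takes
# the warning branch above; a list or tuple of arrays does not.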


def _vhstack_dispatcher(tup):
    return _arrays_for_stack_dispatcher(tup)


@array_function_dispatch(_vhstack_dispatcher)
def vstack(tup):
    """
    Stack arrays in sequence vertically (row wise).

    This is equivalent to concatenation along the first axis after 1-D arrays
    of shape `(N,)` have been reshaped to `(1,N)`. Rebuilds arrays divided by
    `vsplit`.

    This function makes most sense for arrays with up to 3 dimensions. For
    instance, for pixel-data with a height (first axis), width (second axis),
    and r/g/b channels (third axis). The functions `concatenate`, `stack` and
    `block` provide more general stacking and concatenation operations.

    Parameters
    ----------
    tup : sequence of ndarrays
        The arrays must have the same shape along all but the first axis.
        1-D arrays must have the same length.

    Returns
    -------
    stacked : ndarray
        The array formed by stacking the given arrays, which will be at
        least 2-D.

    See Also
    --------
    stack : Join a sequence of arrays along a new axis.
    hstack : Stack arrays in sequence horizontally (column wise).
    dstack : Stack arrays in sequence depth wise (along third dimension).
    concatenate : Join a sequence of arrays along an existing axis.
    vsplit : Split array into a list of multiple sub-arrays vertically.
    block : Assemble arrays from blocks.

    Examples
    --------
    >>> a = np.array([1, 2, 3])
    >>> b = np.array([2, 3, 4])
    >>> np.vstack((a,b))
    array([[1, 2, 3],
           [2, 3, 4]])

    >>> a = np.array([[1], [2], [3]])
    >>> b = np.array([[2], [3], [4]])
    >>> np.vstack((a,b))
    array([[1],
           [2],
           [3],
           [2],
           [3],
           [4]])

    """
    _warn_for_nonsequence(tup)
    return _nx.concatenate([atleast_2d(_m) for _m in tup], 0)


@array_function_dispatch(_vhstack_dispatcher)
def hstack(tup):
    """
    Stack arrays in sequence horizontally (column wise).

    This is equivalent to concatenation along the second axis, except for 1-D
    arrays where it concatenates along the first axis. Rebuilds arrays divided
    by `hsplit`.

    This function makes most sense for arrays with up to 3 dimensions. For
    instance, for pixel-data with a height (first axis), width (second axis),
    and r/g/b channels (third axis). The functions `concatenate`, `stack` and
    `block` provide more general stacking and concatenation operations.

    Parameters
    ----------
    tup : sequence of ndarrays
        The arrays must have the same shape along all but the second axis,
        except 1-D arrays which can be any length.

    Returns
    -------
    stacked : ndarray
        The array formed by stacking the given arrays.

    See Also
    --------
    stack : Join a sequence of arrays along a new axis.
    vstack : Stack arrays in sequence vertically (row wise).
    dstack : Stack arrays in sequence depth wise (along third axis).
    concatenate : Join a sequence of arrays along an existing axis.
    hsplit : Split array along second axis.
    block : Assemble arrays from blocks.

    Examples
    --------
    >>> a = np.array((1,2,3))
    >>> b = np.array((2,3,4))
    >>> np.hstack((a,b))
    array([1, 2, 3, 2, 3, 4])
    >>> a = np.array([[1],[2],[3]])
    >>> b = np.array([[2],[3],[4]])
    >>> np.hstack((a,b))
    array([[1, 2],
           [2, 3],
           [3, 4]])

    """
    _warn_for_nonsequence(tup)
    arrs = [atleast_1d(_m) for _m in tup]
    # As a special case, dimension 0 of 1-dimensional arrays is "horizontal"
    if arrs and arrs[0].ndim == 1:
        return _nx.concatenate(arrs, 0)
    else:
        return _nx.concatenate(arrs, 1)


def _stack_dispatcher(arrays, axis=None, out=None):
    arrays = _arrays_for_stack_dispatcher(arrays, stacklevel=6)
    if out is not None:
        # optimize for the typical case where only arrays is provided
        arrays = list(arrays)
        arrays.append(out)
    return arrays


@array_function_dispatch(_stack_dispatcher)
def stack(arrays, axis=0, out=None):
    """
    Join a sequence of arrays along a new axis.

    The `axis` parameter specifies the index of the new axis in the dimensions
    of the result. For example, if ``axis=0`` it will be the first dimension
    and if ``axis=-1`` it will be the last dimension.

    .. versionadded:: 1.10.0

    Parameters
    ----------
    arrays : sequence of array_like
        Each array must have the same shape.
    axis : int, optional
        The axis in the result array along which the input arrays are stacked.
    out : ndarray, optional
        If provided, the destination to place the result. The shape must be
        correct, matching that of what stack would have returned if no
        out argument were specified.

    Returns
    -------
    stacked : ndarray
        The stacked array has one more dimension than the input arrays.

    See Also
    --------
    concatenate : Join a sequence of arrays along an existing axis.
    split : Split array into a list of multiple sub-arrays of equal size.
    block : Assemble arrays from blocks.

    Examples
    --------
    >>> arrays = [np.random.randn(3, 4) for _ in range(10)]
    >>> np.stack(arrays, axis=0).shape
    (10, 3, 4)

    >>> np.stack(arrays, axis=1).shape
    (3, 10, 4)

    >>> np.stack(arrays, axis=2).shape
    (3, 4, 10)

    >>> a = np.array([1, 2, 3])
    >>> b = np.array([2, 3, 4])
    >>> np.stack((a, b))
    array([[1, 2, 3],
           [2, 3, 4]])

    >>> np.stack((a, b), axis=-1)
    array([[1, 2],
           [2, 3],
           [3, 4]])

    """
    _warn_for_nonsequence(arrays)
    arrays = [asanyarray(arr) for arr in arrays]
    if not arrays:
        raise ValueError('need at least one array to stack')

    shapes = {arr.shape for arr in arrays}
    if len(shapes) != 1:
        raise ValueError('all input arrays must have the same shape')

    result_ndim = arrays[0].ndim + 1
    axis = normalize_axis_index(axis, result_ndim)

    sl = (slice(None),) * axis + (_nx.newaxis,)
    expanded_arrays = [arr[sl] for arr in arrays]
    return _nx.concatenate(expanded_arrays, axis=axis, out=out)


def _block_format_index(index):
    """
    Convert a list of indices ``[0, 1, 2]`` into ``"arrays[0][1][2]"``.
    """
    idx_str = ''.join('[{}]'.format(i) for i in index if i is not None)
    return 'arrays' + idx_str


def _block_check_depths_match(arrays, parent_index=[]):
    """
    Recursive function checking that the depths of nested lists in `arrays`
    all match. Mismatch raises a ValueError as described in the block
    docstring below.

    The entire index (rather than just the depth) needs to be calculated
    for each innermost list, in case an error needs to be raised, so that
    the index of the offending list can be printed as part of the error.

    Parameters
    ----------
    arrays : nested list of arrays
        The arrays to check
    parent_index : list of int
        The full index of `arrays` within the nested lists passed to
        `_block_check_depths_match` at the top of the recursion.

    Returns
    -------
    first_index : list of int
        The full index of an element from the bottom of the nesting in
        `arrays`. If any element at the bottom is an empty list, this will
        refer to it, and the last index along the empty axis will be `None`.
    max_arr_ndim : int
        The maximum of the ndims of the arrays nested in `arrays`.
    final_size : int
        The number of elements in the final array. This is used to motivate
        the choice of algorithm, based on benchmarking wisdom.
    """
    if type(arrays) is tuple:
        # not strictly necessary, but saves us from:
        #  - more than one way to do things - no point treating tuples like
        #    lists
        #  - horribly confusing behaviour that results when tuples are
        #    treated like ndarray
        raise TypeError(
            '{} is a tuple. '
            'Only lists can be used to arrange blocks, and np.block does '
            'not allow implicit conversion from tuple to ndarray.'.format(
                _block_format_index(parent_index)
            )
        )
    elif type(arrays) is list and len(arrays) > 0:
        idxs_ndims = (_block_check_depths_match(arr, parent_index + [i])
                      for i, arr in enumerate(arrays))

        first_index, max_arr_ndim, final_size = next(idxs_ndims)
        for index, ndim, size in idxs_ndims:
            final_size += size
            if ndim > max_arr_ndim:
                max_arr_ndim = ndim
            if len(index) != len(first_index):
                raise ValueError(
                    "List depths are mismatched. First element was at depth "
                    "{}, but there is an element at depth {} ({})".format(
                        len(first_index),
                        len(index),
                        _block_format_index(index)
                    )
                )
            # propagate our flag that indicates an empty list at the bottom
            if index[-1] is None:
                first_index = index
        return first_index, max_arr_ndim, final_size
    elif type(arrays) is list and len(arrays) == 0:
        # We've 'bottomed out' on an empty list
        return parent_index + [None], 0, 0
    else:
        # We've 'bottomed out' - arrays is either a scalar or an array
        size = _nx.size(arrays)
        return parent_index, _nx.ndim(arrays), size
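
# Illustration (hypothetical inputs): for 2-D blocks ``a = _nx.ones((2, 2))``,
# ``b = _nx.ones((2, 3))`` and ``c = _nx.ones((1, 5))``,
#
#     _block_check_depths_match([[a, b], [c]]) == ([0, 0], 2, 15)
#
# i.e. the index of the first bottom-level element, the largest ndim found,
# and the total element count 4 + 6 + 5.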


def _atleast_nd(a, ndim):
    # Ensures `a` has at least `ndim` dimensions by prepending
    # ones to `a.shape` as necessary
    return array(a, ndmin=ndim, copy=False, subok=True)


def _accumulate(values):
    # Helper function because Python 2.7 doesn't have
    # itertools.accumulate
    value = 0
    accumulated = []
    for v in values:
        value += v
        accumulated.append(value)
    return accumulated
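
# Illustration (hypothetical input): _accumulate returns running totals,
# matching ``list(itertools.accumulate(values))`` on Python 3. For example,
# ``_accumulate([2, 3, 4])`` gives ``[2, 5, 9]``.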


def _concatenate_shapes(shapes, axis):
    """Given array shapes, return the resulting shape and slice prefixes.

    These help in nested concatenation.

    Returns
    -------
    shape : tuple of int
        This tuple satisfies::

            shape, _ = _concatenate_shapes([arr.shape for arr in arrs], axis)
            shape == concatenate(arrs, axis).shape

    slice_prefixes : tuple of (slice(start, end), )
        For a list of arrays being concatenated, this returns the slice
        in the larger array at `axis` that needs to be sliced into.

        For example, the following holds::

            ret = concatenate([a, b, c], axis)
            shapes = [a.shape, b.shape, c.shape]
            _, (sl_a, sl_b, sl_c) = _concatenate_shapes(shapes, axis)

            ret[(slice(None),) * axis + sl_a] == a
            ret[(slice(None),) * axis + sl_b] == b
            ret[(slice(None),) * axis + sl_c] == c

        These are called slice prefixes since they are used in the recursive
        blocking algorithm to compute the left-most slices during the
        recursion. Therefore, they must be prepended to the rest of the slice
        that was computed deeper in the recursion.

        These are returned as tuples to ensure that they can quickly be added
        to existing slice tuples without creating a new tuple every time.
    """
    # Cache a result that will be reused.
    shape_at_axis = [shape[axis] for shape in shapes]

    # Take a shape, any shape
    first_shape = shapes[0]
    first_shape_pre = first_shape[:axis]
    first_shape_post = first_shape[axis+1:]

    if any(shape[:axis] != first_shape_pre or
           shape[axis+1:] != first_shape_post for shape in shapes):
        raise ValueError(
            'Mismatched array shapes in block along axis {}.'.format(axis))

    shape = (first_shape_pre + (sum(shape_at_axis),) + first_shape[axis+1:])

    offsets_at_axis = _accumulate(shape_at_axis)
    slice_prefixes = [(slice(start, end),)
                      for start, end in zip([0] + offsets_at_axis,
                                            offsets_at_axis)]
    return shape, slice_prefixes
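
# Illustration (hypothetical shapes): concatenating column blocks of shapes
# (2, 3) and (2, 4) along axis 1 gives
#
#     _concatenate_shapes([(2, 3), (2, 4)], axis=1)
#     == ((2, 7), [(slice(0, 3),), (slice(3, 7),)])
#
# i.e. the combined shape plus the per-block slices along that axis.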


def _block_info_recursion(arrays, max_depth, result_ndim, depth=0):
    """
    Returns the shape of the final array, along with a list
    of slices and a list of arrays that can be used for assignment inside the
    new array.

    Parameters
    ----------
    arrays : nested list of arrays
        The arrays to check
    max_depth : int
        The depth of nested lists
    result_ndim : int
        The number of dimensions in the final array.

    Returns
    -------
    shape : tuple of int
        The shape that the final array will take on.
    slices : list of tuple of slices
        The slices into the full array required for assignment. These are
        required to be prepended with ``(Ellipsis, )`` to obtain the correct
        final index.
    arrays : list of ndarray
        The data to assign to each slice of the full array
    """
    if depth < max_depth:
        shapes, slices, arrays = zip(
            *[_block_info_recursion(arr, max_depth, result_ndim, depth+1)
              for arr in arrays])

        axis = result_ndim - max_depth + depth
        shape, slice_prefixes = _concatenate_shapes(shapes, axis)

        # Prepend the slice prefix and flatten the slices
        slices = [slice_prefix + the_slice
                  for slice_prefix, inner_slices in zip(slice_prefixes, slices)
                  for the_slice in inner_slices]

        # Flatten the array list
        arrays = functools.reduce(operator.add, arrays)

        return shape, slices, arrays
    else:
        # We've 'bottomed out' - arrays is either a scalar or an array
        # type(arrays) is not list
        # Return the slice and the array inside a list to be consistent with
        # the recursive case.
        arr = _atleast_nd(arrays, result_ndim)
        return arr.shape, [()], [arr]
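
# Illustration (hypothetical inputs): for ``a = _nx.ones((2, 2))`` and
# ``b = _nx.ones((2, 3))``,
# ``_block_info_recursion([[a, b]], max_depth=2, result_ndim=2)`` returns the
# final shape ``(2, 5)``, the slices
# ``[(slice(0, 2), slice(0, 2)), (slice(0, 2), slice(2, 5))]`` selecting the
# two column blocks, and the flat list ``[a, b]`` to assign into those slices.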


def _block(arrays, max_depth, result_ndim, depth=0):
    """
    Internal implementation of block based on repeated concatenation.
    `arrays` is the argument passed to
    block. `max_depth` is the depth of nested lists within `arrays` and
    `result_ndim` is the greatest of the dimensions of the arrays in
    `arrays` and the depth of the lists in `arrays` (see block docstring
    for details).
    """
    if depth < max_depth:
        arrs = [_block(arr, max_depth, result_ndim, depth+1)
                for arr in arrays]
        return _nx.concatenate(arrs, axis=-(max_depth-depth))
    else:
        # We've 'bottomed out' - arrays is either a scalar or an array
        # type(arrays) is not list
        return _atleast_nd(arrays, result_ndim)
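
# Illustration (hypothetical inputs): for a 2x2 layout ``[[a, b], [c, d]]``
# with ``max_depth=2`` and ``result_ndim=2``, the call expands to
#
#     concatenate([concatenate([a, b], axis=-1),
#                  concatenate([c, d], axis=-1)], axis=-2)
#
# i.e. the innermost lists are joined along the last axis first.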


def _block_dispatcher(arrays):
    # Use type(...) is list to match the behavior of np.block(), which special
    # cases list specifically rather than allowing for generic iterables or
    # tuple. Also, we know that list.__array_function__ will never exist.
    if type(arrays) is list:
        for subarrays in arrays:
            for subarray in _block_dispatcher(subarrays):
                yield subarray
    else:
        yield arrays


@array_function_dispatch(_block_dispatcher)
def block(arrays):
    """
    Assemble an nd-array from nested lists of blocks.

    Blocks in the innermost lists are concatenated (see `concatenate`) along
    the last dimension (-1), then these are concatenated along the
    second-last dimension (-2), and so on until the outermost list is reached.

    Blocks can be of any dimension, but will not be broadcasted using the
    normal rules. Instead, leading axes of size 1 are inserted, to make
    ``block.ndim`` the same for all blocks. This is primarily useful for
    working with scalars, and means that code like ``np.block([v, 1])`` is
    valid, where ``v.ndim == 1``.

    When the nested list is two levels deep, this allows block matrices to be
    constructed from their components.

    .. versionadded:: 1.13.0

    Parameters
    ----------
    arrays : nested list of array_like or scalars (but not tuples)
        If passed a single ndarray or scalar (a nested list of depth 0), this
        is returned unmodified (and not copied).

        Element shapes must match along the appropriate axes (without
        broadcasting), but leading 1s will be prepended to the shape as
        necessary to make the dimensions match.

    Returns
    -------
    block_array : ndarray
        The array assembled from the given blocks.

        The dimensionality of the output is equal to the greatest of:
        * the dimensionality of all the inputs
        * the depth to which the input list is nested

    Raises
    ------
    ValueError
        * If list depths are mismatched - for instance, ``[[a, b], c]`` is
          illegal, and should be spelt ``[[a, b], [c]]``
        * If lists are empty - for instance, ``[[a, b], []]``

    See Also
    --------
    concatenate : Join a sequence of arrays together.
    stack : Stack arrays in sequence along a new dimension.
    hstack : Stack arrays in sequence horizontally (column wise).
    vstack : Stack arrays in sequence vertically (row wise).
    dstack : Stack arrays in sequence depth wise (along third dimension).
    vsplit : Split array into a list of multiple sub-arrays vertically.

    Notes
    -----
    When called with only scalars, ``np.block`` is equivalent to an ndarray
    call. So ``np.block([[1, 2], [3, 4]])`` is equivalent to
    ``np.array([[1, 2], [3, 4]])``.

    This function does not enforce that the blocks lie on a fixed grid.
    ``np.block([[a, b], [c, d]])`` is not restricted to arrays of the form::

        AAAbb
        AAAbb
        cccDD

    But is also allowed to produce, for some ``a, b, c, d``::

        AAAbb
        AAAbb
        cDDDD

    Since concatenation happens along the last axis first, `block` is _not_
    capable of producing the following directly::

        AAAbb
        cccbb
        cccDD

    Matlab's "square bracket stacking", ``[A, B, ...; p, q, ...]``, is
    equivalent to ``np.block([[A, B, ...], [p, q, ...]])``.

    Examples
    --------
    The most common use of this function is to build a block matrix

    >>> A = np.eye(2) * 2
    >>> B = np.eye(3) * 3
    >>> np.block([
    ...     [A,               np.zeros((2, 3))],
    ...     [np.ones((3, 2)), B               ]
    ... ])
    array([[ 2.,  0.,  0.,  0.,  0.],
           [ 0.,  2.,  0.,  0.,  0.],
           [ 1.,  1.,  3.,  0.,  0.],
           [ 1.,  1.,  0.,  3.,  0.],
           [ 1.,  1.,  0.,  0.,  3.]])

    With a list of depth 1, `block` can be used as `hstack`

    >>> np.block([1, 2, 3])              # hstack([1, 2, 3])
    array([1, 2, 3])

    >>> a = np.array([1, 2, 3])
    >>> b = np.array([2, 3, 4])
    >>> np.block([a, b, 10])             # hstack([a, b, 10])
    array([ 1,  2,  3,  2,  3,  4, 10])

    >>> A = np.ones((2, 2), int)
    >>> B = 2 * A
    >>> np.block([A, B])                 # hstack([A, B])
    array([[1, 1, 2, 2],
           [1, 1, 2, 2]])

    With a list of depth 2, `block` can be used in place of `vstack`:

    >>> a = np.array([1, 2, 3])
    >>> b = np.array([2, 3, 4])
    >>> np.block([[a], [b]])             # vstack([a, b])
    array([[1, 2, 3],
           [2, 3, 4]])

    >>> A = np.ones((2, 2), int)
    >>> B = 2 * A
    >>> np.block([[A], [B]])             # vstack([A, B])
    array([[1, 1],
           [1, 1],
           [2, 2],
           [2, 2]])

    It can also be used in place of `atleast_1d` and `atleast_2d`

    >>> a = np.array(0)
    >>> b = np.array([1])
    >>> np.block([a])                    # atleast_1d(a)
    array([0])
    >>> np.block([b])                    # atleast_1d(b)
    array([1])

    >>> np.block([[a]])                  # atleast_2d(a)
    array([[0]])
    >>> np.block([[b]])                  # atleast_2d(b)
    array([[1]])

    """
    arrays, list_ndim, result_ndim, final_size = _block_setup(arrays)

    # Benchmarking found that building a final array of around 256x256 was
    # faster by straight concatenation on an i7-7700HQ processor with
    # dual-channel 2400MHz RAM. The dtype used did not seem to matter much.
    #
    # A 2D array using repeated concatenation requires 2 copies of the array.
    #
    # The fastest algorithm will depend on the ratio of CPU power to memory
    # speed.
    # One can monitor the results of the benchmark
    # https://pv.github.io/numpy-bench/#bench_shape_base.Block2D.time_block2d
    # to tune this parameter until a C version of the `_block_info_recursion`
    # algorithm is implemented which would likely be faster than the python
    # version.
    if list_ndim * final_size > (2 * 512 * 512):
        return _block_slicing(arrays, list_ndim, result_ndim)
    else:
        return _block_concatenate(arrays, list_ndim, result_ndim)


# These helper functions are mostly used for testing.
# They allow us to write tests that directly call `_block_slicing`
# or `_block_concatenate` without blocking large arrays to force the wisdom
# to trigger the desired path.
def _block_setup(arrays):
    """
    Returns
    (`arrays`, list_ndim, result_ndim, final_size)
    """
    bottom_index, arr_ndim, final_size = _block_check_depths_match(arrays)
    list_ndim = len(bottom_index)
    if bottom_index and bottom_index[-1] is None:
        raise ValueError(
            'List at {} cannot be empty'.format(
                _block_format_index(bottom_index)
            )
        )
    result_ndim = max(arr_ndim, list_ndim)
    return arrays, list_ndim, result_ndim, final_size
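
# Illustration (hypothetical input): for
# ``arrays = [[_nx.ones((2, 2)), _nx.zeros((2, 3))]]`` the setup step returns
# ``(arrays, 2, 2, 10)``: the nesting depth is 2, the result is 2-D, and the
# final array holds 4 + 6 = 10 elements.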


def _block_slicing(arrays, list_ndim, result_ndim):
    shape, slices, arrays = _block_info_recursion(
        arrays, list_ndim, result_ndim)
    dtype = _nx.result_type(*[arr.dtype for arr in arrays])

    # Test preferring F only in the case that all input arrays are F
    F_order = all(arr.flags['F_CONTIGUOUS'] for arr in arrays)
    C_order = all(arr.flags['C_CONTIGUOUS'] for arr in arrays)
    order = 'F' if F_order and not C_order else 'C'

    result = _nx.empty(shape=shape, dtype=dtype, order=order)
    # Note: In a c implementation, the function
    # PyArray_CreateMultiSortedStridePerm could be used for more advanced
    # guessing of the desired order.

    for the_slice, arr in zip(slices, arrays):
        result[(Ellipsis,) + the_slice] = arr
    return result


def _block_concatenate(arrays, list_ndim, result_ndim):
    result = _block(arrays, list_ndim, result_ndim)
    if list_ndim == 0:
        # Catch an edge case where _block returns a view because
        # `arrays` is a single numpy array and not a list of numpy arrays.
        # This might copy scalars or lists twice, but this isn't a likely
        # usecase for those interested in performance
        result = result.copy()
    return result
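
# Illustrative check (hypothetical input): both internal paths agree with
# ``np.block`` itself. For example, with
# ``arrays = [[_nx.ones((2, 2)), _nx.zeros((2, 2))]]`` and
# ``arrays, list_ndim, result_ndim, _ = _block_setup(arrays)``, the results of
# ``_block_slicing(arrays, list_ndim, result_ndim)`` and
# ``_block_concatenate(arrays, list_ndim, result_ndim)`` are equal
# element-wise and both have shape ``(2, 4)``.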