from __future__ import division, absolute_import, print_function

import sys
import os
import re
import functools
import itertools
import warnings
import weakref
from operator import itemgetter, index as opindex

import numpy as np
from . import format
from ._datasource import DataSource
from numpy.core import overrides
from numpy.core.multiarray import packbits, unpackbits
from numpy.core.overrides import set_module
from numpy.core._internal import recursive
from ._iotools import (
    LineSplitter, NameValidator, StringConverter, ConverterError,
    ConverterLockError, ConversionWarning, _is_string_like,
    has_nested_fields, flatten_dtype, easy_dtype, _decode_line
    )
from numpy.compat import (
    asbytes, asstr, asunicode, asbytes_nested, bytes, basestring, unicode,
    os_fspath, os_PathLike
    )
from numpy.core.numeric import pickle

if sys.version_info[0] >= 3:
    from collections.abc import Mapping
else:
    from future_builtins import map
    from collections import Mapping


@set_module('numpy')
def loads(*args, **kwargs):
    # NumPy 1.15.0, 2017-12-10
    warnings.warn(
        "np.loads is deprecated, use pickle.loads instead",
        DeprecationWarning, stacklevel=2)
    return pickle.loads(*args, **kwargs)


__all__ = [
    'savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt',
    'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez',
    'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource'
    ]


array_function_dispatch = functools.partial(
    overrides.array_function_dispatch, module='numpy')


class BagObj(object):
    """
    BagObj(obj)

    Convert attribute look-ups to getitems on the object passed in.

    Parameters
    ----------
    obj : class instance
        Object on which attribute look-up is performed.

    Examples
    --------
    >>> from numpy.lib.npyio import BagObj as BO
    >>> class BagDemo(object):
    ...     def __getitem__(self, key):  # An instance of BagObj(BagDemo)
    ...                                  # will call this method when any
    ...                                  # attribute look-up is required
    ...         result = "Doesn't matter what you want, "
    ...         return result + "you're gonna get this"
    ...
    >>> demo_obj = BagDemo()
    >>> bagobj = BO(demo_obj)
    >>> bagobj.hello_there
    "Doesn't matter what you want, you're gonna get this"
    >>> bagobj.I_can_be_anything
    "Doesn't matter what you want, you're gonna get this"

    """

    def __init__(self, obj):
        # Use weakref to make NpzFile objects collectable by refcount
        self._obj = weakref.proxy(obj)

    def __getattribute__(self, key):
        try:
            return object.__getattribute__(self, '_obj')[key]
        except KeyError:
            raise AttributeError(key)

    def __dir__(self):
        """
        Enables dir(bagobj) to list the files in an NpzFile.

        This also enables tab-completion in an interpreter or IPython.
        """
        return list(object.__getattribute__(self, '_obj').keys())


def zipfile_factory(file, *args, **kwargs):
    """
    Create a ZipFile.

    Allows for Zip64, and the `file` argument can accept file, str, or
    pathlib.Path objects. `args` and `kwargs` are passed to the zipfile.ZipFile
    constructor.
    """
    if not hasattr(file, 'read'):
        file = os_fspath(file)
    import zipfile
    kwargs['allowZip64'] = True
    return zipfile.ZipFile(file, *args, **kwargs)


class NpzFile(Mapping):
    """
    NpzFile(fid)

    A dictionary-like object with lazy-loading of files in the zipped
    archive provided on construction.

    `NpzFile` is used to load files in the NumPy ``.npz`` data archive
    format. It assumes that files in the archive have a ``.npy`` extension;
    other files are ignored.

    The arrays and file strings are lazily loaded on either
    getitem access using ``obj['key']`` or attribute lookup using
    ``obj.f.key``. A list of all files (without ``.npy`` extensions) can
    be obtained with ``obj.files`` and the ZipFile object itself using
    ``obj.zip``.

    Attributes
    ----------
    files : list of str
        List of all files in the archive with a ``.npy`` extension.
    zip : ZipFile instance
        The ZipFile object initialized with the zipped archive.
    f : BagObj instance
        An object on which attribute look-up can be performed as an
        alternative to getitem access on the `NpzFile` instance itself.
    allow_pickle : bool, optional
        Allow loading pickled data. Default: False

        .. versionchanged:: 1.16.3
            Made default False in response to CVE-2019-6446.

    pickle_kwargs : dict, optional
        Additional keyword arguments to pass on to pickle.load.
        These are only useful when loading object arrays saved on
        Python 2 when using Python 3.

    Parameters
    ----------
    fid : file or str
        The zipped archive to open. This is either a file-like object
        or a string containing the path to the archive.
    own_fid : bool, optional
        Whether NpzFile should close the file handle.
        Requires that `fid` is a file-like object.

    Examples
    --------
    >>> from tempfile import TemporaryFile
    >>> outfile = TemporaryFile()
    >>> x = np.arange(10)
    >>> y = np.sin(x)
    >>> np.savez(outfile, x=x, y=y)
    >>> outfile.seek(0)

    >>> npz = np.load(outfile)
    >>> isinstance(npz, np.lib.npyio.NpzFile)
    True
    >>> npz.files
    ['y', 'x']
    >>> npz['x']  # getitem access
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    >>> npz.f.x  # attribute lookup
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

    """

    def __init__(self, fid, own_fid=False, allow_pickle=False,
                 pickle_kwargs=None):
        # Import is postponed to here since zipfile depends on gzip, an
        # optional component of the so-called standard library.
        _zip = zipfile_factory(fid)
        self._files = _zip.namelist()
        self.files = []
        self.allow_pickle = allow_pickle
        self.pickle_kwargs = pickle_kwargs
        for x in self._files:
            if x.endswith('.npy'):
                self.files.append(x[:-4])
            else:
                self.files.append(x)
        self.zip = _zip
        self.f = BagObj(self)
        if own_fid:
            self.fid = fid
        else:
            self.fid = None

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()

    def close(self):
        """
        Close the file.
        """
        if self.zip is not None:
            self.zip.close()
            self.zip = None
        if self.fid is not None:
            self.fid.close()
            self.fid = None
        self.f = None  # break reference cycle

    def __del__(self):
        self.close()

    # Implement the Mapping ABC
    def __iter__(self):
        return iter(self.files)

    def __len__(self):
        return len(self.files)

    def __getitem__(self, key):
        # FIXME: This seems like it will copy strings around
        #   more than is strictly necessary.  The zipfile
        #   will read the string and then
        #   the format.read_array will copy the string
        #   to another place in memory.
        #   It would be better if the zipfile could read
        #   (or at least uncompress) the data
        #   directly into the array memory.
        member = False
        if key in self._files:
            member = True
        elif key in self.files:
            member = True
            key += '.npy'
        if member:
            bytes = self.zip.open(key)
            magic = bytes.read(len(format.MAGIC_PREFIX))
            bytes.close()
            if magic == format.MAGIC_PREFIX:
                bytes = self.zip.open(key)
                return format.read_array(bytes,
                                         allow_pickle=self.allow_pickle,
                                         pickle_kwargs=self.pickle_kwargs)
            else:
                return self.zip.read(key)
        else:
            raise KeyError("%s is not a file in the archive" % key)

    if sys.version_info.major == 3:
        # deprecate the python 2 dict apis that we supported by accident in
        # python 3. We forgot to implement itervalues() at all in earlier
        # versions of numpy, so no need to deprecate it here.
        def iteritems(self):
            # Numpy 1.15, 2018-02-20
            warnings.warn(
                "NpzFile.iteritems is deprecated in python 3, to match the "
                "removal of dict.iteritems. Use .items() instead.",
                DeprecationWarning, stacklevel=2)
            return self.items()

        def iterkeys(self):
            # Numpy 1.15, 2018-02-20
            warnings.warn(
                "NpzFile.iterkeys is deprecated in python 3, to match the "
                "removal of dict.iterkeys. Use .keys() instead.",
                DeprecationWarning, stacklevel=2)
            return self.keys()
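

# The class above implements the Mapping ABC, so an NpzFile can be iterated,
# queried with ``in``, and used as a context manager.  The helper below is a
# small illustrative sketch (not used by the library; the temporary file and
# names are arbitrary) of that dictionary-like, lazily-loading access.
def _npzfile_usage_sketch():
    from tempfile import TemporaryFile
    outfile = TemporaryFile()
    np.savez(outfile, x=np.arange(5), y=np.ones(3))
    outfile.seek(0)
    with np.load(outfile) as npz:       # NpzFile supports the ``with`` statement
        names = sorted(npz)             # Mapping: iteration yields member names
        arrays = {name: npz[name] for name in names}   # arrays decoded on access
    return names, arrays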


@set_module('numpy')
def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True,
         encoding='ASCII'):
    """
    Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files.

    Parameters
    ----------
    file : file-like object, string, or pathlib.Path
        The file to read. File-like objects must support the
        ``seek()`` and ``read()`` methods. Pickled files require that the
        file-like object support the ``readline()`` method as well.
    mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional
        If not None, then memory-map the file, using the given mode (see
        `numpy.memmap` for a detailed description of the modes).  A
        memory-mapped array is kept on disk. However, it can be accessed
        and sliced like any ndarray.  Memory mapping is especially useful
        for accessing small fragments of large files without reading the
        entire file into memory.
    allow_pickle : bool, optional
        Allow loading pickled object arrays stored in npy files. Reasons for
        disallowing pickles include security, as loading pickled data can
        execute arbitrary code. If pickles are disallowed, loading object
        arrays will fail. Default: False

        .. versionchanged:: 1.16.3
            Made default False in response to CVE-2019-6446.

    fix_imports : bool, optional
        Only useful when loading Python 2 generated pickled files on Python 3,
        which includes npy/npz files containing object arrays. If `fix_imports`
        is True, pickle will try to map the old Python 2 names to the new names
        used in Python 3.
    encoding : str, optional
        What encoding to use when reading Python 2 strings. Only useful when
        loading Python 2 generated pickled files in Python 3, which includes
        npy/npz files containing object arrays. Values other than 'latin1',
        'ASCII', and 'bytes' are not allowed, as they can corrupt numerical
        data. Default: 'ASCII'

    Returns
    -------
    result : array, tuple, dict, etc.
        Data stored in the file. For ``.npz`` files, the returned instance
        of NpzFile class must be closed to avoid leaking file descriptors.

    Raises
    ------
    IOError
        If the input file does not exist or cannot be read.
    ValueError
        The file contains an object array, but ``allow_pickle=False`` was
        given.

    See Also
    --------
    save, savez, savez_compressed, loadtxt
    memmap : Create a memory-map to an array stored in a file on disk.
    lib.format.open_memmap : Create or load a memory-mapped ``.npy`` file.

    Notes
    -----
    - If the file contains pickle data, then whatever object is stored
      in the pickle is returned.
    - If the file is a ``.npy`` file, then a single array is returned.
    - If the file is a ``.npz`` file, then a dictionary-like object is
      returned, containing ``{filename: array}`` key-value pairs, one for
      each file in the archive.
    - If the file is a ``.npz`` file, the returned value supports the
      context manager protocol in a similar fashion to the open function::

        with load('foo.npz') as data:
            a = data['a']

      The underlying file descriptor is closed when exiting the 'with'
      block.

    Examples
    --------
    Store data to disk, and load it again:

    >>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]]))
    >>> np.load('/tmp/123.npy')
    array([[1, 2, 3],
           [4, 5, 6]])

    Store compressed data to disk, and load it again:

    >>> a = np.array([[1, 2, 3], [4, 5, 6]])
    >>> b = np.array([1, 2])
    >>> np.savez('/tmp/123.npz', a=a, b=b)
    >>> data = np.load('/tmp/123.npz')
    >>> data['a']
    array([[1, 2, 3],
           [4, 5, 6]])
    >>> data['b']
    array([1, 2])
    >>> data.close()

    Mem-map the stored array, and then access the second row
    directly from disk:

    >>> X = np.load('/tmp/123.npy', mmap_mode='r')
    >>> X[1, :]
    memmap([4, 5, 6])

    """
    if encoding not in ('ASCII', 'latin1', 'bytes'):
        # The 'encoding' value for pickle also affects what encoding
        # the serialized binary data of NumPy arrays is loaded
        # in. Pickle does not pass on the encoding information to
        # NumPy. The unpickling code in numpy.core.multiarray is
        # written to assume that unicode data appearing where binary
        # should be is in 'latin1'. 'bytes' is also safe, as is 'ASCII'.
        #
        # Other encoding values can corrupt binary data, and we
        # purposefully disallow them. For the same reason, the errors=
        # argument is not exposed, as values other than 'strict' can
        # similarly corrupt numerical data silently.
        raise ValueError("encoding must be 'ASCII', 'latin1', or 'bytes'")

    if sys.version_info[0] >= 3:
        pickle_kwargs = dict(encoding=encoding, fix_imports=fix_imports)
    else:
        # Nothing to do on Python 2
        pickle_kwargs = {}

    # TODO: Use contextlib.ExitStack once we drop Python 2
    if hasattr(file, 'read'):
        fid = file
        own_fid = False
    else:
        fid = open(os_fspath(file), "rb")
        own_fid = True

    try:
        # Code to distinguish between NumPy binary files and pickles.
        _ZIP_PREFIX = b'PK\x03\x04'
        _ZIP_SUFFIX = b'PK\x05\x06'  # empty zip files start with this
        N = len(format.MAGIC_PREFIX)
        magic = fid.read(N)
        # If the file size is less than N, we need to make sure not
        # to seek past the beginning of the file
        fid.seek(-min(N, len(magic)), 1)  # back-up
        if magic.startswith(_ZIP_PREFIX) or magic.startswith(_ZIP_SUFFIX):
            # zip-file (assume .npz)
            # Transfer file ownership to NpzFile
            ret = NpzFile(fid, own_fid=own_fid, allow_pickle=allow_pickle,
                          pickle_kwargs=pickle_kwargs)
            own_fid = False
            return ret
        elif magic == format.MAGIC_PREFIX:
            # .npy file
            if mmap_mode:
                return format.open_memmap(file, mode=mmap_mode)
            else:
                return format.read_array(fid, allow_pickle=allow_pickle,
                                         pickle_kwargs=pickle_kwargs)
        else:
            # Try a pickle
            if not allow_pickle:
                raise ValueError("Cannot load file containing pickled data "
                                 "when allow_pickle=False")
            try:
                return pickle.load(fid, **pickle_kwargs)
            except Exception:
                raise IOError(
                    "Failed to interpret file %s as a pickle" % repr(file))
    finally:
        if own_fid:
            fid.close()
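

# The dispatch in ``load`` above is driven entirely by the first few bytes of
# the file: a ZIP signature means an ``.npz`` archive, the ``.npy`` magic
# prefix means a single array, and anything else is treated as a pickle.
# The helper below is an illustrative sketch (not used by the library) of the
# same peek-and-rewind probe on an open binary file handle.
def _peek_file_kind_sketch(fid):
    n = len(format.MAGIC_PREFIX)
    magic = fid.read(n)
    fid.seek(-min(n, len(magic)), 1)    # rewind so the real reader sees everything
    if magic.startswith(b'PK\x03\x04') or magic.startswith(b'PK\x05\x06'):
        return 'npz'
    elif magic == format.MAGIC_PREFIX:
        return 'npy'
    return 'pickle'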


def _save_dispatcher(file, arr, allow_pickle=None, fix_imports=None):
    return (arr,)


@array_function_dispatch(_save_dispatcher)
def save(file, arr, allow_pickle=True, fix_imports=True):
    """
    Save an array to a binary file in NumPy ``.npy`` format.

    Parameters
    ----------
    file : file, str, or pathlib.Path
        File or filename to which the data is saved.  If file is a file-object,
        then the filename is unchanged.  If file is a string or Path, a ``.npy``
        extension will be appended to the file name if it does not already
        have one.
    arr : array_like
        Array data to be saved.
    allow_pickle : bool, optional
        Allow saving object arrays using Python pickles. Reasons for disallowing
        pickles include security (loading pickled data can execute arbitrary
        code) and portability (pickled objects may not be loadable on different
        Python installations, for example if the stored objects require libraries
        that are not available, and not all pickled data is compatible between
        Python 2 and Python 3).
        Default: True
    fix_imports : bool, optional
        Only useful in forcing objects in object arrays on Python 3 to be
        pickled in a Python 2 compatible way. If `fix_imports` is True, pickle
        will try to map the new Python 3 names to the old module names used in
        Python 2, so that the pickle data stream is readable with Python 2.

    See Also
    --------
    savez : Save several arrays into a ``.npz`` archive
    savetxt, load

    Notes
    -----
    For a description of the ``.npy`` format, see :py:mod:`numpy.lib.format`.

    Examples
    --------
    >>> from tempfile import TemporaryFile
    >>> outfile = TemporaryFile()

    >>> x = np.arange(10)
    >>> np.save(outfile, x)

    >>> outfile.seek(0)  # Only needed here to simulate closing & reopening file
    >>> np.load(outfile)
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

    """
    own_fid = False
    if hasattr(file, 'read'):
        fid = file
    else:
        file = os_fspath(file)
        if not file.endswith('.npy'):
            file = file + '.npy'
        fid = open(file, "wb")
        own_fid = True

    if sys.version_info[0] >= 3:
        pickle_kwargs = dict(fix_imports=fix_imports)
    else:
        # Nothing to do on Python 2
        pickle_kwargs = None

    try:
        arr = np.asanyarray(arr)
        format.write_array(fid, arr, allow_pickle=allow_pickle,
                           pickle_kwargs=pickle_kwargs)
    finally:
        if own_fid:
            fid.close()
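

# ``save`` (like ``savez`` and ``savetxt`` below) is wrapped with
# ``array_function_dispatch``: the tiny ``_save_dispatcher`` above returns the
# tuple of arguments that are checked for ``__array_function__`` overrides.
# The class below is a minimal, illustrative sketch (hypothetical, not part of
# NumPy) of a duck array that intercepts ``np.save`` through that protocol;
# whether NumPy actually consults it depends on the NumPy version and whether
# the override machinery is enabled.
class _LoggedArraySketch(object):
    def __init__(self, data):
        self.data = np.asarray(data)

    def __array_function__(self, func, types, args, kwargs):
        if func is np.save:
            # Unwrap and delegate to the plain ndarray implementation.
            file, arr = args[0], args[1]
            return np.save(file, arr.data, **kwargs)
        return NotImplemented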


def _savez_dispatcher(file, *args, **kwds):
    for a in args:
        yield a
    for v in kwds.values():
        yield v


@array_function_dispatch(_savez_dispatcher)
def savez(file, *args, **kwds):
    """
    Save several arrays into a single file in uncompressed ``.npz`` format.

    If arguments are passed in with no keywords, the corresponding variable
    names, in the ``.npz`` file, are 'arr_0', 'arr_1', etc. If keyword
    arguments are given, the corresponding variable names, in the ``.npz``
    file, will match the keyword names.

    Parameters
    ----------
    file : str or file
        Either the file name (string) or an open file (file-like object)
        where the data will be saved. If file is a string or a Path, the
        ``.npz`` extension will be appended to the file name if it is not
        already there.
    args : Arguments, optional
        Arrays to save to the file. Since it is not possible for Python to
        know the names of the arrays outside `savez`, the arrays will be saved
        with names "arr_0", "arr_1", and so on. These arguments can be any
        expression.
    kwds : Keyword arguments, optional
        Arrays to save to the file. Arrays will be saved in the file with the
        keyword names.

    Returns
    -------
    None

    See Also
    --------
    save : Save a single array to a binary file in NumPy format.
    savetxt : Save an array to a file as plain text.
    savez_compressed : Save several arrays into a compressed ``.npz`` archive

    Notes
    -----
    The ``.npz`` file format is a zipped archive of files named after the
    variables they contain.  The archive is not compressed and each file
    in the archive contains one variable in ``.npy`` format. For a
    description of the ``.npy`` format, see :py:mod:`numpy.lib.format`.

    When opening the saved ``.npz`` file with `load` a `NpzFile` object is
    returned. This is a dictionary-like object which can be queried for
    its list of arrays (with the ``.files`` attribute), and for the arrays
    themselves.

    Examples
    --------
    >>> from tempfile import TemporaryFile
    >>> outfile = TemporaryFile()
    >>> x = np.arange(10)
    >>> y = np.sin(x)

    Using `savez` with \\*args, the arrays are saved with default names.

    >>> np.savez(outfile, x, y)
    >>> outfile.seek(0)  # Only needed here to simulate closing & reopening file
    >>> npzfile = np.load(outfile)
    >>> npzfile.files
    ['arr_1', 'arr_0']
    >>> npzfile['arr_0']
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

    Using `savez` with \\**kwds, the arrays are saved with the keyword names.

    >>> outfile = TemporaryFile()
    >>> np.savez(outfile, x=x, y=y)
    >>> outfile.seek(0)
    >>> npzfile = np.load(outfile)
    >>> npzfile.files
    ['y', 'x']
    >>> npzfile['x']
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

    """
    _savez(file, args, kwds, False)
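

# A small illustrative sketch (not used by the library) of the naming rule
# described above: positional arrays become ``arr_0``, ``arr_1``, ... while
# keyword arrays keep their keyword names.
def _savez_naming_sketch():
    from tempfile import TemporaryFile
    outfile = TemporaryFile()
    np.savez(outfile, np.arange(3), np.ones(2), mask=np.zeros(4, dtype=bool))
    outfile.seek(0)
    with np.load(outfile) as npz:
        return sorted(npz.files)    # ['arr_0', 'arr_1', 'mask']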


def _savez_compressed_dispatcher(file, *args, **kwds):
    for a in args:
        yield a
    for v in kwds.values():
        yield v


@array_function_dispatch(_savez_compressed_dispatcher)
def savez_compressed(file, *args, **kwds):
    """
    Save several arrays into a single file in compressed ``.npz`` format.

    If keyword arguments are given, then filenames are taken from the keywords.
    If arguments are passed in with no keywords, then stored file names are
    arr_0, arr_1, etc.

    Parameters
    ----------
    file : str or file
        Either the file name (string) or an open file (file-like object)
        where the data will be saved. If file is a string or a Path, the
        ``.npz`` extension will be appended to the file name if it is not
        already there.
    args : Arguments, optional
        Arrays to save to the file. Since it is not possible for Python to
        know the names of the arrays outside `savez_compressed`, the arrays
        will be saved with names "arr_0", "arr_1", and so on. These arguments
        can be any expression.
    kwds : Keyword arguments, optional
        Arrays to save to the file. Arrays will be saved in the file with the
        keyword names.

    Returns
    -------
    None

    See Also
    --------
    numpy.save : Save a single array to a binary file in NumPy format.
    numpy.savetxt : Save an array to a file as plain text.
    numpy.savez : Save several arrays into an uncompressed ``.npz`` file format
    numpy.load : Load the files created by savez_compressed.

    Notes
    -----
    The ``.npz`` file format is a zipped archive of files named after the
    variables they contain.  The archive is compressed with
    ``zipfile.ZIP_DEFLATED`` and each file in the archive contains one variable
    in ``.npy`` format. For a description of the ``.npy`` format, see
    :py:mod:`numpy.lib.format`.

    When opening the saved ``.npz`` file with `load` a `NpzFile` object is
    returned. This is a dictionary-like object which can be queried for
    its list of arrays (with the ``.files`` attribute), and for the arrays
    themselves.

    Examples
    --------
    >>> test_array = np.random.rand(3, 2)
    >>> test_vector = np.random.rand(4)
    >>> np.savez_compressed('/tmp/123', a=test_array, b=test_vector)
    >>> loaded = np.load('/tmp/123.npz')
    >>> print(np.array_equal(test_array, loaded['a']))
    True
    >>> print(np.array_equal(test_vector, loaded['b']))
    True

    """
    _savez(file, args, kwds, True)


def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None):
    # Import is postponed to here since zipfile depends on gzip, an optional
    # component of the so-called standard library.
    import zipfile

    if not hasattr(file, 'read'):
        file = os_fspath(file)
        if not file.endswith('.npz'):
            file = file + '.npz'

    namedict = kwds
    for i, val in enumerate(args):
        key = 'arr_%d' % i
        if key in namedict.keys():
            raise ValueError(
                "Cannot use un-named variables and keyword %s" % key)
        namedict[key] = val

    if compress:
        compression = zipfile.ZIP_DEFLATED
    else:
        compression = zipfile.ZIP_STORED

    zipf = zipfile_factory(file, mode="w", compression=compression)

    if sys.version_info >= (3, 6):
        # Since Python 3.6 it is possible to write directly to a ZIP file.
        for key, val in namedict.items():
            fname = key + '.npy'
            val = np.asanyarray(val)
            force_zip64 = val.nbytes >= 2**30
            with zipf.open(fname, 'w', force_zip64=force_zip64) as fid:
                format.write_array(fid, val,
                                   allow_pickle=allow_pickle,
                                   pickle_kwargs=pickle_kwargs)
    else:
        # Stage arrays in a temporary file on disk, before writing to zip.

        # Import deferred for startup time improvement
        import tempfile
        # Since target file might be big enough to exceed capacity of a global
        # temporary directory, create temp file side-by-side with the target file.
        file_dir, file_prefix = os.path.split(file) if _is_string_like(file) else (None, 'tmp')
        fd, tmpfile = tempfile.mkstemp(prefix=file_prefix, dir=file_dir, suffix='-numpy.npy')
        os.close(fd)
        try:
            for key, val in namedict.items():
                fname = key + '.npy'
                fid = open(tmpfile, 'wb')
                try:
                    format.write_array(fid, np.asanyarray(val),
                                       allow_pickle=allow_pickle,
                                       pickle_kwargs=pickle_kwargs)
                    fid.close()
                    fid = None
                    zipf.write(tmpfile, arcname=fname)
                except IOError as exc:
                    raise IOError("Failed to write to %s: %s" % (tmpfile, exc))
                finally:
                    if fid:
                        fid.close()
        finally:
            os.remove(tmpfile)

    zipf.close()
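

# The loop in ``_savez`` above gives positional arrays the synthetic names
# ``arr_0``, ``arr_1``, ... and refuses to silently overwrite a keyword that
# already uses one of those names.  An illustrative sketch (not used by the
# library) of that failure mode:
def _savez_collision_sketch():
    from tempfile import TemporaryFile
    outfile = TemporaryFile()
    try:
        # The positional array would also be stored as 'arr_0'.
        np.savez(outfile, np.arange(3), arr_0=np.ones(2))
    except ValueError:
        return 'collision between positional arr_0 and keyword arr_0'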


def _getconv(dtype):
    """ Find the correct dtype converter. Adapted from matplotlib """

    def floatconv(x):
        x = x.lower()
        if '0x' in x:
            return float.fromhex(x)
        return float(x)

    typ = dtype.type
    if issubclass(typ, np.bool_):
        return lambda x: bool(int(x))
    if issubclass(typ, np.uint64):
        return np.uint64
    if issubclass(typ, np.int64):
        return np.int64
    if issubclass(typ, np.integer):
        return lambda x: int(float(x))
    elif issubclass(typ, np.longdouble):
        return np.longdouble
    elif issubclass(typ, np.floating):
        return floatconv
    elif issubclass(typ, complex):
        return lambda x: complex(asstr(x).replace('+-', '-'))
    elif issubclass(typ, np.bytes_):
        return asbytes
    elif issubclass(typ, np.unicode_):
        return asunicode
    else:
        return asstr


# amount of lines loadtxt reads in one chunk, can be overridden for testing
_loadtxt_chunksize = 50000
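

# ``_getconv`` maps a column dtype to the parser used for its text fields;
# for example, generic integer columns go through ``int(float(x))`` so that
# "3.0" still parses, and float columns accept ``float.hex``-style strings.
# A small illustrative sketch (not used by the library):
def _getconv_sketch():
    to_int = _getconv(np.dtype(np.int32))       # -> int(float(x)), so "3.0" parses
    to_float = _getconv(np.dtype(np.float64))   # -> floatconv, with hex support
    return to_int('3.0'), to_float('0x1.8p3')   # (3, 12.0)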


@set_module('numpy')
def loadtxt(fname, dtype=float, comments='#', delimiter=None,
            converters=None, skiprows=0, usecols=None, unpack=False,
            ndmin=0, encoding='bytes', max_rows=None):
    """
    Load data from a text file.

    Each row in the text file must have the same number of values.

    Parameters
    ----------
    fname : file, str, or pathlib.Path
        File, filename, or generator to read.  If the filename extension is
        ``.gz`` or ``.bz2``, the file is first decompressed. Note that
        generators should return byte strings for Python 3.
    dtype : data-type, optional
        Data-type of the resulting array; default: float.  If this is a
        structured data-type, the resulting array will be 1-dimensional, and
        each row will be interpreted as an element of the array.  In this
        case, the number of columns used must match the number of fields in
        the data-type.
    comments : str or sequence of str, optional
        The characters or list of characters used to indicate the start of a
        comment. None implies no comments. For backwards compatibility, byte
        strings will be decoded as 'latin1'. The default is '#'.
    delimiter : str, optional
        The string used to separate values. For backwards compatibility, byte
        strings will be decoded as 'latin1'. The default is whitespace.
    converters : dict, optional
        A dictionary mapping column number to a function that will parse the
        column string into the desired value.  E.g., if column 0 is a date
        string: ``converters = {0: datestr2num}``.  Converters can also be
        used to provide a default value for missing data (but see also
        `genfromtxt`): ``converters = {3: lambda s: float(s.strip() or 0)}``.
        Default: None.
    skiprows : int, optional
        Skip the first `skiprows` lines; default: 0.
    usecols : int or sequence, optional
        Which columns to read, with 0 being the first. For example,
        ``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
        The default, None, results in all columns being read.

        .. versionchanged:: 1.11.0
            When a single column has to be read it is possible to use
            an integer instead of a tuple.  E.g. ``usecols = 3`` reads the
            fourth column the same way as ``usecols = (3,)`` would.
    unpack : bool, optional
        If True, the returned array is transposed, so that arguments may be
        unpacked using ``x, y, z = loadtxt(...)``.  When used with a structured
        data-type, arrays are returned for each field.  Default is False.
    ndmin : int, optional
        The returned array will have at least `ndmin` dimensions.
        Otherwise mono-dimensional axes will be squeezed.
        Legal values: 0 (default), 1 or 2.

        .. versionadded:: 1.6.0
    encoding : str, optional
        Encoding used to decode the input file. Does not apply to input
        streams. The special value 'bytes' enables backward compatibility
        workarounds that ensure you receive byte arrays as results if possible
        and pass 'latin1' encoded strings to converters. Override this value to
        receive unicode arrays and pass strings as input to converters.  If set
        to None the system default is used. The default value is 'bytes'.

        .. versionadded:: 1.14.0
    max_rows : int, optional
        Read `max_rows` lines of content after `skiprows` lines. The default
        is to read all the lines.

        .. versionadded:: 1.16.0

    Returns
    -------
    out : ndarray
        Data read from the text file.

    See Also
    --------
    load, fromstring, fromregex
    genfromtxt : Load data with missing values handled as specified.
    scipy.io.loadmat : reads MATLAB data files

    Notes
    -----
    This function aims to be a fast reader for simply formatted files.  The
    `genfromtxt` function provides more sophisticated handling of, e.g.,
    lines with missing values.

    .. versionadded:: 1.10.0

    The strings produced by the Python float.hex method can be used as
    input for floats.

    Examples
    --------
    >>> from io import StringIO   # StringIO behaves like a file object
    >>> c = StringIO(u"0 1\\n2 3")
    >>> np.loadtxt(c)
    array([[ 0.,  1.],
           [ 2.,  3.]])

    >>> d = StringIO(u"M 21 72\\nF 35 58")
    >>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
    ...                      'formats': ('S1', 'i4', 'f4')})
    array([('M', 21, 72.0), ('F', 35, 58.0)],
          dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')])

    >>> c = StringIO(u"1,0,2\\n3,0,4")
    >>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
    >>> x
    array([ 1.,  3.])
    >>> y
    array([ 2.,  4.])

    """
    # Type conversions for Py3 convenience
    if comments is not None:
        if isinstance(comments, (basestring, bytes)):
            comments = [comments]
        comments = [_decode_line(x) for x in comments]
        # Compile regex for comments beforehand
        comments = (re.escape(comment) for comment in comments)
        regex_comments = re.compile('|'.join(comments))

    if delimiter is not None:
        delimiter = _decode_line(delimiter)

    user_converters = converters

    if encoding == 'bytes':
        encoding = None
        byte_converters = True
    else:
        byte_converters = False

    if usecols is not None:
        # Allow usecols to be a single int or a sequence of ints
        try:
            usecols_as_list = list(usecols)
        except TypeError:
            usecols_as_list = [usecols]
        for col_idx in usecols_as_list:
            try:
                opindex(col_idx)
            except TypeError as e:
                e.args = (
                    "usecols must be an int or a sequence of ints but "
                    "it contains at least one element of type %s" %
                    type(col_idx),
                    )
                raise
        # Fall back to existing code
        usecols = usecols_as_list

    fown = False
    try:
        if isinstance(fname, os_PathLike):
            fname = os_fspath(fname)
        if _is_string_like(fname):
            fh = np.lib._datasource.open(fname, 'rt', encoding=encoding)
            fencoding = getattr(fh, 'encoding', 'latin1')
            fh = iter(fh)
            fown = True
        else:
            fh = iter(fname)
            fencoding = getattr(fname, 'encoding', 'latin1')
    except TypeError:
        raise ValueError('fname must be a string, file handle, or generator')

    # input may be a python2 io stream
    if encoding is not None:
        fencoding = encoding
    # we must assume local encoding
    # TODO emit portability warning?
    elif fencoding is None:
        import locale
        fencoding = locale.getpreferredencoding()

    # not to be confused with the flatten_dtype we import...
    @recursive
    def flatten_dtype_internal(self, dt):
        """Unpack a structured data-type, and produce re-packing info."""
        if dt.names is None:
            # If the dtype is flattened, return.
            # If the dtype has a shape, the dtype occurs
            # in the list more than once.
            shape = dt.shape
            if len(shape) == 0:
                return ([dt.base], None)
            else:
                packing = [(shape[-1], list)]
                if len(shape) > 1:
                    for dim in dt.shape[-2::-1]:
                        packing = [(dim*packing[0][0], packing*dim)]
                return ([dt.base] * int(np.prod(dt.shape)), packing)
        else:
            types = []
            packing = []
            for field in dt.names:
                tp, bytes = dt.fields[field]
                flat_dt, flat_packing = self(tp)
                types.extend(flat_dt)
                # Avoid extra nesting for subarrays
                if tp.ndim > 0:
                    packing.extend(flat_packing)
                else:
                    packing.append((len(flat_dt), flat_packing))
            return (types, packing)

    @recursive
    def pack_items(self, items, packing):
        """Pack items into nested lists based on re-packing info."""
        if packing is None:
            return items[0]
        elif packing is tuple:
            return tuple(items)
        elif packing is list:
            return list(items)
        else:
            start = 0
            ret = []
            for length, subpacking in packing:
                ret.append(self(items[start:start+length], subpacking))
                start += length
            return tuple(ret)

    def split_line(line):
        """Chop off comments, strip, and split at delimiter. """
        line = _decode_line(line, encoding=encoding)

        if comments is not None:
            line = regex_comments.split(line, maxsplit=1)[0]
        line = line.strip('\r\n')
        if line:
            return line.split(delimiter)
        else:
            return []

    def read_data(chunk_size):
        """Parse each line, including the first.

        The file handle, `fh`, is a global defined above.

        Parameters
        ----------
        chunk_size : int
            At most `chunk_size` lines are read at a time, with iteration
            until all lines are read.

        """
        X = []
        line_iter = itertools.chain([first_line], fh)
        line_iter = itertools.islice(line_iter, max_rows)
        for i, line in enumerate(line_iter):
            vals = split_line(line)
            if len(vals) == 0:
                continue
            if usecols:
                vals = [vals[j] for j in usecols]
            if len(vals) != N:
                line_num = i + skiprows + 1
                raise ValueError("Wrong number of columns at line %d"
                                 % line_num)

            # Convert each value according to its column and store
            items = [conv(val) for (conv, val) in zip(converters, vals)]

            # Then pack it according to the dtype's nesting
            items = pack_items(items, packing)
            X.append(items)
            if len(X) > chunk_size:
                yield X
                X = []
        if X:
            yield X

    try:
        # Make sure we're dealing with a proper dtype
        dtype = np.dtype(dtype)
        defconv = _getconv(dtype)

        # Skip the first `skiprows` lines
        for i in range(skiprows):
            next(fh)

        # Read until we find a line with some values, and use
        # it to estimate the number of columns, N.
        first_vals = None
        try:
            while not first_vals:
                first_line = next(fh)
                first_vals = split_line(first_line)
        except StopIteration:
            # End of lines reached
            first_line = ''
            first_vals = []
            warnings.warn('loadtxt: Empty input file: "%s"' % fname, stacklevel=2)
        N = len(usecols or first_vals)

        dtype_types, packing = flatten_dtype_internal(dtype)
        if len(dtype_types) > 1:
            # We're dealing with a structured array, each field of
            # the dtype matches a column
            converters = [_getconv(dt) for dt in dtype_types]
        else:
            # All fields have the same dtype
            converters = [defconv for i in range(N)]
            if N > 1:
                packing = [(N, tuple)]

        # By preference, use the converters specified by the user
        for i, conv in (user_converters or {}).items():
            if usecols:
                try:
                    i = usecols.index(i)
                except ValueError:
                    # Unused converter specified
                    continue
            if byte_converters:
                # converters may use decode to workaround numpy's old behaviour,
                # so encode the string again before passing to the user converter
                def tobytes_first(x, conv):
                    if type(x) is bytes:
                        return conv(x)
                    return conv(x.encode("latin1"))
                converters[i] = functools.partial(tobytes_first, conv=conv)
            else:
                converters[i] = conv

        converters = [conv if conv is not bytes else
                      lambda x: x.encode(fencoding) for conv in converters]

        # read data in chunks and fill it into an array via resize
        # over-allocating and shrinking the array later may be faster but is
        # probably not relevant compared to the cost of actually reading and
        # converting the data
        X = None
        for x in read_data(_loadtxt_chunksize):
            if X is None:
                X = np.array(x, dtype)
            else:
                nshape = list(X.shape)
                pos = nshape[0]
                nshape[0] += len(x)
                X.resize(nshape, refcheck=False)
                X[pos:, ...] = x
    finally:
        if fown:
            fh.close()

    if X is None:
        X = np.array([], dtype)

    # Multicolumn data are returned with shape (1, N, M), i.e.
    # (1, 1, M) for a single row - remove the singleton dimension there
    if X.ndim == 3 and X.shape[:2] == (1, 1):
        X.shape = (1, -1)

    # Verify that the array has at least dimensions `ndmin`.
    # Check correctness of the values of `ndmin`
    if ndmin not in [0, 1, 2]:
        raise ValueError('Illegal value of ndmin keyword: %s' % ndmin)

    # Tweak the size and shape of the arrays - remove extraneous dimensions
    if X.ndim > ndmin:
        X = np.squeeze(X)
    # and ensure we have the minimum number of dimensions asked for
    # - has to be in this order for the odd case ndmin=1, X.squeeze().ndim=0
    if X.ndim < ndmin:
        if ndmin == 1:
            X = np.atleast_1d(X)
        elif ndmin == 2:
            X = np.atleast_2d(X).T

    if unpack:
        if len(dtype_types) > 1:
            # For structured arrays, return an array for each field.
            return [X[field] for field in dtype.names]
        else:
            return X.T
    else:
        return X
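

# An illustrative sketch (not part of the library) combining the parameters
# documented above: a comment header, a ``converters`` entry that supplies a
# default for an empty field, and ``unpack`` to transpose the result.
def _loadtxt_usage_sketch():
    from io import StringIO
    text = StringIO(u"# x, y\n1, 2.5\n3, \n")
    x, y = np.loadtxt(text, delimiter=',', unpack=True,
                      converters={1: lambda s: float(s.strip() or -1)})
    return x, y    # array([1., 3.]), array([ 2.5, -1. ])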
  1016. def _savetxt_dispatcher(fname, X, fmt=None, delimiter=None, newline=None,
  1017. header=None, footer=None, comments=None,
  1018. encoding=None):
  1019. return (X,)
  1020. @array_function_dispatch(_savetxt_dispatcher)
  1021. def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
  1022. footer='', comments='# ', encoding=None):
  1023. """
  1024. Save an array to a text file.
  1025. Parameters
  1026. ----------
  1027. fname : filename or file handle
  1028. If the filename ends in ``.gz``, the file is automatically saved in
  1029. compressed gzip format. `loadtxt` understands gzipped files
  1030. transparently.
  1031. X : 1D or 2D array_like
  1032. Data to be saved to a text file.
  1033. fmt : str or sequence of strs, optional
  1034. A single format (%10.5f), a sequence of formats, or a
  1035. multi-format string, e.g. 'Iteration %d -- %10.5f', in which
  1036. case `delimiter` is ignored. For complex `X`, the legal options
  1037. for `fmt` are:
  1038. * a single specifier, `fmt='%.4e'`, resulting in numbers formatted
  1039. like `' (%s+%sj)' % (fmt, fmt)`
  1040. * a full string specifying every real and imaginary part, e.g.
  1041. `' %.4e %+.4ej %.4e %+.4ej %.4e %+.4ej'` for 3 columns
  1042. * a list of specifiers, one per column - in this case, the real
  1043. and imaginary part must have separate specifiers,
  1044. e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns
  1045. delimiter : str, optional
  1046. String or character separating columns.
  1047. newline : str, optional
  1048. String or character separating lines.
  1049. .. versionadded:: 1.5.0
  1050. header : str, optional
  1051. String that will be written at the beginning of the file.
  1052. .. versionadded:: 1.7.0
  1053. footer : str, optional
  1054. String that will be written at the end of the file.
  1055. .. versionadded:: 1.7.0
  1056. comments : str, optional
  1057. String that will be prepended to the ``header`` and ``footer`` strings,
  1058. to mark them as comments. Default: '# ', as expected by e.g.
  1059. ``numpy.loadtxt``.
  1060. .. versionadded:: 1.7.0
  1061. encoding : {None, str}, optional
  1062. Encoding used to encode the outputfile. Does not apply to output
  1063. streams. If the encoding is something other than 'bytes' or 'latin1'
  1064. you will not be able to load the file in NumPy versions < 1.14. Default
  1065. is 'latin1'.
  1066. .. versionadded:: 1.14.0
  1067. See Also
  1068. --------
  1069. save : Save an array to a binary file in NumPy ``.npy`` format
  1070. savez : Save several arrays into an uncompressed ``.npz`` archive
  1071. savez_compressed : Save several arrays into a compressed ``.npz`` archive
  1072. Notes
  1073. -----
  1074. Further explanation of the `fmt` parameter
  1075. (``%[flag]width[.precision]specifier``):
  1076. flags:
  1077. ``-`` : left justify
  1078. ``+`` : Forces to precede result with + or -.
  1079. ``0`` : Left pad the number with zeros instead of space (see width).
  1080. width:
  1081. Minimum number of characters to be printed. The value is not truncated
  1082. if it has more characters.
  1083. precision:
  1084. - For integer specifiers (eg. ``d,i,o,x``), the minimum number of
  1085. digits.
  1086. - For ``e, E`` and ``f`` specifiers, the number of digits to print
  1087. after the decimal point.
  1088. - For ``g`` and ``G``, the maximum number of significant digits.
  1089. - For ``s``, the maximum number of characters.
  1090. specifiers:
  1091. ``c`` : character
  1092. ``d`` or ``i`` : signed decimal integer
  1093. ``e`` or ``E`` : scientific notation with ``e`` or ``E``.
  1094. ``f`` : decimal floating point
  1095. ``g,G`` : use the shorter of ``e,E`` or ``f``
  1096. ``o`` : signed octal
  1097. ``s`` : string of characters
  1098. ``u`` : unsigned decimal integer
  1099. ``x,X`` : unsigned hexadecimal integer
  1100. This explanation of ``fmt`` is not complete, for an exhaustive
  1101. specification see [1]_.
  1102. References
  1103. ----------
  1104. .. [1] `Format Specification Mini-Language
  1105. <https://docs.python.org/library/string.html#format-specification-mini-language>`_,
  1106. Python Documentation.
  1107. Examples
  1108. --------
  1109. >>> x = y = z = np.arange(0.0,5.0,1.0)
  1110. >>> np.savetxt('test.out', x, delimiter=',') # X is an array
  1111. >>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays
    >>> np.savetxt('test.out', x, fmt='%1.4e')   # use exponential notation

    """

    # Py3 conversions first
    if isinstance(fmt, bytes):
        fmt = asstr(fmt)
    delimiter = asstr(delimiter)

    class WriteWrap(object):
        """Convert to unicode in py2 or to bytes on bytestream inputs.

        """
        def __init__(self, fh, encoding):
            self.fh = fh
            self.encoding = encoding
            self.do_write = self.first_write

        def close(self):
            self.fh.close()

        def write(self, v):
            self.do_write(v)

        def write_bytes(self, v):
            if isinstance(v, bytes):
                self.fh.write(v)
            else:
                self.fh.write(v.encode(self.encoding))

        def write_normal(self, v):
            self.fh.write(asunicode(v))

        def first_write(self, v):
            try:
                self.write_normal(v)
                self.write = self.write_normal
            except TypeError:
                # input is probably a bytestream
                self.write_bytes(v)
                self.write = self.write_bytes

    own_fh = False
    if isinstance(fname, os_PathLike):
        fname = os_fspath(fname)
    if _is_string_like(fname):
        # datasource doesn't support creating a new file ...
        open(fname, 'wt').close()
        fh = np.lib._datasource.open(fname, 'wt', encoding=encoding)
        own_fh = True
        # need to convert str to unicode for text io output
        if sys.version_info[0] == 2:
            fh = WriteWrap(fh, encoding or 'latin1')
    elif hasattr(fname, 'write'):
        # wrap to handle byte output streams
        fh = WriteWrap(fname, encoding or 'latin1')
    else:
        raise ValueError('fname must be a string or file handle')

    try:
        X = np.asarray(X)

        # Handle 1-dimensional arrays
        if X.ndim == 0 or X.ndim > 2:
            raise ValueError(
                "Expected 1D or 2D array, got %dD array instead" % X.ndim)
        elif X.ndim == 1:
            # Common case -- 1d array of numbers
            if X.dtype.names is None:
                X = np.atleast_2d(X).T
                ncol = 1

            # Complex dtype -- each field indicates a separate column
            else:
                ncol = len(X.dtype.names)
        else:
            ncol = X.shape[1]

        iscomplex_X = np.iscomplexobj(X)
        # `fmt` can be a string with multiple insertion points or a
        # list of formats.  E.g. '%10.5f\t%10d' or ('%10.5f', '%10d')
        if type(fmt) in (list, tuple):
            if len(fmt) != ncol:
                raise AttributeError('fmt has wrong shape. %s' % str(fmt))
            format = asstr(delimiter).join(map(asstr, fmt))
        elif isinstance(fmt, str):
            n_fmt_chars = fmt.count('%')
            error = ValueError('fmt has wrong number of %% formats: %s' % fmt)
            if n_fmt_chars == 1:
                if iscomplex_X:
                    fmt = [' (%s+%sj)' % (fmt, fmt), ] * ncol
                else:
                    fmt = [fmt, ] * ncol
                format = delimiter.join(fmt)
            elif iscomplex_X and n_fmt_chars != (2 * ncol):
                raise error
            elif ((not iscomplex_X) and n_fmt_chars != ncol):
                raise error
            else:
                format = fmt
        else:
            raise ValueError('invalid fmt: %r' % (fmt,))

        if len(header) > 0:
            header = header.replace('\n', '\n' + comments)
            fh.write(comments + header + newline)
        if iscomplex_X:
            for row in X:
                row2 = []
                for number in row:
                    row2.append(number.real)
                    row2.append(number.imag)
                s = format % tuple(row2) + newline
                fh.write(s.replace('+-', '-'))
        else:
            for row in X:
                try:
                    v = format % tuple(row) + newline
                except TypeError:
                    raise TypeError("Mismatch between array dtype ('%s') and "
                                    "format specifier ('%s')"
                                    % (str(X.dtype), format))
                fh.write(v)

        if len(footer) > 0:
            footer = footer.replace('\n', '\n' + comments)
            fh.write(comments + footer + newline)
    finally:
        if own_fh:
            fh.close()
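
# Illustrative sketch, kept as a comment so nothing executes at import time:
# the complex branch above writes each element as "(real+imagj)" and relies on
# the '+-' -> '-' replacement for negative imaginary parts.  The file name
# 'complex.txt' is purely hypothetical.
#
#     z = np.array([1.0 + 2.0j, 3.0 - 4.0j])
#     np.savetxt('complex.txt', z, fmt='%.2f')
#     # produces lines like " (1.00+2.00j)" and " (3.00-4.00j)"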


@set_module('numpy')
def fromregex(file, regexp, dtype, encoding=None):
    """
    Construct an array from a text file, using regular expression parsing.

    The returned array is always a structured array, and is constructed from
    all matches of the regular expression in the file. Groups in the regular
    expression are converted to fields of the structured array.

    Parameters
    ----------
    file : str or file
        File name or file object to read.
    regexp : str or regexp
        Regular expression used to parse the file.
        Groups in the regular expression correspond to fields in the dtype.
    dtype : dtype or list of dtypes
        Dtype for the structured array.
    encoding : str, optional
        Encoding used to decode the input file. Does not apply to input streams.

        .. versionadded:: 1.14.0

    Returns
    -------
    output : ndarray
        The output array, containing the part of the content of `file` that
        was matched by `regexp`. `output` is always a structured array.

    Raises
    ------
    TypeError
        When `dtype` is not a valid dtype for a structured array.

    See Also
    --------
    fromstring, loadtxt

    Notes
    -----
    Dtypes for structured arrays can be specified in several forms, but all
    forms specify at least the data type and field name. For details see
    `doc.structured_arrays`.

    Examples
    --------
    >>> f = open('test.dat', 'w')
    >>> f.write("1312 foo\\n1534 bar\\n444 qux")
    >>> f.close()

    >>> regexp = r"(\\d+)\\s+(...)"  # match [digits, whitespace, anything]
    >>> output = np.fromregex('test.dat', regexp,
    ...                       [('num', np.int64), ('key', 'S3')])
    >>> output
    array([(1312L, 'foo'), (1534L, 'bar'), (444L, 'qux')],
          dtype=[('num', '<i8'), ('key', '|S3')])
    >>> output['num']
    array([1312, 1534, 444], dtype=int64)

    """
    own_fh = False
    if not hasattr(file, "read"):
        file = np.lib._datasource.open(file, 'rt', encoding=encoding)
        own_fh = True

    try:
        if not isinstance(dtype, np.dtype):
            dtype = np.dtype(dtype)

        content = file.read()
        if isinstance(content, bytes) and isinstance(regexp, np.unicode):
            regexp = asbytes(regexp)
        elif isinstance(content, np.unicode) and isinstance(regexp, bytes):
            regexp = asstr(regexp)

        if not hasattr(regexp, 'match'):
            regexp = re.compile(regexp)
        seq = regexp.findall(content)
        if seq and not isinstance(seq[0], tuple):
            # Only one group is in the regexp.
            # Create the new array as a single data-type and then
            # re-interpret as a single-field structured array.
            newdtype = np.dtype(dtype[dtype.names[0]])
            output = np.array(seq, dtype=newdtype)
            output.dtype = dtype
        else:
            output = np.array(seq, dtype=dtype)

        return output
    finally:
        if own_fh:
            file.close()
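
# Illustrative sketch (commented out; 'ints.dat' is a hypothetical file name):
# when the pattern contains a single group, the branch above first builds a
# plain array and then reinterprets it as a one-field structured array.
#
#     with open('ints.dat', 'w') as f:
#         f.write("10\n20\n30")
#     out = np.fromregex('ints.dat', r"(\d+)", [('num', np.int64)])
#     # out['num'] -> array([10, 20, 30])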


#####--------------------------------------------------------------------------
#---- --- ASCII functions ---
#####--------------------------------------------------------------------------


@set_module('numpy')
def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
               skip_header=0, skip_footer=0, converters=None,
               missing_values=None, filling_values=None, usecols=None,
               names=None, excludelist=None, deletechars=None,
               replace_space='_', autostrip=False, case_sensitive=True,
               defaultfmt="f%i", unpack=None, usemask=False, loose=True,
               invalid_raise=True, max_rows=None, encoding='bytes'):
    """
    Load data from a text file, with missing values handled as specified.

    Each line past the first `skip_header` lines is split at the `delimiter`
    character, and characters following the `comments` character are discarded.

    Parameters
    ----------
    fname : file, str, pathlib.Path, list of str, generator
        File, filename, list, or generator to read.  If the filename
        extension is `.gz` or `.bz2`, the file is first decompressed. Note
        that generators must return byte strings in Python 3k.  The strings
        in a list or produced by a generator are treated as lines.
    dtype : dtype, optional
        Data type of the resulting array.
        If None, the dtypes will be determined by the contents of each
        column, individually.
    comments : str, optional
        The character used to indicate the start of a comment.
        All the characters occurring on a line after a comment are discarded.
    delimiter : str, int, or sequence, optional
        The string used to separate values.  By default, any consecutive
        whitespaces act as delimiter.  An integer or sequence of integers
        can also be provided as width(s) of each field.
    skiprows : int, optional
        `skiprows` was removed in numpy 1.10. Please use `skip_header` instead.
    skip_header : int, optional
        The number of lines to skip at the beginning of the file.
    skip_footer : int, optional
        The number of lines to skip at the end of the file.
    converters : variable, optional
        The set of functions that convert the data of a column to a value.
        The converters can also be used to provide a default value
        for missing data: ``converters = {3: lambda s: float(s or 0)}``.
    missing : variable, optional
        `missing` was removed in numpy 1.10. Please use `missing_values`
        instead.
    missing_values : variable, optional
        The set of strings corresponding to missing data.
    filling_values : variable, optional
        The set of values to be used as default when the data are missing.
    usecols : sequence, optional
        Which columns to read, with 0 being the first.  For example,
        ``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
    names : {None, True, str, sequence}, optional
        If `names` is True, the field names are read from the first line after
        the first `skip_header` lines. This line can optionally be preceded
        by a comment delimiter. If `names` is a sequence or a single string of
        comma-separated names, the names will be used to define the field names
        in a structured dtype. If `names` is None, the names of the dtype
        fields will be used, if any.
    excludelist : sequence, optional
        A list of names to exclude. This list is appended to the default list
        ['return', 'file', 'print']. Excluded names are appended an underscore:
        for example, `file` would become `file_`.
    deletechars : str, optional
        A string combining invalid characters that must be deleted from the
        names.
    defaultfmt : str, optional
        A format used to define default field names, such as "f%i" or "f_%02i".
    autostrip : bool, optional
        Whether to automatically strip white spaces from the variables.
    replace_space : char, optional
        Character(s) used in replacement of white spaces in the variables
        names. By default, use a '_'.
    case_sensitive : {True, False, 'upper', 'lower'}, optional
        If True, field names are case sensitive.
        If False or 'upper', field names are converted to upper case.
        If 'lower', field names are converted to lower case.
    unpack : bool, optional
        If True, the returned array is transposed, so that arguments may be
        unpacked using ``x, y, z = loadtxt(...)``
    usemask : bool, optional
        If True, return a masked array.
        If False, return a regular array.
    loose : bool, optional
        If True, do not raise errors for invalid values.
    invalid_raise : bool, optional
        If True, an exception is raised if an inconsistency is detected in the
        number of columns.
        If False, a warning is emitted and the offending lines are skipped.
    max_rows : int, optional
        The maximum number of rows to read. Must not be used with skip_footer
        at the same time.  If given, the value must be at least 1. Default is
        to read the entire file.

        .. versionadded:: 1.10.0
    encoding : str, optional
        Encoding used to decode the input file. Does not apply when `fname` is
        a file object.  The special value 'bytes' enables backward compatibility
        workarounds that ensure that you receive byte arrays when possible
        and passes latin1 encoded strings to converters. Override this value to
        receive unicode arrays and pass strings as input to converters.  If set
        to None the system default is used. The default value is 'bytes'.

        .. versionadded:: 1.14.0

    Returns
    -------
    out : ndarray
        Data read from the text file. If `usemask` is True, this is a
        masked array.

    See Also
    --------
    numpy.loadtxt : equivalent function when no data is missing.

    Notes
    -----
    * When spaces are used as delimiters, or when no delimiter has been given
      as input, there should not be any missing data between two fields.
    * When the variables are named (either by a flexible dtype or with `names`),
      there must not be any header in the file (else a ValueError
      exception is raised).
    * Individual values are not stripped of spaces by default.
      When using a custom converter, make sure the function does remove spaces.

    References
    ----------
    .. [1] NumPy User Guide, section `I/O with NumPy
           <https://docs.scipy.org/doc/numpy/user/basics.io.genfromtxt.html>`_.

    Examples
    --------
    >>> from io import StringIO
    >>> import numpy as np

    Comma delimited file with mixed dtype

    >>> s = StringIO(u"1,1.3,abcde")
    >>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
    ...     ('mystring','S5')], delimiter=",")
    >>> data
    array((1, 1.3, 'abcde'),
          dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])

    Using dtype = None

    >>> s.seek(0) # needed for StringIO example only
    >>> data = np.genfromtxt(s, dtype=None,
    ...     names = ['myint','myfloat','mystring'], delimiter=",")
    >>> data
    array((1, 1.3, 'abcde'),
          dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])

    Specifying dtype and names

    >>> s.seek(0)
    >>> data = np.genfromtxt(s, dtype="i8,f8,S5",
    ...     names=['myint','myfloat','mystring'], delimiter=",")
    >>> data
    array((1, 1.3, 'abcde'),
          dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])

    An example with fixed-width columns

    >>> s = StringIO(u"11.3abcde")
    >>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
    ...     delimiter=[1,3,5])
    >>> data
    array((1, 1.3, 'abcde'),
          dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '|S5')])

    """
    if max_rows is not None:
        if skip_footer:
            raise ValueError(
                    "The keywords 'skip_footer' and 'max_rows' can not be "
                    "specified at the same time.")
        if max_rows < 1:
            raise ValueError("'max_rows' must be at least 1.")

    if usemask:
        from numpy.ma import MaskedArray, make_mask_descr
    # Check the input dictionary of converters
    user_converters = converters or {}
    if not isinstance(user_converters, dict):
        raise TypeError(
            "The input argument 'converters' should be a valid dictionary "
            "(got '%s' instead)" % type(user_converters))

    if encoding == 'bytes':
        encoding = None
        byte_converters = True
    else:
        byte_converters = False

    # Initialize the filehandle, the LineSplitter and the NameValidator
    own_fhd = False
    try:
        if isinstance(fname, os_PathLike):
            fname = os_fspath(fname)
        if isinstance(fname, basestring):
            fhd = iter(np.lib._datasource.open(fname, 'rt', encoding=encoding))
            own_fhd = True
        else:
            fhd = iter(fname)
    except TypeError:
        raise TypeError(
            "fname must be a string, filehandle, list of strings, "
            "or generator. Got %s instead." % type(fname))

    split_line = LineSplitter(delimiter=delimiter, comments=comments,
                              autostrip=autostrip, encoding=encoding)
    validate_names = NameValidator(excludelist=excludelist,
                                   deletechars=deletechars,
                                   case_sensitive=case_sensitive,
                                   replace_space=replace_space)

    # Skip the first `skip_header` rows
    for i in range(skip_header):
        next(fhd)

    # Keep on until we find the first valid values
    first_values = None
    try:
        while not first_values:
            first_line = _decode_line(next(fhd), encoding)
            if (names is True) and (comments is not None):
                if comments in first_line:
                    first_line = (
                        ''.join(first_line.split(comments)[1:]))
            first_values = split_line(first_line)
    except StopIteration:
        # return an empty array if the datafile is empty
        first_line = ''
        first_values = []
        warnings.warn('genfromtxt: Empty input file: "%s"' % fname, stacklevel=2)

    # Should we take the first values as names ?
    if names is True:
        fval = first_values[0].strip()
        if comments is not None:
            if fval in comments:
                del first_values[0]

    # Check the columns to use: make sure `usecols` is a list
    if usecols is not None:
        try:
            usecols = [_.strip() for _ in usecols.split(",")]
        except AttributeError:
            try:
                usecols = list(usecols)
            except TypeError:
                usecols = [usecols, ]
    nbcols = len(usecols or first_values)

    # Check the names and overwrite the dtype.names if needed
    if names is True:
        names = validate_names([str(_.strip()) for _ in first_values])
        first_line = ''
    elif _is_string_like(names):
        names = validate_names([_.strip() for _ in names.split(',')])
    elif names:
        names = validate_names(names)
    # Get the dtype
    if dtype is not None:
        dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names,
                           excludelist=excludelist,
                           deletechars=deletechars,
                           case_sensitive=case_sensitive,
                           replace_space=replace_space)
    # Make sure the names is a list (for 2.5)
    if names is not None:
        names = list(names)

    if usecols:
        for (i, current) in enumerate(usecols):
            # if usecols is a list of names, convert to a list of indices
            if _is_string_like(current):
                usecols[i] = names.index(current)
            elif current < 0:
                usecols[i] = current + len(first_values)
        # If the dtype is not None, make sure we update it
        if (dtype is not None) and (len(dtype) > nbcols):
            descr = dtype.descr
            dtype = np.dtype([descr[_] for _ in usecols])
            names = list(dtype.names)
        # If `names` is not None, update the names
        elif (names is not None) and (len(names) > nbcols):
            names = [names[_] for _ in usecols]
    elif (names is not None) and (dtype is not None):
        names = list(dtype.names)

    # Process the missing values ...............................
    # Rename missing_values for convenience
    user_missing_values = missing_values or ()
    if isinstance(user_missing_values, bytes):
        user_missing_values = user_missing_values.decode('latin1')

    # Define the list of missing_values (one column: one list)
    missing_values = [list(['']) for _ in range(nbcols)]

    # We have a dictionary: process it field by field
    if isinstance(user_missing_values, dict):
        # Loop on the items
        for (key, val) in user_missing_values.items():
            # Is the key a string ?
            if _is_string_like(key):
                try:
                    # Transform it into an integer
                    key = names.index(key)
                except ValueError:
                    # We couldn't find it: the name must have been dropped
                    continue
            # Redefine the key as needed if it's a column number
            if usecols:
                try:
                    key = usecols.index(key)
                except ValueError:
                    pass
            # Transform the value as a list of string
            if isinstance(val, (list, tuple)):
                val = [str(_) for _ in val]
            else:
                val = [str(val), ]
            # Add the value(s) to the current list of missing
            if key is None:
                # None acts as default
                for miss in missing_values:
                    miss.extend(val)
            else:
                missing_values[key].extend(val)
    # We have a sequence : each item matches a column
    elif isinstance(user_missing_values, (list, tuple)):
        for (value, entry) in zip(user_missing_values, missing_values):
            value = str(value)
            if value not in entry:
                entry.append(value)
    # We have a string : apply it to all entries
    elif isinstance(user_missing_values, basestring):
        user_value = user_missing_values.split(",")
        for entry in missing_values:
            entry.extend(user_value)
    # We have something else: apply it to all entries
    else:
        for entry in missing_values:
            entry.extend([str(user_missing_values)])

    # Process the filling_values ...............................
    # Rename the input for convenience
    user_filling_values = filling_values
    if user_filling_values is None:
        user_filling_values = []
    # Define the default
    filling_values = [None] * nbcols
    # We have a dictionary : update each entry individually
    if isinstance(user_filling_values, dict):
        for (key, val) in user_filling_values.items():
            if _is_string_like(key):
                try:
                    # Transform it into an integer
                    key = names.index(key)
                except ValueError:
                    # We couldn't find it: the name must have been dropped,
                    continue
            # Redefine the key if it's a column number and usecols is defined
            if usecols:
                try:
                    key = usecols.index(key)
                except ValueError:
                    pass
            # Add the value to the list
            filling_values[key] = val
    # We have a sequence : update on a one-to-one basis
    elif isinstance(user_filling_values, (list, tuple)):
        n = len(user_filling_values)
        if (n <= nbcols):
            filling_values[:n] = user_filling_values
        else:
            filling_values = user_filling_values[:nbcols]
    # We have something else : use it for all entries
    else:
        filling_values = [user_filling_values] * nbcols

    # Initialize the converters ................................
    if dtype is None:
        # Note: we can't use a [...]*nbcols, as we would have 3 times the same
        # ... converter, instead of 3 different converters.
        converters = [StringConverter(None, missing_values=miss, default=fill)
                      for (miss, fill) in zip(missing_values, filling_values)]
    else:
        dtype_flat = flatten_dtype(dtype, flatten_base=True)
        # Initialize the converters
        if len(dtype_flat) > 1:
            # Flexible type : get a converter from each dtype
            zipit = zip(dtype_flat, missing_values, filling_values)
            converters = [StringConverter(dt, locked=True,
                                          missing_values=miss, default=fill)
                          for (dt, miss, fill) in zipit]
        else:
            # Set to a default converter (but w/ different missing values)
            zipit = zip(missing_values, filling_values)
            converters = [StringConverter(dtype, locked=True,
                                          missing_values=miss, default=fill)
                          for (miss, fill) in zipit]
    # Update the converters to use the user-defined ones
    uc_update = []
    for (j, conv) in user_converters.items():
        # If the converter is specified by column names, use the index instead
        if _is_string_like(j):
            try:
                j = names.index(j)
                i = j
            except ValueError:
                continue
        elif usecols:
            try:
                i = usecols.index(j)
            except ValueError:
                # Unused converter specified
                continue
        else:
            i = j
        # Find the value to test - first_line is not filtered by usecols:
        if len(first_line):
            testing_value = first_values[j]
        else:
            testing_value = None
        if conv is bytes:
            user_conv = asbytes
        elif byte_converters:
            # converters may use decode to workaround numpy's old behaviour,
            # so encode the string again before passing to the user converter
            def tobytes_first(x, conv):
                if type(x) is bytes:
                    return conv(x)
                return conv(x.encode("latin1"))
            import functools
            user_conv = functools.partial(tobytes_first, conv=conv)
        else:
            user_conv = conv
        converters[i].update(user_conv, locked=True,
                             testing_value=testing_value,
                             default=filling_values[i],
                             missing_values=missing_values[i],)
        uc_update.append((i, user_conv))
    # Make sure we have the corrected keys in user_converters...
    user_converters.update(uc_update)

    # Fixme: possible error as following variable never used.
    # miss_chars = [_.missing_values for _ in converters]

    # Initialize the output lists ...
    # ... rows
    rows = []
    append_to_rows = rows.append
    # ... masks
    if usemask:
        masks = []
        append_to_masks = masks.append
    # ... invalid
    invalid = []
    append_to_invalid = invalid.append

    # Parse each line
    for (i, line) in enumerate(itertools.chain([first_line, ], fhd)):
        values = split_line(line)
        nbvalues = len(values)
        # Skip an empty line
        if nbvalues == 0:
            continue
        if usecols:
            # Select only the columns we need
            try:
                values = [values[_] for _ in usecols]
            except IndexError:
                append_to_invalid((i + skip_header + 1, nbvalues))
                continue
        elif nbvalues != nbcols:
            append_to_invalid((i + skip_header + 1, nbvalues))
            continue
        # Store the values
        append_to_rows(tuple(values))
        if usemask:
            append_to_masks(tuple([v.strip() in m
                                   for (v, m) in zip(values,
                                                     missing_values)]))
        if len(rows) == max_rows:
            break

    if own_fhd:
        fhd.close()

    # Upgrade the converters (if needed)
    if dtype is None:
        for (i, converter) in enumerate(converters):
            current_column = [itemgetter(i)(_m) for _m in rows]
            try:
                converter.iterupgrade(current_column)
            except ConverterLockError:
                errmsg = "Converter #%i is locked and cannot be upgraded: " % i
                current_column = map(itemgetter(i), rows)
                for (j, value) in enumerate(current_column):
                    try:
                        converter.upgrade(value)
                    except (ConverterError, ValueError):
                        errmsg += "(occurred line #%i for value '%s')"
                        errmsg %= (j + 1 + skip_header, value)
                        raise ConverterError(errmsg)

    # Check that we don't have invalid values
    nbinvalid = len(invalid)
    if nbinvalid > 0:
        nbrows = len(rows) + nbinvalid - skip_footer
        # Construct the error message
        template = "    Line #%%i (got %%i columns instead of %i)" % nbcols
        if skip_footer > 0:
            nbinvalid_skipped = len([_ for _ in invalid
                                     if _[0] > nbrows + skip_header])
            invalid = invalid[:nbinvalid - nbinvalid_skipped]
            skip_footer -= nbinvalid_skipped
#
#            nbrows -= skip_footer
#            errmsg = [template % (i, nb)
#                      for (i, nb) in invalid if i < nbrows]
#        else:
        errmsg = [template % (i, nb)
                  for (i, nb) in invalid]
        if len(errmsg):
            errmsg.insert(0, "Some errors were detected !")
            errmsg = "\n".join(errmsg)
            # Raise an exception ?
            if invalid_raise:
                raise ValueError(errmsg)
            # Issue a warning ?
            else:
                warnings.warn(errmsg, ConversionWarning, stacklevel=2)

    # Strip the last skip_footer data
    if skip_footer > 0:
        rows = rows[:-skip_footer]
        if usemask:
            masks = masks[:-skip_footer]

    # Convert each value according to the converter:
    # We want to modify the list in place to avoid creating a new one...
    if loose:
        rows = list(
            zip(*[[conv._loose_call(_r) for _r in map(itemgetter(i), rows)]
                  for (i, conv) in enumerate(converters)]))
    else:
        rows = list(
            zip(*[[conv._strict_call(_r) for _r in map(itemgetter(i), rows)]
                  for (i, conv) in enumerate(converters)]))

    # Reset the dtype
    data = rows
    if dtype is None:
        # Get the dtypes from the types of the converters
        column_types = [conv.type for conv in converters]
        # Find the columns with strings...
        strcolidx = [i for (i, v) in enumerate(column_types)
                     if v == np.unicode_]

        if byte_converters and strcolidx:
            # convert strings back to bytes for backward compatibility
            warnings.warn(
                "Reading unicode strings without specifying the encoding "
                "argument is deprecated. Set the encoding, use None for the "
                "system default.",
                np.VisibleDeprecationWarning, stacklevel=2)

            def encode_unicode_cols(row_tup):
                row = list(row_tup)
                for i in strcolidx:
                    row[i] = row[i].encode('latin1')
                return tuple(row)

            try:
                data = [encode_unicode_cols(r) for r in data]
            except UnicodeEncodeError:
                pass
            else:
                for i in strcolidx:
                    column_types[i] = np.bytes_

        # Update string types to be the right length
        sized_column_types = column_types[:]
        for i, col_type in enumerate(column_types):
            if np.issubdtype(col_type, np.character):
                n_chars = max(len(row[i]) for row in data)
                sized_column_types[i] = (col_type, n_chars)

        if names is None:
            # If the dtype is uniform (before sizing strings)
            base = {
                c_type
                for c, c_type in zip(converters, column_types)
                if c._checked}
            if len(base) == 1:
                uniform_type, = base
                (ddtype, mdtype) = (uniform_type, bool)
            else:
                ddtype = [(defaultfmt % i, dt)
                          for (i, dt) in enumerate(sized_column_types)]
                if usemask:
                    mdtype = [(defaultfmt % i, bool)
                              for (i, dt) in enumerate(sized_column_types)]
        else:
            ddtype = list(zip(names, sized_column_types))
            mdtype = list(zip(names, [bool] * len(sized_column_types)))
        output = np.array(data, dtype=ddtype)
        if usemask:
            outputmask = np.array(masks, dtype=mdtype)
    else:
        # Overwrite the initial dtype names if needed
        if names and dtype.names is not None:
            dtype.names = names
        # Case 1. We have a structured type
        if len(dtype_flat) > 1:
            # Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])]
            # First, create the array using a flattened dtype:
            # [('a', int), ('b1', int), ('b2', float)]
            # Then, view the array using the specified dtype.
            if 'O' in (_.char for _ in dtype_flat):
                if has_nested_fields(dtype):
                    raise NotImplementedError(
                        "Nested fields involving objects are not supported...")
                else:
                    output = np.array(data, dtype=dtype)
            else:
                rows = np.array(data, dtype=[('', _) for _ in dtype_flat])
                output = rows.view(dtype)
            # Now, process the rowmasks the same way
            if usemask:
                rowmasks = np.array(
                    masks, dtype=np.dtype([('', bool) for t in dtype_flat]))
                # Construct the new dtype
                mdtype = make_mask_descr(dtype)
                outputmask = rowmasks.view(mdtype)
        # Case #2. We have a basic dtype
        else:
            # We used some user-defined converters
            if user_converters:
                ishomogeneous = True
                descr = []
                for i, ttype in enumerate([conv.type for conv in converters]):
                    # Keep the dtype of the current converter
                    if i in user_converters:
                        ishomogeneous &= (ttype == dtype.type)
                        if np.issubdtype(ttype, np.character):
                            ttype = (ttype, max(len(row[i]) for row in data))
                        descr.append(('', ttype))
                    else:
                        descr.append(('', dtype))
                # So we changed the dtype ?
                if not ishomogeneous:
                    # We have more than one field
                    if len(descr) > 1:
                        dtype = np.dtype(descr)
                    # We have only one field: drop the name if not needed.
                    else:
                        dtype = np.dtype(ttype)
            #
            output = np.array(data, dtype)
            if usemask:
                if dtype.names is not None:
                    mdtype = [(_, bool) for _ in dtype.names]
                else:
                    mdtype = bool
                outputmask = np.array(masks, dtype=mdtype)
    # Try to take care of the missing data we missed
    names = output.dtype.names
    if usemask and names:
        for (name, conv) in zip(names, converters):
            missing_values = [conv(_) for _ in conv.missing_values
                              if _ != '']
            for mval in missing_values:
                outputmask[name] |= (output[name] == mval)
    # Construct the final array
    if usemask:
        output = output.view(MaskedArray)
        output._mask = outputmask
    if unpack:
        return output.squeeze().T
    return output.squeeze()
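
# Illustrative sketch (commented out so importing this module has no side
# effects).  It exercises the missing/filling machinery implemented above;
# StringIO stands in for a file and the column names are hypothetical.
#
#     from io import StringIO
#     s = StringIO(u"1,,3\n4,5,6")
#     arr = np.genfromtxt(s, delimiter=",", names=['a', 'b', 'c'],
#                         missing_values='', filling_values=-999)
#     # arr['b'] -> array([-999., 5.])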


def ndfromtxt(fname, **kwargs):
    """
    Load ASCII data stored in a file and return it as a single array.

    Parameters
    ----------
    fname, kwargs : For a description of input parameters, see `genfromtxt`.

    See Also
    --------
    numpy.genfromtxt : generic function.

    """
    kwargs['usemask'] = False
    return genfromtxt(fname, **kwargs)


def mafromtxt(fname, **kwargs):
    """
    Load ASCII data stored in a text file and return a masked array.

    Parameters
    ----------
    fname, kwargs : For a description of input parameters, see `genfromtxt`.

    See Also
    --------
    numpy.genfromtxt : generic function to load ASCII data.

    """
    kwargs['usemask'] = True
    return genfromtxt(fname, **kwargs)


def recfromtxt(fname, **kwargs):
    """
    Load ASCII data from a file and return it in a record array.

    If ``usemask=False`` a standard `recarray` is returned,
    if ``usemask=True`` a MaskedRecords array is returned.

    Parameters
    ----------
    fname, kwargs : For a description of input parameters, see `genfromtxt`.

    See Also
    --------
    numpy.genfromtxt : generic function

    Notes
    -----
    By default, `dtype` is None, which means that the data-type of the output
    array will be determined from the data.

    """
    kwargs.setdefault("dtype", None)
    usemask = kwargs.get('usemask', False)
    output = genfromtxt(fname, **kwargs)
    if usemask:
        from numpy.ma.mrecords import MaskedRecords
        output = output.view(MaskedRecords)
    else:
        output = output.view(np.recarray)
    return output
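
# Illustrative sketch (commented out; 'data.txt' is a hypothetical file): with
# the default dtype=None set above, field types are inferred and the result is
# viewed as a recarray, so columns are reachable as attributes.
#
#     rec = np.recfromtxt('data.txt', names=['x', 'y'])
#     # rec.x, rec.y -> the two columns of the file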


def recfromcsv(fname, **kwargs):
    """
    Load ASCII data stored in a comma-separated file.

    The returned array is a record array (if ``usemask=False``, see
    `recarray`) or a masked record array (if ``usemask=True``,
    see `ma.mrecords.MaskedRecords`).

    Parameters
    ----------
    fname, kwargs : For a description of input parameters, see `genfromtxt`.

    See Also
    --------
    numpy.genfromtxt : generic function to load ASCII data.

    Notes
    -----
    By default, `dtype` is None, which means that the data-type of the output
    array will be determined from the data.

    """
    # Set default kwargs for genfromtxt as relevant to csv import.
    kwargs.setdefault("case_sensitive", "lower")
    kwargs.setdefault("names", True)
    kwargs.setdefault("delimiter", ",")
    kwargs.setdefault("dtype", None)
    output = genfromtxt(fname, **kwargs)

    usemask = kwargs.get("usemask", False)
    if usemask:
        from numpy.ma.mrecords import MaskedRecords
        output = output.view(MaskedRecords)
    else:
        output = output.view(np.recarray)
    return output
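
# Illustrative sketch (commented out; 'table.csv' is a hypothetical file): the
# defaults set above mean the header row supplies lower-cased field names and
# the dtypes are inferred from the data.
#
#     rec = np.recfromcsv('table.csv')
#     # rec.dtype.names -> header names, lower-cased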