# -*- coding: utf-8 -*-
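"""Tests for DataFrame.to_dict and DataFrame.to_records conversions."""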

import collections
from collections import OrderedDict, defaultdict
from datetime import datetime

import numpy as np
import pytest
import pytz

from pandas.compat import long

from pandas import DataFrame, MultiIndex, Series, Timestamp, compat, date_range
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm


class TestDataFrameConvertTo(TestData):

    def test_to_dict_timestamp(self):

        # GH11247
        # split/records producing np.datetime64 rather than Timestamps
        # on datetime64[ns] dtypes only
        tsmp = Timestamp('20130101')
        test_data = DataFrame({'A': [tsmp, tsmp], 'B': [tsmp, tsmp]})
        test_data_mixed = DataFrame({'A': [tsmp, tsmp], 'B': [1, 2]})

        expected_records = [{'A': tsmp, 'B': tsmp},
                            {'A': tsmp, 'B': tsmp}]
        expected_records_mixed = [{'A': tsmp, 'B': 1},
                                  {'A': tsmp, 'B': 2}]

        assert (test_data.to_dict(orient='records') ==
                expected_records)
        assert (test_data_mixed.to_dict(orient='records') ==
                expected_records_mixed)

        expected_series = {
            'A': Series([tsmp, tsmp], name='A'),
            'B': Series([tsmp, tsmp], name='B'),
        }
        expected_series_mixed = {
            'A': Series([tsmp, tsmp], name='A'),
            'B': Series([1, 2], name='B'),
        }

        tm.assert_dict_equal(test_data.to_dict(orient='series'),
                             expected_series)
        tm.assert_dict_equal(test_data_mixed.to_dict(orient='series'),
                             expected_series_mixed)

        expected_split = {
            'index': [0, 1],
            'data': [[tsmp, tsmp],
                     [tsmp, tsmp]],
            'columns': ['A', 'B']
        }
        expected_split_mixed = {
            'index': [0, 1],
            'data': [[tsmp, 1],
                     [tsmp, 2]],
            'columns': ['A', 'B']
        }

        tm.assert_dict_equal(test_data.to_dict(orient='split'),
                             expected_split)
        tm.assert_dict_equal(test_data_mixed.to_dict(orient='split'),
                             expected_split_mixed)

    def test_to_dict_index_not_unique_with_index_orient(self):
        # GH22801
        # Data loss when indexes are not unique. Raise ValueError.
        df = DataFrame({'a': [1, 2], 'b': [0.5, 0.75]}, index=['A', 'A'])
        pytest.raises(ValueError, df.to_dict, orient='index')

    def test_to_dict_invalid_orient(self):
        df = DataFrame({'A': [0, 1]})
        pytest.raises(ValueError, df.to_dict, orient='xinvalid')

    def test_to_records_dt64(self):
        df = DataFrame([["one", "two", "three"],
                        ["four", "five", "six"]],
                       index=date_range("2012-01-01", "2012-01-02"))

        # convert_datetime64 defaults to None
        expected = df.index.values[0]
        result = df.to_records()['index'][0]
        assert expected == result

        # check for FutureWarning if convert_datetime64=False is passed
        with tm.assert_produces_warning(FutureWarning):
            expected = df.index.values[0]
            result = df.to_records(convert_datetime64=False)['index'][0]
            assert expected == result

        # check for FutureWarning if convert_datetime64=True is passed
        with tm.assert_produces_warning(FutureWarning):
            expected = df.index[0]
            result = df.to_records(convert_datetime64=True)['index'][0]
            assert expected == result

    def test_to_records_with_multindex(self):
        # GH3189
        index = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],
                 ['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
        data = np.zeros((8, 4))
        df = DataFrame(data, index=index)
        r = df.to_records(index=True)['level_0']
        assert 'bar' in r
        assert 'one' not in r

    def test_to_records_with_Mapping_type(self):
        import email
        from email.parser import Parser

        compat.Mapping.register(email.message.Message)

        headers = Parser().parsestr('From: <user@example.com>\n'
                                    'To: <someone_else@example.com>\n'
                                    'Subject: Test message\n'
                                    '\n'
                                    'Body would go here\n')

        frame = DataFrame.from_records([headers])
        all(x in frame for x in ['Type', 'Subject', 'From'])

    def test_to_records_floats(self):
        df = DataFrame(np.random.rand(10, 10))
        df.to_records()

    def test_to_records_index_name(self):
        df = DataFrame(np.random.randn(3, 3))
        df.index.name = 'X'
        rs = df.to_records()
        assert 'X' in rs.dtype.fields

        df = DataFrame(np.random.randn(3, 3))
        rs = df.to_records()
        assert 'index' in rs.dtype.fields

        df.index = MultiIndex.from_tuples([('a', 'x'), ('a', 'y'),
                                           ('b', 'z')])
        df.index.names = ['A', None]
        rs = df.to_records()
        assert 'level_0' in rs.dtype.fields

    def test_to_records_with_unicode_index(self):
        # GH13172
        # unicode_literals conflict with to_records
        result = DataFrame([{u'a': u'x', u'b': 'y'}]).set_index(u'a') \
            .to_records()
        expected = np.rec.array([('x', 'y')], dtype=[('a', 'O'), ('b', 'O')])
        tm.assert_almost_equal(result, expected)

    def test_to_records_with_unicode_column_names(self):
        # xref issue: https://github.com/numpy/numpy/issues/2407
        # Issue #11879. to_records used to raise an exception when used
        # with column names containing non-ascii characters in Python 2
        result = DataFrame(data={u"accented_name_é": [1.0]}).to_records()

        # Note that numpy allows for unicode field names but dtypes need
        # to be specified using dictionary instead of list of tuples.
        expected = np.rec.array(
            [(0, 1.0)],
            dtype={"names": ["index", u"accented_name_é"],
                   "formats": ['=i8', '=f8']}
        )
        tm.assert_almost_equal(result, expected)

    def test_to_records_with_categorical(self):

        # GH8626

        # dict creation
        df = DataFrame({'A': list('abc')}, dtype='category')
        expected = Series(list('abc'), dtype='category', name='A')
        tm.assert_series_equal(df['A'], expected)

        # list-like creation
        df = DataFrame(list('abc'), dtype='category')
        expected = Series(list('abc'), dtype='category', name=0)
        tm.assert_series_equal(df[0], expected)

        # to record array
        # this coerces
        result = df.to_records()
        expected = np.rec.array([(0, 'a'), (1, 'b'), (2, 'c')],
                                dtype=[('index', '=i8'), ('0', 'O')])
        tm.assert_almost_equal(result, expected)

    @pytest.mark.parametrize("kwargs,expected", [
        # No dtypes --> default to array dtypes.
        (dict(),
         np.rec.array([(0, 1, 0.2, "a"), (1, 2, 1.5, "bc")],
                      dtype=[("index", "<i8"), ("A", "<i8"),
                             ("B", "<f8"), ("C", "O")])),

        # Should have no effect in this case.
        (dict(index=True),
         np.rec.array([(0, 1, 0.2, "a"), (1, 2, 1.5, "bc")],
                      dtype=[("index", "<i8"), ("A", "<i8"),
                             ("B", "<f8"), ("C", "O")])),

        # Column dtype applied across the board. Index unaffected.
        (dict(column_dtypes="<U4"),
         np.rec.array([("0", "1", "0.2", "a"), ("1", "2", "1.5", "bc")],
                      dtype=[("index", "<i8"), ("A", "<U4"),
                             ("B", "<U4"), ("C", "<U4")])),

        # Index dtype applied across the board. Columns unaffected.
        (dict(index_dtypes="<U1"),
         np.rec.array([("0", 1, 0.2, "a"), ("1", 2, 1.5, "bc")],
                      dtype=[("index", "<U1"), ("A", "<i8"),
                             ("B", "<f8"), ("C", "O")])),

        # Pass in a type instance.
        (dict(column_dtypes=np.unicode),
         np.rec.array([("0", "1", "0.2", "a"), ("1", "2", "1.5", "bc")],
                      dtype=[("index", "<i8"), ("A", "<U"),
                             ("B", "<U"), ("C", "<U")])),

        # Pass in a dictionary (name-only).
        (dict(column_dtypes={"A": np.int8, "B": np.float32, "C": "<U2"}),
         np.rec.array([("0", "1", "0.2", "a"), ("1", "2", "1.5", "bc")],
                      dtype=[("index", "<i8"), ("A", "i1"),
                             ("B", "<f4"), ("C", "<U2")])),

        # Pass in a dictionary (indices-only).
        (dict(index_dtypes={0: "int16"}),
         np.rec.array([(0, 1, 0.2, "a"), (1, 2, 1.5, "bc")],
                      dtype=[("index", "i2"), ("A", "<i8"),
                             ("B", "<f8"), ("C", "O")])),

        # Ignore index mappings if index is not True.
        (dict(index=False, index_dtypes="<U2"),
         np.rec.array([(1, 0.2, "a"), (2, 1.5, "bc")],
                      dtype=[("A", "<i8"), ("B", "<f8"), ("C", "O")])),

        # Non-existent names / indices in mapping should not error.
        (dict(index_dtypes={0: "int16", "not-there": "float32"}),
         np.rec.array([(0, 1, 0.2, "a"), (1, 2, 1.5, "bc")],
                      dtype=[("index", "i2"), ("A", "<i8"),
                             ("B", "<f8"), ("C", "O")])),

        # Names / indices not in mapping default to array dtype.
        (dict(column_dtypes={"A": np.int8, "B": np.float32}),
         np.rec.array([("0", "1", "0.2", "a"), ("1", "2", "1.5", "bc")],
                      dtype=[("index", "<i8"), ("A", "i1"),
                             ("B", "<f4"), ("C", "O")])),

        # Mixture of everything.
        (dict(column_dtypes={"A": np.int8, "B": np.float32},
              index_dtypes="<U2"),
         np.rec.array([("0", "1", "0.2", "a"), ("1", "2", "1.5", "bc")],
                      dtype=[("index", "<U2"), ("A", "i1"),
                             ("B", "<f4"), ("C", "O")])),

        # Invalid dtype values.
        (dict(index=False, column_dtypes=list()),
         "Invalid dtype \\[\\] specified for column A"),

        (dict(index=False, column_dtypes={"A": "int32", "B": 5}),
         "Invalid dtype 5 specified for column B"),
    ])
    def test_to_records_dtype(self, kwargs, expected):
        # see gh-18146
        df = DataFrame({"A": [1, 2], "B": [0.2, 1.5], "C": ["a", "bc"]})

        if isinstance(expected, str):
            with pytest.raises(ValueError, match=expected):
                df.to_records(**kwargs)
        else:
            result = df.to_records(**kwargs)
            tm.assert_almost_equal(result, expected)

    @pytest.mark.parametrize("df,kwargs,expected", [
        # MultiIndex in the index.
        (DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
                   columns=list("abc")).set_index(["a", "b"]),
         dict(column_dtypes="float64", index_dtypes={0: "int32", 1: "int8"}),
         np.rec.array([(1, 2, 3.), (4, 5, 6.), (7, 8, 9.)],
                      dtype=[("a", "<i4"), ("b", "i1"), ("c", "<f8")])),

        # MultiIndex in the columns.
        (DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
                   columns=MultiIndex.from_tuples([("a", "d"), ("b", "e"),
                                                   ("c", "f")])),
         dict(column_dtypes={0: "<U1", 2: "float32"}, index_dtypes="float32"),
         np.rec.array([(0., u"1", 2, 3.), (1., u"4", 5, 6.),
                       (2., u"7", 8, 9.)],
                      dtype=[("index", "<f4"),
                             ("('a', 'd')", "<U1"),
                             ("('b', 'e')", "<i8"),
                             ("('c', 'f')", "<f4")])),

        # MultiIndex in both the columns and index.
        (DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
                   columns=MultiIndex.from_tuples([
                       ("a", "d"), ("b", "e"), ("c", "f")], names=list("ab")),
                   index=MultiIndex.from_tuples([
                       ("d", -4), ("d", -5), ("f", -6)], names=list("cd"))),
         dict(column_dtypes="float64", index_dtypes={0: "<U2", 1: "int8"}),
         np.rec.array([("d", -4, 1., 2., 3.), ("d", -5, 4., 5., 6.),
                       ("f", -6, 7, 8, 9.)],
                      dtype=[("c", "<U2"), ("d", "i1"),
                             ("('a', 'd')", "<f8"), ("('b', 'e')", "<f8"),
                             ("('c', 'f')", "<f8")]))
    ])
    def test_to_records_dtype_mi(self, df, kwargs, expected):
        # see gh-18146
        result = df.to_records(**kwargs)
        tm.assert_almost_equal(result, expected)

    def test_to_records_dict_like(self):
        # see gh-18146
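        # DictLike is a minimal mapping-like helper (keys, __getitem__,
        # __contains__): it checks that the dtype mappings passed to
        # to_records only need to be dict-like, not an actual dict.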
        class DictLike(object):
            def __init__(self, **kwargs):
                self.d = kwargs.copy()

            def __getitem__(self, key):
                return self.d.__getitem__(key)

            def __contains__(self, key):
                return key in self.d

            def keys(self):
                return self.d.keys()

        df = DataFrame({"A": [1, 2], "B": [0.2, 1.5], "C": ["a", "bc"]})

        dtype_mappings = dict(column_dtypes=DictLike(**{"A": np.int8,
                                                        "B": np.float32}),
                              index_dtypes="<U2")

        result = df.to_records(**dtype_mappings)
        expected = np.rec.array([("0", "1", "0.2", "a"),
                                 ("1", "2", "1.5", "bc")],
                                dtype=[("index", "<U2"), ("A", "i1"),
                                       ("B", "<f4"), ("C", "O")])
        tm.assert_almost_equal(result, expected)

    @pytest.mark.parametrize('mapping', [
        dict,
        collections.defaultdict(list),
        collections.OrderedDict])
    def test_to_dict(self, mapping):
        test_data = {
            'A': {'1': 1, '2': 2},
            'B': {'1': '1', '2': '2', '3': '3'},
        }

        # GH16122
        recons_data = DataFrame(test_data).to_dict(into=mapping)

        for k, v in compat.iteritems(test_data):
            for k2, v2 in compat.iteritems(v):
                assert (v2 == recons_data[k][k2])

        recons_data = DataFrame(test_data).to_dict("l", mapping)

        for k, v in compat.iteritems(test_data):
            for k2, v2 in compat.iteritems(v):
                assert (v2 == recons_data[k][int(k2) - 1])

        recons_data = DataFrame(test_data).to_dict("s", mapping)

        for k, v in compat.iteritems(test_data):
            for k2, v2 in compat.iteritems(v):
                assert (v2 == recons_data[k][k2])

        recons_data = DataFrame(test_data).to_dict("sp", mapping)
        expected_split = {'columns': ['A', 'B'], 'index': ['1', '2', '3'],
                          'data': [[1.0, '1'], [2.0, '2'], [np.nan, '3']]}
        tm.assert_dict_equal(recons_data, expected_split)

        recons_data = DataFrame(test_data).to_dict("r", mapping)
        expected_records = [{'A': 1.0, 'B': '1'},
                            {'A': 2.0, 'B': '2'},
                            {'A': np.nan, 'B': '3'}]
        assert isinstance(recons_data, list)
        assert (len(recons_data) == 3)
        for l, r in zip(recons_data, expected_records):
            tm.assert_dict_equal(l, r)

        # GH10844
        recons_data = DataFrame(test_data).to_dict("i")

        for k, v in compat.iteritems(test_data):
            for k2, v2 in compat.iteritems(v):
                assert (v2 == recons_data[k2][k])

        df = DataFrame(test_data)
        df['duped'] = df[df.columns[0]]
        recons_data = df.to_dict("i")
        comp_data = test_data.copy()
        comp_data['duped'] = comp_data[df.columns[0]]
        for k, v in compat.iteritems(comp_data):
            for k2, v2 in compat.iteritems(v):
                assert (v2 == recons_data[k2][k])

    @pytest.mark.parametrize('mapping', [
        list,
        collections.defaultdict,
        []])
    def test_to_dict_errors(self, mapping):
        # GH16122
        df = DataFrame(np.random.randn(3, 3))
        with pytest.raises(TypeError):
            df.to_dict(into=mapping)

    def test_to_dict_not_unique_warning(self):
        # GH16927: When converting to a dict, if a column has a non-unique
        # name it will be dropped, throwing a warning.
        df = DataFrame([[1, 2, 3]], columns=['a', 'a', 'b'])
        with tm.assert_produces_warning(UserWarning):
            df.to_dict()

    @pytest.mark.parametrize('tz', ['UTC', 'GMT', 'US/Eastern'])
    def test_to_records_datetimeindex_with_tz(self, tz):
        # GH13937
        dr = date_range('2016-01-01', periods=10,
                        freq='S', tz=tz)

        df = DataFrame({'datetime': dr}, index=dr)

        expected = df.to_records()
        result = df.tz_convert("UTC").to_records()

        # both converted to UTC, so they are equal
        tm.assert_numpy_array_equal(result, expected)

    # orient - orient argument to to_dict function
    # item_getter - function for extracting value from
    #               the resulting dict using column name and index
    @pytest.mark.parametrize('orient,item_getter', [
        ('dict', lambda d, col, idx: d[col][idx]),
        ('records', lambda d, col, idx: d[idx][col]),
        ('list', lambda d, col, idx: d[col][idx]),
        ('split', lambda d, col, idx: d['data'][idx][d['columns'].index(col)]),
        ('index', lambda d, col, idx: d[idx][col])
    ])
    def test_to_dict_box_scalars(self, orient, item_getter):
        # GH14216, GH23753
        # make sure that we are boxing properly
        df = DataFrame({'a': [1, 2], 'b': [.1, .2]})
        result = df.to_dict(orient=orient)
        assert isinstance(item_getter(result, 'a', 0), (int, long))
        assert isinstance(item_getter(result, 'b', 0), float)

    def test_frame_to_dict_tz(self):
        # GH18372 When converting to dict with orient='records' columns of
        # datetime that are tz-aware were not converted to required arrays
        data = [(datetime(2017, 11, 18, 21, 53, 0, 219225, tzinfo=pytz.utc),),
                (datetime(2017, 11, 18, 22, 6, 30, 61810, tzinfo=pytz.utc,),)]
        df = DataFrame(list(data), columns=["d", ])

        result = df.to_dict(orient='records')
        expected = [
            {'d': Timestamp('2017-11-18 21:53:00.219225+0000', tz=pytz.utc)},
            {'d': Timestamp('2017-11-18 22:06:30.061810+0000', tz=pytz.utc)},
        ]
        tm.assert_dict_equal(result[0], expected[0])
        tm.assert_dict_equal(result[1], expected[1])

    @pytest.mark.parametrize('into, expected', [
        (dict, {0: {'int_col': 1, 'float_col': 1.0},
                1: {'int_col': 2, 'float_col': 2.0},
                2: {'int_col': 3, 'float_col': 3.0}}),
        (OrderedDict, OrderedDict([(0, {'int_col': 1, 'float_col': 1.0}),
                                   (1, {'int_col': 2, 'float_col': 2.0}),
                                   (2, {'int_col': 3, 'float_col': 3.0})])),
        (defaultdict(list), defaultdict(list,
                                        {0: {'int_col': 1, 'float_col': 1.0},
                                         1: {'int_col': 2, 'float_col': 2.0},
                                         2: {'int_col': 3, 'float_col': 3.0}}))
    ])
    def test_to_dict_index_dtypes(self, into, expected):
        # GH 18580
        # When using to_dict(orient='index') on a dataframe with int
        # and float columns only the int columns were cast to float
        df = DataFrame({'int_col': [1, 2, 3],
                        'float_col': [1.0, 2.0, 3.0]})

        result = df.to_dict(orient='index', into=into)

        cols = ['int_col', 'float_col']
        result = DataFrame.from_dict(result, orient='index')[cols]
        expected = DataFrame.from_dict(expected, orient='index')[cols]
        tm.assert_frame_equal(result, expected)

    def test_to_dict_numeric_names(self):
        # https://github.com/pandas-dev/pandas/issues/24940
        df = DataFrame({str(i): [i] for i in range(5)})
        result = set(df.to_dict('records')[0].keys())
        expected = set(df.columns)
        assert result == expected

    def test_to_dict_wide(self):
        # https://github.com/pandas-dev/pandas/issues/24939
        df = DataFrame({('A_{:d}'.format(i)): [i] for i in range(256)})
        result = df.to_dict('records')[0]
        expected = {'A_{:d}'.format(i): i for i in range(256)}
        assert result == expected