test_html.py 39 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161
  1. from __future__ import print_function
  2. from functools import partial
  3. import os
  4. import re
  5. import threading
  6. import numpy as np
  7. from numpy.random import rand
  8. import pytest
  9. from pandas.compat import (
  10. PY3, BytesIO, StringIO, is_platform_windows, map, reload, zip)
  11. from pandas.errors import ParserError
  12. import pandas.util._test_decorators as td
  13. from pandas import (
  14. DataFrame, Index, MultiIndex, Series, Timestamp, date_range, read_csv)
  15. import pandas.util.testing as tm
  16. from pandas.util.testing import makeCustomDataframe as mkdf, network
  17. from pandas.io.common import URLError, file_path_to_url
  18. import pandas.io.html
  19. from pandas.io.html import read_html
  20. HERE = os.path.dirname(__file__)
# One fixture file per encoding we must round-trip (UTF-16/32/8, Latin-1).
@pytest.fixture(params=[
    'chinese_utf-16.html',
    'chinese_utf-32.html',
    'chinese_utf-8.html',
    'letz_latin1.html',
])
def html_encoding_file(request, datapath):
    """Parametrized fixture for HTML encoding test filenames.

    Yields the absolute path of one encoded HTML fixture per param.
    """
    return datapath('io', 'data', 'html_encoding', request.param)
  30. def assert_framelist_equal(list1, list2, *args, **kwargs):
  31. assert len(list1) == len(list2), ('lists are not of equal size '
  32. 'len(list1) == {0}, '
  33. 'len(list2) == {1}'.format(len(list1),
  34. len(list2)))
  35. msg = 'not all list elements are DataFrames'
  36. both_frames = all(map(lambda x, y: isinstance(x, DataFrame) and
  37. isinstance(y, DataFrame), list1, list2))
  38. assert both_frames, msg
  39. for frame_i, frame_j in zip(list1, list2):
  40. tm.assert_frame_equal(frame_i, frame_j, *args, **kwargs)
  41. assert not frame_i.empty, 'frames are both empty'
@td.skip_if_no('bs4')
def test_bs4_version_fails(monkeypatch, datapath):
    """An unsupported BeautifulSoup4 version must raise ValueError."""
    import bs4
    # Pretend an old bs4 is installed; read_html should refuse to use it.
    monkeypatch.setattr(bs4, '__version__', '4.2')
    with pytest.raises(ValueError, match="minimum version"):
        read_html(datapath("io", "data", "spam.html"), flavor='bs4')
  48. def test_invalid_flavor():
  49. url = "google.com"
  50. flavor = "invalid flavor"
  51. msg = r"\{" + flavor + r"\} is not a valid set of flavors"
  52. with pytest.raises(ValueError, match=msg):
  53. read_html(url, "google", flavor=flavor)
@td.skip_if_no('bs4')
@td.skip_if_no('lxml')
def test_same_ordering(datapath):
    """The lxml and bs4 flavors must return tables in the same order."""
    filename = datapath('io', 'data', 'valid_markup.html')
    dfs_lxml = read_html(filename, index_col=0, flavor=['lxml'])
    dfs_bs4 = read_html(filename, index_col=0, flavor=['bs4'])
    assert_framelist_equal(dfs_lxml, dfs_bs4)
  61. @pytest.mark.parametrize("flavor", [
  62. pytest.param('bs4', marks=pytest.mark.skipif(
  63. not td.safe_import('lxml'), reason='No bs4')),
  64. pytest.param('lxml', marks=pytest.mark.skipif(
  65. not td.safe_import('lxml'), reason='No lxml'))], scope="class")
  66. class TestReadHtml(object):
    @pytest.fixture(autouse=True)
    def set_files(self, datapath):
        """Resolve the shared HTML fixture paths onto the test instance."""
        self.spam_data = datapath('io', 'data', 'spam.html')
        self.spam_data_kwargs = {}
        if PY3:
            # py3 open() needs an explicit encoding for this fixture
            self.spam_data_kwargs['encoding'] = 'UTF-8'
        self.banklist_data = datapath("io", "data", "banklist.html")
  74. @pytest.fixture(autouse=True, scope="function")
  75. def set_defaults(self, flavor, request):
  76. self.read_html = partial(read_html, flavor=flavor)
  77. yield
    def test_to_html_compat(self):
        """DataFrame.to_html output round-trips through read_html."""
        df = mkdf(4, 3, data_gen_f=lambda *args: rand(), c_idx_names=False,
                  r_idx_names=False).applymap('{0:.3f}'.format).astype(float)
        out = df.to_html()
        res = self.read_html(out, attrs={'class': 'dataframe'},
                             index_col=0)[0]
        tm.assert_frame_equal(res, df)
    @network
    def test_banklist_url(self):
        """Two match patterns against the live FDIC page find equal tables."""
        url = 'http://www.fdic.gov/bank/individual/failed/banklist.html'
        df1 = self.read_html(url, 'First Federal Bank of Florida',
                             attrs={"id": 'table'})
        df2 = self.read_html(url, 'Metcalf Bank', attrs={'id': 'table'})
        assert_framelist_equal(df1, df2)
    @network
    def test_spam_url(self):
        """Two match patterns against the live USDA page find equal tables."""
        url = ('http://ndb.nal.usda.gov/ndb/foods/show/300772?fg=&man=&'
               'lfacet=&format=&count=&max=25&offset=&sort=&qlookup=spam')
        df1 = self.read_html(url, '.*Water.*')
        df2 = self.read_html(url, 'Unit')
        assert_framelist_equal(df1, df2)
    @pytest.mark.slow
    def test_banklist(self):
        """Two match patterns on the banklist fixture find equal tables."""
        df1 = self.read_html(self.banklist_data, '.*Florida.*',
                             attrs={'id': 'table'})
        df2 = self.read_html(self.banklist_data, 'Metcalf Bank',
                             attrs={'id': 'table'})
        assert_framelist_equal(df1, df2)
    def test_spam(self):
        """Spam fixture: two match patterns find the same tables."""
        df1 = self.read_html(self.spam_data, '.*Water.*')
        df2 = self.read_html(self.spam_data, 'Unit')
        assert_framelist_equal(df1, df2)
        # spot-check the first table's first cell and first column label
        assert df1[0].iloc[0, 0] == 'Proximates'
        assert df1[0].columns[0] == 'Nutrient'
  111. def test_spam_no_match(self):
  112. dfs = self.read_html(self.spam_data)
  113. for df in dfs:
  114. assert isinstance(df, DataFrame)
  115. def test_banklist_no_match(self):
  116. dfs = self.read_html(self.banklist_data, attrs={'id': 'table'})
  117. for df in dfs:
  118. assert isinstance(df, DataFrame)
    def test_spam_header(self):
        """header=2 promotes the third row to the column labels."""
        df = self.read_html(self.spam_data, '.*Water.*', header=2)[0]
        assert df.columns[0] == 'Proximates'
        assert not df.empty
    def test_skiprows_int(self):
        """skiprows accepts a plain int (skip the first N rows)."""
        df1 = self.read_html(self.spam_data, '.*Water.*', skiprows=1)
        df2 = self.read_html(self.spam_data, 'Unit', skiprows=1)
        assert_framelist_equal(df1, df2)
    def test_skiprows_xrange(self):
        """skiprows accepts a range object.

        The name is historical (py2 ``xrange``); py3 ``range`` is used.
        """
        df1 = self.read_html(self.spam_data, '.*Water.*', skiprows=range(2))[0]
        df2 = self.read_html(self.spam_data, 'Unit', skiprows=range(2))[0]
        tm.assert_frame_equal(df1, df2)
    def test_skiprows_list(self):
        """skiprows accepts a list; element order must not matter."""
        df1 = self.read_html(self.spam_data, '.*Water.*', skiprows=[1, 2])
        df2 = self.read_html(self.spam_data, 'Unit', skiprows=[2, 1])
        assert_framelist_equal(df1, df2)
    def test_skiprows_set(self):
        """skiprows accepts a set; element order must not matter."""
        df1 = self.read_html(self.spam_data, '.*Water.*', skiprows={1, 2})
        df2 = self.read_html(self.spam_data, 'Unit', skiprows={2, 1})
        assert_framelist_equal(df1, df2)
  139. def test_skiprows_slice(self):
  140. df1 = self.read_html(self.spam_data, '.*Water.*', skiprows=1)
  141. df2 = self.read_html(self.spam_data, 'Unit', skiprows=1)
  142. assert_framelist_equal(df1, df2)
    def test_skiprows_slice_short(self):
        """skiprows accepts a stop-only slice."""
        df1 = self.read_html(self.spam_data, '.*Water.*', skiprows=slice(2))
        df2 = self.read_html(self.spam_data, 'Unit', skiprows=slice(2))
        assert_framelist_equal(df1, df2)
    def test_skiprows_slice_long(self):
        """skiprows slices with start/stop/step select the same row set."""
        # slice(2, 5) and slice(4, 1, -1) both name rows {2, 3, 4}
        df1 = self.read_html(self.spam_data, '.*Water.*', skiprows=slice(2, 5))
        df2 = self.read_html(self.spam_data, 'Unit', skiprows=slice(4, 1, -1))
        assert_framelist_equal(df1, df2)
    def test_skiprows_ndarray(self):
        """skiprows accepts a numpy integer array."""
        df1 = self.read_html(self.spam_data, '.*Water.*',
                             skiprows=np.arange(2))
        df2 = self.read_html(self.spam_data, 'Unit', skiprows=np.arange(2))
        assert_framelist_equal(df1, df2)
    def test_skiprows_invalid(self):
        """A string is not a valid skiprows value and raises TypeError."""
        with pytest.raises(TypeError, match=('is not a valid type '
                                             'for skipping rows')):
            self.read_html(self.spam_data, '.*Water.*', skiprows='asdf')
    def test_index(self):
        """index_col=0 yields equal frames for both match patterns."""
        df1 = self.read_html(self.spam_data, '.*Water.*', index_col=0)
        df2 = self.read_html(self.spam_data, 'Unit', index_col=0)
        assert_framelist_equal(df1, df2)
    def test_header_and_index_no_types(self):
        """header and index_col may be combined."""
        df1 = self.read_html(self.spam_data, '.*Water.*', header=1,
                             index_col=0)
        df2 = self.read_html(self.spam_data, 'Unit', header=1, index_col=0)
        assert_framelist_equal(df1, df2)
    def test_header_and_index_with_types(self):
        """header and index_col combined.

        NOTE(review): body is identical to ``test_header_and_index_no_types``
        — presumably a leftover from when type-inference kwargs existed.
        """
        df1 = self.read_html(self.spam_data, '.*Water.*', header=1,
                             index_col=0)
        df2 = self.read_html(self.spam_data, 'Unit', header=1, index_col=0)
        assert_framelist_equal(df1, df2)
    def test_infer_types(self):
        """Parsing still works after infer_types was removed (GH 10892)."""
        # 10892 infer_types removed
        df1 = self.read_html(self.spam_data, '.*Water.*', index_col=0)
        df2 = self.read_html(self.spam_data, 'Unit', index_col=0)
        assert_framelist_equal(df1, df2)
    def test_string_io(self):
        """read_html accepts a StringIO buffer as input."""
        with open(self.spam_data, **self.spam_data_kwargs) as f:
            data1 = StringIO(f.read())
        with open(self.spam_data, **self.spam_data_kwargs) as f:
            data2 = StringIO(f.read())
        df1 = self.read_html(data1, '.*Water.*')
        df2 = self.read_html(data2, 'Unit')
        assert_framelist_equal(df1, df2)
    def test_string(self):
        """read_html accepts a raw HTML string as input."""
        with open(self.spam_data, **self.spam_data_kwargs) as f:
            data = f.read()
        df1 = self.read_html(data, '.*Water.*')
        df2 = self.read_html(data, 'Unit')
        assert_framelist_equal(df1, df2)
    def test_file_like(self):
        """read_html accepts an open file object as input."""
        with open(self.spam_data, **self.spam_data_kwargs) as f:
            df1 = self.read_html(f, '.*Water.*')
        with open(self.spam_data, **self.spam_data_kwargs) as f:
            df2 = self.read_html(f, 'Unit')
        assert_framelist_equal(df1, df2)
    @network
    def test_bad_url_protocol(self):
        """An unsupported URL scheme (git://) raises URLError."""
        with pytest.raises(URLError):
            self.read_html('git://github.com', match='.*Water.*')
    @network
    def test_invalid_url(self):
        """An unresolvable host raises URLError (or a 'No tables' error)."""
        try:
            with pytest.raises(URLError):
                self.read_html('http://www.a23950sdfa908sd.com',
                               match='.*Water.*')
        except ValueError as e:
            # some flavors surface the failure as an empty-parse ValueError
            assert 'No tables found' in str(e)
    @pytest.mark.slow
    def test_file_url(self):
        """read_html accepts a file:// URL."""
        url = self.banklist_data
        dfs = self.read_html(file_path_to_url(os.path.abspath(url)),
                             'First',
                             attrs={'id': 'table'})
        assert isinstance(dfs, list)
        for df in dfs:
            assert isinstance(df, DataFrame)
    @pytest.mark.slow
    def test_invalid_table_attrs(self):
        """Non-matching attrs means no tables, which raises ValueError."""
        url = self.banklist_data
        with pytest.raises(ValueError, match='No tables found'):
            self.read_html(url, 'First Federal Bank of Florida',
                           attrs={'id': 'tasdfable'})
  226. def _bank_data(self, *args, **kwargs):
  227. return self.read_html(self.banklist_data, 'Metcalf',
  228. attrs={'id': 'table'}, *args, **kwargs)
    @pytest.mark.slow
    def test_multiindex_header(self):
        """A two-row header parses into MultiIndex columns."""
        df = self._bank_data(header=[0, 1])[0]
        assert isinstance(df.columns, MultiIndex)
    @pytest.mark.slow
    def test_multiindex_index(self):
        """Two index columns parse into a MultiIndex index."""
        df = self._bank_data(index_col=[0, 1])[0]
        assert isinstance(df.index, MultiIndex)
    @pytest.mark.slow
    def test_multiindex_header_index(self):
        """MultiIndex header and MultiIndex index may be combined."""
        df = self._bank_data(header=[0, 1], index_col=[0, 1])[0]
        assert isinstance(df.columns, MultiIndex)
        assert isinstance(df.index, MultiIndex)
    @pytest.mark.slow
    def test_multiindex_header_skiprows_tuples(self):
        """Deprecated tupleize_cols=True collapses the header to tuples."""
        # tupleize_cols emits a FutureWarning; result is a flat Index
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            df = self._bank_data(header=[0, 1], skiprows=1,
                                 tupleize_cols=True)[0]
        assert isinstance(df.columns, Index)
    @pytest.mark.slow
    def test_multiindex_header_skiprows(self):
        """MultiIndex header survives skiprows."""
        df = self._bank_data(header=[0, 1], skiprows=1)[0]
        assert isinstance(df.columns, MultiIndex)
    @pytest.mark.slow
    def test_multiindex_header_index_skiprows(self):
        """MultiIndex header + index survive skiprows together."""
        df = self._bank_data(header=[0, 1], index_col=[0, 1], skiprows=1)[0]
        assert isinstance(df.index, MultiIndex)
        assert isinstance(df.columns, MultiIndex)
    @pytest.mark.slow
    def test_regex_idempotency(self):
        """A pre-compiled (even doubly-compiled) regex works as match."""
        url = self.banklist_data
        # re.compile of a compiled pattern returns it unchanged
        dfs = self.read_html(file_path_to_url(os.path.abspath(url)),
                             match=re.compile(re.compile('Florida')),
                             attrs={'id': 'table'})
        assert isinstance(dfs, list)
        for df in dfs:
            assert isinstance(df, DataFrame)
    def test_negative_skiprows(self):
        """A negative skiprows value is rejected with ValueError."""
        msg = r'\(you passed a negative value\)'
        with pytest.raises(ValueError, match=msg):
            self.read_html(self.spam_data, 'Water', skiprows=-1)
    @network
    def test_multiple_matches(self):
        """A match hitting several tables returns more than one frame."""
        url = 'https://docs.python.org/2/'
        dfs = self.read_html(url, match='Python')
        assert len(dfs) > 1
  275. @network
  276. def test_python_docs_table(self):
  277. url = 'https://docs.python.org/2/'
  278. dfs = self.read_html(url, match='Python')
  279. zz = [df.iloc[0, 0][0:4] for df in dfs]
  280. assert sorted(zz) == sorted(['Repo', 'What'])
  281. @pytest.mark.slow
  282. def test_thousands_macau_stats(self, datapath):
  283. all_non_nan_table_index = -2
  284. macau_data = datapath("io", "data", "macau.html")
  285. dfs = self.read_html(macau_data, index_col=0,
  286. attrs={'class': 'style1'})
  287. df = dfs[all_non_nan_table_index]
  288. assert not any(s.isna().any() for _, s in df.iteritems())
  289. @pytest.mark.slow
  290. def test_thousands_macau_index_col(self, datapath):
  291. all_non_nan_table_index = -2
  292. macau_data = datapath('io', 'data', 'macau.html')
  293. dfs = self.read_html(macau_data, index_col=0, header=0)
  294. df = dfs[all_non_nan_table_index]
  295. assert not any(s.isna().any() for _, s in df.iteritems())
  296. def test_empty_tables(self):
  297. """
  298. Make sure that read_html ignores empty tables.
  299. """
  300. result = self.read_html('''
  301. <table>
  302. <thead>
  303. <tr>
  304. <th>A</th>
  305. <th>B</th>
  306. </tr>
  307. </thead>
  308. <tbody>
  309. <tr>
  310. <td>1</td>
  311. <td>2</td>
  312. </tr>
  313. </tbody>
  314. </table>
  315. <table>
  316. <tbody>
  317. </tbody>
  318. </table>
  319. ''')
  320. assert len(result) == 1
  321. def test_multiple_tbody(self):
  322. # GH-20690
  323. # Read all tbody tags within a single table.
  324. result = self.read_html('''<table>
  325. <thead>
  326. <tr>
  327. <th>A</th>
  328. <th>B</th>
  329. </tr>
  330. </thead>
  331. <tbody>
  332. <tr>
  333. <td>1</td>
  334. <td>2</td>
  335. </tr>
  336. </tbody>
  337. <tbody>
  338. <tr>
  339. <td>3</td>
  340. <td>4</td>
  341. </tr>
  342. </tbody>
  343. </table>''')[0]
  344. expected = DataFrame(data=[[1, 2], [3, 4]], columns=['A', 'B'])
  345. tm.assert_frame_equal(result, expected)
  346. def test_header_and_one_column(self):
  347. """
  348. Don't fail with bs4 when there is a header and only one column
  349. as described in issue #9178
  350. """
  351. result = self.read_html('''<table>
  352. <thead>
  353. <tr>
  354. <th>Header</th>
  355. </tr>
  356. </thead>
  357. <tbody>
  358. <tr>
  359. <td>first</td>
  360. </tr>
  361. </tbody>
  362. </table>''')[0]
  363. expected = DataFrame(data={'Header': 'first'}, index=[0])
  364. tm.assert_frame_equal(result, expected)
  365. def test_thead_without_tr(self):
  366. """
  367. Ensure parser adds <tr> within <thead> on malformed HTML.
  368. """
  369. result = self.read_html('''<table>
  370. <thead>
  371. <tr>
  372. <th>Country</th>
  373. <th>Municipality</th>
  374. <th>Year</th>
  375. </tr>
  376. </thead>
  377. <tbody>
  378. <tr>
  379. <td>Ukraine</td>
  380. <th>Odessa</th>
  381. <td>1944</td>
  382. </tr>
  383. </tbody>
  384. </table>''')[0]
  385. expected = DataFrame(data=[['Ukraine', 'Odessa', 1944]],
  386. columns=['Country', 'Municipality', 'Year'])
  387. tm.assert_frame_equal(result, expected)
  388. def test_tfoot_read(self):
  389. """
  390. Make sure that read_html reads tfoot, containing td or th.
  391. Ignores empty tfoot
  392. """
  393. data_template = '''<table>
  394. <thead>
  395. <tr>
  396. <th>A</th>
  397. <th>B</th>
  398. </tr>
  399. </thead>
  400. <tbody>
  401. <tr>
  402. <td>bodyA</td>
  403. <td>bodyB</td>
  404. </tr>
  405. </tbody>
  406. <tfoot>
  407. {footer}
  408. </tfoot>
  409. </table>'''
  410. expected1 = DataFrame(data=[['bodyA', 'bodyB']], columns=['A', 'B'])
  411. expected2 = DataFrame(data=[['bodyA', 'bodyB'], ['footA', 'footB']],
  412. columns=['A', 'B'])
  413. data1 = data_template.format(footer="")
  414. data2 = data_template.format(
  415. footer="<tr><td>footA</td><th>footB</th></tr>")
  416. result1 = self.read_html(data1)[0]
  417. result2 = self.read_html(data2)[0]
  418. tm.assert_frame_equal(result1, expected1)
  419. tm.assert_frame_equal(result2, expected2)
  420. def test_parse_header_of_non_string_column(self):
  421. # GH5048: if header is specified explicitly, an int column should be
  422. # parsed as int while its header is parsed as str
  423. result = self.read_html('''
  424. <table>
  425. <tr>
  426. <td>S</td>
  427. <td>I</td>
  428. </tr>
  429. <tr>
  430. <td>text</td>
  431. <td>1944</td>
  432. </tr>
  433. </table>
  434. ''', header=0)[0]
  435. expected = DataFrame([['text', 1944]], columns=('S', 'I'))
  436. tm.assert_frame_equal(result, expected)
    def test_nyse_wsj_commas_table(self, datapath):
        """Comma thousands separators parse; shape and columns match."""
        data = datapath('io', 'data', 'nyse_wsj.html')
        df = self.read_html(data, index_col=0, header=0,
                            attrs={'class': 'mdcTable'})[0]
        expected = Index(['Issue(Roll over for charts and headlines)',
                          'Volume', 'Price', 'Chg', '% Chg'])
        nrows = 100
        assert df.shape[0] == nrows
        tm.assert_index_equal(df.columns, expected)
    @pytest.mark.slow
    def test_banklist_header(self, datapath):
        """Parsed banklist HTML matches the CSV ground truth after cleanup."""
        from pandas.io.html import _remove_whitespace

        def try_remove_ws(x):
            # non-strings have no whitespace to normalize; pass through
            try:
                return _remove_whitespace(x)
            except AttributeError:
                return x

        df = self.read_html(self.banklist_data, 'Metcalf',
                            attrs={'id': 'table'})[0]
        ground_truth = read_csv(datapath('io', 'data', 'banklist.csv'),
                                converters={'Updated Date': Timestamp,
                                            'Closing Date': Timestamp})
        assert df.shape == ground_truth.shape
        # HTML cells carry extra link text ("En Espanol" etc.) absent
        # from the CSV; normalize those names before comparing
        old = ['First Vietnamese American BankIn Vietnamese',
               'Westernbank Puerto RicoEn Espanol',
               'R-G Premier Bank of Puerto RicoEn Espanol',
               'EurobankEn Espanol', 'Sanderson State BankEn Espanol',
               'Washington Mutual Bank(Including its subsidiary Washington '
               'Mutual Bank FSB)',
               'Silver State BankEn Espanol',
               'AmTrade International BankEn Espanol',
               'Hamilton Bank, NAEn Espanol',
               'The Citizens Savings BankPioneer Community Bank, Inc.']
        new = ['First Vietnamese American Bank', 'Westernbank Puerto Rico',
               'R-G Premier Bank of Puerto Rico', 'Eurobank',
               'Sanderson State Bank', 'Washington Mutual Bank',
               'Silver State Bank', 'AmTrade International Bank',
               'Hamilton Bank, NA', 'The Citizens Savings Bank']
        dfnew = df.applymap(try_remove_ws).replace(old, new)
        gtnew = ground_truth.applymap(try_remove_ws)
        converted = dfnew._convert(datetime=True, numeric=True)
        date_cols = ['Closing Date', 'Updated Date']
        converted[date_cols] = converted[date_cols]._convert(datetime=True,
                                                             coerce=True)
        tm.assert_frame_equal(converted, gtnew)
    @pytest.mark.slow
    def test_gold_canyon(self):
        """Text present in the raw HTML survives into the parsed frame."""
        gc = 'Gold Canyon'
        with open(self.banklist_data, 'r') as f:
            raw_text = f.read()
        # sanity-check the fixture before asserting on the parsed output
        assert gc in raw_text
        df = self.read_html(self.banklist_data, 'Gold Canyon',
                            attrs={'id': 'table'})[0]
        assert gc in df.to_string()
  491. def test_different_number_of_cols(self):
  492. expected = self.read_html("""<table>
  493. <thead>
  494. <tr style="text-align: right;">
  495. <th></th>
  496. <th>C_l0_g0</th>
  497. <th>C_l0_g1</th>
  498. <th>C_l0_g2</th>
  499. <th>C_l0_g3</th>
  500. <th>C_l0_g4</th>
  501. </tr>
  502. </thead>
  503. <tbody>
  504. <tr>
  505. <th>R_l0_g0</th>
  506. <td> 0.763</td>
  507. <td> 0.233</td>
  508. <td> nan</td>
  509. <td> nan</td>
  510. <td> nan</td>
  511. </tr>
  512. <tr>
  513. <th>R_l0_g1</th>
  514. <td> 0.244</td>
  515. <td> 0.285</td>
  516. <td> 0.392</td>
  517. <td> 0.137</td>
  518. <td> 0.222</td>
  519. </tr>
  520. </tbody>
  521. </table>""", index_col=0)[0]
  522. result = self.read_html("""<table>
  523. <thead>
  524. <tr style="text-align: right;">
  525. <th></th>
  526. <th>C_l0_g0</th>
  527. <th>C_l0_g1</th>
  528. <th>C_l0_g2</th>
  529. <th>C_l0_g3</th>
  530. <th>C_l0_g4</th>
  531. </tr>
  532. </thead>
  533. <tbody>
  534. <tr>
  535. <th>R_l0_g0</th>
  536. <td> 0.763</td>
  537. <td> 0.233</td>
  538. </tr>
  539. <tr>
  540. <th>R_l0_g1</th>
  541. <td> 0.244</td>
  542. <td> 0.285</td>
  543. <td> 0.392</td>
  544. <td> 0.137</td>
  545. <td> 0.222</td>
  546. </tr>
  547. </tbody>
  548. </table>""", index_col=0)[0]
  549. tm.assert_frame_equal(result, expected)
  550. def test_colspan_rowspan_1(self):
  551. # GH17054
  552. result = self.read_html("""
  553. <table>
  554. <tr>
  555. <th>A</th>
  556. <th colspan="1">B</th>
  557. <th rowspan="1">C</th>
  558. </tr>
  559. <tr>
  560. <td>a</td>
  561. <td>b</td>
  562. <td>c</td>
  563. </tr>
  564. </table>
  565. """)[0]
  566. expected = DataFrame([['a', 'b', 'c']], columns=['A', 'B', 'C'])
  567. tm.assert_frame_equal(result, expected)
  568. def test_colspan_rowspan_copy_values(self):
  569. # GH17054
  570. # In ASCII, with lowercase letters being copies:
  571. #
  572. # X x Y Z W
  573. # A B b z C
  574. result = self.read_html("""
  575. <table>
  576. <tr>
  577. <td colspan="2">X</td>
  578. <td>Y</td>
  579. <td rowspan="2">Z</td>
  580. <td>W</td>
  581. </tr>
  582. <tr>
  583. <td>A</td>
  584. <td colspan="2">B</td>
  585. <td>C</td>
  586. </tr>
  587. </table>
  588. """, header=0)[0]
  589. expected = DataFrame(data=[['A', 'B', 'B', 'Z', 'C']],
  590. columns=['X', 'X.1', 'Y', 'Z', 'W'])
  591. tm.assert_frame_equal(result, expected)
  592. def test_colspan_rowspan_both_not_1(self):
  593. # GH17054
  594. # In ASCII, with lowercase letters being copies:
  595. #
  596. # A B b b C
  597. # a b b b D
  598. result = self.read_html("""
  599. <table>
  600. <tr>
  601. <td rowspan="2">A</td>
  602. <td rowspan="2" colspan="3">B</td>
  603. <td>C</td>
  604. </tr>
  605. <tr>
  606. <td>D</td>
  607. </tr>
  608. </table>
  609. """, header=0)[0]
  610. expected = DataFrame(data=[['A', 'B', 'B', 'B', 'D']],
  611. columns=['A', 'B', 'B.1', 'B.2', 'C'])
  612. tm.assert_frame_equal(result, expected)
  613. def test_rowspan_at_end_of_row(self):
  614. # GH17054
  615. # In ASCII, with lowercase letters being copies:
  616. #
  617. # A B
  618. # C b
  619. result = self.read_html("""
  620. <table>
  621. <tr>
  622. <td>A</td>
  623. <td rowspan="2">B</td>
  624. </tr>
  625. <tr>
  626. <td>C</td>
  627. </tr>
  628. </table>
  629. """, header=0)[0]
  630. expected = DataFrame(data=[['C', 'B']], columns=['A', 'B'])
  631. tm.assert_frame_equal(result, expected)
  632. def test_rowspan_only_rows(self):
  633. # GH17054
  634. result = self.read_html("""
  635. <table>
  636. <tr>
  637. <td rowspan="3">A</td>
  638. <td rowspan="3">B</td>
  639. </tr>
  640. </table>
  641. """, header=0)[0]
  642. expected = DataFrame(data=[['A', 'B'], ['A', 'B']],
  643. columns=['A', 'B'])
  644. tm.assert_frame_equal(result, expected)
  645. def test_header_inferred_from_rows_with_only_th(self):
  646. # GH17054
  647. result = self.read_html("""
  648. <table>
  649. <tr>
  650. <th>A</th>
  651. <th>B</th>
  652. </tr>
  653. <tr>
  654. <th>a</th>
  655. <th>b</th>
  656. </tr>
  657. <tr>
  658. <td>1</td>
  659. <td>2</td>
  660. </tr>
  661. </table>
  662. """)[0]
  663. columns = MultiIndex(levels=[['A', 'B'], ['a', 'b']],
  664. codes=[[0, 1], [0, 1]])
  665. expected = DataFrame(data=[[1, 2]], columns=columns)
  666. tm.assert_frame_equal(result, expected)
    def test_parse_dates_list(self):
        """parse_dates works by column position and by column name."""
        df = DataFrame({'date': date_range('1/1/2001', periods=10)})
        expected = df.to_html()
        res = self.read_html(expected, parse_dates=[1], index_col=0)
        tm.assert_frame_equal(df, res[0])
        res = self.read_html(expected, parse_dates=['date'], index_col=0)
        tm.assert_frame_equal(df, res[0])
    def test_parse_dates_combine(self):
        """parse_dates can combine separate date and time columns."""
        raw_dates = Series(date_range('1/1/2001', periods=10))
        df = DataFrame({'date': raw_dates.map(lambda x: str(x.date())),
                        'time': raw_dates.map(lambda x: str(x.time()))})
        res = self.read_html(df.to_html(), parse_dates={'datetime': [1, 2]},
                             index_col=1)
        newdf = DataFrame({'datetime': raw_dates})
        tm.assert_frame_equal(newdf, res[0])
    def test_computer_sales_page(self, datapath):
        """Too-deep header rows raise ParserError; valid depth parses."""
        data = datapath('io', 'data', 'computer_sales_page.html')
        msg = (r"Passed header=\[0,1\] are too many "
               r"rows for this multi_index of columns")
        with pytest.raises(ParserError, match=msg):
            self.read_html(data, header=[0, 1])
        data = datapath('io', 'data', 'computer_sales_page.html')
        assert self.read_html(data, header=[1, 2])
  690. def test_wikipedia_states_table(self, datapath):
  691. data = datapath('io', 'data', 'wikipedia_states.html')
  692. assert os.path.isfile(data), '%r is not a file' % data
  693. assert os.path.getsize(data), '%r is an empty file' % data
  694. result = self.read_html(data, 'Arizona', header=1)[0]
  695. assert result['sq mi'].dtype == np.dtype('float64')
  696. def test_parser_error_on_empty_header_row(self):
  697. msg = (r"Passed header=\[0,1\] are too many "
  698. r"rows for this multi_index of columns")
  699. with pytest.raises(ParserError, match=msg):
  700. self.read_html("""
  701. <table>
  702. <thead>
  703. <tr><th></th><th></tr>
  704. <tr><th>A</th><th>B</th></tr>
  705. </thead>
  706. <tbody>
  707. <tr><td>a</td><td>b</td></tr>
  708. </tbody>
  709. </table>
  710. """, header=[0, 1])
  711. def test_decimal_rows(self):
  712. # GH 12907
  713. result = self.read_html('''<html>
  714. <body>
  715. <table>
  716. <thead>
  717. <tr>
  718. <th>Header</th>
  719. </tr>
  720. </thead>
  721. <tbody>
  722. <tr>
  723. <td>1100#101</td>
  724. </tr>
  725. </tbody>
  726. </table>
  727. </body>
  728. </html>''', decimal='#')[0]
  729. expected = DataFrame(data={'Header': 1100.101}, index=[0])
  730. assert result['Header'].dtype == np.dtype('float64')
  731. tm.assert_frame_equal(result, expected)
  732. def test_bool_header_arg(self):
  733. # GH 6114
  734. for arg in [True, False]:
  735. with pytest.raises(TypeError):
  736. self.read_html(self.spam_data, header=arg)
  737. def test_converters(self):
  738. # GH 13461
  739. result = self.read_html(
  740. """<table>
  741. <thead>
  742. <tr>
  743. <th>a</th>
  744. </tr>
  745. </thead>
  746. <tbody>
  747. <tr>
  748. <td> 0.763</td>
  749. </tr>
  750. <tr>
  751. <td> 0.244</td>
  752. </tr>
  753. </tbody>
  754. </table>""",
  755. converters={'a': str}
  756. )[0]
  757. expected = DataFrame({'a': ['0.763', '0.244']})
  758. tm.assert_frame_equal(result, expected)
  759. def test_na_values(self):
  760. # GH 13461
  761. result = self.read_html(
  762. """<table>
  763. <thead>
  764. <tr>
  765. <th>a</th>
  766. </tr>
  767. </thead>
  768. <tbody>
  769. <tr>
  770. <td> 0.763</td>
  771. </tr>
  772. <tr>
  773. <td> 0.244</td>
  774. </tr>
  775. </tbody>
  776. </table>""",
  777. na_values=[0.244])[0]
  778. expected = DataFrame({'a': [0.763, np.nan]})
  779. tm.assert_frame_equal(result, expected)
  780. def test_keep_default_na(self):
  781. html_data = """<table>
  782. <thead>
  783. <tr>
  784. <th>a</th>
  785. </tr>
  786. </thead>
  787. <tbody>
  788. <tr>
  789. <td> N/A</td>
  790. </tr>
  791. <tr>
  792. <td> NA</td>
  793. </tr>
  794. </tbody>
  795. </table>"""
  796. expected_df = DataFrame({'a': ['N/A', 'NA']})
  797. html_df = self.read_html(html_data, keep_default_na=False)[0]
  798. tm.assert_frame_equal(expected_df, html_df)
  799. expected_df = DataFrame({'a': [np.nan, np.nan]})
  800. html_df = self.read_html(html_data, keep_default_na=True)[0]
  801. tm.assert_frame_equal(expected_df, html_df)
  802. def test_preserve_empty_rows(self):
  803. result = self.read_html("""
  804. <table>
  805. <tr>
  806. <th>A</th>
  807. <th>B</th>
  808. </tr>
  809. <tr>
  810. <td>a</td>
  811. <td>b</td>
  812. </tr>
  813. <tr>
  814. <td></td>
  815. <td></td>
  816. </tr>
  817. </table>
  818. """)[0]
  819. expected = DataFrame(data=[['a', 'b'], [np.nan, np.nan]],
  820. columns=['A', 'B'])
  821. tm.assert_frame_equal(result, expected)
  822. def test_ignore_empty_rows_when_inferring_header(self):
  823. result = self.read_html("""
  824. <table>
  825. <thead>
  826. <tr><th></th><th></tr>
  827. <tr><th>A</th><th>B</th></tr>
  828. <tr><th>a</th><th>b</th></tr>
  829. </thead>
  830. <tbody>
  831. <tr><td>1</td><td>2</td></tr>
  832. </tbody>
  833. </table>
  834. """)[0]
  835. columns = MultiIndex(levels=[['A', 'B'], ['a', 'b']],
  836. codes=[[0, 1], [0, 1]])
  837. expected = DataFrame(data=[[1, 2]], columns=columns)
  838. tm.assert_frame_equal(result, expected)
  839. def test_multiple_header_rows(self):
  840. # Issue #13434
  841. expected_df = DataFrame(data=[("Hillary", 68, "D"),
  842. ("Bernie", 74, "D"),
  843. ("Donald", 69, "R")])
  844. expected_df.columns = [["Unnamed: 0_level_0", "Age", "Party"],
  845. ["Name", "Unnamed: 1_level_1",
  846. "Unnamed: 2_level_1"]]
  847. html = expected_df.to_html(index=False)
  848. html_df = self.read_html(html, )[0]
  849. tm.assert_frame_equal(expected_df, html_df)
  850. def test_works_on_valid_markup(self, datapath):
  851. filename = datapath('io', 'data', 'valid_markup.html')
  852. dfs = self.read_html(filename, index_col=0)
  853. assert isinstance(dfs, list)
  854. assert isinstance(dfs[0], DataFrame)
  855. @pytest.mark.slow
  856. def test_fallback_success(self, datapath):
  857. banklist_data = datapath('io', 'data', 'banklist.html')
  858. self.read_html(banklist_data, '.*Water.*', flavor=['lxml', 'html5lib'])
  859. def test_to_html_timestamp(self):
  860. rng = date_range('2000-01-01', periods=10)
  861. df = DataFrame(np.random.randn(10, 4), index=rng)
  862. result = df.to_html()
  863. assert '2000-01-01' in result
  864. @pytest.mark.parametrize("displayed_only,exp0,exp1", [
  865. (True, DataFrame(["foo"]), None),
  866. (False, DataFrame(["foo bar baz qux"]), DataFrame(["foo"]))])
  867. def test_displayed_only(self, displayed_only, exp0, exp1):
  868. # GH 20027
  869. data = StringIO("""<html>
  870. <body>
  871. <table>
  872. <tr>
  873. <td>
  874. foo
  875. <span style="display:none;text-align:center">bar</span>
  876. <span style="display:none">baz</span>
  877. <span style="display: none">qux</span>
  878. </td>
  879. </tr>
  880. </table>
  881. <table style="display: none">
  882. <tr>
  883. <td>foo</td>
  884. </tr>
  885. </table>
  886. </body>
  887. </html>""")
  888. dfs = self.read_html(data, displayed_only=displayed_only)
  889. tm.assert_frame_equal(dfs[0], exp0)
  890. if exp1 is not None:
  891. tm.assert_frame_equal(dfs[1], exp1)
  892. else:
  893. assert len(dfs) == 1 # Should not parse hidden table
  894. def test_encode(self, html_encoding_file):
  895. _, encoding = os.path.splitext(
  896. os.path.basename(html_encoding_file)
  897. )[0].split('_')
  898. try:
  899. with open(html_encoding_file, 'rb') as fobj:
  900. from_string = self.read_html(fobj.read(), encoding=encoding,
  901. index_col=0).pop()
  902. with open(html_encoding_file, 'rb') as fobj:
  903. from_file_like = self.read_html(BytesIO(fobj.read()),
  904. encoding=encoding,
  905. index_col=0).pop()
  906. from_filename = self.read_html(html_encoding_file,
  907. encoding=encoding,
  908. index_col=0).pop()
  909. tm.assert_frame_equal(from_string, from_file_like)
  910. tm.assert_frame_equal(from_string, from_filename)
  911. except Exception:
  912. # seems utf-16/32 fail on windows
  913. if is_platform_windows():
  914. if '16' in encoding or '32' in encoding:
  915. pytest.skip()
  916. raise
  917. def test_parse_failure_unseekable(self):
  918. # Issue #17975
  919. if self.read_html.keywords.get('flavor') == 'lxml':
  920. pytest.skip("Not applicable for lxml")
  921. class UnseekableStringIO(StringIO):
  922. def seekable(self):
  923. return False
  924. bad = UnseekableStringIO('''
  925. <table><tr><td>spam<foobr />eggs</td></tr></table>''')
  926. assert self.read_html(bad)
  927. with pytest.raises(ValueError,
  928. match='passed a non-rewindable file object'):
  929. self.read_html(bad)
  930. def test_parse_failure_rewinds(self):
  931. # Issue #17975
  932. class MockFile(object):
  933. def __init__(self, data):
  934. self.data = data
  935. self.at_end = False
  936. def read(self, size=None):
  937. data = '' if self.at_end else self.data
  938. self.at_end = True
  939. return data
  940. def seek(self, offset):
  941. self.at_end = False
  942. def seekable(self):
  943. return True
  944. good = MockFile('<table><tr><td>spam<br />eggs</td></tr></table>')
  945. bad = MockFile('<table><tr><td>spam<foobr />eggs</td></tr></table>')
  946. assert self.read_html(good)
  947. assert self.read_html(bad)
  948. @pytest.mark.slow
  949. def test_importcheck_thread_safety(self, datapath):
  950. # see gh-16928
  951. class ErrorThread(threading.Thread):
  952. def run(self):
  953. try:
  954. super(ErrorThread, self).run()
  955. except Exception as e:
  956. self.err = e
  957. else:
  958. self.err = None
  959. # force import check by reinitalising global vars in html.py
  960. reload(pandas.io.html)
  961. filename = datapath('io', 'data', 'valid_markup.html')
  962. helper_thread1 = ErrorThread(target=self.read_html, args=(filename,))
  963. helper_thread2 = ErrorThread(target=self.read_html, args=(filename,))
  964. helper_thread1.start()
  965. helper_thread2.start()
  966. while helper_thread1.is_alive() or helper_thread2.is_alive():
  967. pass
  968. assert None is helper_thread1.err is helper_thread2.err