# -*- coding: utf-8 -*-
"""Tests of Beautiful Soup as a whole."""

import logging
import unittest
import sys
import tempfile
import warnings

from bs4 import (
    BeautifulSoup,
    BeautifulStoneSoup,
)
from bs4.element import (
    CharsetMetaAttributeValue,
    ContentMetaAttributeValue,
    SoupStrainer,
    NamespacedAttribute,
)
import bs4.dammit
from bs4.dammit import (
    EntitySubstitution,
    UnicodeDammit,
)
from bs4.testing import (
    SoupTest,
    skipIf,
)

try:
    from bs4.builder import LXMLTreeBuilder, LXMLTreeBuilderForXML
    LXML_PRESENT = True
except ImportError as e:
    LXML_PRESENT = False

PYTHON_2_PRE_2_7 = (sys.version_info < (2, 7))
PYTHON_3_PRE_3_2 = (sys.version_info[0] == 3 and sys.version_info < (3, 2))
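

# A minimal sketch (added for illustration, not part of the original suite)
# of how LXML_PRESENT and skipIf are meant to combine: tests that need lxml
# are skipped when the lxml builders could not be imported. The class and
# test names below are placeholders.
class _ExampleLXMLGatedTest(SoupTest):

    @skipIf(
        not LXML_PRESENT,
        "lxml seems not to be installed; skipping lxml-dependent test.")
    def test_lxml_builders_importable(self):
        # Reaching this point means the module-level import succeeded.
        self.assertTrue(LXML_PRESENT)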


class TestConstructor(SoupTest):
    """Tests of constructing a BeautifulSoup object from markup."""

    def test_short_unicode_input(self):
        data = u"<h1>éé</h1>"
        soup = self.soup(data)
        self.assertEqual(u"éé", soup.h1.string)

    def test_embedded_null(self):
        data = u"<h1>foo\0bar</h1>"
        soup = self.soup(data)
        self.assertEqual(u"foo\0bar", soup.h1.string)


class TestDeprecatedConstructorArguments(SoupTest):
    """Tests of constructor arguments that were renamed in Beautiful Soup 4."""

    def test_parseOnlyThese_renamed_to_parse_only(self):
        with warnings.catch_warnings(record=True) as w:
            soup = self.soup("<a><b></b></a>", parseOnlyThese=SoupStrainer("b"))
            msg = str(w[0].message)
            self.assertTrue("parseOnlyThese" in msg)
            self.assertTrue("parse_only" in msg)
            self.assertEqual(b"<b></b>", soup.encode())

    def test_fromEncoding_renamed_to_from_encoding(self):
        with warnings.catch_warnings(record=True) as w:
            utf8 = b"\xc3\xa9"
            soup = self.soup(utf8, fromEncoding="utf8")
            msg = str(w[0].message)
            self.assertTrue("fromEncoding" in msg)
            self.assertTrue("from_encoding" in msg)
            self.assertEqual("utf8", soup.original_encoding)

    def test_unrecognized_keyword_argument(self):
        self.assertRaises(
            TypeError, self.soup, "<a>", no_such_argument=True)


class TestWarnings(SoupTest):
    """Tests of warnings issued when the constructor input looks suspicious."""

    def test_disk_file_warning(self):
        filehandle = tempfile.NamedTemporaryFile()
        filename = filehandle.name
        try:
            with warnings.catch_warnings(record=True) as w:
                soup = self.soup(filename)
                msg = str(w[0].message)
                self.assertTrue("looks like a filename" in msg)
        finally:
            filehandle.close()

        # The file no longer exists, so Beautiful Soup will no longer
        # issue the warning.
        with warnings.catch_warnings(record=True) as w:
            soup = self.soup(filename)
            self.assertEqual(0, len(w))

    def test_url_warning(self):
        with warnings.catch_warnings(record=True) as w:
            soup = self.soup("http://www.crummy.com/")
            msg = str(w[0].message)
            self.assertTrue("looks like a URL" in msg)

        with warnings.catch_warnings(record=True) as w:
            soup = self.soup("http://www.crummy.com/ is great")
            self.assertEqual(0, len(w))


class TestSelectiveParsing(SoupTest):
    """Tests of parsing only part of a document with a SoupStrainer."""

    def test_parse_with_soupstrainer(self):
        markup = "No<b>Yes</b><a>No<b>Yes <c>Yes</c></b>"
        strainer = SoupStrainer("b")
        soup = self.soup(markup, parse_only=strainer)
        self.assertEqual(soup.encode(), b"<b>Yes</b><b>Yes <c>Yes</c></b>")


class TestEntitySubstitution(unittest.TestCase):
    """Standalone tests of the EntitySubstitution class."""

    def setUp(self):
        self.sub = EntitySubstitution

    def test_simple_html_substitution(self):
        # Unicode characters corresponding to named HTML entities
        # are substituted, and no others.
        s = u"foo\u2200\N{SNOWMAN}\u00f5bar"
        self.assertEqual(self.sub.substitute_html(s),
                         u"foo&forall;\N{SNOWMAN}&otilde;bar")

    def test_smart_quote_substitution(self):
        # MS smart quotes are a common source of frustration, so we
        # give them a special test.
        quotes = b"\x91\x92foo\x93\x94"
        dammit = UnicodeDammit(quotes)
        self.assertEqual(self.sub.substitute_html(dammit.markup),
                         "&lsquo;&rsquo;foo&ldquo;&rdquo;")

    def test_xml_conversion_includes_no_quotes_if_make_quoted_attribute_is_false(self):
        s = 'Welcome to "my bar"'
        self.assertEqual(self.sub.substitute_xml(s, False), s)

    def test_xml_attribute_quoting_normally_uses_double_quotes(self):
        self.assertEqual(self.sub.substitute_xml("Welcome", True),
                         '"Welcome"')
        self.assertEqual(self.sub.substitute_xml("Bob's Bar", True),
                         '"Bob\'s Bar"')

    def test_xml_attribute_quoting_uses_single_quotes_when_value_contains_double_quotes(self):
        s = 'Welcome to "my bar"'
        self.assertEqual(self.sub.substitute_xml(s, True),
                         "'Welcome to \"my bar\"'")

    def test_xml_attribute_quoting_escapes_single_quotes_when_value_contains_both_single_and_double_quotes(self):
        s = 'Welcome to "Bob\'s Bar"'
        self.assertEqual(
            self.sub.substitute_xml(s, True),
            '"Welcome to &quot;Bob\'s Bar&quot;"')

    def test_xml_quotes_arent_escaped_when_value_is_not_being_quoted(self):
        quoted = 'Welcome to "Bob\'s Bar"'
        self.assertEqual(self.sub.substitute_xml(quoted), quoted)

    def test_xml_quoting_handles_angle_brackets(self):
        self.assertEqual(
            self.sub.substitute_xml("foo<bar>"),
            "foo&lt;bar&gt;")

    def test_xml_quoting_handles_ampersands(self):
        self.assertEqual(self.sub.substitute_xml("AT&T"), "AT&amp;T")

    def test_xml_quoting_including_ampersands_when_they_are_part_of_an_entity(self):
        self.assertEqual(
            self.sub.substitute_xml("&Aacute;T&T"),
            "&amp;Aacute;T&amp;T")

    def test_xml_quoting_ignoring_ampersands_when_they_are_part_of_an_entity(self):
        self.assertEqual(
            self.sub.substitute_xml_containing_entities("&Aacute;T&T"),
            "&Aacute;T&amp;T")

    def test_quotes_not_html_substituted(self):
        """There's no need to do this except inside attribute values."""
        text = 'Bob\'s "bar"'
        self.assertEqual(self.sub.substitute_html(text), text)


class TestEncodingConversion(SoupTest):
    # Test Beautiful Soup's ability to decode and encode from various
    # encodings.

    def setUp(self):
        super(TestEncodingConversion, self).setUp()
        self.unicode_data = u'<html><head><meta charset="utf-8"/></head><body><foo>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</foo></body></html>'
        self.utf8_data = self.unicode_data.encode("utf-8")
        # Just so you know what it looks like.
        self.assertEqual(
            self.utf8_data,
            b'<html><head><meta charset="utf-8"/></head><body><foo>Sacr\xc3\xa9 bleu!</foo></body></html>')

    def test_ascii_in_unicode_out(self):
        # ASCII input is converted to Unicode. The original_encoding
        # attribute is set to 'utf-8', a superset of ASCII.
        chardet = bs4.dammit.chardet_dammit
        logging.disable(logging.WARNING)
        try:
            def noop(str):
                return None
            # Disable chardet, which will realize that the ASCII is ASCII.
            bs4.dammit.chardet_dammit = noop

            ascii = b"<foo>a</foo>"
            soup_from_ascii = self.soup(ascii)
            unicode_output = soup_from_ascii.decode()
            self.assertTrue(isinstance(unicode_output, unicode))
            self.assertEqual(unicode_output, self.document_for(ascii.decode()))
            self.assertEqual(soup_from_ascii.original_encoding.lower(), "utf-8")
        finally:
            logging.disable(logging.NOTSET)
            bs4.dammit.chardet_dammit = chardet

    def test_unicode_in_unicode_out(self):
        # Unicode input is left alone. The original_encoding attribute
        # is not set.
        soup_from_unicode = self.soup(self.unicode_data)
        self.assertEqual(soup_from_unicode.decode(), self.unicode_data)
        self.assertEqual(soup_from_unicode.foo.string, u'Sacr\xe9 bleu!')
        self.assertEqual(soup_from_unicode.original_encoding, None)

    def test_utf8_in_unicode_out(self):
        # UTF-8 input is converted to Unicode. The original_encoding
        # attribute is set.
        soup_from_utf8 = self.soup(self.utf8_data)
        self.assertEqual(soup_from_utf8.decode(), self.unicode_data)
        self.assertEqual(soup_from_utf8.foo.string, u'Sacr\xe9 bleu!')

    def test_utf8_out(self):
        # The internal data structures can be encoded as UTF-8.
        soup_from_unicode = self.soup(self.unicode_data)
        self.assertEqual(soup_from_unicode.encode('utf-8'), self.utf8_data)

    @skipIf(
        PYTHON_2_PRE_2_7 or PYTHON_3_PRE_3_2,
        "Bad HTMLParser detected; skipping test of non-ASCII characters in attribute name.")
    def test_attribute_name_containing_unicode_characters(self):
        markup = u'<div><a \N{SNOWMAN}="snowman"></a></div>'
        self.assertEqual(self.soup(markup).div.encode("utf8"), markup.encode("utf8"))


class TestUnicodeDammit(unittest.TestCase):
    """Standalone tests of UnicodeDammit."""

    def test_unicode_input(self):
        markup = u"I'm already Unicode! \N{SNOWMAN}"
        dammit = UnicodeDammit(markup)
        self.assertEqual(dammit.unicode_markup, markup)

    def test_smart_quotes_to_unicode(self):
        markup = b"<foo>\x91\x92\x93\x94</foo>"
        dammit = UnicodeDammit(markup)
        self.assertEqual(
            dammit.unicode_markup, u"<foo>\u2018\u2019\u201c\u201d</foo>")

    def test_smart_quotes_to_xml_entities(self):
        markup = b"<foo>\x91\x92\x93\x94</foo>"
        dammit = UnicodeDammit(markup, smart_quotes_to="xml")
        self.assertEqual(
            dammit.unicode_markup, "<foo>&#x2018;&#x2019;&#x201C;&#x201D;</foo>")

    def test_smart_quotes_to_html_entities(self):
        markup = b"<foo>\x91\x92\x93\x94</foo>"
        dammit = UnicodeDammit(markup, smart_quotes_to="html")
        self.assertEqual(
            dammit.unicode_markup, "<foo>&lsquo;&rsquo;&ldquo;&rdquo;</foo>")

    def test_smart_quotes_to_ascii(self):
        markup = b"<foo>\x91\x92\x93\x94</foo>"
        dammit = UnicodeDammit(markup, smart_quotes_to="ascii")
        self.assertEqual(
            dammit.unicode_markup, """<foo>''""</foo>""")

    def test_detect_utf8(self):
        utf8 = b"\xc3\xa9"
        dammit = UnicodeDammit(utf8)
        self.assertEqual(dammit.unicode_markup, u'\xe9')
        self.assertEqual(dammit.original_encoding.lower(), 'utf-8')

    def test_convert_hebrew(self):
        hebrew = b"\xed\xe5\xec\xf9"
        dammit = UnicodeDammit(hebrew, ["iso-8859-8"])
        self.assertEqual(dammit.original_encoding.lower(), 'iso-8859-8')
        self.assertEqual(dammit.unicode_markup, u'\u05dd\u05d5\u05dc\u05e9')

    def test_dont_see_smart_quotes_where_there_are_none(self):
        utf_8 = b"\343\202\261\343\203\274\343\202\277\343\202\244 Watch"
        dammit = UnicodeDammit(utf_8)
        self.assertEqual(dammit.original_encoding.lower(), 'utf-8')
        self.assertEqual(dammit.unicode_markup.encode("utf-8"), utf_8)

    def test_ignore_inappropriate_codecs(self):
        utf8_data = u"Räksmörgås".encode("utf-8")
        dammit = UnicodeDammit(utf8_data, ["iso-8859-8"])
        self.assertEqual(dammit.original_encoding.lower(), 'utf-8')

    def test_ignore_invalid_codecs(self):
        utf8_data = u"Räksmörgås".encode("utf-8")
        for bad_encoding in ['.utf8', '...', 'utF---16.!']:
            dammit = UnicodeDammit(utf8_data, [bad_encoding])
            self.assertEqual(dammit.original_encoding.lower(), 'utf-8')

    def test_detect_html5_style_meta_tag(self):
        for data in (
            b'<html><meta charset="euc-jp" /></html>',
            b"<html><meta charset='euc-jp' /></html>",
            b"<html><meta charset=euc-jp /></html>",
            b"<html><meta charset=euc-jp/></html>"):
            dammit = UnicodeDammit(data, is_html=True)
            self.assertEqual(
                "euc-jp", dammit.original_encoding)
    def test_last_ditch_entity_replacement(self):
        # This is a UTF-8 document that contains bytestrings
        # completely incompatible with UTF-8 (ie. encoded with some other
        # encoding).
        #
        # Since there is no consistent encoding for the document,
        # Unicode, Dammit will eventually encode the document as UTF-8
        # and encode the incompatible characters as REPLACEMENT
        # CHARACTER.
        #
        # If chardet is installed, it will detect that the document
        # can be converted into ISO-8859-1 without errors. This happens
        # to be the wrong encoding, but it is a consistent encoding, so the
        # code we're testing here won't run.
        #
        # So we temporarily disable chardet if it's present.
        doc = b"""\357\273\277<?xml version="1.0" encoding="UTF-8"?>
<html><b>\330\250\330\252\330\261</b>
<i>\310\322\321\220\312\321\355\344</i></html>"""
        chardet = bs4.dammit.chardet_dammit
        logging.disable(logging.WARNING)
        try:
            def noop(str):
                return None
            bs4.dammit.chardet_dammit = noop

            dammit = UnicodeDammit(doc)
            self.assertEqual(True, dammit.contains_replacement_characters)
            self.assertTrue(u"\ufffd" in dammit.unicode_markup)

            soup = BeautifulSoup(doc, "html.parser")
            self.assertTrue(soup.contains_replacement_characters)
        finally:
            logging.disable(logging.NOTSET)
            bs4.dammit.chardet_dammit = chardet

    def test_byte_order_mark_removed(self):
        # A document written in UTF-16LE will have its byte order marker stripped.
        data = b'\xff\xfe<\x00a\x00>\x00\xe1\x00\xe9\x00<\x00/\x00a\x00>\x00'
        dammit = UnicodeDammit(data)
        self.assertEqual(u"<a>áé</a>", dammit.unicode_markup)
        self.assertEqual("utf-16le", dammit.original_encoding)

    def test_detwingle(self):
        # Here's a UTF8 document.
        utf8 = (u"\N{SNOWMAN}" * 3).encode("utf8")

        # Here's a Windows-1252 document.
        windows_1252 = (
            u"\N{LEFT DOUBLE QUOTATION MARK}Hi, I like Windows!"
            u"\N{RIGHT DOUBLE QUOTATION MARK}").encode("windows_1252")

        # Through some unholy alchemy, they've been stuck together.
        doc = utf8 + windows_1252 + utf8

        # The document can't be turned into UTF-8:
        self.assertRaises(UnicodeDecodeError, doc.decode, "utf8")

        # Unicode, Dammit thinks the whole document is Windows-1252,
        # which turns the UTF-8 snowmen into mojibake ("â˜ƒ" instead of "☃").

        # But if we run it through detwingle(), it's fixed:
        fixed = UnicodeDammit.detwingle(doc)
        self.assertEqual(
            u"☃☃☃“Hi, I like Windows!”☃☃☃", fixed.decode("utf8"))

    def test_detwingle_ignores_multibyte_characters(self):
        # Each of these characters has a UTF-8 representation ending
        # in \x93. \x93 is a smart quote if interpreted as
        # Windows-1252. But our code knows to skip over multibyte
        # UTF-8 characters, so they'll survive the process unscathed.
        for tricky_unicode_char in (
            u"\N{LATIN SMALL LIGATURE OE}", # 2-byte char '\xc5\x93'
            u"\N{LATIN SUBSCRIPT SMALL LETTER X}", # 3-byte char '\xe2\x82\x93'
            u"\xf0\x90\x90\x93", # Four one-byte code points; UTF-8 form also ends in \x93.
            ):
            input = tricky_unicode_char.encode("utf8")
            self.assertTrue(input.endswith(b'\x93'))
            output = UnicodeDammit.detwingle(input)
            self.assertEqual(output, input)
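

# A minimal usage sketch (added for illustration; the helper below is not
# part of the original suite): the workflow the detwingle() tests above
# exercise is to clean up a bytestring that mixes UTF-8 and Windows-1252
# *before* handing it to the BeautifulSoup constructor.
def _example_detwingle_then_parse(mixed_bytes):
    # mixed_bytes is assumed to be mostly UTF-8 with embedded
    # Windows-1252 byte sequences.
    cleaned = UnicodeDammit.detwingle(mixed_bytes)
    return BeautifulSoup(cleaned, "html.parser")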


class TestNamespacedAttribute(SoupTest):
    """Tests of the NamespacedAttribute class."""

    def test_name_may_be_none(self):
        a = NamespacedAttribute("xmlns", None)
        self.assertEqual(a, "xmlns")

    def test_attribute_is_equivalent_to_colon_separated_string(self):
        a = NamespacedAttribute("a", "b")
        self.assertEqual("a:b", a)

    def test_attributes_are_equivalent_if_prefix_and_name_identical(self):
        a = NamespacedAttribute("a", "b", "c")
        b = NamespacedAttribute("a", "b", "c")
        self.assertEqual(a, b)

        # The actual namespace is not considered.
        c = NamespacedAttribute("a", "b", None)
        self.assertEqual(a, c)

        # But name and prefix are important.
        d = NamespacedAttribute("a", "z", "c")
        self.assertNotEqual(a, d)

        e = NamespacedAttribute("z", "b", "c")
        self.assertNotEqual(a, e)


class TestAttributeValueWithCharsetSubstitution(unittest.TestCase):
    """Tests of attribute values that track the document's charset."""

    def test_charset_meta_attribute_value(self):
        # A CharsetMetaAttributeValue remembers its original value, but
        # encodes as the name of whatever charset the document is being
        # converted to.
        value = CharsetMetaAttributeValue("euc-jp")
        self.assertEqual("euc-jp", value)
        self.assertEqual("euc-jp", value.original_value)
        self.assertEqual("utf8", value.encode("utf8"))

    def test_content_meta_attribute_value(self):
        # A ContentMetaAttributeValue rewrites the charset= portion of its
        # value to match the encoding the document is converted to.
        value = ContentMetaAttributeValue("text/html; charset=euc-jp")
        self.assertEqual("text/html; charset=euc-jp", value)
        self.assertEqual("text/html; charset=euc-jp", value.original_value)
        self.assertEqual("text/html; charset=utf8", value.encode("utf8"))
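

# Convenience hook (not part of the test classes above): running this module
# directly executes the whole suite with the standard unittest runner.
if __name__ == '__main__':
    unittest.main()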