  1. """Beautiful Soup
  2. Elixir and Tonic
  3. "The Screen-Scraper's Friend"
  4. http://www.crummy.com/software/BeautifulSoup/
  5. Beautiful Soup uses a pluggable XML or HTML parser to parse a
  6. (possibly invalid) document into a tree representation. Beautiful Soup
  7. provides provides methods and Pythonic idioms that make it easy to
  8. navigate, search, and modify the parse tree.
  9. Beautiful Soup works with Python 2.6 and up. It works better if lxml
  10. and/or html5lib is installed.
  11. For more than you ever wanted to know about Beautiful Soup, see the
  12. documentation:
  13. http://www.crummy.com/software/BeautifulSoup/bs4/doc/
  14. """

__author__ = "Leonard Richardson (leonardr@segfault.org)"
__version__ = "4.3.2"
__copyright__ = "Copyright (c) 2004-2013 Leonard Richardson"
__license__ = "MIT"

__all__ = ['BeautifulSoup']

import os
import re
import warnings

from .builder import builder_registry, ParserRejectedMarkup
from .dammit import UnicodeDammit
from .element import (
    CData,
    Comment,
    DEFAULT_OUTPUT_ENCODING,
    Declaration,
    Doctype,
    NavigableString,
    PageElement,
    ProcessingInstruction,
    ResultSet,
    SoupStrainer,
    Tag,
    )

# The very first thing we do is give a useful error if someone is
# running this code under Python 3 without converting it.
syntax_error = u'You are trying to run the Python 2 version of Beautiful Soup under Python 3. This will not work. You need to convert the code, either by installing it (`python setup.py install`) or by running 2to3 (`2to3 -w bs4`).'

class BeautifulSoup(Tag):
    """
    This class defines the basic interface called by the tree builders.

    These methods will be called by the parser:
      reset()
      feed(markup)

    The tree builder may call these methods from its feed() implementation:
      handle_starttag(name, attrs) # See note about return value
      handle_endtag(name)
      handle_data(data) # Appends to the current data node
      endData(containerClass=NavigableString) # Ends the current data node

    No matter how complicated the underlying parser is, you should be
    able to build a tree using 'start tag' events, 'end tag' events,
    'data' events, and "done with data" events.

    If you encounter an empty-element tag (aka a self-closing tag,
    like HTML's <br> tag), call handle_starttag and then
    handle_endtag.
    """

    ROOT_TAG_NAME = u'[document]'

    # If the end-user gives no indication which tree builder they
    # want, look for one with these features.
    DEFAULT_BUILDER_FEATURES = ['html', 'fast']
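
    # Illustrative, as a comment: the features argument selects a tree builder
    # from builder_registry. Here markup stands in for a document string, and
    # parsers other than the stdlib one are only available if the matching
    # library is installed.
    #
    #     BeautifulSoup(markup)                    # best available HTML builder
    #     BeautifulSoup(markup, "html.parser")     # force the stdlib builder
    #     BeautifulSoup(markup, ["html", "fast"])  # same lookup as the default
    #     BeautifulSoup(markup, "xml")             # XML builder, needs lxml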

    ASCII_SPACES = '\x20\x0a\x09\x0c\x0d'

    def __init__(self, markup="", features=None, builder=None,
                 parse_only=None, from_encoding=None, **kwargs):
        """The Soup object is initialized as the 'root tag', and the
        provided markup (which can be a string or a file-like object)
        is fed into the underlying parser."""

        if 'convertEntities' in kwargs:
            warnings.warn(
                "BS4 does not respect the convertEntities argument to the "
                "BeautifulSoup constructor. Entities are always converted "
                "to Unicode characters.")

        if 'markupMassage' in kwargs:
            del kwargs['markupMassage']
            warnings.warn(
                "BS4 does not respect the markupMassage argument to the "
                "BeautifulSoup constructor. The tree builder is responsible "
                "for any necessary markup massage.")

        if 'smartQuotesTo' in kwargs:
            del kwargs['smartQuotesTo']
            warnings.warn(
                "BS4 does not respect the smartQuotesTo argument to the "
                "BeautifulSoup constructor. Smart quotes are always converted "
                "to Unicode characters.")

        if 'selfClosingTags' in kwargs:
            del kwargs['selfClosingTags']
            warnings.warn(
                "BS4 does not respect the selfClosingTags argument to the "
                "BeautifulSoup constructor. The tree builder is responsible "
                "for understanding self-closing tags.")

        if 'isHTML' in kwargs:
            del kwargs['isHTML']
            warnings.warn(
                "BS4 does not respect the isHTML argument to the "
                "BeautifulSoup constructor. You can pass in features='html' "
                "or features='xml' to get a builder capable of handling "
                "one or the other.")

        def deprecated_argument(old_name, new_name):
            if old_name in kwargs:
                warnings.warn(
                    'The "%s" argument to the BeautifulSoup constructor '
                    'has been renamed to "%s."' % (old_name, new_name))
                value = kwargs[old_name]
                del kwargs[old_name]
                return value
            return None

        parse_only = parse_only or deprecated_argument(
            "parseOnlyThese", "parse_only")

        from_encoding = from_encoding or deprecated_argument(
            "fromEncoding", "from_encoding")

        if len(kwargs) > 0:
            arg = kwargs.keys().pop()
            raise TypeError(
                "__init__() got an unexpected keyword argument '%s'" % arg)

        if builder is None:
            if isinstance(features, basestring):
                features = [features]
            if features is None or len(features) == 0:
                features = self.DEFAULT_BUILDER_FEATURES
            builder_class = builder_registry.lookup(*features)
            if builder_class is None:
                raise FeatureNotFound(
                    "Couldn't find a tree builder with the features you "
                    "requested: %s. Do you need to install a parser library?"
                    % ",".join(features))
            builder = builder_class()
        self.builder = builder
        self.is_xml = builder.is_xml
        self.builder.soup = self

        self.parse_only = parse_only

        if hasattr(markup, 'read'):        # It's a file-type object.
            markup = markup.read()
        elif len(markup) <= 256:
            # Print out warnings for a couple beginner problems
            # involving passing non-markup to Beautiful Soup.
            # Beautiful Soup will still parse the input as markup,
            # just in case that's what the user really wants.
            if (isinstance(markup, unicode)
                and not os.path.supports_unicode_filenames):
                possible_filename = markup.encode("utf8")
            else:
                possible_filename = markup
            is_file = False
            try:
                is_file = os.path.exists(possible_filename)
            except Exception, e:
                # This is almost certainly a problem involving
                # characters not valid in filenames on this
                # system. Just let it go.
                pass
            if is_file:
                warnings.warn(
                    '"%s" looks like a filename, not markup. You should probably open this file and pass the filehandle into Beautiful Soup.' % markup)
            if markup[:5] == "http:" or markup[:6] == "https:":
                # TODO: This is ugly but I couldn't get it to work in
                # Python 3 otherwise.
                if ((isinstance(markup, bytes) and not b' ' in markup)
                    or (isinstance(markup, unicode) and not u' ' in markup)):
                    warnings.warn(
                        '"%s" looks like a URL. Beautiful Soup is not an HTTP client. You should probably use an HTTP client to get the document behind the URL, and feed that document to Beautiful Soup.' % markup)

        for (self.markup, self.original_encoding, self.declared_html_encoding,
             self.contains_replacement_characters) in (
                 self.builder.prepare_markup(markup, from_encoding)):
            self.reset()
            try:
                self._feed()
                break
            except ParserRejectedMarkup:
                pass

        # Clear out the markup and remove the builder's circular
        # reference to this object.
        self.markup = None
        self.builder.soup = None

    def _feed(self):
        # Convert the document to Unicode.
        self.builder.reset()

        self.builder.feed(self.markup)
        # Close out any unfinished strings and close all the open tags.
        self.endData()
        while self.currentTag.name != self.ROOT_TAG_NAME:
            self.popTag()

    def reset(self):
        Tag.__init__(self, self, self.builder, self.ROOT_TAG_NAME)
        self.hidden = 1
        self.builder.reset()
        self.current_data = []
        self.currentTag = None
        self.tagStack = []
        self.preserve_whitespace_tag_stack = []
        self.pushTag(self)

    def new_tag(self, name, namespace=None, nsprefix=None, **attrs):
        """Create a new tag associated with this soup."""
        return Tag(None, self.builder, name, namespace, nsprefix, attrs)

    def new_string(self, s, subclass=NavigableString):
        """Create a new NavigableString associated with this soup."""
        navigable = subclass(s)
        navigable.setup()
        return navigable
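
    # Illustrative, as a comment: new_tag and new_string create detached
    # elements which can then be placed with the tree-modification methods
    # defined on Tag (append is used here). The names and attribute values
    # are invented for the example.
    #
    #     link = soup.new_tag("a", href="http://example.com/")
    #     link.append(soup.new_string("a link"))
    #     soup.body.append(link)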

    def insert_before(self, successor):
        raise NotImplementedError("BeautifulSoup objects don't support insert_before().")

    def insert_after(self, successor):
        raise NotImplementedError("BeautifulSoup objects don't support insert_after().")

    def popTag(self):
        tag = self.tagStack.pop()
        if self.preserve_whitespace_tag_stack and tag == self.preserve_whitespace_tag_stack[-1]:
            self.preserve_whitespace_tag_stack.pop()
        #print "Pop", tag.name
        if self.tagStack:
            self.currentTag = self.tagStack[-1]
        return self.currentTag

    def pushTag(self, tag):
        #print "Push", tag.name
        if self.currentTag:
            self.currentTag.contents.append(tag)
        self.tagStack.append(tag)
        self.currentTag = self.tagStack[-1]
        if tag.name in self.builder.preserve_whitespace_tags:
            self.preserve_whitespace_tag_stack.append(tag)

    def endData(self, containerClass=NavigableString):
        if self.current_data:
            current_data = u''.join(self.current_data)
            # If whitespace is not preserved, and this string contains
            # nothing but ASCII spaces, replace it with a single space
            # or newline.
            if not self.preserve_whitespace_tag_stack:
                strippable = True
                for i in current_data:
                    if i not in self.ASCII_SPACES:
                        strippable = False
                        break
                if strippable:
                    if '\n' in current_data:
                        current_data = '\n'
                    else:
                        current_data = ' '

            # Reset the data collector.
            self.current_data = []

            # Should we add this string to the tree at all?
            if self.parse_only and len(self.tagStack) <= 1 and \
                   (not self.parse_only.text or \
                    not self.parse_only.search(current_data)):
                return

            o = containerClass(current_data)
            self.object_was_parsed(o)
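
    # Illustrative, as a comment: the whitespace collapsing above applies only
    # outside the builder's preserve_whitespace_tags (for the HTML builders
    # these include <pre>). The markup strings are invented for the example.
    #
    #     BeautifulSoup("<p>a</p>   \n   <p>b</p>")
    #     # the run of spaces between the <p> tags collapses to a single '\n'
    #     BeautifulSoup("<pre>  keeps   its   spacing  </pre>")
    #     # inside <pre>, the string is stored unchanged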

    def object_was_parsed(self, o, parent=None, most_recent_element=None):
        """Add an object to the parse tree."""
        parent = parent or self.currentTag
        most_recent_element = most_recent_element or self._most_recent_element
        o.setup(parent, most_recent_element)
        if most_recent_element is not None:
            most_recent_element.next_element = o
        self._most_recent_element = o
        parent.contents.append(o)

    def _popToTag(self, name, nsprefix=None, inclusivePop=True):
        """Pops the tag stack up to and including the most recent
        instance of the given tag. If inclusivePop is false, pops the tag
        stack up to but *not* including the most recent instance of
        the given tag."""
        #print "Popping to %s" % name
        if name == self.ROOT_TAG_NAME:
            # The BeautifulSoup object itself can never be popped.
            return

        most_recently_popped = None

        stack_size = len(self.tagStack)
        for i in range(stack_size - 1, 0, -1):
            t = self.tagStack[i]
            if (name == t.name and nsprefix == t.prefix):
                if inclusivePop:
                    most_recently_popped = self.popTag()
                break
            most_recently_popped = self.popTag()

        return most_recently_popped
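
    # Illustrative, as a comment: because _popToTag pops everything above the
    # matching tag, an end tag implicitly closes any tags still open inside
    # it. The markup is invented for the example.
    #
    #     BeautifulSoup("<a><b>text</a>")
    #     # </a> pops <b> and then <a>, so <b> ends up nested inside <a>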

    def handle_starttag(self, name, namespace, nsprefix, attrs):
        """Push a start tag on to the stack.

        If this method returns None, the tag was rejected by the
        SoupStrainer. You should proceed as if the tag had not occurred
        in the document. For instance, if this was a self-closing tag,
        don't call handle_endtag.
        """

        # print "Start tag %s: %s" % (name, attrs)
        self.endData()

        if (self.parse_only and len(self.tagStack) <= 1
            and (self.parse_only.text
                 or not self.parse_only.search_tag(name, attrs))):
            return None

        tag = Tag(self, self.builder, name, namespace, nsprefix, attrs,
                  self.currentTag, self._most_recent_element)
        if tag is None:
            return tag
        if self._most_recent_element:
            self._most_recent_element.next_element = tag
        self._most_recent_element = tag
        self.pushTag(tag)
        return tag
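
    # Illustrative, as a comment: the parse_only check above is what lets a
    # SoupStrainer filter the document during parsing, so tags the strainer
    # rejects never enter the tree. The markup is invented for the example.
    #
    #     only_a = SoupStrainer("a")
    #     soup = BeautifulSoup("<p>skip</p><a href='#'>keep</a>",
    #                          parse_only=only_a)
    #     # soup now contains only the <a> tag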

    def handle_endtag(self, name, nsprefix=None):
        #print "End tag: " + name
        self.endData()
        self._popToTag(name, nsprefix)

    def handle_data(self, data):
        self.current_data.append(data)

    def decode(self, pretty_print=False,
               eventual_encoding=DEFAULT_OUTPUT_ENCODING,
               formatter="minimal"):
        """Returns a string or Unicode representation of this document.
        To get Unicode, pass None for encoding."""

        if self.is_xml:
            # Print the XML declaration
            encoding_part = ''
            if eventual_encoding != None:
                encoding_part = ' encoding="%s"' % eventual_encoding
            prefix = u'<?xml version="1.0"%s?>\n' % encoding_part
        else:
            prefix = u''
        if not pretty_print:
            indent_level = None
        else:
            indent_level = 0
        return prefix + super(BeautifulSoup, self).decode(
            indent_level, eventual_encoding, formatter)
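
    # Illustrative, as a comment: prettify() and unicode(soup) both go through
    # decode(); pretty_print controls indentation, and for an XML soup the
    # declaration prefix above is prepended. The markup is invented here.
    #
    #     soup = BeautifulSoup("<p>Hi</p>")
    #     soup.decode()                    # the document as one Unicode string
    #     soup.decode(pretty_print=True)   # same document, indented per tag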

# Alias to make it easier to type import: 'from bs4 import _soup'
_s = BeautifulSoup
_soup = BeautifulSoup

class BeautifulStoneSoup(BeautifulSoup):
    """Deprecated interface to an XML parser."""

    def __init__(self, *args, **kwargs):
        kwargs['features'] = 'xml'
        warnings.warn(
            'The BeautifulStoneSoup class is deprecated. Instead of using '
            'it, pass features="xml" into the BeautifulSoup constructor.')
        super(BeautifulStoneSoup, self).__init__(*args, **kwargs)
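
# Illustrative, as a comment: the replacement the deprecation warning asks
# for. The markup is invented; parsing as XML requires lxml to be installed.
#
#     BeautifulStoneSoup("<doc/>")                # warns, still works
#     BeautifulSoup("<doc/>", features="xml")     # preferred spelling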

class StopParsing(Exception):
    pass

class FeatureNotFound(ValueError):
    pass


#By default, act as an HTML pretty-printer.
if __name__ == '__main__':
    import sys
    soup = BeautifulSoup(sys.stdin)
    print soup.prettify()