# soupparser.py
"""External interface to the BeautifulSoup HTML parser.
"""

__all__ = ["fromstring", "parse", "convert_tree"]

import re

from lxml import etree, html

try:
    # BeautifulSoup 4: Doctype is a dedicated class, distinct from
    # Declaration.
    from bs4 import (
        BeautifulSoup, Tag, Comment, ProcessingInstruction, NavigableString,
        Declaration, Doctype)
    _DECLARATION_OR_DOCTYPE = (Declaration, Doctype)
except ImportError:
    # BeautifulSoup 3 fallback: there is no Doctype class, so only
    # Declaration marks a document type declaration.
    from BeautifulSoup import (
        BeautifulSoup, Tag, Comment, ProcessingInstruction, NavigableString,
        Declaration)
    _DECLARATION_OR_DOCTYPE = Declaration
def fromstring(data, beautifulsoup=None, makeelement=None, **bsargs):
    """Parse a string of HTML data into an Element tree using the
    BeautifulSoup parser.

    Returns the root ``<html>`` Element of the tree.

    You can pass a different BeautifulSoup parser through the
    `beautifulsoup` keyword, and a different Element factory function
    through the `makeelement` keyword.  By default, the standard
    ``BeautifulSoup`` class and the default factory of `lxml.html` are
    used.  Any extra keyword arguments are passed on to the
    BeautifulSoup parser.
    """
    return _parse(data, beautifulsoup, makeelement, **bsargs)
  27. def parse(file, beautifulsoup=None, makeelement=None, **bsargs):
  28. """Parse a file into an ElemenTree using the BeautifulSoup parser.
  29. You can pass a different BeautifulSoup parser through the
  30. `beautifulsoup` keyword, and a diffent Element factory function
  31. through the `makeelement` keyword. By default, the standard
  32. ``BeautifulSoup`` class and the default factory of `lxml.html` are
  33. used.
  34. """
  35. if not hasattr(file, 'read'):
  36. file = open(file)
  37. root = _parse(file, beautifulsoup, makeelement, **bsargs)
  38. return etree.ElementTree(root)
  39. def convert_tree(beautiful_soup_tree, makeelement=None):
  40. """Convert a BeautifulSoup tree to a list of Element trees.
  41. Returns a list instead of a single root Element to support
  42. HTML-like soup with more than one root element.
  43. You can pass a different Element factory through the `makeelement`
  44. keyword.
  45. """
  46. root = _convert_tree(beautiful_soup_tree, makeelement)
  47. children = root.getchildren()
  48. for child in children:
  49. root.remove(child)
  50. return children
  51. # helpers
  52. def _parse(source, beautifulsoup, makeelement, **bsargs):
  53. if beautifulsoup is None:
  54. beautifulsoup = BeautifulSoup
  55. if hasattr(beautifulsoup, "HTML_ENTITIES"): # bs3
  56. if 'convertEntities' not in bsargs:
  57. bsargs['convertEntities'] = 'html'
  58. if hasattr(beautifulsoup, "DEFAULT_BUILDER_FEATURES"): # bs4
  59. if 'features' not in bsargs:
  60. bsargs['features'] = ['html.parser'] # use Python html parser
  61. tree = beautifulsoup(source, **bsargs)
  62. root = _convert_tree(tree, makeelement)
  63. # from ET: wrap the document in a html root element, if necessary
  64. if len(root) == 1 and root[0].tag == "html":
  65. return root[0]
  66. root.tag = "html"
  67. return root
# Matches a DOCTYPE HTML declaration, case-insensitively.  Group 1
# captures the quoted PUBLIC identifier and group 2 the quoted system
# URI, if present; both groups include the surrounding quotes.
_parse_doctype_declaration = re.compile(
    r'(?:\s|[<!])*DOCTYPE\s*HTML'
    r'(?:\s+PUBLIC)?(?:\s+(\'[^\']*\'|"[^"]*"))?'
    r'(?:\s+(\'[^\']*\'|"[^"]*"))?',
    re.IGNORECASE).match
  73. class _PseudoTag:
  74. # Minimal imitation of BeautifulSoup.Tag
  75. def __init__(self, contents):
  76. self.name = 'html'
  77. self.attrs = []
  78. self.contents = contents
  79. def __iter__(self):
  80. return self.contents.__iter__()
def _convert_tree(beautiful_soup_tree, makeelement):
    """Convert a parsed BeautifulSoup tree into an lxml Element tree.

    Returns the <html> root Element (reusing an existing <html> tag or
    wrapping the roots in a synthetic _PseudoTag).  Siblings before and
    after the root (comments, PIs) are attached with addprevious()/
    addnext(), and any document type declaration found is written to
    the resulting tree's docinfo.
    """
    if makeelement is None:
        makeelement = html.html_parser.makeelement

    # Split the tree into three parts:
    # i)   everything before the root element: document type
    #      declaration, comments, processing instructions, whitespace
    # ii)  the root(s),
    # iii) everything after the root: comments, processing
    #      instructions, whitespace
    first_element_idx = last_element_idx = None
    html_root = declaration = None
    for i, e in enumerate(beautiful_soup_tree):
        if isinstance(e, Tag):
            if first_element_idx is None:
                first_element_idx = i
            last_element_idx = i
            # Remember the first <html> tag so it can be reused as root.
            if html_root is None and e.name and e.name.lower() == 'html':
                html_root = e
        elif declaration is None and isinstance(e, _DECLARATION_OR_DOCTYPE):
            declaration = e

    # For a nice, well-formatted document, the variable roots below is
    # a list consisting of a single <html> element. However, the document
    # may be a soup like '<meta><head><title>Hello</head><body>Hi
    # all<\p>'. In this example roots is a list containing meta, head
    # and body elements.
    # NOTE(review): if the soup contains no Tag at all, first_element_idx
    # stays None and last_element_idx+1 below raises TypeError — confirm
    # callers always supply at least one element.
    pre_root = beautiful_soup_tree.contents[:first_element_idx]
    roots = beautiful_soup_tree.contents[first_element_idx:last_element_idx+1]
    post_root = beautiful_soup_tree.contents[last_element_idx+1:]

    # Reorganize so that there is one <html> root...
    if html_root is not None:
        # ... use existing one if possible, pulling any sibling roots
        # inside it, ...
        i = roots.index(html_root)
        html_root.contents = roots[:i] + html_root.contents + roots[i+1:]
    else:
        # ... otherwise create a new one.
        html_root = _PseudoTag(roots)

    convert_node = _init_node_converters(makeelement)

    # Process pre_root: walk backwards so addprevious() keeps the
    # original document order.
    res_root = convert_node(html_root)
    prev = res_root
    for e in reversed(pre_root):
        converted = convert_node(e)
        if converted is not None:
            prev.addprevious(converted)
            prev = converted

    # ditto for post_root (forwards, via addnext())
    prev = res_root
    for e in post_root:
        converted = convert_node(e)
        if converted is not None:
            prev.addnext(converted)
            prev = converted

    if declaration is not None:
        try:
            # bs4 provides full Doctype string
            doctype_string = declaration.output_ready()
        except AttributeError:
            doctype_string = declaration.string
        match = _parse_doctype_declaration(doctype_string)
        if not match:
            # Something is wrong if we end up in here. Since soupparser should
            # tolerate errors, do not raise Exception, just let it pass.
            pass
        else:
            external_id, sys_uri = match.groups()
            docinfo = res_root.getroottree().docinfo
            # strip quotes and update DOCTYPE values (any of None, '', '...')
            docinfo.public_id = external_id and external_id[1:-1]
            docinfo.system_url = sys_uri and sys_uri[1:-1]

    return res_root
def _init_node_converters(makeelement):
    """Build and return a ``convert_node(bs_node, parent=None)`` function
    that maps BeautifulSoup nodes to lxml nodes using *makeelement* as
    the Element factory.

    Handlers are registered per BeautifulSoup node type; lookups are
    cached by exact type in *converters*, falling back to an
    isinstance() scan over *ordered_node_types* for subclasses.
    """
    converters = {}
    ordered_node_types = []

    def converter(*types):
        # Decorator: register *handler* for each of *types*, keeping
        # registration order for the isinstance() fallback below.
        def add(handler):
            for t in types:
                converters[t] = handler
                ordered_node_types.append(t)
            return handler
        return add

    def find_best_converter(node):
        # Fallback for node types not registered exactly: first
        # registered base class wins.
        for t in ordered_node_types:
            if isinstance(node, t):
                return converters[t]
        return None

    def convert_node(bs_node, parent=None):
        # duplicated in convert_tag() below
        try:
            handler = converters[type(bs_node)]
        except KeyError:
            # Cache the resolved handler (possibly None) for this exact type.
            handler = converters[type(bs_node)] = find_best_converter(bs_node)
        if handler is None:
            return None
        return handler(bs_node, parent)

    def map_attrs(bs_attrs):
        # Normalize soup attributes to a plain {name: value} dict with
        # entities unescaped.
        if isinstance(bs_attrs, dict):  # bs4
            attribs = {}
            for k, v in bs_attrs.items():
                if isinstance(v, list):
                    # bs4 represents multi-valued attributes (e.g. class)
                    # as lists; rejoin them with spaces.
                    v = " ".join(v)
                attribs[k] = unescape(v)
        else:
            # bs3 provides a sequence of (name, value) pairs.
            attribs = dict((k, unescape(v)) for k, v in bs_attrs)
        return attribs

    def append_text(parent, text):
        # Attach character data where lxml expects it: .text of an
        # element without children, .tail of the last child otherwise.
        if len(parent) == 0:
            parent.text = (parent.text or '') + text
        else:
            parent[-1].tail = (parent[-1].tail or '') + text

    # converters are tried in order of their definition
    @converter(Tag, _PseudoTag)
    def convert_tag(bs_node, parent):
        attrs = bs_node.attrs
        if parent is not None:
            attribs = map_attrs(attrs) if attrs else None
            res = etree.SubElement(parent, bs_node.name, attrib=attribs)
        else:
            attribs = map_attrs(attrs) if attrs else {}
            res = makeelement(bs_node.name, attrib=attribs)
        for child in bs_node:
            # avoid double recursion by inlining convert_node(), see above
            try:
                handler = converters[type(child)]
            except KeyError:
                pass
            else:
                if handler is not None:
                    handler(child, res)
                continue
            convert_node(child, res)
        return res

    @converter(Comment)
    def convert_comment(bs_node, parent):
        res = etree.Comment(bs_node)
        if parent is not None:
            parent.append(res)
        return res

    @converter(ProcessingInstruction)
    def convert_pi(bs_node, parent):
        if bs_node.endswith('?'):
            # The PI is of XML style (<?as df?>) but BeautifulSoup
            # interpreted it as being SGML style (<?as df>). Fix.
            bs_node = bs_node[:-1]
        res = etree.ProcessingInstruction(*bs_node.split(' ', 1))
        if parent is not None:
            parent.append(res)
        return res

    @converter(NavigableString)
    def convert_text(bs_node, parent):
        # Text never produces an Element of its own; it is merged into
        # the parent via append_text().
        if parent is not None:
            append_text(parent, unescape(bs_node))
        return None

    return convert_node
# copied from ET's ElementSoup

try:
    from html.entities import name2codepoint  # Python 3
except ImportError:
    from htmlentitydefs import name2codepoint  # Python 2

# Substitution function bound to the named-entity pattern; only
# handles &name; entities, not numeric character references.
handle_entities = re.compile(r"&(\w+);").sub

try:
    unichr
except NameError:
    # Python 3: unichr no longer exists, chr covers all code points
    unichr = chr
  245. def unescape(string):
  246. if not string:
  247. return ''
  248. # work around oddities in BeautifulSoup's entity handling
  249. def unescape_entity(m):
  250. try:
  251. return unichr(name2codepoint[m.group(1)])
  252. except KeyError:
  253. return m.group(0) # use as is
  254. return handle_entities(unescape_entity, string)