# lxmlhtml.py

  1. """
  2. Link extractor based on lxml.html
  3. """
  4. import six
  5. from six.moves.urllib.parse import urljoin
  6. import lxml.etree as etree
  7. from w3lib.html import strip_html5_whitespace
  8. from w3lib.url import canonicalize_url
  9. from scrapy.link import Link
  10. from scrapy.utils.misc import arg_to_iter, rel_has_nofollow
  11. from scrapy.utils.python import unique as unique_list, to_native_str
  12. from scrapy.utils.response import get_base_url
  13. from scrapy.linkextractors import FilteringLinkExtractor
  14. # from lxml/src/lxml/html/__init__.py
  15. XHTML_NAMESPACE = "http://www.w3.org/1999/xhtml"
  16. _collect_string_content = etree.XPath("string()")
  17. def _nons(tag):
  18. if isinstance(tag, six.string_types):
  19. if tag[0] == '{' and tag[1:len(XHTML_NAMESPACE)+1] == XHTML_NAMESPACE:
  20. return tag.split('}')[-1]
  21. return tag
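
# For example:
#   _nons('{http://www.w3.org/1999/xhtml}a')  -> 'a'
#   _nons('a')                                -> 'a'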


class LxmlParserLinkExtractor(object):

    def __init__(self, tag="a", attr="href", process=None, unique=False,
                 strip=True, canonicalized=False):
        self.scan_tag = tag if callable(tag) else lambda t: t == tag
        self.scan_attr = attr if callable(attr) else lambda a: a == attr
        self.process_attr = process if callable(process) else lambda v: v
        self.unique = unique
        self.strip = strip
        if canonicalized:
            self.link_key = lambda link: link.url
        else:
            self.link_key = lambda link: canonicalize_url(link.url,
                                                          keep_fragments=True)

    def _iter_links(self, document):
        # iterate over real elements only (skips comments and processing
        # instructions), yielding one tuple per matching tag/attribute pair
        for el in document.iter(etree.Element):
            if not self.scan_tag(_nons(el.tag)):
                continue
            attribs = el.attrib
            for attrib in attribs:
                if not self.scan_attr(attrib):
                    continue
                yield (el, attrib, attribs[attrib])

    def _extract_links(self, selector, response_url, response_encoding, base_url):
        links = []
        # hacky way to get the underlying lxml parsed document
        for el, attr, attr_val in self._iter_links(selector.root):
            # pseudo lxml.html.HtmlElement.make_links_absolute(base_url)
            try:
                if self.strip:
                    attr_val = strip_html5_whitespace(attr_val)
                attr_val = urljoin(base_url, attr_val)
            except ValueError:
                continue  # skipping bogus links
            else:
                url = self.process_attr(attr_val)
                if url is None:
                    continue
            url = to_native_str(url, encoding=response_encoding)
            # to fix relative links after process_value
            url = urljoin(response_url, url)
            link = Link(url, _collect_string_content(el) or u'',
                        nofollow=rel_has_nofollow(el.get('rel')))
            links.append(link)
        return self._deduplicate_if_needed(links)

    def extract_links(self, response):
        base_url = get_base_url(response)
        return self._extract_links(response.selector, response.url,
                                   response.encoding, base_url)

    def _process_links(self, links):
        """Normalize and filter extracted links.

        Subclasses should override this if necessary.
        """
        return self._deduplicate_if_needed(links)

    def _deduplicate_if_needed(self, links):
        if self.unique:
            return unique_list(links, key=self.link_key)
        return links
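

# A quick parser-level sketch (the markup below is illustrative):
#
#     import lxml.html
#     doc = lxml.html.fromstring('<a href="a.html">A</a> <a href="b.html">B</a>')
#     lx = LxmlParserLinkExtractor()
#     for el, attrib, value in lx._iter_links(doc):
#         print(attrib, value)  # -> href a.html / href b.html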


class LxmlLinkExtractor(FilteringLinkExtractor):

    def __init__(self, allow=(), deny=(), allow_domains=(), deny_domains=(), restrict_xpaths=(),
                 tags=('a', 'area'), attrs=('href',), canonicalize=False,
                 unique=True, process_value=None, deny_extensions=None, restrict_css=(),
                 strip=True, restrict_text=None):
        tags, attrs = set(arg_to_iter(tags)), set(arg_to_iter(attrs))
        tag_func = lambda x: x in tags
        attr_func = lambda x: x in attrs
        lx = LxmlParserLinkExtractor(
            tag=tag_func,
            attr=attr_func,
            unique=unique,
            process=process_value,
            strip=strip,
            canonicalized=canonicalize,
        )

        super(LxmlLinkExtractor, self).__init__(
            lx, allow=allow, deny=deny,
            allow_domains=allow_domains, deny_domains=deny_domains,
            restrict_xpaths=restrict_xpaths, restrict_css=restrict_css,
            canonicalize=canonicalize, deny_extensions=deny_extensions,
            restrict_text=restrict_text)

    def extract_links(self, response):
        base_url = get_base_url(response)
        if self.restrict_xpaths:
            docs = [subdoc
                    for x in self.restrict_xpaths
                    for subdoc in response.xpath(x)]
        else:
            docs = [response.selector]
        all_links = []
        for doc in docs:
            links = self._extract_links(doc, response.url,
                                        response.encoding, base_url)
            all_links.extend(self._process_links(links))
        return unique_list(all_links)
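

# A minimal usage sketch; the URL and markup below are illustrative
# assumptions, not part of the extractor itself:
if __name__ == '__main__':
    from scrapy.http import HtmlResponse

    response = HtmlResponse(
        url='http://example.com/index.html',
        body=b'<html><body><a href="/page?id=1" rel="nofollow">Next</a></body></html>',
        encoding='utf-8',
    )
    extractor = LxmlLinkExtractor(allow=r'page')
    for link in extractor.extract_links(response):
        # prints Link(url='http://example.com/page?id=1', text='Next', ...)
        print(link)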