# htmlparser.py
"""
HTMLParser-based link extractor
"""
import warnings

import six
from six.moves.html_parser import HTMLParser
from six.moves.urllib.parse import urljoin

from w3lib.url import safe_url_string
from w3lib.html import strip_html5_whitespace

from scrapy.link import Link
from scrapy.utils.python import unique as unique_list
from scrapy.exceptions import ScrapyDeprecationWarning
class HtmlParserLinkExtractor(HTMLParser):
    """Deprecated link extractor built on the stdlib ``HTMLParser``.

    While feeding HTML it collects a ``Link`` for every attribute matched
    by the configured tag/attribute predicates, then resolves each URL to
    an absolute, safely-encoded form.  Superseded by
    ``scrapy.linkextractors.LinkExtractor`` (a deprecation warning is
    emitted on construction).
    """

    def __init__(self, tag="a", attr="href", process=None, unique=False,
                 strip=True):
        # tag / attr / process may each be a callable predicate (or
        # processor); plain values are wrapped in equality / identity
        # lambdas below so the rest of the class can always call them.
        HTMLParser.__init__(self)

        warnings.warn(
            "HtmlParserLinkExtractor is deprecated and will be removed in "
            "future releases. Please use scrapy.linkextractors.LinkExtractor",
            ScrapyDeprecationWarning, stacklevel=2,
        )

        self.scan_tag = tag if callable(tag) else lambda t: t == tag
        self.scan_attr = attr if callable(attr) else lambda a: a == attr
        self.process_attr = process if callable(process) else lambda v: v
        self.unique = unique  # when True, de-duplicate links by URL
        self.strip = strip    # when True, strip HTML5 whitespace from attribute values

    def _extract_links(self, response_text, response_url, response_encoding):
        """Feed *response_text* to the parser and return a list of Links.

        Each collected link URL is joined against the page's ``<base href>``
        (if one was seen) or *response_url*, then passed through
        ``safe_url_string``; the link text is decoded with
        *response_encoding*.  Links whose URL cannot be joined are skipped.
        """
        self.reset()
        self.feed(response_text)
        self.close()

        links = unique_list(self.links, key=lambda link: link.url) if self.unique else self.links

        ret = []
        # <base href> takes precedence for resolving relative URLs
        base_url = urljoin(response_url, self.base_url) if self.base_url else response_url
        for link in links:
            if isinstance(link.url, six.text_type):
                # NOTE(review): encodes text URLs to bytes before urljoin —
                # this matches Python 2 byte-string handling; on Python 3
                # urljoin would then mix bytes and str.  TODO confirm this
                # path is only exercised under Python 2.
                link.url = link.url.encode(response_encoding)
            try:
                link.url = urljoin(base_url, link.url)
            except ValueError:
                # malformed URL that cannot be joined — drop the link
                continue
            link.url = safe_url_string(link.url, response_encoding)
            # NOTE(review): assumes link.text is a byte string here
            # (Python 2 parser output) — verify on Python 3.
            link.text = link.text.decode(response_encoding)
            ret.append(link)
        return ret

    def extract_links(self, response):
        # wrapper needed to allow to work directly with text
        return self._extract_links(response.body, response.url, response.encoding)

    def reset(self):
        """Clear parser state: base URL, in-progress link, collected links."""
        HTMLParser.reset(self)
        self.base_url = None      # set when a <base href> tag is seen
        self.current_link = None  # link whose text is being accumulated
        self.links = []           # all Links collected during the feed

    def handle_starttag(self, tag, attrs):
        # Remember <base href> so relative URLs resolve against it later.
        if tag == 'base':
            self.base_url = dict(attrs).get('href')
        if self.scan_tag(tag):
            for attr, value in attrs:
                if self.scan_attr(attr):
                    if self.strip:
                        value = strip_html5_whitespace(value)
                    url = self.process_attr(value)
                    link = Link(url=url)
                    self.links.append(link)
                    # track it so handle_data can append its text
                    self.current_link = link

    def handle_endtag(self, tag):
        # Closing a scanned tag ends text accumulation for the current link.
        if self.scan_tag(tag):
            self.current_link = None

    def handle_data(self, data):
        # Accumulate text nodes seen between the link's start and end tags.
        if self.current_link:
            self.current_link.text = self.current_link.text + data

    def matches(self, url):
        """This extractor matches with any url, since
        it doesn't contain any patterns"""
        return True