  1. """
  2. scrapy.linkextractors
  3. This package contains a collection of Link Extractors.
  4. For more info see docs/topics/link-extractors.rst
  5. """

import re

from six.moves.urllib.parse import urlparse

from parsel.csstranslator import HTMLTranslator
from w3lib.url import canonicalize_url

from scrapy.utils.misc import arg_to_iter
from scrapy.utils.url import (
    url_is_from_any_domain, url_has_any_extension,
)

# common file extensions that are not followed if they occur in links
IGNORED_EXTENSIONS = [
    # images
    'mng', 'pct', 'bmp', 'gif', 'jpg', 'jpeg', 'png', 'pst', 'psp', 'tif',
    'tiff', 'ai', 'drw', 'dxf', 'eps', 'ps', 'svg',

    # audio
    'mp3', 'wma', 'ogg', 'wav', 'ra', 'aac', 'mid', 'au', 'aiff',

    # video
    '3gp', 'asf', 'asx', 'avi', 'mov', 'mp4', 'mpg', 'qt', 'rm', 'swf', 'wmv',
    'm4a', 'm4v', 'flv',

    # office suites
    'xls', 'xlsx', 'ppt', 'pptx', 'pps', 'doc', 'docx', 'odt', 'ods', 'odg',
    'odp',

    # other
    'css', 'pdf', 'exe', 'bin', 'rss', 'zip', 'rar',
]

# Pre-compiled pattern type, used to accept both strings and compiled regexes
_re_type = type(re.compile("", 0))

# True if any of the given compiled regexes matches the URL
_matches = lambda url, regexs: any(r.search(url) for r in regexs)

# Only URLs with one of these schemes are considered followable links
_is_valid_url = lambda url: url.split('://', 1)[0] in {'http', 'https',
                                                       'file', 'ftp'}
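
# Illustrative behavior of the helpers above (example values are made up,
# not from the original module):
#
#   _is_valid_url('https://example.com/a')  -> True   ('https' is in the set)
#   _is_valid_url('mailto:me@example.com')  -> False  (no '://', so the whole
#                                                      string is compared)
#   _matches('http://example.com/docs', [re.compile('docs')])  -> True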


class FilteringLinkExtractor(object):

    _csstranslator = HTMLTranslator()

    def __init__(self, link_extractor, allow, deny, allow_domains, deny_domains,
                 restrict_xpaths, canonicalize, deny_extensions, restrict_css,
                 restrict_text):
        self.link_extractor = link_extractor

        # allow/deny accept plain strings or pre-compiled patterns;
        # normalize everything to compiled regexes
        self.allow_res = [x if isinstance(x, _re_type) else re.compile(x)
                          for x in arg_to_iter(allow)]
        self.deny_res = [x if isinstance(x, _re_type) else re.compile(x)
                         for x in arg_to_iter(deny)]

        self.allow_domains = set(arg_to_iter(allow_domains))
        self.deny_domains = set(arg_to_iter(deny_domains))

        # restrict_css selectors are translated to equivalent XPath
        # expressions and merged with restrict_xpaths
        self.restrict_xpaths = tuple(arg_to_iter(restrict_xpaths))
        self.restrict_xpaths += tuple(map(self._csstranslator.css_to_xpath,
                                          arg_to_iter(restrict_css)))

        self.canonicalize = canonicalize
        if deny_extensions is None:
            deny_extensions = IGNORED_EXTENSIONS
        # store extensions as dotted suffixes, e.g. 'pdf' -> '.pdf'
        self.deny_extensions = {'.' + e for e in arg_to_iter(deny_extensions)}
        self.restrict_text = [x if isinstance(x, _re_type) else re.compile(x)
                              for x in arg_to_iter(restrict_text)]

    def _link_allowed(self, link):
        # Each check below can only reject a link; a link that survives
        # them all is kept
        if not _is_valid_url(link.url):
            return False
        if self.allow_res and not _matches(link.url, self.allow_res):
            return False
        if self.deny_res and _matches(link.url, self.deny_res):
            return False
        parsed_url = urlparse(link.url)
        if self.allow_domains and not url_is_from_any_domain(parsed_url, self.allow_domains):
            return False
        if self.deny_domains and url_is_from_any_domain(parsed_url, self.deny_domains):
            return False
        if self.deny_extensions and url_has_any_extension(parsed_url, self.deny_extensions):
            return False
        if self.restrict_text and not _matches(link.text, self.restrict_text):
            return False
        return True
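
    # Sketch (illustrative, not in the original source): with the default
    # deny_extensions, a Link whose url is 'http://example.com/report.pdf'
    # is rejected by the url_has_any_extension() check above, because
    # '.pdf' is among the suffixes derived from IGNORED_EXTENSIONS.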

    def matches(self, url):
        if self.allow_domains and not url_is_from_any_domain(url, self.allow_domains):
            return False
        if self.deny_domains and url_is_from_any_domain(url, self.deny_domains):
            return False
        # With no allow patterns every URL is allowed; with no deny
        # patterns nothing is denied
        allowed = (regex.search(url) for regex in self.allow_res) if self.allow_res else [True]
        denied = (regex.search(url) for regex in self.deny_res) if self.deny_res else []
        return any(allowed) and not any(denied)
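
    # Example (illustrative values): with allow=[r'/docs/'] and no deny
    # patterns, matches('http://example.com/docs/intro') is True while
    # matches('http://example.com/blog/') is False; with neither
    # configured, matches() accepts every URL.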

    def _process_links(self, links):
        # Filter first, then optionally canonicalize the survivors, then
        # hand off to the wrapped extractor's own post-processing
        links = [x for x in links if self._link_allowed(x)]
        if self.canonicalize:
            for link in links:
                link.url = canonicalize_url(link.url)
        links = self.link_extractor._process_links(links)
        return links
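
    # For instance (illustrative URL), with canonicalize=True a surviving
    # link to 'HTTP://Example.com/path?b=2&a=1' is rewritten by w3lib's
    # canonicalize_url to 'http://example.com/path?a=1&b=2' (lower-cased
    # scheme and host, sorted query arguments).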

    def _extract_links(self, *args, **kwargs):
        return self.link_extractor._extract_links(*args, **kwargs)


# Top-level imports
from .lxmlhtml import LxmlLinkExtractor as LinkExtractor
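
# Usage sketch (illustrative, not part of the original module): how the
# LinkExtractor re-exported above is typically used from a spider callback.
# The patterns, domain, and callback name are made-up examples.
#
#     from scrapy.linkextractors import LinkExtractor
#
#     link_extractor = LinkExtractor(
#         allow=r'/category/',           # keep only URLs matching this regex
#         deny=r'\?sessionid=',          # drop session-tracking variants
#         allow_domains=['example.com'],
#         deny_extensions=['pdf'],       # stored internally as {'.pdf'}
#     )
#     for link in link_extractor.extract_links(response):
#         yield response.follow(link, callback=self.parse_item)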