fetch.py

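"""Implementation of the "scrapy fetch" command: download a single URL with
the Scrapy downloader and print the response body (or, with --headers, the
request and response headers) to stdout."""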
from __future__ import print_function
import sys

import six
from w3lib.url import is_url

from scrapy.commands import ScrapyCommand
from scrapy.http import Request
from scrapy.exceptions import UsageError
from scrapy.utils.datatypes import SequenceExclude
from scrapy.utils.spider import spidercls_for_request, DefaultSpider


class Command(ScrapyCommand):

    requires_project = False

    def syntax(self):
        return "[options] <url>"

    def short_desc(self):
        return "Fetch a URL using the Scrapy downloader"

    def long_desc(self):
        return ("Fetch a URL using the Scrapy downloader and print its "
                "content to stdout. You may want to use --nolog to disable "
                "logging")

    def add_options(self, parser):
        ScrapyCommand.add_options(self, parser)
        parser.add_option("--spider", dest="spider",
                          help="use this spider")
        parser.add_option("--headers", dest="headers", action="store_true",
                          help="print response HTTP headers instead of body")
        parser.add_option("--no-redirect", dest="no_redirect", action="store_true",
                          default=False,
                          help="do not handle HTTP 3xx status codes and print response as-is")

    def _print_headers(self, headers, prefix):
        # Headers are stored as bytes, and each key can map to several values.
        for key, values in headers.items():
            for value in values:
                self._print_bytes(prefix + b' ' + key + b': ' + value)

    def _print_response(self, response, opts):
        if opts.headers:
            self._print_headers(response.request.headers, b'>')
            print('>')
            self._print_headers(response.headers, b'<')
        else:
            self._print_bytes(response.body)

    def _print_bytes(self, bytes_):
        # On Python 3, raw bytes must go through sys.stdout.buffer;
        # on Python 2, sys.stdout accepts bytes directly.
        bytes_writer = sys.stdout if six.PY2 else sys.stdout.buffer
        bytes_writer.write(bytes_ + b'\n')

    def run(self, args, opts):
        if len(args) != 1 or not is_url(args[0]):
            raise UsageError()
        cb = lambda x: self._print_response(x, opts)
        request = Request(args[0], callback=cb, dont_filter=True)
        # by default, let the framework handle redirects,
        # i.e. the command handles all status codes except 3xx
        if not opts.no_redirect:
            request.meta['handle_httpstatus_list'] = SequenceExclude(range(300, 400))
        else:
            request.meta['handle_httpstatus_all'] = True
        spidercls = DefaultSpider
        spider_loader = self.crawler_process.spider_loader
        if opts.spider:
            spidercls = spider_loader.load(opts.spider)
        else:
            # pick a project spider that matches the request URL,
            # falling back to DefaultSpider
            spidercls = spidercls_for_request(spider_loader, request, spidercls)
        self.crawler_process.crawl(spidercls, start_requests=lambda: [request])
        self.crawler_process.start()
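
# Example invocations (a sketch; the URLs are placeholders, and the flags are
# the ones defined in add_options above plus Scrapy's standard --nolog):
#
#   scrapy fetch --nolog https://example.com
#   scrapy fetch --headers --nolog https://example.com
#   scrapy fetch --no-redirect --nolog https://example.com/some-redirecting-path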