shell.py

"""
Scrapy Shell

See documentation in docs/topics/shell.rst
"""
from threading import Thread

from scrapy.commands import ScrapyCommand
from scrapy.shell import Shell
from scrapy.http import Request
from scrapy.utils.spider import spidercls_for_request, DefaultSpider
from scrapy.utils.url import guess_scheme


class Command(ScrapyCommand):
    requires_project = False
    default_settings = {
        'KEEP_ALIVE': True,
        'LOGSTATS_INTERVAL': 0,
        'DUPEFILTER_CLASS': 'scrapy.dupefilters.BaseDupeFilter',
    }
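    # The defaults above keep the shell usable interactively: KEEP_ALIVE keeps
    # the crawler engine running between fetches, LOGSTATS_INTERVAL = 0
    # silences the periodic stats log line, and BaseDupeFilter disables
    # duplicate filtering so the same URL can be fetched more than once.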

    def syntax(self):
        return "[url|file]"

    def short_desc(self):
        return "Interactive scraping console"

    def long_desc(self):
        return ("Interactive console for scraping the given url or file. "
                "Use ./file.html syntax or full path for local file.")

    def add_options(self, parser):
        ScrapyCommand.add_options(self, parser)
        parser.add_option("-c", dest="code",
                          help="evaluate the code in the shell, print the result and exit")
        parser.add_option("--spider", dest="spider",
                          help="use this spider")
        parser.add_option("--no-redirect", dest="no_redirect", action="store_true",
                          default=False,
                          help="do not handle HTTP 3xx status codes and print response as-is")

    def update_vars(self, vars):
        """You can use this function to update the Scrapy objects that will be
        available in the shell
        """
        pass
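    # Illustrative sketch: a subclass could override update_vars() to expose
    # extra helpers in the shell namespace (the 'pretty' name below is
    # hypothetical, not defined anywhere in Scrapy):
    #
    #   class PrettyShellCommand(Command):
    #       def update_vars(self, vars):
    #           from pprint import pprint
    #           vars['pretty'] = pprint  # callable as pretty(obj) in the shell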

    def run(self, args, opts):
        url = args[0] if args else None
        if url:
            # first argument may be a local file
            url = guess_scheme(url)

        spider_loader = self.crawler_process.spider_loader

        spidercls = DefaultSpider
        if opts.spider:
            spidercls = spider_loader.load(opts.spider)
        elif url:
            spidercls = spidercls_for_request(spider_loader, Request(url),
                                              spidercls, log_multiple=True)

        # The crawler is created this way since the Shell manually handles the
        # crawling engine, so the setup in the crawl method won't work
        crawler = self.crawler_process._create_crawler(spidercls)
        # The Shell class needs a persistent engine in the crawler
        crawler.engine = crawler._create_engine()
        crawler.engine.start()

        self._start_crawler_thread()

        shell = Shell(crawler, update_vars=self.update_vars, code=opts.code)
        shell.start(url=url, redirect=not opts.no_redirect)

    def _start_crawler_thread(self):
        t = Thread(target=self.crawler_process.start,
                   kwargs={'stop_after_crawl': False})
        t.daemon = True
        t.start()
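
# Usage sketch (based on the options defined in add_options above; the spider
# name 'myspider' is hypothetical):
#
#   scrapy shell http://example.com                       # open a shell on a page
#   scrapy shell ./page.html                              # local file, note the ./
#   scrapy shell http://example.com --no-redirect         # keep the raw 3xx response
#   scrapy shell http://example.com -c 'response.status'  # evaluate code and exit
#   scrapy shell http://example.com --spider=myspider     # use a specific spider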