# crawler.py

import six
import signal
import logging
import warnings
import sys

from twisted.internet import reactor, defer
from zope.interface.verify import verifyClass, DoesNotImplement

from scrapy import Spider
from scrapy.core.engine import ExecutionEngine
from scrapy.resolver import CachingThreadedResolver
from scrapy.interfaces import ISpiderLoader
from scrapy.extension import ExtensionManager
from scrapy.settings import overridden_settings, Settings
from scrapy.signalmanager import SignalManager
from scrapy.exceptions import ScrapyDeprecationWarning
from scrapy.utils.ossignal import install_shutdown_handlers, signal_names
from scrapy.utils.misc import load_object
from scrapy.utils.log import (
    LogCounterHandler, configure_logging, log_scrapy_info,
    get_scrapy_root_handler, install_scrapy_root_handler)
from scrapy import signals


logger = logging.getLogger(__name__)


class Crawler(object):

    def __init__(self, spidercls, settings=None):
        if isinstance(spidercls, Spider):
            raise ValueError(
                'The spidercls argument must be a class, not an object')

        if isinstance(settings, dict) or settings is None:
            settings = Settings(settings)

        self.spidercls = spidercls
        self.settings = settings.copy()
        self.spidercls.update_settings(self.settings)

        self.signals = SignalManager(self)
        self.stats = load_object(self.settings['STATS_CLASS'])(self)

        handler = LogCounterHandler(self, level=self.settings.get('LOG_LEVEL'))
        logging.root.addHandler(handler)

        d = dict(overridden_settings(self.settings))
        logger.info("Overridden settings: %(settings)r", {'settings': d})

        if get_scrapy_root_handler() is not None:
            # scrapy root handler already installed: update it with new settings
            install_scrapy_root_handler(self.settings)
        # lambda is assigned to a Crawler attribute because this way it is not
        # garbage collected after leaving the __init__ scope
        self.__remove_handler = lambda: logging.root.removeHandler(handler)
        self.signals.connect(self.__remove_handler, signals.engine_stopped)

        lf_cls = load_object(self.settings['LOG_FORMATTER'])
        self.logformatter = lf_cls.from_crawler(self)
        self.extensions = ExtensionManager.from_crawler(self)

        self.settings.freeze()
        self.crawling = False
        self.spider = None
        self.engine = None

    @property
    def spiders(self):
        if not hasattr(self, '_spiders'):
            warnings.warn("Crawler.spiders is deprecated, use "
                          "CrawlerRunner.spider_loader or instantiate "
                          "scrapy.spiderloader.SpiderLoader with your "
                          "settings.",
                          category=ScrapyDeprecationWarning, stacklevel=2)
            self._spiders = _get_spider_loader(self.settings.frozencopy())
        return self._spiders

    @defer.inlineCallbacks
    def crawl(self, *args, **kwargs):
        assert not self.crawling, "Crawling already taking place"
        self.crawling = True

        try:
            self.spider = self._create_spider(*args, **kwargs)
            self.engine = self._create_engine()
            start_requests = iter(self.spider.start_requests())
            yield self.engine.open_spider(self.spider, start_requests)
            yield defer.maybeDeferred(self.engine.start)
        except Exception:
            # In Python 2 reraising an exception after yield discards
            # the original traceback (see https://bugs.python.org/issue7563),
            # so the sys.exc_info() workaround is used.
            # This workaround also works in Python 3, but it is not needed
            # and is slower, so in Python 3 we use a native `raise`.
            if six.PY2:
                exc_info = sys.exc_info()

            self.crawling = False
            if self.engine is not None:
                yield self.engine.close()

            if six.PY2:
                six.reraise(*exc_info)
            raise

    def _create_spider(self, *args, **kwargs):
        return self.spidercls.from_crawler(self, *args, **kwargs)

    def _create_engine(self):
        return ExecutionEngine(self, lambda _: self.stop())

    @defer.inlineCallbacks
    def stop(self):
        """Starts a graceful stop of the crawler and returns a deferred that is
        fired when the crawler is stopped."""
        if self.crawling:
            self.crawling = False
            yield defer.maybeDeferred(self.engine.stop)


class CrawlerRunner(object):
    """
    This is a convenient helper class that keeps track of, manages and runs
    crawlers inside an already set up Twisted `reactor`_.

    The CrawlerRunner object must be instantiated with a
    :class:`~scrapy.settings.Settings` object.

    This class shouldn't be needed (since Scrapy is responsible for using it
    accordingly) unless writing scripts that manually handle the crawling
    process. See :ref:`run-from-script` for an example.
    """
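
    # Minimal usage sketch, along the lines of the :ref:`run-from-script` docs.
    # ``MySpider`` and ``settings`` are illustrative names, not part of this
    # module: ``MySpider`` is a Spider subclass defined elsewhere and
    # ``settings`` is a dict or Settings object; the caller owns the reactor
    # lifecycle:
    #
    #     from twisted.internet import reactor
    #     from scrapy.utils.log import configure_logging
    #
    #     configure_logging()
    #     runner = CrawlerRunner(settings)
    #     d = runner.crawl(MySpider)
    #     d.addBoth(lambda _: reactor.stop())
    #     reactor.run()  # blocks here until the crawl finishes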

    crawlers = property(
        lambda self: self._crawlers,
        doc="Set of :class:`crawlers <scrapy.crawler.Crawler>` started by "
            ":meth:`crawl` and managed by this class."
    )

    def __init__(self, settings=None):
        if isinstance(settings, dict) or settings is None:
            settings = Settings(settings)
        self.settings = settings
        self.spider_loader = _get_spider_loader(settings)
        self._crawlers = set()
        self._active = set()
        self.bootstrap_failed = False

    @property
    def spiders(self):
        warnings.warn("CrawlerRunner.spiders attribute is renamed to "
                      "CrawlerRunner.spider_loader.",
                      category=ScrapyDeprecationWarning, stacklevel=2)
        return self.spider_loader

    def crawl(self, crawler_or_spidercls, *args, **kwargs):
        """
        Run a crawler with the provided arguments.

        It will call the given Crawler's :meth:`~Crawler.crawl` method, while
        keeping track of it so it can be stopped later.

        If ``crawler_or_spidercls`` isn't a :class:`~scrapy.crawler.Crawler`
        instance, this method will try to create one using this parameter as
        the spider class given to it.

        Returns a deferred that is fired when the crawling is finished.

        :param crawler_or_spidercls: already created crawler, or a spider class
            or spider's name inside the project to create it
        :type crawler_or_spidercls: :class:`~scrapy.crawler.Crawler` instance,
            :class:`~scrapy.spiders.Spider` subclass or string

        :param list args: arguments to initialize the spider

        :param dict kwargs: keyword arguments to initialize the spider
        """
        if isinstance(crawler_or_spidercls, Spider):
            raise ValueError(
                'The crawler_or_spidercls argument cannot be a spider object, '
                'it must be a spider class (or a Crawler object)')
        crawler = self.create_crawler(crawler_or_spidercls)
        return self._crawl(crawler, *args, **kwargs)
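
    # Sketch of passing spider arguments through ``crawl()``. ``MySpider``,
    # ``'myspider'`` and ``category`` are illustrative names, not part of this
    # module; the keyword arguments reach the spider's ``__init__`` via
    # ``from_crawler``:
    #
    #     runner.crawl(MySpider, category='electronics')
    #     runner.crawl('myspider', category='books')  # spider name from the project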

    def _crawl(self, crawler, *args, **kwargs):
        self.crawlers.add(crawler)
        d = crawler.crawl(*args, **kwargs)
        self._active.add(d)

        def _done(result):
            self.crawlers.discard(crawler)
            self._active.discard(d)
            self.bootstrap_failed |= not getattr(crawler, 'spider', None)
            return result

        return d.addBoth(_done)

    def create_crawler(self, crawler_or_spidercls):
        """
        Return a :class:`~scrapy.crawler.Crawler` object.

        * If ``crawler_or_spidercls`` is a Crawler, it is returned as-is.
        * If ``crawler_or_spidercls`` is a Spider subclass, a new Crawler
          is constructed for it.
        * If ``crawler_or_spidercls`` is a string, this function finds
          a spider with this name in a Scrapy project (using spider loader),
          then creates a Crawler instance for it.
        """
        if isinstance(crawler_or_spidercls, Spider):
            raise ValueError(
                'The crawler_or_spidercls argument cannot be a spider object, '
                'it must be a spider class (or a Crawler object)')
        if isinstance(crawler_or_spidercls, Crawler):
            return crawler_or_spidercls
        return self._create_crawler(crawler_or_spidercls)
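
    # The three accepted argument forms, sketched with illustrative names
    # (``existing_crawler``, ``MySpider`` and ``'myspider'`` are assumptions):
    #
    #     runner.create_crawler(existing_crawler)  # Crawler: returned unchanged
    #     runner.create_crawler(MySpider)          # Spider subclass: wrapped in a new Crawler
    #     runner.create_crawler('myspider')        # string: resolved via the spider loader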

    def _create_crawler(self, spidercls):
        if isinstance(spidercls, six.string_types):
            spidercls = self.spider_loader.load(spidercls)
        return Crawler(spidercls, self.settings)

    def stop(self):
        """
        Simultaneously stops all the crawling jobs that are taking place.

        Returns a deferred that is fired when they all have ended.
        """
        return defer.DeferredList([c.stop() for c in list(self.crawlers)])

    @defer.inlineCallbacks
    def join(self):
        """
        join()

        Returns a deferred that is fired when all managed :attr:`crawlers` have
        completed their executions.
        """
        while self._active:
            yield defer.DeferredList(self._active)
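
    # Sketch of running several spiders and waiting for all of them with
    # ``join()``, as in the :ref:`run-from-script` docs (``MySpider1``,
    # ``MySpider2`` and ``settings`` are illustrative names):
    #
    #     runner = CrawlerRunner(settings)
    #     runner.crawl(MySpider1)
    #     runner.crawl(MySpider2)
    #     d = runner.join()
    #     d.addBoth(lambda _: reactor.stop())
    #     reactor.run()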


class CrawlerProcess(CrawlerRunner):
    """
    A class to run multiple scrapy crawlers in a process simultaneously.

    This class extends :class:`~scrapy.crawler.CrawlerRunner` by adding support
    for starting a Twisted `reactor`_ and handling shutdown signals, like the
    keyboard interrupt command Ctrl-C. It also configures top-level logging.

    This utility should be a better fit than
    :class:`~scrapy.crawler.CrawlerRunner` if you aren't running another
    Twisted `reactor`_ within your application.

    The CrawlerProcess object must be instantiated with a
    :class:`~scrapy.settings.Settings` object.

    :param install_root_handler: whether to install the root logging handler
        (default: True)

    This class shouldn't be needed (since Scrapy is responsible for using it
    accordingly) unless writing scripts that manually handle the crawling
    process. See :ref:`run-from-script` for an example.
    """
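
    # Minimal usage sketch, as in the :ref:`run-from-script` docs (``MySpider``
    # is an illustrative Spider subclass and the USER_AGENT value is only an
    # example):
    #
    #     process = CrawlerProcess({'USER_AGENT': 'my-crawler (+https://example.com)'})
    #     process.crawl(MySpider)
    #     process.start()  # blocks here until the crawling is finished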

    def __init__(self, settings=None, install_root_handler=True):
        super(CrawlerProcess, self).__init__(settings)
        install_shutdown_handlers(self._signal_shutdown)
        configure_logging(self.settings, install_root_handler)
        log_scrapy_info(self.settings)

    def _signal_shutdown(self, signum, _):
        install_shutdown_handlers(self._signal_kill)
        signame = signal_names[signum]
        logger.info("Received %(signame)s, shutting down gracefully. Send again to force ",
                    {'signame': signame})
        reactor.callFromThread(self._graceful_stop_reactor)

    def _signal_kill(self, signum, _):
        install_shutdown_handlers(signal.SIG_IGN)
        signame = signal_names[signum]
        logger.info('Received %(signame)s twice, forcing unclean shutdown',
                    {'signame': signame})
        reactor.callFromThread(self._stop_reactor)

    def start(self, stop_after_crawl=True):
        """
        This method starts a Twisted `reactor`_, adjusts its pool size to
        :setting:`REACTOR_THREADPOOL_MAXSIZE`, and installs a DNS cache based
        on :setting:`DNSCACHE_ENABLED` and :setting:`DNSCACHE_SIZE`.

        If ``stop_after_crawl`` is True, the reactor will be stopped after all
        crawlers have finished, using :meth:`join`.

        :param boolean stop_after_crawl: whether to stop the reactor once all
            crawlers have finished
        """
        if stop_after_crawl:
            d = self.join()
            # Don't start the reactor if the deferreds are already fired
            if d.called:
                return
            d.addBoth(self._stop_reactor)

        reactor.installResolver(self._get_dns_resolver())
        tp = reactor.getThreadPool()
        tp.adjustPoolsize(maxthreads=self.settings.getint('REACTOR_THREADPOOL_MAXSIZE'))
        reactor.addSystemEventTrigger('before', 'shutdown', self.stop)
        reactor.run(installSignalHandlers=False)  # blocking call
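
    # Sketch of the settings that ``start()`` reads, with illustrative values
    # (the setting names are real Scrapy settings, the numbers are assumptions;
    # ``MySpider`` is defined elsewhere):
    #
    #     process = CrawlerProcess({
    #         'REACTOR_THREADPOOL_MAXSIZE': 20,   # reactor thread pool size
    #         'DNSCACHE_ENABLED': True,           # enable the in-memory DNS cache
    #         'DNSCACHE_SIZE': 10000,             # cache size used when enabled
    #         'DNS_TIMEOUT': 60,                  # DNS resolution timeout in seconds
    #     })
    #     process.crawl(MySpider)
    #     process.start(stop_after_crawl=True)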

    def _get_dns_resolver(self):
        if self.settings.getbool('DNSCACHE_ENABLED'):
            cache_size = self.settings.getint('DNSCACHE_SIZE')
        else:
            cache_size = 0
        return CachingThreadedResolver(
            reactor=reactor,
            cache_size=cache_size,
            timeout=self.settings.getfloat('DNS_TIMEOUT')
        )

    def _graceful_stop_reactor(self):
        d = self.stop()
        d.addBoth(self._stop_reactor)
        return d

    def _stop_reactor(self, _=None):
        try:
            reactor.stop()
        except RuntimeError:  # raised if already stopped or in shutdown stage
            pass


def _get_spider_loader(settings):
    """ Get SpiderLoader instance from settings """
    cls_path = settings.get('SPIDER_LOADER_CLASS')
    loader_cls = load_object(cls_path)
    try:
        verifyClass(ISpiderLoader, loader_cls)
    except DoesNotImplement:
        warnings.warn(
            'SPIDER_LOADER_CLASS (previously named SPIDER_MANAGER_CLASS) does '
            'not fully implement scrapy.interfaces.ISpiderLoader interface. '
            'Please add all missing methods to avoid unexpected runtime errors.',
            category=ScrapyDeprecationWarning, stacklevel=2
        )
    return loader_cls.from_settings(settings.frozencopy())
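

# Rough sketch of a custom SPIDER_LOADER_CLASS. This is an assumption for
# illustration only: the method names follow scrapy.interfaces.ISpiderLoader
# (from_settings, load, list, find_by_request), but verify against the
# interface before relying on it.
#
#     from zope.interface import implementer
#     from scrapy.interfaces import ISpiderLoader
#
#     @implementer(ISpiderLoader)
#     class DictSpiderLoader(object):
#         """Loads spiders from an in-memory dict instead of a project module."""
#
#         def __init__(self, spiders):
#             self._spiders = spiders  # mapping: spider name -> Spider subclass
#
#         @classmethod
#         def from_settings(cls, settings):
#             return cls({})  # normally populated from the settings
#
#         def load(self, spider_name):
#             return self._spiders[spider_name]  # KeyError if the name is unknown
#
#         def list(self):
#             return list(self._spiders)
#
#         def find_by_request(self, request):
#             # names of spiders that could handle this request
#             return [name for name, cls in self._spiders.items()
#                     if cls.handles_request(request)]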