thread.py

# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

"""Implements ThreadPoolExecutor."""

import atexit
from concurrent.futures import _base
import itertools
import Queue as queue
import threading
import weakref
import sys

try:
    from multiprocessing import cpu_count
except ImportError:
    # some platforms don't have multiprocessing
    def cpu_count():
        return None

__author__ = 'Brian Quinlan (brian@sweetapp.com)'

# Workers are created as daemon threads. This is done to allow the interpreter
# to exit when there are still idle threads in a ThreadPoolExecutor's thread
# pool (i.e. shutdown() was not called). However, allowing workers to die with
# the interpreter has two undesirable properties:
#   - The workers would still be running during interpreter shutdown,
#     meaning that they would fail in unpredictable ways.
#   - The workers could be killed while evaluating a work item, which could
#     be bad if the callable being evaluated has external side-effects, e.g.
#     writing to a file.
#
# To work around this problem, an exit handler is installed which tells the
# workers to exit when their work queues are empty and then waits until the
# threads finish.
_threads_queues = weakref.WeakKeyDictionary()
_shutdown = False


def _python_exit():
    global _shutdown
    _shutdown = True
    items = list(_threads_queues.items()) if _threads_queues else ()
    for t, q in items:
        q.put(None)
    for t, q in items:
        t.join(sys.maxint)

atexit.register(_python_exit)


class _WorkItem(object):
    def __init__(self, future, fn, args, kwargs):
        self.future = future
        self.fn = fn
        self.args = args
        self.kwargs = kwargs

    def run(self):
        if not self.future.set_running_or_notify_cancel():
            return

        try:
            result = self.fn(*self.args, **self.kwargs)
        except:
            e, tb = sys.exc_info()[1:]
            self.future.set_exception_info(e, tb)
        else:
            self.future.set_result(result)


def _worker(executor_reference, work_queue):
    try:
        while True:
            work_item = work_queue.get(block=True)
            if work_item is not None:
                work_item.run()
                # Delete references to object. See issue16284
                del work_item
                continue

            executor = executor_reference()
            # Exit if:
            #   - The interpreter is shutting down OR
            #   - The executor that owns the worker has been collected OR
            #   - The executor that owns the worker has been shutdown.
            if _shutdown or executor is None or executor._shutdown:
                # Notice other workers
                work_queue.put(None)
                return
            del executor
    except:
        _base.LOGGER.critical('Exception in worker', exc_info=True)


class ThreadPoolExecutor(_base.Executor):

    # Used to assign unique thread names when thread_name_prefix is not supplied.
    _counter = itertools.count().next

    def __init__(self, max_workers=None, thread_name_prefix=''):
        """Initializes a new ThreadPoolExecutor instance.

        Args:
            max_workers: The maximum number of threads that can be used to
                execute the given calls.
            thread_name_prefix: An optional name prefix to give our threads.
        """
        if max_workers is None:
            # Use this number because ThreadPoolExecutor is often
            # used to overlap I/O instead of CPU work.
            max_workers = (cpu_count() or 1) * 5
        if max_workers <= 0:
            raise ValueError("max_workers must be greater than 0")

        self._max_workers = max_workers
        self._work_queue = queue.Queue()
        self._threads = set()
        self._shutdown = False
        self._shutdown_lock = threading.Lock()
        self._thread_name_prefix = (thread_name_prefix or
                                    ("ThreadPoolExecutor-%d" % self._counter()))

    def submit(self, fn, *args, **kwargs):
        with self._shutdown_lock:
            if self._shutdown:
                raise RuntimeError('cannot schedule new futures after shutdown')

            f = _base.Future()
            w = _WorkItem(f, fn, args, kwargs)

            self._work_queue.put(w)
            self._adjust_thread_count()
            return f
    submit.__doc__ = _base.Executor.submit.__doc__

    def _adjust_thread_count(self):
        # When the executor gets lost, the weakref callback will wake up
        # the worker threads.
        def weakref_cb(_, q=self._work_queue):
            q.put(None)
        # TODO(bquinlan): Should avoid creating new threads if there are more
        # idle threads than items in the work queue.
        num_threads = len(self._threads)
        if num_threads < self._max_workers:
            thread_name = '%s_%d' % (self._thread_name_prefix or self,
                                     num_threads)
            t = threading.Thread(name=thread_name, target=_worker,
                                 args=(weakref.ref(self, weakref_cb),
                                       self._work_queue))
            t.daemon = True
            t.start()
            self._threads.add(t)
            _threads_queues[t] = self._work_queue

    def shutdown(self, wait=True):
        with self._shutdown_lock:
            self._shutdown = True
            self._work_queue.put(None)
        if wait:
            for t in self._threads:
                t.join(sys.maxint)
    shutdown.__doc__ = _base.Executor.shutdown.__doc__
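
A minimal usage sketch, not part of thread.py itself: assuming the futures backport is installed so that concurrent.futures is importable on Python 2, the pool above can be exercised like this. The fetch helper and URL list are illustrative names, not part of the package; exiting the with-block calls shutdown(wait=True).

# Illustrative example only (hypothetical fetch/urls); requires the futures
# backport on Python 2 so that concurrent.futures resolves to this package.
from concurrent.futures import ThreadPoolExecutor, as_completed
import urllib2

def fetch(url):
    # I/O-bound work: download a page and report its size.
    return url, len(urllib2.urlopen(url, timeout=10).read())

if __name__ == '__main__':
    urls = ['http://example.com/', 'http://example.org/']
    # Leaving the with-block calls shutdown(wait=True), so every submitted
    # work item completes before the daemon worker threads are left behind.
    with ThreadPoolExecutor(max_workers=4) as executor:
        futures = [executor.submit(fetch, u) for u in urls]
        for future in as_completed(futures):
            url, size = future.result()
            print('%s: %d bytes' % (url, size))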