oasis-root

Compiled tree of Oasis Linux, based on my own branch at <https://hacktivis.me/git/oasis/>.

git clone https://anongit.hacktivis.me/git/oasis-root.git

thread.py (8445B)


# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

"""Implements ThreadPoolExecutor."""

__author__ = 'Brian Quinlan (brian@sweetapp.com)'

from concurrent.futures import _base
import itertools
import queue
import threading
import types
import weakref
import os


_threads_queues = weakref.WeakKeyDictionary()
_shutdown = False
# Lock that ensures that new workers are not created while the interpreter is
# shutting down. Must be held while mutating _threads_queues and _shutdown.
_global_shutdown_lock = threading.Lock()

def _python_exit():
    global _shutdown
    with _global_shutdown_lock:
        _shutdown = True
    items = list(_threads_queues.items())
    for t, q in items:
        q.put(None)
    for t, q in items:
        t.join()

# Register for `_python_exit()` to be called just before joining all
# non-daemon threads. This is used instead of `atexit.register()` for
# compatibility with subinterpreters, which no longer support daemon threads.
# See bpo-39812 for context.
threading._register_atexit(_python_exit)


class _WorkItem(object):
    def __init__(self, future, fn, args, kwargs):
        self.future = future
        self.fn = fn
        self.args = args
        self.kwargs = kwargs

    def run(self):
        if not self.future.set_running_or_notify_cancel():
            return

        try:
            result = self.fn(*self.args, **self.kwargs)
        except BaseException as exc:
            self.future.set_exception(exc)
            # Break a reference cycle with the exception 'exc'
            self = None
        else:
            self.future.set_result(result)

    __class_getitem__ = classmethod(types.GenericAlias)


def _worker(executor_reference, work_queue, initializer, initargs):
    if initializer is not None:
        try:
            initializer(*initargs)
        except BaseException:
            _base.LOGGER.critical('Exception in initializer:', exc_info=True)
            executor = executor_reference()
            if executor is not None:
                executor._initializer_failed()
            return
    try:
        while True:
            work_item = work_queue.get(block=True)
            if work_item is not None:
                work_item.run()
                # Delete references to object. See issue16284
                del work_item

                # attempt to increment idle count
                executor = executor_reference()
                if executor is not None:
                    executor._idle_semaphore.release()
                del executor
                continue

            executor = executor_reference()
            # Exit if:
            #   - The interpreter is shutting down OR
            #   - The executor that owns the worker has been collected OR
            #   - The executor that owns the worker has been shutdown.
            if _shutdown or executor is None or executor._shutdown:
                # Flag the executor as shutting down as early as possible if it
                # is not gc-ed yet.
                if executor is not None:
                    executor._shutdown = True
                # Notice other workers
                work_queue.put(None)
                return
            del executor
    except BaseException:
        _base.LOGGER.critical('Exception in worker', exc_info=True)


class BrokenThreadPool(_base.BrokenExecutor):
    """
    Raised when a worker thread in a ThreadPoolExecutor failed initializing.
    """


class ThreadPoolExecutor(_base.Executor):

    # Used to assign unique thread names when thread_name_prefix is not supplied.
    _counter = itertools.count().__next__

    def __init__(self, max_workers=None, thread_name_prefix='',
                 initializer=None, initargs=()):
        """Initializes a new ThreadPoolExecutor instance.

        Args:
            max_workers: The maximum number of threads that can be used to
                execute the given calls.
            thread_name_prefix: An optional name prefix to give our threads.
            initializer: A callable used to initialize worker threads.
            initargs: A tuple of arguments to pass to the initializer.
        """
        if max_workers is None:
            # ThreadPoolExecutor is often used to:
            # * CPU bound task which releases GIL
            # * I/O bound task (which releases GIL, of course)
            #
            # We use cpu_count + 4 for both types of tasks.
            # But we limit it to 32 to avoid consuming surprisingly large resource
            # on many core machine.
            max_workers = min(32, (os.cpu_count() or 1) + 4)
        if max_workers <= 0:
            raise ValueError("max_workers must be greater than 0")

        if initializer is not None and not callable(initializer):
            raise TypeError("initializer must be a callable")

        self._max_workers = max_workers
        self._work_queue = queue.SimpleQueue()
        self._idle_semaphore = threading.Semaphore(0)
        self._threads = set()
        self._broken = False
        self._shutdown = False
        self._shutdown_lock = threading.Lock()
        self._thread_name_prefix = (thread_name_prefix or
                                    ("ThreadPoolExecutor-%d" % self._counter()))
        self._initializer = initializer
        self._initargs = initargs

    def submit(self, fn, /, *args, **kwargs):
        with self._shutdown_lock, _global_shutdown_lock:
            if self._broken:
                raise BrokenThreadPool(self._broken)

            if self._shutdown:
                raise RuntimeError('cannot schedule new futures after shutdown')
            if _shutdown:
                raise RuntimeError('cannot schedule new futures after '
                                   'interpreter shutdown')

            f = _base.Future()
            w = _WorkItem(f, fn, args, kwargs)

            self._work_queue.put(w)
            self._adjust_thread_count()
            return f
    submit.__doc__ = _base.Executor.submit.__doc__

    def _adjust_thread_count(self):
        # if idle threads are available, don't spin new threads
        if self._idle_semaphore.acquire(timeout=0):
            return

        # When the executor gets lost, the weakref callback will wake up
        # the worker threads.
        def weakref_cb(_, q=self._work_queue):
            q.put(None)

        num_threads = len(self._threads)
        if num_threads < self._max_workers:
            thread_name = '%s_%d' % (self._thread_name_prefix or self,
                                     num_threads)
            t = threading.Thread(name=thread_name, target=_worker,
                                 args=(weakref.ref(self, weakref_cb),
                                       self._work_queue,
                                       self._initializer,
                                       self._initargs))
            t.start()
            self._threads.add(t)
            _threads_queues[t] = self._work_queue

    def _initializer_failed(self):
        with self._shutdown_lock:
            self._broken = ('A thread initializer failed, the thread pool '
                            'is not usable anymore')
            # Drain work queue and mark pending futures failed
            while True:
                try:
                    work_item = self._work_queue.get_nowait()
                except queue.Empty:
                    break
                if work_item is not None:
                    work_item.future.set_exception(BrokenThreadPool(self._broken))

    def shutdown(self, wait=True, *, cancel_futures=False):
        with self._shutdown_lock:
            self._shutdown = True
            if cancel_futures:
                # Drain all work items from the queue, and then cancel their
                # associated futures.
                while True:
                    try:
                        work_item = self._work_queue.get_nowait()
                    except queue.Empty:
                        break
                    if work_item is not None:
                        work_item.future.cancel()

            # Send a wake-up to prevent threads calling
            # _work_queue.get(block=True) from permanently blocking.
            self._work_queue.put(None)
        if wait:
            for t in self._threads:
                t.join()
    shutdown.__doc__ = _base.Executor.shutdown.__doc__
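
For context, here is a minimal usage sketch of the class implemented above, using only the public concurrent.futures API; the square task, worker count, and name prefix are illustrative choices, not anything mandated by this file:

from concurrent.futures import ThreadPoolExecutor, as_completed

def square(n):
    # Stand-in for an I/O- or CPU-bound task.
    return n * n

# max_workers, thread_name_prefix, initializer and initargs map to the
# __init__ parameters defined above; submit() returns one Future per call,
# and the with-block calls shutdown(wait=True) on exit.
with ThreadPoolExecutor(max_workers=4, thread_name_prefix='demo') as pool:
    futures = [pool.submit(square, n) for n in range(8)]
    results = sorted(f.result() for f in as_completed(futures))

print(results)  # [0, 1, 4, 9, 16, 25, 36, 49]

If an initializer passed to the constructor raises, the pool is flagged as broken and later submit() calls raise BrokenThreadPool, as implemented in _initializer_failed() above.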