python (3.12.0)
# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

"""Implements ThreadPoolExecutor."""

__author__ = 'Brian Quinlan (brian@sweetapp.com)'

from concurrent.futures import _base
import itertools
import queue
import threading
import types
import weakref
import os


_threads_queues = weakref.WeakKeyDictionary()
_shutdown = False
# Lock that ensures that new workers are not created while the interpreter is
# shutting down. Must be held while mutating _threads_queues and _shutdown.
_global_shutdown_lock = threading.Lock()

def _python_exit():
    global _shutdown
    with _global_shutdown_lock:
        _shutdown = True
    items = list(_threads_queues.items())
    for t, q in items:
        q.put(None)
    for t, q in items:
        t.join()

# Register for `_python_exit()` to be called just before joining all
# non-daemon threads. This is used instead of `atexit.register()` for
# compatibility with subinterpreters, which no longer support daemon threads.
# See bpo-39812 for context.
threading._register_atexit(_python_exit)

# At fork, reinitialize the `_global_shutdown_lock` lock in the child process
if hasattr(os, 'register_at_fork'):
    os.register_at_fork(before=_global_shutdown_lock.acquire,
                        after_in_child=_global_shutdown_lock._at_fork_reinit,
                        after_in_parent=_global_shutdown_lock.release)


class _WorkItem:
    def __init__(self, future, fn, args, kwargs):
        self.future = future
        self.fn = fn
        self.args = args
        self.kwargs = kwargs

    def run(self):
        if not self.future.set_running_or_notify_cancel():
            return

        try:
            result = self.fn(*self.args, **self.kwargs)
        except BaseException as exc:
            self.future.set_exception(exc)
            # Break a reference cycle with the exception 'exc'
            self = None
        else:
            self.future.set_result(result)

    __class_getitem__ = classmethod(types.GenericAlias)


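# A worker thread runs the initializer once, then loops pulling _WorkItem
# objects from work_queue and running them. When the queue is momentarily
# empty, the worker signals that it is idle via the executor's semaphore and
# blocks on the queue; a None sentinel (or interpreter/executor shutdown)
# makes it re-post the sentinel for its sibling workers and exit.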
def _worker(executor_reference, work_queue, initializer, initargs):
    if initializer is not None:
        try:
            initializer(*initargs)
        except BaseException:
            _base.LOGGER.critical('Exception in initializer:', exc_info=True)
            executor = executor_reference()
            if executor is not None:
                executor._initializer_failed()
            return
    try:
        while True:
            try:
                work_item = work_queue.get_nowait()
            except queue.Empty:
                # attempt to increment idle count if queue is empty
                executor = executor_reference()
                if executor is not None:
                    executor._idle_semaphore.release()
                del executor
                work_item = work_queue.get(block=True)

            if work_item is not None:
                work_item.run()
                # Delete references to object. See GH-60488
                del work_item
                continue

            executor = executor_reference()
            # Exit if:
            #   - The interpreter is shutting down OR
            #   - The executor that owns the worker has been collected OR
            #   - The executor that owns the worker has been shutdown.
            if _shutdown or executor is None or executor._shutdown:
                # Flag the executor as shutting down as early as possible if it
                # is not gc-ed yet.
                if executor is not None:
                    executor._shutdown = True
                # Notify other workers
                work_queue.put(None)
                return
            del executor
    except BaseException:
        _base.LOGGER.critical('Exception in worker', exc_info=True)


class BrokenThreadPool(_base.BrokenExecutor):
    """
    Raised when a worker thread in a ThreadPoolExecutor failed initializing.
    """


class ThreadPoolExecutor(_base.Executor):

    # Used to assign unique thread names when thread_name_prefix is not supplied.
    _counter = itertools.count().__next__

    def __init__(self, max_workers=None, thread_name_prefix='',
                 initializer=None, initargs=()):
        """Initializes a new ThreadPoolExecutor instance.

        Args:
            max_workers: The maximum number of threads that can be used to
                execute the given calls.
            thread_name_prefix: An optional name prefix to give our threads.
            initializer: A callable used to initialize worker threads.
            initargs: A tuple of arguments to pass to the initializer.
        """
        if max_workers is None:
            # ThreadPoolExecutor is often used for:
            # * CPU-bound tasks that release the GIL
            # * I/O-bound tasks (which release the GIL, of course)
            #
            # We use cpu_count + 4 for both types of tasks, but cap the value
            # at 32 to avoid consuming surprisingly large resources on
            # many-core machines.
            max_workers = min(32, (os.cpu_count() or 1) + 4)
        if max_workers <= 0:
            raise ValueError("max_workers must be greater than 0")

        if initializer is not None and not callable(initializer):
            raise TypeError("initializer must be a callable")

        self._max_workers = max_workers
        self._work_queue = queue.SimpleQueue()
        self._idle_semaphore = threading.Semaphore(0)
        self._threads = set()
        self._broken = False
        self._shutdown = False
        self._shutdown_lock = threading.Lock()
        self._thread_name_prefix = (thread_name_prefix or
                                    ("ThreadPoolExecutor-%d" % self._counter()))
        self._initializer = initializer
        self._initargs = initargs

    def submit(self, fn, /, *args, **kwargs):
        with self._shutdown_lock, _global_shutdown_lock:
            if self._broken:
                raise BrokenThreadPool(self._broken)

            if self._shutdown:
                raise RuntimeError('cannot schedule new futures after shutdown')
            if _shutdown:
                raise RuntimeError('cannot schedule new futures after '
                                   'interpreter shutdown')

            f = _base.Future()
            w = _WorkItem(f, fn, args, kwargs)

            self._work_queue.put(w)
            self._adjust_thread_count()
            return f
    submit.__doc__ = _base.Executor.submit.__doc__

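    # Threads are created lazily: submit() calls _adjust_thread_count(), which
    # starts a new worker only when no idle worker is available and the pool
    # is still below max_workers.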
    def _adjust_thread_count(self):
        # if idle threads are available, don't spin new threads
        if self._idle_semaphore.acquire(timeout=0):
            return

        # When the executor gets lost, the weakref callback will wake up
        # the worker threads.
        def weakref_cb(_, q=self._work_queue):
            q.put(None)

        num_threads = len(self._threads)
        if num_threads < self._max_workers:
            thread_name = '%s_%d' % (self._thread_name_prefix or self,
                                     num_threads)
            t = threading.Thread(name=thread_name, target=_worker,
                                 args=(weakref.ref(self, weakref_cb),
                                       self._work_queue,
                                       self._initializer,
                                       self._initargs))
            t.start()
            self._threads.add(t)
            _threads_queues[t] = self._work_queue

    def _initializer_failed(self):
        with self._shutdown_lock:
            self._broken = ('A thread initializer failed, the thread pool '
                            'is not usable anymore')
            # Drain work queue and mark pending futures failed
            while True:
                try:
                    work_item = self._work_queue.get_nowait()
                except queue.Empty:
                    break
                if work_item is not None:
                    work_item.future.set_exception(BrokenThreadPool(self._broken))

    def shutdown(self, wait=True, *, cancel_futures=False):
        with self._shutdown_lock:
            self._shutdown = True
            if cancel_futures:
                # Drain all work items from the queue, and then cancel their
                # associated futures.
                while True:
                    try:
                        work_item = self._work_queue.get_nowait()
                    except queue.Empty:
                        break
                    if work_item is not None:
                        work_item.future.cancel()

            # Send a wake-up to prevent threads calling
            # _work_queue.get(block=True) from permanently blocking.
            self._work_queue.put(None)
        if wait:
            for t in self._threads:
                t.join()
    shutdown.__doc__ = _base.Executor.shutdown.__doc__
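

# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the stdlib module): a minimal
# example of driving the executor defined above. The helper `_square` is
# hypothetical and exists only for this demonstration; the guard keeps the
# example from running on import.
if __name__ == '__main__':
    def _square(x):
        return x * x

    # The context manager calls shutdown(wait=True) on exit, joining workers.
    with ThreadPoolExecutor(max_workers=4) as executor:
        futures = [executor.submit(_square, i) for i in range(8)]
        print([f.result() for f in futures])  # [0, 1, 4, 9, 16, 25, 36, 49]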