
#include "Python.h"
#include "pycore_atomic.h"        // _Py_atomic_int
#include "pycore_ceval.h"         // _PyEval_SignalReceived()
#include "pycore_pyerrors.h"      // _PyErr_GetRaisedException()
#include "pycore_pylifecycle.h"   // _PyErr_Print()
#include "pycore_initconfig.h"    // _PyStatus_OK()
#include "pycore_interp.h"        // _Py_RunGC()
#include "pycore_pymem.h"         // _PyMem_IsPtrFreed()

/*
   Notes about the implementation:

   - The GIL is just a boolean variable (locked) whose access is protected
     by a mutex (gil_mutex), and whose changes are signalled by a condition
     variable (gil_cond). gil_mutex is taken for short periods of time,
     and therefore mostly uncontended.

   - In the GIL-holding thread, the main loop (PyEval_EvalFrameEx) must be
     able to release the GIL on demand by another thread. A volatile boolean
     variable (gil_drop_request) is used for that purpose, which is checked
     at every turn of the eval loop. That variable is set after a wait of
     `interval` microseconds on `gil_cond` has timed out.

      [Actually, another volatile boolean variable (eval_breaker) is used
       which ORs several conditions into one. Volatile booleans are
       sufficient as inter-thread signalling means since Python is run
       on cache-coherent architectures only.]

   - A thread wanting to take the GIL will first let pass a given amount of
     time (`interval` microseconds) before setting gil_drop_request. This
     encourages a defined switching period, but doesn't enforce it since
     opcodes can take an arbitrary time to execute.

     The `interval` value is available for the user to read and modify
     using the Python API `sys.{get,set}switchinterval()`.

   - When a thread releases the GIL and gil_drop_request is set, that thread
     ensures that another GIL-awaiting thread gets scheduled.
     It does so by waiting on a condition variable (switch_cond) until
     the value of last_holder is changed to something other than its
     own thread state pointer, indicating that another thread was able to
     take the GIL.

     This is meant to prohibit the latency-adverse behaviour on multi-core
     machines where one thread would speculatively release the GIL, but still
     run and end up being the first to re-acquire it, making the "timeslices"
     much longer than expected.
     (Note: this mechanism is enabled with FORCE_SWITCHING above)
*/
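
/* Illustrative timeline (a sketch of the scheme described above, not an
   additional mechanism): thread B, wanting the GIL, waits up to `interval`
   microseconds on gil_cond; if the wait times out and no switch has happened
   in the meantime, it sets gil_drop_request (and thus eval_breaker).
   Thread A, which holds the GIL, notices eval_breaker in its eval loop and
   drops the GIL; with FORCE_SWITCHING it then waits on switch_cond until
   last_holder changes, i.e. until thread B (or some other waiter) has
   actually taken the GIL. */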

// GH-89279: Force inlining by using a macro.
#if defined(_MSC_VER) && SIZEOF_INT == 4
#define _Py_atomic_load_relaxed_int32(ATOMIC_VAL) (assert(sizeof((ATOMIC_VAL)->_value) == 4), *((volatile int*)&((ATOMIC_VAL)->_value)))
#else
#define _Py_atomic_load_relaxed_int32(ATOMIC_VAL) _Py_atomic_load_relaxed(ATOMIC_VAL)
#endif

/* This can set eval_breaker to 0 even though gil_drop_request became
   1. We believe this is all right because the eval loop will release
   the GIL eventually anyway. */
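/* eval_breaker is the single flag the eval loop polls. It ORs together:
   a GIL drop request, pending signals (when this thread can handle them),
   pending calls (per-interpreter, plus main-thread-only calls when running
   in the main thread of the main interpreter), a pending async exception,
   and a scheduled GC run. */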
static inline void
COMPUTE_EVAL_BREAKER(PyInterpreterState *interp,
                     struct _ceval_runtime_state *ceval,
                     struct _ceval_state *ceval2)
{
    _Py_atomic_store_relaxed(&ceval2->eval_breaker,
        _Py_atomic_load_relaxed_int32(&ceval2->gil_drop_request)
        | (_Py_atomic_load_relaxed_int32(&ceval->signals_pending)
           && _Py_ThreadCanHandleSignals(interp))
        | (_Py_atomic_load_relaxed_int32(&ceval2->pending.calls_to_do))
        | (_Py_IsMainThread() && _Py_IsMainInterpreter(interp)
           && _Py_atomic_load_relaxed_int32(&ceval->pending_mainthread.calls_to_do))
        | ceval2->pending.async_exc
        | _Py_atomic_load_relaxed_int32(&ceval2->gc_scheduled));
}


static inline void
SET_GIL_DROP_REQUEST(PyInterpreterState *interp)
{
    struct _ceval_state *ceval2 = &interp->ceval;
    _Py_atomic_store_relaxed(&ceval2->gil_drop_request, 1);
    _Py_atomic_store_relaxed(&ceval2->eval_breaker, 1);
}


static inline void
RESET_GIL_DROP_REQUEST(PyInterpreterState *interp)
{
    struct _ceval_runtime_state *ceval = &interp->runtime->ceval;
    struct _ceval_state *ceval2 = &interp->ceval;
    _Py_atomic_store_relaxed(&ceval2->gil_drop_request, 0);
    COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
}


static inline void
SIGNAL_PENDING_CALLS(struct _pending_calls *pending, PyInterpreterState *interp)
{
    struct _ceval_runtime_state *ceval = &interp->runtime->ceval;
    struct _ceval_state *ceval2 = &interp->ceval;
    _Py_atomic_store_relaxed(&pending->calls_to_do, 1);
    COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
}


static inline void
UNSIGNAL_PENDING_CALLS(PyInterpreterState *interp)
{
    struct _ceval_runtime_state *ceval = &interp->runtime->ceval;
    struct _ceval_state *ceval2 = &interp->ceval;
    if (_Py_IsMainThread() && _Py_IsMainInterpreter(interp)) {
        _Py_atomic_store_relaxed(&ceval->pending_mainthread.calls_to_do, 0);
    }
    _Py_atomic_store_relaxed(&ceval2->pending.calls_to_do, 0);
    COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
}


static inline void
SIGNAL_PENDING_SIGNALS(PyInterpreterState *interp, int force)
{
    struct _ceval_runtime_state *ceval = &interp->runtime->ceval;
    struct _ceval_state *ceval2 = &interp->ceval;
    _Py_atomic_store_relaxed(&ceval->signals_pending, 1);
    if (force) {
        _Py_atomic_store_relaxed(&ceval2->eval_breaker, 1);
    }
    else {
        /* eval_breaker is not set to 1 if _Py_ThreadCanHandleSignals() is false */
        COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
    }
}


static inline void
UNSIGNAL_PENDING_SIGNALS(PyInterpreterState *interp)
{
    struct _ceval_runtime_state *ceval = &interp->runtime->ceval;
    struct _ceval_state *ceval2 = &interp->ceval;
    _Py_atomic_store_relaxed(&ceval->signals_pending, 0);
    COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
}


static inline void
SIGNAL_ASYNC_EXC(PyInterpreterState *interp)
{
    struct _ceval_state *ceval2 = &interp->ceval;
    ceval2->pending.async_exc = 1;
    _Py_atomic_store_relaxed(&ceval2->eval_breaker, 1);
}


static inline void
UNSIGNAL_ASYNC_EXC(PyInterpreterState *interp)
{
    struct _ceval_runtime_state *ceval = &interp->runtime->ceval;
    struct _ceval_state *ceval2 = &interp->ceval;
    ceval2->pending.async_exc = 0;
    COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
}

#ifndef NDEBUG
/* Ensure that tstate is valid */
static int
is_tstate_valid(PyThreadState *tstate)
{
    assert(!_PyMem_IsPtrFreed(tstate));
    assert(!_PyMem_IsPtrFreed(tstate->interp));
    return 1;
}
#endif

/*
 * Implementation of the Global Interpreter Lock (GIL).
 */

#include <stdlib.h>
#include <errno.h>

#include "pycore_atomic.h"


#include "condvar.h"

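/* These wrappers check the return value of the condvar.h primitives and
   abort the process with Py_FatalError() on failure, so callers below do
   not need to handle errors from locking or signalling. */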
#define MUTEX_INIT(mut) \
    if (PyMUTEX_INIT(&(mut))) { \
        Py_FatalError("PyMUTEX_INIT(" #mut ") failed"); };
#define MUTEX_FINI(mut) \
    if (PyMUTEX_FINI(&(mut))) { \
        Py_FatalError("PyMUTEX_FINI(" #mut ") failed"); };
#define MUTEX_LOCK(mut) \
    if (PyMUTEX_LOCK(&(mut))) { \
        Py_FatalError("PyMUTEX_LOCK(" #mut ") failed"); };
#define MUTEX_UNLOCK(mut) \
    if (PyMUTEX_UNLOCK(&(mut))) { \
        Py_FatalError("PyMUTEX_UNLOCK(" #mut ") failed"); };

#define COND_INIT(cond) \
    if (PyCOND_INIT(&(cond))) { \
        Py_FatalError("PyCOND_INIT(" #cond ") failed"); };
#define COND_FINI(cond) \
    if (PyCOND_FINI(&(cond))) { \
        Py_FatalError("PyCOND_FINI(" #cond ") failed"); };
#define COND_SIGNAL(cond) \
    if (PyCOND_SIGNAL(&(cond))) { \
        Py_FatalError("PyCOND_SIGNAL(" #cond ") failed"); };
#define COND_WAIT(cond, mut) \
    if (PyCOND_WAIT(&(cond), &(mut))) { \
        Py_FatalError("PyCOND_WAIT(" #cond ") failed"); };
#define COND_TIMED_WAIT(cond, mut, microseconds, timeout_result) \
    { \
        int r = PyCOND_TIMEDWAIT(&(cond), &(mut), (microseconds)); \
        if (r < 0) \
            Py_FatalError("PyCOND_WAIT(" #cond ") failed"); \
        if (r) /* 1 == timeout, 2 == impl. can't say, so assume timeout */ \
            timeout_result = 1; \
        else \
            timeout_result = 0; \
    } \


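/* The GIL switch interval, in microseconds: 5000 us == 5 ms, i.e. the
   0.005 s default reported by sys.getswitchinterval(). */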
#define DEFAULT_INTERVAL 5000

static void _gil_initialize(struct _gil_runtime_state *gil)
{
    _Py_atomic_int uninitialized = {-1};
    gil->locked = uninitialized;
    gil->interval = DEFAULT_INTERVAL;
}

static int gil_created(struct _gil_runtime_state *gil)
{
    if (gil == NULL) {
        return 0;
    }
    return (_Py_atomic_load_explicit(&gil->locked, _Py_memory_order_acquire) >= 0);
}

static void create_gil(struct _gil_runtime_state *gil)
{
    MUTEX_INIT(gil->mutex);
#ifdef FORCE_SWITCHING
    MUTEX_INIT(gil->switch_mutex);
#endif
    COND_INIT(gil->cond);
#ifdef FORCE_SWITCHING
    COND_INIT(gil->switch_cond);
#endif
    _Py_atomic_store_relaxed(&gil->last_holder, 0);
    _Py_ANNOTATE_RWLOCK_CREATE(&gil->locked);
    _Py_atomic_store_explicit(&gil->locked, 0, _Py_memory_order_release);
}

static void destroy_gil(struct _gil_runtime_state *gil)
{
    /* some pthread-like implementations tie the mutex to the cond
     * and must have the cond destroyed first.
     */
    COND_FINI(gil->cond);
    MUTEX_FINI(gil->mutex);
#ifdef FORCE_SWITCHING
    COND_FINI(gil->switch_cond);
    MUTEX_FINI(gil->switch_mutex);
#endif
    _Py_atomic_store_explicit(&gil->locked, -1,
                              _Py_memory_order_release);
    _Py_ANNOTATE_RWLOCK_DESTROY(&gil->locked);
}

#ifdef HAVE_FORK
static void recreate_gil(struct _gil_runtime_state *gil)
{
    _Py_ANNOTATE_RWLOCK_DESTROY(&gil->locked);
    /* XXX should we destroy the old OS resources here? */
    create_gil(gil);
}
#endif

static void
drop_gil(struct _ceval_state *ceval, PyThreadState *tstate)
{
    /* If tstate is NULL, the caller is indicating that we're releasing
       the GIL for the last time in this thread. This is particularly
       relevant when the current thread state is finalizing or its
       interpreter is finalizing (either may be in an inconsistent
       state). In that case the current thread will definitely
       never try to acquire the GIL again. */
    // XXX It may be more correct to check tstate->_status.finalizing.
    // XXX assert(tstate == NULL || !tstate->_status.cleared);

    struct _gil_runtime_state *gil = ceval->gil;
    if (!_Py_atomic_load_relaxed(&gil->locked)) {
        Py_FatalError("drop_gil: GIL is not locked");
    }

    /* tstate is allowed to be NULL (early interpreter init) */
    if (tstate != NULL) {
        /* Sub-interpreter support: threads might have been switched
           under our feet using PyThreadState_Swap(). Fix the GIL last
           holder variable so that our heuristics work. */
        _Py_atomic_store_relaxed(&gil->last_holder, (uintptr_t)tstate);
    }

    MUTEX_LOCK(gil->mutex);
    _Py_ANNOTATE_RWLOCK_RELEASED(&gil->locked, /*is_write=*/1);
    _Py_atomic_store_relaxed(&gil->locked, 0);
    COND_SIGNAL(gil->cond);
    MUTEX_UNLOCK(gil->mutex);

#ifdef FORCE_SWITCHING
    /* We check tstate first in case we might be releasing the GIL for
       the last time in this thread. In that case there's a possible
       race with tstate->interp getting deleted after gil->mutex is
       unlocked and before the following code runs, leading to a crash.
       We can use (tstate == NULL) to indicate the thread is done with
       the GIL, and that's the only time we might delete the
       interpreter, so checking tstate first prevents the crash.
       See https://github.com/python/cpython/issues/104341. */
    if (tstate != NULL && _Py_atomic_load_relaxed(&ceval->gil_drop_request)) {
        MUTEX_LOCK(gil->switch_mutex);
        /* Not switched yet => wait */
        if (((PyThreadState*)_Py_atomic_load_relaxed(&gil->last_holder)) == tstate)
        {
            assert(is_tstate_valid(tstate));
            RESET_GIL_DROP_REQUEST(tstate->interp);
            /* NOTE: if COND_WAIT does not atomically start waiting when
               releasing the mutex, another thread can run through, take
               the GIL and drop it again, and reset the condition
               before we even had a chance to wait for it. */
            COND_WAIT(gil->switch_cond, gil->switch_mutex);
        }
        MUTEX_UNLOCK(gil->switch_mutex);
    }
#endif
}


/* Check if a Python thread must exit immediately, rather than taking the GIL
   if Py_Finalize() has been called.

   When this function is called by a daemon thread after Py_Finalize() has been
   called, the GIL no longer exists.

   tstate must be non-NULL. */
static inline int
tstate_must_exit(PyThreadState *tstate)
{
    /* bpo-39877: Access _PyRuntime directly rather than using
       tstate->interp->runtime to support calls from Python daemon threads.
       After Py_Finalize() has been called, tstate can be a dangling pointer:
       it points to freed PyThreadState memory. */
    PyThreadState *finalizing = _PyRuntimeState_GetFinalizing(&_PyRuntime);
    if (finalizing == NULL) {
        finalizing = _PyInterpreterState_GetFinalizing(tstate->interp);
    }
    return (finalizing != NULL && finalizing != tstate);
}


/* Take the GIL.

   The function saves errno at entry and restores its value at exit.

   tstate must be non-NULL. */
static void
take_gil(PyThreadState *tstate)
{
    int err = errno;

    assert(tstate != NULL);
    /* We shouldn't be using a thread state that isn't viable any more. */
    // XXX It may be more correct to check tstate->_status.finalizing.
    // XXX assert(!tstate->_status.cleared);

    if (tstate_must_exit(tstate)) {
        /* bpo-39877: If Py_Finalize() has been called and tstate is not the
           thread which called Py_Finalize(), exit the thread immediately.

           This code path can be reached by a daemon thread after Py_Finalize()
           completes. In this case, tstate is a dangling pointer: it points to
           freed PyThreadState memory. */
        PyThread_exit_thread();
    }

    assert(is_tstate_valid(tstate));
    PyInterpreterState *interp = tstate->interp;
    struct _ceval_state *ceval = &interp->ceval;
    struct _gil_runtime_state *gil = ceval->gil;

    /* Check that _PyEval_InitThreads() was called to create the lock */
    assert(gil_created(gil));

    MUTEX_LOCK(gil->mutex);

    if (!_Py_atomic_load_relaxed(&gil->locked)) {
        goto _ready;
    }

    int drop_requested = 0;
    while (_Py_atomic_load_relaxed(&gil->locked)) {
        unsigned long saved_switchnum = gil->switch_number;

        unsigned long interval = (gil->interval >= 1 ? gil->interval : 1);
        int timed_out = 0;
        COND_TIMED_WAIT(gil->cond, gil->mutex, interval, timed_out);

        /* If we timed out and no switch occurred in the meantime, it is time
           to ask the GIL-holding thread to drop it. */
        if (timed_out &&
            _Py_atomic_load_relaxed(&gil->locked) &&
            gil->switch_number == saved_switchnum)
        {
            if (tstate_must_exit(tstate)) {
                MUTEX_UNLOCK(gil->mutex);
                // gh-96387: If the loop requested a drop request in a previous
                // iteration, reset the request. Otherwise, drop_gil() can
                // block forever waiting for the thread which exited. Drop
                // requests made by other threads are also reset: these threads
                // may have to make the drop request again (iterate one more
                // time).
                if (drop_requested) {
                    RESET_GIL_DROP_REQUEST(interp);
                }
                PyThread_exit_thread();
            }
            assert(is_tstate_valid(tstate));

            SET_GIL_DROP_REQUEST(interp);
            drop_requested = 1;
        }
    }

_ready:
#ifdef FORCE_SWITCHING
    /* This mutex must be taken before modifying gil->last_holder:
       see drop_gil(). */
    MUTEX_LOCK(gil->switch_mutex);
#endif
    /* We now hold the GIL */
    _Py_atomic_store_relaxed(&gil->locked, 1);
    _Py_ANNOTATE_RWLOCK_ACQUIRED(&gil->locked, /*is_write=*/1);

    if (tstate != (PyThreadState*)_Py_atomic_load_relaxed(&gil->last_holder)) {
        _Py_atomic_store_relaxed(&gil->last_holder, (uintptr_t)tstate);
        ++gil->switch_number;
    }

#ifdef FORCE_SWITCHING
    COND_SIGNAL(gil->switch_cond);
    MUTEX_UNLOCK(gil->switch_mutex);
#endif

    if (tstate_must_exit(tstate)) {
        /* bpo-36475: If Py_Finalize() has been called and tstate is not
           the thread which called Py_Finalize(), exit the thread
           immediately.

           This code path can be reached by a daemon thread which was waiting
           in take_gil() while the main thread called
           wait_for_thread_shutdown() from Py_Finalize(). */
        MUTEX_UNLOCK(gil->mutex);
        drop_gil(ceval, tstate);
        PyThread_exit_thread();
    }
    assert(is_tstate_valid(tstate));

    if (_Py_atomic_load_relaxed(&ceval->gil_drop_request)) {
        RESET_GIL_DROP_REQUEST(interp);
    }
    else {
        /* bpo-40010: eval_breaker should be recomputed to be set to 1 if there
           is a pending signal: signal received by another thread which cannot
           handle signals.

           Note: RESET_GIL_DROP_REQUEST() calls COMPUTE_EVAL_BREAKER(). */
        COMPUTE_EVAL_BREAKER(interp, &_PyRuntime.ceval, ceval);
    }

    /* Don't access tstate if the thread must exit */
    if (tstate->async_exc != NULL) {
        _PyEval_SignalAsyncExc(tstate->interp);
    }

    MUTEX_UNLOCK(gil->mutex);

    errno = err;
}

void _PyEval_SetSwitchInterval(unsigned long microseconds)
{
    PyInterpreterState *interp = _PyInterpreterState_Get();
    struct _gil_runtime_state *gil = interp->ceval.gil;
    assert(gil != NULL);
    gil->interval = microseconds;
}

unsigned long _PyEval_GetSwitchInterval(void)
{
    PyInterpreterState *interp = _PyInterpreterState_Get();
    struct _gil_runtime_state *gil = interp->ceval.gil;
    assert(gil != NULL);
    return gil->interval;
}


int
_PyEval_ThreadsInitialized(void)
{
    /* XXX This is only needed for an assert in PyGILState_Ensure(),
     * which currently does not work with subinterpreters.
     * Thus we only use the main interpreter. */
    PyInterpreterState *interp = _PyInterpreterState_Main();
    if (interp == NULL) {
        return 0;
    }
    struct _gil_runtime_state *gil = interp->ceval.gil;
    return gil_created(gil);
}

int
PyEval_ThreadsInitialized(void)
{
    return _PyEval_ThreadsInitialized();
}

static inline int
current_thread_holds_gil(struct _gil_runtime_state *gil, PyThreadState *tstate)
{
    if (((PyThreadState*)_Py_atomic_load_relaxed(&gil->last_holder)) != tstate) {
        return 0;
    }
    return _Py_atomic_load_relaxed(&gil->locked);
}

static void
init_shared_gil(PyInterpreterState *interp, struct _gil_runtime_state *gil)
{
    assert(gil_created(gil));
    interp->ceval.gil = gil;
    interp->ceval.own_gil = 0;
}

static void
init_own_gil(PyInterpreterState *interp, struct _gil_runtime_state *gil)
{
    assert(!gil_created(gil));
    create_gil(gil);
    assert(gil_created(gil));
    interp->ceval.gil = gil;
    interp->ceval.own_gil = 1;
}

PyStatus
_PyEval_InitGIL(PyThreadState *tstate, int own_gil)
{
    assert(tstate->interp->ceval.gil == NULL);
    int locked;
    if (!own_gil) {
        /* The interpreter will share the main interpreter's instead. */
        PyInterpreterState *main_interp = _PyInterpreterState_Main();
        assert(tstate->interp != main_interp);
        struct _gil_runtime_state *gil = main_interp->ceval.gil;
        init_shared_gil(tstate->interp, gil);
        locked = current_thread_holds_gil(gil, tstate);
    }
    else {
        PyThread_init_thread();
        init_own_gil(tstate->interp, &tstate->interp->_gil);
        locked = 0;
    }
    if (!locked) {
        take_gil(tstate);
    }

    return _PyStatus_OK();
}

void
_PyEval_FiniGIL(PyInterpreterState *interp)
{
    struct _gil_runtime_state *gil = interp->ceval.gil;
    if (gil == NULL) {
        /* It was already finalized (or hasn't been initialized yet). */
        assert(!interp->ceval.own_gil);
        return;
    }
    else if (!interp->ceval.own_gil) {
#ifdef Py_DEBUG
        PyInterpreterState *main_interp = _PyInterpreterState_Main();
        assert(main_interp != NULL && interp != main_interp);
        assert(interp->ceval.gil == main_interp->ceval.gil);
#endif
        interp->ceval.gil = NULL;
        return;
    }

    if (!gil_created(gil)) {
        /* First Py_InitializeFromConfig() call: the GIL doesn't exist
           yet: do nothing. */
        return;
    }

    destroy_gil(gil);
    assert(!gil_created(gil));
    interp->ceval.gil = NULL;
}

void
PyEval_InitThreads(void)
{
    /* Do nothing: kept for backward compatibility */
}

void
_PyEval_Fini(void)
{
#ifdef Py_STATS
    _Py_PrintSpecializationStats(1);
#endif
}

void
PyEval_AcquireLock(void)
{
    PyThreadState *tstate = _PyThreadState_GET();
    _Py_EnsureTstateNotNULL(tstate);

    take_gil(tstate);
}

void
PyEval_ReleaseLock(void)
{
    PyThreadState *tstate = _PyThreadState_GET();
    /* This function must succeed when the current thread state is NULL.
       We therefore avoid PyThreadState_Get() which dumps a fatal error
       in debug mode. */
    struct _ceval_state *ceval = &tstate->interp->ceval;
    drop_gil(ceval, tstate);
}

void
_PyEval_AcquireLock(PyThreadState *tstate)
{
    _Py_EnsureTstateNotNULL(tstate);
    take_gil(tstate);
}

void
_PyEval_ReleaseLock(PyInterpreterState *interp, PyThreadState *tstate)
{
    /* If tstate is NULL then we do not expect the current thread
       to acquire the GIL ever again. */
    assert(tstate == NULL || tstate->interp == interp);
    struct _ceval_state *ceval = &interp->ceval;
    drop_gil(ceval, tstate);
}

void
PyEval_AcquireThread(PyThreadState *tstate)
{
    _Py_EnsureTstateNotNULL(tstate);

    take_gil(tstate);

    if (_PyThreadState_SwapNoGIL(tstate) != NULL) {
        Py_FatalError("non-NULL old thread state");
    }
}

void
PyEval_ReleaseThread(PyThreadState *tstate)
{
    assert(is_tstate_valid(tstate));

    PyThreadState *new_tstate = _PyThreadState_SwapNoGIL(NULL);
    if (new_tstate != tstate) {
        Py_FatalError("wrong thread state");
    }
    struct _ceval_state *ceval = &tstate->interp->ceval;
    drop_gil(ceval, tstate);
}

#ifdef HAVE_FORK
/* This function is called from PyOS_AfterFork_Child to destroy all threads
   which are not running in the child process, and clear internal locks
   which might be held by those threads. */
PyStatus
_PyEval_ReInitThreads(PyThreadState *tstate)
{
    assert(tstate->interp == _PyInterpreterState_Main());

    struct _gil_runtime_state *gil = tstate->interp->ceval.gil;
    if (!gil_created(gil)) {
        return _PyStatus_OK();
    }
    recreate_gil(gil);

    take_gil(tstate);

    struct _pending_calls *pending = &tstate->interp->ceval.pending;
    if (_PyThread_at_fork_reinit(&pending->lock) < 0) {
        return _PyStatus_ERR("Can't reinitialize pending calls lock");
    }

    /* Destroy all threads except the current one */
    _PyThreadState_DeleteExcept(tstate);
    return _PyStatus_OK();
}
#endif

/* This function is used to signal that async exceptions are waiting to be
   raised. */

void
_PyEval_SignalAsyncExc(PyInterpreterState *interp)
{
    SIGNAL_ASYNC_EXC(interp);
}

PyThreadState *
PyEval_SaveThread(void)
{
    PyThreadState *tstate = _PyThreadState_SwapNoGIL(NULL);
    _Py_EnsureTstateNotNULL(tstate);

    struct _ceval_state *ceval = &tstate->interp->ceval;
    assert(gil_created(ceval->gil));
    drop_gil(ceval, tstate);
    return tstate;
}

void
PyEval_RestoreThread(PyThreadState *tstate)
{
    _Py_EnsureTstateNotNULL(tstate);

    take_gil(tstate);

    _PyThreadState_SwapNoGIL(tstate);
}

/* Mechanism whereby asynchronously executing callbacks (e.g. UNIX
   signal handlers or Mac I/O completion routines) can schedule calls
   to a function to be called synchronously.
   The synchronous function is called with one void* argument.
   It should return 0 for success or -1 for failure -- failure should
   be accompanied by an exception.

   If registration succeeds, the registration function returns 0; if it
   fails (e.g. due to too many pending calls) it returns -1 (without
   setting an exception condition).

   Note that because registration may occur from within signal handlers,
   or other asynchronous events, calling malloc() is unsafe!

   Any thread can schedule pending calls, but only the main thread
   will execute them.
   There is no facility to schedule calls to a particular thread, but
   that should be easy to change, should that ever be required. In
   that case, the static variables here should go into the Python
   thread state.
*/
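
/* Illustrative usage sketch (not part of this file; the helper name
   "flush_queue" is made up for the example). Py_AddPendingCall() may be
   called from any thread; the callback later runs in the main thread with
   the GIL held and should return 0 on success or -1 with an exception set:

       static int
       flush_queue(void *arg)
       {
           // Called later in the main thread, with the GIL held.
           return 0;
       }

       if (Py_AddPendingCall(flush_queue, NULL) < 0) {
           // The pending-call queue was full; the caller may retry later.
       }
*/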

void
_PyEval_SignalReceived(PyInterpreterState *interp)
{
#ifdef MS_WINDOWS
    // bpo-42296: On Windows, _PyEval_SignalReceived() is called from a signal
    // handler which can run in a thread different than the Python thread, in
    // which case _Py_ThreadCanHandleSignals() is wrong. Ignore
    // _Py_ThreadCanHandleSignals() and always set eval_breaker to 1.
    //
    // The next eval_frame_handle_pending() call will call
    // _Py_ThreadCanHandleSignals() to recompute eval_breaker.
    int force = 1;
#else
    int force = 0;
#endif
    /* bpo-30703: Function called when the C signal handler of Python gets a
       signal. We cannot queue a callback using _PyEval_AddPendingCall() since
       that function is not async-signal-safe. */
    SIGNAL_PENDING_SIGNALS(interp, force);
}

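/* The pending-call queue is a fixed-size ring buffer of NPENDINGCALLS slots.
   One slot is always left unused so that first == last means "empty" and
   (last + 1) % NPENDINGCALLS == first means "full". */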
/* Push one item onto the queue while holding the lock. */
static int
_push_pending_call(struct _pending_calls *pending,
                   int (*func)(void *), void *arg)
{
    int i = pending->last;
    int j = (i + 1) % NPENDINGCALLS;
    if (j == pending->first) {
        return -1; /* Queue full */
    }
    pending->calls[i].func = func;
    pending->calls[i].arg = arg;
    pending->last = j;
    return 0;
}

static int
_next_pending_call(struct _pending_calls *pending,
                   int (**func)(void *), void **arg)
{
    int i = pending->first;
    if (i == pending->last) {
        /* Queue empty */
        assert(pending->calls[i].func == NULL);
        return -1;
    }
    *func = pending->calls[i].func;
    *arg = pending->calls[i].arg;
    return i;
}

/* Pop one item off the queue while holding the lock. */
static void
_pop_pending_call(struct _pending_calls *pending,
                  int (**func)(void *), void **arg)
{
    int i = _next_pending_call(pending, func, arg);
    if (i >= 0) {
        pending->calls[i] = (struct _pending_call){0};
        pending->first = (i + 1) % NPENDINGCALLS;
    }
}

/* This implementation is thread-safe. It allows
   scheduling to be made from any thread, and even from an executing
   callback.
 */

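/* Two queues exist: interp->ceval.pending holds per-interpreter calls, while
   _PyRuntime.ceval.pending_mainthread holds calls that must run in the main
   thread of the main interpreter (mainthreadonly != 0). */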
int
_PyEval_AddPendingCall(PyInterpreterState *interp,
                       int (*func)(void *), void *arg,
                       int mainthreadonly)
{
    assert(!mainthreadonly || _Py_IsMainInterpreter(interp));
    struct _pending_calls *pending = &interp->ceval.pending;
    if (mainthreadonly) {
        /* The main thread only exists in the main interpreter. */
        assert(_Py_IsMainInterpreter(interp));
        pending = &_PyRuntime.ceval.pending_mainthread;
    }
    /* Ensure that _PyEval_InitState() was called
       and that _PyEval_FiniState() is not called yet. */
    assert(pending->lock != NULL);

    PyThread_acquire_lock(pending->lock, WAIT_LOCK);
    int result = _push_pending_call(pending, func, arg);
    PyThread_release_lock(pending->lock);

    /* signal main loop */
    SIGNAL_PENDING_CALLS(pending, interp);
    return result;
}

int
Py_AddPendingCall(int (*func)(void *), void *arg)
{
    /* Legacy users of this API will continue to target the main thread
       (of the main interpreter). */
    PyInterpreterState *interp = _PyInterpreterState_Main();
    return _PyEval_AddPendingCall(interp, func, arg, 1);
}

static int
handle_signals(PyThreadState *tstate)
{
    assert(is_tstate_valid(tstate));
    if (!_Py_ThreadCanHandleSignals(tstate->interp)) {
        return 0;
    }

    UNSIGNAL_PENDING_SIGNALS(tstate->interp);
    if (_PyErr_CheckSignalsTstate(tstate) < 0) {
        /* On failure, re-schedule a call to handle_signals(). */
        SIGNAL_PENDING_SIGNALS(tstate->interp, 0);
        return -1;
    }
    return 0;
}

static inline int
maybe_has_pending_calls(PyInterpreterState *interp)
{
    struct _pending_calls *pending = &interp->ceval.pending;
    if (_Py_atomic_load_relaxed_int32(&pending->calls_to_do)) {
        return 1;
    }
    if (!_Py_IsMainThread() || !_Py_IsMainInterpreter(interp)) {
        return 0;
    }
    pending = &_PyRuntime.ceval.pending_mainthread;
    return _Py_atomic_load_relaxed_int32(&pending->calls_to_do);
}

static int
_make_pending_calls(struct _pending_calls *pending)
{
    /* perform a bounded number of calls, in case of recursion */
    for (int i=0; i<NPENDINGCALLS; i++) {
        int (*func)(void *) = NULL;
        void *arg = NULL;

        /* pop one item off the queue while holding the lock */
        PyThread_acquire_lock(pending->lock, WAIT_LOCK);
        _pop_pending_call(pending, &func, &arg);
        PyThread_release_lock(pending->lock);

        /* having released the lock, perform the callback */
        if (func == NULL) {
            break;
        }
        if (func(arg) != 0) {
            return -1;
        }
    }
    return 0;
}

static int
make_pending_calls(PyInterpreterState *interp)
{
    struct _pending_calls *pending = &interp->ceval.pending;
    struct _pending_calls *pending_main = &_PyRuntime.ceval.pending_mainthread;

    /* Only one thread (per interpreter) may run the pending calls
       at once. In the same way, we don't do recursive pending calls. */
    PyThread_acquire_lock(pending->lock, WAIT_LOCK);
    if (pending->busy) {
        /* A pending call was added after another thread was already
           handling the pending calls (and had already "unsignaled").
           Once that thread is done, it may have taken care of all the
           pending calls, or there might be some still waiting.
           Regardless, this interpreter's pending calls will stay
           "signaled" until that first thread has finished. At that
           point the next thread to trip the eval breaker will take
           care of any remaining pending calls. Until then, though,
           all the interpreter's threads will be tripping the eval
           breaker every time it's checked. */
        PyThread_release_lock(pending->lock);
        return 0;
    }
    pending->busy = 1;
    PyThread_release_lock(pending->lock);

    /* unsignal before starting to call callbacks, so that any callback
       added in-between re-signals */
    UNSIGNAL_PENDING_CALLS(interp);

    if (_make_pending_calls(pending) != 0) {
        pending->busy = 0;
        /* There might not be more calls to make, but we play it safe. */
        SIGNAL_PENDING_CALLS(pending, interp);
        return -1;
    }

    if (_Py_IsMainThread() && _Py_IsMainInterpreter(interp)) {
        if (_make_pending_calls(pending_main) != 0) {
            pending->busy = 0;
            /* There might not be more calls to make, but we play it safe. */
            SIGNAL_PENDING_CALLS(pending_main, interp);
            return -1;
        }
    }

    pending->busy = 0;
    return 0;
}

void
_Py_FinishPendingCalls(PyThreadState *tstate)
{
    assert(PyGILState_Check());
    assert(is_tstate_valid(tstate));

    if (make_pending_calls(tstate->interp) < 0) {
        PyObject *exc = _PyErr_GetRaisedException(tstate);
        PyErr_BadInternalCall();
        _PyErr_ChainExceptions1(exc);
        _PyErr_Print(tstate);
    }
}

int
_PyEval_MakePendingCalls(PyThreadState *tstate)
{
    int res;

    if (_Py_IsMainThread() && _Py_IsMainInterpreter(tstate->interp)) {
        /* Python signal handler doesn't really queue a callback:
           it only signals that a signal was received,
           see _PyEval_SignalReceived(). */
        res = handle_signals(tstate);
        if (res != 0) {
            return res;
        }
    }

    res = make_pending_calls(tstate->interp);
    if (res != 0) {
        return res;
    }

    return 0;
}

/* Py_MakePendingCalls() is a simple wrapper for the sake
   of backward-compatibility. */
int
Py_MakePendingCalls(void)
{
    assert(PyGILState_Check());

    PyThreadState *tstate = _PyThreadState_GET();
    assert(is_tstate_valid(tstate));

    /* Only execute pending calls on the main thread. */
    if (!_Py_IsMainThread() || !_Py_IsMainInterpreter(tstate->interp)) {
        return 0;
    }
    return _PyEval_MakePendingCalls(tstate);
}

void
_PyEval_InitState(PyInterpreterState *interp, PyThread_type_lock pending_lock)
{
    _gil_initialize(&interp->_gil);

    struct _pending_calls *pending = &interp->ceval.pending;
    assert(pending->lock == NULL);
    pending->lock = pending_lock;
}

void
_PyEval_FiniState(struct _ceval_state *ceval)
{
    struct _pending_calls *pending = &ceval->pending;
    if (pending->lock != NULL) {
        PyThread_free_lock(pending->lock);
        pending->lock = NULL;
    }
}

/* Handle signals, pending calls, GIL drop request
   and asynchronous exception */
int
_Py_HandlePending(PyThreadState *tstate)
{
    _PyRuntimeState * const runtime = &_PyRuntime;
    struct _ceval_runtime_state *ceval = &runtime->ceval;
    struct _ceval_state *interp_ceval_state = &tstate->interp->ceval;

    /* Pending signals */
    if (_Py_atomic_load_relaxed_int32(&ceval->signals_pending)) {
        if (handle_signals(tstate) != 0) {
            return -1;
        }
    }

    /* Pending calls */
    if (maybe_has_pending_calls(tstate->interp)) {
        if (make_pending_calls(tstate->interp) != 0) {
            return -1;
        }
    }

    /* GC scheduled to run */
    if (_Py_atomic_load_relaxed_int32(&interp_ceval_state->gc_scheduled)) {
        _Py_atomic_store_relaxed(&interp_ceval_state->gc_scheduled, 0);
        COMPUTE_EVAL_BREAKER(tstate->interp, ceval, interp_ceval_state);
        _Py_RunGC(tstate);
    }

    /* GIL drop request */
    if (_Py_atomic_load_relaxed_int32(&interp_ceval_state->gil_drop_request)) {
        /* Give another thread a chance */
        if (_PyThreadState_SwapNoGIL(NULL) != tstate) {
            Py_FatalError("tstate mix-up");
        }
        drop_gil(interp_ceval_state, tstate);

        /* Other threads may run now */

        take_gil(tstate);

        if (_PyThreadState_SwapNoGIL(tstate) != NULL) {
            Py_FatalError("orphan tstate");
        }
    }

    /* Check for asynchronous exception. */
    if (tstate->async_exc != NULL) {
        PyObject *exc = tstate->async_exc;
        tstate->async_exc = NULL;
        UNSIGNAL_ASYNC_EXC(tstate->interp);
        _PyErr_SetNone(tstate, exc);
        Py_DECREF(exc);
        return -1;
    }


    // It is possible that some of the conditions that trigger the eval breaker
    // are called in a different thread than the Python thread. An example of
    // this is bpo-42296: On Windows, _PyEval_SignalReceived() can be called in
    // a different thread than the Python thread, in which case
    // _Py_ThreadCanHandleSignals() is wrong. Recompute eval_breaker in the
    // current Python thread with the correct _Py_ThreadCanHandleSignals()
    // value. This prevents interrupting the eval loop at every instruction if
    // the current Python thread cannot handle signals (if
    // _Py_ThreadCanHandleSignals() is false).
    COMPUTE_EVAL_BREAKER(tstate->interp, ceval, interp_ceval_state);

    return 0;
}