Python-3.11.7/Python/ceval_gil.h
/*
 * Implementation of the Global Interpreter Lock (GIL).
 */

#include <stdlib.h>
#include <errno.h>

#include "pycore_atomic.h"


/*
   Notes about the implementation:

   - The GIL is just a boolean variable (locked) whose access is protected
     by a mutex (gil_mutex), and whose changes are signalled by a condition
     variable (gil_cond). gil_mutex is taken for short periods of time,
     and therefore mostly uncontended.

   - In the GIL-holding thread, the main loop (PyEval_EvalFrameEx) must be
     able to release the GIL on demand by another thread. A volatile boolean
     variable (gil_drop_request) is used for that purpose, which is checked
     at every turn of the eval loop. That variable is set after a wait of
     `interval` microseconds on `gil_cond` has timed out.

      [Actually, another volatile boolean variable (eval_breaker) is used
       which ORs several conditions into one. Volatile booleans are
       sufficient as inter-thread signalling means since Python is run
       on cache-coherent architectures only. A minimal sketch of this
       check appears right after this comment block.]

   - A thread wanting to take the GIL will first let pass a given amount of
     time (`interval` microseconds) before setting gil_drop_request. This
     encourages a defined switching period, but doesn't enforce it since
     opcodes can take an arbitrary time to execute.

     The `interval` value is available for the user to read and modify
     using the Python API `sys.{get,set}switchinterval()`.

   - When a thread releases the GIL and gil_drop_request is set, that thread
     ensures that another GIL-awaiting thread gets scheduled.
     It does so by waiting on a condition variable (switch_cond) until
     the value of last_holder is changed to something other than its
     own thread state pointer, indicating that another thread was able to
     take the GIL.

     This is meant to prohibit the latency-adverse behaviour on multi-core
     machines where one thread would speculatively release the GIL, but still
     run and end up being the first to re-acquire it, making the "timeslices"
     much longer than expected.
     (Note: this mechanism is enabled with FORCE_SWITCHING above)
*/
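/* Illustrative sketch (not part of this file): how the eval loop consumes
   eval_breaker and gil_drop_request. The function name and structure below
   are made up for illustration; the real logic lives in ceval.c. The point
   is the general pattern: one cheap relaxed atomic load per "turn" of the
   loop on the fast path, and a GIL hand-off on the slow path. */
#if 0
static void
eval_loop_sketch(PyThreadState *tstate)
{
    struct _ceval_state *ceval2 = &tstate->interp->ceval;
    for (;;) {
        /* Fast path: a single relaxed atomic load per loop turn. */
        if (_Py_atomic_load_relaxed(&ceval2->eval_breaker)) {
            /* Slow path: if another thread asked for the GIL, release it,
               let that thread run, then re-acquire it before continuing. */
            if (_Py_atomic_load_relaxed(&ceval2->gil_drop_request)) {
                /* drop_gil() + take_gil(), as defined later in this file */
            }
            /* ... pending calls, signals, async exceptions ... */
        }
        /* ... execute the next bytecode instruction ... */
    }
}
#endif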

#include "condvar.h"

#define MUTEX_INIT(mut) \
    if (PyMUTEX_INIT(&(mut))) { \
        Py_FatalError("PyMUTEX_INIT(" #mut ") failed"); };
#define MUTEX_FINI(mut) \
    if (PyMUTEX_FINI(&(mut))) { \
        Py_FatalError("PyMUTEX_FINI(" #mut ") failed"); };
#define MUTEX_LOCK(mut) \
    if (PyMUTEX_LOCK(&(mut))) { \
        Py_FatalError("PyMUTEX_LOCK(" #mut ") failed"); };
#define MUTEX_UNLOCK(mut) \
    if (PyMUTEX_UNLOCK(&(mut))) { \
        Py_FatalError("PyMUTEX_UNLOCK(" #mut ") failed"); };

#define COND_INIT(cond) \
    if (PyCOND_INIT(&(cond))) { \
        Py_FatalError("PyCOND_INIT(" #cond ") failed"); };
#define COND_FINI(cond) \
    if (PyCOND_FINI(&(cond))) { \
        Py_FatalError("PyCOND_FINI(" #cond ") failed"); };
#define COND_SIGNAL(cond) \
    if (PyCOND_SIGNAL(&(cond))) { \
        Py_FatalError("PyCOND_SIGNAL(" #cond ") failed"); };
#define COND_WAIT(cond, mut) \
    if (PyCOND_WAIT(&(cond), &(mut))) { \
        Py_FatalError("PyCOND_WAIT(" #cond ") failed"); };
#define COND_TIMED_WAIT(cond, mut, microseconds, timeout_result) \
    { \
        int r = PyCOND_TIMEDWAIT(&(cond), &(mut), (microseconds)); \
        if (r < 0) \
            Py_FatalError("PyCOND_TIMEDWAIT(" #cond ") failed"); \
        if (r) /* 1 == timeout, 2 == impl. can't say, so assume timeout */ \
            timeout_result = 1; \
        else \
            timeout_result = 0; \
    } \

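/* Illustrative sketch (not part of this file): how the wrappers above are
   meant to be combined. `example_mutex`, `example_cond` and the 10 ms
   timeout are made up for the example; the real call sites are take_gil()
   and drop_gil() below. Note that COND_TIMED_WAIT() reports the timeout
   through its last argument rather than a return value, since each wrapper
   turns any error from the underlying primitive into a fatal error. */
#if 0
static void
wait_with_timeout_sketch(PyMUTEX_T *example_mutex, PyCOND_T *example_cond)
{
    int timed_out = 0;
    MUTEX_LOCK(*example_mutex);
    /* Wait up to 10,000 microseconds for a signal on example_cond. */
    COND_TIMED_WAIT(*example_cond, *example_mutex, 10000, timed_out);
    if (timed_out) {
        /* nobody signalled the condition within the interval */
    }
    MUTEX_UNLOCK(*example_mutex);
}
#endif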
#define DEFAULT_INTERVAL 5000   /* microseconds (5 ms) */

static void _gil_initialize(struct _gil_runtime_state *gil)
{
    /* -1 means "GIL not created yet"; see gil_created() below. */
    _Py_atomic_int uninitialized = {-1};
    gil->locked = uninitialized;
    gil->interval = DEFAULT_INTERVAL;
}

static int gil_created(struct _gil_runtime_state *gil)
{
    return (_Py_atomic_load_explicit(&gil->locked, _Py_memory_order_acquire) >= 0);
}

static void create_gil(struct _gil_runtime_state *gil)
{
    MUTEX_INIT(gil->mutex);
#ifdef FORCE_SWITCHING
    MUTEX_INIT(gil->switch_mutex);
#endif
    COND_INIT(gil->cond);
#ifdef FORCE_SWITCHING
    COND_INIT(gil->switch_cond);
#endif
    _Py_atomic_store_relaxed(&gil->last_holder, 0);
    _Py_ANNOTATE_RWLOCK_CREATE(&gil->locked);
    _Py_atomic_store_explicit(&gil->locked, 0, _Py_memory_order_release);
}

static void destroy_gil(struct _gil_runtime_state *gil)
{
    /* some pthread-like implementations tie the mutex to the cond
     * and must have the cond destroyed first.
     */
    COND_FINI(gil->cond);
    MUTEX_FINI(gil->mutex);
#ifdef FORCE_SWITCHING
    COND_FINI(gil->switch_cond);
    MUTEX_FINI(gil->switch_mutex);
#endif
    _Py_atomic_store_explicit(&gil->locked, -1,
                              _Py_memory_order_release);
    _Py_ANNOTATE_RWLOCK_DESTROY(&gil->locked);
}

#ifdef HAVE_FORK
static void recreate_gil(struct _gil_runtime_state *gil)
{
    _Py_ANNOTATE_RWLOCK_DESTROY(&gil->locked);
    /* XXX should we destroy the old OS resources here? */
    create_gil(gil);
}
#endif

static void
drop_gil(struct _ceval_runtime_state *ceval, struct _ceval_state *ceval2,
         PyThreadState *tstate)
{
    struct _gil_runtime_state *gil = &ceval->gil;
    if (!_Py_atomic_load_relaxed(&gil->locked)) {
        Py_FatalError("drop_gil: GIL is not locked");
    }

    /* tstate is allowed to be NULL (early interpreter init) */
    if (tstate != NULL) {
        /* Sub-interpreter support: threads might have been switched
           under our feet using PyThreadState_Swap(). Fix the GIL last
           holder variable so that our heuristics work. */
        _Py_atomic_store_relaxed(&gil->last_holder, (uintptr_t)tstate);
    }

    MUTEX_LOCK(gil->mutex);
    _Py_ANNOTATE_RWLOCK_RELEASED(&gil->locked, /*is_write=*/1);
    _Py_atomic_store_relaxed(&gil->locked, 0);
    COND_SIGNAL(gil->cond);
    MUTEX_UNLOCK(gil->mutex);

#ifdef FORCE_SWITCHING
    if (_Py_atomic_load_relaxed(&ceval2->gil_drop_request) && tstate != NULL) {
        MUTEX_LOCK(gil->switch_mutex);
        /* Not switched yet => wait */
        if (((PyThreadState*)_Py_atomic_load_relaxed(&gil->last_holder)) == tstate)
        {
            assert(_PyThreadState_CheckConsistency(tstate));
            RESET_GIL_DROP_REQUEST(tstate->interp);
            /* NOTE: if COND_WAIT does not atomically start waiting when
               releasing the mutex, another thread can run through, take
               the GIL and drop it again, and reset the condition
               before we even had a chance to wait for it. */
            COND_WAIT(gil->switch_cond, gil->switch_mutex);
        }
        MUTEX_UNLOCK(gil->switch_mutex);
    }
#endif
}
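/* Illustrative sketch (not part of this file): the usual way C extension
   code ends up in drop_gil()/take_gil() is through PyEval_SaveThread() and
   PyEval_RestoreThread(), most often via the Py_BEGIN_ALLOW_THREADS /
   Py_END_ALLOW_THREADS macros. `long_running_io()` is a made-up placeholder
   for work that does not touch Python objects. */
#if 0
static void
release_gil_around_blocking_call(void)
{
    Py_BEGIN_ALLOW_THREADS          /* releases the GIL (drop_gil) */
    long_running_io();              /* safe: no Python C API calls here */
    Py_END_ALLOW_THREADS            /* re-acquires the GIL (take_gil) */
}
#endif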


/* Take the GIL.

   The function saves errno at entry and restores its value at exit.

   tstate must be non-NULL. */
static void
take_gil(PyThreadState *tstate)
{
    int err = errno;

    assert(tstate != NULL);

    if (_PyThreadState_MustExit(tstate)) {
        /* bpo-39877: If Py_Finalize() has been called and tstate is not the
           thread which called Py_Finalize(), exit the thread immediately.

           This code path can be reached by a daemon thread after Py_Finalize()
           completes. In this case, tstate is a dangling pointer: it points to
           freed PyThreadState memory. */
        PyThread_exit_thread();
    }

    assert(_PyThreadState_CheckConsistency(tstate));
    PyInterpreterState *interp = tstate->interp;
    struct _ceval_runtime_state *ceval = &interp->runtime->ceval;
    struct _ceval_state *ceval2 = &interp->ceval;
    struct _gil_runtime_state *gil = &ceval->gil;

    /* Check that _PyEval_InitThreads() was called to create the lock */
    assert(gil_created(gil));

    MUTEX_LOCK(gil->mutex);

    if (!_Py_atomic_load_relaxed(&gil->locked)) {
        goto _ready;
    }

    int drop_requested = 0;
    while (_Py_atomic_load_relaxed(&gil->locked)) {
        unsigned long saved_switchnum = gil->switch_number;

        unsigned long interval = (gil->interval >= 1 ? gil->interval : 1);
        int timed_out = 0;
        COND_TIMED_WAIT(gil->cond, gil->mutex, interval, timed_out);

        /* If we timed out and no switch occurred in the meantime, it is time
           to ask the GIL-holding thread to drop it. */
        if (timed_out &&
            _Py_atomic_load_relaxed(&gil->locked) &&
            gil->switch_number == saved_switchnum)
        {
            if (_PyThreadState_MustExit(tstate)) {
                MUTEX_UNLOCK(gil->mutex);
                // gh-96387: If this loop set gil_drop_request in a previous
                // iteration, reset the request. Otherwise, drop_gil() can
                // block forever waiting for a thread which has already
                // exited. Drop requests made by other threads are also
                // reset: those threads may have to set the request again
                // (iterate one more time).
                if (drop_requested) {
                    RESET_GIL_DROP_REQUEST(interp);
                }
                PyThread_exit_thread();
            }
            assert(_PyThreadState_CheckConsistency(tstate));

            SET_GIL_DROP_REQUEST(interp);
            drop_requested = 1;
        }
    }

_ready:
#ifdef FORCE_SWITCHING
    /* This mutex must be taken before modifying gil->last_holder:
       see drop_gil(). */
    MUTEX_LOCK(gil->switch_mutex);
#endif
    /* We now hold the GIL */
    _Py_atomic_store_relaxed(&gil->locked, 1);
    _Py_ANNOTATE_RWLOCK_ACQUIRED(&gil->locked, /*is_write=*/1);

    if (tstate != (PyThreadState*)_Py_atomic_load_relaxed(&gil->last_holder)) {
        _Py_atomic_store_relaxed(&gil->last_holder, (uintptr_t)tstate);
        ++gil->switch_number;
    }

#ifdef FORCE_SWITCHING
    COND_SIGNAL(gil->switch_cond);
    MUTEX_UNLOCK(gil->switch_mutex);
#endif

    if (_PyThreadState_MustExit(tstate)) {
        /* bpo-36475: If Py_Finalize() has been called and tstate is not
           the thread which called Py_Finalize(), exit the thread
           immediately.

           This code path can be reached by a daemon thread which was waiting
           in take_gil() while the main thread called
           wait_for_thread_shutdown() from Py_Finalize(). */
        MUTEX_UNLOCK(gil->mutex);
        drop_gil(ceval, ceval2, tstate);
        PyThread_exit_thread();
    }
    assert(_PyThreadState_CheckConsistency(tstate));

    if (_Py_atomic_load_relaxed(&ceval2->gil_drop_request)) {
        RESET_GIL_DROP_REQUEST(interp);
    }
    else {
        /* bpo-40010: eval_breaker must be recomputed so that it is set to 1
           if there is a pending signal: a signal received by another thread
           which cannot handle signals.

           Note: RESET_GIL_DROP_REQUEST() calls COMPUTE_EVAL_BREAKER(). */
        COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
    }

    /* Don't access tstate if the thread must exit */
    if (tstate->async_exc != NULL) {
        _PyEval_SignalAsyncExc(tstate->interp);
    }

    MUTEX_UNLOCK(gil->mutex);

    errno = err;
}
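/* Illustrative sketch (not part of this file): a thread that was not created
   by Python (e.g. a callback thread spawned by a C library) acquires the GIL
   through the PyGILState API, which internally ends up in take_gil() and
   drop_gil(). `callback_arg` and the commented-out `notify_python()` call are
   made up for the example. */
#if 0
static void
callback_from_foreign_thread(void *callback_arg)
{
    PyGILState_STATE gstate = PyGILState_Ensure();   /* take the GIL */
    /* ... call into the Python C API here, e.g. notify_python(callback_arg) ... */
    PyGILState_Release(gstate);                      /* drop the GIL */
}
#endif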

void _PyEval_SetSwitchInterval(unsigned long microseconds)
{
    struct _gil_runtime_state *gil = &_PyRuntime.ceval.gil;
    gil->interval = microseconds;
}

unsigned long _PyEval_GetSwitchInterval(void)
{
    struct _gil_runtime_state *gil = &_PyRuntime.ceval.gil;
    return gil->interval;
}
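/* Illustrative sketch (not part of this file): sys.setswitchinterval() and
   sys.getswitchinterval() take and return seconds as a float, while the two
   functions above work in microseconds. The conversion done in sysmodule.c
   is roughly the following (a simplified sketch; argument checking and error
   handling are omitted). */
#if 0
static void
set_switch_interval_seconds(double seconds)
{
    /* e.g. the default of 0.005 s corresponds to DEFAULT_INTERVAL == 5000 us */
    _PyEval_SetSwitchInterval((unsigned long)(seconds * 1e6));
}

static double
get_switch_interval_seconds(void)
{
    return 1e-6 * _PyEval_GetSwitchInterval();
}
#endif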