#include "pycore_interp.h"    // _PyInterpreterState.threads.stacksize

/* Posix threads interface */

#include <stdlib.h>
#include <string.h>
#if defined(__APPLE__) || defined(HAVE_PTHREAD_DESTRUCTOR)
#define destructor xxdestructor
#endif
#ifndef HAVE_PTHREAD_STUBS
#  include <pthread.h>
#endif
#if defined(__APPLE__) || defined(HAVE_PTHREAD_DESTRUCTOR)
#undef destructor
#endif
#include <signal.h>

#if defined(__linux__)
#   include <sys/syscall.h>     /* syscall(SYS_gettid) */
#elif defined(__FreeBSD__)
#   include <pthread_np.h>      /* pthread_getthreadid_np() */
#elif defined(__OpenBSD__)
#   include <unistd.h>          /* getthrid() */
#elif defined(_AIX)
#   include <sys/thread.h>      /* thread_self() */
#elif defined(__NetBSD__)
#   include <lwp.h>             /* _lwp_self() */
#endif

/* The POSIX spec requires that use of pthread_attr_setstacksize
   be conditional on _POSIX_THREAD_ATTR_STACKSIZE being defined. */
#ifdef _POSIX_THREAD_ATTR_STACKSIZE
#ifndef THREAD_STACK_SIZE
#define THREAD_STACK_SIZE       0       /* use default stack size */
#endif

/* The default stack size for new threads on BSD is small enough that
 * we'll get hard crashes instead of 'maximum recursion depth exceeded'
 * exceptions.
 *
 * The stack sizes below are the empirically determined minimal sizes at
 * which a simple recursive function doesn't cause a hard crash.
 *
 * For macOS the value of THREAD_STACK_SIZE is determined in configure.ac
 * as it also depends on other configure options such as the chosen
 * sanitizer runtimes.
 */
#if defined(__FreeBSD__) && defined(THREAD_STACK_SIZE) && THREAD_STACK_SIZE == 0
#undef  THREAD_STACK_SIZE
#define THREAD_STACK_SIZE       0x400000
#endif
#if defined(_AIX) && defined(THREAD_STACK_SIZE) && THREAD_STACK_SIZE == 0
#undef  THREAD_STACK_SIZE
#define THREAD_STACK_SIZE       0x200000
#endif
/* bpo-38852: test_threading.test_recursion_limit() checks that 1000 recursive
   Python calls (the default recursion limit) don't crash, but raise a regular
   RecursionError exception. In debug mode, Python function calls allocate
   more memory on the stack, so use a stack of 8 MiB. */
#if defined(__ANDROID__) && defined(THREAD_STACK_SIZE) && THREAD_STACK_SIZE == 0
#   ifdef Py_DEBUG
#   undef  THREAD_STACK_SIZE
#   define THREAD_STACK_SIZE    0x800000
#   endif
#endif
#if defined(__VXWORKS__) && defined(THREAD_STACK_SIZE) && THREAD_STACK_SIZE == 0
#undef  THREAD_STACK_SIZE
#define THREAD_STACK_SIZE       0x100000
#endif
/* for safety, ensure a viable minimum stacksize */
#define THREAD_STACK_MIN        0x8000  /* 32 KiB */
#else  /* !_POSIX_THREAD_ATTR_STACKSIZE */
#ifdef THREAD_STACK_SIZE
#error "THREAD_STACK_SIZE defined but _POSIX_THREAD_ATTR_STACKSIZE undefined"
#endif
#endif

/* The POSIX spec says that implementations supporting the sem_*
   family of functions must indicate this by defining
   _POSIX_SEMAPHORES. */
#ifdef _POSIX_SEMAPHORES
/* On FreeBSD 4.x, _POSIX_SEMAPHORES is defined empty, so
   we need to add 0 to make it work there as well. */
#if (_POSIX_SEMAPHORES+0) == -1
#define HAVE_BROKEN_POSIX_SEMAPHORES
#else
#include <semaphore.h>
#include <errno.h>
#endif
#endif


/* Whether or not to use semaphores directly rather than emulating them with
 * mutexes and condition variables:
 */
#if (defined(_POSIX_SEMAPHORES) && !defined(HAVE_BROKEN_POSIX_SEMAPHORES) && \
     (defined(HAVE_SEM_TIMEDWAIT) || defined(HAVE_SEM_CLOCKWAIT)))
#  define USE_SEMAPHORES
#else
#  undef USE_SEMAPHORES
#endif


/* On platforms that don't use standard POSIX threads pthread_sigmask()
 * isn't present.  DEC threads uses sigprocmask() instead, as do most
 * other UNIX International compliant systems that don't have the full
 * pthread implementation.
 */
#if defined(HAVE_PTHREAD_SIGMASK) && !defined(HAVE_BROKEN_PTHREAD_SIGMASK)
#  define SET_THREAD_SIGMASK pthread_sigmask
#else
#  define SET_THREAD_SIGMASK sigprocmask
#endif


#define MICROSECONDS_TO_TIMESPEC(microseconds, ts) \
do { \
    struct timeval tv; \
    gettimeofday(&tv, NULL); \
    tv.tv_usec += microseconds % 1000000; \
    tv.tv_sec += microseconds / 1000000; \
    tv.tv_sec += tv.tv_usec / 1000000; \
    tv.tv_usec %= 1000000; \
    ts.tv_sec = tv.tv_sec; \
    ts.tv_nsec = tv.tv_usec * 1000; \
} while(0)
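
/* Illustrative sketch (not used by this file): expanding the macro above for
   a 1,500,000 us timeout starting at tv = {sec=100, usec=900000} gives
   tv.tv_usec = 900000 + 500000 = 1400000 and tv.tv_sec = 100 + 1 = 101;
   the carry step then yields tv.tv_sec = 102, tv.tv_usec = 400000, so the
   resulting ts is {tv_sec=102, tv_nsec=400000000}. */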


/*
 * pthread_cond support
 */

#if defined(HAVE_PTHREAD_CONDATTR_SETCLOCK) && defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
// The monotonic clock is available at build time; that does not guarantee
// that it actually works at runtime.
#define CONDATTR_MONOTONIC
#endif

// NULL when pthread_condattr_setclock(CLOCK_MONOTONIC) is not supported.
static pthread_condattr_t *condattr_monotonic = NULL;

static void
init_condattr(void)
{
#ifdef CONDATTR_MONOTONIC
    static pthread_condattr_t ca;
    pthread_condattr_init(&ca);
    if (pthread_condattr_setclock(&ca, CLOCK_MONOTONIC) == 0) {
        condattr_monotonic = &ca;  // Use monotonic clock
    }
#endif
}

int
_PyThread_cond_init(PyCOND_T *cond)
{
    return pthread_cond_init(cond, condattr_monotonic);
}

void
_PyThread_cond_after(long long us, struct timespec *abs)
{
#ifdef CONDATTR_MONOTONIC
    if (condattr_monotonic) {
        clock_gettime(CLOCK_MONOTONIC, abs);
        abs->tv_sec  += us / 1000000;
        abs->tv_nsec += (us % 1000000) * 1000;
        abs->tv_sec  += abs->tv_nsec / 1000000000;
        abs->tv_nsec %= 1000000000;
        return;
    }
#endif

    struct timespec ts;
    MICROSECONDS_TO_TIMESPEC(us, ts);
    *abs = ts;
}
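
/* Usage sketch (illustrative; `cond` and `mut` are hypothetical variables):
   a caller holding `mut` computes an absolute deadline and waits on it, e.g.

       struct timespec deadline;
       _PyThread_cond_after(5 * 1000000, &deadline);   // 5 seconds from now
       int st = pthread_cond_timedwait(&cond, &mut, &deadline);
       // st == ETIMEDOUT when the deadline passes without a signal

   The deadline is on CLOCK_MONOTONIC when condattr_monotonic is non-NULL
   (and the condition variable was created via _PyThread_cond_init);
   otherwise it is on the realtime clock filled in by
   MICROSECONDS_TO_TIMESPEC. */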


/* A pthread mutex isn't sufficient to model the Python lock type
 * because, according to Draft 5 of the docs (P1003.4a/D5), both of the
 * following are undefined:
 *  -> a thread tries to lock a mutex it already has locked
 *  -> a thread tries to unlock a mutex locked by a different thread
 * pthread mutexes are designed for serializing threads over short pieces
 * of code anyway, so wouldn't be an appropriate implementation of
 * Python's locks regardless.
 *
 * The pthread_lock struct implements a Python lock as a "locked?" bit
 * and a <condition, mutex> pair.  In general, if the bit can be acquired
 * instantly, it is, else the pair is used to block the thread until the
 * bit is cleared.     9 May 1994 tim@ksr.com
 */

typedef struct {
    char             locked; /* 0=unlocked, 1=locked */
    /* a <cond, mutex> pair to handle an acquire of a locked lock */
    pthread_cond_t   lock_released;
    pthread_mutex_t  mut;
} pthread_lock;

#define CHECK_STATUS(name)  if (status != 0) { perror(name); error = 1; }
#define CHECK_STATUS_PTHREAD(name)  if (status != 0) { fprintf(stderr, \
    "%s: %s\n", name, strerror(status)); error = 1; }

/*
 * Initialization.
 */
static void
PyThread__init_thread(void)
{
#if defined(_AIX) && defined(__GNUC__)
    extern void pthread_init(void);
    pthread_init();
#endif
    init_condattr();
}

/*
 * Thread support.
 */

/* bpo-33015: the pythread_callback struct and pythread_wrapper() adapt a
   "void func(void *)" callback to the "void *func(void *)" signature by
   always returning NULL.

   PyThread_start_new_thread() takes a "void func(void *)" callback, whereas
   pthread_create() requires a start routine that returns void*. */
typedef struct {
    void (*func) (void *);
    void *arg;
} pythread_callback;

static void *
pythread_wrapper(void *arg)
{
    /* copy func and func_arg and free the temporary structure */
    pythread_callback *callback = arg;
    void (*func)(void *) = callback->func;
    void *func_arg = callback->arg;
    PyMem_RawFree(arg);

    func(func_arg);
    return NULL;
}

unsigned long
PyThread_start_new_thread(void (*func)(void *), void *arg)
{
    pthread_t th;
    int status;
#if defined(THREAD_STACK_SIZE) || defined(PTHREAD_SYSTEM_SCHED_SUPPORTED)
    pthread_attr_t attrs;
#endif
#if defined(THREAD_STACK_SIZE)
    size_t      tss;
#endif

    dprintf(("PyThread_start_new_thread called\n"));
    if (!initialized)
        PyThread_init_thread();

#if defined(THREAD_STACK_SIZE) || defined(PTHREAD_SYSTEM_SCHED_SUPPORTED)
    if (pthread_attr_init(&attrs) != 0)
        return PYTHREAD_INVALID_THREAD_ID;
#endif
#if defined(THREAD_STACK_SIZE)
    PyThreadState *tstate = _PyThreadState_GET();
    size_t stacksize = tstate ? tstate->interp->threads.stacksize : 0;
    tss = (stacksize != 0) ? stacksize : THREAD_STACK_SIZE;
    if (tss != 0) {
        if (pthread_attr_setstacksize(&attrs, tss) != 0) {
            pthread_attr_destroy(&attrs);
            return PYTHREAD_INVALID_THREAD_ID;
        }
    }
#endif
#if defined(PTHREAD_SYSTEM_SCHED_SUPPORTED)
    pthread_attr_setscope(&attrs, PTHREAD_SCOPE_SYSTEM);
#endif

    pythread_callback *callback = PyMem_RawMalloc(sizeof(pythread_callback));

    if (callback == NULL) {
        return PYTHREAD_INVALID_THREAD_ID;
    }

    callback->func = func;
    callback->arg = arg;

    status = pthread_create(&th,
#if defined(THREAD_STACK_SIZE) || defined(PTHREAD_SYSTEM_SCHED_SUPPORTED)
                             &attrs,
#else
                             (pthread_attr_t*)NULL,
#endif
                             pythread_wrapper, callback);

#if defined(THREAD_STACK_SIZE) || defined(PTHREAD_SYSTEM_SCHED_SUPPORTED)
    pthread_attr_destroy(&attrs);
#endif

    if (status != 0) {
        PyMem_RawFree(callback);
        return PYTHREAD_INVALID_THREAD_ID;
    }

    pthread_detach(th);

#if SIZEOF_PTHREAD_T <= SIZEOF_LONG
    return (unsigned long) th;
#else
    return (unsigned long) *(unsigned long *) &th;
#endif
}
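
/* Usage sketch (illustrative only; `worker` and `ctx` are hypothetical):

       static void worker(void *arg) { ... }   // matches "void func(void *)"

       unsigned long tid = PyThread_start_new_thread(worker, ctx);
       if (tid == PYTHREAD_INVALID_THREAD_ID) {
           // attribute setup, allocation or pthread_create() failed
       }

   The new thread is created detached, so there is nothing to join; the
   returned value is only an identifier derived from the pthread_t. */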

/* XXX This implementation is considered (to quote Tim Peters) "inherently
   hosed" because:
     - It does not guarantee that a non-zero integer is returned.
     - The cast to unsigned long is inherently unsafe.
     - It is not clear that the 'volatile' (added for AIX?) is still
       necessary.
*/
unsigned long
PyThread_get_thread_ident(void)
{
    volatile pthread_t threadid;
    if (!initialized)
        PyThread_init_thread();
    threadid = pthread_self();
    return (unsigned long) threadid;
}

#ifdef PY_HAVE_THREAD_NATIVE_ID
unsigned long
PyThread_get_thread_native_id(void)
{
    if (!initialized)
        PyThread_init_thread();
#ifdef __APPLE__
    uint64_t native_id;
    (void) pthread_threadid_np(NULL, &native_id);
#elif defined(__linux__)
    pid_t native_id;
    native_id = syscall(SYS_gettid);
#elif defined(__FreeBSD__)
    int native_id;
    native_id = pthread_getthreadid_np();
#elif defined(__OpenBSD__)
    pid_t native_id;
    native_id = getthrid();
#elif defined(_AIX)
    tid_t native_id;
    native_id = thread_self();
#elif defined(__NetBSD__)
    lwpid_t native_id;
    native_id = _lwp_self();
#endif
    return (unsigned long) native_id;
}
#endif

void _Py_NO_RETURN
PyThread_exit_thread(void)
{
    dprintf(("PyThread_exit_thread called\n"));
    if (!initialized)
        exit(0);
    pthread_exit(0);
}

#ifdef USE_SEMAPHORES

/*
 * Lock support.
 */

PyThread_type_lock
PyThread_allocate_lock(void)
{
    sem_t *lock;
    int status, error = 0;

    dprintf(("PyThread_allocate_lock called\n"));
    if (!initialized)
        PyThread_init_thread();

    lock = (sem_t *)PyMem_RawMalloc(sizeof(sem_t));

    if (lock) {
        status = sem_init(lock,0,1);
        CHECK_STATUS("sem_init");

        if (error) {
            PyMem_RawFree((void *)lock);
            lock = NULL;
        }
    }

    dprintf(("PyThread_allocate_lock() -> %p\n", (void *)lock));
    return (PyThread_type_lock)lock;
}

void
PyThread_free_lock(PyThread_type_lock lock)
{
    sem_t *thelock = (sem_t *)lock;
    int status, error = 0;

    (void) error; /* silence unused-but-set-variable warning */
    dprintf(("PyThread_free_lock(%p) called\n", lock));

    if (!thelock)
        return;

    status = sem_destroy(thelock);
    CHECK_STATUS("sem_destroy");

    PyMem_RawFree((void *)thelock);
}

/*
 * As of February 2002, Cygwin thread implementations mistakenly report error
 * codes in the return value of the sem_ calls (like the pthread_ functions).
 * Correct implementations return -1 and put the code in errno. This supports
 * either.
 */
static int
fix_status(int status)
{
    return (status == -1) ? errno : status;
}

PyLockStatus
PyThread_acquire_lock_timed(PyThread_type_lock lock, PY_TIMEOUT_T microseconds,
                            int intr_flag)
{
    PyLockStatus success;
    sem_t *thelock = (sem_t *)lock;
    int status, error = 0;

    (void) error; /* silence unused-but-set-variable warning */
    dprintf(("PyThread_acquire_lock_timed(%p, %lld, %d) called\n",
             lock, microseconds, intr_flag));

    _PyTime_t timeout;  // relative timeout
    if (microseconds >= 0) {
        _PyTime_t ns;
        if (microseconds <= _PyTime_MAX / 1000) {
            ns = microseconds * 1000;
        }
        else {
            // bpo-41710: PyThread_acquire_lock_timed() cannot report timeout
            // overflow to the caller, so clamp the timeout to
            // [_PyTime_MIN, _PyTime_MAX].
            //
            // _PyTime_MAX nanoseconds is around 292.3 years.
            //
            // _thread.Lock.acquire() and _thread.RLock.acquire() raise an
            // OverflowError if microseconds is greater than PY_TIMEOUT_MAX.
            ns = _PyTime_MAX;
        }
        timeout = _PyTime_FromNanoseconds(ns);
    }
    else {
        timeout = _PyTime_FromNanoseconds(-1);
    }

#ifdef HAVE_SEM_CLOCKWAIT
    struct timespec abs_timeout;
    // Local scope for deadline
    {
        _PyTime_t deadline = _PyTime_Add(_PyTime_GetMonotonicClock(), timeout);
        _PyTime_AsTimespec_clamp(deadline, &abs_timeout);
    }
#else
    _PyTime_t deadline = 0;
    if (timeout > 0 && !intr_flag) {
        deadline = _PyDeadline_Init(timeout);
    }
#endif

    while (1) {
        if (timeout > 0) {
#ifdef HAVE_SEM_CLOCKWAIT
            status = fix_status(sem_clockwait(thelock, CLOCK_MONOTONIC,
                                              &abs_timeout));
#else
            _PyTime_t abs_time = _PyTime_Add(_PyTime_GetSystemClock(),
                                             timeout);
            struct timespec ts;
            _PyTime_AsTimespec_clamp(abs_time, &ts);
            status = fix_status(sem_timedwait(thelock, &ts));
#endif
        }
        else if (timeout == 0) {
            status = fix_status(sem_trywait(thelock));
        }
        else {
            status = fix_status(sem_wait(thelock));
        }

        /* Retry if interrupted by a signal, unless the caller wants to be
           notified.  */
        if (intr_flag || status != EINTR) {
            break;
        }

        // sem_clockwait() uses an absolute timeout, so there is no need
        // to recompute the relative timeout here.
#ifndef HAVE_SEM_CLOCKWAIT
        if (timeout > 0) {
            /* wait interrupted by a signal (EINTR): recompute the timeout */
            timeout = _PyDeadline_Get(deadline);
            if (timeout < 0) {
                status = ETIMEDOUT;
                break;
            }
        }
#endif
    }

    /* Don't check the status if we're stopping because of an interrupt.  */
    if (!(intr_flag && status == EINTR)) {
        if (timeout > 0) {
            if (status != ETIMEDOUT) {
#ifdef HAVE_SEM_CLOCKWAIT
                CHECK_STATUS("sem_clockwait");
#else
                CHECK_STATUS("sem_timedwait");
#endif
            }
        }
        else if (timeout == 0) {
            if (status != EAGAIN) {
                CHECK_STATUS("sem_trywait");
            }
        }
        else {
            CHECK_STATUS("sem_wait");
        }
    }

    if (status == 0) {
        success = PY_LOCK_ACQUIRED;
    } else if (intr_flag && status == EINTR) {
        success = PY_LOCK_INTR;
    } else {
        success = PY_LOCK_FAILURE;
    }

    dprintf(("PyThread_acquire_lock_timed(%p, %lld, %d) -> %d\n",
             lock, microseconds, intr_flag, success));
    return success;
}

void
PyThread_release_lock(PyThread_type_lock lock)
{
    sem_t *thelock = (sem_t *)lock;
    int status, error = 0;

    (void) error; /* silence unused-but-set-variable warning */
    dprintf(("PyThread_release_lock(%p) called\n", lock));

    status = sem_post(thelock);
    CHECK_STATUS("sem_post");
}

#else /* USE_SEMAPHORES */

/*
 * Lock support.
 */
PyThread_type_lock
PyThread_allocate_lock(void)
{
    pthread_lock *lock;
    int status, error = 0;

    dprintf(("PyThread_allocate_lock called\n"));
    if (!initialized)
        PyThread_init_thread();

    lock = (pthread_lock *) PyMem_RawCalloc(1, sizeof(pthread_lock));
    if (lock) {
        lock->locked = 0;

        status = pthread_mutex_init(&lock->mut, NULL);
        CHECK_STATUS_PTHREAD("pthread_mutex_init");
        /* For race detectors, mark the pthread mutex underlying a Python
           lock as pure happens-before.  We can't simply annotate the
           Python-level lock as a mutex because it can be acquired in one
           thread and released in another, which would cause such tools to
           report errors. */
        _Py_ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX(&lock->mut);

        status = _PyThread_cond_init(&lock->lock_released);
        CHECK_STATUS_PTHREAD("pthread_cond_init");

        if (error) {
            PyMem_RawFree((void *)lock);
            lock = 0;
        }
    }

    dprintf(("PyThread_allocate_lock() -> %p\n", (void *)lock));
    return (PyThread_type_lock) lock;
}

void
PyThread_free_lock(PyThread_type_lock lock)
{
    pthread_lock *thelock = (pthread_lock *)lock;
    int status, error = 0;

    (void) error; /* silence unused-but-set-variable warning */
    dprintf(("PyThread_free_lock(%p) called\n", lock));

    /* some pthread-like implementations tie the mutex to the cond
     * and must have the cond destroyed first.
     */
    status = pthread_cond_destroy( &thelock->lock_released );
    CHECK_STATUS_PTHREAD("pthread_cond_destroy");

    status = pthread_mutex_destroy( &thelock->mut );
    CHECK_STATUS_PTHREAD("pthread_mutex_destroy");

    PyMem_RawFree((void *)thelock);
}

PyLockStatus
PyThread_acquire_lock_timed(PyThread_type_lock lock, PY_TIMEOUT_T microseconds,
                            int intr_flag)
{
    PyLockStatus success = PY_LOCK_FAILURE;
    pthread_lock *thelock = (pthread_lock *)lock;
    int status, error = 0;

    dprintf(("PyThread_acquire_lock_timed(%p, %lld, %d) called\n",
             lock, microseconds, intr_flag));

    if (microseconds == 0) {
        status = pthread_mutex_trylock( &thelock->mut );
        if (status != EBUSY)
            CHECK_STATUS_PTHREAD("pthread_mutex_trylock[1]");
    }
    else {
        status = pthread_mutex_lock( &thelock->mut );
        CHECK_STATUS_PTHREAD("pthread_mutex_lock[1]");
    }
    if (status == 0) {
        if (thelock->locked == 0) {
            success = PY_LOCK_ACQUIRED;
        }
        else if (microseconds != 0) {
            struct timespec abs;
            if (microseconds > 0) {
                _PyThread_cond_after(microseconds, &abs);
            }
            /* continue trying until we get the lock */

            /* mut must be locked by me -- part of the condition
             * protocol */
            while (success == PY_LOCK_FAILURE) {
                if (microseconds > 0) {
                    status = pthread_cond_timedwait(
                        &thelock->lock_released,
                        &thelock->mut, &abs);
                    if (status == 1) {
                        break;
                    }
                    if (status == ETIMEDOUT)
                        break;
                    CHECK_STATUS_PTHREAD("pthread_cond_timedwait");
                }
                else {
                    status = pthread_cond_wait(
                        &thelock->lock_released,
                        &thelock->mut);
                    CHECK_STATUS_PTHREAD("pthread_cond_wait");
                }

                if (intr_flag && status == 0 && thelock->locked) {
                    /* We were woken up, but didn't get the lock.  We probably received
                     * a signal.  Return PY_LOCK_INTR to allow the caller to handle
                     * it and retry.  */
                    success = PY_LOCK_INTR;
                    break;
                }
                else if (status == 0 && !thelock->locked) {
                    success = PY_LOCK_ACQUIRED;
                }
            }
        }
        if (success == PY_LOCK_ACQUIRED) thelock->locked = 1;
        status = pthread_mutex_unlock( &thelock->mut );
        CHECK_STATUS_PTHREAD("pthread_mutex_unlock[1]");
    }

    if (error) success = PY_LOCK_FAILURE;
    dprintf(("PyThread_acquire_lock_timed(%p, %lld, %d) -> %d\n",
             lock, microseconds, intr_flag, success));
    return success;
}

void
PyThread_release_lock(PyThread_type_lock lock)
{
    pthread_lock *thelock = (pthread_lock *)lock;
    int status, error = 0;

    (void) error; /* silence unused-but-set-variable warning */
    dprintf(("PyThread_release_lock(%p) called\n", lock));

    status = pthread_mutex_lock( &thelock->mut );
    CHECK_STATUS_PTHREAD("pthread_mutex_lock[3]");

    thelock->locked = 0;

    /* wake up someone (anyone, if any) waiting on the lock */
    status = pthread_cond_signal( &thelock->lock_released );
    CHECK_STATUS_PTHREAD("pthread_cond_signal");

    status = pthread_mutex_unlock( &thelock->mut );
    CHECK_STATUS_PTHREAD("pthread_mutex_unlock[3]");
}

#endif /* USE_SEMAPHORES */

int
_PyThread_at_fork_reinit(PyThread_type_lock *lock)
{
    PyThread_type_lock new_lock = PyThread_allocate_lock();
    if (new_lock == NULL) {
        return -1;
    }

    /* bpo-6721, bpo-40089: The old lock can be in an inconsistent state.
       fork() can be called in the middle of an operation on the lock done by
       another thread. So don't call PyThread_free_lock(*lock).

       Leak memory on purpose. Don't release the memory either since the
       address of a mutex is relevant. Putting two mutexes at the same address
       can lead to problems. */

    *lock = new_lock;
    return 0;
}
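
/* Usage sketch (illustrative; `some_module_lock` is a hypothetical lock that
   the child process re-creates after fork()):

       static PyThread_type_lock some_module_lock;

       static void reinit_after_fork_in_child(void)
       {
           if (_PyThread_at_fork_reinit(&some_module_lock) < 0) {
               // allocation failed; the old (leaked) lock pointer is unchanged
           }
       }

   CPython calls helpers like this from its own after-fork machinery; the
   old lock object is deliberately leaked for the reasons given above. */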

int
PyThread_acquire_lock(PyThread_type_lock lock, int waitflag)
{
    return PyThread_acquire_lock_timed(lock, waitflag ? -1 : 0, /*intr_flag=*/0);
}

/* set the thread stack size.
 * Return 0 if size is valid, -1 if size is invalid,
 * -2 if setting stack size is not supported.
 */
static int
_pythread_pthread_set_stacksize(size_t size)
{
#if defined(THREAD_STACK_SIZE)
    pthread_attr_t attrs;
    size_t tss_min;
    int rc = 0;
#endif

    /* set to default */
    if (size == 0) {
        _PyInterpreterState_GET()->threads.stacksize = 0;
        return 0;
    }

#if defined(THREAD_STACK_SIZE)
#if defined(PTHREAD_STACK_MIN)
    tss_min = PTHREAD_STACK_MIN > THREAD_STACK_MIN ? PTHREAD_STACK_MIN
                                                   : THREAD_STACK_MIN;
#else
    tss_min = THREAD_STACK_MIN;
#endif
    if (size >= tss_min) {
        /* validate stack size by setting thread attribute */
        if (pthread_attr_init(&attrs) == 0) {
            rc = pthread_attr_setstacksize(&attrs, size);
            pthread_attr_destroy(&attrs);
            if (rc == 0) {
                _PyInterpreterState_GET()->threads.stacksize = size;
                return 0;
            }
        }
    }
    return -1;
#else
    return -2;
#endif
}

#define THREAD_SET_STACKSIZE(x) _pythread_pthread_set_stacksize(x)


/* Thread Local Storage (TLS) API

   This API is DEPRECATED since Python 3.7.  See PEP 539 for details.
*/

/* Issue #25658: On platforms where the native TLS key is defined in a way
   that cannot be safely cast to int, PyThread_create_key immediately returns
   a failure status and the other TLS functions are all no-ops.  This makes
   it clear that the old API is not supported on platforms where it cannot be
   used reliably, and that no effort will be made to add such support.

   Note: PTHREAD_KEY_T_IS_COMPATIBLE_WITH_INT will be unnecessary after
   removing this API.
*/

int
PyThread_create_key(void)
{
#ifdef PTHREAD_KEY_T_IS_COMPATIBLE_WITH_INT
    pthread_key_t key;
    int fail = pthread_key_create(&key, NULL);
    if (fail)
        return -1;
    if (key > INT_MAX) {
        /* Issue #22206: handle integer overflow */
        pthread_key_delete(key);
        errno = ENOMEM;
        return -1;
    }
    return (int)key;
#else
    return -1;  /* never return a valid key value. */
#endif
}

void
PyThread_delete_key(int key)
{
#ifdef PTHREAD_KEY_T_IS_COMPATIBLE_WITH_INT
    pthread_key_delete(key);
#endif
}

void
PyThread_delete_key_value(int key)
{
#ifdef PTHREAD_KEY_T_IS_COMPATIBLE_WITH_INT
    pthread_setspecific(key, NULL);
#endif
}

int
PyThread_set_key_value(int key, void *value)
{
#ifdef PTHREAD_KEY_T_IS_COMPATIBLE_WITH_INT
    int fail = pthread_setspecific(key, value);
    return fail ? -1 : 0;
#else
    return -1;
#endif
}

void *
PyThread_get_key_value(int key)
{
#ifdef PTHREAD_KEY_T_IS_COMPATIBLE_WITH_INT
    return pthread_getspecific(key);
#else
    return NULL;
#endif
}


void
PyThread_ReInitTLS(void)
{
}


/* Thread Specific Storage (TSS) API

   Platform-specific components of TSS API implementation.
*/

int
PyThread_tss_create(Py_tss_t *key)
{
    assert(key != NULL);
    /* If the key has already been created, the function silently succeeds. */
    if (key->_is_initialized) {
        return 0;
    }

    int fail = pthread_key_create(&(key->_key), NULL);
    if (fail) {
        return -1;
    }
    key->_is_initialized = 1;
    return 0;
}

void
PyThread_tss_delete(Py_tss_t *key)
{
    assert(key != NULL);
    /* If the key has not been created, the function silently does nothing. */
    if (!key->_is_initialized) {
        return;
    }

    pthread_key_delete(key->_key);
    /* pthread does not define an invalid value for the key, so only clear
       the flag. */
    key->_is_initialized = 0;
}

int
PyThread_tss_set(Py_tss_t *key, void *value)
{
    assert(key != NULL);
    int fail = pthread_setspecific(key->_key, value);
    return fail ? -1 : 0;
}

void *
PyThread_tss_get(Py_tss_t *key)
{
    assert(key != NULL);
    return pthread_getspecific(key->_key);
}
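
/* Usage sketch for the TSS API (illustrative; `my_key` and the stored value
   are hypothetical):

       static Py_tss_t my_key = Py_tss_NEEDS_INIT;

       if (PyThread_tss_create(&my_key) != 0) {
           // creation failed
       }
       PyThread_tss_set(&my_key, some_pointer);   // per-thread value
       void *value = PyThread_tss_get(&my_key);   // NULL if never set
       PyThread_tss_delete(&my_key);

   Unlike the deprecated TLS API above, keys live in a Py_tss_t and are not
   assumed to be castable to int. */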