glibc-2.38/sysdeps/htl/pt-cond-timedwait.c

/* Wait on a condition.  Generic version.
   Copyright (C) 2000-2023 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library;  if not, see
   <https://www.gnu.org/licenses/>.  */

#include <pthread.h>

#include <pt-internal.h>
#include <pthreadP.h>
#include <time.h>

extern int __pthread_cond_timedwait_internal (pthread_cond_t *cond,
					      pthread_mutex_t *mutex,
					      clockid_t clockid,
					      const struct timespec *abstime);

int
__pthread_cond_timedwait (pthread_cond_t *cond,
			  pthread_mutex_t *mutex,
			  const struct timespec *abstime)
{
  return __pthread_cond_timedwait_internal (cond, mutex, -1, abstime);
}

weak_alias (__pthread_cond_timedwait, pthread_cond_timedwait);

int
__pthread_cond_clockwait (pthread_cond_t *cond,
			  pthread_mutex_t *mutex,
			  clockid_t clockid,
			  const struct timespec *abstime)
{
  return __pthread_cond_timedwait_internal (cond, mutex, clockid, abstime);
}

weak_alias (__pthread_cond_clockwait, pthread_cond_clockwait);
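
/* Illustrative only: a minimal caller-side sketch of the two entry points
   above, showing how a deadline is expressed as an absolute time on an
   explicitly chosen clock.  The predicate, mutex, condition variable and
   deadline are hypothetical, and a standalone build would also need
   <errno.h>; this is not part of the library.  */
#if 0
static int ready;			/* Hypothetical predicate.  */
static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t c = PTHREAD_COND_INITIALIZER;

static int
wait_up_to_five_seconds (void)
{
  struct timespec deadline;
  int err = 0;

  /* Compute an absolute deadline on CLOCK_MONOTONIC and pass the same
     clock to pthread_cond_clockwait.  pthread_cond_timedwait would instead
     measure against the clock recorded in the condition's attributes (the
     clockid == -1 path above).  */
  clock_gettime (CLOCK_MONOTONIC, &deadline);
  deadline.tv_sec += 5;

  pthread_mutex_lock (&m);
  /* Re-check the predicate after every wakeup, spurious or not.  */
  while (!ready && err != ETIMEDOUT)
    err = pthread_cond_clockwait (&c, &m, CLOCK_MONOTONIC, &deadline);
  pthread_mutex_unlock (&m);
  return err;
}
#endif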

struct cancel_ctx
{
  struct __pthread *wakeup;
  pthread_cond_t *cond;
};

static void
cancel_hook (void *arg)
{
  struct cancel_ctx *ctx = arg;
  struct __pthread *wakeup = ctx->wakeup;
  pthread_cond_t *cond = ctx->cond;
  int unblock;

  __pthread_spin_wait (&cond->__lock);
  /* The thread only needs to be awakened if it's blocking or about to block.
     If it was already unblocked, it is no longer queued.  */
  unblock = wakeup->prevp != NULL;
  if (unblock)
    __pthread_dequeue (wakeup);
  __pthread_spin_unlock (&cond->__lock);

  if (unblock)
    __pthread_wakeup (wakeup);
}
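
/* Illustrative only: a simplified, self-contained model of the intrusive
   wait queue assumed above (the real helpers live in pt-internal.h).  The
   point is the convention cancel_hook relies on: a dequeued node has
   prevp == NULL, so "prevp != NULL" means the thread is still queued and
   therefore still worth waking.  The names below are hypothetical.  */
#if 0
struct node
{
  struct node *next;
  struct node **prevp;		/* Points at whatever points at us.  */
};

static void
enqueue (struct node **head, struct node *n)
{
  n->next = *head;
  if (n->next != NULL)
    n->next->prevp = &n->next;
  n->prevp = head;
  *head = n;
}

static void
dequeue (struct node *n)
{
  if (n->next != NULL)
    n->next->prevp = n->prevp;
  *n->prevp = n->next;
  n->prevp = NULL;		/* Mark as no longer queued.  */
}
#endif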

/* Block on condition variable COND until ABSTIME.  As a GNU
   extension, if ABSTIME is NULL, then wait forever.  MUTEX should be
   held by the calling thread.  On return, MUTEX will be held by the
   calling thread.  */
int
__pthread_cond_timedwait_internal (pthread_cond_t *cond,
				   pthread_mutex_t *mutex,
				   clockid_t clockid,
				   const struct timespec *abstime)
{
  error_t err;
  int cancelled, oldtype, drain;
  clockid_t clock_id;

  if (clockid != -1)
    clock_id = clockid;
  else
    clock_id = __pthread_default_condattr.__clock;

  if (abstime && ! valid_nanoseconds (abstime->tv_nsec))
    return EINVAL;

  err = __pthread_mutex_checklocked (mutex);
  if (err)
    return err;

  struct __pthread *self = _pthread_self ();
  struct cancel_ctx ctx;
  ctx.wakeup = self;
  ctx.cond = cond;

  /* Test for a pending cancellation request, switch to deferred mode for
     safer resource handling, and prepare the hook to call in case we're
     cancelled while blocking.  Once CANCEL_LOCK is released, the cancellation
     hook can be called by another thread at any time.  Whatever happens,
     this function must exit with MUTEX locked.

     This function contains inline implementations of pthread_testcancel and
     pthread_setcanceltype to reduce locking overhead.  */
  __pthread_mutex_lock (&self->cancel_lock);
  cancelled = (self->cancel_state == PTHREAD_CANCEL_ENABLE)
      && self->cancel_pending;

  if (cancelled)
    {
      __pthread_mutex_unlock (&self->cancel_lock);
      __pthread_exit (PTHREAD_CANCELED);
    }

  self->cancel_hook = cancel_hook;
  self->cancel_hook_arg = &ctx;
  oldtype = self->cancel_type;

  if (oldtype != PTHREAD_CANCEL_DEFERRED)
    self->cancel_type = PTHREAD_CANCEL_DEFERRED;

  /* Add ourselves to the list of waiters.  This is done while setting
     the cancellation hook to simplify the cancellation procedure, i.e.
     if the thread is queued, it can be cancelled; otherwise it is
     already unblocked and progressing on the return path.  */
  __pthread_spin_wait (&cond->__lock);
  __pthread_enqueue (&cond->__queue, self);
  if (cond->__attr != NULL && clockid == -1)
    clock_id = cond->__attr->__clock;
  __pthread_spin_unlock (&cond->__lock);

  __pthread_mutex_unlock (&self->cancel_lock);

  /* Increase the waiter reference count.  Relaxed MO is sufficient because
     we only need to synchronize when decrementing the reference count.
     However, we need to have the mutex held to prevent concurrency with
     pthread_cond_destroy.  */
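  /* Note: the low bit of __wrefs is reserved as the destruction wake-request
     flag (see the decrement below), so each waiter is counted in units of 2,
     hence the increment by 2.  */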
  atomic_fetch_add_relaxed (&cond->__wrefs, 2);

  /* Release MUTEX before blocking.  */
  __pthread_mutex_unlock (mutex);

  /* Block the thread.  */
  if (abstime != NULL)
    err = __pthread_timedblock (self, abstime, clock_id);
  else
    {
      err = 0;
      __pthread_block (self);
    }

  __pthread_spin_wait (&cond->__lock);
  if (self->prevp == NULL)
    {
      /* Another thread removed us from the list of waiters, which means a
         wakeup message has been sent.  It was either consumed while we were
         blocking, or queued after we timed out and before we acquired the
         condition lock, in which case the message queue must be drained.  */
      if (!err)
	drain = 0;
      else
	{
	  assert (err == ETIMEDOUT);
	  drain = 1;
	}
    }
  else
    {
      /* We're still in the list of waiters.  No one attempted to wake us up,
         i.e. we timed out.  */
      assert (err == ETIMEDOUT);
      __pthread_dequeue (self);
      drain = 0;
    }
  __pthread_spin_unlock (&cond->__lock);

  /* If destruction is pending (i.e., the wake-request flag is nonzero) and we
     are the last waiter (prior value of __wrefs was (1 << 1) | 1), then wake
     any threads waiting in pthread_cond_destroy.  Release MO to synchronize
     with these threads.  Don't bother clearing the wake-up request flag.  */
  if ((atomic_fetch_add_release (&cond->__wrefs, -2)) == 3)
    __gsync_wake (__mach_task_self (), (vm_offset_t) &cond->__wrefs, 0, 0);

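  /* Consume the wakeup message that was queued after the timeout, so it
     cannot spuriously terminate a later block on this thread.  */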
  if (drain)
    __pthread_block (self);

  /* We're almost done.  Remove the unblock hook, restore the previous
     cancellation type, and check for a pending cancellation request.  */
  __pthread_mutex_lock (&self->cancel_lock);
  self->cancel_hook = NULL;
  self->cancel_hook_arg = NULL;
  self->cancel_type = oldtype;
  cancelled = (self->cancel_state == PTHREAD_CANCEL_ENABLE)
      && self->cancel_pending;
  __pthread_mutex_unlock (&self->cancel_lock);

  /* Reacquire MUTEX before returning/cancelling.  */
  __pthread_mutex_lock (mutex);

  if (cancelled)
    __pthread_exit (PTHREAD_CANCELED);

  return err;
}
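
/* Illustrative only: a stand-alone model of the __wrefs handshake between
   waiters (above) and pthread_cond_destroy, using C11 atomics and
   placeholder wait/wake primitives instead of gsync.  Bit 0 is the
   destruction wake-request flag; waiters are counted in units of 2.  The
   model_* names are hypothetical, and this is a sketch of the protocol
   described in the comments above, not the actual pt-cond-destroy.c.  */
#if 0
#include <stdatomic.h>

extern void model_wake (atomic_uint *addr);	/* Stand-in for __gsync_wake.  */
extern void model_wait (atomic_uint *addr);	/* Stand-in for a gsync wait.  */

static atomic_uint model_wrefs;

static void
model_waiter_enter (void)
{
  /* Count ourselves in units of 2, leaving bit 0 untouched.  */
  atomic_fetch_add_explicit (&model_wrefs, 2, memory_order_relaxed);
}

static void
model_waiter_exit (void)
{
  /* Prior value 3 == one waiter (1 << 1) plus the flag (1): we are the
     last waiter and destruction is pending, so wake the destroyer.  */
  if (atomic_fetch_add_explicit (&model_wrefs, -2, memory_order_release) == 3)
    model_wake (&model_wrefs);
}

static void
model_destroy (void)
{
  /* Announce destruction by setting bit 0, then wait until the waiter
     count (the remaining bits) drops to zero.  */
  unsigned int w = atomic_fetch_or_explicit (&model_wrefs, 1,
					     memory_order_acquire) | 1;
  while (w >> 1 != 0)
    {
      model_wait (&model_wrefs);
      w = atomic_load_explicit (&model_wrefs, memory_order_acquire);
    }
}
#endif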