glibc-2.38/sysdeps/nptl/fork.h
/* System specific fork hooks.  Linux version.
   Copyright (C) 2021-2023 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#ifndef _FORK_H
#define _FORK_H

#include <assert.h>
#include <kernel-posix-timers.h>
#include <ldsodefs.h>
#include <list.h>
#include <mqueue.h>
#include <pthreadP.h>
#include <sysdep.h>
static inline void
fork_system_setup (void)
{
  /* See __pthread_once.  */
  __fork_generation += __PTHREAD_ONCE_FORK_GEN_INCR;
}
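
#if 0
/* Illustration only, not part of glibc: what the generation bump buys
   us.  If fork happens while another thread is inside a once routine,
   the child's copy of the pthread_once_t still says "initialization in
   progress", but the thread performing it no longer exists there.
   Because __fork_generation has changed in the child, pthread_once
   detects the stale state and runs the routine itself instead of
   blocking forever.  */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_once_t once = PTHREAD_ONCE_INIT;

static void
init (void)
{
  sleep (1);			/* The fork happens while we are in here.  */
}

static void *
worker (void *arg)
{
  pthread_once (&once, init);
  return NULL;
}

int
main (void)
{
  pthread_t th;
  pthread_create (&th, NULL, worker, NULL);
  usleep (100000);		/* Give the worker time to enter init.  */
  if (fork () == 0)
    {
      pthread_once (&once, init);	/* Completes; no deadlock.  */
      puts ("child done");
      _exit (0);
    }
  pthread_join (th, NULL);
  return 0;
}
#endif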

static void
fork_system_setup_after_fork (void)
{
  /* There is one thread running.  */
  __nptl_nthreads = 1;

  /* Initialize thread library locks.  */
  GL (dl_stack_cache_lock) = LLL_LOCK_INITIALIZER;
  __default_pthread_attr_lock = LLL_LOCK_INITIALIZER;

  call_function_static_weak (__mq_notify_fork_subprocess);
  call_function_static_weak (__timer_fork_subprocess);
}
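
#if 0
/* Illustration only, a sketch of the weak-call pattern behind
   call_function_static_weak above: the mqueue and POSIX timer hooks
   live in code that may not be linked into every (static) binary, so
   they are referenced weakly and called only if a definition is
   actually present.  optional_hook is a made-up name for this sketch.  */
#include <stdio.h>

/* Weak reference: the address is NULL if nothing defines it.  */
extern void optional_hook (void) __attribute__ ((weak));

int
main (void)
{
  if (optional_hook != NULL)
    optional_hook ();
  else
    puts ("hook not linked in, skipped");
  return 0;
}
#endif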

/* After a fork call the memory allocation in the child is the same as
   in the parent, but only one thread is running.  All stacks except
   that of the running thread are no longer in use, so we have to
   recycle them.  */
static void
reclaim_stacks (void)
{
  struct pthread *self = (struct pthread *) THREAD_SELF;

  /* No locking necessary.  The caller's stack is the only one still in
     use.  But we have to be aware that we might have interrupted a
     list operation.  */
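
  /* GL (dl_in_flight_stack) records a stack-list element that was in
     the middle of being linked or unlinked when the fork happened (see
     stack_list_add and stack_list_del): bit 0 is set for an
     interrupted add and clear for an interrupted delete.  */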

  if (GL (dl_in_flight_stack) != 0)
    {
      bool add_p = GL (dl_in_flight_stack) & 1;
      list_t *elem = (list_t *) (GL (dl_in_flight_stack) & ~(uintptr_t) 1);

      if (add_p)
	{
	  /* We always add at the beginning of the list.  So in this case we
	     only need to check the beginning of these lists to see if the
	     pointers at the head of the list are inconsistent.  */
	  list_t *l = NULL;

	  if (GL (dl_stack_used).next->prev != &GL (dl_stack_used))
	    l = &GL (dl_stack_used);
	  else if (GL (dl_stack_cache).next->prev != &GL (dl_stack_cache))
	    l = &GL (dl_stack_cache);

	  if (l != NULL)
	    {
	      assert (l->next->prev == elem);
	      elem->next = l->next;
	      elem->prev = l;
	      l->next = elem;
	    }
	}
      else
	{
	  /* We can simply always replay the delete operation.  */
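	  /* Replaying is safe because list_del only writes to the two
	     neighbouring nodes; redoing both stores yields a consistent
	     list even if one of them had already happened before the
	     fork.  */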
	  elem->next->prev = elem->prev;
	  elem->prev->next = elem->next;
	}

      GL (dl_in_flight_stack) = 0;
    }

  /* Mark all stacks except the still running one as free.  */
  list_t *runp;
  list_for_each (runp, &GL (dl_stack_used))
    {
      struct pthread *curp = list_entry (runp, struct pthread, list);
      if (curp != self)
	{
	  /* This marks the stack as free.  */
	  curp->tid = 0;

	  /* Account for the size of the stack.  */
	  GL (dl_stack_cache_actsize) += curp->stackblock_size;

	  if (curp->specific_used)
	    {
	      /* Clear the thread-specific data.  */
	      memset (curp->specific_1stblock, '\0',
		      sizeof (curp->specific_1stblock));

	      curp->specific_used = false;

	      for (size_t cnt = 1; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
		if (curp->specific[cnt] != NULL)
		  {
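		    /* A second-level block has as many entries as the
		       first-level block, so the size of
		       specific_1stblock is the right amount to clear.  */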
		    memset (curp->specific[cnt], '\0',
			    sizeof (curp->specific_1stblock));

		    /* We have allocated the block which we do not
		       free here so re-set the bit.  */
		    curp->specific_used = true;
		  }
	    }
	}
    }

  /* Add the stacks of all running threads to the cache.  */
  list_splice (&GL (dl_stack_used), &GL (dl_stack_cache));

  /* Remove the entry for the current thread from the cache list and
     add it back to the list of running threads.  Which of the two
     lists it goes on is decided by the user_stack flag.  */
  list_del (&self->list);

  /* Re-initialize the lists for all the threads.  */
  INIT_LIST_HEAD (&GL (dl_stack_used));
  INIT_LIST_HEAD (&GL (dl_stack_user));
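
  /* Threads on user-supplied stacks (pthread_attr_setstack) live on
     GL (dl_stack_user), since their memory is not glibc's to cache or
     unmap; glibc-allocated stacks go back on GL (dl_stack_used).  */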
  if (__glibc_unlikely (THREAD_GETMEM (self, user_stack)))
    list_add (&self->list, &GL (dl_stack_user));
  else
    list_add (&self->list, &GL (dl_stack_used));
}
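
#if 0
/* Illustration only, not part of glibc: why the child recycles peer
   stacks.  fork duplicates only the calling thread, so in the child
   below the worker's stack mapping still exists but no thread runs on
   it.  reclaim_stacks (run on the child side of fork when the parent
   was multithreaded) puts such stacks back on the cache, letting the
   child's pthread_create reuse the memory instead of leaking it.  */
#include <pthread.h>
#include <sys/wait.h>
#include <unistd.h>

static void *
worker (void *arg)
{
  pause ();			/* Never returns; only the forking
				   thread survives into the child.  */
  return NULL;
}

int
main (void)
{
  pthread_t th;
  pthread_create (&th, NULL, worker, NULL);

  pid_t pid = fork ();
  if (pid == 0)
    {
      /* The worker does not exist here; its cached stack can be
	 handed to this new thread.  */
      pthread_create (&th, NULL, worker, NULL);
      _exit (0);
    }
  waitpid (pid, NULL, 0);
  return 0;
}
#endif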

#endif