glibc-2.38/elf/dl-reloc.c
/* Relocate a shared object and resolve its references to other loaded objects.
   Copyright (C) 1995-2023 Free Software Foundation, Inc.
   Copyright The GNU Toolchain Authors.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <errno.h>
#include <libintl.h>
#include <stdlib.h>
#include <unistd.h>
#include <ldsodefs.h>
#include <sys/mman.h>
#include <sys/param.h>
#include <sys/types.h>
#include <_itoa.h>
#include <libc-pointer-arith.h>
#include "dynamic-link.h"

/* Statistics function.  */
#ifdef SHARED
# define bump_num_cache_relocations() ++GL(dl_num_cache_relocations)
#else
# define bump_num_cache_relocations() ((void) 0)
#endif
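/* The counter bumped here feeds the runtime linker statistics printed
   for LD_DEBUG=statistics, where it is reported as the number of
   relocations served from the lookup cache.  */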


/* We are trying to perform a static TLS relocation in MAP, but it was
   dynamically loaded.  This can only work if there is enough surplus in
   the static TLS area already allocated for each running thread.  If this
   object's TLS segment is too big to fit, we fail with -1.  If it fits,
   we set MAP->l_tls_offset and return 0.
   A portion of the surplus static TLS can be optionally used to optimize
   dynamic TLS access (with TLSDESC or powerpc TLS optimizations).
   If OPTIONAL is true then TLS is allocated for such optimization and
   the caller must have a fallback in case the optional portion of surplus
   TLS runs out.  If OPTIONAL is false then the entire surplus TLS area is
   considered and the allocation only fails if that runs out.  */
int
_dl_try_allocate_static_tls (struct link_map *map, bool optional)
{
  /* If we've already used the variable with dynamic access, or if the
     alignment requirements are too high, fail.  */
  if (map->l_tls_offset == FORCED_DYNAMIC_TLS_OFFSET
      || map->l_tls_align > GLRO (dl_tls_static_align))
    {
    fail:
      return -1;
    }

#if TLS_TCB_AT_TP
  size_t freebytes = GLRO (dl_tls_static_size) - GL(dl_tls_static_used);
  if (freebytes < TLS_TCB_SIZE)
    goto fail;
  freebytes -= TLS_TCB_SIZE;

  size_t blsize = map->l_tls_blocksize + map->l_tls_firstbyte_offset;
  if (freebytes < blsize)
    goto fail;

  size_t n = (freebytes - blsize) / map->l_tls_align;

  /* Account optional static TLS surplus usage.  */
  size_t use = freebytes - n * map->l_tls_align - map->l_tls_firstbyte_offset;
  if (optional && use > GL(dl_tls_static_optional))
    goto fail;
  else if (optional)
    GL(dl_tls_static_optional) -= use;

  size_t offset = GL(dl_tls_static_used) + use;

  map->l_tls_offset = GL(dl_tls_static_used) = offset;
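
  /* A worked example of the sizing above (illustrative numbers only):
     with freebytes == 2048, l_tls_blocksize == 200, l_tls_align == 64
     and l_tls_firstbyte_offset == 0, n == (2048 - 200) / 64 == 28, so
     use == 2048 - 28 * 64 == 256.  The 56 bytes beyond the block size
     are alignment padding, and the used mark advances to
     GL(dl_tls_static_used) + 256.  */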
#elif TLS_DTV_AT_TP
  /* dl_tls_static_used includes the TCB at the beginning.  */
  size_t offset = (ALIGN_UP(GL(dl_tls_static_used)
                            - map->l_tls_firstbyte_offset,
                            map->l_tls_align)
                   + map->l_tls_firstbyte_offset);
  size_t used = offset + map->l_tls_blocksize;

  if (used > GLRO (dl_tls_static_size))
    goto fail;

  /* Account optional static TLS surplus usage.  */
  size_t use = used - GL(dl_tls_static_used);
  if (optional && use > GL(dl_tls_static_optional))
    goto fail;
  else if (optional)
    GL(dl_tls_static_optional) -= use;

  map->l_tls_offset = offset;
  map->l_tls_firstbyte_offset = GL(dl_tls_static_used);
  GL(dl_tls_static_used) = used;
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif

  /* If the object is not yet relocated we cannot initialize the
     static TLS region.  Delay it.  */
  if (map->l_real->l_relocated)
    {
#ifdef SHARED
      if (__builtin_expect (THREAD_DTV()[0].counter != GL(dl_tls_generation),
                            0))
        /* Update the slot information data for at least the generation of
           the DSO we are allocating data for.  */
        (void) _dl_update_slotinfo (map->l_tls_modid);
#endif

      dl_init_static_tls (map);
    }
  else
    map->l_need_tls_init = 1;

  return 0;
}
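
/* Callers that pass OPTIONAL as true (for example TLSDESC-style
   optimizations) are expected to keep a dynamic-TLS fallback, along
   these lines (an illustrative sketch, not a call site in this file;
   the helper names are hypothetical):

     if (_dl_try_allocate_static_tls (map, true) == 0)
       use_static_tls_access (map);    // got a slice of the optional surplus
     else
       use_dynamic_tls_access (map);   // optional surplus exhausted

   With OPTIONAL false, failure is fatal for the relocation and is turned
   into an error by _dl_allocate_static_tls below.  */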

/* This function intentionally does not return any value but signals errors
   directly, as static TLS should be rare and the code handling it should
   stay out of line as much as possible.  */
void
__attribute_noinline__
_dl_allocate_static_tls (struct link_map *map)
{
  if (map->l_tls_offset == FORCED_DYNAMIC_TLS_OFFSET
      || _dl_try_allocate_static_tls (map, false))
    {
      _dl_signal_error (0, map->l_name, NULL, N_("\
cannot allocate memory in static TLS block"));
    }
}

#if !PTHREAD_IN_LIBC
/* Initialize the static TLS area and DTV for the current (only) thread.
   libpthread implementations should provide their own hook
   to handle all threads.  */
void
_dl_nothread_init_static_tls (struct link_map *map)
{
#if TLS_TCB_AT_TP
  void *dest = (char *) THREAD_SELF - map->l_tls_offset;
#elif TLS_DTV_AT_TP
  void *dest = (char *) THREAD_SELF + map->l_tls_offset + TLS_PRE_TCB_SIZE;
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif
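
  /* The two cases above reflect the two ELF TLS layouts: with
     TLS_TCB_AT_TP (variant II, e.g. x86_64) TLS blocks live below the
     thread pointer, so the offset is subtracted; with TLS_DTV_AT_TP
     (variant I, e.g. AArch64 or PowerPC) they live above the TCB, so
     the offset is added past TLS_PRE_TCB_SIZE.  */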

  /* Initialize the memory: copy the TLS initialization image, then
     zero-fill the rest of the block.  */
  memset (__mempcpy (dest, map->l_tls_initimage, map->l_tls_initimage_size),
          '\0', map->l_tls_blocksize - map->l_tls_initimage_size);
}
#endif /* !PTHREAD_IN_LIBC */
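
/* Symbol lookup for relocations.  An object commonly has several
   consecutive relocations against the same symbol (for example a GOT
   entry plus one or more absolute relocations), so a one-entry cache of
   the last lookup result is kept in L->l_lookup_cache and consulted
   first; only on a miss is the full scope search done via
   _dl_lookup_symbol_x.  */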

static __always_inline lookup_t
resolve_map (lookup_t l, struct r_scope_elem *scope[], const ElfW(Sym) **ref,
             const struct r_found_version *version, unsigned long int r_type)
{
  if (ELFW(ST_BIND) ((*ref)->st_info) == STB_LOCAL
      || __glibc_unlikely (dl_symbol_visibility_binds_local_p (*ref)))
    return l;

  if (__glibc_unlikely (*ref == l->l_lookup_cache.sym)
      && elf_machine_type_class (r_type) == l->l_lookup_cache.type_class)
    {
      bump_num_cache_relocations ();
      *ref = l->l_lookup_cache.ret;
    }
  else
    {
      const int tc = elf_machine_type_class (r_type);
      l->l_lookup_cache.type_class = tc;
      l->l_lookup_cache.sym = *ref;
      const char *undef_name
          = (const char *) D_PTR (l, l_info[DT_STRTAB]) + (*ref)->st_name;
      const struct r_found_version *v = NULL;
      if (version != NULL && version->hash != 0)
        v = version;
      lookup_t lr = _dl_lookup_symbol_x (
          undef_name, l, ref, scope, v, tc,
          DL_LOOKUP_ADD_DEPENDENCY | DL_LOOKUP_FOR_RELOCATE, NULL);
      l->l_lookup_cache.ret = *ref;
      l->l_lookup_cache.value = lr;
    }
  return l->l_lookup_cache.value;
}

/* This macro is used as a callback from the ELF_DYNAMIC_RELOCATE code.  */
#define RESOLVE_MAP resolve_map

#include "dynamic-link.h"
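/* With RESOLVE_MAP defined, this second include of dynamic-link.h
   instantiates the architecture-specific relocation code (the
   elf_machine_* inlines behind ELF_DYNAMIC_RELOCATE), specialized so
   that each relocation resolves symbols through resolve_map above.  */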

void
_dl_relocate_object (struct link_map *l, struct r_scope_elem *scope[],
                     int reloc_mode, int consider_profiling)
{
  struct textrels
  {
    caddr_t start;
    size_t len;
    int prot;
    struct textrels *next;
  } *textrels = NULL;
  /* Initialize it to make the compiler happy.  */
  const char *errstring = NULL;
  int lazy = reloc_mode & RTLD_LAZY;
  int skip_ifunc = reloc_mode & __RTLD_NOIFUNC;

#ifdef SHARED
  bool consider_symbind = false;
  /* If we are auditing, install the same handlers we need for profiling.  */
  if ((reloc_mode & __RTLD_AUDIT) == 0)
    {
      struct audit_ifaces *afct = GLRO(dl_audit);
      for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
        {
          /* Profiling is needed only if PLT hooks are provided.  */
          if (afct->ARCH_LA_PLTENTER != NULL
              || afct->ARCH_LA_PLTEXIT != NULL)
            consider_profiling = 1;
          if (afct->symbind != NULL)
            consider_symbind = true;

          afct = afct->next;
        }
    }
#elif defined PROF
  /* Never use dynamic linker profiling for gprof profiling code.  */
# define consider_profiling 0
#else
# define consider_symbind 0
#endif

  if (l->l_relocated)
    return;

  /* If DT_BIND_NOW is set relocate all references in this object.  We
     do not do this if we are profiling, of course.  */
  // XXX Correct for auditing?
  if (!consider_profiling
      && __builtin_expect (l->l_info[DT_BIND_NOW] != NULL, 0))
    lazy = 0;

  if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_RELOC))
    _dl_debug_printf ("\nrelocation processing: %s%s\n",
                      DSO_FILENAME (l->l_name), lazy ? " (lazy)" : "");

  /* DT_TEXTREL is now in level 2 and might be phased out at some point.
     But we rewrite the DT_FLAGS entry to a DT_TEXTREL entry to make
     testing easier, and therefore it will be available at all times.  */
  if (__glibc_unlikely (l->l_info[DT_TEXTREL] != NULL))
    {
      /* Bletch.  We must make read-only segments writable
         long enough to relocate them.  */
      const ElfW(Phdr) *ph;
      for (ph = l->l_phdr; ph < &l->l_phdr[l->l_phnum]; ++ph)
        if (ph->p_type == PT_LOAD && (ph->p_flags & PF_W) == 0)
          {
            struct textrels *newp;

            newp = (struct textrels *) alloca (sizeof (*newp));
            newp->len = ALIGN_UP (ph->p_vaddr + ph->p_memsz, GLRO(dl_pagesize))
                        - ALIGN_DOWN (ph->p_vaddr, GLRO(dl_pagesize));
            newp->start = PTR_ALIGN_DOWN (ph->p_vaddr, GLRO(dl_pagesize))
                          + (caddr_t) l->l_addr;
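
            /* The rounding above covers every whole page the segment
               touches.  Illustrative example: with p_vaddr == 0x1234,
               p_memsz == 0x2000 and a 0x1000-byte page, the region
               starts at page 0x1000 and len is 0x4000 - 0x1000 ==
               0x3000, i.e. three pages.  */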

            newp->prot = 0;
            if (ph->p_flags & PF_R)
              newp->prot |= PROT_READ;
            if (ph->p_flags & PF_W)
              newp->prot |= PROT_WRITE;
            if (ph->p_flags & PF_X)
              newp->prot |= PROT_EXEC;

            if (__mprotect (newp->start, newp->len, newp->prot|PROT_WRITE) < 0)
              {
                errstring = N_("cannot make segment writable for relocation");
              call_error:
                _dl_signal_error (errno, l->l_name, NULL, errstring);
              }

            newp->next = textrels;
            textrels = newp;
          }
    }

  {
    /* Do the actual relocation of the object's GOT and other data.  */

    ELF_DYNAMIC_RELOCATE (l, scope, lazy, consider_profiling, skip_ifunc);
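
    /* ELF_DYNAMIC_RELOCATE comes from dynamic-link.h and expands to the
       architecture's relocation loops plus the runtime (PLT) setup for
       lazy binding, resolving symbols through the resolve_map callback
       installed via RESOLVE_MAP above.  */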

#ifndef PROF
    if ((consider_profiling || consider_symbind)
        && l->l_info[DT_PLTRELSZ] != NULL)
      {
        /* Allocate the array which will contain the already found
           relocations.  If the shared object lacks a PLT (for example
           if it only contains leaf functions) the l_info[DT_PLTRELSZ]
           entry will be NULL.  */
        size_t sizeofrel = l->l_info[DT_PLTREL]->d_un.d_val == DT_RELA
                           ? sizeof (ElfW(Rela))
                           : sizeof (ElfW(Rel));
        size_t relcount = l->l_info[DT_PLTRELSZ]->d_un.d_val / sizeofrel;
        l->l_reloc_result = calloc (sizeof (l->l_reloc_result[0]), relcount);

        if (l->l_reloc_result == NULL)
          {
            errstring = N_("\
%s: out of memory to store relocation results for %s\n");
            _dl_fatal_printf (errstring, RTLD_PROGNAME, l->l_name);
          }
      }
#endif
  }

  /* Mark the object so we know this work has been done.  */
  l->l_relocated = 1;

  /* Undo the segment protection changes.  */
  while (__builtin_expect (textrels != NULL, 0))
    {
      if (__mprotect (textrels->start, textrels->len, textrels->prot) < 0)
        {
          errstring = N_("cannot restore segment prot after reloc");
          goto call_error;
        }

#ifdef CLEAR_CACHE
      CLEAR_CACHE (textrels->start, textrels->start + textrels->len);
#endif

      textrels = textrels->next;
    }

  /* In case we can protect the data now that the relocations are
     done, do it.  */
  if (l->l_relro_size != 0)
    _dl_protect_relro (l);
}

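/* Make the PT_GNU_RELRO region read-only now that relocation is done.
   The region typically covers the GOT and other data written only
   during startup and relocation.  Both bounds are rounded down to page
   boundaries; in particular the end is rounded down, so a trailing
   partial page stays writable rather than protecting unrelated data
   beyond the region.  */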

void
_dl_protect_relro (struct link_map *l)
{
  ElfW(Addr) start = ALIGN_DOWN((l->l_addr
                                 + l->l_relro_addr),
                                GLRO(dl_pagesize));
  ElfW(Addr) end = ALIGN_DOWN((l->l_addr
                               + l->l_relro_addr
                               + l->l_relro_size),
                              GLRO(dl_pagesize));
  if (start != end
      && __mprotect ((void *) start, end - start, PROT_READ) < 0)
    {
      static const char errstring[] = N_("\
cannot apply additional memory protection after relocation");
      _dl_signal_error (errno, l->l_name, NULL, errstring);
    }
}

void
__attribute_noinline__
_dl_reloc_bad_type (struct link_map *map, unsigned int type, int plt)
{
#define DIGIT(b)	_itoa_lower_digits[(b) & 0xf]
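  /* DIGIT extracts one hex nibble; e.g. DIGIT (0x2a) indexes
     _itoa_lower_digits[0xa] and yields 'a'.  */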

  /* XXX We cannot translate these messages.  */
  static const char msg[2][32
#if __ELF_NATIVE_CLASS == 64
                           + 6
#endif
  ] = { "unexpected reloc type 0x",
        "unexpected PLT reloc type 0x" };
  char msgbuf[sizeof (msg[0])];
  char *cp;

  cp = __stpcpy (msgbuf, msg[plt]);
#if __ELF_NATIVE_CLASS == 64
  if (__builtin_expect(type > 0xff, 0))
    {
      *cp++ = DIGIT (type >> 28);
      *cp++ = DIGIT (type >> 24);
      *cp++ = DIGIT (type >> 20);
      *cp++ = DIGIT (type >> 16);
      *cp++ = DIGIT (type >> 12);
      *cp++ = DIGIT (type >> 8);
    }
#endif
  *cp++ = DIGIT (type >> 4);
  *cp++ = DIGIT (type);
  *cp = '\0';

  _dl_signal_error (0, map->l_name, NULL, msgbuf);
}