gcc-13.2.0/libbacktrace/mmap.c
/* mmap.c -- Memory allocation with mmap.
   Copyright (C) 2012-2023 Free Software Foundation, Inc.
   Written by Ian Lance Taylor, Google.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

    (1) Redistributions of source code must retain the above copyright
    notice, this list of conditions and the following disclaimer.

    (2) Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in
    the documentation and/or other materials provided with the
    distribution.

    (3) The name of the author may not be used to
    endorse or promote products derived from this software without
    specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.  */

#include "config.h"

#include <errno.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "backtrace.h"
#include "internal.h"

#ifndef HAVE_DECL_GETPAGESIZE
extern int getpagesize (void);
#endif

/* Memory allocation on systems that provide anonymous mmap.  This
   permits the backtrace functions to be invoked from a signal
   handler, assuming that mmap is async-signal safe.  */

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

#ifndef MAP_FAILED
#define MAP_FAILED ((void *)-1)
#endif
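
/* The fallbacks above cover older systems: BSD-derived headers that
   only define MAP_ANON, and <sys/mman.h> variants that lack
   MAP_FAILED.  */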

/* A list of free memory blocks.  */

struct backtrace_freelist_struct
{
  /* Next on list.  */
  struct backtrace_freelist_struct *next;
  /* Size of this block, including this structure.  */
  size_t size;
};
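
/* A freed block can only be tracked if it is at least as large as
   this structure (16 bytes on a typical 64-bit target);
   backtrace_free_locked below simply leaks anything smaller.  */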

/* Free memory allocated by backtrace_alloc.  */

static void
backtrace_free_locked (struct backtrace_state *state, void *addr, size_t size)
{
  /* Just leak small blocks.  We don't have to be perfect.  Don't put
     more than 16 entries on the free list, to avoid wasting time
     searching when allocating a block.  If we have more than 16
     entries, leak the smallest entry.  */

  if (size >= sizeof (struct backtrace_freelist_struct))
    {
      size_t c;
      struct backtrace_freelist_struct **ppsmall;
      struct backtrace_freelist_struct **pp;
      struct backtrace_freelist_struct *p;

      c = 0;
      ppsmall = NULL;
      for (pp = &state->freelist; *pp != NULL; pp = &(*pp)->next)
        {
          if (ppsmall == NULL || (*pp)->size < (*ppsmall)->size)
            ppsmall = pp;
          ++c;
        }
      if (c >= 16)
        {
          if (size <= (*ppsmall)->size)
            return;
          *ppsmall = (*ppsmall)->next;
        }

      p = (struct backtrace_freelist_struct *) addr;
      p->next = state->freelist;
      p->size = size;
      state->freelist = p;
    }
}
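
/* For example, if the free list already holds 16 entries when a
   64-byte block is freed, the smallest existing entry is evicted to
   make room provided it is under 64 bytes; otherwise the new block
   itself is leaked.  */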

/* Allocate memory like malloc.  If ERROR_CALLBACK is NULL, don't
   report an error.  */

void *
backtrace_alloc (struct backtrace_state *state,
                 size_t size, backtrace_error_callback error_callback,
                 void *data)
{
  void *ret;
  int locked;
  struct backtrace_freelist_struct **pp;
  size_t pagesize;
  size_t asksize;
  void *page;

  ret = NULL;

  /* If we can acquire the lock, then see if there is space on the
     free list.  If we can't acquire the lock, drop straight into
     using mmap.  __sync_lock_test_and_set returns the old state of
     the lock, so we have acquired it if it returns 0.  */

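  /* Never blocking here is what keeps this allocator usable from a
     signal handler: if the handler interrupts a thread that holds the
     lock, it falls through to mmap below instead of deadlocking.  */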
  if (!state->threaded)
    locked = 1;
  else
    locked = __sync_lock_test_and_set (&state->lock_alloc, 1) == 0;

  if (locked)
    {
      for (pp = &state->freelist; *pp != NULL; pp = &(*pp)->next)
        {
          if ((*pp)->size >= size)
            {
              struct backtrace_freelist_struct *p;

              p = *pp;
              *pp = p->next;

              /* Round for alignment; we assume that no type we care about
                 is more than 8 bytes.  */
              size = (size + 7) & ~ (size_t) 7;
              if (size < p->size)
                backtrace_free_locked (state, (char *) p + size,
                                       p->size - size);

              ret = (void *) p;

              break;
            }
        }

      if (state->threaded)
        __sync_lock_release (&state->lock_alloc);
    }

  if (ret == NULL)
    {
      /* Allocate a new page.  */

      pagesize = getpagesize ();
      asksize = (size + pagesize - 1) & ~ (pagesize - 1);
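      /* For example, with a 4096-byte page, a 5000-byte request asks
         mmap for 8192 bytes; the tail beyond the rounded SIZE is
         returned to the free list below.  */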
      page = mmap (NULL, asksize, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (page == MAP_FAILED)
        {
          if (error_callback)
            error_callback (data, "mmap", errno);
        }
      else
        {
          size = (size + 7) & ~ (size_t) 7;
          if (size < asksize)
            backtrace_free (state, (char *) page + size, asksize - size,
                            error_callback, data);

          ret = page;
        }
    }

  return ret;
}
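
/* Illustrative sketch (an assumption for exposition, not code from
   this file): the kind of error callback a caller might pass in.
   The backtrace_error_callback type is declared in backtrace.h.

     static void
     my_error_callback (void *data, const char *msg, int errnum)
     {
       fprintf (stderr, "libbacktrace: %s", msg);
       if (errnum > 0)
         fprintf (stderr, ": %s", strerror (errnum));
       fputc ('\n', stderr);
     }

   Note that fprintf and strerror are not async-signal safe, so a
   callback meant to run inside a signal handler would need a more
   careful implementation.  A NULL ERROR_CALLBACK is also accepted
   here; allocation failure is then reported only by the NULL return
   value.  */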

/* Free memory allocated by backtrace_alloc.  */

void
backtrace_free (struct backtrace_state *state, void *addr, size_t size,
                backtrace_error_callback error_callback ATTRIBUTE_UNUSED,
                void *data ATTRIBUTE_UNUSED)
{
  int locked;

  /* If we are freeing a large aligned block, just release it back to
     the system.  This case arises when growing a vector for a large
     binary with lots of debug info.  Calling munmap here may cause us
     to call mmap again if there is also a large shared library; we
     just live with that.  */
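  /* 16 * 4096 is 64 KiB, i.e. 16 pages on a typical 4 KiB-page
     system.  */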
  if (size >= 16 * 4096)
    {
      size_t pagesize;

      pagesize = getpagesize ();
      if (((uintptr_t) addr & (pagesize - 1)) == 0
          && (size & (pagesize - 1)) == 0)
        {
          /* If munmap fails for some reason, just add the block to
             the freelist.  */
          if (munmap (addr, size) == 0)
            return;
        }
    }

  /* If we can acquire the lock, add the new space to the free list.
     If we can't acquire the lock, just leak the memory.
     __sync_lock_test_and_set returns the old state of the lock, so we
     have acquired it if it returns 0.  */

  if (!state->threaded)
    locked = 1;
  else
    locked = __sync_lock_test_and_set (&state->lock_alloc, 1) == 0;

  if (locked)
    {
      backtrace_free_locked (state, addr, size);

      if (state->threaded)
        __sync_lock_release (&state->lock_alloc);
    }
}

/* Grow VEC by SIZE bytes.  */

void *
backtrace_vector_grow (struct backtrace_state *state, size_t size,
                       backtrace_error_callback error_callback,
                       void *data, struct backtrace_vector *vec)
{
  void *ret;

  if (size > vec->alc)
    {
      size_t pagesize;
      size_t alc;
      void *base;

      pagesize = getpagesize ();
      alc = vec->size + size;
      if (vec->size == 0)
        alc = 16 * size;
      else if (alc < pagesize)
        {
          alc *= 2;
          if (alc > pagesize)
            alc = pagesize;
        }
      else
        {
          alc *= 2;
          alc = (alc + pagesize - 1) & ~ (pagesize - 1);
        }
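      /* For example, with a 4096-byte page, the first grow for a
         24-byte element reserves 16 * 24 = 384 bytes; later grows
         double the requirement, capping at one page until the vector
         outgrows a page, after which the doubled amount is rounded up
         to a whole number of pages.  */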
      base = backtrace_alloc (state, alc, error_callback, data);
      if (base == NULL)
        return NULL;
      if (vec->base != NULL)
        {
          memcpy (base, vec->base, vec->size);
          backtrace_free (state, vec->base, vec->size + vec->alc,
                          error_callback, data);
        }
      vec->base = base;
      vec->alc = alc - vec->size;
    }

  ret = (char *) vec->base + vec->size;
  vec->size += size;
  vec->alc -= size;
  return ret;
}

/* Finish the current allocation on VEC.  */

void *
backtrace_vector_finish (struct backtrace_state *state ATTRIBUTE_UNUSED,
                         struct backtrace_vector *vec,
                         backtrace_error_callback error_callback ATTRIBUTE_UNUSED,
                         void *data ATTRIBUTE_UNUSED)
{
  void *ret;

  ret = vec->base;
  vec->base = (char *) vec->base + vec->size;
  vec->size = 0;
  return ret;
}
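
/* Illustrative usage sketch (an assumption for exposition, not code
   from this file): building one object in a vector and then freezing
   it, after which later calls append new data past it.

     struct backtrace_vector vec;
     char *p;
     const char *s;

     memset (&vec, 0, sizeof vec);
     p = (char *) backtrace_vector_grow (state, 4, error_callback,
                                         data, &vec);
     if (p != NULL)
       {
         memcpy (p, "abc", 4);
         s = (const char *) backtrace_vector_finish (state, &vec,
                                                     error_callback,
                                                     data);
       }

   S now points at permanent storage for the finished string, and VEC
   is ready to accumulate the next object.  */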

/* Release any extra space allocated for VEC.  */

int
backtrace_vector_release (struct backtrace_state *state,
                          struct backtrace_vector *vec,
                          backtrace_error_callback error_callback,
                          void *data)
{
  size_t size;
  size_t alc;
  size_t aligned;

  /* Make sure that the block that we free is aligned on an 8-byte
     boundary.  */
  size = vec->size;
  alc = vec->alc;
  aligned = (size + 7) & ~ (size_t) 7;
  alc -= aligned - size;

  backtrace_free (state, (char *) vec->base + aligned, alc,
                  error_callback, data);
  vec->alc = 0;
  if (vec->size == 0)
    vec->base = NULL;
  return 1;
}
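
/* This mmap-based backtrace_vector_release always succeeds and
   returns 1; the return value exists because the malloc-based
   counterpart in alloc.c can fail and return 0.  */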