(root)/
glibc-2.38/
malloc/
tst-mallocstate.c
/* Emulate Emacs heap dumping to test malloc_set_state.
   Copyright (C) 2001-2023 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <libc-symbols.h>
#include <shlib-compat.h>
#include <support/check.h>
#include <support/support.h>
#include <support/test-driver.h>

#include "malloc.h"

/* Make the compatibility symbols available to this test case.  */
void *malloc_get_state (void);
compat_symbol_reference (libc, malloc_get_state, malloc_get_state, GLIBC_2_0);
int malloc_set_state (void *);
compat_symbol_reference (libc, malloc_set_state, malloc_set_state, GLIBC_2_0);

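/* Illustrative sketch (hypothetical usage, not compiled, and not part
   of the original test): a heap-dumping application such as old Emacs
   would have used this pair of interfaces roughly as follows, calling
   malloc_get_state before writing the heap image and malloc_set_state
   right after startup, before any other allocation.  */
#if 0
static void *dumped_malloc_state;

static void
before_dumping (void)
{
  dumped_malloc_state = malloc_get_state ();
}

static void
after_undumping (void)
{
  if (malloc_set_state (dumped_malloc_state) != 0)
    /* The dumped state was rejected; the application cannot
       continue.  */
    abort ();
}
#endif
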
/* Maximum object size in the fake heap.  */
enum { max_size = 64 };

/* Allocation actions.  These are randomized actions executed on the
   dumped heap (see allocation_tasks below).  They are interspersed
   with operations on the new heap (see heap_activity).  */
enum allocation_action
  {
    action_free,                /* Dumped and freed.  */
    action_realloc,             /* Dumped and realloc'ed.  */
    action_realloc_same,        /* Dumped and realloc'ed, same size.  */
    action_realloc_smaller,     /* Dumped and realloc'ed, shrunk.  */
    action_count
  };

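/* Every combination of object size (1 to max_size bytes) and action
   occurs exactly once among the allocation tasks: initial_allocations
   below derives both the size and the action of a task from its
   index.  */
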
/* Dumped heap.  Initialize it, so that the object is placed into the
   .data section, for increased realism.  The size is an upper bound;
   we use about half of the space.  */
static size_t dumped_heap[action_count * max_size * max_size
                          / sizeof (size_t)] = {1};

/* Next free space in the dumped heap.  Also top of the heap at the
   end of the initialization procedure.  */
static size_t *next_heap_chunk;

/* Copied from malloc.c and hooks.c.  The version is deliberately
   lower than the final version of malloc_set_state.  */
# define NBINS 128
# define MALLOC_STATE_MAGIC   0x444c4541l
# define MALLOC_STATE_VERSION (0 * 0x100l + 4l)
static struct
{
  long magic;
  long version;
  void *av[NBINS * 2 + 2];
  char *sbrk_base;
  int sbrked_mem_bytes;
  unsigned long trim_threshold;
  unsigned long top_pad;
  unsigned int n_mmaps_max;
  unsigned long mmap_threshold;
  int check_action;
  unsigned long max_sbrked_mem;
  unsigned long max_total_mem;
  unsigned int n_mmaps;
  unsigned int max_n_mmaps;
  unsigned long mmapped_mem;
  unsigned long max_mmapped_mem;
  int using_malloc_checking;
  unsigned long max_fast;
  unsigned long arena_test;
  unsigned long arena_max;
  unsigned long narenas;
} save_state =
  {
    .magic = MALLOC_STATE_MAGIC,
    .version = MALLOC_STATE_VERSION,
  };

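/* Only a few of the fields above are filled in by this test: the magic
   and version values (which malloc_set_state checks before accepting
   the blob), and, in init_heap below, sbrk_base, sbrked_mem_bytes and
   av[2] (the dumped top chunk).  The remaining fields stay
   zero-initialized.  */
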
/* Allocate a blob in the fake heap.  */
static void *
dumped_heap_alloc (size_t length)
{
  /* malloc needs three state bits in the size field, so the minimum
     alignment is 8 even on 32-bit architectures.  malloc_set_state
     should be compatible with such heaps even if it currently
     provides more alignment to applications.  */
  enum
  {
    heap_alignment = 8,
    heap_alignment_mask = heap_alignment - 1
  };
  _Static_assert (sizeof (size_t) <= heap_alignment,
                  "size_t compatible with heap alignment");

  /* Need at least this many bytes for metadata and application
     data.  */
  size_t chunk_size = sizeof (size_t) + length;
  /* Round up the allocation size to the heap alignment.  */
  chunk_size += heap_alignment_mask;
  chunk_size &= ~heap_alignment_mask;
  TEST_VERIFY_EXIT ((chunk_size & 3) == 0);
  if (next_heap_chunk == NULL)
    /* Initialize the top of the heap.  Add one word of zero padding,
       to match existing practice.  */
    {
      dumped_heap[0] = 0;
      next_heap_chunk = dumped_heap + 1;
    }
  else
    /* The previous chunk is allocated.  */
    chunk_size |= 1;
  *next_heap_chunk = chunk_size;

  /* User data starts after the chunk header.  */
  void *result = next_heap_chunk + 1;
  next_heap_chunk += chunk_size / sizeof (size_t);

  /* Mark the previous chunk as used.  */
  *next_heap_chunk = 1;
  return result;
}

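/* Illustrative sketch (an assumption about the classic malloc chunk
   layout which the fake heap mimics, not something this test checks
   directly): for a request of LENGTH bytes, dumped_heap_alloc lays out

     next_heap_chunk[0]   chunk size, with bit 0 (PREV_INUSE) set when
                          the preceding chunk is allocated
     next_heap_chunk[1]   first word of user data (the returned pointer)
     ...                  user data, padded to the 8-byte alignment
     following word       set to 1 so the chunk just created appears
                          allocated to its successor

   For example, LENGTH == 5 on a 64-bit target needs 8 + 5 = 13 bytes
   and is rounded up to a 16-byte chunk.  */
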
/* Global seed variable for the random number generator.  */
static unsigned long long global_seed;

/* Simple random number generator.  The numbers are in the range from
   0 to UINT_MAX (inclusive).  */
static unsigned int
rand_next (unsigned long long *seed)
{
  /* Linear congruential generator as used for MMIX.  */
  *seed = *seed * 6364136223846793005ULL + 1442695040888963407ULL;
  return *seed >> 32;
}

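/* For reference: the recurrence above is Knuth's MMIX linear
   congruential generator,

     seed' = seed * 6364136223846793005 + 1442695040888963407  (mod 2^64),

   and rand_next returns the top 32 bits of the new state, which are
   far better distributed than the low-order bits of such a
   generator.  */
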
/* Fill LENGTH bytes at BUFFER with random contents, as determined by
   SEED.  */
static void
randomize_buffer (unsigned char *buffer, size_t length,
                  unsigned long long seed)
{
  for (size_t i = 0; i < length; ++i)
    buffer[i] = rand_next (&seed);
}

/* Dump the buffer to standard output, in hexadecimal.  */
static void
dump_hex (unsigned char *buffer, size_t length)
{
  for (int i = 0; i < length; ++i)
    printf (" %02X", buffer[i]);
}

/* Set to true if an error is encountered.  */
static bool errors = false;

/* Keep track of object allocations.  */
struct allocation
{
  unsigned char *data;
  unsigned int size;
  unsigned int seed;
};

/* Check that the allocation ALLOC has the expected contents; INDEX is
   only used in diagnostics.  */
static void
check_allocation (const struct allocation *alloc, int index)
{
  size_t size = alloc->size;
  if (alloc->data == NULL)
    {
      printf ("error: NULL pointer for allocation of size %zu at %d, seed %u\n",
              size, index, alloc->seed);
      errors = true;
      return;
    }

  unsigned char expected[4096];
  if (size > sizeof (expected))
    {
      printf ("error: invalid allocation size %zu at %d, seed %u\n",
              size, index, alloc->seed);
      errors = true;
      return;
    }
  randomize_buffer (expected, size, alloc->seed);
  if (memcmp (alloc->data, expected, size) != 0)
    {
      printf ("error: allocation %d data mismatch, size %zu, seed %u\n",
              index, size, alloc->seed);
      printf ("  expected:");
      dump_hex (expected, size);
      putc ('\n', stdout);
      printf ("    actual:");
      dump_hex (alloc->data, size);
      putc ('\n', stdout);
      errors = true;
    }
}

/* A heap allocation combined with a pending action on it.  */
struct allocation_task
{
  struct allocation allocation;
  enum allocation_action action;
};

/* Allocation tasks.  Initialized by initial_allocations and processed
   by the two passes in do_test below.  */
enum { allocation_task_count = action_count * max_size };
static struct allocation_task allocation_tasks[allocation_task_count];

/* Fisher-Yates shuffle of allocation_tasks.  */
static void
shuffle_allocation_tasks (void)
{
  for (int i = 0; i < allocation_task_count - 1; ++i)
    {
      /* Pick a swap partner in the tail of the array.  */
      int j = i + (rand_next (&global_seed)
                   % ((unsigned) (allocation_task_count - i)));
      TEST_VERIFY_EXIT (j >= 0 && j < allocation_task_count);
      /* Exchange.  */
      struct allocation_task tmp = allocation_tasks[i];
      allocation_tasks[i] = allocation_tasks[j];
      allocation_tasks[j] = tmp;
    }
}

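/* This is the classic Fisher-Yates shuffle: after step I, the
   elements before index I are in their final positions, and the swap
   partner J is drawn from the remaining tail
   [I, allocation_task_count).  The slight modulo bias of
   rand_next () % N is harmless for a test.  */
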
/* Set up the allocation tasks and the dumped heap.  */
static void
initial_allocations (void)
{
  /* Initialize in a position-dependent way.  */
  for (int i = 0; i < allocation_task_count; ++i)
    allocation_tasks[i] = (struct allocation_task)
      {
        .allocation =
          {
            .size = 1 + (i / action_count),
            .seed = i,
          },
        .action = i % action_count
      };

  /* Execute the tasks in a random order.  */
  shuffle_allocation_tasks ();

  /* Initialize the contents of the dumped heap.  */
  for (int i = 0; i < allocation_task_count; ++i)
    {
      struct allocation_task *task = allocation_tasks + i;
      task->allocation.data = dumped_heap_alloc (task->allocation.size);
      randomize_buffer (task->allocation.data, task->allocation.size,
                        task->allocation.seed);
    }

  for (int i = 0; i < allocation_task_count; ++i)
    check_allocation (&allocation_tasks[i].allocation, i);
}

/* Indicates whether init_heap has run.  This variable needs to be
   volatile because malloc is declared __THROW, which implies it is a
   leaf function, but we expect it to run our hooks.  */
static volatile bool heap_initialized;

/* Executed by glibc malloc, through __malloc_initialize_hook
   below.  */
static void
init_heap (void)
{
  if (test_verbose)
    printf ("info: performing heap initialization\n");
  heap_initialized = true;

  /* Populate the dumped heap.  */
  initial_allocations ();

  /* Complete initialization of the saved heap data structure.  */
  save_state.sbrk_base = (void *) dumped_heap;
  save_state.sbrked_mem_bytes = sizeof (dumped_heap);
  /* Top pointer.  Adjust so that it points to the start of struct
     malloc_chunk.  */
  save_state.av[2] = (void *) (next_heap_chunk - 1);

  /* Integrate the dumped heap into the process heap.  */
  TEST_VERIFY_EXIT (malloc_set_state (&save_state) == 0);
}

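/* Note on the call above (an assumption about the compatibility
   implementation, not something stated in this file): since glibc
   2.24, malloc_set_state no longer splices the dumped heap into the
   main arena.  It instead marks the dumped chunks so that free
   becomes a no-op on them (the memory is simply leaked) and realloc
   copies the data into a freshly allocated chunk, which is the
   behavior the allocation tasks below rely on.  */
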
/* Interpose the initialization callback.  */
void (*volatile __malloc_initialize_hook) (void) = init_heap;
compat_symbol_reference (libc, __malloc_initialize_hook,
                         __malloc_initialize_hook, GLIBC_2_0);

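/* With this hook in place, the first call into malloc (forced by
   my_free (malloc (1)) in do_test below) runs init_heap before any
   request is served, mirroring the window in which a dumping
   application had to call malloc_set_state.  */
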
/* Simulate occasional unrelated heap activity in the non-dumped
   heap.  */
enum { heap_activity_allocations_count = 32 };
static struct allocation heap_activity_allocations
  [heap_activity_allocations_count] = {};
static int heap_activity_seed_counter = 1000 * 1000;

static void
heap_activity (void)
{
  /* Only do this from time to time.  */
  if ((rand_next (&global_seed) % 4) == 0)
    {
      int slot = rand_next (&global_seed) % heap_activity_allocations_count;
      struct allocation *alloc = heap_activity_allocations + slot;
      if (alloc->data == NULL)
        {
          alloc->size = rand_next (&global_seed) % (4096U + 1);
          alloc->data = xmalloc (alloc->size);
          alloc->seed = heap_activity_seed_counter++;
          randomize_buffer (alloc->data, alloc->size, alloc->seed);
          check_allocation (alloc, 1000 + slot);
        }
      else
        {
          check_allocation (alloc, 1000 + slot);
          free (alloc->data);
          alloc->data = NULL;
        }
    }
}

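/* The seeds used by heap_activity start at 1000000 and the diagnostic
   indices at 1000, so these allocations cannot be confused with the
   dumped allocation tasks (seeds and indices below
   allocation_task_count) in any error output.  */
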
static void
heap_activity_deallocate (void)
{
  for (int i = 0; i < heap_activity_allocations_count; ++i)
    free (heap_activity_allocations[i].data);
}

/* Perform a full heap check across the dumped heap allocation tasks,
   and the simulated heap activity directly above.  */
static void
full_heap_check (void)
{
  /* Dumped heap.  */
  for (int i = 0; i < allocation_task_count; ++i)
    if (allocation_tasks[i].allocation.data != NULL)
      check_allocation (&allocation_tasks[i].allocation, i);

  /* Heap activity allocations.  */
  for (int i = 0; i < heap_activity_allocations_count; ++i)
    if (heap_activity_allocations[i].data != NULL)
      check_allocation (heap_activity_allocations + i, i);
}

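/* do_test below runs full_heap_check after every single action, so
   the test is quadratic in allocation_task_count; with 4 * 64 == 256
   tasks this is still cheap.  */
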
/* Used as an optimization barrier to force a heap allocation.  */
__attribute__ ((noinline, noclone))
static void
my_free (void *ptr)
{
  free (ptr);
}

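/* Without the noinline/noclone barrier, the compiler would be free to
   elide the paired malloc (1) and free in do_test as a dead
   allocation, and the initialization hook might never run.  */
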
static int
do_test (void)
{
  my_free (malloc (1));
  TEST_VERIFY_EXIT (heap_initialized);

  /* The first pass performs the randomly generated allocation
     tasks.  */
  if (test_verbose)
    printf ("info: first pass through allocation tasks\n");
  full_heap_check ();

  /* Execute the post-undump tasks in a random order.  */
  shuffle_allocation_tasks ();

  for (int i = 0; i < allocation_task_count; ++i)
    {
      heap_activity ();
      struct allocation_task *task = allocation_tasks + i;
      switch (task->action)
        {
        case action_free:
          check_allocation (&task->allocation, i);
          free (task->allocation.data);
          task->allocation.data = NULL;
          break;

        case action_realloc:
          check_allocation (&task->allocation, i);
          task->allocation.data = xrealloc
            (task->allocation.data, task->allocation.size + max_size);
          check_allocation (&task->allocation, i);
          break;

        case action_realloc_same:
          check_allocation (&task->allocation, i);
          task->allocation.data = xrealloc
            (task->allocation.data, task->allocation.size);
          check_allocation (&task->allocation, i);
          break;

        case action_realloc_smaller:
          check_allocation (&task->allocation, i);
          size_t new_size = task->allocation.size - 1;
          task->allocation.data = xrealloc (task->allocation.data, new_size);
          if (new_size == 0)
            {
              if (task->allocation.data != NULL)
                {
                  printf ("error: realloc with size zero did not deallocate\n");
                  errors = true;
                }
              /* No further action on this task.  */
              task->action = action_free;
            }
          else
            {
              task->allocation.size = new_size;
              check_allocation (&task->allocation, i);
            }
          break;

        case action_count:
          FAIL_EXIT1 ("task->action should never be action_count");
        }
      full_heap_check ();
    }

  /* The second pass frees the objects which were allocated during the
     first pass.  */
  if (test_verbose)
    printf ("info: second pass through allocation tasks\n");

  shuffle_allocation_tasks ();
  for (int i = 0; i < allocation_task_count; ++i)
    {
      heap_activity ();
      struct allocation_task *task = allocation_tasks + i;
      switch (task->action)
        {
        case action_free:
          /* Already freed, nothing to do.  */
          break;

        case action_realloc:
        case action_realloc_same:
        case action_realloc_smaller:
          check_allocation (&task->allocation, i);
          free (task->allocation.data);
          task->allocation.data = NULL;
          break;

        case action_count:
          FAIL_EXIT1 ("task->action should never be action_count");
        }
      full_heap_check ();
    }

  heap_activity_deallocate ();

  /* Check that the malloc_get_state stub behaves in the intended
     way.  */
  errno = 0;
  if (malloc_get_state () != NULL)
    {
      printf ("error: malloc_get_state succeeded\n");
      errors = true;
    }
  if (errno != ENOSYS)
    {
      printf ("error: malloc_get_state: %m\n");
      errors = true;
    }

  return errors;
}

#include <support/test-driver.c>