Python-3.12.0/Include/internal/pycore_obmalloc.h
#ifndef Py_INTERNAL_OBMALLOC_H
#define Py_INTERNAL_OBMALLOC_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
#  error "this header requires Py_BUILD_CORE define"
#endif


typedef unsigned int pymem_uint;  /* assuming >= 16 bits */

#undef  uint
#define uint pymem_uint


/* An object allocator for Python.

   Here is an introduction to the layers of the Python memory architecture,
   showing where the object allocator is actually used (layer +2). It is
   called for every object allocation and deallocation (PyObject_New/Del),
   unless the object-specific allocators implement a proprietary allocation
   scheme (ex.: ints use a simple free list). This is also the place where
   the cyclic garbage collector operates selectively on container objects.


    Object-specific allocators
    _____   ______   ______       ________
   [ int ] [ dict ] [ list ] ... [ string ]       Python core         |
+3 | <----- Object-specific memory -----> | <-- Non-object memory --> |
    _______________________________       |                           |
   [   Python's object allocator   ]      |                           |
+2 | ####### Object memory ####### | <------ Internal buffers ------> |
    ______________________________________________________________    |
   [          Python's raw memory allocator (PyMem_ API)          ]   |
+1 | <----- Python memory (under PyMem manager's control) ------> |   |
    __________________________________________________________________
   [    Underlying general-purpose allocator (ex: C library malloc)   ]
 0 | <------ Virtual memory allocated for the python process -------> |

   =========================================================================
    _______________________________________________________________________
   [                OS-specific Virtual Memory Manager (VMM)               ]
-1 | <--- Kernel dynamic storage allocation & management (page-based) ---> |
    __________________________________   __________________________________
   [                                  ] [                                  ]
-2 | <-- Physical memory: ROM/RAM --> | | <-- Secondary storage (swap) --> |

*/
/*==========================================================================*/

/* A fast, special-purpose memory allocator for small blocks, to be used
   on top of a general-purpose malloc -- heavily based on previous art. */

/* Vladimir Marangozov -- August 2000 */

/*
 * "Memory management is where the rubber meets the road -- if we do the wrong
 * thing at any level, the results will not be good. And if we don't make the
 * levels work well together, we are in serious trouble." (1)
 *
 * (1) Paul R. Wilson, Mark S. Johnstone, Michael Neely, and David Boles,
 *    "Dynamic Storage Allocation: A Survey and Critical Review",
 *    in Proc. 1995 Int'l. Workshop on Memory Management, September 1995.
 */

/* #undef WITH_MEMORY_LIMITS */         /* disable mem limit checks  */

/*==========================================================================*/

/*
 * Allocation strategy abstract:
 *
 * For small requests, the allocator sub-allocates <Big> blocks of memory.
 * Requests greater than SMALL_REQUEST_THRESHOLD bytes are routed to the
 * system's allocator.
 *
 * Small requests are grouped in size classes spaced ALIGNMENT bytes apart
 * (8 bytes on 32-bit builds, 16 on 64-bit), due to the required valid
 * alignment of the returned address. Requests of a particular size are
 * serviced from memory pools of POOL_SIZE bytes (one or more VMM pages).
 * Pools are fragmented on demand and contain free lists of blocks of one
 * particular size class. In other words, there is a fixed-size allocator
 * for each size class. Free pools are shared by the different allocators,
 * thus minimizing the space reserved for a particular size class.
 *
 * This allocation strategy is a variant of what is known as "simple
 * segregated storage based on array of free lists". The main drawback of
 * simple segregated storage is that we might end up with a lot of reserved
 * memory for the different free lists, which can degenerate over time. To
 * avoid this, we partition each free list into pools and dynamically share
 * the reserved space among all free lists. This technique is quite efficient
 * for memory-intensive programs which allocate mainly small-sized blocks.
 *
 * For small requests we have the following table (shown for ALIGNMENT == 8;
 * with ALIGNMENT == 16 the block sizes are 16, 32, ..., 512 and the size
 * class indices run from 0 to 31):
 *
 * Request in bytes     Size of allocated block      Size class idx
 * ----------------------------------------------------------------
 *        1-8                     8                       0
 *        9-16                   16                       1
 *       17-24                   24                       2
 *       25-32                   32                       3
 *       33-40                   40                       4
 *       41-48                   48                       5
 *       49-56                   56                       6
 *       57-64                   64                       7
 *       65-72                   72                       8
 *        ...                   ...                     ...
 *      497-504                 504                      62
 *      505-512                 512                      63
 *
 *      Requests of 0 bytes, or of SMALL_REQUEST_THRESHOLD + 1 bytes and up,
 *      are routed to the underlying allocator.
 */

/*==========================================================================*/

/*
 * -- Main tunable settings section --
 */

/*
 * Alignment of addresses returned to the user. 8-byte alignment works
 * on most current architectures (with 32-bit or 64-bit address buses);
 * 64-bit platforms use 16-byte alignment, per the definitions below.
 * The alignment value is also used for grouping small requests in size
 * classes spaced ALIGNMENT bytes apart.
 *
 * You shouldn't change this unless you know what you are doing.
 */

#if SIZEOF_VOID_P > 4
#define ALIGNMENT              16               /* must be 2^N */
#define ALIGNMENT_SHIFT         4
#else
#define ALIGNMENT               8               /* must be 2^N */
#define ALIGNMENT_SHIFT         3
#endif

/* Return the number of bytes in size class I, as a uint. */
#define INDEX2SIZE(I) (((pymem_uint)(I) + 1) << ALIGNMENT_SHIFT)
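
/* Illustrative sketch (not part of the original header): how a request size
 * maps to a size class index and back.  This mirrors the arithmetic the
 * allocator performs; the function name is hypothetical. */
#if 0
#include <assert.h>
#include <stddef.h>
static void
size_class_example(void)
{
    size_t nbytes = 25;                          /* a small request         */
    uint index = (uint)(nbytes - 1) >> ALIGNMENT_SHIFT;
    uint block_size = INDEX2SIZE(index);         /* bytes actually reserved */
    /* With ALIGNMENT == 8:  index == 3, block_size == 32.
     * With ALIGNMENT == 16: index == 1, block_size == 32. */
    assert(block_size >= nbytes && block_size % ALIGNMENT == 0);
}
#endif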

/*
 * Max size threshold below which malloc requests are considered to be
 * small enough to be serviced from the preallocated memory pools. You can
 * tune this value according to your application behaviour and memory needs.
 *
 * Note: a size threshold of 512 guarantees that newly created dictionaries
 * will be allocated from preallocated memory pools on 64-bit.
 *
 * The following invariants must hold:
 *      1) ALIGNMENT <= SMALL_REQUEST_THRESHOLD <= 512
 *      2) SMALL_REQUEST_THRESHOLD is evenly divisible by ALIGNMENT
 *
 * Although not required, for better performance and space efficiency,
 * it is recommended that SMALL_REQUEST_THRESHOLD be set to a power of 2.
 */
#define SMALL_REQUEST_THRESHOLD 512
#define NB_SMALL_SIZE_CLASSES   (SMALL_REQUEST_THRESHOLD / ALIGNMENT)
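
/* Sketch (hypothetical; the header itself does not carry these checks): the
 * invariants above are simple enough to verify at preprocessing time. */
#if 0
#if !(ALIGNMENT <= SMALL_REQUEST_THRESHOLD) || SMALL_REQUEST_THRESHOLD > 512
#  error "SMALL_REQUEST_THRESHOLD violates invariant 1"
#endif
#if SMALL_REQUEST_THRESHOLD % ALIGNMENT != 0
#  error "SMALL_REQUEST_THRESHOLD must be a multiple of ALIGNMENT"
#endif
#endif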

/*
 * The system's VMM page size can be obtained on most unices with a
 * getpagesize() call or deduced from various header files. To make
 * things simpler, we assume that it is 4K, which is OK for most systems.
 * It is probably better if this is the native page size, but it doesn't
 * have to be.  In theory, if SYSTEM_PAGE_SIZE is larger than the native page
 * size, then `POOL_ADDR(p)->arenaindex' could, in rare cases, cause a
 * segmentation violation fault.  4K is apparently OK for all the platforms
 * that Python currently targets.
 */
#define SYSTEM_PAGE_SIZE        (4 * 1024)

/*
 * Maximum amount of memory managed by the allocator for small requests.
 */
#ifdef WITH_MEMORY_LIMITS
#ifndef SMALL_MEMORY_LIMIT
#define SMALL_MEMORY_LIMIT      (64 * 1024 * 1024)      /* 64 MB -- more? */
#endif
#endif

#if !defined(WITH_PYMALLOC_RADIX_TREE)
/* Use a radix tree to track arena memory regions, for address_in_range().
 * Enabled by default since it allows larger pool sizes.  Can be disabled
 * using -DWITH_PYMALLOC_RADIX_TREE=0 */
#define WITH_PYMALLOC_RADIX_TREE 1
#endif

#if SIZEOF_VOID_P > 4
/* on 64-bit platforms use larger pools and arenas if we can */
#define USE_LARGE_ARENAS
#if WITH_PYMALLOC_RADIX_TREE
/* large pools only supported if radix-tree is enabled */
#define USE_LARGE_POOLS
#endif
#endif

/*
 * The allocator sub-allocates <Big> blocks of memory (called arenas) aligned
 * on a page boundary. This is a reserved virtual address space for the
 * current process (obtained through a malloc()/mmap() call). This in no way
 * means that the arena's memory will be used in its entirety: a malloc(<Big>)
 * is usually just an address-range reservation for <Big> bytes, and physical
 * pages are committed only as they are actually referenced. So malloc'ing big
 * blocks and not using them does not waste memory, only addressable range.
 *
 * Arenas are allocated with mmap() on systems supporting anonymous memory
 * mappings to reduce heap fragmentation.
 */
#ifdef USE_LARGE_ARENAS
#define ARENA_BITS              20                    /* 1 MiB */
#else
#define ARENA_BITS              18                    /* 256 KiB */
#endif
#define ARENA_SIZE              (1 << ARENA_BITS)
#define ARENA_SIZE_MASK         (ARENA_SIZE - 1)

#ifdef WITH_MEMORY_LIMITS
#define MAX_ARENAS              (SMALL_MEMORY_LIMIT / ARENA_SIZE)
#endif

/*
 * Size of the pools used for small blocks.  Must be a power of 2.
 */
#ifdef USE_LARGE_POOLS
#define POOL_BITS               14                  /* 16 KiB */
#else
#define POOL_BITS               12                  /* 4 KiB */
#endif
#define POOL_SIZE               (1 << POOL_BITS)
#define POOL_SIZE_MASK          (POOL_SIZE - 1)

#if !WITH_PYMALLOC_RADIX_TREE
#if POOL_SIZE != SYSTEM_PAGE_SIZE
#   error "pool size must be equal to system page size"
#endif
#endif

#define MAX_POOLS_IN_ARENA  (ARENA_SIZE / POOL_SIZE)
#if MAX_POOLS_IN_ARENA * POOL_SIZE != ARENA_SIZE
#   error "arena size not an exact multiple of pool size"
#endif
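
/* Worked numbers (derived from the settings above): with large arenas and
 * large pools, MAX_POOLS_IN_ARENA == 2^20 / 2^14 == 64; with the small
 * settings, 2^18 / 2^12 == 64 as well.  A 64-bit build with the radix tree
 * disabled (1 MiB arenas, 4 KiB pools) gets 2^20 / 2^12 == 256. */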

/*
 * -- End of tunable settings section --
 */

/*==========================================================================*/

/* When you say memory, my mind reasons in terms of (pointers to) blocks */
typedef uint8_t pymem_block;

/* Pool for small blocks. */
struct pool_header {
    union { pymem_block *_padding;
            uint count; } ref;          /* number of allocated blocks    */
    pymem_block *freeblock;             /* pool's free list head         */
    struct pool_header *nextpool;       /* next pool of this size class  */
    struct pool_header *prevpool;       /* previous pool       ""        */
    uint arenaindex;                    /* index into arenas of base adr */
    uint szidx;                         /* block size class index        */
    uint nextoffset;                    /* bytes to virgin block         */
    uint maxnextoffset;                 /* largest valid nextoffset      */
};

typedef struct pool_header *poolp;

/* Record keeping for arenas. */
struct arena_object {
    /* The address of the arena, as returned by malloc.  Note that 0
     * will never be returned by a successful malloc, and is used
     * here to mark an arena_object that doesn't correspond to an
     * allocated arena.
     */
    uintptr_t address;

    /* Pool-aligned pointer to the next pool to be carved off. */
    pymem_block* pool_address;

    /* The number of available pools in the arena:  free pools + never-
     * allocated pools.
     */
    uint nfreepools;

    /* The total number of pools in the arena, whether or not available. */
    uint ntotalpools;

    /* Singly-linked list of available pools. */
    struct pool_header* freepools;

    /* Whenever this arena_object is not associated with an allocated
     * arena, the nextarena member is used to link all unassociated
     * arena_objects in the singly-linked `unused_arena_objects` list.
     * The prevarena member is unused in this case.
     *
     * When this arena_object is associated with an allocated arena
     * with at least one available pool, both members are used in the
     * doubly-linked `usable_arenas` list, which is maintained in
     * increasing order of `nfreepools` values.
     *
     * Else this arena_object is associated with an allocated arena
     * all of whose pools are in use.  `nextarena` and `prevarena`
     * are both meaningless in this case.
     */
    struct arena_object* nextarena;
    struct arena_object* prevarena;
};

#define POOL_OVERHEAD   _Py_SIZE_ROUND_UP(sizeof(struct pool_header), ALIGNMENT)

#define DUMMY_SIZE_IDX          0xffff  /* size class of newly cached pools */

/* Round pointer P down to the closest pool-aligned address <= P, as a poolp */
#define POOL_ADDR(P) ((poolp)_Py_ALIGN_DOWN((P), POOL_SIZE))

/* Return total number of blocks in pool of size index I, as a uint. */
#define NUMBLOCKS(I) ((pymem_uint)(POOL_SIZE - POOL_OVERHEAD) / INDEX2SIZE(I))
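
/* Illustrative arithmetic (not part of the original header): on a typical
 * LP64 build sizeof(struct pool_header) is 48, so POOL_OVERHEAD rounds to 48
 * with ALIGNMENT == 16, and the smallest size class packs
 * NUMBLOCKS(0) == (16384 - 48) / 16 == 1021 blocks into a 16 KiB pool.
 * The function name below is hypothetical. */
#if 0
#include <stdio.h>
static void
pool_math_example(void)
{
    uint i = 0;     /* smallest size class */
    printf("block size %u, blocks per pool %u\n",
           (unsigned)INDEX2SIZE(i), (unsigned)NUMBLOCKS(i));
    /* Given any pointer p into a pool, POOL_ADDR(p) recovers the pool's
     * header, e.g.:  poolp pool = POOL_ADDR(p);  pool->szidx == i */
}
#endif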

/*==========================================================================*/

/*
 * Pool table -- headed, circular, doubly-linked lists of partially used pools.

This is involved.  For an index i, usedpools[i+i] is the header for a list of
all partially used pools holding small blocks with "size class idx" i. So,
assuming ALIGNMENT == 8, usedpools[0] corresponds to blocks of size 8 and
usedpools[2] to blocks of size 16; in general, index 2*i <-> blocks of size
(i+1)<<ALIGNMENT_SHIFT.

Pools are carved off an arena's highwater mark (an arena_object's pool_address
member) as needed.  Once carved off, a pool is in one of three states forever
after:

used == partially used, neither empty nor full
    At least one block in the pool is currently allocated, and at least one
    block in the pool is not currently allocated (note this implies a pool
    has room for at least two blocks).
    This is a pool's initial state, as a pool is created only when malloc
    needs space.
    The pool holds blocks of a fixed size, and is in the circular list headed
    at usedpools[i] (see above).  It's linked to the other used pools of the
    same size class via the pool_header's nextpool and prevpool members.
    If all but one block is currently allocated, a malloc can cause a
    transition to the full state.  If all but one block is not currently
    allocated, a free can cause a transition to the empty state.

full == all the pool's blocks are currently allocated
    On transition to full, a pool is unlinked from its usedpools[] list.
    It's then no longer linked to from anything, and its nextpool and
    prevpool members are meaningless until it transitions back to used.
    A free of a block in a full pool puts the pool back in the used state.
    Then it's linked in at the front of the appropriate usedpools[] list, so
    that the next allocation for its size class will reuse the freed block.

empty == all the pool's blocks are currently available for allocation
    On transition to empty, a pool is unlinked from its usedpools[] list,
    and linked to the front of its arena_object's singly-linked freepools list,
    via its nextpool member.  The prevpool member has no meaning in this case.
    Empty pools have no inherent size class:  the next time a malloc finds
    an empty list in usedpools[], it takes the first pool off of freepools.
    If the size class needed happens to be the same as the size class the pool
    last had, some pool initialization can be skipped.

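As a sketch, the state transitions show up in the allocator as a pair of
checks (loosely mirroring pymalloc_alloc() and pymalloc_free() in
Objects/obmalloc.c; details elided):

    /* malloc side: the pool's last free block was just handed out */
    if (pool->freeblock == NULL && pool->nextoffset > pool->maxnextoffset) {
        /* used -> full: unlink pool from its usedpools[] list */
    }

    /* free side: the last allocated block was just returned */
    if (--pool->ref.count == 0) {
        /* used -> empty: unlink, push pool onto its arena's freepools */
    }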

Block Management

Blocks within pools are again carved out as needed.  pool->freeblock points to
the start of a singly-linked list of free blocks within the pool.  When a
block is freed, it's inserted at the front of its pool's freeblock list.  Note
that the available blocks in a pool are *not* all linked together when a pool
is initialized.  Instead only "the first two" (lowest addresses) blocks are
set up, returning the first such block, and setting pool->freeblock to a
one-block list holding the second such block.  This is consistent with
pymalloc's strategy of never touching a piece of memory, at any level (arena,
pool, or block), until it's actually needed.

So long as a pool is in the used state, we're certain there *is* a block
available for allocating, and pool->freeblock is not NULL.  If pool->freeblock
points to the end of the free list before we've carved the entire pool into
blocks, that means we simply haven't yet gotten to one of the higher-address
blocks.  The offset from the pool_header to the start of "the next" virgin
block is stored in the pool_header nextoffset member, and the largest value
of nextoffset that makes sense is stored in the maxnextoffset member when a
pool is initialized.  All the blocks in a pool have been passed out at least
once when and only when nextoffset > maxnextoffset.

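In code, the fast paths are a pop and a push on that list (a sketch loosely
mirroring pymalloc_alloc() and pymalloc_free() in Objects/obmalloc.c):

    /* allocation: take the head of the free list */
    pymem_block *bp = pool->freeblock;
    pool->freeblock = *(pymem_block **)bp;    /* may become NULL */

    /* deallocation of block p: push it back on the front */
    *(pymem_block **)p = pool->freeblock;
    pool->freeblock = (pymem_block *)p;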

Major obscurity:  While the usedpools vector is declared to have poolp
entries, it doesn't really.  It really contains two pointers per (conceptual)
poolp entry, the nextpool and prevpool members of a pool_header.  The
excruciating initialization code below fools C so that

    usedpools[i+i]

"acts like" a genuine poolp, but only so long as you only reference its
nextpool and prevpool members.  The "- 2*sizeof(pymem_block *)" gibberish
compensates for the fact that a pool_header's nextpool and prevpool members
immediately follow a pool_header's first two members:

    union { pymem_block *_padding;
            uint count; } ref;
    pymem_block *freeblock;

each of which consumes sizeof(pymem_block *) bytes.  So what usedpools[i+i]
really contains is a fudged-up pointer p such that *if* C believes it's a
poolp pointer, then p->nextpool and p->prevpool are both p (meaning that the
headed circular list is empty).

It's unclear why the usedpools setup is so convoluted.  It could be to
minimize the amount of cache required to hold this heavily-referenced table
(which only *needs* the two interpool pointer members of a pool_header). OTOH,
referencing code has to remember to "double the index" and doing so isn't
free, usedpools[0] isn't a strictly legal pointer, and we're crucially relying
on C not inserting any padding anywhere in a pool_header at or before the
prevpool member.
**************************************************************************** */
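
/* A sketch of how those fudged-up pointers are built.  In CPython 3.12 the
 * actual initialization lives in pycore_obmalloc_init.h; macros along these
 * lines produce, for each size class x, a pointer whose nextpool/prevpool
 * slots land on usedpools' own pair of entries: */
#if 0
#define PTA(pools, x) \
    ((poolp)((uint8_t *)&((pools).used[2*(x)]) - 2*sizeof(pymem_block *)))
#define PT(pools, x)  PTA(pools, x), PTA(pools, x)
#endif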

#define OBMALLOC_USED_POOLS_SIZE (2 * ((NB_SMALL_SIZE_CLASSES + 7) / 8) * 8)
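/* i.e. two pointer slots per size class, with NB_SMALL_SIZE_CLASSES rounded
 * up to a multiple of 8. */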

struct _obmalloc_pools {
    poolp used[OBMALLOC_USED_POOLS_SIZE];
};


/*==========================================================================
Arena management.

`arenas` is a vector of arena_objects.  It contains maxarenas entries, some of
which may not be currently used (== they're arena_objects that aren't
currently associated with an allocated arena).  Note that arenas proper are
separately malloc'ed.

Prior to Python 2.5, arenas were never free()'ed.  Starting with Python 2.5,
we do try to free() arenas, and use some mild heuristic strategies to increase
the likelihood that arenas eventually can be freed.

unused_arena_objects

    This is a singly-linked list of the arena_objects that are currently not
    being used (no arena is associated with them).  Objects are taken off the
    head of the list in new_arena(), and are pushed on the head of the list in
    PyObject_Free() when the arena is empty.  Key invariant:  an arena_object
    is on this list if and only if its .address member is 0.

usable_arenas

    This is a doubly-linked list of the arena_objects associated with arenas
    that have pools available.  These pools are either waiting to be reused,
    or have not been used before.  The list is sorted to have the most-
    allocated arenas first (ascending order based on the nfreepools member).
    This means that the next allocation will come from a heavily used arena,
    which gives the nearly empty arenas a chance to be returned to the system.
    In my unscientific tests this dramatically increased the number of arenas
    that could be freed.

Note that an arena_object associated with an arena all of whose pools are
currently in use isn't on either list.

Changed in Python 3.8:  keeping usable_arenas sorted by number of free pools
used to be done by one-at-a-time linear search when an arena's number of
free pools changed.  That could, overall, consume time quadratic in the
number of arenas.  That didn't really matter when there were only a few
hundred arenas (typical!), but could be a timing disaster when there were
hundreds of thousands.  See bpo-37029.

Now we have a vector of "search fingers" to eliminate the need to search:
nfp2lasta[nfp] returns the last ("rightmost") arena in usable_arenas
with nfp free pools.  This is NULL if and only if there is no arena with
nfp free pools in usable_arenas.
*/

/* How many arena_objects do we initially allocate?
 * 16 = can allocate 16 arenas = 16 * ARENA_SIZE = 16 MiB (with 1 MiB arenas)
 * or 4 MiB (with 256 KiB arenas) before growing the `arenas` vector.
 */
#define INITIAL_ARENA_OBJECTS 16

struct _obmalloc_mgmt {
    /* Array of objects used to track chunks of memory (arenas). */
    struct arena_object* arenas;
    /* Number of slots currently allocated in the `arenas` vector. */
    uint maxarenas;

    /* The head of the singly-linked, NULL-terminated list of available
     * arena_objects.
     */
    struct arena_object* unused_arena_objects;

    /* The head of the doubly-linked, NULL-terminated at each end, list of
     * arena_objects associated with arenas that have pools available.
     */
    struct arena_object* usable_arenas;

    /* nfp2lasta[nfp] is the last arena in usable_arenas with nfp free pools */
    struct arena_object* nfp2lasta[MAX_POOLS_IN_ARENA + 1];

    /* Number of arenas allocated that haven't been free()'d. */
    size_t narenas_currently_allocated;

    /* Total number of times malloc() was called to allocate an arena. */
    size_t ntimes_arena_allocated;
    /* High water mark (max value ever seen) for narenas_currently_allocated. */
    size_t narenas_highwater;

    Py_ssize_t raw_allocated_blocks;
};


#if WITH_PYMALLOC_RADIX_TREE
/*==========================================================================*/
/* Radix tree for tracking arena usage.  If enabled, used to implement
   address_in_range().

   Memory address bit allocation for keys:

   64-bit pointers, IGNORE_BITS=0 and 2^20 arena size:
     15 -> MAP_TOP_BITS
     15 -> MAP_MID_BITS
     14 -> MAP_BOT_BITS
     20 -> ideal aligned arena
   ----
     64

   64-bit pointers, IGNORE_BITS=16, and 2^20 arena size:
     16 -> IGNORE_BITS
     10 -> MAP_TOP_BITS
     10 -> MAP_MID_BITS
      8 -> MAP_BOT_BITS
     20 -> ideal aligned arena
   ----
     64

   32-bit pointers and 2^18 arena size:
     14 -> MAP_BOT_BITS
     18 -> ideal aligned arena
   ----
     32

*/

#if SIZEOF_VOID_P == 8

/* number of bits in a pointer */
#define POINTER_BITS 64

/* High bits of memory addresses that will be ignored when indexing into the
 * radix tree.  Setting this to zero is the safe default.  For most 64-bit
 * machines, setting this to 16 would be safe.  The kernel would not give
 * user-space virtual memory addresses that have significant information in
 * those high bits.  The main advantage to setting IGNORE_BITS > 0 is that less
 * virtual memory will be used for the top and middle radix tree arrays.  Those
 * arrays are allocated in the BSS segment and so will typically consume real
 * memory only if actually accessed.
 */
#define IGNORE_BITS 0

/* use the top and mid layers of the radix tree */
#define USE_INTERIOR_NODES

#elif SIZEOF_VOID_P == 4

#define POINTER_BITS 32
#define IGNORE_BITS 0

#else

/* Currently this code works for 64-bit or 32-bit pointers only.  */
#error "obmalloc radix tree requires 64-bit or 32-bit pointers."

#endif /* SIZEOF_VOID_P */

/* arena_coverage_t members require this to be true  */
#if ARENA_BITS >= 32
#   error "arena size must be < 2^32"
#endif

/* the lower bits of the address that are not ignored */
#define ADDRESS_BITS (POINTER_BITS - IGNORE_BITS)

#ifdef USE_INTERIOR_NODES
/* Number of bits used for MAP_TOP and MAP_MID nodes.  The "+ 2" makes the
 * integer division round up; MAP_BOT gets whatever bits remain. */
#define INTERIOR_BITS ((ADDRESS_BITS - ARENA_BITS + 2) / 3)
#else
#define INTERIOR_BITS 0
#endif

#define MAP_TOP_BITS INTERIOR_BITS
#define MAP_TOP_LENGTH (1 << MAP_TOP_BITS)
#define MAP_TOP_MASK (MAP_TOP_LENGTH - 1)

#define MAP_MID_BITS INTERIOR_BITS
#define MAP_MID_LENGTH (1 << MAP_MID_BITS)
#define MAP_MID_MASK (MAP_MID_LENGTH - 1)

#define MAP_BOT_BITS (ADDRESS_BITS - ARENA_BITS - 2*INTERIOR_BITS)
#define MAP_BOT_LENGTH (1 << MAP_BOT_BITS)
#define MAP_BOT_MASK (MAP_BOT_LENGTH - 1)

#define MAP_BOT_SHIFT ARENA_BITS
#define MAP_MID_SHIFT (MAP_BOT_BITS + MAP_BOT_SHIFT)
#define MAP_TOP_SHIFT (MAP_MID_BITS + MAP_MID_SHIFT)

#define AS_UINT(p) ((uintptr_t)(p))
#define MAP_BOT_INDEX(p) ((AS_UINT(p) >> MAP_BOT_SHIFT) & MAP_BOT_MASK)
#define MAP_MID_INDEX(p) ((AS_UINT(p) >> MAP_MID_SHIFT) & MAP_MID_MASK)
#define MAP_TOP_INDEX(p) ((AS_UINT(p) >> MAP_TOP_SHIFT) & MAP_TOP_MASK)
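
/* Illustrative sketch (not part of the original header): splitting an address
 * into the three radix tree indices.  With 64-bit pointers, IGNORE_BITS == 0
 * and ARENA_BITS == 20, the indices consume 15 + 15 + 14 bits above the 20
 * arena-offset bits.  The pointer value and function name are hypothetical. */
#if 0
#include <stddef.h>
static void
radix_index_example(void)
{
    void *p = (void *)0x00007f1234567890ULL;
    size_t i_top = MAP_TOP_INDEX(p);
    size_t i_mid = MAP_MID_INDEX(p);
    size_t i_bot = MAP_BOT_INDEX(p);
    /* The leaf covering p's arena-sized region would then be
     * root.ptrs[i_top]->ptrs[i_mid]->arenas[i_bot], once the interior
     * nodes have been allocated. */
}
#endif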

#if IGNORE_BITS > 0
/* Return the ignored part of the pointer address.  Those bits should be the
 * same for all valid pointers if IGNORE_BITS is set correctly.
 */
#define HIGH_BITS(p) (AS_UINT(p) >> ADDRESS_BITS)
#else
#define HIGH_BITS(p) 0
#endif


/* This is the leaf of the radix tree.  See arena_map_mark_used() for the
 * meaning of these members. */
typedef struct {
    int32_t tail_hi;
    int32_t tail_lo;
} arena_coverage_t;

typedef struct arena_map_bot {
    /* The members tail_hi and tail_lo are accessed together, so it is
     * better to have them as an array of structs, rather than two
     * arrays.
     */
    arena_coverage_t arenas[MAP_BOT_LENGTH];
} arena_map_bot_t;

#ifdef USE_INTERIOR_NODES
typedef struct arena_map_mid {
    struct arena_map_bot *ptrs[MAP_MID_LENGTH];
} arena_map_mid_t;

typedef struct arena_map_top {
    struct arena_map_mid *ptrs[MAP_TOP_LENGTH];
} arena_map_top_t;
#endif

struct _obmalloc_usage {
    /* The root of the radix tree.  Note that by initializing it like this,
     * the memory ends up in the BSS.  The OS will map in physical pages only
     * as the interior nodes actually get used (demand paging).
     */
#ifdef USE_INTERIOR_NODES
    arena_map_top_t arena_map_root;
    /* accounting for number of used interior nodes */
    int arena_map_mid_count;
    int arena_map_bot_count;
#else
    arena_map_bot_t arena_map_root;
#endif
};

#endif /* WITH_PYMALLOC_RADIX_TREE */


struct _obmalloc_global_state {
    int dump_debug_stats;
    Py_ssize_t interpreter_leaks;
};

struct _obmalloc_state {
    struct _obmalloc_pools pools;
    struct _obmalloc_mgmt mgmt;
    struct _obmalloc_usage usage;
};


#undef  uint


/* Allocate memory directly from the O/S virtual memory system,
 * where supported. Otherwise fall back on malloc(). */
void *_PyObject_VirtualAlloc(size_t size);
void _PyObject_VirtualFree(void *, size_t size);
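
/* Usage sketch (hypothetical): an arena-sized region obtained outside the
 * ordinary heap and returned the same way. */
#if 0
static void
virtual_alloc_example(void)
{
    void *arena = _PyObject_VirtualAlloc(ARENA_SIZE);
    if (arena != NULL) {
        /* ... carve pools out of the region ... */
        _PyObject_VirtualFree(arena, ARENA_SIZE);
    }
}
#endif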


/* This function returns the number of allocated memory blocks, regardless of size */
extern Py_ssize_t _Py_GetGlobalAllocatedBlocks(void);
#define _Py_GetAllocatedBlocks() \
    _Py_GetGlobalAllocatedBlocks()
extern Py_ssize_t _PyInterpreterState_GetAllocatedBlocks(PyInterpreterState *);
extern void _PyInterpreterState_FinalizeAllocatedBlocks(PyInterpreterState *);


#ifdef WITH_PYMALLOC
// Export the symbol for the 3rd party guppy3 project
PyAPI_FUNC(int) _PyObject_DebugMallocStats(FILE *out);
#endif
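
/* Sketch: core code can dump allocator statistics to a stdio stream, e.g.
 *     _PyObject_DebugMallocStats(stderr);
 */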


#ifdef __cplusplus
}
#endif
#endif  // !Py_INTERNAL_OBMALLOC_H