gcc-13.2.0/libstdc++-v3/include/ext/mt_allocator.h
// MT-optimized allocator -*- C++ -*-

// Copyright (C) 2003-2023 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file ext/mt_allocator.h
 *  This file is a GNU extension to the Standard C++ Library.
 */

#ifndef _MT_ALLOCATOR_H
#define _MT_ALLOCATOR_H 1

#include <bits/requires_hosted.h> // getenv

#include <new>
#include <cstdlib>
#include <bits/functexcept.h>
#include <ext/atomicity.h>
#include <bits/move.h>
#if __cplusplus >= 201103L
#include <type_traits>
#endif

namespace __gnu_cxx _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION


  typedef void (*__destroy_handler)(void*);

  /// Base class for pool object.
  struct __pool_base
  {
    // Using short int as type for the binmap implies we are never
    // caching blocks larger than 32768 with this allocator.
    typedef unsigned short int _Binmap_type;
    typedef std::size_t size_t;

    // Variables used to configure the behavior of the allocator,
    // assigned and explained in detail below.
    struct _Tune
    {
      // Compile time constants for the default _Tune values.
      enum { _S_align = 8 };
      enum { _S_max_bytes = 128 };
      enum { _S_min_bin = 8 };
      enum { _S_chunk_size = 4096 - 4 * sizeof(void*) };
      enum { _S_max_threads = 4096 };
      enum { _S_freelist_headroom = 10 };

      // Alignment needed.
      // NB: In any case must be >= sizeof(_Block_record), that
      // is 4 on 32 bit machines and 8 on 64 bit machines.
      size_t _M_align;

      // Allocation requests (after round-up to power of 2) below
      // this value will be handled by the allocator; a raw call to
      // operator new will be used for requests larger than this value.
      // NB: Must be much smaller than _M_chunk_size and in any
      // case <= 32768.
      size_t _M_max_bytes;

      // Size in bytes of the smallest bin.
      // NB: Must be a power of 2 and >= _M_align (and of course
      // much smaller than _M_max_bytes).
      size_t _M_min_bin;

      // In order to avoid fragmentation and to minimize the number
      // of calls to operator new, we always request new memory using
      // this value. Based on previous discussions on the libstdc++
      // mailing list we have chosen the value below.
      // See http://gcc.gnu.org/ml/libstdc++/2001-07/msg00077.html
      // NB: At least one order of magnitude > _M_max_bytes.
      size_t _M_chunk_size;

      // The maximum number of supported threads. For
      // single-threaded operation, use one. Maximum values will
      // vary depending on details of the underlying system. (For
      // instance, Linux 2.4.18 reports 4070 in
      // /proc/sys/kernel/threads-max, while Linux 2.6.6 reports
      // 65534.)
      size_t _M_max_threads;

      // Each time a deallocation occurs in a threaded application
      // we make sure that no more than _M_freelist_headroom % of the
      // memory a thread has in use sits on its own freelist; any
      // records above that limit are moved back to the global pool.
      size_t _M_freelist_headroom;

      // When set to true, forces all allocations to go through
      // operator new.
      bool _M_force_new;

      explicit
      _Tune()
      : _M_align(_S_align), _M_max_bytes(_S_max_bytes), _M_min_bin(_S_min_bin),
      _M_chunk_size(_S_chunk_size), _M_max_threads(_S_max_threads),
      _M_freelist_headroom(_S_freelist_headroom),
      _M_force_new(std::getenv("GLIBCXX_FORCE_NEW") ? true : false)
      { }

      explicit
      _Tune(size_t __align, size_t __maxb, size_t __minbin, size_t __chunk,
            size_t __maxthreads, size_t __headroom, bool __force)
      : _M_align(__align), _M_max_bytes(__maxb), _M_min_bin(__minbin),
      _M_chunk_size(__chunk), _M_max_threads(__maxthreads),
      _M_freelist_headroom(__headroom), _M_force_new(__force)
      { }
    };
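
    // NB: the default-constructed _Tune consults the GLIBCXX_FORCE_NEW
    // environment variable; when that variable is set (to any value),
    // _M_force_new is true and every allocation bypasses the pools in
    // favor of a plain operator new (see _M_check_threshold below).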

    struct _Block_address
    {
      void*            _M_initial;
      _Block_address*  _M_next;
    };

    const _Tune&
    _M_get_options() const
    { return _M_options; }

    void
    _M_set_options(_Tune __t)
    {
      if (!_M_init)
        _M_options = __t;
    }

    bool
    _M_check_threshold(size_t __bytes)
    { return __bytes > _M_options._M_max_bytes || _M_options._M_force_new; }

    size_t
    _M_get_binmap(size_t __bytes)
    { return _M_binmap[__bytes]; }

    size_t
    _M_get_align()
    { return _M_options._M_align; }

    explicit
    __pool_base()
    : _M_options(_Tune()), _M_binmap(0), _M_init(false) { }

    explicit
    __pool_base(const _Tune& __options)
    : _M_options(__options), _M_binmap(0), _M_init(false) { }

  private:
    explicit
    __pool_base(const __pool_base&);

    __pool_base&
    operator=(const __pool_base&);

  protected:
    // Configuration options.
    _Tune          _M_options;

    _Binmap_type*  _M_binmap;

    // Configuration of the pool object via _M_options can happen
    // after construction but before initialization. After
    // initialization is complete, this variable is set to true.
    bool           _M_init;
  };
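
  // Illustrative tuning sketch (adapted from the libstdc++ manual; the
  // values are arbitrary). A custom _Tune only takes effect if it is
  // installed before the pool is initialized, i.e. before the
  // allocator's first allocation:
  //
  //   typedef __gnu_cxx::__mt_alloc<int> allocator_type;
  //   __gnu_cxx::__pool_base::_Tune
  //     __t(16,     // align
  //         5120,   // max_bytes
  //         32,     // min_bin
  //         5120,   // chunk_size
  //         20,     // max_threads
  //         10,     // freelist_headroom
  //         false); // force_new
  //   allocator_type __a;
  //   __a._M_set_options(__t);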


  /**
   *  @brief  Data describing the underlying memory pool, parameterized on
   *  threading support.
   */
  template<bool _Thread>
    class __pool;

  /// Specialization for single thread.
  template<>
    class __pool<false> : public __pool_base
    {
    public:
      union _Block_record
      {
        // Points to the block_record of the next free block.
        _Block_record*              _M_next;
      };

      struct _Bin_record
      {
        // An "array" of pointers to the first free block.
        _Block_record**             _M_first;

        // A list of the initial addresses of all allocated blocks.
        _Block_address*             _M_address;
      };

      void
      _M_initialize_once()
      {
        if (__builtin_expect(_M_init == false, false))
          _M_initialize();
      }

      void
      _M_destroy() throw();

      char*
      _M_reserve_block(size_t __bytes, const size_t __thread_id);

      void
      _M_reclaim_block(char* __p, size_t __bytes) throw ();

      size_t
      _M_get_thread_id() { return 0; }

      const _Bin_record&
      _M_get_bin(size_t __which)
      { return _M_bin[__which]; }

      void
      _M_adjust_freelist(const _Bin_record&, _Block_record*, size_t)
      { }

      explicit __pool()
      : _M_bin(0), _M_bin_size(1) { }

      explicit __pool(const __pool_base::_Tune& __tune)
      : __pool_base(__tune), _M_bin(0), _M_bin_size(1) { }

    private:
      // An "array" of bin_records each of which represents a specific
      // power of 2 size. Memory to this "array" is allocated in
      // _M_initialize().
      _Bin_record*    _M_bin;

      // Actual value calculated in _M_initialize().
      size_t          _M_bin_size;

      void
      _M_initialize();
    };

#ifdef __GTHREADS
  /// Specialization for thread enabled, via gthreads.h.
  template<>
    class __pool<true> : public __pool_base
    {
    public:
      // Each requesting thread is assigned an id ranging from 1 to
      // _S_max_threads. Thread id 0 is used as a global memory pool.
      // In order to get constant performance on the thread assignment
      // routine, we keep a list of free ids. When a thread first
      // requests memory we remove the first record in this list and
      // store the address in a __gthread_key. When initializing the
      // __gthread_key we specify a destructor. When this destructor
      // is called (i.e. when the thread dies), we return the thread
      // id to the front of this list.
      struct _Thread_record
      {
        // Points to next free thread id record. NULL if last record in list.
        _Thread_record*             _M_next;

        // Thread id ranging from 1 to _S_max_threads.
        size_t                      _M_id;
      };
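
      // A minimal sketch of that id-assignment scheme (illustrative
      // only; the real logic lives in the out-of-line _M_get_thread_id,
      // and locking is omitted here):
      //
      //   _Thread_record* __tr = _M_thread_freelist;  // pop a free id
      //   _M_thread_freelist = __tr->_M_next;
      //   __gthread_setspecific(__gkey, __tr);        // bind id to thread
      //   return __tr->_M_id;
      //
      // where __gkey is a hypothetical __gthread_key_t whose destructor
      // pushes the record back onto _M_thread_freelist at thread exit.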

      union _Block_record
      {
        // Points to the block_record of the next free block.
        _Block_record*              _M_next;

        // The thread id of the thread which has requested this block.
        size_t                      _M_thread_id;
      };

      struct _Bin_record
      {
        // An "array" of pointers to the first free block for each
        // thread id. Memory to this "array" is allocated in
        // _S_initialize() for _S_max_threads + global pool 0.
        _Block_record**             _M_first;

        // A list of the initial addresses of all allocated blocks.
        _Block_address*             _M_address;

        // An "array" of counters used to keep track of the number of
        // blocks that are on the freelist/used for each thread id.
        // - Note that the second part of the allocated _M_used "array"
        //   actually hosts (atomic) counters of reclaimed blocks: in
        //   _M_reserve_block and in _M_reclaim_block those numbers are
        //   subtracted from the first ones to obtain the actual size
        //   of the "working set" of the given thread.
        // - Memory to these "arrays" is allocated in _S_initialize()
        //   for _S_max_threads + global pool 0.
        size_t*                     _M_free;
        size_t*                     _M_used;

        // Each bin has its own mutex which is used to ensure data
        // integrity while changing "ownership" on a block.  The mutex
        // is initialized in _S_initialize().
        __gthread_mutex_t*          _M_mutex;
      };

      // XXX GLIBCXX_ABI Deprecated
      void
      _M_initialize(__destroy_handler);

      void
      _M_initialize_once()
      {
        if (__builtin_expect(_M_init == false, false))
          _M_initialize();
      }

      void
      _M_destroy() throw();

      char*
      _M_reserve_block(size_t __bytes, const size_t __thread_id);

      void
      _M_reclaim_block(char* __p, size_t __bytes) throw ();

      const _Bin_record&
      _M_get_bin(size_t __which)
      { return _M_bin[__which]; }

      void
      _M_adjust_freelist(const _Bin_record& __bin, _Block_record* __block,
                         size_t __thread_id)
      {
        if (__gthread_active_p())
          {
            __block->_M_thread_id = __thread_id;
            --__bin._M_free[__thread_id];
            ++__bin._M_used[__thread_id];
          }
      }

      // XXX GLIBCXX_ABI Deprecated
      void
      _M_destroy_thread_key(void*) throw ();

      size_t
      _M_get_thread_id();

      explicit __pool()
      : _M_bin(0), _M_bin_size(1), _M_thread_freelist(0)
      { }

      explicit __pool(const __pool_base::_Tune& __tune)
      : __pool_base(__tune), _M_bin(0), _M_bin_size(1),
        _M_thread_freelist(0)
      { }

    private:
      // An "array" of bin_records each of which represents a specific
      // power of 2 size. Memory to this "array" is allocated in
      // _M_initialize().
      _Bin_record*      _M_bin;

      // Actual value calculated in _M_initialize().
      size_t            _M_bin_size;

      _Thread_record*   _M_thread_freelist;
      void*             _M_thread_freelist_initial;

      void
      _M_initialize();
    };
#endif

  template<template <bool> class _PoolTp, bool _Thread>
    struct __common_pool
    {
      typedef _PoolTp<_Thread>      pool_type;

      static pool_type&
      _S_get_pool()
      {
        static pool_type _S_pool;
        return _S_pool;
      }
    };

  template<template <bool> class _PoolTp, bool _Thread>
    struct __common_pool_base;

  template<template <bool> class _PoolTp>
    struct __common_pool_base<_PoolTp, false>
    : public __common_pool<_PoolTp, false>
    {
      using __common_pool<_PoolTp, false>::_S_get_pool;

      static void
      _S_initialize_once()
      {
        static bool __init;
        if (__builtin_expect(__init == false, false))
          {
            _S_get_pool()._M_initialize_once();
            __init = true;
          }
      }
    };

#ifdef __GTHREADS
  template<template <bool> class _PoolTp>
    struct __common_pool_base<_PoolTp, true>
    : public __common_pool<_PoolTp, true>
    {
      using __common_pool<_PoolTp, true>::_S_get_pool;

      static void
      _S_initialize()
      { _S_get_pool()._M_initialize_once(); }

      static void
      _S_initialize_once()
      {
        static bool __init;
        if (__builtin_expect(__init == false, false))
          {
            if (__gthread_active_p())
              {
                // On some platforms, __gthread_once_t is an aggregate.
                static __gthread_once_t __once = __GTHREAD_ONCE_INIT;
                __gthread_once(&__once, _S_initialize);
              }

            // Double check initialization. May be necessary on some
            // systems for proper construction when not compiling with
            // thread flags.
            _S_get_pool()._M_initialize_once();
            __init = true;
          }
      }
    };
#endif

  /// Policy for shared __pool objects.
  template<template <bool> class _PoolTp, bool _Thread>
    struct __common_pool_policy : public __common_pool_base<_PoolTp, _Thread>
    {
      template<typename _Tp1, template <bool> class _PoolTp1 = _PoolTp,
               bool _Thread1 = _Thread>
        struct _M_rebind
        { typedef __common_pool_policy<_PoolTp1, _Thread1> other; };

      using __common_pool_base<_PoolTp, _Thread>::_S_get_pool;
      using __common_pool_base<_PoolTp, _Thread>::_S_initialize_once;
    };
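
  // With this policy all __mt_alloc instantiations that share it use a
  // single pool (the function-local static in _S_get_pool above),
  // regardless of value type. A sketch of selecting it explicitly, per
  // the libstdc++ manual:
  //
  //   typedef __gnu_cxx::__common_pool_policy<__gnu_cxx::__pool, true>
  //     policy_type;
  //   typedef __gnu_cxx::__mt_alloc<int, policy_type> allocator_type;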


  template<typename _Tp, template <bool> class _PoolTp, bool _Thread>
    struct __per_type_pool
    {
      typedef _Tp                   value_type;
      typedef _PoolTp<_Thread>      pool_type;

      static pool_type&
      _S_get_pool()
      {
        using std::size_t;
        // Sane defaults for the _PoolTp.
        typedef typename pool_type::_Block_record _Block_record;
        const static size_t __a = (__alignof__(_Tp) >= sizeof(_Block_record)
                                   ? __alignof__(_Tp) : sizeof(_Block_record));

        typedef typename __pool_base::_Tune _Tune;
        static _Tune _S_tune(__a, sizeof(_Tp) * 64,
                             sizeof(_Tp) * 2 >= __a ? sizeof(_Tp) * 2 : __a,
                             sizeof(_Tp) * size_t(_Tune::_S_chunk_size),
                             _Tune::_S_max_threads,
                             _Tune::_S_freelist_headroom,
                             std::getenv("GLIBCXX_FORCE_NEW") ? true : false);
        static pool_type _S_pool(_S_tune);
        return _S_pool;
      }
    };

  template<typename _Tp, template <bool> class _PoolTp, bool _Thread>
    struct __per_type_pool_base;

  template<typename _Tp, template <bool> class _PoolTp>
    struct __per_type_pool_base<_Tp, _PoolTp, false>
    : public __per_type_pool<_Tp, _PoolTp, false>
    {
      using __per_type_pool<_Tp, _PoolTp, false>::_S_get_pool;

      static void
      _S_initialize_once()
      {
        static bool __init;
        if (__builtin_expect(__init == false, false))
          {
            _S_get_pool()._M_initialize_once();
            __init = true;
          }
      }
    };

#ifdef __GTHREADS
  template<typename _Tp, template <bool> class _PoolTp>
    struct __per_type_pool_base<_Tp, _PoolTp, true>
    : public __per_type_pool<_Tp, _PoolTp, true>
    {
      using __per_type_pool<_Tp, _PoolTp, true>::_S_get_pool;

      static void
      _S_initialize()
      { _S_get_pool()._M_initialize_once(); }

      static void
      _S_initialize_once()
      {
        static bool __init;
        if (__builtin_expect(__init == false, false))
          {
            if (__gthread_active_p())
              {
                // On some platforms, __gthread_once_t is an aggregate.
                static __gthread_once_t __once = __GTHREAD_ONCE_INIT;
                __gthread_once(&__once, _S_initialize);
              }

            // Double check initialization. May be necessary on some
            // systems for proper construction when not compiling with
            // thread flags.
            _S_get_pool()._M_initialize_once();
            __init = true;
          }
      }
    };
#endif

  /// Policy for individual __pool objects.
  template<typename _Tp, template <bool> class _PoolTp, bool _Thread>
    struct __per_type_pool_policy
    : public __per_type_pool_base<_Tp, _PoolTp, _Thread>
    {
      template<typename _Tp1, template <bool> class _PoolTp1 = _PoolTp,
               bool _Thread1 = _Thread>
        struct _M_rebind
        { typedef __per_type_pool_policy<_Tp1, _PoolTp1, _Thread1> other; };

      using __per_type_pool_base<_Tp, _PoolTp, _Thread>::_S_get_pool;
      using __per_type_pool_base<_Tp, _PoolTp, _Thread>::_S_initialize_once;
    };
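
  // With this policy each value type gets its own pool, tuned above in
  // __per_type_pool::_S_get_pool to the type's size and alignment. A
  // sketch of selecting it explicitly, per the libstdc++ manual:
  //
  //   typedef __gnu_cxx::__per_type_pool_policy<int, __gnu_cxx::__pool, true>
  //     policy_type;
  //   typedef __gnu_cxx::__mt_alloc<int, policy_type> allocator_type;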


  /// Base class for _Tp dependent member functions.
  template<typename _Tp>
    class __mt_alloc_base
    {
    public:
      typedef std::size_t               size_type;
      typedef std::ptrdiff_t            difference_type;
      typedef _Tp*                      pointer;
      typedef const _Tp*                const_pointer;
      typedef _Tp&                      reference;
      typedef const _Tp&                const_reference;
      typedef _Tp                       value_type;

#if __cplusplus >= 201103L
      // _GLIBCXX_RESOLVE_LIB_DEFECTS
      // 2103. propagate_on_container_move_assignment
      typedef std::true_type propagate_on_container_move_assignment;
#endif

      pointer
      address(reference __x) const _GLIBCXX_NOEXCEPT
      { return std::__addressof(__x); }

      const_pointer
      address(const_reference __x) const _GLIBCXX_NOEXCEPT
      { return std::__addressof(__x); }

      size_type
      max_size() const _GLIBCXX_USE_NOEXCEPT
      { return size_type(-1) / sizeof(_Tp); }

#if __cplusplus >= 201103L
      template<typename _Up, typename... _Args>
        void
        construct(_Up* __p, _Args&&... __args)
        { ::new((void *)__p) _Up(std::forward<_Args>(__args)...); }

      template<typename _Up>
        void
        destroy(_Up* __p) { __p->~_Up(); }
#else
      // _GLIBCXX_RESOLVE_LIB_DEFECTS
      // 402. wrong new expression in [some_] allocator::construct
      void
      construct(pointer __p, const _Tp& __val)
      { ::new((void *)__p) _Tp(__val); }

      void
      destroy(pointer __p) { __p->~_Tp(); }
#endif
    };

#ifdef __GTHREADS
#define __thread_default true
#else
#define __thread_default false
#endif

  /**
   *  @brief  This is a fixed size (power of 2) allocator which - when
   *  compiled with thread support - will maintain one freelist per
   *  size per thread plus a @a global one. Steps are taken to limit
   *  the per thread freelist sizes (by returning excess back to
   *  the @a global list).
   *  @ingroup allocators
   *
   *  Further details:
   *  https://gcc.gnu.org/onlinedocs/libstdc++/manual/mt_allocator.html
   */
  template<typename _Tp,
           typename _Poolp = __common_pool_policy<__pool, __thread_default> >
    class __mt_alloc : public __mt_alloc_base<_Tp>
    {
    public:
      typedef std::size_t                 size_type;
      typedef std::ptrdiff_t              difference_type;
      typedef _Tp*                        pointer;
      typedef const _Tp*                  const_pointer;
      typedef _Tp&                        reference;
      typedef const _Tp&                  const_reference;
      typedef _Tp                         value_type;
      typedef _Poolp                      __policy_type;
      typedef typename _Poolp::pool_type  __pool_type;

      template<typename _Tp1, typename _Poolp1 = _Poolp>
        struct rebind
        {
          typedef typename _Poolp1::template _M_rebind<_Tp1>::other pol_type;
          typedef __mt_alloc<_Tp1, pol_type> other;
        };

      __mt_alloc() _GLIBCXX_USE_NOEXCEPT { }

      __mt_alloc(const __mt_alloc&) _GLIBCXX_USE_NOEXCEPT { }

      template<typename _Tp1, typename _Poolp1>
        __mt_alloc(const __mt_alloc<_Tp1, _Poolp1>&) _GLIBCXX_USE_NOEXCEPT { }

      ~__mt_alloc() _GLIBCXX_USE_NOEXCEPT { }

      _GLIBCXX_NODISCARD pointer
      allocate(size_type __n, const void* = 0);

      void
      deallocate(pointer __p, size_type __n);

      const __pool_base::_Tune
      _M_get_options()
      {
        // Return a copy, not a reference, for external consumption.
        return __policy_type::_S_get_pool()._M_get_options();
      }

      void
      _M_set_options(__pool_base::_Tune __t)
      { __policy_type::_S_get_pool()._M_set_options(__t); }
    };
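
  // Typical use is through a container; the default policy gives one
  // pool shared by all allocators built on __pool with the default
  // threading setting. For example (from the libstdc++ manual):
  //
  //   #include <list>
  //   #include <ext/mt_allocator.h>
  //   std::list<int, __gnu_cxx::__mt_alloc<int> > __l;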

  template<typename _Tp, typename _Poolp>
    _GLIBCXX_NODISCARD typename __mt_alloc<_Tp, _Poolp>::pointer
    __mt_alloc<_Tp, _Poolp>::
    allocate(size_type __n, const void*)
    {
      if (__n > this->max_size())
        std::__throw_bad_alloc();

#if __cpp_aligned_new
      // Types with extended alignment are handled by operator new/delete.
      if (alignof(_Tp) > __STDCPP_DEFAULT_NEW_ALIGNMENT__)
        {
          std::align_val_t __al = std::align_val_t(alignof(_Tp));
          return static_cast<_Tp*>(::operator new(__n * sizeof(_Tp), __al));
        }
#endif

      __policy_type::_S_initialize_once();

      // Requests larger than _M_max_bytes are handled by operator
      // new/delete directly.
      __pool_type& __pool = __policy_type::_S_get_pool();
      const size_type __bytes = __n * sizeof(_Tp);
      if (__pool._M_check_threshold(__bytes))
        {
          void* __ret = ::operator new(__bytes);
          return static_cast<_Tp*>(__ret);
        }

      // Round up to power of 2 and figure out which bin to use.
      const size_type __which = __pool._M_get_binmap(__bytes);
      const size_type __thread_id = __pool._M_get_thread_id();

      // Find out if we have blocks on our freelist.  If so, go ahead
      // and use them directly without having to lock anything.
      char* __c;
      typedef typename __pool_type::_Bin_record _Bin_record;
      const _Bin_record& __bin = __pool._M_get_bin(__which);
      if (__bin._M_first[__thread_id])
        {
          // Already reserved.
          typedef typename __pool_type::_Block_record _Block_record;
          _Block_record* __block = __bin._M_first[__thread_id];
          __bin._M_first[__thread_id] = __block->_M_next;

          __pool._M_adjust_freelist(__bin, __block, __thread_id);
          __c = reinterpret_cast<char*>(__block) + __pool._M_get_align();
        }
      else
        {
          // Null, reserve.
          __c = __pool._M_reserve_block(__bytes, __thread_id);
        }
      return static_cast<_Tp*>(static_cast<void*>(__c));
    }
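
  // For instance, with the default _Tune an allocate(3) on an
  // __mt_alloc<int> asks for 12 bytes: _M_get_binmap(12) selects the
  // 16-byte bin (the smallest power-of-2 bin that fits, given
  // _S_min_bin of 8), and a block is popped from this thread's
  // freelist for that bin if one is available.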

  template<typename _Tp, typename _Poolp>
    void
    __mt_alloc<_Tp, _Poolp>::
    deallocate(pointer __p, size_type __n)
    {
      if (__builtin_expect(__p != 0, true))
        {
#if __cpp_aligned_new
          // Types with extended alignment are handled by operator new/delete.
          if (alignof(_Tp) > __STDCPP_DEFAULT_NEW_ALIGNMENT__)
            {
              ::operator delete(__p, std::align_val_t(alignof(_Tp)));
              return;
            }
#endif

          // Requests larger than _M_max_bytes are handled by
          // operator new/delete directly.
          __pool_type& __pool = __policy_type::_S_get_pool();
          const size_type __bytes = __n * sizeof(_Tp);
          if (__pool._M_check_threshold(__bytes))
            ::operator delete(__p);
          else
            __pool._M_reclaim_block(reinterpret_cast<char*>(__p), __bytes);
        }
    }
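
  // NB: as with any allocator, deallocate must be passed the same __n
  // that was given to allocate, since __n * sizeof(_Tp) is what decides
  // here between returning the block to the pool and calling operator
  // delete.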

  template<typename _Tp, typename _Poolp>
    inline bool
    operator==(const __mt_alloc<_Tp, _Poolp>&, const __mt_alloc<_Tp, _Poolp>&)
    { return true; }

#if __cpp_impl_three_way_comparison < 201907L
  template<typename _Tp, typename _Poolp>
    inline bool
    operator!=(const __mt_alloc<_Tp, _Poolp>&, const __mt_alloc<_Tp, _Poolp>&)
    { return false; }
#endif

#undef __thread_default

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace

#endif