gcc-13.2.0/libsanitizer/sanitizer_common/sanitizer_allocator_primary64.h
       1  //===-- sanitizer_allocator_primary64.h -------------------------*- C++ -*-===//
       2  //
       3  // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
       4  // See https://llvm.org/LICENSE.txt for license information.
       5  // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
       6  //
       7  //===----------------------------------------------------------------------===//
       8  //
       9  // Part of the Sanitizer Allocator.
      10  //
      11  //===----------------------------------------------------------------------===//
      12  #ifndef SANITIZER_ALLOCATOR_H
      13  #error This file must be included inside sanitizer_allocator.h
      14  #endif
      15  
      16  template<class SizeClassAllocator> struct SizeClassAllocator64LocalCache;
      17  
      18  // SizeClassAllocator64 -- allocator for 64-bit address space.
      19  // The template parameter Params is a class containing the actual parameters.
      20  //
      21  // Space: a portion of address space of kSpaceSize bytes starting at SpaceBeg.
      22  // If kSpaceBeg is ~0 then SpaceBeg is chosen dynamically by mmap.
      23  // Otherwise SpaceBeg=kSpaceBeg (fixed address).
      24  // kSpaceSize is a power of two.
      25  // At the beginning the entire space is mprotect-ed, then small parts of it
      26  // are mapped on demand.
      27  //
      28  // Region: a part of Space dedicated to a single size class.
      29  // There are kNumClasses Regions of equal size.
      30  //
      31  // UserChunk: a piece of memory returned to user.
      32  // MetaChunk: kMetadataSize bytes of metadata associated with a UserChunk.
      33  
       34  // FreeArray is an array of free-d chunks (stored as 4-byte offsets)
      35  //
      36  // A Region looks like this:
      37  // UserChunk1 ... UserChunkN <gap> MetaChunkN ... MetaChunk1 FreeArray
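           //
           // Illustrative example (not from the original source; numbers assumed):
           // with kSpaceSize = 2^40 and kNumClassesRounded = 64, each Region spans
           // kRegionSize = 2^40 / 64 = 2^34 bytes; UserChunk i of a 64-byte size
           // class starts at RegionBeg + i * 64, and FreeArray occupies the last
           // kRegionSize / 8 bytes of the Region (see kFreeArraySize below).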
      38  
      39  struct SizeClassAllocator64FlagMasks {  //  Bit masks.
      40    enum {
      41      kRandomShuffleChunks = 1,
      42    };
      43  };
      44  
      45  template <typename Allocator>
      46  class MemoryMapper {
      47   public:
      48    typedef typename Allocator::CompactPtrT CompactPtrT;
      49  
      50    explicit MemoryMapper(const Allocator &allocator) : allocator_(allocator) {}
      51  
      52    bool GetAndResetStats(uptr &ranges, uptr &bytes) {
      53      ranges = released_ranges_count_;
      54      released_ranges_count_ = 0;
      55      bytes = released_bytes_;
      56      released_bytes_ = 0;
      57      return ranges != 0;
      58    }
      59  
      60    u64 *MapPackedCounterArrayBuffer(uptr count) {
      61      buffer_.clear();
      62      buffer_.resize(count);
      63      return buffer_.data();
      64    }
      65  
       66    // Releases the [from, to) range of pages back to the OS.
      67    void ReleasePageRangeToOS(uptr class_id, CompactPtrT from, CompactPtrT to) {
      68      const uptr region_base = allocator_.GetRegionBeginBySizeClass(class_id);
      69      const uptr from_page = allocator_.CompactPtrToPointer(region_base, from);
      70      const uptr to_page = allocator_.CompactPtrToPointer(region_base, to);
      71      ReleaseMemoryPagesToOS(from_page, to_page);
      72      released_ranges_count_++;
      73      released_bytes_ += to_page - from_page;
      74    }
      75  
      76   private:
      77    const Allocator &allocator_;
      78    uptr released_ranges_count_ = 0;
      79    uptr released_bytes_ = 0;
      80    InternalMmapVector<u64> buffer_;
      81  };
      82  
      83  template <class Params>
      84  class SizeClassAllocator64 {
      85   public:
      86    using AddressSpaceView = typename Params::AddressSpaceView;
      87    static const uptr kSpaceBeg = Params::kSpaceBeg;
      88    static const uptr kSpaceSize = Params::kSpaceSize;
      89    static const uptr kMetadataSize = Params::kMetadataSize;
      90    typedef typename Params::SizeClassMap SizeClassMap;
      91    typedef typename Params::MapUnmapCallback MapUnmapCallback;
      92  
      93    static const bool kRandomShuffleChunks =
      94        Params::kFlags & SizeClassAllocator64FlagMasks::kRandomShuffleChunks;
      95  
      96    typedef SizeClassAllocator64<Params> ThisT;
      97    typedef SizeClassAllocator64LocalCache<ThisT> AllocatorCache;
      98    typedef MemoryMapper<ThisT> MemoryMapperT;
      99  
     100    // When we know the size class (the region base) we can represent a pointer
     101    // as a 4-byte integer (offset from the region start shifted right by 4).
     102    typedef u32 CompactPtrT;
     103    static const uptr kCompactPtrScale = 4;
     104    CompactPtrT PointerToCompactPtr(uptr base, uptr ptr) const {
     105      return static_cast<CompactPtrT>((ptr - base) >> kCompactPtrScale);
     106    }
     107    uptr CompactPtrToPointer(uptr base, CompactPtrT ptr32) const {
     108      return base + (static_cast<uptr>(ptr32) << kCompactPtrScale);
     109    }
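             // Illustrative round-trip (not part of the original source; the address
             // is assumed): with base = 0x600000000000 and ptr = base + 0x150,
             // PointerToCompactPtr(base, ptr) == 0x150 >> 4 == 0x15 and
             // CompactPtrToPointer(base, 0x15) == base + (0x15 << 4) == ptr. The
             // encoding is lossless as long as chunk offsets are multiples of
             // 1 << kCompactPtrScale == 16, which is assumed here.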
     110  
     111    // If heap_start is nonzero, assumes kSpaceSize bytes are already mapped R/W
     112    // at heap_start and places the heap there.  This mode requires kSpaceBeg ==
     113    // ~(uptr)0.
     114    void Init(s32 release_to_os_interval_ms, uptr heap_start = 0) {
     115      uptr TotalSpaceSize = kSpaceSize + AdditionalSize();
     116      PremappedHeap = heap_start != 0;
     117      if (PremappedHeap) {
     118        CHECK(!kUsingConstantSpaceBeg);
     119        NonConstSpaceBeg = heap_start;
     120        uptr RegionInfoSize = AdditionalSize();
     121        RegionInfoSpace =
     122            address_range.Init(RegionInfoSize, PrimaryAllocatorName);
     123        CHECK_NE(RegionInfoSpace, ~(uptr)0);
     124        CHECK_EQ(RegionInfoSpace,
     125                 address_range.MapOrDie(RegionInfoSpace, RegionInfoSize,
     126                                        "SizeClassAllocator: region info"));
     127        MapUnmapCallback().OnMap(RegionInfoSpace, RegionInfoSize);
     128      } else {
     129        if (kUsingConstantSpaceBeg) {
     130          CHECK(IsAligned(kSpaceBeg, SizeClassMap::kMaxSize));
     131          CHECK_EQ(kSpaceBeg,
     132                   address_range.Init(TotalSpaceSize, PrimaryAllocatorName,
     133                                      kSpaceBeg));
     134        } else {
      135          // Combined allocator expects that a 2^N allocation is always aligned
     136          // to 2^N. For this to work, the start of the space needs to be aligned
     137          // as high as the largest size class (which also needs to be a power of
     138          // 2).
     139          NonConstSpaceBeg = address_range.InitAligned(
     140              TotalSpaceSize, SizeClassMap::kMaxSize, PrimaryAllocatorName);
     141          CHECK_NE(NonConstSpaceBeg, ~(uptr)0);
     142        }
     143        RegionInfoSpace = SpaceEnd();
     144        MapWithCallbackOrDie(RegionInfoSpace, AdditionalSize(),
     145                             "SizeClassAllocator: region info");
     146      }
     147      SetReleaseToOSIntervalMs(release_to_os_interval_ms);
      148      // Check that the RegionInfo array is aligned to the cache line size.
     149      DCHECK_EQ(RegionInfoSpace % kCacheLineSize, 0);
     150    }
     151  
     152    s32 ReleaseToOSIntervalMs() const {
     153      return atomic_load(&release_to_os_interval_ms_, memory_order_relaxed);
     154    }
     155  
     156    void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
     157      atomic_store(&release_to_os_interval_ms_, release_to_os_interval_ms,
     158                   memory_order_relaxed);
     159    }
     160  
     161    void ForceReleaseToOS() {
     162      MemoryMapperT memory_mapper(*this);
     163      for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
     164        Lock l(&GetRegionInfo(class_id)->mutex);
     165        MaybeReleaseToOS(&memory_mapper, class_id, true /*force*/);
     166      }
     167    }
     168  
     169    static bool CanAllocate(uptr size, uptr alignment) {
     170      return size <= SizeClassMap::kMaxSize &&
     171        alignment <= SizeClassMap::kMaxSize;
     172    }
     173  
     174    NOINLINE void ReturnToAllocator(MemoryMapperT *memory_mapper,
     175                                    AllocatorStats *stat, uptr class_id,
     176                                    const CompactPtrT *chunks, uptr n_chunks) {
     177      RegionInfo *region = GetRegionInfo(class_id);
     178      uptr region_beg = GetRegionBeginBySizeClass(class_id);
     179      CompactPtrT *free_array = GetFreeArray(region_beg);
     180  
     181      Lock l(&region->mutex);
     182      uptr old_num_chunks = region->num_freed_chunks;
     183      uptr new_num_freed_chunks = old_num_chunks + n_chunks;
      184      // Failure to allocate free array space while releasing memory is
      185      // non-recoverable.
     186      if (UNLIKELY(!EnsureFreeArraySpace(region, region_beg,
     187                                         new_num_freed_chunks))) {
     188        Report("FATAL: Internal error: %s's allocator exhausted the free list "
     189               "space for size class %zd (%zd bytes).\n", SanitizerToolName,
     190               class_id, ClassIdToSize(class_id));
     191        Die();
     192      }
     193      for (uptr i = 0; i < n_chunks; i++)
     194        free_array[old_num_chunks + i] = chunks[i];
     195      region->num_freed_chunks = new_num_freed_chunks;
     196      region->stats.n_freed += n_chunks;
     197  
     198      MaybeReleaseToOS(memory_mapper, class_id, false /*force*/);
     199    }
     200  
     201    NOINLINE bool GetFromAllocator(AllocatorStats *stat, uptr class_id,
     202                                   CompactPtrT *chunks, uptr n_chunks) {
     203      RegionInfo *region = GetRegionInfo(class_id);
     204      uptr region_beg = GetRegionBeginBySizeClass(class_id);
     205      CompactPtrT *free_array = GetFreeArray(region_beg);
     206  
     207      Lock l(&region->mutex);
     208  #if SANITIZER_WINDOWS
     209      /* On Windows unmapping of memory during __sanitizer_purge_allocator is
     210      explicit and immediate, so unmapped regions must be explicitly mapped back
     211      in when they are accessed again. */
     212      if (region->rtoi.last_released_bytes > 0) {
      213        MmapFixedOrDie(region_beg, region->mapped_user,
      214                       "SizeClassAllocator: region data");
     215        region->rtoi.n_freed_at_last_release = 0;
     216        region->rtoi.last_released_bytes = 0;
     217      }
     218  #endif
     219      if (UNLIKELY(region->num_freed_chunks < n_chunks)) {
     220        if (UNLIKELY(!PopulateFreeArray(stat, class_id, region,
     221                                        n_chunks - region->num_freed_chunks)))
     222          return false;
     223        CHECK_GE(region->num_freed_chunks, n_chunks);
     224      }
     225      region->num_freed_chunks -= n_chunks;
     226      uptr base_idx = region->num_freed_chunks;
     227      for (uptr i = 0; i < n_chunks; i++)
     228        chunks[i] = free_array[base_idx + i];
     229      region->stats.n_allocated += n_chunks;
     230      return true;
     231    }
     232  
     233    bool PointerIsMine(const void *p) const {
     234      uptr P = reinterpret_cast<uptr>(p);
     235      if (kUsingConstantSpaceBeg && (kSpaceBeg % kSpaceSize) == 0)
     236        return P / kSpaceSize == kSpaceBeg / kSpaceSize;
     237      return P >= SpaceBeg() && P < SpaceEnd();
     238    }
     239  
     240    uptr GetRegionBegin(const void *p) {
     241      if (kUsingConstantSpaceBeg)
     242        return reinterpret_cast<uptr>(p) & ~(kRegionSize - 1);
     243      uptr space_beg = SpaceBeg();
      244      return ((reinterpret_cast<uptr>(p) - space_beg) & ~(kRegionSize - 1)) +
     245          space_beg;
     246    }
     247  
     248    uptr GetRegionBeginBySizeClass(uptr class_id) const {
     249      return SpaceBeg() + kRegionSize * class_id;
     250    }
     251  
     252    uptr GetSizeClass(const void *p) {
     253      if (kUsingConstantSpaceBeg && (kSpaceBeg % kSpaceSize) == 0)
     254        return ((reinterpret_cast<uptr>(p)) / kRegionSize) % kNumClassesRounded;
     255      return ((reinterpret_cast<uptr>(p) - SpaceBeg()) / kRegionSize) %
     256             kNumClassesRounded;
     257    }
     258  
     259    void *GetBlockBegin(const void *p) {
     260      uptr class_id = GetSizeClass(p);
     261      if (class_id >= kNumClasses) return nullptr;
     262      uptr size = ClassIdToSize(class_id);
     263      if (!size) return nullptr;
     264      uptr chunk_idx = GetChunkIdx((uptr)p, size);
     265      uptr reg_beg = GetRegionBegin(p);
     266      uptr beg = chunk_idx * size;
     267      uptr next_beg = beg + size;
     268      const RegionInfo *region = AddressSpaceView::Load(GetRegionInfo(class_id));
     269      if (region->mapped_user >= next_beg)
     270        return reinterpret_cast<void*>(reg_beg + beg);
     271      return nullptr;
     272    }
     273  
     274    uptr GetActuallyAllocatedSize(void *p) {
     275      CHECK(PointerIsMine(p));
     276      return ClassIdToSize(GetSizeClass(p));
     277    }
     278  
     279    static uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }
     280  
     281    void *GetMetaData(const void *p) {
     282      CHECK(kMetadataSize);
     283      uptr class_id = GetSizeClass(p);
     284      uptr size = ClassIdToSize(class_id);
     285      if (!size)
     286        return nullptr;
     287      uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), size);
     288      uptr region_beg = GetRegionBeginBySizeClass(class_id);
     289      return reinterpret_cast<void *>(GetMetadataEnd(region_beg) -
     290                                      (1 + chunk_idx) * kMetadataSize);
     291    }
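             // Illustrative note (not from the original source): metadata grows
             // downward from GetMetadataEnd(region_beg), so chunk_idx == 0 owns the
             // kMetadataSize bytes ending exactly at GetMetadataEnd(region_beg),
             // chunk_idx == 1 the kMetadataSize bytes below that, and so on,
             // matching the "MetaChunkN ... MetaChunk1" order in the Region diagram
             // at the top of this file.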
     292  
     293    uptr TotalMemoryUsed() {
     294      uptr res = 0;
     295      for (uptr i = 0; i < kNumClasses; i++)
     296        res += GetRegionInfo(i)->allocated_user;
     297      return res;
     298    }
     299  
     300    // Test-only.
     301    void TestOnlyUnmap() {
     302      UnmapWithCallbackOrDie((uptr)address_range.base(), address_range.size());
     303    }
     304  
     305    static void FillMemoryProfile(uptr start, uptr rss, bool file, uptr *stats) {
     306      for (uptr class_id = 0; class_id < kNumClasses; class_id++)
     307        if (stats[class_id] == start)
     308          stats[class_id] = rss;
     309    }
     310  
     311    void PrintStats(uptr class_id, uptr rss) {
     312      RegionInfo *region = GetRegionInfo(class_id);
     313      if (region->mapped_user == 0) return;
     314      uptr in_use = region->stats.n_allocated - region->stats.n_freed;
     315      uptr avail_chunks = region->allocated_user / ClassIdToSize(class_id);
     316      Printf(
     317          "%s %02zd (%6zd): mapped: %6zdK allocs: %7zd frees: %7zd inuse: %6zd "
     318          "num_freed_chunks %7zd avail: %6zd rss: %6zdK releases: %6zd "
     319          "last released: %6lldK region: 0x%zx\n",
     320          region->exhausted ? "F" : " ", class_id, ClassIdToSize(class_id),
     321          region->mapped_user >> 10, region->stats.n_allocated,
     322          region->stats.n_freed, in_use, region->num_freed_chunks, avail_chunks,
     323          rss >> 10, region->rtoi.num_releases,
     324          region->rtoi.last_released_bytes >> 10,
     325          SpaceBeg() + kRegionSize * class_id);
     326    }
     327  
     328    void PrintStats() {
     329      uptr rss_stats[kNumClasses];
     330      for (uptr class_id = 0; class_id < kNumClasses; class_id++)
     331        rss_stats[class_id] = SpaceBeg() + kRegionSize * class_id;
     332      GetMemoryProfile(FillMemoryProfile, rss_stats);
     333  
     334      uptr total_mapped = 0;
     335      uptr total_rss = 0;
     336      uptr n_allocated = 0;
     337      uptr n_freed = 0;
     338      for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
     339        RegionInfo *region = GetRegionInfo(class_id);
     340        if (region->mapped_user != 0) {
     341          total_mapped += region->mapped_user;
     342          total_rss += rss_stats[class_id];
     343        }
     344        n_allocated += region->stats.n_allocated;
     345        n_freed += region->stats.n_freed;
     346      }
     347  
     348      Printf("Stats: SizeClassAllocator64: %zdM mapped (%zdM rss) in "
     349             "%zd allocations; remains %zd\n", total_mapped >> 20,
     350             total_rss >> 20, n_allocated, n_allocated - n_freed);
     351      for (uptr class_id = 1; class_id < kNumClasses; class_id++)
     352        PrintStats(class_id, rss_stats[class_id]);
     353    }
     354  
      355    // ForceLock() and ForceUnlock() are needed to implement the Darwin malloc
      356    // zone introspection API.
     357    void ForceLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
     358      for (uptr i = 0; i < kNumClasses; i++) {
     359        GetRegionInfo(i)->mutex.Lock();
     360      }
     361    }
     362  
     363    void ForceUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
     364      for (int i = (int)kNumClasses - 1; i >= 0; i--) {
     365        GetRegionInfo(i)->mutex.Unlock();
     366      }
     367    }
     368  
     369    // Iterate over all existing chunks.
     370    // The allocator must be locked when calling this function.
     371    void ForEachChunk(ForEachChunkCallback callback, void *arg) {
     372      for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
     373        RegionInfo *region = GetRegionInfo(class_id);
     374        uptr chunk_size = ClassIdToSize(class_id);
     375        uptr region_beg = SpaceBeg() + class_id * kRegionSize;
     376        uptr region_allocated_user_size =
     377            AddressSpaceView::Load(region)->allocated_user;
     378        for (uptr chunk = region_beg;
     379             chunk < region_beg + region_allocated_user_size;
     380             chunk += chunk_size) {
     381          // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
     382          callback(chunk, arg);
     383        }
     384      }
     385    }
     386  
     387    static uptr ClassIdToSize(uptr class_id) {
     388      return SizeClassMap::Size(class_id);
     389    }
     390  
     391    static uptr AdditionalSize() {
     392      return RoundUpTo(sizeof(RegionInfo) * kNumClassesRounded,
     393                       GetPageSizeCached());
     394    }
     395  
     396    typedef SizeClassMap SizeClassMapT;
     397    static const uptr kNumClasses = SizeClassMap::kNumClasses;
     398    static const uptr kNumClassesRounded = SizeClassMap::kNumClassesRounded;
     399  
      400    // A packed array of counters. Each counter occupies 2^n bits, enough to
      401    // store the counter's max_value. The constructor tries to allocate the
      402    // required buffer via mapper->MapPackedCounterArrayBuffer, and the caller
      403    // is expected to check whether initialization succeeded via IsAllocated().
      404    // For performance's sake, none of the accessors validate their arguments:
      405    // it is assumed that the index is always in the [0, n) range and that a
      406    // value is never incremented past max_value.
     407    class PackedCounterArray {
     408     public:
     409      template <typename MemoryMapper>
     410      PackedCounterArray(u64 num_counters, u64 max_value, MemoryMapper *mapper)
     411          : n(num_counters) {
     412        CHECK_GT(num_counters, 0);
     413        CHECK_GT(max_value, 0);
     414        constexpr u64 kMaxCounterBits = sizeof(*buffer) * 8ULL;
      415        // Rounding the counter storage size up to a power of two allows using
      416        // bit shifts to calculate a particular counter's index and offset.
     417        uptr counter_size_bits =
     418            RoundUpToPowerOfTwo(MostSignificantSetBitIndex(max_value) + 1);
     419        CHECK_LE(counter_size_bits, kMaxCounterBits);
     420        counter_size_bits_log = Log2(counter_size_bits);
     421        counter_mask = ~0ULL >> (kMaxCounterBits - counter_size_bits);
     422  
     423        uptr packing_ratio = kMaxCounterBits >> counter_size_bits_log;
     424        CHECK_GT(packing_ratio, 0);
     425        packing_ratio_log = Log2(packing_ratio);
     426        bit_offset_mask = packing_ratio - 1;
     427  
     428        buffer = mapper->MapPackedCounterArrayBuffer(
     429            RoundUpTo(n, 1ULL << packing_ratio_log) >> packing_ratio_log);
     430      }
     431  
     432      bool IsAllocated() const {
     433        return !!buffer;
     434      }
     435  
     436      u64 GetCount() const {
     437        return n;
     438      }
     439  
     440      uptr Get(uptr i) const {
     441        DCHECK_LT(i, n);
     442        uptr index = i >> packing_ratio_log;
     443        uptr bit_offset = (i & bit_offset_mask) << counter_size_bits_log;
     444        return (buffer[index] >> bit_offset) & counter_mask;
     445      }
     446  
     447      void Inc(uptr i) const {
     448        DCHECK_LT(Get(i), counter_mask);
     449        uptr index = i >> packing_ratio_log;
     450        uptr bit_offset = (i & bit_offset_mask) << counter_size_bits_log;
     451        buffer[index] += 1ULL << bit_offset;
     452      }
     453  
     454      void IncRange(uptr from, uptr to) const {
     455        DCHECK_LE(from, to);
     456        for (uptr i = from; i <= to; i++)
     457          Inc(i);
     458      }
     459  
     460     private:
     461      const u64 n;
     462      u64 counter_size_bits_log;
     463      u64 counter_mask;
     464      u64 packing_ratio_log;
     465      u64 bit_offset_mask;
     466      u64* buffer;
     467    };
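             // Illustrative usage (a sketch, not part of the original source): for
             // max_value == 3 each counter needs 2 bits, so one u64 packs 32 of
             // them; for max_value == 86 it needs RoundUpToPowerOfTwo(7) == 8 bits,
             // so one u64 packs 8. For example:
             //   PackedCounterArray counters(/*num_counters=*/1000, /*max_value=*/3,
             //                               &memory_mapper);  // mapper is assumed
             //   if (counters.IsAllocated()) {   // buffer of 1024 / 32 == 32 u64s
             //     counters.Inc(42);             // counter 42 becomes 1
             //     counters.IncRange(100, 200);  // counters 100..200, inclusive
             //   }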
     468  
     469    template <class MemoryMapperT>
     470    class FreePagesRangeTracker {
     471     public:
     472      FreePagesRangeTracker(MemoryMapperT *mapper, uptr class_id)
     473          : memory_mapper(mapper),
     474            class_id(class_id),
     475            page_size_scaled_log(Log2(GetPageSizeCached() >> kCompactPtrScale)) {}
     476  
     477      void NextPage(bool freed) {
     478        if (freed) {
     479          if (!in_the_range) {
     480            current_range_start_page = current_page;
     481            in_the_range = true;
     482          }
     483        } else {
     484          CloseOpenedRange();
     485        }
     486        current_page++;
     487      }
     488  
     489      void Done() {
     490        CloseOpenedRange();
     491      }
     492  
     493     private:
     494      void CloseOpenedRange() {
     495        if (in_the_range) {
     496          memory_mapper->ReleasePageRangeToOS(
     497              class_id, current_range_start_page << page_size_scaled_log,
     498              current_page << page_size_scaled_log);
     499          in_the_range = false;
     500        }
     501      }
     502  
     503      MemoryMapperT *const memory_mapper = nullptr;
     504      const uptr class_id = 0;
     505      const uptr page_size_scaled_log = 0;
     506      bool in_the_range = false;
     507      uptr current_page = 0;
     508      uptr current_range_start_page = 0;
     509    };
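             // Illustrative behaviour (not part of the original source): feeding
             // NextPage() the per-page "fully freed" verdicts
             //   true, true, false, true, false
             // and then calling Done() yields two ReleasePageRangeToOS calls, one
             // for pages [0, 2) and one for pages [3, 4): consecutive freed pages
             // are coalesced into a single released range.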
     510  
      511    // Iterates over the free_array to identify memory pages containing only
      512    // freed chunks and returns these pages back to the OS.
      513    // allocated_pages_count is the total number of pages allocated for the
      514    // current bucket.
     515    template <typename MemoryMapper>
     516    static void ReleaseFreeMemoryToOS(CompactPtrT *free_array,
     517                                      uptr free_array_count, uptr chunk_size,
     518                                      uptr allocated_pages_count,
     519                                      MemoryMapper *memory_mapper,
     520                                      uptr class_id) {
     521      const uptr page_size = GetPageSizeCached();
     522  
     523      // Figure out the number of chunks per page and whether we can take a fast
     524      // path (the number of chunks per page is the same for all pages).
     525      uptr full_pages_chunk_count_max;
     526      bool same_chunk_count_per_page;
     527      if (chunk_size <= page_size && page_size % chunk_size == 0) {
      528        // Same number of chunks per page, no crossovers.
     529        full_pages_chunk_count_max = page_size / chunk_size;
     530        same_chunk_count_per_page = true;
     531      } else if (chunk_size <= page_size && page_size % chunk_size != 0 &&
     532          chunk_size % (page_size % chunk_size) == 0) {
     533        // Some chunks are crossing page boundaries, which means that the page
     534        // contains one or two partial chunks, but all pages contain the same
     535        // number of chunks.
     536        full_pages_chunk_count_max = page_size / chunk_size + 1;
     537        same_chunk_count_per_page = true;
     538      } else if (chunk_size <= page_size) {
     539        // Some chunks are crossing page boundaries, which means that the page
     540        // contains one or two partial chunks.
     541        full_pages_chunk_count_max = page_size / chunk_size + 2;
     542        same_chunk_count_per_page = false;
     543      } else if (chunk_size > page_size && chunk_size % page_size == 0) {
      544        // One chunk covers multiple pages, no crossovers.
     545        full_pages_chunk_count_max = 1;
     546        same_chunk_count_per_page = true;
     547      } else if (chunk_size > page_size) {
      548        // One chunk covers multiple pages and some chunks cross page
      549        // boundaries. Some pages contain one chunk, some contain two.
     550        full_pages_chunk_count_max = 2;
     551        same_chunk_count_per_page = false;
     552      } else {
     553        UNREACHABLE("All chunk_size/page_size ratios must be handled.");
     554      }
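               // Worked examples (not from the original source; page_size assumed to
               // be 4096):
               //   chunk_size 64:   4096 % 64 == 0, every page holds exactly 64
               //                    chunks (first branch).
               //   chunk_size 48:   4096 % 48 == 16, 48 % 16 == 0, every page
               //                    overlaps exactly 4096/48 + 1 == 86 chunks
               //                    (second branch).
               //   chunk_size 96:   4096 % 96 == 64, 96 % 64 != 0, pages overlap at
               //                    most 4096/96 + 2 == 44 chunks, but not all the
               //                    same number (third branch).
               //   chunk_size 8192: a multiple of page_size, each page is covered by
               //                    exactly one chunk (fourth branch).
               //   chunk_size 6144: larger than a page but not a multiple of it, so
               //                    pages are covered by one or two chunks (fifth
               //                    branch).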
     555  
     556      PackedCounterArray counters(allocated_pages_count,
     557                                  full_pages_chunk_count_max, memory_mapper);
     558      if (!counters.IsAllocated())
     559        return;
     560  
     561      const uptr chunk_size_scaled = chunk_size >> kCompactPtrScale;
     562      const uptr page_size_scaled = page_size >> kCompactPtrScale;
     563      const uptr page_size_scaled_log = Log2(page_size_scaled);
     564  
     565      // Iterate over free chunks and count how many free chunks affect each
     566      // allocated page.
     567      if (chunk_size <= page_size && page_size % chunk_size == 0) {
     568        // Each chunk affects one page only.
     569        for (uptr i = 0; i < free_array_count; i++)
     570          counters.Inc(free_array[i] >> page_size_scaled_log);
     571      } else {
     572        // In all other cases chunks might affect more than one page.
     573        for (uptr i = 0; i < free_array_count; i++) {
     574          counters.IncRange(
     575              free_array[i] >> page_size_scaled_log,
     576              (free_array[i] + chunk_size_scaled - 1) >> page_size_scaled_log);
     577        }
     578      }
     579  
     580      // Iterate over pages detecting ranges of pages with chunk counters equal
     581      // to the expected number of chunks for the particular page.
     582      FreePagesRangeTracker<MemoryMapper> range_tracker(memory_mapper, class_id);
     583      if (same_chunk_count_per_page) {
     584        // Fast path, every page has the same number of chunks affecting it.
     585        for (uptr i = 0; i < counters.GetCount(); i++)
     586          range_tracker.NextPage(counters.Get(i) == full_pages_chunk_count_max);
     587      } else {
      588        // Slow path: go through the pages, keeping count of how many chunks
      589        // affect each page.
     590        const uptr pn =
     591            chunk_size < page_size ? page_size_scaled / chunk_size_scaled : 1;
     592        const uptr pnc = pn * chunk_size_scaled;
      593        // The idea is to advance the current boundary by the first chunk's
      594        // size, then by the middle portion's size (the part of the page
      595        // covered by chunks other than the first and the last one), and then
      596        // by the last chunk's size, adding up the number of chunks on the
      597        // current page and checking at every step whether a page boundary
      598        // was crossed.
     598        uptr prev_page_boundary = 0;
     599        uptr current_boundary = 0;
     600        for (uptr i = 0; i < counters.GetCount(); i++) {
     601          uptr page_boundary = prev_page_boundary + page_size_scaled;
     602          uptr chunks_per_page = pn;
     603          if (current_boundary < page_boundary) {
     604            if (current_boundary > prev_page_boundary)
     605              chunks_per_page++;
     606            current_boundary += pnc;
     607            if (current_boundary < page_boundary) {
     608              chunks_per_page++;
     609              current_boundary += chunk_size_scaled;
     610            }
     611          }
     612          prev_page_boundary = page_boundary;
     613  
     614          range_tracker.NextPage(counters.Get(i) == chunks_per_page);
     615        }
     616      }
     617      range_tracker.Done();
     618    }
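             // Worked example of the slow path above (not from the original source;
             // page_size assumed 4096, chunk_size 96, kCompactPtrScale 4): then
             // page_size_scaled == 256, chunk_size_scaled == 6, pn == 42 and
             // pnc == 252. For page 0, current_boundary goes 0 -> 252 (< 256, so one
             // extra chunk) -> 258, giving an expected count of 43; for page 1 it
             // starts at 258 (> 256: one chunk carried over), advances to 510
             // (< 512: one more), then to 516, giving 42 + 2 == 44. These match the
             // actual numbers of 96-byte chunks overlapping the first two pages.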
     619  
     620   private:
     621    friend class MemoryMapper<ThisT>;
     622  
     623    ReservedAddressRange address_range;
     624  
     625    static const uptr kRegionSize = kSpaceSize / kNumClassesRounded;
     626    // FreeArray is the array of free-d chunks (stored as 4-byte offsets).
     627    // In the worst case it may require kRegionSize/SizeClassMap::kMinSize
     628    // elements, but in reality this will not happen. For simplicity we
     629    // dedicate 1/8 of the region's virtual space to FreeArray.
     630    static const uptr kFreeArraySize = kRegionSize / 8;
     631  
     632    static const bool kUsingConstantSpaceBeg = kSpaceBeg != ~(uptr)0;
     633    uptr NonConstSpaceBeg;
     634    uptr SpaceBeg() const {
     635      return kUsingConstantSpaceBeg ? kSpaceBeg : NonConstSpaceBeg;
     636    }
     637    uptr SpaceEnd() const { return  SpaceBeg() + kSpaceSize; }
     638    // kRegionSize must be >= 2^32.
     639    COMPILER_CHECK((kRegionSize) >= (1ULL << (SANITIZER_WORDSIZE / 2)));
     640    // kRegionSize must be <= 2^36, see CompactPtrT.
     641    COMPILER_CHECK((kRegionSize) <= (1ULL << (SANITIZER_WORDSIZE / 2 + 4)));
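             // Worked arithmetic (not from the original source): with
             // SANITIZER_WORDSIZE == 64 the two checks above require
             //   2^32 <= kRegionSize <= 2^36.
             // The upper bound follows from CompactPtrT: a 32-bit compact pointer
             // scaled by 1 << kCompactPtrScale == 16 can address at most
             // 2^32 * 16 == 2^36 bytes within a region.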
     642    // Call mmap for user memory with at least this size.
     643    static const uptr kUserMapSize = 1 << 16;
     644    // Call mmap for metadata memory with at least this size.
     645    static const uptr kMetaMapSize = 1 << 16;
     646    // Call mmap for free array memory with at least this size.
     647    static const uptr kFreeArrayMapSize = 1 << 16;
     648  
     649    atomic_sint32_t release_to_os_interval_ms_;
     650  
     651    uptr RegionInfoSpace;
     652  
     653    // True if the user has already mapped the entire heap R/W.
     654    bool PremappedHeap;
     655  
     656    struct Stats {
     657      uptr n_allocated;
     658      uptr n_freed;
     659    };
     660  
     661    struct ReleaseToOsInfo {
     662      uptr n_freed_at_last_release;
     663      uptr num_releases;
     664      u64 last_release_at_ns;
     665      u64 last_released_bytes;
     666    };
     667  
     668    struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) RegionInfo {
     669      Mutex mutex;
     670      uptr num_freed_chunks;  // Number of elements in the freearray.
     671      uptr mapped_free_array;  // Bytes mapped for freearray.
     672      uptr allocated_user;  // Bytes allocated for user memory.
     673      uptr allocated_meta;  // Bytes allocated for metadata.
     674      uptr mapped_user;  // Bytes mapped for user memory.
     675      uptr mapped_meta;  // Bytes mapped for metadata.
     676      u32 rand_state;  // Seed for random shuffle, used if kRandomShuffleChunks.
     677      bool exhausted;  // Whether region is out of space for new chunks.
     678      Stats stats;
     679      ReleaseToOsInfo rtoi;
     680    };
     681    COMPILER_CHECK(sizeof(RegionInfo) % kCacheLineSize == 0);
     682  
     683    RegionInfo *GetRegionInfo(uptr class_id) const {
     684      DCHECK_LT(class_id, kNumClasses);
     685      RegionInfo *regions = reinterpret_cast<RegionInfo *>(RegionInfoSpace);
     686      return &regions[class_id];
     687    }
     688  
     689    uptr GetMetadataEnd(uptr region_beg) const {
     690      return region_beg + kRegionSize - kFreeArraySize;
     691    }
     692  
     693    uptr GetChunkIdx(uptr chunk, uptr size) const {
     694      if (!kUsingConstantSpaceBeg)
     695        chunk -= SpaceBeg();
     696  
     697      uptr offset = chunk % kRegionSize;
      698      // Here we divide by a non-constant. This is costly.
      699      // size always fits into 32 bits. If the offset fits too, use a 32-bit div.
     700      if (offset >> (SANITIZER_WORDSIZE / 2))
     701        return offset / size;
     702      return (u32)offset / (u32)size;
     703    }
     704  
     705    CompactPtrT *GetFreeArray(uptr region_beg) const {
     706      return reinterpret_cast<CompactPtrT *>(GetMetadataEnd(region_beg));
     707    }
     708  
     709    bool MapWithCallback(uptr beg, uptr size, const char *name) {
     710      if (PremappedHeap)
     711        return beg >= NonConstSpaceBeg &&
     712               beg + size <= NonConstSpaceBeg + kSpaceSize;
     713      uptr mapped = address_range.Map(beg, size, name);
     714      if (UNLIKELY(!mapped))
     715        return false;
     716      CHECK_EQ(beg, mapped);
     717      MapUnmapCallback().OnMap(beg, size);
     718      return true;
     719    }
     720  
     721    void MapWithCallbackOrDie(uptr beg, uptr size, const char *name) {
     722      if (PremappedHeap) {
     723        CHECK_GE(beg, NonConstSpaceBeg);
     724        CHECK_LE(beg + size, NonConstSpaceBeg + kSpaceSize);
     725        return;
     726      }
     727      CHECK_EQ(beg, address_range.MapOrDie(beg, size, name));
     728      MapUnmapCallback().OnMap(beg, size);
     729    }
     730  
     731    void UnmapWithCallbackOrDie(uptr beg, uptr size) {
     732      if (PremappedHeap)
     733        return;
     734      MapUnmapCallback().OnUnmap(beg, size);
     735      address_range.Unmap(beg, size);
     736    }
     737  
     738    bool EnsureFreeArraySpace(RegionInfo *region, uptr region_beg,
     739                              uptr num_freed_chunks) {
     740      uptr needed_space = num_freed_chunks * sizeof(CompactPtrT);
     741      if (region->mapped_free_array < needed_space) {
     742        uptr new_mapped_free_array = RoundUpTo(needed_space, kFreeArrayMapSize);
     743        CHECK_LE(new_mapped_free_array, kFreeArraySize);
     744        uptr current_map_end = reinterpret_cast<uptr>(GetFreeArray(region_beg)) +
     745                               region->mapped_free_array;
     746        uptr new_map_size = new_mapped_free_array - region->mapped_free_array;
     747        if (UNLIKELY(!MapWithCallback(current_map_end, new_map_size,
     748                                      "SizeClassAllocator: freearray")))
     749          return false;
     750        region->mapped_free_array = new_mapped_free_array;
     751      }
     752      return true;
     753    }
     754  
     755    // Check whether this size class is exhausted.
     756    bool IsRegionExhausted(RegionInfo *region, uptr class_id,
     757                           uptr additional_map_size) {
     758      if (LIKELY(region->mapped_user + region->mapped_meta +
     759                 additional_map_size <= kRegionSize - kFreeArraySize))
     760        return false;
     761      if (!region->exhausted) {
     762        region->exhausted = true;
     763        Printf("%s: Out of memory. ", SanitizerToolName);
     764        Printf("The process has exhausted %zuMB for size class %zu.\n",
     765               kRegionSize >> 20, ClassIdToSize(class_id));
     766      }
     767      return true;
     768    }
     769  
     770    NOINLINE bool PopulateFreeArray(AllocatorStats *stat, uptr class_id,
     771                                    RegionInfo *region, uptr requested_count) {
     772      // region->mutex is held.
     773      const uptr region_beg = GetRegionBeginBySizeClass(class_id);
     774      const uptr size = ClassIdToSize(class_id);
     775  
     776      const uptr total_user_bytes =
     777          region->allocated_user + requested_count * size;
     778      // Map more space for chunks, if necessary.
     779      if (LIKELY(total_user_bytes > region->mapped_user)) {
     780        if (UNLIKELY(region->mapped_user == 0)) {
     781          if (!kUsingConstantSpaceBeg && kRandomShuffleChunks)
     782            // The random state is initialized from ASLR.
     783            region->rand_state = static_cast<u32>(region_beg >> 12);
     784          // Postpone the first release to OS attempt for ReleaseToOSIntervalMs,
     785          // preventing just allocated memory from being released sooner than
     786          // necessary and also preventing extraneous ReleaseMemoryPagesToOS calls
     787          // for short lived processes.
     788          // Do it only when the feature is turned on, to avoid a potentially
     789          // extraneous syscall.
     790          if (ReleaseToOSIntervalMs() >= 0)
     791            region->rtoi.last_release_at_ns = MonotonicNanoTime();
     792        }
     793        // Do the mmap for the user memory.
     794        const uptr user_map_size =
     795            RoundUpTo(total_user_bytes - region->mapped_user, kUserMapSize);
     796        if (UNLIKELY(IsRegionExhausted(region, class_id, user_map_size)))
     797          return false;
     798        if (UNLIKELY(!MapWithCallback(region_beg + region->mapped_user,
     799                                      user_map_size,
     800                                      "SizeClassAllocator: region data")))
     801          return false;
     802        stat->Add(AllocatorStatMapped, user_map_size);
     803        region->mapped_user += user_map_size;
     804      }
     805      const uptr new_chunks_count =
     806          (region->mapped_user - region->allocated_user) / size;
     807  
     808      if (kMetadataSize) {
     809        // Calculate the required space for metadata.
     810        const uptr total_meta_bytes =
     811            region->allocated_meta + new_chunks_count * kMetadataSize;
     812        const uptr meta_map_size = (total_meta_bytes > region->mapped_meta) ?
     813            RoundUpTo(total_meta_bytes - region->mapped_meta, kMetaMapSize) : 0;
     814        // Map more space for metadata, if necessary.
     815        if (meta_map_size) {
     816          if (UNLIKELY(IsRegionExhausted(region, class_id, meta_map_size)))
     817            return false;
     818          if (UNLIKELY(!MapWithCallback(
     819              GetMetadataEnd(region_beg) - region->mapped_meta - meta_map_size,
     820              meta_map_size, "SizeClassAllocator: region metadata")))
     821            return false;
     822          region->mapped_meta += meta_map_size;
     823        }
     824      }
     825  
     826      // If necessary, allocate more space for the free array and populate it with
     827      // newly allocated chunks.
     828      const uptr total_freed_chunks = region->num_freed_chunks + new_chunks_count;
     829      if (UNLIKELY(!EnsureFreeArraySpace(region, region_beg, total_freed_chunks)))
     830        return false;
     831      CompactPtrT *free_array = GetFreeArray(region_beg);
     832      for (uptr i = 0, chunk = region->allocated_user; i < new_chunks_count;
     833           i++, chunk += size)
     834        free_array[total_freed_chunks - 1 - i] = PointerToCompactPtr(0, chunk);
     835      if (kRandomShuffleChunks)
     836        RandomShuffle(&free_array[region->num_freed_chunks], new_chunks_count,
     837                      &region->rand_state);
     838  
     839      // All necessary memory is mapped and now it is safe to advance all
     840      // 'allocated_*' counters.
     841      region->num_freed_chunks += new_chunks_count;
     842      region->allocated_user += new_chunks_count * size;
     843      CHECK_LE(region->allocated_user, region->mapped_user);
     844      region->allocated_meta += new_chunks_count * kMetadataSize;
     845      CHECK_LE(region->allocated_meta, region->mapped_meta);
     846      region->exhausted = false;
     847  
     848      // TODO(alekseyshl): Consider bumping last_release_at_ns here to prevent
     849      // MaybeReleaseToOS from releasing just allocated pages or protect these
     850      // not yet used chunks some other way.
     851  
     852      return true;
     853    }
     854  
     855    // Attempts to release RAM occupied by freed chunks back to OS. The region is
     856    // expected to be locked.
     857    //
     858    // TODO(morehouse): Support a callback on memory release so HWASan can release
     859    // aliases as well.
     860    void MaybeReleaseToOS(MemoryMapperT *memory_mapper, uptr class_id,
     861                          bool force) {
     862      RegionInfo *region = GetRegionInfo(class_id);
     863      const uptr chunk_size = ClassIdToSize(class_id);
     864      const uptr page_size = GetPageSizeCached();
     865  
     866      uptr n = region->num_freed_chunks;
     867      if (n * chunk_size < page_size)
     868        return;  // No chance to release anything.
     869      if ((region->stats.n_freed -
     870           region->rtoi.n_freed_at_last_release) * chunk_size < page_size) {
     871        return;  // Nothing new to release.
     872      }
     873  
     874      if (!force) {
     875        s32 interval_ms = ReleaseToOSIntervalMs();
     876        if (interval_ms < 0)
     877          return;
     878  
     879        if (region->rtoi.last_release_at_ns + interval_ms * 1000000ULL >
     880            MonotonicNanoTime()) {
     881          return;  // Memory was returned recently.
     882        }
     883      }
     884  
     885      ReleaseFreeMemoryToOS(
     886          GetFreeArray(GetRegionBeginBySizeClass(class_id)), n, chunk_size,
     887          RoundUpTo(region->allocated_user, page_size) / page_size, memory_mapper,
     888          class_id);
     889  
     890      uptr ranges, bytes;
     891      if (memory_mapper->GetAndResetStats(ranges, bytes)) {
     892        region->rtoi.n_freed_at_last_release = region->stats.n_freed;
     893        region->rtoi.num_releases += ranges;
     894        region->rtoi.last_released_bytes = bytes;
     895      }
     896      region->rtoi.last_release_at_ns = MonotonicNanoTime();
     897    }
     898  };
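
           // A minimal sketch (illustrative only, not part of this header) of how a
           // Params class for SizeClassAllocator64 might be put together; the
           // constants and the DefaultSizeClassMap / NoOpMapUnmapCallback /
           // LocalAddressSpaceView names are assumptions about the surrounding
           // sanitizer runtime, not definitions made here:
           //
           //   struct ExampleAP64 {
           //     static const uptr kSpaceBeg = ~(uptr)0;  // chosen dynamically by mmap
           //     static const uptr kSpaceSize = 0x40000000000ULL;  // 4T
           //     static const uptr kMetadataSize = 0;
           //     typedef DefaultSizeClassMap SizeClassMap;
           //     typedef NoOpMapUnmapCallback MapUnmapCallback;
           //     static const uptr kFlags = 0;
           //     using AddressSpaceView = LocalAddressSpaceView;
           //   };
           //   typedef SizeClassAllocator64<ExampleAP64> ExamplePrimaryAllocator;
           //
           //   ExamplePrimaryAllocator allocator;
           //   allocator.Init(/*release_to_os_interval_ms=*/-1);  // -1: never release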