gcc-13.2.0/libsanitizer/sanitizer_common/sanitizer_quarantine.h
//===-- sanitizer_quarantine.h ----------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Memory quarantine for AddressSanitizer and potentially other tools.
// The quarantine caches a specified amount of memory in per-thread caches,
// then evicts it to a global FIFO queue. When the queue reaches a specified
// threshold, the oldest memory is recycled.
//
//===----------------------------------------------------------------------===//
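// Typical flow (illustrative summary, not a prescribed call sequence): the
// allocator's free path calls Put(); once a per-thread cache outgrows
// GetCacheSize(), Put() drains it into the global cache; once the global
// cache exceeds GetSize(), Recycle() pops the oldest batches until the total
// drops below min_size_ and hands each chunk to Callback::Recycle().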

#ifndef SANITIZER_QUARANTINE_H
#define SANITIZER_QUARANTINE_H

#include "sanitizer_internal_defs.h"
#include "sanitizer_mutex.h"
#include "sanitizer_list.h"

namespace __sanitizer {

template<typename Callback> class QuarantineCache;

struct QuarantineBatch {
  static const uptr kSize = 1021;
  QuarantineBatch *next;
  uptr size;
  uptr count;
  void *batch[kSize];

  void init(void *ptr, uptr size) {
    count = 1;
    batch[0] = ptr;
    this->size = size + sizeof(QuarantineBatch);  // Account for the batch size.
  }
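
  // Worked example (illustrative): after init(p, 100), size is
  // 100 + sizeof(QuarantineBatch) and quarantined_size() is 100; the batch
  // header itself thus counts against the quarantine limit.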

  // The total size of quarantined nodes recorded in this batch.
  uptr quarantined_size() const {
    return size - sizeof(QuarantineBatch);
  }

  void push_back(void *ptr, uptr size) {
    CHECK_LT(count, kSize);
    batch[count++] = ptr;
    this->size += size;
  }

  bool can_merge(const QuarantineBatch* const from) const {
    return count + from->count <= kSize;
  }

  void merge(QuarantineBatch* const from) {
    CHECK_LE(count + from->count, kSize);
    CHECK_GE(size, sizeof(QuarantineBatch));

    for (uptr i = 0; i < from->count; ++i)
      batch[count + i] = from->batch[i];
    count += from->count;
    size += from->quarantined_size();

    from->count = 0;
    from->size = sizeof(QuarantineBatch);
  }
};

COMPILER_CHECK(sizeof(QuarantineBatch) <= (1 << 13));  // 8 KiB.
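// Note (illustrative arithmetic): on a typical LP64 target the check is
// exact: 3 * 8 header bytes + 1021 * 8 pointer bytes = 8192, so kSize fills
// the 8 KiB budget completely.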

// The callback interface is:
// void Callback::Recycle(Node *ptr);
// void *Callback::Allocate(uptr size);
// void Callback::Deallocate(void *ptr);
template<typename Callback, typename Node>
class Quarantine {
 public:
  typedef QuarantineCache<Callback> Cache;

  explicit Quarantine(LinkerInitialized)
      : cache_(LINKER_INITIALIZED) {
  }

  void Init(uptr size, uptr cache_size) {
    // The thread-local quarantine size can be zero only when the global
    // quarantine size is zero (this lets us perform just one atomic read per
    // Put() call).
    CHECK((size == 0 && cache_size == 0) || cache_size != 0);

    atomic_store_relaxed(&max_size_, size);
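    // Recycling drains the global cache down to 90% of the limit rather than
    // to the limit itself; the slack leaves room for newly quarantined chunks
    // so that one overflow does not immediately re-trigger recycling.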
    atomic_store_relaxed(&min_size_, size / 10 * 9);  // 90% of max size.
    atomic_store_relaxed(&max_cache_size_, cache_size);

    cache_mutex_.Init();
    recycle_mutex_.Init();
  }

  uptr GetSize() const { return atomic_load_relaxed(&max_size_); }
  uptr GetCacheSize() const {
    return atomic_load_relaxed(&max_cache_size_);
  }

  void Put(Cache *c, Callback cb, Node *ptr, uptr size) {
    uptr cache_size = GetCacheSize();
    if (cache_size) {
      c->Enqueue(cb, ptr, size);
    } else {
      // GetCacheSize() == 0 only when GetSize() == 0 (see Init).
      cb.Recycle(ptr);
    }
    // Check the cache size anyway to accommodate runtime cache_size changes.
    if (c->Size() > cache_size)
      Drain(c, cb);
  }

  void NOINLINE Drain(Cache *c, Callback cb) {
    {
      SpinMutexLock l(&cache_mutex_);
      cache_.Transfer(c);
    }
    if (cache_.Size() > GetSize() && recycle_mutex_.TryLock())
      Recycle(atomic_load_relaxed(&min_size_), cb);
  }

  void NOINLINE DrainAndRecycle(Cache *c, Callback cb) {
    {
      SpinMutexLock l(&cache_mutex_);
      cache_.Transfer(c);
    }
    recycle_mutex_.Lock();
    Recycle(0, cb);
  }

  void PrintStats() const {
    // This assumes that the world is stopped, just like the allocator's
    // PrintStats does.
    Printf("Quarantine limits: global: %zdMb; thread local: %zdKb\n",
           GetSize() >> 20, GetCacheSize() >> 10);
    cache_.PrintStats();
  }

 private:
  // Read-only data.
  char pad0_[kCacheLineSize];
  atomic_uintptr_t max_size_;
  atomic_uintptr_t min_size_;
  atomic_uintptr_t max_cache_size_;
  char pad1_[kCacheLineSize];
  StaticSpinMutex cache_mutex_;
  StaticSpinMutex recycle_mutex_;
  Cache cache_;
  char pad2_[kCacheLineSize];

  void NOINLINE Recycle(uptr min_size, Callback cb)
      SANITIZER_REQUIRES(recycle_mutex_) SANITIZER_RELEASE(recycle_mutex_) {
    Cache tmp;
    {
      SpinMutexLock l(&cache_mutex_);
      // Go over the batches and merge partially filled ones to save some
      // memory; otherwise the batches themselves (whose memory counts against
      // the quarantine limit) can outweigh the user's actual quarantined
      // chunks, which defeats the purpose of the quarantine.
      uptr cache_size = cache_.Size();
      uptr overhead_size = cache_.OverheadSize();
      CHECK_GE(cache_size, overhead_size);
      // Do the merge only when the overhead exceeds this predefined limit
      // (which might require some tuning). This spares us a merge attempt
      // when the batch list is unlikely to contain batches suitable for
      // merging.
      const uptr kOverheadThresholdPercents = 100;
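      // (With kOverheadThresholdPercents == 100 the condition below reduces
      // to overhead_size * 2 > cache_size, i.e. batches are merged only once
      // their headers outweigh the quarantined user memory itself.)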
      if (cache_size > overhead_size &&
          overhead_size * (100 + kOverheadThresholdPercents) >
              cache_size * kOverheadThresholdPercents) {
        cache_.MergeBatches(&tmp);
      }
      // Extract enough chunks from the quarantine to get below the max
      // quarantine size and leave some leeway for the newly quarantined chunks.
      while (cache_.Size() > min_size) {
        tmp.EnqueueBatch(cache_.DequeueBatch());
      }
    }
    recycle_mutex_.Unlock();
    DoRecycle(&tmp, cb);
  }

  void NOINLINE DoRecycle(Cache *c, Callback cb) {
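    // Prefetch strategy (descriptive note): warm up the first kPrefetch chunk
    // pointers, then stay kPrefetch elements ahead of the recycling loop to
    // hide memory latency.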
    while (QuarantineBatch *b = c->DequeueBatch()) {
      const uptr kPrefetch = 16;
      CHECK(kPrefetch <= ARRAY_SIZE(b->batch));
      for (uptr i = 0; i < kPrefetch; i++)
        PREFETCH(b->batch[i]);
      for (uptr i = 0, count = b->count; i < count; i++) {
        if (i + kPrefetch < count)
          PREFETCH(b->batch[i + kPrefetch]);
        cb.Recycle((Node*)b->batch[i]);
      }
      cb.Deallocate(b);
    }
  }
};
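
// Illustrative sketch (not part of the original header): a minimal Callback
// satisfying the contract above. RawAlloc, RawFree, and ReturnToAllocator are
// hypothetical hooks standing in for the embedding allocator's real entry
// points.
//
//   struct ExampleCallback {
//     void Recycle(Node *ptr) { ReturnToAllocator(ptr); }   // reuse chunk
//     void *Allocate(uptr size) { return RawAlloc(size); }  // batch storage
//     void Deallocate(void *ptr) { RawFree(ptr); }          // free a batch
//   };
//
//   // Typical use from an allocator's deallocation path:
//   quarantine.Put(&thread_cache, ExampleCallback(), node, size);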

// Per-thread cache of memory blocks.
template<typename Callback>
class QuarantineCache {
 public:
  explicit QuarantineCache(LinkerInitialized) {
  }

  QuarantineCache()
      : size_() {
    list_.clear();
  }

  // Total memory used, including internal accounting.
  uptr Size() const {
    return atomic_load_relaxed(&size_);
  }

  // Memory used for internal accounting.
  uptr OverheadSize() const {
    return list_.size() * sizeof(QuarantineBatch);
  }

  void Enqueue(Callback cb, void *ptr, uptr size) {
    if (list_.empty() || list_.back()->count == QuarantineBatch::kSize) {
      // The current batch is absent or full: start a new one.
      QuarantineBatch *b = (QuarantineBatch *)cb.Allocate(sizeof(*b));
      CHECK(b);
      b->init(ptr, size);
      EnqueueBatch(b);  // Accounts for the batch header via b->size.
    } else {
      list_.back()->push_back(ptr, size);
      SizeAdd(size);
    }
  }

  void Transfer(QuarantineCache *from_cache) {
    list_.append_back(&from_cache->list_);
    SizeAdd(from_cache->Size());

    atomic_store_relaxed(&from_cache->size_, 0);
  }

  void EnqueueBatch(QuarantineBatch *b) {
    list_.push_back(b);
    SizeAdd(b->size);
  }

  QuarantineBatch *DequeueBatch() {
    if (list_.empty())
      return nullptr;
    QuarantineBatch *b = list_.front();
    list_.pop_front();
    SizeSub(b->size);
    return b;
  }

  void MergeBatches(QuarantineCache *to_deallocate) {
    uptr extracted_size = 0;
    QuarantineBatch *current = list_.front();
    while (current && current->next) {
      if (current->can_merge(current->next)) {
        QuarantineBatch *extracted = current->next;
        // Move all the chunks into the current batch.
        current->merge(extracted);
        CHECK_EQ(extracted->count, 0);
        CHECK_EQ(extracted->size, sizeof(QuarantineBatch));
        // Remove the next batch from the list and account for its size.
        list_.extract(current, extracted);
        extracted_size += extracted->size;
        // Add it to the deallocation list.
        to_deallocate->EnqueueBatch(extracted);
      } else {
        current = current->next;
      }
    }
    SizeSub(extracted_size);
  }
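
  // Worked example (illustrative): two adjacent batches holding 300 and 700
  // chunks satisfy can_merge (300 + 700 <= kSize == 1021); merging them frees
  // one batch header, i.e. sizeof(QuarantineBatch) bytes of overhead.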

  void PrintStats() const {
    uptr batch_count = 0;
    uptr total_overhead_bytes = 0;
    uptr total_bytes = 0;
    uptr total_quarantine_chunks = 0;
    for (List::ConstIterator it = list_.begin(); it != list_.end(); ++it) {
      batch_count++;
      total_bytes += (*it).size;
      total_overhead_bytes += (*it).size - (*it).quarantined_size();
      total_quarantine_chunks += (*it).count;
    }
    uptr quarantine_chunks_capacity = batch_count * QuarantineBatch::kSize;
    int chunks_usage_percent = quarantine_chunks_capacity == 0 ?
        0 : total_quarantine_chunks * 100 / quarantine_chunks_capacity;
    uptr total_quarantined_bytes = total_bytes - total_overhead_bytes;
    int memory_overhead_percent = total_quarantined_bytes == 0 ?
        0 : total_overhead_bytes * 100 / total_quarantined_bytes;
    Printf("Global quarantine stats: batches: %zd; bytes: %zd (user: %zd); "
           "chunks: %zd (capacity: %zd); %d%% chunks used; %d%% memory overhead"
           "\n",
           batch_count, total_bytes, total_quarantined_bytes,
           total_quarantine_chunks, quarantine_chunks_capacity,
           chunks_usage_percent, memory_overhead_percent);
  }

 private:
  typedef IntrusiveList<QuarantineBatch> List;

  List list_;
  atomic_uintptr_t size_;

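  // Note: these helpers are plain load-then-store sequences, not atomic
  // read-modify-write operations. Correctness relies on callers serializing
  // mutations of a given cache (e.g. the global cache is only mutated under
  // Quarantine::cache_mutex_); the relaxed atomics merely keep concurrent
  // Size() readers well-defined.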
  void SizeAdd(uptr add) {
    atomic_store_relaxed(&size_, Size() + add);
  }
  void SizeSub(uptr sub) {
    atomic_store_relaxed(&size_, Size() - sub);
  }
};

} // namespace __sanitizer

#endif // SANITIZER_QUARANTINE_H