gcc-13.2.0/libsanitizer/hwasan/hwasan_thread_list.h
//===-- hwasan_thread_list.h ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of HWAddressSanitizer.
//
//===----------------------------------------------------------------------===//

// HwasanThreadList is a registry for live threads, as well as an allocator for
// HwasanThread objects and their stack history ring buffers. There are
// constraints on memory layout of the shadow region and CompactRingBuffer that
// are part of the ABI contract between compiler-rt and llvm.
//
// * Start of the shadow memory region is aligned to 2**kShadowBaseAlignment.
// * All stack ring buffers are located within a (2**kShadowBaseAlignment)-sized
// region below and adjacent to the shadow region.
// * Each ring buffer has a size of (2**N)*4096 where N is in [0, 8), and is
// aligned to twice its size. The value of N can be different for each buffer.
//
// These constraints guarantee that, given an address A of any element of the
// ring buffer,
//     A_next = (A + sizeof(uptr)) & ~(1 << (N + 12))
//   is the address of the next element of that ring buffer (with wrap-around):
//   since the buffer is 2**(N+12) bytes long and aligned to 2**(N+13), the
//   increment sets bit N + 12 exactly when it runs past the end of the buffer,
//   and clearing that bit wraps the address back to the buffer start.
// And, with K = kShadowBaseAlignment,
//     S = (A | ((1 << K) - 1)) + 1
//   (align up to kShadowBaseAlignment) is the start of the shadow region.
//
// These calculations are used in compiler instrumentation to update the ring
// buffer and obtain the base address of shadow using only two inputs: address
// of the current element of the ring buffer, and N (i.e. size of the ring
// buffer). Since the value of N is very limited, we pack both inputs into a
// single thread-local word as
//   (1 << (N + 56)) | A
// See the implementation of class CompactRingBuffer, which is what is stored in
// said thread-local word.
//
// Note the unusual way of aligning up the address of the shadow:
//   (A | ((1 << K) - 1)) + 1
// It is only correct if A is not already equal to the shadow base address, but
// it saves 2 instructions on AArch64.
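//
// A concrete illustration (hypothetical addresses, for exposition only): with
// N = 1 the ring buffer is 2**1 * 4096 = 8192 bytes, aligned to 16384. If the
// buffer is based at 0x7000004000, its last element sits at 0x7000005ff8, the
// packed thread-local word for an element A is (1 << 57) | A (size in pages,
// 2**N = 2, in the top byte), and, if K were 32, the shadow base would be
// (0x7000005ff8 | 0xffffffff) + 1 = 0x7100000000. The asserts below merely
// document the wrap-around arithmetic with these example values.
static_assert(((0x7000005ff8ULL + 8) & ~(1ULL << 13)) == 0x7000004000ULL,
              "incrementing the last element wraps to the buffer base");
static_assert(((0x7000004000ULL + 8) & ~(1ULL << 13)) == 0x7000004008ULL,
              "interior elements are unaffected by the wrap mask");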

#include "hwasan.h"
#include "hwasan_allocator.h"
#include "hwasan_flags.h"
#include "hwasan_thread.h"

#include "sanitizer_common/sanitizer_placement_new.h"

namespace __hwasan {

static uptr RingBufferSize() {
  uptr desired_bytes = flags()->stack_history_size * sizeof(uptr);
  // FIXME: increase the limit to 8 once this bug is fixed:
  // https://bugs.llvm.org/show_bug.cgi?id=39030
  for (int shift = 1; shift < 7; ++shift) {
    uptr size = 4096 * (1ULL << shift);
    if (size >= desired_bytes)
      return size;
  }
  Printf("stack history size too large: %d\n", flags()->stack_history_size);
  CHECK(0);
  return 0;
}
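
// For example (values chosen only for illustration), with stack_history_size =
// 1024 entries and an 8-byte uptr, desired_bytes is 8192; the first shift that
// fits is shift = 1, yielding a 4096 * 2 = 8192-byte ring buffer.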

struct ThreadStats {
  uptr n_live_threads;
  uptr total_stack_size;
};

class HwasanThreadList {
 public:
  HwasanThreadList(uptr storage, uptr size)
      : free_space_(storage), free_space_end_(storage + size) {
    // [storage, storage + size) is used as a vector of
    // thread_alloc_size_-sized, ring_buffer_size_*2-aligned elements.
    // Each element contains
    // * a ring buffer at offset 0,
    // * a Thread object at offset ring_buffer_size_.
    ring_buffer_size_ = RingBufferSize();
    thread_alloc_size_ =
        RoundUpTo(ring_buffer_size_ + sizeof(Thread), ring_buffer_size_ * 2);
  }
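
  // Continuing the 8192-byte example above (illustrative numbers only):
  // assuming sizeof(Thread) is below 8192, thread_alloc_size_ =
  // RoundUpTo(8192 + sizeof(Thread), 16384) = 16384, so each element occupies
  // 16 KiB: the ring buffer first, the Thread object right after it, and
  // unused padding up to the next 16384-byte boundary.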

  Thread *CreateCurrentThread(const Thread::InitState *state = nullptr) {
    Thread *t = nullptr;
    {
      SpinMutexLock l(&free_list_mutex_);
      if (!free_list_.empty()) {
        t = free_list_.back();
        free_list_.pop_back();
      }
    }
    if (t) {
      uptr start = (uptr)t - ring_buffer_size_;
      internal_memset((void *)start, 0, ring_buffer_size_ + sizeof(Thread));
    } else {
      t = AllocThread();
    }
    {
      SpinMutexLock l(&live_list_mutex_);
      live_list_.push_back(t);
    }
    t->Init((uptr)t - ring_buffer_size_, ring_buffer_size_, state);
    AddThreadStats(t);
    return t;
  }

  void DontNeedThread(Thread *t) {
    uptr start = (uptr)t - ring_buffer_size_;
    ReleaseMemoryPagesToOS(start, start + thread_alloc_size_);
  }

  void RemoveThreadFromLiveList(Thread *t) {
    SpinMutexLock l(&live_list_mutex_);
    for (Thread *&t2 : live_list_)
      if (t2 == t) {
        // To remove t2, copy the last element of the list into t2's position,
        // and pop_back(). This works even if t2 is itself the last element.
        t2 = live_list_.back();
        live_list_.pop_back();
        return;
      }
    CHECK(0 && "thread not found in live list");
  }

  void ReleaseThread(Thread *t) {
    RemoveThreadStats(t);
    t->Destroy();
    DontNeedThread(t);
    RemoveThreadFromLiveList(t);
    SpinMutexLock l(&free_list_mutex_);
    free_list_.push_back(t);
  }

  Thread *GetThreadByBufferAddress(uptr p) {
    return (Thread *)(RoundDownTo(p, ring_buffer_size_ * 2) +
                      ring_buffer_size_);
  }
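
  // Continuing the illustration: elements are 16384-aligned, so for any
  // address p inside an element whose ring buffer starts at 0x7000004000
  // (e.g. p = 0x7000004010), RoundDownTo(p, 16384) recovers 0x7000004000 and
  // the owning Thread object is found at 0x7000004000 + 8192 = 0x7000006000.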

  uptr MemoryUsedPerThread() {
    uptr res = sizeof(Thread) + ring_buffer_size_;
    if (auto sz = flags()->heap_history_size)
      res += HeapAllocationsRingBuffer::SizeInBytes(sz);
    return res;
  }

  template <class CB>
  void VisitAllLiveThreads(CB cb) {
    SpinMutexLock l(&live_list_mutex_);
    for (Thread *t : live_list_) cb(t);
  }

  void AddThreadStats(Thread *t) {
    SpinMutexLock l(&stats_mutex_);
    stats_.n_live_threads++;
    stats_.total_stack_size += t->stack_size();
  }

  void RemoveThreadStats(Thread *t) {
    SpinMutexLock l(&stats_mutex_);
    stats_.n_live_threads--;
    stats_.total_stack_size -= t->stack_size();
  }

  ThreadStats GetThreadStats() {
    SpinMutexLock l(&stats_mutex_);
    return stats_;
  }

  uptr GetRingBufferSize() const { return ring_buffer_size_; }

 private:
  Thread *AllocThread() {
    SpinMutexLock l(&free_space_mutex_);
    uptr align = ring_buffer_size_ * 2;
    CHECK(IsAligned(free_space_, align));
    Thread *t = (Thread *)(free_space_ + ring_buffer_size_);
    free_space_ += thread_alloc_size_;
    CHECK(free_space_ <= free_space_end_ && "out of thread memory");
    return t;
  }

  SpinMutex free_space_mutex_;
  uptr free_space_;
  uptr free_space_end_;
  uptr ring_buffer_size_;
  uptr thread_alloc_size_;

  SpinMutex free_list_mutex_;
  InternalMmapVector<Thread *> free_list_;
  SpinMutex live_list_mutex_;
  InternalMmapVector<Thread *> live_list_;

  ThreadStats stats_;
  SpinMutex stats_mutex_;
};

void InitThreadList(uptr storage, uptr size);
HwasanThreadList &hwasanThreadList();
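
// Sketch of intended use (illustrative, not a prescribed sequence): the
// runtime calls InitThreadList() once with a preallocated region, and then,
// on each thread, something like
//   Thread *t = hwasanThreadList().CreateCurrentThread();
//   ...
//   hwasanThreadList().ReleaseThread(t);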

} // namespace __hwasan