//===-- sanitizer_common.h --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between run-time libraries of sanitizers.
//
// It declares common functions and classes that are used in both runtimes.
// Implementations of some functions are provided in sanitizer_common, while
// others must be defined by the run-time library itself.
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_COMMON_H
#define SANITIZER_COMMON_H

#include "sanitizer_flags.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_list.h"
#include "sanitizer_mutex.h"

#if defined(_MSC_VER) && !defined(__clang__)
extern "C" void _ReadWriteBarrier();
#pragma intrinsic(_ReadWriteBarrier)
#endif

namespace __sanitizer {

struct AddressInfo;
struct BufferedStackTrace;
struct SignalContext;
struct StackTrace;

// Constants.
const uptr kWordSize = SANITIZER_WORDSIZE / 8;
const uptr kWordSizeInBits = 8 * kWordSize;

const uptr kCacheLineSize = SANITIZER_CACHE_LINE_SIZE;

const uptr kMaxPathLength = 4096;

const uptr kMaxThreadStackSize = 1 << 30;  // 1 GiB

const uptr kErrorMessageBufferSize = 1 << 16;

// Denotes fake PC values that come from JIT/JAVA/etc.
// For such PC values __tsan_symbolize_external_ex() will be called.
const u64 kExternalPCBit = 1ULL << 60;

extern const char *SanitizerToolName;  // Can be changed by the tool.

extern atomic_uint32_t current_verbosity;
inline void SetVerbosity(int verbosity) {
  atomic_store(&current_verbosity, verbosity, memory_order_relaxed);
}
inline int Verbosity() {
  return atomic_load(&current_verbosity, memory_order_relaxed);
}

#if SANITIZER_ANDROID
inline uptr GetPageSize() {
  // Android post-M sysconf(_SC_PAGESIZE) crashes if called from .preinit_array.
  return 4096;
}
inline uptr GetPageSizeCached() {
  return 4096;
}
#else
uptr GetPageSize();
extern uptr PageSizeCached;
inline uptr GetPageSizeCached() {
  if (!PageSizeCached)
    PageSizeCached = GetPageSize();
  return PageSizeCached;
}
#endif
uptr GetMmapGranularity();
uptr GetMaxVirtualAddress();
uptr GetMaxUserVirtualAddress();
// Threads
tid_t GetTid();
int TgKill(pid_t pid, tid_t tid, int sig);
uptr GetThreadSelf();
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
                                uptr *stack_bottom);
void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
                          uptr *tls_addr, uptr *tls_size);

// Memory management
void *MmapOrDie(uptr size, const char *mem_type, bool raw_report = false);
inline void *MmapOrDieQuietly(uptr size, const char *mem_type) {
  return MmapOrDie(size, mem_type, /*raw_report*/ true);
}
void UnmapOrDie(void *addr, uptr size);
// Behaves just like MmapOrDie, but tolerates an out-of-memory condition; in
// that case it returns nullptr.
void *MmapOrDieOnFatalError(uptr size, const char *mem_type);
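// Example usage (an illustrative sketch; "my_tool" is a made-up mem_type tag):
//   uptr size = RoundUpTo(n_bytes, GetPageSizeCached());
//   void *buf = MmapOrDie(size, "my_tool");  // dies instead of returning null
//   ...
//   UnmapOrDie(buf, size);  // must be passed the same size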
bool MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name = nullptr)
     WARN_UNUSED_RESULT;
bool MmapFixedSuperNoReserve(uptr fixed_addr, uptr size,
                             const char *name = nullptr) WARN_UNUSED_RESULT;
void *MmapNoReserveOrDie(uptr size, const char *mem_type);
void *MmapFixedOrDie(uptr fixed_addr, uptr size, const char *name = nullptr);
// Behaves just like MmapFixedOrDie, but tolerates an out-of-memory condition;
// in that case it returns nullptr.
void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size,
                                 const char *name = nullptr);
void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name = nullptr);
void *MmapNoAccess(uptr size);
// Maps an aligned chunk of address space; size and alignment are powers of two.
// Dies on all errors except out-of-memory; in that case returns nullptr.
void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
                                   const char *mem_type);
// Disallow access to a memory range.  Use MmapFixedNoAccess to allocate an
// inaccessible memory range.
bool MprotectNoAccess(uptr addr, uptr size);
bool MprotectReadOnly(uptr addr, uptr size);

void MprotectMallocZones(void *addr, int prot);

#if SANITIZER_WINDOWS
// Zero previously mmap'd memory. Currently used only on Windows.
bool ZeroMmapFixedRegion(uptr fixed_addr, uptr size) WARN_UNUSED_RESULT;
#endif

#if SANITIZER_LINUX
// Unmap memory. Currently only used on Linux.
void UnmapFromTo(uptr from, uptr to);
#endif

// Maps shadow_size_bytes of shadow memory and returns the shadow address. It
// will be aligned to the mmap granularity * 2^shadow_scale, or to
// 2^min_shadow_base_alignment if that is larger. The returned address will
// have max(2^min_shadow_base_alignment, mmap granularity) on the left, and
// shadow_size_bytes bytes on the right, which on Linux is mapped no-access.
// The high_mem_end may be updated if the original shadow size doesn't fit.
uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
                      uptr min_shadow_base_alignment, uptr &high_mem_end);
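// For instance (illustrative numbers): with a 4 KiB mmap granularity,
// shadow_scale == 3 and min_shadow_base_alignment == 0, the result is aligned
// to 4 KiB * 2^3 == 32 KiB; with min_shadow_base_alignment == 18, the larger
// 2^18 == 256 KiB alignment wins instead.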

// Let S = max(shadow_size, num_aliases * alias_size, ring_buffer_size).
// Reserves 2*S bytes of address space to the right of the returned address and
// ring_buffer_size bytes to the left.  The returned address is aligned to 2*S.
// Also creates num_aliases regions of accessible memory starting at offset S
// from the returned address.  Each region has size alias_size and is backed by
// the same physical memory.
uptr MapDynamicShadowAndAliases(uptr shadow_size, uptr alias_size,
                                uptr num_aliases, uptr ring_buffer_size);

// Reserve the memory range [beg, end]. If madvise_shadow is true then apply
// madvise (e.g. hugepages, core dumping) as requested by options.
void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name,
                              bool madvise_shadow = true);

// Protect size bytes of memory starting at addr. Also try to protect
// several pages at the start of the address space as specified by
// zero_base_shadow_start, at most up to the size or zero_base_max_shadow_start.
void ProtectGap(uptr addr, uptr size, uptr zero_base_shadow_start,
                uptr zero_base_max_shadow_start);

// Find an available address range.
uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
                              uptr *largest_gap_found, uptr *max_occupied_addr);

// Used to check if we can map shadow memory to a fixed location.
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end);
// Releases memory pages entirely within the [beg, end] address range. No-op if
// the provided range does not contain at least one entire page.
void ReleaseMemoryPagesToOS(uptr beg, uptr end);
void IncreaseTotalMmap(uptr size);
void DecreaseTotalMmap(uptr size);
uptr GetRSS();
void SetShadowRegionHugePageMode(uptr addr, uptr length);
bool DontDumpShadowMemory(uptr addr, uptr length);
// Check if the VMA size the library was built for matches the runtime one.
void CheckVMASize();
void RunMallocHooks(void *ptr, uptr size);
void RunFreeHooks(void *ptr);

class ReservedAddressRange {
 public:
  uptr Init(uptr size, const char *name = nullptr, uptr fixed_addr = 0);
  uptr InitAligned(uptr size, uptr align, const char *name = nullptr);
  uptr Map(uptr fixed_addr, uptr size, const char *name = nullptr);
  uptr MapOrDie(uptr fixed_addr, uptr size, const char *name = nullptr);
  void Unmap(uptr addr, uptr size);
  void *base() const { return base_; }
  uptr size() const { return size_; }

 private:
  void* base_;
  uptr size_;
  const char* name_;
  uptr os_handle_;
};
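// Example usage (an illustrative sketch; the name tag is made up). The range
// is reserved first, then subranges are committed on demand:
//   ReservedAddressRange range;
//   uptr base = range.Init(1 << 20, "my_reservation");
//   range.MapOrDie(base, GetPageSizeCached());  // commit the first page
//   range.Unmap(base, range.size());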

typedef void (*fill_profile_f)(uptr start, uptr rss, bool file,
                               /*out*/ uptr *stats);

// Parse the contents of /proc/self/smaps and generate a memory profile.
// |cb| is a tool-specific callback that fills the |stats| array.
void GetMemoryProfile(fill_profile_f cb, uptr *stats);
void ParseUnixMemoryProfile(fill_profile_f cb, uptr *stats, char *smaps,
                            uptr smaps_len);

// Simple low-level (mmap-based) allocator for internal use. Doesn't have a
// constructor, so all instances of LowLevelAllocator should be
// linker initialized.
class LowLevelAllocator {
 public:
  // Requires an external lock.
  void *Allocate(uptr size);
 private:
  char *allocated_end_;
  char *allocated_current_;
};
// Set the minimum alignment of LowLevelAllocator to at least alignment.
void SetLowLevelAllocateMinAlignment(uptr alignment);
typedef void (*LowLevelAllocateCallback)(uptr ptr, uptr size);
// Allows registering a tool-specific callback for LowLevelAllocator.
// Passing NULL removes the callback.
void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback);
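// Example usage (an illustrative sketch): together with the placement
// operator new defined at the bottom of this header, and assuming the caller
// provides the required external locking:
//   static LowLevelAllocator allocator;  // linker-initialized, as required
//   T *obj = new (allocator) T();        // memory is never returned to the OS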

// IO
void CatastrophicErrorWrite(const char *buffer, uptr length);
void RawWrite(const char *buffer);
bool ColorizeReports();
void RemoveANSIEscapeSequencesFromString(char *buffer);
void Printf(const char *format, ...) FORMAT(1, 2);
void Report(const char *format, ...) FORMAT(1, 2);
void SetPrintfAndReportCallback(void (*callback)(const char *));
#define VReport(level, ...)                                \
  do {                                                     \
    if ((uptr)Verbosity() >= (level)) Report(__VA_ARGS__); \
  } while (0)
#define VPrintf(level, ...)                                \
  do {                                                     \
    if ((uptr)Verbosity() >= (level)) Printf(__VA_ARGS__); \
  } while (0)
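// Example usage (illustrative): the message is printed only when the
// effective verbosity is at least 1, e.g. under ASAN_OPTIONS=verbosity=1:
//   VReport(1, "Using mmap granularity 0x%zx\n", GetMmapGranularity());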

// Locks sanitizer error reporting and protects against nested errors.
class ScopedErrorReportLock {
 public:
  ScopedErrorReportLock() SANITIZER_ACQUIRE(mutex_) { Lock(); }
  ~ScopedErrorReportLock() SANITIZER_RELEASE(mutex_) { Unlock(); }

  static void Lock() SANITIZER_ACQUIRE(mutex_);
  static void Unlock() SANITIZER_RELEASE(mutex_);
  static void CheckLocked() SANITIZER_CHECK_LOCKED(mutex_);

 private:
  static atomic_uintptr_t reporting_thread_;
  static StaticSpinMutex mutex_;
};
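// Example usage (illustrative): hold the lock for the whole report so output
// from concurrently failing threads cannot interleave:
//   {
//     ScopedErrorReportLock lock;
//     Report("ERROR: ...\n");
//   }  // released at the end of the scope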

extern uptr stoptheworld_tracer_pid;
extern uptr stoptheworld_tracer_ppid;

bool IsAccessibleMemoryRange(uptr beg, uptr size);

// Error report formatting.
const char *StripPathPrefix(const char *filepath,
                            const char *strip_file_prefix);
// Strip the directories from the module name.
const char *StripModuleName(const char *module);

// OS
uptr ReadBinaryName(/*out*/char *buf, uptr buf_len);
uptr ReadBinaryNameCached(/*out*/char *buf, uptr buf_len);
uptr ReadBinaryDir(/*out*/ char *buf, uptr buf_len);
uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len);
const char *GetProcessName();
void UpdateProcessName();
void CacheBinaryName();
void DisableCoreDumperIfNecessary();
void DumpProcessMap();
const char *GetEnv(const char *name);
bool SetEnv(const char *name, const char *value);

u32 GetUid();
void ReExec();
void CheckASLR();
void CheckMPROTECT();
char **GetArgv();
char **GetEnviron();
void PrintCmdline();
bool StackSizeIsUnlimited();
void SetStackSizeLimitInBytes(uptr limit);
bool AddressSpaceIsUnlimited();
void SetAddressSpaceUnlimited();
void AdjustStackSize(void *attr);
void PlatformPrepareForSandboxing(void *args);
void SetSandboxingCallback(void (*f)());

void InitializeCoverage(bool enabled, const char *coverage_dir);

void InitTlsSize();
uptr GetTlsSize();

// Other
void WaitForDebugger(unsigned seconds, const char *label);
void SleepForSeconds(unsigned seconds);
void SleepForMillis(unsigned millis);
u64 NanoTime();
u64 MonotonicNanoTime();
int Atexit(void (*function)(void));
bool TemplateMatch(const char *templ, const char *str);

// Exit
void NORETURN Abort();
void NORETURN Die();
void NORETURN
CheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2);
void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
                                      const char *mmap_type, error_t err,
                                      bool raw_report = false);

// Returns true if the platform-specific error reported is an OOM error.
bool ErrorIsOOM(error_t err);

// This reports an error in the form:
//
//   `ERROR: {{SanitizerToolName}}: out of memory: {{err_msg}}`
//
// Downstream tools that read sanitizer output will know that errors in this
// format are specifically OOM errors.
#define ERROR_OOM(err_msg, ...) \
  Report("ERROR: %s: out of memory: " err_msg, SanitizerToolName, __VA_ARGS__)
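// Example usage (illustrative): err_msg must be a string literal, since it is
// concatenated with the prefix at compile time:
//   ERROR_OOM("failed to allocate 0x%zx bytes\n", size);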

// Specific tools may override the behavior of the "Die" function to do a
// tool-specific job.
typedef void (*DieCallbackType)(void);

// It's possible to add several callbacks that will be run when "Die" is
// called. The callbacks are run in the reverse order of registration. Tools
// are strongly recommended to set up all callbacks during initialization,
// when there is only a single thread.
bool AddDieCallback(DieCallbackType callback);
bool RemoveDieCallback(DieCallbackType callback);

void SetUserDieCallback(DieCallbackType callback);

void SetCheckUnwindCallback(void (*callback)());

// Functions related to signal handling.
typedef void (*SignalHandlerType)(int, void *, void *);
HandleSignalMode GetHandleSignalMode(int signum);
void InstallDeadlySignalHandlers(SignalHandlerType handler);

// Signal reporting.
// Each sanitizer uses a slightly different implementation of stack unwinding.
typedef void (*UnwindSignalStackCallbackType)(const SignalContext &sig,
                                              const void *callback_context,
                                              BufferedStackTrace *stack);
// Print deadly signal report and die.
void HandleDeadlySignal(void *siginfo, void *context, u32 tid,
                        UnwindSignalStackCallbackType unwind,
                        const void *unwind_context);

// Part of HandleDeadlySignal, exposed for asan.
void StartReportDeadlySignal();
// Part of HandleDeadlySignal, exposed for asan.
void ReportDeadlySignal(const SignalContext &sig, u32 tid,
                        UnwindSignalStackCallbackType unwind,
                        const void *unwind_context);

// Alternative signal stack (POSIX-only).
void SetAlternateSignalStack();
void UnsetAlternateSignalStack();

// Construct a one-line string:
//   SUMMARY: SanitizerToolName: error_message
// and pass it to __sanitizer_report_error_summary.
// If alt_tool_name is provided, it's used in place of SanitizerToolName.
void ReportErrorSummary(const char *error_message,
                        const char *alt_tool_name = nullptr);
// Same as above, but construct error_message as:
//   error_type file:line[:column][ function]
void ReportErrorSummary(const char *error_type, const AddressInfo &info,
                        const char *alt_tool_name = nullptr);
// Same as above, but obtains AddressInfo by symbolizing top stack trace frame.
void ReportErrorSummary(const char *error_type, const StackTrace *trace,
                        const char *alt_tool_name = nullptr);

void ReportMmapWriteExec(int prot, int mflags);

// Math
#if SANITIZER_WINDOWS && !defined(__clang__) && !defined(__GNUC__)
extern "C" {
unsigned char _BitScanForward(unsigned long *index, unsigned long mask);
unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);
#if defined(_WIN64)
unsigned char _BitScanForward64(unsigned long *index, unsigned __int64 mask);
unsigned char _BitScanReverse64(unsigned long *index, unsigned __int64 mask);
#endif
}
#endif

inline uptr MostSignificantSetBitIndex(uptr x) {
  CHECK_NE(x, 0U);
  unsigned long up;
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
# ifdef _WIN64
  up = SANITIZER_WORDSIZE - 1 - __builtin_clzll(x);
# else
  up = SANITIZER_WORDSIZE - 1 - __builtin_clzl(x);
# endif
#elif defined(_WIN64)
  _BitScanReverse64(&up, x);
#else
  _BitScanReverse(&up, x);
#endif
  return up;
}

inline uptr LeastSignificantSetBitIndex(uptr x) {
  CHECK_NE(x, 0U);
  unsigned long up;
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
# ifdef _WIN64
  up = __builtin_ctzll(x);
# else
  up = __builtin_ctzl(x);
# endif
#elif defined(_WIN64)
  _BitScanForward64(&up, x);
#else
  _BitScanForward(&up, x);
#endif
  return up;
}
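// For instance: MostSignificantSetBitIndex(40) == 5 and
// LeastSignificantSetBitIndex(40) == 3, since 40 == 0b101000.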

inline constexpr bool IsPowerOfTwo(uptr x) { return (x & (x - 1)) == 0; }

inline uptr RoundUpToPowerOfTwo(uptr size) {
  CHECK(size);
  if (IsPowerOfTwo(size)) return size;

  uptr up = MostSignificantSetBitIndex(size);
  CHECK_LT(size, (1ULL << (up + 1)));
  CHECK_GT(size, (1ULL << up));
  return 1ULL << (up + 1);
}

inline constexpr uptr RoundUpTo(uptr size, uptr boundary) {
  RAW_CHECK(IsPowerOfTwo(boundary));
  return (size + boundary - 1) & ~(boundary - 1);
}

inline constexpr uptr RoundDownTo(uptr x, uptr boundary) {
  return x & ~(boundary - 1);
}

inline constexpr bool IsAligned(uptr a, uptr alignment) {
  return (a & (alignment - 1)) == 0;
}

inline uptr Log2(uptr x) {
  CHECK(IsPowerOfTwo(x));
  return LeastSignificantSetBitIndex(x);
}
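// For instance: RoundUpTo(13, 8) == 16, RoundDownTo(13, 8) == 8,
// IsAligned(16, 8) == true and Log2(8) == 3. The boundary must be a power of
// two for the mask arithmetic above to be correct.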

// Don't use std::min, std::max or std::swap, to minimize dependency
// on libstdc++.
template <class T>
constexpr T Min(T a, T b) {
  return a < b ? a : b;
}
template <class T>
constexpr T Max(T a, T b) {
  return a > b ? a : b;
}
template <class T>
constexpr T Abs(T a) {
  return a < 0 ? -a : a;
}
template<class T> void Swap(T& a, T& b) {
  T tmp = a;
  a = b;
  b = tmp;
}

// Char handling
inline bool IsSpace(int c) {
  return (c == ' ') || (c == '\n') || (c == '\t') ||
         (c == '\f') || (c == '\r') || (c == '\v');
}
inline bool IsDigit(int c) {
  return (c >= '0') && (c <= '9');
}
inline int ToLower(int c) {
  return (c >= 'A' && c <= 'Z') ? (c + 'a' - 'A') : c;
}

// A low-level vector based on mmap. May incur a significant memory overhead for
// small vectors.
// WARNING: The current implementation supports only POD types.
template<typename T>
class InternalMmapVectorNoCtor {
 public:
  using value_type = T;
  void Initialize(uptr initial_capacity) {
    capacity_bytes_ = 0;
    size_ = 0;
    data_ = 0;
    reserve(initial_capacity);
  }
  void Destroy() { UnmapOrDie(data_, capacity_bytes_); }
  T &operator[](uptr i) {
    CHECK_LT(i, size_);
    return data_[i];
  }
  const T &operator[](uptr i) const {
    CHECK_LT(i, size_);
    return data_[i];
  }
  void push_back(const T &element) {
    CHECK_LE(size_, capacity());
    if (size_ == capacity()) {
      uptr new_capacity = RoundUpToPowerOfTwo(size_ + 1);
      Realloc(new_capacity);
    }
    internal_memcpy(&data_[size_++], &element, sizeof(T));
  }
  T &back() {
    CHECK_GT(size_, 0);
    return data_[size_ - 1];
  }
  void pop_back() {
    CHECK_GT(size_, 0);
    size_--;
  }
  uptr size() const {
    return size_;
  }
  const T *data() const {
    return data_;
  }
  T *data() {
    return data_;
  }
  uptr capacity() const { return capacity_bytes_ / sizeof(T); }
  void reserve(uptr new_size) {
    // Never downsize internal buffer.
    if (new_size > capacity())
      Realloc(new_size);
  }
  void resize(uptr new_size) {
    if (new_size > size_) {
      reserve(new_size);
      internal_memset(&data_[size_], 0, sizeof(T) * (new_size - size_));
    }
    size_ = new_size;
  }

  void clear() { size_ = 0; }
  bool empty() const { return size() == 0; }

  const T *begin() const {
    return data();
  }
  T *begin() {
    return data();
  }
  const T *end() const {
    return data() + size();
  }
  T *end() {
    return data() + size();
  }

  void swap(InternalMmapVectorNoCtor &other) {
    Swap(data_, other.data_);
    Swap(capacity_bytes_, other.capacity_bytes_);
    Swap(size_, other.size_);
  }

 private:
  void Realloc(uptr new_capacity) {
    CHECK_GT(new_capacity, 0);
    CHECK_LE(size_, new_capacity);
    uptr new_capacity_bytes =
        RoundUpTo(new_capacity * sizeof(T), GetPageSizeCached());
    T *new_data = (T *)MmapOrDie(new_capacity_bytes, "InternalMmapVector");
    internal_memcpy(new_data, data_, size_ * sizeof(T));
    UnmapOrDie(data_, capacity_bytes_);
    data_ = new_data;
    capacity_bytes_ = new_capacity_bytes;
  }

  T *data_;
  uptr capacity_bytes_;
  uptr size_;
};

template <typename T>
bool operator==(const InternalMmapVectorNoCtor<T> &lhs,
                const InternalMmapVectorNoCtor<T> &rhs) {
  if (lhs.size() != rhs.size()) return false;
  return internal_memcmp(lhs.data(), rhs.data(), lhs.size() * sizeof(T)) == 0;
}

template <typename T>
bool operator!=(const InternalMmapVectorNoCtor<T> &lhs,
                const InternalMmapVectorNoCtor<T> &rhs) {
  return !(lhs == rhs);
}

template<typename T>
class InternalMmapVector : public InternalMmapVectorNoCtor<T> {
 public:
  InternalMmapVector() { InternalMmapVectorNoCtor<T>::Initialize(0); }
  explicit InternalMmapVector(uptr cnt) {
    InternalMmapVectorNoCtor<T>::Initialize(cnt);
    this->resize(cnt);
  }
  ~InternalMmapVector() { InternalMmapVectorNoCtor<T>::Destroy(); }
  // Disallow copies and moves.
  InternalMmapVector(const InternalMmapVector &) = delete;
  InternalMmapVector &operator=(const InternalMmapVector &) = delete;
  InternalMmapVector(InternalMmapVector &&) = delete;
  InternalMmapVector &operator=(InternalMmapVector &&) = delete;
};
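// Example usage (illustrative): behaves like a stripped-down std::vector,
// but the storage comes from MmapOrDie rather than the heap:
//   InternalMmapVector<uptr> addrs;
//   addrs.push_back(0x1000);
//   addrs.push_back(0x2000);
//   CHECK_EQ(addrs.size(), 2);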

class InternalScopedString {
 public:
  InternalScopedString() : buffer_(1) { buffer_[0] = '\0'; }

  uptr length() const { return buffer_.size() - 1; }
  void clear() {
    buffer_.resize(1);
    buffer_[0] = '\0';
  }
  void append(const char *format, ...) FORMAT(2, 3);
  const char *data() const { return buffer_.data(); }
  char *data() { return buffer_.data(); }

 private:
  InternalMmapVector<char> buffer_;
};
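// Example usage (illustrative): append() is printf-like and keeps the buffer
// NUL-terminated:
//   InternalScopedString str;
//   str.append("module %s at %p\n", name, (void *)base);
//   Printf("%s", str.data());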

template <class T>
struct CompareLess {
  bool operator()(const T &a, const T &b) const { return a < b; }
};

// HeapSort for arrays and InternalMmapVector.
template <class T, class Compare = CompareLess<T>>
void Sort(T *v, uptr size, Compare comp = {}) {
  if (size < 2)
    return;
  // Stage 1: insert elements to the heap.
  for (uptr i = 1; i < size; i++) {
    uptr j, p;
    for (j = i; j > 0; j = p) {
      p = (j - 1) / 2;
      if (comp(v[p], v[j]))
        Swap(v[j], v[p]);
      else
        break;
    }
  }
  // Stage 2: swap largest element with the last one,
  // and sink the new top.
  for (uptr i = size - 1; i > 0; i--) {
    Swap(v[0], v[i]);
    uptr j, max_ind;
    for (j = 0; j < i; j = max_ind) {
      uptr left = 2 * j + 1;
      uptr right = 2 * j + 2;
      max_ind = j;
      if (left < i && comp(v[max_ind], v[left]))
        max_ind = left;
      if (right < i && comp(v[max_ind], v[right]))
        max_ind = right;
      if (max_ind != j)
        Swap(v[j], v[max_ind]);
      else
        break;
    }
  }
}
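// Example usage (illustrative): sort in descending order with a custom
// comparator:
//   Sort(v.data(), v.size(), [](uptr a, uptr b) { return a > b; });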

// Works like std::lower_bound: finds the first element that is not less
// than val.
template <class Container, class T,
          class Compare = CompareLess<typename Container::value_type>>
uptr InternalLowerBound(const Container &v, const T &val, Compare comp = {}) {
  uptr first = 0;
  uptr last = v.size();
  while (last > first) {
    uptr mid = (first + last) / 2;
    if (comp(v[mid], val))
      first = mid + 1;
    else
      last = mid;
  }
  return first;
}
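// For instance (illustrative): on a sorted container {1, 3, 3, 7},
// InternalLowerBound(v, 3) returns index 1 and InternalLowerBound(v, 4)
// returns index 3. Note that it returns an index rather than an iterator.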

enum ModuleArch {
  kModuleArchUnknown,
  kModuleArchI386,
  kModuleArchX86_64,
  kModuleArchX86_64H,
  kModuleArchARMV6,
  kModuleArchARMV7,
  kModuleArchARMV7S,
  kModuleArchARMV7K,
  kModuleArchARM64,
  kModuleArchLoongArch64,
  kModuleArchRISCV64,
  kModuleArchHexagon
};

// Sorts and removes duplicates from the container.
template <class Container,
          class Compare = CompareLess<typename Container::value_type>>
void SortAndDedup(Container &v, Compare comp = {}) {
  Sort(v.data(), v.size(), comp);
  uptr size = v.size();
  if (size < 2)
    return;
  uptr last = 0;
  for (uptr i = 1; i < size; ++i) {
    if (comp(v[last], v[i])) {
      ++last;
      if (last != i)
        v[last] = v[i];
    } else {
      CHECK(!comp(v[i], v[last]));
    }
  }
  v.resize(last + 1);
}
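// For instance (illustrative): {3, 1, 3, 2} becomes {1, 2, 3}; elements that
// compare equal under comp are collapsed into one.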

constexpr uptr kDefaultFileMaxSize = FIRST_32_SECOND_64(1 << 26, 1 << 28);

// Opens the file 'file_name' and reads up to 'max_len' bytes.
// The resulting buffer is mmaped and stored in '*buff'.
// Returns true if the file was successfully opened and read.
bool ReadFileToVector(const char *file_name,
                      InternalMmapVectorNoCtor<char> *buff,
                      uptr max_len = kDefaultFileMaxSize,
                      error_t *errno_p = nullptr);

// Opens the file 'file_name' and reads up to 'max_len' bytes.
// This function is less I/O efficient than ReadFileToVector as it may re-read
// the file multiple times to avoid using mmap during the read attempts. It is
// used to read procmaps, where short reads with mmap in between could produce
// an inconsistent result.
// The resulting buffer is mmaped and stored in '*buff'.
// The size of the mmaped region is stored in '*buff_size'.
// The total number of read bytes is stored in '*read_len'.
// Returns true if the file was successfully opened and read.
bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
                      uptr *read_len, uptr max_len = kDefaultFileMaxSize,
                      error_t *errno_p = nullptr);
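// Example usage (an illustrative sketch; the caller owns the mmaped buffer):
//   char *buff;
//   uptr buff_size, read_len;
//   if (ReadFileToBuffer("/proc/self/maps", &buff, &buff_size, &read_len)) {
//     ...  // use buff[0, read_len)
//     UnmapOrDie(buff, buff_size);
//   }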

int GetModuleAndOffsetForPc(uptr pc, char *module_name, uptr module_name_len,
                            uptr *pc_offset);

// When adding a new architecture, don't forget to also update
// script/asan_symbolize.py and sanitizer_symbolizer_libcdep.cpp.
inline const char *ModuleArchToString(ModuleArch arch) {
  switch (arch) {
    case kModuleArchUnknown:
      return "";
    case kModuleArchI386:
      return "i386";
    case kModuleArchX86_64:
      return "x86_64";
    case kModuleArchX86_64H:
      return "x86_64h";
    case kModuleArchARMV6:
      return "armv6";
    case kModuleArchARMV7:
      return "armv7";
    case kModuleArchARMV7S:
      return "armv7s";
    case kModuleArchARMV7K:
      return "armv7k";
    case kModuleArchARM64:
      return "arm64";
    case kModuleArchLoongArch64:
      return "loongarch64";
    case kModuleArchRISCV64:
      return "riscv64";
    case kModuleArchHexagon:
      return "hexagon";
  }
  CHECK(0 && "Invalid module arch");
  return "";
}

const uptr kModuleUUIDSize = 32;
const uptr kMaxSegName = 16;

// Represents a binary loaded into virtual memory (e.g. this can be an
// executable or a shared object).
class LoadedModule {
 public:
  LoadedModule()
      : full_name_(nullptr),
        base_address_(0),
        max_address_(0),
        arch_(kModuleArchUnknown),
        uuid_size_(0),
        instrumented_(false) {
    internal_memset(uuid_, 0, kModuleUUIDSize);
    ranges_.clear();
  }
  void set(const char *module_name, uptr base_address);
  void set(const char *module_name, uptr base_address, ModuleArch arch,
           u8 uuid[kModuleUUIDSize], bool instrumented);
  void setUuid(const char *uuid, uptr size);
  void clear();
  void addAddressRange(uptr beg, uptr end, bool executable, bool writable,
                       const char *name = nullptr);
  bool containsAddress(uptr address) const;

  const char *full_name() const { return full_name_; }
  uptr base_address() const { return base_address_; }
  uptr max_address() const { return max_address_; }
  ModuleArch arch() const { return arch_; }
  const u8 *uuid() const { return uuid_; }
  uptr uuid_size() const { return uuid_size_; }
  bool instrumented() const { return instrumented_; }

  struct AddressRange {
    AddressRange *next;
    uptr beg;
    uptr end;
    bool executable;
    bool writable;
    char name[kMaxSegName];

    AddressRange(uptr beg, uptr end, bool executable, bool writable,
                 const char *name)
        : next(nullptr),
          beg(beg),
          end(end),
          executable(executable),
          writable(writable) {
      internal_strncpy(this->name, (name ? name : ""), ARRAY_SIZE(this->name));
    }
  };

  const IntrusiveList<AddressRange> &ranges() const { return ranges_; }

 private:
  char *full_name_;  // Owned.
  uptr base_address_;
  uptr max_address_;
  ModuleArch arch_;
  uptr uuid_size_;
  u8 uuid_[kModuleUUIDSize];
  bool instrumented_;
  IntrusiveList<AddressRange> ranges_;
};
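// Example usage (an illustrative sketch; all values are made up):
//   LoadedModule module;
//   module.set("/usr/lib/libfoo.so", /*base_address=*/0x7f0000000000);
//   module.addAddressRange(0x7f0000000000, 0x7f0000010000,
//                          /*executable=*/true, /*writable=*/false);
//   CHECK(module.containsAddress(0x7f0000000123));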

// List of LoadedModules. The OS-dependent implementation is responsible for
// filling this information.
class ListOfModules {
 public:
  ListOfModules() : initialized(false) {}
  ~ListOfModules() { clear(); }
  void init();
  void fallbackInit();  // Uses fallback init if available, otherwise clears
  const LoadedModule *begin() const { return modules_.begin(); }
  LoadedModule *begin() { return modules_.begin(); }
  const LoadedModule *end() const { return modules_.end(); }
  LoadedModule *end() { return modules_.end(); }
  uptr size() const { return modules_.size(); }
  const LoadedModule &operator[](uptr i) const {
    CHECK_LT(i, modules_.size());
    return modules_[i];
  }

 private:
  void clear() {
    for (auto &module : modules_) module.clear();
    modules_.clear();
  }
  void clearOrInit() {
    initialized ? clear() : modules_.Initialize(kInitialCapacity);
    initialized = true;
  }

  InternalMmapVectorNoCtor<LoadedModule> modules_;
  // We rarely have more than 16K loaded modules.
  static const uptr kInitialCapacity = 1 << 14;
  bool initialized;
};

// Callback type for iterating over a set of memory ranges.
typedef void (*RangeIteratorCallback)(uptr begin, uptr end, void *arg);

enum AndroidApiLevel {
  ANDROID_NOT_ANDROID = 0,
  ANDROID_KITKAT = 19,
  ANDROID_LOLLIPOP_MR1 = 22,
  ANDROID_POST_LOLLIPOP = 23
};

void WriteToSyslog(const char *buffer);

#if defined(SANITIZER_WINDOWS) && defined(_MSC_VER) && !defined(__clang__)
#define SANITIZER_WIN_TRACE 1
#else
#define SANITIZER_WIN_TRACE 0
#endif

#if SANITIZER_APPLE || SANITIZER_WIN_TRACE
void LogFullErrorReport(const char *buffer);
#else
inline void LogFullErrorReport(const char *buffer) {}
#endif

#if SANITIZER_LINUX || SANITIZER_APPLE
void WriteOneLineToSyslog(const char *s);
void LogMessageOnPrintf(const char *str);
#else
inline void WriteOneLineToSyslog(const char *s) {}
inline void LogMessageOnPrintf(const char *str) {}
#endif

#if SANITIZER_LINUX || SANITIZER_WIN_TRACE
// Initialize Android logging. Any writes before this are silently lost.
void AndroidLogInit();
void SetAbortMessage(const char *);
#else
inline void AndroidLogInit() {}
// FIXME: MacOS implementation could use CRSetCrashLogMessage.
inline void SetAbortMessage(const char *) {}
#endif

#if SANITIZER_ANDROID
void SanitizerInitializeUnwinder();
AndroidApiLevel AndroidGetApiLevel();
#else
inline void AndroidLogWrite(const char *buffer_unused) {}
inline void SanitizerInitializeUnwinder() {}
inline AndroidApiLevel AndroidGetApiLevel() { return ANDROID_NOT_ANDROID; }
#endif

inline uptr GetPthreadDestructorIterations() {
#if SANITIZER_ANDROID
  return (AndroidGetApiLevel() == ANDROID_LOLLIPOP_MR1) ? 8 : 4;
#elif SANITIZER_POSIX
  return 4;
#else
  // Unused on Windows.
  return 0;
#endif
}

void *internal_start_thread(void *(*func)(void*), void *arg);
void internal_join_thread(void *th);
void MaybeStartBackgroudThread();

// Make the compiler think that something is going on there.
// Use this inside a loop that looks like memset/memcpy/etc to prevent the
// compiler from recognising it and turning it into an actual call to
// memset/memcpy/etc.
static inline void SanitizerBreakOptimization(void *arg) {
#if defined(_MSC_VER) && !defined(__clang__)
  _ReadWriteBarrier();
#else
  __asm__ __volatile__("" : : "r" (arg) : "memory");
#endif
}

struct SignalContext {
  void *siginfo;
  void *context;
  uptr addr;
  uptr pc;
  uptr sp;
  uptr bp;
  bool is_memory_access;
  enum WriteFlag { Unknown, Read, Write } write_flag;

  // In some cases the kernel cannot provide the true faulting address; `addr`
  // will be zero then.  This field allows distinguishing between those cases
  // and dereferences of null.
  bool is_true_faulting_addr;

  // VS2013 doesn't implement unrestricted unions, so we need a trivial default
  // constructor.
  SignalContext() = default;

  // Creates a signal context in a platform-specific manner.
  // SignalContext is going to keep pointers to siginfo and context without
  // owning them.
  SignalContext(void *siginfo, void *context)
      : siginfo(siginfo),
        context(context),
        addr(GetAddress()),
        is_memory_access(IsMemoryAccess()),
        write_flag(GetWriteFlag()),
        is_true_faulting_addr(IsTrueFaultingAddress()) {
    InitPcSpBp();
  }

  static void DumpAllRegisters(void *context);

  // Type of signal e.g. SIGSEGV or EXCEPTION_ACCESS_VIOLATION.
  int GetType() const;

  // String description of the signal.
  const char *Describe() const;

  // Returns true if the signal is a stack overflow.
  bool IsStackOverflow() const;

 private:
  // Platform specific initialization.
  void InitPcSpBp();
  uptr GetAddress() const;
  WriteFlag GetWriteFlag() const;
  bool IsMemoryAccess() const;
  bool IsTrueFaultingAddress() const;
};
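// Example usage (an illustrative sketch, inside a SignalHandlerType-style
// handler; SignalContext only borrows the pointers):
//   static void Handler(int signo, void *siginfo, void *context) {
//     SignalContext sig(siginfo, context);
//     Report("%s at pc %p\n", sig.Describe(), (void *)sig.pc);
//   }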

void InitializePlatformEarly();

template <typename Fn>
class RunOnDestruction {
 public:
  explicit RunOnDestruction(Fn fn) : fn_(fn) {}
  ~RunOnDestruction() { fn_(); }

 private:
  Fn fn_;
};

// A simple scope guard. Usage:
// auto cleanup = at_scope_exit([]{ do_cleanup; });
template <typename Fn>
RunOnDestruction<Fn> at_scope_exit(Fn fn) {
  return RunOnDestruction<Fn>(fn);
}

// Linux on 64-bit s390 had a nasty bug that crashed the whole machine
// if a process uses virtual memory over 4TB (as many sanitizers like
// to do).  This function will abort the process if running on a kernel
// that looks vulnerable.
#if SANITIZER_LINUX && SANITIZER_S390_64
void AvoidCVE_2016_2143();
#else
inline void AvoidCVE_2016_2143() {}
#endif

struct StackDepotStats {
  uptr n_uniq_ids;
  uptr allocated;
};

// The default value for the allocator_release_to_os_interval_ms common flag,
// indicating that the sanitizer allocator should not attempt to release
// memory to the OS.
const s32 kReleaseToOSIntervalNever = -1;

void CheckNoDeepBind(const char *filename, int flag);

// Returns the requested amount of random data (up to 256 bytes) that can then
// be used to seed a PRNG. Defaults to blocking like the underlying syscall.
bool GetRandom(void *buffer, uptr length, bool blocking = true);
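// Example usage (an illustrative sketch; the NanoTime fallback is a
// deliberately weak last resort):
//   u64 seed;
//   if (!GetRandom(&seed, sizeof(seed), /*blocking=*/false))
//     seed = NanoTime();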

// Returns the number of logical processors on the system.
u32 GetNumberOfCPUs();
extern u32 NumberOfCPUsCached;
inline u32 GetNumberOfCPUsCached() {
  if (!NumberOfCPUsCached)
    NumberOfCPUsCached = GetNumberOfCPUs();
  return NumberOfCPUsCached;
}

template <typename T>
class ArrayRef {
 public:
  ArrayRef() {}
  ArrayRef(T *begin, T *end) : begin_(begin), end_(end) {}

  T *begin() { return begin_; }
  T *end() { return end_; }

 private:
  T *begin_ = nullptr;
  T *end_ = nullptr;
};

}  // namespace __sanitizer

inline void *operator new(__sanitizer::operator_new_size_type size,
                          __sanitizer::LowLevelAllocator &alloc) {
  return alloc.Allocate(size);
}

#endif  // SANITIZER_COMMON_H