1  //=-- lsan_common.h -------------------------------------------------------===//
       2  //
       3  // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
       4  // See https://llvm.org/LICENSE.txt for license information.
       5  // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
       6  //
       7  //===----------------------------------------------------------------------===//
       8  //
       9  // This file is a part of LeakSanitizer.
      10  // Private LSan header.
      11  //
      12  //===----------------------------------------------------------------------===//
      13  
      14  #ifndef LSAN_COMMON_H
      15  #define LSAN_COMMON_H
      16  
      17  #include "sanitizer_common/sanitizer_allocator.h"
      18  #include "sanitizer_common/sanitizer_common.h"
      19  #include "sanitizer_common/sanitizer_internal_defs.h"
      20  #include "sanitizer_common/sanitizer_platform.h"
      21  #include "sanitizer_common/sanitizer_stackdepot.h"
      22  #include "sanitizer_common/sanitizer_stoptheworld.h"
      23  #include "sanitizer_common/sanitizer_symbolizer.h"
      24  
      25  // LeakSanitizer relies on some Glibc's internals (e.g. TLS machinery) on Linux.
      26  // Also, LSan doesn't like 32 bit architectures
      27  // because of "small" (4 bytes) pointer size that leads to high false negative
      28  // ratio on large leaks. But we still want to have it for some 32 bit arches
      29  // (e.g. x86), see https://github.com/google/sanitizers/issues/403.
      30  // To enable LeakSanitizer on a new architecture, one needs to implement the
      31  // internal_clone function as well as (probably) adjust the TLS machinery for
      32  // the new architecture inside the sanitizer library.
      33  // Exclude leak-detection on arm32 for Android because `__aeabi_read_tp`
      34  // is missing. This caused a link error.
      35  #if SANITIZER_ANDROID && (__ANDROID_API__ < 28 || defined(__arm__))
      36  #  define CAN_SANITIZE_LEAKS 0
      37  #elif (SANITIZER_LINUX || SANITIZER_APPLE) && (SANITIZER_WORDSIZE == 64) && \
      38      (defined(__x86_64__) || defined(__mips64) || defined(__aarch64__) ||  \
      39       defined(__powerpc64__) || defined(__s390x__))
      40  #  define CAN_SANITIZE_LEAKS 1
      41  #elif defined(__i386__) && (SANITIZER_LINUX || SANITIZER_APPLE)
      42  #  define CAN_SANITIZE_LEAKS 1
      43  #elif defined(__arm__) && SANITIZER_LINUX
      44  #  define CAN_SANITIZE_LEAKS 1
      45  #elif SANITIZER_RISCV64 && SANITIZER_LINUX
      46  #  define CAN_SANITIZE_LEAKS 1
      47  #elif SANITIZER_NETBSD || SANITIZER_FUCHSIA
      48  #  define CAN_SANITIZE_LEAKS 1
      49  #else
      50  #  define CAN_SANITIZE_LEAKS 0
      51  #endif
      52  
// Forward declarations of sanitizer_common types used below, to avoid
// pulling in additional headers.
namespace __sanitizer {
class FlagParser;
class ThreadRegistry;
class ThreadContextBase;
struct DTLS;
}  // namespace __sanitizer
      59  
      60  namespace __lsan {
      61  
// Chunk tags: leak-detection status assigned to each heap chunk.
enum ChunkTag {
  kDirectlyLeaked = 0,  // default: no live reference to the chunk was found
  kIndirectlyLeaked = 1,  // referenced only from other leaked chunks
  kReachable = 2,  // referenced from a root
  kIgnored = 3  // explicitly excluded from reports (see IgnoreObjectLocked)
};
      69  
// Runtime flags: one field is generated per LSAN_FLAG entry in
// lsan_flags.inc via the X-macro below.
struct Flags {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
#include "lsan_flags.inc"
#undef LSAN_FLAG

  // Sets every flag to its DefaultValue from lsan_flags.inc.
  void SetDefaults();
  // Alignment assumed when scanning memory for pointer values:
  // byte-granular if use_unaligned is set, otherwise word-aligned.
  uptr pointer_alignment() const {
    return use_unaligned ? 1 : sizeof(uptr);
  }
};
      80  
// Global flag storage and its accessor.
extern Flags lsan_flags;
inline Flags *flags() { return &lsan_flags; }
// Registers all LSan flags with the given parser, storing values into *f.
void RegisterLsanFlags(FlagParser *parser, Flags *f);
      84  
// A single leaked allocation discovered during the leak check.
struct LeakedChunk {
  uptr chunk;          // user-visible start address of the chunk
  u32 stack_trace_id;  // allocation stack trace id (stack depot)
  uptr leaked_size;    // leaked size in bytes
  ChunkTag tag;        // leak kind (see ChunkTag)
};

using LeakedChunks = InternalMmapVector<LeakedChunk>;
      93  
// Aggregated record for a group of leaks sharing a stack trace
// (see LeakReport below).
struct Leak {
  u32 id;                   // unique id within a LeakReport
  uptr hit_count;           // number of leaked allocations in the group
  uptr total_size;          // combined size of those allocations, in bytes
  u32 stack_trace_id;       // shared allocation stack trace id
  bool is_directly_leaked;  // true if the group is directly leaked
  bool is_suppressed;       // true if matched by a suppression rule
};
     102  
// A single leaked object, tied to its aggregated Leak record via leak_id.
struct LeakedObject {
  u32 leak_id;  // matches Leak::id of the owning group
  uptr addr;    // user-visible address of the object
  uptr size;    // object size in bytes
};
     108  
// Aggregates leaks by stack trace prefix.
class LeakReport {
 public:
  LeakReport() {}
  // Folds a batch of leaked chunks into the aggregated Leak records.
  void AddLeakedChunks(const LeakedChunks &chunks);
  // Prints detailed reports for up to max_leaks leaks.
  void ReportTopLeaks(uptr max_leaks);
  // Prints a one-line summary of the collected leaks.
  void PrintSummary();
  // Applies suppression rules to the collected leaks; the returned count is
  // presumably the number of suppressed allocations -- verify in
  // lsan_common.cpp.
  uptr ApplySuppressions();
  // Number of leaks not matched by any suppression.
  uptr UnsuppressedLeakCount();
  // Number of unsuppressed indirect leaks.
  uptr IndirectUnsuppressedLeakCount();

 private:
  void PrintReportForLeak(uptr index);
  void PrintLeakedObjectsForLeak(uptr index);

  u32 next_id_ = 0;  // id to assign to the next new Leak record
  InternalMmapVector<Leak> leaks_;
  InternalMmapVector<LeakedObject> leaked_objects_;
};
     128  
// Worklist of addresses to visit during the pointer-reachability scan.
typedef InternalMmapVector<uptr> Frontier;

// Platform-specific functions.
void InitializePlatformSpecificModules();
void ProcessGlobalRegions(Frontier *frontier);
void ProcessPlatformSpecificAllocations(Frontier *frontier);

// A user-registered memory region scanned as a root during leak detection
// (see __lsan_register_root_region below).
struct RootRegion {
  uptr begin;  // start address
  uptr size;   // region size in bytes
};
     140  
// LockStuffAndStopTheWorld can start to use Scan* calls to collect into
// this Frontier vector before the StopTheWorldCallback actually runs.
// This is used when the OS has a unified callback API for suspending
// threads and enumerating roots.
struct CheckForLeaksParam {
  Frontier frontier;     // reachability worklist
  LeakedChunks leaks;    // chunks determined to be leaked
  tid_t caller_tid;      // presumably the thread that initiated the check
  uptr caller_sp;        // its stack pointer -- confirm usage at call sites
  bool success = false;  // set to true when the check completed
};
     152  
     153  InternalMmapVectorNoCtor<RootRegion> const *GetRootRegions();
     154  void ScanRootRegion(Frontier *frontier, RootRegion const ®ion,
     155                      uptr region_begin, uptr region_end, bool is_readable);
     156  void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg);
void GetAdditionalThreadContextPtrs(ThreadContextBase *tctx, void *ptrs);
// Run stoptheworld while holding any platform-specific locks, as well as the
// allocator and thread registry locks.
void LockStuffAndStopTheWorld(StopTheWorldCallback callback,
                              CheckForLeaksParam* argument);

// Scans [begin, end) for values that may be pointers and pushes discovered
// chunks onto the frontier, tagging them with `tag`; region_type is
// presumably used for verbose reporting -- confirm in lsan_common.cpp.
void ScanRangeForPointers(uptr begin, uptr end,
                          Frontier *frontier,
                          const char *region_type, ChunkTag tag);
void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier);
     167  
// Outcome of an ignore-object request (see IgnoreObjectLocked below).
enum IgnoreObjectResult {
  kIgnoreObjectSuccess,         // the object is now ignored
  kIgnoreObjectAlreadyIgnored,  // it was already marked as ignored
  kIgnoreObjectInvalid          // the address is not a valid user chunk
};
     173  
// Functions called from the parent tool.
// Returns the user-provided __lsan_default_options() result if that weak
// hook is defined -- see the weak declaration at the bottom of this header.
const char *MaybeCallLsanDefaultOptions();
void InitCommonLsan();
// Performs a leak check; the recoverable variant does not abort the process
// -- TODO confirm exact semantics in lsan_common.cpp.
void DoLeakCheck();
void DoRecoverableLeakCheckVoid();
void DisableCounterUnderflow();
// Returns true if leak tracking is disabled in the current thread.
bool DisabledInThisThread();

// Used to implement __lsan::ScopedDisabler.
void DisableInThisThread();
void EnableInThisThread();
// Can be used to ignore memory allocated by an intercepted
// function. RAII: leak tracking is disabled in the current thread for the
// lifetime of this object (DisableInThisThread/EnableInThisThread pair).
struct ScopedInterceptorDisabler {
  ScopedInterceptorDisabler() { DisableInThisThread(); }
  ~ScopedInterceptorDisabler() { EnableInThisThread(); }
};
     191  
     192  // According to Itanium C++ ABI array cookie is a one word containing
     193  // size of allocated array.
     194  static inline bool IsItaniumABIArrayCookie(uptr chunk_beg, uptr chunk_size,
     195                                             uptr addr) {
     196    return chunk_size == sizeof(uptr) && chunk_beg + chunk_size == addr &&
     197           *reinterpret_cast<uptr *>(chunk_beg) == 0;
     198  }
     199  
     200  // According to ARM C++ ABI array cookie consists of two words:
     201  // struct array_cookie {
     202  //   std::size_t element_size; // element_size != 0
     203  //   std::size_t element_count;
     204  // };
     205  static inline bool IsARMABIArrayCookie(uptr chunk_beg, uptr chunk_size,
     206                                         uptr addr) {
     207    return chunk_size == 2 * sizeof(uptr) && chunk_beg + chunk_size == addr &&
     208           *reinterpret_cast<uptr *>(chunk_beg + sizeof(uptr)) == 0;
     209  }
     210  
// Special case for "new T[0]" where T is a type with DTOR.
// new T[0] will allocate a cookie (one or two words) for the array size (0)
// and store a pointer to the end of allocated chunk. The actual cookie layout
// varies between platforms according to their C++ ABI implementation.
// Dispatches at compile time: ARM targets use the two-word ARM ABI cookie,
// everything else uses the one-word Itanium ABI cookie.
inline bool IsSpecialCaseOfOperatorNew0(uptr chunk_beg, uptr chunk_size,
                                        uptr addr) {
#if defined(__arm__)
  return IsARMABIArrayCookie(chunk_beg, chunk_size, addr);
#else
  return IsItaniumABIArrayCookie(chunk_beg, chunk_size, addr);
#endif
}
     223  
// The following must be implemented in the parent tool.

// Invokes `callback` with `arg` for each allocator chunk.
void ForEachChunk(ForEachChunkCallback callback, void *arg);
// Returns the address range occupied by the global allocator object.
void GetAllocatorGlobalRange(uptr *begin, uptr *end);
// Wrappers for allocator's ForceLock()/ForceUnlock().
void LockAllocator();
void UnlockAllocator();
// Returns true if [addr, addr + sizeof(void *)) is poisoned.
bool WordIsPoisoned(uptr addr);
// Wrappers for ThreadRegistry access.
void LockThreadRegistry() SANITIZER_NO_THREAD_SAFETY_ANALYSIS;
void UnlockThreadRegistry() SANITIZER_NO_THREAD_SAFETY_ANALYSIS;
     237  
// RAII helper for stop-the-world: acquires the thread registry lock, then
// the allocator lock, and releases them in the reverse order. Non-copyable.
struct ScopedStopTheWorldLock {
  ScopedStopTheWorldLock() {
    LockThreadRegistry();
    LockAllocator();
  }

  ~ScopedStopTheWorldLock() {
    UnlockAllocator();
    UnlockThreadRegistry();
  }

  ScopedStopTheWorldLock &operator=(const ScopedStopTheWorldLock &) = delete;
  ScopedStopTheWorldLock(const ScopedStopTheWorldLock &) = delete;
};
     252  
// Returns the thread registry; callers must already hold its lock.
ThreadRegistry *GetThreadRegistryLocked();
// Retrieves stack/TLS/allocator-cache ranges for the thread with the given
// OS id; presumably returns false if the thread is unknown -- confirm in the
// parent tool's implementation.
bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
                           uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
                           uptr *cache_end, DTLS **dtls);
void GetAllThreadAllocatorCachesLocked(InternalMmapVector<uptr> *caches);
void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback,
                            void *arg);
// If called from the main thread, updates the main thread's TID in the thread
// registry. We need this to handle processes that fork() without a subsequent
// exec(), which invalidates the recorded TID. To update it, we must call
// gettid() from the main thread. Our solution is to call this function before
// leak checking and also before every call to pthread_create() (to handle cases
// where leak checking is initiated from a non-main thread).
void EnsureMainThreadIDIsCorrect();
// If p points into a chunk that has been allocated to the user, returns its
// user-visible address. Otherwise, returns 0.
uptr PointsIntoChunk(void *p);
// Returns address of user-visible chunk contained in this allocator chunk.
uptr GetUserBegin(uptr chunk);
// Helper for __lsan_ignore_object().
IgnoreObjectResult IgnoreObjectLocked(const void *p);

// Return the linker module, if valid for the platform.
LoadedModule *GetLinker();

// Return true if LSan has finished leak checking and reported leaks.
bool HasReportedLeaks();

// Run platform-specific leak handlers.
void HandleLeaks();
     283  
// Wrapper for chunk metadata operations; the actual metadata layout is
// owned by the parent tool (metadata_ is an opaque pointer into it).
class LsanMetadata {
 public:
  // Constructor accepts address of user-visible chunk.
  explicit LsanMetadata(uptr chunk);
  // True if the chunk is currently allocated (not freed).
  bool allocated() const;
  // Leak-status tag of the chunk (see ChunkTag).
  ChunkTag tag() const;
  void set_tag(ChunkTag value);
  // Size originally requested by the user for this allocation.
  uptr requested_size() const;
  // Allocation stack trace id.
  u32 stack_trace_id() const;
 private:
  void *metadata_;
};
     297  
     298  }  // namespace __lsan
     299  
extern "C" {
// Weak hooks: a user program may define these to customize LSan behavior.
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
const char *__lsan_default_options();

// Presumably a nonzero return disables leak checking -- see the public
// lsan_interface.h for the documented contract.
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
int __lsan_is_turned_off();

SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
const char *__lsan_default_suppressions();

// Registers/unregisters [p, p + size) as a root region for leak scanning
// (see RootRegion above).
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_register_root_region(const void *p, __lsan::uptr size);

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_unregister_root_region(const void *p, __lsan::uptr size);

}  // extern "C"
     317  
     318  #endif  // LSAN_COMMON_H