gcc-13.2.0/libsanitizer/tsan/tsan_rtl.h
       1  //===-- tsan_rtl.h ----------------------------------------------*- C++ -*-===//
       2  //
       3  // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
       4  // See https://llvm.org/LICENSE.txt for license information.
       5  // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
       6  //
       7  //===----------------------------------------------------------------------===//
       8  //
       9  // This file is a part of ThreadSanitizer (TSan), a race detector.
      10  //
      11  // Main internal TSan header file.
      12  //
      13  // Ground rules:
      14  //   - C++ run-time should not be used (static CTORs, RTTI, exceptions, static
      15  //     function-scope locals)
      16  //   - All functions/classes/etc reside in namespace __tsan, except for those
      17  //     declared in tsan_interface.h.
      18  //   - Platform-specific files should be used instead of ifdefs (*).
      19  //   - No system headers included in header files (*).
       20  //   - Platform-specific headers included only in platform-specific files (*).
      21  //
      22  //  (*) Except when inlining is critical for performance.
      23  //===----------------------------------------------------------------------===//
      24  
      25  #ifndef TSAN_RTL_H
      26  #define TSAN_RTL_H
      27  
      28  #include "sanitizer_common/sanitizer_allocator.h"
      29  #include "sanitizer_common/sanitizer_allocator_internal.h"
      30  #include "sanitizer_common/sanitizer_asm.h"
      31  #include "sanitizer_common/sanitizer_common.h"
      32  #include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
      33  #include "sanitizer_common/sanitizer_libignore.h"
      34  #include "sanitizer_common/sanitizer_suppressions.h"
      35  #include "sanitizer_common/sanitizer_thread_registry.h"
      36  #include "sanitizer_common/sanitizer_vector.h"
      37  #include "tsan_defs.h"
      38  #include "tsan_flags.h"
      39  #include "tsan_ignoreset.h"
      40  #include "tsan_ilist.h"
      41  #include "tsan_mman.h"
      42  #include "tsan_mutexset.h"
      43  #include "tsan_platform.h"
      44  #include "tsan_report.h"
      45  #include "tsan_shadow.h"
      46  #include "tsan_stack_trace.h"
      47  #include "tsan_sync.h"
      48  #include "tsan_trace.h"
      49  #include "tsan_vector_clock.h"
      50  
      51  #if SANITIZER_WORDSIZE != 64
      52  # error "ThreadSanitizer is supported only on 64-bit platforms"
      53  #endif
      54  
      55  namespace __tsan {
      56  
      57  #if !SANITIZER_GO
      58  struct MapUnmapCallback;
      59  #if defined(__mips64) || defined(__aarch64__) || defined(__powerpc__)
      60  
      61  struct AP32 {
      62    static const uptr kSpaceBeg = 0;
      63    static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
      64    static const uptr kMetadataSize = 0;
      65    typedef __sanitizer::CompactSizeClassMap SizeClassMap;
      66    static const uptr kRegionSizeLog = 20;
      67    using AddressSpaceView = LocalAddressSpaceView;
      68    typedef __tsan::MapUnmapCallback MapUnmapCallback;
      69    static const uptr kFlags = 0;
      70  };
      71  typedef SizeClassAllocator32<AP32> PrimaryAllocator;
      72  #else
      73  struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
      74  #    if defined(__s390x__)
      75    typedef MappingS390x Mapping;
      76  #    else
      77    typedef Mapping48AddressSpace Mapping;
      78  #    endif
      79    static const uptr kSpaceBeg = Mapping::kHeapMemBeg;
      80    static const uptr kSpaceSize = Mapping::kHeapMemEnd - Mapping::kHeapMemBeg;
      81    static const uptr kMetadataSize = 0;
      82    typedef DefaultSizeClassMap SizeClassMap;
      83    typedef __tsan::MapUnmapCallback MapUnmapCallback;
      84    static const uptr kFlags = 0;
      85    using AddressSpaceView = LocalAddressSpaceView;
      86  };
      87  typedef SizeClassAllocator64<AP64> PrimaryAllocator;
      88  #endif
      89  typedef CombinedAllocator<PrimaryAllocator> Allocator;
      90  typedef Allocator::AllocatorCache AllocatorCache;
      91  Allocator *allocator();
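
           // A minimal usage sketch (not part of this header), assuming the caller has a
           // wired Processor; the names thr/sz/align/p are hypothetical:
           //   void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align);
           //   ...
           //   allocator()->Deallocate(&thr->proc()->alloc_cache, p);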
      92  #endif
      93  
      94  struct ThreadSignalContext;
      95  
      96  struct JmpBuf {
      97    uptr sp;
      98    int int_signal_send;
      99    bool in_blocking_func;
     100    uptr in_signal_handler;
     101    uptr *shadow_stack_pos;
     102  };
     103  
     104  // A Processor represents a physical thread, or a P for Go.
      105  // It is used to store internal resources like the allocator cache, and does not
      106  // participate in race-detection logic (it is invisible to the end user).
      107  // In C++ it is tied to an OS thread just like ThreadState; however, ideally
      108  // it should be tied to a CPU (this way we would have fewer allocator caches).
      109  // In Go it is tied to a P, so there are significantly fewer Processors than
      110  // ThreadStates (which are tied to Gs).
     111  // A ThreadState must be wired with a Processor to handle events.
     112  struct Processor {
     113    ThreadState *thr; // currently wired thread, or nullptr
     114  #if !SANITIZER_GO
     115    AllocatorCache alloc_cache;
     116    InternalAllocatorCache internal_alloc_cache;
     117  #endif
     118    DenseSlabAllocCache block_cache;
     119    DenseSlabAllocCache sync_cache;
     120    DDPhysicalThread *dd_pt;
     121  };
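
           // A minimal sketch of the assumed wiring protocol, using the Proc* functions
           // declared later in this header (thr is the current ThreadState):
           //   Processor *proc = ProcCreate();
           //   ProcWire(proc, thr);    // thr can now handle events
           //   ...
           //   ProcUnwire(proc, thr);
           //   ProcDestroy(proc);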
     122  
     123  #if !SANITIZER_GO
      124  // ScopedGlobalProcessor temporarily sets up a global processor for the current
      125  // thread, if it does not have one. Intended for interceptors that can run at
      126  // the very end of a thread, when we have already destroyed the thread's processor.
     127  struct ScopedGlobalProcessor {
     128    ScopedGlobalProcessor();
     129    ~ScopedGlobalProcessor();
     130  };
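
           // A minimal usage sketch: an interceptor that may run after the thread's own
           // Processor is destroyed (e.g. a late free()) would typically start with
           //   ScopedGlobalProcessor sgp;
           // and rely on the destructor to undo the temporary wiring.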
     131  #endif
     132  
     133  struct TidEpoch {
     134    Tid tid;
     135    Epoch epoch;
     136  };
     137  
     138  struct TidSlot {
     139    Mutex mtx;
     140    Sid sid;
     141    atomic_uint32_t raw_epoch;
     142    ThreadState *thr;
     143    Vector<TidEpoch> journal;
     144    INode node;
     145  
     146    Epoch epoch() const {
     147      return static_cast<Epoch>(atomic_load(&raw_epoch, memory_order_relaxed));
     148    }
     149  
     150    void SetEpoch(Epoch v) {
     151      atomic_store(&raw_epoch, static_cast<u32>(v), memory_order_relaxed);
     152    }
     153  
     154    TidSlot();
     155  } ALIGNED(SANITIZER_CACHE_LINE_SIZE);
     156  
     157  // This struct is stored in TLS.
     158  struct ThreadState {
     159    FastState fast_state;
     160    int ignore_sync;
     161  #if !SANITIZER_GO
     162    int ignore_interceptors;
     163  #endif
     164    uptr *shadow_stack_pos;
     165  
     166    // Current position in tctx->trace.Back()->events (Event*).
     167    atomic_uintptr_t trace_pos;
     168    // PC of the last memory access, used to compute PC deltas in the trace.
     169    uptr trace_prev_pc;
     170  
      171  // Technically `current` should be a separate THREADLOCAL variable,
      172  // but it is placed here in order to share a cache line with the previous fields.
     173    ThreadState* current;
     174  
     175    atomic_sint32_t pending_signals;
     176  
     177    VectorClock clock;
     178  
      179  // This is a slow-path flag. On the fast path, fast_state.GetIgnoreBit() is read.
      180  // We do not distinguish between ignoring reads and writes
      181  // for better performance.
     182    int ignore_reads_and_writes;
     183    int suppress_reports;
     184    // Go does not support ignores.
     185  #if !SANITIZER_GO
     186    IgnoreSet mop_ignore_set;
     187    IgnoreSet sync_ignore_set;
     188  #endif
     189    uptr *shadow_stack;
     190    uptr *shadow_stack_end;
     191  #if !SANITIZER_GO
     192    Vector<JmpBuf> jmp_bufs;
     193    int in_symbolizer;
     194    atomic_uintptr_t in_blocking_func;
     195    bool in_ignored_lib;
     196    bool is_inited;
     197  #endif
     198    MutexSet mset;
     199    bool is_dead;
     200    const Tid tid;
     201    uptr stk_addr;
     202    uptr stk_size;
     203    uptr tls_addr;
     204    uptr tls_size;
     205    ThreadContext *tctx;
     206  
     207    DDLogicalThread *dd_lt;
     208  
     209    TidSlot *slot;
     210    uptr slot_epoch;
     211    bool slot_locked;
     212  
     213    // Current wired Processor, or nullptr. Required to handle any events.
     214    Processor *proc1;
     215  #if !SANITIZER_GO
     216    Processor *proc() { return proc1; }
     217  #else
     218    Processor *proc();
     219  #endif
     220  
     221    atomic_uintptr_t in_signal_handler;
     222    ThreadSignalContext *signal_ctx;
     223  
     224  #if !SANITIZER_GO
     225    StackID last_sleep_stack_id;
     226    VectorClock last_sleep_clock;
     227  #endif
     228  
     229    // Set in regions of runtime that must be signal-safe and fork-safe.
     230    // If set, malloc must not be called.
     231    int nomalloc;
     232  
     233    const ReportDesc *current_report;
     234  
     235    explicit ThreadState(Tid tid);
     236  } ALIGNED(SANITIZER_CACHE_LINE_SIZE);
     237  
     238  #if !SANITIZER_GO
     239  #if SANITIZER_APPLE || SANITIZER_ANDROID
     240  ThreadState *cur_thread();
     241  void set_cur_thread(ThreadState *thr);
     242  void cur_thread_finalize();
     243  inline ThreadState *cur_thread_init() { return cur_thread(); }
     244  #  else
     245  __attribute__((tls_model("initial-exec")))
     246  extern THREADLOCAL char cur_thread_placeholder[];
     247  inline ThreadState *cur_thread() {
     248    return reinterpret_cast<ThreadState *>(cur_thread_placeholder)->current;
     249  }
     250  inline ThreadState *cur_thread_init() {
     251    ThreadState *thr = reinterpret_cast<ThreadState *>(cur_thread_placeholder);
     252    if (UNLIKELY(!thr->current))
     253      thr->current = thr;
     254    return thr->current;
     255  }
     256  inline void set_cur_thread(ThreadState *thr) {
     257    reinterpret_cast<ThreadState *>(cur_thread_placeholder)->current = thr;
     258  }
     259  inline void cur_thread_finalize() { }
     260  #  endif  // SANITIZER_APPLE || SANITIZER_ANDROID
     261  #endif  // SANITIZER_GO
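
           // A minimal sketch of how runtime entry points are assumed to obtain the
           // calling thread's state (the interceptor body below is hypothetical):
           //   ThreadState *thr = cur_thread_init();  // sets up the TLS slot on first use
           //   MemoryAccess(thr, pc, addr, 8, kAccessRead);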
     262  
     263  class ThreadContext final : public ThreadContextBase {
     264   public:
     265    explicit ThreadContext(Tid tid);
     266    ~ThreadContext();
     267    ThreadState *thr;
     268    StackID creation_stack_id;
     269    VectorClock *sync;
     270    uptr sync_epoch;
     271    Trace trace;
     272  
     273    // Override superclass callbacks.
     274    void OnDead() override;
     275    void OnJoined(void *arg) override;
     276    void OnFinished() override;
     277    void OnStarted(void *arg) override;
     278    void OnCreated(void *arg) override;
     279    void OnReset() override;
     280    void OnDetached(void *arg) override;
     281  };
     282  
     283  struct RacyStacks {
     284    MD5Hash hash[2];
     285    bool operator==(const RacyStacks &other) const;
     286  };
     287  
     288  struct RacyAddress {
     289    uptr addr_min;
     290    uptr addr_max;
     291  };
     292  
     293  struct FiredSuppression {
     294    ReportType type;
     295    uptr pc_or_addr;
     296    Suppression *supp;
     297  };
     298  
     299  struct Context {
     300    Context();
     301  
     302    bool initialized;
     303  #if !SANITIZER_GO
     304    bool after_multithreaded_fork;
     305  #endif
     306  
     307    MetaMap metamap;
     308  
     309    Mutex report_mtx;
     310    int nreported;
     311    atomic_uint64_t last_symbolize_time_ns;
     312  
     313    void *background_thread;
     314    atomic_uint32_t stop_background_thread;
     315  
     316    ThreadRegistry thread_registry;
     317  
     318    // This is used to prevent a very unlikely but very pathological behavior.
     319    // Since memory access handling is not synchronized with DoReset,
     320    // a thread running concurrently with DoReset can leave a bogus shadow value
     321    // that will be later falsely detected as a race. For such false races
     322    // RestoreStack will return false and we will not report it.
     323    // However, consider that a thread leaves a whole lot of such bogus values
     324    // and these values are later read by a whole lot of threads.
     325    // This will cause massive amounts of ReportRace calls and lots of
     326    // serialization. In very pathological cases the resulting slowdown
     327    // can be >100x. This is very unlikely, but it was presumably observed
     328    // in practice: https://github.com/google/sanitizers/issues/1552
      329  // If this happens, the previous access sid+epoch will be the same for all of
      330  // these false races, because if the thread tries to increment its epoch, it
      331  // will notice that DoReset has happened and will stop producing bogus shadow
     332    // values. So, last_spurious_race is used to remember the last sid+epoch
     333    // for which RestoreStack returned false. Then it is used to filter out
     334    // races with the same sid+epoch very early and quickly.
     335    // It is of course possible that multiple threads left multiple bogus shadow
     336    // values and all of them are read by lots of threads at the same time.
      337  // In such a case last_spurious_race will only be able to deduplicate a few
      338  // races from one thread, then a few from another, and so on. An alternative
     339    // would be to hold an array of such sid+epoch, but we consider such scenario
     340    // as even less likely.
     341    // Note: this can lead to some rare false negatives as well:
     342    // 1. When a legit access with the same sid+epoch participates in a race
     343    // as the "previous" memory access, it will be wrongly filtered out.
     344    // 2. When RestoreStack returns false for a legit memory access because it
     345    // was already evicted from the thread trace, we will still remember it in
     346    // last_spurious_race. Then if there is another racing memory access from
     347    // the same thread that happened in the same epoch, but was stored in the
     348    // next thread trace part (which is still preserved in the thread trace),
     349    // we will also wrongly filter it out while RestoreStack would actually
     350    // succeed for that second memory access.
     351    RawShadow last_spurious_race;
     352  
     353    Mutex racy_mtx;
     354    Vector<RacyStacks> racy_stacks;
      355  // The number of fired suppressions may be large.
     356    Mutex fired_suppressions_mtx;
     357    InternalMmapVector<FiredSuppression> fired_suppressions;
     358    DDetector *dd;
     359  
     360    Flags flags;
     361    fd_t memprof_fd;
     362  
     363    // The last slot index (kFreeSid) is used to denote freed memory.
     364    TidSlot slots[kThreadSlotCount - 1];
     365  
     366    // Protects global_epoch, slot_queue, trace_part_recycle.
     367    Mutex slot_mtx;
     368    uptr global_epoch;  // guarded by slot_mtx and by all slot mutexes
     369    bool resetting;     // global reset is in progress
     370    IList<TidSlot, &TidSlot::node> slot_queue SANITIZER_GUARDED_BY(slot_mtx);
     371    IList<TraceHeader, &TraceHeader::global, TracePart> trace_part_recycle
     372        SANITIZER_GUARDED_BY(slot_mtx);
     373    uptr trace_part_total_allocated SANITIZER_GUARDED_BY(slot_mtx);
     374    uptr trace_part_recycle_finished SANITIZER_GUARDED_BY(slot_mtx);
     375    uptr trace_part_finished_excess SANITIZER_GUARDED_BY(slot_mtx);
     376  #if SANITIZER_GO
     377    uptr mapped_shadow_begin;
     378    uptr mapped_shadow_end;
     379  #endif
     380  };
     381  
     382  extern Context *ctx;  // The one and the only global runtime context.
     383  
     384  ALWAYS_INLINE Flags *flags() {
     385    return &ctx->flags;
     386  }
     387  
     388  struct ScopedIgnoreInterceptors {
     389    ScopedIgnoreInterceptors() {
     390  #if !SANITIZER_GO
     391      cur_thread()->ignore_interceptors++;
     392  #endif
     393    }
     394  
     395    ~ScopedIgnoreInterceptors() {
     396  #if !SANITIZER_GO
     397      cur_thread()->ignore_interceptors--;
     398  #endif
     399    }
     400  };
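
           // A minimal usage sketch: guard runtime code that calls into libc and must not
           // re-enter interceptors (the scope below is hypothetical):
           //   {
           //     ScopedIgnoreInterceptors ignore;
           //     // calls made here are not processed by interceptors for this thread
           //   }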
     401  
     402  const char *GetObjectTypeFromTag(uptr tag);
     403  const char *GetReportHeaderFromTag(uptr tag);
     404  uptr TagFromShadowStackFrame(uptr pc);
     405  
     406  class ScopedReportBase {
     407   public:
     408    void AddMemoryAccess(uptr addr, uptr external_tag, Shadow s, Tid tid,
     409                         StackTrace stack, const MutexSet *mset);
     410    void AddStack(StackTrace stack, bool suppressable = false);
     411    void AddThread(const ThreadContext *tctx, bool suppressable = false);
     412    void AddThread(Tid tid, bool suppressable = false);
     413    void AddUniqueTid(Tid unique_tid);
     414    int AddMutex(uptr addr, StackID creation_stack_id);
     415    void AddLocation(uptr addr, uptr size);
     416    void AddSleep(StackID stack_id);
     417    void SetCount(int count);
     418    void SetSigNum(int sig);
     419  
     420    const ReportDesc *GetReport() const;
     421  
     422   protected:
     423    ScopedReportBase(ReportType typ, uptr tag);
     424    ~ScopedReportBase();
     425  
     426   private:
     427    ReportDesc *rep_;
     428    // Symbolizer makes lots of intercepted calls. If we try to process them,
     429    // at best it will cause deadlocks on internal mutexes.
     430    ScopedIgnoreInterceptors ignore_interceptors_;
     431  
     432    ScopedReportBase(const ScopedReportBase &) = delete;
     433    void operator=(const ScopedReportBase &) = delete;
     434  };
     435  
     436  class ScopedReport : public ScopedReportBase {
     437   public:
     438    explicit ScopedReport(ReportType typ, uptr tag = kExternalTagNone);
     439    ~ScopedReport();
     440  
     441   private:
     442    ScopedErrorReportLock lock_;
     443  };
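
           // A sketch of the assumed typical reporting sequence (cf. tsan_rtl_report.cpp);
           // the variables addr/tag/cur/tid/stack/mset are hypothetical:
           //   ScopedReport rep(ReportTypeRace);
           //   rep.AddMemoryAccess(addr, tag, cur, tid, stack, &mset);
           //   rep.AddThread(tctx);
           //   rep.AddLocation(addr, size);
           //   OutputReport(thr, rep);  // declared below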
     444  
     445  bool ShouldReport(ThreadState *thr, ReportType typ);
     446  ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack);
     447  
     448  // The stack could look like:
     449  //   <start> | <main> | <foo> | tag | <bar>
     450  // This will extract the tag and keep:
     451  //   <start> | <main> | <foo> | <bar>
     452  template<typename StackTraceTy>
     453  void ExtractTagFromStack(StackTraceTy *stack, uptr *tag = nullptr) {
     454    if (stack->size < 2) return;
     455    uptr possible_tag_pc = stack->trace[stack->size - 2];
     456    uptr possible_tag = TagFromShadowStackFrame(possible_tag_pc);
     457    if (possible_tag == kExternalTagNone) return;
     458    stack->trace_buffer[stack->size - 2] = stack->trace_buffer[stack->size - 1];
     459    stack->size -= 1;
     460    if (tag) *tag = possible_tag;
     461  }
     462  
     463  template<typename StackTraceTy>
     464  void ObtainCurrentStack(ThreadState *thr, uptr toppc, StackTraceTy *stack,
     465                          uptr *tag = nullptr) {
     466    uptr size = thr->shadow_stack_pos - thr->shadow_stack;
     467    uptr start = 0;
     468    if (size + !!toppc > kStackTraceMax) {
     469      start = size + !!toppc - kStackTraceMax;
     470      size = kStackTraceMax - !!toppc;
     471    }
     472    stack->Init(&thr->shadow_stack[start], size, toppc);
     473    ExtractTagFromStack(stack, tag);
     474  }
     475  
     476  #define GET_STACK_TRACE_FATAL(thr, pc) \
     477    VarSizeStackTrace stack; \
     478    ObtainCurrentStack(thr, pc, &stack); \
     479    stack.ReverseOrder();
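
           // A minimal sketch of capturing the current stack for a report (hypothetical
           // call site; VarSizeStackTrace comes from tsan_stack_trace.h):
           //   VarSizeStackTrace stack;
           //   uptr tag = kExternalTagNone;
           //   ObtainCurrentStack(thr, pc, &stack, &tag);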
     480  
     481  void MapShadow(uptr addr, uptr size);
     482  void MapThreadTrace(uptr addr, uptr size, const char *name);
     483  void DontNeedShadowFor(uptr addr, uptr size);
     484  void UnmapShadow(ThreadState *thr, uptr addr, uptr size);
     485  void InitializeShadowMemory();
     486  void InitializeInterceptors();
     487  void InitializeLibIgnore();
     488  void InitializeDynamicAnnotations();
     489  
     490  void ForkBefore(ThreadState *thr, uptr pc);
     491  void ForkParentAfter(ThreadState *thr, uptr pc);
     492  void ForkChildAfter(ThreadState *thr, uptr pc, bool start_thread);
     493  
     494  void ReportRace(ThreadState *thr, RawShadow *shadow_mem, Shadow cur, Shadow old,
     495                  AccessType typ);
     496  bool OutputReport(ThreadState *thr, const ScopedReport &srep);
     497  bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace);
     498  bool IsExpectedReport(uptr addr, uptr size);
     499  
     500  #if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 1
     501  # define DPrintf Printf
     502  #else
     503  # define DPrintf(...)
     504  #endif
     505  
     506  #if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 2
     507  # define DPrintf2 Printf
     508  #else
     509  # define DPrintf2(...)
     510  #endif
     511  
     512  StackID CurrentStackId(ThreadState *thr, uptr pc);
     513  ReportStack *SymbolizeStackId(StackID stack_id);
     514  void PrintCurrentStack(ThreadState *thr, uptr pc);
     515  void PrintCurrentStackSlow(uptr pc);  // uses libunwind
     516  MBlock *JavaHeapBlock(uptr addr, uptr *start);
     517  
     518  void Initialize(ThreadState *thr);
     519  void MaybeSpawnBackgroundThread();
     520  int Finalize(ThreadState *thr);
     521  
     522  void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write);
     523  void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write);
     524  
     525  void MemoryAccess(ThreadState *thr, uptr pc, uptr addr, uptr size,
     526                    AccessType typ);
     527  void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr, uptr size,
     528                             AccessType typ);
     529  // This creates 2 non-inlined specialized versions of MemoryAccessRange.
     530  template <bool is_read>
     531  void MemoryAccessRangeT(ThreadState *thr, uptr pc, uptr addr, uptr size);
     532  
     533  ALWAYS_INLINE
     534  void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, uptr size,
     535                         bool is_write) {
     536    if (size == 0)
     537      return;
     538    if (is_write)
     539      MemoryAccessRangeT<false>(thr, pc, addr, size);
     540    else
     541      MemoryAccessRangeT<true>(thr, pc, addr, size);
     542  }
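
           // A minimal sketch: range accesses are assumed to be reported from interceptors
           // of bulk memory operations, e.g. a hypothetical memcpy interceptor body:
           //   MemoryAccessRange(thr, pc, (uptr)dst, size, /*is_write=*/true);
           //   MemoryAccessRange(thr, pc, (uptr)src, size, /*is_write=*/false);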
     543  
     544  void ShadowSet(RawShadow *p, RawShadow *end, RawShadow v);
     545  void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size);
     546  void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size);
     547  void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size);
     548  void MemoryRangeImitateWriteOrResetRange(ThreadState *thr, uptr pc, uptr addr,
     549                                           uptr size);
     550  
     551  void ThreadIgnoreBegin(ThreadState *thr, uptr pc);
     552  void ThreadIgnoreEnd(ThreadState *thr);
     553  void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc);
     554  void ThreadIgnoreSyncEnd(ThreadState *thr);
     555  
     556  Tid ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached);
     557  void ThreadStart(ThreadState *thr, Tid tid, tid_t os_id,
     558                   ThreadType thread_type);
     559  void ThreadFinish(ThreadState *thr);
     560  Tid ThreadConsumeTid(ThreadState *thr, uptr pc, uptr uid);
     561  void ThreadJoin(ThreadState *thr, uptr pc, Tid tid);
     562  void ThreadDetach(ThreadState *thr, uptr pc, Tid tid);
     563  void ThreadFinalize(ThreadState *thr);
     564  void ThreadSetName(ThreadState *thr, const char *name);
     565  int ThreadCount(ThreadState *thr);
     566  void ProcessPendingSignalsImpl(ThreadState *thr);
     567  void ThreadNotJoined(ThreadState *thr, uptr pc, Tid tid, uptr uid);
     568  
     569  Processor *ProcCreate();
     570  void ProcDestroy(Processor *proc);
     571  void ProcWire(Processor *proc, ThreadState *thr);
     572  void ProcUnwire(Processor *proc, ThreadState *thr);
     573  
     574  // Note: the parameter is called flagz, because flags is already taken
     575  // by the global function that returns flags.
     576  void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
     577  void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
     578  void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
     579  void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0,
     580      int rec = 1);
     581  int  MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
     582  void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
     583  void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
     584  void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr);
     585  void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr);
     586  void MutexRepair(ThreadState *thr, uptr pc, uptr addr);  // call on EOWNERDEAD
     587  void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr);
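
           // A sketch of the assumed interceptor pattern for a lock operation
           // (EOWNERDEAD handling via MutexRepair omitted; REAL() is the interception
           // machinery's call-through macro):
           //   MutexPreLock(thr, pc, (uptr)m);
           //   int res = REAL(pthread_mutex_lock)(m);
           //   if (res == 0)
           //     MutexPostLock(thr, pc, (uptr)m);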
     588  
     589  void Acquire(ThreadState *thr, uptr pc, uptr addr);
     590  // AcquireGlobal synchronizes the current thread with all other threads.
      591  // In terms of the happens-before relation, it draws an HB edge from all
      592  // threads (at whatever point they are currently executing) to the current
      593  // thread. We use it to handle Go finalizers: the finalizer goroutine executes
      594  // AcquireGlobal right before running finalizers. This provides a coarse,
      595  // but simple approximation of the actual required synchronization.
     596  void AcquireGlobal(ThreadState *thr);
     597  void Release(ThreadState *thr, uptr pc, uptr addr);
     598  void ReleaseStoreAcquire(ThreadState *thr, uptr pc, uptr addr);
     599  void ReleaseStore(ThreadState *thr, uptr pc, uptr addr);
     600  void AfterSleep(ThreadState *thr, uptr pc);
     601  void IncrementEpoch(ThreadState *thr);
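
           // A minimal sketch of how a happens-before edge is established through an
           // address (thr1/thr2/x are hypothetical):
           //   // thread 1, after publishing data guarded by x:
           //   Release(thr1, pc, (uptr)&x);
           //   // thread 2, after observing x:
           //   Acquire(thr2, pc, (uptr)&x);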
     602  
     603  #if !SANITIZER_GO
     604  uptr ALWAYS_INLINE HeapEnd() {
     605    return HeapMemEnd() + PrimaryAllocator::AdditionalSize();
     606  }
     607  #endif
     608  
     609  void SlotAttachAndLock(ThreadState *thr) SANITIZER_ACQUIRE(thr->slot->mtx);
     610  void SlotDetach(ThreadState *thr);
     611  void SlotLock(ThreadState *thr) SANITIZER_ACQUIRE(thr->slot->mtx);
     612  void SlotUnlock(ThreadState *thr) SANITIZER_RELEASE(thr->slot->mtx);
     613  void DoReset(ThreadState *thr, uptr epoch);
     614  void FlushShadowMemory();
     615  
     616  ThreadState *FiberCreate(ThreadState *thr, uptr pc, unsigned flags);
     617  void FiberDestroy(ThreadState *thr, uptr pc, ThreadState *fiber);
     618  void FiberSwitch(ThreadState *thr, uptr pc, ThreadState *fiber, unsigned flags);
     619  
     620  // These need to match __tsan_switch_to_fiber_* flags defined in
     621  // tsan_interface.h. See documentation there as well.
     622  enum FiberSwitchFlags {
     623    FiberSwitchFlagNoSync = 1 << 0, // __tsan_switch_to_fiber_no_sync
     624  };
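
           // A minimal sketch of the corresponding public fiber API from tsan_interface.h:
           //   void *fiber = __tsan_create_fiber(0);
           //   __tsan_switch_to_fiber(fiber, __tsan_switch_to_fiber_no_sync);
           //   ...
           //   __tsan_destroy_fiber(fiber);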
     625  
     626  class SlotLocker {
     627   public:
     628    ALWAYS_INLINE
     629    SlotLocker(ThreadState *thr, bool recursive = false)
     630        : thr_(thr), locked_(recursive ? thr->slot_locked : false) {
     631  #if !SANITIZER_GO
     632      // We are in trouble if we are here with in_blocking_func set.
     633      // If in_blocking_func is set, all signals will be delivered synchronously,
      634  // which means we can't lock the slot, since the signal handler would try
     635      // to lock it recursively and deadlock.
     636      DCHECK(!atomic_load(&thr->in_blocking_func, memory_order_relaxed));
     637  #endif
     638      if (!locked_)
     639        SlotLock(thr_);
     640    }
     641  
     642    ALWAYS_INLINE
     643    ~SlotLocker() {
     644      if (!locked_)
     645        SlotUnlock(thr_);
     646    }
     647  
     648   private:
     649    ThreadState *thr_;
     650    bool locked_;
     651  };
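
           // A minimal usage sketch (hypothetical scope):
           //   {
           //     SlotLocker locker(thr);
           //     // slot state may be mutated here
           //   }  // unlocked here, unless the slot was already locked (recursive case)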
     652  
     653  class SlotUnlocker {
     654   public:
     655    SlotUnlocker(ThreadState *thr) : thr_(thr), locked_(thr->slot_locked) {
     656      if (locked_)
     657        SlotUnlock(thr_);
     658    }
     659  
     660    ~SlotUnlocker() {
     661      if (locked_)
     662        SlotLock(thr_);
     663    }
     664  
     665   private:
     666    ThreadState *thr_;
     667    bool locked_;
     668  };
     669  
     670  ALWAYS_INLINE void ProcessPendingSignals(ThreadState *thr) {
     671    if (UNLIKELY(atomic_load_relaxed(&thr->pending_signals)))
     672      ProcessPendingSignalsImpl(thr);
     673  }
     674  
     675  extern bool is_initialized;
     676  
     677  ALWAYS_INLINE
     678  void LazyInitialize(ThreadState *thr) {
      679  // If we can use .preinit_array, assume that __tsan_init
      680  // called from .preinit_array initializes the runtime before
      681  // any instrumented code runs, except on Android.
     682  #if (!SANITIZER_CAN_USE_PREINIT_ARRAY || defined(__ANDROID__))
     683    if (UNLIKELY(!is_initialized))
     684      Initialize(thr);
     685  #endif
     686  }
     687  
     688  void TraceResetForTesting();
     689  void TraceSwitchPart(ThreadState *thr);
     690  void TraceSwitchPartImpl(ThreadState *thr);
     691  bool RestoreStack(EventType type, Sid sid, Epoch epoch, uptr addr, uptr size,
     692                    AccessType typ, Tid *ptid, VarSizeStackTrace *pstk,
     693                    MutexSet *pmset, uptr *ptag);
     694  
     695  template <typename EventT>
     696  ALWAYS_INLINE WARN_UNUSED_RESULT bool TraceAcquire(ThreadState *thr,
     697                                                     EventT **ev) {
     698    // TraceSwitchPart accesses shadow_stack, but it's called infrequently,
     699    // so we check it here proactively.
     700    DCHECK(thr->shadow_stack);
     701    Event *pos = reinterpret_cast<Event *>(atomic_load_relaxed(&thr->trace_pos));
     702  #if SANITIZER_DEBUG
      703  // TraceSwitchPart acquires these mutexes,
     704    // so we lock them here to detect deadlocks more reliably.
     705    { Lock lock(&ctx->slot_mtx); }
     706    { Lock lock(&thr->tctx->trace.mtx); }
     707    TracePart *current = thr->tctx->trace.parts.Back();
     708    if (current) {
     709      DCHECK_GE(pos, &current->events[0]);
     710      DCHECK_LE(pos, &current->events[TracePart::kSize]);
     711    } else {
     712      DCHECK_EQ(pos, nullptr);
     713    }
     714  #endif
     715    // TracePart is allocated with mmap and is at least 4K aligned.
     716    // So the following check is a faster way to check for part end.
      717  // It may have false positives in the middle of the trace;
      718  // these are filtered out in TraceSwitchPart.
     719    if (UNLIKELY(((uptr)(pos + 1) & TracePart::kAlignment) == 0))
     720      return false;
     721    *ev = reinterpret_cast<EventT *>(pos);
     722    return true;
     723  }
     724  
     725  template <typename EventT>
     726  ALWAYS_INLINE void TraceRelease(ThreadState *thr, EventT *evp) {
     727    DCHECK_LE(evp + 1, &thr->tctx->trace.parts.Back()->events[TracePart::kSize]);
     728    atomic_store_relaxed(&thr->trace_pos, (uptr)(evp + 1));
     729  }
     730  
     731  template <typename EventT>
     732  void TraceEvent(ThreadState *thr, EventT ev) {
     733    EventT *evp;
     734    if (!TraceAcquire(thr, &evp)) {
     735      TraceSwitchPart(thr);
     736      UNUSED bool res = TraceAcquire(thr, &evp);
     737      DCHECK(res);
     738    }
     739    *evp = ev;
     740    TraceRelease(thr, evp);
     741  }
     742  
     743  ALWAYS_INLINE WARN_UNUSED_RESULT bool TryTraceFunc(ThreadState *thr,
     744                                                     uptr pc = 0) {
     745    if (!kCollectHistory)
     746      return true;
     747    EventFunc *ev;
     748    if (UNLIKELY(!TraceAcquire(thr, &ev)))
     749      return false;
     750    ev->is_access = 0;
     751    ev->is_func = 1;
     752    ev->pc = pc;
     753    TraceRelease(thr, ev);
     754    return true;
     755  }
     756  
     757  WARN_UNUSED_RESULT
     758  bool TryTraceMemoryAccess(ThreadState *thr, uptr pc, uptr addr, uptr size,
     759                            AccessType typ);
     760  WARN_UNUSED_RESULT
     761  bool TryTraceMemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, uptr size,
     762                                 AccessType typ);
     763  void TraceMemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, uptr size,
     764                              AccessType typ);
     765  void TraceFunc(ThreadState *thr, uptr pc = 0);
     766  void TraceMutexLock(ThreadState *thr, EventType type, uptr pc, uptr addr,
     767                      StackID stk);
     768  void TraceMutexUnlock(ThreadState *thr, uptr addr);
     769  void TraceTime(ThreadState *thr);
     770  
     771  void TraceRestartFuncExit(ThreadState *thr);
     772  void TraceRestartFuncEntry(ThreadState *thr, uptr pc);
     773  
     774  void GrowShadowStack(ThreadState *thr);
     775  
     776  ALWAYS_INLINE
     777  void FuncEntry(ThreadState *thr, uptr pc) {
     778    DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.sid(), (void *)pc);
     779    if (UNLIKELY(!TryTraceFunc(thr, pc)))
     780      return TraceRestartFuncEntry(thr, pc);
     781    DCHECK_GE(thr->shadow_stack_pos, thr->shadow_stack);
     782  #if !SANITIZER_GO
     783    DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
     784  #else
     785    if (thr->shadow_stack_pos == thr->shadow_stack_end)
     786      GrowShadowStack(thr);
     787  #endif
     788    thr->shadow_stack_pos[0] = pc;
     789    thr->shadow_stack_pos++;
     790  }
     791  
     792  ALWAYS_INLINE
     793  void FuncExit(ThreadState *thr) {
     794    DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.sid());
     795    if (UNLIKELY(!TryTraceFunc(thr, 0)))
     796      return TraceRestartFuncExit(thr);
     797    DCHECK_GT(thr->shadow_stack_pos, thr->shadow_stack);
     798  #if !SANITIZER_GO
     799    DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
     800  #endif
     801    thr->shadow_stack_pos--;
     802  }
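
           // A rough sketch of how compiler instrumentation is assumed to reach these via
           // the public entry points declared in tsan_interface.h:
           //   void __tsan_func_entry(void *pc) { FuncEntry(cur_thread(), (uptr)pc); }
           //   void __tsan_func_exit() { FuncExit(cur_thread()); }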
     803  
     804  #if !SANITIZER_GO
     805  extern void (*on_initialize)(void);
     806  extern int (*on_finalize)(int);
     807  #endif
     808  }  // namespace __tsan
     809  
     810  #endif  // TSAN_RTL_H