//===-- tsan_shadow.h -------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef TSAN_SHADOW_H
#define TSAN_SHADOW_H

#include "tsan_defs.h"

namespace __tsan {

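// FastState holds the per-thread part of the shadow state: the thread's slot
// id (Sid), its current epoch, and the accesses-ignored flag, packed into 32
// bits. The Sid/epoch bits occupy the same positions as in Shadow::Parts, so
// the Shadow constructor can copy a FastState in as a single raw u32.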
class FastState {
 public:
  FastState() { Reset(); }

  void Reset() {
    part_.unused0_ = 0;
    part_.sid_ = static_cast<u8>(kFreeSid);
    part_.epoch_ = static_cast<u16>(kEpochLast);
    part_.unused1_ = 0;
    part_.ignore_accesses_ = false;
  }

  void SetSid(Sid sid) { part_.sid_ = static_cast<u8>(sid); }

  Sid sid() const { return static_cast<Sid>(part_.sid_); }

  Epoch epoch() const { return static_cast<Epoch>(part_.epoch_); }

  void SetEpoch(Epoch epoch) { part_.epoch_ = static_cast<u16>(epoch); }

  void SetIgnoreBit() { part_.ignore_accesses_ = 1; }
  void ClearIgnoreBit() { part_.ignore_accesses_ = 0; }
  bool GetIgnoreBit() const { return part_.ignore_accesses_; }

 private:
  friend class Shadow;
  struct Parts {
    u32 unused0_ : 8;
    u32 sid_ : 8;
    u32 epoch_ : kEpochBits;
    u32 unused1_ : 1;
    u32 ignore_accesses_ : 1;
  };
  union {
    Parts part_;
    u32 raw_;
  };
};

static_assert(sizeof(FastState) == kShadowSize, "bad FastState size");

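// Shadow is a single 32-bit shadow value describing one memory access to an
// 8-byte shadow cell: the accessing thread's Sid and epoch (in the same bit
// positions as in FastState), a byte mask of the accessed range within the
// cell (access_), and the is_read_/is_atomic_ type bits.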
class Shadow {
 public:
  static constexpr RawShadow kEmpty = static_cast<RawShadow>(0);

  Shadow(FastState state, u32 addr, u32 size, AccessType typ) {
    raw_ = state.raw_;
    DCHECK_GT(size, 0);
    DCHECK_LE(size, 8);
    UNUSED Sid sid0 = part_.sid_;
    UNUSED u16 epoch0 = part_.epoch_;
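    // Encode the access type bits and a contiguous byte mask covering `size`
    // bytes starting at the cell offset (addr & 0x7).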
    raw_ |= (!!(typ & kAccessAtomic) << kIsAtomicShift) |
            (!!(typ & kAccessRead) << kIsReadShift) |
            (((((1u << size) - 1) << (addr & 0x7)) & 0xff) << kAccessShift);
    // Note: we don't check kAccessAtomic because it overlaps with
    // FastState::ignore_accesses_ and it may be set spuriously.
    DCHECK_EQ(part_.is_read_, !!(typ & kAccessRead));
    DCHECK_EQ(sid(), sid0);
    DCHECK_EQ(epoch(), epoch0);
  }

  explicit Shadow(RawShadow x = Shadow::kEmpty) { raw_ = static_cast<u32>(x); }

  RawShadow raw() const { return static_cast<RawShadow>(raw_); }
  Sid sid() const { return part_.sid_; }
  Epoch epoch() const { return static_cast<Epoch>(part_.epoch_); }
  u8 access() const { return part_.access_; }

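  // Decodes the access from the byte mask: the lowest set bit gives the
  // offset within the cell, the number of set bits gives the size (except
  // for kFreeAccess, which denotes the whole cell).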
  void GetAccess(uptr *addr, uptr *size, AccessType *typ) const {
    DCHECK(part_.access_ != 0 || raw_ == static_cast<u32>(Shadow::kRodata));
    if (addr)
      *addr = part_.access_ ? __builtin_ffs(part_.access_) - 1 : 0;
    if (size)
      *size = part_.access_ == kFreeAccess ? kShadowCell
                                           : __builtin_popcount(part_.access_);
    if (typ) {
      *typ = part_.is_read_ ? kAccessRead : kAccessWrite;
      if (part_.is_atomic_)
        *typ |= kAccessAtomic;
      if (part_.access_ == kFreeAccess)
        *typ |= kAccessFree;
    }
  }

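  // Returns true if this shadow access and the incoming access typ are both
  // reads or both atomic, i.e. the pair cannot race.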
  ALWAYS_INLINE
  bool IsBothReadsOrAtomic(AccessType typ) const {
    u32 is_read = !!(typ & kAccessRead);
    u32 is_atomic = !!(typ & kAccessAtomic);
    bool res =
        raw_ & ((is_atomic << kIsAtomicShift) | (is_read << kIsReadShift));
    DCHECK_EQ(res,
              (part_.is_read_ && is_read) || (part_.is_atomic_ && is_atomic));
    return res;
  }

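  // Returns true if this shadow access is no stronger than typ in the
  // (atomic, read) ordering: an atomic access is weaker than a non-atomic
  // one, and a read is weaker than a write, with atomicity compared first.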
  ALWAYS_INLINE
  bool IsRWWeakerOrEqual(AccessType typ) const {
    u32 is_read = !!(typ & kAccessRead);
    u32 is_atomic = !!(typ & kAccessAtomic);
    UNUSED u32 res0 =
        (part_.is_atomic_ > is_atomic) ||
        (part_.is_atomic_ == is_atomic && part_.is_read_ >= is_read);
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
    const u32 kAtomicReadMask = (1 << kIsAtomicShift) | (1 << kIsReadShift);
    bool res = (raw_ & kAtomicReadMask) >=
               ((is_atomic << kIsAtomicShift) | (is_read << kIsReadShift));

    DCHECK_EQ(res, res0);
    return res;
#else
    return res0;
#endif
  }

  // The FreedMarker must not pass the "same access" check so that we don't
  // return from the race detection algorithm early.
  static RawShadow FreedMarker() {
    FastState fs;
    fs.SetSid(kFreeSid);
    fs.SetEpoch(kEpochLast);
    Shadow s(fs, 0, 8, kAccessWrite);
    return s.raw();
  }

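  // Encodes who freed the memory: kFreeAccess marks the cell as freed, and
  // sid/epoch identify the freeing thread and the time of the free.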
  static RawShadow FreedInfo(Sid sid, Epoch epoch) {
    Shadow s;
    s.part_.sid_ = sid;
    s.part_.epoch_ = static_cast<u16>(epoch);
    s.part_.access_ = kFreeAccess;
    return s.raw();
  }

 private:
  struct Parts {
    u8 access_;
    Sid sid_;
    u16 epoch_ : kEpochBits;
    u16 is_read_ : 1;
    u16 is_atomic_ : 1;
  };
  union {
    Parts part_;
    u32 raw_;
  };

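  // Marker for freed memory in the access mask. 0x81 has non-contiguous set
  // bits, so it cannot collide with any real mask built by the constructor.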
  static constexpr u8 kFreeAccess = 0x81;

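  // Positions of the access mask and the type bits within raw_. They depend
  // on byte order because Parts overlays raw_ through the union above.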
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
  static constexpr uptr kAccessShift = 0;
  static constexpr uptr kIsReadShift = 30;
  static constexpr uptr kIsAtomicShift = 31;
#else
  static constexpr uptr kAccessShift = 24;
  static constexpr uptr kIsReadShift = 1;
  static constexpr uptr kIsAtomicShift = 0;
#endif

 public:
  // .rodata shadow marker, see MapRodata and ContainsSameAccessFast.
  static constexpr RawShadow kRodata =
      static_cast<RawShadow>(1 << kIsReadShift);
};

static_assert(sizeof(Shadow) == kShadowSize, "bad Shadow size");
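
// A minimal usage sketch (illustrative only, not part of the interface):
// encoding a 4-byte write at offset 2 of a cell and decoding it back:
//
//   FastState fs;  // after Reset(): sid = kFreeSid, epoch = kEpochLast
//   Shadow s(fs, /*addr=*/2, /*size=*/4, kAccessWrite);
//   uptr addr, size;
//   AccessType typ;
//   s.GetAccess(&addr, &size, &typ);  // addr == 2, size == 4, write access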
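// Shadow memory is read and written concurrently by all threads, so plain
// loads and stores would themselves be a data race; relaxed atomics keep the
// accesses well-defined without imposing any ordering.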
ALWAYS_INLINE RawShadow LoadShadow(RawShadow *p) {
  return static_cast<RawShadow>(
      atomic_load((atomic_uint32_t *)p, memory_order_relaxed));
}

ALWAYS_INLINE void StoreShadow(RawShadow *sp, RawShadow s) {
  atomic_store((atomic_uint32_t *)sp, static_cast<u32>(s),
               memory_order_relaxed);
}

}  // namespace __tsan

#endif