//===-- sanitizer_atomic_msvc.h ---------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
// Not intended for direct inclusion. Include sanitizer_atomic.h.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_ATOMIC_MSVC_H
#define SANITIZER_ATOMIC_MSVC_H

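// The intrinsics below are declared by hand rather than by including
// <intrin.h>, presumably to keep the sanitizer runtime free of MSVC/Windows
// header dependencies. The #pragma intrinsic directives ask the compiler to
// expand each call inline instead of emitting a library call.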
extern "C" void _ReadWriteBarrier();
#pragma intrinsic(_ReadWriteBarrier)
extern "C" void _mm_mfence();
#pragma intrinsic(_mm_mfence)
extern "C" void _mm_pause();
#pragma intrinsic(_mm_pause)
extern "C" char _InterlockedExchange8(char volatile *Target, char Value);
#pragma intrinsic(_InterlockedExchange8)
extern "C" short _InterlockedExchange16(short volatile *Target, short Value);
#pragma intrinsic(_InterlockedExchange16)
extern "C" long _InterlockedExchange(long volatile *Target, long Value);
#pragma intrinsic(_InterlockedExchange)
extern "C" long _InterlockedExchangeAdd(long volatile *Addend, long Value);
#pragma intrinsic(_InterlockedExchangeAdd)
extern "C" char _InterlockedCompareExchange8(char volatile *Destination,
                                             char Exchange, char Comparand);
#pragma intrinsic(_InterlockedCompareExchange8)
extern "C" short _InterlockedCompareExchange16(short volatile *Destination,
                                               short Exchange, short Comparand);
#pragma intrinsic(_InterlockedCompareExchange16)
extern "C" long long _InterlockedCompareExchange64(
    long long volatile *Destination, long long Exchange, long long Comparand);
#pragma intrinsic(_InterlockedCompareExchange64)
extern "C" void *_InterlockedCompareExchangePointer(
    void *volatile *Destination,
    void *Exchange, void *Comparand);
#pragma intrinsic(_InterlockedCompareExchangePointer)
extern "C" long __cdecl _InterlockedCompareExchange(long volatile *Destination,
                                                    long Exchange,
                                                    long Comparand);
#pragma intrinsic(_InterlockedCompareExchange)

#ifdef _WIN64
extern "C" long long _InterlockedExchangeAdd64(long long volatile *Addend,
                                               long long Value);
#pragma intrinsic(_InterlockedExchangeAdd64)
#endif

namespace __sanitizer {

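// _ReadWriteBarrier is a compiler-level barrier only: it prevents the
// compiler from moving memory accesses across it but emits no CPU fence,
// which is exactly the contract of a signal fence.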
inline void atomic_signal_fence(memory_order) {
  _ReadWriteBarrier();
}

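// A full hardware fence (x86 MFENCE). Stronger than strictly necessary for
// the weaker orderings, but always correct.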
inline void atomic_thread_fence(memory_order) {
  _mm_mfence();
}

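// Each iteration executes the x86 PAUSE instruction, which hints to the CPU
// that this is a spin-wait loop; it reduces power use and avoids memory-order
// mis-speculation penalties when the wait ends.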
inline void proc_yield(int cnt) {
  for (int i = 0; i < cnt; i++)
    _mm_pause();
}

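// T is one of the atomic_uintN_t structs from sanitizer_atomic.h, whose raw
// storage is the val_dont_use field. On x86 an aligned load already has
// acquire semantics in hardware, so for the stronger orderings only compiler
// fences are needed to keep the *compiler* from reordering around the load.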
template<typename T>
inline typename T::Type atomic_load(
    const volatile T *a, memory_order mo) {
  DCHECK(mo & (memory_order_relaxed | memory_order_consume
      | memory_order_acquire | memory_order_seq_cst));
  DCHECK(!((uptr)a % sizeof(*a)));
  typename T::Type v;
  // FIXME(dvyukov): 64-bit load is not atomic on 32-bits.
  if (mo == memory_order_relaxed) {
    v = a->val_dont_use;
  } else {
    atomic_signal_fence(memory_order_seq_cst);
    v = a->val_dont_use;
    atomic_signal_fence(memory_order_seq_cst);
  }
  return v;
}

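// The mirror image of atomic_load: on x86 an aligned store already has
// release semantics, so compiler fences suffice for memory_order_release.
// memory_order_seq_cst additionally needs a real MFENCE after the store so
// the store is ordered before any later loads (x86 allows store-load
// reordering through the store buffer).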
template<typename T>
inline void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
  DCHECK(mo & (memory_order_relaxed | memory_order_release
      | memory_order_seq_cst));
  DCHECK(!((uptr)a % sizeof(*a)));
  // FIXME(dvyukov): 64-bit store is not atomic on 32-bits.
  if (mo == memory_order_relaxed) {
    a->val_dont_use = v;
  } else {
    atomic_signal_fence(memory_order_seq_cst);
    a->val_dont_use = v;
    atomic_signal_fence(memory_order_seq_cst);
  }
  if (mo == memory_order_seq_cst)
    atomic_thread_fence(memory_order_seq_cst);
}

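// The fetch-and-add family maps onto _InterlockedExchangeAdd*, i.e. LOCK
// XADD, which is a full barrier on x86; that is why the memory_order argument
// can be ignored. Illustrative use (hypothetical variable, not part of this
// header):
//   atomic_uint32_t refcount;
//   atomic_fetch_add(&refcount, 1, memory_order_relaxed);  // returns old value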
inline u32 atomic_fetch_add(volatile atomic_uint32_t *a,
    u32 v, memory_order mo) {
  (void)mo;
  DCHECK(!((uptr)a % sizeof(*a)));
  return (u32)_InterlockedExchangeAdd((volatile long *)&a->val_dont_use,
                                      (long)v);
}

inline uptr atomic_fetch_add(volatile atomic_uintptr_t *a,
    uptr v, memory_order mo) {
  (void)mo;
  DCHECK(!((uptr)a % sizeof(*a)));
#ifdef _WIN64
  return (uptr)_InterlockedExchangeAdd64((volatile long long *)&a->val_dont_use,
                                         (long long)v);
#else
  return (uptr)_InterlockedExchangeAdd((volatile long *)&a->val_dont_use,
                                       (long)v);
#endif
}

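// Subtraction is expressed as addition of the negated value; the casts rely
// on the two's-complement wraparound semantics of MSVC targets, so the result
// matches a true atomic subtract.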
inline u32 atomic_fetch_sub(volatile atomic_uint32_t *a,
    u32 v, memory_order mo) {
  (void)mo;
  DCHECK(!((uptr)a % sizeof(*a)));
  return (u32)_InterlockedExchangeAdd((volatile long *)&a->val_dont_use,
                                      -(long)v);
}

inline uptr atomic_fetch_sub(volatile atomic_uintptr_t *a,
    uptr v, memory_order mo) {
  (void)mo;
  DCHECK(!((uptr)a % sizeof(*a)));
#ifdef _WIN64
  return (uptr)_InterlockedExchangeAdd64((volatile long long *)&a->val_dont_use,
                                         -(long long)v);
#else
  return (uptr)_InterlockedExchangeAdd((volatile long *)&a->val_dont_use,
                                       -(long)v);
#endif
}

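// The exchange family maps onto the locked XCHG-style intrinsics: the new
// value is stored and the previous value returned, again with full-barrier
// semantics. Note that no 64-bit or pointer-sized exchange overload is
// provided in this header.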
inline u8 atomic_exchange(volatile atomic_uint8_t *a,
    u8 v, memory_order mo) {
  (void)mo;
  DCHECK(!((uptr)a % sizeof(*a)));
  return (u8)_InterlockedExchange8((volatile char*)&a->val_dont_use, v);
}

inline u16 atomic_exchange(volatile atomic_uint16_t *a,
    u16 v, memory_order mo) {
  (void)mo;
  DCHECK(!((uptr)a % sizeof(*a)));
  return (u16)_InterlockedExchange16((volatile short*)&a->val_dont_use, v);
}

inline u32 atomic_exchange(volatile atomic_uint32_t *a,
    u32 v, memory_order mo) {
  (void)mo;
  DCHECK(!((uptr)a % sizeof(*a)));
  return (u32)_InterlockedExchange((volatile long*)&a->val_dont_use, v);
}

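// Compare-exchange contract shared by all overloads below: if *a equals *cmp,
// store xchg and return true; otherwise write the value actually observed
// into *cmp and return false. A typical retry loop (sketch only; `counter` is
// a hypothetical variable, not part of this header):
//   atomic_uint32_t counter;
//   u32 old = atomic_load(&counter, memory_order_relaxed);
//   while (!atomic_compare_exchange_strong(&counter, &old, old + 1,
//                                          memory_order_acq_rel)) {
//     // on failure, old has been refreshed with the observed value; retry
//   }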
inline bool atomic_compare_exchange_strong(volatile atomic_uint8_t *a,
                                           u8 *cmp,
                                           u8 xchgv,
                                           memory_order mo) {
  (void)mo;
  DCHECK(!((uptr)a % sizeof(*a)));
  u8 cmpv = *cmp;
#ifdef _WIN64
  u8 prev = (u8)_InterlockedCompareExchange8(
      (volatile char*)&a->val_dont_use, (char)xchgv, (char)cmpv);
#else
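  // Fallback for 32-bit builds, presumably because older x86 MSVC toolchains
  // lacked _InterlockedCompareExchange8: a hand-coded LOCK CMPXCHG. AL holds
  // the comparand and receives the previous value; DL holds the new value.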
  u8 prev;
  __asm {
    mov al, cmpv
    mov ecx, a
    mov dl, xchgv
    lock cmpxchg [ecx], dl
    mov prev, al
  }
#endif
  if (prev == cmpv)
    return true;
  *cmp = prev;
  return false;
}

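// _InterlockedCompareExchangePointer operates on the native pointer width, so
// this single overload covers both 32- and 64-bit builds without an #ifdef.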
inline bool atomic_compare_exchange_strong(volatile atomic_uintptr_t *a,
                                           uptr *cmp,
                                           uptr xchg,
                                           memory_order mo) {
  uptr cmpv = *cmp;
  uptr prev = (uptr)_InterlockedCompareExchangePointer(
      (void*volatile*)&a->val_dont_use, (void*)xchg, (void*)cmpv);
  if (prev == cmpv)
    return true;
  *cmp = prev;
  return false;
}

inline bool atomic_compare_exchange_strong(volatile atomic_uint16_t *a,
                                           u16 *cmp,
                                           u16 xchg,
                                           memory_order mo) {
  u16 cmpv = *cmp;
  u16 prev = (u16)_InterlockedCompareExchange16(
      (volatile short*)&a->val_dont_use, (short)xchg, (short)cmpv);
  if (prev == cmpv)
    return true;
  *cmp = prev;
  return false;
}

inline bool atomic_compare_exchange_strong(volatile atomic_uint32_t *a,
                                           u32 *cmp,
                                           u32 xchg,
                                           memory_order mo) {
  u32 cmpv = *cmp;
  u32 prev = (u32)_InterlockedCompareExchange(
      (volatile long*)&a->val_dont_use, (long)xchg, (long)cmpv);
  if (prev == cmpv)
    return true;
  *cmp = prev;
  return false;
}

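// _InterlockedCompareExchange64 is available even on 32-bit x86, where it is
// implemented with LOCK CMPXCHG8B, so the 64-bit CAS needs no #ifdef either.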
inline bool atomic_compare_exchange_strong(volatile atomic_uint64_t *a,
                                           u64 *cmp,
                                           u64 xchg,
                                           memory_order mo) {
  u64 cmpv = *cmp;
  u64 prev = (u64)_InterlockedCompareExchange64(
      (volatile long long*)&a->val_dont_use, (long long)xchg, (long long)cmpv);
  if (prev == cmpv)
    return true;
  *cmp = prev;
  return false;
}

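// x86 compare-exchange cannot fail spuriously, so the weak form can simply
// delegate to the strong one.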
template<typename T>
inline bool atomic_compare_exchange_weak(volatile T *a,
                                         typename T::Type *cmp,
                                         typename T::Type xchg,
                                         memory_order mo) {
  return atomic_compare_exchange_strong(a, cmp, xchg, mo);
}

}  // namespace __sanitizer

#endif  // SANITIZER_ATOMIC_MSVC_H