gcc-13.2.0/libsanitizer/sanitizer_common/sanitizer_atomic_clang_other.h
//===-- sanitizer_atomic_clang_other.h --------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
// Not intended for direct inclusion. Include sanitizer_atomic.h.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_ATOMIC_CLANG_OTHER_H
#define SANITIZER_ATOMIC_CLANG_OTHER_H

namespace __sanitizer {

// Spin-wait relaxation hint. This generic fallback has no pause/yield
// instruction to emit, so it reduces to a compiler barrier and ignores the
// requested iteration count.
inline void proc_yield(int cnt) {
  __asm__ __volatile__("" ::: "memory");
}
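
// Usage sketch (illustrative; not part of the upstream file): proc_yield() is
// intended for spin-wait loops, e.g. together with atomic_load() (defined
// below) on an assumed atomic_uint32_t-style flag declared elsewhere:
//
//   while (atomic_load(&flag, memory_order_acquire) == 0)
//     proc_yield(10);  // backoff hint; only a compiler barrier on these targets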

template<typename T>
inline typename T::Type atomic_load(
    const volatile T *a, memory_order mo) {
  DCHECK(mo & (memory_order_relaxed | memory_order_consume
      | memory_order_acquire | memory_order_seq_cst));
  DCHECK(!((uptr)a % sizeof(*a)));
  typename T::Type v;

  if (sizeof(*a) < 8 || sizeof(void*) == 8) {
    // Assume that aligned loads are atomic.
    if (mo == memory_order_relaxed) {
      v = a->val_dont_use;
    } else if (mo == memory_order_consume) {
      // Assume that processor respects data dependencies
      // (and that compiler won't break them).
      __asm__ __volatile__("" ::: "memory");
      v = a->val_dont_use;
      __asm__ __volatile__("" ::: "memory");
    } else if (mo == memory_order_acquire) {
      __asm__ __volatile__("" ::: "memory");
      v = a->val_dont_use;
      __sync_synchronize();
    } else {  // seq_cst
      // E.g. on POWER we need a hw fence even before the load.
      __sync_synchronize();
      v = a->val_dont_use;
      __sync_synchronize();
    }
  } else {
    __atomic_load(const_cast<typename T::Type volatile *>(&a->val_dont_use), &v,
                  __ATOMIC_SEQ_CST);
  }
  return v;
}
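
// Instantiation sketch (illustrative; not part of the upstream file). The
// typed wrappers that feed this template, such as atomic_uint32_t, are
// declared in sanitizer_atomic.h; any struct with the same shape works.
// Assumed stand-in and caller:
//
//   struct atomic_u32_example {
//     typedef u32 Type;
//     volatile Type val_dont_use;
//   };
//
//   u32 read_flag_acquire(const volatile atomic_u32_example *f) {
//     // Compiler barrier, plain aligned read, then __sync_synchronize().
//     return atomic_load(f, memory_order_acquire);
//   }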

template<typename T>
inline void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
  DCHECK(mo & (memory_order_relaxed | memory_order_release
      | memory_order_seq_cst));
  DCHECK(!((uptr)a % sizeof(*a)));

  if (sizeof(*a) < 8 || sizeof(void*) == 8) {
    // Assume that aligned stores are atomic.
    if (mo == memory_order_relaxed) {
      a->val_dont_use = v;
    } else if (mo == memory_order_release) {
      __sync_synchronize();
      a->val_dont_use = v;
      __asm__ __volatile__("" ::: "memory");
    } else {  // seq_cst
      __sync_synchronize();
      a->val_dont_use = v;
      __sync_synchronize();
    }
  } else {
    __atomic_store(&a->val_dont_use, &v, __ATOMIC_SEQ_CST);
  }
}
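
// Pairing sketch (illustrative; not part of the upstream file), reusing the
// assumed atomic_u32_example wrapper from the sketch above. The release store
// (__sync_synchronize(), plain aligned write, compiler barrier) publishes data
// that the acquire load in read_flag_acquire() can then safely observe:
//
//   void publish_flag(volatile atomic_u32_example *f) {
//     atomic_store(f, 1, memory_order_release);
//   }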

}  // namespace __sanitizer

#endif  // #ifndef SANITIZER_ATOMIC_CLANG_OTHER_H