gcc-13.2.0/libsanitizer/sanitizer_common/sanitizer_atomic_clang_x86.h
//===-- sanitizer_atomic_clang_x86.h ----------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
// Not intended for direct inclusion. Include sanitizer_atomic.h.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_ATOMIC_CLANG_X86_H
#define SANITIZER_ATOMIC_CLANG_X86_H

namespace __sanitizer {

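// Spin-wait hint: executes cnt PAUSE instructions between compiler barriers,
// so the surrounding spin loop is neither reordered nor optimized away.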
inline void proc_yield(int cnt) {
  __asm__ __volatile__("" ::: "memory");
  for (int i = 0; i < cnt; i++)
    __asm__ __volatile__("pause");
  __asm__ __volatile__("" ::: "memory");
}

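// Atomic load for the atomic_xxx wrapper types declared in sanitizer_atomic.h.
// Only the orderings listed in the DCHECK below are meaningful for a load.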
template<typename T>
inline typename T::Type atomic_load(
    const volatile T *a, memory_order mo) {
  DCHECK(mo & (memory_order_relaxed | memory_order_consume
      | memory_order_acquire | memory_order_seq_cst));
  DCHECK(!((uptr)a % sizeof(*a)));
  typename T::Type v;

  if (sizeof(*a) < 8 || sizeof(void*) == 8) {
    // Assume that aligned loads are atomic.
    if (mo == memory_order_relaxed) {
      v = a->val_dont_use;
    } else if (mo == memory_order_consume) {
      // Assume that processor respects data dependencies
      // (and that compiler won't break them).
      __asm__ __volatile__("" ::: "memory");
      v = a->val_dont_use;
      __asm__ __volatile__("" ::: "memory");
    } else if (mo == memory_order_acquire) {
      __asm__ __volatile__("" ::: "memory");
      v = a->val_dont_use;
      // On x86 loads are implicitly acquire.
      __asm__ __volatile__("" ::: "memory");
    } else {  // seq_cst
      // On x86 a plain MOV load is enough for seq_cst: seq_cst stores are
      // followed by a full memory barrier (see atomic_store below).
      __asm__ __volatile__("" ::: "memory");
      v = a->val_dont_use;
      __asm__ __volatile__("" ::: "memory");
    }
  } else {
    // 64-bit load on 32-bit platform.
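    // An aligned quadword MOVQ is a single atomic memory access; going through
    // an MMX register avoids CMPXCHG8B, which would write to the location.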
    __asm__ __volatile__(
        "movq %1, %%mm0;"  // Use mmx reg for 64-bit atomic moves
        "movq %%mm0, %0;"  // (ptr could be read-only)
        "emms;"            // Empty mmx state/Reset FP regs
        : "=m" (v)
        : "m" (a->val_dont_use)
        : // mark the mmx registers as clobbered
#ifdef __MMX__
          "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7",
#endif  // #ifdef __MMX__
          "memory");
  }
  return v;
}

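// Atomic store counterpart; valid orderings are relaxed, release and seq_cst.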
template<typename T>
inline void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
  DCHECK(mo & (memory_order_relaxed | memory_order_release
      | memory_order_seq_cst));
  DCHECK(!((uptr)a % sizeof(*a)));

  if (sizeof(*a) < 8 || sizeof(void*) == 8) {
    // Assume that aligned stores are atomic.
    if (mo == memory_order_relaxed) {
      a->val_dont_use = v;
    } else if (mo == memory_order_release) {
      // On x86 stores are implicitly release.
      __asm__ __volatile__("" ::: "memory");
      a->val_dont_use = v;
      __asm__ __volatile__("" ::: "memory");
    } else {  // seq_cst
      // On x86 a plain store is only release; the full barrier below
      // upgrades it to seq_cst.
      __asm__ __volatile__("" ::: "memory");
      a->val_dont_use = v;
      __sync_synchronize();
    }
  } else {
    // 64-bit store on 32-bit platform.
    __asm__ __volatile__(
        "movq %1, %%mm0;"  // Use mmx reg for 64-bit atomic moves
        "movq %%mm0, %0;"
        "emms;"            // Empty mmx state/Reset FP regs
        : "=m" (a->val_dont_use)
        : "m" (v)
        : // mark the mmx registers as clobbered
#ifdef __MMX__
          "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7",
#endif  // #ifdef __MMX__
          "memory");
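    // The MOVQ store above is only release-ordered; a full barrier is needed
    // to make it sequentially consistent.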
    if (mo == memory_order_seq_cst)
      __sync_synchronize();
  }
}
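
// Usage sketch (assumes the atomic_uint32_t wrapper and u32 typedef that
// sanitizer_atomic.h / sanitizer_internal_defs.h provide):
//   atomic_uint32_t ready;
//   // Producer:
//   atomic_store(&ready, 1, memory_order_release);
//   // Consumer:
//   while (atomic_load(&ready, memory_order_acquire) == 0)
//     proc_yield(10);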

}  // namespace __sanitizer

#endif  // #ifndef SANITIZER_ATOMIC_CLANG_X86_H