glibc-2.38/sysdeps/alpha/atomic-machine.h
/* Copyright (C) 2003-2023 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library.  If not, see
   <https://www.gnu.org/licenses/>.  */

#include <stdint.h>

#define __HAVE_64B_ATOMICS 1
#define USE_ATOMIC_COMPILER_BUILTINS 0

/* XXX Is this actually correct?  */
#define ATOMIC_EXCHANGE_USES_CAS 1


#define __MB		"	mb\n"

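/* __MB is a raw assembly fragment.  The mb1/mb2 parameters of the macros
   below receive either this fragment or "", so a barrier is pasted into
   (or omitted from) the asm template at the chosen point.  For example,
   instantiating one of the templates below with ("", __MB) produces an
   ll/sc retry loop followed by a single "mb", i.e. a barrier only after
   the store-conditional, which is the "acquire" convention used by the
   wrappers further down.  */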

/* Compare and exchange.  For all of the "xxx" routines, we expect a
   "__prev" and a "__cmp" variable to be provided by the enclosing scope,
   in which values are returned.  */
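
/* For illustration, an enclosing statement expression declares both
   variables, invokes the "xxx" routine, and then picks out whichever
   result it needs; this is exactly the shape of the "bool" and "val"
   wrappers further down:

     ({ unsigned long __prev; int __cmp;
	__arch_compare_and_exchange_xxx_32_int (mem, new, old, "", __MB);
	!__cmp; })

   The 8- and 16-bit variants cannot use a byte- or word-sized ll/sc
   (Alpha has only ldl_l/ldq_l and stl_c/stq_c), so they operate on the
   containing aligned quadword: insbl/inswl places the new value in the
   correct byte lane, extbl/extwl extracts the old one for the compare,
   and mskbl/mskwl clears the lane before the replacement is OR-ed in.  */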

#define __arch_compare_and_exchange_xxx_8_int(mem, new, old, mb1, mb2)	\
({									\
  unsigned long __tmp, __snew, __addr64;				\
  __asm__ __volatile__ (						\
		mb1							\
	"	andnot	%[__addr8],7,%[__addr64]\n"			\
	"	insbl	%[__new],%[__addr8],%[__snew]\n"		\
	"1:	ldq_l	%[__tmp],0(%[__addr64])\n"			\
	"	extbl	%[__tmp],%[__addr8],%[__prev]\n"		\
	"	cmpeq	%[__prev],%[__old],%[__cmp]\n"			\
	"	beq	%[__cmp],2f\n"					\
	"	mskbl	%[__tmp],%[__addr8],%[__tmp]\n"			\
	"	or	%[__snew],%[__tmp],%[__tmp]\n"			\
	"	stq_c	%[__tmp],0(%[__addr64])\n"			\
	"	beq	%[__tmp],1b\n"					\
		mb2							\
	"2:"								\
	: [__prev] "=&r" (__prev),					\
	  [__snew] "=&r" (__snew),					\
	  [__tmp] "=&r" (__tmp),					\
	  [__cmp] "=&r" (__cmp),					\
	  [__addr64] "=&r" (__addr64)					\
	: [__addr8] "r" (mem),						\
	  [__old] "Ir" ((uint64_t)(uint8_t)(uint64_t)(old)),		\
	  [__new] "r" (new)						\
	: "memory");							\
})

#define __arch_compare_and_exchange_xxx_16_int(mem, new, old, mb1, mb2) \
({									\
  unsigned long __tmp, __snew, __addr64;				\
  __asm__ __volatile__ (						\
		mb1							\
	"	andnot	%[__addr16],7,%[__addr64]\n"			\
	"	inswl	%[__new],%[__addr16],%[__snew]\n"		\
	"1:	ldq_l	%[__tmp],0(%[__addr64])\n"			\
	"	extwl	%[__tmp],%[__addr16],%[__prev]\n"		\
	"	cmpeq	%[__prev],%[__old],%[__cmp]\n"			\
	"	beq	%[__cmp],2f\n"					\
	"	mskwl	%[__tmp],%[__addr16],%[__tmp]\n"		\
	"	or	%[__snew],%[__tmp],%[__tmp]\n"			\
	"	stq_c	%[__tmp],0(%[__addr64])\n"			\
	"	beq	%[__tmp],1b\n"					\
		mb2							\
	"2:"								\
	: [__prev] "=&r" (__prev),					\
	  [__snew] "=&r" (__snew),					\
	  [__tmp] "=&r" (__tmp),					\
	  [__cmp] "=&r" (__cmp),					\
	  [__addr64] "=&r" (__addr64)					\
	: [__addr16] "r" (mem),						\
	  [__old] "Ir" ((uint64_t)(uint16_t)(uint64_t)(old)),		\
	  [__new] "r" (new)						\
	: "memory");							\
})

#define __arch_compare_and_exchange_xxx_32_int(mem, new, old, mb1, mb2) \
({									\
  __asm__ __volatile__ (						\
		mb1							\
	"1:	ldl_l	%[__prev],%[__mem]\n"				\
	"	cmpeq	%[__prev],%[__old],%[__cmp]\n"			\
	"	beq	%[__cmp],2f\n"					\
	"	mov	%[__new],%[__cmp]\n"				\
	"	stl_c	%[__cmp],%[__mem]\n"				\
	"	beq	%[__cmp],1b\n"					\
		mb2							\
	"2:"								\
	: [__prev] "=&r" (__prev),					\
	  [__cmp] "=&r" (__cmp)						\
	: [__mem] "m" (*(mem)),						\
	  [__old] "Ir" ((uint64_t)(int32_t)(uint64_t)(old)),		\
	  [__new] "Ir" (new)						\
	: "memory");							\
})

#define __arch_compare_and_exchange_xxx_64_int(mem, new, old, mb1, mb2) \
({									\
  __asm__ __volatile__ (						\
		mb1							\
	"1:	ldq_l	%[__prev],%[__mem]\n"				\
	"	cmpeq	%[__prev],%[__old],%[__cmp]\n"			\
	"	beq	%[__cmp],2f\n"					\
	"	mov	%[__new],%[__cmp]\n"				\
	"	stq_c	%[__cmp],%[__mem]\n"				\
	"	beq	%[__cmp],1b\n"					\
		mb2							\
	"2:"								\
	: [__prev] "=&r" (__prev),					\
	  [__cmp] "=&r" (__cmp)						\
	: [__mem] "m" (*(mem)),						\
	  [__old] "Ir" ((uint64_t)(old)),				\
	  [__new] "Ir" (new)						\
	: "memory");							\
})

/* For all "bool" routines, we return FALSE if exchange successful.  */

#define __arch_compare_and_exchange_bool_8_int(mem, new, old, mb1, mb2)	\
({ unsigned long __prev; int __cmp;					\
   __arch_compare_and_exchange_xxx_8_int(mem, new, old, mb1, mb2);	\
   !__cmp; })

#define __arch_compare_and_exchange_bool_16_int(mem, new, old, mb1, mb2) \
({ unsigned long __prev; int __cmp;					\
   __arch_compare_and_exchange_xxx_16_int(mem, new, old, mb1, mb2);	\
   !__cmp; })

#define __arch_compare_and_exchange_bool_32_int(mem, new, old, mb1, mb2) \
({ unsigned long __prev; int __cmp;					\
   __arch_compare_and_exchange_xxx_32_int(mem, new, old, mb1, mb2);	\
   !__cmp; })

#define __arch_compare_and_exchange_bool_64_int(mem, new, old, mb1, mb2) \
({ unsigned long __prev; int __cmp;					\
   __arch_compare_and_exchange_xxx_64_int(mem, new, old, mb1, mb2);	\
   !__cmp; })
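
/* Illustrative use of the "bool" form (the concrete values are just an
   example); the result is FALSE (0) exactly when the exchange happened:

     int v = 1;
     int failed = __arch_compare_and_exchange_bool_32_int (&v, 2, 1, "", __MB);

   Here failed == 0 and v == 2, because v matched the expected value 1.  */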

/* For all "val" routines, return the old value whether exchange
   successful or not.  */

#define __arch_compare_and_exchange_val_8_int(mem, new, old, mb1, mb2)	\
({ unsigned long __prev; int __cmp;					\
   __arch_compare_and_exchange_xxx_8_int(mem, new, old, mb1, mb2);	\
   (typeof (*mem))__prev; })

#define __arch_compare_and_exchange_val_16_int(mem, new, old, mb1, mb2) \
({ unsigned long __prev; int __cmp;					\
   __arch_compare_and_exchange_xxx_16_int(mem, new, old, mb1, mb2);	\
   (typeof (*mem))__prev; })

#define __arch_compare_and_exchange_val_32_int(mem, new, old, mb1, mb2) \
({ unsigned long __prev; int __cmp;					\
   __arch_compare_and_exchange_xxx_32_int(mem, new, old, mb1, mb2);	\
   (typeof (*mem))__prev; })

#define __arch_compare_and_exchange_val_64_int(mem, new, old, mb1, mb2) \
({ unsigned long __prev; int __cmp;					\
   __arch_compare_and_exchange_xxx_64_int(mem, new, old, mb1, mb2);	\
   (typeof (*mem))__prev; })
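
/* Illustrative use of the "val" form: the previous value comes back
   whether or not the store happened, so success is detected by comparing
   it against the expected value:

     int v = 1;
     int prev = __arch_compare_and_exchange_val_32_int (&v, 2, 1, "", __MB);

   Here prev == 1, so the exchange succeeded and v == 2.  */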

/* Compare and exchange with "acquire" semantics, i.e. barrier after.  */

#define atomic_compare_and_exchange_bool_acq(mem, new, old)	\
  __atomic_bool_bysize (__arch_compare_and_exchange_bool, int,	\
		        mem, new, old, "", __MB)

#define atomic_compare_and_exchange_val_acq(mem, new, old)	\
  __atomic_val_bysize (__arch_compare_and_exchange_val, int,	\
		       mem, new, old, "", __MB)

/* Compare and exchange with "release" semantics, i.e. barrier before.  */

#define atomic_compare_and_exchange_val_rel(mem, new, old)	\
  __atomic_val_bysize (__arch_compare_and_exchange_val, int,	\
		       mem, new, old, __MB, "")
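
/* Sketch of how these are reached: the __atomic_val_bysize and
   __atomic_bool_bysize dispatchers come from the generic atomic.h layer
   and pick the _8/_16/_32/_64 variant based on sizeof (*mem).

     long l = 10;
     long prev = atomic_compare_and_exchange_val_acq (&l, 20, 10);

   This expands to __arch_compare_and_exchange_val_64_int with mb1 == ""
   and mb2 == __MB, i.e. the barrier is placed after the ll/sc loop.  */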


/* Atomically store value and return the previous value.  */

#define __arch_exchange_8_int(mem, value, mb1, mb2)			\
({									\
  unsigned long __tmp, __addr64, __sval; __typeof(*mem) __ret;		\
  __asm__ __volatile__ (						\
		mb1							\
	"	andnot	%[__addr8],7,%[__addr64]\n"			\
	"	insbl	%[__value],%[__addr8],%[__sval]\n"		\
	"1:	ldq_l	%[__tmp],0(%[__addr64])\n"			\
	"	extbl	%[__tmp],%[__addr8],%[__ret]\n"			\
	"	mskbl	%[__tmp],%[__addr8],%[__tmp]\n"			\
	"	or	%[__sval],%[__tmp],%[__tmp]\n"			\
	"	stq_c	%[__tmp],0(%[__addr64])\n"			\
	"	beq	%[__tmp],1b\n"					\
		mb2							\
	: [__ret] "=&r" (__ret),					\
	  [__sval] "=&r" (__sval),					\
	  [__tmp] "=&r" (__tmp),					\
	  [__addr64] "=&r" (__addr64)					\
	: [__addr8] "r" (mem),						\
	  [__value] "r" (value)						\
	: "memory");							\
  __ret; })

#define __arch_exchange_16_int(mem, value, mb1, mb2)			\
({									\
  unsigned long __tmp, __addr64, __sval; __typeof(*mem) __ret;		\
  __asm__ __volatile__ (						\
		mb1							\
	"	andnot	%[__addr16],7,%[__addr64]\n"			\
	"	inswl	%[__value],%[__addr16],%[__sval]\n"		\
	"1:	ldq_l	%[__tmp],0(%[__addr64])\n"			\
	"	extwl	%[__tmp],%[__addr16],%[__ret]\n"		\
	"	mskwl	%[__tmp],%[__addr16],%[__tmp]\n"		\
	"	or	%[__sval],%[__tmp],%[__tmp]\n"			\
	"	stq_c	%[__tmp],0(%[__addr64])\n"			\
	"	beq	%[__tmp],1b\n"					\
		mb2							\
	: [__ret] "=&r" (__ret),					\
	  [__sval] "=&r" (__sval),					\
	  [__tmp] "=&r" (__tmp),					\
	  [__addr64] "=&r" (__addr64)					\
	: [__addr16] "r" (mem),						\
	  [__value] "r" (value)						\
	: "memory");							\
  __ret; })

#define __arch_exchange_32_int(mem, value, mb1, mb2)			\
({									\
  signed int __tmp; __typeof(*mem) __ret;				\
  __asm__ __volatile__ (						\
		mb1							\
	"1:	ldl_l	%[__ret],%[__mem]\n"				\
	"	mov	%[__val],%[__tmp]\n"				\
	"	stl_c	%[__tmp],%[__mem]\n"				\
	"	beq	%[__tmp],1b\n"					\
		mb2							\
	: [__ret] "=&r" (__ret),					\
	  [__tmp] "=&r" (__tmp)						\
	: [__mem] "m" (*(mem)),						\
	  [__val] "Ir" (value)						\
	: "memory");							\
  __ret; })

#define __arch_exchange_64_int(mem, value, mb1, mb2)			\
({									\
  unsigned long __tmp; __typeof(*mem) __ret;				\
  __asm__ __volatile__ (						\
		mb1							\
	"1:	ldq_l	%[__ret],%[__mem]\n"				\
	"	mov	%[__val],%[__tmp]\n"				\
	"	stq_c	%[__tmp],%[__mem]\n"				\
	"	beq	%[__tmp],1b\n"					\
		mb2							\
	: [__ret] "=&r" (__ret),					\
	  [__tmp] "=&r" (__tmp)						\
	: [__mem] "m" (*(mem)),						\
	  [__val] "Ir" (value)						\
	: "memory");							\
  __ret; })

#define atomic_exchange_acq(mem, value) \
  __atomic_val_bysize (__arch_exchange, int, mem, value, "", __MB)

#define atomic_exchange_rel(mem, value) \
  __atomic_val_bysize (__arch_exchange, int, mem, value, __MB, "")
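
/* Illustrative use (a simple test-and-set lock word, names chosen only
   for the example): the previous contents come back, and for the _acq
   form the barrier sits after the ll/sc loop:

     unsigned long lock = 0;
     int got_it = atomic_exchange_acq (&lock, 1) == 0;

   If got_it is nonzero the lock was free; the trailing mb keeps the
   critical section's accesses from moving before the exchange.  */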


/* Atomically add value and return the previous (unincremented) value.  */

/* The 8- and 16-bit variants are not implemented; trap if they are
   ever used.  */
#define __arch_exchange_and_add_8_int(mem, value, mb1, mb2) \
  ({ __builtin_trap (); 0; })

#define __arch_exchange_and_add_16_int(mem, value, mb1, mb2) \
  ({ __builtin_trap (); 0; })

#define __arch_exchange_and_add_32_int(mem, value, mb1, mb2)		\
({									\
  signed int __tmp; __typeof(*mem) __ret;				\
  __asm__ __volatile__ (						\
		mb1							\
	"1:	ldl_l	%[__ret],%[__mem]\n"				\
	"	addl	%[__ret],%[__val],%[__tmp]\n"			\
	"	stl_c	%[__tmp],%[__mem]\n"				\
	"	beq	%[__tmp],1b\n"					\
		mb2							\
	: [__ret] "=&r" (__ret),					\
	  [__tmp] "=&r" (__tmp)						\
	: [__mem] "m" (*(mem)),						\
	  [__val] "Ir" ((signed int)(value))				\
	: "memory");							\
  __ret; })

#define __arch_exchange_and_add_64_int(mem, value, mb1, mb2)		\
({									\
  unsigned long __tmp; __typeof(*mem) __ret;				\
  __asm__ __volatile__ (						\
		mb1							\
	"1:	ldq_l	%[__ret],%[__mem]\n"				\
	"	addq	%[__ret],%[__val],%[__tmp]\n"			\
	"	stq_c	%[__tmp],%[__mem]\n"				\
	"	beq	%[__tmp],1b\n"					\
		mb2							\
	: [__ret] "=&r" (__ret),					\
	  [__tmp] "=&r" (__tmp)						\
	: [__mem] "m" (*(mem)),						\
	  [__val] "Ir" ((unsigned long)(value))				\
	: "memory");							\
  __ret; })

/* ??? Barrier semantics for atomic_exchange_and_add appear to be
   undefined.  Use full barrier for now, as that's safe.  */
#define atomic_exchange_and_add(mem, value) \
  __atomic_val_bysize (__arch_exchange_and_add, int, mem, value, __MB, __MB)
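
/* Illustrative use: the returned value is the one from before the
   addition, and with full barriers on both sides the operation is
   ordered against all surrounding accesses.

     long counter = 5;
     long prev = atomic_exchange_and_add (&counter, 3);

   Afterwards prev == 5 and counter == 8.  */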


/* ??? Not implemented here yet; the generic compare-and-exchange loops
   are used instead, although direct ll/sc sequences could do better.

#define atomic_decrement_if_positive(mem)
#define atomic_bit_test_set(mem, bit)

*/

#define atomic_full_barrier()	__asm ("mb" : : : "memory")
#define atomic_read_barrier()	__asm ("mb" : : : "memory")
#define atomic_write_barrier()	__asm ("wmb" : : : "memory")
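
/* Illustrative use of the explicit barriers (buf and ready are
   hypothetical names): a producer that must publish its payload before
   setting a ready flag would write

     buf[0] = 42;
     atomic_write_barrier ();
     ready = 1;

   The wmb orders the buffer store ahead of the flag store; a consumer
   pairs it with atomic_read_barrier () between reading the flag and
   reading the buffer.  */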