/* Legacy sub-word atomics for RISC-V.

   Copyright (C) 2016-2023 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */

/* These routines use LR/SC from the A extension, so they are compiled only
   when the target has atomic instructions, i.e. when the compiler
   predefines __riscv_atomic.  */
#ifdef __riscv_atomic

#include <stdbool.h>

/* Optional "not" appended to the AND for the NAND variant, which has no
   single-instruction equivalent on RISC-V.  */
#define INVERT		"not %[tmp1], %[tmp1]\n\t"
#define DONT_INVERT	""

/* Logic duplicated in gcc/gcc/config/riscv/sync.md for use when inlining
   is enabled.  */
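
/* The A extension only provides atomic operations on 32-bit (and, on RV64,
   64-bit) quantities, so the 1- and 2-byte __sync_* routines are emulated
   here with an LR.W/SC.W loop on the naturally aligned word containing the
   operand.  Illustrative example of the masking arithmetic (RISC-V is
   little-endian): for a 2-byte operand with (p & 3) == 2, shift is 16 and
   mask is 0xffff0000, so the operation is applied to the high half of the
   containing word while not_mask preserves the low half.  */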

#define GENERATE_FETCH_AND_OP(type, size, opname, insn, invert, cop)	\
  type __sync_fetch_and_ ## opname ## _ ## size (type *p, type v)	\
  {									\
    unsigned long aligned_addr = ((unsigned long) p) & ~3UL;		\
    int shift = (((unsigned long) p) & 3) * 8;				\
    unsigned mask = ((1U << ((sizeof v) * 8)) - 1) << shift;		\
    unsigned old, tmp1, tmp2;						\
									\
    asm volatile ("1:\n\t"						\
		  "lr.w.aq %[old], %[mem]\n\t"				\
		  #insn " %[tmp1], %[old], %[value]\n\t"		\
		  invert						\
		  "and %[tmp1], %[tmp1], %[mask]\n\t"			\
		  "and %[tmp2], %[old], %[not_mask]\n\t"		\
		  "or %[tmp2], %[tmp2], %[tmp1]\n\t"			\
		  "sc.w.rl %[tmp1], %[tmp2], %[mem]\n\t"		\
		  "bnez %[tmp1], 1b"					\
		  : [old] "=&r" (old),					\
		    [mem] "+A" (*(volatile unsigned*) aligned_addr),	\
		    [tmp1] "=&r" (tmp1),				\
		    [tmp2] "=&r" (tmp2)					\
		  : [value] "r" (((unsigned) v) << shift),		\
		    [mask] "r" (mask),					\
		    [not_mask] "r" (~mask));				\
									\
    return (type) (old >> shift);					\
  }									\
									\
  type __sync_ ## opname ## _and_fetch_ ## size (type *p, type v)	\
  {									\
    type o = __sync_fetch_and_ ## opname ## _ ## size (p, v);		\
    return cop;								\
  }
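
/* As an illustrative sketch (names below are from the instantiations at the
   end of this file), GENERATE_FETCH_AND_OP (unsigned char, 1, add, add,
   DONT_INVERT, o + v) defines

     unsigned char __sync_fetch_and_add_1 (unsigned char *, unsigned char);
     unsigned char __sync_add_and_fetch_1 (unsigned char *, unsigned char);

   which back a caller's __sync_fetch_and_add (&byte_var, 1) whenever the
   compiler emits a library call rather than inlining the sequence from
   sync.md.  */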

#define GENERATE_COMPARE_AND_SWAP(type, size)				\
  type __sync_val_compare_and_swap_ ## size (type *p, type o, type n)	\
  {									\
    unsigned long aligned_addr = ((unsigned long) p) & ~3UL;		\
    int shift = (((unsigned long) p) & 3) * 8;				\
    unsigned mask = ((1U << ((sizeof o) * 8)) - 1) << shift;		\
    unsigned old, tmp1;							\
									\
    asm volatile ("1:\n\t"						\
		  "lr.w.aq %[old], %[mem]\n\t"				\
		  "and %[tmp1], %[old], %[mask]\n\t"			\
		  "bne %[tmp1], %[o], 1f\n\t"				\
		  "and %[tmp1], %[old], %[not_mask]\n\t"		\
		  "or %[tmp1], %[tmp1], %[n]\n\t"			\
		  "sc.w.rl %[tmp1], %[tmp1], %[mem]\n\t"		\
		  "bnez %[tmp1], 1b\n\t"				\
		  "1:"							\
		  : [old] "=&r" (old),					\
		    [mem] "+A" (*(volatile unsigned*) aligned_addr),	\
		    [tmp1] "=&r" (tmp1)					\
		  : [o] "r" ((((unsigned) o) << shift) & mask),		\
		    [n] "r" ((((unsigned) n) << shift) & mask),		\
		    [mask] "r" (mask),					\
		    [not_mask] "r" (~mask));				\
									\
    return (type) (old >> shift);					\
  }									\
  bool __sync_bool_compare_and_swap_ ## size (type *p, type o, type n)	\
  {									\
    return __sync_val_compare_and_swap(p, o, n) == o;			\
  }
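
/* In the loop above, "bne %[tmp1], %[o], 1f" branches forward to the final
   "1:" label, skipping the store when the expected bytes do not match, while
   "bnez %[tmp1], 1b" branches back to the initial "1:" to retry a failed
   store-conditional.  The bool variant is written in terms of the generic
   __sync_val_compare_and_swap builtin, which the compiler either expands
   inline or lowers back to the sized routine defined above.  */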

#define GENERATE_ALL(type, size)					\
  GENERATE_FETCH_AND_OP(type, size, add, add, DONT_INVERT, o + v)	\
  GENERATE_FETCH_AND_OP(type, size, sub, sub, DONT_INVERT, o - v)	\
  GENERATE_FETCH_AND_OP(type, size, and, and, DONT_INVERT, o & v)	\
  GENERATE_FETCH_AND_OP(type, size, xor, xor, DONT_INVERT, o ^ v)	\
  GENERATE_FETCH_AND_OP(type, size, or, or, DONT_INVERT, o | v)		\
  GENERATE_FETCH_AND_OP(type, size, nand, and, INVERT, ~(o & v))	\
  GENERATE_COMPARE_AND_SWAP(type, size)
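
/* Only the 1- and 2-byte variants need this emulation; 4-byte (and, on
   RV64, 8-byte) __sync_* operations map directly onto the A extension's
   word-sized AMO and LR/SC instructions.  */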

GENERATE_ALL(unsigned char, 1)
GENERATE_ALL(unsigned short, 2)

#endif