gcc-13.2.0/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ldnf1_f64.c
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */

#include "test_sve_acle.h"

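/* Non-faulting loads (LDNF1D) read as many contiguous doublewords as they
   can without taking a fault; elements that cannot be loaded are zeroed and
   their FFR bits cleared.  Each TEST_LOAD invocation below (a macro from
   test_sve_acle.h) produces one function per intrinsic form, and
   check-function-bodies matches its assembly against the pattern comment
   that precedes it.  */
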
/*
** ldnf1_f64_base:
**	ldnf1d	z0\.d, p0/z, \[x0\]
**	ret
*/
TEST_LOAD (ldnf1_f64_base, svfloat64_t, float64_t,
	   z0 = svldnf1_f64 (p0, x0),
	   z0 = svldnf1 (p0, x0))

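/* LDNF1D has only a scalar-base-plus-immediate addressing mode, so an
   index held in a register must first be folded into the base address
   with a separate ADD.  */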
/*
** ldnf1_f64_index:
**	add	(x[0-9]+), x0, x1, lsl 3
**	ldnf1d	z0\.d, p0/z, \[\1\]
**	ret
*/
TEST_LOAD (ldnf1_f64_index, svfloat64_t, float64_t,
	   z0 = svldnf1_f64 (p0, x0 + x1),
	   z0 = svldnf1 (p0, x0 + x1))

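/* The immediate form covers offsets of -8 to +7 vector lengths; anything
   outside that window (+8 and -9 below) has to adjust the base register
   with INCB/DECB instead.  */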
/*
** ldnf1_f64_1:
**	ldnf1d	z0\.d, p0/z, \[x0, #1, mul vl\]
**	ret
*/
TEST_LOAD (ldnf1_f64_1, svfloat64_t, float64_t,
	   z0 = svldnf1_f64 (p0, x0 + svcntd ()),
	   z0 = svldnf1 (p0, x0 + svcntd ()))

/*
** ldnf1_f64_7:
**	ldnf1d	z0\.d, p0/z, \[x0, #7, mul vl\]
**	ret
*/
TEST_LOAD (ldnf1_f64_7, svfloat64_t, float64_t,
	   z0 = svldnf1_f64 (p0, x0 + svcntd () * 7),
	   z0 = svldnf1 (p0, x0 + svcntd () * 7))

/*
** ldnf1_f64_8:
**	incb	x0, all, mul #8
**	ldnf1d	z0\.d, p0/z, \[x0\]
**	ret
*/
TEST_LOAD (ldnf1_f64_8, svfloat64_t, float64_t,
	   z0 = svldnf1_f64 (p0, x0 + svcntd () * 8),
	   z0 = svldnf1 (p0, x0 + svcntd () * 8))

/*
** ldnf1_f64_m1:
**	ldnf1d	z0\.d, p0/z, \[x0, #-1, mul vl\]
**	ret
*/
TEST_LOAD (ldnf1_f64_m1, svfloat64_t, float64_t,
	   z0 = svldnf1_f64 (p0, x0 - svcntd ()),
	   z0 = svldnf1 (p0, x0 - svcntd ()))

/*
** ldnf1_f64_m8:
**	ldnf1d	z0\.d, p0/z, \[x0, #-8, mul vl\]
**	ret
*/
TEST_LOAD (ldnf1_f64_m8, svfloat64_t, float64_t,
	   z0 = svldnf1_f64 (p0, x0 - svcntd () * 8),
	   z0 = svldnf1 (p0, x0 - svcntd () * 8))

/*
** ldnf1_f64_m9:
**	decb	x0, all, mul #9
**	ldnf1d	z0\.d, p0/z, \[x0\]
**	ret
*/
TEST_LOAD (ldnf1_f64_m9, svfloat64_t, float64_t,
	   z0 = svldnf1_f64 (p0, x0 - svcntd () * 9),
	   z0 = svldnf1 (p0, x0 - svcntd () * 9))

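/* The _vnum_ forms take the offset as a whole number of vectors, i.e.
   vnum * svcntd () elements or vnum vector lengths in bytes; constant
   values use the same -8..+7 immediate window as above.  */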
/*
** ldnf1_vnum_f64_0:
**	ldnf1d	z0\.d, p0/z, \[x0\]
**	ret
*/
TEST_LOAD (ldnf1_vnum_f64_0, svfloat64_t, float64_t,
	   z0 = svldnf1_vnum_f64 (p0, x0, 0),
	   z0 = svldnf1_vnum (p0, x0, 0))

/*
** ldnf1_vnum_f64_1:
**	ldnf1d	z0\.d, p0/z, \[x0, #1, mul vl\]
**	ret
*/
TEST_LOAD (ldnf1_vnum_f64_1, svfloat64_t, float64_t,
	   z0 = svldnf1_vnum_f64 (p0, x0, 1),
	   z0 = svldnf1_vnum (p0, x0, 1))

/*
** ldnf1_vnum_f64_7:
**	ldnf1d	z0\.d, p0/z, \[x0, #7, mul vl\]
**	ret
*/
TEST_LOAD (ldnf1_vnum_f64_7, svfloat64_t, float64_t,
	   z0 = svldnf1_vnum_f64 (p0, x0, 7),
	   z0 = svldnf1_vnum (p0, x0, 7))

/*
** ldnf1_vnum_f64_8:
**	incb	x0, all, mul #8
**	ldnf1d	z0\.d, p0/z, \[x0\]
**	ret
*/
TEST_LOAD (ldnf1_vnum_f64_8, svfloat64_t, float64_t,
	   z0 = svldnf1_vnum_f64 (p0, x0, 8),
	   z0 = svldnf1_vnum (p0, x0, 8))

/*
** ldnf1_vnum_f64_m1:
**	ldnf1d	z0\.d, p0/z, \[x0, #-1, mul vl\]
**	ret
*/
TEST_LOAD (ldnf1_vnum_f64_m1, svfloat64_t, float64_t,
	   z0 = svldnf1_vnum_f64 (p0, x0, -1),
	   z0 = svldnf1_vnum (p0, x0, -1))

/*
** ldnf1_vnum_f64_m8:
**	ldnf1d	z0\.d, p0/z, \[x0, #-8, mul vl\]
**	ret
*/
TEST_LOAD (ldnf1_vnum_f64_m8, svfloat64_t, float64_t,
	   z0 = svldnf1_vnum_f64 (p0, x0, -8),
	   z0 = svldnf1_vnum (p0, x0, -8))

/*
** ldnf1_vnum_f64_m9:
**	decb	x0, all, mul #9
**	ldnf1d	z0\.d, p0/z, \[x0\]
**	ret
*/
TEST_LOAD (ldnf1_vnum_f64_m9, svfloat64_t, float64_t,
	   z0 = svldnf1_vnum_f64 (p0, x0, -9),
	   z0 = svldnf1_vnum (p0, x0, -9))

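/* A run-time vnum cannot use the immediate form: the byte offset
   vnum * VL is computed with CNTB (the vector length in bytes) and MADD
   before the load.  */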
/*
** ldnf1_vnum_f64_x1:
**	cntb	(x[0-9]+)
**	madd	(x[0-9]+), (?:x1, \1|\1, x1), x0
**	ldnf1d	z0\.d, p0/z, \[\2\]
**	ret
*/
TEST_LOAD (ldnf1_vnum_f64_x1, svfloat64_t, float64_t,
	   z0 = svldnf1_vnum_f64 (p0, x0, x1),
	   z0 = svldnf1_vnum (p0, x0, x1))
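
/* Illustrative sketch only; nothing above uses it and the helper name is
   made up.  It shows one way svldnf1 is typically paired with the FFR:
   summing n doubles while letting each vector load run speculatively,
   so a load that straddles an unmapped page does not fault.  Assumes
   [ptr, ptr + n) itself is readable, so the first active element of each
   load always succeeds and the loop makes progress.  Kept static inline
   and unused so it has no effect on the checked assembly.  */
static inline double
sum_f64_nf (const float64_t *ptr, uint64_t n)
{
  svfloat64_t acc = svdup_f64 (0.0);
  uint64_t i = 0;
  while (i < n)
    {
      svbool_t pg = svwhilelt_b64 (i, n);	/* lanes still wanted */
      svsetffr ();				/* mark all lanes as good */
      svfloat64_t data = svldnf1 (pg, ptr + i);
      svbool_t ok = svrdffr_z (pg);		/* lanes that really loaded */
      acc = svadd_m (ok, acc, data);		/* accumulate loaded lanes */
      i += svcntp_b64 (pg, ok);		/* advance by that many */
    }
  return svaddv (svptrue_b64 (), acc);
}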