gcc-13.2.0/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ro_u32.c
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
/* { dg-additional-options "-march=armv8.6-a+f64mm" } */
/* { dg-require-effective-target aarch64_asm_f64mm_ok } */

#include "test_sve_acle.h"

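/* LD1ROW loads a 256-bit (32-byte) octaword and replicates it to fill the
   vector.  It is provided by the FEAT_F64MM extension, hence the +f64mm
   requirement above.  Its immediate addressing form accepts only byte
   offsets that are multiples of 32 in the range [-256, 224], so the tests
   below cover both the direct addressing forms and the fallback address
   computations for out-of-range offsets.  */
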
/*
** ld1ro_u32_base:
**	ld1row	z0\.s, p0/z, \[x0\]
**	ret
*/
TEST_LOAD (ld1ro_u32_base, svuint32_t, uint32_t,
	   z0 = svld1ro_u32 (p0, x0),
	   z0 = svld1ro (p0, x0))

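/* The register-offset form scales the index by the element size, so a
   uint32_t index is applied with "lsl 2".  */
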
/*
** ld1ro_u32_index:
**	ld1row	z0\.s, p0/z, \[x0, x1, lsl 2\]
**	ret
*/
TEST_LOAD (ld1ro_u32_index, svuint32_t, uint32_t,
	   z0 = svld1ro_u32 (p0, x0 + x1),
	   z0 = svld1ro (p0, x0 + x1))

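/* Element offsets that are not a multiple of 32 bytes, or that scale to a
   byte offset outside [-256, 224], cannot use the immediate form; the
   address must first be computed with a separate ADD or SUB.  */
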
/*
** ld1ro_u32_1:
**	add	(x[0-9]+), x0, #?4
**	ld1row	z0\.s, p0/z, \[\1\]
**	ret
*/
TEST_LOAD (ld1ro_u32_1, svuint32_t, uint32_t,
	   z0 = svld1ro_u32 (p0, x0 + 1),
	   z0 = svld1ro (p0, x0 + 1))

/*
** ld1ro_u32_4:
**	add	(x[0-9]+), x0, #?16
**	ld1row	z0\.s, p0/z, \[\1\]
**	ret
*/
TEST_LOAD (ld1ro_u32_4, svuint32_t, uint32_t,
	   z0 = svld1ro_u32 (p0, x0 + 4),
	   z0 = svld1ro (p0, x0 + 4))

/*
** ld1ro_u32_64:
**	add	(x[0-9]+), x0, #?256
**	ld1row	z0\.s, p0/z, \[\1\]
**	ret
*/
TEST_LOAD (ld1ro_u32_64, svuint32_t, uint32_t,
	   z0 = svld1ro_u32 (p0, x0 + 64),
	   z0 = svld1ro (p0, x0 + 64))

/*
** ld1ro_u32_m1:
**	sub	(x[0-9]+), x0, #?4
**	ld1row	z0\.s, p0/z, \[\1\]
**	ret
*/
TEST_LOAD (ld1ro_u32_m1, svuint32_t, uint32_t,
	   z0 = svld1ro_u32 (p0, x0 - 1),
	   z0 = svld1ro (p0, x0 - 1))

/*
** ld1ro_u32_m4:
**	sub	(x[0-9]+), x0, #?16
**	ld1row	z0\.s, p0/z, \[\1\]
**	ret
*/
TEST_LOAD (ld1ro_u32_m4, svuint32_t, uint32_t,
	   z0 = svld1ro_u32 (p0, x0 - 4),
	   z0 = svld1ro (p0, x0 - 4))

/*
** ld1ro_u32_m72:
**	sub	(x[0-9]+), x0, #?288
**	ld1row	z0\.s, p0/z, \[\1\]
**	ret
*/
TEST_LOAD (ld1ro_u32_m72, svuint32_t, uint32_t,
	   z0 = svld1ro_u32 (p0, x0 - 72),
	   z0 = svld1ro (p0, x0 - 72))

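/* Element offsets whose byte value is a multiple of 32 within [-256, 224]
   map directly onto the immediate addressing form.  */
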
/*
** ld1ro_u32_8:
**	ld1row	z0\.s, p0/z, \[x0, #?32\]
**	ret
*/
TEST_LOAD (ld1ro_u32_8, svuint32_t, uint32_t,
	   z0 = svld1ro_u32 (p0, x0 + 8),
	   z0 = svld1ro (p0, x0 + 8))

/*
** ld1ro_u32_56:
**	ld1row	z0\.s, p0/z, \[x0, #?224\]
**	ret
*/
TEST_LOAD (ld1ro_u32_56, svuint32_t, uint32_t,
	   z0 = svld1ro_u32 (p0, x0 + 56),
	   z0 = svld1ro (p0, x0 + 56))

/*
** ld1ro_u32_m8:
**	ld1row	z0\.s, p0/z, \[x0, #?-32\]
**	ret
*/
TEST_LOAD (ld1ro_u32_m8, svuint32_t, uint32_t,
	   z0 = svld1ro_u32 (p0, x0 - 8),
	   z0 = svld1ro (p0, x0 - 8))

/*
** ld1ro_u32_m64:
**	ld1row	z0\.s, p0/z, \[x0, #?-256\]
**	ret
*/
TEST_LOAD (ld1ro_u32_m64, svuint32_t, uint32_t,
	   z0 = svld1ro_u32 (p0, x0 - 64),
	   z0 = svld1ro (p0, x0 - 64))