/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
/* { dg-additional-options "-march=armv8.6-a+f64mm" } */
/* { dg-require-effective-target aarch64_asm_f64mm_ok } */

#include "test_sve_acle.h"

/*
** ld1ro_f64_base:
**	ld1rod	z0\.d, p0/z, \[x0\]
**	ret
*/
TEST_LOAD (ld1ro_f64_base, svfloat64_t, float64_t,
	   z0 = svld1ro_f64 (p0, x0),
	   z0 = svld1ro (p0, x0))

/*
** ld1ro_f64_index:
**	ld1rod	z0\.d, p0/z, \[x0, x1, lsl 3\]
**	ret
*/
TEST_LOAD (ld1ro_f64_index, svfloat64_t, float64_t,
	   z0 = svld1ro_f64 (p0, x0 + x1),
	   z0 = svld1ro (p0, x0 + x1))

/*
** ld1ro_f64_1:
**	add	(x[0-9]+), x0, #?8
**	ld1rod	z0\.d, p0/z, \[\1\]
**	ret
*/
TEST_LOAD (ld1ro_f64_1, svfloat64_t, float64_t,
	   z0 = svld1ro_f64 (p0, x0 + 1),
	   z0 = svld1ro (p0, x0 + 1))

/*
** ld1ro_f64_2:
**	add	(x[0-9]+), x0, #?16
**	ld1rod	z0\.d, p0/z, \[\1\]
**	ret
*/
TEST_LOAD (ld1ro_f64_2, svfloat64_t, float64_t,
	   z0 = svld1ro_f64 (p0, x0 + 2),
	   z0 = svld1ro (p0, x0 + 2))

/*
** ld1ro_f64_32:
**	add	(x[0-9]+), x0, #?256
**	ld1rod	z0\.d, p0/z, \[\1\]
**	ret
*/
TEST_LOAD (ld1ro_f64_32, svfloat64_t, float64_t,
	   z0 = svld1ro_f64 (p0, x0 + 32),
	   z0 = svld1ro (p0, x0 + 32))

/*
** ld1ro_f64_m1:
**	sub	(x[0-9]+), x0, #?8
**	ld1rod	z0\.d, p0/z, \[\1\]
**	ret
*/
TEST_LOAD (ld1ro_f64_m1, svfloat64_t, float64_t,
	   z0 = svld1ro_f64 (p0, x0 - 1),
	   z0 = svld1ro (p0, x0 - 1))

/*
** ld1ro_f64_m2:
**	sub	(x[0-9]+), x0, #?16
**	ld1rod	z0\.d, p0/z, \[\1\]
**	ret
*/
TEST_LOAD (ld1ro_f64_m2, svfloat64_t, float64_t,
	   z0 = svld1ro_f64 (p0, x0 - 2),
	   z0 = svld1ro (p0, x0 - 2))

/*
** ld1ro_f64_m36:
**	sub	(x[0-9]+), x0, #?288
**	ld1rod	z0\.d, p0/z, \[\1\]
**	ret
*/
TEST_LOAD (ld1ro_f64_m36, svfloat64_t, float64_t,
	   z0 = svld1ro_f64 (p0, x0 - 36),
	   z0 = svld1ro (p0, x0 - 36))

/*
** ld1ro_f64_4:
**	ld1rod	z0\.d, p0/z, \[x0, #?32\]
**	ret
*/
TEST_LOAD (ld1ro_f64_4, svfloat64_t, float64_t,
	   z0 = svld1ro_f64 (p0, x0 + 4),
	   z0 = svld1ro (p0, x0 + 4))

/*
** ld1ro_f64_28:
**	ld1rod	z0\.d, p0/z, \[x0, #?224\]
**	ret
*/
TEST_LOAD (ld1ro_f64_28, svfloat64_t, float64_t,
	   z0 = svld1ro_f64 (p0, x0 + 28),
	   z0 = svld1ro (p0, x0 + 28))

/*
** ld1ro_f64_m4:
**	ld1rod	z0\.d, p0/z, \[x0, #?-32\]
**	ret
*/
TEST_LOAD (ld1ro_f64_m4, svfloat64_t, float64_t,
	   z0 = svld1ro_f64 (p0, x0 - 4),
	   z0 = svld1ro (p0, x0 - 4))

/*
** ld1ro_f64_m32:
**	ld1rod	z0\.d, p0/z, \[x0, #?-256\]
**	ret
*/
TEST_LOAD (ld1ro_f64_m32, svfloat64_t, float64_t,
	   z0 = svld1ro_f64 (p0, x0 - 32),
	   z0 = svld1ro (p0, x0 - 32))