gcc-13.2.0/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1ro_bf16.c
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
/* { dg-additional-options "-march=armv8.6-a+f64mm" } */
/* { dg-require-effective-target aarch64_asm_f64mm_ok }  */
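
/* svld1ro loads a single 32-byte (256-bit) block and replicates it to
   every 256-bit segment of the vector.  The LD1RO* instructions come
   with the F64MM extension, hence +f64mm above; check-function-bodies
   matches each function against the "**" asm template in the comment
   that precedes it.  */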

#include "test_sve_acle.h"
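
/* The plain base and base+index forms map directly onto LD1ROH
   addressing modes; the index register is scaled by the 2-byte
   element size (lsl 1).  */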

/*
** ld1ro_bf16_base:
**	ld1roh	z0\.h, p0/z, \[x0\]
**	ret
*/
TEST_LOAD (ld1ro_bf16_base, svbfloat16_t, bfloat16_t,
	   z0 = svld1ro_bf16 (p0, x0),
	   z0 = svld1ro (p0, x0))

/*
** ld1ro_bf16_index:
**	ld1roh	z0\.h, p0/z, \[x0, x1, lsl 1\]
**	ret
*/
TEST_LOAD (ld1ro_bf16_index, svbfloat16_t, bfloat16_t,
	   z0 = svld1ro_bf16 (p0, x0 + x1),
	   z0 = svld1ro (p0, x0 + x1))
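
/* Element offsets 1, 8 and 128 are byte offsets 2, 16 and 256.  The
   LD1ROH immediate must be a multiple of 32 in the range [-256, 224],
   so none of these qualifies and the address is built with an ADD.  */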

/*
** ld1ro_bf16_1:
**	add	(x[0-9]+), x0, #?2
**	ld1roh	z0\.h, p0/z, \[\1\]
**	ret
*/
TEST_LOAD (ld1ro_bf16_1, svbfloat16_t, bfloat16_t,
	   z0 = svld1ro_bf16 (p0, x0 + 1),
	   z0 = svld1ro (p0, x0 + 1))

/*
** ld1ro_bf16_8:
**	add	(x[0-9]+), x0, #?16
**	ld1roh	z0\.h, p0/z, \[\1\]
**	ret
*/
TEST_LOAD (ld1ro_bf16_8, svbfloat16_t, bfloat16_t,
	   z0 = svld1ro_bf16 (p0, x0 + 8),
	   z0 = svld1ro (p0, x0 + 8))

/*
** ld1ro_bf16_128:
**	add	(x[0-9]+), x0, #?256
**	ld1roh	z0\.h, p0/z, \[\1\]
**	ret
*/
TEST_LOAD (ld1ro_bf16_128, svbfloat16_t, bfloat16_t,
	   z0 = svld1ro_bf16 (p0, x0 + 128),
	   z0 = svld1ro (p0, x0 + 128))
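
/* Likewise for negative element offsets: byte offsets -2 and -16 are
   not multiples of 32, and -288 lies below the -256 limit, so the
   address is built with a SUB.  */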

/*
** ld1ro_bf16_m1:
**	sub	(x[0-9]+), x0, #?2
**	ld1roh	z0\.h, p0/z, \[\1\]
**	ret
*/
TEST_LOAD (ld1ro_bf16_m1, svbfloat16_t, bfloat16_t,
	   z0 = svld1ro_bf16 (p0, x0 - 1),
	   z0 = svld1ro (p0, x0 - 1))

/*
** ld1ro_bf16_m8:
**	sub	(x[0-9]+), x0, #?16
**	ld1roh	z0\.h, p0/z, \[\1\]
**	ret
*/
TEST_LOAD (ld1ro_bf16_m8, svbfloat16_t, bfloat16_t,
	   z0 = svld1ro_bf16 (p0, x0 - 8),
	   z0 = svld1ro (p0, x0 - 8))

/*
** ld1ro_bf16_m144:
**	sub	(x[0-9]+), x0, #?288
**	ld1roh	z0\.h, p0/z, \[\1\]
**	ret
*/
TEST_LOAD (ld1ro_bf16_m144, svbfloat16_t, bfloat16_t,
	   z0 = svld1ro_bf16 (p0, x0 - 144),
	   z0 = svld1ro (p0, x0 - 144))
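
/* Byte offsets 32, 224, -32 and -256 are all multiples of 32 within
   [-256, 224], so they fold straight into the LD1ROH immediate
   addressing mode.  */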

/*
** ld1ro_bf16_16:
**	ld1roh	z0\.h, p0/z, \[x0, #?32\]
**	ret
*/
TEST_LOAD (ld1ro_bf16_16, svbfloat16_t, bfloat16_t,
	   z0 = svld1ro_bf16 (p0, x0 + 16),
	   z0 = svld1ro (p0, x0 + 16))

/*
** ld1ro_bf16_112:
**	ld1roh	z0\.h, p0/z, \[x0, #?224\]
**	ret
*/
TEST_LOAD (ld1ro_bf16_112, svbfloat16_t, bfloat16_t,
	   z0 = svld1ro_bf16 (p0, x0 + 112),
	   z0 = svld1ro (p0, x0 + 112))

/*
** ld1ro_bf16_m16:
**	ld1roh	z0\.h, p0/z, \[x0, #?-32\]
**	ret
*/
TEST_LOAD (ld1ro_bf16_m16, svbfloat16_t, bfloat16_t,
	   z0 = svld1ro_bf16 (p0, x0 - 16),
	   z0 = svld1ro (p0, x0 - 16))

/*
** ld1ro_bf16_m128:
**	ld1roh	z0\.h, p0/z, \[x0, #?-256\]
**	ret
*/
TEST_LOAD (ld1ro_bf16_m128, svbfloat16_t, bfloat16_t,
	   z0 = svld1ro_bf16 (p0, x0 - 128),
	   z0 = svld1ro (p0, x0 - 128))