gcc-13.2.0/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/asr_wide_s8.c
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */

#include "test_sve_acle.h"

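/* Descriptive note (not part of the upstream file): these tests check the
   assembly generated for the svasr_wide* ACLE intrinsics on svint8_t, which
   arithmetically shift each .b element right by a 64-bit (.d) shift amount.
   They cover merging (_m), zeroing (_z) and "don't care" (_x) predication,
   tied and untied destination registers, and vector, scalar (x0) and
   immediate shift operands.  Each "**" comment is a regular-expression
   template that check-function-bodies matches against the function body
   emitted for the corresponding TEST_* macro expansion.  */
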
/*
** asr_wide_s8_m_tied1:
**	asr	z0\.b, p0/m, z0\.b, z4\.d
**	ret
*/
TEST_DUAL_Z (asr_wide_s8_m_tied1, svint8_t, svuint64_t,
	     z0 = svasr_wide_s8_m (p0, z0, z4),
	     z0 = svasr_wide_m (p0, z0, z4))

/*
** asr_wide_s8_m_tied2:
**	mov	(z[0-9]+\.d), z0\.d
**	movprfx	z0, z4
**	asr	z0\.b, p0/m, z0\.b, \1
**	ret
*/
TEST_DUAL_Z_REV (asr_wide_s8_m_tied2, svint8_t, svuint64_t,
		 z0_res = svasr_wide_s8_m (p0, z4, z0),
		 z0_res = svasr_wide_m (p0, z4, z0))

/*
** asr_wide_s8_m_untied:
**	movprfx	z0, z1
**	asr	z0\.b, p0/m, z0\.b, z4\.d
**	ret
*/
TEST_DUAL_Z (asr_wide_s8_m_untied, svint8_t, svuint64_t,
	     z0 = svasr_wide_s8_m (p0, z1, z4),
	     z0 = svasr_wide_m (p0, z1, z4))

/*
** asr_wide_x0_s8_m_tied1:
**	mov	(z[0-9]+\.d), x0
**	asr	z0\.b, p0/m, z0\.b, \1
**	ret
*/
TEST_UNIFORM_ZX (asr_wide_x0_s8_m_tied1, svint8_t, uint64_t,
		 z0 = svasr_wide_n_s8_m (p0, z0, x0),
		 z0 = svasr_wide_m (p0, z0, x0))

/*
** asr_wide_x0_s8_m_untied:
**	mov	(z[0-9]+\.d), x0
**	movprfx	z0, z1
**	asr	z0\.b, p0/m, z0\.b, \1
**	ret
*/
TEST_UNIFORM_ZX (asr_wide_x0_s8_m_untied, svint8_t, uint64_t,
		 z0 = svasr_wide_n_s8_m (p0, z1, x0),
		 z0 = svasr_wide_m (p0, z1, x0))

/*
** asr_wide_1_s8_m_tied1:
**	asr	z0\.b, p0/m, z0\.b, #1
**	ret
*/
TEST_UNIFORM_Z (asr_wide_1_s8_m_tied1, svint8_t,
		z0 = svasr_wide_n_s8_m (p0, z0, 1),
		z0 = svasr_wide_m (p0, z0, 1))

/*
** asr_wide_1_s8_m_untied:
**	movprfx	z0, z1
**	asr	z0\.b, p0/m, z0\.b, #1
**	ret
*/
TEST_UNIFORM_Z (asr_wide_1_s8_m_untied, svint8_t,
		z0 = svasr_wide_n_s8_m (p0, z1, 1),
		z0 = svasr_wide_m (p0, z1, 1))

/*
** asr_wide_7_s8_m_tied1:
**	asr	z0\.b, p0/m, z0\.b, #7
**	ret
*/
TEST_UNIFORM_Z (asr_wide_7_s8_m_tied1, svint8_t,
		z0 = svasr_wide_n_s8_m (p0, z0, 7),
		z0 = svasr_wide_m (p0, z0, 7))

/*
** asr_wide_7_s8_m_untied:
**	movprfx	z0, z1
**	asr	z0\.b, p0/m, z0\.b, #7
**	ret
*/
TEST_UNIFORM_Z (asr_wide_7_s8_m_untied, svint8_t,
		z0 = svasr_wide_n_s8_m (p0, z1, 7),
		z0 = svasr_wide_m (p0, z1, 7))

/*
** asr_wide_8_s8_m_tied1:
**	asr	z0\.b, p0/m, z0\.b, #8
**	ret
*/
TEST_UNIFORM_Z (asr_wide_8_s8_m_tied1, svint8_t,
		z0 = svasr_wide_n_s8_m (p0, z0, 8),
		z0 = svasr_wide_m (p0, z0, 8))

/*
** asr_wide_8_s8_m_untied:
**	movprfx	z0, z1
**	asr	z0\.b, p0/m, z0\.b, #8
**	ret
*/
TEST_UNIFORM_Z (asr_wide_8_s8_m_untied, svint8_t,
		z0 = svasr_wide_n_s8_m (p0, z1, 8),
		z0 = svasr_wide_m (p0, z1, 8))

/*
** asr_wide_s8_z_tied1:
**	movprfx	z0\.b, p0/z, z0\.b
**	asr	z0\.b, p0/m, z0\.b, z4\.d
**	ret
*/
TEST_DUAL_Z (asr_wide_s8_z_tied1, svint8_t, svuint64_t,
	     z0 = svasr_wide_s8_z (p0, z0, z4),
	     z0 = svasr_wide_z (p0, z0, z4))

/*
** asr_wide_s8_z_tied2:
**	mov	(z[0-9]+\.d), z0\.d
**	movprfx	z0\.b, p0/z, z4\.b
**	asr	z0\.b, p0/m, z0\.b, \1
**	ret
*/
TEST_DUAL_Z_REV (asr_wide_s8_z_tied2, svint8_t, svuint64_t,
		 z0_res = svasr_wide_s8_z (p0, z4, z0),
		 z0_res = svasr_wide_z (p0, z4, z0))

/*
** asr_wide_s8_z_untied:
**	movprfx	z0\.b, p0/z, z1\.b
**	asr	z0\.b, p0/m, z0\.b, z4\.d
**	ret
*/
TEST_DUAL_Z (asr_wide_s8_z_untied, svint8_t, svuint64_t,
	     z0 = svasr_wide_s8_z (p0, z1, z4),
	     z0 = svasr_wide_z (p0, z1, z4))

/*
** asr_wide_x0_s8_z_tied1:
**	mov	(z[0-9]+\.d), x0
**	movprfx	z0\.b, p0/z, z0\.b
**	asr	z0\.b, p0/m, z0\.b, \1
**	ret
*/
TEST_UNIFORM_ZX (asr_wide_x0_s8_z_tied1, svint8_t, uint64_t,
		 z0 = svasr_wide_n_s8_z (p0, z0, x0),
		 z0 = svasr_wide_z (p0, z0, x0))

/*
** asr_wide_x0_s8_z_untied: { xfail *-*-* }
**	mov	(z[0-9]+\.d), x0
**	movprfx	z0\.b, p0/z, z1\.b
**	asr	z0\.b, p0/m, z0\.b, \1
**	ret
*/
TEST_UNIFORM_ZX (asr_wide_x0_s8_z_untied, svint8_t, uint64_t,
		 z0 = svasr_wide_n_s8_z (p0, z1, x0),
		 z0 = svasr_wide_z (p0, z1, x0))

/*
** asr_wide_1_s8_z_tied1:
**	movprfx	z0\.b, p0/z, z0\.b
**	asr	z0\.b, p0/m, z0\.b, #1
**	ret
*/
TEST_UNIFORM_Z (asr_wide_1_s8_z_tied1, svint8_t,
		z0 = svasr_wide_n_s8_z (p0, z0, 1),
		z0 = svasr_wide_z (p0, z0, 1))

/*
** asr_wide_1_s8_z_untied:
**	movprfx	z0\.b, p0/z, z1\.b
**	asr	z0\.b, p0/m, z0\.b, #1
**	ret
*/
TEST_UNIFORM_Z (asr_wide_1_s8_z_untied, svint8_t,
		z0 = svasr_wide_n_s8_z (p0, z1, 1),
		z0 = svasr_wide_z (p0, z1, 1))

/*
** asr_wide_7_s8_z_tied1:
**	movprfx	z0\.b, p0/z, z0\.b
**	asr	z0\.b, p0/m, z0\.b, #7
**	ret
*/
TEST_UNIFORM_Z (asr_wide_7_s8_z_tied1, svint8_t,
		z0 = svasr_wide_n_s8_z (p0, z0, 7),
		z0 = svasr_wide_z (p0, z0, 7))

/*
** asr_wide_7_s8_z_untied:
**	movprfx	z0\.b, p0/z, z1\.b
**	asr	z0\.b, p0/m, z0\.b, #7
**	ret
*/
TEST_UNIFORM_Z (asr_wide_7_s8_z_untied, svint8_t,
		z0 = svasr_wide_n_s8_z (p0, z1, 7),
		z0 = svasr_wide_z (p0, z1, 7))

/*
** asr_wide_8_s8_z_tied1:
**	movprfx	z0\.b, p0/z, z0\.b
**	asr	z0\.b, p0/m, z0\.b, #8
**	ret
*/
TEST_UNIFORM_Z (asr_wide_8_s8_z_tied1, svint8_t,
		z0 = svasr_wide_n_s8_z (p0, z0, 8),
		z0 = svasr_wide_z (p0, z0, 8))

/*
** asr_wide_8_s8_z_untied:
**	movprfx	z0\.b, p0/z, z1\.b
**	asr	z0\.b, p0/m, z0\.b, #8
**	ret
*/
TEST_UNIFORM_Z (asr_wide_8_s8_z_untied, svint8_t,
		z0 = svasr_wide_n_s8_z (p0, z1, 8),
		z0 = svasr_wide_z (p0, z1, 8))

/*
** asr_wide_s8_x_tied1:
**	asr	z0\.b, z0\.b, z4\.d
**	ret
*/
TEST_DUAL_Z (asr_wide_s8_x_tied1, svint8_t, svuint64_t,
	     z0 = svasr_wide_s8_x (p0, z0, z4),
	     z0 = svasr_wide_x (p0, z0, z4))

/*
** asr_wide_s8_x_tied2:
**	asr	z0\.b, z4\.b, z0\.d
**	ret
*/
TEST_DUAL_Z_REV (asr_wide_s8_x_tied2, svint8_t, svuint64_t,
		 z0_res = svasr_wide_s8_x (p0, z4, z0),
		 z0_res = svasr_wide_x (p0, z4, z0))

/*
** asr_wide_s8_x_untied:
**	asr	z0\.b, z1\.b, z4\.d
**	ret
*/
TEST_DUAL_Z (asr_wide_s8_x_untied, svint8_t, svuint64_t,
	     z0 = svasr_wide_s8_x (p0, z1, z4),
	     z0 = svasr_wide_x (p0, z1, z4))

/*
** asr_wide_x0_s8_x_tied1:
**	mov	(z[0-9]+\.d), x0
**	asr	z0\.b, z0\.b, \1
**	ret
*/
TEST_UNIFORM_ZX (asr_wide_x0_s8_x_tied1, svint8_t, uint64_t,
		 z0 = svasr_wide_n_s8_x (p0, z0, x0),
		 z0 = svasr_wide_x (p0, z0, x0))

/*
** asr_wide_x0_s8_x_untied:
**	mov	(z[0-9]+\.d), x0
**	asr	z0\.b, z1\.b, \1
**	ret
*/
TEST_UNIFORM_ZX (asr_wide_x0_s8_x_untied, svint8_t, uint64_t,
		 z0 = svasr_wide_n_s8_x (p0, z1, x0),
		 z0 = svasr_wide_x (p0, z1, x0))

/*
** asr_wide_1_s8_x_tied1:
**	asr	z0\.b, z0\.b, #1
**	ret
*/
TEST_UNIFORM_Z (asr_wide_1_s8_x_tied1, svint8_t,
		z0 = svasr_wide_n_s8_x (p0, z0, 1),
		z0 = svasr_wide_x (p0, z0, 1))

/*
** asr_wide_1_s8_x_untied:
**	asr	z0\.b, z1\.b, #1
**	ret
*/
TEST_UNIFORM_Z (asr_wide_1_s8_x_untied, svint8_t,
		z0 = svasr_wide_n_s8_x (p0, z1, 1),
		z0 = svasr_wide_x (p0, z1, 1))

/*
** asr_wide_7_s8_x_tied1:
**	asr	z0\.b, z0\.b, #7
**	ret
*/
TEST_UNIFORM_Z (asr_wide_7_s8_x_tied1, svint8_t,
		z0 = svasr_wide_n_s8_x (p0, z0, 7),
		z0 = svasr_wide_x (p0, z0, 7))

/*
** asr_wide_7_s8_x_untied:
**	asr	z0\.b, z1\.b, #7
**	ret
*/
TEST_UNIFORM_Z (asr_wide_7_s8_x_untied, svint8_t,
		z0 = svasr_wide_n_s8_x (p0, z1, 7),
		z0 = svasr_wide_x (p0, z1, 7))

/*
** asr_wide_8_s8_x_tied1:
**	asr	z0\.b, z0\.b, #8
**	ret
*/
TEST_UNIFORM_Z (asr_wide_8_s8_x_tied1, svint8_t,
		z0 = svasr_wide_n_s8_x (p0, z0, 8),
		z0 = svasr_wide_x (p0, z0, 8))

/*
** asr_wide_8_s8_x_untied:
**	asr	z0\.b, z1\.b, #8
**	ret
*/
TEST_UNIFORM_Z (asr_wide_8_s8_x_untied, svint8_t,
		z0 = svasr_wide_n_s8_x (p0, z1, 8),
		z0 = svasr_wide_x (p0, z1, 8))