gcc-13.2.0/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/rshl_s8.c
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */

#include "test_sve_acle.h"
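
/* Each TEST_* invocation below exercises both the type-suffixed form
   of the intrinsic (e.g. svrshl_s8_m) and its overloaded form
   (e.g. svrshl_m); check-function-bodies matches the assembly that
   each generated function produces against the template in the
   comment that precedes it.  */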

/*
** rshl_s8_m_tied1:
**	srshl	z0\.b, p0/m, z0\.b, z4\.b
**	ret
*/
TEST_DUAL_Z (rshl_s8_m_tied1, svint8_t, svint8_t,
	     z0 = svrshl_s8_m (p0, z0, z4),
	     z0 = svrshl_m (p0, z0, z4))

/*
** rshl_s8_m_tied2:
**	mov	(z[0-9]+)\.d, z0\.d
**	movprfx	z0, z4
**	srshl	z0\.b, p0/m, z0\.b, \1\.b
**	ret
*/
TEST_DUAL_Z_REV (rshl_s8_m_tied2, svint8_t, svint8_t,
		 z0_res = svrshl_s8_m (p0, z4, z0),
		 z0_res = svrshl_m (p0, z4, z0))

/*
** rshl_s8_m_untied:
**	movprfx	z0, z1
**	srshl	z0\.b, p0/m, z0\.b, z4\.b
**	ret
*/
TEST_DUAL_Z (rshl_s8_m_untied, svint8_t, svint8_t,
	     z0 = svrshl_s8_m (p0, z1, z4),
	     z0 = svrshl_m (p0, z1, z4))

/*
** rshl_w0_s8_m_tied1:
**	mov	(z[0-9]+\.b), w0
**	srshl	z0\.b, p0/m, z0\.b, \1
**	ret
*/
TEST_UNIFORM_ZX (rshl_w0_s8_m_tied1, svint8_t, int8_t,
		 z0 = svrshl_n_s8_m (p0, z0, x0),
		 z0 = svrshl_m (p0, z0, x0))

/*
** rshl_w0_s8_m_untied: { xfail *-*-* }
**	mov	(z[0-9]+\.b), w0
**	movprfx	z0, z1
**	srshl	z0\.b, p0/m, z0\.b, \1
**	ret
*/
TEST_UNIFORM_ZX (rshl_w0_s8_m_untied, svint8_t, int8_t,
		 z0 = svrshl_n_s8_m (p0, z1, x0),
		 z0 = svrshl_m (p0, z1, x0))

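/* A negative constant shift count requests a rounding shift right, so
   svrshl by -N can be implemented as a single srshr by #N.  */
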
/*
** rshl_m8_s8_m:
**	srshr	z0\.b, p0/m, z0\.b, #8
**	ret
*/
TEST_UNIFORM_Z (rshl_m8_s8_m, svint8_t,
		z0 = svrshl_n_s8_m (p0, z0, -8),
		z0 = svrshl_m (p0, z0, -8))

/*
** rshl_m2_s8_m:
**	srshr	z0\.b, p0/m, z0\.b, #2
**	ret
*/
TEST_UNIFORM_Z (rshl_m2_s8_m, svint8_t,
		z0 = svrshl_n_s8_m (p0, z0, -2),
		z0 = svrshl_m (p0, z0, -2))

/*
** rshl_m1_s8_m_tied1:
**	srshr	z0\.b, p0/m, z0\.b, #1
**	ret
*/
TEST_UNIFORM_Z (rshl_m1_s8_m_tied1, svint8_t,
		z0 = svrshl_n_s8_m (p0, z0, -1),
		z0 = svrshl_m (p0, z0, -1))

/*
** rshl_m1_s8_m_untied:
**	movprfx	z0, z1
**	srshr	z0\.b, p0/m, z0\.b, #1
**	ret
*/
TEST_UNIFORM_Z (rshl_m1_s8_m_untied, svint8_t,
		z0 = svrshl_n_s8_m (p0, z1, -1),
		z0 = svrshl_m (p0, z1, -1))

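/* Rounding never changes the result of a left shift, so a positive
   constant count can use the plain lsl immediate form.  */
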
/*
** rshl_1_s8_m_tied1:
**	lsl	z0\.b, p0/m, z0\.b, #1
**	ret
*/
TEST_UNIFORM_Z (rshl_1_s8_m_tied1, svint8_t,
		z0 = svrshl_n_s8_m (p0, z0, 1),
		z0 = svrshl_m (p0, z0, 1))

/*
** rshl_1_s8_m_untied:
**	movprfx	z0, z1
**	lsl	z0\.b, p0/m, z0\.b, #1
**	ret
*/
TEST_UNIFORM_Z (rshl_1_s8_m_untied, svint8_t,
		z0 = svrshl_n_s8_m (p0, z1, 1),
		z0 = svrshl_m (p0, z1, 1))

/*
** rshl_2_s8_m:
**	lsl	z0\.b, p0/m, z0\.b, #2
**	ret
*/
TEST_UNIFORM_Z (rshl_2_s8_m, svint8_t,
		z0 = svrshl_n_s8_m (p0, z0, 2),
		z0 = svrshl_m (p0, z0, 2))

/*
** rshl_7_s8_m:
**	lsl	z0\.b, p0/m, z0\.b, #7
**	ret
*/
TEST_UNIFORM_Z (rshl_7_s8_m, svint8_t,
		z0 = svrshl_n_s8_m (p0, z0, 7),
		z0 = svrshl_m (p0, z0, 7))

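/* The _z forms must zero the inactive lanes of the result, hence the
   extra "movprfx z0.b, p0/z, ..." before each predicated operation.  */
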
/*
** rshl_s8_z_tied1:
**	movprfx	z0\.b, p0/z, z0\.b
**	srshl	z0\.b, p0/m, z0\.b, z4\.b
**	ret
*/
TEST_DUAL_Z (rshl_s8_z_tied1, svint8_t, svint8_t,
	     z0 = svrshl_s8_z (p0, z0, z4),
	     z0 = svrshl_z (p0, z0, z4))

/*
** rshl_s8_z_tied2:
**	movprfx	z0\.b, p0/z, z0\.b
**	srshlr	z0\.b, p0/m, z0\.b, z4\.b
**	ret
*/
TEST_DUAL_Z_REV (rshl_s8_z_tied2, svint8_t, svint8_t,
		 z0_res = svrshl_s8_z (p0, z4, z0),
		 z0_res = svrshl_z (p0, z4, z0))

/*
** rshl_s8_z_untied:
** (
**	movprfx	z0\.b, p0/z, z1\.b
**	srshl	z0\.b, p0/m, z0\.b, z4\.b
** |
**	movprfx	z0\.b, p0/z, z4\.b
**	srshlr	z0\.b, p0/m, z0\.b, z1\.b
** )
**	ret
*/
TEST_DUAL_Z (rshl_s8_z_untied, svint8_t, svint8_t,
	     z0 = svrshl_s8_z (p0, z1, z4),
	     z0 = svrshl_z (p0, z1, z4))

/*
** rshl_w0_s8_z_tied1:
**	mov	(z[0-9]+\.b), w0
**	movprfx	z0\.b, p0/z, z0\.b
**	srshl	z0\.b, p0/m, z0\.b, \1
**	ret
*/
TEST_UNIFORM_ZX (rshl_w0_s8_z_tied1, svint8_t, int8_t,
		 z0 = svrshl_n_s8_z (p0, z0, x0),
		 z0 = svrshl_z (p0, z0, x0))

/*
** rshl_w0_s8_z_untied:
**	mov	(z[0-9]+\.b), w0
** (
**	movprfx	z0\.b, p0/z, z1\.b
**	srshl	z0\.b, p0/m, z0\.b, \1
** |
**	movprfx	z0\.b, p0/z, \1
**	srshlr	z0\.b, p0/m, z0\.b, z1\.b
** )
**	ret
*/
TEST_UNIFORM_ZX (rshl_w0_s8_z_untied, svint8_t, int8_t,
		 z0 = svrshl_n_s8_z (p0, z1, x0),
		 z0 = svrshl_z (p0, z1, x0))

/*
** rshl_m8_s8_z:
**	movprfx	z0\.b, p0/z, z0\.b
**	srshr	z0\.b, p0/m, z0\.b, #8
**	ret
*/
TEST_UNIFORM_Z (rshl_m8_s8_z, svint8_t,
		z0 = svrshl_n_s8_z (p0, z0, -8),
		z0 = svrshl_z (p0, z0, -8))

/*
** rshl_m2_s8_z:
**	movprfx	z0\.b, p0/z, z0\.b
**	srshr	z0\.b, p0/m, z0\.b, #2
**	ret
*/
TEST_UNIFORM_Z (rshl_m2_s8_z, svint8_t,
		z0 = svrshl_n_s8_z (p0, z0, -2),
		z0 = svrshl_z (p0, z0, -2))

/*
** rshl_m1_s8_z_tied1:
**	movprfx	z0\.b, p0/z, z0\.b
**	srshr	z0\.b, p0/m, z0\.b, #1
**	ret
*/
TEST_UNIFORM_Z (rshl_m1_s8_z_tied1, svint8_t,
		z0 = svrshl_n_s8_z (p0, z0, -1),
		z0 = svrshl_z (p0, z0, -1))

/*
** rshl_m1_s8_z_untied:
**	movprfx	z0\.b, p0/z, z1\.b
**	srshr	z0\.b, p0/m, z0\.b, #1
**	ret
*/
TEST_UNIFORM_Z (rshl_m1_s8_z_untied, svint8_t,
		z0 = svrshl_n_s8_z (p0, z1, -1),
		z0 = svrshl_z (p0, z1, -1))

/*
** rshl_1_s8_z_tied1:
**	movprfx	z0\.b, p0/z, z0\.b
**	lsl	z0\.b, p0/m, z0\.b, #1
**	ret
*/
TEST_UNIFORM_Z (rshl_1_s8_z_tied1, svint8_t,
		z0 = svrshl_n_s8_z (p0, z0, 1),
		z0 = svrshl_z (p0, z0, 1))

/*
** rshl_1_s8_z_untied:
**	movprfx	z0\.b, p0/z, z1\.b
**	lsl	z0\.b, p0/m, z0\.b, #1
**	ret
*/
TEST_UNIFORM_Z (rshl_1_s8_z_untied, svint8_t,
		z0 = svrshl_n_s8_z (p0, z1, 1),
		z0 = svrshl_z (p0, z1, 1))

/*
** rshl_2_s8_z:
**	movprfx	z0\.b, p0/z, z0\.b
**	lsl	z0\.b, p0/m, z0\.b, #2
**	ret
*/
TEST_UNIFORM_Z (rshl_2_s8_z, svint8_t,
		z0 = svrshl_n_s8_z (p0, z0, 2),
		z0 = svrshl_z (p0, z0, 2))

/*
** rshl_7_s8_z:
**	movprfx	z0\.b, p0/z, z0\.b
**	lsl	z0\.b, p0/m, z0\.b, #7
**	ret
*/
TEST_UNIFORM_Z (rshl_7_s8_z, svint8_t,
		z0 = svrshl_n_s8_z (p0, z0, 7),
		z0 = svrshl_z (p0, z0, 7))

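/* The _x forms leave inactive lanes undefined, so the compiler is free
   to choose between srshl and the reversed form srshlr, whichever
   avoids an extra register move.  */
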
/*
** rshl_s8_x_tied1:
**	srshl	z0\.b, p0/m, z0\.b, z4\.b
**	ret
*/
TEST_DUAL_Z (rshl_s8_x_tied1, svint8_t, svint8_t,
	     z0 = svrshl_s8_x (p0, z0, z4),
	     z0 = svrshl_x (p0, z0, z4))

/*
** rshl_s8_x_tied2:
**	srshlr	z0\.b, p0/m, z0\.b, z4\.b
**	ret
*/
TEST_DUAL_Z_REV (rshl_s8_x_tied2, svint8_t, svint8_t,
		 z0_res = svrshl_s8_x (p0, z4, z0),
		 z0_res = svrshl_x (p0, z4, z0))

/*
** rshl_s8_x_untied:
** (
**	movprfx	z0, z1
**	srshl	z0\.b, p0/m, z0\.b, z4\.b
** |
**	movprfx	z0, z4
**	srshlr	z0\.b, p0/m, z0\.b, z1\.b
** )
**	ret
*/
TEST_DUAL_Z (rshl_s8_x_untied, svint8_t, svint8_t,
	     z0 = svrshl_s8_x (p0, z1, z4),
	     z0 = svrshl_x (p0, z1, z4))

/*
** rshl_w0_s8_x_tied1:
**	mov	(z[0-9]+\.b), w0
**	srshl	z0\.b, p0/m, z0\.b, \1
**	ret
*/
TEST_UNIFORM_ZX (rshl_w0_s8_x_tied1, svint8_t, int8_t,
		 z0 = svrshl_n_s8_x (p0, z0, x0),
		 z0 = svrshl_x (p0, z0, x0))

/*
** rshl_w0_s8_x_untied:
**	mov	z0\.b, w0
**	srshlr	z0\.b, p0/m, z0\.b, z1\.b
**	ret
*/
TEST_UNIFORM_ZX (rshl_w0_s8_x_untied, svint8_t, int8_t,
		 z0 = svrshl_n_s8_x (p0, z1, x0),
		 z0 = svrshl_x (p0, z1, x0))

/*
** rshl_m8_s8_x:
**	srshr	z0\.b, p0/m, z0\.b, #8
**	ret
*/
TEST_UNIFORM_Z (rshl_m8_s8_x, svint8_t,
		z0 = svrshl_n_s8_x (p0, z0, -8),
		z0 = svrshl_x (p0, z0, -8))

/*
** rshl_m2_s8_x:
**	srshr	z0\.b, p0/m, z0\.b, #2
**	ret
*/
TEST_UNIFORM_Z (rshl_m2_s8_x, svint8_t,
		z0 = svrshl_n_s8_x (p0, z0, -2),
		z0 = svrshl_x (p0, z0, -2))

/*
** rshl_m1_s8_x_tied1:
**	srshr	z0\.b, p0/m, z0\.b, #1
**	ret
*/
TEST_UNIFORM_Z (rshl_m1_s8_x_tied1, svint8_t,
		z0 = svrshl_n_s8_x (p0, z0, -1),
		z0 = svrshl_x (p0, z0, -1))

/*
** rshl_m1_s8_x_untied:
**	movprfx	z0, z1
**	srshr	z0\.b, p0/m, z0\.b, #1
**	ret
*/
TEST_UNIFORM_Z (rshl_m1_s8_x_untied, svint8_t,
		z0 = svrshl_n_s8_x (p0, z1, -1),
		z0 = svrshl_x (p0, z1, -1))

/*
** rshl_1_s8_x_tied1:
**	lsl	z0\.b, z0\.b, #1
**	ret
*/
TEST_UNIFORM_Z (rshl_1_s8_x_tied1, svint8_t,
		z0 = svrshl_n_s8_x (p0, z0, 1),
		z0 = svrshl_x (p0, z0, 1))

/*
** rshl_1_s8_x_untied:
**	lsl	z0\.b, z1\.b, #1
**	ret
*/
TEST_UNIFORM_Z (rshl_1_s8_x_untied, svint8_t,
		z0 = svrshl_n_s8_x (p0, z1, 1),
		z0 = svrshl_x (p0, z1, 1))

/*
** rshl_2_s8_x:
**	lsl	z0\.b, z0\.b, #2
**	ret
*/
TEST_UNIFORM_Z (rshl_2_s8_x, svint8_t,
		z0 = svrshl_n_s8_x (p0, z0, 2),
		z0 = svrshl_x (p0, z0, 2))

/*
** rshl_7_s8_x:
**	lsl	z0\.b, z0\.b, #7
**	ret
*/
TEST_UNIFORM_Z (rshl_7_s8_x, svint8_t,
		z0 = svrshl_n_s8_x (p0, z0, 7),
		z0 = svrshl_x (p0, z0, 7))