gcc-13.2.0/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/lsr_wide_u32.c
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */

#include "test_sve_acle.h"

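/* This file exercises the svlsr_wide* intrinsics (logical shift right by a
   wide, 64-bit shift amount) for svuint32_t, covering the merging (_m),
   zeroing (_z) and "don't care" (_x) predication forms with vector, scalar
   (x0) and immediate shift operands, in tied and untied register
   configurations.  The "**" comments give the expected assembly, verified
   by check-function-bodies.  */
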
/*
** lsr_wide_u32_m_tied1:
**	lsr	z0\.s, p0/m, z0\.s, z4\.d
**	ret
*/
TEST_DUAL_Z (lsr_wide_u32_m_tied1, svuint32_t, svuint64_t,
	     z0 = svlsr_wide_u32_m (p0, z0, z4),
	     z0 = svlsr_wide_m (p0, z0, z4))

/*
** lsr_wide_u32_m_tied2:
**	mov	(z[0-9]+\.d), z0\.d
**	movprfx	z0, z4
**	lsr	z0\.s, p0/m, z0\.s, \1
**	ret
*/
TEST_DUAL_Z_REV (lsr_wide_u32_m_tied2, svuint32_t, svuint64_t,
		 z0_res = svlsr_wide_u32_m (p0, z4, z0),
		 z0_res = svlsr_wide_m (p0, z4, z0))

/*
** lsr_wide_u32_m_untied:
**	movprfx	z0, z1
**	lsr	z0\.s, p0/m, z0\.s, z4\.d
**	ret
*/
TEST_DUAL_Z (lsr_wide_u32_m_untied, svuint32_t, svuint64_t,
	     z0 = svlsr_wide_u32_m (p0, z1, z4),
	     z0 = svlsr_wide_m (p0, z1, z4))

/*
** lsr_wide_x0_u32_m_tied1:
**	mov	(z[0-9]+\.d), x0
**	lsr	z0\.s, p0/m, z0\.s, \1
**	ret
*/
TEST_UNIFORM_ZX (lsr_wide_x0_u32_m_tied1, svuint32_t, uint64_t,
		 z0 = svlsr_wide_n_u32_m (p0, z0, x0),
		 z0 = svlsr_wide_m (p0, z0, x0))

/*
** lsr_wide_x0_u32_m_untied:
**	mov	(z[0-9]+\.d), x0
**	movprfx	z0, z1
**	lsr	z0\.s, p0/m, z0\.s, \1
**	ret
*/
TEST_UNIFORM_ZX (lsr_wide_x0_u32_m_untied, svuint32_t, uint64_t,
		 z0 = svlsr_wide_n_u32_m (p0, z1, x0),
		 z0 = svlsr_wide_m (p0, z1, x0))

/*
** lsr_wide_1_u32_m_tied1:
**	lsr	z0\.s, p0/m, z0\.s, #1
**	ret
*/
TEST_UNIFORM_Z (lsr_wide_1_u32_m_tied1, svuint32_t,
		z0 = svlsr_wide_n_u32_m (p0, z0, 1),
		z0 = svlsr_wide_m (p0, z0, 1))

/*
** lsr_wide_1_u32_m_untied:
**	movprfx	z0, z1
**	lsr	z0\.s, p0/m, z0\.s, #1
**	ret
*/
TEST_UNIFORM_Z (lsr_wide_1_u32_m_untied, svuint32_t,
		z0 = svlsr_wide_n_u32_m (p0, z1, 1),
		z0 = svlsr_wide_m (p0, z1, 1))

/*
** lsr_wide_31_u32_m_tied1:
**	lsr	z0\.s, p0/m, z0\.s, #31
**	ret
*/
TEST_UNIFORM_Z (lsr_wide_31_u32_m_tied1, svuint32_t,
		z0 = svlsr_wide_n_u32_m (p0, z0, 31),
		z0 = svlsr_wide_m (p0, z0, 31))

/*
** lsr_wide_31_u32_m_untied:
**	movprfx	z0, z1
**	lsr	z0\.s, p0/m, z0\.s, #31
**	ret
*/
TEST_UNIFORM_Z (lsr_wide_31_u32_m_untied, svuint32_t,
		z0 = svlsr_wide_n_u32_m (p0, z1, 31),
		z0 = svlsr_wide_m (p0, z1, 31))

/*
** lsr_wide_32_u32_m_tied1:
**	lsr	z0\.s, p0/m, z0\.s, #32
**	ret
*/
TEST_UNIFORM_Z (lsr_wide_32_u32_m_tied1, svuint32_t,
		z0 = svlsr_wide_n_u32_m (p0, z0, 32),
		z0 = svlsr_wide_m (p0, z0, 32))

/*
** lsr_wide_32_u32_m_untied:
**	movprfx	z0, z1
**	lsr	z0\.s, p0/m, z0\.s, #32
**	ret
*/
TEST_UNIFORM_Z (lsr_wide_32_u32_m_untied, svuint32_t,
		z0 = svlsr_wide_n_u32_m (p0, z1, 32),
		z0 = svlsr_wide_m (p0, z1, 32))

/*
** lsr_wide_u32_z_tied1:
**	movprfx	z0\.s, p0/z, z0\.s
**	lsr	z0\.s, p0/m, z0\.s, z4\.d
**	ret
*/
TEST_DUAL_Z (lsr_wide_u32_z_tied1, svuint32_t, svuint64_t,
	     z0 = svlsr_wide_u32_z (p0, z0, z4),
	     z0 = svlsr_wide_z (p0, z0, z4))

/*
** lsr_wide_u32_z_tied2:
**	mov	(z[0-9]+\.d), z0\.d
**	movprfx	z0\.s, p0/z, z4\.s
**	lsr	z0\.s, p0/m, z0\.s, \1
**	ret
*/
TEST_DUAL_Z_REV (lsr_wide_u32_z_tied2, svuint32_t, svuint64_t,
		 z0_res = svlsr_wide_u32_z (p0, z4, z0),
		 z0_res = svlsr_wide_z (p0, z4, z0))

/*
** lsr_wide_u32_z_untied:
**	movprfx	z0\.s, p0/z, z1\.s
**	lsr	z0\.s, p0/m, z0\.s, z4\.d
**	ret
*/
TEST_DUAL_Z (lsr_wide_u32_z_untied, svuint32_t, svuint64_t,
	     z0 = svlsr_wide_u32_z (p0, z1, z4),
	     z0 = svlsr_wide_z (p0, z1, z4))

/*
** lsr_wide_x0_u32_z_tied1:
**	mov	(z[0-9]+\.d), x0
**	movprfx	z0\.s, p0/z, z0\.s
**	lsr	z0\.s, p0/m, z0\.s, \1
**	ret
*/
TEST_UNIFORM_ZX (lsr_wide_x0_u32_z_tied1, svuint32_t, uint64_t,
		 z0 = svlsr_wide_n_u32_z (p0, z0, x0),
		 z0 = svlsr_wide_z (p0, z0, x0))

/*
** lsr_wide_x0_u32_z_untied: { xfail *-*-* }
**	mov	(z[0-9]+\.d), x0
**	movprfx	z0\.s, p0/z, z1\.s
**	lsr	z0\.s, p0/m, z0\.s, \1
**	ret
*/
TEST_UNIFORM_ZX (lsr_wide_x0_u32_z_untied, svuint32_t, uint64_t,
		 z0 = svlsr_wide_n_u32_z (p0, z1, x0),
		 z0 = svlsr_wide_z (p0, z1, x0))

/*
** lsr_wide_1_u32_z_tied1:
**	movprfx	z0\.s, p0/z, z0\.s
**	lsr	z0\.s, p0/m, z0\.s, #1
**	ret
*/
TEST_UNIFORM_Z (lsr_wide_1_u32_z_tied1, svuint32_t,
		z0 = svlsr_wide_n_u32_z (p0, z0, 1),
		z0 = svlsr_wide_z (p0, z0, 1))

/*
** lsr_wide_1_u32_z_untied:
**	movprfx	z0\.s, p0/z, z1\.s
**	lsr	z0\.s, p0/m, z0\.s, #1
**	ret
*/
TEST_UNIFORM_Z (lsr_wide_1_u32_z_untied, svuint32_t,
		z0 = svlsr_wide_n_u32_z (p0, z1, 1),
		z0 = svlsr_wide_z (p0, z1, 1))

/*
** lsr_wide_31_u32_z_tied1:
**	movprfx	z0\.s, p0/z, z0\.s
**	lsr	z0\.s, p0/m, z0\.s, #31
**	ret
*/
TEST_UNIFORM_Z (lsr_wide_31_u32_z_tied1, svuint32_t,
		z0 = svlsr_wide_n_u32_z (p0, z0, 31),
		z0 = svlsr_wide_z (p0, z0, 31))

/*
** lsr_wide_31_u32_z_untied:
**	movprfx	z0\.s, p0/z, z1\.s
**	lsr	z0\.s, p0/m, z0\.s, #31
**	ret
*/
TEST_UNIFORM_Z (lsr_wide_31_u32_z_untied, svuint32_t,
		z0 = svlsr_wide_n_u32_z (p0, z1, 31),
		z0 = svlsr_wide_z (p0, z1, 31))

/*
** lsr_wide_32_u32_z_tied1:
**	movprfx	z0\.s, p0/z, z0\.s
**	lsr	z0\.s, p0/m, z0\.s, #32
**	ret
*/
TEST_UNIFORM_Z (lsr_wide_32_u32_z_tied1, svuint32_t,
		z0 = svlsr_wide_n_u32_z (p0, z0, 32),
		z0 = svlsr_wide_z (p0, z0, 32))

/*
** lsr_wide_32_u32_z_untied:
**	movprfx	z0\.s, p0/z, z1\.s
**	lsr	z0\.s, p0/m, z0\.s, #32
**	ret
*/
TEST_UNIFORM_Z (lsr_wide_32_u32_z_untied, svuint32_t,
		z0 = svlsr_wide_n_u32_z (p0, z1, 32),
		z0 = svlsr_wide_z (p0, z1, 32))

/*
** lsr_wide_u32_x_tied1:
**	lsr	z0\.s, z0\.s, z4\.d
**	ret
*/
TEST_DUAL_Z (lsr_wide_u32_x_tied1, svuint32_t, svuint64_t,
	     z0 = svlsr_wide_u32_x (p0, z0, z4),
	     z0 = svlsr_wide_x (p0, z0, z4))

/*
** lsr_wide_u32_x_tied2:
**	lsr	z0\.s, z4\.s, z0\.d
**	ret
*/
TEST_DUAL_Z_REV (lsr_wide_u32_x_tied2, svuint32_t, svuint64_t,
		 z0_res = svlsr_wide_u32_x (p0, z4, z0),
		 z0_res = svlsr_wide_x (p0, z4, z0))

/*
** lsr_wide_u32_x_untied:
**	lsr	z0\.s, z1\.s, z4\.d
**	ret
*/
TEST_DUAL_Z (lsr_wide_u32_x_untied, svuint32_t, svuint64_t,
	     z0 = svlsr_wide_u32_x (p0, z1, z4),
	     z0 = svlsr_wide_x (p0, z1, z4))

/*
** lsr_wide_x0_u32_x_tied1:
**	mov	(z[0-9]+\.d), x0
**	lsr	z0\.s, z0\.s, \1
**	ret
*/
TEST_UNIFORM_ZX (lsr_wide_x0_u32_x_tied1, svuint32_t, uint64_t,
		 z0 = svlsr_wide_n_u32_x (p0, z0, x0),
		 z0 = svlsr_wide_x (p0, z0, x0))

/*
** lsr_wide_x0_u32_x_untied:
**	mov	(z[0-9]+\.d), x0
**	lsr	z0\.s, z1\.s, \1
**	ret
*/
TEST_UNIFORM_ZX (lsr_wide_x0_u32_x_untied, svuint32_t, uint64_t,
		 z0 = svlsr_wide_n_u32_x (p0, z1, x0),
		 z0 = svlsr_wide_x (p0, z1, x0))

/*
** lsr_wide_1_u32_x_tied1:
**	lsr	z0\.s, z0\.s, #1
**	ret
*/
TEST_UNIFORM_Z (lsr_wide_1_u32_x_tied1, svuint32_t,
		z0 = svlsr_wide_n_u32_x (p0, z0, 1),
		z0 = svlsr_wide_x (p0, z0, 1))

/*
** lsr_wide_1_u32_x_untied:
**	lsr	z0\.s, z1\.s, #1
**	ret
*/
TEST_UNIFORM_Z (lsr_wide_1_u32_x_untied, svuint32_t,
		z0 = svlsr_wide_n_u32_x (p0, z1, 1),
		z0 = svlsr_wide_x (p0, z1, 1))

/*
** lsr_wide_31_u32_x_tied1:
**	lsr	z0\.s, z0\.s, #31
**	ret
*/
TEST_UNIFORM_Z (lsr_wide_31_u32_x_tied1, svuint32_t,
		z0 = svlsr_wide_n_u32_x (p0, z0, 31),
		z0 = svlsr_wide_x (p0, z0, 31))

/*
** lsr_wide_31_u32_x_untied:
**	lsr	z0\.s, z1\.s, #31
**	ret
*/
TEST_UNIFORM_Z (lsr_wide_31_u32_x_untied, svuint32_t,
		z0 = svlsr_wide_n_u32_x (p0, z1, 31),
		z0 = svlsr_wide_x (p0, z1, 31))

/*
** lsr_wide_32_u32_x_tied1:
**	lsr	z0\.s, z0\.s, #32
**	ret
*/
TEST_UNIFORM_Z (lsr_wide_32_u32_x_tied1, svuint32_t,
		z0 = svlsr_wide_n_u32_x (p0, z0, 32),
		z0 = svlsr_wide_x (p0, z0, 32))

/*
** lsr_wide_32_u32_x_untied:
**	lsr	z0\.s, z1\.s, #32
**	ret
*/
TEST_UNIFORM_Z (lsr_wide_32_u32_x_untied, svuint32_t,
		z0 = svlsr_wide_n_u32_x (p0, z1, 32),
		z0 = svlsr_wide_x (p0, z1, 32))