/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */

#include "test_sve_acle.h"
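
/* Tests for the svlsl_wide intrinsics on svint16_t.  Each block pairs an
   expected-assembly template (checked by check-function-bodies) with a
   TEST_* invocation, covering the merging (_m), zeroing (_z) and
   "don't care" (_x) predication forms for tied and untied operands and
   for vector, scalar (x0) and immediate shift amounts.  */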

/*
** lsl_wide_s16_m_tied1:
**	lsl	z0\.h, p0/m, z0\.h, z4\.d
**	ret
*/
TEST_DUAL_Z (lsl_wide_s16_m_tied1, svint16_t, svuint64_t,
	     z0 = svlsl_wide_s16_m (p0, z0, z4),
	     z0 = svlsl_wide_m (p0, z0, z4))

/*
** lsl_wide_s16_m_tied2:
**	mov	(z[0-9]+\.d), z0\.d
**	movprfx	z0, z4
**	lsl	z0\.h, p0/m, z0\.h, \1
**	ret
*/
TEST_DUAL_Z_REV (lsl_wide_s16_m_tied2, svint16_t, svuint64_t,
		 z0_res = svlsl_wide_s16_m (p0, z4, z0),
		 z0_res = svlsl_wide_m (p0, z4, z0))

/*
** lsl_wide_s16_m_untied:
**	movprfx	z0, z1
**	lsl	z0\.h, p0/m, z0\.h, z4\.d
**	ret
*/
TEST_DUAL_Z (lsl_wide_s16_m_untied, svint16_t, svuint64_t,
	     z0 = svlsl_wide_s16_m (p0, z1, z4),
	     z0 = svlsl_wide_m (p0, z1, z4))

/*
** lsl_wide_x0_s16_m_tied1:
**	mov	(z[0-9]+\.d), x0
**	lsl	z0\.h, p0/m, z0\.h, \1
**	ret
*/
TEST_UNIFORM_ZX (lsl_wide_x0_s16_m_tied1, svint16_t, uint64_t,
		 z0 = svlsl_wide_n_s16_m (p0, z0, x0),
		 z0 = svlsl_wide_m (p0, z0, x0))

/*
** lsl_wide_x0_s16_m_untied:
**	mov	(z[0-9]+\.d), x0
**	movprfx	z0, z1
**	lsl	z0\.h, p0/m, z0\.h, \1
**	ret
*/
TEST_UNIFORM_ZX (lsl_wide_x0_s16_m_untied, svint16_t, uint64_t,
		 z0 = svlsl_wide_n_s16_m (p0, z1, x0),
		 z0 = svlsl_wide_m (p0, z1, x0))

/*
** lsl_wide_1_s16_m_tied1:
**	lsl	z0\.h, p0/m, z0\.h, #1
**	ret
*/
TEST_UNIFORM_Z (lsl_wide_1_s16_m_tied1, svint16_t,
		z0 = svlsl_wide_n_s16_m (p0, z0, 1),
		z0 = svlsl_wide_m (p0, z0, 1))

/*
** lsl_wide_1_s16_m_untied:
**	movprfx	z0, z1
**	lsl	z0\.h, p0/m, z0\.h, #1
**	ret
*/
TEST_UNIFORM_Z (lsl_wide_1_s16_m_untied, svint16_t,
		z0 = svlsl_wide_n_s16_m (p0, z1, 1),
		z0 = svlsl_wide_m (p0, z1, 1))

/*
** lsl_wide_15_s16_m_tied1:
**	lsl	z0\.h, p0/m, z0\.h, #15
**	ret
*/
TEST_UNIFORM_Z (lsl_wide_15_s16_m_tied1, svint16_t,
		z0 = svlsl_wide_n_s16_m (p0, z0, 15),
		z0 = svlsl_wide_m (p0, z0, 15))

/*
** lsl_wide_15_s16_m_untied:
**	movprfx	z0, z1
**	lsl	z0\.h, p0/m, z0\.h, #15
**	ret
*/
TEST_UNIFORM_Z (lsl_wide_15_s16_m_untied, svint16_t,
		z0 = svlsl_wide_n_s16_m (p0, z1, 15),
		z0 = svlsl_wide_m (p0, z1, 15))

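/* A shift amount of 16 cannot be encoded as an LSL immediate for .h
   elements (the immediate range for 16-bit elements is 0-15), so the
   templates below expect the constant to be materialised in a vector
   register first.  */
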
/*
** lsl_wide_16_s16_m_tied1:
**	mov	(z[0-9]+\.d), #16
**	lsl	z0\.h, p0/m, z0\.h, \1
**	ret
*/
TEST_UNIFORM_Z (lsl_wide_16_s16_m_tied1, svint16_t,
		z0 = svlsl_wide_n_s16_m (p0, z0, 16),
		z0 = svlsl_wide_m (p0, z0, 16))

/*
** lsl_wide_16_s16_m_untied: { xfail *-*-* }
**	mov	(z[0-9]+\.d), #16
**	movprfx	z0, z1
**	lsl	z0\.h, p0/m, z0\.h, \1
**	ret
*/
TEST_UNIFORM_Z (lsl_wide_16_s16_m_untied, svint16_t,
		z0 = svlsl_wide_n_s16_m (p0, z1, 16),
		z0 = svlsl_wide_m (p0, z1, 16))

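/* Zeroing (_z) forms: the templates expect an initial movprfx with /z
   predication, which zeroes the inactive lanes before the predicated
   shift.  */
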
/*
** lsl_wide_s16_z_tied1:
**	movprfx	z0\.h, p0/z, z0\.h
**	lsl	z0\.h, p0/m, z0\.h, z4\.d
**	ret
*/
TEST_DUAL_Z (lsl_wide_s16_z_tied1, svint16_t, svuint64_t,
	     z0 = svlsl_wide_s16_z (p0, z0, z4),
	     z0 = svlsl_wide_z (p0, z0, z4))

/*
** lsl_wide_s16_z_tied2:
**	mov	(z[0-9]+\.d), z0\.d
**	movprfx	z0\.h, p0/z, z4\.h
**	lsl	z0\.h, p0/m, z0\.h, \1
**	ret
*/
TEST_DUAL_Z_REV (lsl_wide_s16_z_tied2, svint16_t, svuint64_t,
		 z0_res = svlsl_wide_s16_z (p0, z4, z0),
		 z0_res = svlsl_wide_z (p0, z4, z0))

/*
** lsl_wide_s16_z_untied:
**	movprfx	z0\.h, p0/z, z1\.h
**	lsl	z0\.h, p0/m, z0\.h, z4\.d
**	ret
*/
TEST_DUAL_Z (lsl_wide_s16_z_untied, svint16_t, svuint64_t,
	     z0 = svlsl_wide_s16_z (p0, z1, z4),
	     z0 = svlsl_wide_z (p0, z1, z4))

/*
** lsl_wide_x0_s16_z_tied1:
**	mov	(z[0-9]+\.d), x0
**	movprfx	z0\.h, p0/z, z0\.h
**	lsl	z0\.h, p0/m, z0\.h, \1
**	ret
*/
TEST_UNIFORM_ZX (lsl_wide_x0_s16_z_tied1, svint16_t, uint64_t,
		 z0 = svlsl_wide_n_s16_z (p0, z0, x0),
		 z0 = svlsl_wide_z (p0, z0, x0))

/*
** lsl_wide_x0_s16_z_untied: { xfail *-*-* }
**	mov	(z[0-9]+\.d), x0
**	movprfx	z0\.h, p0/z, z1\.h
**	lsl	z0\.h, p0/m, z0\.h, \1
**	ret
*/
TEST_UNIFORM_ZX (lsl_wide_x0_s16_z_untied, svint16_t, uint64_t,
		 z0 = svlsl_wide_n_s16_z (p0, z1, x0),
		 z0 = svlsl_wide_z (p0, z1, x0))

/*
** lsl_wide_1_s16_z_tied1:
**	movprfx	z0\.h, p0/z, z0\.h
**	lsl	z0\.h, p0/m, z0\.h, #1
**	ret
*/
TEST_UNIFORM_Z (lsl_wide_1_s16_z_tied1, svint16_t,
		z0 = svlsl_wide_n_s16_z (p0, z0, 1),
		z0 = svlsl_wide_z (p0, z0, 1))

/*
** lsl_wide_1_s16_z_untied:
**	movprfx	z0\.h, p0/z, z1\.h
**	lsl	z0\.h, p0/m, z0\.h, #1
**	ret
*/
TEST_UNIFORM_Z (lsl_wide_1_s16_z_untied, svint16_t,
		z0 = svlsl_wide_n_s16_z (p0, z1, 1),
		z0 = svlsl_wide_z (p0, z1, 1))

/*
** lsl_wide_15_s16_z_tied1:
**	movprfx	z0\.h, p0/z, z0\.h
**	lsl	z0\.h, p0/m, z0\.h, #15
**	ret
*/
TEST_UNIFORM_Z (lsl_wide_15_s16_z_tied1, svint16_t,
		z0 = svlsl_wide_n_s16_z (p0, z0, 15),
		z0 = svlsl_wide_z (p0, z0, 15))

/*
** lsl_wide_15_s16_z_untied:
**	movprfx	z0\.h, p0/z, z1\.h
**	lsl	z0\.h, p0/m, z0\.h, #15
**	ret
*/
TEST_UNIFORM_Z (lsl_wide_15_s16_z_untied, svint16_t,
		z0 = svlsl_wide_n_s16_z (p0, z1, 15),
		z0 = svlsl_wide_z (p0, z1, 15))

/*
** lsl_wide_16_s16_z_tied1:
**	mov	(z[0-9]+\.d), #16
**	movprfx	z0\.h, p0/z, z0\.h
**	lsl	z0\.h, p0/m, z0\.h, \1
**	ret
*/
TEST_UNIFORM_Z (lsl_wide_16_s16_z_tied1, svint16_t,
		z0 = svlsl_wide_n_s16_z (p0, z0, 16),
		z0 = svlsl_wide_z (p0, z0, 16))

/*
** lsl_wide_16_s16_z_untied: { xfail *-*-* }
**	mov	(z[0-9]+\.d), #16
**	movprfx	z0\.h, p0/z, z1\.h
**	lsl	z0\.h, p0/m, z0\.h, \1
**	ret
*/
TEST_UNIFORM_Z (lsl_wide_16_s16_z_untied, svint16_t,
		z0 = svlsl_wide_n_s16_z (p0, z1, 16),
		z0 = svlsl_wide_z (p0, z1, 16))

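/* "Don't care" (_x) forms: with no constraint on the inactive lanes, the
   templates expect the unpredicated wide shift rather than a movprfx and
   a predicated shift.  */
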
/*
** lsl_wide_s16_x_tied1:
**	lsl	z0\.h, z0\.h, z4\.d
**	ret
*/
TEST_DUAL_Z (lsl_wide_s16_x_tied1, svint16_t, svuint64_t,
	     z0 = svlsl_wide_s16_x (p0, z0, z4),
	     z0 = svlsl_wide_x (p0, z0, z4))

/*
** lsl_wide_s16_x_tied2:
**	lsl	z0\.h, z4\.h, z0\.d
**	ret
*/
TEST_DUAL_Z_REV (lsl_wide_s16_x_tied2, svint16_t, svuint64_t,
		 z0_res = svlsl_wide_s16_x (p0, z4, z0),
		 z0_res = svlsl_wide_x (p0, z4, z0))

/*
** lsl_wide_s16_x_untied:
**	lsl	z0\.h, z1\.h, z4\.d
**	ret
*/
TEST_DUAL_Z (lsl_wide_s16_x_untied, svint16_t, svuint64_t,
	     z0 = svlsl_wide_s16_x (p0, z1, z4),
	     z0 = svlsl_wide_x (p0, z1, z4))

/*
** lsl_wide_x0_s16_x_tied1:
**	mov	(z[0-9]+\.d), x0
**	lsl	z0\.h, z0\.h, \1
**	ret
*/
TEST_UNIFORM_ZX (lsl_wide_x0_s16_x_tied1, svint16_t, uint64_t,
		 z0 = svlsl_wide_n_s16_x (p0, z0, x0),
		 z0 = svlsl_wide_x (p0, z0, x0))

/*
** lsl_wide_x0_s16_x_untied:
**	mov	(z[0-9]+\.d), x0
**	lsl	z0\.h, z1\.h, \1
**	ret
*/
TEST_UNIFORM_ZX (lsl_wide_x0_s16_x_untied, svint16_t, uint64_t,
		 z0 = svlsl_wide_n_s16_x (p0, z1, x0),
		 z0 = svlsl_wide_x (p0, z1, x0))

/*
** lsl_wide_1_s16_x_tied1:
**	lsl	z0\.h, z0\.h, #1
**	ret
*/
TEST_UNIFORM_Z (lsl_wide_1_s16_x_tied1, svint16_t,
		z0 = svlsl_wide_n_s16_x (p0, z0, 1),
		z0 = svlsl_wide_x (p0, z0, 1))

/*
** lsl_wide_1_s16_x_untied:
**	lsl	z0\.h, z1\.h, #1
**	ret
*/
TEST_UNIFORM_Z (lsl_wide_1_s16_x_untied, svint16_t,
		z0 = svlsl_wide_n_s16_x (p0, z1, 1),
		z0 = svlsl_wide_x (p0, z1, 1))

/*
** lsl_wide_15_s16_x_tied1:
**	lsl	z0\.h, z0\.h, #15
**	ret
*/
TEST_UNIFORM_Z (lsl_wide_15_s16_x_tied1, svint16_t,
		z0 = svlsl_wide_n_s16_x (p0, z0, 15),
		z0 = svlsl_wide_x (p0, z0, 15))

/*
** lsl_wide_15_s16_x_untied:
**	lsl	z0\.h, z1\.h, #15
**	ret
*/
TEST_UNIFORM_Z (lsl_wide_15_s16_x_untied, svint16_t,
		z0 = svlsl_wide_n_s16_x (p0, z1, 15),
		z0 = svlsl_wide_x (p0, z1, 15))

/*
** lsl_wide_16_s16_x_tied1:
**	mov	(z[0-9]+\.d), #16
**	lsl	z0\.h, z0\.h, \1
**	ret
*/
TEST_UNIFORM_Z (lsl_wide_16_s16_x_tied1, svint16_t,
		z0 = svlsl_wide_n_s16_x (p0, z0, 16),
		z0 = svlsl_wide_x (p0, z0, 16))

/*
** lsl_wide_16_s16_x_untied:
**	mov	(z[0-9]+\.d), #16
**	lsl	z0\.h, z1\.h, \1
**	ret
*/
TEST_UNIFORM_Z (lsl_wide_16_s16_x_untied, svint16_t,
		z0 = svlsl_wide_n_s16_x (p0, z1, 16),
		z0 = svlsl_wide_x (p0, z1, 16))