/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */

#include "test_sve_acle.h"

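/* Each TEST_UNIFORM_Z / TEST_UNIFORM_ZX invocation below (macros provided
   by test_sve_acle.h) defines a function whose compiled body must match
   the "**" assembly template in the preceding comment; check-function-bodies
   performs the matching.  The _m, _z and _x suffixes exercise the merging,
   zeroing and "don't care" predication forms of svmul.  */
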
/*
** mul_s64_m_tied1:
**	mul	z0\.d, p0/m, z0\.d, z1\.d
**	ret
*/
TEST_UNIFORM_Z (mul_s64_m_tied1, svint64_t,
		z0 = svmul_s64_m (p0, z0, z1),
		z0 = svmul_m (p0, z0, z1))

/*
** mul_s64_m_tied2:
**	mov	(z[0-9]+\.d), z0\.d
**	movprfx	z0, z1
**	mul	z0\.d, p0/m, z0\.d, \1
**	ret
*/
TEST_UNIFORM_Z (mul_s64_m_tied2, svint64_t,
		z0 = svmul_s64_m (p0, z1, z0),
		z0 = svmul_m (p0, z1, z0))

/*
** mul_s64_m_untied:
**	movprfx	z0, z1
**	mul	z0\.d, p0/m, z0\.d, z2\.d
**	ret
*/
TEST_UNIFORM_Z (mul_s64_m_untied, svint64_t,
		z0 = svmul_s64_m (p0, z1, z2),
		z0 = svmul_m (p0, z1, z2))

/*
** mul_x0_s64_m_tied1:
**	mov	(z[0-9]+\.d), x0
**	mul	z0\.d, p0/m, z0\.d, \1
**	ret
*/
TEST_UNIFORM_ZX (mul_x0_s64_m_tied1, svint64_t, int64_t,
		 z0 = svmul_n_s64_m (p0, z0, x0),
		 z0 = svmul_m (p0, z0, x0))

/*
** mul_x0_s64_m_untied:
**	mov	(z[0-9]+\.d), x0
**	movprfx	z0, z1
**	mul	z0\.d, p0/m, z0\.d, \1
**	ret
*/
TEST_UNIFORM_ZX (mul_x0_s64_m_untied, svint64_t, int64_t,
		 z0 = svmul_n_s64_m (p0, z1, x0),
		 z0 = svmul_m (p0, z1, x0))

/*
** mul_2_s64_m_tied1:
**	mov	(z[0-9]+\.d), #2
**	mul	z0\.d, p0/m, z0\.d, \1
**	ret
*/
TEST_UNIFORM_Z (mul_2_s64_m_tied1, svint64_t,
		z0 = svmul_n_s64_m (p0, z0, 2),
		z0 = svmul_m (p0, z0, 2))

/*
** mul_2_s64_m_untied: { xfail *-*-* }
**	mov	(z[0-9]+\.d), #2
**	movprfx	z0, z1
**	mul	z0\.d, p0/m, z0\.d, \1
**	ret
*/
TEST_UNIFORM_Z (mul_2_s64_m_untied, svint64_t,
		z0 = svmul_n_s64_m (p0, z1, 2),
		z0 = svmul_m (p0, z1, 2))

/*
** mul_m1_s64_m:
**	mov	(z[0-9]+)\.b, #-1
**	mul	z0\.d, p0/m, z0\.d, \1\.d
**	ret
*/
TEST_UNIFORM_Z (mul_m1_s64_m, svint64_t,
		z0 = svmul_n_s64_m (p0, z0, -1),
		z0 = svmul_m (p0, z0, -1))

/*
** mul_s64_z_tied1:
**	movprfx	z0\.d, p0/z, z0\.d
**	mul	z0\.d, p0/m, z0\.d, z1\.d
**	ret
*/
TEST_UNIFORM_Z (mul_s64_z_tied1, svint64_t,
		z0 = svmul_s64_z (p0, z0, z1),
		z0 = svmul_z (p0, z0, z1))

/*
** mul_s64_z_tied2:
**	movprfx	z0\.d, p0/z, z0\.d
**	mul	z0\.d, p0/m, z0\.d, z1\.d
**	ret
*/
TEST_UNIFORM_Z (mul_s64_z_tied2, svint64_t,
		z0 = svmul_s64_z (p0, z1, z0),
		z0 = svmul_z (p0, z1, z0))

/*
** mul_s64_z_untied:
** (
**	movprfx	z0\.d, p0/z, z1\.d
**	mul	z0\.d, p0/m, z0\.d, z2\.d
** |
**	movprfx	z0\.d, p0/z, z2\.d
**	mul	z0\.d, p0/m, z0\.d, z1\.d
** )
**	ret
*/
TEST_UNIFORM_Z (mul_s64_z_untied, svint64_t,
		z0 = svmul_s64_z (p0, z1, z2),
		z0 = svmul_z (p0, z1, z2))

/*
** mul_x0_s64_z_tied1:
**	mov	(z[0-9]+\.d), x0
**	movprfx	z0\.d, p0/z, z0\.d
**	mul	z0\.d, p0/m, z0\.d, \1
**	ret
*/
TEST_UNIFORM_ZX (mul_x0_s64_z_tied1, svint64_t, int64_t,
		 z0 = svmul_n_s64_z (p0, z0, x0),
		 z0 = svmul_z (p0, z0, x0))

/*
** mul_x0_s64_z_untied:
**	mov	(z[0-9]+\.d), x0
** (
**	movprfx	z0\.d, p0/z, z1\.d
**	mul	z0\.d, p0/m, z0\.d, \1
** |
**	movprfx	z0\.d, p0/z, \1
**	mul	z0\.d, p0/m, z0\.d, z1\.d
** )
**	ret
*/
TEST_UNIFORM_ZX (mul_x0_s64_z_untied, svint64_t, int64_t,
		 z0 = svmul_n_s64_z (p0, z1, x0),
		 z0 = svmul_z (p0, z1, x0))

/*
** mul_2_s64_z_tied1:
**	mov	(z[0-9]+\.d), #2
**	movprfx	z0\.d, p0/z, z0\.d
**	mul	z0\.d, p0/m, z0\.d, \1
**	ret
*/
TEST_UNIFORM_Z (mul_2_s64_z_tied1, svint64_t,
		z0 = svmul_n_s64_z (p0, z0, 2),
		z0 = svmul_z (p0, z0, 2))

/*
** mul_2_s64_z_untied:
**	mov	(z[0-9]+\.d), #2
** (
**	movprfx	z0\.d, p0/z, z1\.d
**	mul	z0\.d, p0/m, z0\.d, \1
** |
**	movprfx	z0\.d, p0/z, \1
**	mul	z0\.d, p0/m, z0\.d, z1\.d
** )
**	ret
*/
TEST_UNIFORM_Z (mul_2_s64_z_untied, svint64_t,
		z0 = svmul_n_s64_z (p0, z1, 2),
		z0 = svmul_z (p0, z1, 2))

/*
** mul_s64_x_tied1:
**	mul	z0\.d, p0/m, z0\.d, z1\.d
**	ret
*/
TEST_UNIFORM_Z (mul_s64_x_tied1, svint64_t,
		z0 = svmul_s64_x (p0, z0, z1),
		z0 = svmul_x (p0, z0, z1))

/*
** mul_s64_x_tied2:
**	mul	z0\.d, p0/m, z0\.d, z1\.d
**	ret
*/
TEST_UNIFORM_Z (mul_s64_x_tied2, svint64_t,
		z0 = svmul_s64_x (p0, z1, z0),
		z0 = svmul_x (p0, z1, z0))

/*
** mul_s64_x_untied:
** (
**	movprfx	z0, z1
**	mul	z0\.d, p0/m, z0\.d, z2\.d
** |
**	movprfx	z0, z2
**	mul	z0\.d, p0/m, z0\.d, z1\.d
** )
**	ret
*/
TEST_UNIFORM_Z (mul_s64_x_untied, svint64_t,
		z0 = svmul_s64_x (p0, z1, z2),
		z0 = svmul_x (p0, z1, z2))

/*
** mul_x0_s64_x_tied1:
**	mov	(z[0-9]+\.d), x0
**	mul	z0\.d, p0/m, z0\.d, \1
**	ret
*/
TEST_UNIFORM_ZX (mul_x0_s64_x_tied1, svint64_t, int64_t,
		 z0 = svmul_n_s64_x (p0, z0, x0),
		 z0 = svmul_x (p0, z0, x0))

/*
** mul_x0_s64_x_untied:
**	mov	z0\.d, x0
**	mul	z0\.d, p0/m, z0\.d, z1\.d
**	ret
*/
TEST_UNIFORM_ZX (mul_x0_s64_x_untied, svint64_t, int64_t,
		 z0 = svmul_n_s64_x (p0, z1, x0),
		 z0 = svmul_x (p0, z1, x0))

/*
** mul_2_s64_x_tied1:
**	mul	z0\.d, z0\.d, #2
**	ret
*/
TEST_UNIFORM_Z (mul_2_s64_x_tied1, svint64_t,
		z0 = svmul_n_s64_x (p0, z0, 2),
		z0 = svmul_x (p0, z0, 2))

/*
** mul_2_s64_x_untied:
**	movprfx	z0, z1
**	mul	z0\.d, z0\.d, #2
**	ret
*/
TEST_UNIFORM_Z (mul_2_s64_x_untied, svint64_t,
		z0 = svmul_n_s64_x (p0, z1, 2),
		z0 = svmul_x (p0, z1, 2))

/*
** mul_127_s64_x:
**	mul	z0\.d, z0\.d, #127
**	ret
*/
TEST_UNIFORM_Z (mul_127_s64_x, svint64_t,
		z0 = svmul_n_s64_x (p0, z0, 127),
		z0 = svmul_x (p0, z0, 127))

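/* The unpredicated MUL (immediate) encoding only accepts a signed 8-bit
   immediate (-128..127), so 128 and 255 are expected to be moved into a
   register and handled by the predicated form instead.  */
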
/*
** mul_128_s64_x:
**	mov	(z[0-9]+\.d), #128
**	mul	z0\.d, p0/m, z0\.d, \1
**	ret
*/
TEST_UNIFORM_Z (mul_128_s64_x, svint64_t,
		z0 = svmul_n_s64_x (p0, z0, 128),
		z0 = svmul_x (p0, z0, 128))

/*
** mul_255_s64_x:
**	mov	(z[0-9]+\.d), #255
**	mul	z0\.d, p0/m, z0\.d, \1
**	ret
*/
TEST_UNIFORM_Z (mul_255_s64_x, svint64_t,
		z0 = svmul_n_s64_x (p0, z0, 255),
		z0 = svmul_x (p0, z0, 255))

/*
** mul_m1_s64_x:
**	mul	z0\.d, z0\.d, #-1
**	ret
*/
TEST_UNIFORM_Z (mul_m1_s64_x, svint64_t,
		z0 = svmul_n_s64_x (p0, z0, -1),
		z0 = svmul_x (p0, z0, -1))

/*
** mul_m127_s64_x:
**	mul	z0\.d, z0\.d, #-127
**	ret
*/
TEST_UNIFORM_Z (mul_m127_s64_x, svint64_t,
		z0 = svmul_n_s64_x (p0, z0, -127),
		z0 = svmul_x (p0, z0, -127))

/*
** mul_m128_s64_x:
**	mul	z0\.d, z0\.d, #-128
**	ret
*/
TEST_UNIFORM_Z (mul_m128_s64_x, svint64_t,
		z0 = svmul_n_s64_x (p0, z0, -128),
		z0 = svmul_x (p0, z0, -128))