/* gcc-13.2.0/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/and_s16.c  */
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */

#include "test_sve_acle.h"
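
/* Each comment block below whose lines begin with "**" names the function
   that the following TEST_* macro expands to and lists the regexps that
   check-function-bodies matches against that function's assembly.  The
   suffixes follow the usual ACLE test conventions: _m, _z and _x select
   merging, zeroing and "don't care" predication; tied1/tied2 tie the result
   register to the first/second input while untied uses a distinct
   destination; the _n forms (w0_..., _1_..., _255_..., etc.) cover scalar
   and immediate operands.  A few untied patterns carry "{ xfail *-*-* }",
   presumably because the ideal movprfx sequence is not generated yet.  */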

/*
** and_s16_m_tied1:
**	and	z0\.h, p0/m, z0\.h, z1\.h
**	ret
*/
TEST_UNIFORM_Z (and_s16_m_tied1, svint16_t,
		z0 = svand_s16_m (p0, z0, z1),
		z0 = svand_m (p0, z0, z1))

/*
** and_s16_m_tied2:
**	mov	(z[0-9]+)\.d, z0\.d
**	movprfx	z0, z1
**	and	z0\.h, p0/m, z0\.h, \1\.h
**	ret
*/
TEST_UNIFORM_Z (and_s16_m_tied2, svint16_t,
		z0 = svand_s16_m (p0, z1, z0),
		z0 = svand_m (p0, z1, z0))

/*
** and_s16_m_untied:
**	movprfx	z0, z1
**	and	z0\.h, p0/m, z0\.h, z2\.h
**	ret
*/
TEST_UNIFORM_Z (and_s16_m_untied, svint16_t,
		z0 = svand_s16_m (p0, z1, z2),
		z0 = svand_m (p0, z1, z2))

/*
** and_w0_s16_m_tied1:
**	mov	(z[0-9]+\.h), w0
**	and	z0\.h, p0/m, z0\.h, \1
**	ret
*/
TEST_UNIFORM_ZX (and_w0_s16_m_tied1, svint16_t, int16_t,
		 z0 = svand_n_s16_m (p0, z0, x0),
		 z0 = svand_m (p0, z0, x0))

/*
** and_w0_s16_m_untied: { xfail *-*-* }
**	mov	(z[0-9]+\.h), w0
**	movprfx	z0, z1
**	and	z0\.h, p0/m, z0\.h, \1
**	ret
*/
TEST_UNIFORM_ZX (and_w0_s16_m_untied, svint16_t, int16_t,
		 z0 = svand_n_s16_m (p0, z1, x0),
		 z0 = svand_m (p0, z1, x0))

/*
** and_1_s16_m_tied1:
**	mov	(z[0-9]+\.h), #1
**	and	z0\.h, p0/m, z0\.h, \1
**	ret
*/
TEST_UNIFORM_Z (and_1_s16_m_tied1, svint16_t,
		z0 = svand_n_s16_m (p0, z0, 1),
		z0 = svand_m (p0, z0, 1))

/*
** and_1_s16_m_untied: { xfail *-*-* }
**	mov	(z[0-9]+\.h), #1
**	movprfx	z0, z1
**	and	z0\.h, p0/m, z0\.h, \1
**	ret
*/
TEST_UNIFORM_Z (and_1_s16_m_untied, svint16_t,
		z0 = svand_n_s16_m (p0, z1, 1),
		z0 = svand_m (p0, z1, 1))

/*
** and_m2_s16_m:
**	mov	(z[0-9]+\.h), #-2
**	and	z0\.h, p0/m, z0\.h, \1
**	ret
*/
TEST_UNIFORM_Z (and_m2_s16_m, svint16_t,
		z0 = svand_n_s16_m (p0, z0, -2),
		z0 = svand_m (p0, z0, -2))

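/* ANDing a 16-bit element with 255 keeps only its low byte, so the merging
   form is expected to collapse to a single UXTB (zero-extend byte) rather
   than a mov/and pair.  */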
/*
** and_255_s16_m_tied1:
**	uxtb	z0\.h, p0/m, z0\.h
**	ret
*/
TEST_UNIFORM_Z (and_255_s16_m_tied1, svint16_t,
		z0 = svand_n_s16_m (p0, z0, 255),
		z0 = svand_m (p0, z0, 255))

/*
** and_255_s16_m_untied:
**	movprfx	z0, z1
**	uxtb	z0\.h, p0/m, z1\.h
**	ret
*/
TEST_UNIFORM_Z (and_255_s16_m_untied, svint16_t,
		z0 = svand_n_s16_m (p0, z1, 255),
		z0 = svand_m (p0, z1, 255))

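/* The predicated AND is a destructive merging operation, so the zeroing
   (_z) forms are expected to zero the destination first with a
   "movprfx ..., p0/z, ..." and then issue the merging AND; where the
   operation is commutative either operand order is accepted.  */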
/*
** and_s16_z_tied1:
**	movprfx	z0\.h, p0/z, z0\.h
**	and	z0\.h, p0/m, z0\.h, z1\.h
**	ret
*/
TEST_UNIFORM_Z (and_s16_z_tied1, svint16_t,
		z0 = svand_s16_z (p0, z0, z1),
		z0 = svand_z (p0, z0, z1))

/*
** and_s16_z_tied2:
**	movprfx	z0\.h, p0/z, z0\.h
**	and	z0\.h, p0/m, z0\.h, z1\.h
**	ret
*/
TEST_UNIFORM_Z (and_s16_z_tied2, svint16_t,
		z0 = svand_s16_z (p0, z1, z0),
		z0 = svand_z (p0, z1, z0))

/*
** and_s16_z_untied:
** (
**	movprfx	z0\.h, p0/z, z1\.h
**	and	z0\.h, p0/m, z0\.h, z2\.h
** |
**	movprfx	z0\.h, p0/z, z2\.h
**	and	z0\.h, p0/m, z0\.h, z1\.h
** )
**	ret
*/
TEST_UNIFORM_Z (and_s16_z_untied, svint16_t,
		z0 = svand_s16_z (p0, z1, z2),
		z0 = svand_z (p0, z1, z2))

/*
** and_w0_s16_z_tied1:
**	mov	(z[0-9]+\.h), w0
**	movprfx	z0\.h, p0/z, z0\.h
**	and	z0\.h, p0/m, z0\.h, \1
**	ret
*/
TEST_UNIFORM_ZX (and_w0_s16_z_tied1, svint16_t, int16_t,
		 z0 = svand_n_s16_z (p0, z0, x0),
		 z0 = svand_z (p0, z0, x0))

/*
** and_w0_s16_z_untied:
**	mov	(z[0-9]+\.h), w0
** (
**	movprfx	z0\.h, p0/z, z1\.h
**	and	z0\.h, p0/m, z0\.h, \1
** |
**	movprfx	z0\.h, p0/z, \1
**	and	z0\.h, p0/m, z0\.h, z1\.h
** )
**	ret
*/
TEST_UNIFORM_ZX (and_w0_s16_z_untied, svint16_t, int16_t,
		 z0 = svand_n_s16_z (p0, z1, x0),
		 z0 = svand_z (p0, z1, x0))

/*
** and_1_s16_z_tied1:
**	mov	(z[0-9]+\.h), #1
**	movprfx	z0\.h, p0/z, z0\.h
**	and	z0\.h, p0/m, z0\.h, \1
**	ret
*/
TEST_UNIFORM_Z (and_1_s16_z_tied1, svint16_t,
		z0 = svand_n_s16_z (p0, z0, 1),
		z0 = svand_z (p0, z0, 1))

/*
** and_1_s16_z_untied:
**	mov	(z[0-9]+\.h), #1
** (
**	movprfx	z0\.h, p0/z, z1\.h
**	and	z0\.h, p0/m, z0\.h, \1
** |
**	movprfx	z0\.h, p0/z, \1
**	and	z0\.h, p0/m, z0\.h, z1\.h
** )
**	ret
*/
TEST_UNIFORM_Z (and_1_s16_z_untied, svint16_t,
		z0 = svand_n_s16_z (p0, z1, 1),
		z0 = svand_z (p0, z1, 1))

/*
** and_255_s16_z_tied1:
** (
**	mov	(z[0-9]+)\.d, z0\.d
**	movprfx	z0\.h, p0/z, \1\.h
**	uxtb	z0\.h, p0/m, \1\.h
** |
**	mov	(z[0-9]+\.h), #255
**	movprfx	z0\.h, p0/z, z0\.h
**	and	z0\.h, p0/m, z0\.h, \1
** )
**	ret
*/
TEST_UNIFORM_Z (and_255_s16_z_tied1, svint16_t,
		z0 = svand_n_s16_z (p0, z0, 255),
		z0 = svand_z (p0, z0, 255))

/*
** and_255_s16_z_untied:
**	movprfx	z0\.h, p0/z, z1\.h
**	uxtb	z0\.h, p0/m, z1\.h
**	ret
*/
TEST_UNIFORM_Z (and_255_s16_z_untied, svint16_t,
		z0 = svand_n_s16_z (p0, z1, 255),
		z0 = svand_z (p0, z1, 255))

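/* With "don't care" (_x) predication the inactive lanes can hold anything,
   so an unpredicated AND is acceptable.  Bitwise AND is independent of the
   element size, which is why these patterns match the .d form even for s16
   data and allow either operand order.  */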
/*
** and_s16_x_tied1:
**	and	z0\.d, (z0\.d, z1\.d|z1\.d, z0\.d)
**	ret
*/
TEST_UNIFORM_Z (and_s16_x_tied1, svint16_t,
		z0 = svand_s16_x (p0, z0, z1),
		z0 = svand_x (p0, z0, z1))

/*
** and_s16_x_tied2:
**	and	z0\.d, (z0\.d, z1\.d|z1\.d, z0\.d)
**	ret
*/
TEST_UNIFORM_Z (and_s16_x_tied2, svint16_t,
		z0 = svand_s16_x (p0, z1, z0),
		z0 = svand_x (p0, z1, z0))

/*
** and_s16_x_untied:
**	and	z0\.d, (z1\.d, z2\.d|z2\.d, z1\.d)
**	ret
*/
TEST_UNIFORM_Z (and_s16_x_untied, svint16_t,
		z0 = svand_s16_x (p0, z1, z2),
		z0 = svand_x (p0, z1, z2))

/*
** and_w0_s16_x_tied1:
**	mov	(z[0-9]+)\.h, w0
**	and	z0\.d, (z0\.d, \1\.d|\1\.d, z0\.d)
**	ret
*/
TEST_UNIFORM_ZX (and_w0_s16_x_tied1, svint16_t, int16_t,
		 z0 = svand_n_s16_x (p0, z0, x0),
		 z0 = svand_x (p0, z0, x0))

/*
** and_w0_s16_x_untied:
**	mov	(z[0-9]+)\.h, w0
**	and	z0\.d, (z1\.d, \1\.d|\1\.d, z1\.d)
**	ret
*/
TEST_UNIFORM_ZX (and_w0_s16_x_untied, svint16_t, int16_t,
		 z0 = svand_n_s16_x (p0, z1, x0),
		 z0 = svand_x (p0, z1, x0))

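/* Constants that are valid SVE logical bitmask immediates are expected to
   use the unpredicated AND (immediate) form directly on the .h elements;
   the cases below cover representative positive and negative masks.  */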
/*
** and_1_s16_x_tied1:
**	and	z0\.h, z0\.h, #0x1
**	ret
*/
TEST_UNIFORM_Z (and_1_s16_x_tied1, svint16_t,
		z0 = svand_n_s16_x (p0, z0, 1),
		z0 = svand_x (p0, z0, 1))

/*
** and_1_s16_x_untied:
**	movprfx	z0, z1
**	and	z0\.h, z0\.h, #0x1
**	ret
*/
TEST_UNIFORM_Z (and_1_s16_x_untied, svint16_t,
		z0 = svand_n_s16_x (p0, z1, 1),
		z0 = svand_x (p0, z1, 1))

/*
** and_127_s16_x:
**	and	z0\.h, z0\.h, #0x7f
**	ret
*/
TEST_UNIFORM_Z (and_127_s16_x, svint16_t,
		z0 = svand_n_s16_x (p0, z0, 127),
		z0 = svand_x (p0, z0, 127))

/*
** and_128_s16_x:
**	and	z0\.h, z0\.h, #0x80
**	ret
*/
TEST_UNIFORM_Z (and_128_s16_x, svint16_t,
		z0 = svand_n_s16_x (p0, z0, 128),
		z0 = svand_x (p0, z0, 128))

/*
** and_255_s16_x:
**	and	z0\.h, z0\.h, #0xff
**	ret
*/
TEST_UNIFORM_Z (and_255_s16_x, svint16_t,
		z0 = svand_n_s16_x (p0, z0, 255),
		z0 = svand_x (p0, z0, 255))

/*
** and_256_s16_x:
**	and	z0\.h, z0\.h, #0x100
**	ret
*/
TEST_UNIFORM_Z (and_256_s16_x, svint16_t,
		z0 = svand_n_s16_x (p0, z0, 256),
		z0 = svand_x (p0, z0, 256))

/*
** and_257_s16_x:
**	and	z0\.h, z0\.h, #0x101
**	ret
*/
TEST_UNIFORM_Z (and_257_s16_x, svint16_t,
		z0 = svand_n_s16_x (p0, z0, 257),
		z0 = svand_x (p0, z0, 257))

/*
** and_512_s16_x:
**	and	z0\.h, z0\.h, #0x200
**	ret
*/
TEST_UNIFORM_Z (and_512_s16_x, svint16_t,
		z0 = svand_n_s16_x (p0, z0, 512),
		z0 = svand_x (p0, z0, 512))

/*
** and_65280_s16_x:
**	and	z0\.h, z0\.h, #0xff00
**	ret
*/
TEST_UNIFORM_Z (and_65280_s16_x, svint16_t,
		z0 = svand_n_s16_x (p0, z0, 0xff00),
		z0 = svand_x (p0, z0, 0xff00))

/*
** and_m127_s16_x:
**	and	z0\.h, z0\.h, #0xff81
**	ret
*/
TEST_UNIFORM_Z (and_m127_s16_x, svint16_t,
		z0 = svand_n_s16_x (p0, z0, -127),
		z0 = svand_x (p0, z0, -127))

/*
** and_m128_s16_x:
**	and	z0\.h, z0\.h, #0xff80
**	ret
*/
TEST_UNIFORM_Z (and_m128_s16_x, svint16_t,
		z0 = svand_n_s16_x (p0, z0, -128),
		z0 = svand_x (p0, z0, -128))

/*
** and_m255_s16_x:
**	and	z0\.h, z0\.h, #0xff01
**	ret
*/
TEST_UNIFORM_Z (and_m255_s16_x, svint16_t,
		z0 = svand_n_s16_x (p0, z0, -255),
		z0 = svand_x (p0, z0, -255))

/*
** and_m256_s16_x:
**	and	z0\.h, z0\.h, #0xff00
**	ret
*/
TEST_UNIFORM_Z (and_m256_s16_x, svint16_t,
		z0 = svand_n_s16_x (p0, z0, -256),
		z0 = svand_x (p0, z0, -256))

/*
** and_m257_s16_x:
**	and	z0\.h, z0\.h, #0xfeff
**	ret
*/
TEST_UNIFORM_Z (and_m257_s16_x, svint16_t,
		z0 = svand_n_s16_x (p0, z0, -257),
		z0 = svand_x (p0, z0, -257))

/*
** and_m512_s16_x:
**	and	z0\.h, z0\.h, #0xfe00
**	ret
*/
TEST_UNIFORM_Z (and_m512_s16_x, svint16_t,
		z0 = svand_n_s16_x (p0, z0, -512),
		z0 = svand_x (p0, z0, -512))

/*
** and_m32768_s16_x:
**	and	z0\.h, z0\.h, #0x8000
**	ret
*/
TEST_UNIFORM_Z (and_m32768_s16_x, svint16_t,
		z0 = svand_n_s16_x (p0, z0, -0x8000),
		z0 = svand_x (p0, z0, -0x8000))

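/* 5 (binary 101) is not a (rotated) run of contiguous set bits, so it is
   not a valid bitmask immediate; the constant has to be materialised in a
   register and combined with an unpredicated AND.  */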
/*
** and_5_s16_x:
**	mov	(z[0-9]+)\.h, #5
**	and	z0\.d, (z0\.d, \1\.d|\1\.d, z0\.d)
**	ret
*/
TEST_UNIFORM_Z (and_5_s16_x, svint16_t,
		z0 = svand_n_s16_x (p0, z0, 5),
		z0 = svand_x (p0, z0, 5))