gcc-13.2.0/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/and_s32.c
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */

#include "test_sve_acle.h"
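
/* Each TEST_UNIFORM_Z / TEST_UNIFORM_ZX invocation below defines one test
   function, named after its first argument, via the macros in
   test_sve_acle.h; its last two arguments exercise the type-suffixed
   intrinsic (e.g. svand_s32_m) and the overloaded form (svand_m).  The
   "** name:" comment before each invocation lists, as regular expressions,
   the assembly that check-function-bodies is expected to match for that
   function.  As a rough stand-alone sketch of what the first, merging
   case computes (the function name f and its arguments are illustrative
   only, not part of the test harness):

     svint32_t f (svbool_t pg, svint32_t a, svint32_t b)
     {
       return svand_s32_m (pg, a, b);  // active lanes: a & b; inactive: a
     }
*/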

/*
** and_s32_m_tied1:
**	and	z0\.s, p0/m, z0\.s, z1\.s
**	ret
*/
TEST_UNIFORM_Z (and_s32_m_tied1, svint32_t,
		z0 = svand_s32_m (p0, z0, z1),
		z0 = svand_m (p0, z0, z1))

/*
** and_s32_m_tied2:
**	mov	(z[0-9]+)\.d, z0\.d
**	movprfx	z0, z1
**	and	z0\.s, p0/m, z0\.s, \1\.s
**	ret
*/
TEST_UNIFORM_Z (and_s32_m_tied2, svint32_t,
		z0 = svand_s32_m (p0, z1, z0),
		z0 = svand_m (p0, z1, z0))

/*
** and_s32_m_untied:
**	movprfx	z0, z1
**	and	z0\.s, p0/m, z0\.s, z2\.s
**	ret
*/
TEST_UNIFORM_Z (and_s32_m_untied, svint32_t,
		z0 = svand_s32_m (p0, z1, z2),
		z0 = svand_m (p0, z1, z2))

/*
** and_w0_s32_m_tied1:
**	mov	(z[0-9]+\.s), w0
**	and	z0\.s, p0/m, z0\.s, \1
**	ret
*/
TEST_UNIFORM_ZX (and_w0_s32_m_tied1, svint32_t, int32_t,
		 z0 = svand_n_s32_m (p0, z0, x0),
		 z0 = svand_m (p0, z0, x0))

/*
** and_w0_s32_m_untied:
**	mov	(z[0-9]+\.s), w0
**	movprfx	z0, z1
**	and	z0\.s, p0/m, z0\.s, \1
**	ret
*/
TEST_UNIFORM_ZX (and_w0_s32_m_untied, svint32_t, int32_t,
		 z0 = svand_n_s32_m (p0, z1, x0),
		 z0 = svand_m (p0, z1, x0))

/*
** and_1_s32_m_tied1:
**	mov	(z[0-9]+\.s), #1
**	and	z0\.s, p0/m, z0\.s, \1
**	ret
*/
TEST_UNIFORM_Z (and_1_s32_m_tied1, svint32_t,
		z0 = svand_n_s32_m (p0, z0, 1),
		z0 = svand_m (p0, z0, 1))

/*
** and_1_s32_m_untied: { xfail *-*-* }
**	mov	(z[0-9]+\.s), #1
**	movprfx	z0, z1
**	and	z0\.s, p0/m, z0\.s, \1
**	ret
*/
TEST_UNIFORM_Z (and_1_s32_m_untied, svint32_t,
		z0 = svand_n_s32_m (p0, z1, 1),
		z0 = svand_m (p0, z1, 1))

/*
** and_m2_s32_m:
**	mov	(z[0-9]+\.s), #-2
**	and	z0\.s, p0/m, z0\.s, \1
**	ret
*/
TEST_UNIFORM_Z (and_m2_s32_m, svint32_t,
		z0 = svand_n_s32_m (p0, z0, -2),
		z0 = svand_m (p0, z0, -2))

/*
** and_255_s32_m_tied1:
**	uxtb	z0\.s, p0/m, z0\.s
**	ret
*/
TEST_UNIFORM_Z (and_255_s32_m_tied1, svint32_t,
		z0 = svand_n_s32_m (p0, z0, 255),
		z0 = svand_m (p0, z0, 255))

/*
** and_255_s32_m_untied:
**	movprfx	z0, z1
**	uxtb	z0\.s, p0/m, z1\.s
**	ret
*/
TEST_UNIFORM_Z (and_255_s32_m_untied, svint32_t,
		z0 = svand_n_s32_m (p0, z1, 255),
		z0 = svand_m (p0, z1, 255))

/*
** and_65535_s32_m_tied1:
**	uxth	z0\.s, p0/m, z0\.s
**	ret
*/
TEST_UNIFORM_Z (and_65535_s32_m_tied1, svint32_t,
		z0 = svand_n_s32_m (p0, z0, 65535),
		z0 = svand_m (p0, z0, 65535))

/*
** and_65535_s32_m_untied:
**	movprfx	z0, z1
**	uxth	z0\.s, p0/m, z1\.s
**	ret
*/
TEST_UNIFORM_Z (and_65535_s32_m_untied, svint32_t,
		z0 = svand_n_s32_m (p0, z1, 65535),
		z0 = svand_m (p0, z1, 65535))

/*
** and_s32_z_tied1:
**	movprfx	z0\.s, p0/z, z0\.s
**	and	z0\.s, p0/m, z0\.s, z1\.s
**	ret
*/
TEST_UNIFORM_Z (and_s32_z_tied1, svint32_t,
		z0 = svand_s32_z (p0, z0, z1),
		z0 = svand_z (p0, z0, z1))

/*
** and_s32_z_tied2:
**	movprfx	z0\.s, p0/z, z0\.s
**	and	z0\.s, p0/m, z0\.s, z1\.s
**	ret
*/
TEST_UNIFORM_Z (and_s32_z_tied2, svint32_t,
		z0 = svand_s32_z (p0, z1, z0),
		z0 = svand_z (p0, z1, z0))

/*
** and_s32_z_untied:
** (
**	movprfx	z0\.s, p0/z, z1\.s
**	and	z0\.s, p0/m, z0\.s, z2\.s
** |
**	movprfx	z0\.s, p0/z, z2\.s
**	and	z0\.s, p0/m, z0\.s, z1\.s
** )
**	ret
*/
TEST_UNIFORM_Z (and_s32_z_untied, svint32_t,
		z0 = svand_s32_z (p0, z1, z2),
		z0 = svand_z (p0, z1, z2))

/*
** and_w0_s32_z_tied1:
**	mov	(z[0-9]+\.s), w0
**	movprfx	z0\.s, p0/z, z0\.s
**	and	z0\.s, p0/m, z0\.s, \1
**	ret
*/
TEST_UNIFORM_ZX (and_w0_s32_z_tied1, svint32_t, int32_t,
		 z0 = svand_n_s32_z (p0, z0, x0),
		 z0 = svand_z (p0, z0, x0))

/*
** and_w0_s32_z_untied:
**	mov	(z[0-9]+\.s), w0
** (
**	movprfx	z0\.s, p0/z, z1\.s
**	and	z0\.s, p0/m, z0\.s, \1
** |
**	movprfx	z0\.s, p0/z, \1
**	and	z0\.s, p0/m, z0\.s, z1\.s
** )
**	ret
*/
TEST_UNIFORM_ZX (and_w0_s32_z_untied, svint32_t, int32_t,
		 z0 = svand_n_s32_z (p0, z1, x0),
		 z0 = svand_z (p0, z1, x0))

/*
** and_1_s32_z_tied1:
**	mov	(z[0-9]+\.s), #1
**	movprfx	z0\.s, p0/z, z0\.s
**	and	z0\.s, p0/m, z0\.s, \1
**	ret
*/
TEST_UNIFORM_Z (and_1_s32_z_tied1, svint32_t,
		z0 = svand_n_s32_z (p0, z0, 1),
		z0 = svand_z (p0, z0, 1))

/*
** and_1_s32_z_untied:
**	mov	(z[0-9]+\.s), #1
** (
**	movprfx	z0\.s, p0/z, z1\.s
**	and	z0\.s, p0/m, z0\.s, \1
** |
**	movprfx	z0\.s, p0/z, \1
**	and	z0\.s, p0/m, z0\.s, z1\.s
** )
**	ret
*/
TEST_UNIFORM_Z (and_1_s32_z_untied, svint32_t,
		z0 = svand_n_s32_z (p0, z1, 1),
		z0 = svand_z (p0, z1, 1))

/*
** and_255_s32_z_tied1:
** (
**	mov	(z[0-9]+)\.d, z0\.d
**	movprfx	z0\.s, p0/z, \1\.s
**	uxtb	z0\.s, p0/m, \1\.s
** |
**	mov	(z[0-9]+\.s), #255
**	movprfx	z0\.s, p0/z, z0\.s
**	and	z0\.s, p0/m, z0\.s, \1
** )
**	ret
*/
TEST_UNIFORM_Z (and_255_s32_z_tied1, svint32_t,
		z0 = svand_n_s32_z (p0, z0, 255),
		z0 = svand_z (p0, z0, 255))

/*
** and_255_s32_z_untied:
**	movprfx	z0\.s, p0/z, z1\.s
**	uxtb	z0\.s, p0/m, z1\.s
**	ret
*/
TEST_UNIFORM_Z (and_255_s32_z_untied, svint32_t,
		z0 = svand_n_s32_z (p0, z1, 255),
		z0 = svand_z (p0, z1, 255))

/*
** and_65535_s32_z_tied1:
** (
**	mov	(z[0-9]+)\.d, z0\.d
**	movprfx	z0\.s, p0/z, \1\.s
**	uxth	z0\.s, p0/m, \1\.s
** |
**	mov	(z[0-9]+\.s), #65535
**	movprfx	z0\.s, p0/z, z0\.s
**	and	z0\.s, p0/m, z0\.s, \1
** )
**	ret
*/
TEST_UNIFORM_Z (and_65535_s32_z_tied1, svint32_t,
		z0 = svand_n_s32_z (p0, z0, 65535),
		z0 = svand_z (p0, z0, 65535))

/*
** and_65535_s32_z_untied:
**	movprfx	z0\.s, p0/z, z1\.s
**	uxth	z0\.s, p0/m, z1\.s
**	ret
*/
TEST_UNIFORM_Z (and_65535_s32_z_untied, svint32_t,
		z0 = svand_n_s32_z (p0, z1, 65535),
		z0 = svand_z (p0, z1, 65535))

/*
** and_s32_x_tied1:
**	and	z0\.d, (z0\.d, z1\.d|z1\.d, z0\.d)
**	ret
*/
TEST_UNIFORM_Z (and_s32_x_tied1, svint32_t,
		z0 = svand_s32_x (p0, z0, z1),
		z0 = svand_x (p0, z0, z1))

/*
** and_s32_x_tied2:
**	and	z0\.d, (z0\.d, z1\.d|z1\.d, z0\.d)
**	ret
*/
TEST_UNIFORM_Z (and_s32_x_tied2, svint32_t,
		z0 = svand_s32_x (p0, z1, z0),
		z0 = svand_x (p0, z1, z0))

/*
** and_s32_x_untied:
**	and	z0\.d, (z1\.d, z2\.d|z2\.d, z1\.d)
**	ret
*/
TEST_UNIFORM_Z (and_s32_x_untied, svint32_t,
		z0 = svand_s32_x (p0, z1, z2),
		z0 = svand_x (p0, z1, z2))

/*
** and_w0_s32_x_tied1:
**	mov	(z[0-9]+)\.s, w0
**	and	z0\.d, (z0\.d, \1\.d|\1\.d, z0\.d)
**	ret
*/
TEST_UNIFORM_ZX (and_w0_s32_x_tied1, svint32_t, int32_t,
		 z0 = svand_n_s32_x (p0, z0, x0),
		 z0 = svand_x (p0, z0, x0))

/*
** and_w0_s32_x_untied:
**	mov	(z[0-9]+)\.s, w0
**	and	z0\.d, (z1\.d, \1\.d|\1\.d, z1\.d)
**	ret
*/
TEST_UNIFORM_ZX (and_w0_s32_x_untied, svint32_t, int32_t,
		 z0 = svand_n_s32_x (p0, z1, x0),
		 z0 = svand_x (p0, z1, x0))

/*
** and_1_s32_x_tied1:
**	and	z0\.s, z0\.s, #0x1
**	ret
*/
TEST_UNIFORM_Z (and_1_s32_x_tied1, svint32_t,
		z0 = svand_n_s32_x (p0, z0, 1),
		z0 = svand_x (p0, z0, 1))

/*
** and_1_s32_x_untied:
**	movprfx	z0, z1
**	and	z0\.s, z0\.s, #0x1
**	ret
*/
TEST_UNIFORM_Z (and_1_s32_x_untied, svint32_t,
		z0 = svand_n_s32_x (p0, z1, 1),
		z0 = svand_x (p0, z1, 1))

/*
** and_127_s32_x:
**	and	z0\.s, z0\.s, #0x7f
**	ret
*/
TEST_UNIFORM_Z (and_127_s32_x, svint32_t,
		z0 = svand_n_s32_x (p0, z0, 127),
		z0 = svand_x (p0, z0, 127))

/*
** and_128_s32_x:
**	and	z0\.s, z0\.s, #0x80
**	ret
*/
TEST_UNIFORM_Z (and_128_s32_x, svint32_t,
		z0 = svand_n_s32_x (p0, z0, 128),
		z0 = svand_x (p0, z0, 128))

/*
** and_255_s32_x:
**	and	z0\.s, z0\.s, #0xff
**	ret
*/
TEST_UNIFORM_Z (and_255_s32_x, svint32_t,
		z0 = svand_n_s32_x (p0, z0, 255),
		z0 = svand_x (p0, z0, 255))

/*
** and_256_s32_x:
**	and	z0\.s, z0\.s, #0x100
**	ret
*/
TEST_UNIFORM_Z (and_256_s32_x, svint32_t,
		z0 = svand_n_s32_x (p0, z0, 256),
		z0 = svand_x (p0, z0, 256))

/* TODO: Bad code and needs fixing.  */
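/* (257 == 0x101 is neither a valid SVE bitmask immediate for .s elements
   nor representable by a single DUP/MOV vector immediate, which is
   presumably why the generated code is currently poor; no asm body is
   matched for this case.)  */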
TEST_UNIFORM_Z (and_257_s32_x, svint32_t,
		z0 = svand_n_s32_x (p0, z0, 257),
		z0 = svand_x (p0, z0, 257))

/*
** and_512_s32_x:
**	and	z0\.s, z0\.s, #0x200
**	ret
*/
TEST_UNIFORM_Z (and_512_s32_x, svint32_t,
		z0 = svand_n_s32_x (p0, z0, 512),
		z0 = svand_x (p0, z0, 512))

/*
** and_65280_s32_x:
**	and	z0\.s, z0\.s, #0xff00
**	ret
*/
TEST_UNIFORM_Z (and_65280_s32_x, svint32_t,
		z0 = svand_n_s32_x (p0, z0, 0xff00),
		z0 = svand_x (p0, z0, 0xff00))

/*
** and_m127_s32_x:
**	and	z0\.s, z0\.s, #0xffffff81
**	ret
*/
TEST_UNIFORM_Z (and_m127_s32_x, svint32_t,
		z0 = svand_n_s32_x (p0, z0, -127),
		z0 = svand_x (p0, z0, -127))

/*
** and_m128_s32_x:
**	and	z0\.s, z0\.s, #0xffffff80
**	ret
*/
TEST_UNIFORM_Z (and_m128_s32_x, svint32_t,
		z0 = svand_n_s32_x (p0, z0, -128),
		z0 = svand_x (p0, z0, -128))

/*
** and_m255_s32_x:
**	and	z0\.s, z0\.s, #0xffffff01
**	ret
*/
TEST_UNIFORM_Z (and_m255_s32_x, svint32_t,
		z0 = svand_n_s32_x (p0, z0, -255),
		z0 = svand_x (p0, z0, -255))

/*
** and_m256_s32_x:
**	and	z0\.s, z0\.s, #0xffffff00
**	ret
*/
TEST_UNIFORM_Z (and_m256_s32_x, svint32_t,
		z0 = svand_n_s32_x (p0, z0, -256),
		z0 = svand_x (p0, z0, -256))

/*
** and_m257_s32_x:
**	and	z0\.s, z0\.s, #0xfffffeff
**	ret
*/
TEST_UNIFORM_Z (and_m257_s32_x, svint32_t,
		z0 = svand_n_s32_x (p0, z0, -257),
		z0 = svand_x (p0, z0, -257))

/*
** and_m512_s32_x:
**	and	z0\.s, z0\.s, #0xfffffe00
**	ret
*/
TEST_UNIFORM_Z (and_m512_s32_x, svint32_t,
		z0 = svand_n_s32_x (p0, z0, -512),
		z0 = svand_x (p0, z0, -512))

/*
** and_m32768_s32_x:
**	and	z0\.s, z0\.s, #0xffff8000
**	ret
*/
TEST_UNIFORM_Z (and_m32768_s32_x, svint32_t,
		z0 = svand_n_s32_x (p0, z0, -0x8000),
		z0 = svand_x (p0, z0, -0x8000))

/*
** and_5_s32_x:
**	mov	(z[0-9]+)\.s, #5
**	and	z0\.d, (z0\.d, \1\.d|\1\.d, z0\.d)
**	ret
*/
TEST_UNIFORM_Z (and_5_s32_x, svint32_t,
		z0 = svand_n_s32_x (p0, z0, 5),
		z0 = svand_x (p0, z0, 5))