gcc-13.2.0/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/and_s64.c
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */

#include "test_sve_acle.h"

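/* Each TEST_UNIFORM_Z / TEST_UNIFORM_ZX invocation below supplies both the
   type-suffixed intrinsic (e.g. svand_s64_m) and its type-generic overload
   (svand_m); the macros in test_sve_acle.h wrap them in a test function,
   and check-function-bodies matches the assembly generated for that
   function against the "**" regex comment preceding the invocation.  */
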
/*
** and_s64_m_tied1:
**	and	z0\.d, p0/m, z0\.d, z1\.d
**	ret
*/
TEST_UNIFORM_Z (and_s64_m_tied1, svint64_t,
		z0 = svand_s64_m (p0, z0, z1),
		z0 = svand_m (p0, z0, z1))

/*
** and_s64_m_tied2:
**	mov	(z[0-9]+\.d), z0\.d
**	movprfx	z0, z1
**	and	z0\.d, p0/m, z0\.d, \1
**	ret
*/
TEST_UNIFORM_Z (and_s64_m_tied2, svint64_t,
		z0 = svand_s64_m (p0, z1, z0),
		z0 = svand_m (p0, z1, z0))

/*
** and_s64_m_untied:
**	movprfx	z0, z1
**	and	z0\.d, p0/m, z0\.d, z2\.d
**	ret
*/
TEST_UNIFORM_Z (and_s64_m_untied, svint64_t,
		z0 = svand_s64_m (p0, z1, z2),
		z0 = svand_m (p0, z1, z2))

/*
** and_x0_s64_m_tied1:
**	mov	(z[0-9]+\.d), x0
**	and	z0\.d, p0/m, z0\.d, \1
**	ret
*/
TEST_UNIFORM_ZX (and_x0_s64_m_tied1, svint64_t, int64_t,
		 z0 = svand_n_s64_m (p0, z0, x0),
		 z0 = svand_m (p0, z0, x0))

/*
** and_x0_s64_m_untied:
**	mov	(z[0-9]+\.d), x0
**	movprfx	z0, z1
**	and	z0\.d, p0/m, z0\.d, \1
**	ret
*/
TEST_UNIFORM_ZX (and_x0_s64_m_untied, svint64_t, int64_t,
		 z0 = svand_n_s64_m (p0, z1, x0),
		 z0 = svand_m (p0, z1, x0))

/*
** and_1_s64_m_tied1:
**	mov	(z[0-9]+\.d), #1
**	and	z0\.d, p0/m, z0\.d, \1
**	ret
*/
TEST_UNIFORM_Z (and_1_s64_m_tied1, svint64_t,
		z0 = svand_n_s64_m (p0, z0, 1),
		z0 = svand_m (p0, z0, 1))

/*
** and_1_s64_m_untied: { xfail *-*-* }
**	mov	(z[0-9]+\.d), #1
**	movprfx	z0, z1
**	and	z0\.d, p0/m, z0\.d, \1
**	ret
*/
TEST_UNIFORM_Z (and_1_s64_m_untied, svint64_t,
		z0 = svand_n_s64_m (p0, z1, 1),
		z0 = svand_m (p0, z1, 1))

/*
** and_m2_s64_m:
**	mov	(z[0-9]+\.d), #-2
**	and	z0\.d, p0/m, z0\.d, \1
**	ret
*/
TEST_UNIFORM_Z (and_m2_s64_m, svint64_t,
		z0 = svand_n_s64_m (p0, z0, -2),
		z0 = svand_m (p0, z0, -2))

/*
** and_255_s64_m_tied1:
**	uxtb	z0\.d, p0/m, z0\.d
**	ret
*/
TEST_UNIFORM_Z (and_255_s64_m_tied1, svint64_t,
		z0 = svand_n_s64_m (p0, z0, 255),
		z0 = svand_m (p0, z0, 255))

/*
** and_255_s64_m_untied:
**	movprfx	z0, z1
**	uxtb	z0\.d, p0/m, z1\.d
**	ret
*/
TEST_UNIFORM_Z (and_255_s64_m_untied, svint64_t,
		z0 = svand_n_s64_m (p0, z1, 255),
		z0 = svand_m (p0, z1, 255))

/*
** and_65535_s64_m_tied1:
**	uxth	z0\.d, p0/m, z0\.d
**	ret
*/
TEST_UNIFORM_Z (and_65535_s64_m_tied1, svint64_t,
		z0 = svand_n_s64_m (p0, z0, 65535),
		z0 = svand_m (p0, z0, 65535))

/*
** and_65535_s64_m_untied:
**	movprfx	z0, z1
**	uxth	z0\.d, p0/m, z1\.d
**	ret
*/
TEST_UNIFORM_Z (and_65535_s64_m_untied, svint64_t,
		z0 = svand_n_s64_m (p0, z1, 65535),
		z0 = svand_m (p0, z1, 65535))

/*
** and_0xffffffff_s64_m_tied1:
**	uxtw	z0\.d, p0/m, z0\.d
**	ret
*/
TEST_UNIFORM_Z (and_0xffffffff_s64_m_tied1, svint64_t,
		z0 = svand_n_s64_m (p0, z0, 0xffffffff),
		z0 = svand_m (p0, z0, 0xffffffff))

/*
** and_0xffffffff_s64_m_untied:
**	movprfx	z0, z1
**	uxtw	z0\.d, p0/m, z1\.d
**	ret
*/
TEST_UNIFORM_Z (and_0xffffffff_s64_m_untied, svint64_t,
		z0 = svand_n_s64_m (p0, z1, 0xffffffff),
		z0 = svand_m (p0, z1, 0xffffffff))

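/* _z (zeroing) predication: inactive lanes of the result must be zero,
   hence the "movprfx z0.d, p0/z" prefixes expected below.  */
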
/*
** and_s64_z_tied1:
**	movprfx	z0\.d, p0/z, z0\.d
**	and	z0\.d, p0/m, z0\.d, z1\.d
**	ret
*/
TEST_UNIFORM_Z (and_s64_z_tied1, svint64_t,
		z0 = svand_s64_z (p0, z0, z1),
		z0 = svand_z (p0, z0, z1))

/*
** and_s64_z_tied2:
**	movprfx	z0\.d, p0/z, z0\.d
**	and	z0\.d, p0/m, z0\.d, z1\.d
**	ret
*/
TEST_UNIFORM_Z (and_s64_z_tied2, svint64_t,
		z0 = svand_s64_z (p0, z1, z0),
		z0 = svand_z (p0, z1, z0))

/*
** and_s64_z_untied:
** (
**	movprfx	z0\.d, p0/z, z1\.d
**	and	z0\.d, p0/m, z0\.d, z2\.d
** |
**	movprfx	z0\.d, p0/z, z2\.d
**	and	z0\.d, p0/m, z0\.d, z1\.d
** )
**	ret
*/
TEST_UNIFORM_Z (and_s64_z_untied, svint64_t,
		z0 = svand_s64_z (p0, z1, z2),
		z0 = svand_z (p0, z1, z2))

/*
** and_x0_s64_z_tied1:
**	mov	(z[0-9]+\.d), x0
**	movprfx	z0\.d, p0/z, z0\.d
**	and	z0\.d, p0/m, z0\.d, \1
**	ret
*/
TEST_UNIFORM_ZX (and_x0_s64_z_tied1, svint64_t, int64_t,
		 z0 = svand_n_s64_z (p0, z0, x0),
		 z0 = svand_z (p0, z0, x0))

/*
** and_x0_s64_z_untied:
**	mov	(z[0-9]+\.d), x0
** (
**	movprfx	z0\.d, p0/z, z1\.d
**	and	z0\.d, p0/m, z0\.d, \1
** |
**	movprfx	z0\.d, p0/z, \1
**	and	z0\.d, p0/m, z0\.d, z1\.d
** )
**	ret
*/
TEST_UNIFORM_ZX (and_x0_s64_z_untied, svint64_t, int64_t,
		 z0 = svand_n_s64_z (p0, z1, x0),
		 z0 = svand_z (p0, z1, x0))

/*
** and_1_s64_z_tied1:
**	mov	(z[0-9]+\.d), #1
**	movprfx	z0\.d, p0/z, z0\.d
**	and	z0\.d, p0/m, z0\.d, \1
**	ret
*/
TEST_UNIFORM_Z (and_1_s64_z_tied1, svint64_t,
		z0 = svand_n_s64_z (p0, z0, 1),
		z0 = svand_z (p0, z0, 1))

/*
** and_1_s64_z_untied:
**	mov	(z[0-9]+\.d), #1
** (
**	movprfx	z0\.d, p0/z, z1\.d
**	and	z0\.d, p0/m, z0\.d, \1
** |
**	movprfx	z0\.d, p0/z, \1
**	and	z0\.d, p0/m, z0\.d, z1\.d
** )
**	ret
*/
TEST_UNIFORM_Z (and_1_s64_z_untied, svint64_t,
		z0 = svand_n_s64_z (p0, z1, 1),
		z0 = svand_z (p0, z1, 1))

/*
** and_255_s64_z_tied1:
** (
**	mov	(z[0-9]+\.d), z0\.d
**	movprfx	z0\.d, p0/z, \1
**	uxtb	z0\.d, p0/m, \1
** |
**	mov	(z[0-9]+\.d), #255
**	movprfx	z0\.d, p0/z, z0\.d
**	and	z0\.d, p0/m, z0\.d, \1
** )
**	ret
*/
TEST_UNIFORM_Z (and_255_s64_z_tied1, svint64_t,
		z0 = svand_n_s64_z (p0, z0, 255),
		z0 = svand_z (p0, z0, 255))

/*
** and_255_s64_z_untied:
**	movprfx	z0\.d, p0/z, z1\.d
**	uxtb	z0\.d, p0/m, z1\.d
**	ret
*/
TEST_UNIFORM_Z (and_255_s64_z_untied, svint64_t,
		z0 = svand_n_s64_z (p0, z1, 255),
		z0 = svand_z (p0, z1, 255))

/*
** and_65535_s64_z_tied1:
** (
**	mov	(z[0-9]+\.d), z0\.d
**	movprfx	z0\.d, p0/z, \1
**	uxth	z0\.d, p0/m, \1
** |
**	mov	(z[0-9]+\.d), #65535
**	movprfx	z0\.d, p0/z, z0\.d
**	and	z0\.d, p0/m, z0\.d, \1
** )
**	ret
*/
TEST_UNIFORM_Z (and_65535_s64_z_tied1, svint64_t,
		z0 = svand_n_s64_z (p0, z0, 65535),
		z0 = svand_z (p0, z0, 65535))

/*
** and_65535_s64_z_untied:
**	movprfx	z0\.d, p0/z, z1\.d
**	uxth	z0\.d, p0/m, z1\.d
**	ret
*/
TEST_UNIFORM_Z (and_65535_s64_z_untied, svint64_t,
		z0 = svand_n_s64_z (p0, z1, 65535),
		z0 = svand_z (p0, z1, 65535))

/*
** and_0xffffffff_s64_z_tied1:
** (
**	mov	(z[0-9]+\.d), z0\.d
**	movprfx	z0\.d, p0/z, \1
**	uxtw	z0\.d, p0/m, \1
** |
**	mov	(z[0-9]+\.d), #4294967295
**	movprfx	z0\.d, p0/z, z0\.d
**	and	z0\.d, p0/m, z0\.d, \1
** )
**	ret
*/
TEST_UNIFORM_Z (and_0xffffffff_s64_z_tied1, svint64_t,
		z0 = svand_n_s64_z (p0, z0, 0xffffffff),
		z0 = svand_z (p0, z0, 0xffffffff))

/*
** and_0xffffffff_s64_z_untied:
**	movprfx	z0\.d, p0/z, z1\.d
**	uxtw	z0\.d, p0/m, z1\.d
**	ret
*/
TEST_UNIFORM_Z (and_0xffffffff_s64_z_untied, svint64_t,
		z0 = svand_n_s64_z (p0, z1, 0xffffffff),
		z0 = svand_z (p0, z1, 0xffffffff))

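/* _x ("don't care") predication: inactive lanes may take any value, so the
   compiler is free to use the unpredicated AND forms, including the
   bitmask-immediate encoding, as expected below.  */
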
/*
** and_s64_x_tied1:
**	and	z0\.d, (z0\.d, z1\.d|z1\.d, z0\.d)
**	ret
*/
TEST_UNIFORM_Z (and_s64_x_tied1, svint64_t,
		z0 = svand_s64_x (p0, z0, z1),
		z0 = svand_x (p0, z0, z1))

/*
** and_s64_x_tied2:
**	and	z0\.d, (z0\.d, z1\.d|z1\.d, z0\.d)
**	ret
*/
TEST_UNIFORM_Z (and_s64_x_tied2, svint64_t,
		z0 = svand_s64_x (p0, z1, z0),
		z0 = svand_x (p0, z1, z0))

/*
** and_s64_x_untied:
**	and	z0\.d, (z1\.d, z2\.d|z2\.d, z1\.d)
**	ret
*/
TEST_UNIFORM_Z (and_s64_x_untied, svint64_t,
		z0 = svand_s64_x (p0, z1, z2),
		z0 = svand_x (p0, z1, z2))

/*
** and_x0_s64_x_tied1:
**	mov	(z[0-9]+\.d), x0
**	and	z0\.d, (z0\.d, \1|\1, z0\.d)
**	ret
*/
TEST_UNIFORM_ZX (and_x0_s64_x_tied1, svint64_t, int64_t,
		 z0 = svand_n_s64_x (p0, z0, x0),
		 z0 = svand_x (p0, z0, x0))

/*
** and_x0_s64_x_untied:
**	mov	(z[0-9]+\.d), x0
**	and	z0\.d, (z1\.d, \1|\1, z1\.d)
**	ret
*/
TEST_UNIFORM_ZX (and_x0_s64_x_untied, svint64_t, int64_t,
		 z0 = svand_n_s64_x (p0, z1, x0),
		 z0 = svand_x (p0, z1, x0))

/*
** and_1_s64_x_tied1:
**	and	z0\.d, z0\.d, #0x1
**	ret
*/
TEST_UNIFORM_Z (and_1_s64_x_tied1, svint64_t,
		z0 = svand_n_s64_x (p0, z0, 1),
		z0 = svand_x (p0, z0, 1))

/*
** and_1_s64_x_untied:
**	movprfx	z0, z1
**	and	z0\.d, z0\.d, #0x1
**	ret
*/
TEST_UNIFORM_Z (and_1_s64_x_untied, svint64_t,
		z0 = svand_n_s64_x (p0, z1, 1),
		z0 = svand_x (p0, z1, 1))

/*
** and_127_s64_x:
**	and	z0\.d, z0\.d, #0x7f
**	ret
*/
TEST_UNIFORM_Z (and_127_s64_x, svint64_t,
		z0 = svand_n_s64_x (p0, z0, 127),
		z0 = svand_x (p0, z0, 127))

/*
** and_128_s64_x:
**	and	z0\.d, z0\.d, #0x80
**	ret
*/
TEST_UNIFORM_Z (and_128_s64_x, svint64_t,
		z0 = svand_n_s64_x (p0, z0, 128),
		z0 = svand_x (p0, z0, 128))

/*
** and_255_s64_x:
**	and	z0\.d, z0\.d, #0xff
**	ret
*/
TEST_UNIFORM_Z (and_255_s64_x, svint64_t,
		z0 = svand_n_s64_x (p0, z0, 255),
		z0 = svand_x (p0, z0, 255))

/*
** and_256_s64_x:
**	and	z0\.d, z0\.d, #0x100
**	ret
*/
TEST_UNIFORM_Z (and_256_s64_x, svint64_t,
		z0 = svand_n_s64_x (p0, z0, 256),
		z0 = svand_x (p0, z0, 256))

/* TODO: Bad code and needs fixing.  */
TEST_UNIFORM_Z (and_257_s64_x, svint64_t,
		z0 = svand_n_s64_x (p0, z0, 257),
		z0 = svand_x (p0, z0, 257))

/*
** and_512_s64_x:
**	and	z0\.d, z0\.d, #0x200
**	ret
*/
TEST_UNIFORM_Z (and_512_s64_x, svint64_t,
		z0 = svand_n_s64_x (p0, z0, 512),
		z0 = svand_x (p0, z0, 512))

/*
** and_65280_s64_x:
**	and	z0\.d, z0\.d, #0xff00
**	ret
*/
TEST_UNIFORM_Z (and_65280_s64_x, svint64_t,
		z0 = svand_n_s64_x (p0, z0, 0xff00),
		z0 = svand_x (p0, z0, 0xff00))

/*
** and_m127_s64_x:
**	and	z0\.d, z0\.d, #0xffffffffffffff81
**	ret
*/
TEST_UNIFORM_Z (and_m127_s64_x, svint64_t,
		z0 = svand_n_s64_x (p0, z0, -127),
		z0 = svand_x (p0, z0, -127))

/*
** and_m128_s64_x:
**	and	z0\.d, z0\.d, #0xffffffffffffff80
**	ret
*/
TEST_UNIFORM_Z (and_m128_s64_x, svint64_t,
		z0 = svand_n_s64_x (p0, z0, -128),
		z0 = svand_x (p0, z0, -128))

/*
** and_m255_s64_x:
**	and	z0\.d, z0\.d, #0xffffffffffffff01
**	ret
*/
TEST_UNIFORM_Z (and_m255_s64_x, svint64_t,
		z0 = svand_n_s64_x (p0, z0, -255),
		z0 = svand_x (p0, z0, -255))

/*
** and_m256_s64_x:
**	and	z0\.d, z0\.d, #0xffffffffffffff00
**	ret
*/
TEST_UNIFORM_Z (and_m256_s64_x, svint64_t,
		z0 = svand_n_s64_x (p0, z0, -256),
		z0 = svand_x (p0, z0, -256))

/*
** and_m257_s64_x:
**	and	z0\.d, z0\.d, #0xfffffffffffffeff
**	ret
*/
TEST_UNIFORM_Z (and_m257_s64_x, svint64_t,
		z0 = svand_n_s64_x (p0, z0, -257),
		z0 = svand_x (p0, z0, -257))

/*
** and_m512_s64_x:
**	and	z0\.d, z0\.d, #0xfffffffffffffe00
**	ret
*/
TEST_UNIFORM_Z (and_m512_s64_x, svint64_t,
		z0 = svand_n_s64_x (p0, z0, -512),
		z0 = svand_x (p0, z0, -512))

/*
** and_m32768_s64_x:
**	and	z0\.d, z0\.d, #0xffffffffffff8000
**	ret
*/
TEST_UNIFORM_Z (and_m32768_s64_x, svint64_t,
		z0 = svand_n_s64_x (p0, z0, -0x8000),
		z0 = svand_x (p0, z0, -0x8000))

/*
** and_5_s64_x:
**	mov	(z[0-9]+\.d), #5
**	and	z0\.d, (z0\.d, \1|\1, z0\.d)
**	ret
*/
TEST_UNIFORM_Z (and_5_s64_x, svint64_t,
		z0 = svand_n_s64_x (p0, z0, 5),
		z0 = svand_x (p0, z0, 5))