gcc-13.2.0/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/and_s8.c
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */

#include "test_sve_acle.h"

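/* Each TEST_UNIFORM_Z / TEST_UNIFORM_ZX invocation below defines one
   function, and the comment above it gives the assembly that
   check-function-bodies expects that function to compile to.  The two
   statements passed to each macro are the explicit and the overloaded
   spelling of the same call; which one is compiled depends on the
   harness (see test_sve_acle.h).  As a rough sketch only, an
   invocation expands to something like:

     svint8_t
     and_s8_m_tied1 (svint8_t z0, svint8_t z1, svbool_t p0)
     {
       z0 = svand_s8_m (p0, z0, z1);
       return z0;
     }

   so z0, z1 and z2 name incoming vector arguments, p0 the governing
   predicate, and w0/x0 a scalar argument.  "tied1"/"tied2" mean the
   destination is the same register as the first/second input;
   "untied" means it is distinct.

   The _m tests use merging predication: inactive lanes keep the value
   of the first vector operand, so when the destination is not already
   that operand the compiler must copy it first (the mov/movprfx in
   the tied2 and untied cases).  Predicated AND takes no immediate, so
   constant operands are materialized with a mov first.  */
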
/*
** and_s8_m_tied1:
**	and	z0\.b, p0/m, z0\.b, z1\.b
**	ret
*/
TEST_UNIFORM_Z (and_s8_m_tied1, svint8_t,
		z0 = svand_s8_m (p0, z0, z1),
		z0 = svand_m (p0, z0, z1))

/*
** and_s8_m_tied2:
**	mov	(z[0-9]+)\.d, z0\.d
**	movprfx	z0, z1
**	and	z0\.b, p0/m, z0\.b, \1\.b
**	ret
*/
TEST_UNIFORM_Z (and_s8_m_tied2, svint8_t,
		z0 = svand_s8_m (p0, z1, z0),
		z0 = svand_m (p0, z1, z0))

/*
** and_s8_m_untied:
**	movprfx	z0, z1
**	and	z0\.b, p0/m, z0\.b, z2\.b
**	ret
*/
TEST_UNIFORM_Z (and_s8_m_untied, svint8_t,
		z0 = svand_s8_m (p0, z1, z2),
		z0 = svand_m (p0, z1, z2))

/*
** and_w0_s8_m_tied1:
**	mov	(z[0-9]+\.b), w0
**	and	z0\.b, p0/m, z0\.b, \1
**	ret
*/
TEST_UNIFORM_ZX (and_w0_s8_m_tied1, svint8_t, int8_t,
		 z0 = svand_n_s8_m (p0, z0, x0),
		 z0 = svand_m (p0, z0, x0))

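/* "{ xfail *-*-* }" marks the following body match as expected to
   fail on all targets: the compiler currently emits a different (but
   still correct) sequence than the ideal one shown.  */
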
/*
** and_w0_s8_m_untied: { xfail *-*-* }
**	mov	(z[0-9]+\.b), w0
**	movprfx	z0, z1
**	and	z0\.b, p0/m, z0\.b, \1
**	ret
*/
TEST_UNIFORM_ZX (and_w0_s8_m_untied, svint8_t, int8_t,
		 z0 = svand_n_s8_m (p0, z1, x0),
		 z0 = svand_m (p0, z1, x0))

/*
** and_1_s8_m_tied1:
**	mov	(z[0-9]+\.b), #1
**	and	z0\.b, p0/m, z0\.b, \1
**	ret
*/
TEST_UNIFORM_Z (and_1_s8_m_tied1, svint8_t,
		z0 = svand_n_s8_m (p0, z0, 1),
		z0 = svand_m (p0, z0, 1))

/*
** and_1_s8_m_untied: { xfail *-*-* }
**	mov	(z[0-9]+\.b), #1
**	movprfx	z0, z1
**	and	z0\.b, p0/m, z0\.b, \1
**	ret
*/
TEST_UNIFORM_Z (and_1_s8_m_untied, svint8_t,
		z0 = svand_n_s8_m (p0, z1, 1),
		z0 = svand_m (p0, z1, 1))

/*
** and_m2_s8_m:
**	mov	(z[0-9]+\.b), #-2
**	and	z0\.b, p0/m, z0\.b, \1
**	ret
*/
TEST_UNIFORM_Z (and_m2_s8_m, svint8_t,
		z0 = svand_n_s8_m (p0, z0, -2),
		z0 = svand_m (p0, z0, -2))

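/* The _z tests use zeroing predication: inactive lanes of the result
   must be zero.  There is no zeroing form of AND itself, so the
   compiler first zeroes the inactive lanes of the destination with
   "movprfx z0.b, p0/z, ..." and then applies the merging AND.  */
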
/*
** and_s8_z_tied1:
**	movprfx	z0\.b, p0/z, z0\.b
**	and	z0\.b, p0/m, z0\.b, z1\.b
**	ret
*/
TEST_UNIFORM_Z (and_s8_z_tied1, svint8_t,
		z0 = svand_s8_z (p0, z0, z1),
		z0 = svand_z (p0, z0, z1))

/*
** and_s8_z_tied2:
**	movprfx	z0\.b, p0/z, z0\.b
**	and	z0\.b, p0/m, z0\.b, z1\.b
**	ret
*/
TEST_UNIFORM_Z (and_s8_z_tied2, svint8_t,
		z0 = svand_s8_z (p0, z1, z0),
		z0 = svand_z (p0, z1, z0))

/*
** and_s8_z_untied:
** (
**	movprfx	z0\.b, p0/z, z1\.b
**	and	z0\.b, p0/m, z0\.b, z2\.b
** |
**	movprfx	z0\.b, p0/z, z2\.b
**	and	z0\.b, p0/m, z0\.b, z1\.b
** )
**	ret
*/
TEST_UNIFORM_Z (and_s8_z_untied, svint8_t,
		z0 = svand_s8_z (p0, z1, z2),
		z0 = svand_z (p0, z1, z2))

/*
** and_w0_s8_z_tied1:
**	mov	(z[0-9]+\.b), w0
**	movprfx	z0\.b, p0/z, z0\.b
**	and	z0\.b, p0/m, z0\.b, \1
**	ret
*/
TEST_UNIFORM_ZX (and_w0_s8_z_tied1, svint8_t, int8_t,
		 z0 = svand_n_s8_z (p0, z0, x0),
		 z0 = svand_z (p0, z0, x0))

/*
** and_w0_s8_z_untied:
**	mov	(z[0-9]+\.b), w0
** (
**	movprfx	z0\.b, p0/z, z1\.b
**	and	z0\.b, p0/m, z0\.b, \1
** |
**	movprfx	z0\.b, p0/z, \1
**	and	z0\.b, p0/m, z0\.b, z1\.b
** )
**	ret
*/
TEST_UNIFORM_ZX (and_w0_s8_z_untied, svint8_t, int8_t,
		 z0 = svand_n_s8_z (p0, z1, x0),
		 z0 = svand_z (p0, z1, x0))

/*
** and_1_s8_z_tied1:
**	mov	(z[0-9]+\.b), #1
**	movprfx	z0\.b, p0/z, z0\.b
**	and	z0\.b, p0/m, z0\.b, \1
**	ret
*/
TEST_UNIFORM_Z (and_1_s8_z_tied1, svint8_t,
		z0 = svand_n_s8_z (p0, z0, 1),
		z0 = svand_z (p0, z0, 1))

/*
** and_1_s8_z_untied:
**	mov	(z[0-9]+\.b), #1
** (
**	movprfx	z0\.b, p0/z, z1\.b
**	and	z0\.b, p0/m, z0\.b, \1
** |
**	movprfx	z0\.b, p0/z, \1
**	and	z0\.b, p0/m, z0\.b, z1\.b
** )
**	ret
*/
TEST_UNIFORM_Z (and_1_s8_z_untied, svint8_t,
		z0 = svand_n_s8_z (p0, z1, 1),
		z0 = svand_z (p0, z1, 1))

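/* The _x tests use "don't care" predication: inactive lanes may hold
   any value.  That lets the compiler drop the predicate entirely and
   use the unpredicated bitwise AND, which operates on the whole
   vector and is written with the .d element size regardless of the
   element type, since bitwise AND is the same operation at any
   element width.  */
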
/*
** and_s8_x_tied1:
**	and	z0\.d, (z0\.d, z1\.d|z1\.d, z0\.d)
**	ret
*/
TEST_UNIFORM_Z (and_s8_x_tied1, svint8_t,
		z0 = svand_s8_x (p0, z0, z1),
		z0 = svand_x (p0, z0, z1))

/*
** and_s8_x_tied2:
**	and	z0\.d, (z0\.d, z1\.d|z1\.d, z0\.d)
**	ret
*/
TEST_UNIFORM_Z (and_s8_x_tied2, svint8_t,
		z0 = svand_s8_x (p0, z1, z0),
		z0 = svand_x (p0, z1, z0))

/*
** and_s8_x_untied:
**	and	z0\.d, (z1\.d, z2\.d|z2\.d, z1\.d)
**	ret
*/
TEST_UNIFORM_Z (and_s8_x_untied, svint8_t,
		z0 = svand_s8_x (p0, z1, z2),
		z0 = svand_x (p0, z1, z2))

/*
** and_w0_s8_x_tied1:
**	mov	(z[0-9]+)\.b, w0
**	and	z0\.d, (z0\.d, \1\.d|\1\.d, z0\.d)
**	ret
*/
TEST_UNIFORM_ZX (and_w0_s8_x_tied1, svint8_t, int8_t,
		 z0 = svand_n_s8_x (p0, z0, x0),
		 z0 = svand_x (p0, z0, x0))

/*
** and_w0_s8_x_untied:
**	mov	(z[0-9]+)\.b, w0
**	and	z0\.d, (z1\.d, \1\.d|\1\.d, z1\.d)
**	ret
*/
TEST_UNIFORM_ZX (and_w0_s8_x_untied, svint8_t, int8_t,
		 z0 = svand_n_s8_x (p0, z1, x0),
		 z0 = svand_x (p0, z1, x0))

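/* Unpredicated AND also has an immediate form, but only for constants
   that are encodable as SVE logical (bitmask) immediates; those fold
   into a single instruction, while the rest (such as 5 below) still
   need a mov.  */
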
/*
** and_1_s8_x_tied1:
**	and	z0\.b, z0\.b, #0x1
**	ret
*/
TEST_UNIFORM_Z (and_1_s8_x_tied1, svint8_t,
		z0 = svand_n_s8_x (p0, z0, 1),
		z0 = svand_x (p0, z0, 1))

/*
** and_1_s8_x_untied:
**	movprfx	z0, z1
**	and	z0\.b, z0\.b, #0x1
**	ret
*/
TEST_UNIFORM_Z (and_1_s8_x_untied, svint8_t,
		z0 = svand_n_s8_x (p0, z1, 1),
		z0 = svand_x (p0, z1, 1))

/*
** and_127_s8_x:
**	and	z0\.b, z0\.b, #0x7f
**	ret
*/
TEST_UNIFORM_Z (and_127_s8_x, svint8_t,
		z0 = svand_n_s8_x (p0, z0, 127),
		z0 = svand_x (p0, z0, 127))

/*
** and_128_s8_x:
**	and	z0\.b, z0\.b, #0x80
**	ret
*/
TEST_UNIFORM_Z (and_128_s8_x, svint8_t,
		z0 = svand_n_s8_x (p0, z0, 128),
		z0 = svand_x (p0, z0, 128))

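/* ANDing an 8-bit lane with 255 leaves it unchanged, so the whole
   operation folds away.  */
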
/*
** and_255_s8_x:
**	ret
*/
TEST_UNIFORM_Z (and_255_s8_x, svint8_t,
		z0 = svand_n_s8_x (p0, z0, 255),
		z0 = svand_x (p0, z0, 255))

/*
** and_m127_s8_x:
**	and	z0\.b, z0\.b, #0x81
**	ret
*/
TEST_UNIFORM_Z (and_m127_s8_x, svint8_t,
		z0 = svand_n_s8_x (p0, z0, -127),
		z0 = svand_x (p0, z0, -127))

/*
** and_m128_s8_x:
**	and	z0\.b, z0\.b, #0x80
**	ret
*/
TEST_UNIFORM_Z (and_m128_s8_x, svint8_t,
		z0 = svand_n_s8_x (p0, z0, -128),
		z0 = svand_x (p0, z0, -128))

/*
** and_5_s8_x:
**	mov	(z[0-9]+)\.b, #5
**	and	z0\.d, (z0\.d, \1\.d|\1\.d, z0\.d)
**	ret
*/
TEST_UNIFORM_Z (and_5_s8_x, svint8_t,
		z0 = svand_n_s8_x (p0, z0, 5),
		z0 = svand_x (p0, z0, 5))