(root)/
gcc-13.2.0/
gcc/
testsuite/
gcc.target/
aarch64/
sve/
sel_3.c
       1  /* { dg-do assemble { target aarch64_variant_pcs } } */
       2  /* { dg-options "-O2 -msve-vector-bits=256 --save-temps" } */
       3  /* { dg-final { check-function-bodies "**" "" } } */
       4  
       5  #include <stdint.h>
       6  
/* Fixed-length 256-bit GNU vector types matching the -msve-vector-bits=256
   option above, so that values can be converted to and from the sizeless
   SVE ACLE types (__SVInt8_t etc.) used in the function signatures.  */
typedef int8_t vnx16qi __attribute__((vector_size (32)));
typedef int16_t vnx8hi __attribute__((vector_size (32)));
typedef int32_t vnx4si __attribute__((vector_size (32)));
typedef _Float16 vnx8hf __attribute__((vector_size (32)));
typedef float vnx4sf __attribute__((vector_size (32)));
      12  
/* __builtin_shuffle selector for 32 x int8: indices < 32 pick from the
   first operand, indices >= 32 pick from the second.  One element per
   64-bit chunk (positions 0, 8, 16 and 24) comes from the first operand,
   so as a predicate the selection is: 1 0 0 0 0 0 0 0 ...  */

#define MASK_32		{ 0, 33, 34, 35, 36, 37, 38, 39,  \
			  8, 41, 42, 43, 44, 45, 46, 47,  \
			  16, 49, 50, 51, 52, 53, 54, 55, \
			  24, 57, 58, 59, 60, 61, 62, 63  }

/* Selector for 16 x int16: positions 0, 4, 8 and 12 (one per 64-bit
   chunk) come from the first operand.  Predicate vector: 1 0 0 0 ...  */

#define MASK_16		{ 0, 17, 18, 19, 4, 21, 22, 23, \
			  8, 25, 26, 27, 12, 29, 30, 31 } 

/* Selector for 8 x int32: positions 0, 2, 4 and 6 come from the first
   operand.  Predicate vector: 1 0 ...  */

#define MASK_8		{ 0, 9, 2, 11, 4, 13, 6, 15 }
      28  
      29  /*
      30  ** permute_vnx16qi:
      31  **	ptrue	(p[0-7])\.d, vl4
      32  **	sel	z0\.b, \1, z0\.b, z1\.b
      33  **	ret
      34  */
__SVInt8_t
permute_vnx16qi (__SVInt8_t x, __SVInt8_t y)
{
  /* MASK_32 keeps one byte per 64-bit chunk of X and takes the rest
     from Y; the pattern above checks that this is recognized as a SEL
     on a .d-granular PTRUE predicate rather than a general permute.  */
  return __builtin_shuffle ((vnx16qi) x, (vnx16qi) y, (vnx16qi) MASK_32);
}
      40  
      41  /*
      42  ** permute_vnx8hi:
      43  **	ptrue	(p[0-7])\.d, vl4
      44  **	sel	z0\.h, \1, z0\.h, z1\.h
      45  **	ret
      46  */
__SVInt16_t
permute_vnx8hi (__SVInt16_t x, __SVInt16_t y)
{
  /* MASK_16 keeps one halfword per 64-bit chunk of X (positions 0, 4,
     8, 12); the pattern above expects a SEL under a .d-granular PTRUE.  */
  return __builtin_shuffle ((vnx8hi) x, (vnx8hi) y, (vnx8hi) MASK_16);
}
      52  
      53  /*
      54  ** permute_vnx4si:
      55  **	ptrue	(p[0-7])\.d, vl4
      56  **	sel	z0\.s, \1, z0\.s, z1\.s
      57  **	ret
      58  */
__SVInt32_t
permute_vnx4si (__SVInt32_t x, __SVInt32_t y)
{
  /* MASK_8 keeps one word per 64-bit chunk of X (positions 0, 2, 4, 6);
     the pattern above expects a SEL under a .d-granular PTRUE.  */
  return __builtin_shuffle ((vnx4si) x, (vnx4si) y, (vnx4si) MASK_8);
}
      64  
      65  /*
      66  ** permute_vnx8hf:
      67  **	ptrue	(p[0-7])\.d, vl4
      68  **	sel	z0\.h, \1, z0\.h, z1\.h
      69  **	ret
      70  */
__SVFloat16_t
permute_vnx8hf (__SVFloat16_t x, __SVFloat16_t y)
{
  /* __builtin_shuffle requires an integer selector, hence the vnx8hi
     mask; the result is cast back to the FP16 ACLE type.  Same expected
     SEL codegen as the integer halfword case.  */
  return (__SVFloat16_t) __builtin_shuffle ((vnx8hf) x, (vnx8hf) y,
					    (vnx8hi) MASK_16);
}
      77  
      78  /*
      79  ** permute_vnx4sf:
      80  **	ptrue	(p[0-7])\.d, vl4
      81  **	sel	z0\.s, \1, z0\.s, z1\.s
      82  **	ret
      83  */
__SVFloat32_t
permute_vnx4sf (__SVFloat32_t x, __SVFloat32_t y)
{
  /* Integer selector (vnx4si) on float data, as __builtin_shuffle
     requires; same expected SEL codegen as the integer word case.  */
  return __builtin_shuffle ((vnx4sf) x, (vnx4sf) y, (vnx4si) MASK_8);
}