(root)/
gcc-13.2.0/
gcc/
testsuite/
gcc.target/
aarch64/
vec-init-14.c
       1  /* { dg-do compile } */
       2  /* { dg-options "-O" } */
       3  /* { dg-final { check-function-bodies "**" "" "" { target lp64 } } } */
       4  
       5  #include <arm_neon.h>
       6  
       7  void ext();
       8  
       9  /*
      10  ** s32_1:
      11  **	fmov	s0, w0
      12  **	ins	v0\.s\[1\], w1
      13  **	ret
      14  */
int32x2_t s32_1(int32_t a0, int32_t a1) {
  /* Build a two-lane vector from two GPR scalars.  Swap the initializer
     order on big-endian so that a0 always lands in lane 0; the expected
     codegen (pattern above) is fmov into s0 then a single INS.  */
  if (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
    return (int32x2_t) { a1, a0 };
  else
    return (int32x2_t) { a0, a1 };
}
      21  /*
      22  ** s32_2:
      23  **	fmov	s0, w0
      24  **	ld1	{v0\.s}\[1\], \[x1\]
      25  **	ret
      26  */
int32x2_t s32_2(int32_t a0, int32_t *ptr) {
  /* GPR scalar in lane 0, loaded element in lane 1.  The memory operand
     should be merged into the lane insert as LD1 {v0.s}[1] rather than a
     separate load plus INS (pattern above).  Endian swap keeps a0 in
     lane 0 on big-endian.  */
  if (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
    return (int32x2_t) { ptr[0], a0 };
  else
    return (int32x2_t) { a0, ptr[0] };
}
      33  /*
      34  ** s32_3:
      35  **	ldr	s0, \[x0\]
      36  **	ins	v0\.s\[1\], w1
      37  **	ret
      38  */
int32x2_t s32_3(int32_t *ptr, int32_t a1) {
  /* Loaded element in lane 0, GPR scalar in lane 1: expect a scalar
     LDR s0 for lane 0 followed by INS from w1 (pattern above).  */
  if (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
    return (int32x2_t) { a1, ptr[0] };
  else
    return (int32x2_t) { ptr[0], a1 };
}
      45  /*
      46  ** s32_4:
      47  **	stp	w1, w2, \[x0\]
      48  **	ret
      49  */
void s32_4(int32x2_t *res, int32_t a0, int32_t a1) {
  /* Storing a freshly built vector should bypass vector construction
     entirely and become a single STP of the two GPRs (pattern above).  */
  res[0] = (int32x2_t) { a0, a1 };
}
      53  /*
      54  ** s32_5:
      55  **	stp	w1, w2, \[x0, #?4\]
      56  **	ret
      57  */
void s32_5(uintptr_t res, int32_t a0, int32_t a1) {
  /* Same as s32_4 but through a byte offset of 4: the offset should fold
     into the STP addressing mode, [x0, #4] (pattern above).  */
  *(int32x2_t *)(res + 4) = (int32x2_t) { a0, a1 };
}
      61  /* Currently uses d8 to hold res across the call.  */
/* Vector built before a call and returned after it: must survive the
   call in a callee-saved register (currently d8); no asm pattern is
   checked, this only exercises spill/reload decisions.  */
int32x2_t s32_6(int32_t a0, int32_t a1) {
  int32x2_t res = { a0, a1 };
  ext ();
  return res;
}
      67  
      68  /*
      69  ** f32_1:
      70  **	ins	v0\.s\[1\], v1\.s\[0\]
      71  **	ret
      72  */
float32x2_t f32_1(float32_t a0, float32_t a1) {
  /* Float args already arrive in s0/s1, so building the pair should be
     a single INS of v1 lane 0 into v0 lane 1 (pattern above).  Endian
     swap keeps a0 in lane 0 on big-endian.  */
  if (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
    return (float32x2_t) { a1, a0 };
  else
    return (float32x2_t) { a0, a1 };
}
      79  /*
      80  ** f32_2:
      81  **	ld1	{v0\.s}\[1\], \[x0\]
      82  **	ret
      83  */
float32x2_t f32_2(float32_t a0, float32_t *ptr) {
  /* a0 is already in s0; loading the second element straight into
     lane 1 should use LD1 {v0.s}[1] with no extra moves (pattern
     above).  */
  if (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
    return (float32x2_t) { ptr[0], a0 };
  else
    return (float32x2_t) { a0, ptr[0] };
}
      90  /*
      91  ** f32_3:
      92  **	ldr	s0, \[x0\]
      93  **	ins	v0\.s\[1\], v1\.s\[0\]
      94  **	ret
      95  */
float32x2_t f32_3(float32_t a0, float32_t a1, float32_t *ptr) {
  /* a0 is intentionally unused: it occupies s0 so that a1 sits in s1
     and ptr in x0, forcing LDR s0 for lane 0 plus INS from v1 for
     lane 1 (pattern above).  */
  if (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
    return (float32x2_t) { a1, ptr[0] };
  else
    return (float32x2_t) { ptr[0], a1 };
}
     102  /*
     103  ** f32_4:
     104  **	stp	s0, s1, \[x0\]
     105  **	ret
     106  */
void f32_4(float32x2_t *res, float32_t a0, float32_t a1) {
  /* Storing a freshly built float pair should become one STP of s0/s1,
     with no vector construction (pattern above).  */
  res[0] = (float32x2_t) { a0, a1 };
}
     110  /*
     111  ** f32_5:
     112  **	stp	s0, s1, \[x0, #?4\]
     113  **	ret
     114  */
void f32_5(uintptr_t res, float32_t a0, float32_t a1) {
  /* Same as f32_4 but through a byte offset of 4: the offset should
     fold into the STP addressing mode, [x0, #4] (pattern above).  */
  *(float32x2_t *)(res + 4) = (float32x2_t) { a0, a1 };
}
     118  /* Currently uses d8 to hold res across the call.  */
/* Float analogue of s32_6: the pair must be kept live across the call
   in a callee-saved register (currently d8); no asm pattern checked.  */
float32x2_t f32_6(float32_t a0, float32_t a1) {
  float32x2_t res = { a0, a1 };
  ext ();
  return res;
}