gcc-13.2.0/gcc/testsuite/gcc.target/aarch64/vec-init-9.c
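/* Test that 128-bit vector construction from scalars, memory and
   64-bit halves produces the expected minimal instruction sequences.
   check-function-bodies compares each function's generated assembly
   against the regexp lines in the "**" comment that names it.  The
   test can typically be run on its own with something like
   "make check-gcc RUNTESTFLAGS=aarch64.exp=vec-init-9.c" (invocation
   given for illustration only).  */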
/* { dg-do compile } */
/* { dg-options "-O" } */
/* { dg-final { check-function-bodies "**" "" "" { target lp64 } } } */

#include <arm_neon.h>

void ext();

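/* Each "**" comment below gives the expected body of the function it
   names.  On big-endian targets the mapping between element indices
   and register lanes is reversed, so the sources swap their operands
   under __BYTE_ORDER__ to keep the expected (optimal) instruction
   sequence the same for both endiannesses.  */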
/*
** s64q_1:
**	fmov	d0, x0
**	ins	v0\.d\[1\], x1
**	ret
*/
int64x2_t s64q_1(int64_t a0, int64_t a1) {
  if (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
    return (int64x2_t) { a1, a0 };
  else
    return (int64x2_t) { a0, a1 };
}
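/* When the high half comes straight from memory, it should be loaded
   directly into lane 1 with ld1 rather than loaded into a scratch
   register and inserted.  */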
/*
** s64q_2:
**	fmov	d0, x0
**	ld1	{v0\.d}\[1\], \[x1\]
**	ret
*/
int64x2_t s64q_2(int64_t a0, int64_t *ptr) {
  if (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
    return (int64x2_t) { ptr[0], a0 };
  else
    return (int64x2_t) { a0, ptr[0] };
}
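/* The low half can instead use a plain ldr, which implicitly zeros
   the upper 64 bits of the register, before the GPR value is
   inserted into lane 1.  */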
/*
** s64q_3:
**	ldr	d0, \[x0\]
**	ins	v0\.d\[1\], x1
**	ret
*/
int64x2_t s64q_3(int64_t *ptr, int64_t a1) {
  if (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
    return (int64x2_t) { a1, ptr[0] };
  else
    return (int64x2_t) { ptr[0], a1 };
}
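/* If the constructed vector is immediately stored, it need not be
   formed in a register at all: one stp of the two scalars produces
   the correct memory image.  s64q_5 checks the offset addressing
   form; "#?" makes the immediate's '#' optional in the regexp.  */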
/*
** s64q_4:
**	stp	x1, x2, \[x0\]
**	ret
*/
void s64q_4(int64x2_t *res, int64_t a0, int64_t a1) {
  res[0] = (int64x2_t) { a0, a1 };
}
/*
** s64q_5:
**	stp	x1, x2, \[x0, #?8\]
**	ret
*/
void s64q_5(uintptr_t res, int64_t a0, int64_t a1) {
  *(int64x2_t *)(res + 8) = (int64x2_t) { a0, a1 };
}
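/* ext is never defined; calling it just forces the vector to live
   across a call.  The inputs arrive in call-clobbered registers, so
   they should be saved with a single stp and the result reloaded
   afterwards with a single ldr of q0.  Lines containing "..." match
   any number of intervening instructions.  */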
/*
** s64q_6:
**	...
**	stp	x0, x1, .*
**	...
**	ldr	q0, .*
**	...
**	ret
*/
int64x2_t s64q_6(int64_t a0, int64_t a1) {
  int64x2_t res = { a0, a1 };
  ext ();
  return res;
}

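/* The f64q_* tests repeat the cases above with floating-point inputs,
   which arrive in d0 and d1 rather than x0 and x1, so the low half is
   already in place and no initial fmov is needed.  */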
/*
** f64q_1:
**	ins	v0\.d\[1\], v1\.d\[0\]
**	ret
*/
float64x2_t f64q_1(float64_t a0, float64_t a1) {
  if (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
    return (float64x2_t) { a1, a0 };
  else
    return (float64x2_t) { a0, a1 };
}
/*
** f64q_2:
**	ld1	{v0\.d}\[1\], \[x0\]
**	ret
*/
float64x2_t f64q_2(float64_t a0, float64_t *ptr) {
  if (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
    return (float64x2_t) { ptr[0], a0 };
  else
    return (float64x2_t) { a0, ptr[0] };
}
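/* a0 is deliberately unused: it is there only so that a1 arrives in
   d1 rather than d0.  */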
/*
** f64q_3:
**	ldr	d0, \[x0\]
**	ins	v0\.d\[1\], v1\.d\[0\]
**	ret
*/
float64x2_t f64q_3(float64_t a0, float64_t a1, float64_t *ptr) {
  if (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
    return (float64x2_t) { a1, ptr[0] };
  else
    return (float64x2_t) { ptr[0], a1 };
}
/*
** f64q_4:
**	stp	d0, d1, \[x0\]
**	ret
*/
void f64q_4(float64x2_t *res, float64_t a0, float64_t a1) {
  res[0] = (float64x2_t) { a0, a1 };
}
/*
** f64q_5:
**	stp	d0, d1, \[x0, #?8\]
**	ret
*/
void f64q_5(uintptr_t res, float64_t a0, float64_t a1) {
  *(float64x2_t *)(res + 8) = (float64x2_t) { a0, a1 };
}
/*
** f64q_6:
**	...
**	stp	d0, d1, .*
**	...
**	ldr	q0, .*
**	...
**	ret
*/
float64x2_t f64q_6(float64_t a0, float64_t a1) {
  float64x2_t res = { a0, a1 };
  ext ();
  return res;
}

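/* The s32q_* tests build a 128-bit vector from two 64-bit vector
   halves with vcombine_s32.  The register-valued results need no
   endianness check, since vcombine's halves are defined in terms of
   register lanes; the tests that fix a memory image (s32q_4 onwards)
   still swap their operands on big-endian.  */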
/*
** s32q_1:
**	ins	v0\.d\[1\], v1\.d\[0\]
**	ret
*/
int32x4_t s32q_1(int32x2_t a0, int32x2_t a1) {
  return vcombine_s32 (a0, a1);
}
/*
** s32q_2:
**	ld1	{v0\.d}\[1\], \[x0\]
**	ret
*/
int32x4_t s32q_2(int32x2_t a0, int32x2_t *ptr) {
  return vcombine_s32 (a0, ptr[0]);
}
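/* As in f64q_3, a0 is unused and only fixes the register assignment
   of a1.  */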
/*
** s32q_3:
**	ldr	d0, \[x0\]
**	ins	v0\.d\[1\], v1\.d\[0\]
**	ret
*/
int32x4_t s32q_3(int32x2_t a0, int32x2_t a1, int32x2_t *ptr) {
  return vcombine_s32 (ptr[0], a1);
}
/*
** s32q_4:
**	stp	d0, d1, \[x0\]
**	ret
*/
void s32q_4(int32x4_t *res, int32x2_t a0, int32x2_t a1) {
  if (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
    res[0] = vcombine_s32 (a1, a0);
  else
    res[0] = vcombine_s32 (a0, a1);
}
/*
** s32q_5:
**	stp	d0, d1, \[x0, #?8\]
**	ret
*/
void s32q_5(uintptr_t res, int32x2_t a0, int32x2_t a1) {
  if (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
    *(int32x4_t *)(res + 8) = vcombine_s32 (a1, a0);
  else
    *(int32x4_t *)(res + 8) = vcombine_s32 (a0, a1);
}
/*
** s32q_6:
**	...
**	stp	d0, d1, .*
**	...
**	ldr	q0, .*
**	...
**	ret
*/
int32x4_t s32q_6(int32x2_t a0, int32x2_t a1) {
  int32x4_t res = (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
		   ? vcombine_s32 (a1, a0)
		   : vcombine_s32 (a0, a1));
  ext ();
  return res;
}

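/* The f32q_* tests mirror the s32q_* ones with float32x2_t halves
   and vcombine_f32; the expected bodies are identical.  */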
/*
** f32q_1:
**	ins	v0\.d\[1\], v1\.d\[0\]
**	ret
*/
float32x4_t f32q_1(float32x2_t a0, float32x2_t a1) {
  return vcombine_f32 (a0, a1);
}
/*
** f32q_2:
**	ld1	{v0\.d}\[1\], \[x0\]
**	ret
*/
float32x4_t f32q_2(float32x2_t a0, float32x2_t *ptr) {
  return vcombine_f32 (a0, ptr[0]);
}
/*
** f32q_3:
**	ldr	d0, \[x0\]
**	ins	v0\.d\[1\], v1\.d\[0\]
**	ret
*/
float32x4_t f32q_3(float32x2_t a0, float32x2_t a1, float32x2_t *ptr) {
  return vcombine_f32 (ptr[0], a1);
}
/*
** f32q_4:
**	stp	d0, d1, \[x0\]
**	ret
*/
void f32q_4(float32x4_t *res, float32x2_t a0, float32x2_t a1) {
  if (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
    res[0] = vcombine_f32 (a1, a0);
  else
    res[0] = vcombine_f32 (a0, a1);
}
/*
** f32q_5:
**	stp	d0, d1, \[x0, #?8\]
**	ret
*/
void f32q_5(uintptr_t res, float32x2_t a0, float32x2_t a1) {
  if (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
    *(float32x4_t *)(res + 8) = vcombine_f32 (a1, a0);
  else
    *(float32x4_t *)(res + 8) = vcombine_f32 (a0, a1);
}
/*
** f32q_6:
**	...
**	stp	d0, d1, .*
**	...
**	ldr	q0, .*
**	...
**	ret
*/
float32x4_t f32q_6(float32x2_t a0, float32x2_t a1) {
  float32x4_t res = (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
		     ? vcombine_f32 (a1, a0)
		     : vcombine_f32 (a0, a1));
  ext ();
  return res;
}