1 /* { dg-do compile } */
2 /* { dg-options "-O" } */
3 /* { dg-final { check-function-bodies "**" "" "" { target lp64 } } } */
4
5 #include <arm_neon.h>
6
7 /*
8 ** s64q_1:
9 ** fmov d0, x0
10 ** ret
11 */
12 int64x2_t s64q_1(int64_t a0) {
13 if (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
14 return (int64x2_t) { 0, a0 };
15 else
16 return (int64x2_t) { a0, 0 };
17 }
18 /*
19 ** s64q_2:
20 ** ldr d0, \[x0\]
21 ** ret
22 */
23 int64x2_t s64q_2(int64_t *ptr) {
24 if (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
25 return (int64x2_t) { 0, ptr[0] };
26 else
27 return (int64x2_t) { ptr[0], 0 };
28 }
29 /*
30 ** s64q_3:
31 ** ldr d0, \[x0, #?8\]
32 ** ret
33 */
34 int64x2_t s64q_3(int64_t *ptr) {
35 if (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
36 return (int64x2_t) { 0, ptr[1] };
37 else
38 return (int64x2_t) { ptr[1], 0 };
39 }
40
41 /*
42 ** f64q_1:
43 ** fmov d0, d0
44 ** ret
45 */
46 float64x2_t f64q_1(float64_t a0) {
47 if (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
48 return (float64x2_t) { 0, a0 };
49 else
50 return (float64x2_t) { a0, 0 };
51 }
52 /*
53 ** f64q_2:
54 ** ldr d0, \[x0\]
55 ** ret
56 */
57 float64x2_t f64q_2(float64_t *ptr) {
58 if (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
59 return (float64x2_t) { 0, ptr[0] };
60 else
61 return (float64x2_t) { ptr[0], 0 };
62 }
63 /*
64 ** f64q_3:
65 ** ldr d0, \[x0, #?8\]
66 ** ret
67 */
68 float64x2_t f64q_3(float64_t *ptr) {
69 if (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
70 return (float64x2_t) { 0, ptr[1] };
71 else
72 return (float64x2_t) { ptr[1], 0 };
73 }
74
75 /*
76 ** s32q_1:
77 ** fmov d0, d0
78 ** ret
79 */
80 int32x4_t s32q_1(int32x2_t a0, int32x2_t a1) {
81 return vcombine_s32 (a0, (int32x2_t) { 0, 0 });
82 }
83 /*
84 ** s32q_2:
85 ** ldr d0, \[x0\]
86 ** ret
87 */
88 int32x4_t s32q_2(int32x2_t *ptr) {
89 return vcombine_s32 (ptr[0], (int32x2_t) { 0, 0 });
90 }
91 /*
92 ** s32q_3:
93 ** ldr d0, \[x0, #?8\]
94 ** ret
95 */
96 int32x4_t s32q_3(int32x2_t *ptr) {
97 return vcombine_s32 (ptr[1], (int32x2_t) { 0, 0 });
98 }
99
100 /*
101 ** f32q_1:
102 ** fmov d0, d0
103 ** ret
104 */
105 float32x4_t f32q_1(float32x2_t a0, float32x2_t a1) {
106 return vcombine_f32 (a0, (float32x2_t) { 0, 0 });
107 }
108 /*
109 ** f32q_2:
110 ** ldr d0, \[x0\]
111 ** ret
112 */
113 float32x4_t f32q_2(float32x2_t *ptr) {
114 return vcombine_f32 (ptr[0], (float32x2_t) { 0, 0 });
115 }
116 /*
117 ** f32q_3:
118 ** ldr d0, \[x0, #?8\]
119 ** ret
120 */
121 float32x4_t f32q_3(float32x2_t *ptr) {
122 return vcombine_f32 (ptr[1], (float32x2_t) { 0, 0 });
123 }