/* { dg-do compile } */
/* { dg-options "-O" } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-mfloat-abi=hard" } */
/* { dg-final { check-function-bodies "**" "" } } */
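/* Test moves, loads and stores of 32-bit floats between the
   general-purpose ("r" constraint) and FP/MVE ("w" constraint)
   register files, covering the immediate offset ranges of the
   relevant addressing modes.  */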

/*
** r_w:
**	vmov	r0, s0
**	bx	lr
*/
void
r_w (float s0)
{
  register float r0 asm ("r0");
  r0 = s0;
  asm volatile ("" :: "r" (r0));
}

/*
** w_r:
**	vmov	s0, r0
**	bx	lr
*/
float
w_r ()
{
  register float r0 asm ("r0");
  asm volatile ("" : "=r" (r0));
  return r0;
}

/*
** w_w:
**	vmov.f32	s1, s0
**	bx	lr
*/
void
w_w (float s0)
{
  register float s1 asm ("s1");
  s1 = s0;
  asm volatile ("" :: "w" (s1));
}

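/* Loads into a GPR.  Thumb-2 LDR (immediate) accepts offsets in
   [-255, 4095], so the #-256 case below needs a separate address
   calculation while the others fold into the addressing mode.  */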
/*
** r_m_m64:
**	sub	(r[0-9]+), r0, #256
**	ldr	r1, \[\1\]	@ float
**	bx	lr
*/
void
r_m_m64 (float *r0)
{
  register float r1 asm ("r1");
  r1 = r0[-64];
  asm volatile ("" :: "r" (r1));
}

/*
** r_m_m63:
**	ldr	r1, \[r0, #-252\]	@ float
**	bx	lr
*/
void
r_m_m63 (float *r0)
{
  register float r1 asm ("r1");
  r1 = r0[-63];
  asm volatile ("" :: "r" (r1));
}

/*
** r_m_m1:
**	ldr	r1, \[r0, #-4\]	@ float
**	bx	lr
*/
void
r_m_m1 (float *r0)
{
  register float r1 asm ("r1");
  r1 = r0[-1];
  asm volatile ("" :: "r" (r1));
}

/*
** r_m_0:
**	ldr	r1, \[r0\]	@ float
**	bx	lr
*/
void
r_m_0 (float *r0)
{
  register float r1 asm ("r1");
  r1 = r0[0];
  asm volatile ("" :: "r" (r1));
}

/*
** r_m_1:
**	ldr	r1, \[r0, #4\]	@ float
**	bx	lr
*/
void
r_m_1 (float *r0)
{
  register float r1 asm ("r1");
  r1 = r0[1];
  asm volatile ("" :: "r" (r1));
}

/*
** r_m_255:
**	ldr	r1, \[r0, #1020\]	@ float
**	bx	lr
*/
void
r_m_255 (float *r0)
{
  register float r1 asm ("r1");
  r1 = r0[255];
  asm volatile ("" :: "r" (r1));
}

/*
** r_m_256:
**	ldr	r1, \[r0, #1024\]	@ float
**	bx	lr
*/
void
r_m_256 (float *r0)
{
  register float r1 asm ("r1");
  r1 = r0[256];
  asm volatile ("" :: "r" (r1));
}

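/* Loads into an FP register.  VLDR.32 encodes its offset as an 8-bit
   value scaled by 4 (a range of +/-[0, 1020]), so the #1024 case below
   needs a separate ADD.  */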
/* ??? This could be done in one instruction, but without mve.fp,
   it makes more sense for memory_operand to enforce the GPR range.  */
/*
** w_m_m64:
**	sub	(r[0-9]+), r0, #256
**	vldr.32	s0, \[\1\]
**	bx	lr
*/
void
w_m_m64 (float *r0)
{
  register float s0 asm ("s0");
  s0 = r0[-64];
  asm volatile ("" :: "w" (s0));
}

/*
** w_m_m63:
**	vldr.32	s0, \[r0, #-252\]
**	bx	lr
*/
void
w_m_m63 (float *r0)
{
  register float s0 asm ("s0");
  s0 = r0[-63];
  asm volatile ("" :: "w" (s0));
}

/*
** w_m_m1:
**	vldr.32	s0, \[r0, #-4\]
**	bx	lr
*/
void
w_m_m1 (float *r0)
{
  register float s0 asm ("s0");
  s0 = r0[-1];
  asm volatile ("" :: "w" (s0));
}

/*
** w_m_0:
**	vldr.32	s0, \[r0\]
**	bx	lr
*/
void
w_m_0 (float *r0)
{
  register float s0 asm ("s0");
  s0 = r0[0];
  asm volatile ("" :: "w" (s0));
}

/*
** w_m_1:
**	vldr.32	s0, \[r0, #4\]
**	bx	lr
*/
void
w_m_1 (float *r0)
{
  register float s0 asm ("s0");
  s0 = r0[1];
  asm volatile ("" :: "w" (s0));
}

/*
** w_m_255:
**	vldr.32	s0, \[r0, #1020\]
**	bx	lr
*/
void
w_m_255 (float *r0)
{
  register float s0 asm ("s0");
  s0 = r0[255];
  asm volatile ("" :: "w" (s0));
}

/*
** w_m_256:
**	add	(r[0-9]+), r0, #1024
**	vldr.32	s0, \[\1\]
**	bx	lr
*/
void
w_m_256 (float *r0)
{
  register float s0 asm ("s0");
  s0 = r0[256];
  asm volatile ("" :: "w" (s0));
}

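/* Stores from a GPR.  STR (immediate) has the same [-255, 4095] offset
   range as LDR, so again only the #-256 case needs a separate SUB.  */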
/*
** m_m64_r:
**	sub	(r[0-9]+), r0, #256
**	str	r1, \[\1\]	@ float
**	bx	lr
*/
void
m_m64_r (float *r0)
{
  register float r1 asm ("r1");
  asm volatile ("" : "=r" (r1));
  r0[-64] = r1;
}

/*
** m_m63_r:
**	str	r1, \[r0, #-252\]	@ float
**	bx	lr
*/
void
m_m63_r (float *r0)
{
  register float r1 asm ("r1");
  asm volatile ("" : "=r" (r1));
  r0[-63] = r1;
}

/*
** m_m1_r:
**	str	r1, \[r0, #-4\]	@ float
**	bx	lr
*/
void
m_m1_r (float *r0)
{
  register float r1 asm ("r1");
  asm volatile ("" : "=r" (r1));
  r0[-1] = r1;
}

/*
** m_0_r:
**	str	r1, \[r0\]	@ float
**	bx	lr
*/
void
m_0_r (float *r0)
{
  register float r1 asm ("r1");
  asm volatile ("" : "=r" (r1));
  r0[0] = r1;
}

/*
** m_1_r:
**	str	r1, \[r0, #4\]	@ float
**	bx	lr
*/
void
m_1_r (float *r0)
{
  register float r1 asm ("r1");
  asm volatile ("" : "=r" (r1));
  r0[1] = r1;
}

/*
** m_255_r:
**	str	r1, \[r0, #1020\]	@ float
**	bx	lr
*/
void
m_255_r (float *r0)
{
  register float r1 asm ("r1");
  asm volatile ("" : "=r" (r1));
  r0[255] = r1;
}

/*
** m_256_r:
**	str	r1, \[r0, #1024\]	@ float
**	bx	lr
*/
void
m_256_r (float *r0)
{
  register float r1 asm ("r1");
  asm volatile ("" : "=r" (r1));
  r0[256] = r1;
}

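/* Stores from an FP register.  VSTR.32 has the same +/-[0, 1020]
   offset range as VLDR.32.  */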
/* ??? This could be done in one instruction, but without mve.fp,
   it makes more sense for memory_operand to enforce the GPR range.  */
/*
** m_m64_w:
**	sub	(r[0-9]+), r0, #256
**	vstr.32	s0, \[\1\]
**	bx	lr
*/
void
m_m64_w (float *r0)
{
  register float s0 asm ("s0");
  asm volatile ("" : "=w" (s0));
  r0[-64] = s0;
}

/*
** m_m63_w:
**	vstr.32	s0, \[r0, #-252\]
**	bx	lr
*/
void
m_m63_w (float *r0)
{
  register float s0 asm ("s0");
  asm volatile ("" : "=w" (s0));
  r0[-63] = s0;
}

/*
** m_m1_w:
**	vstr.32	s0, \[r0, #-4\]
**	bx	lr
*/
void
m_m1_w (float *r0)
{
  register float s0 asm ("s0");
  asm volatile ("" : "=w" (s0));
  r0[-1] = s0;
}

/*
** m_0_w:
**	vstr.32	s0, \[r0\]
**	bx	lr
*/
void
m_0_w (float *r0)
{
  register float s0 asm ("s0");
  asm volatile ("" : "=w" (s0));
  r0[0] = s0;
}

/*
** m_1_w:
**	vstr.32	s0, \[r0, #4\]
**	bx	lr
*/
void
m_1_w (float *r0)
{
  register float s0 asm ("s0");
  asm volatile ("" : "=w" (s0));
  r0[1] = s0;
}

/*
** m_255_w:
**	vstr.32	s0, \[r0, #1020\]
**	bx	lr
*/
void
m_255_w (float *r0)
{
  register float s0 asm ("s0");
  asm volatile ("" : "=w" (s0));
  r0[255] = s0;
}

/*
** m_256_w:
**	add	(r[0-9]+), r0, #1024
**	vstr.32	s0, \[\1\]
**	bx	lr
*/
void
m_256_w (float *r0)
{
  register float s0 asm ("s0");
  asm volatile ("" : "=w" (s0));
  r0[256] = s0;
}