/* { dg-do run { target avx512fp16 } } */
/* { dg-options "-O2 -mavx512fp16 -mavx512dq" } */

#define AVX512FP16
#include "avx512fp16-helper.h"

/* Number of _Float16 elements in a vector of AVX512F_LEN bits.  */
#define N_ELEMS (AVX512F_LEN / 16)

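/* Scalar reference for the complex half-float multiply-add: each complex
   element is a pair of _Float16 lanes (even lane = real part, odd lane =
   imaginary part), computed here in float precision.  OP1 is multiplied
   with the value already in *DEST and OP2 is the addend, matching the
   operand order of the intrinsic calls below.  C_FLAG selects the
   conjugate form, K masks whole complex elements, ZERO_MASK selects
   zeroing vs. merging, and IS_MASK3 picks which operand supplies the
   masked-off elements.  */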
void NOINLINE
EMULATE(c_fmadd_pch) (V512 * dest, V512 op1, V512 op2,
                      __mmask16 k, int zero_mask, int c_flag,
                      int is_mask3)
{
  V512 v1, v2, v3, v4, v5, v6, v7, v8;
  int i;
  int invert = 1;
  /* The conjugate form negates the cross terms of the complex product.  */
  if (c_flag == 1)
    invert = -1;

  /* Widen each _Float16 operand into two float vectors.  */
  unpack_ph_2twops(op1, &v1, &v2);
  unpack_ph_2twops(op2, &v3, &v4);
  unpack_ph_2twops(*dest, &v7, &v8);

  /* Lower 16 float lanes: one mask bit covers one complex pair.  */
  for (i = 0; i < 16; i++) {
    if (((1 << (i / 2)) & k) == 0) {
      if (zero_mask) {
        v5.f32[i] = 0;
      }
      else {
        v5.u32[i] = is_mask3 ? v3.u32[i] : v7.u32[i];
      }
    }
    else {
      if ((i % 2) == 0) {
        /* Real part: re*re - im*im (+ for the conjugate form).  */
        v5.f32[i] = v1.f32[i] * v7.f32[i]
                    - invert * (v1.f32[i+1] * v7.f32[i+1]) + v3.f32[i];
      }
      else {
        /* Imaginary part: re*im + im*re (- for the conjugate form).  */
        v5.f32[i] = v1.f32[i-1] * v7.f32[i]
                    + invert * (v1.f32[i] * v7.f32[i-1]) + v3.f32[i];
      }
    }
    /* Upper 16 float lanes use mask bits 8..15.  */
    if (((1 << (i / 2 + 8)) & k) == 0) {
      if (zero_mask) {
        v6.f32[i] = 0;
      }
      else {
        v6.u32[i] = is_mask3 ? v4.u32[i] : v8.u32[i];
      }
    }
    else {
      if ((i % 2) == 0) {
        v6.f32[i] = v2.f32[i] * v8.f32[i]
                    - invert * (v2.f32[i+1] * v8.f32[i+1]) + v4.f32[i];
      }
      else {
        v6.f32[i] = v2.f32[i-1] * v8.f32[i]
                    + invert * (v2.f32[i] * v8.f32[i-1]) + v4.f32[i];
      }
    }
  }

  /* Narrow the two float vectors back to one _Float16 vector.  */
  *dest = pack_twops_2ph(v5, v6);
}

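/* Each block below runs one intrinsic form against the emulation above:
   unmasked, merge-masked, mask3, zero-masked, and (for 512-bit vectors
   only) the explicit-rounding variants.  */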
void
TEST (void)
{
  V512 res;
  V512 exp;

  init_src();

  /* Unmasked: every complex element is computed.  */
  init_dest(&res, &exp);
  EMULATE(c_fmadd_pch)(&exp, src1, src2, NET_CMASK, 0, 0, 0);
  HF(res) = INTRINSIC (_fmadd_pch) (HF(res), HF(src1),
                                    HF(src2));
  CHECK_RESULT (&res, &exp, N_ELEMS, _fmadd_pch);

  /* Merge masking: masked-off elements keep the destination value.  */
  init_dest(&res, &exp);
  EMULATE(c_fmadd_pch)(&exp, src1, src2, HALF_MASK, 0, 0, 0);
  HF(res) = INTRINSIC (_mask_fmadd_pch) (HF(res), HALF_MASK, HF(src1),
                                         HF(src2));
  CHECK_RESULT (&res, &exp, N_ELEMS, _mask_fmadd_pch);

  /* mask3 form: masked-off elements come from the third operand.  */
  init_dest(&res, &exp);
  EMULATE(c_fmadd_pch)(&exp, src1, src2, HALF_MASK, 0, 0, 1);
  HF(res) = INTRINSIC (_mask3_fmadd_pch) (HF(res), HF(src1), HF(src2),
                                          HALF_MASK);
  CHECK_RESULT (&res, &exp, N_ELEMS, _mask3_fmadd_pch);

  /* Zero masking: masked-off elements are zeroed.  */
  init_dest(&res, &exp);
  EMULATE(c_fmadd_pch)(&exp, src1, src2, HALF_MASK, 1, 0, 0);
  HF(res) = INTRINSIC (_maskz_fmadd_pch) (HALF_MASK, HF(res), HF(src1),
                                          HF(src2));
  CHECK_RESULT (&res, &exp, N_ELEMS, _maskz_fmadd_pch);

#if AVX512F_LEN == 512
  /* Explicit-rounding variants exist only for 512-bit vectors.  */
  init_dest(&res, &exp);
  EMULATE(c_fmadd_pch)(&exp, src1, src2, NET_CMASK, 0, 0, 0);
  HF(res) = INTRINSIC (_fmadd_round_pch) (HF(res), HF(src1),
                                          HF(src2), _ROUND_NINT);
  CHECK_RESULT (&res, &exp, N_ELEMS, _fmadd_pch);

  init_dest(&res, &exp);
  EMULATE(c_fmadd_pch)(&exp, src1, src2, HALF_MASK, 0, 0, 0);
  HF(res) = INTRINSIC (_mask_fmadd_round_pch) (HF(res), HALF_MASK, HF(src1),
                                               HF(src2), _ROUND_NINT);
  CHECK_RESULT (&res, &exp, N_ELEMS, _mask_fmadd_pch);

  init_dest(&res, &exp);
  EMULATE(c_fmadd_pch)(&exp, src1, src2, HALF_MASK, 0, 0, 1);
  HF(res) = INTRINSIC (_mask3_fmadd_round_pch) (HF(res), HF(src1), HF(src2),
                                                HALF_MASK, _ROUND_NINT);
  CHECK_RESULT (&res, &exp, N_ELEMS, _mask3_fmadd_pch);

  init_dest(&res, &exp);
  EMULATE(c_fmadd_pch)(&exp, src1, src2, HALF_MASK, 1, 0, 0);
  HF(res) = INTRINSIC (_maskz_fmadd_round_pch) (HALF_MASK, HF(res), HF(src1),
                                                HF(src2), _ROUND_NINT);
  CHECK_RESULT (&res, &exp, N_ELEMS, _maskz_fmadd_pch);
#endif

  if (n_errs != 0) {
    abort ();
  }
}