/* { dg-do run { target avx512fp16 } } */
/* { dg-options "-O2 -mavx512fp16 -mavx512dq" } */
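/* Functional test for the packed FP16 complex multiply intrinsics:
   the plain, mask and maskz _fmul_pch forms (and, at 512 bits, the
   explicit-rounding forms) are compared against a scalar float
   emulation.  */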

#define AVX512FP16
#include "avx512fp16-helper.h"

#define N_ELEMS (AVX512F_LEN / 16)

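/* Reference implementation: compute the complex products in float.
   One mask bit governs each complex pair (two consecutive fp16
   lanes); zero_mask selects zeroing vs. merging for masked-off
   pairs, and c_flag selects the conjugate form.  */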
void NOINLINE
EMULATE(c_fmul_pch) (V512 * dest, V512 op1, V512 op2,
                     __mmask16 k, int zero_mask, int c_flag)
{
  V512 v1, v2, v3, v4, v5, v6, v7, v8;
  int i;
  int invert = 1;
  /* The conjugate form flips the sign of the products that use
     op1's imaginary part.  */
  if (c_flag == 1)
    invert = -1;

  /* Widen the fp16 operands and the old destination to float.  */
  unpack_ph_2twops(op1, &v1, &v2);
  unpack_ph_2twops(op2, &v3, &v4);
  unpack_ph_2twops(*dest, &v7, &v8);

  for (i = 0; i < 16; i++) {
    /* Mask bit i/2 covers the real/imaginary lane pair i, i+1.  */
    if (((1 << (i / 2)) & k) == 0) {
      if (zero_mask) {
        v5.f32[i] = 0;
      }
      else {
        v5.u32[i] = v7.u32[i];
      }
    }
    else {
      if ((i % 2) == 0) {
        /* Real lane: re1 * re2 -/+ im1 * im2.  */
        v5.f32[i] = v1.f32[i] * v3.f32[i]
                    - invert * (v1.f32[i+1] * v3.f32[i+1]);
      }
      else {
        /* Imaginary lane: re1 * im2 +/- im1 * re2.  */
        v5.f32[i] = v1.f32[i-1] * v3.f32[i]
                    + invert * (v1.f32[i] * v3.f32[i-1]);
      }
    }
    /* Upper half of the vector: pairs 8..15 use the high mask bits.  */
    if (((1 << (i / 2 + 8)) & k) == 0) {
      if (zero_mask) {
        v6.f32[i] = 0;
      }
      else {
        v6.u32[i] = v8.u32[i];
      }
    }
    else {
      if ((i % 2) == 0) {
        v6.f32[i] = v2.f32[i] * v4.f32[i]
                    - invert * (v2.f32[i+1] * v4.f32[i+1]);
      }
      else {
        v6.f32[i] = v2.f32[i-1] * v4.f32[i]
                    + invert * (v2.f32[i] * v4.f32[i-1]);
      }
    }
  }

  /* Narrow the two float halves back to one fp16 vector.  */
  *dest = pack_twops_2ph(v5, v6);
}
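
/* Drive the intrinsic against the emulation for the unmasked,
   merge-masked and zero-masked forms; the explicit-rounding forms
   are exercised for the 512-bit case only.  */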
void
TEST (void)
{
  V512 res;
  V512 exp;

  init_src();

  /* No write mask.  */
  EMULATE(c_fmul_pch)(&exp, src1, src2, NET_CMASK, 0, 0);
  HF(res) = INTRINSIC (_fmul_pch) (HF(src1), HF(src2));
  CHECK_RESULT (&res, &exp, N_ELEMS, _fmul_pch);

  /* Merge masking.  */
  init_dest(&res, &exp);
  EMULATE(c_fmul_pch)(&exp, src1, src2, HALF_MASK, 0, 0);
  HF(res) = INTRINSIC (_mask_fmul_pch) (HF(res), HALF_MASK, HF(src1),
                                        HF(src2));
  CHECK_RESULT (&res, &exp, N_ELEMS, _mask_fmul_pch);

  /* Zero masking.  */
  init_dest(&res, &exp);
  EMULATE(c_fmul_pch)(&exp, src1, src2, HALF_MASK, 1, 0);
  HF(res) = INTRINSIC (_maskz_fmul_pch) (HALF_MASK, HF(src1),
                                         HF(src2));
  CHECK_RESULT (&res, &exp, N_ELEMS, _maskz_fmul_pch);

#if AVX512F_LEN == 512
  /* Explicit-rounding forms exist only for the 512-bit variant.  */
  init_dest(&res, &exp);
  EMULATE(c_fmul_pch)(&exp, src1, src2, NET_CMASK, 0, 0);
  HF(res) = INTRINSIC (_fmul_round_pch) (HF(src1), HF(src2), _ROUND_NINT);
  CHECK_RESULT (&res, &exp, N_ELEMS, _fmul_pch);

  init_dest(&res, &exp);
  EMULATE(c_fmul_pch)(&exp, src1, src2, HALF_MASK, 0, 0);
  HF(res) = INTRINSIC (_mask_fmul_round_pch) (HF(res), HALF_MASK, HF(src1),
                                              HF(src2), _ROUND_NINT);
  CHECK_RESULT (&res, &exp, N_ELEMS, _mask_fmul_pch);

  init_dest(&res, &exp);
  EMULATE(c_fmul_pch)(&exp, src1, src2, HALF_MASK, 1, 0);
  HF(res) = INTRINSIC (_maskz_fmul_round_pch) (HALF_MASK, HF(src1),
                                               HF(src2), _ROUND_NINT);
  CHECK_RESULT (&res, &exp, N_ELEMS, _maskz_fmul_pch);
#endif

  if (n_errs != 0) {
    abort ();
  }
}