/* mpn_toom43_mul -- Multiply {ap,an} and {bp,bn} where an is nominally 4/3
   times as large as bn.  Or more accurately, bn < an < 2 bn.

   Contributed to the GNU project by Marco Bodrato.

   The idea of applying toom to unbalanced multiplication is due to Marco
   Bodrato and Alberto Zanoni.

   THE FUNCTION IN THIS FILE IS INTERNAL WITH A MUTABLE INTERFACE.  IT IS ONLY
   SAFE TO REACH IT THROUGH DOCUMENTED INTERFACES.  IN FACT, IT IS ALMOST
   GUARANTEED THAT IT WILL CHANGE OR DISAPPEAR IN A FUTURE GNU MP RELEASE.

Copyright 2009, 2020 Free Software Foundation, Inc.

This file is part of the GNU MP Library.

The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of either:

  * the GNU Lesser General Public License as published by the Free
    Software Foundation; either version 3 of the License, or (at your
    option) any later version.

or

  * the GNU General Public License as published by the Free Software
    Foundation; either version 2 of the License, or (at your option) any
    later version.

or both in parallel, as here.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received copies of the GNU General Public License and the
GNU Lesser General Public License along with the GNU MP Library.  If not,
see https://www.gnu.org/licenses/.  */


#include "gmp-impl.h"

/* Evaluate in: -2, -1, 0, +1, +2, +inf

  <-s-><--n--><--n--><--n-->
   ___ ______ ______ ______
  |a3_|___a2_|___a1_|___a0_|
        |_b2_|___b1_|___b0_|
        <-t--><--n--><--n-->

  v0   =  a0              *  b0            #    A(0)*B(0)
  v1   = (a0+ a1+ a2+ a3) * (b0+ b1+ b2)   #    A(1)*B(1)    ah  <= 3   bh  <= 2
  vm1  = (a0- a1+ a2- a3) * (b0- b1+ b2)   #   A(-1)*B(-1)  |ah| <= 1  |bh| <= 1
  v2   = (a0+2a1+4a2+8a3) * (b0+2b1+4b2)   #    A(2)*B(2)    ah  <= 14  bh  <= 6
  vm2  = (a0-2a1+4a2-8a3) * (b0-2b1+4b2)   #   A(-2)*B(-2)  |ah| <= 9  |bh| <= 4
  vinf =               a3 *            b2  # A(inf)*B(inf)
*/
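
/* A small worked instance (illustrative, not from the original source):
   taking single-digit coefficients A(x) = 7x^3 + 5x^2 + 3x + 2 and
   B(x) = 4x^2 + 6x + 1, the six evaluations are

     v0   = A(0)*B(0)     =   2 *  1 =    2
     v1   = A(1)*B(1)     =  17 * 11 =  187
     vm1  = A(-1)*B(-1)   =  -3 * -1 =    3
     v2   = A(2)*B(2)     =  84 * 29 = 2436
     vm2  = A(-2)*B(-2)   = -40 *  5 = -200
     vinf = A(inf)*B(inf) =   7 *  4 =   28

   Six independent values pin down the six coefficients of the degree-5
   product A(x)*B(x); mpn_toom_interpolate_6pts solves exactly that linear
   system, with each "digit" here standing for an n-limb block.  */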

void
mpn_toom43_mul (mp_ptr pp,
                mp_srcptr ap, mp_size_t an,
                mp_srcptr bp, mp_size_t bn, mp_ptr scratch)
{
  mp_size_t n, s, t;
  enum toom6_flags flags;
  mp_limb_t cy;

#define a0  ap
#define a1  (ap + n)
#define a2  (ap + 2 * n)
#define a3  (ap + 3 * n)
#define b0  bp
#define b1  (bp + n)
#define b2  (bp + 2 * n)

  n = 1 + (3 * an >= 4 * bn ? (an - 1) >> 2 : (bn - 1) / (size_t) 3);

  s = an - 3 * n;
  t = bn - 2 * n;

  ASSERT (0 < s && s <= n);
  ASSERT (0 < t && t <= n);

  /* This is true whenever an >= 25 or bn >= 19, I think.  It
     guarantees that we can fit 5 values of size n+1 in the product
     area.  */
  ASSERT (s + t >= 5);
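
  /* Example (illustrative): an = 20, bn = 15 gives 3*an = 60 >= 4*bn = 60,
     so n = 1 + ((an - 1) >> 2) = 5, then s = an - 3*n = 5 and
     t = bn - 2*n = 5, satisfying the assertions above.  In general
     n = max(ceil(an/4), ceil(bn/3)), splitting A into four and B into
     three blocks of n limbs, plus the short top blocks of s and t limbs.  */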

#define v0    pp                          /* 2n   */
#define vm1   (scratch)                   /* 2n+1 */
#define v1    (pp + 2 * n)                /* 2n+1 */
#define vm2   (scratch + 2 * n + 1)       /* 2n+1 */
#define v2    (scratch + 4 * n + 2)       /* 2n+1 */
#define vinf  (pp + 5 * n)                /* s+t  */
#define bs1   pp                          /* n+1 */
#define bsm1  (scratch + 2 * n + 2)       /* n+1 */
#define asm1  (scratch + 3 * n + 3)       /* n+1 */
#define asm2  (scratch + 4 * n + 4)       /* n+1 */
#define bsm2  (pp + n + 1)                /* n+1 */
#define bs2   (pp + 2 * n + 2)            /* n+1 */
#define as2   (pp + 3 * n + 3)            /* n+1 */
#define as1   (pp + 4 * n + 4)            /* n+1 */

  /* Total scratch need is 6 * n + 3 + 1; we allocate one extra
     limb, because products will overwrite 2n+2 limbs.  */
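
  /* Layout note (derived from the defines above): vm1, vm2 and v2 occupy
     scratch[0..2n], scratch[2n+1..4n+1] and scratch[4n+2..6n+2], while
     bsm1, asm1 and asm2 are parked inside the vm2 and v2 areas.  The
     products below are therefore formed in the order vm1, vm2, v2, so
     each evaluation value is consumed before its storage is overwritten;
     the extra limb absorbs mpn_mul_n writing 2n+2 limbs when it is given
     (n+1)-limb operands.  */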

#define a0a2  scratch
#define b0b2  scratch
#define a1a3  asm1
#define b1d   bsm1

  /* Compute as2 and asm2.  */
  flags = (enum toom6_flags) (toom6_vm2_neg & mpn_toom_eval_dgr3_pm2 (as2, asm2, ap, n, s, a1a3));
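  /* mpn_toom_eval_dgr3_pm2 writes as2 = A(2) and asm2 = |A(-2)|, and its
     return value reports the sign of A(-2); ANDing with toom6_vm2_neg
     records a negative A(-2) for the interpolation step.  */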

  /* Compute bs2 and bsm2.  */
  b1d[n] = mpn_lshift (b1d, b1, n, 1);          /* 2b1 */
#if HAVE_NATIVE_mpn_addlsh2_n
  cy = mpn_addlsh2_n (b0b2, b0, b2, t);         /* 4b2 + b0 */
#else
  cy = mpn_lshift (b0b2, b2, t, 2);             /* 4b2 */
  cy += mpn_add_n (b0b2, b0b2, b0, t);          /* 4b2 + b0 */
#endif
  if (t != n)
    cy = mpn_add_1 (b0b2 + t, b0 + t, n - t, cy);
  b0b2[n] = cy;

#if HAVE_NATIVE_mpn_add_n_sub_n
  if (mpn_cmp (b0b2, b1d, n+1) < 0)
    {
      mpn_add_n_sub_n (bs2, bsm2, b1d, b0b2, n+1);
      flags = (enum toom6_flags) (flags ^ toom6_vm2_neg);
    }
  else
    {
      mpn_add_n_sub_n (bs2, bsm2, b0b2, b1d, n+1);
    }
#else
  mpn_add_n (bs2, b0b2, b1d, n+1);
  if (mpn_cmp (b0b2, b1d, n+1) < 0)
    {
      mpn_sub_n (bsm2, b1d, b0b2, n+1);
      flags = (enum toom6_flags) (flags ^ toom6_vm2_neg);
    }
  else
    {
      mpn_sub_n (bsm2, b0b2, b1d, n+1);
    }
#endif
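
  /* Here bs2 = B(2) = (b0 + 4b2) + 2b1 and bsm2 = |B(-2)| =
     |(b0 + 4b2) - 2b1|; the sign of B(-2) has been folded into flags as
     toom6_vm2_neg.  */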

  /* Compute as1 and asm1.  */
  flags = (enum toom6_flags) (flags ^ (toom6_vm1_neg & mpn_toom_eval_dgr3_pm1 (as1, asm1, ap, n, s, a0a2)));

  /* Compute bs1 and bsm1.  */
  bsm1[n] = mpn_add (bsm1, b0, n, b2, t);
#if HAVE_NATIVE_mpn_add_n_sub_n
  if (bsm1[n] == 0 && mpn_cmp (bsm1, b1, n) < 0)
    {
      cy = mpn_add_n_sub_n (bs1, bsm1, b1, bsm1, n);
      bs1[n] = cy >> 1;
      flags = (enum toom6_flags) (flags ^ toom6_vm1_neg);
    }
  else
    {
      cy = mpn_add_n_sub_n (bs1, bsm1, bsm1, b1, n);
      bs1[n] = bsm1[n] + (cy >> 1);
      bsm1[n] -= cy & 1;
    }
#else
  bs1[n] = bsm1[n] + mpn_add_n (bs1, bsm1, b1, n);
  if (bsm1[n] == 0 && mpn_cmp (bsm1, b1, n) < 0)
    {
      mpn_sub_n (bsm1, b1, bsm1, n);
      flags = (enum toom6_flags) (flags ^ toom6_vm1_neg);
    }
  else
    {
      bsm1[n] -= mpn_sub_n (bsm1, bsm1, b1, n);
    }
#endif
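
  /* Here bs1 = B(1) = (b0 + b2) + b1 and bsm1 = |B(-1)| = |(b0 + b2) - b1|,
     with the sign of B(-1) recorded in flags as toom6_vm1_neg.  As the code
     above shows, mpn_add_n_sub_n packs the addition carry in bit 1 of its
     return value and the subtraction borrow in bit 0, hence the cy >> 1 and
     cy & 1 adjustments of the top limbs.  */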

  ASSERT (as1[n] <= 3);
  ASSERT (bs1[n] <= 2);
  ASSERT (asm1[n] <= 1);
  ASSERT (bsm1[n] <= 1);
  ASSERT (as2[n] <= 14);
  ASSERT (bs2[n] <= 6);
  ASSERT (asm2[n] <= 9);
  ASSERT (bsm2[n] <= 4);

  /* vm1, 2n+1 limbs */
  vm1[2 * n] = 0;
  mpn_mul_n (vm1, asm1, bsm1, n + (asm1[n] | bsm1[n]));  /* W4 */
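  /* asm1[n] and bsm1[n] are each 0 or 1.  When both are 0 the top limbs
     contribute nothing, so an n-limb product suffices and vm1[2n] keeps the
     zero stored above; otherwise the (n+1)-limb product fills it.  */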

  /* vm2, 2n+1 limbs */
  mpn_mul_n (vm2, asm2, bsm2, n+1);  /* W2 */

  /* v2, 2n+1 limbs */
  mpn_mul_n (v2, as2, bs2, n+1);  /* W1 */

  /* v1, 2n+1 limbs */
  mpn_mul_n (v1, as1, bs1, n+1);  /* W3 */

  /* vinf, s+t limbs */  /* W0 */
  if (s > t)  mpn_mul (vinf, a3, s, b2, t);
  else        mpn_mul (vinf, b2, t, a3, s);
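  /* mpn_mul requires its first operand to be at least as long as its
     second, hence the branch on s > t.  */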

  /* v0, 2n limbs */
  mpn_mul_n (v0, ap, bp, n);  /* W5 */

  mpn_toom_interpolate_6pts (pp, n, flags, vm1, vm2, v2, t + s);
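
  /* The six point values W0..W5 determine the six n-limb coefficients of
     the degree-5 product polynomial; mpn_toom_interpolate_6pts combines
     v0, v1 and vinf (already in pp) with vm1, vm2 and v2 (in scratch) and
     leaves the an+bn limb result in pp.  Callers size the scratch area via
     mpn_toom43_mul_itch in gmp-impl.h.  */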

#undef v0
#undef vm1
#undef v1
#undef vm2
#undef v2
#undef vinf
#undef bs1
#undef bs2
#undef bsm1
#undef bsm2
#undef asm1
#undef asm2
/* #undef as1 */
/* #undef as2 */
#undef a0a2
#undef b0b2
#undef a1a3
#undef b1d
#undef a0
#undef a1
#undef a2
#undef a3
#undef b0
#undef b1
#undef b2
}