/* mpn_submul_1 -- multiply the N-limb vector pointed to by UP by V0,
   subtract the N least significant limbs of the product from the limb
   vector pointed to by RP.  Return the most significant limb of the
   product, adjusted for the borrow-out from the subtraction.

Copyright 1992-1994, 1996, 2000, 2002, 2004 Free Software Foundation, Inc.

This file is part of the GNU MP Library.

The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of either:

  * the GNU Lesser General Public License as published by the Free
    Software Foundation; either version 3 of the License, or (at your
    option) any later version.

or

  * the GNU General Public License as published by the Free Software
    Foundation; either version 2 of the License, or (at your option) any
    later version.

or both in parallel, as here.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received copies of the GNU General Public License and the
GNU Lesser General Public License along with the GNU MP Library.  If not,
see https://www.gnu.org/licenses/.  */

#include "gmp-impl.h"
#include "longlong.h"
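
/* Illustrative usage (a sketch only, not part of the library; assumes a
   nail-free build, writing B for 2^GMP_NUMB_BITS):

     mp_limb_t rp[1] = { 5 };
     mp_limb_t up[1] = { 3 };
     mp_limb_t hi = mpn_submul_1 (rp, up, 1, 2);

   The product 2*3 = 6 exceeds rp[0], so rp[0] becomes B - 1 and hi is 1:
   the high limb of the product (0) plus the borrow from the subtraction.
   In general, {rp,n} - v0*{up,n} equals the new {rp,n} minus the return
   value times B^n.  */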

#if GMP_NAIL_BITS == 0

mp_limb_t
mpn_submul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t v0)
{
  mp_limb_t u0, crec, c, p1, p0, r0;

  ASSERT (n >= 1);
  ASSERT (MPN_SAME_OR_SEPARATE_P (rp, up, n));

  crec = 0;
  do
    {
      u0 = *up++;
      umul_ppmm (p1, p0, u0, v0);	/* p1:p0 = u0 * v0 */

      r0 = *rp;

      p0 = r0 - p0;			/* subtract the low product limb */
      c = r0 < p0;			/* borrow iff the subtraction wrapped */

      p1 = p1 + c;			/* fold that borrow into the high limb */

      r0 = p0 - crec;		/* cycle 0, 3, ... */
      c = p0 < r0;		/* cycle 1, 4, ... */

      crec = p1 + c;		/* cycle 2, 5, ... */

      *rp++ = r0;
    }
  while (--n != 0);

  return crec;
}

#endif

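/* The nail variants below keep the top GMP_NAIL_BITS of every limb zero,
   leaving GMP_NUMB_BITS significant bits per limb.  A sketch of why the
   pre-shift of v0 works: umul_ppmm on u0 and v0 << GMP_NAIL_BITS computes
   u0*v0 << GMP_NAIL_BITS, so the high word p1 equals
   (u0*v0) >> GMP_NUMB_BITS, which is already a valid nail-clean limb,
   while p0 >> GMP_NAIL_BITS recovers the low GMP_NUMB_BITS of u0*v0.  */
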
#if GMP_NAIL_BITS == 1

mp_limb_t
mpn_submul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t v0)
{
  mp_limb_t shifted_v0, u0, r0, p0, p1, prev_p1, cl, xl, c1, c2, c3;

  ASSERT (n >= 1);
  ASSERT (MPN_SAME_OR_SEPARATE_P (rp, up, n));
  ASSERT_MPN (rp, n);
  ASSERT_MPN (up, n);
  ASSERT_LIMB (v0);

  shifted_v0 = v0 << GMP_NAIL_BITS;
  cl = 0;
  prev_p1 = 0;
  do
    {
      u0 = *up++;
      r0 = *rp;
      umul_ppmm (p1, p0, u0, shifted_v0);
      p0 >>= GMP_NAIL_BITS;		/* low GMP_NUMB_BITS of u0 * v0 */
      SUBC_LIMB (c1, xl, r0, prev_p1);	/* xl = r0 - prev_p1, borrow in c1 */
      SUBC_LIMB (c2, xl, xl, p0);	/* xl -= p0, borrow in c2 */
      SUBC_LIMB (c3, xl, xl, cl);	/* xl -= cl, borrow in c3 */
      cl = c1 + c2 + c3;		/* accumulated borrow for the next limb */
      *rp++ = xl;
      prev_p1 = p1;			/* high product limb, subtracted next time */
    }
  while (--n != 0);

  return prev_p1 + cl;
}

#endif

#if GMP_NAIL_BITS >= 2

mp_limb_t
mpn_submul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t v0)
{
  mp_limb_t shifted_v0, u0, r0, p0, p1, prev_p1, xw, cl, xl;

  ASSERT (n >= 1);
  ASSERT (MPN_SAME_OR_SEPARATE_P (rp, up, n));
  ASSERT_MPN (rp, n);
  ASSERT_MPN (up, n);
  ASSERT_LIMB (v0);

  shifted_v0 = v0 << GMP_NAIL_BITS;
  cl = 0;
  prev_p1 = 0;
  do
    {
      u0 = *up++;
      r0 = *rp;
      umul_ppmm (p1, p0, u0, shifted_v0);
      p0 >>= GMP_NAIL_BITS;		/* low GMP_NUMB_BITS of u0 * v0 */
      xw = r0 - (prev_p1 + p0) + cl;	/* cl <= 0; with >= 2 nail bits the
					   signed result cannot wrap */
      /* Sign-extend the borrow, giving cl <= 0.  FIXME: relies on
	 arithmetic right shift of a negative value, which is
	 implementation-defined in C.  */
      cl = (mp_limb_signed_t) xw >> GMP_NUMB_BITS;
      xl = xw & GMP_NUMB_MASK;
      *rp++ = xl;
      prev_p1 = p1;
    }
  while (--n != 0);

  return prev_p1 - cl;		/* cl <= 0, so subtracting it adds the final borrow */
}

#endif
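
/* A sketch of how the contract can be cross-checked against mpn_mul_1 and
   mpn_sub_n (illustrative only; prod[] and rp2[] are hypothetical scratch
   arrays of n limbs each):

     mp_limb_t cy, borrow, hi;
     cy = mpn_mul_1 (prod, up, n, v0);       // {prod,n} + cy*B^n = v0*{up,n}
     borrow = mpn_sub_n (rp2, rp, prod, n);  // {rp2,n} - borrow*B^n = {rp,n} - {prod,n}
     hi = mpn_submul_1 (rp, up, n, v0);
     ASSERT (hi == cy + borrow);
     ASSERT (mpn_cmp (rp, rp2, n) == 0);
*/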