/* Linear Congruential pseudo-random number generator functions.

Copyright 1999-2003, 2005, 2015 Free Software Foundation, Inc.

This file is part of the GNU MP Library.

The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of either:

  * the GNU Lesser General Public License as published by the Free
    Software Foundation; either version 3 of the License, or (at your
    option) any later version.

or

  * the GNU General Public License as published by the Free Software
    Foundation; either version 2 of the License, or (at your option) any
    later version.

or both in parallel, as here.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received copies of the GNU General Public License and the
GNU Lesser General Public License along with the GNU MP Library.  If not,
see https://www.gnu.org/licenses/.  */

#include "gmp-impl.h"


/* State structure for LC, the RNG_STATE() pointer in a gmp_randstate_t.

   _mp_seed holds the current seed value, in the range 0 to 2^m2exp-1.
   SIZ(_mp_seed) is fixed at BITS_TO_LIMBS(_mp_m2exp) and the value is
   padded with high zero limbs if necessary.  ALLOC(_mp_seed) is the current
   size of PTR(_mp_seed) in the usual way.  There only needs to be
   BITS_TO_LIMBS(_mp_m2exp) limbs allocated, but the mpz functions in the
   initialization and seeding end up making it a bit more than this.

   _mp_a is the "a" multiplier, in the range 0 to 2^m2exp-1.  SIZ(_mp_a) is
   the size of the value in the normal way for an mpz_t, except that a value
   of zero is held with SIZ(_mp_a)==1 and PTR(_mp_a)[0]==0.  This makes it
   easy to call mpn_mul, and the case of a==0 is highly un-random and not
   worth any trouble to optimize.

   {_cp,_cn} is the "c" addend.  Normally _cn is 1, but when nails are in
   use a ulong can be bigger than one limb, and in this case _cn is 2 if
   necessary.  c==0 is stored as _cp[0]==0 and _cn==1, which makes it easy
   to call __GMPN_ADD.  c==0 is fairly un-random so isn't worth optimizing.

   _mp_m2exp gives the modulus, namely 2^m2exp.  We demand m2exp>=1, since
   m2exp==0 would mean no bits at all out of each iteration, which makes no
   sense.  */

typedef struct {
  mpz_t          _mp_seed;
  mpz_t          _mp_a;
  mp_size_t      _cn;
  mp_limb_t      _cp[LIMBS_PER_ULONG];
  unsigned long  _mp_m2exp;
} gmp_rand_lc_struct;

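/* For orientation: the update implemented by lc() below is the standard
   linear congruential recurrence reduced modulo a power of two,
   seed = (a * seed + c) mod 2^m2exp.  A minimal sketch of one such update
   using only the public mpz interface (illustrative only, not used by the
   library, which works on the raw limbs as described above):

     static void
     lc_update_sketch (mpz_t seed, const mpz_t a, unsigned long c,
                       unsigned long m2exp)
     {
       mpz_mul (seed, seed, a);              // seed = a * seed
       mpz_add_ui (seed, seed, c);           // seed = seed + c
       mpz_fdiv_r_2exp (seed, seed, m2exp);  // seed = seed mod 2^m2exp
     }

   lc() additionally hands back only the top (m2exp + 1) / 2 bits of the new
   seed, discarding the low half, since the low bits of a mod-2^k LC
   generator are much less random than the high ones.  */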

/* lc (rp, state) -- Generate next number in LC sequence.  Return the
   number of valid bits in the result.  Discards the lower half of the
   result.  */

static unsigned long int
lc (mp_ptr rp, gmp_randstate_ptr rstate)
{
  mp_ptr tp, seedp, ap;
  mp_size_t ta;
  mp_size_t tn, seedn, an;
  unsigned long int m2exp;
  unsigned long int bits;
  mp_size_t xn;
  gmp_rand_lc_struct *p;
  TMP_DECL;

  p = (gmp_rand_lc_struct *) RNG_STATE (rstate);

  m2exp = p->_mp_m2exp;

  seedp = PTR (p->_mp_seed);
  seedn = SIZ (p->_mp_seed);

  ap = PTR (p->_mp_a);
  an = SIZ (p->_mp_a);

  /* Allocate temporary storage.  Let there be room for calculation of
     (A * seed + C) % M, or M if bigger than that.  */

  TMP_MARK;

  ta = an + seedn + 1;
  tn = BITS_TO_LIMBS (m2exp);
  if (ta <= tn) /* that is, if (ta < tn + 1) */
    {
      mp_size_t tmp = an + seedn;
      ta = tn + 1;
      tp = TMP_ALLOC_LIMBS (ta);
      MPN_ZERO (&tp[tmp], ta - tmp); /* mpn_mul won't zero it out.  */
    }
  else
    tp = TMP_ALLOC_LIMBS (ta);

  /* t = a * seed.  NOTE: an is always > 0; see initialization.  */
  ASSERT (seedn >= an && an > 0);
  mpn_mul (tp, seedp, seedn, ap, an);

  /* t = t + c.  NOTE: tn is always >= p->_cn (precondition for __GMPN_ADD);
     see initialization.  */
  ASSERT (tn >= p->_cn);
  mpn_add (tp, tp, tn, p->_cp, p->_cn);

  /* t = t % m */
  tp[m2exp / GMP_NUMB_BITS] &= (CNST_LIMB (1) << m2exp % GMP_NUMB_BITS) - 1;
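  /* When m2exp is an exact multiple of GMP_NUMB_BITS the mask above is
     zero, so the limb cleared is the one just past the top of the result;
     that limb lies within the ta limbs allocated (ta >= tn + 1), so the
     write is harmless.  */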

  /* Save result as next seed.  */
  MPN_COPY (PTR (p->_mp_seed), tp, tn);

  /* Discard the lower m2exp/2 bits of the result.  */
  bits = m2exp / 2;
  xn = bits / GMP_NUMB_BITS;

  tn -= xn;
  if (tn > 0)
    {
      unsigned int cnt = bits % GMP_NUMB_BITS;
      if (cnt != 0)
        {
          mpn_rshift (tp, tp + xn, tn, cnt);
          MPN_COPY_INCR (rp, tp, xn + 1);
        }
      else  /* Even limb boundary.  */
        MPN_COPY_INCR (rp, tp + xn, tn);
    }

  TMP_FREE;

  /* Return the number of valid bits in the result.  */
  return (m2exp + 1) / 2;
}

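/* Worked example (assuming 64-bit limbs and no nails): with m2exp == 64,
   lc() discards the low 32 bits and reports 32 valid bits in rp[0]; with
   m2exp == 61 it discards the low 30 bits and reports (61 + 1) / 2 == 31
   valid bits.  */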

/* Obtain a sequence of random numbers.  */
static void
randget_lc (gmp_randstate_ptr rstate, mp_ptr rp, unsigned long int nbits)
{
  unsigned long int rbitpos;
  int chunk_nbits;
  mp_ptr tp;
  mp_size_t tn;
  gmp_rand_lc_struct *p;
  TMP_DECL;

  p = (gmp_rand_lc_struct *) RNG_STATE (rstate);

  TMP_MARK;

  chunk_nbits = p->_mp_m2exp / 2;
  tn = BITS_TO_LIMBS (chunk_nbits);

  tp = TMP_ALLOC_LIMBS (tn);

  rbitpos = 0;
  while (rbitpos + chunk_nbits <= nbits)
    {
      mp_ptr r2p = rp + rbitpos / GMP_NUMB_BITS;

      if (rbitpos % GMP_NUMB_BITS != 0)
        {
          mp_limb_t savelimb, rcy;
          /* Target of new chunk is not bit aligned.  Use temp space
             and align things by shifting it up.  */
          lc (tp, rstate);
          savelimb = r2p[0];
          rcy = mpn_lshift (r2p, tp, tn, rbitpos % GMP_NUMB_BITS);
          r2p[0] |= savelimb;
          /* bogus */
          if ((chunk_nbits % GMP_NUMB_BITS + rbitpos % GMP_NUMB_BITS)
              > GMP_NUMB_BITS)
            r2p[tn] = rcy;
        }
      else
        {
          /* Target of new chunk is bit aligned.  Let `lc' put bits
             directly into our target variable.  */
          lc (r2p, rstate);
        }
      rbitpos += chunk_nbits;
    }

  /* Handle last [0..chunk_nbits) bits.  */
  if (rbitpos != nbits)
    {
      mp_ptr r2p = rp + rbitpos / GMP_NUMB_BITS;
      int last_nbits = nbits - rbitpos;
      tn = BITS_TO_LIMBS (last_nbits);
      lc (tp, rstate);
      if (rbitpos % GMP_NUMB_BITS != 0)
        {
          mp_limb_t savelimb, rcy;
          /* Target of new chunk is not bit aligned.  Use temp space
             and align things by shifting it up.  */
          savelimb = r2p[0];
          rcy = mpn_lshift (r2p, tp, tn, rbitpos % GMP_NUMB_BITS);
          r2p[0] |= savelimb;
          if (rbitpos + tn * GMP_NUMB_BITS - rbitpos % GMP_NUMB_BITS < nbits)
            r2p[tn] = rcy;
        }
      else
        {
          MPN_COPY (r2p, tp, tn);
        }
      /* Mask off top bits if needed.  */
      if (nbits % GMP_NUMB_BITS != 0)
        rp[nbits / GMP_NUMB_BITS]
          &= ~(~CNST_LIMB (0) << nbits % GMP_NUMB_BITS);
    }

  TMP_FREE;
}

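/* Worked example (assuming 64-bit limbs and no nails): with m2exp == 64 the
   chunk size is chunk_nbits == 32, so a randget_lc() request for nbits ==
   100 fills bit positions 0, 32 and 64 with three full chunks, then makes
   one more lc() call for the final 4 bits at positions 96..99 and masks off
   the surplus bits of that last chunk.  */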

static void
randseed_lc (gmp_randstate_ptr rstate, mpz_srcptr seed)
{
  gmp_rand_lc_struct *p = (gmp_rand_lc_struct *) RNG_STATE (rstate);
  mpz_ptr seedz = p->_mp_seed;
  mp_size_t seedn = BITS_TO_LIMBS (p->_mp_m2exp);

  /* Store p->_mp_seed as an unnormalized integer with size enough
     for numbers up to 2^m2exp-1.  That size can't be zero.  */
  mpz_fdiv_r_2exp (seedz, seed, p->_mp_m2exp);
  MPN_ZERO (&PTR (seedz)[SIZ (seedz)], seedn - SIZ (seedz));
  SIZ (seedz) = seedn;
}

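/* For instance, with 64-bit limbs and m2exp == 70, seedn is 2: a seed that
   reduces to a single limb has its high limb zeroed explicitly and
   SIZ(_mp_seed) forced to 2, giving lc() the fixed-size operand it
   expects.  */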

static void
randclear_lc (gmp_randstate_ptr rstate)
{
  gmp_rand_lc_struct *p = (gmp_rand_lc_struct *) RNG_STATE (rstate);

  mpz_clear (p->_mp_seed);
  mpz_clear (p->_mp_a);
  (*__gmp_free_func) (p, sizeof (gmp_rand_lc_struct));
}

static void randiset_lc (gmp_randstate_ptr, gmp_randstate_srcptr);

static const gmp_randfnptr_t Linear_Congruential_Generator = {
  randseed_lc,
  randget_lc,
  randclear_lc,
  randiset_lc
};

static void
randiset_lc (gmp_randstate_ptr dst, gmp_randstate_srcptr src)
{
  gmp_rand_lc_struct *dstp, *srcp;

  srcp = (gmp_rand_lc_struct *) RNG_STATE (src);
  dstp = (gmp_rand_lc_struct *) (*__gmp_allocate_func) (sizeof (gmp_rand_lc_struct));

  RNG_STATE (dst) = (mp_limb_t *) (void *) dstp;
  RNG_FNPTR (dst) = (void *) &Linear_Congruential_Generator;

  /* _mp_seed and _mp_a might be unnormalized (high zero limbs), but
     mpz_init_set won't worry about that.  */
  mpz_init_set (dstp->_mp_seed, srcp->_mp_seed);
  mpz_init_set (dstp->_mp_a, srcp->_mp_a);

  dstp->_cn = srcp->_cn;

  dstp->_cp[0] = srcp->_cp[0];
  if (LIMBS_PER_ULONG > 1)
    dstp->_cp[1] = srcp->_cp[1];
  if (LIMBS_PER_ULONG > 2)  /* usually there's only 1 or 2 */
    MPN_COPY (dstp->_cp + 2, srcp->_cp + 2, LIMBS_PER_ULONG - 2);

  dstp->_mp_m2exp = srcp->_mp_m2exp;
}


void
gmp_randinit_lc_2exp (gmp_randstate_ptr rstate,
                      mpz_srcptr a,
                      unsigned long int c,
                      mp_bitcnt_t m2exp)
{
  gmp_rand_lc_struct *p;
  mp_size_t seedn = BITS_TO_LIMBS (m2exp);

  ASSERT_ALWAYS (m2exp != 0);

  p = __GMP_ALLOCATE_FUNC_TYPE (1, gmp_rand_lc_struct);
  RNG_STATE (rstate) = (mp_limb_t *) (void *) p;
  RNG_FNPTR (rstate) = (void *) &Linear_Congruential_Generator;

  /* Allocate m2exp bits of space for p->_mp_seed, and set the initial
     seed to 1.  */
  mpz_init2 (p->_mp_seed, m2exp);
  MPN_ZERO (PTR (p->_mp_seed), seedn);
  SIZ (p->_mp_seed) = seedn;
  PTR (p->_mp_seed)[0] = 1;

  /* "a", forced to the range 0 to 2^m2exp-1.  */
  mpz_init (p->_mp_a);
  mpz_fdiv_r_2exp (p->_mp_a, a, m2exp);

  /* Force SIZ(a) != 0 so that lc() need not check for that special case.  */
  if (SIZ (p->_mp_a) == 0)
    {
      SIZ (p->_mp_a) = 1;
      MPZ_NEWALLOC (p->_mp_a, 1)[0] = CNST_LIMB (0);
    }

  MPN_SET_UI (p->_cp, p->_cn, c);

  /* Internally we may discard any bits of c above m2exp.  The following
     code ensures that __GMPN_ADD in lc() will always work.  */
  if (seedn < p->_cn)
    p->_cn = (p->_cp[0] != 0);

  p->_mp_m2exp = m2exp;
}
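

/* Usage sketch (illustrative only; not part of this file): the entry point
   above is normally exercised through a gmp_randstate_t and the generic
   random-number functions.  The multiplier and seed below are arbitrary
   example values, not recommendations.

     #include <stdio.h>
     #include <gmp.h>

     int
     main (void)
     {
       gmp_randstate_t rstate;
       mpz_t a, r;

       mpz_init_set_ui (a, 0x5851f42d);           // example multiplier "a"
       mpz_init (r);

       gmp_randinit_lc_2exp (rstate, a, 1, 64);   // c = 1, modulus 2^64
       gmp_randseed_ui (rstate, 12345);

       mpz_urandomb (r, rstate, 100);             // 100 bits via randget_lc()
       gmp_printf ("%Zx\n", r);

       gmp_randclear (rstate);
       mpz_clear (a);
       mpz_clear (r);
       return 0;
     }

   For multipliers selected by GMP itself, gmp_randinit_lc_2exp_size() can
   be used instead.  */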