/* crypto/bn/bn_lcl.h */
/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
 * All rights reserved.
 *
 * This package is an SSL implementation written
 * by Eric Young (eay@cryptsoft.com).
 * The implementation was written so as to conform with Netscapes SSL.
 *
 * This library is free for commercial and non-commercial use as long as
 * the following conditions are aheared to.  The following conditions
 * apply to all code found in this distribution, be it the RC4, RSA,
 * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
 * included with this distribution is covered by the same copyright terms
 * except that the holder is Tim Hudson (tjh@cryptsoft.com).
 *
 * Copyright remains Eric Young's, and as such any Copyright notices in
 * the code are not to be removed.
 * If this package is used in a product, Eric Young should be given attribution
 * as the author of the parts of the library used.
 * This can be in the form of a textual message at program startup or
 * in documentation (online or textual) provided with the package.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    "This product includes cryptographic software written by
 *     Eric Young (eay@cryptsoft.com)"
 *    The word 'cryptographic' can be left out if the rouines from the library
 *    being used are not cryptographic related :-).
 * 4. If you include any Windows specific code (or a derivative thereof) from
 *    the apps directory (application code) you must include an acknowledgement:
 *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
 *
 * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * The licence and distribution terms for any publically available version or
 * derivative of this code cannot be changed.  i.e. this code cannot simply be
 * copied and put under another distribution licence
 * [including the GNU Public Licence.]
 */
/* ====================================================================
 * Copyright (c) 1998-2000 The OpenSSL Project.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * 3. All advertising materials mentioning features or use of this
 *    software must display the following acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
 *
 * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
 *    endorse or promote products derived from this software without
 *    prior written permission. For written permission, please contact
 *    openssl-core@openssl.org.
 *
 * 5. Products derived from this software may not be called "OpenSSL"
 *    nor may "OpenSSL" appear in their names without prior written
 *    permission of the OpenSSL Project.
 *
 * 6. Redistributions of any form whatsoever must retain the following
 *    acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit (http://www.openssl.org/)"
 *
 * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
 * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
 * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 * ====================================================================
 *
 * This product includes cryptographic software written by Eric Young
 * (eay@cryptsoft.com).  This product includes software written by Tim
 * Hudson (tjh@cryptsoft.com).
 *
 */

#ifndef HEADER_BN_LCL_H
# define HEADER_BN_LCL_H

# include "internal/bn_int.h"

#ifdef __cplusplus
extern "C" {
#endif

/*-
 * Bignum consistency macros
 * There is one "API" macro, bn_fix_top(), for stripping leading zeroes from
 * bignum data after direct manipulations on the data. There is also an
 * "internal" macro, bn_check_top(), for verifying that there are no leading
 * zeroes. Unfortunately, some auditing is required due to the fact that
 * bn_fix_top() has become an overabused duct-tape because bignum data is
 * occasionally passed around in an inconsistent state. So the following
 * changes have been made to sort this out:
 * - bn_fix_top()'s implementation has been moved to bn_correct_top()
 * - if BN_DEBUG isn't defined, bn_fix_top() maps to bn_correct_top(), and
 *   bn_check_top() is as before.
 * - if BN_DEBUG *is* defined:
 *   - bn_check_top() tries to pollute unused words even if the bignum 'top' is
 *     consistent. (ed: only if BN_DEBUG_RAND is defined)
 *   - bn_fix_top() maps to bn_check_top() rather than "fixing" anything.
 * The idea is to have debug builds flag up inconsistent bignums when they
 * occur. If that occurs in a bn_fix_top(), we examine the code in question; if
 * the use of bn_fix_top() was appropriate (i.e. it follows directly after code
 * that manipulates the bignum) it is converted to bn_correct_top(), and if it
 * was not appropriate, we convert it permanently to bn_check_top() and track
 * down the cause of the bug. Eventually, no internal code should be using the
 * bn_fix_top() macro. External applications and libraries should try this with
 * their own code too, both in terms of building against the OpenSSL headers
 * with BN_DEBUG defined *and* linking with a version of OpenSSL built with it
 * defined. This not only improves external code, it provides more test
 * coverage for OpenSSL's own code.
 */
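
/*
 * Illustrative sketch (not part of the original header): the calling pattern
 * the comment above describes, i.e. direct manipulation of d[] followed by
 * bn_correct_top()/bn_check_top().  bn_copy_low_sketch() is a hypothetical
 * helper used only for illustration; bn_wexpand() and bn_correct_top() are
 * assumed to come from internal/bn_int.h.
 */
# if 0
static int bn_copy_low_sketch(BIGNUM *r, const BIGNUM *a, int nwords)
{
    int i;

    bn_check_top(a);                /* debug builds verify 'a' is consistent */
    if (bn_wexpand(r, nwords) == NULL)
        return 0;
    for (i = 0; i < nwords; i++)    /* copy the low words, zero-pad the rest */
        r->d[i] = (i < a->top) ? a->d[i] : 0;
    r->top = nwords;
    r->neg = a->neg;
    bn_correct_top(r);              /* strip any leading zero words just written */
    bn_check_top(r);
    return 1;
}
# endif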

# ifdef BN_DEBUG

/* We only need assert() when debugging */
# include <assert.h>

# ifdef BN_DEBUG_RAND
/* To avoid "make update" cvs wars due to BN_DEBUG, use some tricks */
# ifndef RAND_pseudo_bytes
int RAND_pseudo_bytes(unsigned char *buf, int num);
# define BN_DEBUG_TRIX
# endif
# define bn_pollute(a) \
        do { \
            const BIGNUM *_bnum1 = (a); \
            if (_bnum1->top < _bnum1->dmax) { \
                unsigned char _tmp_char; \
                /* We cast away const without the compiler knowing; any \
                 * *genuinely* constant variables that aren't mutable \
                 * wouldn't be constructed with top!=dmax. */ \
                BN_ULONG *_not_const; \
                memcpy(&_not_const, &_bnum1->d, sizeof(BN_ULONG *)); \
                RAND_pseudo_bytes(&_tmp_char, 1); \
                memset((unsigned char *)(_not_const + _bnum1->top), _tmp_char, \
                       (_bnum1->dmax - _bnum1->top) * sizeof(BN_ULONG)); \
            } \
        } while(0)
# ifdef BN_DEBUG_TRIX
# undef RAND_pseudo_bytes
# endif
# else
# define bn_pollute(a)
# endif
# define bn_check_top(a) \
        do { \
            const BIGNUM *_bnum2 = (a); \
            if (_bnum2 != NULL) { \
                assert((_bnum2->top == 0) || \
                       (_bnum2->d[_bnum2->top - 1] != 0)); \
                bn_pollute(_bnum2); \
            } \
        } while(0)

# define bn_fix_top(a) bn_check_top(a)

# define bn_check_size(bn, bits) bn_wcheck_size(bn, ((bits+BN_BITS2-1))/BN_BITS2)
# define bn_wcheck_size(bn, words) \
        do { \
            const BIGNUM *_bnum2 = (bn); \
            assert((words) <= (_bnum2)->dmax && (words) >= (_bnum2)->top); \
            /* avoid unused variable warning with NDEBUG */ \
            (void)(_bnum2); \
        } while(0)

# else                          /* !BN_DEBUG */

# define bn_pollute(a)
# define bn_check_top(a)
# define bn_fix_top(a) bn_correct_top(a)
# define bn_check_size(bn, bits)
# define bn_wcheck_size(bn, words)

# endif

BN_ULONG bn_mul_add_words(BN_ULONG *rp, const BN_ULONG *ap, int num,
                          BN_ULONG w);
BN_ULONG bn_mul_words(BN_ULONG *rp, const BN_ULONG *ap, int num, BN_ULONG w);
void bn_sqr_words(BN_ULONG *rp, const BN_ULONG *ap, int num);
BN_ULONG bn_div_words(BN_ULONG h, BN_ULONG l, BN_ULONG d);
BN_ULONG bn_add_words(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,
                      int num);
BN_ULONG bn_sub_words(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,
                      int num);

struct bignum_st {
    BN_ULONG *d;                /* Pointer to an array of 'BN_BITS2' bit
                                 * chunks. */
    int top;                    /* Index of the last used word in d, plus one. */
    /* The next are internal book-keeping for bn_expand. */
    int dmax;                   /* Size of the d array. */
    int neg;                    /* one if the number is negative */
    int flags;
};
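
/*-
 * Added note: with top == n the structure represents the non-negative value
 *      d[0] + d[1]*2^BN_BITS2 + ... + d[n-1]*2^((n-1)*BN_BITS2),
 * negated when neg is set, and bn_check_top()'s invariant is d[n-1] != 0
 * (zero is represented with top == 0).  For example, with 64-bit words the
 * value 2^64 + 5 is stored as d[0] = 5, d[1] = 1, top = 2, neg = 0.
 */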

/* Used for Montgomery multiplication */
struct bn_mont_ctx_st {
    int ri;                     /* number of bits in R */
    BIGNUM RR;                  /* used to convert to Montgomery form */
    BIGNUM N;                   /* The modulus */
    BIGNUM Ni;                  /* R*(1/R mod N) - N*Ni = 1 (Ni is only
                                 * stored for bignum algorithm) */
    BN_ULONG n0[2];             /* least significant word(s) of Ni; (type
                                 * changed with 0.9.9, was "BN_ULONG n0;"
                                 * before) */
    int flags;
};
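
/*-
 * Added note (informal sketch of the relations involved): R = 2^ri, where ri
 * is the bit length of N rounded up to a multiple of BN_BITS2.  RR = R^2 mod N
 * is used to bring a value a into Montgomery form a*R mod N (by Montgomery-
 * multiplying a with RR), and n0 holds the least significant word(s) of
 * Ni = -N^-1 mod R, which drives the word-by-word Montgomery reduction.
 */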

/*
 * Used for reciprocal division/mod functions.  It cannot be shared between
 * threads.
 */
struct bn_recp_ctx_st {
    BIGNUM N;                   /* the divisor */
    BIGNUM Nr;                  /* the reciprocal */
    int num_bits;
    int shift;
    int flags;
};
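
/*-
 * Added note (informal): Nr caches a power-of-two-scaled integer
 * approximation of 1/N (as computed by BN_reciprocal()), so that repeated
 * division or reduction by the same N can be done with multiplications and
 * shifts instead of a full division each time.
 */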

/* Used for slow "generation" functions. */
struct bn_gencb_st {
    unsigned int ver;           /* To handle binary (in)compatibility */
    void *arg;                  /* callback-specific data */
    union {
        /* if(ver==1) - handles old style callbacks */
        void (*cb_1) (int, int, void *);
        /* if(ver==2) - new callback style */
        int (*cb_2) (int, int, BN_GENCB *);
    } cb;
};
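
/*
 * Illustrative sketch (not part of the original header): dispatching on 'ver'
 * to invoke whichever callback style was registered.  This roughly mirrors
 * what BN_GENCB_call() does; gencb_call_sketch() itself is hypothetical.
 */
# if 0
static int gencb_call_sketch(BN_GENCB *cb, int a, int b)
{
    if (cb == NULL)
        return 1;                   /* no callback registered: nothing to do */
    switch (cb->ver) {
    case 1:                         /* old style: void callback, cannot fail */
        cb->cb.cb_1(a, b, cb->arg);
        return 1;
    case 2:                         /* new style: returning 0 aborts generation */
        return cb->cb.cb_2(a, b, cb);
    }
    return 0;                       /* unrecognised version */
}
# endif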

/*-
 * BN_window_bits_for_exponent_size -- macro for sliding window mod_exp functions
 *
 * For window size 'w' (w >= 2) and a random 'b'-bit exponent, the number of
 * multiplications is a constant plus on average
 *
 *    2^(w-1) + (b-w)/(w+1);
 *
 * here 2^(w-1) is for precomputing the table (we actually need entries only
 * for windows that have the lowest bit set), and (b-w)/(w+1) is an
 * approximation for the expected number of w-bit windows, not counting the
 * first one.
 *
 * Thus we should use
 *
 *      w >= 6 if        b > 671
 *       w = 5 if  671 > b > 239
 *       w = 4 if  239 > b >  79
 *       w = 3 if   79 > b >  23
 *      w <= 2 if   23 > b
 *
 * (with draws in between).  Very small exponents are often selected with low
 * Hamming weight, so we use w = 1 for b <= 23.
 */
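
/*-
 * Added worked example: at the crossover b = 671 both windows give the same
 * expected count, 2^4 + (671-5)/6 = 16 + 111 = 127 for w = 5 and
 * 2^5 + (671-6)/7 = 32 + 95 = 127 for w = 6.  For a typical b = 1024
 * exponent, w = 6 costs about 32 + 1018/7 ~= 177 multiplications versus
 * about 16 + 1019/6 ~= 186 for w = 5, which is why the table below switches
 * to 6 above 671 bits.
 */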
# if 1
# define BN_window_bits_for_exponent_size(b) \
                ((b) > 671 ? 6 : \
                 (b) > 239 ? 5 : \
                 (b) >  79 ? 4 : \
                 (b) >  23 ? 3 : 1)
# else
/*
 * Old SSLeay/OpenSSL table. Maximum window size was 5, so this table differs
 * for b==1024; but it coincides for other interesting values (b==160,
 * b==512).
 */
# define BN_window_bits_for_exponent_size(b) \
                ((b) > 255 ? 5 : \
                 (b) > 127 ? 4 : \
                 (b) >  17 ? 3 : 1)
# endif

/*
 * BN_mod_exp_mont_consttime is based on the assumption that the L1 data cache
 * line width of the target processor is at least the following value.
 */
# define MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH      ( 64 )
# define MOD_EXP_CTIME_MIN_CACHE_LINE_MASK       (MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH - 1)

/*
 * Window sizes optimized for fixed window size modular exponentiation
 * algorithm (BN_mod_exp_mont_consttime). To achieve the security goals of
 * BN_mod_exp_mont_consttime, the maximum size of the window must not exceed
 * log_2(MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH). Window size thresholds are
 * defined for cache line sizes of 32 and 64, for which log_2(32)=5 and
 * log_2(64)=6 respectively. A window size of 7 should only be used on
 * processors that have a 128 byte or greater cache line size.
 */
# if MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH == 64

# define BN_window_bits_for_ctime_exponent_size(b) \
                ((b) > 937 ? 6 : \
                 (b) > 306 ? 5 : \
                 (b) >  89 ? 4 : \
                 (b) >  22 ? 3 : 1)
# define BN_MAX_WINDOW_BITS_FOR_CTIME_EXPONENT_SIZE    (6)

# elif MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH == 32

# define BN_window_bits_for_ctime_exponent_size(b) \
                ((b) > 306 ? 5 : \
                 (b) >  89 ? 4 : \
                 (b) >  22 ? 3 : 1)
# define BN_MAX_WINDOW_BITS_FOR_CTIME_EXPONENT_SIZE    (5)

# endif
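
/*-
 * Added note (informal): with the default 64-byte cache line, capping the
 * window at log_2(64) = 6 bits keeps the precomputation table at no more
 * than 2^6 = 64 entries, so the constant-time code can interleave the table
 * across cache lines in such a way that fetching any entry touches the same
 * set of lines regardless of the (secret) index.  E.g. a 2048-bit exponent
 * (b > 937) gets w = 6 and a 64-entry table.
 */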

/* Pentium pro 16,16,16,32,64 */
/* Alpha       16,16,16,16,64 */
# define BN_MULL_SIZE_NORMAL            (16)/* 32 */
# define BN_MUL_RECURSIVE_SIZE_NORMAL   (16)/* 32 less than */
# define BN_SQR_RECURSIVE_SIZE_NORMAL   (16)/* 32 */
# define BN_MUL_LOW_RECURSIVE_SIZE_NORMAL (32)/* 32 */
# define BN_MONT_CTX_SET_SIZE_WORD      (64)/* 32 */

/*
 * 2011-02-22 SMS. In various places, a size_t variable or a type cast to
 * size_t was used to perform integer-only operations on pointers. This
 * failed on VMS with 64-bit pointers (CC /POINTER_SIZE = 64) because size_t
 * is still only 32 bits. What's needed in these cases is an integer type
 * with the same size as a pointer, which size_t is not certain to be. The
 * only fix here is VMS-specific.
 */
# if defined(OPENSSL_SYS_VMS)
# if __INITIAL_POINTER_SIZE == 64
# define PTR_SIZE_INT long long
# else  /* __INITIAL_POINTER_SIZE == 64 */
# define PTR_SIZE_INT int
# endif /* __INITIAL_POINTER_SIZE == 64 [else] */
# elif !defined(PTR_SIZE_INT)   /* defined(OPENSSL_SYS_VMS) */
# define PTR_SIZE_INT size_t
# endif /* defined(OPENSSL_SYS_VMS) [else] */
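
/*
 * Illustrative sketch (not part of the original header): PTR_SIZE_INT lets
 * pointer arithmetic such as cache-line alignment be done in an integer type
 * as wide as a pointer.  align_to_cache_line_sketch() is hypothetical and
 * assumes the caller over-allocated by MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH.
 */
# if 0
static unsigned char *align_to_cache_line_sketch(unsigned char *p)
{
    PTR_SIZE_INT off = (PTR_SIZE_INT)p & MOD_EXP_CTIME_MIN_CACHE_LINE_MASK;

    /* round up to the next cache-line boundary (no-op if already aligned) */
    return off ? p + (MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH - off) : p;
}
# endif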

# if !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM) && !defined(PEDANTIC)
/*
 * BN_UMULT_HIGH section.
 *
 * No, I'm not trying to overwhelm you when stating that the
 * product of N-bit numbers is 2*N bits wide:-) No, I don't expect
 * you to be impressed when I say that if the compiler doesn't
 * support 2*N integer type, then you have to replace every N*N
 * multiplication with 4 (N/2)*(N/2) accompanied by some shifts
 * and additions which unavoidably results in severe performance
 * penalties. Of course provided that the hardware is capable of
 * producing 2*N result... That's when you normally start
 * considering assembler implementation. However! It should be
 * pointed out that some CPUs (most notably Alpha, PowerPC and
 * upcoming IA-64 family:-) provide *separate* instruction
 * calculating the upper half of the product placing the result
 * into a general purpose register. Now *if* the compiler supports
 * inline assembler, then it's not impossible to implement the
 * "bignum" routines (and have the compiler optimize 'em)
 * exhibiting "native" performance in C. That's what BN_UMULT_HIGH
 * macro is about:-)
 *
 * <appro@fy.chalmers.se>
 */
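
/*
 * Illustrative sketch (not part of the original header): the pure-C fallback
 * the comment above alludes to, i.e. obtaining the high word of a full
 * product from four half-word multiplications when neither a 2*N integer
 * type nor a "multiply high" instruction is available.  The function name is
 * hypothetical; BN_BITS4 and BN_MASK2l are assumed to come from bn.h.
 */
# if 0
static BN_ULONG bn_umult_high_sketch(BN_ULONG a, BN_ULONG b)
{
    BN_ULONG al = a & BN_MASK2l, ah = a >> BN_BITS4;
    BN_ULONG bl = b & BN_MASK2l, bh = b >> BN_BITS4;
    BN_ULONG lo = al * bl;
    BN_ULONG mid = al * bh;
    BN_ULONG m2 = ah * bl;
    BN_ULONG hi = ah * bh;

    mid += lo >> BN_BITS4;          /* cannot overflow: (2^h-1)^2 + (2^h-1) < 2^(2h) */
    mid += m2;
    if (mid < m2)                   /* carry out of the middle sum */
        hi += (BN_ULONG)1 << BN_BITS4;
    return hi + (mid >> BN_BITS4);
}
# endif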
# if defined(__alpha) && (defined(SIXTY_FOUR_BIT_LONG) || defined(SIXTY_FOUR_BIT))
# if defined(__DECC)
# include <c_asm.h>
# define BN_UMULT_HIGH(a,b) (BN_ULONG)asm("umulh %a0,%a1,%v0",(a),(b))
# elif defined(__GNUC__) && __GNUC__>=2
# define BN_UMULT_HIGH(a,b) ({ \
        register BN_ULONG ret; \
        asm ("umulh %1,%2,%0" \
             : "=r"(ret) \
             : "r"(a), "r"(b)); \
        ret; })
# endif                         /* compiler */
# elif defined(_ARCH_PPC) && defined(__64BIT__) && defined(SIXTY_FOUR_BIT_LONG)
# if defined(__GNUC__) && __GNUC__>=2
# define BN_UMULT_HIGH(a,b) ({ \
        register BN_ULONG ret; \
        asm ("mulhdu %0,%1,%2" \
             : "=r"(ret) \
             : "r"(a), "r"(b)); \
        ret; })
# endif                         /* compiler */
# elif (defined(__x86_64) || defined(__x86_64__)) && \
    (defined(SIXTY_FOUR_BIT_LONG) || defined(SIXTY_FOUR_BIT))
# if defined(__GNUC__) && __GNUC__>=2
# define BN_UMULT_HIGH(a,b) ({ \
        register BN_ULONG ret,discard; \
        asm ("mulq %3" \
             : "=a"(discard),"=d"(ret) \
             : "a"(a), "g"(b) \
             : "cc"); \
        ret; })
# define BN_UMULT_LOHI(low,high,a,b) \
        asm ("mulq %3" \
             : "=a"(low),"=d"(high) \
             : "a"(a),"g"(b) \
             : "cc");
# endif
# elif (defined(_M_AMD64) || defined(_M_X64)) && defined(SIXTY_FOUR_BIT)
# if defined(_MSC_VER) && _MSC_VER>=1400
unsigned __int64 __umulh(unsigned __int64 a, unsigned __int64 b);
unsigned __int64 _umul128(unsigned __int64 a, unsigned __int64 b,
                          unsigned __int64 *h);
# pragma intrinsic(__umulh,_umul128)
# define BN_UMULT_HIGH(a,b) __umulh((a),(b))
# define BN_UMULT_LOHI(low,high,a,b) ((low)=_umul128((a),(b),&(high)))
# endif
# elif defined(__mips) && (defined(SIXTY_FOUR_BIT) || defined(SIXTY_FOUR_BIT_LONG))
# if defined(__GNUC__) && __GNUC__>=2
# if __GNUC__>4 || (__GNUC__==4 && __GNUC_MINOR__>=4)
/* the "h" constraint is no longer available in gcc >= 4.4 */
# define BN_UMULT_HIGH(a,b) (((__uint128_t)(a)*(b))>>64)
# define BN_UMULT_LOHI(low,high,a,b) ({ \
        __uint128_t ret=(__uint128_t)(a)*(b); \
        (high)=ret>>64; (low)=ret; })
# else
# define BN_UMULT_HIGH(a,b) ({ \
        register BN_ULONG ret; \
        asm ("dmultu %1,%2" \
             : "=h"(ret) \
             : "r"(a), "r"(b) : "l"); \
        ret; })
# define BN_UMULT_LOHI(low,high,a,b) \
        asm ("dmultu %2,%3" \
             : "=l"(low),"=h"(high) \
             : "r"(a), "r"(b));
# endif
# endif
# elif defined(__aarch64__) && defined(SIXTY_FOUR_BIT_LONG)
# if defined(__GNUC__) && __GNUC__>=2
# define BN_UMULT_HIGH(a,b) ({ \
        register BN_ULONG ret; \
        asm ("umulh %0,%1,%2" \
             : "=r"(ret) \
             : "r"(a), "r"(b)); \
        ret; })
# endif
# endif                         /* cpu */
# endif                         /* OPENSSL_NO_ASM */

/*************************************************************
 * Using the long long type
 */
# define Lw(t) (((BN_ULONG)(t))&BN_MASK2)
# define Hw(t) (((BN_ULONG)((t)>>BN_BITS2))&BN_MASK2)

# ifdef BN_DEBUG_RAND
# define bn_clear_top2max(a) \
        { \
            int ind = (a)->dmax - (a)->top; \
            BN_ULONG *ftl = &(a)->d[(a)->top-1]; \
            for (; ind != 0; ind--) \
                *(++ftl) = 0x0; \
        }
# else
# define bn_clear_top2max(a)
# endif

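/*-
 * Added note: the three alternative implementations below (BN_LLONG,
 * BN_UMULT_LOHI/BN_UMULT_HIGH, and the pure half-word fallback) all provide
 * the same word-level primitives:
 *      mul_add(r,a,w,c):  (c,r) := a*w + r + c   (r = low word, c = high word)
 *      mul(r,a,w,c):      (c,r) := a*w + c
 *      sqr(r0,r1,a):      (r1,r0) := a*a
 * In the fallback variant the multiplier w is passed as its two half-words
 * (bl, bh) instead of a single word.
 */
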
# ifdef BN_LLONG
# define mul_add(r,a,w,c) { \
        BN_ULLONG t; \
        t=(BN_ULLONG)w * (a) + (r) + (c); \
        (r)= Lw(t); \
        (c)= Hw(t); \
        }

# define mul(r,a,w,c) { \
        BN_ULLONG t; \
        t=(BN_ULLONG)w * (a) + (c); \
        (r)= Lw(t); \
        (c)= Hw(t); \
        }

# define sqr(r0,r1,a) { \
        BN_ULLONG t; \
        t=(BN_ULLONG)(a)*(a); \
        (r0)=Lw(t); \
        (r1)=Hw(t); \
        }

# elif defined(BN_UMULT_LOHI)
# define mul_add(r,a,w,c) { \
        BN_ULONG high,low,ret,tmp=(a); \
        ret = (r); \
        BN_UMULT_LOHI(low,high,w,tmp); \
        ret += (c); \
        (c) = (ret<(c))?1:0; \
        (c) += high; \
        ret += low; \
        (c) += (ret<low)?1:0; \
        (r) = ret; \
        }

# define mul(r,a,w,c) { \
        BN_ULONG high,low,ret,ta=(a); \
        BN_UMULT_LOHI(low,high,w,ta); \
        ret = low + (c); \
        (c) = high; \
        (c) += (ret<low)?1:0; \
        (r) = ret; \
        }

# define sqr(r0,r1,a) { \
        BN_ULONG tmp=(a); \
        BN_UMULT_LOHI(r0,r1,tmp,tmp); \
        }

# elif defined(BN_UMULT_HIGH)
# define mul_add(r,a,w,c) { \
        BN_ULONG high,low,ret,tmp=(a); \
        ret = (r); \
        high= BN_UMULT_HIGH(w,tmp); \
        ret += (c); \
        low = (w) * tmp; \
        (c) = (ret<(c))?1:0; \
        (c) += high; \
        ret += low; \
        (c) += (ret<low)?1:0; \
        (r) = ret; \
        }

# define mul(r,a,w,c) { \
        BN_ULONG high,low,ret,ta=(a); \
        low = (w) * ta; \
        high= BN_UMULT_HIGH(w,ta); \
        ret = low + (c); \
        (c) = high; \
        (c) += (ret<low)?1:0; \
        (r) = ret; \
        }

# define sqr(r0,r1,a) { \
        BN_ULONG tmp=(a); \
        (r0) = tmp * tmp; \
        (r1) = BN_UMULT_HIGH(tmp,tmp); \
        }

# else
/*************************************************************
 * No long long type
 */

# define LBITS(a)        ((a)&BN_MASK2l)
# define HBITS(a)        (((a)>>BN_BITS4)&BN_MASK2l)
# define L2HBITS(a)      (((a)<<BN_BITS4)&BN_MASK2)

# define LLBITS(a)       ((a)&BN_MASKl)
# define LHBITS(a)       (((a)>>BN_BITS2)&BN_MASKl)
# define LL2HBITS(a)     ((BN_ULLONG)((a)&BN_MASKl)<<BN_BITS2)

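/*
 * Added note: mul64(l,h,bl,bh) takes the low/high half-words of one operand
 * in l,h and of the other in bl,bh, and leaves the full two-word product in
 * (h,l); sqr64(lo,ho,in) computes (ho,lo) := in*in the same way.
 */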
# define mul64(l,h,bl,bh) \
        { \
        BN_ULONG m,m1,lt,ht; \
 \
        lt=l; \
        ht=h; \
        m =(bh)*(lt); \
        lt=(bl)*(lt); \
        m1=(bl)*(ht); \
        ht =(bh)*(ht); \
        m=(m+m1)&BN_MASK2; if (m < m1) ht+=L2HBITS((BN_ULONG)1); \
        ht+=HBITS(m); \
        m1=L2HBITS(m); \
        lt=(lt+m1)&BN_MASK2; if (lt < m1) ht++; \
        (l)=lt; \
        (h)=ht; \
        }

# define sqr64(lo,ho,in) \
        { \
        BN_ULONG l,h,m; \
 \
        h=(in); \
        l=LBITS(h); \
        h=HBITS(h); \
        m =(l)*(h); \
        l*=l; \
        h*=h; \
        h+=(m&BN_MASK2h1)>>(BN_BITS4-1); \
        m =(m&BN_MASK2l)<<(BN_BITS4+1); \
        l=(l+m)&BN_MASK2; if (l < m) h++; \
        (lo)=l; \
        (ho)=h; \
        }

# define mul_add(r,a,bl,bh,c) { \
        BN_ULONG l,h; \
 \
        h= (a); \
        l=LBITS(h); \
        h=HBITS(h); \
        mul64(l,h,(bl),(bh)); \
 \
        /* non-multiply part */ \
        l=(l+(c))&BN_MASK2; if (l < (c)) h++; \
        (c)=(r); \
        l=(l+(c))&BN_MASK2; if (l < (c)) h++; \
        (c)=h&BN_MASK2; \
        (r)=l; \
        }

# define mul(r,a,bl,bh,c) { \
        BN_ULONG l,h; \
 \
        h= (a); \
        l=LBITS(h); \
        h=HBITS(h); \
        mul64(l,h,(bl),(bh)); \
 \
        /* non-multiply part */ \
        l+=(c); if ((l&BN_MASK2) < (c)) h++; \
        (c)=h&BN_MASK2; \
        (r)=l&BN_MASK2; \
        }
# endif                         /* !BN_LLONG */

void BN_init(BIGNUM *a);
void BN_RECP_CTX_init(BN_RECP_CTX *recp);
void BN_MONT_CTX_init(BN_MONT_CTX *ctx);

void bn_mul_normal(BN_ULONG *r, BN_ULONG *a, int na, BN_ULONG *b, int nb);
void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b);
void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b);
void bn_sqr_normal(BN_ULONG *r, const BN_ULONG *a, int n, BN_ULONG *tmp);
void bn_sqr_comba8(BN_ULONG *r, const BN_ULONG *a);
void bn_sqr_comba4(BN_ULONG *r, const BN_ULONG *a);
int bn_cmp_words(const BN_ULONG *a, const BN_ULONG *b, int n);
int bn_cmp_part_words(const BN_ULONG *a, const BN_ULONG *b, int cl, int dl);
void bn_mul_recursive(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n2,
                      int dna, int dnb, BN_ULONG *t);
void bn_mul_part_recursive(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b,
                           int n, int tna, int tnb, BN_ULONG *t);
void bn_sqr_recursive(BN_ULONG *r, const BN_ULONG *a, int n2, BN_ULONG *t);
void bn_mul_low_normal(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n);
void bn_mul_low_recursive(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n2,
                          BN_ULONG *t);
void bn_mul_high(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, BN_ULONG *l, int n2,
                 BN_ULONG *t);
BN_ULONG bn_add_part_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b,
                           int cl, int dl);
BN_ULONG bn_sub_part_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b,
                           int cl, int dl);
int bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,
                const BN_ULONG *np, const BN_ULONG *n0, int num);

BIGNUM *int_bn_mod_inverse(BIGNUM *in,
                           const BIGNUM *a, const BIGNUM *n, BN_CTX *ctx,
                           int *noinv);

int bn_probable_prime_dh(BIGNUM *rnd, int bits,
                         const BIGNUM *add, const BIGNUM *rem, BN_CTX *ctx);
int bn_probable_prime_dh_retry(BIGNUM *rnd, int bits, BN_CTX *ctx);
int bn_probable_prime_dh_coprime(BIGNUM *rnd, int bits, BN_CTX *ctx);

#ifdef __cplusplus
}
#endif

#endif