/*
 * Copyright 2001-2018 The OpenSSL Project Authors. All Rights Reserved.
 * Copyright (c) 2002, Oracle and/or its affiliates. All rights reserved
 *
 * Licensed under the OpenSSL license (the "License").  You may not use
 * this file except in compliance with the License.  You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */
#include <string.h>

#include <openssl/err.h>

#include "internal/cryptlib.h"
#include "internal/bn_int.h"
#include "internal/refcount.h"
/*
 * This file implements the wNAF-based interleaving multi-exponentiation method
 * Formerly at:
 *   http://www.informatik.tu-darmstadt.de/TI/Mitarbeiter/moeller.html#multiexp
 * You might now find it here:
 *   http://link.springer.com/chapter/10.1007%2F3-540-45537-X_13
 *   http://www.bmoeller.de/pdf/TI-01-08.multiexp.pdf
 * For multiplication with precomputation, we use wNAF splitting, formerly at:
 *   http://www.informatik.tu-darmstadt.de/TI/Mitarbeiter/moeller.html#fastexp
 */
30 /* structure for precomputed multiples of the generator */
31 struct ec_pre_comp_st
{
32 const EC_GROUP
*group
; /* parent EC_GROUP object */
33 size_t blocksize
; /* block size for wNAF splitting */
34 size_t numblocks
; /* max. number of blocks for which we have
36 size_t w
; /* window size */
37 EC_POINT
**points
; /* array with pre-calculated multiples of
38 * generator: 'num' pointers to EC_POINT
39 * objects followed by a NULL */
40 size_t num
; /* numblocks * 2^(w-1) */
41 CRYPTO_REF_COUNT references
;
45 static EC_PRE_COMP
*ec_pre_comp_new(const EC_GROUP
*group
)
47 EC_PRE_COMP
*ret
= NULL
;
52 ret
= OPENSSL_zalloc(sizeof(*ret
));
54 ECerr(EC_F_EC_PRE_COMP_NEW
, ERR_R_MALLOC_FAILURE
);
59 ret
->blocksize
= 8; /* default */
60 ret
->w
= 4; /* default */
63 ret
->lock
= CRYPTO_THREAD_lock_new();
64 if (ret
->lock
== NULL
) {
65 ECerr(EC_F_EC_PRE_COMP_NEW
, ERR_R_MALLOC_FAILURE
);
72 EC_PRE_COMP
*EC_ec_pre_comp_dup(EC_PRE_COMP
*pre
)
76 CRYPTO_UP_REF(&pre
->references
, &i
, pre
->lock
);
80 void EC_ec_pre_comp_free(EC_PRE_COMP
*pre
)
87 CRYPTO_DOWN_REF(&pre
->references
, &i
, pre
->lock
);
88 REF_PRINT_COUNT("EC_ec", pre
);
91 REF_ASSERT_ISNT(i
< 0);
93 if (pre
->points
!= NULL
) {
96 for (pts
= pre
->points
; *pts
!= NULL
; pts
++)
98 OPENSSL_free(pre
->points
);
100 CRYPTO_THREAD_lock_free(pre
->lock
);
/* Set BIGNUM flags on all three projective coordinates of EC_POINT *P. */
#define EC_POINT_BN_set_flags(P, flags) do { \
    BN_set_flags((P)->X, (flags)); \
    BN_set_flags((P)->Y, (flags)); \
    BN_set_flags((P)->Z, (flags)); \
} while(0)
/*-
 * This function computes (in constant time) a point multiplication over the
 * EC group.
 *
 * At a high level, it is Montgomery ladder with conditional swaps.
 *
 * It performs either a fixed point multiplication
 *          (scalar * generator)
 * when point is NULL, or a variable point multiplication
 *          (scalar * point)
 * when point is not NULL.
 *
 * scalar should be in the range [0,n) otherwise all constant time bets are off.
 *
 * NB: This says nothing about EC_POINT_add and EC_POINT_dbl,
 * which of course are not constant time themselves.
 *
 * The product is stored in r.
 *
 * Returns 1 on success, 0 otherwise.
 */
131 static int ec_mul_consttime(const EC_GROUP
*group
, EC_POINT
*r
,
132 const BIGNUM
*scalar
, const EC_POINT
*point
,
135 int i
, order_bits
, group_top
, kbit
, pbit
, Z_is_one
;
138 BIGNUM
*lambda
= NULL
;
139 BN_CTX
*new_ctx
= NULL
;
142 if (ctx
== NULL
&& (ctx
= new_ctx
= BN_CTX_secure_new()) == NULL
)
147 order_bits
= BN_num_bits(group
->order
);
149 s
= EC_POINT_new(group
);
154 if (!EC_POINT_copy(s
, group
->generator
))
157 if (!EC_POINT_copy(s
, point
))
161 EC_POINT_BN_set_flags(s
, BN_FLG_CONSTTIME
);
163 lambda
= BN_CTX_get(ctx
);
169 * Group orders are often on a word boundary.
170 * So when we pad the scalar, some timing diff might
171 * pop if it needs to be expanded due to carries.
172 * So expand ahead of time.
174 group_top
= bn_get_top(group
->order
);
175 if ((bn_wexpand(k
, group_top
+ 1) == NULL
)
176 || (bn_wexpand(lambda
, group_top
+ 1) == NULL
))
179 if (!BN_copy(k
, scalar
))
182 BN_set_flags(k
, BN_FLG_CONSTTIME
);
184 if ((BN_num_bits(k
) > order_bits
) || (BN_is_negative(k
))) {
186 * this is an unusual input, and we don't guarantee
189 if (!BN_nnmod(k
, k
, group
->order
, ctx
))
193 if (!BN_add(lambda
, k
, group
->order
))
195 BN_set_flags(lambda
, BN_FLG_CONSTTIME
);
196 if (!BN_add(k
, lambda
, group
->order
))
199 * lambda := scalar + order
200 * k := scalar + 2*order
202 kbit
= BN_is_bit_set(lambda
, order_bits
);
203 BN_consttime_swap(kbit
, k
, lambda
, group_top
+ 1);
205 group_top
= bn_get_top(group
->field
);
206 if ((bn_wexpand(s
->X
, group_top
) == NULL
)
207 || (bn_wexpand(s
->Y
, group_top
) == NULL
)
208 || (bn_wexpand(s
->Z
, group_top
) == NULL
)
209 || (bn_wexpand(r
->X
, group_top
) == NULL
)
210 || (bn_wexpand(r
->Y
, group_top
) == NULL
)
211 || (bn_wexpand(r
->Z
, group_top
) == NULL
))
215 * Apply coordinate blinding for EC_POINT.
217 * The underlying EC_METHOD can optionally implement this function:
218 * ec_point_blind_coordinates() returns 0 in case of errors or 1 on
219 * success or if coordinate blinding is not implemented for this
222 if (!ec_point_blind_coordinates(group
, s
, ctx
))
225 /* top bit is a 1, in a fixed pos */
226 if (!EC_POINT_copy(r
, s
))
229 EC_POINT_BN_set_flags(r
, BN_FLG_CONSTTIME
);
231 if (!EC_POINT_dbl(group
, s
, s
, ctx
))
236 #define EC_POINT_CSWAP(c, a, b, w, t) do { \
237 BN_consttime_swap(c, (a)->X, (b)->X, w); \
238 BN_consttime_swap(c, (a)->Y, (b)->Y, w); \
239 BN_consttime_swap(c, (a)->Z, (b)->Z, w); \
240 t = ((a)->Z_is_one ^ (b)->Z_is_one) & (c); \
241 (a)->Z_is_one ^= (t); \
242 (b)->Z_is_one ^= (t); \
246 * The ladder step, with branches, is
248 * k[i] == 0: S = add(R, S), R = dbl(R)
249 * k[i] == 1: R = add(S, R), S = dbl(S)
251 * Swapping R, S conditionally on k[i] leaves you with state
253 * k[i] == 0: T, U = R, S
254 * k[i] == 1: T, U = S, R
256 * Then perform the ECC ops.
261 * Which leaves you with state
263 * k[i] == 0: U = add(R, S), T = dbl(R)
264 * k[i] == 1: U = add(S, R), T = dbl(S)
266 * Swapping T, U conditionally on k[i] leaves you with state
268 * k[i] == 0: R, S = T, U
269 * k[i] == 1: R, S = U, T
271 * Which leaves you with state
273 * k[i] == 0: S = add(R, S), R = dbl(R)
274 * k[i] == 1: R = add(S, R), S = dbl(S)
276 * So we get the same logic, but instead of a branch it's a
277 * conditional swap, followed by ECC ops, then another conditional swap.
279 * Optimization: The end of iteration i and start of i-1 looks like
286 * CSWAP(k[i-1], R, S)
288 * CSWAP(k[i-1], R, S)
291 * So instead of two contiguous swaps, you can merge the condition
292 * bits and do a single swap.
294 * k[i] k[i-1] Outcome
300 * This is XOR. pbit tracks the previous bit of k.
303 for (i
= order_bits
- 1; i
>= 0; i
--) {
304 kbit
= BN_is_bit_set(k
, i
) ^ pbit
;
305 EC_POINT_CSWAP(kbit
, r
, s
, group_top
, Z_is_one
);
306 if (!EC_POINT_add(group
, s
, r
, s
, ctx
))
308 if (!EC_POINT_dbl(group
, r
, r
, ctx
))
311 * pbit logic merges this cswap with that of the
316 /* one final cswap to move the right value into r */
317 EC_POINT_CSWAP(pbit
, r
, s
, group_top
, Z_is_one
);
318 #undef EC_POINT_CSWAP
325 BN_CTX_free(new_ctx
);
330 #undef EC_POINT_BN_set_flags
/*-
 * Map a scalar bit length to a wNAF window size.
 *
 * TODO: table should be optimised for the wNAF-based implementation,
 * sometimes smaller windows will give better performance (thus the
 * boundaries should be increased)
 */
#define EC_window_bits_for_scalar_size(b) \
                ((size_t) \
                 ((b) >= 2000 ? 6 : \
                  (b) >= 800 ? 5 : \
                  (b) >= 300 ? 4 : \
                  (b) >= 70 ? 3 : \
                  (b) >= 20 ? 2 : \
                  1))
348 * \sum scalars[i]*points[i],
351 * in the addition if scalar != NULL
353 int ec_wNAF_mul(const EC_GROUP
*group
, EC_POINT
*r
, const BIGNUM
*scalar
,
354 size_t num
, const EC_POINT
*points
[], const BIGNUM
*scalars
[],
357 BN_CTX
*new_ctx
= NULL
;
358 const EC_POINT
*generator
= NULL
;
359 EC_POINT
*tmp
= NULL
;
361 size_t blocksize
= 0, numblocks
= 0; /* for wNAF splitting */
362 size_t pre_points_per_block
= 0;
365 int r_is_inverted
= 0;
366 int r_is_at_infinity
= 1;
367 size_t *wsize
= NULL
; /* individual window sizes */
368 signed char **wNAF
= NULL
; /* individual wNAFs */
369 size_t *wNAF_len
= NULL
;
372 EC_POINT
**val
= NULL
; /* precomputation */
374 EC_POINT
***val_sub
= NULL
; /* pointers to sub-arrays of 'val' or
375 * 'pre_comp->points' */
376 const EC_PRE_COMP
*pre_comp
= NULL
;
377 int num_scalar
= 0; /* flag: will be set to 1 if 'scalar' must be
378 * treated like other scalars, i.e.
379 * precomputation is not available */
382 if (!ec_point_is_compat(r
, group
)) {
383 ECerr(EC_F_EC_WNAF_MUL
, EC_R_INCOMPATIBLE_OBJECTS
);
387 if ((scalar
== NULL
) && (num
== 0)) {
388 return EC_POINT_set_to_infinity(group
, r
);
392 * Handle the common cases where the scalar is secret, enforcing a constant
393 * time scalar multiplication algorithm.
395 if ((scalar
!= NULL
) && (num
== 0)) {
397 * In this case we want to compute scalar * GeneratorPoint: this
398 * codepath is reached most prominently by (ephemeral) key generation
399 * of EC cryptosystems (i.e. ECDSA keygen and sign setup, ECDH
400 * keygen/first half), where the scalar is always secret. This is why
401 * we ignore if BN_FLG_CONSTTIME is actually set and we always call the
402 * constant time version.
404 return ec_mul_consttime(group
, r
, scalar
, NULL
, ctx
);
406 if ((scalar
== NULL
) && (num
== 1)) {
408 * In this case we want to compute scalar * GenericPoint: this codepath
409 * is reached most prominently by the second half of ECDH, where the
410 * secret scalar is multiplied by the peer's public point. To protect
411 * the secret scalar, we ignore if BN_FLG_CONSTTIME is actually set and
412 * we always call the constant time version.
414 return ec_mul_consttime(group
, r
, scalars
[0], points
[0], ctx
);
417 for (i
= 0; i
< num
; i
++) {
418 if (!ec_point_is_compat(points
[i
], group
)) {
419 ECerr(EC_F_EC_WNAF_MUL
, EC_R_INCOMPATIBLE_OBJECTS
);
425 ctx
= new_ctx
= BN_CTX_new();
430 if (scalar
!= NULL
) {
431 generator
= EC_GROUP_get0_generator(group
);
432 if (generator
== NULL
) {
433 ECerr(EC_F_EC_WNAF_MUL
, EC_R_UNDEFINED_GENERATOR
);
437 /* look if we can use precomputed multiples of generator */
439 pre_comp
= group
->pre_comp
.ec
;
440 if (pre_comp
&& pre_comp
->numblocks
441 && (EC_POINT_cmp(group
, generator
, pre_comp
->points
[0], ctx
) ==
443 blocksize
= pre_comp
->blocksize
;
446 * determine maximum number of blocks that wNAF splitting may
447 * yield (NB: maximum wNAF length is bit length plus one)
449 numblocks
= (BN_num_bits(scalar
) / blocksize
) + 1;
452 * we cannot use more blocks than we have precomputation for
454 if (numblocks
> pre_comp
->numblocks
)
455 numblocks
= pre_comp
->numblocks
;
457 pre_points_per_block
= (size_t)1 << (pre_comp
->w
- 1);
459 /* check that pre_comp looks sane */
460 if (pre_comp
->num
!= (pre_comp
->numblocks
* pre_points_per_block
)) {
461 ECerr(EC_F_EC_WNAF_MUL
, ERR_R_INTERNAL_ERROR
);
465 /* can't use precomputation */
468 num_scalar
= 1; /* treat 'scalar' like 'num'-th element of
473 totalnum
= num
+ numblocks
;
475 wsize
= OPENSSL_malloc(totalnum
* sizeof(wsize
[0]));
476 wNAF_len
= OPENSSL_malloc(totalnum
* sizeof(wNAF_len
[0]));
477 /* include space for pivot */
478 wNAF
= OPENSSL_malloc((totalnum
+ 1) * sizeof(wNAF
[0]));
479 val_sub
= OPENSSL_malloc(totalnum
* sizeof(val_sub
[0]));
481 /* Ensure wNAF is initialised in case we end up going to err */
483 wNAF
[0] = NULL
; /* preliminary pivot */
485 if (wsize
== NULL
|| wNAF_len
== NULL
|| wNAF
== NULL
|| val_sub
== NULL
) {
486 ECerr(EC_F_EC_WNAF_MUL
, ERR_R_MALLOC_FAILURE
);
491 * num_val will be the total number of temporarily precomputed points
495 for (i
= 0; i
< num
+ num_scalar
; i
++) {
498 bits
= i
< num
? BN_num_bits(scalars
[i
]) : BN_num_bits(scalar
);
499 wsize
[i
] = EC_window_bits_for_scalar_size(bits
);
500 num_val
+= (size_t)1 << (wsize
[i
] - 1);
501 wNAF
[i
+ 1] = NULL
; /* make sure we always have a pivot */
503 bn_compute_wNAF((i
< num
? scalars
[i
] : scalar
), wsize
[i
],
507 if (wNAF_len
[i
] > max_len
)
508 max_len
= wNAF_len
[i
];
512 /* we go here iff scalar != NULL */
514 if (pre_comp
== NULL
) {
515 if (num_scalar
!= 1) {
516 ECerr(EC_F_EC_WNAF_MUL
, ERR_R_INTERNAL_ERROR
);
519 /* we have already generated a wNAF for 'scalar' */
521 signed char *tmp_wNAF
= NULL
;
524 if (num_scalar
!= 0) {
525 ECerr(EC_F_EC_WNAF_MUL
, ERR_R_INTERNAL_ERROR
);
530 * use the window size for which we have precomputation
532 wsize
[num
] = pre_comp
->w
;
533 tmp_wNAF
= bn_compute_wNAF(scalar
, wsize
[num
], &tmp_len
);
537 if (tmp_len
<= max_len
) {
539 * One of the other wNAFs is at least as long as the wNAF
540 * belonging to the generator, so wNAF splitting will not buy
545 totalnum
= num
+ 1; /* don't use wNAF splitting */
546 wNAF
[num
] = tmp_wNAF
;
547 wNAF
[num
+ 1] = NULL
;
548 wNAF_len
[num
] = tmp_len
;
550 * pre_comp->points starts with the points that we need here:
552 val_sub
[num
] = pre_comp
->points
;
555 * don't include tmp_wNAF directly into wNAF array - use wNAF
556 * splitting and include the blocks
560 EC_POINT
**tmp_points
;
562 if (tmp_len
< numblocks
* blocksize
) {
564 * possibly we can do with fewer blocks than estimated
566 numblocks
= (tmp_len
+ blocksize
- 1) / blocksize
;
567 if (numblocks
> pre_comp
->numblocks
) {
568 ECerr(EC_F_EC_WNAF_MUL
, ERR_R_INTERNAL_ERROR
);
569 OPENSSL_free(tmp_wNAF
);
572 totalnum
= num
+ numblocks
;
575 /* split wNAF in 'numblocks' parts */
577 tmp_points
= pre_comp
->points
;
579 for (i
= num
; i
< totalnum
; i
++) {
580 if (i
< totalnum
- 1) {
581 wNAF_len
[i
] = blocksize
;
582 if (tmp_len
< blocksize
) {
583 ECerr(EC_F_EC_WNAF_MUL
, ERR_R_INTERNAL_ERROR
);
584 OPENSSL_free(tmp_wNAF
);
587 tmp_len
-= blocksize
;
590 * last block gets whatever is left (this could be
591 * more or less than 'blocksize'!)
593 wNAF_len
[i
] = tmp_len
;
596 wNAF
[i
] = OPENSSL_malloc(wNAF_len
[i
]);
597 if (wNAF
[i
] == NULL
) {
598 ECerr(EC_F_EC_WNAF_MUL
, ERR_R_MALLOC_FAILURE
);
599 OPENSSL_free(tmp_wNAF
);
602 memcpy(wNAF
[i
], pp
, wNAF_len
[i
]);
603 if (wNAF_len
[i
] > max_len
)
604 max_len
= wNAF_len
[i
];
606 if (*tmp_points
== NULL
) {
607 ECerr(EC_F_EC_WNAF_MUL
, ERR_R_INTERNAL_ERROR
);
608 OPENSSL_free(tmp_wNAF
);
611 val_sub
[i
] = tmp_points
;
612 tmp_points
+= pre_points_per_block
;
615 OPENSSL_free(tmp_wNAF
);
621 * All points we precompute now go into a single array 'val'.
622 * 'val_sub[i]' is a pointer to the subarray for the i-th point, or to a
623 * subarray of 'pre_comp->points' if we already have precomputation.
625 val
= OPENSSL_malloc((num_val
+ 1) * sizeof(val
[0]));
627 ECerr(EC_F_EC_WNAF_MUL
, ERR_R_MALLOC_FAILURE
);
630 val
[num_val
] = NULL
; /* pivot element */
632 /* allocate points for precomputation */
634 for (i
= 0; i
< num
+ num_scalar
; i
++) {
636 for (j
= 0; j
< ((size_t)1 << (wsize
[i
] - 1)); j
++) {
637 *v
= EC_POINT_new(group
);
643 if (!(v
== val
+ num_val
)) {
644 ECerr(EC_F_EC_WNAF_MUL
, ERR_R_INTERNAL_ERROR
);
648 if ((tmp
= EC_POINT_new(group
)) == NULL
)
652 * prepare precomputed values:
653 * val_sub[i][0] := points[i]
654 * val_sub[i][1] := 3 * points[i]
655 * val_sub[i][2] := 5 * points[i]
658 for (i
= 0; i
< num
+ num_scalar
; i
++) {
660 if (!EC_POINT_copy(val_sub
[i
][0], points
[i
]))
663 if (!EC_POINT_copy(val_sub
[i
][0], generator
))
668 if (!EC_POINT_dbl(group
, tmp
, val_sub
[i
][0], ctx
))
670 for (j
= 1; j
< ((size_t)1 << (wsize
[i
] - 1)); j
++) {
672 (group
, val_sub
[i
][j
], val_sub
[i
][j
- 1], tmp
, ctx
))
678 if (!EC_POINTs_make_affine(group
, num_val
, val
, ctx
))
681 r_is_at_infinity
= 1;
683 for (k
= max_len
- 1; k
>= 0; k
--) {
684 if (!r_is_at_infinity
) {
685 if (!EC_POINT_dbl(group
, r
, r
, ctx
))
689 for (i
= 0; i
< totalnum
; i
++) {
690 if (wNAF_len
[i
] > (size_t)k
) {
691 int digit
= wNAF
[i
][k
];
700 if (is_neg
!= r_is_inverted
) {
701 if (!r_is_at_infinity
) {
702 if (!EC_POINT_invert(group
, r
, ctx
))
705 r_is_inverted
= !r_is_inverted
;
710 if (r_is_at_infinity
) {
711 if (!EC_POINT_copy(r
, val_sub
[i
][digit
>> 1]))
713 r_is_at_infinity
= 0;
716 (group
, r
, r
, val_sub
[i
][digit
>> 1], ctx
))
724 if (r_is_at_infinity
) {
725 if (!EC_POINT_set_to_infinity(group
, r
))
729 if (!EC_POINT_invert(group
, r
, ctx
))
736 BN_CTX_free(new_ctx
);
739 OPENSSL_free(wNAF_len
);
743 for (w
= wNAF
; *w
!= NULL
; w
++)
749 for (v
= val
; *v
!= NULL
; v
++)
750 EC_POINT_clear_free(*v
);
754 OPENSSL_free(val_sub
);
759 * ec_wNAF_precompute_mult()
760 * creates an EC_PRE_COMP object with preprecomputed multiples of the generator
761 * for use with wNAF splitting as implemented in ec_wNAF_mul().
763 * 'pre_comp->points' is an array of multiples of the generator
764 * of the following form:
765 * points[0] = generator;
766 * points[1] = 3 * generator;
768 * points[2^(w-1)-1] = (2^(w-1)-1) * generator;
769 * points[2^(w-1)] = 2^blocksize * generator;
770 * points[2^(w-1)+1] = 3 * 2^blocksize * generator;
772 * points[2^(w-1)*(numblocks-1)-1] = (2^(w-1)) * 2^(blocksize*(numblocks-2)) * generator
773 * points[2^(w-1)*(numblocks-1)] = 2^(blocksize*(numblocks-1)) * generator
775 * points[2^(w-1)*numblocks-1] = (2^(w-1)) * 2^(blocksize*(numblocks-1)) * generator
776 * points[2^(w-1)*numblocks] = NULL
778 int ec_wNAF_precompute_mult(EC_GROUP
*group
, BN_CTX
*ctx
)
780 const EC_POINT
*generator
;
781 EC_POINT
*tmp_point
= NULL
, *base
= NULL
, **var
;
782 BN_CTX
*new_ctx
= NULL
;
784 size_t i
, bits
, w
, pre_points_per_block
, blocksize
, numblocks
, num
;
785 EC_POINT
**points
= NULL
;
786 EC_PRE_COMP
*pre_comp
;
789 /* if there is an old EC_PRE_COMP object, throw it away */
790 EC_pre_comp_free(group
);
791 if ((pre_comp
= ec_pre_comp_new(group
)) == NULL
)
794 generator
= EC_GROUP_get0_generator(group
);
795 if (generator
== NULL
) {
796 ECerr(EC_F_EC_WNAF_PRECOMPUTE_MULT
, EC_R_UNDEFINED_GENERATOR
);
801 ctx
= new_ctx
= BN_CTX_new();
808 order
= EC_GROUP_get0_order(group
);
811 if (BN_is_zero(order
)) {
812 ECerr(EC_F_EC_WNAF_PRECOMPUTE_MULT
, EC_R_UNKNOWN_ORDER
);
816 bits
= BN_num_bits(order
);
818 * The following parameters mean we precompute (approximately) one point
819 * per bit. TBD: The combination 8, 4 is perfect for 160 bits; for other
820 * bit lengths, other parameter combinations might provide better
825 if (EC_window_bits_for_scalar_size(bits
) > w
) {
826 /* let's not make the window too small ... */
827 w
= EC_window_bits_for_scalar_size(bits
);
830 numblocks
= (bits
+ blocksize
- 1) / blocksize
; /* max. number of blocks
834 pre_points_per_block
= (size_t)1 << (w
- 1);
835 num
= pre_points_per_block
* numblocks
; /* number of points to compute
838 points
= OPENSSL_malloc(sizeof(*points
) * (num
+ 1));
839 if (points
== NULL
) {
840 ECerr(EC_F_EC_WNAF_PRECOMPUTE_MULT
, ERR_R_MALLOC_FAILURE
);
845 var
[num
] = NULL
; /* pivot */
846 for (i
= 0; i
< num
; i
++) {
847 if ((var
[i
] = EC_POINT_new(group
)) == NULL
) {
848 ECerr(EC_F_EC_WNAF_PRECOMPUTE_MULT
, ERR_R_MALLOC_FAILURE
);
853 if ((tmp_point
= EC_POINT_new(group
)) == NULL
854 || (base
= EC_POINT_new(group
)) == NULL
) {
855 ECerr(EC_F_EC_WNAF_PRECOMPUTE_MULT
, ERR_R_MALLOC_FAILURE
);
859 if (!EC_POINT_copy(base
, generator
))
862 /* do the precomputation */
863 for (i
= 0; i
< numblocks
; i
++) {
866 if (!EC_POINT_dbl(group
, tmp_point
, base
, ctx
))
869 if (!EC_POINT_copy(*var
++, base
))
872 for (j
= 1; j
< pre_points_per_block
; j
++, var
++) {
874 * calculate odd multiples of the current base point
876 if (!EC_POINT_add(group
, *var
, tmp_point
, *(var
- 1), ctx
))
880 if (i
< numblocks
- 1) {
882 * get the next base (multiply current one by 2^blocksize)
886 if (blocksize
<= 2) {
887 ECerr(EC_F_EC_WNAF_PRECOMPUTE_MULT
, ERR_R_INTERNAL_ERROR
);
891 if (!EC_POINT_dbl(group
, base
, tmp_point
, ctx
))
893 for (k
= 2; k
< blocksize
; k
++) {
894 if (!EC_POINT_dbl(group
, base
, base
, ctx
))
900 if (!EC_POINTs_make_affine(group
, num
, points
, ctx
))
903 pre_comp
->group
= group
;
904 pre_comp
->blocksize
= blocksize
;
905 pre_comp
->numblocks
= numblocks
;
907 pre_comp
->points
= points
;
910 SETPRECOMP(group
, ec
, pre_comp
);
917 BN_CTX_free(new_ctx
);
918 EC_ec_pre_comp_free(pre_comp
);
922 for (p
= points
; *p
!= NULL
; p
++)
924 OPENSSL_free(points
);
926 EC_POINT_free(tmp_point
);
931 int ec_wNAF_have_precompute_mult(const EC_GROUP
*group
)
933 return HAVEPRECOMP(group
, ec
);