/*
 * Copyright 2001-2020 The OpenSSL Project Authors. All Rights Reserved.
 * Copyright (c) 2002, Oracle and/or its affiliates. All rights reserved
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

/*
 * ECDSA low level APIs are deprecated for public use, but still ok for
 * internal use.
 */
#include "internal/deprecated.h"

#include <string.h>
#include <openssl/err.h>

#include "internal/cryptlib.h"
#include "crypto/bn.h"
#include "ec_local.h"
#include "internal/refcount.h"

/*
 * This file implements the wNAF-based interleaving multi-exponentiation method
 * Formerly at:
 *   http://www.informatik.tu-darmstadt.de/TI/Mitarbeiter/moeller.html#multiexp
 * You might now find it here:
 *   http://link.springer.com/chapter/10.1007%2F3-540-45537-X_13
 *   http://www.bmoeller.de/pdf/TI-01-08.multiexp.pdf
 * For multiplication with precomputation, we use wNAF splitting, formerly at:
 *   http://www.informatik.tu-darmstadt.de/TI/Mitarbeiter/moeller.html#fastexp
 */
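
/*-
 * Illustrative sketch (not compiled): the plain width-w NAF recoding that
 * underlies the machinery below, for a word-sized scalar.  Digits are zero or
 * odd with absolute value below 2^w, and any w+1 consecutive digits contain
 * at most one nonzero entry.  The real recoding of BIGNUM scalars is done by
 * bn_compute_wNAF(), which produces a slightly different "modified" wNAF.
 */
#if 0
static size_t demo_wnaf(unsigned long k, unsigned int w,
                        signed char *digits, size_t max)
{
    size_t len = 0;
    unsigned long mask = (1UL << (w + 1)) - 1; /* residues mod 2^(w+1) */

    while (k != 0 && len < max) {
        int d = 0;

        if (k & 1) {
            d = (int)(k & mask);       /* odd residue, 0 < d < 2^(w+1) */
            if (d >= (1 << w)) {
                d -= (int)(mask + 1);  /* use the negative representative */
                k += (unsigned long)(-d);
            } else {
                k -= (unsigned long)d;
            }
        }
        digits[len++] = (signed char)d; /* k is now even */
        k >>= 1;
    }
    return len;
}
/* e.g. demo_wnaf(29, 2, digits, 8) yields -3, 0, 0, 0, 0, 1: -3 + 2^5 == 29 */
#endif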

/* structure for precomputed multiples of the generator */
struct ec_pre_comp_st {
    const EC_GROUP *group;      /* parent EC_GROUP object */
    size_t blocksize;           /* block size for wNAF splitting */
    size_t numblocks;           /* max. number of blocks for which we have
                                 * precomputation */
    size_t w;                   /* window size */
    EC_POINT **points;          /* array with pre-calculated multiples of
                                 * generator: 'num' pointers to EC_POINT
                                 * objects followed by a NULL */
    size_t num;                 /* numblocks * 2^(w-1) */
    CRYPTO_REF_COUNT references;
    CRYPTO_RWLOCK *lock;
};
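
/*-
 * Illustrative sizing (with the defaults set in ec_wNAF_precompute_mult(),
 * blocksize = 8 and w = 4): a 256-bit group order gives numblocks = 32 and
 * 2^(w-1) = 8 odd multiples per block, i.e. num = 256 precomputed points,
 * roughly one per scalar bit.
 */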

static EC_PRE_COMP *ec_pre_comp_new(const EC_GROUP *group)
{
    EC_PRE_COMP *ret = NULL;

    if (!group)
        return NULL;

    ret = OPENSSL_zalloc(sizeof(*ret));
    if (ret == NULL) {
        ECerr(EC_F_EC_PRE_COMP_NEW, ERR_R_MALLOC_FAILURE);
        return ret;
    }

    ret->group = group;
    ret->blocksize = 8;         /* default */
    ret->w = 4;                 /* default */
    ret->references = 1;

    ret->lock = CRYPTO_THREAD_lock_new();
    if (ret->lock == NULL) {
        ECerr(EC_F_EC_PRE_COMP_NEW, ERR_R_MALLOC_FAILURE);
        OPENSSL_free(ret);
        return NULL;
    }
    return ret;
}

EC_PRE_COMP *EC_ec_pre_comp_dup(EC_PRE_COMP *pre)
{
    int i;
    if (pre != NULL)
        CRYPTO_UP_REF(&pre->references, &i, pre->lock);
    return pre;
}

void EC_ec_pre_comp_free(EC_PRE_COMP *pre)
{
    int i;

    if (pre == NULL)
        return;

    CRYPTO_DOWN_REF(&pre->references, &i, pre->lock);
    REF_PRINT_COUNT("EC_ec", pre);
    if (i > 0)
        return;
    REF_ASSERT_ISNT(i < 0);

    if (pre->points != NULL) {
        EC_POINT **pts;

        for (pts = pre->points; *pts != NULL; pts++)
            EC_POINT_free(*pts);
        OPENSSL_free(pre->points);
    }
    CRYPTO_THREAD_lock_free(pre->lock);
    OPENSSL_free(pre);
}

#define EC_POINT_BN_set_flags(P, flags) do { \
    BN_set_flags((P)->X, (flags));           \
    BN_set_flags((P)->Y, (flags));           \
    BN_set_flags((P)->Z, (flags));           \
} while(0)
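
/*
 * Flagging the X, Y and Z components of a point with BN_FLG_CONSTTIME asks
 * subsequent BIGNUM operations on them to prefer constant-time code paths
 * where such paths are available.
 */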

/*-
 * This function computes a single point multiplication over the EC group,
 * using, at a high level, a Montgomery ladder with conditional swaps, with
 * various timing attack defenses.
 *
 * It performs either a fixed point multiplication
 *          (scalar * generator)
 * when point is NULL, or a variable point multiplication
 *          (scalar * point)
 * when point is not NULL.
 *
 * `scalar` cannot be NULL and should be in the range [0,n) otherwise all
 * constant time bets are off (where n is the cardinality of the EC group).
 *
 * This function expects `group->order` and `group->cofactor` to be well
 * defined and non-zero: it fails with an error code otherwise.
 *
 * NB: This says nothing about the constant-timeness of the ladder step
 * implementation (i.e., the default implementation is based on EC_POINT_add and
 * EC_POINT_dbl, which of course are not constant time themselves) or the
 * underlying multiprecision arithmetic.
 *
 * The product is stored in `r`.
 *
 * This is an internal function: callers are in charge of ensuring that the
 * input parameters `group`, `r`, `scalar` and `ctx` are not NULL.
 *
 * Returns 1 on success, 0 otherwise.
 */
int ec_scalar_mul_ladder(const EC_GROUP *group, EC_POINT *r,
                         const BIGNUM *scalar, const EC_POINT *point,
                         BN_CTX *ctx)
{
    int i, cardinality_bits, group_top, kbit, pbit, Z_is_one;
    EC_POINT *p = NULL;
    EC_POINT *s = NULL;
    BIGNUM *k = NULL;
    BIGNUM *lambda = NULL;
    BIGNUM *cardinality = NULL;
    int ret = 0;

    /* early exit if the input point is the point at infinity */
    if (point != NULL && EC_POINT_is_at_infinity(group, point))
        return EC_POINT_set_to_infinity(group, r);

    if (BN_is_zero(group->order)) {
        ECerr(EC_F_EC_SCALAR_MUL_LADDER, EC_R_UNKNOWN_ORDER);
        return 0;
    }
    if (BN_is_zero(group->cofactor)) {
        ECerr(EC_F_EC_SCALAR_MUL_LADDER, EC_R_UNKNOWN_COFACTOR);
        return 0;
    }

    BN_CTX_start(ctx);

    if (((p = EC_POINT_new(group)) == NULL)
        || ((s = EC_POINT_new(group)) == NULL)) {
        ECerr(EC_F_EC_SCALAR_MUL_LADDER, ERR_R_MALLOC_FAILURE);
        goto err;
    }

    if (point == NULL) {
        if (!EC_POINT_copy(p, group->generator)) {
            ECerr(EC_F_EC_SCALAR_MUL_LADDER, ERR_R_EC_LIB);
            goto err;
        }
    } else {
        if (!EC_POINT_copy(p, point)) {
            ECerr(EC_F_EC_SCALAR_MUL_LADDER, ERR_R_EC_LIB);
            goto err;
        }
    }

    EC_POINT_BN_set_flags(p, BN_FLG_CONSTTIME);
    EC_POINT_BN_set_flags(r, BN_FLG_CONSTTIME);
    EC_POINT_BN_set_flags(s, BN_FLG_CONSTTIME);

    cardinality = BN_CTX_get(ctx);
    lambda = BN_CTX_get(ctx);
    k = BN_CTX_get(ctx);
    if (k == NULL) {
        ECerr(EC_F_EC_SCALAR_MUL_LADDER, ERR_R_MALLOC_FAILURE);
        goto err;
    }

    if (!BN_mul(cardinality, group->order, group->cofactor, ctx)) {
        ECerr(EC_F_EC_SCALAR_MUL_LADDER, ERR_R_BN_LIB);
        goto err;
    }

    /*
     * Group cardinalities are often on a word boundary.
     * So when we pad the scalar, some timing diff might
     * pop if it needs to be expanded due to carries.
     * So expand ahead of time.
     */
    cardinality_bits = BN_num_bits(cardinality);
    group_top = bn_get_top(cardinality);
    if ((bn_wexpand(k, group_top + 2) == NULL)
        || (bn_wexpand(lambda, group_top + 2) == NULL)) {
        ECerr(EC_F_EC_SCALAR_MUL_LADDER, ERR_R_BN_LIB);
        goto err;
    }

    if (!BN_copy(k, scalar)) {
        ECerr(EC_F_EC_SCALAR_MUL_LADDER, ERR_R_BN_LIB);
        goto err;
    }

    BN_set_flags(k, BN_FLG_CONSTTIME);

    if ((BN_num_bits(k) > cardinality_bits) || (BN_is_negative(k))) {
        /*-
         * this is an unusual input, and we don't guarantee
         * constant-timeness
         */
        if (!BN_nnmod(k, k, cardinality, ctx)) {
            ECerr(EC_F_EC_SCALAR_MUL_LADDER, ERR_R_BN_LIB);
            goto err;
        }
    }

    if (!BN_add(lambda, k, cardinality)) {
        ECerr(EC_F_EC_SCALAR_MUL_LADDER, ERR_R_BN_LIB);
        goto err;
    }
    BN_set_flags(lambda, BN_FLG_CONSTTIME);
    if (!BN_add(k, lambda, cardinality)) {
        ECerr(EC_F_EC_SCALAR_MUL_LADDER, ERR_R_BN_LIB);
        goto err;
    }
    /*
     * lambda := scalar + cardinality
     * k := scalar + 2*cardinality
     */
    kbit = BN_is_bit_set(lambda, cardinality_bits);
    BN_consttime_swap(kbit, k, lambda, group_top + 2);
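    /*
     * k is now either scalar + cardinality or scalar + 2*cardinality: the one
     * whose most significant bit sits exactly at position cardinality_bits.
     * Both candidates are congruent to scalar modulo the cardinality, so the
     * ladder computes the correct multiple while always processing the same,
     * fixed number of bits regardless of the scalar's bit length.
     */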

    group_top = bn_get_top(group->field);
    if ((bn_wexpand(s->X, group_top) == NULL)
        || (bn_wexpand(s->Y, group_top) == NULL)
        || (bn_wexpand(s->Z, group_top) == NULL)
        || (bn_wexpand(r->X, group_top) == NULL)
        || (bn_wexpand(r->Y, group_top) == NULL)
        || (bn_wexpand(r->Z, group_top) == NULL)
        || (bn_wexpand(p->X, group_top) == NULL)
        || (bn_wexpand(p->Y, group_top) == NULL)
        || (bn_wexpand(p->Z, group_top) == NULL)) {
        ECerr(EC_F_EC_SCALAR_MUL_LADDER, ERR_R_BN_LIB);
        goto err;
    }

    /* ensure input point is in affine coords for ladder step efficiency */
    if (!p->Z_is_one && (group->meth->make_affine == NULL
                         || !group->meth->make_affine(group, p, ctx))) {
        ECerr(EC_F_EC_SCALAR_MUL_LADDER, ERR_R_EC_LIB);
        goto err;
    }

    /* Initialize the Montgomery ladder */
    if (!ec_point_ladder_pre(group, r, s, p, ctx)) {
        ECerr(EC_F_EC_SCALAR_MUL_LADDER, EC_R_LADDER_PRE_FAILURE);
        goto err;
    }

    /* top bit is a 1, in a fixed pos */
    pbit = 1;

#define EC_POINT_CSWAP(c, a, b, w, t) do {         \
        BN_consttime_swap(c, (a)->X, (b)->X, w);   \
        BN_consttime_swap(c, (a)->Y, (b)->Y, w);   \
        BN_consttime_swap(c, (a)->Z, (b)->Z, w);   \
        t = ((a)->Z_is_one ^ (b)->Z_is_one) & (c); \
        (a)->Z_is_one ^= (t);                      \
        (b)->Z_is_one ^= (t);                      \
} while(0)
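
    /*
     * Note that the Z_is_one fields are plain ints, so BN_consttime_swap()
     * cannot exchange them; the XOR/AND mask above swaps them without
     * branching whenever the condition bit c is set.
     */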

    /*-
     * The ladder step, with branches, is
     *
     * k[i] == 0: S = add(R, S), R = dbl(R)
     * k[i] == 1: R = add(S, R), S = dbl(S)
     *
     * Swapping R, S conditionally on k[i] leaves you with state
     *
     * k[i] == 0: T, U = R, S
     * k[i] == 1: T, U = S, R
     *
     * Then perform the ECC ops.
     *
     * U = add(T, U)
     * T = dbl(T)
     *
     * Which leaves you with state
     *
     * k[i] == 0: U = add(R, S), T = dbl(R)
     * k[i] == 1: U = add(S, R), T = dbl(S)
     *
     * Swapping T, U conditionally on k[i] leaves you with state
     *
     * k[i] == 0: R, S = T, U
     * k[i] == 1: R, S = U, T
     *
     * Which leaves you with state
     *
     * k[i] == 0: S = add(R, S), R = dbl(R)
     * k[i] == 1: R = add(S, R), S = dbl(S)
     *
     * So we get the same logic, but instead of a branch it's a
     * conditional swap, followed by ECC ops, then another conditional swap.
     *
     * Optimization: The end of iteration i and start of i-1 looks like
     *
     * ...
     * CSWAP(k[i], R, S)
     * ECC
     * CSWAP(k[i], R, S)
     * (next iteration)
     * CSWAP(k[i-1], R, S)
     * ECC
     * CSWAP(k[i-1], R, S)
     * ...
     *
     * So instead of two contiguous swaps, you can merge the condition
     * bits and do a single swap.
     *
     * k[i]   k[i-1]    Outcome
     * 0      0         No Swap
     * 0      1         Swap
     * 1      0         Swap
     * 1      1         No Swap
     *
     * This is XOR. pbit tracks the previous bit of k.
     */

    for (i = cardinality_bits - 1; i >= 0; i--) {
        kbit = BN_is_bit_set(k, i) ^ pbit;
        EC_POINT_CSWAP(kbit, r, s, group_top, Z_is_one);

        /* Perform a single step of the Montgomery ladder */
        if (!ec_point_ladder_step(group, r, s, p, ctx)) {
            ECerr(EC_F_EC_SCALAR_MUL_LADDER, EC_R_LADDER_STEP_FAILURE);
            goto err;
        }
        /*
         * pbit logic merges this cswap with that of the
         * next iteration
         */
        pbit ^= kbit;
    }
    /* one final cswap to move the right value into r */
    EC_POINT_CSWAP(pbit, r, s, group_top, Z_is_one);
#undef EC_POINT_CSWAP

    /* Finalize ladder (and recover full point coordinates) */
    if (!ec_point_ladder_post(group, r, s, p, ctx)) {
        ECerr(EC_F_EC_SCALAR_MUL_LADDER, EC_R_LADDER_POST_FAILURE);
        goto err;
    }

    ret = 1;

 err:
    EC_POINT_free(p);
    EC_POINT_clear_free(s);
    BN_CTX_end(ctx);

    return ret;
}

#undef EC_POINT_BN_set_flags

/*
 * TODO: table should be optimised for the wNAF-based implementation,
 * sometimes smaller windows will give better performance (thus the
 * boundaries should be increased)
 */
#define EC_window_bits_for_scalar_size(b) \
                ((size_t) \
                 ((b) >= 2000 ? 6 : \
                  (b) >=  800 ? 5 : \
                  (b) >=  300 ? 4 : \
                  (b) >=   70 ? 3 : \
                  (b) >=   20 ? 2 : \
                  1))
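
/*-
 * For example, a 256-bit scalar gets a 3-bit window (2^2 = 4 precomputed odd
 * multiples per point), 384- and 521-bit scalars get 4-bit windows, and 5-bit
 * windows are only used from 800 bits upwards.
 */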

/*-
 * Compute
 *      \sum scalars[i]*points[i],
 * also including
 *      scalar*generator
 * in the addition if scalar != NULL
 */
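/*
 * For example, ECDSA signature verification typically lands here via
 * EC_POINT_mul() with scalar = u1, num = 1, points[0] = the signer's public
 * key and scalars[0] = u2, i.e. it computes u1*generator + u2*pubkey.
 */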
int ec_wNAF_mul(const EC_GROUP *group, EC_POINT *r, const BIGNUM *scalar,
                size_t num, const EC_POINT *points[], const BIGNUM *scalars[],
                BN_CTX *ctx)
{
    const EC_POINT *generator = NULL;
    EC_POINT *tmp = NULL;
    size_t totalnum;
    size_t blocksize = 0, numblocks = 0; /* for wNAF splitting */
    size_t pre_points_per_block = 0;
    size_t i, j;
    int k;
    int r_is_inverted = 0;
    int r_is_at_infinity = 1;
    size_t *wsize = NULL;       /* individual window sizes */
    signed char **wNAF = NULL;  /* individual wNAFs */
    size_t *wNAF_len = NULL;
    size_t max_len = 0;
    size_t num_val;
    EC_POINT **val = NULL;      /* precomputation */
    EC_POINT **v;
    EC_POINT ***val_sub = NULL; /* pointers to sub-arrays of 'val' or
                                 * 'pre_comp->points' */
    const EC_PRE_COMP *pre_comp = NULL;
    int num_scalar = 0;         /* flag: will be set to 1 if 'scalar' must be
                                 * treated like other scalars, i.e.
                                 * precomputation is not available */
    int ret = 0;

    if (!BN_is_zero(group->order) && !BN_is_zero(group->cofactor)) {
        /*-
         * Handle the common cases where the scalar is secret, enforcing a
         * scalar multiplication implementation based on a Montgomery ladder,
         * with various timing attack defenses.
         */
        if ((scalar != group->order) && (scalar != NULL) && (num == 0)) {
            /*-
             * In this case we want to compute scalar * GeneratorPoint: this
             * codepath is reached most prominently by (ephemeral) key
             * generation of EC cryptosystems (i.e. ECDSA keygen and sign setup,
             * ECDH keygen/first half), where the scalar is always secret. This
             * is why we ignore if BN_FLG_CONSTTIME is actually set and we
             * always call the ladder version.
             */
            return ec_scalar_mul_ladder(group, r, scalar, NULL, ctx);
        }
        if ((scalar == NULL) && (num == 1) && (scalars[0] != group->order)) {
            /*-
             * In this case we want to compute scalar * VariablePoint: this
             * codepath is reached most prominently by the second half of ECDH,
             * where the secret scalar is multiplied by the peer's public point.
             * To protect the secret scalar, we ignore if BN_FLG_CONSTTIME is
             * actually set and we always call the ladder version.
             */
            return ec_scalar_mul_ladder(group, r, scalars[0], points[0], ctx);
        }
    }

    if (scalar != NULL) {
        generator = EC_GROUP_get0_generator(group);
        if (generator == NULL) {
            ECerr(EC_F_EC_WNAF_MUL, EC_R_UNDEFINED_GENERATOR);
            goto err;
        }

        /* look if we can use precomputed multiples of generator */

        pre_comp = group->pre_comp.ec;
        if (pre_comp && pre_comp->numblocks
            && (EC_POINT_cmp(group, generator, pre_comp->points[0], ctx) ==
                0)) {
            blocksize = pre_comp->blocksize;

            /*
             * determine maximum number of blocks that wNAF splitting may
             * yield (NB: maximum wNAF length is bit length plus one)
             */
            numblocks = (BN_num_bits(scalar) / blocksize) + 1;

            /*
             * we cannot use more blocks than we have precomputation for
             */
            if (numblocks > pre_comp->numblocks)
                numblocks = pre_comp->numblocks;

            pre_points_per_block = (size_t)1 << (pre_comp->w - 1);

            /* check that pre_comp looks sane */
            if (pre_comp->num != (pre_comp->numblocks * pre_points_per_block)) {
                ECerr(EC_F_EC_WNAF_MUL, ERR_R_INTERNAL_ERROR);
                goto err;
            }
        } else {
            /* can't use precomputation */
            pre_comp = NULL;
            numblocks = 1;
            num_scalar = 1;     /* treat 'scalar' like 'num'-th element of
                                 * 'scalars' */
        }
    }

    totalnum = num + numblocks;

    wsize = OPENSSL_malloc(totalnum * sizeof(wsize[0]));
    wNAF_len = OPENSSL_malloc(totalnum * sizeof(wNAF_len[0]));
    /* include space for pivot */
    wNAF = OPENSSL_malloc((totalnum + 1) * sizeof(wNAF[0]));
    val_sub = OPENSSL_malloc(totalnum * sizeof(val_sub[0]));

    /* Ensure wNAF is initialised in case we end up going to err */
    if (wNAF != NULL)
        wNAF[0] = NULL;         /* preliminary pivot */

    if (wsize == NULL || wNAF_len == NULL || wNAF == NULL || val_sub == NULL) {
        ECerr(EC_F_EC_WNAF_MUL, ERR_R_MALLOC_FAILURE);
        goto err;
    }

    /*
     * num_val will be the total number of temporarily precomputed points
     */
    num_val = 0;

    for (i = 0; i < num + num_scalar; i++) {
        size_t bits;

        bits = i < num ? BN_num_bits(scalars[i]) : BN_num_bits(scalar);
        wsize[i] = EC_window_bits_for_scalar_size(bits);
        num_val += (size_t)1 << (wsize[i] - 1);
        wNAF[i + 1] = NULL;     /* make sure we always have a pivot */
        wNAF[i] =
            bn_compute_wNAF((i < num ? scalars[i] : scalar), wsize[i],
                            &wNAF_len[i]);
        if (wNAF[i] == NULL)
            goto err;
        if (wNAF_len[i] > max_len)
            max_len = wNAF_len[i];
    }

    if (numblocks) {
        /* we go here iff scalar != NULL */

        if (pre_comp == NULL) {
            if (num_scalar != 1) {
                ECerr(EC_F_EC_WNAF_MUL, ERR_R_INTERNAL_ERROR);
                goto err;
            }
            /* we have already generated a wNAF for 'scalar' */
        } else {
            signed char *tmp_wNAF = NULL;
            size_t tmp_len = 0;

            if (num_scalar != 0) {
                ECerr(EC_F_EC_WNAF_MUL, ERR_R_INTERNAL_ERROR);
                goto err;
            }

            /*
             * use the window size for which we have precomputation
             */
            wsize[num] = pre_comp->w;
            tmp_wNAF = bn_compute_wNAF(scalar, wsize[num], &tmp_len);
            if (!tmp_wNAF)
                goto err;

            if (tmp_len <= max_len) {
                /*
                 * One of the other wNAFs is at least as long as the wNAF
                 * belonging to the generator, so wNAF splitting will not buy
                 * us anything.
                 */

                numblocks = 1;
                totalnum = num + 1; /* don't use wNAF splitting */
                wNAF[num] = tmp_wNAF;
                wNAF[num + 1] = NULL;
                wNAF_len[num] = tmp_len;
                /*
                 * pre_comp->points starts with the points that we need here:
                 */
                val_sub[num] = pre_comp->points;
            } else {
                /*
                 * don't include tmp_wNAF directly into wNAF array - use wNAF
                 * splitting and include the blocks
                 */

                signed char *pp;
                EC_POINT **tmp_points;

                if (tmp_len < numblocks * blocksize) {
                    /*
                     * possibly we can do with fewer blocks than estimated
                     */
                    numblocks = (tmp_len + blocksize - 1) / blocksize;
                    if (numblocks > pre_comp->numblocks) {
                        ECerr(EC_F_EC_WNAF_MUL, ERR_R_INTERNAL_ERROR);
                        OPENSSL_free(tmp_wNAF);
                        goto err;
                    }
                    totalnum = num + numblocks;
                }

                /* split wNAF in 'numblocks' parts */
                pp = tmp_wNAF;
                tmp_points = pre_comp->points;

                for (i = num; i < totalnum; i++) {
                    if (i < totalnum - 1) {
                        wNAF_len[i] = blocksize;
                        if (tmp_len < blocksize) {
                            ECerr(EC_F_EC_WNAF_MUL, ERR_R_INTERNAL_ERROR);
                            OPENSSL_free(tmp_wNAF);
                            goto err;
                        }
                        tmp_len -= blocksize;
                    } else
                        /*
                         * last block gets whatever is left (this could be
                         * more or less than 'blocksize'!)
                         */
                        wNAF_len[i] = tmp_len;

                    wNAF[i + 1] = NULL;
                    wNAF[i] = OPENSSL_malloc(wNAF_len[i]);
                    if (wNAF[i] == NULL) {
                        ECerr(EC_F_EC_WNAF_MUL, ERR_R_MALLOC_FAILURE);
                        OPENSSL_free(tmp_wNAF);
                        goto err;
                    }
                    memcpy(wNAF[i], pp, wNAF_len[i]);
                    if (wNAF_len[i] > max_len)
                        max_len = wNAF_len[i];

                    if (*tmp_points == NULL) {
                        ECerr(EC_F_EC_WNAF_MUL, ERR_R_INTERNAL_ERROR);
                        OPENSSL_free(tmp_wNAF);
                        goto err;
                    }
                    val_sub[i] = tmp_points;
                    tmp_points += pre_points_per_block;
                    pp += blocksize;
                }
                OPENSSL_free(tmp_wNAF);
            }
        }
    }

    /*
     * All points we precompute now go into a single array 'val'.
     * 'val_sub[i]' is a pointer to the subarray for the i-th point, or to a
     * subarray of 'pre_comp->points' if we already have precomputation.
     */
    val = OPENSSL_malloc((num_val + 1) * sizeof(val[0]));
    if (val == NULL) {
        ECerr(EC_F_EC_WNAF_MUL, ERR_R_MALLOC_FAILURE);
        goto err;
    }
    val[num_val] = NULL;        /* pivot element */

    /* allocate points for precomputation */
    v = val;
    for (i = 0; i < num + num_scalar; i++) {
        val_sub[i] = v;
        for (j = 0; j < ((size_t)1 << (wsize[i] - 1)); j++) {
            *v = EC_POINT_new(group);
            if (*v == NULL)
                goto err;
            v++;
        }
    }
    if (!(v == val + num_val)) {
        ECerr(EC_F_EC_WNAF_MUL, ERR_R_INTERNAL_ERROR);
        goto err;
    }

    if ((tmp = EC_POINT_new(group)) == NULL)
        goto err;

    /*-
     * prepare precomputed values:
     *    val_sub[i][0] :=     points[i]
     *    val_sub[i][1] := 3 * points[i]
     *    val_sub[i][2] := 5 * points[i]
     *    ...
     */
    for (i = 0; i < num + num_scalar; i++) {
        if (i < num) {
            if (!EC_POINT_copy(val_sub[i][0], points[i]))
                goto err;
        } else {
            if (!EC_POINT_copy(val_sub[i][0], generator))
                goto err;
        }

        if (wsize[i] > 1) {
            if (!EC_POINT_dbl(group, tmp, val_sub[i][0], ctx))
                goto err;
            for (j = 1; j < ((size_t)1 << (wsize[i] - 1)); j++) {
                if (!EC_POINT_add
                    (group, val_sub[i][j], val_sub[i][j - 1], tmp, ctx))
                    goto err;
            }
        }
    }

    if (group->meth->points_make_affine == NULL
        || !group->meth->points_make_affine(group, num_val, val, ctx))
        goto err;

    r_is_at_infinity = 1;

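    /*-
     * Interleaved evaluation: walk all wNAFs from the most significant digit
     * position down to 0.  Each position costs one doubling of the shared
     * accumulator r plus one addition per nonzero digit; negative digits are
     * handled by tracking whether r is currently inverted rather than by
     * negating the table entries.
     */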
    for (k = max_len - 1; k >= 0; k--) {
        if (!r_is_at_infinity) {
            if (!EC_POINT_dbl(group, r, r, ctx))
                goto err;
        }

        for (i = 0; i < totalnum; i++) {
            if (wNAF_len[i] > (size_t)k) {
                int digit = wNAF[i][k];
                int is_neg;

                if (digit) {
                    is_neg = digit < 0;

                    if (is_neg)
                        digit = -digit;

                    if (is_neg != r_is_inverted) {
                        if (!r_is_at_infinity) {
                            if (!EC_POINT_invert(group, r, ctx))
                                goto err;
                        }
                        r_is_inverted = !r_is_inverted;
                    }

                    /* digit > 0 */

                    if (r_is_at_infinity) {
                        if (!EC_POINT_copy(r, val_sub[i][digit >> 1]))
                            goto err;

                        /*-
                         * Apply coordinate blinding for EC_POINT.
                         *
                         * The underlying EC_METHOD can optionally implement this function:
                         * ec_point_blind_coordinates() returns 0 in case of errors or 1 on
                         * success or if coordinate blinding is not implemented for this
                         * group.
                         */
                        if (!ec_point_blind_coordinates(group, r, ctx)) {
                            ECerr(EC_F_EC_WNAF_MUL, EC_R_POINT_COORDINATES_BLIND_FAILURE);
                            goto err;
                        }

                        r_is_at_infinity = 0;
                    } else {
                        if (!EC_POINT_add
                            (group, r, r, val_sub[i][digit >> 1], ctx))
                            goto err;
                    }
                }
            }
        }
    }

    if (r_is_at_infinity) {
        if (!EC_POINT_set_to_infinity(group, r))
            goto err;
    } else {
        if (r_is_inverted)
            if (!EC_POINT_invert(group, r, ctx))
                goto err;
    }

    ret = 1;

 err:
    EC_POINT_free(tmp);
    OPENSSL_free(wsize);
    OPENSSL_free(wNAF_len);
    if (wNAF != NULL) {
        signed char **w;

        for (w = wNAF; *w != NULL; w++)
            OPENSSL_free(*w);

        OPENSSL_free(wNAF);
    }
    if (val != NULL) {
        for (v = val; *v != NULL; v++)
            EC_POINT_clear_free(*v);

        OPENSSL_free(val);
    }
    OPENSSL_free(val_sub);
    return ret;
}

/*-
 * ec_wNAF_precompute_mult()
 * creates an EC_PRE_COMP object with precomputed multiples of the generator
 * for use with wNAF splitting as implemented in ec_wNAF_mul().
 *
 * 'pre_comp->points' is an array of multiples of the generator
 * of the following form:
 *   points[0]         =     generator;
 *   points[1]         = 3 * generator;
 *   ...
 *   points[2^(w-1)-1] = (2^w - 1) * generator;
 *   points[2^(w-1)]   =             2^blocksize * generator;
 *   points[2^(w-1)+1] =         3 * 2^blocksize * generator;
 *   ...
 *   points[2^(w-1)*(numblocks-1)-1] = (2^w - 1) * 2^(blocksize*(numblocks-2)) * generator
 *   points[2^(w-1)*(numblocks-1)]   =             2^(blocksize*(numblocks-1)) * generator
 *   ...
 *   points[2^(w-1)*numblocks-1]     = (2^w - 1) * 2^(blocksize*(numblocks-1)) * generator
 *   points[2^(w-1)*numblocks]       = NULL
 */
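/*-
 * Illustrative layout with w = 4, blocksize = 8 and numblocks = 2:
 *   points[0..7]  = {1, 3, 5, ..., 15} * generator
 *   points[8..15] = {1, 3, 5, ..., 15} * 2^8 * generator
 *   points[16]    = NULL
 */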
int ec_wNAF_precompute_mult(EC_GROUP *group, BN_CTX *ctx)
{
    const EC_POINT *generator;
    EC_POINT *tmp_point = NULL, *base = NULL, **var;
    const BIGNUM *order;
    size_t i, bits, w, pre_points_per_block, blocksize, numblocks, num;
    EC_POINT **points = NULL;
    EC_PRE_COMP *pre_comp;
    int ret = 0;
#ifndef FIPS_MODULE
    BN_CTX *new_ctx = NULL;
#endif

    /* if there is an old EC_PRE_COMP object, throw it away */
    EC_pre_comp_free(group);
    if ((pre_comp = ec_pre_comp_new(group)) == NULL)
        return 0;

    generator = EC_GROUP_get0_generator(group);
    if (generator == NULL) {
        ECerr(EC_F_EC_WNAF_PRECOMPUTE_MULT, EC_R_UNDEFINED_GENERATOR);
        goto err;
    }

#ifndef FIPS_MODULE
    if (ctx == NULL)
        ctx = new_ctx = BN_CTX_new();
#endif
    if (ctx == NULL)
        goto err;

    BN_CTX_start(ctx);

    order = EC_GROUP_get0_order(group);
    if (order == NULL)
        goto err;
    if (BN_is_zero(order)) {
        ECerr(EC_F_EC_WNAF_PRECOMPUTE_MULT, EC_R_UNKNOWN_ORDER);
        goto err;
    }

    bits = BN_num_bits(order);
    /*
     * The following parameters mean we precompute (approximately) one point
     * per bit. TBD: The combination 8, 4 is perfect for 160 bits; for other
     * bit lengths, other parameter combinations might provide better
     * efficiency.
     */
    blocksize = 8;
    w = 4;
    if (EC_window_bits_for_scalar_size(bits) > w) {
        /* let's not make the window too small ... */
        w = EC_window_bits_for_scalar_size(bits);
    }

    numblocks = (bits + blocksize - 1) / blocksize; /* max. number of blocks
                                                     * to use for wNAF
                                                     * splitting */

    pre_points_per_block = (size_t)1 << (w - 1);
    num = pre_points_per_block * numblocks; /* number of points to compute
                                             * and store */

    points = OPENSSL_malloc(sizeof(*points) * (num + 1));
    if (points == NULL) {
        ECerr(EC_F_EC_WNAF_PRECOMPUTE_MULT, ERR_R_MALLOC_FAILURE);
        goto err;
    }

    var = points;
    var[num] = NULL;            /* pivot */
    for (i = 0; i < num; i++) {
        if ((var[i] = EC_POINT_new(group)) == NULL) {
            ECerr(EC_F_EC_WNAF_PRECOMPUTE_MULT, ERR_R_MALLOC_FAILURE);
            goto err;
        }
    }

    if ((tmp_point = EC_POINT_new(group)) == NULL
        || (base = EC_POINT_new(group)) == NULL) {
        ECerr(EC_F_EC_WNAF_PRECOMPUTE_MULT, ERR_R_MALLOC_FAILURE);
        goto err;
    }

    if (!EC_POINT_copy(base, generator))
        goto err;

    /* do the precomputation */
    for (i = 0; i < numblocks; i++) {
        size_t j;

        if (!EC_POINT_dbl(group, tmp_point, base, ctx))
            goto err;

        if (!EC_POINT_copy(*var++, base))
            goto err;

        for (j = 1; j < pre_points_per_block; j++, var++) {
            /*
             * calculate odd multiples of the current base point
             */
            if (!EC_POINT_add(group, *var, tmp_point, *(var - 1), ctx))
                goto err;
        }

        if (i < numblocks - 1) {
            /*
             * get the next base (multiply current one by 2^blocksize)
             */
            size_t k;

            if (blocksize <= 2) {
                ECerr(EC_F_EC_WNAF_PRECOMPUTE_MULT, ERR_R_INTERNAL_ERROR);
                goto err;
            }

            if (!EC_POINT_dbl(group, base, tmp_point, ctx))
                goto err;
            for (k = 2; k < blocksize; k++) {
                if (!EC_POINT_dbl(group, base, base, ctx))
                    goto err;
            }
        }
    }

    if (group->meth->points_make_affine == NULL
        || !group->meth->points_make_affine(group, num, points, ctx))
        goto err;

    pre_comp->group = group;
    pre_comp->blocksize = blocksize;
    pre_comp->numblocks = numblocks;
    pre_comp->w = w;
    pre_comp->points = points;
    points = NULL;
    pre_comp->num = num;
    SETPRECOMP(group, ec, pre_comp);
    pre_comp = NULL;
    ret = 1;

 err:
    BN_CTX_end(ctx);
#ifndef FIPS_MODULE
    BN_CTX_free(new_ctx);
#endif
    EC_ec_pre_comp_free(pre_comp);
    if (points) {
        EC_POINT **p;

        for (p = points; *p != NULL; p++)
            EC_POINT_free(*p);
        OPENSSL_free(points);
    }
    EC_POINT_free(tmp_point);
    EC_POINT_free(base);
    return ret;
}

int ec_wNAF_have_precompute_mult(const EC_GROUP *group)
{
    return HAVEPRECOMP(group, ec);
}