]> git.ipfire.org Git - thirdparty/openssl.git/blob - crypto/ec/ecp_nistp224.c
Enable curve-specific ECDSA implementations via EC_METHOD
[thirdparty/openssl.git] / crypto / ec / ecp_nistp224.c
1 /*
2 * Copyright 2010-2018 The OpenSSL Project Authors. All Rights Reserved.
3 *
4 * Licensed under the Apache License 2.0 (the "License"). You may not use
5 * this file except in compliance with the License. You can obtain a copy
6 * in the file LICENSE in the source distribution or at
7 * https://www.openssl.org/source/license.html
8 */
9
10 /* Copyright 2011 Google Inc.
11 *
12 * Licensed under the Apache License, Version 2.0 (the "License");
13 *
14 * you may not use this file except in compliance with the License.
15 * You may obtain a copy of the License at
16 *
17 * http://www.apache.org/licenses/LICENSE-2.0
18 *
19 * Unless required by applicable law or agreed to in writing, software
20 * distributed under the License is distributed on an "AS IS" BASIS,
21 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
22 * See the License for the specific language governing permissions and
23 * limitations under the License.
24 */
25
26 /*
27 * A 64-bit implementation of the NIST P-224 elliptic curve point multiplication
28 *
29 * Inspired by Daniel J. Bernstein's public domain nistp224 implementation
30 * and Adam Langley's public domain 64-bit C implementation of curve25519
31 */
32
33 #include <openssl/opensslconf.h>
34 #ifdef OPENSSL_NO_EC_NISTP_64_GCC_128
35 NON_EMPTY_TRANSLATION_UNIT
36 #else
37
38 # include <stdint.h>
39 # include <string.h>
40 # include <openssl/err.h>
41 # include "ec_lcl.h"
42
43 # if defined(__SIZEOF_INT128__) && __SIZEOF_INT128__==16
44 /* even with gcc, the typedef won't work for 32-bit platforms */
45 typedef __uint128_t uint128_t; /* nonstandard; implemented by gcc on 64-bit
46 * platforms */
47 # else
48 # error "Your compiler doesn't appear to support 128-bit integer types"
49 # endif
50
typedef uint8_t u8;             /* raw byte */
typedef uint64_t u64;           /* 64-bit machine word */
53
54 /******************************************************************************/
55 /*-
56 * INTERNAL REPRESENTATION OF FIELD ELEMENTS
57 *
58 * Field elements are represented as a_0 + 2^56*a_1 + 2^112*a_2 + 2^168*a_3
59 * using 64-bit coefficients called 'limbs',
60 * and sometimes (for multiplication results) as
61 * b_0 + 2^56*b_1 + 2^112*b_2 + 2^168*b_3 + 2^224*b_4 + 2^280*b_5 + 2^336*b_6
62 * using 128-bit coefficients called 'widelimbs'.
63 * A 4-limb representation is an 'felem';
64 * a 7-widelimb representation is a 'widefelem'.
65 * Even within felems, bits of adjacent limbs overlap, and we don't always
66 * reduce the representations: we ensure that inputs to each felem
67 * multiplication satisfy a_i < 2^60, so outputs satisfy b_i < 4*2^60*2^60,
68 * and fit into a 128-bit word without overflow. The coefficients are then
69 * again partially reduced to obtain an felem satisfying a_i < 2^57.
70 * We only reduce to the unique minimal representation at the end of the
71 * computation.
72 */
73
typedef uint64_t limb;          /* 64-bit word holding one (possibly unreduced) coefficient */
typedef uint128_t widelimb;     /* 128-bit word for unreduced multiplication results */

typedef limb felem[4];          /* field element: 4 limbs, radix 2^56, least-significant first */
typedef widelimb widefelem[7];  /* felem product before reduction: 7 widelimbs */

/*
 * Field element represented as a byte array. 28*8 = 224 bits is also the
 * group order size for the elliptic curve, and we also use this type for
 * scalars for point multiplication.
 */
typedef u8 felem_bytearray[28];
86
/*
 * NIST P-224 domain parameters as big-endian byte strings:
 * the prime p = 2^224 - 2^96 + 1, the curve coefficients a and b, and the
 * affine coordinates (x, y) of the standard generator.
 */
static const felem_bytearray nistp224_curve_params[5] = {
    {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, /* p */
     0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00,
     0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
    {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, /* a */
     0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF,
     0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE},
    {0xB4, 0x05, 0x0A, 0x85, 0x0C, 0x04, 0xB3, 0xAB, 0xF5, 0x41, /* b */
     0x32, 0x56, 0x50, 0x44, 0xB0, 0xB7, 0xD7, 0xBF, 0xD8, 0xBA,
     0x27, 0x0B, 0x39, 0x43, 0x23, 0x55, 0xFF, 0xB4},
    {0xB7, 0x0E, 0x0C, 0xBD, 0x6B, 0xB4, 0xBF, 0x7F, 0x32, 0x13, /* x */
     0x90, 0xB9, 0x4A, 0x03, 0xC1, 0xD3, 0x56, 0xC2, 0x11, 0x22,
     0x34, 0x32, 0x80, 0xD6, 0x11, 0x5C, 0x1D, 0x21},
    {0xbd, 0x37, 0x63, 0x88, 0xb5, 0xf7, 0x23, 0xfb, 0x4c, 0x22, /* y */
     0xdf, 0xe6, 0xcd, 0x43, 0x75, 0xa0, 0x5a, 0x07, 0x47, 0x64,
     0x44, 0xd5, 0x81, 0x99, 0x85, 0x00, 0x7e, 0x34}
};
104
105 /*-
106 * Precomputed multiples of the standard generator
107 * Points are given in coordinates (X, Y, Z) where Z normally is 1
108 * (0 for the point at infinity).
109 * For each field element, slice a_0 is word 0, etc.
110 *
111 * The table has 2 * 16 elements, starting with the following:
112 * index | bits | point
113 * ------+---------+------------------------------
114 * 0 | 0 0 0 0 | 0G
115 * 1 | 0 0 0 1 | 1G
116 * 2 | 0 0 1 0 | 2^56G
117 * 3 | 0 0 1 1 | (2^56 + 1)G
118 * 4 | 0 1 0 0 | 2^112G
119 * 5 | 0 1 0 1 | (2^112 + 1)G
120 * 6 | 0 1 1 0 | (2^112 + 2^56)G
121 * 7 | 0 1 1 1 | (2^112 + 2^56 + 1)G
122 * 8 | 1 0 0 0 | 2^168G
123 * 9 | 1 0 0 1 | (2^168 + 1)G
124 * 10 | 1 0 1 0 | (2^168 + 2^56)G
125 * 11 | 1 0 1 1 | (2^168 + 2^56 + 1)G
126 * 12 | 1 1 0 0 | (2^168 + 2^112)G
127 * 13 | 1 1 0 1 | (2^168 + 2^112 + 1)G
128 * 14 | 1 1 1 0 | (2^168 + 2^112 + 2^56)G
129 * 15 | 1 1 1 1 | (2^168 + 2^112 + 2^56 + 1)G
130 * followed by a copy of this with each element multiplied by 2^28.
131 *
132 * The reason for this is so that we can clock bits into four different
133 * locations when doing simple scalar multiplies against the base point,
134 * and then another four locations using the second 16 elements.
135 */
/*
 * Table entries are (X, Y, Z) in the 4 x 56-bit limb representation,
 * least-significant limb first; Z = {1, 0, 0, 0} is the felem for 1,
 * marking an affine point, and all-zero Z marks the point at infinity.
 */
static const felem gmul[2][16][3] = {
    {{{0, 0, 0, 0},
      {0, 0, 0, 0},
      {0, 0, 0, 0}},
     {{0x3280d6115c1d21, 0xc1d356c2112234, 0x7f321390b94a03, 0xb70e0cbd6bb4bf},
      {0xd5819985007e34, 0x75a05a07476444, 0xfb4c22dfe6cd43, 0xbd376388b5f723},
      {1, 0, 0, 0}},
     {{0xfd9675666ebbe9, 0xbca7664d40ce5e, 0x2242df8d8a2a43, 0x1f49bbb0f99bc5},
      {0x29e0b892dc9c43, 0xece8608436e662, 0xdc858f185310d0, 0x9812dd4eb8d321},
      {1, 0, 0, 0}},
     {{0x6d3e678d5d8eb8, 0x559eed1cb362f1, 0x16e9a3bbce8a3f, 0xeedcccd8c2a748},
      {0xf19f90ed50266d, 0xabf2b4bf65f9df, 0x313865468fafec, 0x5cb379ba910a17},
      {1, 0, 0, 0}},
     {{0x0641966cab26e3, 0x91fb2991fab0a0, 0xefec27a4e13a0b, 0x0499aa8a5f8ebe},
      {0x7510407766af5d, 0x84d929610d5450, 0x81d77aae82f706, 0x6916f6d4338c5b},
      {1, 0, 0, 0}},
     {{0xea95ac3b1f15c6, 0x086000905e82d4, 0xdd323ae4d1c8b1, 0x932b56be7685a3},
      {0x9ef93dea25dbbf, 0x41665960f390f0, 0xfdec76dbe2a8a7, 0x523e80f019062a},
      {1, 0, 0, 0}},
     {{0x822fdd26732c73, 0xa01c83531b5d0f, 0x363f37347c1ba4, 0xc391b45c84725c},
      {0xbbd5e1b2d6ad24, 0xddfbcde19dfaec, 0xc393da7e222a7f, 0x1efb7890ede244},
      {1, 0, 0, 0}},
     {{0x4c9e90ca217da1, 0xd11beca79159bb, 0xff8d33c2c98b7c, 0x2610b39409f849},
      {0x44d1352ac64da0, 0xcdbb7b2c46b4fb, 0x966c079b753c89, 0xfe67e4e820b112},
      {1, 0, 0, 0}},
     {{0xe28cae2df5312d, 0xc71b61d16f5c6e, 0x79b7619a3e7c4c, 0x05c73240899b47},
      {0x9f7f6382c73e3a, 0x18615165c56bda, 0x641fab2116fd56, 0x72855882b08394},
      {1, 0, 0, 0}},
     {{0x0469182f161c09, 0x74a98ca8d00fb5, 0xb89da93489a3e0, 0x41c98768fb0c1d},
      {0xe5ea05fb32da81, 0x3dce9ffbca6855, 0x1cfe2d3fbf59e6, 0x0e5e03408738a7},
      {1, 0, 0, 0}},
     {{0xdab22b2333e87f, 0x4430137a5dd2f6, 0xe03ab9f738beb8, 0xcb0c5d0dc34f24},
      {0x764a7df0c8fda5, 0x185ba5c3fa2044, 0x9281d688bcbe50, 0xc40331df893881},
      {1, 0, 0, 0}},
     {{0xb89530796f0f60, 0xade92bd26909a3, 0x1a0c83fb4884da, 0x1765bf22a5a984},
      {0x772a9ee75db09e, 0x23bc6c67cec16f, 0x4c1edba8b14e2f, 0xe2a215d9611369},
      {1, 0, 0, 0}},
     {{0x571e509fb5efb3, 0xade88696410552, 0xc8ae85fada74fe, 0x6c7e4be83bbde3},
      {0xff9f51160f4652, 0xb47ce2495a6539, 0xa2946c53b582f4, 0x286d2db3ee9a60},
      {1, 0, 0, 0}},
     {{0x40bbd5081a44af, 0x0995183b13926c, 0xbcefba6f47f6d0, 0x215619e9cc0057},
      {0x8bc94d3b0df45e, 0xf11c54a3694f6f, 0x8631b93cdfe8b5, 0xe7e3f4b0982db9},
      {1, 0, 0, 0}},
     {{0xb17048ab3e1c7b, 0xac38f36ff8a1d8, 0x1c29819435d2c6, 0xc813132f4c07e9},
      {0x2891425503b11f, 0x08781030579fea, 0xf5426ba5cc9674, 0x1e28ebf18562bc},
      {1, 0, 0, 0}},
     {{0x9f31997cc864eb, 0x06cd91d28b5e4c, 0xff17036691a973, 0xf1aef351497c58},
      {0xdd1f2d600564ff, 0xdead073b1402db, 0x74a684435bd693, 0xeea7471f962558},
      {1, 0, 0, 0}}},
    {{{0, 0, 0, 0},
      {0, 0, 0, 0},
      {0, 0, 0, 0}},
     {{0x9665266dddf554, 0x9613d78b60ef2d, 0xce27a34cdba417, 0xd35ab74d6afc31},
      {0x85ccdd22deb15e, 0x2137e5783a6aab, 0xa141cffd8c93c6, 0x355a1830e90f2d},
      {1, 0, 0, 0}},
     {{0x1a494eadaade65, 0xd6da4da77fe53c, 0xe7992996abec86, 0x65c3553c6090e3},
      {0xfa610b1fb09346, 0xf1c6540b8a4aaf, 0xc51a13ccd3cbab, 0x02995b1b18c28a},
      {1, 0, 0, 0}},
     {{0x7874568e7295ef, 0x86b419fbe38d04, 0xdc0690a7550d9a, 0xd3966a44beac33},
      {0x2b7280ec29132f, 0xbeaa3b6a032df3, 0xdc7dd88ae41200, 0xd25e2513e3a100},
      {1, 0, 0, 0}},
     {{0x924857eb2efafd, 0xac2bce41223190, 0x8edaa1445553fc, 0x825800fd3562d5},
      {0x8d79148ea96621, 0x23a01c3dd9ed8d, 0xaf8b219f9416b5, 0xd8db0cc277daea},
      {1, 0, 0, 0}},
     {{0x76a9c3b1a700f0, 0xe9acd29bc7e691, 0x69212d1a6b0327, 0x6322e97fe154be},
      {0x469fc5465d62aa, 0x8d41ed18883b05, 0x1f8eae66c52b88, 0xe4fcbe9325be51},
      {1, 0, 0, 0}},
     {{0x825fdf583cac16, 0x020b857c7b023a, 0x683c17744b0165, 0x14ffd0a2daf2f1},
      {0x323b36184218f9, 0x4944ec4e3b47d4, 0xc15b3080841acf, 0x0bced4b01a28bb},
      {1, 0, 0, 0}},
     {{0x92ac22230df5c4, 0x52f33b4063eda8, 0xcb3f19870c0c93, 0x40064f2ba65233},
      {0xfe16f0924f8992, 0x012da25af5b517, 0x1a57bb24f723a6, 0x06f8bc76760def},
      {1, 0, 0, 0}},
     {{0x4a7084f7817cb9, 0xbcab0738ee9a78, 0x3ec11e11d9c326, 0xdc0fe90e0f1aae},
      {0xcf639ea5f98390, 0x5c350aa22ffb74, 0x9afae98a4047b7, 0x956ec2d617fc45},
      {1, 0, 0, 0}},
     {{0x4306d648c1be6a, 0x9247cd8bc9a462, 0xf5595e377d2f2e, 0xbd1c3caff1a52e},
      {0x045e14472409d0, 0x29f3e17078f773, 0x745a602b2d4f7d, 0x191837685cdfbb},
      {1, 0, 0, 0}},
     {{0x5b6ee254a8cb79, 0x4953433f5e7026, 0xe21faeb1d1def4, 0xc4c225785c09de},
      {0x307ce7bba1e518, 0x31b125b1036db8, 0x47e91868839e8f, 0xc765866e33b9f3},
      {1, 0, 0, 0}},
     {{0x3bfece24f96906, 0x4794da641e5093, 0xde5df64f95db26, 0x297ecd89714b05},
      {0x701bd3ebb2c3aa, 0x7073b4f53cb1d5, 0x13c5665658af16, 0x9895089d66fe58},
      {1, 0, 0, 0}},
     {{0x0fef05f78c4790, 0x2d773633b05d2e, 0x94229c3a951c94, 0xbbbd70df4911bb},
      {0xb2c6963d2c1168, 0x105f47a72b0d73, 0x9fdf6111614080, 0x7b7e94b39e67b0},
      {1, 0, 0, 0}},
     {{0xad1a7d6efbe2b3, 0xf012482c0da69d, 0x6b3bdf12438345, 0x40d7558d7aa4d9},
      {0x8a09fffb5c6d3d, 0x9a356e5d9ffd38, 0x5973f15f4f9b1c, 0xdcd5f59f63c3ea},
      {1, 0, 0, 0}},
     {{0xacf39f4c5ca7ab, 0x4c8071cc5fd737, 0xc64e3602cd1184, 0x0acd4644c9abba},
      {0x6c011a36d8bf6e, 0xfecd87ba24e32a, 0x19f6f56574fad8, 0x050b204ced9405},
      {1, 0, 0, 0}},
     {{0xed4f1cae7d9a96, 0x5ceef7ad94c40a, 0x778e4a3bf3ef9b, 0x7405783dc3b55e},
      {0x32477c61b6e8c6, 0xb46a97570f018b, 0x91176d0a7e95d1, 0x3df90fbc4c7d0e},
      {1, 0, 0, 0}}}
};
234
/*
 * Precomputation for the group generator.
 * g_pre_comp holds the 2 x 16 table of generator multiples in the same
 * layout as gmul above; references and lock implement reference counting
 * so the precomputation can be shared.
 */
struct nistp224_pre_comp_st {
    felem g_pre_comp[2][16][3];
    CRYPTO_REF_COUNT references;
    CRYPTO_RWLOCK *lock;
};
241
/*
 * Return the EC_METHOD for this 64-bit NIST P-224 implementation.
 * Only group init/set_curve, affine-coordinate recovery and the point
 * multiplication routines are specialized here; everything else falls
 * back to the generic ec_GFp_simple_* / ec_GFp_nist_* implementations.
 */
const EC_METHOD *EC_GFp_nistp224_method(void)
{
    static const EC_METHOD ret = {
        EC_FLAGS_DEFAULT_OCT,
        NID_X9_62_prime_field,
        ec_GFp_nistp224_group_init,
        ec_GFp_simple_group_finish,
        ec_GFp_simple_group_clear_finish,
        ec_GFp_nist_group_copy,
        ec_GFp_nistp224_group_set_curve,
        ec_GFp_simple_group_get_curve,
        ec_GFp_simple_group_get_degree,
        ec_group_simple_order_bits,
        ec_GFp_simple_group_check_discriminant,
        ec_GFp_simple_point_init,
        ec_GFp_simple_point_finish,
        ec_GFp_simple_point_clear_finish,
        ec_GFp_simple_point_copy,
        ec_GFp_simple_point_set_to_infinity,
        ec_GFp_simple_set_Jprojective_coordinates_GFp,
        ec_GFp_simple_get_Jprojective_coordinates_GFp,
        ec_GFp_simple_point_set_affine_coordinates,
        ec_GFp_nistp224_point_get_affine_coordinates,
        0 /* point_set_compressed_coordinates */ ,
        0 /* point2oct */ ,
        0 /* oct2point */ ,
        ec_GFp_simple_add,
        ec_GFp_simple_dbl,
        ec_GFp_simple_invert,
        ec_GFp_simple_is_at_infinity,
        ec_GFp_simple_is_on_curve,
        ec_GFp_simple_cmp,
        ec_GFp_simple_make_affine,
        ec_GFp_simple_points_make_affine,
        ec_GFp_nistp224_points_mul,
        ec_GFp_nistp224_precompute_mult,
        ec_GFp_nistp224_have_precompute_mult,
        ec_GFp_nist_field_mul,
        ec_GFp_nist_field_sqr,
        0 /* field_div */ ,
        ec_GFp_simple_field_inv,
        0 /* field_encode */ ,
        0 /* field_decode */ ,
        0, /* field_set_to_one */
        ec_key_simple_priv2oct,
        ec_key_simple_oct2priv,
        0, /* set private */
        ec_key_simple_generate_key,
        ec_key_simple_check_key,
        ec_key_simple_generate_public_key,
        0, /* keycopy */
        0, /* keyfinish */
        ecdsa_simple_sign_setup,
        ecdsa_simple_sign_sig,
        ecdsa_simple_verify_sig,
        ecdh_simple_compute_key,
        0, /* field_inverse_mod_ord */
        0, /* blind_coordinates */
        0, /* ladder_pre */
        0, /* ladder_step */
        0 /* ladder_post */
    };

    return &ret;
}
307
308 /*
309 * Helper functions to convert field elements to/from internal representation
310 */
311 static void bin28_to_felem(felem out, const u8 in[28])
312 {
313 out[0] = *((const uint64_t *)(in)) & 0x00ffffffffffffff;
314 out[1] = (*((const uint64_t *)(in + 7))) & 0x00ffffffffffffff;
315 out[2] = (*((const uint64_t *)(in + 14))) & 0x00ffffffffffffff;
316 out[3] = (*((const uint64_t *)(in+20))) >> 8;
317 }
318
319 static void felem_to_bin28(u8 out[28], const felem in)
320 {
321 unsigned i;
322 for (i = 0; i < 7; ++i) {
323 out[i] = in[0] >> (8 * i);
324 out[i + 7] = in[1] >> (8 * i);
325 out[i + 14] = in[2] >> (8 * i);
326 out[i + 21] = in[3] >> (8 * i);
327 }
328 }
329
330 /* To preserve endianness when using BN_bn2bin and BN_bin2bn */
331 static void flip_endian(u8 *out, const u8 *in, unsigned len)
332 {
333 unsigned i;
334 for (i = 0; i < len; ++i)
335 out[i] = in[len - 1 - i];
336 }
337
338 /* From OpenSSL BIGNUM to internal representation */
339 static int BN_to_felem(felem out, const BIGNUM *bn)
340 {
341 felem_bytearray b_in;
342 felem_bytearray b_out;
343 unsigned num_bytes;
344
345 /* BN_bn2bin eats leading zeroes */
346 memset(b_out, 0, sizeof(b_out));
347 num_bytes = BN_num_bytes(bn);
348 if (num_bytes > sizeof(b_out)) {
349 ECerr(EC_F_BN_TO_FELEM, EC_R_BIGNUM_OUT_OF_RANGE);
350 return 0;
351 }
352 if (BN_is_negative(bn)) {
353 ECerr(EC_F_BN_TO_FELEM, EC_R_BIGNUM_OUT_OF_RANGE);
354 return 0;
355 }
356 num_bytes = BN_bn2bin(bn, b_in);
357 flip_endian(b_out, b_in, num_bytes);
358 bin28_to_felem(out, b_out);
359 return 1;
360 }
361
362 /* From internal representation to OpenSSL BIGNUM */
363 static BIGNUM *felem_to_BN(BIGNUM *out, const felem in)
364 {
365 felem_bytearray b_in, b_out;
366 felem_to_bin28(b_in, in);
367 flip_endian(b_out, b_in, sizeof(b_out));
368 return BN_bin2bn(b_out, sizeof(b_out), out);
369 }
370
371 /******************************************************************************/
372 /*-
373 * FIELD OPERATIONS
374 *
375 * Field operations, using the internal representation of field elements.
376 * NB! These operations are specific to our point multiplication and cannot be
377 * expected to be correct in general - e.g., multiplication with a large scalar
378 * will cause an overflow.
379 *
380 */
381
382 static void felem_one(felem out)
383 {
384 out[0] = 1;
385 out[1] = 0;
386 out[2] = 0;
387 out[3] = 0;
388 }
389
390 static void felem_assign(felem out, const felem in)
391 {
392 out[0] = in[0];
393 out[1] = in[1];
394 out[2] = in[2];
395 out[3] = in[3];
396 }
397
398 /* Sum two field elements: out += in */
399 static void felem_sum(felem out, const felem in)
400 {
401 out[0] += in[0];
402 out[1] += in[1];
403 out[2] += in[2];
404 out[3] += in[3];
405 }
406
407 /* Subtract field elements: out -= in */
408 /* Assumes in[i] < 2^57 */
409 static void felem_diff(felem out, const felem in)
410 {
411 static const limb two58p2 = (((limb) 1) << 58) + (((limb) 1) << 2);
412 static const limb two58m2 = (((limb) 1) << 58) - (((limb) 1) << 2);
413 static const limb two58m42m2 = (((limb) 1) << 58) -
414 (((limb) 1) << 42) - (((limb) 1) << 2);
415
416 /* Add 0 mod 2^224-2^96+1 to ensure out > in */
417 out[0] += two58p2;
418 out[1] += two58m42m2;
419 out[2] += two58m2;
420 out[3] += two58m2;
421
422 out[0] -= in[0];
423 out[1] -= in[1];
424 out[2] -= in[2];
425 out[3] -= in[3];
426 }
427
428 /* Subtract in unreduced 128-bit mode: out -= in */
429 /* Assumes in[i] < 2^119 */
430 static void widefelem_diff(widefelem out, const widefelem in)
431 {
432 static const widelimb two120 = ((widelimb) 1) << 120;
433 static const widelimb two120m64 = (((widelimb) 1) << 120) -
434 (((widelimb) 1) << 64);
435 static const widelimb two120m104m64 = (((widelimb) 1) << 120) -
436 (((widelimb) 1) << 104) - (((widelimb) 1) << 64);
437
438 /* Add 0 mod 2^224-2^96+1 to ensure out > in */
439 out[0] += two120;
440 out[1] += two120m64;
441 out[2] += two120m64;
442 out[3] += two120;
443 out[4] += two120m104m64;
444 out[5] += two120m64;
445 out[6] += two120m64;
446
447 out[0] -= in[0];
448 out[1] -= in[1];
449 out[2] -= in[2];
450 out[3] -= in[3];
451 out[4] -= in[4];
452 out[5] -= in[5];
453 out[6] -= in[6];
454 }
455
456 /* Subtract in mixed mode: out128 -= in64 */
457 /* in[i] < 2^63 */
458 static void felem_diff_128_64(widefelem out, const felem in)
459 {
460 static const widelimb two64p8 = (((widelimb) 1) << 64) +
461 (((widelimb) 1) << 8);
462 static const widelimb two64m8 = (((widelimb) 1) << 64) -
463 (((widelimb) 1) << 8);
464 static const widelimb two64m48m8 = (((widelimb) 1) << 64) -
465 (((widelimb) 1) << 48) - (((widelimb) 1) << 8);
466
467 /* Add 0 mod 2^224-2^96+1 to ensure out > in */
468 out[0] += two64p8;
469 out[1] += two64m48m8;
470 out[2] += two64m8;
471 out[3] += two64m8;
472
473 out[0] -= in[0];
474 out[1] -= in[1];
475 out[2] -= in[2];
476 out[3] -= in[3];
477 }
478
479 /*
480 * Multiply a field element by a scalar: out = out * scalar The scalars we
481 * actually use are small, so results fit without overflow
482 */
483 static void felem_scalar(felem out, const limb scalar)
484 {
485 out[0] *= scalar;
486 out[1] *= scalar;
487 out[2] *= scalar;
488 out[3] *= scalar;
489 }
490
491 /*
492 * Multiply an unreduced field element by a scalar: out = out * scalar The
493 * scalars we actually use are small, so results fit without overflow
494 */
495 static void widefelem_scalar(widefelem out, const widelimb scalar)
496 {
497 out[0] *= scalar;
498 out[1] *= scalar;
499 out[2] *= scalar;
500 out[3] *= scalar;
501 out[4] *= scalar;
502 out[5] *= scalar;
503 out[6] *= scalar;
504 }
505
506 /* Square a field element: out = in^2 */
507 static void felem_square(widefelem out, const felem in)
508 {
509 limb tmp0, tmp1, tmp2;
510 tmp0 = 2 * in[0];
511 tmp1 = 2 * in[1];
512 tmp2 = 2 * in[2];
513 out[0] = ((widelimb) in[0]) * in[0];
514 out[1] = ((widelimb) in[0]) * tmp1;
515 out[2] = ((widelimb) in[0]) * tmp2 + ((widelimb) in[1]) * in[1];
516 out[3] = ((widelimb) in[3]) * tmp0 + ((widelimb) in[1]) * tmp2;
517 out[4] = ((widelimb) in[3]) * tmp1 + ((widelimb) in[2]) * in[2];
518 out[5] = ((widelimb) in[3]) * tmp2;
519 out[6] = ((widelimb) in[3]) * in[3];
520 }
521
522 /* Multiply two field elements: out = in1 * in2 */
523 static void felem_mul(widefelem out, const felem in1, const felem in2)
524 {
525 out[0] = ((widelimb) in1[0]) * in2[0];
526 out[1] = ((widelimb) in1[0]) * in2[1] + ((widelimb) in1[1]) * in2[0];
527 out[2] = ((widelimb) in1[0]) * in2[2] + ((widelimb) in1[1]) * in2[1] +
528 ((widelimb) in1[2]) * in2[0];
529 out[3] = ((widelimb) in1[0]) * in2[3] + ((widelimb) in1[1]) * in2[2] +
530 ((widelimb) in1[2]) * in2[1] + ((widelimb) in1[3]) * in2[0];
531 out[4] = ((widelimb) in1[1]) * in2[3] + ((widelimb) in1[2]) * in2[2] +
532 ((widelimb) in1[3]) * in2[1];
533 out[5] = ((widelimb) in1[2]) * in2[3] + ((widelimb) in1[3]) * in2[2];
534 out[6] = ((widelimb) in1[3]) * in2[3];
535 }
536
/*-
 * Reduce seven 128-bit coefficients to four 64-bit coefficients.
 * Requires in[i] < 2^126,
 * ensures out[0] < 2^56, out[1] < 2^56, out[2] < 2^56, out[3] <= 2^56 + 2^16 */
static void felem_reduce(felem out, const widefelem in)
{
    static const widelimb two127p15 = (((widelimb) 1) << 127) +
        (((widelimb) 1) << 15);
    static const widelimb two127m71 = (((widelimb) 1) << 127) -
        (((widelimb) 1) << 71);
    static const widelimb two127m71m55 = (((widelimb) 1) << 127) -
        (((widelimb) 1) << 71) - (((widelimb) 1) << 55);
    widelimb output[5];

    /* Add 0 mod 2^224-2^96+1 to ensure all differences are positive */
    output[0] = in[0] + two127p15;
    output[1] = in[1] + two127m71m55;
    output[2] = in[2] + two127m71;
    output[3] = in[3];
    output[4] = in[4];

    /*-
     * Eliminate in[4], in[5], in[6].
     * Limb j has weight 2^(56j), and 2^224 == 2^96 - 1 (mod p), so
     *   in[j]*2^(56j) == in[j]*2^(56(j-4)+96) - in[j]*2^(56(j-4)).
     * Since 56(j-4)+96 = 56(j-2) - 16, limb j folds in as (in[j] >> 16)
     * at limb j-2, (in[j] & 0xffff) << 40 at limb j-3, and -in[j] at
     * limb j-4.
     */
    output[4] += in[6] >> 16;
    output[3] += (in[6] & 0xffff) << 40;
    output[2] -= in[6];

    output[3] += in[5] >> 16;
    output[2] += (in[5] & 0xffff) << 40;
    output[1] -= in[5];

    output[2] += output[4] >> 16;
    output[1] += (output[4] & 0xffff) << 40;
    output[0] -= output[4];

    /* Carry 2 -> 3 -> 4 */
    output[3] += output[2] >> 56;
    output[2] &= 0x00ffffffffffffff;

    output[4] = output[3] >> 56;
    output[3] &= 0x00ffffffffffffff;

    /* Now output[2] < 2^56, output[3] < 2^56, output[4] < 2^72 */

    /* Eliminate output[4] using the same congruence as above */
    output[2] += output[4] >> 16;
    /* output[2] < 2^56 + 2^56 = 2^57 */
    output[1] += (output[4] & 0xffff) << 40;
    output[0] -= output[4];

    /* Carry 0 -> 1 -> 2 -> 3 */
    output[1] += output[0] >> 56;
    out[0] = output[0] & 0x00ffffffffffffff;

    output[2] += output[1] >> 56;
    /* output[2] < 2^57 + 2^72 */
    out[1] = output[1] & 0x00ffffffffffffff;
    output[3] += output[2] >> 56;
    /* output[3] <= 2^56 + 2^16 */
    out[2] = output[2] & 0x00ffffffffffffff;

    /*-
     * out[0] < 2^56, out[1] < 2^56, out[2] < 2^56,
     * out[3] <= 2^56 + 2^16 (due to final carry),
     * so out < 2*p
     */
    out[3] = output[3];
}
604
605 static void felem_square_reduce(felem out, const felem in)
606 {
607 widefelem tmp;
608 felem_square(tmp, in);
609 felem_reduce(out, tmp);
610 }
611
612 static void felem_mul_reduce(felem out, const felem in1, const felem in2)
613 {
614 widefelem tmp;
615 felem_mul(tmp, in1, in2);
616 felem_reduce(out, tmp);
617 }
618
/*
 * Reduce to unique minimal representation. Requires 0 <= in < 2*p (always
 * call felem_reduce first).  Runs in constant time: the conditional
 * subtraction of p is done with masks, never branches.
 */
static void felem_contract(felem out, const felem in)
{
    static const int64_t two56 = ((limb) 1) << 56;
    /* 0 <= in < 2*p, p = 2^224 - 2^96 + 1 */
    /* if in > p , reduce in = in - 2^224 + 2^96 - 1 */
    int64_t tmp[4], a;
    tmp[0] = in[0];
    tmp[1] = in[1];
    tmp[2] = in[2];
    tmp[3] = in[3];
    /* Case 1: a = 1 iff in >= 2^224 (bit 56 of the top limb is set) */
    a = (in[3] >> 56);
    /* subtract 2^224 - 2^96 + 1: -1 at limb 0, +2^96 as 2^40 into limb 1 */
    tmp[0] -= a;
    tmp[1] += a << 40;
    tmp[3] &= 0x00ffffffffffffff;
    /*
     * Case 2: a = 0 iff p <= in < 2^224, i.e., the high 128 bits are all 1
     * and the lower part is non-zero
     */
    a = ((in[3] & in[2] & (in[1] | 0x000000ffffffffff)) + 1) |
        (((int64_t) (in[0] + (in[1] & 0x000000ffffffffff)) - 1) >> 63);
    a &= 0x00ffffffffffffff;
    /* turn a into an all-one mask (if a = 0) or an all-zero mask */
    a = (a - 1) >> 63;
    /* subtract 2^224 - 2^96 + 1 if a is all-one */
    tmp[3] &= a ^ 0xffffffffffffffff;
    tmp[2] &= a ^ 0xffffffffffffffff;
    tmp[1] &= (a ^ 0xffffffffffffffff) | 0x000000ffffffffff;
    tmp[0] -= 1 & a;

    /*
     * eliminate negative coefficients: if tmp[0] is negative, tmp[1] must be
     * non-zero, so we only need one step
     */
    a = tmp[0] >> 63;           /* a = all-ones iff tmp[0] < 0 */
    tmp[0] += two56 & a;
    tmp[1] -= 1 & a;

    /* carry 1 -> 2 -> 3 */
    tmp[2] += tmp[1] >> 56;
    tmp[1] &= 0x00ffffffffffffff;

    tmp[3] += tmp[2] >> 56;
    tmp[2] &= 0x00ffffffffffffff;

    /* Now 0 <= out < p */
    out[0] = tmp[0];
    out[1] = tmp[1];
    out[2] = tmp[2];
    out[3] = tmp[3];
}
674
675 /*
676 * Get negative value: out = -in
677 * Requires in[i] < 2^63,
678 * ensures out[0] < 2^56, out[1] < 2^56, out[2] < 2^56, out[3] <= 2^56 + 2^16
679 */
680 static void felem_neg(felem out, const felem in)
681 {
682 widefelem tmp;
683
684 memset(tmp, 0, sizeof(tmp));
685 felem_diff_128_64(tmp, in);
686 felem_reduce(out, tmp);
687 }
688
/*
 * Zero-check: returns 1 if input is 0, and 0 otherwise. We know that field
 * elements are reduced to in < 2^225, so we only need to check three cases:
 * 0, 2^224 - 2^96 + 1, and 2^225 - 2^97 + 2 (i.e. 0, p and 2p).
 * Constant time: each candidate is compared by OR-ing limb XORs and
 * mapping "all zero" to 1 with the (x - 1) >> 63 sign trick.
 */
static limb felem_is_zero(const felem in)
{
    limb zero, two224m96p1, two225m97p2;

    zero = in[0] | in[1] | in[2] | in[3];
    zero = (((int64_t) (zero) - 1) >> 63) & 1;
    /* limb-wise encoding of p = 2^224 - 2^96 + 1 */
    two224m96p1 = (in[0] ^ 1) | (in[1] ^ 0x00ffff0000000000)
        | (in[2] ^ 0x00ffffffffffffff) | (in[3] ^ 0x00ffffffffffffff);
    two224m96p1 = (((int64_t) (two224m96p1) - 1) >> 63) & 1;
    /* limb-wise encoding of 2p = 2^225 - 2^97 + 2 */
    two225m97p2 = (in[0] ^ 2) | (in[1] ^ 0x00fffe0000000000)
        | (in[2] ^ 0x00ffffffffffffff) | (in[3] ^ 0x01ffffffffffffff);
    two225m97p2 = (((int64_t) (two225m97p2) - 1) >> 63) & 1;
    return (zero | two224m96p1 | two225m97p2);
}
708
/* Wrapper around felem_is_zero returning a plain int (0 or 1). */
static int felem_is_zero_int(const void *in)
{
    return (int)(felem_is_zero(in) & 1);
}
713
/*
 * Invert a field element by computing in^(p-2) mod p (Fermat's little
 * theorem), where p - 2 = 2^224 - 2^96 - 1.
 * Computation chain copied from djb's code; the comment after each step
 * gives the exponent of 'in' accumulated so far (e.g. "2^6 - 1" means
 * the value is in^(2^6 - 1)).  Note felem_inv(0) = 0, not an error.
 */
static void felem_inv(felem out, const felem in)
{
    felem ftmp, ftmp2, ftmp3, ftmp4;
    widefelem tmp;
    unsigned i;

    felem_square(tmp, in);
    felem_reduce(ftmp, tmp);    /* 2 */
    felem_mul(tmp, in, ftmp);
    felem_reduce(ftmp, tmp);    /* 2^2 - 1 */
    felem_square(tmp, ftmp);
    felem_reduce(ftmp, tmp);    /* 2^3 - 2 */
    felem_mul(tmp, in, ftmp);
    felem_reduce(ftmp, tmp);    /* 2^3 - 1 */
    felem_square(tmp, ftmp);
    felem_reduce(ftmp2, tmp);   /* 2^4 - 2 */
    felem_square(tmp, ftmp2);
    felem_reduce(ftmp2, tmp);   /* 2^5 - 4 */
    felem_square(tmp, ftmp2);
    felem_reduce(ftmp2, tmp);   /* 2^6 - 8 */
    felem_mul(tmp, ftmp2, ftmp);
    felem_reduce(ftmp, tmp);    /* 2^6 - 1 */
    felem_square(tmp, ftmp);
    felem_reduce(ftmp2, tmp);   /* 2^7 - 2 */
    for (i = 0; i < 5; ++i) {   /* 2^12 - 2^6 */
        felem_square(tmp, ftmp2);
        felem_reduce(ftmp2, tmp);
    }
    felem_mul(tmp, ftmp2, ftmp);
    felem_reduce(ftmp2, tmp);   /* 2^12 - 1 */
    felem_square(tmp, ftmp2);
    felem_reduce(ftmp3, tmp);   /* 2^13 - 2 */
    for (i = 0; i < 11; ++i) {  /* 2^24 - 2^12 */
        felem_square(tmp, ftmp3);
        felem_reduce(ftmp3, tmp);
    }
    felem_mul(tmp, ftmp3, ftmp2);
    felem_reduce(ftmp2, tmp);   /* 2^24 - 1 */
    felem_square(tmp, ftmp2);
    felem_reduce(ftmp3, tmp);   /* 2^25 - 2 */
    for (i = 0; i < 23; ++i) {  /* 2^48 - 2^24 */
        felem_square(tmp, ftmp3);
        felem_reduce(ftmp3, tmp);
    }
    felem_mul(tmp, ftmp3, ftmp2);
    felem_reduce(ftmp3, tmp);   /* 2^48 - 1 */
    felem_square(tmp, ftmp3);
    felem_reduce(ftmp4, tmp);   /* 2^49 - 2 */
    for (i = 0; i < 47; ++i) {  /* 2^96 - 2^48 */
        felem_square(tmp, ftmp4);
        felem_reduce(ftmp4, tmp);
    }
    felem_mul(tmp, ftmp3, ftmp4);
    felem_reduce(ftmp3, tmp);   /* 2^96 - 1 */
    felem_square(tmp, ftmp3);
    felem_reduce(ftmp4, tmp);   /* 2^97 - 2 */
    for (i = 0; i < 23; ++i) {  /* 2^120 - 2^24 */
        felem_square(tmp, ftmp4);
        felem_reduce(ftmp4, tmp);
    }
    felem_mul(tmp, ftmp2, ftmp4);
    felem_reduce(ftmp2, tmp);   /* 2^120 - 1 */
    for (i = 0; i < 6; ++i) {   /* 2^126 - 2^6 */
        felem_square(tmp, ftmp2);
        felem_reduce(ftmp2, tmp);
    }
    felem_mul(tmp, ftmp2, ftmp);
    felem_reduce(ftmp, tmp);    /* 2^126 - 1 */
    felem_square(tmp, ftmp);
    felem_reduce(ftmp, tmp);    /* 2^127 - 2 */
    felem_mul(tmp, ftmp, in);
    felem_reduce(ftmp, tmp);    /* 2^127 - 1 */
    for (i = 0; i < 97; ++i) {  /* 2^224 - 2^97 */
        felem_square(tmp, ftmp);
        felem_reduce(ftmp, tmp);
    }
    felem_mul(tmp, ftmp, ftmp3);
    felem_reduce(out, tmp);     /* 2^224 - 2^96 - 1 */
}
795
796 /*
797 * Copy in constant time: if icopy == 1, copy in to out, if icopy == 0, copy
798 * out to itself.
799 */
800 static void copy_conditional(felem out, const felem in, limb icopy)
801 {
802 unsigned i;
803 /*
804 * icopy is a (64-bit) 0 or 1, so copy is either all-zero or all-one
805 */
806 const limb copy = -icopy;
807 for (i = 0; i < 4; ++i) {
808 const limb tmp = copy & (in[i] ^ out[i]);
809 out[i] ^= tmp;
810 }
811 }
812
813 /******************************************************************************/
814 /*-
815 * ELLIPTIC CURVE POINT OPERATIONS
816 *
817 * Points are represented in Jacobian projective coordinates:
818 * (X, Y, Z) corresponds to the affine point (X/Z^2, Y/Z^3),
819 * or to the point at infinity if Z == 0.
820 *
821 */
822
/*-
 * Double an elliptic curve point:
 * (X', Y', Z') = 2 * (X, Y, Z), where
 * X' = (3 * (X - Z^2) * (X + Z^2))^2 - 8 * X * Y^2
 * Y' = 3 * (X - Z^2) * (X + Z^2) * (4 * X * Y^2 - X') - 8 * Y^4
 * Z' = (Y + Z)^2 - Y^2 - Z^2 = 2 * Y * Z
 * Outputs can equal corresponding inputs, i.e., x_out == x_in is allowed,
 * while x_out == y_in is not (maybe this works, but it's not tested).
 *
 * The inline bound comments track the maximum limb magnitude at each
 * step; they establish that every felem_mul/felem_square input stays
 * below 2^60 so the 128-bit intermediate products cannot overflow.
 */
static void
point_double(felem x_out, felem y_out, felem z_out,
             const felem x_in, const felem y_in, const felem z_in)
{
    widefelem tmp, tmp2;
    felem delta, gamma, beta, alpha, ftmp, ftmp2;

    felem_assign(ftmp, x_in);
    felem_assign(ftmp2, x_in);

    /* delta = z^2 */
    felem_square(tmp, z_in);
    felem_reduce(delta, tmp);

    /* gamma = y^2 */
    felem_square(tmp, y_in);
    felem_reduce(gamma, tmp);

    /* beta = x*gamma */
    felem_mul(tmp, x_in, gamma);
    felem_reduce(beta, tmp);

    /* alpha = 3*(x-delta)*(x+delta) */
    felem_diff(ftmp, delta);
    /* ftmp[i] < 2^57 + 2^58 + 2 < 2^59 */
    felem_sum(ftmp2, delta);
    /* ftmp2[i] < 2^57 + 2^57 = 2^58 */
    felem_scalar(ftmp2, 3);
    /* ftmp2[i] < 3 * 2^58 < 2^60 */
    felem_mul(tmp, ftmp, ftmp2);
    /* tmp[i] < 2^60 * 2^59 * 4 = 2^121 */
    felem_reduce(alpha, tmp);

    /* x' = alpha^2 - 8*beta */
    felem_square(tmp, alpha);
    /* tmp[i] < 4 * 2^57 * 2^57 = 2^116 */
    felem_assign(ftmp, beta);
    felem_scalar(ftmp, 8);
    /* ftmp[i] < 8 * 2^57 = 2^60 */
    felem_diff_128_64(tmp, ftmp);
    /* tmp[i] < 2^116 + 2^64 + 8 < 2^117 */
    felem_reduce(x_out, tmp);

    /* z' = (y + z)^2 - gamma - delta */
    felem_sum(delta, gamma);
    /* delta[i] < 2^57 + 2^57 = 2^58 */
    felem_assign(ftmp, y_in);
    felem_sum(ftmp, z_in);
    /* ftmp[i] < 2^57 + 2^57 = 2^58 */
    felem_square(tmp, ftmp);
    /* tmp[i] < 4 * 2^58 * 2^58 = 2^118 */
    felem_diff_128_64(tmp, delta);
    /* tmp[i] < 2^118 + 2^64 + 8 < 2^119 */
    felem_reduce(z_out, tmp);

    /* y' = alpha*(4*beta - x') - 8*gamma^2 */
    felem_scalar(beta, 4);
    /* beta[i] < 4 * 2^57 = 2^59 */
    felem_diff(beta, x_out);
    /* beta[i] < 2^59 + 2^58 + 2 < 2^60 */
    felem_mul(tmp, alpha, beta);
    /* tmp[i] < 4 * 2^57 * 2^60 = 2^119 */
    felem_square(tmp2, gamma);
    /* tmp2[i] < 4 * 2^57 * 2^57 = 2^116 */
    widefelem_scalar(tmp2, 8);
    /* tmp2[i] < 8 * 2^116 = 2^119 */
    widefelem_diff(tmp, tmp2);
    /* tmp[i] < 2^119 + 2^120 < 2^121 */
    felem_reduce(y_out, tmp);
}
902
/*-
 * Add two elliptic curve points:
 * (X_1, Y_1, Z_1) + (X_2, Y_2, Z_2) = (X_3, Y_3, Z_3), where
 * X_3 = (Z_1^3 * Y_2 - Z_2^3 * Y_1)^2 - (Z_1^2 * X_2 - Z_2^2 * X_1)^3 -
 * 2 * Z_2^2 * X_1 * (Z_1^2 * X_2 - Z_2^2 * X_1)^2
 * Y_3 = (Z_1^3 * Y_2 - Z_2^3 * Y_1) * (Z_2^2 * X_1 * (Z_1^2 * X_2 - Z_2^2 * X_1)^2 - X_3) -
 * Z_2^3 * Y_1 * (Z_1^2 * X_2 - Z_2^2 * X_1)^3
 * Z_3 = (Z_1^2 * X_2 - Z_2^2 * X_1) * (Z_1 * Z_2)
 *
 * This runs faster if 'mixed' is set, which requires Z_2 = 1 or Z_2 = 0.
 */

/*
 * This function is not entirely constant-time: it includes a branch for
 * checking whether the two input points are equal, (while not equal to the
 * point at infinity). This case never happens during single point
 * multiplication, so there is no timing leak for ECDH or ECDSA signing.
 * NOTE(review): callers that could reach the equal-points branch with
 * secret-dependent inputs would leak timing here — confirm before reuse.
 */
static void point_add(felem x3, felem y3, felem z3,
                      const felem x1, const felem y1, const felem z1,
                      const int mixed, const felem x2, const felem y2,
                      const felem z2)
{
    felem ftmp, ftmp2, ftmp3, ftmp4, ftmp5, x_out, y_out, z_out;
    widefelem tmp, tmp2;
    limb z1_is_zero, z2_is_zero, x_equal, y_equal;

    if (!mixed) {
        /* ftmp2 = z2^2 */
        felem_square(tmp, z2);
        felem_reduce(ftmp2, tmp);

        /* ftmp4 = z2^3 */
        felem_mul(tmp, ftmp2, z2);
        felem_reduce(ftmp4, tmp);

        /* ftmp4 = z2^3*y1 */
        felem_mul(tmp2, ftmp4, y1);
        felem_reduce(ftmp4, tmp2);

        /* ftmp2 = z2^2*x1 */
        felem_mul(tmp2, ftmp2, x1);
        felem_reduce(ftmp2, tmp2);
    } else {
        /*
         * We'll assume z2 = 1 (special case z2 = 0 is handled later)
         */

        /* ftmp4 = z2^3*y1 */
        felem_assign(ftmp4, y1);

        /* ftmp2 = z2^2*x1 */
        felem_assign(ftmp2, x1);
    }

    /* ftmp = z1^2 */
    felem_square(tmp, z1);
    felem_reduce(ftmp, tmp);

    /* ftmp3 = z1^3 */
    felem_mul(tmp, ftmp, z1);
    felem_reduce(ftmp3, tmp);

    /* tmp = z1^3*y2 */
    felem_mul(tmp, ftmp3, y2);
    /* tmp[i] < 4 * 2^57 * 2^57 = 2^116 */

    /* ftmp3 = z1^3*y2 - z2^3*y1 */
    felem_diff_128_64(tmp, ftmp4);
    /* tmp[i] < 2^116 + 2^64 + 8 < 2^117 */
    felem_reduce(ftmp3, tmp);

    /* tmp = z1^2*x2 */
    felem_mul(tmp, ftmp, x2);
    /* tmp[i] < 4 * 2^57 * 2^57 = 2^116 */

    /* ftmp = z1^2*x2 - z2^2*x1 */
    felem_diff_128_64(tmp, ftmp2);
    /* tmp[i] < 2^116 + 2^64 + 8 < 2^117 */
    felem_reduce(ftmp, tmp);

    /*
     * the formulae are incorrect if the points are equal so we check for
     * this and do doubling if this happens
     */
    x_equal = felem_is_zero(ftmp);
    y_equal = felem_is_zero(ftmp3);
    z1_is_zero = felem_is_zero(z1);
    z2_is_zero = felem_is_zero(z2);
    /* In affine coordinates, (X_1, Y_1) == (X_2, Y_2) */
    if (x_equal && y_equal && !z1_is_zero && !z2_is_zero) {
        point_double(x3, y3, z3, x1, y1, z1);
        return;
    }

    /* ftmp5 = z1*z2 */
    if (!mixed) {
        felem_mul(tmp, z1, z2);
        felem_reduce(ftmp5, tmp);
    } else {
        /* special case z2 = 0 is handled later */
        felem_assign(ftmp5, z1);
    }

    /* z_out = (z1^2*x2 - z2^2*x1)*(z1*z2) */
    felem_mul(tmp, ftmp, ftmp5);
    felem_reduce(z_out, tmp);

    /* ftmp = (z1^2*x2 - z2^2*x1)^2 */
    felem_assign(ftmp5, ftmp);
    felem_square(tmp, ftmp);
    felem_reduce(ftmp, tmp);

    /* ftmp5 = (z1^2*x2 - z2^2*x1)^3 */
    felem_mul(tmp, ftmp, ftmp5);
    felem_reduce(ftmp5, tmp);

    /* ftmp2 = z2^2*x1*(z1^2*x2 - z2^2*x1)^2 */
    felem_mul(tmp, ftmp2, ftmp);
    felem_reduce(ftmp2, tmp);

    /* tmp = z2^3*y1*(z1^2*x2 - z2^2*x1)^3 */
    felem_mul(tmp, ftmp4, ftmp5);
    /* tmp[i] < 4 * 2^57 * 2^57 = 2^116 */

    /* tmp2 = (z1^3*y2 - z2^3*y1)^2 */
    felem_square(tmp2, ftmp3);
    /* tmp2[i] < 4 * 2^57 * 2^57 < 2^116 */

    /* tmp2 = (z1^3*y2 - z2^3*y1)^2 - (z1^2*x2 - z2^2*x1)^3 */
    felem_diff_128_64(tmp2, ftmp5);
    /* tmp2[i] < 2^116 + 2^64 + 8 < 2^117 */

    /* ftmp5 = 2*z2^2*x1*(z1^2*x2 - z2^2*x1)^2 */
    felem_assign(ftmp5, ftmp2);
    felem_scalar(ftmp5, 2);
    /* ftmp5[i] < 2 * 2^57 = 2^58 */

    /*-
     * x_out = (z1^3*y2 - z2^3*y1)^2 - (z1^2*x2 - z2^2*x1)^3 -
     *         2*z2^2*x1*(z1^2*x2 - z2^2*x1)^2
     */
    felem_diff_128_64(tmp2, ftmp5);
    /* tmp2[i] < 2^117 + 2^64 + 8 < 2^118 */
    felem_reduce(x_out, tmp2);

    /* ftmp2 = z2^2*x1*(z1^2*x2 - z2^2*x1)^2 - x_out */
    felem_diff(ftmp2, x_out);
    /* ftmp2[i] < 2^57 + 2^58 + 2 < 2^59 */

    /*
     * tmp2 = (z1^3*y2 - z2^3*y1)*(z2^2*x1*(z1^2*x2 - z2^2*x1)^2 - x_out)
     */
    felem_mul(tmp2, ftmp3, ftmp2);
    /* tmp2[i] < 4 * 2^57 * 2^59 = 2^118 */

    /*-
     * y_out = (z1^3*y2 - z2^3*y1)*(z2^2*x1*(z1^2*x2 - z2^2*x1)^2 - x_out) -
     *         z2^3*y1*(z1^2*x2 - z2^2*x1)^3
     */
    widefelem_diff(tmp2, tmp);
    /* tmp2[i] < 2^118 + 2^120 < 2^121 */
    felem_reduce(y_out, tmp2);

    /*
     * the result (x_out, y_out, z_out) is incorrect if one of the inputs is
     * the point at infinity, so we need to check for this separately
     */

    /*
     * if point 1 is at infinity, copy point 2 to output, and vice versa
     * (branch-free: copy_conditional uses the all-ones/all-zeros masks)
     */
    copy_conditional(x_out, x2, z1_is_zero);
    copy_conditional(x_out, x1, z2_is_zero);
    copy_conditional(y_out, y2, z1_is_zero);
    copy_conditional(y_out, y1, z2_is_zero);
    copy_conditional(z_out, z2, z1_is_zero);
    copy_conditional(z_out, z1, z2_is_zero);
    felem_assign(x3, x_out);
    felem_assign(y3, y_out);
    felem_assign(z3, z_out);
}
1085
1086 /*
1087 * select_point selects the |idx|th point from a precomputation table and
1088 * copies it to out.
1089 * The pre_comp array argument should be size of |size| argument
1090 */
1091 static void select_point(const u64 idx, unsigned int size,
1092 const felem pre_comp[][3], felem out[3])
1093 {
1094 unsigned i, j;
1095 limb *outlimbs = &out[0][0];
1096
1097 memset(out, 0, sizeof(*out) * 3);
1098 for (i = 0; i < size; i++) {
1099 const limb *inlimbs = &pre_comp[i][0][0];
1100 u64 mask = i ^ idx;
1101 mask |= mask >> 4;
1102 mask |= mask >> 2;
1103 mask |= mask >> 1;
1104 mask &= 1;
1105 mask--;
1106 for (j = 0; j < 4 * 3; j++)
1107 outlimbs[j] |= inlimbs[j] & mask;
1108 }
1109 }
1110
1111 /* get_bit returns the |i|th bit in |in| */
1112 static char get_bit(const felem_bytearray in, unsigned i)
1113 {
1114 if (i >= 224)
1115 return 0;
1116 return (in[i >> 3] >> (i & 7)) & 1;
1117 }
1118
/*
 * Interleaved point multiplication using precomputed point multiples: The
 * small point multiples 0*P, 1*P, ..., 16*P are in pre_comp[], the scalars
 * in scalars[]. If g_scalar is non-NULL, we also add this multiple of the
 * generator, using certain (large) precomputed multiples in g_pre_comp.
 * Output point (X, Y, Z) is stored in x_out, y_out, z_out
 */
static void batch_mul(felem x_out, felem y_out, felem z_out,
                      const felem_bytearray scalars[],
                      const unsigned num_points, const u8 *g_scalar,
                      const int mixed, const felem pre_comp[][17][3],
                      const felem g_pre_comp[2][16][3])
{
    int i, skip;
    unsigned num;
    unsigned gen_mul = (g_scalar != NULL);
    felem nq[3], tmp[4];
    u64 bits;
    u8 sign, digit;

    /* set nq to the point at infinity (all-zero Jacobian coordinates) */
    memset(nq, 0, sizeof(nq));

    /*
     * Loop over all scalars msb-to-lsb, interleaving additions of multiples
     * of the generator (two in each of the last 28 rounds) and additions of
     * other points multiples (every 5th round).
     */
    skip = 1;                   /* save two point operations in the first
                                 * round */
    for (i = (num_points ? 220 : 27); i >= 0; --i) {
        /* double */
        if (!skip)
            point_double(nq[0], nq[1], nq[2], nq[0], nq[1], nq[2]);

        /* add multiples of the generator */
        if (gen_mul && (i <= 27)) {
            /*
             * first, look 28 bits upwards: gather one bit from each of the
             * four 56-bit halves to form a 4-bit table index
             */
            bits = get_bit(g_scalar, i + 196) << 3;
            bits |= get_bit(g_scalar, i + 140) << 2;
            bits |= get_bit(g_scalar, i + 84) << 1;
            bits |= get_bit(g_scalar, i + 28);
            /* select the point to add, in constant time */
            select_point(bits, 16, g_pre_comp[1], tmp);

            if (!skip) {
                /* value 1 below is argument for "mixed" */
                point_add(nq[0], nq[1], nq[2],
                          nq[0], nq[1], nq[2], 1, tmp[0], tmp[1], tmp[2]);
            } else {
                /* first addition: just load the selected point into nq */
                memcpy(nq, tmp, 3 * sizeof(felem));
                skip = 0;
            }

            /* second, look at the current position */
            bits = get_bit(g_scalar, i + 168) << 3;
            bits |= get_bit(g_scalar, i + 112) << 2;
            bits |= get_bit(g_scalar, i + 56) << 1;
            bits |= get_bit(g_scalar, i);
            /* select the point to add, in constant time */
            select_point(bits, 16, g_pre_comp[0], tmp);
            point_add(nq[0], nq[1], nq[2],
                      nq[0], nq[1], nq[2],
                      1 /* mixed */ , tmp[0], tmp[1], tmp[2]);
        }

        /* do other additions every 5 doublings */
        if (num_points && (i % 5 == 0)) {
            /* loop over all scalars */
            for (num = 0; num < num_points; ++num) {
                /*
                 * build a signed 5-bit window; at i == 0 the (i - 1) index
                 * wraps to a huge unsigned value, for which get_bit
                 * deliberately returns 0
                 */
                bits = get_bit(scalars[num], i + 4) << 5;
                bits |= get_bit(scalars[num], i + 3) << 4;
                bits |= get_bit(scalars[num], i + 2) << 3;
                bits |= get_bit(scalars[num], i + 1) << 2;
                bits |= get_bit(scalars[num], i) << 1;
                bits |= get_bit(scalars[num], i - 1);
                ec_GFp_nistp_recode_scalar_bits(&sign, &digit, bits);

                /* select the point to add or subtract */
                select_point(digit, 17, pre_comp[num], tmp);
                felem_neg(tmp[3], tmp[1]); /* (X, -Y, Z) is the negative
                                            * point */
                copy_conditional(tmp[1], tmp[3], sign);

                if (!skip) {
                    point_add(nq[0], nq[1], nq[2],
                              nq[0], nq[1], nq[2],
                              mixed, tmp[0], tmp[1], tmp[2]);
                } else {
                    /* first addition: just load the selected point into nq */
                    memcpy(nq, tmp, 3 * sizeof(felem));
                    skip = 0;
                }
            }
        }
    }
    felem_assign(x_out, nq[0]);
    felem_assign(y_out, nq[1]);
    felem_assign(z_out, nq[2]);
}
1218
1219 /******************************************************************************/
1220 /*
1221 * FUNCTIONS TO MANAGE PRECOMPUTATION
1222 */
1223
1224 static NISTP224_PRE_COMP *nistp224_pre_comp_new(void)
1225 {
1226 NISTP224_PRE_COMP *ret = OPENSSL_zalloc(sizeof(*ret));
1227
1228 if (!ret) {
1229 ECerr(EC_F_NISTP224_PRE_COMP_NEW, ERR_R_MALLOC_FAILURE);
1230 return ret;
1231 }
1232
1233 ret->references = 1;
1234
1235 ret->lock = CRYPTO_THREAD_lock_new();
1236 if (ret->lock == NULL) {
1237 ECerr(EC_F_NISTP224_PRE_COMP_NEW, ERR_R_MALLOC_FAILURE);
1238 OPENSSL_free(ret);
1239 return NULL;
1240 }
1241 return ret;
1242 }
1243
1244 NISTP224_PRE_COMP *EC_nistp224_pre_comp_dup(NISTP224_PRE_COMP *p)
1245 {
1246 int i;
1247 if (p != NULL)
1248 CRYPTO_UP_REF(&p->references, &i, p->lock);
1249 return p;
1250 }
1251
1252 void EC_nistp224_pre_comp_free(NISTP224_PRE_COMP *p)
1253 {
1254 int i;
1255
1256 if (p == NULL)
1257 return;
1258
1259 CRYPTO_DOWN_REF(&p->references, &i, p->lock);
1260 REF_PRINT_COUNT("EC_nistp224", x);
1261 if (i > 0)
1262 return;
1263 REF_ASSERT_ISNT(i < 0);
1264
1265 CRYPTO_THREAD_lock_free(p->lock);
1266 OPENSSL_free(p);
1267 }
1268
1269 /******************************************************************************/
1270 /*
1271 * OPENSSL EC_METHOD FUNCTIONS
1272 */
1273
1274 int ec_GFp_nistp224_group_init(EC_GROUP *group)
1275 {
1276 int ret;
1277 ret = ec_GFp_simple_group_init(group);
1278 group->a_is_minus3 = 1;
1279 return ret;
1280 }
1281
/*
 * Set the curve parameters, but only if (p, a, b) are exactly the standard
 * NIST P-224 constants; otherwise raise EC_R_WRONG_CURVE_PARAMETERS.
 * On success, installs the fast NIST modular-reduction routine for this
 * group and delegates the rest to the generic GFp implementation.
 * Returns 1 on success, 0 on error.
 */
int ec_GFp_nistp224_group_set_curve(EC_GROUP *group, const BIGNUM *p,
                                    const BIGNUM *a, const BIGNUM *b,
                                    BN_CTX *ctx)
{
    int ret = 0;
    BIGNUM *curve_p, *curve_a, *curve_b;
#ifndef FIPS_MODE
    BN_CTX *new_ctx = NULL;

    /* outside FIPS mode we may create a scratch context of our own */
    if (ctx == NULL)
        ctx = new_ctx = BN_CTX_new();
#endif
    if (ctx == NULL)
        return 0;

    BN_CTX_start(ctx);
    curve_p = BN_CTX_get(ctx);
    curve_a = BN_CTX_get(ctx);
    curve_b = BN_CTX_get(ctx);
    /* per BN_CTX convention, only the last BN_CTX_get needs checking */
    if (curve_b == NULL)
        goto err;
    BN_bin2bn(nistp224_curve_params[0], sizeof(felem_bytearray), curve_p);
    BN_bin2bn(nistp224_curve_params[1], sizeof(felem_bytearray), curve_a);
    BN_bin2bn(nistp224_curve_params[2], sizeof(felem_bytearray), curve_b);
    if ((BN_cmp(curve_p, p)) || (BN_cmp(curve_a, a)) || (BN_cmp(curve_b, b))) {
        ECerr(EC_F_EC_GFP_NISTP224_GROUP_SET_CURVE,
              EC_R_WRONG_CURVE_PARAMETERS);
        goto err;
    }
    group->field_mod_func = BN_nist_mod_224;
    ret = ec_GFp_simple_group_set_curve(group, p, a, b, ctx);
 err:
    BN_CTX_end(ctx);
#ifndef FIPS_MODE
    BN_CTX_free(new_ctx);
#endif
    return ret;
}
1320
/*
 * Takes the Jacobian coordinates (X, Y, Z) of a point and returns (X', Y') =
 * (X/Z^2, Y/Z^3).
 * Either of x/y may be NULL if the caller only wants one coordinate.
 * Returns 1 on success, 0 on error (point at infinity or BN failure).
 */
int ec_GFp_nistp224_point_get_affine_coordinates(const EC_GROUP *group,
                                                 const EC_POINT *point,
                                                 BIGNUM *x, BIGNUM *y,
                                                 BN_CTX *ctx)
{
    felem z1, z2, x_in, y_in, x_out, y_out;
    widefelem tmp;

    /* the point at infinity has no affine representation */
    if (EC_POINT_is_at_infinity(group, point)) {
        ECerr(EC_F_EC_GFP_NISTP224_POINT_GET_AFFINE_COORDINATES,
              EC_R_POINT_AT_INFINITY);
        return 0;
    }
    if ((!BN_to_felem(x_in, point->X)) || (!BN_to_felem(y_in, point->Y)) ||
        (!BN_to_felem(z1, point->Z)))
        return 0;
    /* z2 = 1/Z */
    felem_inv(z2, z1);
    /* z1 = 1/Z^2; x_in = X/Z^2 */
    felem_square(tmp, z2);
    felem_reduce(z1, tmp);
    felem_mul(tmp, x_in, z1);
    felem_reduce(x_in, tmp);
    felem_contract(x_out, x_in);
    if (x != NULL) {
        if (!felem_to_BN(x, x_out)) {
            ECerr(EC_F_EC_GFP_NISTP224_POINT_GET_AFFINE_COORDINATES,
                  ERR_R_BN_LIB);
            return 0;
        }
    }
    /* z1 = 1/Z^3 (reusing z1 = 1/Z^2 and z2 = 1/Z); y_in = Y/Z^3 */
    felem_mul(tmp, z1, z2);
    felem_reduce(z1, tmp);
    felem_mul(tmp, y_in, z1);
    felem_reduce(y_in, tmp);
    felem_contract(y_out, y_in);
    if (y != NULL) {
        if (!felem_to_BN(y, y_out)) {
            ECerr(EC_F_EC_GFP_NISTP224_POINT_GET_AFFINE_COORDINATES,
                  ERR_R_BN_LIB);
            return 0;
        }
    }
    return 1;
}
1368
1369 static void make_points_affine(size_t num, felem points[ /* num */ ][3],
1370 felem tmp_felems[ /* num+1 */ ])
1371 {
1372 /*
1373 * Runs in constant time, unless an input is the point at infinity (which
1374 * normally shouldn't happen).
1375 */
1376 ec_GFp_nistp_points_make_affine_internal(num,
1377 points,
1378 sizeof(felem),
1379 tmp_felems,
1380 (void (*)(void *))felem_one,
1381 felem_is_zero_int,
1382 (void (*)(void *, const void *))
1383 felem_assign,
1384 (void (*)(void *, const void *))
1385 felem_square_reduce, (void (*)
1386 (void *,
1387 const void
1388 *,
1389 const void
1390 *))
1391 felem_mul_reduce,
1392 (void (*)(void *, const void *))
1393 felem_inv,
1394 (void (*)(void *, const void *))
1395 felem_contract);
1396 }
1397
/*
 * Computes scalar*generator + \sum scalars[i]*points[i], ignoring NULL
 * values Result is stored in r (r can equal one of the inputs).
 * Uses stored/built-in generator precomputation when it matches the group
 * generator; otherwise the generator is handled as an extra arbitrary point.
 * Returns 1 on success, 0 on error.
 */
int ec_GFp_nistp224_points_mul(const EC_GROUP *group, EC_POINT *r,
                               const BIGNUM *scalar, size_t num,
                               const EC_POINT *points[],
                               const BIGNUM *scalars[], BN_CTX *ctx)
{
    int ret = 0;
    int j;
    unsigned i;
    int mixed = 0;
    BIGNUM *x, *y, *z, *tmp_scalar;
    felem_bytearray g_secret;
    felem_bytearray *secrets = NULL;
    felem (*pre_comp)[17][3] = NULL;
    felem *tmp_felems = NULL;
    felem_bytearray tmp;
    unsigned num_bytes;
    int have_pre_comp = 0;
    size_t num_points = num;
    felem x_in, y_in, z_in, x_out, y_out, z_out;
    NISTP224_PRE_COMP *pre = NULL;
    const felem(*g_pre_comp)[16][3] = NULL;
    EC_POINT *generator = NULL;
    const EC_POINT *p = NULL;
    const BIGNUM *p_scalar = NULL;

    BN_CTX_start(ctx);
    x = BN_CTX_get(ctx);
    y = BN_CTX_get(ctx);
    z = BN_CTX_get(ctx);
    tmp_scalar = BN_CTX_get(ctx);
    /* per BN_CTX convention, only the last BN_CTX_get needs checking */
    if (tmp_scalar == NULL)
        goto err;

    if (scalar != NULL) {
        pre = group->pre_comp.nistp224;
        if (pre)
            /* we have precomputation, try to use it */
            g_pre_comp = (const felem(*)[16][3])pre->g_pre_comp;
        else
            /* try to use the standard precomputation */
            g_pre_comp = &gmul[0];
        generator = EC_POINT_new(group);
        if (generator == NULL)
            goto err;
        /* get the generator from precomputation */
        if (!felem_to_BN(x, g_pre_comp[0][1][0]) ||
            !felem_to_BN(y, g_pre_comp[0][1][1]) ||
            !felem_to_BN(z, g_pre_comp[0][1][2])) {
            ECerr(EC_F_EC_GFP_NISTP224_POINTS_MUL, ERR_R_BN_LIB);
            goto err;
        }
        if (!EC_POINT_set_Jprojective_coordinates_GFp(group,
                                                      generator, x, y, z,
                                                      ctx))
            goto err;
        if (0 == EC_POINT_cmp(group, generator, group->generator, ctx))
            /* precomputation matches generator */
            have_pre_comp = 1;
        else
            /*
             * we don't have valid precomputation: treat the generator as a
             * random point
             */
            num_points = num_points + 1;
    }

    if (num_points > 0) {
        if (num_points >= 3) {
            /*
             * unless we precompute multiples for just one or two points,
             * converting those into affine form is time well spent
             */
            mixed = 1;
        }
        secrets = OPENSSL_zalloc(sizeof(*secrets) * num_points);
        pre_comp = OPENSSL_zalloc(sizeof(*pre_comp) * num_points);
        if (mixed)
            tmp_felems =
                OPENSSL_malloc(sizeof(felem) * (num_points * 17 + 1));
        if ((secrets == NULL) || (pre_comp == NULL)
            || (mixed && (tmp_felems == NULL))) {
            ECerr(EC_F_EC_GFP_NISTP224_POINTS_MUL, ERR_R_MALLOC_FAILURE);
            goto err;
        }

        /*
         * we treat NULL scalars as 0, and NULL points as points at infinity,
         * i.e., they contribute nothing to the linear combination
         */
        for (i = 0; i < num_points; ++i) {
            if (i == num)
                /* the generator */
            {
                p = EC_GROUP_get0_generator(group);
                p_scalar = scalar;
            } else
                /* the i^th point */
            {
                p = points[i];
                p_scalar = scalars[i];
            }
            if ((p_scalar != NULL) && (p != NULL)) {
                /* reduce scalar to 0 <= scalar < 2^224 */
                if ((BN_num_bits(p_scalar) > 224)
                    || (BN_is_negative(p_scalar))) {
                    /*
                     * this is an unusual input, and we don't guarantee
                     * constant-timeness
                     */
                    if (!BN_nnmod(tmp_scalar, p_scalar, group->order, ctx)) {
                        ECerr(EC_F_EC_GFP_NISTP224_POINTS_MUL, ERR_R_BN_LIB);
                        goto err;
                    }
                    num_bytes = BN_bn2bin(tmp_scalar, tmp);
                } else
                    num_bytes = BN_bn2bin(p_scalar, tmp);
                /* convert big-endian BN output to little-endian secret */
                flip_endian(secrets[i], tmp, num_bytes);
                /* precompute multiples 1*P .. 16*P for this point */
                if ((!BN_to_felem(x_out, p->X)) ||
                    (!BN_to_felem(y_out, p->Y)) ||
                    (!BN_to_felem(z_out, p->Z)))
                    goto err;
                felem_assign(pre_comp[i][1][0], x_out);
                felem_assign(pre_comp[i][1][1], y_out);
                felem_assign(pre_comp[i][1][2], z_out);
                /* odd multiples via add, even multiples via double */
                for (j = 2; j <= 16; ++j) {
                    if (j & 1) {
                        point_add(pre_comp[i][j][0], pre_comp[i][j][1],
                                  pre_comp[i][j][2], pre_comp[i][1][0],
                                  pre_comp[i][1][1], pre_comp[i][1][2], 0,
                                  pre_comp[i][j - 1][0],
                                  pre_comp[i][j - 1][1],
                                  pre_comp[i][j - 1][2]);
                    } else {
                        point_double(pre_comp[i][j][0], pre_comp[i][j][1],
                                     pre_comp[i][j][2], pre_comp[i][j / 2][0],
                                     pre_comp[i][j / 2][1],
                                     pre_comp[i][j / 2][2]);
                    }
                }
            }
        }
        if (mixed)
            make_points_affine(num_points * 17, pre_comp[0], tmp_felems);
    }

    /* the scalar for the generator */
    if ((scalar != NULL) && (have_pre_comp)) {
        memset(g_secret, 0, sizeof(g_secret));
        /* reduce scalar to 0 <= scalar < 2^224 */
        if ((BN_num_bits(scalar) > 224) || (BN_is_negative(scalar))) {
            /*
             * this is an unusual input, and we don't guarantee
             * constant-timeness
             */
            if (!BN_nnmod(tmp_scalar, scalar, group->order, ctx)) {
                ECerr(EC_F_EC_GFP_NISTP224_POINTS_MUL, ERR_R_BN_LIB);
                goto err;
            }
            num_bytes = BN_bn2bin(tmp_scalar, tmp);
        } else
            num_bytes = BN_bn2bin(scalar, tmp);
        flip_endian(g_secret, tmp, num_bytes);
        /* do the multiplication with generator precomputation */
        batch_mul(x_out, y_out, z_out,
                  (const felem_bytearray(*))secrets, num_points,
                  g_secret,
                  mixed, (const felem(*)[17][3])pre_comp, g_pre_comp);
    } else
        /* do the multiplication without generator precomputation */
        batch_mul(x_out, y_out, z_out,
                  (const felem_bytearray(*))secrets, num_points,
                  NULL, mixed, (const felem(*)[17][3])pre_comp, NULL);
    /* reduce the output to its unique minimal representation */
    felem_contract(x_in, x_out);
    felem_contract(y_in, y_out);
    felem_contract(z_in, z_out);
    if ((!felem_to_BN(x, x_in)) || (!felem_to_BN(y, y_in)) ||
        (!felem_to_BN(z, z_in))) {
        ECerr(EC_F_EC_GFP_NISTP224_POINTS_MUL, ERR_R_BN_LIB);
        goto err;
    }
    ret = EC_POINT_set_Jprojective_coordinates_GFp(group, r, x, y, z, ctx);

 err:
    BN_CTX_end(ctx);
    EC_POINT_free(generator);
    OPENSSL_free(secrets);
    OPENSSL_free(pre_comp);
    OPENSSL_free(tmp_felems);
    return ret;
}
1594
/*
 * Build and attach generator precomputation for this group: two 16-entry
 * tables of combinations of 2^0/2^56/2^112/2^168 multiples of G (table 0)
 * and the same shifted by 28 bits (table 1), as consumed by batch_mul.
 * If the group uses the standard P-224 generator, the built-in table is
 * copied instead of being recomputed. Returns 1 on success, 0 on error.
 */
int ec_GFp_nistp224_precompute_mult(EC_GROUP *group, BN_CTX *ctx)
{
    int ret = 0;
    NISTP224_PRE_COMP *pre = NULL;
    int i, j;
    BIGNUM *x, *y;
    EC_POINT *generator = NULL;
    felem tmp_felems[32];
#ifndef FIPS_MODE
    BN_CTX *new_ctx = NULL;
#endif

    /* throw away old precomputation */
    EC_pre_comp_free(group);

#ifndef FIPS_MODE
    if (ctx == NULL)
        ctx = new_ctx = BN_CTX_new();
#endif
    if (ctx == NULL)
        return 0;

    BN_CTX_start(ctx);
    x = BN_CTX_get(ctx);
    y = BN_CTX_get(ctx);
    /* per BN_CTX convention, only the last BN_CTX_get needs checking */
    if (y == NULL)
        goto err;
    /* get the generator */
    if (group->generator == NULL)
        goto err;
    generator = EC_POINT_new(group);
    if (generator == NULL)
        goto err;
    BN_bin2bn(nistp224_curve_params[3], sizeof(felem_bytearray), x);
    BN_bin2bn(nistp224_curve_params[4], sizeof(felem_bytearray), y);
    if (!EC_POINT_set_affine_coordinates(group, generator, x, y, ctx))
        goto err;
    if ((pre = nistp224_pre_comp_new()) == NULL)
        goto err;
    /*
     * if the generator is the standard one, use built-in precomputation
     */
    if (0 == EC_POINT_cmp(group, generator, group->generator, ctx)) {
        memcpy(pre->g_pre_comp, gmul, sizeof(pre->g_pre_comp));
        goto done;
    }
    if ((!BN_to_felem(pre->g_pre_comp[0][1][0], group->generator->X)) ||
        (!BN_to_felem(pre->g_pre_comp[0][1][1], group->generator->Y)) ||
        (!BN_to_felem(pre->g_pre_comp[0][1][2], group->generator->Z)))
        goto err;
    /*
     * compute 2^56*G, 2^112*G, 2^168*G for the first table, 2^28*G, 2^84*G,
     * 2^140*G, 2^196*G for the second one
     */
    for (i = 1; i <= 8; i <<= 1) {
        /* [1][i] = 2^28 * [0][i], via one doubling plus 27 more below */
        point_double(pre->g_pre_comp[1][i][0], pre->g_pre_comp[1][i][1],
                     pre->g_pre_comp[1][i][2], pre->g_pre_comp[0][i][0],
                     pre->g_pre_comp[0][i][1], pre->g_pre_comp[0][i][2]);
        for (j = 0; j < 27; ++j) {
            point_double(pre->g_pre_comp[1][i][0], pre->g_pre_comp[1][i][1],
                         pre->g_pre_comp[1][i][2], pre->g_pre_comp[1][i][0],
                         pre->g_pre_comp[1][i][1], pre->g_pre_comp[1][i][2]);
        }
        if (i == 8)
            break;
        /* [0][2i] = 2^28 * [1][i], i.e. another 28 doublings */
        point_double(pre->g_pre_comp[0][2 * i][0],
                     pre->g_pre_comp[0][2 * i][1],
                     pre->g_pre_comp[0][2 * i][2], pre->g_pre_comp[1][i][0],
                     pre->g_pre_comp[1][i][1], pre->g_pre_comp[1][i][2]);
        for (j = 0; j < 27; ++j) {
            point_double(pre->g_pre_comp[0][2 * i][0],
                         pre->g_pre_comp[0][2 * i][1],
                         pre->g_pre_comp[0][2 * i][2],
                         pre->g_pre_comp[0][2 * i][0],
                         pre->g_pre_comp[0][2 * i][1],
                         pre->g_pre_comp[0][2 * i][2]);
        }
    }
    for (i = 0; i < 2; i++) {
        /* g_pre_comp[i][0] is the point at infinity */
        memset(pre->g_pre_comp[i][0], 0, sizeof(pre->g_pre_comp[i][0]));
        /* the remaining multiples */
        /* 2^56*G + 2^112*G resp. 2^84*G + 2^140*G */
        point_add(pre->g_pre_comp[i][6][0], pre->g_pre_comp[i][6][1],
                  pre->g_pre_comp[i][6][2], pre->g_pre_comp[i][4][0],
                  pre->g_pre_comp[i][4][1], pre->g_pre_comp[i][4][2],
                  0, pre->g_pre_comp[i][2][0], pre->g_pre_comp[i][2][1],
                  pre->g_pre_comp[i][2][2]);
        /* 2^56*G + 2^168*G resp. 2^84*G + 2^196*G */
        point_add(pre->g_pre_comp[i][10][0], pre->g_pre_comp[i][10][1],
                  pre->g_pre_comp[i][10][2], pre->g_pre_comp[i][8][0],
                  pre->g_pre_comp[i][8][1], pre->g_pre_comp[i][8][2],
                  0, pre->g_pre_comp[i][2][0], pre->g_pre_comp[i][2][1],
                  pre->g_pre_comp[i][2][2]);
        /* 2^112*G + 2^168*G resp. 2^140*G + 2^196*G */
        point_add(pre->g_pre_comp[i][12][0], pre->g_pre_comp[i][12][1],
                  pre->g_pre_comp[i][12][2], pre->g_pre_comp[i][8][0],
                  pre->g_pre_comp[i][8][1], pre->g_pre_comp[i][8][2],
                  0, pre->g_pre_comp[i][4][0], pre->g_pre_comp[i][4][1],
                  pre->g_pre_comp[i][4][2]);
        /*
         * 2^56*G + 2^112*G + 2^168*G resp. 2^84*G + 2^140*G + 2^196*G
         */
        point_add(pre->g_pre_comp[i][14][0], pre->g_pre_comp[i][14][1],
                  pre->g_pre_comp[i][14][2], pre->g_pre_comp[i][12][0],
                  pre->g_pre_comp[i][12][1], pre->g_pre_comp[i][12][2],
                  0, pre->g_pre_comp[i][2][0], pre->g_pre_comp[i][2][1],
                  pre->g_pre_comp[i][2][2]);
        for (j = 1; j < 8; ++j) {
            /* odd multiples: add G resp. 2^28*G */
            point_add(pre->g_pre_comp[i][2 * j + 1][0],
                      pre->g_pre_comp[i][2 * j + 1][1],
                      pre->g_pre_comp[i][2 * j + 1][2],
                      pre->g_pre_comp[i][2 * j][0],
                      pre->g_pre_comp[i][2 * j][1],
                      pre->g_pre_comp[i][2 * j][2], 0,
                      pre->g_pre_comp[i][1][0], pre->g_pre_comp[i][1][1],
                      pre->g_pre_comp[i][1][2]);
        }
    }
    /* 31 non-infinity entries (indices 1..15 in each of the two tables) */
    make_points_affine(31, &(pre->g_pre_comp[0][1]), tmp_felems);

 done:
    SETPRECOMP(group, nistp224, pre);
    pre = NULL;
    ret = 1;
 err:
    BN_CTX_end(ctx);
    EC_POINT_free(generator);
#ifndef FIPS_MODE
    BN_CTX_free(new_ctx);
#endif
    EC_nistp224_pre_comp_free(pre);
    return ret;
}
1730
/* Report whether this group has nistp224 generator precomputation attached. */
int ec_GFp_nistp224_have_precompute_mult(const EC_GROUP *group)
{
    return HAVEPRECOMP(group, nistp224);
}
1735
1736 #endif