/* crypto/modes/ocb128.c */
1 /* ====================================================================
2 * Copyright (c) 2014 The OpenSSL Project. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in
13 * the documentation and/or other materials provided with the
16 * 3. All advertising materials mentioning features or use of this
17 * software must display the following acknowledgment:
18 * "This product includes software developed by the OpenSSL Project
19 * for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
21 * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
22 * endorse or promote products derived from this software without
23 * prior written permission. For written permission, please contact
24 * openssl-core@openssl.org.
26 * 5. Products derived from this software may not be called "OpenSSL"
27 * nor may "OpenSSL" appear in their names without prior written
28 * permission of the OpenSSL Project.
30 * 6. Redistributions of any form whatsoever must retain the following
32 * "This product includes software developed by the OpenSSL Project
33 * for use in the OpenSSL Toolkit (http://www.openssl.org/)"
35 * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
36 * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
37 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
38 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
39 * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
40 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
41 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
42 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
43 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
44 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
45 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
46 * OF THE POSSIBILITY OF SUCH DAMAGE.
47 * ====================================================================
51 #include <openssl/crypto.h>
52 #include "modes_lcl.h"
54 #ifndef OPENSSL_NO_OCB
57 * Calculate the number of binary trailing zero's in any given number
59 static u32
ocb_ntz(u64 n
)
64 * We do a right-to-left simple sequential search. This is surprisingly
65 * efficient as the distribution of trailing zeros is not uniform,
66 * e.g. the number of possible inputs with no trailing zeros is equal to
67 * the number with 1 or more; the number with exactly 1 is equal to the
68 * number with 2 or more, etc. Checking the last two bits covers 75% of
69 * all numbers. Checking the last three covers 87.5%
/*
 * Shift a block of 16 bytes left by shift bits
 */
static void ocb_block_lshift(const unsigned char *in, size_t shift,
                             unsigned char *out)
{
    unsigned char shift_mask;
    int i;
    unsigned char mask[15];

    /* shift_mask selects the top |shift| bits that carry into the next byte */
    shift_mask = 0xff;
    shift_mask <<= (8 - shift);
    for (i = 15; i >= 0; i--) {
        if (i > 0) {
            /* Save the bits of in[i] that spill into out[i - 1] */
            mask[i - 1] = in[i] & shift_mask;
            mask[i - 1] >>= 8 - shift;
        }
        out[i] = in[i] << shift;

        if (i != 15) {
            /* Fold in the carry bits saved from the byte to the right */
            out[i] ^= mask[i];
        }
    }
}
104 * Perform a "double" operation as per OCB spec
106 static void ocb_double(OCB_BLOCK
*in
, OCB_BLOCK
*out
)
111 * Calculate the mask based on the most significant bit. There are more
112 * efficient ways to do this - but this way is constant time
114 mask
= in
->c
[0] & 0x80;
118 ocb_block_lshift(in
->c
, 1, out
->c
);
/*
 * Perform an xor on in1 and in2 - each of len bytes. Store result in out
 */
static void ocb_block_xor(const unsigned char *in1,
                          const unsigned char *in2, size_t len,
                          unsigned char *out)
{
    size_t i;

    for (i = 0; i < len; i++) {
        out[i] = in1[i] ^ in2[i];
    }
}
137 * Lookup L_index in our lookup table. If we haven't already got it we need to
140 static OCB_BLOCK
*ocb_lookup_l(OCB128_CONTEXT
*ctx
, size_t idx
)
142 size_t l_index
= ctx
->l_index
;
144 if (idx
<= l_index
) {
148 /* We don't have it - so calculate it */
149 if (idx
>= ctx
->max_l_index
) {
151 * Each additional entry allows to process almost double as
152 * much data, so that in linear world the table will need to
153 * be expanded with smaller and smaller increments. Originally
154 * it was doubling in size, which was a waste. Growing it
155 * linearly is not formally optimal, but is simpler to implement.
156 * We grow table by minimally required 4*n that would accommodate
159 ctx
->max_l_index
+= (idx
- ctx
->max_l_index
+ 4) & ~3;
161 OPENSSL_realloc(ctx
->l
, ctx
->max_l_index
* sizeof(OCB_BLOCK
));
165 while (l_index
< idx
) {
166 ocb_double(ctx
->l
+ l_index
, ctx
->l
+ l_index
+ 1);
169 ctx
->l_index
= l_index
;
175 * Create a new OCB128_CONTEXT
177 OCB128_CONTEXT
*CRYPTO_ocb128_new(void *keyenc
, void *keydec
,
178 block128_f encrypt
, block128_f decrypt
,
181 OCB128_CONTEXT
*octx
;
184 if ((octx
= OPENSSL_malloc(sizeof(*octx
))) != NULL
) {
185 ret
= CRYPTO_ocb128_init(octx
, keyenc
, keydec
, encrypt
, decrypt
,
196 * Initialise an existing OCB128_CONTEXT
198 int CRYPTO_ocb128_init(OCB128_CONTEXT
*ctx
, void *keyenc
, void *keydec
,
199 block128_f encrypt
, block128_f decrypt
,
202 memset(ctx
, 0, sizeof(*ctx
));
204 ctx
->max_l_index
= 5;
205 ctx
->l
= OPENSSL_malloc(ctx
->max_l_index
* 16);
210 * We set both the encryption and decryption key schedules - decryption
211 * needs both. Don't really need decryption schedule if only doing
212 * encryption - but it simplifies things to take it anyway
214 ctx
->encrypt
= encrypt
;
215 ctx
->decrypt
= decrypt
;
216 ctx
->stream
= stream
;
217 ctx
->keyenc
= keyenc
;
218 ctx
->keydec
= keydec
;
220 /* L_* = ENCIPHER(K, zeros(128)) */
221 ctx
->encrypt(ctx
->l_star
.c
, ctx
->l_star
.c
, ctx
->keyenc
);
223 /* L_$ = double(L_*) */
224 ocb_double(&ctx
->l_star
, &ctx
->l_dollar
);
226 /* L_0 = double(L_$) */
227 ocb_double(&ctx
->l_dollar
, ctx
->l
);
229 /* L_{i} = double(L_{i-1}) */
230 ocb_double(ctx
->l
, ctx
->l
+1);
231 ocb_double(ctx
->l
+1, ctx
->l
+2);
232 ocb_double(ctx
->l
+2, ctx
->l
+3);
233 ocb_double(ctx
->l
+3, ctx
->l
+4);
234 ctx
->l_index
= 4; /* enough to process up to 496 bytes */
240 * Copy an OCB128_CONTEXT object
242 int CRYPTO_ocb128_copy_ctx(OCB128_CONTEXT
*dest
, OCB128_CONTEXT
*src
,
243 void *keyenc
, void *keydec
)
245 memcpy(dest
, src
, sizeof(OCB128_CONTEXT
));
247 dest
->keyenc
= keyenc
;
249 dest
->keydec
= keydec
;
251 dest
->l
= OPENSSL_malloc(src
->max_l_index
* 16);
254 memcpy(dest
->l
, src
->l
, (src
->l_index
+ 1) * 16);
260 * Set the IV to be used for this operation. Must be 1 - 15 bytes.
262 int CRYPTO_ocb128_setiv(OCB128_CONTEXT
*ctx
, const unsigned char *iv
,
263 size_t len
, size_t taglen
)
265 unsigned char ktop
[16], tmp
[16], mask
;
266 unsigned char stretch
[24], nonce
[16];
267 size_t bottom
, shift
;
270 * Spec says IV is 120 bits or fewer - it allows non byte aligned lengths.
271 * We don't support this at this stage
273 if ((len
> 15) || (len
< 1) || (taglen
> 16) || (taglen
< 1)) {
277 /* Nonce = num2str(TAGLEN mod 128,7) || zeros(120-bitlen(N)) || 1 || N */
278 nonce
[0] = ((taglen
* 8) % 128) << 1;
279 memset(nonce
+ 1, 0, 15);
280 memcpy(nonce
+ 16 - len
, iv
, len
);
281 nonce
[15 - len
] |= 1;
283 /* Ktop = ENCIPHER(K, Nonce[1..122] || zeros(6)) */
284 memcpy(tmp
, nonce
, 16);
286 ctx
->encrypt(tmp
, ktop
, ctx
->keyenc
);
288 /* Stretch = Ktop || (Ktop[1..64] xor Ktop[9..72]) */
289 memcpy(stretch
, ktop
, 16);
290 ocb_block_xor(ktop
, ktop
+ 1, 8, stretch
+ 16);
292 /* bottom = str2num(Nonce[123..128]) */
293 bottom
= nonce
[15] & 0x3f;
295 /* Offset_0 = Stretch[1+bottom..128+bottom] */
297 ocb_block_lshift(stretch
+ (bottom
/ 8), shift
, ctx
->offset
.c
);
301 (*(stretch
+ (bottom
/ 8) + 16) & mask
) >> (8 - shift
);
307 * Provide any AAD. This can be called multiple times. Only the final time can
308 * have a partial block
310 int CRYPTO_ocb128_aad(OCB128_CONTEXT
*ctx
, const unsigned char *aad
,
313 u64 i
, all_num_blocks
;
314 size_t num_blocks
, last_len
;
318 /* Calculate the number of blocks of AAD provided now, and so far */
319 num_blocks
= len
/ 16;
320 all_num_blocks
= num_blocks
+ ctx
->blocks_hashed
;
322 /* Loop through all full blocks of AAD */
323 for (i
= ctx
->blocks_hashed
+ 1; i
<= all_num_blocks
; i
++) {
325 OCB_BLOCK
*aad_block
;
327 /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
328 lookup
= ocb_lookup_l(ctx
, ocb_ntz(i
));
331 ocb_block16_xor(&ctx
->offset_aad
, lookup
, &ctx
->offset_aad
);
333 /* Sum_i = Sum_{i-1} xor ENCIPHER(K, A_i xor Offset_i) */
334 aad_block
= (OCB_BLOCK
*)(aad
+ ((i
- ctx
->blocks_hashed
- 1) * 16));
335 ocb_block16_xor(&ctx
->offset_aad
, aad_block
, &tmp1
);
336 ctx
->encrypt(tmp1
.c
, tmp2
.c
, ctx
->keyenc
);
337 ocb_block16_xor(&ctx
->sum
, &tmp2
, &ctx
->sum
);
341 * Check if we have any partial blocks left over. This is only valid in the
342 * last call to this function
347 /* Offset_* = Offset_m xor L_* */
348 ocb_block16_xor(&ctx
->offset_aad
, &ctx
->l_star
, &ctx
->offset_aad
);
350 /* CipherInput = (A_* || 1 || zeros(127-bitlen(A_*))) xor Offset_* */
351 memset(&tmp1
, 0, 16);
352 memcpy(&tmp1
, aad
+ (num_blocks
* 16), last_len
);
353 ((unsigned char *)&tmp1
)[last_len
] = 0x80;
354 ocb_block16_xor(&ctx
->offset_aad
, &tmp1
, &tmp2
);
356 /* Sum = Sum_m xor ENCIPHER(K, CipherInput) */
357 ctx
->encrypt(tmp2
.c
, tmp1
.c
, ctx
->keyenc
);
358 ocb_block16_xor(&ctx
->sum
, &tmp1
, &ctx
->sum
);
361 ctx
->blocks_hashed
= all_num_blocks
;
367 * Provide any data to be encrypted. This can be called multiple times. Only
368 * the final time can have a partial block
370 int CRYPTO_ocb128_encrypt(OCB128_CONTEXT
*ctx
,
371 const unsigned char *in
, unsigned char *out
,
374 u64 i
, all_num_blocks
;
375 size_t num_blocks
, last_len
;
381 * Calculate the number of blocks of data to be encrypted provided now, and
384 num_blocks
= len
/ 16;
385 all_num_blocks
= num_blocks
+ ctx
->blocks_processed
;
387 if (num_blocks
&& all_num_blocks
== (size_t)all_num_blocks
388 && ctx
->stream
!= NULL
) {
389 size_t max_idx
= 0, top
= (size_t)all_num_blocks
;
392 * See how many L_{i} entries we need to process data at hand
393 * and pre-compute missing entries in the table [if any]...
397 if (ocb_lookup_l(ctx
, max_idx
) == NULL
)
400 ctx
->stream(in
, out
, num_blocks
, ctx
->keyenc
,
401 (size_t)ctx
->blocks_processed
+ 1, ctx
->offset
.c
,
402 (const unsigned char (*)[16])ctx
->l
, ctx
->checksum
.c
);
404 /* Loop through all full blocks to be encrypted */
405 for (i
= ctx
->blocks_processed
+ 1; i
<= all_num_blocks
; i
++) {
410 /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
411 lookup
= ocb_lookup_l(ctx
, ocb_ntz(i
));
414 ocb_block16_xor(&ctx
->offset
, lookup
, &ctx
->offset
);
416 /* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i) */
418 (OCB_BLOCK
*)(in
+ ((i
- ctx
->blocks_processed
- 1) * 16));
419 ocb_block16_xor_misaligned(&ctx
->offset
, inblock
, &tmp1
);
420 /* Checksum_i = Checksum_{i-1} xor P_i */
421 ocb_block16_xor_misaligned(&ctx
->checksum
, inblock
, &ctx
->checksum
);
422 ctx
->encrypt(tmp1
.c
, tmp2
.c
, ctx
->keyenc
);
424 (OCB_BLOCK
*)(out
+ ((i
- ctx
->blocks_processed
- 1) * 16));
425 ocb_block16_xor_misaligned(&ctx
->offset
, &tmp2
, outblock
);
430 * Check if we have any partial blocks left over. This is only valid in the
431 * last call to this function
436 /* Offset_* = Offset_m xor L_* */
437 ocb_block16_xor(&ctx
->offset
, &ctx
->l_star
, &ctx
->offset
);
439 /* Pad = ENCIPHER(K, Offset_*) */
440 ctx
->encrypt(ctx
->offset
.c
, pad
.c
, ctx
->keyenc
);
442 /* C_* = P_* xor Pad[1..bitlen(P_*)] */
443 ocb_block_xor(in
+ (len
/ 16) * 16, (unsigned char *)&pad
, last_len
,
444 out
+ (num_blocks
* 16));
446 /* Checksum_* = Checksum_m xor (P_* || 1 || zeros(127-bitlen(P_*))) */
447 memset(&tmp1
, 0, 16);
448 memcpy(&tmp1
, in
+ (len
/ 16) * 16, last_len
);
449 ((unsigned char *)(&tmp1
))[last_len
] = 0x80;
450 ocb_block16_xor(&ctx
->checksum
, &tmp1
, &ctx
->checksum
);
453 ctx
->blocks_processed
= all_num_blocks
;
459 * Provide any data to be decrypted. This can be called multiple times. Only
460 * the final time can have a partial block
462 int CRYPTO_ocb128_decrypt(OCB128_CONTEXT
*ctx
,
463 const unsigned char *in
, unsigned char *out
,
466 u64 i
, all_num_blocks
;
467 size_t num_blocks
, last_len
;
473 * Calculate the number of blocks of data to be decrypted provided now, and
476 num_blocks
= len
/ 16;
477 all_num_blocks
= num_blocks
+ ctx
->blocks_processed
;
479 if (num_blocks
&& all_num_blocks
== (size_t)all_num_blocks
480 && ctx
->stream
!= NULL
) {
481 size_t max_idx
= 0, top
= (size_t)all_num_blocks
;
484 * See how many L_{i} entries we need to process data at hand
485 * and pre-compute missing entries in the table [if any]...
489 if (ocb_lookup_l(ctx
, max_idx
) == NULL
)
492 ctx
->stream(in
, out
, num_blocks
, ctx
->keydec
,
493 (size_t)ctx
->blocks_processed
+ 1, ctx
->offset
.c
,
494 (const unsigned char (*)[16])ctx
->l
, ctx
->checksum
.c
);
496 /* Loop through all full blocks to be decrypted */
497 for (i
= ctx
->blocks_processed
+ 1; i
<= all_num_blocks
; i
++) {
501 /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
502 OCB_BLOCK
*lookup
= ocb_lookup_l(ctx
, ocb_ntz(i
));
505 ocb_block16_xor(&ctx
->offset
, lookup
, &ctx
->offset
);
507 /* P_i = Offset_i xor DECIPHER(K, C_i xor Offset_i) */
509 (OCB_BLOCK
*)(in
+ ((i
- ctx
->blocks_processed
- 1) * 16));
510 ocb_block16_xor_misaligned(&ctx
->offset
, inblock
, &tmp1
);
511 ctx
->decrypt(tmp1
.c
, tmp2
.c
, ctx
->keydec
);
513 (OCB_BLOCK
*)(out
+ ((i
- ctx
->blocks_processed
- 1) * 16));
514 ocb_block16_xor_misaligned(&ctx
->offset
, &tmp2
, outblock
);
516 /* Checksum_i = Checksum_{i-1} xor P_i */
517 ocb_block16_xor_misaligned(&ctx
->checksum
, outblock
, &ctx
->checksum
);
522 * Check if we have any partial blocks left over. This is only valid in the
523 * last call to this function
528 /* Offset_* = Offset_m xor L_* */
529 ocb_block16_xor(&ctx
->offset
, &ctx
->l_star
, &ctx
->offset
);
531 /* Pad = ENCIPHER(K, Offset_*) */
532 ctx
->encrypt(ctx
->offset
.c
, pad
.c
, ctx
->keyenc
);
534 /* P_* = C_* xor Pad[1..bitlen(C_*)] */
535 ocb_block_xor(in
+ (len
/ 16) * 16, (unsigned char *)&pad
, last_len
,
536 out
+ (num_blocks
* 16));
538 /* Checksum_* = Checksum_m xor (P_* || 1 || zeros(127-bitlen(P_*))) */
539 memset(&tmp1
, 0, 16);
540 memcpy(&tmp1
, out
+ (len
/ 16) * 16, last_len
);
541 ((unsigned char *)(&tmp1
))[last_len
] = 0x80;
542 ocb_block16_xor(&ctx
->checksum
, &tmp1
, &ctx
->checksum
);
545 ctx
->blocks_processed
= all_num_blocks
;
551 * Calculate the tag and verify it against the supplied tag
553 int CRYPTO_ocb128_finish(OCB128_CONTEXT
*ctx
, const unsigned char *tag
,
556 OCB_BLOCK tmp1
, tmp2
;
559 * Tag = ENCIPHER(K, Checksum_* xor Offset_* xor L_$) xor HASH(K,A)
561 ocb_block16_xor(&ctx
->checksum
, &ctx
->offset
, &tmp1
);
562 ocb_block16_xor(&tmp1
, &ctx
->l_dollar
, &tmp2
);
563 ctx
->encrypt(tmp2
.c
, tmp1
.c
, ctx
->keyenc
);
564 ocb_block16_xor(&tmp1
, &ctx
->sum
, &ctx
->tag
);
566 if (len
> 16 || len
< 1) {
570 /* Compare the tag if we've been given one */
572 return CRYPTO_memcmp(&ctx
->tag
, tag
, len
);
578 * Retrieve the calculated tag
580 int CRYPTO_ocb128_tag(OCB128_CONTEXT
*ctx
, unsigned char *tag
, size_t len
)
582 if (len
> 16 || len
< 1) {
586 /* Calculate the tag */
587 CRYPTO_ocb128_finish(ctx
, NULL
, 0);
589 /* Copy the tag into the supplied buffer */
590 memcpy(tag
, &ctx
->tag
, len
);
596 * Release all resources
598 void CRYPTO_ocb128_cleanup(OCB128_CONTEXT
*ctx
)
601 OPENSSL_clear_free(ctx
->l
, ctx
->max_l_index
* 16);
602 OPENSSL_cleanse(ctx
, sizeof(*ctx
));
606 #endif /* OPENSSL_NO_OCB */