/*
 * Copyright 1999-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License").  You may not use
 * this file except in compliance with the License.  You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

/*-
 * This is a generic 32 bit "collector" for message digest algorithms.
 * Whenever needed it collects input character stream into chunks of
 * 32 bit values and invokes a block function that performs actual hash
 * calculations.
 *
 * Porting guide.
 *
 * Obligatory macros:
 *
 * DATA_ORDER_IS_BIG_ENDIAN or DATA_ORDER_IS_LITTLE_ENDIAN
 *      this macro defines byte order of input stream.
 * HASH_CBLOCK
 *      size of a unit chunk HASH_BLOCK operates on.
 * HASH_LONG
 *      has to be at least 32 bit wide.
 * HASH_CTX
 *      context structure that at least contains following
 *      members:
 *              typedef struct {
 *                      ...
 *                      HASH_LONG       Nl,Nh;
 *                      either {
 *                      HASH_LONG       data[HASH_LBLOCK];
 *                      unsigned char   data[HASH_CBLOCK];
 *                      };
 *                      unsigned int    num;
 *                      ...
 *                      } HASH_CTX;
 *      data[] vector is expected to be zeroed upon first call to
 *      HASH_UPDATE.
 * HASH_UPDATE
 *      name of "Update" function, implemented here.
 * HASH_TRANSFORM
 *      name of "Transform" function, implemented here.
 * HASH_FINAL
 *      name of "Final" function, implemented here.
 * HASH_BLOCK_DATA_ORDER
 *      name of "block" function capable of treating *unaligned* input
 *      message in original (data) byte order, implemented externally.
 * HASH_MAKE_STRING
 *      macro converting context variables to an ASCII hash string.
 *
 * MD5 example:
 *
 *      #define DATA_ORDER_IS_LITTLE_ENDIAN
 *
 *      #define HASH_LONG               MD5_LONG
 *      #define HASH_CTX                MD5_CTX
 *      #define HASH_CBLOCK             MD5_CBLOCK
 *      #define HASH_UPDATE             MD5_Update
 *      #define HASH_TRANSFORM          MD5_Transform
 *      #define HASH_FINAL              MD5_Final
 *      #define HASH_BLOCK_DATA_ORDER   md5_block_data_order
 */
#include <openssl/crypto.h>

/* Exactly one byte order must be chosen by the including digest. */
#if !defined(DATA_ORDER_IS_BIG_ENDIAN) && !defined(DATA_ORDER_IS_LITTLE_ENDIAN)
# error "DATA_ORDER must be defined!"
#endif

/* Core parameters of the digest being instantiated (see porting guide). */
#ifndef HASH_CBLOCK
# error "HASH_CBLOCK must be defined!"
#endif
#ifndef HASH_LONG
# error "HASH_LONG must be defined!"
#endif
#ifndef HASH_CTX
# error "HASH_CTX must be defined!"
#endif

/* Names under which the generic Update/Transform/Final below are emitted. */
#ifndef HASH_UPDATE
# error "HASH_UPDATE must be defined!"
#endif
#ifndef HASH_TRANSFORM
# error "HASH_TRANSFORM must be defined!"
#endif
#ifndef HASH_FINAL
# error "HASH_FINAL must be defined!"
#endif

/* Compression function, implemented externally, tolerating unaligned input. */
#ifndef HASH_BLOCK_DATA_ORDER
# error "HASH_BLOCK_DATA_ORDER must be defined!"
#endif
/*
 * Engage compiler specific rotate intrinsic function if available.
 * ROTATE(a,n) is a left-rotate of a 32-bit value by n bits, with n a
 * compile-time constant in every use site of this header.
 */
#undef ROTATE
#ifndef PEDANTIC
# if defined(_MSC_VER)
#  define ROTATE(a,n)   _lrotl(a,n)
# elif defined(__ICC)
#  define ROTATE(a,n)   _rotl(a,n)
# elif defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
  /*
   * Some GNU C inline assembler templates. Note that these are
   * rotates by *constant* number of bits! But that's exactly
   * what we need here...
   */
#  if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
#   define ROTATE(a,n)  ({ register unsigned int ret;   \
                                asm (                   \
                                "roll %1,%0"            \
                                : "=r"(ret)             \
                                : "I"(n), "0"((unsigned int)(a)) \
                                : "cc");                \
                           ret;                         \
                        })
#  elif defined(_ARCH_PPC) || defined(_ARCH_PPC64) || \
        defined(__powerpc) || defined(__ppc__) || defined(__powerpc64__)
#   define ROTATE(a,n)  ({ register unsigned int ret;   \
                                asm (                   \
                                "rlwinm %0,%1,%2,0,31"  \
                                : "=r"(ret)             \
                                : "r"(a), "I"(n));      \
                           ret;                         \
                        })
#  elif defined(__s390x__)
#   define ROTATE(a,n) ({ register unsigned int ret;    \
                                asm ("rll %0,%1,%2"     \
                                : "=r"(ret)             \
                                : "r"(a), "I"(n));      \
                          ret;                          \
                        })
#  endif
# endif
#endif                          /* PEDANTIC */

/* Portable fallback; the 0xffffffff mask keeps it correct if the operand
 * type is wider than 32 bits. */
#ifndef ROTATE
# define ROTATE(a,n)     (((a)<<(n))|(((a)&0xffffffff)>>(32-(n))))
#endif
/*
 * HOST_c2l(c,l): load a 32-bit value l from byte stream c (advancing c by 4)
 * in the digest's declared DATA_ORDER.  HOST_l2c(l,c): store l back out the
 * same way.  Fast unaligned/byte-swap variants are selected per platform;
 * the generic byte-by-byte forms at the end are always-correct fallbacks.
 */
#if defined(DATA_ORDER_IS_BIG_ENDIAN)

# ifndef PEDANTIC
#  if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
#   if ((defined(__i386) || defined(__i386__)) && !defined(I386_ONLY)) || \
       (defined(__x86_64) || defined(__x86_64__))
#    if !defined(B_ENDIAN)
    /*
     * This gives ~30-40% performance improvement in SHA-256 compiled
     * with gcc [on P4]. Well, first macro to be frank. We can pull
     * this trick on x86* platforms only, because these CPUs can fetch
     * unaligned data without raising an exception.
     */
#     define HOST_c2l(c,l)  ({ unsigned int r=*((const unsigned int *)(c)); \
                                   asm ("bswapl %0":"=r"(r):"0"(r));    \
                                   (c)+=4; (l)=r; })
#     define HOST_l2c(l,c)  ({ unsigned int r=(l);                      \
                                   asm ("bswapl %0":"=r"(r):"0"(r));    \
                                   *((unsigned int *)(c))=r; (c)+=4; r; })
#    endif
#   elif defined(__aarch64__)
#    if defined(__BYTE_ORDER__)
#     if defined(__ORDER_LITTLE_ENDIAN__) && __BYTE_ORDER__==__ORDER_LITTLE_ENDIAN__
#      define HOST_c2l(c,l) ({ unsigned int r;          \
                                   asm ("rev %w0,%w1"   \
                                        :"=r"(r)        \
                                        :"r"(*((const unsigned int *)(c))));\
                                   (c)+=4; (l)=r; })
#      define HOST_l2c(l,c) ({ unsigned int r;          \
                                   asm ("rev %w0,%w1"   \
                                        :"=r"(r)        \
                                        :"r"((unsigned int)(l)));\
                                   *((unsigned int *)(c))=r; (c)+=4; r; })
#     elif defined(__ORDER_BIG_ENDIAN__) && __BYTE_ORDER__==__ORDER_BIG_ENDIAN__
        /* Host is already big-endian: plain word copy suffices. */
#      define HOST_c2l(c,l) ((l)=*((const unsigned int *)(c)), (c)+=4, (l))
#      define HOST_l2c(l,c) (*((unsigned int *)(c))=(l), (c)+=4, (l))
#     endif
#    endif
#   endif
#  endif
#  if defined(__s390__) || defined(__s390x__)
#   define HOST_c2l(c,l) ((l)=*((const unsigned int *)(c)), (c)+=4, (l))
#   define HOST_l2c(l,c) (*((unsigned int *)(c))=(l), (c)+=4, (l))
#  endif
# endif

# ifndef HOST_c2l
#  define HOST_c2l(c,l)  (l =(((unsigned long)(*((c)++)))<<24),         \
                          l|=(((unsigned long)(*((c)++)))<<16),         \
                          l|=(((unsigned long)(*((c)++)))<< 8),         \
                          l|=(((unsigned long)(*((c)++)))    )           )
# endif
# ifndef HOST_l2c
#  define HOST_l2c(l,c)  (*((c)++)=(unsigned char)(((l)>>24)&0xff),     \
                          *((c)++)=(unsigned char)(((l)>>16)&0xff),     \
                          *((c)++)=(unsigned char)(((l)>> 8)&0xff),     \
                          *((c)++)=(unsigned char)(((l)    )&0xff),     \
                          l)
# endif

#elif defined(DATA_ORDER_IS_LITTLE_ENDIAN)

# ifndef PEDANTIC
#  if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
#   if defined(__s390x__)
#    define HOST_c2l(c,l) ({ asm ("lrv %0,%1"                           \
                                  :"=d"(l) :"m"(*(const unsigned int *)(c)));\
                             (c)+=4; (l); })
#    define HOST_l2c(l,c) ({ asm ("strv %1,%0"                          \
                                  :"=m"(*(unsigned int *)(c)) :"d"(l));\
                             (c)+=4; (l); })
#   endif
#  endif
#  if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
#   ifndef B_ENDIAN
    /* See comment in DATA_ORDER_IS_BIG_ENDIAN section. */
#    define HOST_c2l(c,l) ((l)=*((const unsigned int *)(c)), (c)+=4, l)
#    define HOST_l2c(l,c) (*((unsigned int *)(c))=(l), (c)+=4, l)
#   endif
#  endif
# endif

# ifndef HOST_c2l
#  define HOST_c2l(c,l)  (l =(((unsigned long)(*((c)++)))    ),          \
                          l|=(((unsigned long)(*((c)++)))<< 8),         \
                          l|=(((unsigned long)(*((c)++)))<<16),         \
                          l|=(((unsigned long)(*((c)++)))<<24)           )
# endif
# ifndef HOST_l2c
#  define HOST_l2c(l,c)  (*((c)++)=(unsigned char)(((l)    )&0xff),      \
                          *((c)++)=(unsigned char)(((l)>> 8)&0xff),     \
                          *((c)++)=(unsigned char)(((l)>>16)&0xff),     \
                          *((c)++)=(unsigned char)(((l)>>24)&0xff),     \
                          l)
# endif

#endif

/*
 * Time for some action :-)
 */
246int HASH_UPDATE(HASH_CTX *c, const void *data_, size_t len)
247{
248 const unsigned char *data = data_;
249 unsigned char *p;
250 HASH_LONG l;
251 size_t n;
bd3576d2 252
0f113f3e
MC
253 if (len == 0)
254 return 1;
bd3576d2 255
0f113f3e 256 l = (c->Nl + (((HASH_LONG) len) << 3)) & 0xffffffffUL;
0f113f3e
MC
257 if (l < c->Nl) /* overflow */
258 c->Nh++;
259 c->Nh += (HASH_LONG) (len >> 29); /* might cause compiler warning on
260 * 16-bit */
261 c->Nl = l;
262
263 n = c->num;
264 if (n != 0) {
265 p = (unsigned char *)c->data;
266
267 if (len >= HASH_CBLOCK || len + n >= HASH_CBLOCK) {
268 memcpy(p + n, data, HASH_CBLOCK - n);
269 HASH_BLOCK_DATA_ORDER(c, p, 1);
270 n = HASH_CBLOCK - n;
271 data += n;
272 len -= n;
273 c->num = 0;
3ce2fdab
MC
274 /*
275 * We use memset rather than OPENSSL_cleanse() here deliberately.
276 * Using OPENSSL_cleanse() here could be a performance issue. It
277 * will get properly cleansed on finalisation so this isn't a
278 * security problem.
279 */
0f113f3e
MC
280 memset(p, 0, HASH_CBLOCK); /* keep it zeroed */
281 } else {
282 memcpy(p + n, data, len);
283 c->num += (unsigned int)len;
284 return 1;
285 }
286 }
287
288 n = len / HASH_CBLOCK;
289 if (n > 0) {
290 HASH_BLOCK_DATA_ORDER(c, data, n);
291 n *= HASH_CBLOCK;
292 data += n;
293 len -= n;
294 }
295
296 if (len != 0) {
297 p = (unsigned char *)c->data;
298 c->num = (unsigned int)len;
299 memcpy(p, data, len);
300 }
301 return 1;
302}
303
304void HASH_TRANSFORM(HASH_CTX *c, const unsigned char *data)
305{
306 HASH_BLOCK_DATA_ORDER(c, data, 1);
307}
308
309int HASH_FINAL(unsigned char *md, HASH_CTX *c)
310{
311 unsigned char *p = (unsigned char *)c->data;
312 size_t n = c->num;
313
314 p[n] = 0x80; /* there is always room for one */
315 n++;
316
317 if (n > (HASH_CBLOCK - 8)) {
318 memset(p + n, 0, HASH_CBLOCK - n);
319 n = 0;
320 HASH_BLOCK_DATA_ORDER(c, p, 1);
321 }
322 memset(p + n, 0, HASH_CBLOCK - 8 - n);
323
324 p += HASH_CBLOCK - 8;
bd3576d2 325#if defined(DATA_ORDER_IS_BIG_ENDIAN)
0f113f3e
MC
326 (void)HOST_l2c(c->Nh, p);
327 (void)HOST_l2c(c->Nl, p);
bd3576d2 328#elif defined(DATA_ORDER_IS_LITTLE_ENDIAN)
0f113f3e
MC
329 (void)HOST_l2c(c->Nl, p);
330 (void)HOST_l2c(c->Nh, p);
bd3576d2 331#endif
0f113f3e
MC
332 p -= HASH_CBLOCK;
333 HASH_BLOCK_DATA_ORDER(c, p, 1);
334 c->num = 0;
3ce2fdab 335 OPENSSL_cleanse(p, HASH_CBLOCK);
bd3576d2 336
1cbde6e4 337#ifndef HASH_MAKE_STRING
0f113f3e 338# error "HASH_MAKE_STRING must be defined!"
1cbde6e4 339#else
0f113f3e 340 HASH_MAKE_STRING(c, md);
1cbde6e4 341#endif
bd3576d2 342
0f113f3e
MC
343 return 1;
344}
2f98abbc
AP
345
/* Preferred register-width integer type for the working variables A-D. */
#ifndef MD32_REG_T
# if defined(__alpha) || defined(__sparcv9) || defined(__mips)
#  define MD32_REG_T long
/*
 * This comment was originally written for MD5, which is why it
 * discusses A-D. But it basically applies to all 32-bit digests,
 * which is why it was moved to common header file.
 *
 * In case you wonder why A-D are declared as long and not
 * as MD5_LONG. Doing so results in slight performance
 * boost on LP64 architectures. The catch is we don't
 * really care if 32 MSBs of a 64-bit register get polluted
 * with eventual overflows as we *save* only 32 LSBs in
 * *either* case. Now declaring 'em long excuses the compiler
 * from keeping 32 MSBs zeroed resulting in 13% performance
 * improvement under SPARC Solaris7/64 and 5% under AlphaLinux.
 * Well, to be honest it should say that this *prevents*
 * performance degradation.
 */
# else
/*
 * Above is not absolute and there are LP64 compilers that
 * generate better code if MD32_REG_T is defined int. The above
 * pre-processor condition reflects the circumstances under which
 * the conclusion was made and is subject to further extension.
 */
#  define MD32_REG_T int
# endif
#endif