]>
Commit | Line | Data |
---|---|---|
bd3576d2 UM |
1 | /* crypto/md32_common.h */ |
2 | /* ==================================================================== | |
8a09b386 | 3 | * Copyright (c) 1999-2002 The OpenSSL Project. All rights reserved. |
bd3576d2 UM |
4 | * |
5 | * Redistribution and use in source and binary forms, with or without | |
6 | * modification, are permitted provided that the following conditions | |
7 | * are met: | |
8 | * | |
9 | * 1. Redistributions of source code must retain the above copyright | |
10 | * notice, this list of conditions and the following disclaimer. | |
11 | * | |
12 | * 2. Redistributions in binary form must reproduce the above copyright | |
13 | * notice, this list of conditions and the following disclaimer in | |
14 | * the documentation and/or other materials provided with the | |
15 | * distribution. | |
16 | * | |
17 | * 3. All advertising materials mentioning features or use of this | |
18 | * software must display the following acknowledgment: | |
19 | * "This product includes software developed by the OpenSSL Project | |
20 | * for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)" | |
21 | * | |
22 | * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to | |
23 | * endorse or promote products derived from this software without | |
24 | * prior written permission. For written permission, please contact | |
25 | * licensing@OpenSSL.org. | |
26 | * | |
27 | * 5. Products derived from this software may not be called "OpenSSL" | |
28 | * nor may "OpenSSL" appear in their names without prior written | |
29 | * permission of the OpenSSL Project. | |
30 | * | |
31 | * 6. Redistributions of any form whatsoever must retain the following | |
32 | * acknowledgment: | |
33 | * "This product includes software developed by the OpenSSL Project | |
34 | * for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)" | |
35 | * | |
36 | * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY | |
37 | * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | |
38 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | |
39 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR | |
40 | * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |
41 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | |
42 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | |
43 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | |
44 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, | |
45 | * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | |
46 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED | |
47 | * OF THE POSSIBILITY OF SUCH DAMAGE. | |
48 | * ==================================================================== | |
49 | * | |
50 | * This product includes cryptographic software written by Eric Young | |
51 | * (eay@cryptsoft.com). This product includes software written by Tim | |
52 | * Hudson (tjh@cryptsoft.com). | |
53 | * | |
54 | */ | |
55 | ||
56 | /* | |
57 | * This is a generic 32 bit "collector" for message digest algorithms. | |
58 | * Whenever needed it collects input character stream into chunks of | |
59 | * 32 bit values and invokes a block function that performs actual hash | |
60 | * calculations. | |
61 | * | |
62 | * Porting guide. | |
63 | * | |
64 | * Obligatory macros: | |
65 | * | |
66 | * DATA_ORDER_IS_BIG_ENDIAN or DATA_ORDER_IS_LITTLE_ENDIAN | |
67 | * this macro defines byte order of input stream. | |
68 | * HASH_CBLOCK | |
69 | * size of a unit chunk HASH_BLOCK operates on. | |
70 | * HASH_LONG | |
71 | * has to be at least 32 bit wide, if it's wider, then | |
72 | * HASH_LONG_LOG2 *has to* be defined along | |
73 | * HASH_CTX | |
74 | * context structure that at least contains following | |
75 | * members: | |
76 | * typedef struct { | |
77 | * ... | |
78 | * HASH_LONG Nl,Nh; | |
79 | * HASH_LONG data[HASH_LBLOCK]; | |
80 | * int num; | |
81 | * ... | |
82 | * } HASH_CTX; | |
83 | * HASH_UPDATE | |
84 | * name of "Update" function, implemented here. | |
85 | * HASH_TRANSFORM | |
86 | * name of "Transform" function, implemented here. | |
87 | * HASH_FINAL | |
88 | * name of "Final" function, implemented here. | |
89 | * HASH_BLOCK_HOST_ORDER | |
90 | * name of "block" function treating *aligned* input message | |
91 | * in host byte order, implemented externally. | |
92 | * HASH_BLOCK_DATA_ORDER | |
93 | * name of "block" function treating *unaligned* input message | |
94 | * in original (data) byte order, implemented externally (it | |
95 | * actually is optional if data and host are of the same | |
96 | * "endianess"). | |
1cbde6e4 AP |
97 | * HASH_MAKE_STRING |
98 | * macro converting context variables to an ASCII hash string. | |
bd3576d2 UM |
99 | * |
100 | * Optional macros: | |
101 | * | |
102 | * B_ENDIAN or L_ENDIAN | |
103 | * defines host byte-order. | |
104 | * HASH_LONG_LOG2 | |
105 | * defaults to 2 if not stated otherwise. | |
106 | * HASH_LBLOCK | |
107 | * assumed to be HASH_CBLOCK/4 if not stated otherwise. | |
108 | * HASH_BLOCK_DATA_ORDER_ALIGNED | |
109 | * alternative "block" function capable of treating | |
110 | * aligned input message in original (data) order, | |
111 | * implemented externally. | |
112 | * | |
113 | * MD5 example: | |
114 | * | |
115 | * #define DATA_ORDER_IS_LITTLE_ENDIAN | |
116 | * | |
117 | * #define HASH_LONG MD5_LONG | |
118 | * #define HASH_LONG_LOG2 MD5_LONG_LOG2 | |
119 | * #define HASH_CTX MD5_CTX | |
120 | * #define HASH_CBLOCK MD5_CBLOCK | |
121 | * #define HASH_LBLOCK MD5_LBLOCK | |
122 | * #define HASH_UPDATE MD5_Update | |
123 | * #define HASH_TRANSFORM MD5_Transform | |
124 | * #define HASH_FINAL MD5_Final | |
125 | * #define HASH_BLOCK_HOST_ORDER md5_block_host_order | |
126 | * #define HASH_BLOCK_DATA_ORDER md5_block_data_order | |
127 | * | |
128 | * <appro@fy.chalmers.se> | |
129 | */ | |
130 | ||
131 | #if !defined(DATA_ORDER_IS_BIG_ENDIAN) && !defined(DATA_ORDER_IS_LITTLE_ENDIAN) | |
132 | #error "DATA_ORDER must be defined!" | |
133 | #endif | |
134 | ||
135 | #ifndef HASH_CBLOCK | |
136 | #error "HASH_CBLOCK must be defined!" | |
137 | #endif | |
138 | #ifndef HASH_LONG | |
139 | #error "HASH_LONG must be defined!" | |
140 | #endif | |
141 | #ifndef HASH_CTX | |
142 | #error "HASH_CTX must be defined!" | |
143 | #endif | |
144 | ||
145 | #ifndef HASH_UPDATE | |
146 | #error "HASH_UPDATE must be defined!" | |
147 | #endif | |
148 | #ifndef HASH_TRANSFORM | |
149 | #error "HASH_TRANSFORM must be defined!" | |
150 | #endif | |
151 | #ifndef HASH_FINAL | |
152 | #error "HASH_FINAL must be defined!" | |
153 | #endif | |
154 | ||
155 | #ifndef HASH_BLOCK_HOST_ORDER | |
156 | #error "HASH_BLOCK_HOST_ORDER must be defined!" | |
157 | #endif | |
158 | ||
159 | #if 0 | |
160 | /* | |
161 | * Moved below as it's required only if HASH_BLOCK_DATA_ORDER_ALIGNED | |
162 | * isn't defined. | |
163 | */ | |
164 | #ifndef HASH_BLOCK_DATA_ORDER | |
165 | #error "HASH_BLOCK_DATA_ORDER must be defined!" | |
166 | #endif | |
167 | #endif | |
168 | ||
169 | #ifndef HASH_LBLOCK | |
170 | #define HASH_LBLOCK (HASH_CBLOCK/4) | |
171 | #endif | |
172 | ||
173 | #ifndef HASH_LONG_LOG2 | |
174 | #define HASH_LONG_LOG2 2 | |
175 | #endif | |
176 | ||
177 | /* | |
178 | * Engage compiler specific rotate intrinsic function if available. | |
179 | */ | |
180 | #undef ROTATE | |
181 | #ifndef PEDANTIC | |
1a979201 | 182 | # if defined(_MSC_VER) || defined(__ICC) |
1cbde6e4 AP |
183 | # define ROTATE(a,n) _lrotl(a,n) |
184 | # elif defined(__MWERKS__) | |
1eab9a1f | 185 | # if defined(__POWERPC__) |
a7c5241f | 186 | # define ROTATE(a,n) __rlwinm(a,n,0,31) |
1eab9a1f AP |
187 | # elif defined(__MC68K__) |
188 | /* Motorola specific tweak. <appro@fy.chalmers.se> */ | |
189 | # define ROTATE(a,n) ( n<24 ? __rol(a,n) : __ror(a,32-n) ) | |
9a1e34e5 AP |
190 | # else |
191 | # define ROTATE(a,n) __rol(a,n) | |
192 | # endif | |
cf1b7d96 | 193 | # elif defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM) |
bd3576d2 UM |
194 | /* |
195 | * Some GNU C inline assembler templates. Note that these are | |
196 | * rotates by *constant* number of bits! But that's exactly | |
197 | * what we need here... | |
198 | * | |
199 | * <appro@fy.chalmers.se> | |
200 | */ | |
2f98abbc | 201 | # if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__) |
bd3576d2 | 202 | # define ROTATE(a,n) ({ register unsigned int ret; \ |
0fad6cb7 | 203 | asm ( \ |
bd3576d2 UM |
204 | "roll %1,%0" \ |
205 | : "=r"(ret) \ | |
206 | : "I"(n), "0"(a) \ | |
207 | : "cc"); \ | |
208 | ret; \ | |
209 | }) | |
0fad6cb7 | 210 | # elif defined(__powerpc) || defined(__ppc) |
bd3576d2 | 211 | # define ROTATE(a,n) ({ register unsigned int ret; \ |
0fad6cb7 | 212 | asm ( \ |
bd3576d2 UM |
213 | "rlwinm %0,%1,%2,0,31" \ |
214 | : "=r"(ret) \ | |
215 | : "r"(a), "I"(n)); \ | |
216 | ret; \ | |
217 | }) | |
218 | # endif | |
219 | # endif | |
220 | ||
221 | /* | |
222 | * Engage compiler specific "fetch in reverse byte order" | |
223 | * intrinsic function if available. | |
224 | */ | |
cf1b7d96 | 225 | # if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM) |
bd3576d2 | 226 | /* some GNU C inline assembler templates by <appro@fy.chalmers.se> */ |
2f98abbc | 227 | # if (defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)) && !defined(I386_ONLY) |
bd3576d2 | 228 | # define BE_FETCH32(a) ({ register unsigned int l=(a);\ |
0fad6cb7 | 229 | asm ( \ |
bd3576d2 UM |
230 | "bswapl %0" \ |
231 | : "=r"(l) : "0"(l)); \ | |
232 | l; \ | |
233 | }) | |
234 | # elif defined(__powerpc) | |
235 | # define LE_FETCH32(a) ({ register unsigned int l; \ | |
0fad6cb7 | 236 | asm ( \ |
bd3576d2 UM |
237 | "lwbrx %0,0,%1" \ |
238 | : "=r"(l) \ | |
239 | : "r"(a)); \ | |
240 | l; \ | |
241 | }) | |
242 | ||
cf1b7d96 | 243 | # elif defined(__sparc) && defined(OPENSSL_SYS_ULTRASPARC) |
bd3576d2 | 244 | # define LE_FETCH32(a) ({ register unsigned int l; \ |
0fad6cb7 | 245 | asm ( \ |
bd3576d2 UM |
246 | "lda [%1]#ASI_PRIMARY_LITTLE,%0"\ |
247 | : "=r"(l) \ | |
248 | : "r"(a)); \ | |
249 | l; \ | |
250 | }) | |
251 | # endif | |
252 | # endif | |
253 | #endif /* PEDANTIC */ | |
254 | ||
255 | #if HASH_LONG_LOG2==2 /* Engage only if sizeof(HASH_LONG)== 4 */ | |
256 | /* A nice byte order reversal from Wei Dai <weidai@eskimo.com> */ | |
257 | #ifdef ROTATE | |
258 | /* 5 instructions with rotate instruction, else 9 */ | |
259 | #define REVERSE_FETCH32(a,l) ( \ | |
260 | l=*(const HASH_LONG *)(a), \ | |
261 | ((ROTATE(l,8)&0x00FF00FF)|(ROTATE((l&0x00FF00FF),24))) \ | |
262 | ) | |
263 | #else | |
264 | /* 6 instructions with rotate instruction, else 8 */ | |
265 | #define REVERSE_FETCH32(a,l) ( \ | |
266 | l=*(const HASH_LONG *)(a), \ | |
267 | l=(((l>>8)&0x00FF00FF)|((l&0x00FF00FF)<<8)), \ | |
268 | ROTATE(l,16) \ | |
269 | ) | |
270 | /* | |
271 | * Originally the middle line started with l=(((l&0xFF00FF00)>>8)|... | |
272 | * It's rewritten as above for two reasons: | |
273 | * - RISCs aren't good at long constants and have to explicitely | |
274 | * compose 'em with several (well, usually 2) instructions in a | |
275 | * register before performing the actual operation and (as you | |
276 | * already realized:-) having same constant should inspire the | |
277 | * compiler to permanently allocate the only register for it; | |
278 | * - most modern CPUs have two ALUs, but usually only one has | |
279 | * circuitry for shifts:-( this minor tweak inspires compiler | |
280 | * to schedule shift instructions in a better way... | |
281 | * | |
282 | * <appro@fy.chalmers.se> | |
283 | */ | |
284 | #endif | |
285 | #endif | |
286 | ||
287 | #ifndef ROTATE | |
288 | #define ROTATE(a,n) (((a)<<(n))|(((a)&0xffffffff)>>(32-(n)))) | |
289 | #endif | |
290 | ||
291 | /* | |
292 | * Make some obvious choices. E.g., HASH_BLOCK_DATA_ORDER_ALIGNED | |
293 | * and HASH_BLOCK_HOST_ORDER ought to be the same if input data | |
294 | * and host are of the same "endianess". It's possible to mask | |
295 | * this with blank #define HASH_BLOCK_DATA_ORDER though... | |
296 | * | |
297 | * <appro@fy.chalmers.se> | |
298 | */ | |
299 | #if defined(B_ENDIAN) | |
300 | # if defined(DATA_ORDER_IS_BIG_ENDIAN) | |
301 | # if !defined(HASH_BLOCK_DATA_ORDER_ALIGNED) && HASH_LONG_LOG2==2 | |
302 | # define HASH_BLOCK_DATA_ORDER_ALIGNED HASH_BLOCK_HOST_ORDER | |
303 | # endif | |
304 | # elif defined(DATA_ORDER_IS_LITTLE_ENDIAN) | |
305 | # ifndef HOST_FETCH32 | |
306 | # ifdef LE_FETCH32 | |
307 | # define HOST_FETCH32(p,l) LE_FETCH32(p) | |
308 | # elif defined(REVERSE_FETCH32) | |
309 | # define HOST_FETCH32(p,l) REVERSE_FETCH32(p,l) | |
310 | # endif | |
311 | # endif | |
312 | # endif | |
313 | #elif defined(L_ENDIAN) | |
314 | # if defined(DATA_ORDER_IS_LITTLE_ENDIAN) | |
315 | # if !defined(HASH_BLOCK_DATA_ORDER_ALIGNED) && HASH_LONG_LOG2==2 | |
316 | # define HASH_BLOCK_DATA_ORDER_ALIGNED HASH_BLOCK_HOST_ORDER | |
317 | # endif | |
318 | # elif defined(DATA_ORDER_IS_BIG_ENDIAN) | |
319 | # ifndef HOST_FETCH32 | |
320 | # ifdef BE_FETCH32 | |
321 | # define HOST_FETCH32(p,l) BE_FETCH32(p) | |
322 | # elif defined(REVERSE_FETCH32) | |
323 | # define HOST_FETCH32(p,l) REVERSE_FETCH32(p,l) | |
324 | # endif | |
325 | # endif | |
326 | # endif | |
327 | #endif | |
328 | ||
0a78c297 | 329 | #if !defined(HASH_BLOCK_DATA_ORDER_ALIGNED) |
bd3576d2 UM |
330 | #ifndef HASH_BLOCK_DATA_ORDER |
331 | #error "HASH_BLOCK_DATA_ORDER must be defined!" | |
332 | #endif | |
333 | #endif | |
334 | ||
/*
 * Byte<->word conversion helpers in the input stream's byte order.
 * HOST_c2l collects four bytes into a word (advancing the pointer);
 * HOST_l2c stores a word back as four bytes.  The p_c2l variants
 * handle partially filled words; their switch cases deliberately
 * fall through (Duff-style dispatch on how many bytes are pending).
 */
#if defined(DATA_ORDER_IS_BIG_ENDIAN)

#define HOST_c2l(c,l)   (l =(((unsigned long)(*((c)++)))<<24),          \
                         l|=(((unsigned long)(*((c)++)))<<16),          \
                         l|=(((unsigned long)(*((c)++)))<< 8),          \
                         l|=(((unsigned long)(*((c)++)))    ),          \
                         l)
#define HOST_p_c2l(c,l,n)       {                                       \
                        switch (n) {                                    \
                        case 0: l =((unsigned long)(*((c)++)))<<24;     \
                                /* fall through */                      \
                        case 1: l|=((unsigned long)(*((c)++)))<<16;     \
                                /* fall through */                      \
                        case 2: l|=((unsigned long)(*((c)++)))<< 8;     \
                                /* fall through */                      \
                        case 3: l|=((unsigned long)(*((c)++)));         \
                                } }
#define HOST_p_c2l_p(c,l,sc,len) {                                      \
                        switch (sc) {                                   \
                        case 0: l =((unsigned long)(*((c)++)))<<24;     \
                                if (--len == 0) break;                  \
                                /* fall through */                      \
                        case 1: l|=((unsigned long)(*((c)++)))<<16;     \
                                if (--len == 0) break;                  \
                                /* fall through */                      \
                        case 2: l|=((unsigned long)(*((c)++)))<< 8;     \
                                } }
/* NOTE the pointer is not incremented at the end of this */
#define HOST_c2l_p(c,l,n)       {                                       \
                        l=0; (c)+=n;                                    \
                        switch (n) {                                    \
                        case 3: l =((unsigned long)(*(--(c))))<< 8;     \
                                /* fall through */                      \
                        case 2: l|=((unsigned long)(*(--(c))))<<16;     \
                                /* fall through */                      \
                        case 1: l|=((unsigned long)(*(--(c))))<<24;     \
                                } }
#define HOST_l2c(l,c)   (*((c)++)=(unsigned char)(((l)>>24)&0xff),      \
                         *((c)++)=(unsigned char)(((l)>>16)&0xff),      \
                         *((c)++)=(unsigned char)(((l)>> 8)&0xff),      \
                         *((c)++)=(unsigned char)(((l)    )&0xff),      \
                         l)

#elif defined(DATA_ORDER_IS_LITTLE_ENDIAN)

#define HOST_c2l(c,l)   (l =(((unsigned long)(*((c)++)))    ),          \
                         l|=(((unsigned long)(*((c)++)))<< 8),          \
                         l|=(((unsigned long)(*((c)++)))<<16),          \
                         l|=(((unsigned long)(*((c)++)))<<24),          \
                         l)
#define HOST_p_c2l(c,l,n)       {                                       \
                        switch (n) {                                    \
                        case 0: l =((unsigned long)(*((c)++)));         \
                                /* fall through */                      \
                        case 1: l|=((unsigned long)(*((c)++)))<< 8;     \
                                /* fall through */                      \
                        case 2: l|=((unsigned long)(*((c)++)))<<16;     \
                                /* fall through */                      \
                        case 3: l|=((unsigned long)(*((c)++)))<<24;     \
                                } }
#define HOST_p_c2l_p(c,l,sc,len) {                                      \
                        switch (sc) {                                   \
                        case 0: l =((unsigned long)(*((c)++)));         \
                                if (--len == 0) break;                  \
                                /* fall through */                      \
                        case 1: l|=((unsigned long)(*((c)++)))<< 8;     \
                                if (--len == 0) break;                  \
                                /* fall through */                      \
                        case 2: l|=((unsigned long)(*((c)++)))<<16;     \
                                } }
/* NOTE the pointer is not incremented at the end of this */
#define HOST_c2l_p(c,l,n)       {                                       \
                        l=0; (c)+=n;                                    \
                        switch (n) {                                    \
                        case 3: l =((unsigned long)(*(--(c))))<<16;     \
                                /* fall through */                      \
                        case 2: l|=((unsigned long)(*(--(c))))<< 8;     \
                                /* fall through */                      \
                        case 1: l|=((unsigned long)(*(--(c))));         \
                                } }
#define HOST_l2c(l,c)   (*((c)++)=(unsigned char)(((l)    )&0xff),      \
                         *((c)++)=(unsigned char)(((l)>> 8)&0xff),      \
                         *((c)++)=(unsigned char)(((l)>>16)&0xff),      \
                         *((c)++)=(unsigned char)(((l)>>24)&0xff),      \
                         l)

#endif
408 | ||
409 | /* | |
410 | * Time for some action:-) | |
411 | */ | |
412 | ||
9e0aad9f | 413 | int HASH_UPDATE (HASH_CTX *c, const void *data_, size_t len) |
bd3576d2 | 414 | { |
bd445703 | 415 | const unsigned char *data=data_; |
bd3576d2 | 416 | register HASH_LONG * p; |
9e0aad9f | 417 | register HASH_LONG l; |
bd3576d2 UM |
418 | int sw,sc,ew,ec; |
419 | ||
2dc769a1 | 420 | if (len==0) return 1; |
bd3576d2 | 421 | |
9e0aad9f | 422 | l=(c->Nl+(((HASH_LONG)len)<<3))&0xffffffffUL; |
bd3576d2 UM |
423 | /* 95-05-24 eay Fixed a bug with the overflow handling, thanks to |
424 | * Wei Dai <weidai@eskimo.com> for pointing it out. */ | |
425 | if (l < c->Nl) /* overflow */ | |
426 | c->Nh++; | |
9e0aad9f | 427 | c->Nh+=(len>>29); /* might cause compiler warning on 16-bit */ |
bd3576d2 UM |
428 | c->Nl=l; |
429 | ||
430 | if (c->num != 0) | |
431 | { | |
432 | p=c->data; | |
433 | sw=c->num>>2; | |
434 | sc=c->num&0x03; | |
435 | ||
436 | if ((c->num+len) >= HASH_CBLOCK) | |
437 | { | |
438 | l=p[sw]; HOST_p_c2l(data,l,sc); p[sw++]=l; | |
439 | for (; sw<HASH_LBLOCK; sw++) | |
440 | { | |
441 | HOST_c2l(data,l); p[sw]=l; | |
442 | } | |
443 | HASH_BLOCK_HOST_ORDER (c,p,1); | |
444 | len-=(HASH_CBLOCK-c->num); | |
445 | c->num=0; | |
446 | /* drop through and do the rest */ | |
447 | } | |
448 | else | |
449 | { | |
450 | c->num+=len; | |
451 | if ((sc+len) < 4) /* ugly, add char's to a word */ | |
452 | { | |
453 | l=p[sw]; HOST_p_c2l_p(data,l,sc,len); p[sw]=l; | |
454 | } | |
455 | else | |
456 | { | |
457 | ew=(c->num>>2); | |
458 | ec=(c->num&0x03); | |
8a09b386 BM |
459 | if (sc) |
460 | l=p[sw]; | |
461 | HOST_p_c2l(data,l,sc); | |
462 | p[sw++]=l; | |
bd3576d2 UM |
463 | for (; sw < ew; sw++) |
464 | { | |
465 | HOST_c2l(data,l); p[sw]=l; | |
466 | } | |
467 | if (ec) | |
468 | { | |
469 | HOST_c2l_p(data,l,ec); p[sw]=l; | |
470 | } | |
471 | } | |
2dc769a1 | 472 | return 1; |
bd3576d2 UM |
473 | } |
474 | } | |
475 | ||
476 | sw=len/HASH_CBLOCK; | |
477 | if (sw > 0) | |
478 | { | |
0a78c297 | 479 | #if defined(HASH_BLOCK_DATA_ORDER_ALIGNED) |
bd3576d2 UM |
480 | /* |
481 | * Note that HASH_BLOCK_DATA_ORDER_ALIGNED gets defined | |
482 | * only if sizeof(HASH_LONG)==4. | |
483 | */ | |
484 | if ((((unsigned long)data)%4) == 0) | |
485 | { | |
4d5d543e | 486 | /* data is properly aligned so that we can cast it: */ |
8087d8f7 | 487 | HASH_BLOCK_DATA_ORDER_ALIGNED (c,(const HASH_LONG *)data,sw); |
bd3576d2 UM |
488 | sw*=HASH_CBLOCK; |
489 | data+=sw; | |
490 | len-=sw; | |
491 | } | |
492 | else | |
493 | #if !defined(HASH_BLOCK_DATA_ORDER) | |
494 | while (sw--) | |
495 | { | |
496 | memcpy (p=c->data,data,HASH_CBLOCK); | |
497 | HASH_BLOCK_DATA_ORDER_ALIGNED(c,p,1); | |
498 | data+=HASH_CBLOCK; | |
499 | len-=HASH_CBLOCK; | |
500 | } | |
501 | #endif | |
502 | #endif | |
503 | #if defined(HASH_BLOCK_DATA_ORDER) | |
504 | { | |
531b2cf7 | 505 | HASH_BLOCK_DATA_ORDER(c,data,sw); |
bd3576d2 UM |
506 | sw*=HASH_CBLOCK; |
507 | data+=sw; | |
508 | len-=sw; | |
509 | } | |
510 | #endif | |
511 | } | |
512 | ||
513 | if (len!=0) | |
514 | { | |
515 | p = c->data; | |
516 | c->num = len; | |
517 | ew=len>>2; /* words to copy */ | |
518 | ec=len&0x03; | |
519 | for (; ew; ew--,p++) | |
520 | { | |
521 | HOST_c2l(data,l); *p=l; | |
522 | } | |
523 | HOST_c2l_p(data,l,ec); | |
524 | *p=l; | |
525 | } | |
2dc769a1 | 526 | return 1; |
bd3576d2 UM |
527 | } |
528 | ||
529 | ||
ac7d0785 | 530 | void HASH_TRANSFORM (HASH_CTX *c, const unsigned char *data) |
bd3576d2 | 531 | { |
0a78c297 | 532 | #if defined(HASH_BLOCK_DATA_ORDER_ALIGNED) |
bd3576d2 | 533 | if ((((unsigned long)data)%4) == 0) |
4d5d543e | 534 | /* data is properly aligned so that we can cast it: */ |
8087d8f7 | 535 | HASH_BLOCK_DATA_ORDER_ALIGNED (c,(const HASH_LONG *)data,1); |
bd3576d2 UM |
536 | else |
537 | #if !defined(HASH_BLOCK_DATA_ORDER) | |
538 | { | |
539 | memcpy (c->data,data,HASH_CBLOCK); | |
540 | HASH_BLOCK_DATA_ORDER_ALIGNED (c,c->data,1); | |
541 | } | |
542 | #endif | |
543 | #endif | |
544 | #if defined(HASH_BLOCK_DATA_ORDER) | |
cdfb093f | 545 | HASH_BLOCK_DATA_ORDER (c,data,1); |
bd3576d2 UM |
546 | #endif |
547 | } | |
548 | ||
549 | ||
2dc769a1 | 550 | int HASH_FINAL (unsigned char *md, HASH_CTX *c) |
bd3576d2 UM |
551 | { |
552 | register HASH_LONG *p; | |
553 | register unsigned long l; | |
554 | register int i,j; | |
555 | static const unsigned char end[4]={0x80,0x00,0x00,0x00}; | |
556 | const unsigned char *cp=end; | |
557 | ||
558 | /* c->num should definitly have room for at least one more byte. */ | |
559 | p=c->data; | |
560 | i=c->num>>2; | |
561 | j=c->num&0x03; | |
562 | ||
563 | #if 0 | |
564 | /* purify often complains about the following line as an | |
565 | * Uninitialized Memory Read. While this can be true, the | |
566 | * following p_c2l macro will reset l when that case is true. | |
567 | * This is because j&0x03 contains the number of 'valid' bytes | |
568 | * already in p[i]. If and only if j&0x03 == 0, the UMR will | |
569 | * occur but this is also the only time p_c2l will do | |
570 | * l= *(cp++) instead of l|= *(cp++) | |
571 | * Many thanks to Alex Tang <altitude@cic.net> for pickup this | |
572 | * 'potential bug' */ | |
573 | #ifdef PURIFY | |
574 | if (j==0) p[i]=0; /* Yeah, but that's not the way to fix it:-) */ | |
575 | #endif | |
576 | l=p[i]; | |
577 | #else | |
578 | l = (j==0) ? 0 : p[i]; | |
579 | #endif | |
580 | HOST_p_c2l(cp,l,j); p[i++]=l; /* i is the next 'undefined word' */ | |
581 | ||
582 | if (i>(HASH_LBLOCK-2)) /* save room for Nl and Nh */ | |
583 | { | |
584 | if (i<HASH_LBLOCK) p[i]=0; | |
585 | HASH_BLOCK_HOST_ORDER (c,p,1); | |
586 | i=0; | |
587 | } | |
588 | for (; i<(HASH_LBLOCK-2); i++) | |
589 | p[i]=0; | |
590 | ||
591 | #if defined(DATA_ORDER_IS_BIG_ENDIAN) | |
592 | p[HASH_LBLOCK-2]=c->Nh; | |
593 | p[HASH_LBLOCK-1]=c->Nl; | |
594 | #elif defined(DATA_ORDER_IS_LITTLE_ENDIAN) | |
595 | p[HASH_LBLOCK-2]=c->Nl; | |
596 | p[HASH_LBLOCK-1]=c->Nh; | |
597 | #endif | |
598 | HASH_BLOCK_HOST_ORDER (c,p,1); | |
599 | ||
1cbde6e4 AP |
600 | #ifndef HASH_MAKE_STRING |
601 | #error "HASH_MAKE_STRING must be defined!" | |
602 | #else | |
603 | HASH_MAKE_STRING(c,md); | |
604 | #endif | |
bd3576d2 UM |
605 | |
606 | c->num=0; | |
607 | /* clear stuff, HASH_BLOCK may be leaving some stuff on the stack | |
608 | * but I'm not worried :-) | |
4579924b | 609 | OPENSSL_cleanse((void *)c,sizeof(HASH_CTX)); |
bd3576d2 | 610 | */ |
2dc769a1 | 611 | return 1; |
bd3576d2 | 612 | } |
2f98abbc AP |
613 | |
#ifndef MD32_REG_T
#define MD32_REG_T long
/*
 * This comment was originally written for MD5, which is why it
 * discusses A-D. But it basically applies to all 32-bit digests,
 * which is why it was moved to common header file.
 *
 * In case you wonder why A-D are declared as long and not
 * as MD5_LONG. Doing so results in slight performance
 * boost on LP64 architectures. The catch is we don't
 * really care if 32 MSBs of a 64-bit register get polluted
 * with eventual overflows as we *save* only 32 LSBs in
 * *either* case. Now declaring 'em long excuses the compiler
 * from keeping 32 MSBs zeroed resulting in 13% performance
 * improvement under SPARC Solaris7/64 and 5% under AlphaLinux.
 * Well, to be honest it should say that this *prevents*
 * performance degradation.
 * <appro@fy.chalmers.se>
 * Apparently there are LP64 compilers that generate better
 * code if A-D are declared int. Most notably GCC-x86_64
 * generates better code.
 * <appro@fy.chalmers.se>
 */
#endif