]> git.ipfire.org Git - people/pmueller/ipfire-2.x.git/blame - src/patches/openssl-0.9.8g-sha-padlock.patch
Fix core38 updater replace openvpn config.
[people/pmueller/ipfire-2.x.git] / src / patches / openssl-0.9.8g-sha-padlock.patch
CommitLineData
f24c9564
CS
1diff -urN openssl-0.9.8g.orig/crypto/engine/eng_padlock.c openssl-0.9.8g/crypto/engine/eng_padlock.c
2--- openssl-0.9.8g.orig/crypto/engine/eng_padlock.c 2007-11-13 20:00:28.390611512 +0100
3+++ openssl-0.9.8g/crypto/engine/eng_padlock.c 2007-11-13 20:02:52.398818072 +0100
4@@ -74,12 +74,23 @@
5 #ifndef OPENSSL_NO_AES
6 #include <openssl/aes.h>
7 #endif
8+#ifndef OPENSSL_NO_SHA
9+#include <openssl/sha.h>
10+#endif
11 #include <openssl/rand.h>
12 #include <openssl/err.h>
13
14 #ifndef OPENSSL_NO_HW
15 #ifndef OPENSSL_NO_HW_PADLOCK
16
17+/* PadLock RNG is disabled by default */
18+#define PADLOCK_NO_RNG 1
19+
20+/* No ASM routines for SHA in MSC yet */
21+#ifdef _MSC_VER
22+#define OPENSSL_NO_SHA
23+#endif
24+
25 /* Attempt to have a single source for both 0.9.7 and 0.9.8 :-) */
26 #if (OPENSSL_VERSION_NUMBER >= 0x00908000L)
27 # ifndef OPENSSL_NO_DYNAMIC_ENGINE
28@@ -135,52 +146,89 @@
29 static int padlock_init(ENGINE *e);
30
31 /* RNG Stuff */
32+#ifndef PADLOCK_NO_RNG
33 static RAND_METHOD padlock_rand;
34+#endif
35
36 /* Cipher Stuff */
37 #ifndef OPENSSL_NO_AES
38 static int padlock_ciphers(ENGINE *e, const EVP_CIPHER **cipher, const int **nids, int nid);
39 #endif
40
41+/* Digest Stuff */
42+#ifndef OPENSSL_NO_SHA
43+static int padlock_digests(ENGINE *e, const EVP_MD **digest, const int **nids, int nid);
44+#endif
45+
46 /* Engine names */
47 static const char *padlock_id = "padlock";
48 static char padlock_name[100];
49
50 /* Available features */
51-static int padlock_use_ace = 0; /* Advanced Cryptography Engine */
52-static int padlock_use_rng = 0; /* Random Number Generator */
53+enum padlock_flags {
54+ PADLOCK_RNG = 0x01,
55+ PADLOCK_ACE = 0x02,
56+ PADLOCK_ACE2 = 0x04,
57+ PADLOCK_PHE = 0x08,
58+ PADLOCK_PMM = 0x10
59+};
60+enum padlock_flags padlock_flags;
61+
62+#define PADLOCK_HAVE_RNG (padlock_flags & PADLOCK_RNG)
63+#define PADLOCK_HAVE_ACE (padlock_flags & (PADLOCK_ACE|PADLOCK_ACE2))
64+#define PADLOCK_HAVE_ACE1 (padlock_flags & PADLOCK_ACE)
65+#define PADLOCK_HAVE_ACE2 (padlock_flags & PADLOCK_ACE2)
66+#define PADLOCK_HAVE_PHE (padlock_flags & PADLOCK_PHE)
67+#define PADLOCK_HAVE_PMM (padlock_flags & PADLOCK_PMM)
68+
69 #ifndef OPENSSL_NO_AES
70 static int padlock_aes_align_required = 1;
71 #endif
72
73+/* Init / Max buffer sizes for SHA */
74+#define PADLOCK_SHA_INIT_ORD 13 /* = 8192 */
75+#define PADLOCK_SHA_MAX_ORD 13 /* = 8192 */
76+
77 /* ===== Engine "management" functions ===== */
78
79 /* Prepare the ENGINE structure for registration */
80 static int
81 padlock_bind_helper(ENGINE *e)
82 {
83+ char phe_string[20];
84+
85 /* Check available features */
86 padlock_available();
87
88-#if 1 /* disable RNG for now, see commentary in vicinity of RNG code */
89- padlock_use_rng=0;
90-#endif
91+ /* Build PHE info with buffer size argument */
92+ if (PADLOCK_HAVE_PHE)
93+ BIO_snprintf(phe_string, sizeof(phe_string),
94+ "PHE(%lu) ", 1UL << PADLOCK_SHA_MAX_ORD);
95
96 /* Generate a nice engine name with available features */
97 BIO_snprintf(padlock_name, sizeof(padlock_name),
98- "VIA PadLock (%s, %s)",
99- padlock_use_rng ? "RNG" : "no-RNG",
100- padlock_use_ace ? "ACE" : "no-ACE");
101+ "VIA PadLock: %s%s%s%s%s",
102+ padlock_flags ? "" : "not supported",
103+ PADLOCK_HAVE_RNG ? "RNG " : "",
104+ PADLOCK_HAVE_ACE ? (PADLOCK_HAVE_ACE2 ? "ACE2 " : "ACE ") : "",
105+ PADLOCK_HAVE_PHE ? phe_string : "",
106+ PADLOCK_HAVE_PMM ? "PMM " : "");
107
108 /* Register everything or return with an error */
109 if (!ENGINE_set_id(e, padlock_id) ||
110 !ENGINE_set_name(e, padlock_name) ||
111
112- !ENGINE_set_init_function(e, padlock_init) ||
113+ !ENGINE_set_init_function(e, padlock_init)
114 #ifndef OPENSSL_NO_AES
115- (padlock_use_ace && !ENGINE_set_ciphers (e, padlock_ciphers)) ||
116+ || (PADLOCK_HAVE_ACE && !ENGINE_set_ciphers (e, padlock_ciphers))
117+#endif
118+#ifndef OPENSSL_NO_SHA
119+ || (PADLOCK_HAVE_PHE && !ENGINE_set_digests (e, padlock_digests))
120+#endif
121+#ifndef PADLOCK_NO_RNG
122+ || (PADLOCK_HAVE_RNG && !ENGINE_set_RAND (e, &padlock_rand))
123 #endif
124- (padlock_use_rng && !ENGINE_set_RAND (e, &padlock_rand))) {
125+ ) {
126 return 0;
127 }
128
129@@ -210,7 +258,7 @@
130 static int
131 padlock_init(ENGINE *e)
132 {
133- return (padlock_use_rng || padlock_use_ace);
134+ return (padlock_flags);
135 }
136
137 /* This stuff is needed if this ENGINE is being compiled into a self-contained
138@@ -237,6 +285,17 @@
139
140 /* ===== Here comes the "real" engine ===== */
141
142+#ifdef __GNUC__
143+#define likely(x) __builtin_expect(!!(x), 1)
144+#define unlikely(x) __builtin_expect(!!(x), 0)
145+#else
146+#define likely(x) (x)
147+#define unlikely(x) (x)
148+#endif
149+
150+/* NOTE(review): assumes 32-bit 'unsigned long' (i386); wrong on LP64 — prefer <stdint.h> uint32_t. TODO confirm build targets */
151+typedef unsigned long uint32_t;
152+
153 #ifndef OPENSSL_NO_AES
154 /* Some AES-related constants */
155 #define AES_BLOCK_SIZE 16
156@@ -362,10 +421,22 @@
157 : "+a"(eax), "=d"(edx) : : "ecx");
158
159 /* Fill up some flags */
160- padlock_use_ace = ((edx & (0x3<<6)) == (0x3<<6));
161- padlock_use_rng = ((edx & (0x3<<2)) == (0x3<<2));
162+ padlock_flags |= ((edx & (0x3<<3)) ? PADLOCK_RNG : 0);
163+ padlock_flags |= ((edx & (0x3<<7)) ? PADLOCK_ACE : 0);
164+ padlock_flags |= ((edx & (0x3<<9)) ? PADLOCK_ACE2 : 0);
165+ padlock_flags |= ((edx & (0x3<<11)) ? PADLOCK_PHE : 0);
166+ padlock_flags |= ((edx & (0x3<<13)) ? PADLOCK_PMM : 0);
167
168- return padlock_use_ace + padlock_use_rng;
169+ return padlock_flags;
170+}
171+
172+static inline void
173+padlock_htonl_block(uint32_t *data, size_t count)
174+{
175+ while (count--) {
176+ asm volatile ("bswapl %0" : "+r"(*data));
177+ data++;
178+ }
179 }
180
181 #ifndef OPENSSL_NO_AES
182@@ -374,12 +445,9 @@
183 padlock_bswapl(AES_KEY *ks)
184 {
185 size_t i = sizeof(ks->rd_key)/sizeof(ks->rd_key[0]);
186- unsigned int *key = ks->rd_key;
187+ uint32_t *key = (uint32_t*) ks->rd_key;
188
189- while (i--) {
190- asm volatile ("bswapl %0" : "+r"(*key));
191- key++;
192- }
193+ padlock_htonl_block(key, i);
194 }
195 #endif
196
197@@ -1154,6 +1222,415 @@
198
199 #endif /* OPENSSL_NO_AES */
200
201+#ifndef OPENSSL_NO_SHA
202+
203+// #define PADLOCK_SHA_STAT 1
204+
205+union sha_all_ctx {
206+ SHA_CTX sha_ctx;
207+ SHA256_CTX sha256_ctx; /* shared with SHA224 */
208+};
209+
210+typedef int (*f_sha_init)(void *c);
211+typedef int (*f_sha_update)(void *c, const void *_data, size_t len);
212+typedef int (*f_sha_final)(unsigned char *md, void *c);
213+typedef void (*f_sha_padlock)(char *in, unsigned char *out, int count);
214+
215+struct sha_digest_functions {
216+ f_sha_init init;
217+ f_sha_update update;
218+ f_sha_final final;
219+ f_sha_padlock padlock;
220+};
221+
222+/* Don't forget to initialize all relevant
223+ * fields in padlock_sha_init() or face the
224+ * consequences!!!
225+ * BTW We don't use bzero() on this structure
226+ * because zeroing fallback_ctx is
227+ * a waste of time. */
228+struct padlock_digest_data {
229+ void *buf_start, *buf_alloc;
230+ ssize_t used;
231+ unsigned long order:8, bypass:1;
232+ /* Fallback support */
233+ struct sha_digest_functions fallback_fcs;
234+ union sha_all_ctx fallback_ctx;
235+#ifdef PADLOCK_SHA_STAT
236+ size_t stat_count, stat_total;
237+#endif
238+};
239+
240+#ifdef PADLOCK_SHA_STAT
241+size_t all_count, all_total;
242+#endif
243+
244+#define DIGEST_DATA(ctx) ((struct padlock_digest_data *)(ctx->md_data))
245+#define DDATA_FREE(ddata) ((size_t)(1L << ddata->order) - ddata->used)
246+
247+static void
248+padlock_sha_bypass(struct padlock_digest_data *ddata)
249+{
250+ if (ddata->bypass)
251+ return;
252+
253+ ddata->fallback_fcs.init(&ddata->fallback_ctx);
254+ if (ddata->buf_start && ddata->used > 0) {
255+ ddata->fallback_fcs.update(&ddata->fallback_ctx, ddata->buf_start, ddata->used);
256+ if (ddata->buf_alloc) {
257+ memset(ddata->buf_start, 0, ddata->used);
258+ free(ddata->buf_alloc);
259+ ddata->buf_alloc = 0;
260+ }
261+ }
262+ ddata->buf_start = 0;
263+ ddata->used = 0;
264+ ddata->bypass = 1;
265+
266+ return;
267+}
268+
269+static void
270+padlock_do_sha1(char *in, char *out, int count)
271+{
272+ /* We can't store directly to *out as it
273+ * doesn't have to be aligned. But who cares,
274+ * it's only a few bytes... */
275+ char buf[128+16];
276+ unsigned char *output = NEAREST_ALIGNED(buf);
277+
278+ ((uint32_t*)output)[0] = 0x67452301;
279+ ((uint32_t*)output)[1] = 0xEFCDAB89;
280+ ((uint32_t*)output)[2] = 0x98BADCFE;
281+ ((uint32_t*)output)[3] = 0x10325476;
282+ ((uint32_t*)output)[4] = 0xC3D2E1F0;
283+
284+ asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */
285+ : "+S"(in), "+D"(output)
286+ : "c"(count), "a"(0));
287+
288+ memcpy(out, output, 5 * sizeof(uint32_t));
289+
290+ padlock_htonl_block((uint32_t*)out, 5);
291+}
292+
293+static void
294+padlock_do_sha224(char *in, char *out, int count)
295+{
296+ /* We can't store directly to *out as it
297+ * doesn't have to be aligned. But who cares,
298+ * it's only a few bytes... */
299+ char buf[128+16];
300+ unsigned char *output = NEAREST_ALIGNED(buf);
301+
302+ ((uint32_t*)output)[0] = 0xC1059ED8UL;
303+ ((uint32_t*)output)[1] = 0x367CD507UL;
304+ ((uint32_t*)output)[2] = 0x3070DD17UL;
305+ ((uint32_t*)output)[3] = 0xF70E5939UL;
306+ ((uint32_t*)output)[4] = 0xFFC00B31UL;
307+ ((uint32_t*)output)[5] = 0x68581511UL;
308+ ((uint32_t*)output)[6] = 0x64F98FA7UL;
309+ ((uint32_t*)output)[7] = 0xBEFA4FA4UL;
310+
311+ asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */
312+ : "+S"(in), "+D"(output)
313+ : "c"(count), "a"(0));
314+
315+ memcpy(out, output, 7 * sizeof(uint32_t));
316+
317+ padlock_htonl_block((uint32_t*)out, 7);
318+}
319+
320+static void
321+padlock_do_sha256(char *in, char *out, int count)
322+{
323+ /* We can't store directly to *out as it
324+ * doesn't have to be aligned. But who cares,
325+ * it's only a few bytes... */
326+ char buf[128+16];
327+ unsigned char *output = NEAREST_ALIGNED(buf);
328+
329+ ((uint32_t*)output)[0] = 0x6A09E667;
330+ ((uint32_t*)output)[1] = 0xBB67AE85;
331+ ((uint32_t*)output)[2] = 0x3C6EF372;
332+ ((uint32_t*)output)[3] = 0xA54FF53A;
333+ ((uint32_t*)output)[4] = 0x510E527F;
334+ ((uint32_t*)output)[5] = 0x9B05688C;
335+ ((uint32_t*)output)[6] = 0x1F83D9AB;
336+ ((uint32_t*)output)[7] = 0x5BE0CD19;
337+
338+ asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */
339+ : "+S"(in), "+D"(output)
340+ : "c"(count), "a"(0));
341+
342+ memcpy(out, output, 8 * sizeof(uint32_t));
343+
344+ padlock_htonl_block((uint32_t*)out, 8);
345+}
346+
347+static int
348+padlock_sha_init(EVP_MD_CTX *ctx)
349+{
350+	struct padlock_digest_data *ddata = DIGEST_DATA(ctx);
351+
352+	ddata->used = 0;
353+	ddata->bypass = 0;
354+
355+	ddata->order = PADLOCK_SHA_INIT_ORD;
356+	ddata->buf_alloc = malloc((1L << ddata->order) + 16);
357+	ddata->buf_start = ddata->buf_alloc ? NEAREST_ALIGNED(ddata->buf_alloc) : NULL;
358+	if (!ddata->buf_alloc) padlock_sha_bypass(ddata); /* OOM: fall back to SW SHA */
359+	return 1;
360+}
361+
362+static int
363+padlock_sha1_init(EVP_MD_CTX *ctx)
364+{
365+ struct padlock_digest_data *ddata = DIGEST_DATA(ctx);
366+
367+ ddata->fallback_fcs.init = (f_sha_init)SHA1_Init;
368+ ddata->fallback_fcs.update = (f_sha_update)SHA1_Update;
369+ ddata->fallback_fcs.final = (f_sha_final)SHA1_Final;
370+ ddata->fallback_fcs.padlock = (f_sha_padlock)padlock_do_sha1;
371+
372+ return padlock_sha_init(ctx);
373+}
374+
375+static int
376+padlock_sha224_init(EVP_MD_CTX *ctx)
377+{
378+ struct padlock_digest_data *ddata = DIGEST_DATA(ctx);
379+
380+ ddata->fallback_fcs.init = (f_sha_init)SHA224_Init;
381+ ddata->fallback_fcs.update = (f_sha_update)SHA224_Update;
382+ ddata->fallback_fcs.final = (f_sha_final)SHA224_Final;
383+ ddata->fallback_fcs.padlock = (f_sha_padlock)padlock_do_sha224;
384+
385+ return padlock_sha_init(ctx);
386+}
387+
388+static int
389+padlock_sha256_init(EVP_MD_CTX *ctx)
390+{
391+ struct padlock_digest_data *ddata = DIGEST_DATA(ctx);
392+
393+ ddata->fallback_fcs.init = (f_sha_init)SHA256_Init;
394+ ddata->fallback_fcs.update = (f_sha_update)SHA256_Update;
395+ ddata->fallback_fcs.final = (f_sha_final)SHA256_Final;
396+ ddata->fallback_fcs.padlock = (f_sha_padlock)padlock_do_sha256;
397+
398+ return padlock_sha_init(ctx);
399+}
400+
401+static int
402+padlock_sha_update(EVP_MD_CTX *ctx, const void *data, size_t length)
403+{
404+ struct padlock_digest_data *ddata = DIGEST_DATA(ctx);
405+
406+#ifdef PADLOCK_SHA_STAT
407+ ddata->stat_count++;
408+ ddata->stat_total += length;
409+ all_count++;
410+ all_total += length;
411+#endif
412+ if (unlikely(ddata->bypass)) {
413+ ddata->fallback_fcs.update(&ddata->fallback_ctx, data, length);
414+ return 1;
415+ }
416+ if (unlikely(DDATA_FREE(ddata) < length)) {
417+ if (likely(ddata->used + length > (1 << PADLOCK_SHA_MAX_ORD))) {
418+ /* Too much data to be stored -> bypass to SW SHA */
419+ padlock_sha_bypass(ddata);
420+ ddata->fallback_fcs.update(&ddata->fallback_ctx, data, length);
421+ return 1;
422+ } else {
423+			/* Resize the allocated buffer */
424+ char *new_buf;
425+ size_t new_size;
426+
427+ while ((1<<++ddata->order) < (ddata->used + length));
428+ new_size = (1<<ddata->order);
429+ if(!(new_buf = realloc(ddata->buf_alloc, new_size + 16))) {
430+ /* fallback plan again */
431+ padlock_sha_bypass(ddata);
432+ ddata->fallback_fcs.update(&ddata->fallback_ctx, data, length);
433+ return 1;
434+ }
435+ ddata->buf_alloc = new_buf;
436+ ddata->buf_start = NEAREST_ALIGNED(new_buf);
437+ }
438+ }
439+
440+ memcpy(ddata->buf_start + ddata->used, data, length);
441+ ddata->used += length;
442+
443+ return 1;
444+}
445+
446+static int
447+padlock_sha_final(EVP_MD_CTX *ctx, unsigned char *md)
448+{
449+ struct padlock_digest_data *ddata = DIGEST_DATA(ctx);
450+
451+#ifdef PADLOCK_SHA_STAT
452+ fprintf(stderr, "PadLock CTX: cnt=%zu, tot=%zu, avg=%zu\n",
453+ ddata->stat_count, ddata->stat_total,
454+ ddata->stat_count ? (ddata->stat_total/ddata->stat_count) : 0);
455+ fprintf(stderr, "PadLock ALL: cnt=%zu, tot=%zu, avg=%zu\n",
456+ all_count, all_total, all_count ? (all_total/all_count) : 0);
457+#endif
458+
459+ if (ddata->bypass) {
460+ ddata->fallback_fcs.final(md, &ddata->fallback_ctx);
461+ return 1;
462+ }
463+
464+ /* Pass the input buffer to PadLock microcode... */
465+ ddata->fallback_fcs.padlock(ddata->buf_start, md, ddata->used);
466+ memset(ddata->buf_start, 0, ddata->used);
467+ free(ddata->buf_alloc);
468+ ddata->buf_start = 0;
469+ ddata->buf_alloc = 0;
470+ ddata->used = 0;
471+
472+ return 1;
473+}
474+
475+static int
476+padlock_sha_copy(EVP_MD_CTX *to,const EVP_MD_CTX *from)
477+{
478+	struct padlock_digest_data *ddata_from = DIGEST_DATA(from);
479+	struct padlock_digest_data *ddata_to = DIGEST_DATA(to);
480+
481+	memcpy(ddata_to, ddata_from, sizeof(struct padlock_digest_data));
482+	if (ddata_from->buf_alloc) {
483+		ddata_to->buf_alloc = malloc((1L << ddata_to->order) + 16); /* +16 alignment slack, as in padlock_sha_init() */
484+		if (!ddata_to->buf_alloc) { /* was buf_start: stale copy from *from*, never NULL here */
485+			fprintf(stderr, "%s(): malloc() failed\n", __func__);
486+			exit(1);
487+		}
488+		ddata_to->buf_start = NEAREST_ALIGNED(ddata_to->buf_alloc);
489+		memcpy(ddata_to->buf_start, ddata_from->buf_start, ddata_from->used);
490+	}
491+	return 1;
492+}
493+
494+static int
495+padlock_sha_cleanup(EVP_MD_CTX *ctx)
496+{
497+ struct padlock_digest_data *ddata = DIGEST_DATA(ctx);
498+
499+ if (ddata->buf_alloc) {
500+ memset(ddata->buf_start, 0, ddata->used);
501+ free(ddata->buf_alloc);
502+ }
503+
504+ memset(ddata, 0, sizeof(struct padlock_digest_data));
505+
506+ return 1;
507+}
508+
509+static const EVP_MD padlock_sha1_md = {
510+ NID_sha1,
511+ NID_sha1WithRSAEncryption,
512+ SHA_DIGEST_LENGTH,
513+ 0,
514+ padlock_sha1_init,
515+ padlock_sha_update,
516+ padlock_sha_final,
517+ padlock_sha_copy,
518+ padlock_sha_cleanup,
519+ EVP_PKEY_RSA_method,
520+ SHA_CBLOCK,
521+ sizeof(struct padlock_digest_data),
522+};
523+
524+static const EVP_MD padlock_sha224_md = {
525+ NID_sha224,
526+ NID_sha224WithRSAEncryption,
527+ SHA224_DIGEST_LENGTH,
528+ 0,
529+ padlock_sha224_init,
530+ padlock_sha_update,
531+ padlock_sha_final,
532+ padlock_sha_copy,
533+ padlock_sha_cleanup,
534+ EVP_PKEY_RSA_method,
535+ SHA_CBLOCK,
536+ sizeof(struct padlock_digest_data),
537+};
538+
539+static const EVP_MD padlock_sha256_md = {
540+ NID_sha256,
541+ NID_sha256WithRSAEncryption,
542+ SHA256_DIGEST_LENGTH,
543+ 0,
544+ padlock_sha256_init,
545+ padlock_sha_update,
546+ padlock_sha_final,
547+ padlock_sha_copy,
548+ padlock_sha_cleanup,
549+ EVP_PKEY_RSA_method,
550+ SHA_CBLOCK,
551+ sizeof(struct padlock_digest_data),
552+};
553+
554+static int padlock_digest_nids[] = {
555+#if !defined(OPENSSL_NO_SHA)
556+ NID_sha1,
557+#endif
558+#if !defined(OPENSSL_NO_SHA256)
559+#if !defined(OPENSSL_NO_SHA224)
560+ NID_sha224,
561+#endif
562+ NID_sha256,
563+#endif
564+};
565+
566+static int padlock_digest_nids_num = sizeof(padlock_digest_nids)/sizeof(padlock_digest_nids[0]);
567+
568+static int
569+padlock_digests (ENGINE *e, const EVP_MD **digest, const int **nids, int nid)
570+{
571+ /* No specific digest => return a list of supported nids ... */
572+ if (!digest) {
573+ *nids = padlock_digest_nids;
574+ return padlock_digest_nids_num;
575+ }
576+
577+ /* ... or the requested "digest" otherwise */
578+ switch (nid) {
579+#if !defined(OPENSSL_NO_SHA)
580+ case NID_sha1:
581+ *digest = &padlock_sha1_md;
582+ break;
583+#endif
584+
585+
586+#if !defined(OPENSSL_NO_SHA256)
587+#if !defined(OPENSSL_NO_SHA224)
588+ case NID_sha224:
589+ *digest = &padlock_sha224_md;
590+ break;
591+#endif /* OPENSSL_NO_SHA224 */
592+
593+ case NID_sha256:
594+ *digest = &padlock_sha256_md;
595+ break;
596+#endif /* OPENSSL_NO_SHA256 */
597+
598+ default:
599+ /* Sorry, we don't support this NID */
600+ *digest = NULL;
601+ return 0;
602+ }
603+
604+ return 1;
605+}
606+
607+#endif /* OPENSSL_NO_SHA */
608+
609+#ifndef PADLOCK_NO_RNG
610 /* ===== Random Number Generator ===== */
611 /*
612 * This code is not engaged. The reason is that it does not comply
613@@ -1209,6 +1686,7 @@
614 padlock_rand_bytes, /* pseudorand */
615 padlock_rand_status, /* rand status */
616 };
617+#endif /* PADLOCK_NO_RNG */
618
619 #endif /* COMPILE_HW_PADLOCK */
620