/*
 * Copyright 2015-2018 The OpenSSL Project Authors. All Rights Reserved.
 * Copyright 2004-2014, Akamai Technologies. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

/*
 * This file is in two halves. The first half implements the public API
 * to be used by external consumers, and to be used by OpenSSL to store
 * data in a "secure arena." The second half implements the secure arena.
 * For details on that implementation, see below (look for uppercase
 * "SECURE HEAP IMPLEMENTATION").
 */
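
/*
 * Minimal usage sketch of the public half (illustrative only: key, key_len
 * and handle_error() are placeholders, and the 4096-byte arena and 32-byte
 * minimum allocation are arbitrary powers of two, not defaults):
 *
 *     if (CRYPTO_secure_malloc_init(4096, 32) == 0)
 *         handle_error();            secure heap unavailable
 *     key = OPENSSL_secure_zalloc(key_len);
 *     ... use key ...
 *     OPENSSL_secure_clear_free(key, key_len);
 *     CRYPTO_secure_malloc_done();
 *
 * The OPENSSL_secure_*() macros in <openssl/crypto.h> expand to the
 * CRYPTO_secure_*() functions below, supplying the caller's file and line.
 */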
#include "e_os.h"
#include <openssl/crypto.h>

#include <string.h>

/* e_os.h includes unistd.h, which defines _POSIX_VERSION */
#if defined(OPENSSL_SYS_UNIX) \
    && defined(_POSIX_VERSION) && _POSIX_VERSION >= 200112L
# define IMPLEMENTED
# include <stdlib.h>
# include <assert.h>
# include <unistd.h>
# include <sys/types.h>
# include <sys/mman.h>
# if defined(OPENSSL_SYS_LINUX)
#  include <sys/syscall.h>
#  include <linux/mman.h>
#  include <errno.h>
# endif
# include <sys/param.h>
# include <sys/stat.h>
# include <fcntl.h>
#endif

#define CLEAR(p, s) OPENSSL_cleanse(p, s)
#ifndef PAGE_SIZE
# define PAGE_SIZE 4096
#endif

#ifdef IMPLEMENTED
static size_t secure_mem_used;

static int secure_mem_initialized;

static CRYPTO_RWLOCK *sec_malloc_lock = NULL;

/*
 * These are the functions that must be implemented by a secure heap (sh).
 */
static int sh_init(size_t size, int minsize);
static void *sh_malloc(size_t size);
static void sh_free(void *ptr);
static void sh_done(void);
static size_t sh_actual_size(char *ptr);
static int sh_allocated(const char *ptr);
#endif

int CRYPTO_secure_malloc_init(size_t size, int minsize)
{
#ifdef IMPLEMENTED
    int ret = 0;

    if (!secure_mem_initialized) {
        sec_malloc_lock = CRYPTO_THREAD_glock_new("sec_malloc");
        if (sec_malloc_lock == NULL)
            return 0;
        if ((ret = sh_init(size, minsize)) != 0) {
            secure_mem_initialized = 1;
        } else {
            CRYPTO_THREAD_lock_free(sec_malloc_lock);
            sec_malloc_lock = NULL;
        }
    }

    return ret;
#else
    return 0;
#endif /* IMPLEMENTED */
}

int CRYPTO_secure_malloc_done(void)
{
#ifdef IMPLEMENTED
    if (secure_mem_used == 0) {
        sh_done();
        secure_mem_initialized = 0;
        CRYPTO_THREAD_lock_free(sec_malloc_lock);
        sec_malloc_lock = NULL;
        return 1;
    }
#endif /* IMPLEMENTED */
    return 0;
}

int CRYPTO_secure_malloc_initialized(void)
{
#ifdef IMPLEMENTED
    return secure_mem_initialized;
#else
    return 0;
#endif /* IMPLEMENTED */
}

void *CRYPTO_secure_malloc(size_t num, const char *file, int line)
{
#ifdef IMPLEMENTED
    void *ret;
    size_t actual_size;

    if (!secure_mem_initialized) {
        return CRYPTO_malloc(num, file, line);
    }
    CRYPTO_THREAD_write_lock(sec_malloc_lock);
    ret = sh_malloc(num);
    actual_size = ret ? sh_actual_size(ret) : 0;
    secure_mem_used += actual_size;
    CRYPTO_THREAD_unlock(sec_malloc_lock);
    return ret;
#else
    return CRYPTO_malloc(num, file, line);
#endif /* IMPLEMENTED */
}

void *CRYPTO_secure_zalloc(size_t num, const char *file, int line)
{
    void *ret = CRYPTO_secure_malloc(num, file, line);

    if (ret != NULL)
        memset(ret, 0, num);
    return ret;
}

void CRYPTO_secure_free(void *ptr, const char *file, int line)
{
#ifdef IMPLEMENTED
    size_t actual_size;

    if (ptr == NULL)
        return;
    if (!CRYPTO_secure_allocated(ptr)) {
        CRYPTO_free(ptr, file, line);
        return;
    }
    CRYPTO_THREAD_write_lock(sec_malloc_lock);
    actual_size = sh_actual_size(ptr);
    CLEAR(ptr, actual_size);
    secure_mem_used -= actual_size;
    sh_free(ptr);
    CRYPTO_THREAD_unlock(sec_malloc_lock);
#else
    CRYPTO_free(ptr, file, line);
#endif /* IMPLEMENTED */
}

void CRYPTO_secure_clear_free(void *ptr, size_t num,
                              const char *file, int line)
{
#ifdef IMPLEMENTED
    size_t actual_size;

    if (ptr == NULL)
        return;
    if (!CRYPTO_secure_allocated(ptr)) {
        OPENSSL_cleanse(ptr, num);
        CRYPTO_free(ptr, file, line);
        return;
    }
    CRYPTO_THREAD_write_lock(sec_malloc_lock);
    actual_size = sh_actual_size(ptr);
    CLEAR(ptr, actual_size);
    secure_mem_used -= actual_size;
    sh_free(ptr);
    CRYPTO_THREAD_unlock(sec_malloc_lock);
#else
    if (ptr == NULL)
        return;
    OPENSSL_cleanse(ptr, num);
    CRYPTO_free(ptr, file, line);
#endif /* IMPLEMENTED */
}

int CRYPTO_secure_allocated(const void *ptr)
{
#ifdef IMPLEMENTED
    int ret;

    if (!secure_mem_initialized)
        return 0;
    CRYPTO_THREAD_write_lock(sec_malloc_lock);
    ret = sh_allocated(ptr);
    CRYPTO_THREAD_unlock(sec_malloc_lock);
    return ret;
#else
    return 0;
#endif /* IMPLEMENTED */
}

size_t CRYPTO_secure_used(void)
{
#ifdef IMPLEMENTED
    return secure_mem_used;
#else
    return 0;
#endif /* IMPLEMENTED */
}

size_t CRYPTO_secure_actual_size(void *ptr)
{
#ifdef IMPLEMENTED
    size_t actual_size;

    CRYPTO_THREAD_write_lock(sec_malloc_lock);
    actual_size = sh_actual_size(ptr);
    CRYPTO_THREAD_unlock(sec_malloc_lock);
    return actual_size;
#else
    return 0;
#endif
}
/* END OF PAGE ...

   ... START OF PAGE */

/*
 * SECURE HEAP IMPLEMENTATION
 */
#ifdef IMPLEMENTED


/*
 * The implementation provided here uses a fixed-sized mmap() heap,
 * which is locked into memory, not written to core files, and protected
 * on either side by an unmapped page, which will catch pointer overruns
 * (or underruns) and an attempt to read data out of the secure heap.
 * Free'd memory is zero'd or otherwise cleansed.
 *
 * This is a pretty standard buddy allocator. We keep areas in a multiple
 * of "sh.minsize" units. The freelist and bitmaps are kept separately,
 * so all (and only) data is kept in the mmap'd heap.
 *
 * This code assumes eight-bit bytes. The numbers 3 and 7 are all over the
 * place.
 */
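
/*
 * Worked example (illustrative numbers only, not defaults): with
 * sh_init(4096, 16) the arena holds 4096 / 16 = 256 minimum-size units,
 * so sh.bittable_size is 512 bits and sh.freelist_size is 9 lists.
 * List 0 is the single 4096-byte area, list 1 holds 2048-byte areas,
 * and so on down to list 8, which holds the 256 possible 16-byte areas.
 * The bit for an area on list "l" at arena offset "off" is
 * (1 << l) + off / (4096 >> l), so list 0 uses bit 1 and list 8 uses
 * bits 256..511.
 */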

#define ONE ((size_t)1)

# define TESTBIT(t, b)  (t[(b) >> 3] & (ONE << ((b) & 7)))
# define SETBIT(t, b)   (t[(b) >> 3] |= (ONE << ((b) & 7)))
# define CLEARBIT(t, b) (t[(b) >> 3] &= (0xFF & ~(ONE << ((b) & 7))))

#define WITHIN_ARENA(p) \
    ((char*)(p) >= sh.arena && (char*)(p) < &sh.arena[sh.arena_size])
#define WITHIN_FREELIST(p) \
    ((char*)(p) >= (char*)sh.freelist && (char*)(p) < (char*)&sh.freelist[sh.freelist_size])


typedef struct sh_list_st
{
    struct sh_list_st *next;
    struct sh_list_st **p_next;
} SH_LIST;

typedef struct sh_st
{
    char* map_result;
    size_t map_size;
    char *arena;
    size_t arena_size;
    char **freelist;
    ossl_ssize_t freelist_size;
    size_t minsize;
    unsigned char *bittable;
    unsigned char *bitmalloc;
    size_t bittable_size;       /* size in bits */
} SH;

static SH sh;

static size_t sh_getlist(char *ptr)
{
    ossl_ssize_t list = sh.freelist_size - 1;
    size_t bit = (sh.arena_size + ptr - sh.arena) / sh.minsize;

    for (; bit; bit >>= 1, list--) {
        if (TESTBIT(sh.bittable, bit))
            break;
        OPENSSL_assert((bit & 1) == 0);
    }

    return list;
}
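
/*
 * For instance (continuing the illustrative 4096/16 arena above), if the
 * whole arena is one free level-0 chunk, sh_getlist(sh.arena) starts at
 * bit (4096 + 0) / 16 = 256 on list 8, finds that bit clear, and keeps
 * halving the bit (256, 128, ..., 2, 1) while walking up the lists until
 * it hits the set bit 1, returning list 0.
 */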


static int sh_testbit(char *ptr, int list, unsigned char *table)
{
    size_t bit;

    OPENSSL_assert(list >= 0 && list < sh.freelist_size);
    OPENSSL_assert(((ptr - sh.arena) & ((sh.arena_size >> list) - 1)) == 0);
    bit = (ONE << list) + ((ptr - sh.arena) / (sh.arena_size >> list));
    OPENSSL_assert(bit > 0 && bit < sh.bittable_size);
    return TESTBIT(table, bit);
}

static void sh_clearbit(char *ptr, int list, unsigned char *table)
{
    size_t bit;

    OPENSSL_assert(list >= 0 && list < sh.freelist_size);
    OPENSSL_assert(((ptr - sh.arena) & ((sh.arena_size >> list) - 1)) == 0);
    bit = (ONE << list) + ((ptr - sh.arena) / (sh.arena_size >> list));
    OPENSSL_assert(bit > 0 && bit < sh.bittable_size);
    OPENSSL_assert(TESTBIT(table, bit));
    CLEARBIT(table, bit);
}

static void sh_setbit(char *ptr, int list, unsigned char *table)
{
    size_t bit;

    OPENSSL_assert(list >= 0 && list < sh.freelist_size);
    OPENSSL_assert(((ptr - sh.arena) & ((sh.arena_size >> list) - 1)) == 0);
    bit = (ONE << list) + ((ptr - sh.arena) / (sh.arena_size >> list));
    OPENSSL_assert(bit > 0 && bit < sh.bittable_size);
    OPENSSL_assert(!TESTBIT(table, bit));
    SETBIT(table, bit);
}

static void sh_add_to_list(char **list, char *ptr)
{
    SH_LIST *temp;

    OPENSSL_assert(WITHIN_FREELIST(list));
    OPENSSL_assert(WITHIN_ARENA(ptr));

    temp = (SH_LIST *)ptr;
    temp->next = *(SH_LIST **)list;
    OPENSSL_assert(temp->next == NULL || WITHIN_ARENA(temp->next));
    temp->p_next = (SH_LIST **)list;

    if (temp->next != NULL) {
        OPENSSL_assert((char **)temp->next->p_next == list);
        temp->next->p_next = &(temp->next);
    }

    *list = ptr;
}

static void sh_remove_from_list(char *ptr)
{
    SH_LIST *temp, *temp2;

    temp = (SH_LIST *)ptr;
    if (temp->next != NULL)
        temp->next->p_next = temp->p_next;
    *temp->p_next = temp->next;
    if (temp->next == NULL)
        return;

    temp2 = temp->next;
    OPENSSL_assert(WITHIN_FREELIST(temp2->p_next) || WITHIN_ARENA(temp2->p_next));
}


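/*
 * Initialize the secure heap. The return value is 0 on complete failure,
 * 1 on full success, and 2 if the arena was set up but at least one of
 * the hardening steps (guard pages, mlock(), madvise(MADV_DONTDUMP))
 * could not be applied. CRYPTO_secure_malloc_init() passes this value
 * back to the caller unchanged.
 */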
static int sh_init(size_t size, int minsize)
{
    int ret;
    size_t i;
    size_t pgsize;
    size_t aligned;

    memset(&sh, 0, sizeof(sh));

    /* make sure size and minsize are powers of 2 */
    OPENSSL_assert(size > 0);
    OPENSSL_assert((size & (size - 1)) == 0);
    OPENSSL_assert(minsize > 0);
    OPENSSL_assert((minsize & (minsize - 1)) == 0);
    if (size == 0 || (size & (size - 1)) != 0)
        goto err;
    if (minsize <= 0 || (minsize & (minsize - 1)) != 0)
        goto err;

    while (minsize < (int)sizeof(SH_LIST))
        minsize *= 2;

    sh.arena_size = size;
    sh.minsize = minsize;
    sh.bittable_size = (sh.arena_size / sh.minsize) * 2;

    /* Prevent allocations of size 0 later on */
    if (sh.bittable_size >> 3 == 0)
        goto err;

    sh.freelist_size = -1;
    for (i = sh.bittable_size; i; i >>= 1)
        sh.freelist_size++;

    sh.freelist = OPENSSL_zalloc(sh.freelist_size * sizeof(char *));
    OPENSSL_assert(sh.freelist != NULL);
    if (sh.freelist == NULL)
        goto err;

    sh.bittable = OPENSSL_zalloc(sh.bittable_size >> 3);
    OPENSSL_assert(sh.bittable != NULL);
    if (sh.bittable == NULL)
        goto err;

    sh.bitmalloc = OPENSSL_zalloc(sh.bittable_size >> 3);
    OPENSSL_assert(sh.bitmalloc != NULL);
    if (sh.bitmalloc == NULL)
        goto err;

    /* Allocate space for heap, and two extra pages as guards */
#if defined(_SC_PAGE_SIZE) || defined (_SC_PAGESIZE)
    {
# if defined(_SC_PAGE_SIZE)
        long tmppgsize = sysconf(_SC_PAGE_SIZE);
# else
        long tmppgsize = sysconf(_SC_PAGESIZE);
# endif
        if (tmppgsize < 1)
            pgsize = PAGE_SIZE;
        else
            pgsize = (size_t)tmppgsize;
    }
#else
    pgsize = PAGE_SIZE;
#endif
    sh.map_size = pgsize + sh.arena_size + pgsize;
    if (1) {
#ifdef MAP_ANON
        sh.map_result = mmap(NULL, sh.map_size,
                             PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
    } else {
#endif
        int fd;

        sh.map_result = MAP_FAILED;
        if ((fd = open("/dev/zero", O_RDWR)) >= 0) {
            sh.map_result = mmap(NULL, sh.map_size,
                                 PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0);
            close(fd);
        }
    }
    if (sh.map_result == MAP_FAILED)
        goto err;
    sh.arena = (char *)(sh.map_result + pgsize);
    sh_setbit(sh.arena, 0, sh.bittable);
    sh_add_to_list(&sh.freelist[0], sh.arena);

    /* Now try to add guard pages and lock into memory. */
    ret = 1;

    /* Starting guard is already aligned from mmap. */
    if (mprotect(sh.map_result, pgsize, PROT_NONE) < 0)
        ret = 2;

    /* Ending guard page - need to round up to page boundary */
    aligned = (pgsize + sh.arena_size + (pgsize - 1)) & ~(pgsize - 1);
    if (mprotect(sh.map_result + aligned, pgsize, PROT_NONE) < 0)
        ret = 2;

#if defined(OPENSSL_SYS_LINUX) && defined(MLOCK_ONFAULT) && defined(SYS_mlock2)
    if (syscall(SYS_mlock2, sh.arena, sh.arena_size, MLOCK_ONFAULT) < 0) {
        if (errno == ENOSYS) {
            if (mlock(sh.arena, sh.arena_size) < 0)
                ret = 2;
        } else {
            ret = 2;
        }
    }
#else
    if (mlock(sh.arena, sh.arena_size) < 0)
        ret = 2;
#endif
#ifdef MADV_DONTDUMP
    if (madvise(sh.arena, sh.arena_size, MADV_DONTDUMP) < 0)
        ret = 2;
#endif

    return ret;

 err:
    sh_done();
    return 0;
}

static void sh_done(void)
{
    OPENSSL_free(sh.freelist);
    OPENSSL_free(sh.bittable);
    OPENSSL_free(sh.bitmalloc);
    if (sh.map_result != NULL && sh.map_result != MAP_FAILED && sh.map_size)
        munmap(sh.map_result, sh.map_size);
    memset(&sh, 0, sizeof(sh));
}

static int sh_allocated(const char *ptr)
{
    return WITHIN_ARENA(ptr) ? 1 : 0;
}

static char *sh_find_my_buddy(char *ptr, int list)
{
    size_t bit;
    char *chunk = NULL;

    bit = (ONE << list) + (ptr - sh.arena) / (sh.arena_size >> list);
    bit ^= 1;

    if (TESTBIT(sh.bittable, bit) && !TESTBIT(sh.bitmalloc, bit))
        chunk = sh.arena + ((bit & ((ONE << list) - 1)) * (sh.arena_size >> list));

    return chunk;
}
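
/*
 * Continuing the illustrative 4096/16 arena: the 16-byte chunk at offset
 * 32 maps to bit 256 + 32/16 = 258 on list 8, so its buddy bit is
 * 258 ^ 1 = 259, i.e. the chunk at offset (259 & 255) * 16 = 48. The
 * buddy is only returned if it is currently free (its bittable bit set
 * and its bitmalloc bit clear).
 */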

static void *sh_malloc(size_t size)
{
    ossl_ssize_t list, slist;
    size_t i;
    char *chunk;

    if (size > sh.arena_size)
        return NULL;

    list = sh.freelist_size - 1;
    for (i = sh.minsize; i < size; i <<= 1)
        list--;
    if (list < 0)
        return NULL;

    /* try to find a larger entry to split */
    for (slist = list; slist >= 0; slist--)
        if (sh.freelist[slist] != NULL)
            break;
    if (slist < 0)
        return NULL;

    /* split larger entry */
    while (slist != list) {
        char *temp = sh.freelist[slist];

        /* remove from bigger list */
        OPENSSL_assert(!sh_testbit(temp, slist, sh.bitmalloc));
        sh_clearbit(temp, slist, sh.bittable);
        sh_remove_from_list(temp);
        OPENSSL_assert(temp != sh.freelist[slist]);

        /* done with bigger list */
        slist++;

        /* add to smaller list */
        OPENSSL_assert(!sh_testbit(temp, slist, sh.bitmalloc));
        sh_setbit(temp, slist, sh.bittable);
        sh_add_to_list(&sh.freelist[slist], temp);
        OPENSSL_assert(sh.freelist[slist] == temp);

        /* split in 2 */
        temp += sh.arena_size >> slist;
        OPENSSL_assert(!sh_testbit(temp, slist, sh.bitmalloc));
        sh_setbit(temp, slist, sh.bittable);
        sh_add_to_list(&sh.freelist[slist], temp);
        OPENSSL_assert(sh.freelist[slist] == temp);

        OPENSSL_assert(temp - (sh.arena_size >> slist) == sh_find_my_buddy(temp, slist));
    }

    /* peel off memory to hand back */
    chunk = sh.freelist[list];
    OPENSSL_assert(sh_testbit(chunk, list, sh.bittable));
    sh_setbit(chunk, list, sh.bitmalloc);
    sh_remove_from_list(chunk);

    OPENSSL_assert(WITHIN_ARENA(chunk));

    return chunk;
}

static void sh_free(void *ptr)
{
    size_t list;
    void *buddy;

    if (ptr == NULL)
        return;
    OPENSSL_assert(WITHIN_ARENA(ptr));
    if (!WITHIN_ARENA(ptr))
        return;

    list = sh_getlist(ptr);
    OPENSSL_assert(sh_testbit(ptr, list, sh.bittable));
    sh_clearbit(ptr, list, sh.bitmalloc);
    sh_add_to_list(&sh.freelist[list], ptr);

    /* Try to coalesce two adjacent free areas. */
    while ((buddy = sh_find_my_buddy(ptr, list)) != NULL) {
        OPENSSL_assert(ptr == sh_find_my_buddy(buddy, list));
        OPENSSL_assert(ptr != NULL);
        OPENSSL_assert(!sh_testbit(ptr, list, sh.bitmalloc));
        sh_clearbit(ptr, list, sh.bittable);
        sh_remove_from_list(ptr);
        OPENSSL_assert(!sh_testbit(buddy, list, sh.bitmalloc));
        sh_clearbit(buddy, list, sh.bittable);
        sh_remove_from_list(buddy);

        list--;

        if (ptr > buddy)
            ptr = buddy;

        OPENSSL_assert(!sh_testbit(ptr, list, sh.bitmalloc));
        sh_setbit(ptr, list, sh.bittable);
        sh_add_to_list(&sh.freelist[list], ptr);
        OPENSSL_assert(sh.freelist[list] == ptr);
    }
}

static size_t sh_actual_size(char *ptr)
{
    int list;

    OPENSSL_assert(WITHIN_ARENA(ptr));
    if (!WITHIN_ARENA(ptr))
        return 0;
    list = sh_getlist(ptr);
    OPENSSL_assert(sh_testbit(ptr, list, sh.bittable));
    return sh.arena_size / (ONE << list);
}
#endif /* IMPLEMENTED */