/*
 * Copyright 2015-2017 The OpenSSL Project Authors. All Rights Reserved.
 * Copyright 2004-2014, Akamai Technologies. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License").  You may not use
 * this file except in compliance with the License.  You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

/*
 * This file is in two halves. The first half implements the public API
 * to be used by external consumers, and to be used by OpenSSL to store
 * data in a "secure arena." The second half implements the secure arena.
 * For details on that implementation, see below (look for uppercase
 * "SECURE HEAP IMPLEMENTATION").
 */
#include <openssl/crypto.h>

#include <string.h>

#if defined(OPENSSL_SYS_LINUX) || defined(OPENSSL_SYS_UNIX)
# define IMPLEMENTED
# include <stdlib.h>
# include <errno.h>
# include <fcntl.h>
# include <unistd.h>
# include <sys/types.h>
# include <sys/mman.h>
# if defined(OPENSSL_SYS_LINUX)
#  include <sys/syscall.h>
#  include <linux/mman.h>
# endif
# include <sys/param.h>
# include <sys/stat.h>
#endif

#define CLEAR(p, s) OPENSSL_cleanse(p, s)
#ifndef PAGE_SIZE
# define PAGE_SIZE 4096
#endif
46 static size_t secure_mem_used
;
48 static int secure_mem_initialized
;
50 static CRYPTO_RWLOCK
*sec_malloc_lock
= NULL
;
53 * These are the functions that must be implemented by a secure heap (sh).
55 static int sh_init(size_t size
, int minsize
);
56 static void *sh_malloc(size_t size
);
57 static void sh_free(void *ptr
);
58 static void sh_done(void);
59 static size_t sh_actual_size(char *ptr
);
60 static int sh_allocated(const char *ptr
);
63 int CRYPTO_secure_malloc_init(size_t size
, int minsize
)
68 if (!secure_mem_initialized
) {
69 sec_malloc_lock
= CRYPTO_THREAD_lock_new();
70 if (sec_malloc_lock
== NULL
)
72 if ((ret
= sh_init(size
, minsize
)) != 0) {
73 secure_mem_initialized
= 1;
75 CRYPTO_THREAD_lock_free(sec_malloc_lock
);
76 sec_malloc_lock
= NULL
;
83 #endif /* IMPLEMENTED */
86 int CRYPTO_secure_malloc_done()
89 if (secure_mem_used
== 0) {
91 secure_mem_initialized
= 0;
92 CRYPTO_THREAD_lock_free(sec_malloc_lock
);
93 sec_malloc_lock
= NULL
;
96 #endif /* IMPLEMENTED */
100 int CRYPTO_secure_malloc_initialized()
103 return secure_mem_initialized
;
106 #endif /* IMPLEMENTED */
109 void *CRYPTO_secure_malloc(size_t num
, const char *file
, int line
)
115 if (!secure_mem_initialized
) {
116 return CRYPTO_malloc(num
, file
, line
);
118 CRYPTO_THREAD_write_lock(sec_malloc_lock
);
119 ret
= sh_malloc(num
);
120 actual_size
= ret
? sh_actual_size(ret
) : 0;
121 secure_mem_used
+= actual_size
;
122 CRYPTO_THREAD_unlock(sec_malloc_lock
);
125 return CRYPTO_malloc(num
, file
, line
);
126 #endif /* IMPLEMENTED */
129 void *CRYPTO_secure_zalloc(size_t num
, const char *file
, int line
)
131 void *ret
= CRYPTO_secure_malloc(num
, file
, line
);
138 void CRYPTO_secure_free(void *ptr
, const char *file
, int line
)
145 if (!CRYPTO_secure_allocated(ptr
)) {
146 CRYPTO_free(ptr
, file
, line
);
149 CRYPTO_THREAD_write_lock(sec_malloc_lock
);
150 actual_size
= sh_actual_size(ptr
);
151 CLEAR(ptr
, actual_size
);
152 secure_mem_used
-= actual_size
;
154 CRYPTO_THREAD_unlock(sec_malloc_lock
);
156 CRYPTO_free(ptr
, file
, line
);
157 #endif /* IMPLEMENTED */
160 void CRYPTO_secure_clear_free(void *ptr
, size_t num
,
161 const char *file
, int line
)
168 if (!CRYPTO_secure_allocated(ptr
)) {
169 OPENSSL_cleanse(ptr
, num
);
170 CRYPTO_free(ptr
, file
, line
);
173 CRYPTO_THREAD_write_lock(sec_malloc_lock
);
174 actual_size
= sh_actual_size(ptr
);
175 CLEAR(ptr
, actual_size
);
176 secure_mem_used
-= actual_size
;
178 CRYPTO_THREAD_unlock(sec_malloc_lock
);
182 OPENSSL_cleanse(ptr
, num
);
183 CRYPTO_free(ptr
, file
, line
);
184 #endif /* IMPLEMENTED */
187 int CRYPTO_secure_allocated(const void *ptr
)
192 if (!secure_mem_initialized
)
194 CRYPTO_THREAD_write_lock(sec_malloc_lock
);
195 ret
= sh_allocated(ptr
);
196 CRYPTO_THREAD_unlock(sec_malloc_lock
);
200 #endif /* IMPLEMENTED */
203 size_t CRYPTO_secure_used()
206 return secure_mem_used
;
209 #endif /* IMPLEMENTED */
212 size_t CRYPTO_secure_actual_size(void *ptr
)
217 CRYPTO_THREAD_write_lock(sec_malloc_lock
);
218 actual_size
= sh_actual_size(ptr
);
219 CRYPTO_THREAD_unlock(sec_malloc_lock
);
/*
 * SECURE HEAP IMPLEMENTATION
 */

/*
 * The implementation provided here uses a fixed-sized mmap() heap,
 * which is locked into memory, not written to core files, and protected
 * on either side by an unmapped page, which will catch pointer overruns
 * (or underruns) and an attempt to read data out of the secure heap.
 * Free'd memory is zero'd or otherwise cleansed.
 *
 * This is a pretty standard buddy allocator.  We keep areas in a multiple
 * of "sh.minsize" units.  The freelist and bitmaps are kept separately,
 * so all (and only) data is kept in the mmap'd heap.
 *
 * This code assumes eight-bit bytes.  The numbers 3 and 7 are all over the
 * place.
 */
250 #define ONE ((size_t)1)
252 # define TESTBIT(t, b) (t[(b) >> 3] & (ONE << ((b) & 7)))
253 # define SETBIT(t, b) (t[(b) >> 3] |= (ONE << ((b) & 7)))
254 # define CLEARBIT(t, b) (t[(b) >> 3] &= (0xFF & ~(ONE << ((b) & 7))))
256 #define WITHIN_ARENA(p) \
257 ((char*)(p) >= sh.arena && (char*)(p) < &sh.arena[sh.arena_size])
258 #define WITHIN_FREELIST(p) \
259 ((char*)(p) >= (char*)sh.freelist && (char*)(p) < (char*)&sh.freelist[sh.freelist_size])
262 typedef struct sh_list_st
264 struct sh_list_st
*next
;
265 struct sh_list_st
**p_next
;
275 ossl_ssize_t freelist_size
;
277 unsigned char *bittable
;
278 unsigned char *bitmalloc
;
279 size_t bittable_size
; /* size in bits */
284 static size_t sh_getlist(char *ptr
)
286 ossl_ssize_t list
= sh
.freelist_size
- 1;
287 size_t bit
= (sh
.arena_size
+ ptr
- sh
.arena
) / sh
.minsize
;
289 for (; bit
; bit
>>= 1, list
--) {
290 if (TESTBIT(sh
.bittable
, bit
))
292 OPENSSL_assert((bit
& 1) == 0);
299 static int sh_testbit(char *ptr
, int list
, unsigned char *table
)
303 OPENSSL_assert(list
>= 0 && list
< sh
.freelist_size
);
304 OPENSSL_assert(((ptr
- sh
.arena
) & ((sh
.arena_size
>> list
) - 1)) == 0);
305 bit
= (ONE
<< list
) + ((ptr
- sh
.arena
) / (sh
.arena_size
>> list
));
306 OPENSSL_assert(bit
> 0 && bit
< sh
.bittable_size
);
307 return TESTBIT(table
, bit
);
310 static void sh_clearbit(char *ptr
, int list
, unsigned char *table
)
314 OPENSSL_assert(list
>= 0 && list
< sh
.freelist_size
);
315 OPENSSL_assert(((ptr
- sh
.arena
) & ((sh
.arena_size
>> list
) - 1)) == 0);
316 bit
= (ONE
<< list
) + ((ptr
- sh
.arena
) / (sh
.arena_size
>> list
));
317 OPENSSL_assert(bit
> 0 && bit
< sh
.bittable_size
);
318 OPENSSL_assert(TESTBIT(table
, bit
));
319 CLEARBIT(table
, bit
);
322 static void sh_setbit(char *ptr
, int list
, unsigned char *table
)
326 OPENSSL_assert(list
>= 0 && list
< sh
.freelist_size
);
327 OPENSSL_assert(((ptr
- sh
.arena
) & ((sh
.arena_size
>> list
) - 1)) == 0);
328 bit
= (ONE
<< list
) + ((ptr
- sh
.arena
) / (sh
.arena_size
>> list
));
329 OPENSSL_assert(bit
> 0 && bit
< sh
.bittable_size
);
330 OPENSSL_assert(!TESTBIT(table
, bit
));
334 static void sh_add_to_list(char **list
, char *ptr
)
338 OPENSSL_assert(WITHIN_FREELIST(list
));
339 OPENSSL_assert(WITHIN_ARENA(ptr
));
341 temp
= (SH_LIST
*)ptr
;
342 temp
->next
= *(SH_LIST
**)list
;
343 OPENSSL_assert(temp
->next
== NULL
|| WITHIN_ARENA(temp
->next
));
344 temp
->p_next
= (SH_LIST
**)list
;
346 if (temp
->next
!= NULL
) {
347 OPENSSL_assert((char **)temp
->next
->p_next
== list
);
348 temp
->next
->p_next
= &(temp
->next
);
354 static void sh_remove_from_list(char *ptr
)
356 SH_LIST
*temp
, *temp2
;
358 temp
= (SH_LIST
*)ptr
;
359 if (temp
->next
!= NULL
)
360 temp
->next
->p_next
= temp
->p_next
;
361 *temp
->p_next
= temp
->next
;
362 if (temp
->next
== NULL
)
366 OPENSSL_assert(WITHIN_FREELIST(temp2
->p_next
) || WITHIN_ARENA(temp2
->p_next
));
370 static int sh_init(size_t size
, int minsize
)
377 memset(&sh
, 0, sizeof sh
);
379 /* make sure size and minsize are powers of 2 */
380 OPENSSL_assert(size
> 0);
381 OPENSSL_assert((size
& (size
- 1)) == 0);
382 OPENSSL_assert(minsize
> 0);
383 OPENSSL_assert((minsize
& (minsize
- 1)) == 0);
384 if (size
<= 0 || (size
& (size
- 1)) != 0)
386 if (minsize
<= 0 || (minsize
& (minsize
- 1)) != 0)
389 while (minsize
< (int)sizeof(SH_LIST
))
392 sh
.arena_size
= size
;
393 sh
.minsize
= minsize
;
394 sh
.bittable_size
= (sh
.arena_size
/ sh
.minsize
) * 2;
396 /* Prevent allocations of size 0 later on */
397 if (sh
.bittable_size
>> 3 == 0)
400 sh
.freelist_size
= -1;
401 for (i
= sh
.bittable_size
; i
; i
>>= 1)
404 sh
.freelist
= OPENSSL_zalloc(sh
.freelist_size
* sizeof (char *));
405 OPENSSL_assert(sh
.freelist
!= NULL
);
406 if (sh
.freelist
== NULL
)
409 sh
.bittable
= OPENSSL_zalloc(sh
.bittable_size
>> 3);
410 OPENSSL_assert(sh
.bittable
!= NULL
);
411 if (sh
.bittable
== NULL
)
414 sh
.bitmalloc
= OPENSSL_zalloc(sh
.bittable_size
>> 3);
415 OPENSSL_assert(sh
.bitmalloc
!= NULL
);
416 if (sh
.bitmalloc
== NULL
)
419 /* Allocate space for heap, and two extra pages as guards */
420 #if defined(_SC_PAGE_SIZE) || defined (_SC_PAGESIZE)
422 # if defined(_SC_PAGE_SIZE)
423 long tmppgsize
= sysconf(_SC_PAGE_SIZE
);
425 long tmppgsize
= sysconf(_SC_PAGESIZE
);
430 pgsize
= (size_t)tmppgsize
;
435 sh
.map_size
= pgsize
+ sh
.arena_size
+ pgsize
;
438 sh
.map_result
= mmap(NULL
, sh
.map_size
,
439 PROT_READ
|PROT_WRITE
, MAP_ANON
|MAP_PRIVATE
, -1, 0);
444 sh
.map_result
= MAP_FAILED
;
445 if ((fd
= open("/dev/zero", O_RDWR
)) >= 0) {
446 sh
.map_result
= mmap(NULL
, sh
.map_size
,
447 PROT_READ
|PROT_WRITE
, MAP_PRIVATE
, fd
, 0);
451 if (sh
.map_result
== MAP_FAILED
)
453 sh
.arena
= (char *)(sh
.map_result
+ pgsize
);
454 sh_setbit(sh
.arena
, 0, sh
.bittable
);
455 sh_add_to_list(&sh
.freelist
[0], sh
.arena
);
457 /* Now try to add guard pages and lock into memory. */
460 /* Starting guard is already aligned from mmap. */
461 if (mprotect(sh
.map_result
, pgsize
, PROT_NONE
) < 0)
464 /* Ending guard page - need to round up to page boundary */
465 aligned
= (pgsize
+ sh
.arena_size
+ (pgsize
- 1)) & ~(pgsize
- 1);
466 if (mprotect(sh
.map_result
+ aligned
, pgsize
, PROT_NONE
) < 0)
469 #if defined(OPENSSL_SYS_LINUX) && defined(MLOCK_ONFAULT) && defined(SYS_mlock2)
470 if (syscall(SYS_mlock2
, sh
.arena
, sh
.arena_size
, MLOCK_ONFAULT
) < 0) {
471 if (errno
== ENOSYS
) {
472 if (mlock(sh
.arena
, sh
.arena_size
) < 0)
479 if (mlock(sh
.arena
, sh
.arena_size
) < 0)
483 if (madvise(sh
.arena
, sh
.arena_size
, MADV_DONTDUMP
) < 0)
494 static void sh_done()
496 OPENSSL_free(sh
.freelist
);
497 OPENSSL_free(sh
.bittable
);
498 OPENSSL_free(sh
.bitmalloc
);
499 if (sh
.map_result
!= NULL
&& sh
.map_size
)
500 munmap(sh
.map_result
, sh
.map_size
);
501 memset(&sh
, 0, sizeof sh
);
504 static int sh_allocated(const char *ptr
)
506 return WITHIN_ARENA(ptr
) ? 1 : 0;
509 static char *sh_find_my_buddy(char *ptr
, int list
)
514 bit
= (ONE
<< list
) + (ptr
- sh
.arena
) / (sh
.arena_size
>> list
);
517 if (TESTBIT(sh
.bittable
, bit
) && !TESTBIT(sh
.bitmalloc
, bit
))
518 chunk
= sh
.arena
+ ((bit
& ((ONE
<< list
) - 1)) * (sh
.arena_size
>> list
));
523 static void *sh_malloc(size_t size
)
525 ossl_ssize_t list
, slist
;
529 if (size
> sh
.arena_size
)
532 list
= sh
.freelist_size
- 1;
533 for (i
= sh
.minsize
; i
< size
; i
<<= 1)
538 /* try to find a larger entry to split */
539 for (slist
= list
; slist
>= 0; slist
--)
540 if (sh
.freelist
[slist
] != NULL
)
545 /* split larger entry */
546 while (slist
!= list
) {
547 char *temp
= sh
.freelist
[slist
];
549 /* remove from bigger list */
550 OPENSSL_assert(!sh_testbit(temp
, slist
, sh
.bitmalloc
));
551 sh_clearbit(temp
, slist
, sh
.bittable
);
552 sh_remove_from_list(temp
);
553 OPENSSL_assert(temp
!= sh
.freelist
[slist
]);
555 /* done with bigger list */
558 /* add to smaller list */
559 OPENSSL_assert(!sh_testbit(temp
, slist
, sh
.bitmalloc
));
560 sh_setbit(temp
, slist
, sh
.bittable
);
561 sh_add_to_list(&sh
.freelist
[slist
], temp
);
562 OPENSSL_assert(sh
.freelist
[slist
] == temp
);
565 temp
+= sh
.arena_size
>> slist
;
566 OPENSSL_assert(!sh_testbit(temp
, slist
, sh
.bitmalloc
));
567 sh_setbit(temp
, slist
, sh
.bittable
);
568 sh_add_to_list(&sh
.freelist
[slist
], temp
);
569 OPENSSL_assert(sh
.freelist
[slist
] == temp
);
571 OPENSSL_assert(temp
-(sh
.arena_size
>> slist
) == sh_find_my_buddy(temp
, slist
));
574 /* peel off memory to hand back */
575 chunk
= sh
.freelist
[list
];
576 OPENSSL_assert(sh_testbit(chunk
, list
, sh
.bittable
));
577 sh_setbit(chunk
, list
, sh
.bitmalloc
);
578 sh_remove_from_list(chunk
);
580 OPENSSL_assert(WITHIN_ARENA(chunk
));
585 static void sh_free(void *ptr
)
592 OPENSSL_assert(WITHIN_ARENA(ptr
));
593 if (!WITHIN_ARENA(ptr
))
596 list
= sh_getlist(ptr
);
597 OPENSSL_assert(sh_testbit(ptr
, list
, sh
.bittable
));
598 sh_clearbit(ptr
, list
, sh
.bitmalloc
);
599 sh_add_to_list(&sh
.freelist
[list
], ptr
);
601 /* Try to coalesce two adjacent free areas. */
602 while ((buddy
= sh_find_my_buddy(ptr
, list
)) != NULL
) {
603 OPENSSL_assert(ptr
== sh_find_my_buddy(buddy
, list
));
604 OPENSSL_assert(ptr
!= NULL
);
605 OPENSSL_assert(!sh_testbit(ptr
, list
, sh
.bitmalloc
));
606 sh_clearbit(ptr
, list
, sh
.bittable
);
607 sh_remove_from_list(ptr
);
608 OPENSSL_assert(!sh_testbit(ptr
, list
, sh
.bitmalloc
));
609 sh_clearbit(buddy
, list
, sh
.bittable
);
610 sh_remove_from_list(buddy
);
617 OPENSSL_assert(!sh_testbit(ptr
, list
, sh
.bitmalloc
));
618 sh_setbit(ptr
, list
, sh
.bittable
);
619 sh_add_to_list(&sh
.freelist
[list
], ptr
);
620 OPENSSL_assert(sh
.freelist
[list
] == ptr
);
624 static size_t sh_actual_size(char *ptr
)
628 OPENSSL_assert(WITHIN_ARENA(ptr
));
629 if (!WITHIN_ARENA(ptr
))
631 list
= sh_getlist(ptr
);
632 OPENSSL_assert(sh_testbit(ptr
, list
, sh
.bittable
));
633 return sh
.arena_size
/ (ONE
<< list
);
635 #endif /* IMPLEMENTED */