/*
 * Copyright 2015-2020 The OpenSSL Project Authors. All Rights Reserved.
 * Copyright 2004-2014, Akamai Technologies. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License").  You may not use
 * this file except in compliance with the License.  You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

/*
 * This file is in two halves. The first half implements the public API
 * to be used by external consumers, and to be used by OpenSSL to store
 * data in a "secure arena." The second half implements the secure arena.
 * For details on that implementation, see below (look for uppercase
 * "SECURE HEAP IMPLEMENTATION").
 */
19 #include <openssl/crypto.h>
23 #ifndef OPENSSL_NO_SECURE_MEMORY
29 # if defined(OPENSSL_SYS_UNIX)
32 # include <sys/types.h>
33 # if defined(OPENSSL_SYS_UNIX)
34 # include <sys/mman.h>
35 # if defined(__FreeBSD__)
36 # define MADV_DONTDUMP MADV_NOCORE
38 # if !defined(MAP_CONCEAL)
39 # define MAP_CONCEAL 0
42 # if defined(OPENSSL_SYS_LINUX)
43 # include <sys/syscall.h>
44 # if defined(SYS_mlock2)
45 # include <linux/mman.h>
48 # include <sys/param.h>
50 # include <sys/stat.h>
54 #define CLEAR(p, s) OPENSSL_cleanse(p, s)
56 # define PAGE_SIZE 4096
58 #if !defined(MAP_ANON) && defined(MAP_ANONYMOUS)
59 # define MAP_ANON MAP_ANONYMOUS
62 #ifndef OPENSSL_NO_SECURE_MEMORY
63 static size_t secure_mem_used
;
65 static int secure_mem_initialized
;
67 static CRYPTO_RWLOCK
*sec_malloc_lock
= NULL
;
70 * These are the functions that must be implemented by a secure heap (sh).
72 static int sh_init(size_t size
, size_t minsize
);
73 static void *sh_malloc(size_t size
);
74 static void sh_free(void *ptr
);
75 static void sh_done(void);
76 static size_t sh_actual_size(char *ptr
);
77 static int sh_allocated(const char *ptr
);
80 int CRYPTO_secure_malloc_init(size_t size
, size_t minsize
)
82 #ifndef OPENSSL_NO_SECURE_MEMORY
85 if (!secure_mem_initialized
) {
86 sec_malloc_lock
= CRYPTO_THREAD_lock_new();
87 if (sec_malloc_lock
== NULL
)
89 if ((ret
= sh_init(size
, minsize
)) != 0) {
90 secure_mem_initialized
= 1;
92 CRYPTO_THREAD_lock_free(sec_malloc_lock
);
93 sec_malloc_lock
= NULL
;
100 #endif /* OPENSSL_NO_SECURE_MEMORY */
103 int CRYPTO_secure_malloc_done(void)
105 #ifndef OPENSSL_NO_SECURE_MEMORY
106 if (secure_mem_used
== 0) {
108 secure_mem_initialized
= 0;
109 CRYPTO_THREAD_lock_free(sec_malloc_lock
);
110 sec_malloc_lock
= NULL
;
113 #endif /* OPENSSL_NO_SECURE_MEMORY */
117 int CRYPTO_secure_malloc_initialized(void)
119 #ifndef OPENSSL_NO_SECURE_MEMORY
120 return secure_mem_initialized
;
123 #endif /* OPENSSL_NO_SECURE_MEMORY */
126 void *CRYPTO_secure_malloc(size_t num
, const char *file
, int line
)
128 #ifndef OPENSSL_NO_SECURE_MEMORY
132 if (!secure_mem_initialized
) {
133 return CRYPTO_malloc(num
, file
, line
);
135 if (!CRYPTO_THREAD_write_lock(sec_malloc_lock
))
137 ret
= sh_malloc(num
);
138 actual_size
= ret
? sh_actual_size(ret
) : 0;
139 secure_mem_used
+= actual_size
;
140 CRYPTO_THREAD_unlock(sec_malloc_lock
);
143 return CRYPTO_malloc(num
, file
, line
);
144 #endif /* OPENSSL_NO_SECURE_MEMORY */
147 void *CRYPTO_secure_zalloc(size_t num
, const char *file
, int line
)
149 #ifndef OPENSSL_NO_SECURE_MEMORY
150 if (secure_mem_initialized
)
151 /* CRYPTO_secure_malloc() zeroes allocations when it is implemented */
152 return CRYPTO_secure_malloc(num
, file
, line
);
154 return CRYPTO_zalloc(num
, file
, line
);
157 void CRYPTO_secure_free(void *ptr
, const char *file
, int line
)
159 #ifndef OPENSSL_NO_SECURE_MEMORY
164 if (!CRYPTO_secure_allocated(ptr
)) {
165 CRYPTO_free(ptr
, file
, line
);
168 if (!CRYPTO_THREAD_write_lock(sec_malloc_lock
))
170 actual_size
= sh_actual_size(ptr
);
171 CLEAR(ptr
, actual_size
);
172 secure_mem_used
-= actual_size
;
174 CRYPTO_THREAD_unlock(sec_malloc_lock
);
176 CRYPTO_free(ptr
, file
, line
);
177 #endif /* OPENSSL_NO_SECURE_MEMORY */
180 void CRYPTO_secure_clear_free(void *ptr
, size_t num
,
181 const char *file
, int line
)
183 #ifndef OPENSSL_NO_SECURE_MEMORY
188 if (!CRYPTO_secure_allocated(ptr
)) {
189 OPENSSL_cleanse(ptr
, num
);
190 CRYPTO_free(ptr
, file
, line
);
193 if (!CRYPTO_THREAD_write_lock(sec_malloc_lock
))
195 actual_size
= sh_actual_size(ptr
);
196 CLEAR(ptr
, actual_size
);
197 secure_mem_used
-= actual_size
;
199 CRYPTO_THREAD_unlock(sec_malloc_lock
);
203 OPENSSL_cleanse(ptr
, num
);
204 CRYPTO_free(ptr
, file
, line
);
205 #endif /* OPENSSL_NO_SECURE_MEMORY */
208 int CRYPTO_secure_allocated(const void *ptr
)
210 #ifndef OPENSSL_NO_SECURE_MEMORY
213 if (!secure_mem_initialized
)
215 if (!CRYPTO_THREAD_write_lock(sec_malloc_lock
))
217 ret
= sh_allocated(ptr
);
218 CRYPTO_THREAD_unlock(sec_malloc_lock
);
222 #endif /* OPENSSL_NO_SECURE_MEMORY */
225 size_t CRYPTO_secure_used(void)
227 #ifndef OPENSSL_NO_SECURE_MEMORY
228 return secure_mem_used
;
231 #endif /* OPENSSL_NO_SECURE_MEMORY */
234 size_t CRYPTO_secure_actual_size(void *ptr
)
236 #ifndef OPENSSL_NO_SECURE_MEMORY
239 if (!CRYPTO_THREAD_write_lock(sec_malloc_lock
))
241 actual_size
= sh_actual_size(ptr
);
242 CRYPTO_THREAD_unlock(sec_malloc_lock
);
/*
 * SECURE HEAP IMPLEMENTATION
 */

/*
 * The implementation provided here uses a fixed-sized mmap() heap,
 * which is locked into memory, not written to core files, and protected
 * on either side by an unmapped page, which will catch pointer overruns
 * (or underruns) and an attempt to read data out of the secure heap.
 * Free'd memory is zero'd or otherwise cleansed.
 *
 * This is a pretty standard buddy allocator.  We keep areas in a multiple
 * of "sh.minsize" units.  The freelist and bitmaps are kept separately,
 * so all (and only) data is kept in the mmap'd heap.
 *
 * This code assumes eight-bit bytes.  The numbers 3 and 7 are all over the
 * place.
 */
270 #define ONE ((size_t)1)
272 # define TESTBIT(t, b) (t[(b) >> 3] & (ONE << ((b) & 7)))
273 # define SETBIT(t, b) (t[(b) >> 3] |= (ONE << ((b) & 7)))
274 # define CLEARBIT(t, b) (t[(b) >> 3] &= (0xFF & ~(ONE << ((b) & 7))))
276 #define WITHIN_ARENA(p) \
277 ((char*)(p) >= sh.arena && (char*)(p) < &sh.arena[sh.arena_size])
278 #define WITHIN_FREELIST(p) \
279 ((char*)(p) >= (char*)sh.freelist && (char*)(p) < (char*)&sh.freelist[sh.freelist_size])
282 typedef struct sh_list_st
284 struct sh_list_st
*next
;
285 struct sh_list_st
**p_next
;
295 ossl_ssize_t freelist_size
;
297 unsigned char *bittable
;
298 unsigned char *bitmalloc
;
299 size_t bittable_size
; /* size in bits */
304 static size_t sh_getlist(char *ptr
)
306 ossl_ssize_t list
= sh
.freelist_size
- 1;
307 size_t bit
= (sh
.arena_size
+ ptr
- sh
.arena
) / sh
.minsize
;
309 for (; bit
; bit
>>= 1, list
--) {
310 if (TESTBIT(sh
.bittable
, bit
))
312 OPENSSL_assert((bit
& 1) == 0);
319 static int sh_testbit(char *ptr
, int list
, unsigned char *table
)
323 OPENSSL_assert(list
>= 0 && list
< sh
.freelist_size
);
324 OPENSSL_assert(((ptr
- sh
.arena
) & ((sh
.arena_size
>> list
) - 1)) == 0);
325 bit
= (ONE
<< list
) + ((ptr
- sh
.arena
) / (sh
.arena_size
>> list
));
326 OPENSSL_assert(bit
> 0 && bit
< sh
.bittable_size
);
327 return TESTBIT(table
, bit
);
330 static void sh_clearbit(char *ptr
, int list
, unsigned char *table
)
334 OPENSSL_assert(list
>= 0 && list
< sh
.freelist_size
);
335 OPENSSL_assert(((ptr
- sh
.arena
) & ((sh
.arena_size
>> list
) - 1)) == 0);
336 bit
= (ONE
<< list
) + ((ptr
- sh
.arena
) / (sh
.arena_size
>> list
));
337 OPENSSL_assert(bit
> 0 && bit
< sh
.bittable_size
);
338 OPENSSL_assert(TESTBIT(table
, bit
));
339 CLEARBIT(table
, bit
);
342 static void sh_setbit(char *ptr
, int list
, unsigned char *table
)
346 OPENSSL_assert(list
>= 0 && list
< sh
.freelist_size
);
347 OPENSSL_assert(((ptr
- sh
.arena
) & ((sh
.arena_size
>> list
) - 1)) == 0);
348 bit
= (ONE
<< list
) + ((ptr
- sh
.arena
) / (sh
.arena_size
>> list
));
349 OPENSSL_assert(bit
> 0 && bit
< sh
.bittable_size
);
350 OPENSSL_assert(!TESTBIT(table
, bit
));
354 static void sh_add_to_list(char **list
, char *ptr
)
358 OPENSSL_assert(WITHIN_FREELIST(list
));
359 OPENSSL_assert(WITHIN_ARENA(ptr
));
361 temp
= (SH_LIST
*)ptr
;
362 temp
->next
= *(SH_LIST
**)list
;
363 OPENSSL_assert(temp
->next
== NULL
|| WITHIN_ARENA(temp
->next
));
364 temp
->p_next
= (SH_LIST
**)list
;
366 if (temp
->next
!= NULL
) {
367 OPENSSL_assert((char **)temp
->next
->p_next
== list
);
368 temp
->next
->p_next
= &(temp
->next
);
374 static void sh_remove_from_list(char *ptr
)
376 SH_LIST
*temp
, *temp2
;
378 temp
= (SH_LIST
*)ptr
;
379 if (temp
->next
!= NULL
)
380 temp
->next
->p_next
= temp
->p_next
;
381 *temp
->p_next
= temp
->next
;
382 if (temp
->next
== NULL
)
386 OPENSSL_assert(WITHIN_FREELIST(temp2
->p_next
) || WITHIN_ARENA(temp2
->p_next
));
390 static int sh_init(size_t size
, size_t minsize
)
398 SYSTEM_INFO systemInfo
;
401 memset(&sh
, 0, sizeof(sh
));
403 /* make sure size is a powers of 2 */
404 OPENSSL_assert(size
> 0);
405 OPENSSL_assert((size
& (size
- 1)) == 0);
406 if (size
== 0 || (size
& (size
- 1)) != 0)
409 if (minsize
<= sizeof(SH_LIST
)) {
410 OPENSSL_assert(sizeof(SH_LIST
) <= 65536);
412 * Compute the minimum possible allocation size.
413 * This must be a power of 2 and at least as large as the SH_LIST
416 minsize
= sizeof(SH_LIST
) - 1;
417 minsize
|= minsize
>> 1;
418 minsize
|= minsize
>> 2;
419 if (sizeof(SH_LIST
) > 16)
420 minsize
|= minsize
>> 4;
421 if (sizeof(SH_LIST
) > 256)
422 minsize
|= minsize
>> 8;
425 /* make sure minsize is a powers of 2 */
426 OPENSSL_assert((minsize
& (minsize
- 1)) == 0);
427 if ((minsize
& (minsize
- 1)) != 0)
431 sh
.arena_size
= size
;
432 sh
.minsize
= minsize
;
433 sh
.bittable_size
= (sh
.arena_size
/ sh
.minsize
) * 2;
435 /* Prevent allocations of size 0 later on */
436 if (sh
.bittable_size
>> 3 == 0)
439 sh
.freelist_size
= -1;
440 for (i
= sh
.bittable_size
; i
; i
>>= 1)
443 sh
.freelist
= OPENSSL_zalloc(sh
.freelist_size
* sizeof(char *));
444 OPENSSL_assert(sh
.freelist
!= NULL
);
445 if (sh
.freelist
== NULL
)
448 sh
.bittable
= OPENSSL_zalloc(sh
.bittable_size
>> 3);
449 OPENSSL_assert(sh
.bittable
!= NULL
);
450 if (sh
.bittable
== NULL
)
453 sh
.bitmalloc
= OPENSSL_zalloc(sh
.bittable_size
>> 3);
454 OPENSSL_assert(sh
.bitmalloc
!= NULL
);
455 if (sh
.bitmalloc
== NULL
)
458 /* Allocate space for heap, and two extra pages as guards */
459 #if defined(_SC_PAGE_SIZE) || defined (_SC_PAGESIZE)
461 # if defined(_SC_PAGE_SIZE)
462 long tmppgsize
= sysconf(_SC_PAGE_SIZE
);
464 long tmppgsize
= sysconf(_SC_PAGESIZE
);
469 pgsize
= (size_t)tmppgsize
;
471 #elif defined(_WIN32)
472 GetSystemInfo(&systemInfo
);
473 pgsize
= (size_t)systemInfo
.dwPageSize
;
477 sh
.map_size
= pgsize
+ sh
.arena_size
+ pgsize
;
481 sh
.map_result
= mmap(NULL
, sh
.map_size
,
482 PROT_READ
|PROT_WRITE
, MAP_ANON
|MAP_PRIVATE
|MAP_CONCEAL
, -1, 0);
487 sh
.map_result
= MAP_FAILED
;
488 if ((fd
= open("/dev/zero", O_RDWR
)) >= 0) {
489 sh
.map_result
= mmap(NULL
, sh
.map_size
,
490 PROT_READ
|PROT_WRITE
, MAP_PRIVATE
, fd
, 0);
495 if (sh
.map_result
== MAP_FAILED
)
498 sh
.map_result
= VirtualAlloc(NULL
, sh
.map_size
, MEM_COMMIT
| MEM_RESERVE
, PAGE_READWRITE
);
500 if (sh
.map_result
== NULL
)
504 sh
.arena
= (char *)(sh
.map_result
+ pgsize
);
505 sh_setbit(sh
.arena
, 0, sh
.bittable
);
506 sh_add_to_list(&sh
.freelist
[0], sh
.arena
);
508 /* Now try to add guard pages and lock into memory. */
512 /* Starting guard is already aligned from mmap. */
513 if (mprotect(sh
.map_result
, pgsize
, PROT_NONE
) < 0)
516 if (VirtualProtect(sh
.map_result
, pgsize
, PAGE_NOACCESS
, &flOldProtect
) == FALSE
)
520 /* Ending guard page - need to round up to page boundary */
521 aligned
= (pgsize
+ sh
.arena_size
+ (pgsize
- 1)) & ~(pgsize
- 1);
523 if (mprotect(sh
.map_result
+ aligned
, pgsize
, PROT_NONE
) < 0)
526 if (VirtualProtect(sh
.map_result
+ aligned
, pgsize
, PAGE_NOACCESS
, &flOldProtect
) == FALSE
)
530 #if defined(OPENSSL_SYS_LINUX) && defined(MLOCK_ONFAULT) && defined(SYS_mlock2)
531 if (syscall(SYS_mlock2
, sh
.arena
, sh
.arena_size
, MLOCK_ONFAULT
) < 0) {
532 if (errno
== ENOSYS
) {
533 if (mlock(sh
.arena
, sh
.arena_size
) < 0)
539 #elif defined(_WIN32)
540 if (VirtualLock(sh
.arena
, sh
.arena_size
) == FALSE
)
543 if (mlock(sh
.arena
, sh
.arena_size
) < 0)
547 if (madvise(sh
.arena
, sh
.arena_size
, MADV_DONTDUMP
) < 0)
558 static void sh_done(void)
560 OPENSSL_free(sh
.freelist
);
561 OPENSSL_free(sh
.bittable
);
562 OPENSSL_free(sh
.bitmalloc
);
564 if (sh
.map_result
!= MAP_FAILED
&& sh
.map_size
)
565 munmap(sh
.map_result
, sh
.map_size
);
567 if (sh
.map_result
!= NULL
&& sh
.map_size
)
568 VirtualFree(sh
.map_result
, 0, MEM_RELEASE
);
570 memset(&sh
, 0, sizeof(sh
));
/* 1 if |ptr| points inside the secure arena, else 0. */
static int sh_allocated(const char *ptr)
{
    return WITHIN_ARENA(ptr) ? 1 : 0;
}
578 static char *sh_find_my_buddy(char *ptr
, int list
)
583 bit
= (ONE
<< list
) + (ptr
- sh
.arena
) / (sh
.arena_size
>> list
);
586 if (TESTBIT(sh
.bittable
, bit
) && !TESTBIT(sh
.bitmalloc
, bit
))
587 chunk
= sh
.arena
+ ((bit
& ((ONE
<< list
) - 1)) * (sh
.arena_size
>> list
));
592 static void *sh_malloc(size_t size
)
594 ossl_ssize_t list
, slist
;
598 if (size
> sh
.arena_size
)
601 list
= sh
.freelist_size
- 1;
602 for (i
= sh
.minsize
; i
< size
; i
<<= 1)
607 /* try to find a larger entry to split */
608 for (slist
= list
; slist
>= 0; slist
--)
609 if (sh
.freelist
[slist
] != NULL
)
614 /* split larger entry */
615 while (slist
!= list
) {
616 char *temp
= sh
.freelist
[slist
];
618 /* remove from bigger list */
619 OPENSSL_assert(!sh_testbit(temp
, slist
, sh
.bitmalloc
));
620 sh_clearbit(temp
, slist
, sh
.bittable
);
621 sh_remove_from_list(temp
);
622 OPENSSL_assert(temp
!= sh
.freelist
[slist
]);
624 /* done with bigger list */
627 /* add to smaller list */
628 OPENSSL_assert(!sh_testbit(temp
, slist
, sh
.bitmalloc
));
629 sh_setbit(temp
, slist
, sh
.bittable
);
630 sh_add_to_list(&sh
.freelist
[slist
], temp
);
631 OPENSSL_assert(sh
.freelist
[slist
] == temp
);
634 temp
+= sh
.arena_size
>> slist
;
635 OPENSSL_assert(!sh_testbit(temp
, slist
, sh
.bitmalloc
));
636 sh_setbit(temp
, slist
, sh
.bittable
);
637 sh_add_to_list(&sh
.freelist
[slist
], temp
);
638 OPENSSL_assert(sh
.freelist
[slist
] == temp
);
640 OPENSSL_assert(temp
-(sh
.arena_size
>> slist
) == sh_find_my_buddy(temp
, slist
));
643 /* peel off memory to hand back */
644 chunk
= sh
.freelist
[list
];
645 OPENSSL_assert(sh_testbit(chunk
, list
, sh
.bittable
));
646 sh_setbit(chunk
, list
, sh
.bitmalloc
);
647 sh_remove_from_list(chunk
);
649 OPENSSL_assert(WITHIN_ARENA(chunk
));
651 /* zero the free list header as a precaution against information leakage */
652 memset(chunk
, 0, sizeof(SH_LIST
));
657 static void sh_free(void *ptr
)
664 OPENSSL_assert(WITHIN_ARENA(ptr
));
665 if (!WITHIN_ARENA(ptr
))
668 list
= sh_getlist(ptr
);
669 OPENSSL_assert(sh_testbit(ptr
, list
, sh
.bittable
));
670 sh_clearbit(ptr
, list
, sh
.bitmalloc
);
671 sh_add_to_list(&sh
.freelist
[list
], ptr
);
673 /* Try to coalesce two adjacent free areas. */
674 while ((buddy
= sh_find_my_buddy(ptr
, list
)) != NULL
) {
675 OPENSSL_assert(ptr
== sh_find_my_buddy(buddy
, list
));
676 OPENSSL_assert(ptr
!= NULL
);
677 OPENSSL_assert(!sh_testbit(ptr
, list
, sh
.bitmalloc
));
678 sh_clearbit(ptr
, list
, sh
.bittable
);
679 sh_remove_from_list(ptr
);
680 OPENSSL_assert(!sh_testbit(ptr
, list
, sh
.bitmalloc
));
681 sh_clearbit(buddy
, list
, sh
.bittable
);
682 sh_remove_from_list(buddy
);
686 /* Zero the higher addressed block's free list pointers */
687 memset(ptr
> buddy
? ptr
: buddy
, 0, sizeof(SH_LIST
));
691 OPENSSL_assert(!sh_testbit(ptr
, list
, sh
.bitmalloc
));
692 sh_setbit(ptr
, list
, sh
.bittable
);
693 sh_add_to_list(&sh
.freelist
[list
], ptr
);
694 OPENSSL_assert(sh
.freelist
[list
] == ptr
);
698 static size_t sh_actual_size(char *ptr
)
702 OPENSSL_assert(WITHIN_ARENA(ptr
));
703 if (!WITHIN_ARENA(ptr
))
705 list
= sh_getlist(ptr
);
706 OPENSSL_assert(sh_testbit(ptr
, list
, sh
.bittable
));
707 return sh
.arena_size
/ (ONE
<< list
);
709 #endif /* OPENSSL_NO_SECURE_MEMORY */