/*
 * Copyright 2015-2018 The OpenSSL Project Authors. All Rights Reserved.
 * Copyright 2004-2014, Akamai Technologies. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License").  You may not use
 * this file except in compliance with the License.  You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

/*
 * This file is in two halves. The first half implements the public API
 * to be used by external consumers, and to be used by OpenSSL to store
 * data in a "secure arena." The second half implements the secure arena.
 * For details on that implementation, see below (look for uppercase
 * "SECURE HEAP IMPLEMENTATION").
 */
22 #include <openssl/crypto.h>
27 /* e_os.h includes unistd.h, which defines _POSIX_VERSION */
28 #if !defined(OPENSSL_NO_SECURE_MEMORY) && defined(OPENSSL_SYS_UNIX) \
29 && ( (defined(_POSIX_VERSION) && _POSIX_VERSION >= 200112L) \
30 || defined(__sun) || defined(__hpux) || defined(__sgi) \
36 # include <sys/types.h>
37 # include <sys/mman.h>
38 # include <sys/param.h>
39 # include <sys/stat.h>
43 #define CLEAR(p, s) OPENSSL_cleanse(p, s)
45 # define PAGE_SIZE 4096
47 #if !defined(MAP_ANON) && defined(MAP_ANONYMOUS)
48 # define MAP_ANON MAP_ANONYMOUS
52 static size_t secure_mem_used
;
54 static int secure_mem_initialized
;
56 static CRYPTO_RWLOCK
*sec_malloc_lock
= NULL
;
/*
 * These are the functions that must be implemented by a secure heap (sh).
 */
static int sh_init(size_t size, int minsize);
static char *sh_malloc(size_t size);
static void sh_free(char *ptr);
static void sh_done(void);
static size_t sh_actual_size(char *ptr);
static int sh_allocated(const char *ptr);
/*
 * Initialize the secure heap: create the lock, then build the arena.
 * Returns 0 on failure, 1 on full success, 2 if the heap was created but
 * could not be fully protected/locked (sh_init()'s return value).
 * A second call while already initialized is a no-op returning 0.
 */
int CRYPTO_secure_malloc_init(size_t size, int minsize)
{
#ifdef IMPLEMENTED
    int ret = 0;

    if (!secure_mem_initialized) {
        sec_malloc_lock = CRYPTO_THREAD_lock_new();
        if (sec_malloc_lock == NULL)
            return 0;
        if ((ret = sh_init(size, minsize)) != 0) {
            secure_mem_initialized = 1;
        } else {
            /* Arena creation failed: tear the lock back down. */
            CRYPTO_THREAD_lock_free(sec_malloc_lock);
            sec_malloc_lock = NULL;
        }
    }

    return ret;
#else
    return 0;
#endif /* IMPLEMENTED */
}
/*
 * Tear down the secure heap.  Only succeeds (returns 1) when no secure
 * allocations are outstanding; otherwise leaves everything in place and
 * returns 0.
 *
 * Fix: declare with (void) instead of the obsolescent empty parameter
 * list, which declares an unchecked-arguments function in C.
 */
int CRYPTO_secure_malloc_done(void)
{
#ifdef IMPLEMENTED
    if (secure_mem_used == 0) {
        sh_done();
        secure_mem_initialized = 0;
        CRYPTO_THREAD_lock_free(sec_malloc_lock);
        sec_malloc_lock = NULL;
        return 1;
    }
#endif /* IMPLEMENTED */
    return 0;
}
/*
 * Report whether the secure heap is up (1) or not (0).
 *
 * Fix: declare with (void) instead of the obsolescent empty parameter
 * list.
 */
int CRYPTO_secure_malloc_initialized(void)
{
#ifdef IMPLEMENTED
    return secure_mem_initialized;
#else
    return 0;
#endif /* IMPLEMENTED */
}
/*
 * Allocate from the secure heap; falls back to the ordinary allocator
 * when the secure heap is unavailable.  The accounting of bytes in use
 * is done under sec_malloc_lock.
 */
void *CRYPTO_secure_malloc(size_t num, const char *file, int line)
{
#ifdef IMPLEMENTED
    void *ret;
    size_t actual_size;

    if (!secure_mem_initialized) {
        return CRYPTO_malloc(num, file, line);
    }
    CRYPTO_THREAD_write_lock(sec_malloc_lock);
    ret = sh_malloc(num);
    /* Charge the rounded-up chunk size, not the requested size. */
    actual_size = ret ? sh_actual_size(ret) : 0;
    secure_mem_used += actual_size;
    CRYPTO_THREAD_unlock(sec_malloc_lock);
    return ret;
#else
    return CRYPTO_malloc(num, file, line);
#endif /* IMPLEMENTED */
}
/*
 * Zero-filled secure allocation.  The secure heap clears chunks itself,
 * so when it is active a plain CRYPTO_secure_malloc() suffices; the
 * fallback path must zero explicitly via CRYPTO_zalloc().
 */
void *CRYPTO_secure_zalloc(size_t num, const char *file, int line)
{
#ifdef IMPLEMENTED
    if (secure_mem_initialized)
        /* CRYPTO_secure_malloc() zeroes allocations when it is implemented */
        return CRYPTO_secure_malloc(num, file, line);
#endif
    return CRYPTO_zalloc(num, file, line);
}
/*
 * Release a pointer previously obtained from CRYPTO_secure_malloc().
 * Pointers that do not live in the secure arena are handed to the
 * regular CRYPTO_free().  Secure chunks are cleansed before release.
 */
void CRYPTO_secure_free(void *ptr, const char *file, int line)
{
#ifdef IMPLEMENTED
    size_t actual_size;

    if (ptr == NULL)
        return;
    if (!CRYPTO_secure_allocated(ptr)) {
        CRYPTO_free(ptr, file, line);
        return;
    }
    CRYPTO_THREAD_write_lock(sec_malloc_lock);
    actual_size = sh_actual_size(ptr);
    /* Scrub the whole chunk, not just the caller-visible portion. */
    CLEAR(ptr, actual_size);
    secure_mem_used -= actual_size;
    sh_free(ptr);
    CRYPTO_THREAD_unlock(sec_malloc_lock);
#else
    CRYPTO_free(ptr, file, line);
#endif /* IMPLEMENTED */
}
/*
 * Like CRYPTO_secure_free(), but the caller supplies the size so that
 * non-arena pointers can also be cleansed before being freed.
 */
void CRYPTO_secure_clear_free(void *ptr, size_t num,
                              const char *file, int line)
{
#ifdef IMPLEMENTED
    size_t actual_size;

    if (ptr == NULL)
        return;
    if (!CRYPTO_secure_allocated(ptr)) {
        OPENSSL_cleanse(ptr, num);
        CRYPTO_free(ptr, file, line);
        return;
    }
    CRYPTO_THREAD_write_lock(sec_malloc_lock);
    actual_size = sh_actual_size(ptr);
    /* Scrub the entire chunk before returning it to the arena. */
    CLEAR(ptr, actual_size);
    secure_mem_used -= actual_size;
    sh_free(ptr);
    CRYPTO_THREAD_unlock(sec_malloc_lock);
#else
    if (ptr == NULL)
        return;
    OPENSSL_cleanse(ptr, num);
    CRYPTO_free(ptr, file, line);
#endif /* IMPLEMENTED */
}
/*
 * Return 1 if ptr lies inside the secure arena, 0 otherwise (including
 * when the secure heap is not initialized).
 */
int CRYPTO_secure_allocated(const void *ptr)
{
#ifdef IMPLEMENTED
    int ret;

    if (!secure_mem_initialized)
        return 0;
    CRYPTO_THREAD_write_lock(sec_malloc_lock);
    ret = sh_allocated(ptr);
    CRYPTO_THREAD_unlock(sec_malloc_lock);
    return ret;
#else
    return 0;
#endif /* IMPLEMENTED */
}
/*
 * Number of bytes currently allocated out of the secure heap.
 *
 * Fix: declare with (void) instead of the obsolescent empty parameter
 * list.
 */
size_t CRYPTO_secure_used(void)
{
#ifdef IMPLEMENTED
    return secure_mem_used;
#else
    return 0;
#endif /* IMPLEMENTED */
}
/*
 * Size of the chunk backing ptr (the rounded-up allocation size), or 0
 * when the secure heap is not implemented.  ptr must be a secure-heap
 * pointer.
 */
size_t CRYPTO_secure_actual_size(void *ptr)
{
#ifdef IMPLEMENTED
    size_t actual_size;

    CRYPTO_THREAD_write_lock(sec_malloc_lock);
    actual_size = sh_actual_size(ptr);
    CRYPTO_THREAD_unlock(sec_malloc_lock);
    return actual_size;
#else
    return 0;
#endif /* IMPLEMENTED */
}
/*
 * SECURE HEAP IMPLEMENTATION
 *
 * The implementation provided here uses a fixed-sized mmap() heap,
 * which is locked into memory, not written to core files, and protected
 * on either side by an unmapped page, which will catch pointer overruns
 * (or underruns) and an attempt to read data out of the secure heap.
 * Free'd memory is zero'd or otherwise cleansed.
 *
 * This is a pretty standard buddy allocator.  We keep areas in a multiple
 * of "sh.minsize" units.  The freelist and bitmaps are kept separately,
 * so all (and only) data is kept in the mmap'd heap.
 *
 * This code assumes eight-bit bytes.  The numbers 3 and 7 are all over
 * the place.
 */
/* A size_t 1, so the shifts below are done at size_t width. */
#define ONE ((size_t)1)

/* Bit b of byte table t: test / set / clear. */
# define TESTBIT(t, b)  (t[(b) >> 3] & (ONE << ((b) & 7)))
# define SETBIT(t, b)   (t[(b) >> 3] |= (ONE << ((b) & 7)))
# define CLEARBIT(t, b) (t[(b) >> 3] &= (0xFF & ~(ONE << ((b) & 7))))

/* Does p point into the data arena / into the freelist array? */
#define WITHIN_ARENA(p) \
    ((char*)(p) >= sh.arena && (char*)(p) < &sh.arena[sh.arena_size])
#define WITHIN_FREELIST(p) \
    ((char*)(p) >= (char*)sh.freelist && (char*)(p) < (char*)&sh.freelist[sh.freelist_size])
269 typedef struct sh_list_st
271 struct sh_list_st
*next
;
272 struct sh_list_st
**p_next
;
282 ossl_ssize_t freelist_size
;
284 unsigned char *bittable
;
285 unsigned char *bitmalloc
;
286 size_t bittable_size
; /* size in bits */
291 static size_t sh_getlist(char *ptr
)
293 ossl_ssize_t list
= sh
.freelist_size
- 1;
294 size_t bit
= (sh
.arena_size
+ ptr
- sh
.arena
) / sh
.minsize
;
296 for (; bit
; bit
>>= 1, list
--) {
297 if (TESTBIT(sh
.bittable
, bit
))
299 OPENSSL_assert((bit
& 1) == 0);
306 static int sh_testbit(char *ptr
, int list
, unsigned char *table
)
310 OPENSSL_assert(list
>= 0 && list
< sh
.freelist_size
);
311 OPENSSL_assert(((ptr
- sh
.arena
) & ((sh
.arena_size
>> list
) - 1)) == 0);
312 bit
= (ONE
<< list
) + ((ptr
- sh
.arena
) / (sh
.arena_size
>> list
));
313 OPENSSL_assert(bit
> 0 && bit
< sh
.bittable_size
);
314 return TESTBIT(table
, bit
);
317 static void sh_clearbit(char *ptr
, int list
, unsigned char *table
)
321 OPENSSL_assert(list
>= 0 && list
< sh
.freelist_size
);
322 OPENSSL_assert(((ptr
- sh
.arena
) & ((sh
.arena_size
>> list
) - 1)) == 0);
323 bit
= (ONE
<< list
) + ((ptr
- sh
.arena
) / (sh
.arena_size
>> list
));
324 OPENSSL_assert(bit
> 0 && bit
< sh
.bittable_size
);
325 OPENSSL_assert(TESTBIT(table
, bit
));
326 CLEARBIT(table
, bit
);
329 static void sh_setbit(char *ptr
, int list
, unsigned char *table
)
333 OPENSSL_assert(list
>= 0 && list
< sh
.freelist_size
);
334 OPENSSL_assert(((ptr
- sh
.arena
) & ((sh
.arena_size
>> list
) - 1)) == 0);
335 bit
= (ONE
<< list
) + ((ptr
- sh
.arena
) / (sh
.arena_size
>> list
));
336 OPENSSL_assert(bit
> 0 && bit
< sh
.bittable_size
);
337 OPENSSL_assert(!TESTBIT(table
, bit
));
341 static void sh_add_to_list(char **list
, char *ptr
)
345 OPENSSL_assert(WITHIN_FREELIST(list
));
346 OPENSSL_assert(WITHIN_ARENA(ptr
));
348 temp
= (SH_LIST
*)ptr
;
349 temp
->next
= *(SH_LIST
**)list
;
350 OPENSSL_assert(temp
->next
== NULL
|| WITHIN_ARENA(temp
->next
));
351 temp
->p_next
= (SH_LIST
**)list
;
353 if (temp
->next
!= NULL
) {
354 OPENSSL_assert((char **)temp
->next
->p_next
== list
);
355 temp
->next
->p_next
= &(temp
->next
);
361 static void sh_remove_from_list(char *ptr
)
363 SH_LIST
*temp
, *temp2
;
365 temp
= (SH_LIST
*)ptr
;
366 if (temp
->next
!= NULL
)
367 temp
->next
->p_next
= temp
->p_next
;
368 *temp
->p_next
= temp
->next
;
369 if (temp
->next
== NULL
)
373 OPENSSL_assert(WITHIN_FREELIST(temp2
->p_next
) || WITHIN_ARENA(temp2
->p_next
));
377 static int sh_init(size_t size
, int minsize
)
384 memset(&sh
, 0, sizeof(sh
));
386 /* make sure size and minsize are powers of 2 */
387 OPENSSL_assert(size
> 0);
388 OPENSSL_assert((size
& (size
- 1)) == 0);
389 OPENSSL_assert(minsize
> 0);
390 OPENSSL_assert((minsize
& (minsize
- 1)) == 0);
391 if (size
<= 0 || (size
& (size
- 1)) != 0)
393 if (minsize
<= 0 || (minsize
& (minsize
- 1)) != 0)
396 while (minsize
< (int)sizeof(SH_LIST
))
399 sh
.arena_size
= size
;
400 sh
.minsize
= minsize
;
401 sh
.bittable_size
= (sh
.arena_size
/ sh
.minsize
) * 2;
403 /* Prevent allocations of size 0 later on */
404 if (sh
.bittable_size
>> 3 == 0)
407 sh
.freelist_size
= -1;
408 for (i
= sh
.bittable_size
; i
; i
>>= 1)
411 sh
.freelist
= OPENSSL_zalloc(sh
.freelist_size
* sizeof(char *));
412 OPENSSL_assert(sh
.freelist
!= NULL
);
413 if (sh
.freelist
== NULL
)
416 sh
.bittable
= OPENSSL_zalloc(sh
.bittable_size
>> 3);
417 OPENSSL_assert(sh
.bittable
!= NULL
);
418 if (sh
.bittable
== NULL
)
421 sh
.bitmalloc
= OPENSSL_zalloc(sh
.bittable_size
>> 3);
422 OPENSSL_assert(sh
.bitmalloc
!= NULL
);
423 if (sh
.bitmalloc
== NULL
)
426 /* Allocate space for heap, and two extra pages as guards */
427 #if defined(_SC_PAGE_SIZE) || defined (_SC_PAGESIZE)
429 # if defined(_SC_PAGE_SIZE)
430 long tmppgsize
= sysconf(_SC_PAGE_SIZE
);
432 long tmppgsize
= sysconf(_SC_PAGESIZE
);
437 pgsize
= (size_t)tmppgsize
;
442 sh
.map_size
= pgsize
+ sh
.arena_size
+ pgsize
;
445 sh
.map_result
= mmap(NULL
, sh
.map_size
,
446 PROT_READ
|PROT_WRITE
, MAP_ANON
|MAP_PRIVATE
, -1, 0);
451 sh
.map_result
= MAP_FAILED
;
452 if ((fd
= open("/dev/zero", O_RDWR
)) >= 0) {
453 sh
.map_result
= mmap(NULL
, sh
.map_size
,
454 PROT_READ
|PROT_WRITE
, MAP_PRIVATE
, fd
, 0);
458 if (sh
.map_result
== MAP_FAILED
)
460 sh
.arena
= (char *)(sh
.map_result
+ pgsize
);
461 sh_setbit(sh
.arena
, 0, sh
.bittable
);
462 sh_add_to_list(&sh
.freelist
[0], sh
.arena
);
464 /* Now try to add guard pages and lock into memory. */
467 /* Starting guard is already aligned from mmap. */
468 if (mprotect(sh
.map_result
, pgsize
, PROT_NONE
) < 0)
471 /* Ending guard page - need to round up to page boundary */
472 aligned
= (pgsize
+ sh
.arena_size
+ (pgsize
- 1)) & ~(pgsize
- 1);
473 if (mprotect(sh
.map_result
+ aligned
, pgsize
, PROT_NONE
) < 0)
476 if (mlock(sh
.arena
, sh
.arena_size
) < 0)
479 if (madvise(sh
.arena
, sh
.arena_size
, MADV_DONTDUMP
) < 0)
490 static void sh_done()
492 OPENSSL_free(sh
.freelist
);
493 OPENSSL_free(sh
.bittable
);
494 OPENSSL_free(sh
.bitmalloc
);
495 if (sh
.map_result
!= NULL
&& sh
.map_size
)
496 munmap(sh
.map_result
, sh
.map_size
);
497 memset(&sh
, 0, sizeof(sh
));
/* 1 if ptr lies inside the arena, 0 otherwise. */
static int sh_allocated(const char *ptr)
{
    return WITHIN_ARENA(ptr) ? 1 : 0;
}
505 static char *sh_find_my_buddy(char *ptr
, int list
)
510 bit
= (ONE
<< list
) + (ptr
- sh
.arena
) / (sh
.arena_size
>> list
);
513 if (TESTBIT(sh
.bittable
, bit
) && !TESTBIT(sh
.bitmalloc
, bit
))
514 chunk
= sh
.arena
+ ((bit
& ((ONE
<< list
) - 1)) * (sh
.arena_size
>> list
));
519 static char *sh_malloc(size_t size
)
521 ossl_ssize_t list
, slist
;
525 if (size
> sh
.arena_size
)
528 list
= sh
.freelist_size
- 1;
529 for (i
= sh
.minsize
; i
< size
; i
<<= 1)
534 /* try to find a larger entry to split */
535 for (slist
= list
; slist
>= 0; slist
--)
536 if (sh
.freelist
[slist
] != NULL
)
541 /* split larger entry */
542 while (slist
!= list
) {
543 char *temp
= sh
.freelist
[slist
];
545 /* remove from bigger list */
546 OPENSSL_assert(!sh_testbit(temp
, slist
, sh
.bitmalloc
));
547 sh_clearbit(temp
, slist
, sh
.bittable
);
548 sh_remove_from_list(temp
);
549 OPENSSL_assert(temp
!= sh
.freelist
[slist
]);
551 /* done with bigger list */
554 /* add to smaller list */
555 OPENSSL_assert(!sh_testbit(temp
, slist
, sh
.bitmalloc
));
556 sh_setbit(temp
, slist
, sh
.bittable
);
557 sh_add_to_list(&sh
.freelist
[slist
], temp
);
558 OPENSSL_assert(sh
.freelist
[slist
] == temp
);
561 temp
+= sh
.arena_size
>> slist
;
562 OPENSSL_assert(!sh_testbit(temp
, slist
, sh
.bitmalloc
));
563 sh_setbit(temp
, slist
, sh
.bittable
);
564 sh_add_to_list(&sh
.freelist
[slist
], temp
);
565 OPENSSL_assert(sh
.freelist
[slist
] == temp
);
567 OPENSSL_assert(temp
-(sh
.arena_size
>> slist
) == sh_find_my_buddy(temp
, slist
));
570 /* peel off memory to hand back */
571 chunk
= sh
.freelist
[list
];
572 OPENSSL_assert(sh_testbit(chunk
, list
, sh
.bittable
));
573 sh_setbit(chunk
, list
, sh
.bitmalloc
);
574 sh_remove_from_list(chunk
);
576 OPENSSL_assert(WITHIN_ARENA(chunk
));
578 /* zero the free list header as a precaution against information leakage */
579 memset(chunk
, 0, sizeof(SH_LIST
));
584 static void sh_free(char *ptr
)
591 OPENSSL_assert(WITHIN_ARENA(ptr
));
592 if (!WITHIN_ARENA(ptr
))
595 list
= sh_getlist(ptr
);
596 OPENSSL_assert(sh_testbit(ptr
, list
, sh
.bittable
));
597 sh_clearbit(ptr
, list
, sh
.bitmalloc
);
598 sh_add_to_list(&sh
.freelist
[list
], ptr
);
600 /* Try to coalesce two adjacent free areas. */
601 while ((buddy
= sh_find_my_buddy(ptr
, list
)) != NULL
) {
602 OPENSSL_assert(ptr
== sh_find_my_buddy(buddy
, list
));
603 OPENSSL_assert(ptr
!= NULL
);
604 OPENSSL_assert(!sh_testbit(ptr
, list
, sh
.bitmalloc
));
605 sh_clearbit(ptr
, list
, sh
.bittable
);
606 sh_remove_from_list(ptr
);
607 OPENSSL_assert(!sh_testbit(ptr
, list
, sh
.bitmalloc
));
608 sh_clearbit(buddy
, list
, sh
.bittable
);
609 sh_remove_from_list(buddy
);
613 /* Zero the higher addressed block's free list pointers */
614 memset(ptr
> buddy
? ptr
: buddy
, 0, sizeof(SH_LIST
));
618 OPENSSL_assert(!sh_testbit(ptr
, list
, sh
.bitmalloc
));
619 sh_setbit(ptr
, list
, sh
.bittable
);
620 sh_add_to_list(&sh
.freelist
[list
], ptr
);
621 OPENSSL_assert(sh
.freelist
[list
] == ptr
);
625 static size_t sh_actual_size(char *ptr
)
629 OPENSSL_assert(WITHIN_ARENA(ptr
));
630 if (!WITHIN_ARENA(ptr
))
632 list
= sh_getlist(ptr
);
633 OPENSSL_assert(sh_testbit(ptr
, list
, sh
.bittable
));
634 return sh
.arena_size
/ (ONE
<< list
);
636 #endif /* IMPLEMENTED */