/*
 * Copyright 2004-2014, Akamai Technologies. All Rights Reserved.
 * This file is distributed under the terms of the OpenSSL license.
 *
 * This file is in two halves. The first half implements the public API
 * to be used by external consumers, and to be used by OpenSSL to store
 * data in a "secure arena." The second half implements the secure arena.
 * For details on that implementation, see below (look for uppercase
 * "SECURE HEAP IMPLEMENTATION").
 */
13 #include <openssl/crypto.h>
18 #if defined(OPENSSL_SYS_LINUX) || defined(OPENSSL_SYS_UNIX)
23 # include <sys/types.h>
24 # include <sys/mman.h>
25 # include <sys/param.h>
26 # include <sys/stat.h>
/* Serialize all secure-heap state behind the global malloc lock. */
#define LOCK()      CRYPTO_w_lock(CRYPTO_LOCK_MALLOC)
#define UNLOCK()    CRYPTO_w_unlock(CRYPTO_LOCK_MALLOC)
/* Cleanse (not plain memset, which may be optimized away) freed secrets. */
#define CLEAR(p, s) OPENSSL_cleanse(p, s)
#ifndef PAGE_SIZE
# define PAGE_SIZE  4096
#endif
/* Bytes currently handed out of the secure arena. */
static size_t secure_mem_used;

/* Nonzero once CRYPTO_secure_malloc_init() has run. */
static int secure_mem_initialized;

/*
 * These are the functions that must be implemented by a secure heap (sh).
 */
static int sh_init(size_t size, int minsize);
static char *sh_malloc(size_t size);
static void sh_free(char *ptr);
static void sh_done(void);
static int sh_actual_size(char *ptr);
static int sh_allocated(const char *ptr);
/*
 * Create the secure heap: |size| bytes of arena split into |minsize|-byte
 * minimum units.  Both must be powers of two.  Returns 0 on failure,
 * nonzero on success (2 if guard pages/locking could not be applied).
 * Must be called at most once.
 */
int CRYPTO_secure_malloc_init(size_t size, int minsize)
{
#ifdef IMPLEMENTED
    int ret = 0;

    LOCK();
    OPENSSL_assert(!secure_mem_initialized);
    if (!secure_mem_initialized) {
        ret = sh_init(size, minsize);
        secure_mem_initialized = 1;
    }
    UNLOCK();
    return ret;
#else
    return 0;
#endif /* IMPLEMENTED */
}
/* Tear down the secure heap and release all of its bookkeeping. */
void CRYPTO_secure_malloc_done()
{
#ifdef IMPLEMENTED
    LOCK();
    sh_done();
    secure_mem_initialized = 0;
    UNLOCK();
#endif /* IMPLEMENTED */
}
/* Report whether the secure heap is available (init ran and not torn down). */
int CRYPTO_secure_malloc_initialized()
{
#ifdef IMPLEMENTED
    return secure_mem_initialized;
#else
    return 0;
#endif /* IMPLEMENTED */
}
/*
 * Allocate |num| bytes from the secure arena, falling back to the regular
 * CRYPTO_malloc() when the secure heap is not initialized (or on platforms
 * where it is not implemented).  |file|/|line| are the caller's location,
 * passed through to CRYPTO_malloc for its debugging hooks.
 */
void *CRYPTO_secure_malloc(size_t num, const char *file, int line)
{
#ifdef IMPLEMENTED
    void *ret;
    size_t actual_size;

    if (!secure_mem_initialized) {
        return CRYPTO_malloc(num, file, line);
    }
    LOCK();
    ret = sh_malloc(num);
    /* Account for the power-of-two chunk actually consumed, not |num|. */
    actual_size = ret ? sh_actual_size(ret) : 0;
    secure_mem_used += actual_size;
    UNLOCK();
    return ret;
#else
    return CRYPTO_malloc(num, file, line);
#endif /* IMPLEMENTED */
}
/* Like CRYPTO_secure_malloc() but the returned memory is zero-filled. */
void *CRYPTO_secure_zalloc(size_t num, const char *file, int line)
{
    void *ret = CRYPTO_secure_malloc(num, file, line);

    if (ret != NULL)
        memset(ret, 0, num);
    return ret;
}
/*
 * Free |ptr|.  If it did not come from the secure arena (heap never
 * initialized), it is handed to the regular CRYPTO_free().  Secure
 * allocations are cleansed before being returned to the arena.
 */
void CRYPTO_secure_free(void *ptr)
{
#ifdef IMPLEMENTED
    size_t actual_size;

    if (ptr == NULL)
        return;
    if (!secure_mem_initialized) {
        CRYPTO_free(ptr);
        return;
    }
    LOCK();
    actual_size = sh_actual_size(ptr);
    CLEAR(ptr, actual_size);
    secure_mem_used -= actual_size;
    sh_free(ptr);
    UNLOCK();
#else
    CRYPTO_free(ptr);
#endif /* IMPLEMENTED */
}
/* Return 1 iff |ptr| lies inside the secure arena, else 0. */
int CRYPTO_secure_allocated(const void *ptr)
{
#ifdef IMPLEMENTED
    int ret;

    if (!secure_mem_initialized)
        return 0;
    LOCK();
    ret = sh_allocated(ptr);
    UNLOCK();
    return ret;
#else
    return 0;
#endif /* IMPLEMENTED */
}
/* Total bytes currently allocated out of the secure arena. */
size_t CRYPTO_secure_used()
{
#ifdef IMPLEMENTED
    return secure_mem_used;
#else
    return 0;
#endif /* IMPLEMENTED */
}
/*
 * Return the rounded-up (power-of-two chunk) size backing |ptr|.
 * NOTE(review): the visible fragment does not show a fallback for
 * non-arena pointers; assumes |ptr| came from the secure arena.
 */
size_t CRYPTO_secure_actual_size(void *ptr)
{
#ifdef IMPLEMENTED
    size_t actual_size;

    LOCK();
    actual_size = sh_actual_size(ptr);
    UNLOCK();
    return actual_size;
#else
    return 0;
#endif /* IMPLEMENTED */
}
/*
 * SECURE HEAP IMPLEMENTATION
 *
 * The implementation provided here uses a fixed-sized mmap() heap,
 * which is locked into memory, not written to core files, and protected
 * on either side by an unmapped page, which will catch pointer overruns
 * (or underruns) and an attempt to read data out of the secure heap.
 * Free'd memory is zero'd or otherwise cleansed.
 *
 * This is a pretty standard buddy allocator. We keep areas in a multiple
 * of "sh.minsize" units. The freelist and bitmaps are kept separately,
 * so all (and only) data is kept in the mmap'd heap.
 *
 * This code assumes eight-bit bytes. The numbers 3 and 7 are all over the
 * place.
 */
/* Bitmap helpers; the 3's and 7's assume eight-bit bytes. */
# define TESTBIT(t, b)  (t[(b) >> 3] & (1 << ((b) & 7)))
# define SETBIT(t, b)   (t[(b) >> 3] |= (1 << ((b) & 7)))
# define CLEARBIT(t, b) (t[(b) >> 3] &= (0xFF & ~(1 << ((b) & 7))))

/* True iff |p| points inside the mmap'd data arena. */
#define WITHIN_ARENA(p) \
    ((char*)(p) >= sh.arena && (char*)(p) < &sh.arena[sh.arena_size])
/* True iff |p| points at one of the freelist head slots. */
#define WITHIN_FREELIST(p) \
    ((char*)(p) >= (char*)sh.freelist && (char*)(p) < (char*)&sh.freelist[sh.freelist_size])
/*
 * Freelist node, stored *inside* free arena chunks; doubly linked via a
 * pointer-to-previous-next so unlinking needs no list head.
 */
typedef struct sh_list_st {
    struct sh_list_st *next;
    struct sh_list_st **p_next;
} SH_LIST;

/*
 * All secure-heap state.  NOTE(review): the struct header and several
 * fields were lost in extraction; field set reconstructed from the uses
 * visible below (sh.map_result, sh.arena, sh.freelist, ...) — confirm
 * against the upstream file.
 */
typedef struct sh_st {
    char *map_result;           /* base of the whole mapping (guard+arena+guard) */
    size_t map_size;
    char *arena;                /* usable heap: map_result + one guard page */
    int arena_size;             /* power of two */
    char **freelist;            /* one head per power-of-two chunk size */
    int freelist_size;
    int minsize;                /* minimum allocation unit, power of two */
    unsigned char *bittable;    /* bit set iff chunk boundary exists */
    unsigned char *bitmalloc;   /* bit set iff chunk is allocated out */
    int bittable_size;          /* size in bits */
} SH;

static SH sh;
240 static int sh_getlist(char *ptr
)
242 int list
= sh
.freelist_size
- 1;
243 int bit
= (sh
.arena_size
+ ptr
- sh
.arena
) / sh
.minsize
;
245 for (; bit
; bit
>>= 1, list
--) {
246 if (TESTBIT(sh
.bittable
, bit
))
248 OPENSSL_assert((bit
& 1) == 0);
255 static int sh_testbit(char *ptr
, int list
, unsigned char *table
)
259 OPENSSL_assert(list
>= 0 && list
< sh
.freelist_size
);
260 OPENSSL_assert(((ptr
- sh
.arena
) & ((sh
.arena_size
>> list
) - 1)) == 0);
261 bit
= (1 << list
) + ((ptr
- sh
.arena
) / (sh
.arena_size
>> list
));
262 OPENSSL_assert(bit
> 0 && bit
< sh
.bittable_size
);
263 return TESTBIT(table
, bit
);
266 static void sh_clearbit(char *ptr
, int list
, unsigned char *table
)
270 OPENSSL_assert(list
>= 0 && list
< sh
.freelist_size
);
271 OPENSSL_assert(((ptr
- sh
.arena
) & ((sh
.arena_size
>> list
) - 1)) == 0);
272 bit
= (1 << list
) + ((ptr
- sh
.arena
) / (sh
.arena_size
>> list
));
273 OPENSSL_assert(bit
> 0 && bit
< sh
.bittable_size
);
274 OPENSSL_assert(TESTBIT(table
, bit
));
275 CLEARBIT(table
, bit
);
278 static void sh_setbit(char *ptr
, int list
, unsigned char *table
)
282 OPENSSL_assert(list
>= 0 && list
< sh
.freelist_size
);
283 OPENSSL_assert(((ptr
- sh
.arena
) & ((sh
.arena_size
>> list
) - 1)) == 0);
284 bit
= (1 << list
) + ((ptr
- sh
.arena
) / (sh
.arena_size
>> list
));
285 OPENSSL_assert(bit
> 0 && bit
< sh
.bittable_size
);
286 OPENSSL_assert(!TESTBIT(table
, bit
));
290 static void sh_add_to_list(char **list
, char *ptr
)
294 OPENSSL_assert(WITHIN_FREELIST(list
));
295 OPENSSL_assert(WITHIN_ARENA(ptr
));
297 temp
= (SH_LIST
*)ptr
;
298 temp
->next
= *(SH_LIST
**)list
;
299 OPENSSL_assert(temp
->next
== NULL
|| WITHIN_ARENA(temp
->next
));
300 temp
->p_next
= (SH_LIST
**)list
;
302 if (temp
->next
!= NULL
) {
303 OPENSSL_assert((char **)temp
->next
->p_next
== list
);
304 temp
->next
->p_next
= &(temp
->next
);
310 static void sh_remove_from_list(char *ptr
, char *list
)
312 SH_LIST
*temp
, *temp2
;
314 temp
= (SH_LIST
*)ptr
;
315 if (temp
->next
!= NULL
)
316 temp
->next
->p_next
= temp
->p_next
;
317 *temp
->p_next
= temp
->next
;
318 if (temp
->next
== NULL
)
322 OPENSSL_assert(WITHIN_FREELIST(temp2
->p_next
) || WITHIN_ARENA(temp2
->p_next
));
326 static int sh_init(size_t size
, int minsize
)
332 memset(&sh
, 0, sizeof sh
);
334 /* make sure size and minsize are powers of 2 */
335 OPENSSL_assert(size
> 0);
336 OPENSSL_assert((size
& (size
- 1)) == 0);
337 OPENSSL_assert(minsize
> 0);
338 OPENSSL_assert((minsize
& (minsize
- 1)) == 0);
339 if (size
<= 0 || (size
& (size
- 1)) != 0)
341 if (minsize
<= 0 || (minsize
& (minsize
- 1)) != 0)
344 sh
.arena_size
= size
;
345 sh
.minsize
= minsize
;
346 sh
.bittable_size
= (sh
.arena_size
/ sh
.minsize
) * 2;
348 sh
.freelist_size
= -1;
349 for (i
= sh
.bittable_size
; i
; i
>>= 1)
352 sh
.freelist
= OPENSSL_zalloc(sh
.freelist_size
* sizeof (char *));
353 OPENSSL_assert(sh
.freelist
!= NULL
);
354 if (sh
.freelist
== NULL
)
357 sh
.bittable
= OPENSSL_zalloc(sh
.bittable_size
>> 3);
358 OPENSSL_assert(sh
.bittable
!= NULL
);
359 if (sh
.bittable
== NULL
)
362 sh
.bitmalloc
= OPENSSL_zalloc(sh
.bittable_size
>> 3);
363 OPENSSL_assert(sh
.bitmalloc
!= NULL
);
364 if (sh
.bitmalloc
== NULL
)
367 /* Allocate space for heap, and two extra pages as guards */
368 #if defined(_SC_PAGE_SIZE) || defined (_SC_PAGESIZE)
370 # if defined(_SC_PAGE_SIZE)
371 long tmppgsize
= sysconf(_SC_PAGE_SIZE
);
373 long tmppgsize
= sysconf(_SC_PAGESIZE
);
378 pgsize
= (size_t)tmppgsize
;
383 sh
.map_size
= pgsize
+ sh
.arena_size
+ pgsize
;
386 sh
.map_result
= mmap(NULL
, sh
.map_size
,
387 PROT_READ
|PROT_WRITE
, MAP_ANON
|MAP_PRIVATE
, -1, 0);
392 sh
.map_result
= MAP_FAILED
;
393 if ((fd
= open("/dev/zero", O_RDWR
)) >= 0) {
394 sh
.map_result
= mmap(NULL
, sh
.map_size
,
395 PROT_READ
|PROT_WRITE
, MAP_PRIVATE
, fd
, 0);
399 OPENSSL_assert(sh
.map_result
!= MAP_FAILED
);
400 if (sh
.map_result
== MAP_FAILED
)
402 sh
.arena
= (char *)(sh
.map_result
+ pgsize
);
403 sh_setbit(sh
.arena
, 0, sh
.bittable
);
404 sh_add_to_list(&sh
.freelist
[0], sh
.arena
);
406 /* Now try to add guard pages and lock into memory. */
409 /* Starting guard is already aligned from mmap. */
410 if (mprotect(sh
.map_result
, pgsize
, PROT_NONE
) < 0)
413 /* Ending guard page - need to round up to page boundary */
414 aligned
= (pgsize
+ sh
.arena_size
+ (pgsize
- 1)) & ~(pgsize
- 1);
415 if (mprotect(sh
.map_result
+ aligned
, pgsize
, PROT_NONE
) < 0)
418 if (mlock(sh
.arena
, sh
.arena_size
) < 0)
421 if (madvise(sh
.arena
, sh
.arena_size
, MADV_DONTDUMP
) < 0)
432 static void sh_done()
434 OPENSSL_free(sh
.freelist
);
435 OPENSSL_free(sh
.bittable
);
436 OPENSSL_free(sh
.bitmalloc
);
437 if (sh
.map_result
!= NULL
&& sh
.map_size
)
438 munmap(sh
.map_result
, sh
.map_size
);
439 memset(&sh
, 0, sizeof sh
);
/* Return 1 iff |ptr| lies inside the secure arena. */
static int sh_allocated(const char *ptr)
{
    return WITHIN_ARENA(ptr) ? 1 : 0;
}
447 static char *sh_find_my_buddy(char *ptr
, int list
)
452 bit
= (1 << list
) + (ptr
- sh
.arena
) / (sh
.arena_size
>> list
);
455 if (TESTBIT(sh
.bittable
, bit
) && !TESTBIT(sh
.bitmalloc
, bit
))
456 chunk
= sh
.arena
+ ((bit
& ((1 << list
) - 1)) * (sh
.arena_size
>> list
));
461 static char *sh_malloc(size_t size
)
467 list
= sh
.freelist_size
- 1;
468 for (i
= sh
.minsize
; i
< size
; i
<<= 1)
473 /* try to find a larger entry to split */
474 for (slist
= list
; slist
>= 0; slist
--)
475 if (sh
.freelist
[slist
] != NULL
)
480 /* split larger entry */
481 while (slist
!= list
) {
482 char *temp
= sh
.freelist
[slist
];
484 /* remove from bigger list */
485 OPENSSL_assert(!sh_testbit(temp
, slist
, sh
.bitmalloc
));
486 sh_clearbit(temp
, slist
, sh
.bittable
);
487 sh_remove_from_list(temp
, sh
.freelist
[slist
]);
488 OPENSSL_assert(temp
!= sh
.freelist
[slist
]);
490 /* done with bigger list */
493 /* add to smaller list */
494 OPENSSL_assert(!sh_testbit(temp
, slist
, sh
.bitmalloc
));
495 sh_setbit(temp
, slist
, sh
.bittable
);
496 sh_add_to_list(&sh
.freelist
[slist
], temp
);
497 OPENSSL_assert(sh
.freelist
[slist
] == temp
);
500 temp
+= sh
.arena_size
>> slist
;
501 OPENSSL_assert(!sh_testbit(temp
, slist
, sh
.bitmalloc
));
502 sh_setbit(temp
, slist
, sh
.bittable
);
503 sh_add_to_list(&sh
.freelist
[slist
], temp
);
504 OPENSSL_assert(sh
.freelist
[slist
] == temp
);
506 OPENSSL_assert(temp
-(sh
.arena_size
>> slist
) == sh_find_my_buddy(temp
, slist
));
509 /* peel off memory to hand back */
510 chunk
= sh
.freelist
[list
];
511 OPENSSL_assert(sh_testbit(chunk
, list
, sh
.bittable
));
512 sh_setbit(chunk
, list
, sh
.bitmalloc
);
513 sh_remove_from_list(chunk
, sh
.freelist
[list
]);
515 OPENSSL_assert(WITHIN_ARENA(chunk
));
520 static void sh_free(char *ptr
)
527 OPENSSL_assert(WITHIN_ARENA(ptr
));
528 if (!WITHIN_ARENA(ptr
))
531 list
= sh_getlist(ptr
);
532 OPENSSL_assert(sh_testbit(ptr
, list
, sh
.bittable
));
533 sh_clearbit(ptr
, list
, sh
.bitmalloc
);
534 sh_add_to_list(&sh
.freelist
[list
], ptr
);
536 /* Try to coalesce two adjacent free areas. */
537 while ((buddy
= sh_find_my_buddy(ptr
, list
)) != NULL
) {
538 OPENSSL_assert(ptr
== sh_find_my_buddy(buddy
, list
));
539 OPENSSL_assert(ptr
!= NULL
);
540 OPENSSL_assert(!sh_testbit(ptr
, list
, sh
.bitmalloc
));
541 sh_clearbit(ptr
, list
, sh
.bittable
);
542 sh_remove_from_list(ptr
, sh
.freelist
[list
]);
543 OPENSSL_assert(!sh_testbit(ptr
, list
, sh
.bitmalloc
));
544 sh_clearbit(buddy
, list
, sh
.bittable
);
545 sh_remove_from_list(buddy
, sh
.freelist
[list
]);
552 OPENSSL_assert(!sh_testbit(ptr
, list
, sh
.bitmalloc
));
553 sh_setbit(ptr
, list
, sh
.bittable
);
554 sh_add_to_list(&sh
.freelist
[list
], ptr
);
555 OPENSSL_assert(sh
.freelist
[list
] == ptr
);
559 static int sh_actual_size(char *ptr
)
563 OPENSSL_assert(WITHIN_ARENA(ptr
));
564 if (!WITHIN_ARENA(ptr
))
566 list
= sh_getlist(ptr
);
567 OPENSSL_assert(sh_testbit(ptr
, list
, sh
.bittable
));
568 return sh
.arena_size
/ (1 << list
);
570 #endif /* IMPLEMENTED */