]>
Commit | Line | Data |
---|---|---|
f35db108 WM |
1 | //===-- sanitizer_allocator.cc --------------------------------------------===// |
2 | // | |
3 | // This file is distributed under the University of Illinois Open Source | |
4 | // License. See LICENSE.TXT for details. | |
5 | // | |
6 | //===----------------------------------------------------------------------===// | |
7 | // | |
8 | // This file is shared between AddressSanitizer and ThreadSanitizer | |
9 | // run-time libraries. | |
10 | // This allocator that is used inside run-times. | |
11 | //===----------------------------------------------------------------------===// | |
12 | #include "sanitizer_common.h" | |
13 | ||
14 | // FIXME: We should probably use more low-level allocator that would | |
15 | // mmap some pages and split them into chunks to fulfill requests. | |
16 | #if defined(__linux__) && !defined(__ANDROID__) | |
17 | extern "C" void *__libc_malloc(__sanitizer::uptr size); | |
18 | extern "C" void __libc_free(void *ptr); | |
19 | # define LIBC_MALLOC __libc_malloc | |
20 | # define LIBC_FREE __libc_free | |
21 | #else // __linux__ && !ANDROID | |
22 | # include <stdlib.h> | |
23 | # define LIBC_MALLOC malloc | |
24 | # define LIBC_FREE free | |
25 | #endif // __linux__ && !ANDROID | |
26 | ||
27 | namespace __sanitizer { | |
28 | ||
// Magic value stored in the 8-byte header of every InternalAlloc()
// block; checked by InternalFree() to detect invalid or foreign frees.
const u64 kBlockMagic = 0x6A6CB03ABCEBC041ull;
30 | ||
31 | void *InternalAlloc(uptr size) { | |
32 | if (size + sizeof(u64) < size) | |
33 | return 0; | |
34 | void *p = LIBC_MALLOC(size + sizeof(u64)); | |
35 | if (p == 0) | |
36 | return 0; | |
37 | ((u64*)p)[0] = kBlockMagic; | |
38 | return (char*)p + sizeof(u64); | |
39 | } | |
40 | ||
41 | void InternalFree(void *addr) { | |
42 | if (addr == 0) | |
43 | return; | |
44 | addr = (char*)addr - sizeof(u64); | |
45 | CHECK_EQ(((u64*)addr)[0], kBlockMagic); | |
46 | ((u64*)addr)[0] = 0; | |
47 | LIBC_FREE(addr); | |
48 | } | |
49 | ||
50 | void *InternalAllocBlock(void *p) { | |
51 | CHECK_NE(p, (void*)0); | |
52 | u64 *pp = (u64*)((uptr)p & ~0x7); | |
53 | for (; pp[0] != kBlockMagic; pp--) {} | |
54 | return pp + 1; | |
55 | } | |
56 | ||
// LowLevelAllocator
// Optional hook invoked with (address, size) each time the low-level
// allocator maps a fresh region; see SetLowLevelAllocateCallback().
static LowLevelAllocateCallback low_level_alloc_callback;
59 | ||
60 | void *LowLevelAllocator::Allocate(uptr size) { | |
61 | // Align allocation size. | |
62 | size = RoundUpTo(size, 8); | |
63 | if (allocated_end_ - allocated_current_ < (sptr)size) { | |
64 | uptr size_to_allocate = Max(size, kPageSize); | |
65 | allocated_current_ = | |
66 | (char*)MmapOrDie(size_to_allocate, __FUNCTION__); | |
67 | allocated_end_ = allocated_current_ + size_to_allocate; | |
68 | if (low_level_alloc_callback) { | |
69 | low_level_alloc_callback((uptr)allocated_current_, | |
70 | size_to_allocate); | |
71 | } | |
72 | } | |
73 | CHECK(allocated_end_ - allocated_current_ >= (sptr)size); | |
74 | void *res = allocated_current_; | |
75 | allocated_current_ += size; | |
76 | return res; | |
77 | } | |
78 | ||
// Registers |callback| to be notified of every new region mapped by
// LowLevelAllocator::Allocate(); pass 0 to disable notifications.
void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback) {
  low_level_alloc_callback = callback;
}
82 | ||
83 | } // namespace __sanitizer |