-6e7dd1e3e1170080b76b5dcc5716bdd974343233
+f58e0513dd95944b81ce7a6e7b49ba656de7d75f
The first line of this file holds the git revision number of the
last merge done from the master library sources.
return false;
if (m->Beg() != addr) return false;
AsanThread *t = GetCurrentThread();
- m->SetAllocContext(t ? t->tid() : 0, StackDepotPut(*stack));
+ m->SetAllocContext(t ? t->tid() : kMainTid, StackDepotPut(*stack));
return true;
}
m->SetUsedSize(size);
m->user_requested_alignment_log = user_requested_alignment_log;
- m->SetAllocContext(t ? t->tid() : 0, StackDepotPut(*stack));
+ m->SetAllocContext(t ? t->tid() : kMainTid, StackDepotPut(*stack));
uptr size_rounded_down_to_granularity =
RoundDownTo(size, SHADOW_GRANULARITY);
m->lsan_tag = __lsan::kIgnored;
return kIgnoreObjectSuccess;
}
+
+void GetAdditionalThreadContextPtrs(ThreadContextBase *tctx, void *ptrs) {
+ // Look for the arg pointer of threads that have been created or are running.
+ // This is necessary to prevent false positive leaks due to the AsanThread
+ // holding the only live reference to a heap object. This can happen because
+ // the `pthread_create()` interceptor doesn't wait for the child thread to
+ // start before returning and thus losing the only live reference to the
+ // heap object on the stack.
+
+ __asan::AsanThreadContext *atctx =
+ reinterpret_cast<__asan::AsanThreadContext *>(tctx);
+ __asan::AsanThread *asan_thread = atctx->thread;
+
+ // Note ThreadStatusRunning is required because there is a small window where
+ // the thread status switches to `ThreadStatusRunning` but the `arg` pointer
+ // still isn't on the stack yet.
+ if (atctx->status != ThreadStatusCreated &&
+ atctx->status != ThreadStatusRunning)
+ return;
+
+ uptr thread_arg = reinterpret_cast<uptr>(asan_thread->get_arg());
+ if (!thread_arg)
+ return;
+
+ auto ptrsVec = reinterpret_cast<InternalMmapVector<uptr> *>(ptrs);
+ ptrsVec->push_back(thread_arg);
+}
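+
+// Illustrative sketch (an assumption, not part of the patch): the user-code
+// pattern the hook above keeps alive. After pthread_create() returns, the
+// parent has dropped its only reference to the heap object, so until the
+// child publishes `arg` on its own stack the ThreadContext is the only path
+// to it:
+//
+//   void *worker(void *arg) { free(arg); return NULL; }
+//   ...
+//   pthread_t tid;
+//   pthread_create(&tid, NULL, worker, malloc(16));  // must not be reported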
+
} // namespace __lsan
// ---------------------- Interface ---------------- {{{1
CHECK(context);
asanThreadRegistry().CheckLocked();
// No need to announce the main thread.
- if (context->tid == 0 || context->announced) {
+ if (context->tid == kMainTid || context->announced) {
return;
}
context->announced = true;
- InternalScopedString str(1024);
+ InternalScopedString str;
str.append("Thread %s", AsanThreadIdAndName(context).c_str());
if (context->parent_tid == kInvalidTid) {
str.append(" created by unknown thread\n");
} else if (AddrIsInLowShadow(addr)) {
*shadow_kind = kShadowKindLow;
} else {
- CHECK(0 && "Address is not in memory and not in shadow?");
return false;
}
return true;
static void PrintHeapChunkAccess(uptr addr, const ChunkAccess &descr) {
Decorator d;
- InternalScopedString str(4096);
+ InternalScopedString str;
str.append("%s", d.Location());
switch (descr.access_type) {
case kAccessTypeLeft:
else if (addr >= prev_var_end && addr - prev_var_end >= var.beg - addr_end)
pos_descr = "underflows";
}
- InternalScopedString str(1024);
+ InternalScopedString str;
str.append(" [%zd, %zd)", var.beg, var_end);
// Render variable name.
str.append(" '");
// Global descriptions
static void DescribeAddressRelativeToGlobal(uptr addr, uptr access_size,
const __asan_global &g) {
- InternalScopedString str(4096);
+ InternalScopedString str;
Decorator d;
str.append("%s", d.Location());
if (addr < g.beg) {
return;
}
data.kind = kAddressKindWild;
- addr = 0;
+ data.wild.addr = addr;
+ data.wild.access_size = access_size;
+}
+
+void WildAddressDescription::Print() const {
+ Printf("Address %p is a wild pointer inside of access range of size %p.\n",
+ addr, access_size);
}
void PrintAddressDescription(uptr addr, uptr access_size,
bool GetStackAddressInformation(uptr addr, uptr access_size,
StackAddressDescription *descr);
+struct WildAddressDescription {
+ uptr addr;
+ uptr access_size;
+
+ void Print() const;
+};
+
struct GlobalAddressDescription {
uptr addr;
// Assume address is close to at most four globals.
HeapAddressDescription heap;
StackAddressDescription stack;
GlobalAddressDescription global;
- uptr addr;
+ WildAddressDescription wild;
};
};
uptr Address() const {
switch (data.kind) {
case kAddressKindWild:
- return data.addr;
+ return data.wild.addr;
case kAddressKindShadow:
return data.shadow.addr;
case kAddressKindHeap:
void Print(const char *bug_descr = nullptr) const {
switch (data.kind) {
case kAddressKindWild:
- Printf("Address %p is a wild pointer.\n", data.addr);
+ data.wild.Print();
return;
case kAddressKindShadow:
return data.shadow.Print();
Report("ERROR: AddressSanitizer: %s (%p):\n", scariness.GetDescription(),
global1.beg);
Printf("%s", d.Default());
- InternalScopedString g1_loc(256), g2_loc(256);
+ InternalScopedString g1_loc;
+ InternalScopedString g2_loc;
PrintGlobalLocation(&g1_loc, global1);
PrintGlobalLocation(&g2_loc, global2);
Printf(" [1] size=%zd '%s' %s\n", global1.size,
Report(
"HINT: if you don't care about these errors you may set "
"ASAN_OPTIONS=detect_odr_violation=0\n");
- InternalScopedString error_msg(256);
+ InternalScopedString error_msg;
error_msg.append("%s: global '%s' at %s", scariness.GetDescription(),
MaybeDemangleGlobalName(global1.name), g1_loc.data());
ReportErrorSummary(error_msg.data());
uptr shadow_addr = MemToShadow(addr);
const uptr n_bytes_per_row = 16;
uptr aligned_shadow = shadow_addr & ~(n_bytes_per_row - 1);
- InternalScopedString str(4096 * 8);
+ InternalScopedString str;
str.append("Shadow bytes around the buggy address:\n");
for (int i = -5; i <= 5; i++) {
uptr row_shadow_addr = aligned_shadow + i * n_bytes_per_row;
void FakeStack::Destroy(int tid) {
PoisonAll(0);
if (Verbosity() >= 2) {
- InternalScopedString str(kNumberOfSizeClasses * 50);
+ InternalScopedString str;
for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++)
str.append("%zd: %zd/%zd; ", class_id, hint_position_[class_id],
NumberOfFrames(stack_size_log(), class_id));
void PlatformTSDDtor(void *tsd) { UNREACHABLE(__func__); }
static inline size_t AsanThreadMmapSize() {
- return RoundUpTo(sizeof(AsanThread), PAGE_SIZE);
+ return RoundUpTo(sizeof(AsanThread), _zx_system_get_page_size());
}
struct AsanThread::InitOptions {
}
}
+// Check an ODR violation for the given global G by checking whether it is
+// already poisoned. We use this method when the compiler doesn't use private
+// aliases for global variables.
+static void CheckODRViolationViaPoisoning(const Global *g) {
+ if (__asan_region_is_poisoned(g->beg, g->size_with_redzone)) {
+ // This check may not be enough: if the first global is much larger,
+ // the entire redzone of the second global may lie within the first global.
+ for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
+ if (g->beg == l->g->beg &&
+ (flags()->detect_odr_violation >= 2 || g->size != l->g->size) &&
+ !IsODRViolationSuppressed(g->name))
+ ReportODRViolation(g, FindRegistrationSite(g),
+ l->g, FindRegistrationSite(l->g));
+ }
+ }
+}
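+
+// Illustrative example (an assumption, not from this patch): the setup the
+// poisoning-based check targets. Two shared objects define the same
+// non-static global; the dynamic linker binds both to one address, so
+// registering the second copy finds memory that is already poisoned:
+//
+//   // a.cpp -> liba.so           // b.cpp -> libb.so
+//   int duplicated_global[10];    int duplicated_global[20];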
+
// Clang provides two different ways for global variables protection:
// it can poison the global itself or its private alias. In former
// case we may poison same symbol multiple times, that can help us to
// where two globals with the same name are defined in different modules.
if (UseODRIndicator(g))
CheckODRViolationViaIndicator(g);
+ else
+ CheckODRViolationViaPoisoning(g);
}
if (CanPoisonMemory())
PoisonRedZones(*g);
#include "sanitizer_common/sanitizer_common_syscalls.inc"
#include "sanitizer_common/sanitizer_syscalls_netbsd.inc"
-struct ThreadStartParam {
- atomic_uintptr_t t;
- atomic_uintptr_t is_registered;
-};
-
#if ASAN_INTERCEPT_PTHREAD_CREATE
static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) {
- ThreadStartParam *param = reinterpret_cast<ThreadStartParam *>(arg);
- AsanThread *t = nullptr;
- while ((t = reinterpret_cast<AsanThread *>(
- atomic_load(¶m->t, memory_order_acquire))) == nullptr)
- internal_sched_yield();
+ AsanThread *t = (AsanThread *)arg;
SetCurrentThread(t);
- return t->ThreadStart(GetTid(), ¶m->is_registered);
+ return t->ThreadStart(GetTid());
}
INTERCEPTOR(int, pthread_create, void *thread,
int detached = 0;
if (attr)
REAL(pthread_attr_getdetachstate)(attr, &detached);
- ThreadStartParam param;
- atomic_store(¶m.t, 0, memory_order_relaxed);
- atomic_store(¶m.is_registered, 0, memory_order_relaxed);
+
+ u32 current_tid = GetCurrentTidOrInvalid();
+ AsanThread *t =
+ AsanThread::Create(start_routine, arg, current_tid, &stack, detached);
+
int result;
{
// Ignore all allocations made by pthread_create: thread stack/TLS may be
#if CAN_SANITIZE_LEAKS
__lsan::ScopedInterceptorDisabler disabler;
#endif
- result = REAL(pthread_create)(thread, attr, asan_thread_start, ¶m);
+ result = REAL(pthread_create)(thread, attr, asan_thread_start, t);
}
- if (result == 0) {
- u32 current_tid = GetCurrentTidOrInvalid();
- AsanThread *t =
- AsanThread::Create(start_routine, arg, current_tid, &stack, detached);
- atomic_store(¶m.t, reinterpret_cast<uptr>(t), memory_order_release);
- // Wait until the AsanThread object is initialized and the ThreadRegistry
- // entry is in "started" state. One reason for this is that after this
- // interceptor exits, the child thread's stack may be the only thing holding
- // the |arg| pointer. This may cause LSan to report a leak if leak checking
- // happens at a point when the interceptor has already exited, but the stack
- // range for the child thread is not yet known.
- while (atomic_load(¶m.is_registered, memory_order_acquire) == 0)
- internal_sched_yield();
+ if (result != 0) {
+ // If the thread didn't start, delete the AsanThread to avoid leaking it.
+ // Note AsanThreadContexts never get destroyed so the AsanThreadContext
+ // that was just created for the AsanThread is wasted.
+ t->Destroy();
}
return result;
}
# define ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX 0
#endif
-#if (SANITIZER_LINUX && !SANITIZER_ANDROID) || SANITIZER_SOLARIS
+#if SANITIZER_GLIBC || SANITIZER_SOLARIS
# define ASAN_INTERCEPT_SWAPCONTEXT 1
#else
# define ASAN_INTERCEPT_SWAPCONTEXT 0
# define ASAN_INTERCEPT_SIGLONGJMP 0
#endif
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
+#if SANITIZER_GLIBC
# define ASAN_INTERCEPT___LONGJMP_CHK 1
#else
# define ASAN_INTERCEPT___LONGJMP_CHK 0
#if ASAN_HAS_EXCEPTIONS && !SANITIZER_WINDOWS && !SANITIZER_SOLARIS && \
!SANITIZER_NETBSD
# define ASAN_INTERCEPT___CXA_THROW 1
-# if ! defined(ASAN_HAS_CXA_RETHROW_PRIMARY_EXCEPTION) \
- || ASAN_HAS_CXA_RETHROW_PRIMARY_EXCEPTION
-# define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 1
-# else
-# define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 0
-# endif
+# define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 1
# if defined(_GLIBCXX_SJLJ_EXCEPTIONS) || (SANITIZER_IOS && defined(__arm__))
# define ASAN_INTERCEPT__UNWIND_SJLJ_RAISEEXCEPTION 1
# else
# define ASAN_INTERCEPT_ATEXIT 0
#endif
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
+#if SANITIZER_GLIBC
# define ASAN_INTERCEPT___STRDUP 1
#else
# define ASAN_INTERCEPT___STRDUP 0
DECLARE_REAL(char*, strstr, const char *s1, const char *s2)
#if !SANITIZER_MAC
-#define ASAN_INTERCEPT_FUNC(name) \
- do { \
- if (!INTERCEPT_FUNCTION(name)) \
- VReport(1, "AddressSanitizer: failed to intercept '%s'\n'", #name); \
+#define ASAN_INTERCEPT_FUNC(name) \
+ do { \
+ if (!INTERCEPT_FUNCTION(name)) \
+ VReport(1, "AddressSanitizer: failed to intercept '%s'\n", #name); \
} while (0)
#define ASAN_INTERCEPT_FUNC_VER(name, ver) \
do { \
#else
#include <sys/ucontext.h>
#include <link.h>
+extern ElfW(Dyn) _DYNAMIC[];
#endif
// x86-64 FreeBSD 9.2 and older define 'ucontext_t' incorrectly in
void *AsanDoesNotSupportStaticLinkage() {
// This will fail to link with -static.
- return &_DYNAMIC; // defined in link.h
+ return &_DYNAMIC;
}
#if ASAN_PREMAP_SHADOW
// || `[0x2000000000, 0x23ffffffff]` || LowShadow ||
// || `[0x0000000000, 0x1fffffffff]` || LowMem ||
//
+// Default Linux/RISCV64 Sv39 mapping:
+// || `[0x1555550000, 0x3fffffffff]` || HighMem ||
+// || `[0x0fffffa000, 0x1555555fff]` || HighShadow ||
+// || `[0x0effffa000, 0x0fffff9fff]` || ShadowGap ||
+// || `[0x0d55550000, 0x0effff9fff]` || LowShadow ||
+// || `[0x0000000000, 0x0d5554ffff]` || LowMem ||
+//
// Default Linux/AArch64 (39-bit VMA) mapping:
// || `[0x2000000000, 0x7fffffffff]` || highmem ||
// || `[0x1400000000, 0x1fffffffff]` || highshadow ||
// || `[0x1000000000, 0x11ffffffff]` || lowshadow ||
// || `[0x0000000000, 0x0fffffffff]` || lowmem ||
//
-// RISC-V has only 38 bits for task size
-// Low mem size is set with kRiscv64_ShadowOffset64 in
-// compiler-rt/lib/asan/asan_allocator.h and in
-// llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp with
-// kRiscv64_ShadowOffset64, High mem top border is set with
-// GetMaxVirtualAddress() in
-// compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp
-// Default Linux/RISCV64 Sv39/Sv48 mapping:
-// || `[0x000820000000, 0x003fffffffff]` || HighMem ||
-// || `[0x000124000000, 0x00081fffffff]` || HighShadow ||
-// || `[0x000024000000, 0x000123ffffff]` || ShadowGap ||
-// || `[0x000020000000, 0x000023ffffff]` || LowShadow ||
-// || `[0x000000000000, 0x00001fffffff]` || LowMem ||
-//
// Default Linux/AArch64 (42-bit VMA) mapping:
// || `[0x10000000000, 0x3ffffffffff]` || highmem ||
// || `[0x0a000000000, 0x0ffffffffff]` || highshadow ||
static const u64 kDefaultShort64bitShadowOffset =
0x7FFFFFFF & (~0xFFFULL << kDefaultShadowScale); // < 2G.
static const u64 kAArch64_ShadowOffset64 = 1ULL << 36;
-static const u64 kRiscv64_ShadowOffset64 = 0x20000000;
+static const u64 kRiscv64_ShadowOffset64 = 0xd55550000;
static const u64 kMIPS32_ShadowOffset32 = 0x0aaa0000;
static const u64 kMIPS64_ShadowOffset64 = 1ULL << 37;
-static const u64 kPPC64_ShadowOffset64 = 1ULL << 41;
+static const u64 kPPC64_ShadowOffset64 = 1ULL << 44;
static const u64 kSystemZ_ShadowOffset64 = 1ULL << 52;
static const u64 kSPARC64_ShadowOffset64 = 1ULL << 43; // 0x80000000000
static const u64 kFreeBSD_ShadowOffset32 = 1ULL << 30; // 0x40000000
#endif
#undef COMMENT_EXPORT
#else
-#define CXX_OPERATOR_ATTRIBUTE INTERCEPTOR_ATTRIBUTE
+#define CXX_OPERATOR_ATTRIBUTE INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
#endif
using namespace __asan;
&stack);
}
CHECK_LE(end - beg,
- FIRST_32_SECOND_64(1UL << 30, 1ULL << 34)); // Sanity check.
+ FIRST_32_SECOND_64(1UL << 30, 1ULL << 40)); // Sanity check.
uptr a = RoundDownTo(Min(old_mid, new_mid), granularity);
uptr c = RoundUpTo(Max(old_mid, new_mid), granularity);
if (signal_stack.ss_flags != SS_ONSTACK)
return false;
- // Since we're on the signal altnerate stack, we cannot find the DEFAULT
+ // Since we're on the signal alternate stack, we cannot find the DEFAULT
// stack bottom using a local variable.
uptr default_bottom, tls_addr, tls_size, stack_size;
GetThreadStackAndTls(/*main=*/false, &default_bottom, &stack_size, &tls_addr,
}
}
-static void AsanCheckFailed(const char *file, int line, const char *cond,
- u64 v1, u64 v2) {
- Report("AddressSanitizer CHECK failed: %s:%d \"%s\" (0x%zx, 0x%zx)\n", file,
- line, cond, (uptr)v1, (uptr)v2);
-
- // Print a stack trace the first time we come here. Otherwise, we probably
- // failed a CHECK during symbolization.
- static atomic_uint32_t num_calls;
- if (atomic_fetch_add(&num_calls, 1, memory_order_relaxed) == 0) {
- PRINT_CURRENT_STACK_CHECK();
- }
-
- Die();
+static void CheckUnwind() {
+ GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_check);
+ stack.Print();
}
// -------------------------- Globals --------------------- {{{1
// Install tool-specific callbacks in sanitizer_common.
AddDieCallback(AsanDie);
- SetCheckFailedCallback(AsanCheckFailed);
+ SetCheckUnwindCallback(CheckUnwind);
SetPrintfAndReportCallback(AppendToErrorMessageBuffer);
__sanitizer_set_report_path(common_flags()->log_path);
type, top, bottom, top - bottom, top - bottom);
return;
}
- PoisonShadow(bottom, top - bottom, 0);
+ PoisonShadow(bottom, RoundUpTo(top - bottom, SHADOW_GRANULARITY), 0);
}
static void UnpoisonDefaultStack() {
#define GET_STACK_TRACE_FATAL_HERE \
GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_fatal)
-#define GET_STACK_TRACE_CHECK_HERE \
- GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_check)
-
#define GET_STACK_TRACE_THREAD \
GET_STACK_TRACE(kStackTraceMax, true)
stack.Print(); \
}
-#define PRINT_CURRENT_STACK_CHECK() \
- { \
- GET_STACK_TRACE_CHECK_HERE; \
- stack.Print(); \
- }
-
#endif // ASAN_STACK_H
int tid = this->tid();
VReport(1, "T%d exited\n", tid);
- malloc_storage().CommitBack();
- if (common_flags()->use_sigaltstack) UnsetAlternateSignalStack();
- asanThreadRegistry().FinishThread(tid);
- FlushToDeadThreadStats(&stats_);
- // We also clear the shadow on thread destruction because
- // some code may still be executing in later TSD destructors
- // and we don't want it to have any poisoned stack.
- ClearShadowForThreadStackAndTLS();
- DeleteFakeStack(tid);
+ bool was_running =
+ (asanThreadRegistry().FinishThread(tid) == ThreadStatusRunning);
+ if (was_running) {
+ if (AsanThread *thread = GetCurrentThread())
+ CHECK_EQ(this, thread);
+ malloc_storage().CommitBack();
+ if (common_flags()->use_sigaltstack)
+ UnsetAlternateSignalStack();
+ FlushToDeadThreadStats(&stats_);
+ // We also clear the shadow on thread destruction because
+ // some code may still be executing in later TSD destructors
+ // and we don't want it to have any poisoned stack.
+ ClearShadowForThreadStackAndTLS();
+ DeleteFakeStack(tid);
+ } else {
+ CHECK_NE(this, GetCurrentThread());
+ }
uptr size = RoundUpTo(sizeof(AsanThread), GetPageSizeCached());
UnmapOrDie(this, size);
- DTLS_Destroy();
+ if (was_running)
+ DTLS_Destroy();
}
void AsanThread::StartSwitchFiber(FakeStack **fake_stack_save, uptr bottom,
}
void AsanThread::Init(const InitOptions *options) {
- DCHECK_NE(tid(), ThreadRegistry::kUnknownTid);
+ DCHECK_NE(tid(), kInvalidTid);
next_stack_top_ = next_stack_bottom_ = 0;
atomic_store(&stack_switching_, false, memory_order_release);
CHECK_EQ(this->stack_size(), 0U);
// SetThreadStackAndTls.
#if !SANITIZER_FUCHSIA && !SANITIZER_RTEMS
-thread_return_t AsanThread::ThreadStart(
- tid_t os_id, atomic_uintptr_t *signal_thread_is_registered) {
+thread_return_t AsanThread::ThreadStart(tid_t os_id) {
Init();
asanThreadRegistry().StartThread(tid(), os_id, ThreadType::Regular, nullptr);
- if (signal_thread_is_registered)
- atomic_store(signal_thread_is_registered, 1, memory_order_release);
if (common_flags()->use_sigaltstack) SetAlternateSignalStack();
AsanThread *CreateMainThread() {
AsanThread *main_thread = AsanThread::Create(
- /* start_routine */ nullptr, /* arg */ nullptr, /* parent_tid */ 0,
+ /* start_routine */ nullptr, /* arg */ nullptr, /* parent_tid */ kMainTid,
/* stack */ nullptr, /* detached */ true);
SetCurrentThread(main_thread);
- main_thread->ThreadStart(internal_getpid(),
- /* signal_thread_is_registered */ nullptr);
+ main_thread->ThreadStart(internal_getpid());
return main_thread;
}
DCHECK_EQ(options, nullptr);
uptr tls_size = 0;
uptr stack_size = 0;
- GetThreadStackAndTls(tid() == 0, &stack_bottom_, &stack_size, &tls_begin_,
- &tls_size);
- stack_top_ = stack_bottom_ + stack_size;
+ GetThreadStackAndTls(tid() == kMainTid, &stack_bottom_, &stack_size,
+ &tls_begin_, &tls_size);
+ stack_top_ = RoundDownTo(stack_bottom_ + stack_size, SHADOW_GRANULARITY);
tls_end_ = tls_begin_ + tls_size;
dtls_ = DTLS_Get();
// address. We are not entirely sure that we have correct main thread
// limits, so only do this magic on Android, and only if the found thread
// is the main thread.
- AsanThreadContext *tctx = GetThreadContextByTidLocked(0);
+ AsanThreadContext *tctx = GetThreadContextByTidLocked(kMainTid);
if (tctx && ThreadStackContainsAddress(tctx, &context)) {
SetCurrentThread(tctx->thread);
return tctx->thread;
void EnsureMainThreadIDIsCorrect() {
AsanThreadContext *context =
reinterpret_cast<AsanThreadContext *>(AsanTSDGet());
- if (context && (context->tid == 0))
+ if (context && (context->tid == kMainTid))
context->os_id = GetTid();
}
namespace __asan {
-const u32 kInvalidTid = 0xffffff; // Must fit into 24 bits.
const u32 kMaxNumberOfThreads = (1 << 22); // 4M
class AsanThread;
struct InitOptions;
void Init(const InitOptions *options = nullptr);
- thread_return_t ThreadStart(tid_t os_id,
- atomic_uintptr_t *signal_thread_is_registered);
+ thread_return_t ThreadStart(tid_t os_id);
uptr stack_top();
uptr stack_bottom();
void *extra_spill_area() { return &extra_spill_area_; }
+ void *get_arg() { return arg_; }
+
private:
// NOTE: There is no AsanThread constructor. It is allocated
// via mmap() and *must* be valid in zero-initialized state.
static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) {
AsanThread *t = (AsanThread *)arg;
SetCurrentThread(t);
- return t->ThreadStart(GetTid(), /* signal_thread_is_registered */ nullptr);
+ return t->ThreadStart(GetTid());
}
INTERCEPTOR_WINAPI(HANDLE, CreateThread, LPSECURITY_ATTRIBUTES security,
#ifndef COMPILERRT_ASSEMBLY_H
#define COMPILERRT_ASSEMBLY_H
-#if defined(__POWERPC__) || defined(__powerpc__) || defined(__ppc__)
-#define SEPARATOR @
+#if defined(__APPLE__) && defined(__aarch64__)
+#define SEPARATOR %%
#else
#define SEPARATOR ;
#endif
#define HIDDEN(name) .hidden name
#define LOCAL_LABEL(name) .L_##name
#define FILE_LEVEL_DIRECTIVE
-#if defined(__arm__)
+#if defined(__arm__) || defined(__aarch64__)
#define SYMBOL_IS_FUNC(name) .type name,%function
#else
#define SYMBOL_IS_FUNC(name) .type name,@function
#endif
#define CONST_SECTION .section .rodata
-#if defined(__GNU__) || defined(__FreeBSD__) || defined(__Fuchsia__) || \
+#if defined(__GNU__) || defined(__FreeBSD__) || defined(__Fuchsia__) || \
defined(__linux__)
#define NO_EXEC_STACK_DIRECTIVE .section .note.GNU-stack,"",%progbits
#else
#endif
+#if defined(__arm__) || defined(__aarch64__)
+#define FUNC_ALIGN \
+ .text SEPARATOR \
+ .balign 16 SEPARATOR
+#else
+#define FUNC_ALIGN
+#endif
+
+// BTI and PAC gnu property note
+#define NT_GNU_PROPERTY_TYPE_0 5
+#define GNU_PROPERTY_AARCH64_FEATURE_1_AND 0xc0000000
+#define GNU_PROPERTY_AARCH64_FEATURE_1_BTI 1
+#define GNU_PROPERTY_AARCH64_FEATURE_1_PAC 2
+
+#if defined(__ARM_FEATURE_BTI_DEFAULT)
+#define BTI_FLAG GNU_PROPERTY_AARCH64_FEATURE_1_BTI
+#else
+#define BTI_FLAG 0
+#endif
+
+#if __ARM_FEATURE_PAC_DEFAULT & 3
+#define PAC_FLAG GNU_PROPERTY_AARCH64_FEATURE_1_PAC
+#else
+#define PAC_FLAG 0
+#endif
+
+#define GNU_PROPERTY(type, value) \
+ .pushsection .note.gnu.property, "a" SEPARATOR \
+ .p2align 3 SEPARATOR \
+ .word 4 SEPARATOR \
+ .word 16 SEPARATOR \
+ .word NT_GNU_PROPERTY_TYPE_0 SEPARATOR \
+ .asciz "GNU" SEPARATOR \
+ .word type SEPARATOR \
+ .word 4 SEPARATOR \
+ .word value SEPARATOR \
+ .word 0 SEPARATOR \
+ .popsection
+
+#if BTI_FLAG != 0
+#define BTI_C hint #34
+#define BTI_J hint #36
+#else
+#define BTI_C
+#define BTI_J
+#endif
+
+#if (BTI_FLAG | PAC_FLAG) != 0
+#define GNU_PROPERTY_BTI_PAC \
+ GNU_PROPERTY(GNU_PROPERTY_AARCH64_FEATURE_1_AND, BTI_FLAG | PAC_FLAG)
+#else
+#define GNU_PROPERTY_BTI_PAC
+#endif
+
+#if defined(__clang__) || defined(__GCC_HAVE_DWARF2_CFI_ASM)
+#define CFI_START .cfi_startproc
+#define CFI_END .cfi_endproc
+#else
+#define CFI_START
+#define CFI_END
+#endif
+
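+// Illustrative usage sketch (an assumption, mirroring the hwasan assembly
+// files later in this patch): a hand-written AArch64 routine (hypothetical
+// __example_func) gets its BTI landing pad from the DEFINE_* macro, and the
+// property note is emitted once at the end of the file:
+//
+//   DEFINE_COMPILERRT_OUTLINE_FUNCTION_UNMANGLED(__example_func)
+//     ...
+//   END_COMPILERRT_OUTLINE_FUNCTION(__example_func)
+//
+//   GNU_PROPERTY_BTI_PAC
+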
#if defined(__arm__)
// Determine actual [ARM][THUMB[1][2]] ISA using compiler predefined macros:
#define DEFINE_CODE_STATE
#endif
-#define GLUE2(a, b) a##b
-#define GLUE(a, b) GLUE2(a, b)
+#define GLUE2_(a, b) a##b
+#define GLUE(a, b) GLUE2_(a, b)
+#define GLUE2(a, b) GLUE2_(a, b)
+#define GLUE3_(a, b, c) a##b##c
+#define GLUE3(a, b, c) GLUE3_(a, b, c)
+#define GLUE4_(a, b, c, d) a##b##c##d
+#define GLUE4(a, b, c, d) GLUE4_(a, b, c, d)
+
#define SYMBOL_NAME(name) GLUE(__USER_LABEL_PREFIX__, name)
#ifdef VISIBILITY_HIDDEN
#define DECLARE_SYMBOL_VISIBILITY(name) \
HIDDEN(SYMBOL_NAME(name)) SEPARATOR
+#define DECLARE_SYMBOL_VISIBILITY_UNMANGLED(name) \
+ HIDDEN(name) SEPARATOR
#else
#define DECLARE_SYMBOL_VISIBILITY(name)
+#define DECLARE_SYMBOL_VISIBILITY_UNMANGLED(name)
#endif
#define DEFINE_COMPILERRT_FUNCTION(name) \
DECLARE_FUNC_ENCODING \
name:
+#define DEFINE_COMPILERRT_OUTLINE_FUNCTION_UNMANGLED(name) \
+ DEFINE_CODE_STATE \
+ FUNC_ALIGN \
+ .globl name SEPARATOR \
+ SYMBOL_IS_FUNC(name) SEPARATOR \
+ DECLARE_SYMBOL_VISIBILITY_UNMANGLED(name) SEPARATOR \
+ CFI_START SEPARATOR \
+ DECLARE_FUNC_ENCODING \
+ name: SEPARATOR BTI_C
+
#define DEFINE_COMPILERRT_FUNCTION_ALIAS(name, target) \
.globl SYMBOL_NAME(name) SEPARATOR \
SYMBOL_IS_FUNC(SYMBOL_NAME(name)) SEPARATOR \
#ifdef __ELF__
#define END_COMPILERRT_FUNCTION(name) \
.size SYMBOL_NAME(name), . - SYMBOL_NAME(name)
+#define END_COMPILERRT_OUTLINE_FUNCTION(name) \
+ CFI_END SEPARATOR \
+ .size SYMBOL_NAME(name), . - SYMBOL_NAME(name)
#else
#define END_COMPILERRT_FUNCTION(name)
+#define END_COMPILERRT_OUTLINE_FUNCTION(name) \
+ CFI_END
#endif
#endif // COMPILERRT_ASSEMBLY_H
if (common_flags()->help) parser.PrintFlagDescriptions();
}
-static void HWAsanCheckFailed(const char *file, int line, const char *cond,
- u64 v1, u64 v2) {
- Report("HWAddressSanitizer CHECK failed: %s:%d \"%s\" (0x%zx, 0x%zx)\n", file,
- line, cond, (uptr)v1, (uptr)v2);
- PRINT_CURRENT_STACK_CHECK();
- Die();
+static void CheckUnwind() {
+ GET_FATAL_STACK_TRACE_PC_BP(StackTrace::GetCurrentPc(), GET_CURRENT_FRAME());
+ stack.Print();
}
-static constexpr uptr kMemoryUsageBufferSize = 4096;
-
static void HwasanFormatMemoryUsage(InternalScopedString &s) {
HwasanThreadList &thread_list = hwasanThreadList();
auto thread_stats = thread_list.GetThreadStats();
}
#if SANITIZER_ANDROID
+static constexpr uptr kMemoryUsageBufferSize = 4096;
+
static char *memory_usage_buffer = nullptr;
static void InitMemoryUsage() {
return;
if (!memory_usage_buffer)
InitMemoryUsage();
- InternalScopedString s(kMemoryUsageBufferSize);
+ InternalScopedString s;
HwasanFormatMemoryUsage(s);
internal_strncpy(memory_usage_buffer, s.data(), kMemoryUsageBufferSize - 1);
memory_usage_buffer[kMemoryUsageBufferSize - 1] = '\0';
InitializeFlags();
// Install tool-specific callbacks in sanitizer_common.
- SetCheckFailedCallback(HWAsanCheckFailed);
+ SetCheckUnwindCallback(CheckUnwind);
__sanitizer_set_report_path(common_flags()->log_path);
}
void __hwasan_print_memory_usage() {
- InternalScopedString s(kMemoryUsageBufferSize);
+ InternalScopedString s;
HwasanFormatMemoryUsage(s);
Printf("%s\n", s.data());
}
#ifndef HWASAN_H
#define HWASAN_H
+#include "hwasan_flags.h"
+#include "hwasan_interface_internal.h"
+#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
-#include "hwasan_interface_internal.h"
-#include "hwasan_flags.h"
#include "ubsan/ubsan_platform.h"
#ifndef HWASAN_CONTAINS_UBSAN
typedef u8 tag_t;
+#if defined(__x86_64__)
+// Tags are stored in middle address bits using userspace aliasing.
+constexpr unsigned kAddressTagShift = 39;
+constexpr unsigned kTagBits = 3;
+
+// The alias region is placed next to the shadow so the upper bits of all
+// taggable addresses match the upper bits of the shadow base. This shift
+// value determines which upper bits must match. It has a floor of 44 since the
+// shadow is always 8TB.
+// TODO(morehouse): In alias mode we can shrink the shadow and use a
+// simpler/faster shadow calculation.
+constexpr unsigned kTaggableRegionCheckShift =
+ __sanitizer::Max(kAddressTagShift + kTagBits + 1U, 44U);
+#else
// TBI (Top Byte Ignore) feature of AArch64: bits [63:56] are ignored in address
// translation and can be used to store a tag.
-const unsigned kAddressTagShift = 56;
-const uptr kAddressTagMask = 0xFFUL << kAddressTagShift;
+constexpr unsigned kAddressTagShift = 56;
+constexpr unsigned kTagBits = 8;
+#endif // defined(__x86_64__)
+
+// Mask for extracting tag bits from the lower 8 bits.
+constexpr uptr kTagMask = (1UL << kTagBits) - 1;
+
+// Mask for extracting tag bits from full pointers.
+constexpr uptr kAddressTagMask = kTagMask << kAddressTagShift;
// Minimal alignment of the shadow base address. Determines the space available
// for threads and stack histories. This is an ABI constant.
const unsigned kRecordFPModulus = 1 << (64 - kRecordFPShift + kRecordFPLShift);
static inline tag_t GetTagFromPointer(uptr p) {
- return p >> kAddressTagShift;
+ return (p >> kAddressTagShift) & kTagMask;
}
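+// Worked example (illustrative, not from the source): with the AArch64 TBI
+// layout (kAddressTagShift = 56, kTagBits = 8) a pointer 0x2a00ffff12345678
+// yields tag 0x2a; with the x86-64 aliasing layout (shift 39, 3 tag bits) the
+// tag occupies address bits [41:39] and kTagMask keeps only those 3 bits.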
static inline uptr UntagAddr(uptr tagged_addr) {
if (hwasan_inited) \
stack.Unwind(pc, bp, nullptr, common_flags()->fast_unwind_on_fatal)
-#define GET_FATAL_STACK_TRACE_HERE \
- GET_FATAL_STACK_TRACE_PC_BP(StackTrace::GetCurrentPc(), GET_CURRENT_FRAME())
-
-#define PRINT_CURRENT_STACK_CHECK() \
- { \
- GET_FATAL_STACK_TRACE_HERE; \
- stack.Print(); \
- }
-
void HwasanTSDInit();
void HwasanTSDThreadInit();
static SpinMutex fallback_mutex;
static atomic_uint8_t hwasan_allocator_tagging_enabled;
-static const tag_t kFallbackAllocTag = 0xBB;
-static const tag_t kFallbackFreeTag = 0xBC;
+static constexpr tag_t kFallbackAllocTag = 0xBB & kTagMask;
+static constexpr tag_t kFallbackFreeTag = 0xBC;
enum RightAlignMode {
kRightAlignNever,
atomic_store_relaxed(&hwasan_allocator_tagging_enabled,
!flags()->disable_allocator_tagging);
SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
- allocator.Init(common_flags()->allocator_release_to_os_interval_ms);
+ allocator.Init(common_flags()->allocator_release_to_os_interval_ms,
+ kAliasRegionStart);
for (uptr i = 0; i < sizeof(tail_magic); i++)
tail_magic[i] = GetCurrentThread()->GenerateRandomTag();
}
// Tagging can only be skipped when both tag_in_malloc and tag_in_free are
// false. When tag_in_malloc = false and tag_in_free = true malloc needs to
// retag to 0.
- if ((flags()->tag_in_malloc || flags()->tag_in_free) &&
+ if (InTaggableRegion(reinterpret_cast<uptr>(user_ptr)) &&
+ (flags()->tag_in_malloc || flags()->tag_in_free) &&
atomic_load_relaxed(&hwasan_allocator_tagging_enabled)) {
if (flags()->tag_in_malloc && malloc_bisect(stack, orig_size)) {
tag_t tag = t ? t->GenerateRandomTag() : kFallbackAllocTag;
static bool PointerAndMemoryTagsMatch(void *tagged_ptr) {
CHECK(tagged_ptr);
uptr tagged_uptr = reinterpret_cast<uptr>(tagged_ptr);
+ if (!InTaggableRegion(tagged_uptr))
+ return true;
tag_t mem_tag = *reinterpret_cast<tag_t *>(
MemToShadow(reinterpret_cast<uptr>(UntagPtr(tagged_ptr))));
return PossiblyShortTagMatches(mem_tag, tagged_uptr, 1);
if (!PointerAndMemoryTagsMatch(tagged_ptr))
ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr));
- void *untagged_ptr = UntagPtr(tagged_ptr);
+ void *untagged_ptr = InTaggableRegion(reinterpret_cast<uptr>(tagged_ptr))
+ ? UntagPtr(tagged_ptr)
+ : tagged_ptr;
void *aligned_ptr = reinterpret_cast<void *>(
RoundDownTo(reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment));
Metadata *meta =
Min(TaggedSize(orig_size), (uptr)flags()->max_free_fill_size);
internal_memset(aligned_ptr, flags()->free_fill_byte, fill_size);
}
- if (flags()->tag_in_free && malloc_bisect(stack, 0) &&
- atomic_load_relaxed(&hwasan_allocator_tagging_enabled))
+ if (InTaggableRegion(reinterpret_cast<uptr>(tagged_ptr)) &&
+ flags()->tag_in_free && malloc_bisect(stack, 0) &&
+ atomic_load_relaxed(&hwasan_allocator_tagging_enabled)) {
+ // Always store full 8-bit tags on free to maximize UAF detection.
+ tag_t tag = t ? t->GenerateRandomTag(/*num_bits=*/8) : kFallbackFreeTag;
TagMemoryAligned(reinterpret_cast<uptr>(aligned_ptr), TaggedSize(orig_size),
- t ? t->GenerateRandomTag() : kFallbackFreeTag);
+ tag);
+ }
if (t) {
allocator.Deallocate(t->allocator_cache(), aligned_ptr);
if (auto *ha = t->heap_allocations())
// OOM error is already taken care of by HwasanAllocate.
return errno_ENOMEM;
CHECK(IsAligned((uptr)ptr, alignment));
- *(void **)UntagPtr(memptr) = ptr;
+ *memptr = ptr;
return 0;
}
#ifndef HWASAN_ALLOCATOR_H
#define HWASAN_ALLOCATOR_H
+#include "hwasan.h"
+#include "hwasan_interface_internal.h"
+#include "hwasan_poisoning.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_ring_buffer.h"
-#include "hwasan_poisoning.h"
#if !defined(__aarch64__) && !defined(__x86_64__)
#error Unsupported platform
struct AP64 {
static const uptr kSpaceBeg = ~0ULL;
+
+#if defined(__x86_64__)
+ static const uptr kSpaceSize = 1ULL << kAddressTagShift;
+#else
static const uptr kSpaceSize = 0x2000000000ULL;
+#endif
static const uptr kMetadataSize = sizeof(Metadata);
typedef __sanitizer::VeryDenseSizeClassMap SizeClassMap;
using AddressSpaceView = LocalAddressSpaceView;
void GetAllocatorStats(AllocatorStatCounters s);
+inline bool InTaggableRegion(uptr addr) {
+#if defined(__x86_64__)
+ // Aliases are mapped next to shadow so that the upper bits match the shadow
+ // base.
+ return (addr >> kTaggableRegionCheckShift) ==
+ (__hwasan_shadow_memory_dynamic_address >> kTaggableRegionCheckShift);
+#endif
+ return true;
+}
+
} // namespace __hwasan
#endif // HWASAN_ALLOCATOR_H
#ifndef HWASAN_CHECKS_H
#define HWASAN_CHECKS_H
+#include "hwasan_allocator.h"
#include "hwasan_mapping.h"
#include "sanitizer_common/sanitizer_common.h"
template <ErrorAction EA, AccessType AT, unsigned LogSize>
__attribute__((always_inline, nodebug)) static void CheckAddress(uptr p) {
+ if (!InTaggableRegion(p))
+ return;
uptr ptr_raw = p & ~kAddressTagMask;
tag_t mem_tag = *(tag_t *)MemToShadow(ptr_raw);
if (UNLIKELY(!PossiblyShortTagMatches(mem_tag, p, 1 << LogSize))) {
template <ErrorAction EA, AccessType AT>
__attribute__((always_inline, nodebug)) static void CheckAddressSized(uptr p,
uptr sz) {
- if (sz == 0)
+ if (sz == 0 || !InTaggableRegion(p))
return;
tag_t ptr_tag = GetTagFromPointer(p);
uptr ptr_raw = p & ~kAddressTagMask;
///
//===----------------------------------------------------------------------===//
-#include "hwasan.h"
#include "hwasan_dynamic_shadow.h"
-#include "hwasan_mapping.h"
-#include "sanitizer_common/sanitizer_common.h"
-#include "sanitizer_common/sanitizer_posix.h"
#include <elf.h>
#include <link.h>
+#include "hwasan.h"
+#include "hwasan_mapping.h"
+#include "hwasan_thread_list.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_posix.h"
+
// The code in this file needs to run in an unrelocated binary. It should not
// access any external symbol, including its own non-hidden globals.
void InitShadowGOT() {}
uptr FindDynamicShadowStart(uptr shadow_size_bytes) {
+#if defined(__x86_64__)
+ constexpr uptr kAliasSize = 1ULL << kAddressTagShift;
+ constexpr uptr kNumAliases = 1ULL << kTagBits;
+ return MapDynamicShadowAndAliases(shadow_size_bytes, kAliasSize, kNumAliases,
+ RingBufferSize());
+#endif
return MapDynamicShadow(shadow_size_bytes, kShadowScale, kShadowBaseAlignment,
kHighMemEnd);
}
#ifndef HWASAN_FLAGS_H
#define HWASAN_FLAGS_H
+#include "sanitizer_common/sanitizer_internal_defs.h"
+
namespace __hwasan {
struct Flags {
HWASAN_FLAG(bool, malloc_bisect_dump, false,
"Print all allocations within [malloc_bisect_left, "
"malloc_bisect_right] range ")
+
+
+// Exit if we fail to enable the AArch64 kernel ABI relaxation, which allows
+// tagged pointers in syscalls. This is the default, but being able to disable
+// that behaviour is useful for running the testsuite on more platforms (the
+// testsuite can run since we manually ensure any pointer arguments to syscalls
+// are untagged before the call).
+HWASAN_FLAG(bool, fail_without_syscall_abi, true,
+ "Exit if fail to request relaxed syscall ABI.")
ThreadStartArg *A = reinterpret_cast<ThreadStartArg *> (MmapOrDie(
GetPageSizeCached(), "pthread_create"));
*A = {callback, param};
- int res = REAL(pthread_create)(UntagPtr(th), UntagPtr(attr),
- &HwasanThreadStartFunc, A);
+ int res = REAL(pthread_create)(th, attr, &HwasanThreadStartFunc, A);
return res;
}
#include "sanitizer_common/sanitizer_asm.h"
+#include "builtins/assembly.h"
#if defined(__linux__) && HWASAN_WITH_INTERCEPTORS
#define COMMON_INTERCEPTOR_SPILL_AREA __hwasan_extra_spill_area
#endif
NO_EXEC_STACK_DIRECTIVE
+
+GNU_PROPERTY_BTI_PAC
void *__hwasan_memset(void *s, int c, uptr n);
SANITIZER_INTERFACE_ATTRIBUTE
void *__hwasan_memmove(void *dest, const void *src, uptr n);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_set_error_report_callback(void (*callback)(const char *));
} // extern "C"
#endif // HWASAN_INTERFACE_INTERNAL_H
uptr kHighMemStart;
uptr kHighMemEnd;
+uptr kAliasRegionStart; // Always 0 on non-x86.
+
static void PrintRange(uptr start, uptr end, const char *name) {
Printf("|| [%p, %p] || %.*s ||\n", (void *)start, (void *)end, 10, name);
}
#define PR_GET_TAGGED_ADDR_CTRL 56
#define PR_TAGGED_ADDR_ENABLE (1UL << 0)
// Check we're running on a kernel that can use the tagged address ABI.
- if (internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0) == (uptr)-1 &&
- errno == EINVAL) {
-#if SANITIZER_ANDROID
+ int local_errno = 0;
+ if (internal_iserror(internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0),
+ &local_errno) &&
+ local_errno == EINVAL) {
+#if SANITIZER_ANDROID || defined(__x86_64__)
// Some older Android kernels have the tagged pointer ABI on
// unconditionally, and hence don't have the tagged-addr prctl while still
// allow the ABI.
// case.
return;
#else
- Printf(
- "FATAL: "
- "HWAddressSanitizer requires a kernel with tagged address ABI.\n");
- Die();
+ if (flags()->fail_without_syscall_abi) {
+ Printf(
+ "FATAL: "
+ "HWAddressSanitizer requires a kernel with tagged address ABI.\n");
+ Die();
+ }
#endif
}
// Turn on the tagged address ABI.
- if (internal_prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0) ==
- (uptr)-1 ||
- !internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0)) {
+ if ((internal_iserror(internal_prctl(PR_SET_TAGGED_ADDR_CTRL,
+ PR_TAGGED_ADDR_ENABLE, 0, 0, 0)) ||
+ !internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0)) &&
+ flags()->fail_without_syscall_abi) {
Printf(
"FATAL: HWAddressSanitizer failed to enable tagged address syscall "
"ABI.\nSuggest check `sysctl abi.tagged_addr_disabled` "
// High memory starts where allocated shadow allows.
kHighMemStart = ShadowToMem(kHighShadowStart);
+#if defined(__x86_64__)
+ constexpr uptr kAliasRegionOffset = 1ULL << (kTaggableRegionCheckShift - 1);
+ kAliasRegionStart =
+ __hwasan_shadow_memory_dynamic_address + kAliasRegionOffset;
+
+ CHECK_EQ(kAliasRegionStart >> kTaggableRegionCheckShift,
+ __hwasan_shadow_memory_dynamic_address >> kTaggableRegionCheckShift);
+ CHECK_EQ(
+ (kAliasRegionStart + kAliasRegionOffset - 1) >> kTaggableRegionCheckShift,
+ __hwasan_shadow_memory_dynamic_address >> kTaggableRegionCheckShift);
+#endif
+
// Check the sanity of the defined memory ranges (there might be gaps).
CHECK_EQ(kHighMemStart % GetMmapGranularity(), 0);
CHECK_GT(kHighMemStart, kHighShadowEnd);
}
bool MemIsApp(uptr p) {
+#if !defined(__x86_64__) // Memory outside the alias range has non-zero tags.
CHECK(GetTagFromPointer(p) == 0);
+#endif
return p >= kHighMemStart || (p >= kLowMemStart && p <= kLowMemEnd);
}
extern uptr kHighMemStart;
extern uptr kHighMemEnd;
+extern uptr kAliasRegionStart;
+
inline uptr MemToShadow(uptr untagged_addr) {
return (untagged_addr >> kShadowScale) +
__hwasan_shadow_memory_dynamic_address;
void *__hwasan_memset(void *block, int c, uptr size) {
CheckAddressSized<ErrorAction::Recover, AccessType::Store>(
reinterpret_cast<uptr>(block), size);
- return memset(UntagPtr(block), c, size);
+ return memset(block, c, size);
}
void *__hwasan_memcpy(void *to, const void *from, uptr size) {
reinterpret_cast<uptr>(to), size);
CheckAddressSized<ErrorAction::Recover, AccessType::Load>(
reinterpret_cast<uptr>(from), size);
- return memcpy(UntagPtr(to), UntagPtr(from), size);
+ return memcpy(to, from, size);
}
void *__hwasan_memmove(void *to, const void *from, uptr size) {
void *res = hwasan_malloc(size, &stack);\
if (!nothrow && UNLIKELY(!res)) ReportOutOfMemory(size, &stack);\
return res
+#define OPERATOR_NEW_ALIGN_BODY(nothrow) \
+ GET_MALLOC_STACK_TRACE; \
+ void *res = hwasan_aligned_alloc(static_cast<uptr>(align), size, &stack); \
+ if (!nothrow && UNLIKELY(!res)) \
+ ReportOutOfMemory(size, &stack); \
+ return res
#define OPERATOR_DELETE_BODY \
GET_MALLOC_STACK_TRACE; \
// Fake std::nothrow_t to avoid including <new>.
namespace std {
struct nothrow_t {};
+ enum class align_val_t : size_t {};
} // namespace std
void *operator new[](size_t size, std::nothrow_t const&) {
OPERATOR_NEW_BODY(true /*nothrow*/);
}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void *operator new(
+ size_t size, std::align_val_t align) {
+ OPERATOR_NEW_ALIGN_BODY(false /*nothrow*/);
+}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void *operator new[](
+ size_t size, std::align_val_t align) {
+ OPERATOR_NEW_ALIGN_BODY(false /*nothrow*/);
+}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void *operator new(
+ size_t size, std::align_val_t align, std::nothrow_t const &) {
+ OPERATOR_NEW_ALIGN_BODY(true /*nothrow*/);
+}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void *operator new[](
+ size_t size, std::align_val_t align, std::nothrow_t const &) {
+ OPERATOR_NEW_ALIGN_BODY(true /*nothrow*/);
+}
INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void operator delete(void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY; }
void operator delete[](void *ptr, std::nothrow_t const&) {
OPERATOR_DELETE_BODY;
}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete(
+ void *ptr, std::align_val_t align) NOEXCEPT {
+ OPERATOR_DELETE_BODY;
+}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete[](
+ void *ptr, std::align_val_t) NOEXCEPT {
+ OPERATOR_DELETE_BODY;
+}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete(
+ void *ptr, std::align_val_t, std::nothrow_t const &) NOEXCEPT {
+ OPERATOR_DELETE_BODY;
+}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete[](
+ void *ptr, std::align_val_t, std::nothrow_t const &) NOEXCEPT {
+ OPERATOR_DELETE_BODY;
+}
#endif // OPERATOR_NEW_BODY
}
~ScopedReport() {
+ void (*report_cb)(const char *);
{
BlockingMutexLock lock(&error_message_lock_);
- if (fatal)
- SetAbortMessage(error_message_.data());
+ report_cb = error_report_callback_;
error_message_ptr_ = nullptr;
}
+ if (report_cb)
+ report_cb(error_message_.data());
+ if (fatal)
+ SetAbortMessage(error_message_.data());
if (common_flags()->print_module_map >= 2 ||
(fatal && common_flags()->print_module_map))
DumpProcessMap();
// overwrite old trailing '\0', keep new trailing '\0' untouched.
internal_memcpy(&(*error_message_ptr_)[old_size - 1], msg, len);
}
+
+ static void SetErrorReportCallback(void (*callback)(const char *)) {
+ BlockingMutexLock lock(&error_message_lock_);
+ error_report_callback_ = callback;
+ }
+
private:
ScopedErrorReportLock error_report_lock_;
InternalMmapVector<char> error_message_;
static InternalMmapVector<char> *error_message_ptr_;
static BlockingMutex error_message_lock_;
+ static void (*error_report_callback_)(const char *);
};
InternalMmapVector<char> *ScopedReport::error_message_ptr_;
BlockingMutex ScopedReport::error_message_lock_;
+void (*ScopedReport::error_report_callback_)(const char *);
// If there is an active ScopedReport, append to its error message.
void AppendToErrorMessageBuffer(const char *buffer) {
// We didn't find any locals. Most likely we don't have symbols, so dump
// the information that we have for offline analysis.
- InternalScopedString frame_desc(GetPageSizeCached() * 2);
+ InternalScopedString frame_desc;
Printf("Previously allocated frames:\n");
for (uptr i = 0; i < frames; i++) {
const uptr *record_addr = &(*sa)[i];
RoundDownTo(reinterpret_cast<uptr>(tag_ptr), row_len));
tag_t *beg_row = center_row_beg - row_len * (num_rows / 2);
tag_t *end_row = center_row_beg + row_len * ((num_rows + 1) / 2);
- InternalScopedString s(GetPageSizeCached() * 8);
+ InternalScopedString s;
for (tag_t *row = beg_row; row < end_row; row += row_len) {
s.append("%s", row == center_row_beg ? "=>" : " ");
s.append("%p:", row);
GetStackTraceFromId(chunk.GetAllocStackId()).Print();
}
- InternalScopedString s(GetPageSizeCached() * 8);
+ InternalScopedString s;
CHECK_GT(tail_size, 0U);
CHECK_LT(tail_size, kShadowAlignment);
u8 *tail = reinterpret_cast<u8*>(untagged_addr + orig_size);
}
} // namespace __hwasan
+
+void __hwasan_set_error_report_callback(void (*callback)(const char *)) {
+ __hwasan::ScopedReport::SetErrorReportCallback(callback);
+}
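+
+// Illustrative usage sketch (an assumption): a client can capture the report
+// text before HWASan terminates the process, e.g.
+//
+//   static void SaveReport(const char *report) { fputs(report, stderr); }
+//   ...
+//   __hwasan_set_error_report_callback(SaveReport);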
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_asm.h"
+#include "builtins/assembly.h"
#if HWASAN_WITH_INTERCEPTORS && defined(__aarch64__)
#include "sanitizer_common/sanitizer_platform.h"
ASM_TYPE_FUNCTION(__interceptor_setjmp)
__interceptor_setjmp:
CFI_STARTPROC
+ BTI_C
mov x1, #0
b __interceptor_sigsetjmp
CFI_ENDPROC
ASM_TYPE_FUNCTION(__interceptor_setjmp_bionic)
__interceptor_setjmp_bionic:
CFI_STARTPROC
+ BTI_C
mov x1, #1
b __interceptor_sigsetjmp
CFI_ENDPROC
ASM_TYPE_FUNCTION(__interceptor_sigsetjmp)
__interceptor_sigsetjmp:
CFI_STARTPROC
+ BTI_C
stp x19, x20, [x0, #0<<3]
stp x21, x22, [x0, #2<<3]
stp x23, x24, [x0, #4<<3]
// We do not need executable stack.
NO_EXEC_STACK_DIRECTIVE
+
+GNU_PROPERTY_BTI_PAC
#include "sanitizer_common/sanitizer_asm.h"
+#include "builtins/assembly.h"
// The content of this file is AArch64-only:
#if defined(__aarch64__)
.global __hwasan_tag_mismatch
.type __hwasan_tag_mismatch, %function
__hwasan_tag_mismatch:
+ BTI_J
+
// Compute the granule position one past the end of the access.
mov x16, #1
and x17, x1, #0xf
.type __hwasan_tag_mismatch_v2, %function
__hwasan_tag_mismatch_v2:
CFI_STARTPROC
+ BTI_J
// Set the CFA to be the return address for caller of __hwasan_check_*. Note
// that we do not emit CFI predicates to describe the contents of this stack
// We do not need executable stack.
NO_EXEC_STACK_DIRECTIVE
+
+GNU_PROPERTY_BTI_PAC
}
void Thread::Init(uptr stack_buffer_start, uptr stack_buffer_size) {
+ CHECK_EQ(0, unique_id_); // try to catch bad stack reuse
+ CHECK_EQ(0, stack_top_);
+ CHECK_EQ(0, stack_bottom_);
+
static u64 unique_id;
unique_id_ = unique_id++;
if (auto sz = flags()->heap_history_size)
}
// Generate a (pseudo-)random non-zero tag.
-tag_t Thread::GenerateRandomTag() {
+tag_t Thread::GenerateRandomTag(uptr num_bits) {
+ DCHECK_GT(num_bits, 0);
if (tagging_disabled_) return 0;
tag_t tag;
+ const uptr tag_mask = (1ULL << num_bits) - 1;
do {
if (flags()->random_tags) {
if (!random_buffer_)
random_buffer_ = random_state_ = xorshift(random_state_);
CHECK(random_buffer_);
- tag = random_buffer_ & 0xFF;
- random_buffer_ >>= 8;
+ tag = random_buffer_ & tag_mask;
+ random_buffer_ >>= num_bits;
} else {
- tag = random_state_ = (random_state_ + 1) & 0xFF;
+ random_state_ += 1;
+ tag = random_state_ & tag_mask;
}
} while (!tag);
return tag;
HeapAllocationsRingBuffer *heap_allocations() { return heap_allocations_; }
StackAllocationsRingBuffer *stack_allocations() { return stack_allocations_; }
- tag_t GenerateRandomTag();
+ tag_t GenerateRandomTag(uptr num_bits = kTagBits);
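+ // Illustrative note (an assumption): callers in this patch use the default
+ // width on allocation and a full byte on deallocation, e.g.
+ //   tag_t alloc_tag = t->GenerateRandomTag();                // kTagBits wide
+ //   tag_t free_tag  = t->GenerateRandomTag(/*num_bits=*/8);  // full 8 bits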
void DisableTagging() { tagging_disabled_++; }
void EnableTagging() { tagging_disabled_--; }
HeapAllocationsRingBuffer *heap_allocations_;
StackAllocationsRingBuffer *stack_allocations_;
- Thread *next_; // All live threads form a linked list.
-
u64 unique_id_; // counting from zero.
u32 tagging_disabled_; // if non-zero, malloc uses zero tag in this thread.
return 0;
}
-struct ThreadListHead {
- Thread *list_;
-
- ThreadListHead() : list_(nullptr) {}
-
- void Push(Thread *t) {
- t->next_ = list_;
- list_ = t;
- }
-
- Thread *Pop() {
- Thread *t = list_;
- if (t)
- list_ = t->next_;
- return t;
- }
-
- void Remove(Thread *t) {
- Thread **cur = &list_;
- while (*cur != t) cur = &(*cur)->next_;
- CHECK(*cur && "thread not found");
- *cur = (*cur)->next_;
- }
-
- template <class CB>
- void ForEach(CB cb) {
- Thread *t = list_;
- while (t) {
- cb(t);
- t = t->next_;
- }
- }
-};
-
struct ThreadStats {
uptr n_live_threads;
uptr total_stack_size;
}
Thread *CreateCurrentThread() {
- Thread *t;
+ Thread *t = nullptr;
{
- SpinMutexLock l(&list_mutex_);
- t = free_list_.Pop();
- if (t) {
- uptr start = (uptr)t - ring_buffer_size_;
- internal_memset((void *)start, 0, ring_buffer_size_ + sizeof(Thread));
- } else {
- t = AllocThread();
+ SpinMutexLock l(&free_list_mutex_);
+ if (!free_list_.empty()) {
+ t = free_list_.back();
+ free_list_.pop_back();
}
- live_list_.Push(t);
+ }
+ if (t) {
+ uptr start = (uptr)t - ring_buffer_size_;
+ internal_memset((void *)start, 0, ring_buffer_size_ + sizeof(Thread));
+ } else {
+ t = AllocThread();
+ }
+ {
+ SpinMutexLock l(&live_list_mutex_);
+ live_list_.push_back(t);
}
t->Init((uptr)t - ring_buffer_size_, ring_buffer_size_);
AddThreadStats(t);
ReleaseMemoryPagesToOS(start, start + thread_alloc_size_);
}
+ void RemoveThreadFromLiveList(Thread *t) {
+ SpinMutexLock l(&live_list_mutex_);
+ for (Thread *&t2 : live_list_)
+ if (t2 == t) {
+ // To remove t2, copy the last element of the list into t2's position, and
+ // pop_back(). This works even if t2 is itself the last element.
+ t2 = live_list_.back();
+ live_list_.pop_back();
+ return;
+ }
+ CHECK(0 && "thread not found in live list");
+ }
+
void ReleaseThread(Thread *t) {
RemoveThreadStats(t);
t->Destroy();
- SpinMutexLock l(&list_mutex_);
- live_list_.Remove(t);
- free_list_.Push(t);
DontNeedThread(t);
+ RemoveThreadFromLiveList(t);
+ SpinMutexLock l(&free_list_mutex_);
+ free_list_.push_back(t);
}
Thread *GetThreadByBufferAddress(uptr p) {
template <class CB>
void VisitAllLiveThreads(CB cb) {
- SpinMutexLock l(&list_mutex_);
- live_list_.ForEach(cb);
+ SpinMutexLock l(&live_list_mutex_);
+ for (Thread *t : live_list_) cb(t);
}
void AddThreadStats(Thread *t) {
private:
Thread *AllocThread() {
+ SpinMutexLock l(&free_space_mutex_);
uptr align = ring_buffer_size_ * 2;
CHECK(IsAligned(free_space_, align));
Thread *t = (Thread *)(free_space_ + ring_buffer_size_);
return t;
}
+ SpinMutex free_space_mutex_;
uptr free_space_;
uptr free_space_end_;
uptr ring_buffer_size_;
uptr thread_alloc_size_;
- ThreadListHead free_list_;
- ThreadListHead live_list_;
- SpinMutex list_mutex_;
+ SpinMutex free_list_mutex_;
+ InternalMmapVector<Thread *> free_list_;
+ SpinMutex live_list_mutex_;
+ InternalMmapVector<Thread *> live_list_;
ThreadStats stats_;
SpinMutex stats_mutex_;
// Tell the tools to write their reports to the provided file descriptor
// (casted to void *).
void __sanitizer_set_report_fd(void *fd);
+// Get the current full report file path, if a path was specified by
+// an earlier call to __sanitizer_set_report_path. Returns null otherwise.
+const char *__sanitizer_get_report_path();
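+// Illustrative usage sketch (an assumption):
+//   __sanitizer_set_report_path("/tmp/my_report");
+//   const char *path = __sanitizer_get_report_path();  // full path, or null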
// Notify the tools that the sandbox is going to be turned on. The reserved
// parameter will be used in the future to hold a structure with functions
#endif
typedef uint16_t dfsan_label;
+typedef uint32_t dfsan_origin;
/// Stores information associated with a specific label identifier. A label
/// may be a base label created using dfsan_create_label, with associated
/// value.
dfsan_label dfsan_get_label(long data);
+/// Retrieves the immediate origin associated with the given data. The returned
+/// origin may point to another origin.
+///
+/// The type of 'data' is arbitrary.
+dfsan_origin dfsan_get_origin(long data);
+
/// Retrieves the label associated with the data at the given address.
dfsan_label dfsan_read_label(const void *addr, size_t size);
void dfsan_weak_hook_strncmp(void *caller_pc, const char *s1, const char *s2,
size_t n, dfsan_label s1_label,
dfsan_label s2_label, dfsan_label n_label);
+
+/// Prints the origin trace of the label at the address addr to stderr. It also
+/// prints the description at the beginning of the trace. If origin tracking is not
+/// on, or the address is not labeled, it prints nothing.
+void dfsan_print_origin_trace(const void *addr, const char *description);
+
+/// Retrieves the very first origin associated with the data at the given
+/// address.
+dfsan_origin dfsan_get_init_origin(const void *addr);
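+
+// Illustrative usage sketch (an assumption): with -fsanitize=dataflow and
+// origin tracking enabled, the origin chain of a labeled value can be dumped:
+//
+//   int x = 1;
+//   dfsan_set_label(dfsan_create_label("x", /*userdata=*/0), &x, sizeof(x));
+//   dfsan_print_origin_trace(&x, "origin of x");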
#ifdef __cplusplus
} // extern "C"
* accessed through the pointer in x, or -1 if the whole range is good. */
intptr_t __hwasan_test_shadow(const volatile void *x, size_t size);
+ /* Sets the callback function to be called during HWASan error reporting. */
+ void __hwasan_set_error_report_callback(void (*callback)(const char *));
+
int __sanitizer_posix_memalign(void **memptr, size_t alignment, size_t size);
void * __sanitizer_memalign(size_t alignment, size_t size);
void * __sanitizer_aligned_alloc(size_t alignment, size_t size);
/// \returns Default options string.
const char *__memprof_default_options(void);
+/// Prints the memory profile to the current profile file.
+///
+/// \returns 0 on success.
+int __memprof_profile_dump(void);
+
#ifdef __cplusplus
} // extern "C"
#endif
// the corresponding __tsan_mutex_post_lock annotation.
static const unsigned __tsan_mutex_recursive_unlock = 1 << 7;
+// Convenient composed constants.
+static const unsigned __tsan_mutex_try_read_lock =
+ __tsan_mutex_read_lock | __tsan_mutex_try_lock;
+static const unsigned __tsan_mutex_try_read_lock_failed =
+ __tsan_mutex_try_read_lock | __tsan_mutex_try_lock_failed;
+
// Annotate creation of a mutex.
// Supported flags: mutex creation flags.
void __tsan_mutex_create(void *addr, unsigned flags);
// and freed by __tsan_destroy_fiber.
// - TSAN context of current fiber or thread can be obtained
// by calling __tsan_get_current_fiber.
-// - __tsan_switch_to_fiber should be called immediatly before switch
+// - __tsan_switch_to_fiber should be called immediately before switch
// to fiber, such as call of swapcontext.
// - Fiber name can be set by __tsan_set_fiber_name.
void *__tsan_get_current_fiber(void);
// Do not establish a happens-before relation between fibers
static const unsigned __tsan_switch_to_fiber_no_sync = 1 << 0;
+// User-provided callback invoked on TSan initialization.
+void __tsan_on_initialize();
+
+// User-provided callback invoked on TSan shutdown.
+// `failed` - Nonzero if TSan did detect issues, zero otherwise.
+// Return `0` if TSan should exit as if no issues were detected. Return nonzero
+// if TSan should exit as if issues were detected.
+int __tsan_on_finalize(int failed);
+
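+// Illustrative usage sketch (an assumption): a program opts in by defining
+// the hooks with external C linkage:
+//
+//   extern "C" void __tsan_on_initialize() { /* runs once at TSan init */ }
+//   extern "C" int __tsan_on_finalize(int failed) { return failed; }
+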
#ifdef __cplusplus
} // extern "C"
#endif
#endif
// Part of ABI, do not change.
-// https://github.com/llvm/llvm-project/blob/master/libcxx/include/atomic
+// https://github.com/llvm/llvm-project/blob/main/libcxx/include/atomic
typedef enum {
__tsan_memory_order_relaxed,
__tsan_memory_order_consume,
return addr && (func == wrapper);
}
-// Android and Solaris do not have dlvsym
-#if !SANITIZER_ANDROID && !SANITIZER_SOLARIS
+// dlvsym is a GNU extension supported by some other platforms.
+#if SANITIZER_GLIBC || SANITIZER_FREEBSD || SANITIZER_NETBSD
static void *GetFuncAddr(const char *name, const char *ver) {
return dlvsym(RTLD_NEXT, name, ver);
}
*ptr_to_real = (uptr)addr;
return addr && (func == wrapper);
}
-#endif // !SANITIZER_ANDROID
+#endif // SANITIZER_GLIBC || SANITIZER_FREEBSD || SANITIZER_NETBSD
} // namespace __interception
(::__interception::uptr) & (func), \
(::__interception::uptr) & WRAP(func))
-// Android and Solaris do not have dlvsym
-#if !SANITIZER_ANDROID && !SANITIZER_SOLARIS
+// dlvsym is a GNU extension supported by some other platforms.
+#if SANITIZER_GLIBC || SANITIZER_FREEBSD || SANITIZER_NETBSD
#define INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver) \
::__interception::InterceptFunction( \
#func, symver, \
#else
#define INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver) \
INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func)
-#endif // !SANITIZER_ANDROID && !SANITIZER_SOLARIS
+#endif // SANITIZER_GLIBC || SANITIZER_FREEBSD || SANITIZER_NETBSD
#endif // INTERCEPTION_LINUX_H
#endif // SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD ||
static const int kAddressLength = FIRST_32_SECOND_64(4, 8);
static const int kJumpInstructionLength = 5;
static const int kShortJumpInstructionLength = 2;
-static const int kIndirectJumpInstructionLength = 6;
+UNUSED static const int kIndirectJumpInstructionLength = 6;
static const int kBranchLength =
FIRST_32_SECOND_64(kJumpInstructionLength, kIndirectJumpInstructionLength);
static const int kDirectBranchLength = kBranchLength + kAddressLength;
return si.dwAllocationGranularity;
}
-static uptr RoundUpTo(uptr size, uptr boundary) {
+UNUSED static uptr RoundUpTo(uptr size, uptr boundary) {
return (size + boundary - 1) & ~(boundary - 1);
}
uptr max_size;
};
-static const uptr kTrampolineScanLimitRange = 1 << 31; // 2 gig
+UNUSED static const uptr kTrampolineScanLimitRange = 1 << 31; // 2 gig
static const int kMaxTrampolineRegion = 1024;
static TrampolineMemoryRegion TrampolineRegions[kMaxTrampolineRegion];
void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
uptr alignment) {
- RegisterDeallocation(p);
if (new_size > max_malloc_size) {
- allocator.Deallocate(GetAllocatorCache(), p);
- return ReportAllocationSizeTooBig(new_size, stack);
+ ReportAllocationSizeTooBig(new_size, stack);
+ return nullptr;
}
- p = allocator.Reallocate(GetAllocatorCache(), p, new_size, alignment);
- RegisterAllocation(stack, p, new_size);
- return p;
+ RegisterDeallocation(p);
+ void *new_p =
+ allocator.Reallocate(GetAllocatorCache(), p, new_size, alignment);
+ if (new_p)
+ RegisterAllocation(stack, new_p, new_size);
+ else if (new_size != 0)
+ RegisterAllocation(stack, p, new_size);
+ return new_p;
}
void GetAllocatorCacheRange(uptr *begin, uptr *end) {
return kIgnoreObjectInvalid;
}
}
+
+void GetAdditionalThreadContextPtrs(ThreadContextBase *tctx, void *ptrs) {
+ // This function can be used to treat memory reachable from `tctx` as live.
+ // This is useful for threads that have been created but not yet started.
+
+  // This is currently a no-op because the LSan `pthread_create()` interceptor
+  // blocks until the child thread starts, which keeps the thread's `arg`
+  // pointer live.
+}
+
} // namespace __lsan
using namespace __lsan;
};
#if defined(__mips64) || defined(__aarch64__) || defined(__i386__) || \
- defined(__arm__)
+ defined(__arm__) || SANITIZER_RISCV64
template <typename AddressSpaceViewTy>
struct AP32 {
static const uptr kSpaceBeg = 0;
if (flags()->log_threads) Report(__VA_ARGS__); \
} while (0)
-ALIGNED(64) static char suppression_placeholder[sizeof(SuppressionContext)];
-static SuppressionContext *suppression_ctx = nullptr;
+class LeakSuppressionContext {
+ bool parsed = false;
+ SuppressionContext context;
+ bool suppressed_stacks_sorted = true;
+ InternalMmapVector<u32> suppressed_stacks;
+
+ Suppression *GetSuppressionForAddr(uptr addr);
+ void LazyInit();
+
+ public:
+  LeakSuppressionContext(const char *suppression_types[],
+                         int suppression_types_num)
+      : context(suppression_types, suppression_types_num) {}
+
+ Suppression *GetSuppressionForStack(u32 stack_trace_id);
+
+ const InternalMmapVector<u32> &GetSortedSuppressedStacks() {
+ if (!suppressed_stacks_sorted) {
+ suppressed_stacks_sorted = true;
+ SortAndDedup(suppressed_stacks);
+ }
+ return suppressed_stacks;
+ }
+ void PrintMatchedSuppressions();
+};
+
+ALIGNED(64) static char suppression_placeholder[sizeof(LeakSuppressionContext)];
+static LeakSuppressionContext *suppression_ctx = nullptr;
static const char kSuppressionLeak[] = "leak";
static const char *kSuppressionTypes[] = { kSuppressionLeak };
static const char kStdSuppressions[] =
void InitializeSuppressions() {
CHECK_EQ(nullptr, suppression_ctx);
suppression_ctx = new (suppression_placeholder)
- SuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
- suppression_ctx->ParseFromFile(flags()->suppressions);
- if (&__lsan_default_suppressions)
- suppression_ctx->Parse(__lsan_default_suppressions());
- suppression_ctx->Parse(kStdSuppressions);
+ LeakSuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
+}
+
+void LeakSuppressionContext::LazyInit() {
+ if (!parsed) {
+ parsed = true;
+ context.ParseFromFile(flags()->suppressions);
+ if (&__lsan_default_suppressions)
+ context.Parse(__lsan_default_suppressions());
+ context.Parse(kStdSuppressions);
+ }
}
-static SuppressionContext *GetSuppressionContext() {
+static LeakSuppressionContext *GetSuppressionContext() {
CHECK(suppression_ctx);
return suppression_ctx;
}
pid_t, void (*cb)(void *, void *, uptr, void *), void *);
#endif
+static void ProcessThreadRegistry(Frontier *frontier) {
+ InternalMmapVector<uptr> ptrs;
+ GetThreadRegistryLocked()->RunCallbackForEachThreadLocked(
+ GetAdditionalThreadContextPtrs, &ptrs);
+
+ for (uptr i = 0; i < ptrs.size(); ++i) {
+ void *ptr = reinterpret_cast<void *>(ptrs[i]);
+ uptr chunk = PointsIntoChunk(ptr);
+ if (!chunk)
+ continue;
+ LsanMetadata m(chunk);
+ if (!m.allocated())
+ continue;
+
+ // Mark as reachable and add to frontier.
+ LOG_POINTERS("Treating pointer %p from ThreadContext as reachable\n", ptr);
+ m.set_tag(kReachable);
+ frontier->push_back(chunk);
+ }
+}
+
// Scans thread data (stacks and TLS) for heap pointers.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
Frontier *frontier) {
__libc_iterate_dynamic_tls(os_id, cb, frontier);
#else
if (dtls && !DTLSInDestruction(dtls)) {
- for (uptr j = 0; j < dtls->dtv_size; ++j) {
- uptr dtls_beg = dtls->dtv[j].beg;
- uptr dtls_end = dtls_beg + dtls->dtv[j].size;
+ ForEachDVT(dtls, [&](const DTLS::DTV &dtv, int id) {
+ uptr dtls_beg = dtv.beg;
+ uptr dtls_end = dtls_beg + dtv.size;
if (dtls_beg < dtls_end) {
- LOG_THREADS("DTLS %zu at %p-%p.\n", j, dtls_beg, dtls_end);
+ LOG_THREADS("DTLS %zu at %p-%p.\n", id, dtls_beg, dtls_end);
ScanRangeForPointers(dtls_beg, dtls_end, frontier, "DTLS",
kReachable);
}
- }
+ });
} else {
// We are handling a thread with DTLS under destruction. Log about
// this and continue.
#endif
}
}
+
+ // Add pointers reachable from ThreadContexts
+ ProcessThreadRegistry(frontier);
}
#endif // SANITIZER_FUCHSIA
}
}
+static void IgnoredSuppressedCb(uptr chunk, void *arg) {
+ CHECK(arg);
+ chunk = GetUserBegin(chunk);
+ LsanMetadata m(chunk);
+ if (!m.allocated() || m.tag() == kIgnored)
+ return;
+
+ const InternalMmapVector<u32> &suppressed =
+ *static_cast<const InternalMmapVector<u32> *>(arg);
+ uptr idx = InternalLowerBound(suppressed, m.stack_trace_id());
+ if (idx >= suppressed.size() || m.stack_trace_id() != suppressed[idx])
+ return;
+
+ LOG_POINTERS("Suppressed: chunk %p-%p of size %zu.\n", chunk,
+ chunk + m.requested_size(), m.requested_size());
+ m.set_tag(kIgnored);
+}
+
// ForEachChunk callback. If chunk is marked as ignored, adds its address to
// frontier.
static void CollectIgnoredCb(uptr chunk, void *arg) {
// Sets the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads,
Frontier *frontier) {
+ const InternalMmapVector<u32> &suppressed_stacks =
+ GetSuppressionContext()->GetSortedSuppressedStacks();
+ if (!suppressed_stacks.empty()) {
+ ForEachChunk(IgnoredSuppressedCb,
+ const_cast<InternalMmapVector<u32> *>(&suppressed_stacks));
+ }
ForEachChunk(CollectIgnoredCb, frontier);
ProcessGlobalRegions(frontier);
ProcessThreads(suspended_threads, frontier);
}
}
-static void PrintMatchedSuppressions() {
+void LeakSuppressionContext::PrintMatchedSuppressions() {
InternalMmapVector<Suppression *> matched;
- GetSuppressionContext()->GetMatched(&matched);
+ context.GetMatched(&matched);
if (!matched.size())
return;
const char *line = "-----------------------------------------------------";
Printf("%s\n", line);
Printf("Suppressions used:\n");
Printf(" count bytes template\n");
- for (uptr i = 0; i < matched.size(); i++)
- Printf("%7zu %10zu %s\n", static_cast<uptr>(atomic_load_relaxed(
- &matched[i]->hit_count)), matched[i]->weight, matched[i]->templ);
+ for (uptr i = 0; i < matched.size(); i++) {
+ Printf("%7zu %10zu %s\n",
+ static_cast<uptr>(atomic_load_relaxed(&matched[i]->hit_count)),
+ matched[i]->weight, matched[i]->templ);
+ }
Printf("%s\n\n", line);
}
const InternalMmapVector<tid_t> &suspended_threads =
*(const InternalMmapVector<tid_t> *)arg;
if (tctx->status == ThreadStatusRunning) {
- uptr i = InternalLowerBound(suspended_threads, 0, suspended_threads.size(),
- tctx->os_id, CompareLess<int>());
+ uptr i = InternalLowerBound(suspended_threads, tctx->os_id);
if (i >= suspended_threads.size() || suspended_threads[i] != tctx->os_id)
Report("Running thread %d was not suspended. False leaks are possible.\n",
tctx->os_id);
param->success = true;
}
-static bool CheckForLeaks() {
- if (&__lsan_is_turned_off && __lsan_is_turned_off())
- return false;
- EnsureMainThreadIDIsCorrect();
- CheckForLeaksParam param;
- LockStuffAndStopTheWorld(CheckForLeaksCallback, ¶m);
-
- if (!param.success) {
- Report("LeakSanitizer has encountered a fatal error.\n");
- Report(
- "HINT: For debugging, try setting environment variable "
- "LSAN_OPTIONS=verbosity=1:log_threads=1\n");
- Report(
- "HINT: LeakSanitizer does not work under ptrace (strace, gdb, etc)\n");
- Die();
- }
- param.leak_report.ApplySuppressions();
- uptr unsuppressed_count = param.leak_report.UnsuppressedLeakCount();
- if (unsuppressed_count > 0) {
+static bool PrintResults(LeakReport &report) {
+ uptr unsuppressed_count = report.UnsuppressedLeakCount();
+ if (unsuppressed_count) {
Decorator d;
- Printf("\n"
- "================================================================="
- "\n");
+ Printf(
+ "\n"
+ "================================================================="
+ "\n");
Printf("%s", d.Error());
Report("ERROR: LeakSanitizer: detected memory leaks\n");
Printf("%s", d.Default());
- param.leak_report.ReportTopLeaks(flags()->max_leaks);
+ report.ReportTopLeaks(flags()->max_leaks);
}
if (common_flags()->print_suppressions)
- PrintMatchedSuppressions();
+ GetSuppressionContext()->PrintMatchedSuppressions();
if (unsuppressed_count > 0) {
- param.leak_report.PrintSummary();
+ report.PrintSummary();
return true;
}
return false;
}
+static bool CheckForLeaks() {
+ if (&__lsan_is_turned_off && __lsan_is_turned_off())
+ return false;
+  // Inside LockStuffAndStopTheWorld we can't run the symbolizer, so we can't
+  // match suppressions. However, if a stack id was previously suppressed, it
+  // should be suppressed in future checks as well.
+ for (int i = 0;; ++i) {
+ EnsureMainThreadIDIsCorrect();
+ CheckForLeaksParam param;
+ LockStuffAndStopTheWorld(CheckForLeaksCallback, ¶m);
+ if (!param.success) {
+ Report("LeakSanitizer has encountered a fatal error.\n");
+ Report(
+ "HINT: For debugging, try setting environment variable "
+ "LSAN_OPTIONS=verbosity=1:log_threads=1\n");
+ Report(
+ "HINT: LeakSanitizer does not work under ptrace (strace, gdb, "
+ "etc)\n");
+ Die();
+ }
+    // No new suppressed stacks, so rerunning will not help and we can report.
+ if (!param.leak_report.ApplySuppressions())
+ return PrintResults(param.leak_report);
+
+ // No indirect leaks to report, so we are done here.
+ if (!param.leak_report.IndirectUnsuppressedLeakCount())
+ return PrintResults(param.leak_report);
+
+ if (i >= 8) {
+ Report("WARNING: LeakSanitizer gave up on indirect leaks suppression.\n");
+ return PrintResults(param.leak_report);
+ }
+
+    // We found a previously unseen suppressed call stack. Rerun to make sure
+    // it does not hold indirect leaks.
+ VReport(1, "Rerun with %zu suppressed stacks.",
+ GetSuppressionContext()->GetSortedSuppressedStacks().size());
+ }
+}
+
static bool has_reported_leaks = false;
bool HasReportedLeaks() { return has_reported_leaks; }
void DoRecoverableLeakCheckVoid() { DoRecoverableLeakCheck(); }
-static Suppression *GetSuppressionForAddr(uptr addr) {
+Suppression *LeakSuppressionContext::GetSuppressionForAddr(uptr addr) {
Suppression *s = nullptr;
// Suppress by module name.
- SuppressionContext *suppressions = GetSuppressionContext();
if (const char *module_name =
Symbolizer::GetOrInit()->GetModuleNameForPc(addr))
- if (suppressions->Match(module_name, kSuppressionLeak, &s))
+ if (context.Match(module_name, kSuppressionLeak, &s))
return s;
// Suppress by file or function name.
SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(addr);
for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
- if (suppressions->Match(cur->info.function, kSuppressionLeak, &s) ||
- suppressions->Match(cur->info.file, kSuppressionLeak, &s)) {
+ if (context.Match(cur->info.function, kSuppressionLeak, &s) ||
+ context.Match(cur->info.file, kSuppressionLeak, &s)) {
break;
}
}
return s;
}
-static Suppression *GetSuppressionForStack(u32 stack_trace_id) {
+Suppression *LeakSuppressionContext::GetSuppressionForStack(
+ u32 stack_trace_id) {
+ LazyInit();
StackTrace stack = StackDepotGet(stack_trace_id);
for (uptr i = 0; i < stack.size; i++) {
Suppression *s = GetSuppressionForAddr(
StackTrace::GetPreviousInstructionPc(stack.trace[i]));
- if (s) return s;
+ if (s) {
+ suppressed_stacks_sorted = false;
+ suppressed_stacks.push_back(stack_trace_id);
+ return s;
+ }
}
return nullptr;
}
bytes += leaks_[i].total_size;
allocations += leaks_[i].hit_count;
}
- InternalScopedString summary(kMaxSummaryLength);
+ InternalScopedString summary;
summary.append("%zu byte(s) leaked in %zu allocation(s).", bytes,
allocations);
ReportErrorSummary(summary.data());
}
-void LeakReport::ApplySuppressions() {
+uptr LeakReport::ApplySuppressions() {
+ LeakSuppressionContext *suppressions = GetSuppressionContext();
+  uptr new_suppressions = 0;
for (uptr i = 0; i < leaks_.size(); i++) {
- Suppression *s = GetSuppressionForStack(leaks_[i].stack_trace_id);
+ Suppression *s =
+ suppressions->GetSuppressionForStack(leaks_[i].stack_trace_id);
if (s) {
s->weight += leaks_[i].total_size;
atomic_store_relaxed(&s->hit_count, atomic_load_relaxed(&s->hit_count) +
leaks_[i].hit_count);
leaks_[i].is_suppressed = true;
+ ++new_suppressions;
}
}
+ return new_suppressions;
}
uptr LeakReport::UnsuppressedLeakCount() {
return result;
}
+uptr LeakReport::IndirectUnsuppressedLeakCount() {
+ uptr result = 0;
+ for (uptr i = 0; i < leaks_.size(); i++)
+ if (!leaks_[i].is_suppressed && !leaks_[i].is_directly_leaked)
+ result++;
+ return result;
+}
+
} // namespace __lsan
#else // CAN_SANITIZE_LEAKS
namespace __lsan {
#define CAN_SANITIZE_LEAKS 1
#elif defined(__arm__) && SANITIZER_LINUX
#define CAN_SANITIZE_LEAKS 1
+#elif SANITIZER_RISCV64 && SANITIZER_LINUX
+#define CAN_SANITIZE_LEAKS 1
#elif SANITIZER_NETBSD || SANITIZER_FUCHSIA
#define CAN_SANITIZE_LEAKS 1
#else
namespace __sanitizer {
class FlagParser;
class ThreadRegistry;
+class ThreadContextBase;
struct DTLS;
}
kIgnored = 3
};
-const u32 kInvalidTid = (u32) -1;
-
struct Flags {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
#include "lsan_flags.inc"
ChunkTag tag);
void ReportTopLeaks(uptr max_leaks);
void PrintSummary();
- void ApplySuppressions();
+ uptr ApplySuppressions();
uptr UnsuppressedLeakCount();
+ uptr IndirectUnsuppressedLeakCount();
private:
void PrintReportForLeak(uptr index);
void ScanRootRegion(Frontier *frontier, RootRegion const ®ion,
uptr region_begin, uptr region_end, bool is_readable);
void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg);
+void GetAdditionalThreadContextPtrs(ThreadContextBase *tctx, void *ptrs);
// Run stoptheworld while holding any platform-specific locks, as well as the
// allocator and thread registry locks.
void LockStuffAndStopTheWorld(StopTheWorldCallback callback,
auto params = static_cast<const Params *>(data);
uptr begin = reinterpret_cast<uptr>(chunk);
uptr end = begin + size;
- auto i = __sanitizer::InternalLowerBound(params->allocator_caches, 0,
- params->allocator_caches.size(),
- begin, CompareLess<uptr>());
+ auto i = __sanitizer::InternalLowerBound(params->allocator_caches, begin);
if (i < params->allocator_caches.size() &&
params->allocator_caches[i] >= begin &&
end - params->allocator_caches[i] <= sizeof(AllocatorCache)) {
namespace __lsan {
-class ThreadContext : public ThreadContextLsanBase {
+class ThreadContext final : public ThreadContextLsanBase {
public:
explicit ThreadContext(int tid);
void OnCreated(void *arg) override;
if (res == 0) {
int tid = ThreadCreate(GetCurrentThread(), *(uptr *)th,
IsStateDetached(detached));
- CHECK_NE(tid, 0);
+ CHECK_NE(tid, kMainTid);
atomic_store(&p.tid, tid, memory_order_release);
while (atomic_load(&p.tid, memory_order_acquire) != 0)
internal_sched_yield();
OnStartedArgs args;
uptr stack_size = 0;
uptr tls_size = 0;
- GetThreadStackAndTls(tid == 0, &args.stack_begin, &stack_size,
+ GetThreadStackAndTls(tid == kMainTid, &args.stack_begin, &stack_size,
&args.tls_begin, &tls_size);
args.stack_end = args.stack_begin + stack_size;
args.tls_end = args.tls_begin + tls_size;
}
void InitializeMainThread() {
- u32 tid = ThreadCreate(0, 0, true);
- CHECK_EQ(tid, 0);
+ u32 tid = ThreadCreate(kMainTid, 0, true);
+ CHECK_EQ(tid, kMainTid);
ThreadStart(tid, GetTid());
}
}
void EnsureMainThreadIDIsCorrect() {
- if (GetCurrentThread() == 0)
+ if (GetCurrentThread() == kMainTid)
CurrentThreadContext()->os_id = GetTid();
}
secondary_.InitLinkerInitialized();
}
- void Init(s32 release_to_os_interval_ms) {
+ void Init(s32 release_to_os_interval_ms, uptr heap_start = 0) {
stats_.Init();
- primary_.Init(release_to_os_interval_ms);
+ primary_.Init(release_to_os_interval_ms, heap_start);
secondary_.Init();
}
typedef SizeClassAllocator32<Params> ThisT;
typedef SizeClassAllocator32LocalCache<ThisT> AllocatorCache;
- void Init(s32 release_to_os_interval_ms) {
+ void Init(s32 release_to_os_interval_ms, uptr heap_start = 0) {
+ CHECK(!heap_start);
possible_regions.Init();
internal_memset(size_class_info_array, 0, sizeof(size_class_info_array));
}
// The template parameter Params is a class containing the actual parameters.
//
// Space: a portion of address space of kSpaceSize bytes starting at SpaceBeg.
-// If kSpaceBeg is ~0 then SpaceBeg is chosen dynamically my mmap.
+// If kSpaceBeg is ~0 then SpaceBeg is chosen dynamically by mmap.
// Otherwise SpaceBeg=kSpaceBeg (fixed address).
// kSpaceSize is a power of two.
// At the beginning the entire space is mprotect-ed, then small parts of it
return base + (static_cast<uptr>(ptr32) << kCompactPtrScale);
}
- void Init(s32 release_to_os_interval_ms) {
+ // If heap_start is nonzero, assumes kSpaceSize bytes are already mapped R/W
+ // at heap_start and places the heap there. This mode requires kSpaceBeg ==
+ // ~(uptr)0.
+ void Init(s32 release_to_os_interval_ms, uptr heap_start = 0) {
uptr TotalSpaceSize = kSpaceSize + AdditionalSize();
- if (kUsingConstantSpaceBeg) {
- CHECK(IsAligned(kSpaceBeg, SizeClassMap::kMaxSize));
- CHECK_EQ(kSpaceBeg, address_range.Init(TotalSpaceSize,
- PrimaryAllocatorName, kSpaceBeg));
+ PremappedHeap = heap_start != 0;
+ if (PremappedHeap) {
+ CHECK(!kUsingConstantSpaceBeg);
+ NonConstSpaceBeg = heap_start;
+ uptr RegionInfoSize = AdditionalSize();
+ RegionInfoSpace =
+ address_range.Init(RegionInfoSize, PrimaryAllocatorName);
+ CHECK_NE(RegionInfoSpace, ~(uptr)0);
+ CHECK_EQ(RegionInfoSpace,
+ address_range.MapOrDie(RegionInfoSpace, RegionInfoSize,
+ "SizeClassAllocator: region info"));
+ MapUnmapCallback().OnMap(RegionInfoSpace, RegionInfoSize);
} else {
- // Combined allocator expects that an 2^N allocation is always aligned to
- // 2^N. For this to work, the start of the space needs to be aligned as
- // high as the largest size class (which also needs to be a power of 2).
- NonConstSpaceBeg = address_range.InitAligned(
- TotalSpaceSize, SizeClassMap::kMaxSize, PrimaryAllocatorName);
- CHECK_NE(NonConstSpaceBeg, ~(uptr)0);
+ if (kUsingConstantSpaceBeg) {
+ CHECK(IsAligned(kSpaceBeg, SizeClassMap::kMaxSize));
+ CHECK_EQ(kSpaceBeg,
+ address_range.Init(TotalSpaceSize, PrimaryAllocatorName,
+ kSpaceBeg));
+ } else {
+      // Combined allocator expects that a 2^N allocation is always aligned
+ // to 2^N. For this to work, the start of the space needs to be aligned
+ // as high as the largest size class (which also needs to be a power of
+ // 2).
+ NonConstSpaceBeg = address_range.InitAligned(
+ TotalSpaceSize, SizeClassMap::kMaxSize, PrimaryAllocatorName);
+ CHECK_NE(NonConstSpaceBeg, ~(uptr)0);
+ }
+ RegionInfoSpace = SpaceEnd();
+ MapWithCallbackOrDie(RegionInfoSpace, AdditionalSize(),
+ "SizeClassAllocator: region info");
}
SetReleaseToOSIntervalMs(release_to_os_interval_ms);
- MapWithCallbackOrDie(SpaceEnd(), AdditionalSize(),
- "SizeClassAllocator: region info");
// Check that the RegionInfo array is aligned on the CacheLine size.
- DCHECK_EQ(SpaceEnd() % kCacheLineSize, 0);
+ DCHECK_EQ(RegionInfoSpace % kCacheLineSize, 0);
}
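A hedged sketch of the two initialization modes supported by the rewritten Init; `allocator` and `heap_start` are placeholders, not names from this patch:

// Default mode: the allocator reserves and maps its own address space.
allocator.Init(/*release_to_os_interval_ms=*/-1);

// Premapped mode: the embedder has already mapped kSpaceSize bytes R/W at
// heap_start; this mode requires kSpaceBeg == ~(uptr)0.
allocator.Init(/*release_to_os_interval_ms=*/-1, heap_start);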
s32 ReleaseToOSIntervalMs() const {
CompactPtrT *free_array = GetFreeArray(region_beg);
BlockingMutexLock l(®ion->mutex);
+#if SANITIZER_WINDOWS
+  // On Windows unmapping of memory during __sanitizer_purge_allocator is
+  // explicit and immediate, so unmapped regions must be explicitly mapped back
+  // in when they are accessed again.
+ if (region->rtoi.last_released_bytes > 0) {
+ MmapFixedOrDie(region_beg, region->mapped_user,
+ "SizeClassAllocator: region data");
+ region->rtoi.n_freed_at_last_release = 0;
+ region->rtoi.last_released_bytes = 0;
+ }
+#endif
if (UNLIKELY(region->num_freed_chunks < n_chunks)) {
if (UNLIKELY(!PopulateFreeArray(stat, class_id, region,
n_chunks - region->num_freed_chunks)))
}
~PackedCounterArray() {
if (buffer) {
- memory_mapper->UnmapPackedCounterArrayBuffer(
- reinterpret_cast<uptr>(buffer), buffer_size);
+ memory_mapper->UnmapPackedCounterArrayBuffer(buffer, buffer_size);
}
}
atomic_sint32_t release_to_os_interval_ms_;
+ uptr RegionInfoSpace;
+
+ // True if the user has already mapped the entire heap R/W.
+ bool PremappedHeap;
+
struct Stats {
uptr n_allocated;
uptr n_freed;
RegionInfo *GetRegionInfo(uptr class_id) const {
DCHECK_LT(class_id, kNumClasses);
- RegionInfo *regions = reinterpret_cast<RegionInfo *>(SpaceEnd());
+ RegionInfo *regions = reinterpret_cast<RegionInfo *>(RegionInfoSpace);
return ®ions[class_id];
}
}
bool MapWithCallback(uptr beg, uptr size, const char *name) {
+ if (PremappedHeap)
+ return beg >= NonConstSpaceBeg &&
+ beg + size <= NonConstSpaceBeg + kSpaceSize;
uptr mapped = address_range.Map(beg, size, name);
if (UNLIKELY(!mapped))
return false;
}
void MapWithCallbackOrDie(uptr beg, uptr size, const char *name) {
+ if (PremappedHeap) {
+ CHECK_GE(beg, NonConstSpaceBeg);
+ CHECK_LE(beg + size, NonConstSpaceBeg + kSpaceSize);
+ return;
+ }
CHECK_EQ(beg, address_range.MapOrDie(beg, size, name));
MapUnmapCallback().OnMap(beg, size);
}
void UnmapWithCallbackOrDie(uptr beg, uptr size) {
+ if (PremappedHeap)
+ return;
MapUnmapCallback().OnUnmap(beg, size);
address_range.Unmap(beg, size);
}
return released_bytes;
}
- uptr MapPackedCounterArrayBuffer(uptr buffer_size) {
+ void *MapPackedCounterArrayBuffer(uptr buffer_size) {
// TODO(alekseyshl): The idea to explore is to check if we have enough
// space between num_freed_chunks*sizeof(CompactPtrT) and
// mapped_free_array to fit buffer_size bytes and use that space instead
// of mapping a temporary one.
- return reinterpret_cast<uptr>(
- MmapOrDieOnFatalError(buffer_size, "ReleaseToOSPageCounters"));
+ return MmapOrDieOnFatalError(buffer_size, "ReleaseToOSPageCounters");
}
- void UnmapPackedCounterArrayBuffer(uptr buffer, uptr buffer_size) {
- UnmapOrDie(reinterpret_cast<void *>(buffer), buffer_size);
+ void UnmapPackedCounterArrayBuffer(void *buffer, uptr buffer_size) {
+ UnmapOrDie(buffer, buffer_size);
}
// Releases [from, to) range of pages back to OS.
// Attempts to release RAM occupied by freed chunks back to OS. The region is
// expected to be locked.
+ //
+ // TODO(morehouse): Support a callback on memory release so HWASan can release
+ // aliases as well.
void MaybeReleaseToOS(uptr class_id, bool force) {
RegionInfo *region = GetRegionInfo(class_id);
const uptr chunk_size = ClassIdToSize(class_id);
// E.g. with kNumBits==3 all size classes after 2^kMidSizeLog
// look like 0b1xx0..0, where x is either 0 or 1.
//
-// Example: kNumBits=3, kMidSizeLog=4, kMidSizeLog=8, kMaxSizeLog=17:
+// Example: kNumBits=3, kMinSizeLog=4, kMidSizeLog=8, kMaxSizeLog=17:
//
// Classes 1 - 16 correspond to sizes 16 to 256 (size = class_id * 16).
// Next 4 classes: 256 + i * 64 (i = 1 to 4).
atomic_uint64_t::Type val,
memory_order mo) {
DCHECK(mo &
- (memory_order_relaxed | memory_order_releasae | memory_order_seq_cst));
+ (memory_order_relaxed | memory_order_release | memory_order_seq_cst));
DCHECK(!((uptr)ptr % sizeof(*ptr)));
atomic_uint64_t::Type ret;
atomic_uint64_t::Type xchg,
memory_order mo) {
DCHECK(mo &
- (memory_order_relaxed | memory_order_releasae | memory_order_seq_cst));
+ (memory_order_relaxed | memory_order_release | memory_order_seq_cst));
DCHECK(!((uptr)ptr % sizeof(*ptr)));
typedef atomic_uint64_t::Type Type;
inline atomic_uint64_t::Type atomic_load(const volatile atomic_uint64_t *ptr,
memory_order mo) {
DCHECK(mo &
- (memory_order_relaxed | memory_order_releasae | memory_order_seq_cst));
+ (memory_order_relaxed | memory_order_release | memory_order_seq_cst));
DCHECK(!((uptr)ptr % sizeof(*ptr)));
atomic_uint64_t::Type zero = 0;
inline void atomic_store(volatile atomic_uint64_t *ptr, atomic_uint64_t::Type v,
memory_order mo) {
DCHECK(mo &
- (memory_order_relaxed | memory_order_releasae | memory_order_seq_cst));
+ (memory_order_relaxed | memory_order_release | memory_order_seq_cst));
DCHECK(!((uptr)ptr % sizeof(*ptr)));
__spin_lock(&lock.lock);
--- /dev/null
+//===-- sanitizer_chained_origin_depot.cpp --------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// A storage for chained origins.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_chained_origin_depot.h"
+
+namespace __sanitizer {
+
+bool ChainedOriginDepot::ChainedOriginDepotNode::eq(
+ u32 hash, const args_type &args) const {
+ return here_id == args.here_id && prev_id == args.prev_id;
+}
+
+uptr ChainedOriginDepot::ChainedOriginDepotNode::storage_size(
+ const args_type &args) {
+ return sizeof(ChainedOriginDepotNode);
+}
+
+/* This is murmur2 hash for the 64->32 bit case.
+ It does not behave all that well because the keys have a very biased
+ distribution (I've seen 7-element buckets with the table only 14% full).
+
+ here_id is built of
+   * (1 bit)  Reserved, zero.
+   * (8 bits) Part id = bits 13..20 of the hash value of here_id's key.
+   * (23 bits) Sequential number (each part has its own sequence).
+
+   prev_id has either the same distribution as here_id (but with a 3:8:21
+   split), or one of two reserved values (-1) or (-2). Either case can
+ dominate depending on the workload.
+*/
+u32 ChainedOriginDepot::ChainedOriginDepotNode::hash(const args_type &args) {
+ const u32 m = 0x5bd1e995;
+ const u32 seed = 0x9747b28c;
+ const u32 r = 24;
+ u32 h = seed;
+ u32 k = args.here_id;
+ k *= m;
+ k ^= k >> r;
+ k *= m;
+ h *= m;
+ h ^= k;
+
+ k = args.prev_id;
+ k *= m;
+ k ^= k >> r;
+ k *= m;
+ h *= m;
+ h ^= k;
+
+ h ^= h >> 13;
+ h *= m;
+ h ^= h >> 15;
+ return h;
+}
+
+bool ChainedOriginDepot::ChainedOriginDepotNode::is_valid(
+ const args_type &args) {
+ return true;
+}
+
+void ChainedOriginDepot::ChainedOriginDepotNode::store(const args_type &args,
+ u32 other_hash) {
+ here_id = args.here_id;
+ prev_id = args.prev_id;
+}
+
+ChainedOriginDepot::ChainedOriginDepotNode::args_type
+ChainedOriginDepot::ChainedOriginDepotNode::load() const {
+ args_type ret = {here_id, prev_id};
+ return ret;
+}
+
+ChainedOriginDepot::ChainedOriginDepotNode::Handle
+ChainedOriginDepot::ChainedOriginDepotNode::get_handle() {
+ return Handle(this);
+}
+
+ChainedOriginDepot::ChainedOriginDepot() {}
+
+StackDepotStats *ChainedOriginDepot::GetStats() { return depot.GetStats(); }
+
+bool ChainedOriginDepot::Put(u32 here_id, u32 prev_id, u32 *new_id) {
+ ChainedOriginDepotDesc desc = {here_id, prev_id};
+ bool inserted;
+ ChainedOriginDepotNode::Handle h = depot.Put(desc, &inserted);
+ *new_id = h.valid() ? h.id() : 0;
+ return inserted;
+}
+
+u32 ChainedOriginDepot::Get(u32 id, u32 *other) {
+ ChainedOriginDepotDesc desc = depot.Get(id);
+ *other = desc.prev_id;
+ return desc.here_id;
+}
+
+void ChainedOriginDepot::LockAll() { depot.LockAll(); }
+
+void ChainedOriginDepot::UnlockAll() { depot.UnlockAll(); }
+
+} // namespace __sanitizer
--- /dev/null
+//===-- sanitizer_chained_origin_depot.h ------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// A storage for chained origins.
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_CHAINED_ORIGIN_DEPOT_H
+#define SANITIZER_CHAINED_ORIGIN_DEPOT_H
+
+#include "sanitizer_common.h"
+#include "sanitizer_stackdepotbase.h"
+
+namespace __sanitizer {
+
+class ChainedOriginDepot {
+ public:
+ ChainedOriginDepot();
+
+  // Gets the statistics of the origin chain storage.
+ StackDepotStats *GetStats();
+
+ // Stores a chain with StackDepot ID here_id and previous chain ID prev_id.
+  // If successful, returns true and stores the new chain ID in new_id.
+ // If the same element already exists, returns false and sets new_id to the
+ // existing ID.
+ bool Put(u32 here_id, u32 prev_id, u32 *new_id);
+
+ // Retrieves the stored StackDepot ID for the given origin ID.
+ u32 Get(u32 id, u32 *other);
+
+ void LockAll();
+ void UnlockAll();
+
+ private:
+ struct ChainedOriginDepotDesc {
+ u32 here_id;
+ u32 prev_id;
+ };
+
+ struct ChainedOriginDepotNode {
+ ChainedOriginDepotNode *link;
+ u32 id;
+ u32 here_id;
+ u32 prev_id;
+
+ typedef ChainedOriginDepotDesc args_type;
+
+ bool eq(u32 hash, const args_type &args) const;
+
+ static uptr storage_size(const args_type &args);
+
+ static u32 hash(const args_type &args);
+
+ static bool is_valid(const args_type &args);
+
+ void store(const args_type &args, u32 other_hash);
+
+ args_type load() const;
+
+ struct Handle {
+ ChainedOriginDepotNode *node_;
+ Handle() : node_(nullptr) {}
+ explicit Handle(ChainedOriginDepotNode *node) : node_(node) {}
+ bool valid() { return node_; }
+ u32 id() { return node_->id; }
+ int here_id() { return node_->here_id; }
+ int prev_id() { return node_->prev_id; }
+ };
+
+ Handle get_handle();
+
+ typedef Handle handle_type;
+ };
+
+ StackDepotBase<ChainedOriginDepotNode, 4, 20> depot;
+
+ ChainedOriginDepot(const ChainedOriginDepot &) = delete;
+ void operator=(const ChainedOriginDepot &) = delete;
+};
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_CHAINED_ORIGIN_DEPOT_H
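A brief usage sketch of the Put/Get round trip described above; the ids are illustrative:

__sanitizer::ChainedOriginDepot depot;
u32 chain_id;
// Chain StackDepot id 42 onto the (empty) previous chain 0.
bool inserted = depot.Put(/*here_id=*/42, /*prev_id=*/0, &chain_id);
u32 prev_id;
u32 here_id = depot.Get(chain_id, &prev_id);  // here_id == 42, prev_id == 0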
void ReportErrorSummary(const char *error_message, const char *alt_tool_name) {
if (!common_flags()->print_summary)
return;
- InternalScopedString buff(kMaxSummaryLength);
+ InternalScopedString buff;
buff.append("SUMMARY: %s: %s",
alt_tool_name ? alt_tool_name : SanitizerToolName, error_message);
__sanitizer_report_error_summary(buff.data());
return name_len;
}
+uptr ReadBinaryDir(/*out*/ char *buf, uptr buf_len) {
+ ReadBinaryNameCached(buf, buf_len);
+ const char *exec_name_pos = StripModuleName(buf);
+ uptr name_len = exec_name_pos - buf;
+ buf[name_len] = '\0';
+ return name_len;
+}
+
#if !SANITIZER_GO
void PrintCmdline() {
char **argv = GetArgv();
const uptr kMaxThreadStackSize = 1 << 30; // 1Gb
-static const uptr kErrorMessageBufferSize = 1 << 16;
+const uptr kErrorMessageBufferSize = 1 << 16;
// Denotes fake PC values that come from JIT/JAVA/etc.
// For such PC values __tsan_symbolize_external_ex() will be called.
uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
uptr min_shadow_base_alignment, uptr &high_mem_end);
+// Let S = max(shadow_size, num_aliases * alias_size, ring_buffer_size).
+// Reserves 2*S bytes of address space to the right of the returned address and
+// ring_buffer_size bytes to the left. The returned address is aligned to 2*S.
+// Also creates num_aliases regions of accessible memory starting at offset S
+// from the returned address. Each region has size alias_size and is backed by
+// the same physical memory.
+uptr MapDynamicShadowAndAliases(uptr shadow_size, uptr alias_size,
+ uptr num_aliases, uptr ring_buffer_size);
+
// Reserve memory range [beg, end]. If madvise_shadow is true then apply
// madvise (e.g. hugepages, core dumping) requested by options.
void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name,
// OS
uptr ReadBinaryName(/*out*/char *buf, uptr buf_len);
uptr ReadBinaryNameCached(/*out*/char *buf, uptr buf_len);
+uptr ReadBinaryDir(/*out*/ char *buf, uptr buf_len);
uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len);
const char *GetProcessName();
void UpdateProcessName();
const char *mmap_type, error_t err,
bool raw_report = false);
-// Specific tools may override behavior of "Die" and "CheckFailed" functions
-// to do tool-specific job.
+// Specific tools may override behavior of "Die" function to do tool-specific
+// job.
typedef void (*DieCallbackType)(void);
// It's possible to add several callbacks that would be run when "Die" is
void SetUserDieCallback(DieCallbackType callback);
-typedef void (*CheckFailedCallbackType)(const char *, int, const char *,
- u64, u64);
-void SetCheckFailedCallback(CheckFailedCallbackType callback);
+void SetCheckUnwindCallback(void (*callback)());
// Callback will be called if soft_rss_limit_mb is given and the limit is
// exceeded (exceeded==true) or if rss went down below the limit
void SetAlternateSignalStack();
void UnsetAlternateSignalStack();
-// We don't want a summary too long.
-const int kMaxSummaryLength = 1024;
// Construct a one-line string:
// SUMMARY: SanitizerToolName: error_message
// and pass it to __sanitizer_report_error_summary.
// Don't use std::min, std::max or std::swap, to minimize dependency
// on libstdc++.
-template<class T> T Min(T a, T b) { return a < b ? a : b; }
-template<class T> T Max(T a, T b) { return a > b ? a : b; }
+template <class T>
+constexpr T Min(T a, T b) {
+ return a < b ? a : b;
+}
+template <class T>
+constexpr T Max(T a, T b) {
+ return a > b ? a : b;
+}
template<class T> void Swap(T& a, T& b) {
T tmp = a;
a = b;
template<typename T>
class InternalMmapVectorNoCtor {
public:
+ using value_type = T;
void Initialize(uptr initial_capacity) {
capacity_bytes_ = 0;
size_ = 0;
InternalMmapVector &operator=(InternalMmapVector &&) = delete;
};
-class InternalScopedString : public InternalMmapVector<char> {
+class InternalScopedString {
public:
- explicit InternalScopedString(uptr max_length)
- : InternalMmapVector<char>(max_length), length_(0) {
- (*this)[0] = '\0';
- }
- uptr length() { return length_; }
+ InternalScopedString() : buffer_(1) { buffer_[0] = '\0'; }
+
+ uptr length() const { return buffer_.size() - 1; }
void clear() {
- (*this)[0] = '\0';
- length_ = 0;
+ buffer_.resize(1);
+ buffer_[0] = '\0';
}
void append(const char *format, ...);
+ const char *data() const { return buffer_.data(); }
+ char *data() { return buffer_.data(); }
private:
- uptr length_;
+ InternalMmapVector<char> buffer_;
};
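A small usage sketch of the rewritten class, which now grows on demand instead of taking a fixed maximum length; the values are illustrative:

InternalScopedString str;
str.append("%d byte(s) in %d allocation(s)", 16, 2);
Printf("%s\n", str.data());
str.clear();  // back to an empty, NUL-terminated buffer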
template <class T>
// Works like std::lower_bound: finds the first element that is not less
// than the val.
-template <class Container, class Value, class Compare>
-uptr InternalLowerBound(const Container &v, uptr first, uptr last,
- const Value &val, Compare comp) {
+template <class Container,
+ class Compare = CompareLess<typename Container::value_type>>
+uptr InternalLowerBound(const Container &v,
+ const typename Container::value_type &val,
+ Compare comp = {}) {
+ uptr first = 0;
+ uptr last = v.size();
while (last > first) {
uptr mid = (first + last) / 2;
if (comp(v[mid], val))
kModuleArchRISCV64
};
+// Sorts and removes duplicates from the container.
+template <class Container,
+ class Compare = CompareLess<typename Container::value_type>>
+void SortAndDedup(Container &v, Compare comp = {}) {
+ Sort(v.data(), v.size(), comp);
+ uptr size = v.size();
+ if (size < 2)
+ return;
+ uptr last = 0;
+ for (uptr i = 1; i < size; ++i) {
+ if (comp(v[last], v[i])) {
+ ++last;
+ if (last != i)
+ v[last] = v[i];
+ } else {
+ CHECK(!comp(v[i], v[last]));
+ }
+ }
+ v.resize(last + 1);
+}
+
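A sketch of how the default-comparator SortAndDedup and InternalLowerBound are meant to work together; the values are illustrative:

InternalMmapVector<u32> ids;
ids.push_back(7);
ids.push_back(3);
ids.push_back(7);
SortAndDedup(ids);                     // ids is now {3, 7}
uptr i = InternalLowerBound(ids, 7u);  // first index with ids[i] >= 7
bool found = i < ids.size() && ids[i] == 7;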
// Opens the file 'file_name" and reads up to 'max_len' bytes.
// The resulting buffer is mmaped and stored in '*buff'.
// Returns true if file was successfully opened and read.
COMMON_INTERCEPT_FUNCTION(fn)
#endif
-#ifdef __GLIBC__
+#if SANITIZER_GLIBC
// If we could not find the versioned symbol, fall back to an unversioned
// lookup. This is needed to work around a GLibc bug that causes dlsym
// with RTLD_NEXT to return the oldest versioned symbol.
}
return res;
}
+#if SANITIZER_GLIBC
namespace __sanitizer {
extern "C" {
int real_clock_gettime(u32 clk_id, void *tp) {
}
} // extern "C"
} // namespace __sanitizer
+#endif
INTERCEPTOR(int, clock_settime, u32 clk_id, const void *tp) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, clock_settime, clk_id, tp);
COMMON_INTERCEPTOR_READ_RANGE(ctx, locale, REAL(strlen)(locale) + 1);
char *res = REAL(setlocale)(category, locale);
if (res) {
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
unpoison_ctype_arrays(ctx);
}
return res;
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
- int res = REAL(sigwait)(set, sig);
+ int res = COMMON_INTERCEPTOR_BLOCK_REAL(sigwait)(set, sig);
if (!res && sig) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sig, sizeof(*sig));
return res;
}
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
- int res = REAL(sigwaitinfo)(set, info);
+ int res = COMMON_INTERCEPTOR_BLOCK_REAL(sigwaitinfo)(set, info);
if (res > 0 && info) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, info, siginfo_t_sz);
return res;
}
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
- int res = REAL(sigtimedwait)(set, info, timeout);
+ int res = COMMON_INTERCEPTOR_BLOCK_REAL(sigtimedwait)(set, info, timeout);
if (res > 0 && info) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, info, siginfo_t_sz);
return res;
}
if (fp->_IO_read_base && fp->_IO_read_base < fp->_IO_read_end)
COMMON_INTERCEPTOR_INITIALIZE_RANGE(fp->_IO_read_base,
fp->_IO_read_end - fp->_IO_read_base);
+ if (fp->_IO_write_base && fp->_IO_write_base < fp->_IO_write_end)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(fp->_IO_write_base,
+ fp->_IO_write_end - fp->_IO_write_base);
#endif
#endif // SANITIZER_HAS_STRUCT_FILE
}
INTERCEPTOR(int, fflush, __sanitizer_FILE *fp) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, fflush, fp);
+ if (fp)
+ unpoison_file(fp);
int res = REAL(fflush)(fp);
// FIXME: handle fp == NULL
if (fp) {
COMMON_INTERCEPTOR_ENTER(ctx, fclose, fp);
COMMON_INTERCEPTOR_FILE_CLOSE(ctx, fp);
const FileMetadata *m = GetInterceptorMetadata(fp);
+ if (fp)
+ unpoison_file(fp);
int res = REAL(fclose)(fp);
if (m) {
COMMON_INTERCEPTOR_INITIALIZE_RANGE(*m->addr, *m->size);
_(SOUND_PCM_WRITE_CHANNELS, WRITE, sizeof(int));
_(SOUND_PCM_WRITE_FILTER, WRITE, sizeof(int));
_(TCFLSH, NONE, 0);
+#if SANITIZER_GLIBC
_(TCGETA, WRITE, struct_termio_sz);
+#endif
_(TCGETS, WRITE, struct_termios_sz);
_(TCSBRK, NONE, 0);
_(TCSBRKP, NONE, 0);
+#if SANITIZER_GLIBC
_(TCSETA, READ, struct_termio_sz);
_(TCSETAF, READ, struct_termio_sz);
_(TCSETAW, READ, struct_termio_sz);
+#endif
_(TCSETS, READ, struct_termios_sz);
_(TCSETSF, READ, struct_termios_sz);
_(TCSETSW, READ, struct_termios_sz);
_(VT_WAITACTIVE, NONE, 0);
#endif
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
+#if SANITIZER_GLIBC
// _(SIOCDEVPLIP, WRITE, struct_ifreq_sz); // the same as EQL_ENSLAVE
_(CYGETDEFTHRESH, WRITE, sizeof(int));
_(CYGETDEFTIMEOUT, WRITE, sizeof(int));
#if defined(__aarch64__) && defined(__linux__)
#include "sanitizer_common/sanitizer_asm.h"
+#include "builtins/assembly.h"
ASM_HIDDEN(COMMON_INTERCEPTOR_SPILL_AREA)
ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(vfork))
ASM_WRAPPER_NAME(vfork):
// Save x30 in the off-stack spill area.
+ hint #25 // paciasp
stp xzr, x30, [sp, #-16]!
bl COMMON_INTERCEPTOR_SPILL_AREA
ldp xzr, x30, [sp], 16
bl COMMON_INTERCEPTOR_SPILL_AREA
ldr x30, [x0]
ldp x0, xzr, [sp], 16
+ hint #29 // autiasp
ret
ASM_SIZE(vfork)
.weak vfork
.set vfork, ASM_WRAPPER_NAME(vfork)
+GNU_PROPERTY_BTI_PAC
+
#endif
INTERFACE_FUNCTION(__sanitizer_set_death_callback)
INTERFACE_FUNCTION(__sanitizer_set_report_path)
INTERFACE_FUNCTION(__sanitizer_set_report_fd)
+INTERFACE_FUNCTION(__sanitizer_get_report_path)
INTERFACE_FUNCTION(__sanitizer_verify_contiguous_container)
INTERFACE_WEAK_FUNCTION(__sanitizer_on_print)
INTERFACE_WEAK_FUNCTION(__sanitizer_report_error_summary)
#endif
void WriteToSyslog(const char *msg) {
- InternalScopedString msg_copy(kErrorMessageBufferSize);
+ InternalScopedString msg_copy;
msg_copy.append("%s", msg);
- char *p = msg_copy.data();
- char *q;
+ const char *p = msg_copy.data();
// Print one line at a time.
// syslog, at least on Android, has an implicit message length limit.
- while ((q = internal_strchr(p, '\n'))) {
+ while (char* q = internal_strchr(p, '\n')) {
*q = '\0';
WriteOneLineToSyslog(p);
p = q + 1;
} else {
internal_snprintf(full_path, kMaxPathLength, "%s.%zu", path_prefix, pid);
}
+ if (common_flags()->log_suffix) {
+ internal_strlcat(full_path, common_flags()->log_suffix, kMaxPathLength);
+ }
error_t err;
fd = OpenFile(full_path, WrOnly, &err);
if (fd == kInvalidFd) {
}
}
+const char *ReportFile::GetReportPath() {
+ SpinMutexLock l(mu);
+ ReopenIfNecessary();
+ return full_path;
+}
+
bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
uptr *read_len, uptr max_len, error_t *errno_p) {
*buff = nullptr;
report_file.fd = (fd_t)reinterpret_cast<uptr>(fd);
report_file.fd_pid = internal_getpid();
}
+
+const char *__sanitizer_get_report_path() {
+ return report_file.GetReportPath();
+}
} // extern "C"
#endif // !SANITIZER_FUCHSIA
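A hedged usage sketch of the new interface function next to the existing __sanitizer_set_report_path; the path is illustrative:

extern "C" {
void __sanitizer_set_report_path(const char *path);
const char *__sanitizer_get_report_path();
}

void ConfigureReportPath() {
  __sanitizer_set_report_path("/tmp/sanitizer-report");
  // Returns the currently resolved report path, or null if none was set.
  const char *path = __sanitizer_get_report_path();
  (void)path;
}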
void Write(const char *buffer, uptr length);
bool SupportsColors();
void SetReportPath(const char *path);
+ const char *GetReportPath();
// Don't use fields directly. They are only declared public to allow
// aggregate initialization.
// Copy the string from "s" to "out", making the following substitutions:
// %b = binary basename
// %p = pid
+// %d = binary directory
void SubstituteForFlagValue(const char *s, char *out, uptr out_size) {
char *out_end = out + out_size;
while (*s && out < out_end - 1) {
s += 2; // skip "%p"
break;
}
+ case 'd': {
+ uptr len = ReadBinaryDir(out, out_end - out);
+ out += len;
+ s += 2; // skip "%d"
+ break;
+ }
default:
*out++ = *s++;
break;
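A brief sketch of the new "%d" substitution in flag-value patterns; the pattern and buffer size are illustrative:

char out[256];
SubstituteForFlagValue("%d/asan.%p", out, sizeof(out));
// out now holds "<directory of the running binary>/asan.<pid>"

The log_suffix flag added below can then append an extension such as ".txt".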
bool, log_exe_name, false,
"Mention name of executable when reporting error and "
"append executable name to logs (as in \"log_path.exe_name.pid\").")
+COMMON_FLAG(const char *, log_suffix, nullptr,
+ "String to append to log file name, e.g. \".txt\".")
COMMON_FLAG(
bool, log_to_syslog, (bool)SANITIZER_ANDROID || (bool)SANITIZER_MAC,
"Write all sanitizer output to syslog in addition to other means of "
#include "sanitizer_fuchsia.h"
#if SANITIZER_FUCHSIA
-#include <limits.h>
#include <pthread.h>
#include <stdlib.h>
#include <unistd.h>
return pid;
}
-int internal_dlinfo(void *handle, int request, void *p) {
- UNIMPLEMENTED();
-}
+int internal_dlinfo(void *handle, int request, void *p) { UNIMPLEMENTED(); }
uptr GetThreadSelf() { return reinterpret_cast<uptr>(thrd_current()); }
CHECK_NE(MtxUnlocked, atomic_load(m, memory_order_relaxed));
}
-uptr GetPageSize() { return PAGE_SIZE; }
+uptr GetPageSize() { return _zx_system_get_page_size(); }
-uptr GetMmapGranularity() { return PAGE_SIZE; }
+uptr GetMmapGranularity() { return _zx_system_get_page_size(); }
sanitizer_shadow_bounds_t ShadowBounds;
static void *DoAnonymousMmapOrDie(uptr size, const char *mem_type,
bool raw_report, bool die_for_nomem) {
- size = RoundUpTo(size, PAGE_SIZE);
+ size = RoundUpTo(size, GetPageSize());
zx_handle_t vmo;
zx_status_t status = _zx_vmo_create(size, 0, &vmo);
uptr ReservedAddressRange::Init(uptr init_size, const char *name,
uptr fixed_addr) {
- init_size = RoundUpTo(init_size, PAGE_SIZE);
+ init_size = RoundUpTo(init_size, GetPageSize());
DCHECK_EQ(os_handle_, ZX_HANDLE_INVALID);
uintptr_t base;
zx_handle_t vmar;
- zx_status_t status =
- _zx_vmar_allocate(
- _zx_vmar_root_self(),
- ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE | ZX_VM_CAN_MAP_SPECIFIC,
- 0, init_size, &vmar, &base);
+ zx_status_t status = _zx_vmar_allocate(
+ _zx_vmar_root_self(),
+ ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE | ZX_VM_CAN_MAP_SPECIFIC, 0,
+ init_size, &vmar, &base);
if (status != ZX_OK)
ReportMmapFailureAndDie(init_size, name, "zx_vmar_allocate", status);
base_ = reinterpret_cast<void *>(base);
static uptr DoMmapFixedOrDie(zx_handle_t vmar, uptr fixed_addr, uptr map_size,
void *base, const char *name, bool die_for_nomem) {
uptr offset = fixed_addr - reinterpret_cast<uptr>(base);
- map_size = RoundUpTo(map_size, PAGE_SIZE);
+ map_size = RoundUpTo(map_size, GetPageSize());
zx_handle_t vmo;
zx_status_t status = _zx_vmo_create(map_size, 0, &vmo);
if (status != ZX_OK) {
uptr ReservedAddressRange::Map(uptr fixed_addr, uptr map_size,
const char *name) {
- return DoMmapFixedOrDie(os_handle_, fixed_addr, map_size, base_,
- name_, false);
+ return DoMmapFixedOrDie(os_handle_, fixed_addr, map_size, base_, name_,
+ false);
}
uptr ReservedAddressRange::MapOrDie(uptr fixed_addr, uptr map_size,
const char *name) {
- return DoMmapFixedOrDie(os_handle_, fixed_addr, map_size, base_,
- name_, true);
+ return DoMmapFixedOrDie(os_handle_, fixed_addr, map_size, base_, name_, true);
}
void UnmapOrDieVmar(void *addr, uptr size, zx_handle_t target_vmar) {
- if (!addr || !size) return;
- size = RoundUpTo(size, PAGE_SIZE);
+ if (!addr || !size)
+ return;
+ size = RoundUpTo(size, GetPageSize());
zx_status_t status =
_zx_vmar_unmap(target_vmar, reinterpret_cast<uintptr_t>(addr), size);
void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
const char *mem_type) {
- CHECK_GE(size, PAGE_SIZE);
+ CHECK_GE(size, GetPageSize());
CHECK(IsPowerOfTwo(size));
CHECK(IsPowerOfTwo(alignment));
_zx_vmar_root_self(),
ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC_OVERWRITE,
addr - info.base, vmo, 0, size, &new_addr);
- if (status == ZX_OK) CHECK_EQ(new_addr, addr);
+ if (status == ZX_OK)
+ CHECK_EQ(new_addr, addr);
}
}
if (status == ZX_OK && addr != map_addr)
UnmapOrDieVmar(addr, size, _zx_vmar_root_self());
}
-// This is used on the shadow mapping, which cannot be changed.
-// Zircon doesn't have anything like MADV_DONTNEED.
-void ReleaseMemoryPagesToOS(uptr beg, uptr end) {}
+void ReleaseMemoryPagesToOS(uptr beg, uptr end) {
+ uptr beg_aligned = RoundUpTo(beg, GetPageSize());
+ uptr end_aligned = RoundDownTo(end, GetPageSize());
+ if (beg_aligned < end_aligned) {
+ zx_handle_t root_vmar = _zx_vmar_root_self();
+ CHECK_NE(root_vmar, ZX_HANDLE_INVALID);
+ zx_status_t status =
+ _zx_vmar_op_range(root_vmar, ZX_VMAR_OP_DECOMMIT, beg_aligned,
+ end_aligned - beg_aligned, nullptr, 0);
+ CHECK_EQ(status, ZX_OK);
+ }
+}
void DumpProcessMap() {
// TODO(mcgrathr): write it
uint64_t vmo_size;
status = _zx_vmo_get_size(vmo, &vmo_size);
if (status == ZX_OK) {
- if (vmo_size < max_len) max_len = vmo_size;
- size_t map_size = RoundUpTo(max_len, PAGE_SIZE);
+ if (vmo_size < max_len)
+ max_len = vmo_size;
+ size_t map_size = RoundUpTo(max_len, GetPageSize());
uintptr_t addr;
status = _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ, 0, vmo, 0,
map_size, &addr);
}
_zx_handle_close(vmo);
}
- if (status != ZX_OK && errno_p) *errno_p = status;
+ if (status != ZX_OK && errno_p)
+ *errno_p = status;
return status == ZX_OK;
}
return true;
}
-u32 GetNumberOfCPUs() {
- return zx_system_get_num_cpus();
-}
+u32 GetNumberOfCPUs() { return zx_system_get_num_cpus(); }
uptr GetRSS() { UNIMPLEMENTED(); }
void __sanitizer_set_report_fd(void *fd) {
UNREACHABLE("not available on Fuchsia");
}
+
+const char *__sanitizer_get_report_path() {
+ UNREACHABLE("not available on Fuchsia");
+}
} // extern "C"
#endif // SANITIZER_FUCHSIA
// (casted to void *).
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_set_report_fd(void *fd);
+ // Get the current full report file path, if a path was specified by
+ // an earlier call to __sanitizer_set_report_path. Returns null otherwise.
+ SANITIZER_INTERFACE_ATTRIBUTE
+ const char *__sanitizer_get_report_path();
typedef struct {
int coverage_sandboxed;
(void)enable_fp; \
} while (0)
+constexpr u32 kInvalidTid = -1;
+constexpr u32 kMainTid = 0;
+
} // namespace __sanitizer
namespace __asan {
void LibIgnore::OnLibraryLoaded(const char *name) {
BlockingMutexLock lock(&mutex_);
// Try to match suppressions with symlink target.
- InternalScopedString buf(kMaxPathLength);
+ InternalMmapVector<char> buf(kMaxPathLength);
if (name && internal_readlink(name, buf.data(), buf.size() - 1) > 0 &&
buf[0]) {
for (uptr i = 0; i < count_; i++) {
return internal_syscall(SYSCALL(munmap), (uptr)addr, length);
}
+#if SANITIZER_LINUX
+uptr internal_mremap(void *old_address, uptr old_size, uptr new_size, int flags,
+ void *new_address) {
+ return internal_syscall(SYSCALL(mremap), (uptr)old_address, old_size,
+ new_size, flags, (uptr)new_address);
+}
+#endif
+
int internal_mprotect(void *addr, uptr length, int prot) {
return internal_syscall(SYSCALL(mprotect), (uptr)addr, length, prot);
}
}
#endif
-#if !SANITIZER_SOLARIS && !SANITIZER_NETBSD
+#if SANITIZER_GLIBC
u64 NanoTime() {
-#if SANITIZER_FREEBSD
- timeval tv;
-#else
kernel_timeval tv;
-#endif
internal_memset(&tv, 0, sizeof(tv));
internal_syscall(SYSCALL(gettimeofday), &tv, 0);
- return (u64)tv.tv_sec * 1000*1000*1000 + tv.tv_usec * 1000;
+ return (u64)tv.tv_sec * 1000 * 1000 * 1000 + tv.tv_usec * 1000;
}
-
+// Used by real_clock_gettime.
uptr internal_clock_gettime(__sanitizer_clockid_t clk_id, void *tp) {
return internal_syscall(SYSCALL(clock_gettime), clk_id, tp);
}
-#endif // !SANITIZER_SOLARIS && !SANITIZER_NETBSD
+#elif !SANITIZER_SOLARIS && !SANITIZER_NETBSD
+u64 NanoTime() {
+ struct timespec ts;
+ clock_gettime(CLOCK_REALTIME, &ts);
+ return (u64)ts.tv_sec * 1000 * 1000 * 1000 + ts.tv_nsec;
+}
+#endif
// Like getenv, but reads env directly from /proc (on Linux) or parses the
// 'environ' array (on some others) and does not use libc. This function
#elif SANITIZER_RISCV64
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
int *parent_tidptr, void *newtls, int *child_tidptr) {
- long long res;
if (!fn || !child_stack)
return -EINVAL;
+
CHECK_EQ(0, (uptr)child_stack % 16);
- child_stack = (char *)child_stack - 2 * sizeof(unsigned long long);
- ((unsigned long long *)child_stack)[0] = (uptr)fn;
- ((unsigned long long *)child_stack)[1] = (uptr)arg;
- register int (*__fn)(void *) __asm__("a0") = fn;
+ register int res __asm__("a0");
+ register int __flags __asm__("a0") = flags;
register void *__stack __asm__("a1") = child_stack;
- register int __flags __asm__("a2") = flags;
- register void *__arg __asm__("a3") = arg;
- register int *__ptid __asm__("a4") = parent_tidptr;
- register void *__tls __asm__("a5") = newtls;
- register int *__ctid __asm__("a6") = child_tidptr;
+ register int *__ptid __asm__("a2") = parent_tidptr;
+ register void *__tls __asm__("a3") = newtls;
+ register int *__ctid __asm__("a4") = child_tidptr;
+ register int (*__fn)(void *) __asm__("a5") = fn;
+ register void *__arg __asm__("a6") = arg;
+ register int nr_clone __asm__("a7") = __NR_clone;
__asm__ __volatile__(
- "mv a0,a2\n" /* flags */
- "mv a2,a4\n" /* ptid */
- "mv a3,a5\n" /* tls */
- "mv a4,a6\n" /* ctid */
- "addi a7, zero, %9\n" /* clone */
-
"ecall\n"
- /* if (%r0 != 0)
- * return %r0;
+ /* if (a0 != 0)
+ * return a0;
*/
"bnez a0, 1f\n"
- /* In the child, now. Call "fn(arg)". */
- "ld a0, 8(sp)\n"
- "ld a1, 16(sp)\n"
- "jalr a1\n"
+ // In the child, now. Call "fn(arg)".
+ "mv a0, a6\n"
+ "jalr a5\n"
- /* Call _exit(%r0). */
- "addi a7, zero, %10\n"
+ // Call _exit(a0).
+ "addi a7, zero, %9\n"
"ecall\n"
"1:\n"
: "=r"(res)
- : "i"(-EINVAL), "r"(__fn), "r"(__stack), "r"(__flags), "r"(__arg),
- "r"(__ptid), "r"(__tls), "r"(__ctid), "i"(__NR_clone), "i"(__NR_exit)
- : "ra", "memory");
+ : "0"(__flags), "r"(__stack), "r"(__ptid), "r"(__tls), "r"(__ctid),
+ "r"(__fn), "r"(__arg), "r"(nr_clone), "i"(__NR_exit)
+ : "memory");
return res;
}
#elif defined(__aarch64__)
uptr internal_sigaltstack(const void* ss, void* oss);
uptr internal_sigprocmask(int how, __sanitizer_sigset_t *set,
__sanitizer_sigset_t *oldset);
+#if SANITIZER_GLIBC
uptr internal_clock_gettime(__sanitizer_clockid_t clk_id, void *tp);
+#endif
// Linux-only syscalls.
#if SANITIZER_LINUX
// Exposed for testing.
uptr ThreadDescriptorSize();
uptr ThreadSelf();
-uptr ThreadSelfOffset();
// Matches a library's file name against a base name (stripping path and version
// information).
#include <link.h>
#include <pthread.h>
#include <signal.h>
+#include <sys/mman.h>
#include <sys/resource.h>
#include <syslog.h>
#include <osreldate.h>
#include <sys/sysctl.h>
#define pthread_getattr_np pthread_attr_get_np
+// The MAP_NORESERVE define has been removed in FreeBSD 11.x, and even before
+// that, it was never implemented. So just define it to zero.
+#undef MAP_NORESERVE
+#define MAP_NORESERVE 0
#endif
#if SANITIZER_NETBSD
#endif
}
-#if !SANITIZER_FREEBSD && !SANITIZER_ANDROID && !SANITIZER_GO && \
- !SANITIZER_NETBSD && !SANITIZER_SOLARIS
-static uptr g_tls_size;
+// True if we can use dlpi_tls_data. glibc before 2.25 may leave it NULL (BZ
+// #19826), so dlpi_tls_data cannot be used.
+//
+// musl before 1.2.3 and FreeBSD as of 12.2 incorrectly set dlpi_tls_data to
+// the TLS initialization image:
+// https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=254774
+__attribute__((unused)) static int g_use_dlpi_tls_data;
-#ifdef __i386__
-#define CHECK_GET_TLS_STATIC_INFO_VERSION (!__GLIBC_PREREQ(2, 27))
-#else
-#define CHECK_GET_TLS_STATIC_INFO_VERSION 0
-#endif
+#if SANITIZER_GLIBC && !SANITIZER_GO
+__attribute__((unused)) static uptr g_tls_size;
+void InitTlsSize() {
+ int major, minor, patch;
+ g_use_dlpi_tls_data =
+ GetLibcVersion(&major, &minor, &patch) && major == 2 && minor >= 25;
-#if CHECK_GET_TLS_STATIC_INFO_VERSION
-#define DL_INTERNAL_FUNCTION __attribute__((regparm(3), stdcall))
-#else
-#define DL_INTERNAL_FUNCTION
+#if defined(__x86_64__) || defined(__powerpc64__)
+ void *get_tls_static_info = dlsym(RTLD_NEXT, "_dl_get_tls_static_info");
+ size_t tls_align;
+ ((void (*)(size_t *, size_t *))get_tls_static_info)(&g_tls_size, &tls_align);
#endif
-
-namespace {
-struct GetTlsStaticInfoCall {
- typedef void (*get_tls_func)(size_t*, size_t*);
-};
-struct GetTlsStaticInfoRegparmCall {
- typedef void (*get_tls_func)(size_t*, size_t*) DL_INTERNAL_FUNCTION;
-};
-
-template <typename T>
-void CallGetTls(void* ptr, size_t* size, size_t* align) {
- typename T::get_tls_func get_tls;
- CHECK_EQ(sizeof(get_tls), sizeof(ptr));
- internal_memcpy(&get_tls, &ptr, sizeof(ptr));
- CHECK_NE(get_tls, 0);
- get_tls(size, align);
-}
-
-bool CmpLibcVersion(int major, int minor, int patch) {
- int ma;
- int mi;
- int pa;
- if (!GetLibcVersion(&ma, &mi, &pa))
- return false;
- if (ma > major)
- return true;
- if (ma < major)
- return false;
- if (mi > minor)
- return true;
- if (mi < minor)
- return false;
- return pa >= patch;
-}
-
-} // namespace
-
-void InitTlsSize() {
- // all current supported platforms have 16 bytes stack alignment
- const size_t kStackAlign = 16;
- void *get_tls_static_info_ptr = dlsym(RTLD_NEXT, "_dl_get_tls_static_info");
- size_t tls_size = 0;
- size_t tls_align = 0;
- // On i?86, _dl_get_tls_static_info used to be internal_function, i.e.
- // __attribute__((regparm(3), stdcall)) before glibc 2.27 and is normal
- // function in 2.27 and later.
- if (CHECK_GET_TLS_STATIC_INFO_VERSION && !CmpLibcVersion(2, 27, 0))
- CallGetTls<GetTlsStaticInfoRegparmCall>(get_tls_static_info_ptr,
- &tls_size, &tls_align);
- else
- CallGetTls<GetTlsStaticInfoCall>(get_tls_static_info_ptr,
- &tls_size, &tls_align);
- if (tls_align < kStackAlign)
- tls_align = kStackAlign;
- g_tls_size = RoundUpTo(tls_size, tls_align);
}
#else
void InitTlsSize() { }
-#endif
+#endif // SANITIZER_GLIBC && !SANITIZER_GO
-#if (defined(__x86_64__) || defined(__i386__) || defined(__mips__) || \
- defined(__aarch64__) || defined(__powerpc64__) || defined(__s390__) || \
- defined(__arm__) || SANITIZER_RISCV64) && \
- SANITIZER_LINUX && !SANITIZER_ANDROID
+// On glibc x86_64, ThreadDescriptorSize() needs to be precise due to the usage
+// of g_tls_size. On other targets, ThreadDescriptorSize() is only used by lsan
+// to get the pointer to thread-specific data keys in the thread control block.
+#if (SANITIZER_FREEBSD || SANITIZER_LINUX) && !SANITIZER_ANDROID
// sizeof(struct pthread) from glibc.
static atomic_uintptr_t thread_descriptor_size;
val = FIRST_32_SECOND_64(1168, 2288);
else if (minor <= 14)
val = FIRST_32_SECOND_64(1168, 2304);
- else
+ else if (minor < 32) // Unknown version
val = FIRST_32_SECOND_64(1216, 2304);
+ else // minor == 32
+ val = FIRST_32_SECOND_64(1344, 2496);
}
+#elif defined(__s390__) || defined(__sparc__)
+ // The size of a prefix of TCB including pthread::{specific_1stblock,specific}
+ // suffices. Just return offsetof(struct pthread, specific_used), which hasn't
+ // changed since 2007-05. Technically this applies to i386/x86_64 as well but
+ // we call _dl_get_tls_static_info and need the precise size of struct
+ // pthread.
+ return FIRST_32_SECOND_64(524, 1552);
#elif defined(__mips__)
// TODO(sagarthakur): add more values as per different glibc versions.
val = FIRST_32_SECOND_64(1152, 1776);
val = 1776;
#elif defined(__powerpc64__)
val = 1776; // from glibc.ppc64le 2.20-8.fc21
-#elif defined(__s390__)
- val = FIRST_32_SECOND_64(1152, 1776); // valid for glibc 2.22
#endif
if (val)
atomic_store_relaxed(&thread_descriptor_size, val);
return val;
}
-// The offset at which pointer to self is located in the thread descriptor.
-const uptr kThreadSelfOffset = FIRST_32_SECOND_64(8, 16);
-
-uptr ThreadSelfOffset() {
- return kThreadSelfOffset;
-}
-
#if defined(__mips__) || defined(__powerpc64__) || SANITIZER_RISCV64
// TlsPreTcbSize includes size of struct pthread_descr and size of tcb
// head structure. It lies before the static tls blocks.
}
#endif
-uptr ThreadSelf() {
- uptr descr_addr;
-#if defined(__i386__)
- asm("mov %%gs:%c1,%0" : "=r"(descr_addr) : "i"(kThreadSelfOffset));
-#elif defined(__x86_64__)
- asm("mov %%fs:%c1,%0" : "=r"(descr_addr) : "i"(kThreadSelfOffset));
-#elif defined(__mips__)
- // MIPS uses TLS variant I. The thread pointer (in hardware register $29)
- // points to the end of the TCB + 0x7000. The pthread_descr structure is
- // immediately in front of the TCB. TlsPreTcbSize() includes the size of the
- // TCB and the size of pthread_descr.
- const uptr kTlsTcbOffset = 0x7000;
- uptr thread_pointer;
- asm volatile(".set push;\
- .set mips64r2;\
- rdhwr %0,$29;\
- .set pop" : "=r" (thread_pointer));
- descr_addr = thread_pointer - kTlsTcbOffset - TlsPreTcbSize();
-#elif defined(__aarch64__) || defined(__arm__)
- descr_addr = reinterpret_cast<uptr>(__builtin_thread_pointer()) -
- ThreadDescriptorSize();
-#elif SANITIZER_RISCV64
- // https://github.com/riscv/riscv-elf-psabi-doc/issues/53
- uptr thread_pointer = reinterpret_cast<uptr>(__builtin_thread_pointer());
- descr_addr = thread_pointer - TlsPreTcbSize();
-#elif defined(__s390__)
- descr_addr = reinterpret_cast<uptr>(__builtin_thread_pointer());
-#elif defined(__powerpc64__)
- // PPC64LE uses TLS variant I. The thread pointer (in GPR 13)
- // points to the end of the TCB + 0x7000. The pthread_descr structure is
- // immediately in front of the TCB. TlsPreTcbSize() includes the size of the
- // TCB and the size of pthread_descr.
- const uptr kTlsTcbOffset = 0x7000;
- uptr thread_pointer;
- asm("addi %0,13,%1" : "=r"(thread_pointer) : "I"(-kTlsTcbOffset));
- descr_addr = thread_pointer - TlsPreTcbSize();
-#else
-#error "unsupported CPU arch"
-#endif
- return descr_addr;
-}
-#endif // (x86_64 || i386 || MIPS) && SANITIZER_LINUX
+#if !SANITIZER_GO
+namespace {
+struct TlsBlock {
+ uptr begin, end, align;
+ size_t tls_modid;
+ bool operator<(const TlsBlock &rhs) const { return begin < rhs.begin; }
+};
+} // namespace
-#if SANITIZER_FREEBSD
-static void **ThreadSelfSegbase() {
- void **segbase = 0;
-#if defined(__i386__)
- // sysarch(I386_GET_GSBASE, segbase);
- __asm __volatile("mov %%gs:0, %0" : "=r" (segbase));
-#elif defined(__x86_64__)
- // sysarch(AMD64_GET_FSBASE, segbase);
- __asm __volatile("movq %%fs:0, %0" : "=r" (segbase));
-#else
-#error "unsupported CPU arch"
+extern "C" void *__tls_get_addr(size_t *);
+
+static int CollectStaticTlsBlocks(struct dl_phdr_info *info, size_t size,
+ void *data) {
+ if (!info->dlpi_tls_modid)
+ return 0;
+ uptr begin = (uptr)info->dlpi_tls_data;
+#ifndef __s390__
+ if (!g_use_dlpi_tls_data) {
+ // Call __tls_get_addr as a fallback. This forces TLS allocation on glibc
+ // and FreeBSD.
+ size_t mod_and_off[2] = {info->dlpi_tls_modid, 0};
+ begin = (uptr)__tls_get_addr(mod_and_off);
+ }
#endif
- return segbase;
+ for (unsigned i = 0; i != info->dlpi_phnum; ++i)
+ if (info->dlpi_phdr[i].p_type == PT_TLS) {
+ static_cast<InternalMmapVector<TlsBlock> *>(data)->push_back(
+ TlsBlock{begin, begin + info->dlpi_phdr[i].p_memsz,
+ info->dlpi_phdr[i].p_align, info->dlpi_tls_modid});
+ break;
+ }
+ return 0;
}
-uptr ThreadSelf() {
- return (uptr)ThreadSelfSegbase()[2];
-}
-#endif // SANITIZER_FREEBSD
+__attribute__((unused)) static void GetStaticTlsBoundary(uptr *addr, uptr *size,
+ uptr *align) {
+ InternalMmapVector<TlsBlock> ranges;
+ dl_iterate_phdr(CollectStaticTlsBlocks, &ranges);
+ uptr len = ranges.size();
+ Sort(ranges.begin(), len);
+ // Find the range with tls_modid=1. For glibc, because libc.so uses PT_TLS,
+ // this module is guaranteed to exist and is one of the initially loaded
+ // modules.
+ uptr one = 0;
+ while (one != len && ranges[one].tls_modid != 1) ++one;
+ if (one == len) {
+ // This may happen with musl if no module uses PT_TLS.
+ *addr = 0;
+ *size = 0;
+ *align = 1;
+ return;
+ }
+ // Find the maximal run of consecutive ranges around the tls_modid==1 block.
+ // Two modules are considered consecutive if the gap between them is smaller
+ // than the alignment; the dynamic loader packs static TLS blocks this way to
+ // avoid wasting space.
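+ // For example, with 16-byte alignment, blocks at [0x1000,0x1040) and
+ // [0x1048,0x1080) are merged because the 8-byte gap is below the alignment.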
+ uptr l = one;
+ *align = ranges[l].align;
+ while (l != 0 && ranges[l].begin < ranges[l - 1].end + ranges[l - 1].align)
+ *align = Max(*align, ranges[--l].align);
+ uptr r = one + 1;
+ while (r != len && ranges[r].begin < ranges[r - 1].end + ranges[r - 1].align)
+ *align = Max(*align, ranges[r++].align);
+ *addr = ranges[l].begin;
+ *size = ranges[r - 1].end - ranges[l].begin;
+}
+#endif // !SANITIZER_GO
+#endif // (SANITIZER_FREEBSD || SANITIZER_LINUX) && !SANITIZER_ANDROID
#if SANITIZER_NETBSD
static struct tls_tcb * ThreadSelfTlsTcb() {
*addr = 0;
*size = 0;
}
-#elif SANITIZER_LINUX
-#if defined(__x86_64__) || defined(__i386__) || defined(__s390__)
- *addr = ThreadSelf();
- *size = GetTlsSize();
+#elif SANITIZER_GLIBC && defined(__x86_64__)
+ // For x86-64, use an O(1) approach which requires precise
+ // ThreadDescriptorSize. g_tls_size was initialized in InitTlsSize.
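+ // On x86-64 glibc, the thread descriptor keeps a pointer to itself at offset
+ // 16, so %fs:16 yields the address of the descriptor.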
+ asm("mov %%fs:16,%0" : "=r"(*addr));
+ *size = g_tls_size;
*addr -= *size;
*addr += ThreadDescriptorSize();
-#elif defined(__mips__) || defined(__aarch64__) || defined(__powerpc64__) || \
- defined(__arm__) || SANITIZER_RISCV64
- *addr = ThreadSelf();
- *size = GetTlsSize();
+#elif SANITIZER_GLIBC && defined(__powerpc64__)
+ // Workaround for glibc<2.25(?). 2.27 is known to not need this.
+ uptr tp;
+ asm("addi %0,13,-0x7000" : "=r"(tp));
+ const uptr pre_tcb_size = TlsPreTcbSize();
+ *addr = tp - pre_tcb_size;
+ *size = g_tls_size + pre_tcb_size;
+#elif SANITIZER_FREEBSD || SANITIZER_LINUX
+ uptr align;
+ GetStaticTlsBoundary(addr, size, &align);
+#if defined(__x86_64__) || defined(__i386__) || defined(__s390__) || \
+ defined(__sparc__)
+ if (SANITIZER_GLIBC) {
+#if defined(__x86_64__) || defined(__i386__)
+ align = Max<uptr>(align, 64);
#else
- *addr = 0;
- *size = 0;
+ align = Max<uptr>(align, 16);
#endif
-#elif SANITIZER_FREEBSD
- void** segbase = ThreadSelfSegbase();
- *addr = 0;
- *size = 0;
- if (segbase != 0) {
- // tcbalign = 16
- // tls_size = round(tls_static_space, tcbalign);
- // dtv = segbase[1];
- // dtv[2] = segbase - tls_static_space;
- void **dtv = (void**) segbase[1];
- *addr = (uptr) dtv[2];
- *size = (*addr == 0) ? 0 : ((uptr) segbase[0] - (uptr) dtv[2]);
}
+ const uptr tp = RoundUpTo(*addr + *size, align);
+
+ // lsan requires the range to additionally cover the static TLS surplus
+ // (elf/dl-tls.c defines 1664). Otherwise there may be false positives for
+ // allocations only referenced by tls in dynamically loaded modules.
+ if (SANITIZER_GLIBC)
+ *size += 1664;
+ else if (SANITIZER_FREEBSD)
+ *size += 128; // RTLD_STATIC_TLS_EXTRA
+
+ // Extend the range to include the thread control block. On glibc, lsan needs
+ // the range to include pthread::{specific_1stblock,specific} so that
+ // allocations only referenced by pthread_setspecific can be scanned. This may
+ // underestimate by at most TLS_TCB_ALIGN-1 bytes but it should be fine
+ // because the number of bytes after pthread::specific is larger.
+ *addr = tp - RoundUpTo(*size, align);
+ *size = tp - *addr + ThreadDescriptorSize();
+#else
+ if (SANITIZER_GLIBC)
+ *size += 1664;
+ else if (SANITIZER_FREEBSD)
+ *size += 128; // RTLD_STATIC_TLS_EXTRA
+#if defined(__mips__) || defined(__powerpc64__) || SANITIZER_RISCV64
+ const uptr pre_tcb_size = TlsPreTcbSize();
+ *addr -= pre_tcb_size;
+ *size += pre_tcb_size;
+#else
+ // arm and aarch64 reserve two words at TP, so this underestimates the range.
+ // However, this is sufficient for the purpose of finding the pointers to
+ // thread-specific data keys.
+ const uptr tcb_size = ThreadDescriptorSize();
+ *addr -= tcb_size;
+ *size += tcb_size;
+#endif
+#endif
#elif SANITIZER_NETBSD
struct tls_tcb * const tcb = ThreadSelfTlsTcb();
*addr = 0;
#if !SANITIZER_GO
uptr GetTlsSize() {
-#if SANITIZER_FREEBSD || SANITIZER_ANDROID || SANITIZER_NETBSD || \
+#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
SANITIZER_SOLARIS
uptr addr, size;
GetTls(&addr, &size);
return size;
-#elif defined(__mips__) || defined(__powerpc64__) || SANITIZER_RISCV64
- return RoundUpTo(g_tls_size + TlsPreTcbSize(), 16);
#else
- return g_tls_size;
+ return 0;
#endif
}
#endif
if (!main) {
// If stack and tls intersect, make them non-intersecting.
if (*tls_addr > *stk_addr && *tls_addr < *stk_addr + *stk_size) {
- CHECK_GT(*tls_addr + *tls_size, *stk_addr);
- CHECK_LE(*tls_addr + *tls_size, *stk_addr + *stk_size);
- *stk_size -= *tls_size;
- *tls_addr = *stk_addr + *stk_size;
+ if (*stk_addr + *stk_size < *tls_addr + *tls_size)
+ *tls_size = *stk_addr + *stk_size - *tls_addr;
+ *stk_size = *tls_addr - *stk_addr;
}
}
#endif
bool first;
};
-static int dl_iterate_phdr_cb(dl_phdr_info *info, size_t size, void *arg) {
- DlIteratePhdrData *data = (DlIteratePhdrData*)arg;
- InternalScopedString module_name(kMaxPathLength);
- if (data->first) {
- data->first = false;
- // First module is the binary itself.
- ReadBinaryNameCached(module_name.data(), module_name.size());
- } else if (info->dlpi_name) {
- module_name.append("%s", info->dlpi_name);
- }
+static int AddModuleSegments(const char *module_name, dl_phdr_info *info,
+ InternalMmapVectorNoCtor<LoadedModule> *modules) {
if (module_name[0] == '\0')
return 0;
LoadedModule cur_module;
- cur_module.set(module_name.data(), info->dlpi_addr);
+ cur_module.set(module_name, info->dlpi_addr);
for (int i = 0; i < (int)info->dlpi_phnum; i++) {
const Elf_Phdr *phdr = &info->dlpi_phdr[i];
if (phdr->p_type == PT_LOAD) {
writable);
}
}
- data->modules->push_back(cur_module);
+ modules->push_back(cur_module);
+ return 0;
+}
+
+static int dl_iterate_phdr_cb(dl_phdr_info *info, size_t size, void *arg) {
+ DlIteratePhdrData *data = (DlIteratePhdrData *)arg;
+ if (data->first) {
+ InternalMmapVector<char> module_name(kMaxPathLength);
+ data->first = false;
+ // First module is the binary itself.
+ ReadBinaryNameCached(module_name.data(), module_name.size());
+ return AddModuleSegments(module_name.data(), info, data->modules);
+ }
+
+ if (info->dlpi_name) {
+ InternalScopedString module_name;
+ module_name.append("%s", info->dlpi_name);
+ return AddModuleSegments(module_name.data(), info, data->modules);
+ }
+
return 0;
}
#elif SANITIZER_SOLARIS
return sysconf(_SC_NPROCESSORS_ONLN);
#else
-#if defined(CPU_COUNT)
cpu_set_t CPUs;
CHECK_EQ(sched_getaffinity(0, sizeof(cpu_set_t), &CPUs), 0);
return CPU_COUNT(&CPUs);
-#else
- return 1;
-#endif
#endif
}
#endif // SANITIZER_LINUX
-#if SANITIZER_LINUX && !SANITIZER_GO
+#if SANITIZER_GLIBC && !SANITIZER_GO
// glibc crashes when using clock_gettime from a preinit_array function as the
// vDSO function pointers haven't been initialized yet. __progname is
// initialized after the vDSO function pointers, so if it exists, is not null
// and is not empty, we can use clock_gettime.
extern "C" SANITIZER_WEAK_ATTRIBUTE char *__progname;
-inline bool CanUseVDSO() {
- // Bionic is safe, it checks for the vDSO function pointers to be initialized.
- if (SANITIZER_ANDROID)
- return true;
- if (&__progname && __progname && *__progname)
- return true;
- return false;
-}
+inline bool CanUseVDSO() { return &__progname && __progname && *__progname; }
// MonotonicNanoTime is a timing function that can leverage the vDSO by calling
// clock_gettime. real_clock_gettime only exists if clock_gettime is
return (u64)ts.tv_sec * (1000ULL * 1000 * 1000) + ts.tv_nsec;
}
#else
-// Non-Linux & Go always use the syscall.
+// Non-glibc & Go always use the regular function.
u64 MonotonicNanoTime() {
timespec ts;
- internal_clock_gettime(CLOCK_MONOTONIC, &ts);
+ clock_gettime(CLOCK_MONOTONIC, &ts);
return (u64)ts.tv_sec * (1000ULL * 1000 * 1000) + ts.tv_nsec;
}
-#endif // SANITIZER_LINUX && !SANITIZER_GO
+#endif // SANITIZER_GLIBC && !SANITIZER_GO
void ReExec() {
const char *pathname = "/proc/self/exe";
return shadow_start;
}
+static uptr MmapSharedNoReserve(uptr addr, uptr size) {
+ return internal_mmap(
+ reinterpret_cast<void *>(addr), size, PROT_READ | PROT_WRITE,
+ MAP_FIXED | MAP_SHARED | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
+}
+
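+// Note: with old_size == 0, mremap does not move the mapping; on a MAP_SHARED
+// mapping it creates an additional mapping of the same pages, so each alias
+// created below refers to the same physical memory as the base region.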
+static uptr MremapCreateAlias(uptr base_addr, uptr alias_addr,
+ uptr alias_size) {
+#if SANITIZER_LINUX
+ return internal_mremap(reinterpret_cast<void *>(base_addr), 0, alias_size,
+ MREMAP_MAYMOVE | MREMAP_FIXED,
+ reinterpret_cast<void *>(alias_addr));
+#else
+ CHECK(false && "mremap is not supported outside of Linux");
+ return 0;
+#endif
+}
+
+static void CreateAliases(uptr start_addr, uptr alias_size, uptr num_aliases) {
+ uptr total_size = alias_size * num_aliases;
+ uptr mapped = MmapSharedNoReserve(start_addr, total_size);
+ CHECK_EQ(mapped, start_addr);
+
+ for (uptr i = 1; i < num_aliases; ++i) {
+ uptr alias_addr = start_addr + i * alias_size;
+ CHECK_EQ(MremapCreateAlias(start_addr, alias_addr, alias_size), alias_addr);
+ }
+}
+
+uptr MapDynamicShadowAndAliases(uptr shadow_size, uptr alias_size,
+ uptr num_aliases, uptr ring_buffer_size) {
+ CHECK_EQ(alias_size & (alias_size - 1), 0);
+ CHECK_EQ(num_aliases & (num_aliases - 1), 0);
+ CHECK_EQ(ring_buffer_size & (ring_buffer_size - 1), 0);
+
+ const uptr granularity = GetMmapGranularity();
+ shadow_size = RoundUpTo(shadow_size, granularity);
+ CHECK_EQ(shadow_size & (shadow_size - 1), 0);
+
+ const uptr alias_region_size = alias_size * num_aliases;
+ const uptr alignment =
+ 2 * Max(Max(shadow_size, alias_region_size), ring_buffer_size);
+ const uptr left_padding = ring_buffer_size;
+
+ const uptr right_size = alignment;
+ const uptr map_size = left_padding + 2 * alignment;
+
+ const uptr map_start = reinterpret_cast<uptr>(MmapNoAccess(map_size));
+ CHECK_NE(map_start, static_cast<uptr>(-1));
+ const uptr right_start = RoundUpTo(map_start + left_padding, alignment);
+
+ UnmapFromTo(map_start, right_start - left_padding);
+ UnmapFromTo(right_start + right_size, map_start + map_size);
+
+ CreateAliases(right_start + right_size / 2, alias_size, num_aliases);
+
+ return right_start;
+}
+
void InitializePlatformCommonFlags(CommonFlags *cf) {
#if SANITIZER_ANDROID
if (&__libc_get_static_tls_bounds == nullptr)
//===----------------------------------------------------------------------===//
//
// `LocalAddressSpaceView` provides the local (i.e. target and current address
-// space are the same) implementation of the `AddressSpaveView` interface which
+// space are the same) implementation of the `AddressSpaceView` interface which
// provides a simple interface to load memory from another process (i.e.
// out-of-process)
//
extern char **environ;
#endif
-#if defined(__has_include) && __has_include(<os/trace.h>) && defined(__BLOCKS__)
+#if defined(__has_include) && __has_include(<os/trace.h>)
#define SANITIZER_OS_TRACE 1
#include <os/trace.h>
#else
#define SANITIZER_OS_TRACE 0
#endif
+// Import the new crash reporting API.
+#if defined(__has_include) && __has_include(<CrashReporterClient.h>)
+#define HAVE_CRASHREPORTERCLIENT_H 1
+#include <CrashReporterClient.h>
+#else
+#define HAVE_CRASHREPORTERCLIENT_H 0
+#endif
+
#if !SANITIZER_IOS
#include <crt_externs.h> // for _NSGetArgv and _NSGetEnviron
#else
#include <mach/mach_time.h>
#include <mach/vm_statistics.h>
#include <malloc/malloc.h>
+#include <os/log.h>
#include <pthread.h>
#include <sched.h>
#include <signal.h>
return munmap(addr, length);
}
+uptr internal_mremap(void *old_address, uptr old_size, uptr new_size, int flags,
+ void *new_address) {
+ CHECK(false && "internal_mremap is unimplemented on Mac");
+ return 0;
+}
+
int internal_mprotect(void *addr, uptr length, int prot) {
return mprotect(addr, length, prot);
}
// On OS X the executable path is saved to the stack by dyld. Reading it
// from there is much faster than calling dladdr, especially for large
// binaries with symbols.
- InternalScopedString exe_path(kMaxPathLength);
+ InternalMmapVector<char> exe_path(kMaxPathLength);
uint32_t size = exe_path.size();
if (_NSGetExecutablePath(exe_path.data(), &size) == 0 &&
realpath(exe_path.data(), buf) != 0) {
using VersStr = char[64];
+static uptr ApproximateOSVersionViaKernelVersion(VersStr vers) {
+ u16 kernel_major = GetDarwinKernelVersion().major;
+ u16 offset = GetOSMajorKernelOffset();
+ CHECK_GE(kernel_major, offset);
+ u16 os_major = kernel_major - offset;
+
+ const char *format = "%d.0";
+ if (TARGET_OS_OSX) {
+ if (os_major >= 16) { // macOS 11+
+ os_major -= 5;
+ } else { // macOS 10.15 and below
+ format = "10.%d";
+ }
+ }
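+ // Worked example (assuming GetOSMajorKernelOffset() == 4 on macOS):
+ // Darwin 20 -> 20 - 4 = 16 -> 16 - 5 = 11 -> "11.0" (macOS 11);
+ // Darwin 17 -> 17 - 4 = 13 -> "10.13".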
+ return internal_snprintf(vers, sizeof(VersStr), format, os_major);
+}
+
static void GetOSVersion(VersStr vers) {
uptr len = sizeof(VersStr);
if (SANITIZER_IOSSIM) {
} else {
int res =
internal_sysctlbyname("kern.osproductversion", vers, &len, nullptr, 0);
- if (res) {
- // Fallback for XNU 17 (macOS 10.13) and below that do not provide the
- // `kern.osproductversion` property.
- u16 kernel_major = GetDarwinKernelVersion().major;
- u16 offset = GetOSMajorKernelOffset();
- CHECK_LE(kernel_major, 17);
- CHECK_GE(kernel_major, offset);
- u16 os_major = kernel_major - offset;
-
- auto format = TARGET_OS_OSX ? "10.%d" : "%d.0";
- len = internal_snprintf(vers, len, format, os_major);
+
+ // XNU 17 (macOS 10.13) and below do not provide the sysctl
+ // `kern.osproductversion` entry (res != 0).
+ bool no_os_version = res != 0;
+
+ // For launchd, sanitizer initialization runs before sysctl is set up
+ // (res == 0 but len != strlen(vers), so vers is not a valid version).
+ // However, the kernel version `kern.osrelease` is available.
+ bool launchd = (res == 0 && internal_strlen(vers) < 3);
+ if (launchd) CHECK_EQ(internal_getpid(), 1);
+
+ if (no_os_version || launchd) {
+ len = ApproximateOSVersionViaKernelVersion(vers);
}
}
CHECK_LT(len, sizeof(VersStr));
}
static MacosVersion GetMacosAlignedVersionInternal() {
- VersStr vers;
+ VersStr vers = {};
GetOSVersion(vers);
u16 major, minor;
}
DarwinKernelVersion GetDarwinKernelVersion() {
- VersStr vers;
+ VersStr vers = {};
uptr len = sizeof(VersStr);
int res = internal_sysctlbyname("kern.osrelease", vers, &len, nullptr, 0);
CHECK_EQ(res, 0);
void WriteOneLineToSyslog(const char *s) {
#if !SANITIZER_GO
syslog_lock.CheckLocked();
- asl_log(nullptr, nullptr, ASL_LEVEL_ERR, "%s", s);
+ if (GetMacosAlignedVersion() >= MacosVersion(10, 12)) {
+ os_log_error(OS_LOG_DEFAULT, "%{public}s", s);
+ } else {
+ asl_log(nullptr, nullptr, ASL_LEVEL_ERR, "%s", s);
+ }
+#endif
+}
+
+// buffer to store crash report application information
+static char crashreporter_info_buff[__sanitizer::kErrorMessageBufferSize] = {};
+static BlockingMutex crashreporter_info_mutex(LINKER_INITIALIZED);
+
+extern "C" {
+// Integrate with crash reporter libraries.
+#if HAVE_CRASHREPORTERCLIENT_H
+CRASH_REPORTER_CLIENT_HIDDEN
+struct crashreporter_annotations_t gCRAnnotations
+ __attribute__((section("__DATA," CRASHREPORTER_ANNOTATIONS_SECTION))) = {
+ CRASHREPORTER_ANNOTATIONS_VERSION,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+#if CRASHREPORTER_ANNOTATIONS_VERSION > 4
+ 0,
+#endif
+};
+
+#else
+// Fall back to the old crash reporter API.
+static const char *__crashreporter_info__ __attribute__((__used__)) =
+ &crashreporter_info_buff[0];
+asm(".desc ___crashreporter_info__, 0x10");
+#endif
+
+} // extern "C"
+
+static void CRAppendCrashLogMessage(const char *msg) {
+ BlockingMutexLock l(&crashreporter_info_mutex);
+ internal_strlcat(crashreporter_info_buff, msg,
+ sizeof(crashreporter_info_buff));
+#if HAVE_CRASHREPORTERCLIENT_H
+ (void)CRSetCrashLogMessage(crashreporter_info_buff);
#endif
}
if (DyldNeedsEnvVariable() && !lib_is_in_env) {
// DYLD_INSERT_LIBRARIES is not set or does not contain the runtime
// library.
- InternalScopedString program_name(1024);
+ InternalMmapVector<char> program_name(1024);
uint32_t buf_size = program_name.size();
_NSGetExecutablePath(program_name.data(), &buf_size);
char *new_env = const_cast<char*>(info.dli_fname);
return *_NSGetArgv();
}
-#if SANITIZER_IOS
+#if SANITIZER_IOS && !SANITIZER_IOSSIM
// The task_vm_info struct is normally provided by the macOS SDK, but we need
// fields only available in 10.12+. Declare the struct manually to be able to
// build against older SDKs.
uptr GetMaxUserVirtualAddress() {
static uptr max_vm = GetTaskInfoMaxAddress();
- if (max_vm != 0)
- return max_vm - 1;
+ if (max_vm != 0) {
+ const uptr ret_value = max_vm - 1;
+ CHECK_LE(ret_value, SANITIZER_MMAP_RANGE_SIZE);
+ return ret_value;
+ }
// xnu cannot provide vm address limit
# if SANITIZER_WORDSIZE == 32
- return 0xffe00000 - 1;
+ constexpr uptr fallback_max_vm = 0xffe00000 - 1;
# else
- return 0x200000000 - 1;
+ constexpr uptr fallback_max_vm = 0x200000000 - 1;
# endif
+ static_assert(fallback_max_vm <= SANITIZER_MMAP_RANGE_SIZE,
+ "Max virtual address must be less than mmap range size.");
+ return fallback_max_vm;
}
#else // !SANITIZER_IOS
uptr GetMaxUserVirtualAddress() {
# if SANITIZER_WORDSIZE == 64
- return (1ULL << 47) - 1; // 0x00007fffffffffffUL;
+ constexpr uptr max_vm = (1ULL << 47) - 1; // 0x00007fffffffffffUL;
# else // SANITIZER_WORDSIZE == 32
static_assert(SANITIZER_WORDSIZE == 32, "Wrong wordsize");
- return (1ULL << 32) - 1; // 0xffffffff;
+ constexpr uptr max_vm = (1ULL << 32) - 1; // 0xffffffff;
# endif
+ static_assert(max_vm <= SANITIZER_MMAP_RANGE_SIZE,
+ "Max virtual address must be less than mmap range size.");
+ return max_vm;
}
#endif
return shadow_start;
}
+uptr MapDynamicShadowAndAliases(uptr shadow_size, uptr alias_size,
+ uptr num_aliases, uptr ring_buffer_size) {
+ CHECK(false && "HWASan aliasing is unimplemented on Mac");
+ return 0;
+}
+
uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
uptr *largest_gap_found,
uptr *max_occupied_addr) {
#include "sanitizer_common.h"
#include "sanitizer_platform.h"
-
-/* TARGET_OS_OSX is not present in SDKs before Darwin16 (macOS 10.12) use
- TARGET_OS_MAC (we have no support for iOS in any form for these versions,
- so there's no ambiguity). */
-#if !defined(TARGET_OS_OSX) && TARGET_OS_MAC
-# define TARGET_OS_OSX 1
-#endif
-
-/* Other TARGET_OS_xxx are not present on earlier versions, define them to
- 0 (we have no support for them; they are not valid targets anyway). */
-#ifndef TARGET_OS_IOS
-#define TARGET_OS_IOS 0
-#endif
-#ifndef TARGET_OS_TV
-#define TARGET_OS_TV 0
-#endif
-#ifndef TARGET_OS_WATCH
-#define TARGET_OS_WATCH 0
-#endif
-
#if SANITIZER_MAC
#include "sanitizer_posix.h"
} // namespace __sanitizer
-extern "C" {
-static char __crashreporter_info_buff__[__sanitizer::kErrorMessageBufferSize] =
- {};
-static const char *__crashreporter_info__ __attribute__((__used__)) =
- &__crashreporter_info_buff__[0];
-asm(".desc ___crashreporter_info__, 0x10");
-} // extern "C"
-
-namespace __sanitizer {
-static BlockingMutex crashreporter_info_mutex(LINKER_INITIALIZED);
-
-inline void CRAppendCrashLogMessage(const char *msg) {
- BlockingMutexLock l(&crashreporter_info_mutex);
- internal_strlcat(__crashreporter_info_buff__, msg,
- sizeof(__crashreporter_info_buff__)); }
-} // namespace __sanitizer
-
#endif // SANITIZER_MAC
#endif // SANITIZER_MAC_H
INTERCEPTOR(void, malloc_set_zone_name, malloc_zone_t *zone, const char *name) {
COMMON_MALLOC_ENTER();
- // Allocate |sizeof(COMMON_MALLOC_ZONE_NAME "-") + internal_strlen(name)|
- // bytes.
- size_t buflen =
- sizeof(COMMON_MALLOC_ZONE_NAME "-") + (name ? internal_strlen(name) : 0);
- InternalScopedString new_name(buflen);
+ InternalScopedString new_name;
if (name && zone->introspect == sanitizer_zone.introspect) {
new_name.append(COMMON_MALLOC_ZONE_NAME "-%s", name);
name = new_name.data();
return _REAL(munmap, addr, length);
}
+uptr internal_mremap(void *old_address, uptr old_size, uptr new_size, int flags,
+ void *new_address) {
+ CHECK(false && "internal_mremap is unimplemented on NetBSD");
+ return 0;
+}
+
int internal_mprotect(void *addr, uptr length, int prot) {
DEFINE__REAL(int, mprotect, void *a, uptr b, int c);
return _REAL(mprotect, addr, length, prot);
# error "This operating system is not supported"
#endif
+// Get __GLIBC__ on a glibc platform. Exclude Android: features.h includes C
+// function declarations into a .S file which doesn't compile.
+// https://crbug.com/1162741
+#if __has_include(<features.h>) && !defined(__ANDROID__)
+#include <features.h>
+#endif
+
#if defined(__linux__)
# define SANITIZER_LINUX 1
#else
# define SANITIZER_LINUX 0
#endif
+#if defined(__GLIBC__)
+# define SANITIZER_GLIBC 1
+#else
+# define SANITIZER_GLIBC 0
+#endif
+
#if defined(__FreeBSD__)
# define SANITIZER_FREEBSD 1
#else
#if defined(__APPLE__)
# define SANITIZER_MAC 1
# include <TargetConditionals.h>
+# if TARGET_OS_OSX
+# define SANITIZER_OSX 1
+# else
+# define SANITIZER_OSX 0
+# endif
# if TARGET_OS_IPHONE
# define SANITIZER_IOS 1
# else
# define SANITIZER_MAC 0
# define SANITIZER_IOS 0
# define SANITIZER_IOSSIM 0
+# define SANITIZER_OSX 0
#endif
#if defined(__APPLE__) && TARGET_OS_IPHONE && TARGET_OS_WATCH
#define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 38)
#elif defined(__aarch64__)
# if SANITIZER_MAC
-// Darwin iOS/ARM64 has a 36-bit VMA, 64GiB VM
-# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 36)
+# if SANITIZER_OSX || SANITIZER_IOSSIM
+# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47)
+# else
+ // Darwin iOS/ARM64 has a 36-bit VMA, 64GiB VM
+# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 36)
+# endif
# else
# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 48)
# endif
#define SI_LINUX_NOT_ANDROID 0
#endif
+#if SANITIZER_GLIBC
+#define SI_GLIBC 1
+#else
+#define SI_GLIBC 0
+#endif
+
#if SANITIZER_ANDROID
#define SI_ANDROID 1
#else
SANITIZER_INTERCEPT_MEMCMP && \
((SI_POSIX && _GNU_SOURCE) || SI_NETBSD || SI_FREEBSD)
#define SANITIZER_INTERCEPT_STRNDUP SI_POSIX
-#define SANITIZER_INTERCEPT___STRNDUP SI_LINUX_NOT_FREEBSD
+#define SANITIZER_INTERCEPT___STRNDUP SI_GLIBC
#if defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && \
__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 1070
#define SI_MAC_DEPLOYMENT_BELOW_10_7 1
#define SANITIZER_INTERCEPT_FPUTS SI_POSIX
#define SANITIZER_INTERCEPT_PUTS SI_POSIX
-#define SANITIZER_INTERCEPT_PREAD64 SI_LINUX_NOT_ANDROID || SI_SOLARIS32
-#define SANITIZER_INTERCEPT_PWRITE64 SI_LINUX_NOT_ANDROID || SI_SOLARIS32
+#define SANITIZER_INTERCEPT_PREAD64 (SI_GLIBC || SI_SOLARIS32)
+#define SANITIZER_INTERCEPT_PWRITE64 (SI_GLIBC || SI_SOLARIS32)
#define SANITIZER_INTERCEPT_READV SI_POSIX
#define SANITIZER_INTERCEPT_WRITEV SI_POSIX
#define SANITIZER_INTERCEPT_PREADV \
(SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_PWRITEV SI_LINUX_NOT_ANDROID
-#define SANITIZER_INTERCEPT_PREADV64 SI_LINUX_NOT_ANDROID
-#define SANITIZER_INTERCEPT_PWRITEV64 SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_PREADV64 SI_GLIBC
+#define SANITIZER_INTERCEPT_PWRITEV64 SI_GLIBC
#define SANITIZER_INTERCEPT_PRCTL SI_LINUX
#define SANITIZER_INTERCEPT_STRPTIME SI_POSIX
#define SANITIZER_INTERCEPT_SCANF SI_POSIX
-#define SANITIZER_INTERCEPT_ISOC99_SCANF SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_ISOC99_SCANF SI_GLIBC
#ifndef SANITIZER_INTERCEPT_PRINTF
#define SANITIZER_INTERCEPT_PRINTF SI_POSIX
#define SANITIZER_INTERCEPT_PRINTF_L (SI_FREEBSD || SI_NETBSD)
-#define SANITIZER_INTERCEPT_ISOC99_PRINTF SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_ISOC99_PRINTF SI_GLIBC
#endif
#define SANITIZER_INTERCEPT___PRINTF_CHK \
- (SANITIZER_INTERCEPT_PRINTF && SI_LINUX_NOT_ANDROID)
+ (SANITIZER_INTERCEPT_PRINTF && SI_GLIBC)
#define SANITIZER_INTERCEPT_FREXP SI_NOT_FUCHSIA
#define SANITIZER_INTERCEPT_FREXPF_FREXPL SI_POSIX
(SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
#define SANITIZER_INTERCEPT_GETPWENT \
(SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
-#define SANITIZER_INTERCEPT_FGETGRENT_R \
- (SI_FREEBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_FGETGRENT_R (SI_GLIBC || SI_SOLARIS)
#define SANITIZER_INTERCEPT_FGETPWENT SI_LINUX_NOT_ANDROID || SI_SOLARIS
#define SANITIZER_INTERCEPT_GETPWENT_R \
- (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
-#define SANITIZER_INTERCEPT_FGETPWENT_R \
- (SI_FREEBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+ (SI_FREEBSD || SI_NETBSD || SI_GLIBC || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_FGETPWENT_R (SI_FREEBSD || SI_GLIBC || SI_SOLARIS)
#define SANITIZER_INTERCEPT_SETPWENT \
(SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
#define SANITIZER_INTERCEPT_CLOCK_GETTIME \
#define SANITIZER_INTERCEPT_CLOCK_GETCPUCLOCKID SI_LINUX
#define SANITIZER_INTERCEPT_GETITIMER SI_POSIX
#define SANITIZER_INTERCEPT_TIME SI_POSIX
-#define SANITIZER_INTERCEPT_GLOB SI_LINUX_NOT_ANDROID || SI_SOLARIS
-#define SANITIZER_INTERCEPT_GLOB64 SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_GLOB (SI_GLIBC || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_GLOB64 SI_GLIBC
#define SANITIZER_INTERCEPT_WAIT SI_POSIX
#define SANITIZER_INTERCEPT_INET SI_POSIX
#define SANITIZER_INTERCEPT_PTHREAD_GETSCHEDPARAM SI_POSIX
(SI_FREEBSD || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_GETHOSTBYADDR_R \
(SI_FREEBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
-#define SANITIZER_INTERCEPT_GETHOSTENT_R \
- (SI_FREEBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_GETHOSTENT_R (SI_FREEBSD || SI_GLIBC || SI_SOLARIS)
#define SANITIZER_INTERCEPT_GETSOCKOPT SI_POSIX
#define SANITIZER_INTERCEPT_ACCEPT SI_POSIX
#define SANITIZER_INTERCEPT_ACCEPT4 (SI_LINUX_NOT_ANDROID || SI_NETBSD)
(SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
#define SANITIZER_INTERCEPT_TCGETATTR SI_LINUX_NOT_ANDROID || SI_SOLARIS
#define SANITIZER_INTERCEPT_REALPATH SI_POSIX
-#define SANITIZER_INTERCEPT_CANONICALIZE_FILE_NAME \
- (SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_CANONICALIZE_FILE_NAME (SI_GLIBC || SI_SOLARIS)
#define SANITIZER_INTERCEPT_CONFSTR \
(SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
#define SANITIZER_INTERCEPT_SCHED_GETAFFINITY SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_SIGPROCMASK SI_POSIX
#define SANITIZER_INTERCEPT_PTHREAD_SIGMASK SI_POSIX
#define SANITIZER_INTERCEPT_BACKTRACE \
- (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+ (SI_FREEBSD || SI_NETBSD || SI_GLIBC || SI_SOLARIS)
#define SANITIZER_INTERCEPT_GETMNTENT SI_LINUX
#define SANITIZER_INTERCEPT_GETMNTENT_R SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_STATFS \
#define SANITIZER_INTERCEPT_SHMCTL \
(((SI_FREEBSD || SI_LINUX_NOT_ANDROID) && SANITIZER_WORDSIZE == 64) || \
SI_NETBSD || SI_SOLARIS) // NOLINT
-#define SANITIZER_INTERCEPT_RANDOM_R SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_RANDOM_R SI_GLIBC
#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GET SI_POSIX
#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETINHERITSCHED \
(SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
-#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETAFFINITY_NP SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETAFFINITY_NP SI_GLIBC
#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GET_SCHED SI_POSIX
#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPSHARED \
(SI_POSIX && !SI_NETBSD)
#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETROBUST_NP SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GETPSHARED \
(SI_POSIX && !SI_NETBSD)
-#define SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GETKIND_NP SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GETKIND_NP SI_GLIBC
#define SANITIZER_INTERCEPT_PTHREAD_CONDATTR_GETPSHARED (SI_POSIX && !SI_NETBSD)
#define SANITIZER_INTERCEPT_PTHREAD_CONDATTR_GETCLOCK \
(SI_LINUX_NOT_ANDROID || SI_SOLARIS)
(SI_LINUX_NOT_ANDROID && !SI_NETBSD)
#define SANITIZER_INTERCEPT_THR_EXIT SI_FREEBSD
#define SANITIZER_INTERCEPT_TMPNAM SI_POSIX
-#define SANITIZER_INTERCEPT_TMPNAM_R SI_LINUX_NOT_ANDROID || SI_SOLARIS
+#define SANITIZER_INTERCEPT_TMPNAM_R (SI_GLIBC || SI_SOLARIS)
#define SANITIZER_INTERCEPT_PTSNAME SI_LINUX
#define SANITIZER_INTERCEPT_PTSNAME_R SI_LINUX
#define SANITIZER_INTERCEPT_TTYNAME SI_POSIX
#define SANITIZER_INTERCEPT_LGAMMAL (SI_POSIX && !SI_NETBSD)
#define SANITIZER_INTERCEPT_LGAMMA_R (SI_FREEBSD || SI_LINUX || SI_SOLARIS)
#define SANITIZER_INTERCEPT_LGAMMAL_R SI_LINUX_NOT_ANDROID || SI_SOLARIS
-#define SANITIZER_INTERCEPT_DRAND48_R SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_DRAND48_R SI_GLIBC
#define SANITIZER_INTERCEPT_RAND_R \
(SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
#define SANITIZER_INTERCEPT_ICONV \
(SI_LINUX || SI_FREEBSD || SI_NETBSD || SI_MAC || SI_SOLARIS)
#define SANITIZER_INTERCEPT_PTHREAD_MUTEX SI_POSIX
-#define SANITIZER_INTERCEPT___PTHREAD_MUTEX SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT___PTHREAD_MUTEX SI_GLIBC
#define SANITIZER_INTERCEPT___LIBC_MUTEX SI_NETBSD
#define SANITIZER_INTERCEPT_PTHREAD_SETNAME_NP \
- (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+ (SI_FREEBSD || SI_NETBSD || SI_GLIBC || SI_SOLARIS)
#define SANITIZER_INTERCEPT_PTHREAD_GETNAME_NP \
- (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+ (SI_FREEBSD || SI_NETBSD || SI_GLIBC || SI_SOLARIS)
#define SANITIZER_INTERCEPT_TLS_GET_ADDR \
(SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
#else
#define SANITIZER_INTERCEPT_AEABI_MEM 0
#endif
-#define SANITIZER_INTERCEPT___BZERO SI_MAC || SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT___BZERO SI_MAC || SI_GLIBC
#define SANITIZER_INTERCEPT_BZERO SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_FTIME (!SI_FREEBSD && !SI_NETBSD && SI_POSIX)
-#define SANITIZER_INTERCEPT_XDR SI_LINUX_NOT_ANDROID || SI_SOLARIS
-#define SANITIZER_INTERCEPT_XDRREC SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_XDR (SI_GLIBC || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_XDRREC SI_GLIBC
#define SANITIZER_INTERCEPT_TSEARCH \
(SI_LINUX_NOT_ANDROID || SI_MAC || SI_NETBSD || SI_SOLARIS)
-#define SANITIZER_INTERCEPT_LIBIO_INTERNALS SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_LIBIO_INTERNALS SI_GLIBC
#define SANITIZER_INTERCEPT_FOPEN SI_POSIX
-#define SANITIZER_INTERCEPT_FOPEN64 SI_LINUX_NOT_ANDROID || SI_SOLARIS32
+#define SANITIZER_INTERCEPT_FOPEN64 (SI_GLIBC || SI_SOLARIS32)
#define SANITIZER_INTERCEPT_OPEN_MEMSTREAM \
(SI_LINUX_NOT_ANDROID || SI_NETBSD || SI_SOLARIS)
-#define SANITIZER_INTERCEPT_OBSTACK SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_OBSTACK SI_GLIBC
#define SANITIZER_INTERCEPT_FFLUSH SI_POSIX
#define SANITIZER_INTERCEPT_FCLOSE SI_POSIX
#define SANITIZER_INTERCEPT_CTERMID_R (SI_MAC || SI_FREEBSD || SI_SOLARIS)
#define SANITIZER_INTERCEPTOR_HOOKS \
- (SI_LINUX || SI_MAC || SI_WINDOWS || SI_NETBSD)
+ (SI_LINUX || SI_MAC || SI_WINDOWS || SI_FREEBSD || SI_NETBSD || SI_SOLARIS)
#define SANITIZER_INTERCEPT_RECV_RECVFROM SI_POSIX
#define SANITIZER_INTERCEPT_SEND_SENDTO SI_POSIX
#define SANITIZER_INTERCEPT_EVENTFD_READ_WRITE SI_LINUX
#define SANITIZER_INTERCEPT_MMAP SI_POSIX
#define SANITIZER_INTERCEPT_MMAP64 SI_LINUX_NOT_ANDROID
-#define SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO \
- (!SI_FREEBSD && !SI_MAC && !SI_NETBSD && SI_NOT_FUCHSIA && SI_NOT_RTEMS && \
- !SI_SOLARIS) // NOLINT
+#define SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO (SI_GLIBC || SI_ANDROID)
#define SANITIZER_INTERCEPT_MEMALIGN \
(!SI_FREEBSD && !SI_MAC && !SI_NETBSD && SI_NOT_RTEMS)
-#define SANITIZER_INTERCEPT___LIBC_MEMALIGN \
- (!SI_FREEBSD && !SI_MAC && !SI_NETBSD && !SI_OPENBSD && SI_NOT_RTEMS && \
- !SI_ANDROID) // NOLINT
-#define SANITIZER_INTERCEPT_PVALLOC \
- (!SI_FREEBSD && !SI_MAC && !SI_NETBSD && SI_NOT_FUCHSIA && SI_NOT_RTEMS && \
- !SI_SOLARIS) // NOLINT
-#define SANITIZER_INTERCEPT_CFREE \
- (!SI_FREEBSD && !SI_MAC && !SI_NETBSD && SI_NOT_FUCHSIA && SI_NOT_RTEMS && \
- !SI_SOLARIS && !SANITIZER_ANDROID) // NOLINT
+#define SANITIZER_INTERCEPT___LIBC_MEMALIGN SI_GLIBC
+#define SANITIZER_INTERCEPT_PVALLOC (SI_GLIBC || SI_ANDROID)
+#define SANITIZER_INTERCEPT_CFREE (SI_GLIBC && !SANITIZER_RISCV64)
#define SANITIZER_INTERCEPT_REALLOCARRAY SI_POSIX
#define SANITIZER_INTERCEPT_ALIGNED_ALLOC (!SI_MAC && SI_NOT_RTEMS)
#define SANITIZER_INTERCEPT_MALLOC_USABLE_SIZE (!SI_MAC && !SI_NETBSD)
#define SANITIZER_INTERCEPT_STRMODE (SI_NETBSD || SI_FREEBSD)
#define SANITIZER_INTERCEPT_TTYENT SI_NETBSD
#define SANITIZER_INTERCEPT_PROTOENT (SI_NETBSD || SI_LINUX)
-#define SANITIZER_INTERCEPT_PROTOENT_R (SI_LINUX_NOT_ANDROID)
+#define SANITIZER_INTERCEPT_PROTOENT_R SI_GLIBC
#define SANITIZER_INTERCEPT_NETENT SI_NETBSD
#define SANITIZER_INTERCEPT_SETVBUF \
(SI_NETBSD || SI_FREEBSD || SI_LINUX || SI_MAC)
#define SANITIZER_INTERCEPT_GETENTROPY SI_FREEBSD
#define SANITIZER_INTERCEPT_QSORT \
(SI_POSIX && !SI_IOSSIM && !SI_WATCHOS && !SI_TVOS && !SI_ANDROID)
-#define SANITIZER_INTERCEPT_QSORT_R (SI_LINUX && !SI_ANDROID)
+#define SANITIZER_INTERCEPT_QSORT_R SI_GLIBC
// sigaltstack on i386 macOS cannot be intercepted due to setjmp()
// calling it and assuming that it does not clobber registers.
#define SANITIZER_INTERCEPT_SIGALTSTACK \
#define SANITIZER_INTERCEPT_UNAME (SI_POSIX && !SI_FREEBSD)
#define SANITIZER_INTERCEPT___XUNAME SI_FREEBSD
+// This macro gives a way for downstream users to override the above
+// interceptor macros irrespective of the platform they are on. They have
+// to do two things:
+// 1. Build compiler-rt with -DSANITIZER_OVERRIDE_INTERCEPTORS.
+// 2. Provide a header file named sanitizer_intercept_overriders.h in the
+// include path for their compiler-rt build.
+// An example of an overrider for strlen interceptor that one can list in
+// sanitizer_intercept_overriders.h is as follows:
+//
+// #ifdef SANITIZER_INTERCEPT_STRLEN
+// #undef SANITIZER_INTERCEPT_STRLEN
+// #define SANITIZER_INTERCEPT_STRLEN <value of choice>
+// #endif
+//
+// This "feature" is useful for downstream users who do not want some of
+// their libc functions to be intercepted. They can selectively disable
+// interception of those functions.
+#ifdef SANITIZER_OVERRIDE_INTERCEPTORS
+#include <sanitizer_intercept_overriders.h>
+#endif
+
#endif // #ifndef SANITIZER_PLATFORM_INTERCEPTORS_H
#include <sys/stat.h>
#include <sys/statvfs.h>
#include <sys/time.h>
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-W#warnings"
#include <sys/timeb.h>
+#pragma clang diagnostic pop
#include <sys/times.h>
#include <sys/timespec.h>
#include <sys/types.h>
// With old kernels (and even new kernels on powerpc) asm/stat.h uses types that
// are not defined anywhere in userspace headers. Fake them. This seems to work
-// fine with newer headers, too. Beware that with <sys/stat.h>, struct stat
-// takes the form of struct stat64 on 32-bit platforms if _FILE_OFFSET_BITS=64.
-// Also, for some platforms (e.g. mips) there are additional members in the
-// <sys/stat.h> struct stat:s.
+// fine with newer headers, too.
#include <linux/posix_types.h>
-#if defined(__x86_64__)
+#if defined(__x86_64__) || defined(__mips__)
#include <sys/stat.h>
#else
#define ino_t __kernel_ino_t
// Sizes and layouts of platform-specific POSIX data structures.
//===----------------------------------------------------------------------===//
-#include "sanitizer_platform.h"
-
-#if SANITIZER_LINUX || SANITIZER_MAC
+#if defined(__linux__) || defined(__APPLE__)
// Tests in this file assume that off_t-dependent data structures match the
// libc ABI. For example, struct dirent here is what readdir() function (as
// exported from libc) returns, and not the user-facing "dirent", which
// depends on _FILE_OFFSET_BITS setting.
// To get this "true" dirent definition, we undefine _FILE_OFFSET_BITS below.
-#ifdef _FILE_OFFSET_BITS
#undef _FILE_OFFSET_BITS
#endif
+// Must go after undef _FILE_OFFSET_BITS.
+#include "sanitizer_platform.h"
+
+#if SANITIZER_LINUX || SANITIZER_MAC
// Must go after undef _FILE_OFFSET_BITS.
#include "sanitizer_glibc_version.h"
#include <pwd.h>
#include <signal.h>
#include <stddef.h>
+#include <stdio.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/socket.h>
#endif
#if !SANITIZER_ANDROID
-#include <fstab.h>
#include <sys/mount.h>
#include <sys/timeb.h>
#include <utmpx.h>
#include <wordexp.h>
#endif
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
-#include <glob.h>
-#include <obstack.h>
-#include <mqueue.h>
+#if SANITIZER_LINUX
+#if SANITIZER_GLIBC
+#include <fstab.h>
#include <net/if_ppp.h>
#include <netax25/ax25.h>
#include <netipx/ipx.h>
#include <netrom/netrom.h>
+#include <obstack.h>
#if HAVE_RPC_XDR_H
# include <rpc/xdr.h>
#endif
#include <scsi/scsi.h>
-#include <sys/mtio.h>
+#else
+#include <linux/if_ppp.h>
+#include <linux/kd.h>
+#include <linux/ppp_defs.h>
+#endif // SANITIZER_GLIBC
+
+#if SANITIZER_ANDROID
+#include <linux/mtio.h>
+#else
+#include <glob.h>
+#include <mqueue.h>
#include <sys/kd.h>
+#include <sys/mtio.h>
#include <sys/shm.h>
#include <sys/statvfs.h>
#include <sys/timex.h>
#include <sys/msg.h>
#include <sys/ipc.h>
#include <crypt.h>
-#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
+#endif // SANITIZER_ANDROID
-#if SANITIZER_ANDROID
-#include <linux/kd.h>
-#include <linux/mtio.h>
-#include <linux/ppp_defs.h>
-#include <linux/if_ppp.h>
-#endif
-
-#if SANITIZER_LINUX
#include <link.h>
#include <sys/vfs.h>
#include <sys/epoll.h>
#include <linux/capability.h>
+#else
+#include <fstab.h>
#endif // SANITIZER_LINUX
#if SANITIZER_MAC
unsigned struct_statfs64_sz = sizeof(struct statfs64);
#endif // (SANITIZER_MAC && !TARGET_CPU_ARM64) && !SANITIZER_IOS
-#if !SANITIZER_ANDROID
+#if SANITIZER_GLIBC || SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_MAC
unsigned struct_fstab_sz = sizeof(struct fstab);
+#endif // SANITIZER_GLIBC || SANITIZER_FREEBSD || SANITIZER_NETBSD ||
+ // SANITIZER_MAC
+#if !SANITIZER_ANDROID
unsigned struct_statfs_sz = sizeof(struct statfs);
unsigned struct_sockaddr_sz = sizeof(struct sockaddr);
unsigned ucontext_t_sz = sizeof(ucontext_t);
unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);
#endif
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
+#if SANITIZER_GLIBC
int glob_nomatch = GLOB_NOMATCH;
int glob_altdirfunc = GLOB_ALTDIRFUNC;
#endif
unsigned struct_input_id_sz = sizeof(struct input_id);
unsigned struct_mtpos_sz = sizeof(struct mtpos);
unsigned struct_rtentry_sz = sizeof(struct rtentry);
+#if SANITIZER_GLIBC || SANITIZER_ANDROID
unsigned struct_termio_sz = sizeof(struct termio);
+#endif
unsigned struct_vt_consize_sz = sizeof(struct vt_consize);
unsigned struct_vt_sizes_sz = sizeof(struct vt_sizes);
unsigned struct_vt_stat_sz = sizeof(struct vt_stat);
unsigned struct_vt_mode_sz = sizeof(struct vt_mode);
#endif // SANITIZER_LINUX
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
+#if SANITIZER_GLIBC
unsigned struct_ax25_parms_struct_sz = sizeof(struct ax25_parms_struct);
unsigned struct_cyclades_monitor_sz = sizeof(struct cyclades_monitor);
#if EV_VERSION > (0x010000)
unsigned struct_sockaddr_ax25_sz = sizeof(struct sockaddr_ax25);
unsigned struct_unimapdesc_sz = sizeof(struct unimapdesc);
unsigned struct_unimapinit_sz = sizeof(struct unimapinit);
-#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
unsigned struct_audio_buf_info_sz = sizeof(struct audio_buf_info);
unsigned struct_ppp_stats_sz = sizeof(struct ppp_stats);
-#endif // (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
+#endif // SANITIZER_GLIBC
#if !SANITIZER_ANDROID && !SANITIZER_MAC
unsigned struct_sioc_sg_req_sz = sizeof(struct sioc_sg_req);
unsigned IOCTL_PIO_UNIMAP = PIO_UNIMAP;
unsigned IOCTL_PIO_UNIMAPCLR = PIO_UNIMAPCLR;
unsigned IOCTL_PIO_UNISCRNMAP = PIO_UNISCRNMAP;
+#if SANITIZER_GLIBC
unsigned IOCTL_SCSI_IOCTL_GET_IDLUN = SCSI_IOCTL_GET_IDLUN;
unsigned IOCTL_SCSI_IOCTL_PROBE_HOST = SCSI_IOCTL_PROBE_HOST;
unsigned IOCTL_SCSI_IOCTL_TAGGED_DISABLE = SCSI_IOCTL_TAGGED_DISABLE;
unsigned IOCTL_SIOCNRGETPARMS = SIOCNRGETPARMS;
unsigned IOCTL_SIOCNRRTCTL = SIOCNRRTCTL;
unsigned IOCTL_SIOCNRSETPARMS = SIOCNRSETPARMS;
+#endif
unsigned IOCTL_TIOCGSERIAL = TIOCGSERIAL;
unsigned IOCTL_TIOCSERGETMULTI = TIOCSERGETMULTI;
unsigned IOCTL_TIOCSERSETMULTI = TIOCSERSETMULTI;
CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_phnum);
#endif // SANITIZER_LINUX || SANITIZER_FREEBSD
-#if (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
+#if SANITIZER_GLIBC || SANITIZER_FREEBSD
CHECK_TYPE_SIZE(glob_t);
CHECK_SIZE_AND_OFFSET(glob_t, gl_pathc);
CHECK_SIZE_AND_OFFSET(glob_t, gl_pathv);
CHECK_SIZE_AND_OFFSET(glob_t, gl_opendir);
CHECK_SIZE_AND_OFFSET(glob_t, gl_lstat);
CHECK_SIZE_AND_OFFSET(glob_t, gl_stat);
-#endif
+#endif // SANITIZER_GLIBC || SANITIZER_FREEBSD
CHECK_TYPE_SIZE(addrinfo);
CHECK_SIZE_AND_OFFSET(addrinfo, ai_flags);
CHECK_SIZE_AND_OFFSET(iovec, iov_base);
CHECK_SIZE_AND_OFFSET(iovec, iov_len);
+// In POSIX, int msg_iovlen; socklen_t msg_controllen; socklen_t cmsg_len; but
+// many implementations don't conform to the standard. Since we pick the
+// non-conforming glibc definition, exclude the checks for musl (incompatible
+// sizes but compatible offsets).
CHECK_TYPE_SIZE(msghdr);
CHECK_SIZE_AND_OFFSET(msghdr, msg_name);
CHECK_SIZE_AND_OFFSET(msghdr, msg_namelen);
CHECK_SIZE_AND_OFFSET(msghdr, msg_iov);
+#if SANITIZER_GLIBC || SANITIZER_ANDROID
CHECK_SIZE_AND_OFFSET(msghdr, msg_iovlen);
+#endif
CHECK_SIZE_AND_OFFSET(msghdr, msg_control);
+#if SANITIZER_GLIBC || SANITIZER_ANDROID
CHECK_SIZE_AND_OFFSET(msghdr, msg_controllen);
+#endif
CHECK_SIZE_AND_OFFSET(msghdr, msg_flags);
CHECK_TYPE_SIZE(cmsghdr);
+#if SANITIZER_GLIBC || SANITIZER_ANDROID
CHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_len);
+#endif
CHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_level);
CHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_type);
CHECK_TYPE_SIZE(ether_addr);
-#if (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
+#if SANITIZER_GLIBC || SANITIZER_FREEBSD
CHECK_TYPE_SIZE(ipc_perm);
# if SANITIZER_FREEBSD
CHECK_SIZE_AND_OFFSET(ipc_perm, key);
CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_data);
#endif
-#if SANITIZER_LINUX
+#if SANITIZER_GLIBC || SANITIZER_ANDROID
COMPILER_CHECK(sizeof(__sanitizer_struct_mallinfo) == sizeof(struct mallinfo));
#endif
COMPILER_CHECK(__sanitizer_XDR_FREE == XDR_FREE);
#endif
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
+#if SANITIZER_GLIBC
COMPILER_CHECK(sizeof(__sanitizer_FILE) <= sizeof(FILE));
CHECK_SIZE_AND_OFFSET(FILE, _flags);
CHECK_SIZE_AND_OFFSET(FILE, _IO_read_ptr);
CHECK_SIZE_AND_OFFSET(FILE, _markers);
CHECK_SIZE_AND_OFFSET(FILE, _chain);
CHECK_SIZE_AND_OFFSET(FILE, _fileno);
-#endif
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
COMPILER_CHECK(sizeof(__sanitizer__obstack_chunk) <= sizeof(_obstack_chunk));
CHECK_SIZE_AND_OFFSET(_obstack_chunk, limit);
CHECK_SIZE_AND_OFFSET(_obstack_chunk, prev);
CHECK_SIZE_AND_OFFSET(cookie_io_functions_t, write);
CHECK_SIZE_AND_OFFSET(cookie_io_functions_t, seek);
CHECK_SIZE_AND_OFFSET(cookie_io_functions_t, close);
-#endif
+#endif // SANITIZER_GLIBC
#if SANITIZER_LINUX || SANITIZER_FREEBSD
CHECK_TYPE_SIZE(sem_t);
#elif defined(__mips__)
const unsigned struct_kernel_stat_sz = SANITIZER_ANDROID
? FIRST_32_SECOND_64(104, 128)
- : FIRST_32_SECOND_64(144, 216);
+ : FIRST_32_SECOND_64(160, 216);
const unsigned struct_kernel_stat64_sz = 104;
#elif defined(__s390__) && !defined(__s390x__)
const unsigned struct_kernel_stat_sz = 64;
int cmsg_type;
};
#else
+// In POSIX, int msg_iovlen; socklen_t msg_controllen; socklen_t cmsg_len; but
+// many implementations don't conform to the standard.
struct __sanitizer_msghdr {
void *msg_name;
unsigned msg_namelen;
bool GetCodeRangeForFile(const char *module, uptr *start, uptr *end) {
MemoryMappingLayout proc_maps(/*cache_enabled*/false);
- InternalScopedString buff(kMaxPathLength);
- MemoryMappedSegment segment(buff.data(), kMaxPathLength);
+ InternalMmapVector<char> buff(kMaxPathLength);
+ MemoryMappedSegment segment(buff.data(), buff.size());
while (proc_maps.Next(&segment)) {
if (segment.IsExecutable() &&
internal_strcmp(module, segment.filename) == 0) {
uptr internal_mmap(void *addr, uptr length, int prot, int flags,
int fd, u64 offset);
uptr internal_munmap(void *addr, uptr length);
+#if SANITIZER_LINUX
+uptr internal_mremap(void *old_address, uptr old_size, uptr new_size, int flags,
+ void *new_address);
+#endif
int internal_mprotect(void *addr, uptr length, int prot);
int internal_madvise(uptr addr, uptr length, int advice);
if (GetHandleSignalMode(SIGABRT) != kHandleSignalNo) {
struct sigaction sigact;
internal_memset(&sigact, 0, sizeof(sigact));
- sigact.sa_sigaction = (sa_sigaction_t)SIG_DFL;
+ sigact.sa_handler = SIG_DFL;
internal_sigaction(SIGABRT, &sigact, nullptr);
}
#endif
va_list args) {
va_list args2;
va_copy(args2, args);
- const int kLen = 16 * 1024;
- int needed_length;
+ InternalMmapVector<char> v;
+ int needed_length = 0;
char *buffer = local_buffer;
// First try to print a message using a local buffer, and then fall back to
// mmaped buffer.
- for (int use_mmap = 0; use_mmap < 2; use_mmap++) {
+ for (int use_mmap = 0;; use_mmap++) {
if (use_mmap) {
va_end(args);
va_copy(args, args2);
- buffer = (char*)MmapOrDie(kLen, "Report");
- buffer_size = kLen;
+ v.resize(needed_length + 1);
+ buffer_size = v.capacity();
+ v.resize(buffer_size);
+ buffer = &v[0];
}
needed_length = 0;
- // Check that data fits into the current buffer.
-# define CHECK_NEEDED_LENGTH \
- if (needed_length >= buffer_size) { \
- if (!use_mmap) continue; \
- RAW_CHECK_MSG(needed_length < kLen, \
- "Buffer in Report is too short!\n"); \
- }
// Fuchsia's logging infrastructure always keeps track of the logging
// process, thread, and timestamp, so never prepend such information.
if (!SANITIZER_FUCHSIA && append_pid) {
if (common_flags()->log_exe_name && exe_name) {
needed_length += internal_snprintf(buffer, buffer_size,
"==%s", exe_name);
- CHECK_NEEDED_LENGTH
+ if (needed_length >= buffer_size)
+ continue;
}
needed_length += internal_snprintf(
buffer + needed_length, buffer_size - needed_length, "==%d==", pid);
- CHECK_NEEDED_LENGTH
+ if (needed_length >= buffer_size)
+ continue;
}
needed_length += VSNPrintf(buffer + needed_length,
buffer_size - needed_length, format, args);
- CHECK_NEEDED_LENGTH
+ if (needed_length >= buffer_size)
+ continue;
// If the message fit into the buffer, print it and exit.
break;
-# undef CHECK_NEEDED_LENGTH
}
RawWrite(buffer);
CallPrintfAndReportCallback(buffer);
LogMessageOnPrintf(buffer);
- // If we had mapped any memory, clean up.
- if (buffer != local_buffer)
- UnmapOrDie((void *)buffer, buffer_size);
va_end(args2);
}
FORMAT(2, 3)
void InternalScopedString::append(const char *format, ...) {
- CHECK_LT(length_, size());
- va_list args;
- va_start(args, format);
- VSNPrintf(data() + length_, size() - length_, format, args);
- va_end(args);
- length_ += internal_strlen(data() + length_);
- CHECK_LT(length_, size());
+ uptr prev_len = length();
+
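+ // Format into the current capacity; if the output is truncated, double the
+ // capacity and retry until it fits, then trim the buffer to the exact length.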
+ while (true) {
+ buffer_.resize(buffer_.capacity());
+
+ va_list args;
+ va_start(args, format);
+ uptr sz = VSNPrintf(buffer_.data() + prev_len, buffer_.size() - prev_len,
+ format, args);
+ va_end(args);
+ if (sz < buffer_.size() - prev_len) {
+ buffer_.resize(prev_len + sz + 1);
+ break;
+ }
+
+ buffer_.reserve(buffer_.capacity() * 2);
+ }
+ CHECK_EQ(buffer_[length()], '\0');
}
} // namespace __sanitizer
void MemoryMappingLayout::DumpListOfModules(
InternalMmapVectorNoCtor<LoadedModule> *modules) {
Reset();
- InternalScopedString module_name(kMaxPathLength);
+ InternalMmapVector<char> module_name(kMaxPathLength);
MemoryMappedSegment segment(module_name.data(), module_name.size());
for (uptr i = 0; Next(&segment); i++) {
const char *cur_name = segment.filename;
void MemoryMappingLayout::DumpListOfModules(
InternalMmapVectorNoCtor<LoadedModule> *modules) {
Reset();
- InternalScopedString module_name(kMaxPathLength);
- MemoryMappedSegment segment(module_name.data(), kMaxPathLength);
+ InternalMmapVector<char> module_name(kMaxPathLength);
+ MemoryMappedSegment segment(module_name.data(), module_name.size());
MemoryMappedSegmentData data;
segment.data_ = &data;
while (Next(&segment)) {
// Information about the process mappings (Solaris-specific parts).
//===----------------------------------------------------------------------===//
+// Before Solaris 11.4, <procfs.h> doesn't work in a largefile environment.
+#undef _FILE_OFFSET_BITS
#include "sanitizer_platform.h"
#if SANITIZER_SOLARIS
#include "sanitizer_common.h"
#include "sanitizer_procmaps.h"
-// Before Solaris 11.4, <procfs.h> doesn't work in a largefile environment.
-#undef _FILE_OFFSET_BITS
#include <procfs.h>
#include <limits.h>
#if __has_feature(ptrauth_calls)
#include <ptrauth.h>
+#elif defined(__ARM_FEATURE_PAC_DEFAULT) && !defined(__APPLE__)
+inline unsigned long ptrauth_strip(void* __value, unsigned int __key) {
+ // On the stack the link register is protected with a Pointer
+ // Authentication Code when compiled with -mbranch-protection.
+ // Strip the PAC unconditionally: xpaclri is in the NOP space, so it
+ // does nothing when PAC is not enabled or not available.
+ unsigned long ret;
+ asm volatile(
+ "mov x30, %1\n\t"
+ "hint #7\n\t" // xpaclri
+ "mov %0, x30\n\t"
+ : "=r"(ret)
+ : "r"(__value)
+ : "x30");
+ return ret;
+}
+#define ptrauth_auth_data(__value, __old_key, __old_data) __value
+#define ptrauth_string_discriminator(__string) ((int)0)
#else
// Copied from <ptrauth.h>
#define ptrauth_strip(__value, __key) __value
#define ptrauth_string_discriminator(__string) ((int)0)
#endif
-#define STRIP_PC(pc) ((uptr)ptrauth_strip(pc, 0))
+#define STRIP_PAC_PC(pc) ((uptr)ptrauth_strip(pc, 0))
#endif // SANITIZER_PTRAUTH_H
if (!map_.size())
return StackTrace();
IdDescPair pair = {id, nullptr};
- uptr idx =
- InternalLowerBound(map_, 0, map_.size(), pair, IdDescPair::IdComparator);
+ uptr idx = InternalLowerBound(map_, pair, IdDescPair::IdComparator);
if (idx > map_.size() || map_[idx].id != id)
return StackTrace();
return map_[idx].desc->load();
#include "sanitizer_common.h"
#include "sanitizer_flags.h"
#include "sanitizer_platform.h"
+#include "sanitizer_ptrauth.h"
namespace __sanitizer {
// Nope, this does not look right either. This means the frame after next does
// not have a valid frame pointer, but we can still extract the caller PC.
// Unfortunately, there is no way to decide between GCC and LLVM frame
- // layouts. Assume GCC.
- return bp_prev - 1;
+ // layouts. Assume LLVM.
+ return bp_prev;
#else
return (uhwptr*)bp;
#endif
IsAligned((uptr)frame, sizeof(*frame)) &&
size < max_depth) {
#ifdef __powerpc__
- // PowerPC ABIs specify that the return address is saved on the
- // *caller's* stack frame. Thus we must dereference the back chain
- // to find the caller frame before extracting it.
+ // PowerPC ABIs specify that the return address is saved at offset
+ // 16 of the *caller's* stack frame. Thus we must dereference the
+ // back chain to find the caller frame before extracting it.
uhwptr *caller_frame = (uhwptr*)frame[0];
if (!IsValidFrame((uptr)caller_frame, stack_top, bottom) ||
!IsAligned((uptr)caller_frame, sizeof(uhwptr)))
break;
- // For most ABIs the offset where the return address is saved is two
- // register sizes. The exception is the SVR4 ABI, which uses an
- // offset of only one register size.
-#ifdef _CALL_SYSV
- uhwptr pc1 = caller_frame[1];
-#else
uhwptr pc1 = caller_frame[2];
-#endif
#elif defined(__s390__)
uhwptr pc1 = frame[14];
#elif defined(__riscv)
// frame[-1] contains the return address
uhwptr pc1 = frame[-1];
#else
- uhwptr pc1 = frame[1];
+ uhwptr pc1 = STRIP_PAC_PC((void *)frame[1]);
#endif
// Let's assume that any pointer in the 0th page (i.e. <0x1000 on i386 and
// x86_64) is invalid and stop unwinding here. If we're adding support for
static uptr GetCurrentPc();
static inline uptr GetPreviousInstructionPc(uptr pc);
static uptr GetNextInstructionPc(uptr pc);
- typedef bool (*SymbolizeCallback)(const void *pc, char *out_buffer,
- int out_size);
};
// Performance-critical, must be in the header.
Printf(" <empty stack>\n\n");
return;
}
- InternalScopedString frame_desc(GetPageSizeCached() * 2);
- InternalScopedString dedup_token(GetPageSizeCached());
+ InternalScopedString frame_desc;
+ InternalScopedString dedup_token;
int dedup_frames = common_flags()->dedup_token_length;
bool symbolize = RenderNeedsSymbolization(common_flags()->stack_trace_format);
uptr frame_num = 0;
out_buf[out_buf_size - 1] = 0;
return;
}
- InternalScopedString frame_desc(GetPageSizeCached());
+ InternalScopedString frame_desc;
uptr frame_num = 0;
// Reserve one byte for the final 0.
char *out_end = out_buf + out_buf_size - 1;
out_buf[0] = 0;
DataInfo DI;
if (!Symbolizer::GetOrInit()->SymbolizeData(data_addr, &DI)) return;
- InternalScopedString data_desc(GetPageSizeCached());
+ InternalScopedString data_desc;
RenderData(&data_desc, fmt, &DI, common_flags()->strip_path_prefix);
internal_strncpy(out_buf, data_desc.data(), out_buf_size);
out_buf[out_buf_size - 1] = 0;
#ifndef NT_X86_XSTATE
#define NT_X86_XSTATE 0x202
#endif
+#ifndef PTRACE_GETREGSET
+#define PTRACE_GETREGSET 0x4204
+#endif
// Compiler may use FP registers to store pointers.
static constexpr uptr kExtraRegs[] = {NT_X86_XSTATE, NT_FPREGSET};
#elif SANITIZER_RISCV64
typedef struct user_regs_struct regs_struct;
+// sys/ucontext.h already defines REG_SP as 2. Undefine it first.
+#undef REG_SP
#define REG_SP sp
static constexpr uptr kExtraRegs[] = {0};
#define ARCH_IOVEC_FOR_GETREGSET
static bool GetPathAssumingFileIsRelativeToExec(const char *file_path,
/*out*/char *new_file_path,
uptr new_file_path_size) {
- InternalScopedString exec(kMaxPathLength);
+ InternalMmapVector<char> exec(kMaxPathLength);
if (ReadBinaryNameCached(exec.data(), exec.size())) {
const char *file_name_pos = StripModuleName(exec.data());
uptr path_to_exec_len = file_name_pos - exec.data();
if (filename[0] == '\0')
return;
- InternalScopedString new_file_path(kMaxPathLength);
+ InternalMmapVector<char> new_file_path(kMaxPathLength);
filename = FindFile(filename, new_file_path.data(), new_file_path.size());
// Read the file.
InternalFree(info->function);
info->function = 0;
}
- if (0 == internal_strcmp(info->file, "??")) {
+ if (info->file && 0 == internal_strcmp(info->file, "??")) {
InternalFree(info->file);
info->file = 0;
}
return false;
}
+// This is mainly used by hwasan for online symbolization. It is not needed
+// here, since hwasan can always just dump stack frames for offline
+// symbolization.
+bool Symbolizer::SymbolizeFrame(uptr addr, FrameInfo *info) { return false; }
+
// This is used in some places for suppression checking, which we
// don't really support for Fuchsia. It's also used in UBSan to
// identify a PC location to a function name, so we always fill in
static SymbolizerTool *ChooseExternalSymbolizer(LowLevelAllocator *allocator) {
const char *path = common_flags()->external_symbolizer_path;
+
+ if (path && internal_strchr(path, '%')) {
+ char *new_path = (char *)InternalAlloc(kMaxPathLength);
+ SubstituteForFlagValue(path, new_path, kMaxPathLength);
+ path = new_path;
+ }
+
const char *binary_name = path ? StripModuleName(path) : "";
+ static const char kLLVMSymbolizerPrefix[] = "llvm-symbolizer";
if (path && path[0] == '\0') {
VReport(2, "External symbolizer is explicitly disabled.\n");
return nullptr;
- } else if (!internal_strcmp(binary_name, "llvm-symbolizer")) {
+ } else if (!internal_strncmp(binary_name, kLLVMSymbolizerPrefix,
+ internal_strlen(kLLVMSymbolizerPrefix))) {
VReport(2, "Using llvm-symbolizer at user-specified path: %s\n", path);
return new(*allocator) LLVMSymbolizer(path, allocator);
} else if (!internal_strcmp(binary_name, "atos")) {
void ReportErrorSummary(const char *error_type, const AddressInfo &info,
const char *alt_tool_name) {
if (!common_flags()->print_summary) return;
- InternalScopedString buff(kMaxSummaryLength);
+ InternalScopedString buff;
buff.append("%s ", error_type);
RenderFrame(&buff, "%L %F", 0, info.address, &info,
common_flags()->symbolize_vs_style,
static void MaybeDumpInstructionBytes(uptr pc) {
if (!common_flags()->dump_instruction_bytes || (pc < GetPageSizeCached()))
return;
- InternalScopedString str(1024);
+ InternalScopedString str;
str.append("First 16 instruction bytes at pc: ");
if (IsAccessibleMemoryRange(pc, 16)) {
for (int i = 0; i < 16; ++i) {
Report("The signal is caused by a %s memory access.\n", access_type);
if (!sig.is_true_faulting_addr)
Report("Hint: this fault was caused by a dereference of a high value "
- "address (see register values below). Dissassemble the provided "
+ "address (see register values below). Disassemble the provided "
"pc to learn which register was used.\n");
else if (sig.addr < GetPageSizeCached())
Report("Hint: address points to the zero page.\n");
bool WinSymbolizerTool::SymbolizePC(uptr addr, SymbolizedStack *frame) {
InitializeDbgHelpIfNeeded();
- // See http://msdn.microsoft.com/en-us/library/ms680578(VS.85).aspx
- char buffer[sizeof(SYMBOL_INFO) + MAX_SYM_NAME * sizeof(CHAR)];
- PSYMBOL_INFO symbol = (PSYMBOL_INFO)buffer;
+ // See https://docs.microsoft.com/en-us/windows/win32/debug/retrieving-symbol-information-by-address
+ InternalMmapVector<char> buffer(sizeof(SYMBOL_INFO) +
+ MAX_SYM_NAME * sizeof(CHAR));
+ PSYMBOL_INFO symbol = (PSYMBOL_INFO)&buffer[0];
symbol->SizeOfStruct = sizeof(SYMBOL_INFO);
symbol->MaxNameLen = MAX_SYM_NAME;
DWORD64 offset = 0;
// Compute the command line. Wrap double quotes around everything.
const char *argv[kArgVMax];
GetArgV(path_, argv);
- InternalScopedString command_line(kMaxPathLength * 3);
+ InternalScopedString command_line;
for (int i = 0; argv[i]; i++) {
const char *arg = argv[i];
int arglen = internal_strlen(arg);
return;
}
- // Add llvm-symbolizer in case the binary has dwarf.
+ // Add llvm-symbolizer.
const char *user_path = common_flags()->external_symbolizer_path;
+
+ if (user_path && internal_strchr(user_path, '%')) {
+ char *new_path = (char *)InternalAlloc(kMaxPathLength);
+ SubstituteForFlagValue(user_path, new_path, kMaxPathLength);
+ user_path = new_path;
+ }
+
const char *path =
user_path ? user_path : FindPathToBinary("llvm-symbolizer.exe");
if (path) {
internal__exit(common_flags()->exitcode);
}
-static CheckFailedCallbackType CheckFailedCallback;
-void SetCheckFailedCallback(CheckFailedCallbackType callback) {
- CheckFailedCallback = callback;
+static void (*CheckUnwindCallback)();
+void SetCheckUnwindCallback(void (*callback)()) {
+ CheckUnwindCallback = callback;
}
-const int kSecondsToSleepWhenRecursiveCheckFailed = 2;
-
void NORETURN CheckFailed(const char *file, int line, const char *cond,
u64 v1, u64 v2) {
- static atomic_uint32_t num_calls;
- if (atomic_fetch_add(&num_calls, 1, memory_order_relaxed) > 10) {
- SleepForSeconds(kSecondsToSleepWhenRecursiveCheckFailed);
+ u32 tid = GetTid();
+ Printf("%s: CHECK failed: %s:%d \"%s\" (0x%zx, 0x%zx) (tid=%u)\n",
+ SanitizerToolName, StripModuleName(file), line, cond, (uptr)v1,
+ (uptr)v2, tid);
+ static atomic_uint32_t first_tid;
+ u32 cmp = 0;
+ if (!atomic_compare_exchange_strong(&first_tid, &cmp, tid,
+ memory_order_relaxed)) {
+ if (cmp == tid) {
+ // Recursing into CheckFailed.
+ } else {
+ // Another thread fails already, let it print the stack and terminate.
+ SleepForSeconds(2);
+ }
Trap();
}
-
- if (CheckFailedCallback) {
- CheckFailedCallback(file, line, cond, v1, v2);
- }
- Report("Sanitizer CHECK failed: %s:%d %s (%lld, %lld)\n", file, line, cond,
- v1, v2);
+ if (CheckUnwindCallback)
+ CheckUnwindCallback();
Die();
}
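The rewritten CheckFailed lets only the first failing thread own the report: it publishes its tid with a compare-and-swap, a recursive failure on the same thread traps immediately, and a concurrent failure on another thread sleeps briefly so the owner can finish printing. A minimal standalone sketch of that first-failure-wins pattern (the names, the zero sentinel, and the 2-second sleep are illustrative, not the runtime's API):

#include <atomic>
#include <chrono>
#include <cstdio>
#include <cstdlib>
#include <thread>

static std::atomic<unsigned> first_failing_tid{0};  // 0 means "no failure yet"

[[noreturn]] void FatalCheckFailed(unsigned tid, const char *cond) {
  std::printf("CHECK failed: \"%s\" (tid=%u)\n", cond, tid);
  unsigned expected = 0;
  if (!first_failing_tid.compare_exchange_strong(expected, tid)) {
    // Not the first failure. If it is another thread, let it finish its report
    // before we stop; if it is this thread, we recursed and must stop now.
    if (expected != tid)
      std::this_thread::sleep_for(std::chrono::seconds(2));
    std::abort();
  }
  // First failure: this thread owns the report (unwind/symbolize here).
  std::abort();
}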
unique_id = _unique_id;
detached = _detached;
// Parent tid makes no sense for the main thread.
- if (tid != 0)
+ if (tid != kMainTid)
parent_tid = _parent_tid;
OnCreated(arg);
}
// ThreadRegistry implementation.
-const u32 ThreadRegistry::kUnknownTid = ~0U;
-
ThreadRegistry::ThreadRegistry(ThreadContextFactory factory, u32 max_threads,
u32 thread_quarantine_size, u32 max_reuse)
: context_factory_(factory),
u32 ThreadRegistry::CreateThread(uptr user_id, bool detached, u32 parent_tid,
void *arg) {
BlockingMutexLock l(&mtx_);
- u32 tid = kUnknownTid;
+ u32 tid = kInvalidTid;
ThreadContextBase *tctx = QuarantinePop();
if (tctx) {
tid = tctx->tid;
Die();
}
CHECK_NE(tctx, 0);
- CHECK_NE(tid, kUnknownTid);
+ CHECK_NE(tid, kInvalidTid);
CHECK_LT(tid, max_threads_);
CHECK_EQ(tctx->status, ThreadStatusInvalid);
alive_threads_++;
if (tctx != 0 && cb(tctx, arg))
return tctx->tid;
}
- return kUnknownTid;
+ return kInvalidTid;
}
ThreadContextBase *
// really started. We just did CreateThread for a prospective new
// thread before trying to create it, and then failed to actually
// create it, and so never called StartThread.
-void ThreadRegistry::FinishThread(u32 tid) {
+ThreadStatus ThreadRegistry::FinishThread(u32 tid) {
BlockingMutexLock l(&mtx_);
CHECK_GT(alive_threads_, 0);
alive_threads_--;
ThreadContextBase *tctx = threads_[tid];
CHECK_NE(tctx, 0);
bool dead = tctx->detached;
+ ThreadStatus prev_status = tctx->status;
if (tctx->status == ThreadStatusRunning) {
CHECK_GT(running_threads_, 0);
running_threads_--;
QuarantinePush(tctx);
}
tctx->SetDestroyed();
+ return prev_status;
}
void ThreadRegistry::StartThread(u32 tid, tid_t os_id, ThreadType thread_type,
class ThreadRegistry {
public:
- static const u32 kUnknownTid;
-
ThreadRegistry(ThreadContextFactory factory, u32 max_threads,
u32 thread_quarantine_size, u32 max_reuse = 0);
void GetNumberOfThreads(uptr *total = nullptr, uptr *running = nullptr,
void RunCallbackForEachThreadLocked(ThreadCallback cb, void *arg);
typedef bool (*FindThreadCallback)(ThreadContextBase *tctx, void *arg);
- // Finds a thread using the provided callback. Returns kUnknownTid if no
+ // Finds a thread using the provided callback. Returns kInvalidTid if no
// thread is found.
u32 FindThread(FindThreadCallback cb, void *arg);
// Should be guarded by ThreadRegistryLock. Return 0 if no thread
void SetThreadNameByUserId(uptr user_id, const char *name);
void DetachThread(u32 tid, void *arg);
void JoinThread(u32 tid, void *arg);
- void FinishThread(u32 tid);
+ // Finishes thread and returns previous status.
+ ThreadStatus FinishThread(u32 tid);
void StartThread(u32 tid, tid_t os_id, ThreadType thread_type, void *arg);
void SetThreadUserId(u32 tid, uptr user_id);
#include "sanitizer_tls_get_addr.h"
+#include "sanitizer_atomic.h"
#include "sanitizer_flags.h"
#include "sanitizer_platform_interceptors.h"
static const uptr kDestroyedThread = -1;
-static inline void DTLS_Deallocate(DTLS::DTV *dtv, uptr size) {
- if (!size) return;
- VReport(2, "__tls_get_addr: DTLS_Deallocate %p %zd\n", dtv, size);
- UnmapOrDie(dtv, size * sizeof(DTLS::DTV));
+static void DTLS_Deallocate(DTLS::DTVBlock *block) {
+  VReport(2, "__tls_get_addr: DTLS_Deallocate %p\n", block);
+ UnmapOrDie(block, sizeof(DTLS::DTVBlock));
atomic_fetch_sub(&number_of_live_dtls, 1, memory_order_relaxed);
}
-static inline void DTLS_Resize(uptr new_size) {
- if (dtls.dtv_size >= new_size) return;
- new_size = RoundUpToPowerOfTwo(new_size);
- new_size = Max(new_size, 4096UL / sizeof(DTLS::DTV));
- DTLS::DTV *new_dtv =
- (DTLS::DTV *)MmapOrDie(new_size * sizeof(DTLS::DTV), "DTLS_Resize");
+static DTLS::DTVBlock *DTLS_NextBlock(atomic_uintptr_t *cur) {
+ uptr v = atomic_load(cur, memory_order_acquire);
+ if (v == kDestroyedThread)
+ return nullptr;
+ DTLS::DTVBlock *next = (DTLS::DTVBlock *)v;
+ if (next)
+ return next;
+ DTLS::DTVBlock *new_dtv =
+ (DTLS::DTVBlock *)MmapOrDie(sizeof(DTLS::DTVBlock), "DTLS_NextBlock");
+ uptr prev = 0;
+ if (!atomic_compare_exchange_strong(cur, &prev, (uptr)new_dtv,
+ memory_order_seq_cst)) {
+ UnmapOrDie(new_dtv, sizeof(DTLS::DTVBlock));
+ return (DTLS::DTVBlock *)prev;
+ }
uptr num_live_dtls =
atomic_fetch_add(&number_of_live_dtls, 1, memory_order_relaxed);
- VReport(2, "__tls_get_addr: DTLS_Resize %p %zd\n", &dtls, num_live_dtls);
- CHECK_LT(num_live_dtls, 1 << 20);
- uptr old_dtv_size = dtls.dtv_size;
- DTLS::DTV *old_dtv = dtls.dtv;
- if (old_dtv_size)
- internal_memcpy(new_dtv, dtls.dtv, dtls.dtv_size * sizeof(DTLS::DTV));
- dtls.dtv = new_dtv;
- dtls.dtv_size = new_size;
- if (old_dtv_size)
- DTLS_Deallocate(old_dtv, old_dtv_size);
+ VReport(2, "__tls_get_addr: DTLS_NextBlock %p %zd\n", &dtls, num_live_dtls);
+ return new_dtv;
+}
+
+static DTLS::DTV *DTLS_Find(uptr id) {
+ VReport(2, "__tls_get_addr: DTLS_Find %p %zd\n", &dtls, id);
+ static constexpr uptr kPerBlock = ARRAY_SIZE(DTLS::DTVBlock::dtvs);
+ DTLS::DTVBlock *cur = DTLS_NextBlock(&dtls.dtv_block);
+ if (!cur)
+ return nullptr;
+ for (; id >= kPerBlock; id -= kPerBlock) cur = DTLS_NextBlock(&cur->next);
+ return cur->dtvs + id;
}
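The per-thread DTV storage is now a grow-on-demand linked list of fixed-size blocks: with 8-byte pointers each DTVBlock holds (4096 - 8) / 16 = 255 DTV slots, and DTLS_Find walks whole blocks before indexing into the last one (module id 600, for example, lands in the third block at slot 90). A standalone sketch of the same lock-free append with std::atomic and illustrative names, omitting the kDestroyedThread sentinel:

#include <atomic>
#include <cstddef>
#include <cstdint>

struct Slot {
  uintptr_t beg = 0, size = 0;
};

struct Block {
  std::atomic<Block *> next{nullptr};
  Slot slots[255];
};

// Return the successor of *cur, installing a freshly allocated block if there
// is none yet; the loser of a concurrent install frees its block and adopts
// the winner's.
static Block *NextBlock(std::atomic<Block *> *cur) {
  Block *next = cur->load(std::memory_order_acquire);
  if (next)
    return next;
  Block *fresh = new Block;
  Block *expected = nullptr;
  if (!cur->compare_exchange_strong(expected, fresh, std::memory_order_seq_cst)) {
    delete fresh;  // another thread installed a block first; use theirs
    return expected;
  }
  return fresh;
}

static Slot *Find(std::atomic<Block *> *head, size_t id) {
  Block *b = NextBlock(head);
  for (; id >= 255; id -= 255)
    b = NextBlock(&b->next);
  return &b->slots[id];
}

int main() {
  std::atomic<Block *> head{nullptr};
  Find(&head, 600)->beg = 1;  // id 600 -> third block, slot 90
  return 0;
}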
void DTLS_Destroy() {
if (!common_flags()->intercept_tls_get_addr) return;
- VReport(2, "__tls_get_addr: DTLS_Destroy %p %zd\n", &dtls, dtls.dtv_size);
- uptr s = dtls.dtv_size;
- dtls.dtv_size = kDestroyedThread; // Do this before unmap for AS-safety.
- DTLS_Deallocate(dtls.dtv, s);
+ VReport(2, "__tls_get_addr: DTLS_Destroy %p\n", &dtls);
+ DTLS::DTVBlock *block = (DTLS::DTVBlock *)atomic_exchange(
+ &dtls.dtv_block, kDestroyedThread, memory_order_release);
+ while (block) {
+ DTLS::DTVBlock *next =
+ (DTLS::DTVBlock *)atomic_load(&block->next, memory_order_acquire);
+ DTLS_Deallocate(block);
+ block = next;
+ }
}
#if defined(__powerpc64__) || defined(__mips__)
if (!common_flags()->intercept_tls_get_addr) return 0;
TlsGetAddrParam *arg = reinterpret_cast<TlsGetAddrParam *>(arg_void);
uptr dso_id = arg->dso_id;
- if (dtls.dtv_size == kDestroyedThread) return 0;
- DTLS_Resize(dso_id + 1);
- if (dtls.dtv[dso_id].beg) return 0;
+ DTLS::DTV *dtv = DTLS_Find(dso_id);
+ if (!dtv || dtv->beg)
+ return 0;
uptr tls_size = 0;
uptr tls_beg = reinterpret_cast<uptr>(res) - arg->offset - kDtvOffset;
VReport(2, "__tls_get_addr: %p {%p,%p} => %p; tls_beg: %p; sp: %p "
// This may happen inside the DTOR of main thread, so just ignore it.
tls_size = 0;
}
- dtls.dtv[dso_id].beg = tls_beg;
- dtls.dtv[dso_id].size = tls_size;
- return dtls.dtv + dso_id;
+ dtv->beg = tls_beg;
+ dtv->size = tls_size;
+ return dtv;
}
void DTLS_on_libc_memalign(void *ptr, uptr size) {
DTLS *DTLS_Get() { return &dtls; }
bool DTLSInDestruction(DTLS *dtls) {
- return dtls->dtv_size == kDestroyedThread;
+ return atomic_load(&dtls->dtv_block, memory_order_relaxed) ==
+ kDestroyedThread;
}
#else
#ifndef SANITIZER_TLS_GET_ADDR_H
#define SANITIZER_TLS_GET_ADDR_H
+#include "sanitizer_atomic.h"
#include "sanitizer_common.h"
namespace __sanitizer {
struct DTV {
uptr beg, size;
};
+ struct DTVBlock {
+ atomic_uintptr_t next;
+ DTV dtvs[(4096UL - sizeof(next)) / sizeof(DTLS::DTV)];
+ };
+
+ static_assert(sizeof(DTVBlock) <= 4096UL, "Unexpected block size");
- uptr dtv_size;
- DTV *dtv; // dtv_size elements, allocated by MmapOrDie.
+ atomic_uintptr_t dtv_block;
// Auxiliary fields, don't access them outside sanitizer_tls_get_addr.cpp
uptr last_memalign_size;
uptr last_memalign_ptr;
};
+template <typename Fn>
+void ForEachDVT(DTLS *dtls, const Fn &fn) {
+ DTLS::DTVBlock *block =
+ (DTLS::DTVBlock *)atomic_load(&dtls->dtv_block, memory_order_acquire);
+ while (block) {
+ int id = 0;
+ for (auto &d : block->dtvs) fn(d, id++);
+ block = (DTLS::DTVBlock *)atomic_load(&block->next, memory_order_acquire);
+ }
+}
+
// Returns pointer and size of a linker-allocated TLS block.
// Each block is returned exactly once.
DTLS::DTV *DTLS_on_tls_get_addr(void *arg, void *res, uptr static_tls_begin,
trace_buffer[0] = pc;
}
+#ifdef __clang__
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wframe-larger-than="
+#endif
void BufferedStackTrace::UnwindSlow(uptr pc, void *context, u32 max_depth) {
CHECK(context);
CHECK_GE(max_depth, 2);
trace_buffer[size++] = (uptr)stack_frame.AddrPC.Offset;
}
}
+#ifdef __clang__
+#pragma clang diagnostic pop
+#endif
#endif // #if !SANITIZER_GO
#endif // SANITIZER_WINDOWS
}
void ReleaseMemoryPagesToOS(uptr beg, uptr end) {
- // This is almost useless on 32-bits.
- // FIXME: add madvise-analog when we move to 64-bits.
+ uptr beg_aligned = RoundDownTo(beg, GetPageSizeCached()),
+ end_aligned = RoundDownTo(end, GetPageSizeCached());
+ CHECK(beg < end); // make sure the region is sane
+ if (beg_aligned == end_aligned) // make sure we're freeing at least 1 page;
+ return;
+ UnmapOrDie((void *)beg, end_aligned - beg_aligned);
}
void SetShadowRegionHugePageMode(uptr addr, uptr size) {
return 0;
}
+uptr MapDynamicShadowAndAliases(uptr shadow_size, uptr alias_size,
+ uptr num_aliases, uptr ring_buffer_size) {
+ CHECK(false && "HWASan aliasing is unimplemented on Windows");
+ return 0;
+}
+
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
MEMORY_BASIC_INFORMATION mbi;
CHECK(VirtualQuery((void *)range_start, &mbi, sizeof(mbi)));
// load the image at this address. Therefore, we call it the preferred base. Any
// addresses in the DWARF typically assume that the object has been loaded at
// this address.
-static uptr GetPreferredBase(const char *modname) {
+static uptr GetPreferredBase(const char *modname, char *buf, size_t buf_size) {
fd_t fd = OpenFile(modname, RdOnly, nullptr);
if (fd == kInvalidFd)
return 0;
// IMAGE_FILE_HEADER
// IMAGE_OPTIONAL_HEADER
// Seek to e_lfanew and read all that data.
- char buf[4 + sizeof(IMAGE_FILE_HEADER) + sizeof(IMAGE_OPTIONAL_HEADER)];
if (::SetFilePointer(fd, dos_header.e_lfanew, nullptr, FILE_BEGIN) ==
INVALID_SET_FILE_POINTER)
return 0;
- if (!ReadFromFile(fd, &buf[0], sizeof(buf), &bytes_read) ||
- bytes_read != sizeof(buf))
+ if (!ReadFromFile(fd, buf, buf_size, &bytes_read) || bytes_read != buf_size)
return 0;
// Check for "PE\0\0" before the PE header.
}
}
+ InternalMmapVector<char> buf(4 + sizeof(IMAGE_FILE_HEADER) +
+ sizeof(IMAGE_OPTIONAL_HEADER));
+ InternalMmapVector<wchar_t> modname_utf16(kMaxPathLength);
+ InternalMmapVector<char> module_name(kMaxPathLength);
// |num_modules| is the number of modules actually present,
size_t num_modules = bytes_required / sizeof(HMODULE);
for (size_t i = 0; i < num_modules; ++i) {
continue;
// Get the UTF-16 path and convert to UTF-8.
- wchar_t modname_utf16[kMaxPathLength];
int modname_utf16_len =
- GetModuleFileNameW(handle, modname_utf16, kMaxPathLength);
+ GetModuleFileNameW(handle, &modname_utf16[0], kMaxPathLength);
if (modname_utf16_len == 0)
modname_utf16[0] = '\0';
- char module_name[kMaxPathLength];
- int module_name_len =
- ::WideCharToMultiByte(CP_UTF8, 0, modname_utf16, modname_utf16_len + 1,
- &module_name[0], kMaxPathLength, NULL, NULL);
+ int module_name_len = ::WideCharToMultiByte(
+ CP_UTF8, 0, &modname_utf16[0], modname_utf16_len + 1, &module_name[0],
+ kMaxPathLength, NULL, NULL);
module_name[module_name_len] = '\0';
uptr base_address = (uptr)mi.lpBaseOfDll;
// RVA when computing the module offset. This helps llvm-symbolizer find the
  // right DWARF CU. In the common case that the image is loaded at its
// preferred address, we will now print normal virtual addresses.
- uptr preferred_base = GetPreferredBase(&module_name[0]);
+ uptr preferred_base =
+ GetPreferredBase(&module_name[0], &buf[0], buf.size());
uptr adjusted_base = base_address - preferred_base;
- LoadedModule cur_module;
- cur_module.set(module_name, adjusted_base);
+ modules_.push_back(LoadedModule());
+ LoadedModule &cur_module = modules_.back();
+ cur_module.set(&module_name[0], adjusted_base);
// We add the whole module as one single address range.
cur_module.addAddressRange(base_address, end_address, /*executable*/ true,
/*writable*/ true);
- modules_.push_back(cur_module);
}
UnmapOrDie(hmodules, modules_buffer_size);
}
uptr SignalContext::GetAddress() const {
EXCEPTION_RECORD *exception_record = (EXCEPTION_RECORD *)siginfo;
- return exception_record->ExceptionInformation[1];
+ if (exception_record->ExceptionCode == EXCEPTION_ACCESS_VIOLATION)
+ return exception_record->ExceptionInformation[1];
+ return (uptr)exception_record->ExceptionAddress;
}
bool SignalContext::IsMemoryAccess() const {
- return GetWriteFlag() != SignalContext::UNKNOWN;
+ return ((EXCEPTION_RECORD *)siginfo)->ExceptionCode ==
+ EXCEPTION_ACCESS_VIOLATION;
}
-bool SignalContext::IsTrueFaultingAddress() const {
- // FIXME: Provide real implementation for this. See Linux and Mac variants.
- return IsMemoryAccess();
-}
+bool SignalContext::IsTrueFaultingAddress() const { return true; }
SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
EXCEPTION_RECORD *exception_record = (EXCEPTION_RECORD *)siginfo;
+
+ // The write flag is only available for access violation exceptions.
+ if (exception_record->ExceptionCode != EXCEPTION_ACCESS_VIOLATION)
+ return SignalContext::UNKNOWN;
+
// The contents of this array are documented at
- // https://msdn.microsoft.com/en-us/library/windows/desktop/aa363082(v=vs.85).aspx
+ // https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-exception_record
// The first element indicates read as 0, write as 1, or execute as 8. The
// second element is the faulting address.
switch (exception_record->ExceptionInformation[0]) {
}
uptr ReadBinaryName(/*out*/char *buf, uptr buf_len) {
- // FIXME: Actually implement this function.
- CHECK_GT(buf_len, 0);
- buf[0] = 0;
- return 0;
+ if (buf_len == 0)
+ return 0;
+
+ // Get the UTF-16 path and convert to UTF-8.
+ InternalMmapVector<wchar_t> binname_utf16(kMaxPathLength);
+ int binname_utf16_len =
+ GetModuleFileNameW(NULL, &binname_utf16[0], kMaxPathLength);
+ if (binname_utf16_len == 0) {
+ buf[0] = '\0';
+ return 0;
+ }
+ int binary_name_len =
+ ::WideCharToMultiByte(CP_UTF8, 0, &binname_utf16[0], binname_utf16_len,
+ buf, buf_len, NULL, NULL);
+ if ((unsigned)binary_name_len == buf_len)
+ --binary_name_len;
+ buf[binary_name_len] = '\0';
+ return binary_name_len;
}
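ReadBinaryName now follows the usual Win32 flow of fetching the module path as UTF-16 via GetModuleFileNameW and converting it with WideCharToMultiByte. A standalone sketch of that flow; the helper name and the two-call sizing are illustrative (the runtime itself converts into a fixed kMaxPathLength buffer):

#include <windows.h>
#include <cstdio>
#include <string>

// Illustrative helper, not part of the runtime.
static std::string CurrentBinaryPathUtf8() {
  wchar_t wide[MAX_PATH];
  DWORD wide_len = GetModuleFileNameW(nullptr, wide, MAX_PATH);
  if (wide_len == 0)
    return {};
  // First call computes the required UTF-8 size, second call converts.
  int utf8_len = WideCharToMultiByte(CP_UTF8, 0, wide, (int)wide_len,
                                     nullptr, 0, nullptr, nullptr);
  std::string utf8(utf8_len, '\0');
  WideCharToMultiByte(CP_UTF8, 0, wide, (int)wide_len, &utf8[0], utf8_len,
                      nullptr, nullptr);
  return utf8;
}

int main() {
  std::printf("%s\n", CurrentBinaryPathUtf8().c_str());
}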
uptr ReadLongProcessName(/*out*/char *buf, uptr buf_len) {
bool acquired = false;
for (unsigned i = 0; i < kDirtyTids; i++) {
SyncClock::Dirty dirty = src->dirty_[i];
- unsigned tid = dirty.tid;
+ unsigned tid = dirty.tid();
if (tid != kInvalidTid) {
if (clk_[tid] < dirty.epoch) {
clk_[tid] = dirty.epoch;
dst->tab_idx_ = cached_idx_;
dst->size_ = cached_size_;
dst->blocks_ = cached_blocks_;
- CHECK_EQ(dst->dirty_[0].tid, kInvalidTid);
+ CHECK_EQ(dst->dirty_[0].tid(), kInvalidTid);
// The cached clock is shared (immutable),
// so this is where we store the current clock.
- dst->dirty_[0].tid = tid_;
+ dst->dirty_[0].set_tid(tid_);
dst->dirty_[0].epoch = clk_[tid_];
dst->release_store_tid_ = tid_;
dst->release_store_reused_ = reused_;
ce.reused = 0;
i++;
}
- for (uptr i = 0; i < kDirtyTids; i++)
- dst->dirty_[i].tid = kInvalidTid;
+ for (uptr i = 0; i < kDirtyTids; i++) dst->dirty_[i].set_tid(kInvalidTid);
dst->release_store_tid_ = tid_;
dst->release_store_reused_ = reused_;
  // Remember that we don't need to acquire it in the future.
// Update the threads time, but preserve 'acquired' flag.
for (unsigned i = 0; i < kDirtyTids; i++) {
SyncClock::Dirty *dirty = &dst->dirty_[i];
- const unsigned tid = dirty->tid;
+ const unsigned tid = dirty->tid();
if (tid == tid_ || tid == kInvalidTid) {
CPP_STAT_INC(StatClockReleaseFast);
- dirty->tid = tid_;
+ dirty->set_tid(tid_);
dirty->epoch = clk_[tid_];
return;
}
return false;
for (unsigned i = 0; i < kDirtyTids; i++) {
SyncClock::Dirty dirty = src->dirty_[i];
- if (dirty.tid != kInvalidTid) {
- if (clk_[dirty.tid] < dirty.epoch)
+ if (dirty.tid() != kInvalidTid) {
+ if (clk_[dirty.tid()] < dirty.epoch)
return false;
}
}
blocks_ = 0;
release_store_tid_ = kInvalidTid;
release_store_reused_ = 0;
- for (uptr i = 0; i < kDirtyTids; i++)
- dirty_[i].tid = kInvalidTid;
+ for (uptr i = 0; i < kDirtyTids; i++) dirty_[i].set_tid(kInvalidTid);
}
void SyncClock::Resize(ClockCache *c, uptr nclk) {
void SyncClock::FlushDirty() {
for (unsigned i = 0; i < kDirtyTids; i++) {
Dirty *dirty = &dirty_[i];
- if (dirty->tid != kInvalidTid) {
- CHECK_LT(dirty->tid, size_);
- elem(dirty->tid).epoch = dirty->epoch;
- dirty->tid = kInvalidTid;
+ if (dirty->tid() != kInvalidTid) {
+ CHECK_LT(dirty->tid(), size_);
+ elem(dirty->tid()).epoch = dirty->epoch;
+ dirty->set_tid(kInvalidTid);
}
}
}
if (size_ == 0)
return false;
for (unsigned i = 0; i < kDirtyTids; i++) {
- if (dirty_[i].tid != kInvalidTid)
+ if (dirty_[i].tid() != kInvalidTid)
return false;
}
return atomic_load_relaxed(ref_ptr(tab_)) == 1;
u64 SyncClock::get(unsigned tid) const {
for (unsigned i = 0; i < kDirtyTids; i++) {
Dirty dirty = dirty_[i];
- if (dirty.tid == tid)
+ if (dirty.tid() == tid)
return dirty.epoch;
}
return elem(tid).epoch;
for (uptr i = 0; i < size_; i++)
printf("%s%llu", i == 0 ? "" : ",", elem(i).reused);
printf("] release_store_tid=%d/%d dirty_tids=%d[%llu]/%d[%llu]",
- release_store_tid_, release_store_reused_,
- dirty_[0].tid, dirty_[0].epoch,
- dirty_[1].tid, dirty_[1].epoch);
+ release_store_tid_, release_store_reused_, dirty_[0].tid(),
+ dirty_[0].epoch, dirty_[1].tid(), dirty_[1].epoch);
}
void SyncClock::Iter::Next() {
namespace __tsan {
-typedef DenseSlabAlloc<ClockBlock, 1<<16, 1<<10> ClockAlloc;
+typedef DenseSlabAlloc<ClockBlock, 1 << 22, 1 << 10> ClockAlloc;
typedef DenseSlabAllocCache ClockCache;
// The clock that lives in sync variables (mutexes, atomics, etc).
static const uptr kDirtyTids = 2;
struct Dirty {
- u64 epoch : kClkBits;
- u64 tid : 64 - kClkBits; // kInvalidId if not active
+ u32 tid() const { return tid_ == kShortInvalidTid ? kInvalidTid : tid_; }
+ void set_tid(u32 tid) {
+ tid_ = tid == kInvalidTid ? kShortInvalidTid : tid;
+ }
+ u64 epoch : kClkBits;
+
+ private:
+ // Full kInvalidTid won't fit into Dirty::tid.
+ static const u64 kShortInvalidTid = (1ull << (64 - kClkBits)) - 1;
+  u64 tid_ : 64 - kClkBits;  // kShortInvalidTid if not active
};
+ static_assert(sizeof(Dirty) == 8, "Dirty is not 64bit");
+
unsigned release_store_tid_;
unsigned release_store_reused_;
Dirty dirty_[kDirtyTids];
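Dirty keeps the epoch and the tid packed in one 64-bit word; since the full kInvalidTid no longer fits into the 64 - kClkBits tid field, the all-ones short value acts as the "not active" sentinel and tid()/set_tid() translate between the two. A standalone sketch of the encoding, assuming 42 clock bits purely for illustration:

#include <cassert>
#include <cstdint>

constexpr int kClkBits = 42;                            // illustrative value
constexpr uint32_t kInvalidTid = ~0u;                   // "no thread"
constexpr uint64_t kShortInvalidTid = (1ull << (64 - kClkBits)) - 1;

struct Dirty {
  uint64_t epoch : kClkBits;
  uint64_t tid_ : 64 - kClkBits;

  uint32_t tid() const {
    return tid_ == kShortInvalidTid ? kInvalidTid : (uint32_t)tid_;
  }
  void set_tid(uint32_t tid) {
    tid_ = (tid == kInvalidTid) ? kShortInvalidTid : tid;
  }
};
static_assert(sizeof(Dirty) == 8, "Dirty must stay one 64-bit word");

int main() {
  Dirty d{};
  d.set_tid(kInvalidTid);
  assert(d.tid() == kInvalidTid);  // sentinel round-trips
  d.set_tid(7);
  assert(d.tid() == 7);            // ordinary tids are stored verbatim
}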
const bool kCollectHistory = true;
#endif
-const u16 kInvalidTid = kMaxTid + 1;
-
// The following "build consistency" machinery ensures that all source files
// are built in the same configuration. Inconsistent builds lead to
// hard to debug crashes.
typedef u32 IndexT;
uptr pos;
IndexT cache[kSize];
- template<typename T, uptr kL1Size, uptr kL2Size> friend class DenseSlabAlloc;
+ template <typename, uptr, uptr, u64>
+ friend class DenseSlabAlloc;
};
-template<typename T, uptr kL1Size, uptr kL2Size>
+template <typename T, uptr kL1Size, uptr kL2Size, u64 kReserved = 0>
class DenseSlabAlloc {
public:
typedef DenseSlabAllocCache Cache;
typedef typename Cache::IndexT IndexT;
- explicit DenseSlabAlloc(const char *name) {
- // Check that kL1Size and kL2Size are sane.
- CHECK_EQ(kL1Size & (kL1Size - 1), 0);
- CHECK_EQ(kL2Size & (kL2Size - 1), 0);
- CHECK_GE(1ull << (sizeof(IndexT) * 8), kL1Size * kL2Size);
- // Check that it makes sense to use the dense alloc.
- CHECK_GE(sizeof(T), sizeof(IndexT));
- internal_memset(map_, 0, sizeof(map_));
+ static_assert((kL1Size & (kL1Size - 1)) == 0,
+ "kL1Size must be a power-of-two");
+ static_assert((kL2Size & (kL2Size - 1)) == 0,
+ "kL2Size must be a power-of-two");
+ static_assert((kL1Size * kL2Size) <= (1ull << (sizeof(IndexT) * 8)),
+ "kL1Size/kL2Size are too large");
+ static_assert(((kL1Size * kL2Size - 1) & kReserved) == 0,
+ "reserved bits don't fit");
+ static_assert(sizeof(T) > sizeof(IndexT),
+ "it doesn't make sense to use dense alloc");
+
+ explicit DenseSlabAlloc(LinkerInitialized, const char *name) {
freelist_ = 0;
fillpos_ = 0;
name_ = name;
}
+ explicit DenseSlabAlloc(const char *name)
+ : DenseSlabAlloc(LINKER_INITIALIZED, name) {
+    // map_ can be very large; memset it only here, not in the
+    // LinkerInitialized constructor, so that linker-initialized objects
+    // do not page it in.
+ internal_memset(map_, 0, sizeof(map_));
+ }
+
~DenseSlabAlloc() {
for (uptr i = 0; i < kL1Size; i++) {
if (map_[i] != 0)
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_external_read(void *addr, void *caller_pc, void *tag) {
- ExternalAccess(addr, STRIP_PC(caller_pc), tag, MemoryRead);
+ ExternalAccess(addr, STRIP_PAC_PC(caller_pc), tag, MemoryRead);
}
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_external_write(void *addr, void *caller_pc, void *tag) {
- ExternalAccess(addr, STRIP_PC(caller_pc), tag, MemoryWrite);
+ ExternalAccess(addr, STRIP_PAC_PC(caller_pc), tag, MemoryWrite);
}
} // extern "C"
virtual void on_zero_shared() = 0;
virtual void _unused_0x18() = 0;
virtual void on_zero_shared_weak() = 0;
+ virtual ~fake_shared_weak_count() = 0; // suppress -Wnon-virtual-dtor
};
} // namespace
#define vfork __vfork14
#endif
-#if SANITIZER_ANDROID
-#define mallopt(a, b)
-#endif
-
#ifdef __mips__
const int kSigCount = 129;
#else
extern "C" int pthread_attr_destroy(void *attr);
DECLARE_REAL(int, pthread_attr_getdetachstate, void *, void *)
extern "C" int pthread_attr_setstacksize(void *attr, uptr stacksize);
+extern "C" int pthread_atfork(void (*prepare)(void), void (*parent)(void),
+ void (*child)(void));
extern "C" int pthread_key_create(unsigned *key, void (*destructor)(void* v));
extern "C" int pthread_setspecific(unsigned key, const void *v);
DECLARE_REAL(int, pthread_mutexattr_gettype, void *, void *)
extern "C" int fileno_unlocked(void *stream);
extern "C" int dirfd(void *dirp);
#endif
-#if !SANITIZER_FREEBSD && !SANITIZER_ANDROID && !SANITIZER_NETBSD
+#if SANITIZER_GLIBC
extern "C" int mallopt(int param, int value);
#endif
#if SANITIZER_NETBSD
return p;
}
+// In glibc<2.25, dynamic TLS blocks are allocated by __libc_memalign. Intercept
+// __libc_memalign so that (1) we can detect races and (2) free will not be
+// called on libc internally allocated blocks.
TSAN_INTERCEPTOR(void*, __libc_memalign, uptr align, uptr sz) {
- SCOPED_TSAN_INTERCEPTOR(__libc_memalign, align, sz);
+ SCOPED_INTERCEPTOR_RAW(__libc_memalign, align, sz);
return user_memalign(thr, pc, align, sz);
}
if (!fix_mmap_addr(&addr, sz, flags)) return MAP_FAILED;
void *res = real_mmap(addr, sz, prot, flags, fd, off);
if (res != MAP_FAILED) {
+ if (!IsAppMem((uptr)res) || !IsAppMem((uptr)res + sz - 1)) {
+ Report("ThreadSanitizer: mmap at bad address: addr=%p size=%p res=%p\n",
+ addr, (void*)sz, res);
+ Die();
+ }
if (fd > 0) FdAccess(thr, pc, fd);
MemoryRangeImitateWriteOrResetRange(thr, pc, (uptr)res, sz);
}
return (void*)cond;
}
+namespace {
+
+template <class Fn>
struct CondMutexUnlockCtx {
ScopedInterceptor *si;
ThreadState *thr;
uptr pc;
void *m;
+ void *c;
+ const Fn &fn;
+
+ int Cancel() const { return fn(); }
+ void Unlock() const;
};
-static void cond_mutex_unlock(CondMutexUnlockCtx *arg) {
+template <class Fn>
+void CondMutexUnlockCtx<Fn>::Unlock() const {
// pthread_cond_wait interceptor has enabled async signal delivery
// (see BlockingCall below). Disable async signals since we are running
// tsan code. Also ScopedInterceptor and BlockingCall destructors won't run
// since the thread is cancelled, so we have to manually execute them
// (the thread still can run some user code due to pthread_cleanup_push).
- ThreadSignalContext *ctx = SigCtx(arg->thr);
+ ThreadSignalContext *ctx = SigCtx(thr);
CHECK_EQ(atomic_load(&ctx->in_blocking_func, memory_order_relaxed), 1);
atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
- MutexPostLock(arg->thr, arg->pc, (uptr)arg->m, MutexFlagDoPreLockOnPostLock);
+ MutexPostLock(thr, pc, (uptr)m, MutexFlagDoPreLockOnPostLock);
// Undo BlockingCall ctor effects.
- arg->thr->ignore_interceptors--;
- arg->si->~ScopedInterceptor();
+ thr->ignore_interceptors--;
+ si->~ScopedInterceptor();
}
+} // namespace
INTERCEPTOR(int, pthread_cond_init, void *c, void *a) {
void *cond = init_cond(c, true);
return REAL(pthread_cond_init)(cond, a);
}
-static int cond_wait(ThreadState *thr, uptr pc, ScopedInterceptor *si,
- int (*fn)(void *c, void *m, void *abstime), void *c,
- void *m, void *t) {
+template <class Fn>
+int cond_wait(ThreadState *thr, uptr pc, ScopedInterceptor *si, const Fn &fn,
+ void *c, void *m) {
MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
MutexUnlock(thr, pc, (uptr)m);
- CondMutexUnlockCtx arg = {si, thr, pc, m};
int res = 0;
// This ensures that we handle mutex lock even in case of pthread_cancel.
// See test/tsan/cond_cancel.cpp.
{
// Enable signal delivery while the thread is blocked.
BlockingCall bc(thr);
+ CondMutexUnlockCtx<Fn> arg = {si, thr, pc, m, c, fn};
res = call_pthread_cancel_with_cleanup(
- fn, c, m, t, (void (*)(void *arg))cond_mutex_unlock, &arg);
+ [](void *arg) -> int {
+ return ((const CondMutexUnlockCtx<Fn> *)arg)->Cancel();
+ },
+ [](void *arg) { ((const CondMutexUnlockCtx<Fn> *)arg)->Unlock(); },
+ &arg);
}
if (res == errno_EOWNERDEAD) MutexRepair(thr, pc, (uptr)m);
MutexPostLock(thr, pc, (uptr)m, MutexFlagDoPreLockOnPostLock);
INTERCEPTOR(int, pthread_cond_wait, void *c, void *m) {
void *cond = init_cond(c);
SCOPED_TSAN_INTERCEPTOR(pthread_cond_wait, cond, m);
- return cond_wait(thr, pc, &si, (int (*)(void *c, void *m, void *abstime))REAL(
- pthread_cond_wait),
- cond, m, 0);
+ return cond_wait(
+ thr, pc, &si, [=]() { return REAL(pthread_cond_wait)(cond, m); }, cond,
+ m);
}
INTERCEPTOR(int, pthread_cond_timedwait, void *c, void *m, void *abstime) {
void *cond = init_cond(c);
SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait, cond, m, abstime);
- return cond_wait(thr, pc, &si, REAL(pthread_cond_timedwait), cond, m,
- abstime);
+ return cond_wait(
+ thr, pc, &si,
+ [=]() { return REAL(pthread_cond_timedwait)(cond, m, abstime); }, cond,
+ m);
}
+#if SANITIZER_LINUX
+INTERCEPTOR(int, pthread_cond_clockwait, void *c, void *m,
+ __sanitizer_clockid_t clock, void *abstime) {
+ void *cond = init_cond(c);
+ SCOPED_TSAN_INTERCEPTOR(pthread_cond_clockwait, cond, m, clock, abstime);
+ return cond_wait(
+ thr, pc, &si,
+ [=]() { return REAL(pthread_cond_clockwait)(cond, m, clock, abstime); },
+ cond, m);
+}
+#define TSAN_MAYBE_PTHREAD_COND_CLOCKWAIT TSAN_INTERCEPT(pthread_cond_clockwait)
+#else
+#define TSAN_MAYBE_PTHREAD_COND_CLOCKWAIT
+#endif
+
#if SANITIZER_MAC
INTERCEPTOR(int, pthread_cond_timedwait_relative_np, void *c, void *m,
void *reltime) {
void *cond = init_cond(c);
SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait_relative_np, cond, m, reltime);
- return cond_wait(thr, pc, &si, REAL(pthread_cond_timedwait_relative_np), cond,
- m, reltime);
+ return cond_wait(
+ thr, pc, &si,
+ [=]() {
+ return REAL(pthread_cond_timedwait_relative_np)(cond, m, reltime);
+ },
+ cond, m);
}
#endif
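cond_wait now receives the blocking call as a lambda and adapts it to the plain function pointers that call_pthread_cancel_with_cleanup (and ultimately pthread_cleanup_push) expect: the context object travels through a void*, and capture-less lambdas cast it back. A standalone sketch of that adapter pattern, with illustrative names and a trivial cleanup:

#include <cstdio>

// A C-style API that only accepts plain function pointers plus a context pointer.
static int call_with_cleanup(int (*fn)(void *arg), void (*cleanup)(void *arg),
                             void *arg) {
  int res = fn(arg);
  cleanup(arg);
  return res;
}

// Bridge an arbitrary callable into that API: keep it (by reference) in a
// small context struct and hand out capture-less lambdas, which convert to
// function pointers.
template <class Fn>
struct Ctx {
  const Fn &fn;
  int Run() const { return fn(); }
  void Done() const { std::puts("cleanup ran"); }
};

template <class Fn>
int RunWithCleanup(const Fn &fn) {
  Ctx<Fn> ctx = {fn};
  return call_with_cleanup(
      [](void *arg) -> int { return ((const Ctx<Fn> *)arg)->Run(); },
      [](void *arg) { ((const Ctx<Fn> *)arg)->Done(); }, &ctx);
}

int main() {
  int x = 41;
  return RunWithCleanup([&] { return x + 1; }) == 42 ? 0 : 1;
}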
// because in async signal processing case (when handler is called directly
// from rtl_generic_sighandler) we have not yet received the reraised
// signal; and it looks too fragile to intercept all ways to reraise a signal.
- if (flags()->report_bugs && !sync && sig != SIGTERM && errno != 99) {
+ if (ShouldReport(thr, ReportTypeErrnoInSignal) && !sync && sig != SIGTERM &&
+ errno != 99) {
VarSizeStackTrace stack;
// StackTrace::GetNestInstructionPc(pc) is used because return address is
// expected, OutputReport() will undo this.
if (in_symbolizer())
return REAL(fork)(fake);
SCOPED_INTERCEPTOR_RAW(fork, fake);
+ return REAL(fork)(fake);
+}
+
+void atfork_prepare() {
+ if (in_symbolizer())
+ return;
+ ThreadState *thr = cur_thread();
+ const uptr pc = StackTrace::GetCurrentPc();
ForkBefore(thr, pc);
- int pid;
- {
- // On OS X, REAL(fork) can call intercepted functions (OSSpinLockLock), and
- // we'll assert in CheckNoLocks() unless we ignore interceptors.
- ScopedIgnoreInterceptors ignore;
- pid = REAL(fork)(fake);
- }
- if (pid == 0) {
- // child
- ForkChildAfter(thr, pc);
- FdOnFork(thr, pc);
- } else if (pid > 0) {
- // parent
- ForkParentAfter(thr, pc);
- } else {
- // error
- ForkParentAfter(thr, pc);
- }
- return pid;
+}
+
+void atfork_parent() {
+ if (in_symbolizer())
+ return;
+ ThreadState *thr = cur_thread();
+ const uptr pc = StackTrace::GetCurrentPc();
+ ForkParentAfter(thr, pc);
+}
+
+void atfork_child() {
+ if (in_symbolizer())
+ return;
+ ThreadState *thr = cur_thread();
+ const uptr pc = StackTrace::GetCurrentPc();
+ ForkChildAfter(thr, pc);
+ FdOnFork(thr, pc);
}
TSAN_INTERCEPTOR(int, vfork, int fake) {
FdRelease(thr, pc, fd);
}
-static void syscall_pre_fork(uptr pc) {
- TSAN_SYSCALL();
- ForkBefore(thr, pc);
-}
+static void syscall_pre_fork(uptr pc) { ForkBefore(cur_thread(), pc); }
static void syscall_post_fork(uptr pc, int pid) {
- TSAN_SYSCALL();
+ ThreadState *thr = cur_thread();
if (pid == 0) {
// child
ForkChildAfter(thr, pc);
#endif
// Instruct libc malloc to consume less memory.
-#if SANITIZER_LINUX
+#if SANITIZER_GLIBC
mallopt(1, 0); // M_MXFAST
mallopt(-3, 32*1024); // M_MMAP_THRESHOLD
#endif
TSAN_INTERCEPT_VER(pthread_cond_timedwait, PTHREAD_ABI_BASE);
TSAN_INTERCEPT_VER(pthread_cond_destroy, PTHREAD_ABI_BASE);
+ TSAN_MAYBE_PTHREAD_COND_CLOCKWAIT;
+
TSAN_INTERCEPT(pthread_mutex_init);
TSAN_INTERCEPT(pthread_mutex_destroy);
TSAN_INTERCEPT(pthread_mutex_trylock);
Printf("ThreadSanitizer: failed to setup atexit callback\n");
Die();
}
+ if (pthread_atfork(atfork_prepare, atfork_parent, atfork_child)) {
+ Printf("ThreadSanitizer: failed to setup atfork callbacks\n");
+ Die();
+ }
#if !SANITIZER_MAC && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
if (pthread_key_create(&interceptor_ctx()->finalize_key, &thread_finalize)) {
}
void __tsan_read16_pc(void *addr, void *pc) {
- MemoryRead(cur_thread(), STRIP_PC(pc), (uptr)addr, kSizeLog8);
- MemoryRead(cur_thread(), STRIP_PC(pc), (uptr)addr + 8, kSizeLog8);
+ MemoryRead(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog8);
+ MemoryRead(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr + 8, kSizeLog8);
}
void __tsan_write16_pc(void *addr, void *pc) {
- MemoryWrite(cur_thread(), STRIP_PC(pc), (uptr)addr, kSizeLog8);
- MemoryWrite(cur_thread(), STRIP_PC(pc), (uptr)addr + 8, kSizeLog8);
+ MemoryWrite(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog8);
+ MemoryWrite(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr + 8, kSizeLog8);
}
// __tsan_unaligned_read/write calls are emitted by compiler.
#endif
// Part of ABI, do not change.
-// https://github.com/llvm/llvm-project/blob/master/libcxx/include/atomic
+// https://github.com/llvm/llvm-project/blob/main/libcxx/include/atomic
typedef enum {
mo_relaxed,
mo_consume,
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_compare_exchange(ThreadState *thr, uptr cpc, uptr pc,
u8 *a);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_on_initialize();
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_on_finalize(int failed);
+
} // extern "C"
} // namespace __tsan
}
void __tsan_read1_pc(void *addr, void *pc) {
- MemoryRead(cur_thread(), STRIP_PC(pc), (uptr)addr, kSizeLog1);
+ MemoryRead(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog1);
}
void __tsan_read2_pc(void *addr, void *pc) {
- MemoryRead(cur_thread(), STRIP_PC(pc), (uptr)addr, kSizeLog2);
+ MemoryRead(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog2);
}
void __tsan_read4_pc(void *addr, void *pc) {
- MemoryRead(cur_thread(), STRIP_PC(pc), (uptr)addr, kSizeLog4);
+ MemoryRead(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog4);
}
void __tsan_read8_pc(void *addr, void *pc) {
- MemoryRead(cur_thread(), STRIP_PC(pc), (uptr)addr, kSizeLog8);
+ MemoryRead(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog8);
}
void __tsan_write1_pc(void *addr, void *pc) {
- MemoryWrite(cur_thread(), STRIP_PC(pc), (uptr)addr, kSizeLog1);
+ MemoryWrite(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog1);
}
void __tsan_write2_pc(void *addr, void *pc) {
- MemoryWrite(cur_thread(), STRIP_PC(pc), (uptr)addr, kSizeLog2);
+ MemoryWrite(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog2);
}
void __tsan_write4_pc(void *addr, void *pc) {
- MemoryWrite(cur_thread(), STRIP_PC(pc), (uptr)addr, kSizeLog4);
+ MemoryWrite(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog4);
}
void __tsan_write8_pc(void *addr, void *pc) {
- MemoryWrite(cur_thread(), STRIP_PC(pc), (uptr)addr, kSizeLog8);
+ MemoryWrite(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog8);
}
void __tsan_vptr_update(void **vptr_p, void *new_val) {
}
void __tsan_func_entry(void *pc) {
- FuncEntry(cur_thread(), STRIP_PC(pc));
+ FuncEntry(cur_thread(), STRIP_PAC_PC(pc));
}
void __tsan_func_exit() {
}
void __tsan_read_range_pc(void *addr, uptr size, void *pc) {
- MemoryAccessRange(cur_thread(), STRIP_PC(pc), (uptr)addr, size, false);
+ MemoryAccessRange(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, size, false);
}
void __tsan_write_range_pc(void *addr, uptr size, void *pc) {
- MemoryAccessRange(cur_thread(), STRIP_PC(pc), (uptr)addr, size, true);
+ MemoryAccessRange(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, size, true);
}
static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
if (atomic_load_relaxed(&thr->in_signal_handler) == 0 ||
- !flags()->report_signal_unsafe)
+ !ShouldReport(thr, ReportTypeSignalUnsafe))
return;
VarSizeStackTrace stack;
ObtainCurrentStack(thr, pc, &stack);
namespace __tsan {
+#if defined(__x86_64__)
+#define HAS_48_BIT_ADDRESS_SPACE 1
+#elif SANITIZER_IOSSIM // arm64 iOS simulators (order of #if matters)
+#define HAS_48_BIT_ADDRESS_SPACE 1
+#elif SANITIZER_IOS // arm64 iOS devices (order of #if matters)
+#define HAS_48_BIT_ADDRESS_SPACE 0
+#elif SANITIZER_MAC // arm64 macOS (order of #if matters)
+#define HAS_48_BIT_ADDRESS_SPACE 1
+#else
+#define HAS_48_BIT_ADDRESS_SPACE 0
+#endif
+
#if !SANITIZER_GO
-#if defined(__x86_64__)
+#if HAS_48_BIT_ADDRESS_SPACE
/*
C/C++ on linux/x86_64 and freebsd/x86_64
0000 0000 1000 - 0080 0000 0000: main binary and/or MAP_32BIT mappings (512GB)
ff00 0000 00 - ff80 0000 00: - (2 GB)
ff80 0000 00 - ffff ffff ff: modules and main thread stack (<2 GB)
*/
-struct Mapping {
+struct Mapping40 {
static const uptr kMetaShadowBeg = 0x4000000000ull;
static const uptr kMetaShadowEnd = 0x5000000000ull;
static const uptr kTraceMemBeg = 0xb000000000ull;
};
#define TSAN_MID_APP_RANGE 1
+#define TSAN_RUNTIME_VMA 1
#elif defined(__aarch64__) && defined(__APPLE__)
/*
C/C++ on Darwin/iOS/ARM64 (36-bit VMA, 64 GB VM)
static const uptr kVdsoBeg = 0x7000000000000000ull;
};
-#elif defined(__aarch64__)
+#elif defined(__aarch64__) && !defined(__APPLE__)
 // AArch64 supports multiple VMA sizes, which leads to multiple address
 // transformation functions. To support these transformations and mappings, the TSAN
// runtime for AArch64 uses an external memory read (vmaSize) to select which
#define TSAN_RUNTIME_VMA 1
#endif
-#elif SANITIZER_GO && !SANITIZER_WINDOWS && defined(__x86_64__)
+#elif SANITIZER_GO && !SANITIZER_WINDOWS && HAS_48_BIT_ADDRESS_SPACE
/* Go on linux, darwin and freebsd on x86_64
0000 0000 1000 - 0000 1000 0000: executable
6000 0000 0000 - 6200 0000 0000: traces
6200 0000 0000 - 8000 0000 0000: -
*/
-struct Mapping {
+struct Mapping47 {
static const uptr kMetaShadowBeg = 0x300000000000ull;
static const uptr kMetaShadowEnd = 0x400000000000ull;
static const uptr kTraceMemBeg = 0x600000000000ull;
static const uptr kAppMemBeg = 0x000000001000ull;
static const uptr kAppMemEnd = 0x00e000000000ull;
};
+
+#define TSAN_RUNTIME_VMA 1
+
#else
# error "Unknown platform"
#endif
}
DCHECK(0);
return 0;
+#elif defined(__mips64)
+ switch (vmaSize) {
+#if !SANITIZER_GO
+ case 40: return MappingImpl<Mapping40, Type>();
+#else
+ case 47: return MappingImpl<Mapping47, Type>();
+#endif
+ }
+ DCHECK(0);
+ return 0;
#else
return MappingImpl<Mapping, Type>();
#endif
}
DCHECK(0);
return false;
+#elif defined(__mips64)
+ switch (vmaSize) {
+#if !SANITIZER_GO
+ case 40: return IsAppMemImpl<Mapping40>(mem);
+#else
+ case 47: return IsAppMemImpl<Mapping47>(mem);
+#endif
+ }
+ DCHECK(0);
+ return false;
#else
return IsAppMemImpl<Mapping>(mem);
#endif
}
DCHECK(0);
return false;
+#elif defined(__mips64)
+ switch (vmaSize) {
+#if !SANITIZER_GO
+ case 40: return IsShadowMemImpl<Mapping40>(mem);
+#else
+ case 47: return IsShadowMemImpl<Mapping47>(mem);
+#endif
+ }
+ DCHECK(0);
+ return false;
#else
return IsShadowMemImpl<Mapping>(mem);
#endif
}
DCHECK(0);
return false;
+#elif defined(__mips64)
+ switch (vmaSize) {
+#if !SANITIZER_GO
+ case 40: return IsMetaMemImpl<Mapping40>(mem);
+#else
+ case 47: return IsMetaMemImpl<Mapping47>(mem);
+#endif
+ }
+ DCHECK(0);
+ return false;
#else
return IsMetaMemImpl<Mapping>(mem);
#endif
}
DCHECK(0);
return 0;
+#elif defined(__mips64)
+ switch (vmaSize) {
+#if !SANITIZER_GO
+ case 40: return MemToShadowImpl<Mapping40>(x);
+#else
+ case 47: return MemToShadowImpl<Mapping47>(x);
+#endif
+ }
+ DCHECK(0);
+ return 0;
#else
return MemToShadowImpl<Mapping>(x);
#endif
}
DCHECK(0);
return 0;
+#elif defined(__mips64)
+ switch (vmaSize) {
+#if !SANITIZER_GO
+ case 40: return MemToMetaImpl<Mapping40>(x);
+#else
+ case 47: return MemToMetaImpl<Mapping47>(x);
+#endif
+ }
+ DCHECK(0);
+ return 0;
#else
return MemToMetaImpl<Mapping>(x);
#endif
}
DCHECK(0);
return 0;
+#elif defined(__mips64)
+ switch (vmaSize) {
+#if !SANITIZER_GO
+ case 40: return ShadowToMemImpl<Mapping40>(s);
+#else
+ case 47: return ShadowToMemImpl<Mapping47>(s);
+#endif
+ }
+ DCHECK(0);
+ return 0;
#else
return ShadowToMemImpl<Mapping>(s);
#endif
}
DCHECK(0);
return 0;
+#elif defined(__mips64)
+ switch (vmaSize) {
+#if !SANITIZER_GO
+ case 40: return GetThreadTraceImpl<Mapping40>(tid);
+#else
+ case 47: return GetThreadTraceImpl<Mapping47>(tid);
+#endif
+ }
+ DCHECK(0);
+ return 0;
#else
return GetThreadTraceImpl<Mapping>(tid);
#endif
}
DCHECK(0);
return 0;
+#elif defined(__mips64)
+ switch (vmaSize) {
+#if !SANITIZER_GO
+ case 40: return GetThreadTraceHeaderImpl<Mapping40>(tid);
+#else
+ case 47: return GetThreadTraceHeaderImpl<Mapping47>(tid);
+#endif
+ }
+ DCHECK(0);
+ return 0;
#else
return GetThreadTraceHeaderImpl<Mapping>(tid);
#endif
uptr ExtractLongJmpSp(uptr *env);
void ImitateTlsWrite(ThreadState *thr, uptr tls_addr, uptr tls_size);
-int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m,
- void *abstime), void *c, void *m, void *abstime,
- void(*cleanup)(void *arg), void *arg);
+int call_pthread_cancel_with_cleanup(int (*fn)(void *arg),
+ void (*cleanup)(void *arg), void *arg);
void DestroyThreadState();
void PlatformCleanUpThreadState(ThreadState *thr);
Die();
}
# endif
+#elif defined(__mips64)
+# if !SANITIZER_GO
+ if (vmaSize != 40) {
+ Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
+ Printf("FATAL: Found %zd - Supported 40\n", vmaSize);
+ Die();
+ }
+# else
+ if (vmaSize != 47) {
+ Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
+ Printf("FATAL: Found %zd - Supported 47\n", vmaSize);
+ Die();
+ }
+# endif
#endif
#endif
}
// Note: this function runs with async signals enabled,
// so it must not touch any tsan state.
-int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m,
- void *abstime), void *c, void *m, void *abstime,
- void(*cleanup)(void *arg), void *arg) {
+int call_pthread_cancel_with_cleanup(int (*fn)(void *arg),
+ void (*cleanup)(void *arg), void *arg) {
// pthread_cleanup_push/pop are hardcore macros mess.
// We can't intercept nor call them w/o including pthread.h.
int res;
pthread_cleanup_push(cleanup, arg);
- res = fn(c, m, abstime);
+ res = fn(arg);
pthread_cleanup_pop(0);
return res;
}
dead_thread_state->fast_state.SetIgnoreBit();
dead_thread_state->ignore_interceptors = 1;
dead_thread_state->is_dead = true;
- *const_cast<int*>(&dead_thread_state->tid) = -1;
+ *const_cast<u32*>(&dead_thread_state->tid) = -1;
CHECK_EQ(0, internal_mprotect(dead_thread_state, sizeof(ThreadState),
PROT_READ));
}
#endif
void InitializePlatformEarly() {
-#if !SANITIZER_GO && defined(__aarch64__)
+#if !SANITIZER_GO && !HAS_48_BIT_ADDRESS_SPACE
uptr max_vm = GetMaxUserVirtualAddress() + 1;
if (max_vm != Mapping::kHiAppMemEnd) {
Printf("ThreadSanitizer: unsupported vm address limit %p, expected %p.\n",
#if !SANITIZER_GO
// Note: this function runs with async signals enabled,
// so it must not touch any tsan state.
-int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m,
- void *abstime), void *c, void *m, void *abstime,
- void(*cleanup)(void *arg), void *arg) {
+int call_pthread_cancel_with_cleanup(int (*fn)(void *arg),
+ void (*cleanup)(void *arg), void *arg) {
// pthread_cleanup_push/pop are hardcore macros mess.
// We can't intercept nor call them w/o including pthread.h.
int res;
pthread_cleanup_push(cleanup, arg);
- res = fn(c, m, abstime);
+ res = fn(arg);
pthread_cleanup_pop(0);
return res;
}
Die();
}
-#if defined(__aarch64__) && defined(__APPLE__)
+#if defined(__aarch64__) && defined(__APPLE__) && !HAS_48_BIT_ADDRESS_SPACE
ProtectRange(HeapMemEnd(), ShadowBeg());
ProtectRange(ShadowEnd(), MetaShadowBeg());
ProtectRange(MetaShadowEnd(), TraceMemBeg());
const int kThreadBufSize = 32;
const char *thread_name(char *buf, int tid) {
- if (tid == 0)
+ if (tid == kMainTid)
return "main thread";
internal_snprintf(buf, kThreadBufSize, "thread T%d", tid);
return buf;
}
SymbolizedStack *frame = ent->frames;
for (int i = 0; frame && frame->info.address; frame = frame->next, i++) {
- InternalScopedString res(2 * GetPageSizeCached());
+ InternalScopedString res;
RenderFrame(&res, common_flags()->stack_trace_format, i,
frame->info.address, &frame->info,
common_flags()->symbolize_vs_style,
static void PrintThread(const ReportThread *rt) {
Decorator d;
- if (rt->id == 0) // Little sense in describing the main thread.
+ if (rt->id == kMainTid) // Little sense in describing the main thread.
return;
Printf("%s", d.ThreadDescription());
Printf(" Thread T%d", rt->id);
#else // #if !SANITIZER_GO
-const int kMainThreadId = 1;
+const u32 kMainGoroutineId = 1;
void PrintStack(const ReportStack *ent) {
if (ent == 0 || ent->frames == 0) {
Printf("%s at %p by ",
(first ? (mop->write ? "Write" : "Read")
: (mop->write ? "Previous write" : "Previous read")), mop->addr);
- if (mop->tid == kMainThreadId)
+ if (mop->tid == kMainGoroutineId)
Printf("main goroutine:\n");
else
Printf("goroutine %d:\n", mop->tid);
Printf("\n");
Printf("Heap block of size %zu at %p allocated by ",
loc->heap_chunk_size, loc->heap_chunk_start);
- if (loc->tid == kMainThreadId)
+ if (loc->tid == kMainGoroutineId)
Printf("main goroutine:\n");
else
Printf("goroutine %d:\n", loc->tid);
}
static void PrintThread(const ReportThread *rt) {
- if (rt->id == kMainThreadId)
+ if (rt->id == kMainGoroutineId)
return;
Printf("\n");
Printf("Goroutine %d (%s) created at:\n",
// Main file (entry points) for the TSan run-time.
//===----------------------------------------------------------------------===//
+#include "tsan_rtl.h"
+
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_file.h"
#include "sanitizer_common/sanitizer_libc.h"
-#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#include "tsan_defs.h"
-#include "tsan_platform.h"
-#include "tsan_rtl.h"
+#include "tsan_interface.h"
#include "tsan_mman.h"
+#include "tsan_platform.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
#include "ubsan/ubsan_init.h"
bool OnFinalize(bool failed);
void OnInitialize();
#else
+#include <dlfcn.h>
SANITIZER_WEAK_CXX_DEFAULT_IMPL
bool OnFinalize(bool failed) {
+#if !SANITIZER_GO
+ if (auto *ptr = dlsym(RTLD_DEFAULT, "__tsan_on_finalize"))
+ return reinterpret_cast<decltype(&__tsan_on_finalize)>(ptr)(failed);
+#endif
return failed;
}
SANITIZER_WEAK_CXX_DEFAULT_IMPL
-void OnInitialize() {}
+void OnInitialize() {
+#if !SANITIZER_GO
+ if (auto *ptr = dlsym(RTLD_DEFAULT, "__tsan_on_initialize")) {
+ return reinterpret_cast<decltype(&__tsan_on_initialize)>(ptr)();
+ }
+#endif
+}
#endif
static char thread_registry_placeholder[sizeof(ThreadRegistry)];
new((void*)hdr) Trace();
// We are going to use only a small part of the trace with the default
// value of history_size. However, the constructor writes to the whole trace.
- // Unmap the unused part.
+ // Release the unused part.
uptr hdr_end = hdr + sizeof(Trace);
hdr_end -= sizeof(TraceHeader) * (kTraceParts - TraceParts());
hdr_end = RoundUp(hdr_end, GetPageSizeCached());
- if (hdr_end < hdr + sizeof(Trace))
- UnmapOrDie((void*)hdr_end, hdr + sizeof(Trace) - hdr_end);
+ if (hdr_end < hdr + sizeof(Trace)) {
+ ReleaseMemoryPagesToOS(hdr_end, hdr + sizeof(Trace));
+ uptr unused = hdr + sizeof(Trace) - hdr_end;
+ if (hdr_end != (uptr)MmapFixedNoAccess(hdr_end, unused)) {
+ Report("ThreadSanitizer: failed to mprotect(%p, %p)\n",
+ hdr_end, unused);
+ CHECK("unable to mprotect" && 0);
+ }
+ }
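Rather than unmapping the unused tail of the Trace, the runtime now releases those pages to the OS and re-maps them as inaccessible so the address-space reservation survives. A rough POSIX analogue of that release-but-keep-reserved step, using madvise plus mprotect purely for illustration (the runtime itself goes through ReleaseMemoryPagesToOS and MmapFixedNoAccess):

#include <sys/mman.h>
#include <unistd.h>
#include <cstddef>

// Release the page-aligned tail [release_beg, end) of a mapping back to the
// OS without giving up the virtual address range.
static void ReleaseTailKeepReserved(char *release_beg, char *end) {
  size_t len = (size_t)(end - release_beg);
  madvise(release_beg, len, MADV_DONTNEED);  // drop the backing pages
  mprotect(release_beg, len, PROT_NONE);     // keep the range reserved, no access
}

int main() {
  long page = sysconf(_SC_PAGESIZE);
  void *mem = mmap(nullptr, 4 * page, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (mem == MAP_FAILED)
    return 1;
  char *p = (char *)mem;
  p[0] = 1;                                         // touch the part we keep
  ReleaseTailKeepReserved(p + page, p + 4 * page);  // release the last 3 pages
  munmap(p, 4 * page);
}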
void *mem = internal_alloc(MBlockThreadContex, sizeof(ThreadContext));
return new(mem) ThreadContext(tid);
}
#endif
Context::Context()
- : initialized()
- , report_mtx(MutexTypeReport, StatMtxReport)
- , nreported()
- , nmissed_expected()
- , thread_registry(new(thread_registry_placeholder) ThreadRegistry(
- CreateThreadContext, kMaxTid, kThreadQuarantineSize, kMaxTidReuse))
- , racy_mtx(MutexTypeRacy, StatMtxRacy)
- , racy_stacks()
- , racy_addresses()
- , fired_suppressions_mtx(MutexTypeFired, StatMtxFired)
- , clock_alloc("clock allocator") {
+ : initialized(),
+ report_mtx(MutexTypeReport, StatMtxReport),
+ nreported(),
+ nmissed_expected(),
+ thread_registry(new (thread_registry_placeholder) ThreadRegistry(
+ CreateThreadContext, kMaxTid, kThreadQuarantineSize, kMaxTidReuse)),
+ racy_mtx(MutexTypeRacy, StatMtxRacy),
+ racy_stacks(),
+ racy_addresses(),
+ fired_suppressions_mtx(MutexTypeFired, StatMtxFired),
+ clock_alloc(LINKER_INITIALIZED, "clock allocator") {
fired_suppressions.reserve(8);
}
// The objects are allocated in TLS, so one may rely on zero-initialization.
-ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
- unsigned reuse_count,
- uptr stk_addr, uptr stk_size,
+ThreadState::ThreadState(Context *ctx, u32 tid, int unique_id, u64 epoch,
+ unsigned reuse_count, uptr stk_addr, uptr stk_size,
uptr tls_addr, uptr tls_size)
- : fast_state(tid, epoch)
- // Do not touch these, rely on zero initialization,
- // they may be accessed before the ctor.
- // , ignore_reads_and_writes()
- // , ignore_interceptors()
- , clock(tid, reuse_count)
+ : fast_state(tid, epoch)
+ // Do not touch these, rely on zero initialization,
+ // they may be accessed before the ctor.
+ // , ignore_reads_and_writes()
+ // , ignore_interceptors()
+ ,
+ clock(tid, reuse_count)
#if !SANITIZER_GO
- , jmp_bufs()
+ ,
+ jmp_bufs()
#endif
- , tid(tid)
- , unique_id(unique_id)
- , stk_addr(stk_addr)
- , stk_size(stk_size)
- , tls_addr(tls_addr)
- , tls_size(tls_size)
+ ,
+ tid(tid),
+ unique_id(unique_id),
+ stk_addr(stk_addr),
+ stk_size(stk_size),
+ tls_addr(tls_addr),
+ tls_size(tls_size)
#if !SANITIZER_GO
- , last_sleep_clock(tid)
+ ,
+ last_sleep_clock(tid)
#endif
{
}
} else if (internal_strcmp(flags()->profile_memory, "stderr") == 0) {
mprof_fd = 2;
} else {
- InternalScopedString filename(kMaxPathLength);
+ InternalScopedString filename;
filename.append("%s.%d", flags()->profile_memory, (int)internal_getpid());
fd_t fd = OpenFile(filename.data(), WrOnly);
if (fd == kInvalidFd) {
Printf("ThreadSanitizer: failed to open memory profile file '%s'\n",
- &filename[0]);
+ filename.data());
} else {
mprof_fd = fd;
}
}
#endif
+void CheckUnwind() {
+  // There is a high probability that interceptors will check-fail as well;
+  // on the other hand, there is no point in processing interceptors
+  // since we are going to die soon.
+ ScopedIgnoreInterceptors ignore;
+#if !SANITIZER_GO
+ cur_thread()->ignore_sync++;
+ cur_thread()->ignore_reads_and_writes++;
+#endif
+ PrintCurrentStackSlow(StackTrace::GetCurrentPc());
+}
+
void Initialize(ThreadState *thr) {
// Thread safe because done before all threads exist.
static bool is_initialized = false;
ScopedIgnoreInterceptors ignore;
SanitizerToolName = "ThreadSanitizer";
// Install tool-specific callbacks in sanitizer_common.
- SetCheckFailedCallback(TsanCheckFailed);
+ SetCheckUnwindCallback(CheckUnwind);
ctx = new(ctx_placeholder) Context;
const char *env_name = SANITIZER_GO ? "GORACE" : "TSAN_OPTIONS";
void ForkBefore(ThreadState *thr, uptr pc) {
ctx->thread_registry->Lock();
ctx->report_mtx.Lock();
- // Ignore memory accesses in the pthread_atfork callbacks.
- // If any of them triggers a data race we will deadlock
- // on the report_mtx.
- // We could ignore interceptors and sync operations as well,
+ // Suppress all reports in the pthread_atfork callbacks.
+ // Reports will deadlock on the report_mtx.
+ // We could ignore sync operations as well,
// but so far it's unclear if it will do more good or harm.
// Unnecessarily ignoring things can lead to false positives later.
- ThreadIgnoreBegin(thr, pc);
+ thr->suppress_reports++;
+ // On OS X, REAL(fork) can call intercepted functions (OSSpinLockLock), and
+ // we'll assert in CheckNoLocks() unless we ignore interceptors.
+ thr->ignore_interceptors++;
}
void ForkParentAfter(ThreadState *thr, uptr pc) {
- ThreadIgnoreEnd(thr, pc); // Begin is in ForkBefore.
+ thr->suppress_reports--; // Enabled in ForkBefore.
+ thr->ignore_interceptors--;
ctx->report_mtx.Unlock();
ctx->thread_registry->Unlock();
}
void ForkChildAfter(ThreadState *thr, uptr pc) {
- ThreadIgnoreEnd(thr, pc); // Begin is in ForkBefore.
+ thr->suppress_reports--; // Enabled in ForkBefore.
+ thr->ignore_interceptors--;
ctx->report_mtx.Unlock();
ctx->thread_registry->Unlock();
Allocator *allocator();
#endif
-void TsanCheckFailed(const char *file, int line, const char *cond,
- u64 v1, u64 v2);
-
const u64 kShadowRodata = (u64)-1; // .rodata shadow marker
// FastState (from most significant bit):
#if TSAN_COLLECT_STATS
u64 stat[StatCnt];
#endif
- const int tid;
+ const u32 tid;
const int unique_id;
bool in_symbolizer;
bool in_ignored_lib;
const ReportDesc *current_report;
- explicit ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
- unsigned reuse_count,
- uptr stk_addr, uptr stk_size,
+ explicit ThreadState(Context *ctx, u32 tid, int unique_id, u64 epoch,
+ unsigned reuse_count, uptr stk_addr, uptr stk_size,
uptr tls_addr, uptr tls_size);
};
ScopedErrorReportLock lock_;
};
+bool ShouldReport(ThreadState *thr, ReportType typ);
ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack);
void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
MutexSet *mset, uptr *tag = nullptr);
// or false positives (e.g. unlock in a different thread).
if (SANITIZER_GO)
return;
+ if (!ShouldReport(thr, typ))
+ return;
ThreadRegistryLock l(ctx->thread_registry);
ScopedReport rep(typ);
rep.AddMutex(mid);
ctx->dd->MutexInit(&cb, &s->dd);
}
bool unlock_locked = false;
- if (flags()->report_destroy_locked
- && s->owner_tid != SyncVar::kInvalidTid
- && !s->IsFlagSet(MutexFlagBroken)) {
+ if (flags()->report_destroy_locked && s->owner_tid != kInvalidTid &&
+ !s->IsFlagSet(MutexFlagBroken)) {
s->SetFlags(MutexFlagBroken);
unlock_locked = true;
}
if (!unlock_locked)
s->Reset(thr->proc()); // must not reset it before the report is printed
s->mtx.Unlock();
- if (unlock_locked) {
+ if (unlock_locked && ShouldReport(thr, ReportTypeMutexDestroyLocked)) {
ThreadRegistryLock l(ctx->thread_registry);
ScopedReport rep(ReportTypeMutexDestroyLocked);
rep.AddMutex(mid);
thr->fast_state.IncrementEpoch();
TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId());
bool report_double_lock = false;
- if (s->owner_tid == SyncVar::kInvalidTid) {
+ if (s->owner_tid == kInvalidTid) {
CHECK_EQ(s->recursion, 0);
s->owner_tid = thr->tid;
s->last_lock = thr->fast_state.raw();
s->recursion -= rec;
if (s->recursion == 0) {
StatInc(thr, StatMutexUnlock);
- s->owner_tid = SyncVar::kInvalidTid;
+ s->owner_tid = kInvalidTid;
ReleaseStoreImpl(thr, pc, &s->clock);
} else {
StatInc(thr, StatMutexRecUnlock);
thr->fast_state.IncrementEpoch();
TraceAddEvent(thr, thr->fast_state, EventTypeRLock, s->GetId());
bool report_bad_lock = false;
- if (s->owner_tid != SyncVar::kInvalidTid) {
+ if (s->owner_tid != kInvalidTid) {
if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
s->SetFlags(MutexFlagBroken);
report_bad_lock = true;
thr->fast_state.IncrementEpoch();
TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
bool report_bad_unlock = false;
- if (s->owner_tid != SyncVar::kInvalidTid) {
+ if (s->owner_tid != kInvalidTid) {
if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
s->SetFlags(MutexFlagBroken);
report_bad_unlock = true;
SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
bool write = true;
bool report_bad_unlock = false;
- if (s->owner_tid == SyncVar::kInvalidTid) {
+ if (s->owner_tid == kInvalidTid) {
// Seems to be read unlock.
write = false;
StatInc(thr, StatMutexReadUnlock);
s->recursion--;
if (s->recursion == 0) {
StatInc(thr, StatMutexUnlock);
- s->owner_tid = SyncVar::kInvalidTid;
+ s->owner_tid = kInvalidTid;
ReleaseStoreImpl(thr, pc, &s->clock);
} else {
StatInc(thr, StatMutexRecUnlock);
void MutexRepair(ThreadState *thr, uptr pc, uptr addr) {
DPrintf("#%d: MutexRepair %zx\n", thr->tid, addr);
SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
- s->owner_tid = SyncVar::kInvalidTid;
+ s->owner_tid = kInvalidTid;
s->recursion = 0;
s->mtx.Unlock();
}
}
void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
- if (r == 0)
+ if (r == 0 || !ShouldReport(thr, ReportTypeDeadlock))
return;
ThreadRegistryLock l(ctx->thread_registry);
ScopedReport rep(ReportTypeDeadlock);
#include "tsan_ppc_regs.h"
- .machine altivec
.section .text
.hidden __tsan_setjmp
.globl _setjmp
static ReportStack *SymbolizeStack(StackTrace trace);
-void TsanCheckFailed(const char *file, int line, const char *cond,
- u64 v1, u64 v2) {
- // There is high probability that interceptors will check-fail as well,
- // on the other hand there is no sense in processing interceptors
- // since we are going to die soon.
- ScopedIgnoreInterceptors ignore;
-#if !SANITIZER_GO
- cur_thread()->ignore_sync++;
- cur_thread()->ignore_reads_and_writes++;
-#endif
- Printf("FATAL: ThreadSanitizer CHECK failed: "
- "%s:%d \"%s\" (0x%zx, 0x%zx)\n",
- file, line, cond, (uptr)v1, (uptr)v2);
- PrintCurrentStackSlow(StackTrace::GetCurrentPc());
- Die();
-}
-
// Can be overriden by an application/test to intercept reports.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnReport(const ReportDesc *rep, bool suppressed);
return stack;
}
+bool ShouldReport(ThreadState *thr, ReportType typ) {
+ // We set thr->suppress_reports in the fork context.
+ // Taking any locks in the fork context can lead to deadlocks.
+ // If any locks are already taken, it's too late to do this check.
+ CheckNoLocks(thr);
+ // For the same reason, check that we didn't lock thread_registry yet.
+ if (SANITIZER_DEBUG)
+ ThreadRegistryLock l(ctx->thread_registry);
+ if (!flags()->report_bugs || thr->suppress_reports)
+ return false;
+ switch (typ) {
+ case ReportTypeSignalUnsafe:
+ return flags()->report_signal_unsafe;
+ case ReportTypeThreadLeak:
+#if !SANITIZER_GO
+ // It's impossible to join phantom threads
+ // in the child after fork.
+ if (ctx->after_multithreaded_fork)
+ return false;
+#endif
+ return flags()->report_thread_leaks;
+ case ReportTypeMutexDestroyLocked:
+ return flags()->report_destroy_locked;
+ default:
+ return true;
+ }
+}
+
ScopedReportBase::ScopedReportBase(ReportType typ, uptr tag) {
ctx->thread_registry->CheckLocked();
void *mem = internal_alloc(MBlockReport, sizeof(ReportDesc));
}
bool OutputReport(ThreadState *thr, const ScopedReport &srep) {
- if (!flags()->report_bugs || thr->suppress_reports)
- return false;
+ // These should have been checked in ShouldReport.
+ // It's too late to check them here; we have already taken locks.
+ CHECK(flags()->report_bugs);
+ CHECK(!thr->suppress_reports);
atomic_store_relaxed(&ctx->last_symbolize_time_ns, NanoTime());
const ReportDesc *rep = srep.GetReport();
CHECK_EQ(thr->current_report, nullptr);
// at best it will cause deadlocks on internal mutexes.
ScopedIgnoreInterceptors ignore;
- if (!flags()->report_bugs)
+ if (!ShouldReport(thr, ReportTypeRace))
return;
if (!flags()->report_atomic_races && !RaceBetweenAtomicAndFree(thr))
return;
// However, this solution is not reliable enough, please see dvyukov's comment
// http://reviews.llvm.org/D19148#406208
// Also see PR27280 comment 2 and 3 for breaking examples and analysis.
-ALWAYS_INLINE
-void PrintCurrentStackSlow(uptr pc) {
+ALWAYS_INLINE USED void PrintCurrentStackSlow(uptr pc) {
#if !SANITIZER_GO
uptr bp = GET_CURRENT_FRAME();
BufferedStackTrace *ptrace =
void ThreadContext::OnCreated(void *arg) {
thr = 0;
- if (tid == 0)
+ if (tid == kMainTid)
return;
OnCreatedArgs *args = static_cast<OnCreatedArgs *>(arg);
if (!args->thr) // GCD workers don't have a parent thread.
#if !SANITIZER_GO
static void ReportIgnoresEnabled(ThreadContext *tctx, IgnoreSet *set) {
- if (tctx->tid == 0) {
+ if (tctx->tid == kMainTid) {
Printf("ThreadSanitizer: main thread finished with ignores enabled\n");
} else {
Printf("ThreadSanitizer: thread T%d %s finished with ignores enabled,"
void ThreadFinalize(ThreadState *thr) {
ThreadCheckIgnore(thr);
#if !SANITIZER_GO
- if (!flags()->report_thread_leaks)
+ if (!ShouldReport(thr, ReportTypeThreadLeak))
return;
ThreadRegistryLock l(ctx->thread_registry);
Vector<ThreadLeak> leaks;
uptr tls_size = 0;
#if !SANITIZER_GO
if (thread_type != ThreadType::Fiber)
- GetThreadStackAndTls(tid == 0, &stk_addr, &stk_size, &tls_addr, &tls_size);
+ GetThreadStackAndTls(tid == kMainTid, &stk_addr, &stk_size, &tls_addr,
+ &tls_size);
- if (tid) {
+ if (tid != kMainTid) {
if (stk_addr && stk_size)
MemoryRangeImitateWrite(thr, /*pc=*/ 1, stk_addr, stk_size);
int ThreadConsumeTid(ThreadState *thr, uptr pc, uptr uid) {
ConsumeThreadContext findCtx = {uid, nullptr};
ctx->thread_registry->FindThread(ConsumeThreadByUid, &findCtx);
- int tid = findCtx.tctx ? findCtx.tctx->tid : ThreadRegistry::kUnknownTid;
+ int tid = findCtx.tctx ? findCtx.tctx->tid : kInvalidTid;
DPrintf("#%d: ThreadTid uid=%zu tid=%d\n", thr->tid, uid, tid);
return tid;
}
}
MetaMap::MetaMap()
- : block_alloc_("heap block allocator")
- , sync_alloc_("sync allocator") {
+ : block_alloc_(LINKER_INITIALIZED, "heap block allocator"),
+ sync_alloc_(LINKER_INITIALIZED, "sync allocator") {
atomic_store(&uid_gen_, 0, memory_order_relaxed);
}
struct SyncVar {
SyncVar();
- static const int kInvalidTid = -1;
-
uptr addr; // overwritten by DenseSlabAlloc freelist
Mutex mtx;
u64 uid; // Globally unique id.
u32 creation_stack_id;
- int owner_tid; // Set only by exclusive owners.
+ u32 owner_tid; // Set only by exclusive owners.
u64 last_lock;
int recursion;
atomic_uint32_t flags;
static const u32 kFlagMask = 3u << 30;
static const u32 kFlagBlock = 1u << 30;
static const u32 kFlagSync = 2u << 30;
- typedef DenseSlabAlloc<MBlock, 1<<16, 1<<12> BlockAlloc;
- typedef DenseSlabAlloc<SyncVar, 1<<16, 1<<10> SyncAlloc;
+ typedef DenseSlabAlloc<MBlock, 1 << 18, 1 << 12, kFlagMask> BlockAlloc;
+ typedef DenseSlabAlloc<SyncVar, 1 << 20, 1 << 10, kFlagMask> SyncAlloc;
BlockAlloc block_alloc_;
SyncAlloc sync_alloc_;
atomic_uint64_t uid_gen_;
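// The larger DenseSlabAlloc shapes above still fit under the flag bits that
// MetaMap keeps in the top of an index: kFlagBlock and kFlagSync occupy bits
// 30-31 (kFlagMask == 3u << 30), and both allocators cap out at exactly 2^30
// ids (2^18 * 2^12 and 2^20 * 2^10). Passing kFlagMask as the new template
// parameter presumably lets the allocator verify this itself; the
// static_asserts below only restate that arithmetic and are not code from
// the merge.
//
//   static_assert((1ULL << 18) * (1ULL << 12) <= (1ULL << 30),
//                 "block ids must stay below the kFlagBlock/kFlagSync bits");
//   static_assert((1ULL << 20) * (1ULL << 10) <= (1ULL << 30),
//                 "sync ids must stay below the kFlagBlock/kFlagSync bits");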
}
// Emit data.
- InternalScopedString Buffer(1024);
+ InternalScopedString Buffer;
for (uptr P = Min; P != Max; ++P) {
unsigned char C = *reinterpret_cast<const unsigned char*>(P);
Buffer.append("%s%02x", (P % 8 == 0) ? " " : " ", C);
// All diagnostics should be printed under report mutex.
ScopedReport::CheckLocked();
Decorator Decor;
- InternalScopedString Buffer(1024);
+ InternalScopedString Buffer;
// Prepare a report that a monitor process can inspect.
if (Level == DL_Error) {
ScopedReport::~ScopedReport() {
MaybePrintStackTrace(Opts.pc, Opts.bp);
MaybeReportErrorSummary(SummaryLoc, Type);
+
+ if (common_flags()->print_module_map >= 2)
+ DumpProcessMap();
+
if (flags()->halt_on_error)
Die();
}
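// Taken together with the UbsanDie() callback added further down, the two
// DumpProcessMap() call sites give print_module_map the following behavior
// (inferred from this diff): a value >= 2 dumps the module map after every
// report, and >= 1 dumps it once when the standalone runtime dies. A
// hypothetical way to exercise it, assuming a Clang build with
// -fsanitize=undefined:
//
//   clang++ -fsanitize=undefined ubsan_overflow_demo.cpp -o demo
//   UBSAN_OPTIONS=print_module_map=2 ./demo

// ubsan_overflow_demo.cpp -- deliberately overflows a signed int.
#include <climits>
#include <cstdio>

int main(int argc, char **) {
  int x = INT_MAX;
  x += argc;  // signed overflow: UBSan reports this at runtime
  std::printf("%d\n", x);
  return 0;
}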
{
CommonFlags cf;
cf.CopyFrom(*common_flags());
- cf.print_summary = false;
cf.external_symbolizer_path = GetFlag("UBSAN_SYMBOLIZER_PATH");
OverrideCommonFlags(cf);
}
} // namespace __ubsan
-void __ubsan::__ubsan_handle_cfi_bad_icall(CFIBadIcallData *CallData,
- ValueHandle Function) {
- GET_REPORT_OPTIONS(false);
- CFICheckFailData Data = {CFITCK_ICall, CallData->Loc, CallData->Type};
- handleCFIBadIcall(&Data, Function, Opts);
-}
-
-void __ubsan::__ubsan_handle_cfi_bad_icall_abort(CFIBadIcallData *CallData,
- ValueHandle Function) {
- GET_REPORT_OPTIONS(true);
- CFICheckFailData Data = {CFITCK_ICall, CallData->Loc, CallData->Type};
- handleCFIBadIcall(&Data, Function, Opts);
- Die();
-}
-
void __ubsan::__ubsan_handle_cfi_check_fail(CFICheckFailData *Data,
ValueHandle Value,
uptr ValidVtable) {
CFITCK_VMFCall,
};
-struct CFIBadIcallData {
- SourceLocation Loc;
- const TypeDescriptor &Type;
-};
-
struct CFICheckFailData {
CFITypeCheckKind CheckKind;
SourceLocation Loc;
const TypeDescriptor &Type;
};
-/// \brief Handle control flow integrity failure for indirect function calls.
-RECOVERABLE(cfi_bad_icall, CFIBadIcallData *Data, ValueHandle Function)
-
/// \brief Handle control flow integrity failures.
RECOVERABLE(cfi_check_fail, CFICheckFailData *Data, ValueHandle Function,
uptr VtableIsValid)
InitializeSuppressions();
}
+static void UbsanDie() {
+ if (common_flags()->print_module_map >= 1)
+ DumpProcessMap();
+}
+
static void CommonStandaloneInit() {
SanitizerToolName = GetSanititizerToolName();
CacheBinaryName();
AndroidLogInit();
InitializeCoverage(common_flags()->coverage, common_flags()->coverage_dir);
CommonInit();
+
+ // Only add the die callback when running in standalone mode, to avoid
+ // printing the same information from multiple sanitizers' output.
+ AddDieCallback(UbsanDie);
Symbolizer::LateInitialize();
}
UndefinedBehaviorReport::UndefinedBehaviorReport(const char *IssueKind,
Location &Loc,
InternalScopedString &Msg)
- : IssueKind(IssueKind), Loc(Loc), Buffer(Msg.length() + 1) {
+ : IssueKind(IssueKind), Loc(Loc) {
// We have the common sanitizer reporting lock, so it's safe to register a
// new UB report.
RegisterUndefinedBehaviorReport(this);
// Ensure that the first character of the diagnostic text can't start with a
// lowercase letter.
- char FirstChar = Buf.data()[0];
+ char FirstChar = *Buf.data();
if (FirstChar >= 'a' && FirstChar <= 'z')
- Buf.data()[0] = FirstChar - 'a' + 'A';
+ *Buf.data() += 'A' - 'a';
*OutIssueKind = CurrentUBR->IssueKind;
*OutMessage = Buf.data();
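// The rewritten capitalization above is plain ASCII arithmetic: 'A' - 'a' is
// -0x20, so adding it to any character in 'a'..'z' yields the corresponding
// uppercase letter. A self-contained restatement (illustrative only, not code
// from the merge):
//
//   char buf[] = "integer overflow";
//   if (buf[0] >= 'a' && buf[0] <= 'z')
//     buf[0] += 'A' - 'a';   // 'i' (0x69) + (-0x20) == 'I' (0x49)
//   // buf now reads "Integer overflow"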
#ifndef UBSAN_PLATFORM_H
#define UBSAN_PLATFORM_H
-#ifndef CAN_SANITIZE_UB
// Other platforms should be easy to add, and probably work as-is.
#if defined(__linux__) || defined(__FreeBSD__) || defined(__APPLE__) || \
- defined(__NetBSD__) || \
+ defined(__NetBSD__) || defined(__DragonFly__) || \
(defined(__sun__) && defined(__svr4__)) || \
defined(_WIN32) || defined(__Fuchsia__) || defined(__rtems__)
# define CAN_SANITIZE_UB 1
#else
# define CAN_SANITIZE_UB 0
#endif
-#endif //CAN_SANITIZE_UB
#endif