* MERGE: Merge from master.
-61a6439f35b6de28ff4aff4450d6fca970292fd5
+8723fe5606de6dfb344afacd667c20f55bb2f5e0
The first line of this file holds the git revision number of the
last merge done from the master library sources.
cf.verbosity = Verbosity();
cf.help = false; // this is activation-specific help
- // Check if activation flags need to be overriden.
+ // Check if activation flags need to be overridden.
if (const char *env = GetEnv("ASAN_ACTIVATION_OPTIONS")) {
parser.ParseString(env);
}
#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_stack.h"
+#include "asan_suppressions.h"
#include "asan_thread.h"
#include "lsan/lsan_common.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
PoisonShadow(chunk, allocated_size, kAsanHeapLeftRedzoneMagic);
}
- void ReInitialize(const AllocatorOptions &options) {
+ // Apply provided AllocatorOptions to an Allocator
+ void ApplyOptions(const AllocatorOptions &options) {
SetAllocatorMayReturnNull(options.may_return_null);
allocator.SetReleaseToOSIntervalMs(options.release_to_os_interval_ms);
SharedInitCode(options);
+ }
+
+ void ReInitialize(const AllocatorOptions &options) {
+ ApplyOptions(options);
// Poison all existing allocation's redzones.
if (CanPoisonMemory()) {
ComputeUserRequestedAlignmentLog(alignment);
if (alignment < min_alignment)
alignment = min_alignment;
+ bool upgraded_from_zero = false;
if (size == 0) {
// We'd be happy to avoid allocating memory for zero-size requests, but
// some programs/tests depend on this behavior and assume that malloc
// would not return NULL even for zero-size allocations. Moreover, it
// looks like operator new should never return NULL, and results of
// consecutive "new" calls must be different even if the allocated size
// is zero.
size = 1;
+ upgraded_from_zero = true;
}
CHECK(IsPowerOfTwo(alignment));
uptr rz_log = ComputeRZLog(size);
*shadow = fl.poison_partial ? (size & (ASAN_SHADOW_GRANULARITY - 1)) : 0;
}
+ if (upgraded_from_zero)
+ PoisonShadow(user_beg, ASAN_SHADOW_GRANULARITY,
+ kAsanHeapLeftRedzoneMagic);
+
AsanStats &thread_stats = GetCurrentThreadStats();
thread_stats.mallocs++;
thread_stats.malloced += size;
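
For illustration, a minimal host program exercising the behavior the size-0 upgrade preserves (a hypothetical test, not part of the patch; under this change the commented-out store would additionally be reported, since the single upgraded byte is now poisoned):

#include <cassert>
#include <cstdlib>

int main() {
  // ASan's allocator upgrades size 0 to 1, so both calls return distinct,
  // non-null pointers.
  void *a = malloc(0);
  void *b = malloc(0);
  assert(a && b && a != b);
  // ((char *)a)[0] = 1;  // would now be flagged: the byte is poisoned
  free(a);
  free(b);
  return 0;
}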
if (!AtomicallySetQuarantineFlagIfAllocated(m, ptr, stack)) return;
if (m->alloc_type != alloc_type) {
- if (atomic_load(&alloc_dealloc_mismatch, memory_order_acquire)) {
+ if (atomic_load(&alloc_dealloc_mismatch, memory_order_acquire) &&
+ !IsAllocDeallocMismatchSuppressed(stack)) {
ReportAllocTypeMismatch((uptr)ptr, stack, (AllocType)m->alloc_type,
(AllocType)alloc_type);
}
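
A hypothetical repro and suppression for the new check; the suppression syntax below is an assumption based on the usual sanitizer suppression format (type:pattern matched against frames in the reported stack):

#include <cstdlib>

int main() {
  int *p = new int[4];
  free(p);  // alloc-dealloc-mismatch: operator new [] vs free
  return 0;
}
// Assuming a suppressions file containing the line
//   alloc_dealloc_mismatch:main
// and a run with ASAN_OPTIONS=alloc_dealloc_mismatch=1:suppressions=<file>,
// IsAllocDeallocMismatchSuppressed() would silence this particular report.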
instance.ReInitialize(options);
}
+// Apply provided AllocatorOptions to an Allocator
+void ApplyAllocatorOptions(const AllocatorOptions &options) {
+ instance.ApplyOptions(options);
+}
+
void GetAllocatorOptions(AllocatorOptions *options) {
instance.GetOptions(options);
}
instance.PrintStats();
}
-void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) {
- instance.Deallocate(ptr, 0, 0, stack, alloc_type);
-}
-
-void asan_delete(void *ptr, uptr size, uptr alignment,
- BufferedStackTrace *stack, AllocType alloc_type) {
- instance.Deallocate(ptr, size, alignment, stack, alloc_type);
+void asan_free(void *ptr, BufferedStackTrace *stack) {
+ instance.Deallocate(ptr, 0, 0, stack, FROM_MALLOC);
}
void *asan_malloc(uptr size, BufferedStackTrace *stack) {
return SetErrnoOnNull(
    instance.Allocate(size, PageSize, stack, FROM_MALLOC, true));
}
-void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
- AllocType alloc_type) {
+void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack) {
if (UNLIKELY(!IsPowerOfTwo(alignment))) {
errno = errno_EINVAL;
if (AllocatorMayReturnNull())
  return nullptr;
ReportInvalidAllocationAlignment(alignment, stack);
}
return SetErrnoOnNull(
- instance.Allocate(size, alignment, stack, alloc_type, true));
+ instance.Allocate(size, alignment, stack, FROM_MALLOC, true));
}
void *asan_aligned_alloc(uptr alignment, uptr size, BufferedStackTrace *stack) {
return usable_size;
}
+namespace {
+
+void *asan_new(uptr size, BufferedStackTrace *stack, bool array) {
+ return SetErrnoOnNull(
+ instance.Allocate(size, 0, stack, array ? FROM_NEW_BR : FROM_NEW, true));
+}
+
+void *asan_new_aligned(uptr size, uptr alignment, BufferedStackTrace *stack,
+ bool array) {
+ if (UNLIKELY(alignment == 0 || !IsPowerOfTwo(alignment))) {
+ errno = errno_EINVAL;
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportInvalidAllocationAlignment(alignment, stack);
+ }
+ return SetErrnoOnNull(instance.Allocate(
+ size, alignment, stack, array ? FROM_NEW_BR : FROM_NEW, true));
+}
+
+void asan_delete(void *ptr, BufferedStackTrace *stack, bool array) {
+ instance.Deallocate(ptr, 0, 0, stack, array ? FROM_NEW_BR : FROM_NEW);
+}
+
+void asan_delete_aligned(void *ptr, uptr alignment, BufferedStackTrace *stack,
+ bool array) {
+ instance.Deallocate(ptr, 0, alignment, stack, array ? FROM_NEW_BR : FROM_NEW);
+}
+
+void asan_delete_sized(void *ptr, uptr size, BufferedStackTrace *stack,
+ bool array) {
+ instance.Deallocate(ptr, size, 0, stack, array ? FROM_NEW_BR : FROM_NEW);
+}
+
+void asan_delete_sized_aligned(void *ptr, uptr size, uptr alignment,
+ BufferedStackTrace *stack, bool array) {
+ instance.Deallocate(ptr, size, alignment, stack,
+ array ? FROM_NEW_BR : FROM_NEW);
+}
+
+} // namespace
+
+void *asan_new(uptr size, BufferedStackTrace *stack) {
+ return asan_new(size, stack, /*array=*/false);
+}
+
+void *asan_new_aligned(uptr size, uptr alignment, BufferedStackTrace *stack) {
+ return asan_new_aligned(size, alignment, stack, /*array=*/false);
+}
+
+void *asan_new_array(uptr size, BufferedStackTrace *stack) {
+ return asan_new(size, stack, /*array=*/true);
+}
+
+void *asan_new_array_aligned(uptr size, uptr alignment,
+ BufferedStackTrace *stack) {
+ return asan_new_aligned(size, alignment, stack, /*array=*/true);
+}
+
+void asan_delete(void *ptr, BufferedStackTrace *stack) {
+ asan_delete(ptr, stack, /*array=*/false);
+}
+
+void asan_delete_aligned(void *ptr, uptr alignment, BufferedStackTrace *stack) {
+ asan_delete_aligned(ptr, alignment, stack, /*array=*/false);
+}
+
+void asan_delete_sized(void *ptr, uptr size, BufferedStackTrace *stack) {
+ asan_delete_sized(ptr, size, stack, /*array=*/false);
+}
+
+void asan_delete_sized_aligned(void *ptr, uptr size, uptr alignment,
+ BufferedStackTrace *stack) {
+ asan_delete_sized_aligned(ptr, size, alignment, stack, /*array=*/false);
+}
+
+void asan_delete_array(void *ptr, BufferedStackTrace *stack) {
+ asan_delete(ptr, stack, /*array=*/true);
+}
+
+void asan_delete_array_aligned(void *ptr, uptr alignment,
+ BufferedStackTrace *stack) {
+ asan_delete_aligned(ptr, alignment, stack, /*array=*/true);
+}
+
+void asan_delete_array_sized(void *ptr, uptr size, BufferedStackTrace *stack) {
+ asan_delete_sized(ptr, size, stack, /*array=*/true);
+}
+
+void asan_delete_array_sized_aligned(void *ptr, uptr size, uptr alignment,
+ BufferedStackTrace *stack) {
+ asan_delete_sized_aligned(ptr, size, alignment, stack, /*array=*/true);
+}
+
uptr asan_mz_size(const void *ptr) {
return instance.AllocationSize(reinterpret_cast<uptr>(ptr));
}
void InitializeAllocator(const AllocatorOptions &options);
void ReInitializeAllocator(const AllocatorOptions &options);
void GetAllocatorOptions(AllocatorOptions *options);
+void ApplyAllocatorOptions(const AllocatorOptions &options);
class AsanChunkView {
public:
typedef CompactSizeClassMap SizeClassMap;
template <typename AddressSpaceViewTy>
struct AP32 {
- static const uptr kSpaceBeg = 0;
+ static const uptr kSpaceBeg = SANITIZER_MMAP_BEGIN;
static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
static const uptr kMetadataSize = 0;
typedef __asan::SizeClassMap SizeClassMap;
AsanThreadLocalMallocStorage() {}
};
-void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
- AllocType alloc_type);
-void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type);
-void asan_delete(void *ptr, uptr size, uptr alignment,
- BufferedStackTrace *stack, AllocType alloc_type);
+void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack);
+void asan_free(void *ptr, BufferedStackTrace *stack);
void *asan_malloc(uptr size, BufferedStackTrace *stack);
void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack);
BufferedStackTrace *stack);
uptr asan_malloc_usable_size(const void *ptr, uptr pc, uptr bp);
+void *asan_new(uptr size, BufferedStackTrace *stack);
+void *asan_new_aligned(uptr size, uptr alignment, BufferedStackTrace *stack);
+void *asan_new_array(uptr size, BufferedStackTrace *stack);
+void *asan_new_array_aligned(uptr size, uptr alignment,
+ BufferedStackTrace *stack);
+void asan_delete(void *ptr, BufferedStackTrace *stack);
+void asan_delete_aligned(void *ptr, uptr alignment, BufferedStackTrace *stack);
+void asan_delete_sized(void *ptr, uptr size, BufferedStackTrace *stack);
+void asan_delete_sized_aligned(void *ptr, uptr size, uptr alignment,
+ BufferedStackTrace *stack);
+void asan_delete_array(void *ptr, BufferedStackTrace *stack);
+void asan_delete_array_aligned(void *ptr, uptr alignment,
+ BufferedStackTrace *stack);
+void asan_delete_array_sized(void *ptr, uptr size, BufferedStackTrace *stack);
+void asan_delete_array_sized_aligned(void *ptr, uptr size, uptr alignment,
+ BufferedStackTrace *stack);
+
uptr asan_mz_size(const void *ptr);
void asan_mz_force_lock();
void asan_mz_force_unlock();
}
context->announced = true;
+ InternalScopedString str;
+ str.AppendF("Thread %s", AsanThreadIdAndName(context).c_str());
+
AsanThreadContext *parent_context =
context->parent_tid == kInvalidTid
? nullptr
// `context->parent_tid` may point to a reused slot. Check `unique_id`, which
// is always smaller for the parent and always greater for a new user.
- if (context->unique_id <= parent_context->unique_id)
- parent_context = nullptr;
-
- InternalScopedString str;
- str.AppendF("Thread %s", AsanThreadIdAndName(context).c_str());
- if (!parent_context) {
+ if (!parent_context || context->unique_id <= parent_context->unique_id) {
str.Append(" created by unknown thread\n");
Printf("%s", str.data());
return;
descr->frame_pc = access.frame_pc;
descr->frame_descr = access.frame_descr;
-#if SANITIZER_PPC64V1
- // On PowerPC64 ELFv1, the address of a function actually points to a
- // three-doubleword data structure with the first field containing
- // the address of the function's code.
+#if SANITIZER_PPC64V1 || SANITIZER_AIX
+ // On PowerPC64 ELFv1 or AIX, the address of a function actually points to a
+ // three-doubleword (or three-word for 32-bit AIX) data structure with
+ // the first field containing the address of the function's code.
descr->frame_pc = *reinterpret_cast<uptr *>(descr->frame_pc);
#endif
descr->frame_pc += 16;
data.kind = kAddressKindShadow;
return;
}
+
+  // Check globals first. On AIX, some global data defined in shared libraries
+  // is placed in the STACK region for unknown reasons. Checking globals first
+  // works around this issue.
+  // TODO: Look into whether there's a different solution to this problem.
+#if SANITIZER_AIX
+ if (GetGlobalAddressInformation(addr, access_size, &data.global)) {
+ data.kind = kAddressKindGlobal;
+ return;
+ }
+#endif
+
if (GetHeapAddressInformation(addr, access_size, &data.heap)) {
data.kind = kAddressKindHeap;
return;
return;
}
+// GetGlobalAddressInformation is called earlier on AIX due to a workaround
+#if !SANITIZER_AIX
if (GetGlobalAddressInformation(addr, access_size, &data.global)) {
data.kind = kAddressKindGlobal;
return;
}
+#endif
+
data.kind = kAddressKindWild;
data.wild.addr = addr;
data.wild.access_size = access_size;
//===----------------------------------------------------------------------===//
#include "asan_errors.h"
+
#include "asan_descriptions.h"
#include "asan_mapping.h"
+#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
Printf("%s", str.data());
}
+static void CheckPoisonRecords(uptr addr) {
+ if (!AddrIsInMem(addr))
+ return;
+
+ u8 *shadow_addr = (u8 *)MemToShadow(addr);
+ // If we are in the partial right redzone, look at the next shadow byte.
+ if (*shadow_addr > 0 && *shadow_addr < 128)
+ shadow_addr++;
+ u8 shadow_val = *shadow_addr;
+
+ if (shadow_val != kAsanUserPoisonedMemoryMagic)
+ return;
+
+ Printf("\n");
+
+ if (flags()->poison_history_size <= 0) {
+ Printf(
+ "NOTE: the stack trace above identifies the code that *accessed* "
+ "the poisoned memory.\n");
+ Printf(
+ "To identify the code that *poisoned* the memory, try the "
+ "experimental setting ASAN_OPTIONS=poison_history_size=<size>.\n");
+ return;
+ }
+
+ PoisonRecord record;
+ if (FindPoisonRecord(addr, record)) {
+ StackTrace poison_stack = StackDepotGet(record.stack_id);
+ if (poison_stack.size > 0) {
+ Printf("Memory was manually poisoned by thread T%u:\n", record.thread_id);
+ poison_stack.Print();
+ }
+ } else {
+ Printf("ERROR: no matching poison tracking record found.\n");
+ Printf("Try a larger value for ASAN_OPTIONS=poison_history_size=<size>.\n");
+ }
+}
+
void ErrorGeneric::Print() {
Decorator d;
Printf("%s", d.Error());
PrintContainerOverflowHint();
ReportErrorSummary(bug_descr, &stack);
PrintShadowMemoryForAddress(addr);
+
+ // This is an experimental flag, hence we don't make a special handler.
+ CheckPoisonRecords(addr);
}
} // namespace __asan
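
A hypothetical use-after-poison repro for the new report path; build with -fsanitize=address and run with ASAN_OPTIONS=poison_history_size=1000 (the flag is experimental, as noted above):

#include <sanitizer/asan_interface.h>

int main() {
  static char buf[64];
  __asan_poison_memory_region(buf, sizeof(buf));
  return buf[10];  // use-after-poison; with poison_history_size set,
                   // CheckPoisonRecords() also prints the stack trace of the
                   // __asan_poison_memory_region call above
}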
u32 tid, BufferedStackTrace *stack_, uptr old_storage_beg_,
uptr old_storage_end_, uptr new_storage_beg_, uptr new_storage_end_)
: ErrorBase(tid, 10,
- "bad-__sanitizer_annotate_double_ended_contiguous_container"),
+ "bad-__sanitizer_copy_contiguous_container_annotations"),
stack(stack_),
old_storage_beg(old_storage_beg_),
old_storage_end(old_storage_end_),
// For small size classes inline PoisonShadow for better performance.
ALWAYS_INLINE void SetShadow(uptr ptr, uptr size, uptr class_id, u64 magic) {
- u64 *shadow = reinterpret_cast<u64*>(MemToShadow(ptr));
+ CHECK(AddrIsAlignedByGranularity(ptr + size));
+ u64* shadow = reinterpret_cast<u64*>(MemToShadow(ptr));
if (ASAN_SHADOW_SCALE == 3 && class_id <= 6) {
// This code expects ASAN_SHADOW_SCALE=3.
for (uptr i = 0; i < (((uptr)1) << class_id); i++) {
// The size class is too big, it's cheaper to poison only size bytes.
PoisonShadow(ptr, size, static_cast<u8>(magic));
}
+
+ if (magic == 0) {
+ uptr redzone_size = FakeStack::BytesInSizeClass(class_id) - size;
+ PoisonShadow(ptr + size, redzone_size, kAsanStackRightRedzoneMagic);
+ }
}
-FakeStack *FakeStack::Create(uptr stack_size_log) {
+FakeStack* FakeStack::Create(uptr stack_size_log) {
static uptr kMinStackSizeLog = 16;
static uptr kMaxStackSizeLog = FIRST_32_SECOND_64(24, 28);
if (stack_size_log < kMinStackSizeLog)
stack_size_log = kMinStackSizeLog;
if (stack_size_log > kMaxStackSizeLog)
stack_size_log = kMaxStackSizeLog;
+ CHECK_LE(kMaxStackFrameSizeLog, stack_size_log);
uptr size = RequiredSize(stack_size_log);
- FakeStack *res = reinterpret_cast<FakeStack *>(
- flags()->uar_noreserve ? MmapNoReserveOrDie(size, "FakeStack")
- : MmapOrDie(size, "FakeStack"));
+ uptr padded_size = size + kMaxStackFrameSize;
+ void* true_res = reinterpret_cast<void*>(
+ flags()->uar_noreserve ? MmapNoReserveOrDie(padded_size, "FakeStack")
+ : MmapOrDie(padded_size, "FakeStack"));
+ // GetFrame() requires the property that
+ // (res + kFlagsOffset + SizeRequiredForFlags(stack_size_log)) is aligned to
+ // kMaxStackFrameSize.
+ // We didn't use MmapAlignedOrDieOnFatalError, because it requires that the
+ // *size* is a power of 2, which is an overly strong condition.
+ static_assert(alignof(FakeStack) <= kMaxStackFrameSize);
+ FakeStack* res = reinterpret_cast<FakeStack*>(
+ RoundUpTo(
+ (uptr)true_res + kFlagsOffset + SizeRequiredForFlags(stack_size_log),
+ kMaxStackFrameSize) -
+ kFlagsOffset - SizeRequiredForFlags(stack_size_log));
+ res->true_start = true_res;
res->stack_size_log_ = stack_size_log;
- u8 *p = reinterpret_cast<u8 *>(res);
+ u8* p = reinterpret_cast<u8*>(res);
VReport(1,
"T%d: FakeStack created: %p -- %p stack_size_log: %zd; "
- "mmapped %zdK, noreserve=%d \n",
- GetCurrentTidOrInvalid(), (void *)p,
- (void *)(p + FakeStack::RequiredSize(stack_size_log)), stack_size_log,
- size >> 10, flags()->uar_noreserve);
+ "mmapped %zdK, noreserve=%d, true_start: %p, start of first frame: "
+ "0x%zx\n",
+ GetCurrentTidOrInvalid(), (void*)p,
+ (void*)(p + FakeStack::RequiredSize(stack_size_log)), stack_size_log,
+ size >> 10, flags()->uar_noreserve, res->true_start,
+ res->GetFrame(stack_size_log, /*class_id*/ 0, /*pos*/ 0));
return res;
}
Report("T%d: FakeStack destroyed: %s\n", tid, str.data());
}
uptr size = RequiredSize(stack_size_log_);
- FlushUnneededASanShadowMemory(reinterpret_cast<uptr>(this), size);
- UnmapOrDie(this, size);
+ uptr padded_size = size + kMaxStackFrameSize;
+ FlushUnneededASanShadowMemory(reinterpret_cast<uptr>(true_start),
+ padded_size);
+ UnmapOrDie(true_start, padded_size);
}
void FakeStack::PoisonAll(u8 magic) {
#if !defined(_MSC_VER) || defined(__clang__)
ALWAYS_INLINE USED
#endif
-FakeFrame *FakeStack::Allocate(uptr stack_size_log, uptr class_id,
- uptr real_stack) {
+ FakeFrame* FakeStack::Allocate(uptr stack_size_log, uptr class_id,
+ uptr real_stack) {
CHECK_LT(class_id, kNumberOfSizeClasses);
if (needs_gc_)
GC(real_stack);
- uptr &hint_position = hint_position_[class_id];
+ uptr& hint_position = hint_position_[class_id];
const int num_iter = NumberOfFrames(stack_size_log, class_id);
- u8 *flags = GetFlags(stack_size_log, class_id);
+ u8* flags = GetFlags(stack_size_log, class_id);
for (int i = 0; i < num_iter; i++) {
uptr pos = ModuloNumberOfFrames(stack_size_log, class_id, hint_position++);
// This part is tricky. On one hand, checking and setting flags[pos]
// should be atomic to ensure async-signal safety. But on the other hand,
// if the signal arrives between checking and setting the flag, the signal
// handler's fake stack will start from a different hint_position and
// so will not touch this particular byte. So, it is safe to do this
// with regular non-atomic load and store (at least I was not able to make
// this code crash).
- if (flags[pos]) continue;
+ if (flags[pos])
+ continue;
flags[pos] = 1;
- FakeFrame *res = reinterpret_cast<FakeFrame *>(
- GetFrame(stack_size_log, class_id, pos));
+ FakeFrame* res =
+ reinterpret_cast<FakeFrame*>(GetFrame(stack_size_log, class_id, pos));
res->real_stack = real_stack;
*SavedFlagPtr(reinterpret_cast<uptr>(res), class_id) = &flags[pos];
return res;
}
- return nullptr; // We are out of fake stack.
+ return nullptr; // We are out of fake stack.
}
-uptr FakeStack::AddrIsInFakeStack(uptr ptr, uptr *frame_beg, uptr *frame_end) {
+uptr FakeStack::AddrIsInFakeStack(uptr ptr, uptr* frame_beg, uptr* frame_end) {
uptr stack_size_log = this->stack_size_log();
uptr beg = reinterpret_cast<uptr>(GetFrame(stack_size_log, 0, 0));
uptr end = reinterpret_cast<uptr>(this) + RequiredSize(stack_size_log);
- if (ptr < beg || ptr >= end) return 0;
+ if (ptr < beg || ptr >= end)
+ return 0;
uptr class_id = (ptr - beg) >> stack_size_log;
uptr base = beg + (class_id << stack_size_log);
CHECK_LE(base, ptr);
return res;
}
-void FakeStack::HandleNoReturn() {
- needs_gc_ = true;
-}
+void FakeStack::HandleNoReturn() { needs_gc_ = true; }
// Hack: The statement below is not true if we take into account sigaltstack or
// makecontext. It should be possible to make GC to discard wrong stack frame if
// we use these tools.
//
// When throw, longjmp or some such happens we don't call OnFree() and
// as the result may leak one or more fake frames, but the good news is that
// we are notified about all such events by HandleNoReturn().
// If we recently had such no-return event we need to collect garbage frames.
// We do it based on their 'real_stack' values -- everything that is lower
// than the current real_stack is garbage.
NOINLINE void FakeStack::GC(uptr real_stack) {
- AsanThread *curr_thread = GetCurrentThread();
+ AsanThread* curr_thread = GetCurrentThread();
if (!curr_thread)
return; // Try again when we have a thread.
auto top = curr_thread->stack_top();
return; // Not the default stack.
for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++) {
- u8 *flags = GetFlags(stack_size_log(), class_id);
+ u8* flags = GetFlags(stack_size_log(), class_id);
for (uptr i = 0, n = NumberOfFrames(stack_size_log(), class_id); i < n;
i++) {
- if (flags[i] == 0) continue; // not allocated.
- FakeFrame *ff = reinterpret_cast<FakeFrame *>(
- GetFrame(stack_size_log(), class_id, i));
+ if (flags[i] == 0)
+ continue; // not allocated.
+ FakeFrame* ff =
+ reinterpret_cast<FakeFrame*>(GetFrame(stack_size_log(), class_id, i));
// GC only on the default stack.
if (bottom < ff->real_stack && ff->real_stack < real_stack) {
flags[i] = 0;
needs_gc_ = false;
}
-void FakeStack::ForEachFakeFrame(RangeIteratorCallback callback, void *arg) {
+void FakeStack::ForEachFakeFrame(RangeIteratorCallback callback, void* arg) {
for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++) {
- u8 *flags = GetFlags(stack_size_log(), class_id);
+ u8* flags = GetFlags(stack_size_log(), class_id);
for (uptr i = 0, n = NumberOfFrames(stack_size_log(), class_id); i < n;
i++) {
- if (flags[i] == 0) continue; // not allocated.
- FakeFrame *ff = reinterpret_cast<FakeFrame *>(
- GetFrame(stack_size_log(), class_id, i));
+ if (flags[i] == 0)
+ continue; // not allocated.
+ FakeFrame* ff =
+ reinterpret_cast<FakeFrame*>(GetFrame(stack_size_log(), class_id, i));
uptr begin = reinterpret_cast<uptr>(ff);
callback(begin, begin + FakeStack::BytesInSizeClass(class_id), arg);
}
}
#if (SANITIZER_LINUX && !SANITIZER_ANDROID) || SANITIZER_FUCHSIA
-static THREADLOCAL FakeStack *fake_stack_tls;
+static THREADLOCAL FakeStack* fake_stack_tls;
-FakeStack *GetTLSFakeStack() {
- return fake_stack_tls;
-}
-void SetTLSFakeStack(FakeStack *fs) {
- fake_stack_tls = fs;
-}
+static FakeStack* GetTLSFakeStack() { return fake_stack_tls; }
+static void SetTLSFakeStack(FakeStack* fs) { fake_stack_tls = fs; }
+void ResetTLSFakeStack() { fake_stack_tls = nullptr; }
#else
-FakeStack *GetTLSFakeStack() { return 0; }
-void SetTLSFakeStack(FakeStack *fs) { }
+static FakeStack* GetTLSFakeStack() { return nullptr; }
+static void SetTLSFakeStack(FakeStack*) {}
+void ResetTLSFakeStack() {}
#endif // (SANITIZER_LINUX && !SANITIZER_ANDROID) || SANITIZER_FUCHSIA
-static FakeStack *GetFakeStack() {
- AsanThread *t = GetCurrentThread();
- if (!t) return nullptr;
+static FakeStack* GetFakeStack() {
+ AsanThread* t = GetCurrentThread();
+ if (!t)
+ return nullptr;
return t->get_or_create_fake_stack();
}
-static FakeStack *GetFakeStackFast() {
- if (FakeStack *fs = GetTLSFakeStack())
+static FakeStack* GetFakeStackFast() {
+ FakeStack* fs = GetTLSFakeStack();
+ if (LIKELY(fs))
return fs;
if (!__asan_option_detect_stack_use_after_return)
return nullptr;
- return GetFakeStack();
+ fs = GetFakeStack();
+ if (LIKELY(fs))
+ SetTLSFakeStack(fs);
+ return fs;
}
-static FakeStack *GetFakeStackFastAlways() {
- if (FakeStack *fs = GetTLSFakeStack())
+static FakeStack* GetFakeStackFastAlways() {
+ FakeStack* fs = GetTLSFakeStack();
+ if (LIKELY(fs))
return fs;
- return GetFakeStack();
+ fs = GetFakeStack();
+ if (LIKELY(fs))
+ SetTLSFakeStack(fs);
+ return fs;
}
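
The caching change above, shown as a generic pattern (a sketch, not runtime code): the first lookup per thread pays for the slow path, later lookups are a single TLS load, and an explicit reset hook invalidates the cache.

#include <cstdio>

struct Resource { int id; };

static thread_local Resource *cached = nullptr;

static Resource *CreateSlow() {  // stands in for GetFakeStack()
  static Resource r{42};
  return &r;
}

static Resource *GetFast() {     // stands in for GetFakeStackFast()
  if (cached)
    return cached;               // fast path: one TLS load
  cached = CreateSlow();         // slow path taken once per thread
  return cached;
}

void Reset() { cached = nullptr; }  // stands in for ResetTLSFakeStack()

int main() { printf("%d\n", GetFast()->id); }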
static ALWAYS_INLINE uptr OnMalloc(uptr class_id, uptr size) {
- FakeStack *fs = GetFakeStackFast();
+ FakeStack* fs = GetFakeStackFast();
if (!fs)
return 0;
- FakeFrame *ff =
+ FakeFrame* ff =
fs->Allocate(fs->stack_size_log(), class_id, GET_CURRENT_FRAME());
if (!ff)
return 0; // Out of fake stack.
}
static ALWAYS_INLINE uptr OnMallocAlways(uptr class_id, uptr size) {
- FakeStack *fs = GetFakeStackFastAlways();
+ FakeStack* fs = GetFakeStackFastAlways();
if (!fs)
return 0;
- FakeFrame *ff =
+ FakeFrame* ff =
fs->Allocate(fs->stack_size_log(), class_id, GET_CURRENT_FRAME());
if (!ff)
return 0; // Out of fake stack.
SetShadow(ptr, size, class_id, kMagic8);
}
-} // namespace __asan
+} // namespace __asan
// ---------------------- Interface ---------------- {{{1
using namespace __asan;
#define DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(class_id) \
extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr \
- __asan_stack_malloc_##class_id(uptr size) { \
+ __asan_stack_malloc_##class_id(uptr size) { \
return OnMalloc(class_id, size); \
} \
extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr \
- __asan_stack_malloc_always_##class_id(uptr size) { \
+ __asan_stack_malloc_always_##class_id(uptr size) { \
return OnMallocAlways(class_id, size); \
} \
extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __asan_stack_free_##class_id( \
// -asan-use-after-return=never, after modal UAR flag lands
// (https://github.com/google/sanitizers/issues/1394)
SANITIZER_INTERFACE_ATTRIBUTE
-void *__asan_get_current_fake_stack() { return GetFakeStackFast(); }
+void* __asan_get_current_fake_stack() { return GetFakeStackFast(); }
SANITIZER_INTERFACE_ATTRIBUTE
-void *__asan_addr_is_in_fake_stack(void *fake_stack, void *addr, void **beg,
- void **end) {
- FakeStack *fs = reinterpret_cast<FakeStack*>(fake_stack);
- if (!fs) return nullptr;
+void* __asan_addr_is_in_fake_stack(void* fake_stack, void* addr, void** beg,
+ void** end) {
+ FakeStack* fs = reinterpret_cast<FakeStack*>(fake_stack);
+ if (!fs)
+ return nullptr;
uptr frame_beg, frame_end;
- FakeFrame *frame = reinterpret_cast<FakeFrame *>(fs->AddrIsInFakeStack(
+ FakeFrame* frame = reinterpret_cast<FakeFrame*>(fs->AddrIsInFakeStack(
reinterpret_cast<uptr>(addr), &frame_beg, &frame_end));
- if (!frame) return nullptr;
+ if (!frame)
+ return nullptr;
if (frame->magic != kCurrentStackFrameMagic)
return nullptr;
- if (beg) *beg = reinterpret_cast<void*>(frame_beg);
- if (end) *end = reinterpret_cast<void*>(frame_end);
+ if (beg)
+ *beg = reinterpret_cast<void*>(frame_beg);
+ if (end)
+ *end = reinterpret_cast<void*>(frame_end);
return reinterpret_cast<void*>(frame->real_stack);
}
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_allocas_unpoison(uptr top, uptr bottom) {
- if ((!top) || (top > bottom)) return;
- REAL(memset)
- (reinterpret_cast<void *>(MemToShadow(top)), 0,
- (bottom - top) / ASAN_SHADOW_GRANULARITY);
+ if ((!top) || (top > bottom))
+ return;
+ REAL(memset)(reinterpret_cast<void*>(MemToShadow(top)), 0,
+ (bottom - top) / ASAN_SHADOW_GRANULARITY);
}
-} // extern "C"
+} // extern "C"
// is not popped but remains there for quite some time until it gets used
// again. So, we poison the objects on the fake stack when the function
// returns. It helps us find use-after-return bugs.
-//
// The FakeStack object is allocated by a single mmap call and has no other
// pointers. The size of the fake stack depends on the actual thread stack size
// and thus can not be a constant.
// stack_size is a power of two greater or equal to the thread's stack size;
// we store it as its logarithm (stack_size_log).
+// FakeStack is padded such that GetFrame() is aligned to BytesInSizeClass().
// FakeStack has kNumberOfSizeClasses (11) size classes, each size class
// is a power of two, starting from 64 bytes. Each size class occupies
// stack_size bytes and thus can allocate
class FakeStack {
static const uptr kMinStackFrameSizeLog = 6; // Min frame is 64B.
static const uptr kMaxStackFrameSizeLog = 16; // Max stack frame is 64K.
+ static_assert(kMaxStackFrameSizeLog >= kMinStackFrameSizeLog);
+
+ static const u64 kMaxStackFrameSize = 1 << kMaxStackFrameSizeLog;
public:
static const uptr kNumberOfSizeClasses =
void Destroy(int tid);
- // stack_size_log is at least 15 (stack_size >= 32K).
+ // min_uar_stack_size_log is 16 (stack_size >= 64KB)
static uptr SizeRequiredForFlags(uptr stack_size_log) {
return ((uptr)1) << (stack_size_log + 1 - kMinStackFrameSizeLog);
}
}
// Get frame by class_id and pos.
+ // Return values are guaranteed to be aligned to BytesInSizeClass(class_id),
+ // which is useful in combination with
+ // ASanStackFrameLayout::ComputeASanStackFrameLayout().
+ //
+ // Note that alignment to 1<<kMaxStackFrameSizeLog (aka
+ // BytesInSizeClass(max_class_id)) implies alignment to BytesInSizeClass()
+ // for any class_id, since the class sizes are increasing powers of 2.
+ //
+ // 1) (this + kFlagsOffset + SizeRequiredForFlags())) is aligned to
+ // 1<<kMaxStackFrameSizeLog (see FakeStack::Create)
+ //
+ // Note that SizeRequiredForFlags(16) == 2048. If FakeStack::Create() had
+ // merely returned an address from mmap (4K-aligned), the addition would
+ // not be 4K-aligned.
+ // 2) We know that stack_size_log >= kMaxStackFrameSizeLog (otherwise you
+ // couldn't store a single frame of that size in the entire stack)
+ // hence (1<<stack_size_log) is aligned to 1<<kMaxStackFrameSizeLog
+ // and ((1<<stack_size_log) * class_id) is aligned to
+ // 1<<kMaxStackFrameSizeLog
+ // 3) BytesInSizeClass(class_id) * pos is aligned to
+ // BytesInSizeClass(class_id)
+ // The sum of these is aligned to BytesInSizeClass(class_id).
u8 *GetFrame(uptr stack_size_log, uptr class_id, uptr pos) {
return reinterpret_cast<u8 *>(this) + kFlagsOffset +
SizeRequiredForFlags(stack_size_log) +
private:
FakeStack() { }
- static const uptr kFlagsOffset = 4096; // This is were the flags begin.
+ static const uptr kFlagsOffset = 4096; // This is where the flags begin.
// Must match the number of uses of DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID
COMPILER_CHECK(kNumberOfSizeClasses == 11);
static const uptr kMaxStackMallocSize = ((uptr)1) << kMaxStackFrameSizeLog;
uptr hint_position_[kNumberOfSizeClasses];
uptr stack_size_log_;
- // a bit is set if something was allocated from the corresponding size class.
bool needs_gc_;
+ // We allocated more memory than needed to ensure the FakeStack (and, by
+ // extension, each of the fake stack frames) is aligned. We keep track of the
+ // true start so that we can unmap it.
+ void *true_start;
};
-FakeStack *GetTLSFakeStack();
-void SetTLSFakeStack(FakeStack *fs);
+void ResetTLSFakeStack();
} // namespace __asan
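
A standalone sanity check of the alignment arithmetic described above (constants copied from the header; RoundUpTo is reimplemented locally for the sketch):

#include <cassert>
#include <cstdint>

// Local re-implementation of sanitizer_common's RoundUpTo, for illustration.
static uint64_t RoundUpTo(uint64_t x, uint64_t align) {
  return (x + align - 1) & ~(align - 1);
}

int main() {
  const uint64_t kMinStackFrameSizeLog = 6, kMaxStackFrameSizeLog = 16;
  const uint64_t kMaxStackFrameSize = 1ULL << kMaxStackFrameSizeLog;
  const uint64_t kFlagsOffset = 4096;
  const uint64_t stack_size_log = 20;  // e.g. a 1MB fake stack
  const uint64_t flags_size =
      1ULL << (stack_size_log + 1 - kMinStackFrameSizeLog);
  // Mimic FakeStack::Create: mmap is only page-aligned, so align manually.
  uint64_t true_res = 0x7f0000001000;  // arbitrary 4K-aligned mmap result
  uint64_t res = RoundUpTo(true_res + kFlagsOffset + flags_size,
                           kMaxStackFrameSize) -
                 kFlagsOffset - flags_size;
  // The property GetFrame() relies on:
  assert((res + kFlagsOffset + flags_size) % kMaxStackFrameSize == 0);
  // The padding of kMaxStackFrameSize in Create() is always sufficient:
  assert(res >= true_res && res < true_res + kMaxStackFrameSize);
  return 0;
}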
DisplayHelpMessages(&asan_parser);
}
+// Validate flags and report incompatible configurations
static void ProcessFlags() {
Flags *f = flags();
ProcessFlags();
#if SANITIZER_WINDOWS
- // On Windows, weak symbols are emulated by having the user program
- // register which weak functions are defined.
- // The ASAN DLL will initialize flags prior to user module initialization,
- // so __asan_default_options will not point to the user definition yet.
- // We still want to ensure we capture when options are passed via
+ // On Windows, weak symbols (such as the `__asan_default_options` function)
+ // are emulated by having the user program register which weak functions are
+ // defined. The ASAN DLL will initialize flags prior to user module
+ // initialization, so __asan_default_options will not point to the user
+ // definition yet. We still want to ensure we capture when options are passed
+ // via
// __asan_default_options, so we add a callback to be run
// when it is registered with the runtime via
// __sanitizer_register_weak_function.
AddRegisterWeakFunctionCallback(
reinterpret_cast<uptr>(__asan_default_options), []() {
- FlagParser asan_parser;
-
- RegisterAsanFlags(&asan_parser, flags());
- RegisterCommonFlags(&asan_parser);
- asan_parser.ParseString(__asan_default_options());
-
- DisplayHelpMessages(&asan_parser);
+ // We call `InitializeDefaultFlags` again, instead of just parsing
+ // `__asan_default_options` directly, to ensure that flags set through
+ // `ASAN_OPTS` take precedence over those set through
+ // `__asan_default_options`.
+ InitializeDefaultFlags();
ProcessFlags();
+ ApplyFlags();
});
# if CAN_SANITIZE_UB
// ASan flag values can be defined in four ways:
// 1) initialized with default values at startup.
-// 2) overriden during compilation of ASan runtime by providing
+// 2) overridden during compilation of ASan runtime by providing
// compile definition ASAN_DEFAULT_OPTIONS.
-// 3) overriden from string returned by user-specified function
+// 3) overridden from string returned by user-specified function
// __asan_default_options().
-// 4) overriden from env variable ASAN_OPTIONS.
-// 5) overriden during ASan activation (for now used on Android only).
+// 4) overridden from env variable ASAN_OPTIONS.
+// 5) overridden during ASan activation (for now used on Android only).
namespace __asan {
"stack buffers.")
ASAN_FLAG(bool, poison_array_cookie, true,
"Poison (or not) the array cookie after operator new[].")
+ASAN_FLAG(int, poison_history_size, 0,
+ "[EXPERIMENTAL] Number of most recent memory poisoning calls for "
+ "which the stack traces will be recorded.")
// Turn off alloc/dealloc mismatch checker on Mac and Windows for now.
// https://github.com/google/sanitizers/issues/131
# include "asan_thread.h"
# include "lsan/lsan_common.h"
+namespace __sanitizer {
+// ASan doesn't need to do anything else special in the startup hook.
+void EarlySanitizerInit() {}
+} // namespace __sanitizer
+
namespace __asan {
-// The system already set up the shadow memory for us.
-// __sanitizer::GetMaxUserVirtualAddress has already been called by
-// AsanInitInternal->InitializeHighMemEnd (asan_rtl.cpp).
-// Just do some additional sanity checks here.
void InitializeShadowMemory() {
+  // Explicitly set up the shadow here, right before any of the ShadowBounds
+  // members are used.
+ InitShadowBounds();
+
if (Verbosity())
PrintAddressSpaceLayout();
AddGlobalToList(relevant_globals, g);
}
+// Check for an ODR violation on the given global G by testing whether it is
+// already poisoned. We use this method in case the compiler doesn't use
+// private aliases for global variables.
+static void CheckODRViolationViaPoisoning(const Global *g)
+ SANITIZER_REQUIRES(mu_for_globals) {
+ if (__asan_region_is_poisoned(g->beg, g->size_with_redzone)) {
+    // This check may not be enough: if the first global is much larger,
+    // the entire redzone of the second global may be within the first global.
+ for (const auto &l : list_of_all_globals) {
+ if (g->beg == l.g->beg &&
+ (flags()->detect_odr_violation >= 2 || g->size != l.g->size) &&
+ !IsODRViolationSuppressed(g->name)) {
+ ReportODRViolation(g, FindRegistrationSite(g), l.g,
+ FindRegistrationSite(l.g));
+ }
+ }
+ }
+}
+
// Clang provides two different ways for global variables protection:
// it can poison the global itself or its private alias. In former
// case we may poison same symbol multiple times, that can help us to
// where two globals with the same name are defined in different modules.
if (UseODRIndicator(g))
CheckODRViolationViaIndicator(g);
+ else
+ CheckODRViolationViaPoisoning(g);
}
if (CanPoisonMemory())
PoisonRedZones(*g);
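
A hypothetical multi-library setup that the poisoning-based check can catch when ODR indicators are unavailable; the build flags and suppression name below follow ASan's existing -fsanitize-address-use-odr-indicator option and odr_violation suppression type:

// lib1.cpp and lib2.cpp, each built into its own shared library:
int g_table[16];

// main.cpp
int main() { return 0; }

// Build sketch (hypothetical):
//   clang++ -fsanitize=address -fno-sanitize-address-use-odr-indicator \
//       -shared -fPIC lib1.cpp -o lib1.so   (and likewise for lib2.so)
//   clang++ -fsanitize=address main.cpp lib1.so lib2.so
// When both libraries are loaded, the second registration of g_table sees
// the first one's already-poisoned redzones and reports an odr-violation,
// unless it is suppressed with a suppressions line "odr_violation:g_table".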
static inline uptr MaybeRealStrnlen(const char *s, uptr maxlen) {
#if SANITIZER_INTERCEPT_STRNLEN
- if (REAL(strnlen)) {
+ if (static_cast<bool>(REAL(strnlen)))
return REAL(strnlen)(s, maxlen);
- }
-#endif
+# endif
return internal_strnlen(s, maxlen);
}
+static inline uptr MaybeRealWcsnlen(const wchar_t* s, uptr maxlen) {
+# if SANITIZER_INTERCEPT_WCSNLEN
+ if (static_cast<bool>(REAL(wcsnlen)))
+ return REAL(wcsnlen)(s, maxlen);
+# endif
+ return internal_wcsnlen(s, maxlen);
+}
+
void SetThreadName(const char *name) {
AsanThread *t = GetCurrentThread();
if (t)
va_list ap;
uptr args[64];
// We don't know a better way to forward ... into REAL function. We can
- // increase args size if neccecary.
+ // increase args size if necessary.
CHECK_LE(argc, ARRAY_SIZE(args));
internal_memset(args, 0, sizeof(args));
va_start(ap, argc);
return REAL(strcpy)(to, from);
}
+INTERCEPTOR(wchar_t*, wcscpy, wchar_t* to, const wchar_t* from) {
+ void* ctx;
+ ASAN_INTERCEPTOR_ENTER(ctx, wcscpy);
+ if (!TryAsanInitFromRtl())
+ return REAL(wcscpy)(to, from);
+ if (flags()->replace_str) {
+ uptr size = (internal_wcslen(from) + 1) * sizeof(wchar_t);
+ CHECK_RANGES_OVERLAP("wcscpy", to, size, from, size);
+ ASAN_READ_RANGE(ctx, from, size);
+ ASAN_WRITE_RANGE(ctx, to, size);
+ }
+ return REAL(wcscpy)(to, from);
+}
+
// Windows doesn't always define the strdup identifier,
// and when it does it's a macro defined to either _strdup
// or _strdup_dbg, _strdup_dbg ends up calling _strdup, so
INTERCEPTOR(char*, strdup, const char *s) {
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, strdup);
+ // Allowing null input is Windows-specific
+ if (SANITIZER_WINDOWS && UNLIKELY(!s))
+ return nullptr;
if (UNLIKELY(!TryAsanInitFromRtl()))
return internal_strdup(s);
uptr length = internal_strlen(s);
return REAL(strncpy)(to, from, size);
}
+INTERCEPTOR(wchar_t*, wcsncpy, wchar_t* to, const wchar_t* from, uptr size) {
+ void* ctx;
+ ASAN_INTERCEPTOR_ENTER(ctx, wcsncpy);
+ AsanInitFromRtl();
+ if (flags()->replace_str) {
+ uptr from_size =
+ Min(size, MaybeRealWcsnlen(from, size) + 1) * sizeof(wchar_t);
+ CHECK_RANGES_OVERLAP("wcsncpy", to, from_size, from, from_size);
+ ASAN_READ_RANGE(ctx, from, from_size);
+ ASAN_WRITE_RANGE(ctx, to, size * sizeof(wchar_t));
+ }
+ return REAL(wcsncpy)(to, from, size);
+}
+
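A hypothetical overflow the new interceptor catches: wcsncpy pads the destination with L'\0' up to n elements, so ASAN_WRITE_RANGE checks the full n * sizeof(wchar_t) destination range:

#include <wchar.h>

int main() {
  wchar_t dst[4];
  wcsncpy(dst, L"hi", 8);  // writes 8 wchar_t's into a 4-element buffer:
                           // stack-buffer-overflow on dst
  return (int)dst[0];
}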
template <typename Fn>
static ALWAYS_INLINE auto StrtolImpl(void *ctx, Fn real, const char *nptr,
char **endptr, int base)
ASAN_INTERCEPT_FUNC(strncat);
ASAN_INTERCEPT_FUNC(strncpy);
ASAN_INTERCEPT_FUNC(strdup);
+
+ // Intercept wcs* functions.
+ ASAN_INTERCEPT_FUNC(wcscpy);
+ ASAN_INTERCEPT_FUNC(wcsncpy);
+
# if ASAN_INTERCEPT___STRDUP
ASAN_INTERCEPT_FUNC(__strdup);
#endif
ASAN_INTERCEPT_FUNC(__isoc23_strtoll);
# endif
- // Intecept jump-related functions.
+ // Intercept jump-related functions.
ASAN_INTERCEPT_FUNC(longjmp);
# if ASAN_INTERCEPT_SWAPCONTEXT
#if ASAN_HAS_EXCEPTIONS && !SANITIZER_SOLARIS && !SANITIZER_NETBSD && \
(!SANITIZER_WINDOWS || (defined(__MINGW32__) && defined(__i386__)))
# define ASAN_INTERCEPT___CXA_THROW 1
-# if ! defined(ASAN_HAS_CXA_RETHROW_PRIMARY_EXCEPTION) \
- || ASAN_HAS_CXA_RETHROW_PRIMARY_EXCEPTION
-# define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 1
-# else
-# define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 0
-# endif
+# define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 1
# if defined(_GLIBCXX_SJLJ_EXCEPTIONS) || (SANITIZER_IOS && defined(__arm__))
# define ASAN_INTERCEPT__UNWIND_SJLJ_RAISEEXCEPTION 1
# else
DECLARE_REAL(SIZE_T, strlen, const char *s)
DECLARE_REAL(char*, strncpy, char *to, const char *from, SIZE_T size)
DECLARE_REAL(SIZE_T, strnlen, const char *s, SIZE_T maxlen)
+DECLARE_REAL(SIZE_T, wcsnlen, const wchar_t* s, SIZE_T maxlen)
DECLARE_REAL(char*, strstr, const char *s1, const char *s2)
# if !SANITIZER_APPLE
if (LIKELY(replace_intrin_cached)) { \
ASAN_READ_RANGE(ctx, from, size); \
ASAN_WRITE_RANGE(ctx, to, size); \
+ } else if (UNLIKELY(!AsanInited())) { \
+ return internal_memmove(to, from, size); \
} \
- return internal_memmove(to, from, size); \
+ return REAL(memmove)(to, from, size); \
} while (0)
void *__asan_memcpy(void *to, const void *from, uptr size) {
DECLARE_REAL(void *, memcpy, void *to, const void *from, SIZE_T size)
DECLARE_REAL(void *, memset, void *block, int c, SIZE_T size)
+DECLARE_REAL(void *, memmove, void *to, const void *from, SIZE_T size)
namespace __asan {
void AsanInitFromRtl();
bool TryAsanInitFromRtl();
+void ApplyFlags();
// asan_win.cpp
void InitializePlatformExceptionHandlers();
uptr FindDynamicShadowStart();
void AsanCheckDynamicRTPrereqs();
void AsanCheckIncompatibleRT();
+void TryReExecWithoutASLR();
// Unpoisons platform-specific stacks.
// Returns true if all stacks have been unpoisoned.
#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
- SANITIZER_SOLARIS
+ SANITIZER_SOLARIS || SANITIZER_HAIKU
+
+# if SANITIZER_HAIKU
+# define _DEFAULT_SOURCE
+# endif
# include <dlfcn.h>
# include <fcntl.h>
# include <stdio.h>
# include <sys/mman.h>
# include <sys/resource.h>
-# include <sys/syscall.h>
+# if !SANITIZER_HAIKU
+# include <sys/syscall.h>
+# endif
# include <sys/time.h>
# include <sys/types.h>
# include <unistd.h>
# include "sanitizer_common/sanitizer_libc.h"
# include "sanitizer_common/sanitizer_procmaps.h"
-# if SANITIZER_FREEBSD
+# if SANITIZER_FREEBSD || SANITIZER_HAIKU
# include <sys/link_elf.h>
# endif
+# if SANITIZER_LINUX
+# include <sys/personality.h>
+# endif
+
# if SANITIZER_SOLARIS
# include <link.h>
# endif
# elif SANITIZER_NETBSD
# include <link_elf.h>
# include <ucontext.h>
+# elif SANITIZER_HAIKU
+extern "C" void *_DYNAMIC;
# else
# include <link.h>
# include <sys/ucontext.h>
ReleaseMemoryPagesToOS(MemToShadow(p), MemToShadow(p + size));
}
+void TryReExecWithoutASLR() {
+# if SANITIZER_LINUX
+ // ASLR personality check.
+ // Caution: 'personality' is sometimes forbidden by sandboxes, so only call
+ // this function as a last resort (when the memory mapping is incompatible
+ // and ASan would fail anyway).
+ int old_personality = personality(0xffffffff);
+ if (old_personality == -1) {
+ VReport(1, "WARNING: unable to run personality check.\n");
+ return;
+ }
+
+ bool aslr_on = (old_personality & ADDR_NO_RANDOMIZE) == 0;
+
+ if (aslr_on) {
+ // Disable ASLR if the memory layout was incompatible.
+ // Alternatively, we could just keep re-execing until we get lucky
+ // with a compatible randomized layout, but the risk is that if it's
+ // not an ASLR-related issue, we will be stuck in an infinite loop of
+ // re-execing (unless we change ReExec to pass a parameter of the
+ // number of retries allowed.)
+ VReport(1,
+ "WARNING: AddressSanitizer: memory layout is incompatible, "
+ "possibly due to high-entropy ASLR.\n"
+ "Re-execing with fixed virtual address space.\n"
+ "N.B. reducing ASLR entropy is preferable.\n");
+ CHECK_NE(personality(old_personality | ADDR_NO_RANDOMIZE), -1);
+
+ ReExec();
+ }
+# endif
+}
+
# if SANITIZER_ANDROID
// FIXME: should we do anything for Android?
void AsanCheckDynamicRTPrereqs() {}
return 0;
}
+# if SANITIZER_HAIKU
+ if (!info->dlpi_name[0] ||
+ internal_strncmp(info->dlpi_name, "/boot/system/runtime_loader",
+ sizeof("/boot/system/runtime_loader") - 1) == 0)
+ return 0;
+# endif
# if SANITIZER_LINUX
// Ignore vDSO. glibc versions earlier than 2.15 (and some patched
// by distributors) return an empty name for the vDSO entry, so
} // namespace __asan
#endif // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD ||
- // SANITIZER_SOLARIS
+ // SANITIZER_SOLARIS || SANITIZER_HAIKU
GetMmapGranularity());
}
+// Not used.
+void TryReExecWithoutASLR() {}
+
// No-op. Mac does not support static linkage anyway.
void AsanCheckDynamicRTPrereqs() {}
// dispatch_after()
// dispatch_group_async_f()
// dispatch_group_async()
+// dispatch_apply()
+// dispatch_apply_f()
// TODO(glider): libdispatch API contains other functions that we don't support
// yet.
//
typedef void* dispatch_source_t;
typedef u64 dispatch_time_t;
typedef void (*dispatch_function_t)(void *block);
+typedef void (*dispatch_apply_function_t)(void *, size_t);
typedef void* (*worker_t)(void *block);
typedef unsigned long dispatch_mach_reason;
typedef void *dispatch_mach_msg_t;
// A wrapper for the ObjC blocks used to support libdispatch.
typedef struct {
void *block;
- dispatch_function_t func;
+ union {
+ dispatch_function_t dispatch_func;
+ dispatch_apply_function_t dispatch_apply_func;
+ static_assert(sizeof(dispatch_func) == sizeof(dispatch_apply_func));
+ };
u32 parent_tid;
} asan_block_context_t;
block, (void*)pthread_self());
asan_register_worker_thread(context->parent_tid, &stack);
// Call the original dispatcher for the block.
- context->func(context->block);
- asan_free(context, &stack, FROM_MALLOC);
+ context->dispatch_func(context->block);
+ asan_free(context, &stack);
}
} // namespace __asan
asan_block_context_t *asan_ctxt =
(asan_block_context_t*) asan_malloc(sizeof(asan_block_context_t), stack);
asan_ctxt->block = ctxt;
- asan_ctxt->func = func;
+ asan_ctxt->dispatch_func = func;
asan_ctxt->parent_tid = GetCurrentTidOrInvalid();
return asan_ctxt;
}
asan_dispatch_call_block_and_release);
}
-#if !defined(MISSING_BLOCKS_SUPPORT)
+extern "C" void asan_dispatch_apply_f_work(void *context, size_t iteration) {
+ GET_STACK_TRACE_THREAD;
+ asan_block_context_t *asan_ctxt = (asan_block_context_t *)context;
+ asan_register_worker_thread(asan_ctxt->parent_tid, &stack);
+ asan_ctxt->dispatch_apply_func(asan_ctxt->block, iteration);
+}
+
+INTERCEPTOR(void, dispatch_apply_f, size_t iterations, dispatch_queue_t queue,
+ void *ctxt, dispatch_apply_function_t work) {
+ GET_STACK_TRACE_THREAD;
+ asan_block_context_t *asan_ctxt =
+ (asan_block_context_t *)asan_malloc(sizeof(asan_block_context_t), &stack);
+ asan_ctxt->block = ctxt;
+ asan_ctxt->dispatch_apply_func = work;
+ asan_ctxt->parent_tid = GetCurrentTidOrInvalid();
+ REAL(dispatch_apply_f)(iterations, queue, (void *)asan_ctxt,
+ asan_dispatch_apply_f_work);
+}
+
+# if !defined(MISSING_BLOCKS_SUPPORT)
extern "C" {
void dispatch_async(dispatch_queue_t dq, void(^work)(void));
void dispatch_group_async(dispatch_group_t dg, dispatch_queue_t dq,
void(^work)(void));
void dispatch_after(dispatch_time_t when, dispatch_queue_t queue,
void(^work)(void));
+void dispatch_apply(size_t iterations, dispatch_queue_t queue,
+ void (^block)(size_t iteration));
void dispatch_source_set_cancel_handler(dispatch_source_t ds,
void(^work)(void));
void dispatch_source_set_event_handler(dispatch_source_t ds, void(^work)(void));
});
}
-#endif
+INTERCEPTOR(void, dispatch_apply, size_t iterations, dispatch_queue_t queue,
+ void (^block)(size_t iteration)) {
+ ENABLE_FRAME_POINTER;
+ int parent_tid = GetCurrentTidOrInvalid();
+
+ void (^asan_block)(size_t) = ^(size_t iteration) {
+ GET_STACK_TRACE_THREAD;
+ asan_register_worker_thread(parent_tid, &stack);
+ block(iteration);
+ };
+
+ REAL(dispatch_apply)(iterations, queue, asan_block);
+}
+
+# endif
#endif // SANITIZER_APPLE
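
A hypothetical macOS caller now covered by the interceptors; worker threads spawned by libdispatch get registered with ASan, so a report from inside the body is attributed to a known thread (DISPATCH_APPLY_AUTO is assumed available, macOS 10.13+):

#include <dispatch/dispatch.h>

#include <cstdio>

static void Body(void *ctx, size_t i) {
  int *data = static_cast<int *>(ctx);
  data[i] *= 2;  // an out-of-bounds index here would now be reported with a
                 // properly registered worker-thread stack
}

int main() {
  int data[8] = {0, 1, 2, 3, 4, 5, 6, 7};
  dispatch_apply_f(8, DISPATCH_APPLY_AUTO, data, Body);
  printf("%d\n", data[3]);
  return 0;
}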
#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_FREEBSD || SANITIZER_FUCHSIA || SANITIZER_LINUX || \
- SANITIZER_NETBSD || SANITIZER_SOLARIS
+ SANITIZER_NETBSD || SANITIZER_SOLARIS || SANITIZER_HAIKU
# include "asan_allocator.h"
# include "asan_interceptors.h"
if (DlsymAlloc::PointerIsMine(ptr))
return DlsymAlloc::Free(ptr);
GET_STACK_TRACE_FREE;
- asan_free(ptr, &stack, FROM_MALLOC);
+ asan_free(ptr, &stack);
}
#if SANITIZER_INTERCEPT_CFREE
if (DlsymAlloc::PointerIsMine(ptr))
return DlsymAlloc::Free(ptr);
GET_STACK_TRACE_FREE;
- asan_free(ptr, &stack, FROM_MALLOC);
+ asan_free(ptr, &stack);
}
#endif // SANITIZER_INTERCEPT_CFREE
#if SANITIZER_INTERCEPT_MEMALIGN
INTERCEPTOR(void*, memalign, uptr boundary, uptr size) {
GET_STACK_TRACE_MALLOC;
- return asan_memalign(boundary, size, &stack, FROM_MALLOC);
+ return asan_memalign(boundary, size, &stack);
}
INTERCEPTOR(void*, __libc_memalign, uptr boundary, uptr size) {
GET_STACK_TRACE_MALLOC;
- return asan_memalign(boundary, size, &stack, FROM_MALLOC);
+ return asan_memalign(boundary, size, &stack);
}
#endif // SANITIZER_INTERCEPT_MEMALIGN
#endif // SANITIZER_ANDROID
#endif // SANITIZER_FREEBSD || SANITIZER_FUCHSIA || SANITIZER_LINUX ||
- // SANITIZER_NETBSD || SANITIZER_SOLARIS
+ // SANITIZER_NETBSD || SANITIZER_SOLARIS || SANITIZER_HAIKU
# define COMMON_MALLOC_FORCE_UNLOCK() asan_mz_force_unlock()
# define COMMON_MALLOC_MEMALIGN(alignment, size) \
GET_STACK_TRACE_MALLOC; \
- void *p = asan_memalign(alignment, size, &stack, FROM_MALLOC)
+ void *p = asan_memalign(alignment, size, &stack)
# define COMMON_MALLOC_MALLOC(size) \
GET_STACK_TRACE_MALLOC; \
void *p = asan_malloc(size, &stack)
int res = asan_posix_memalign(memptr, alignment, size, &stack);
# define COMMON_MALLOC_VALLOC(size) \
GET_STACK_TRACE_MALLOC; \
- void *p = asan_memalign(GetPageSizeCached(), size, &stack, FROM_MALLOC);
+ void *p = asan_memalign(GetPageSizeCached(), size, &stack);
# define COMMON_MALLOC_FREE(ptr) \
GET_STACK_TRACE_FREE; \
- asan_free(ptr, &stack, FROM_MALLOC);
+ asan_free(ptr, &stack);
# define COMMON_MALLOC_SIZE(ptr) uptr size = asan_mz_size(ptr);
# define COMMON_MALLOC_FILL_STATS(zone, stats) \
AsanMallocStats malloc_stats; \
__declspec(noinline) void free(void *ptr) {
GET_STACK_TRACE_FREE;
- return asan_free(ptr, &stack, FROM_MALLOC);
+ return asan_free(ptr, &stack);
}
__declspec(noinline) void _free_dbg(void *ptr, int) { free(ptr); }
CHECK((HEAP_FREE_UNSUPPORTED_FLAGS & dwFlags) != 0 && "unsupported flags");
}
GET_STACK_TRACE_FREE;
- asan_free(lpMem, &stack, FROM_MALLOC);
+ asan_free(lpMem, &stack);
return true;
}
if (replacement_alloc) {
size_t old_size = heapSizeFunc(hHeap, dwFlags, lpMem);
if (old_size == ((size_t)0) - 1) {
- asan_free(replacement_alloc, &stack, FROM_MALLOC);
+ asan_free(replacement_alloc, &stack);
return nullptr;
}
REAL(memcpy)(replacement_alloc, lpMem, old_size);
}
}
+ if (dwFlags & HEAP_REALLOC_IN_PLACE_ONLY) {
+ size_t old_usable_size = asan_malloc_usable_size(lpMem, pc, bp);
+ if (dwBytes == old_usable_size) {
+ // Nothing to change, return the current pointer.
+ return lpMem;
+    } else if (dwBytes > old_usable_size) {
+ // Growing with HEAP_REALLOC_IN_PLACE_ONLY is not supported.
+ return nullptr;
+ } else {
+ // Shrinking with HEAP_REALLOC_IN_PLACE_ONLY is not yet supported.
+ // For now return the current pointer and
+ // leave the allocation size as it is.
+ return lpMem;
+ }
+ }
+
if (ownershipState == ASAN && !only_asan_supported_flags) {
// Conversion to unsupported flags allocation,
// transfer this allocation back to the original allocator.
old_usable_size = asan_malloc_usable_size(lpMem, pc, bp);
REAL(memcpy)(replacement_alloc, lpMem,
Min<size_t>(dwBytes, old_usable_size));
- asan_free(lpMem, &stack, FROM_MALLOC);
+ asan_free(lpMem, &stack);
}
return replacement_alloc;
}
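
A hypothetical Windows caller exercising the new HEAP_REALLOC_IN_PLACE_ONLY path; note that under this change growing in place returns nullptr (unsupported), while shrinking currently returns the pointer unchanged without updating the allocation size:

#include <windows.h>

int main() {
  HANDLE heap = GetProcessHeap();
  void *p = HeapAlloc(heap, 0, 128);
  void *shrunk = HeapReAlloc(heap, HEAP_REALLOC_IN_PLACE_ONLY, p, 64);
  // shrunk == p: the allocation is left at its original usable size for now.
  void *grown = HeapReAlloc(heap, HEAP_REALLOC_IN_PLACE_ONLY, p, 512);
  // grown == nullptr: growing in place is not supported under ASan.
  HeapFree(heap, 0, p);
  return (shrunk == p && grown == nullptr) ? 0 : 1;
}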
return REAL(RtlFreeHeap)(HeapHandle, Flags, BaseAddress);
}
GET_STACK_TRACE_FREE;
- asan_free(BaseAddress, &stack, FROM_MALLOC);
+ asan_free(BaseAddress, &stack);
return true;
}
// || `[0x0000000000, 0x0d5554ffff]` || LowMem ||
//
// Default Linux/AArch64 (39-bit VMA) mapping:
-// || `[0x2000000000, 0x7fffffffff]` || highmem ||
-// || `[0x1400000000, 0x1fffffffff]` || highshadow ||
-// || `[0x1200000000, 0x13ffffffff]` || shadowgap ||
-// || `[0x1000000000, 0x11ffffffff]` || lowshadow ||
-// || `[0x0000000000, 0x0fffffffff]` || lowmem ||
+// TODO: this mapping is ok, but the allocator size is too large on non-Android
+// AArch64 platforms (see asan_allocator.h)
+// || `[0x2000000000, 0x7fffffffff]` || highmem || 384GB
+// || `[0x1400000000, 0x1fffffffff]` || highshadow || 48GB
+// || `[0x1200000000, 0x13ffffffff]` || shadowgap || 8GB
+// || `[0x1000000000, 0x11ffffffff]` || lowshadow || 8GB
+// || `[0x0000000000, 0x0fffffffff]` || lowmem || 64GB
//
// Default Linux/AArch64 (42-bit VMA) mapping:
-// || `[0x10000000000, 0x3ffffffffff]` || highmem ||
-// || `[0x0a000000000, 0x0ffffffffff]` || highshadow ||
-// || `[0x09000000000, 0x09fffffffff]` || shadowgap ||
-// || `[0x08000000000, 0x08fffffffff]` || lowshadow ||
-// || `[0x00000000000, 0x07fffffffff]` || lowmem ||
+// TODO: this mapping is ok, but the allocator size is too large on non-Android
+// AArch64 platforms (see asan_allocator.h)
+// || `[0x09000000000, 0x3ffffffffff]` || highmem || 3520GB
+// || `[0x02200000000, 0x08fffffffff]` || highshadow || 440GB
+// || `[0x01200000000, 0x021ffffffff]` || shadowgap || 64GB
+// || `[0x01000000000, 0x011ffffffff]` || lowshadow || 8GB
+// || `[0x00000000000, 0x00fffffffff]` || lowmem || 64GB
+//
+// Default Linux/AArch64 (48-bit VMA) mapping:
+// || `[0x201000000000, 0xffffffffffff]` || HighMem || 229312GB
+// || `[0x041200000000, 0x200fffffffff]` || HighShadow || 28664GB
+// || `[0x001200000000, 0x0411ffffffff]` || ShadowGap || 4096GB
+// || `[0x001000000000, 0x0011ffffffff]` || LowShadow || 8GB
+// || `[0x000000000000, 0x000fffffffff]` || LowMem || 64GB
//
// Default Linux/S390 mapping:
// || `[0x30000000, 0x7fffffff]` || HighMem ||
# elif defined(__aarch64__)
# define ASAN_SHADOW_OFFSET_CONST 0x0000001000000000
# elif defined(__powerpc64__)
-# define ASAN_SHADOW_OFFSET_CONST 0x0000020000000000
+# define ASAN_SHADOW_OFFSET_CONST 0x0000100000000000
# elif defined(__s390x__)
# define ASAN_SHADOW_OFFSET_CONST 0x0010000000000000
# elif SANITIZER_FREEBSD
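
For reference, the offsets above plug into the standard ASan shadow translation; a minimal sketch follows (the runtime's MEM_TO_SHADOW macro is the authoritative form; the scale and offset values are the defaults quoted in this file):

#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t kShadowScale = 3;  // ASAN_SHADOW_SCALE: 8 app bytes per shadow byte
  const uint64_t kShadowOffset = 0x0000001000000000;  // AArch64 constant above
  uint64_t addr = 0x2000000000;  // start of highmem in the 39-bit VMA layout
  uint64_t shadow = (addr >> kShadowScale) + kShadowOffset;
  // Prints 0x1400000000, the highshadow base in the 39-bit table above.
  printf("shadow(0x%llx) = 0x%llx\n", (unsigned long long)addr,
         (unsigned long long)shadow);
  return 0;
}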
// TODO(alekseyshl): throw std::bad_alloc instead of dying on OOM.
// For local pool allocation, align to SHADOW_GRANULARITY to match asan
// allocator behavior.
-#define OPERATOR_NEW_BODY(type, nothrow) \
- GET_STACK_TRACE_MALLOC; \
- void *res = asan_memalign(0, size, &stack, type); \
- if (!nothrow && UNLIKELY(!res)) \
- ReportOutOfMemory(size, &stack); \
- return res;
-#define OPERATOR_NEW_BODY_ALIGN(type, nothrow) \
- GET_STACK_TRACE_MALLOC; \
- void *res = asan_memalign((uptr)align, size, &stack, type); \
- if (!nothrow && UNLIKELY(!res)) \
- ReportOutOfMemory(size, &stack); \
- return res;
+#define OPERATOR_NEW_BODY \
+ GET_STACK_TRACE_MALLOC; \
+ void *res = asan_new(size, &stack); \
+ if (UNLIKELY(!res)) \
+ ReportOutOfMemory(size, &stack); \
+ return res
+#define OPERATOR_NEW_BODY_NOTHROW \
+ GET_STACK_TRACE_MALLOC; \
+ return asan_new(size, &stack)
+#define OPERATOR_NEW_BODY_ARRAY \
+ GET_STACK_TRACE_MALLOC; \
+ void *res = asan_new_array(size, &stack); \
+ if (UNLIKELY(!res)) \
+ ReportOutOfMemory(size, &stack); \
+ return res
+#define OPERATOR_NEW_BODY_ARRAY_NOTHROW \
+ GET_STACK_TRACE_MALLOC; \
+ return asan_new_array(size, &stack)
+#define OPERATOR_NEW_BODY_ALIGN \
+ GET_STACK_TRACE_MALLOC; \
+ void *res = asan_new_aligned(size, static_cast<uptr>(align), &stack); \
+ if (UNLIKELY(!res)) \
+ ReportOutOfMemory(size, &stack); \
+ return res
+#define OPERATOR_NEW_BODY_ALIGN_NOTHROW \
+ GET_STACK_TRACE_MALLOC; \
+ return asan_new_aligned(size, static_cast<uptr>(align), &stack)
+#define OPERATOR_NEW_BODY_ALIGN_ARRAY \
+ GET_STACK_TRACE_MALLOC; \
+ void *res = asan_new_array_aligned(size, static_cast<uptr>(align), &stack); \
+ if (UNLIKELY(!res)) \
+ ReportOutOfMemory(size, &stack); \
+ return res
+#define OPERATOR_NEW_BODY_ALIGN_ARRAY_NOTHROW \
+ GET_STACK_TRACE_MALLOC; \
+ return asan_new_array_aligned(size, static_cast<uptr>(align), &stack)
// On OS X it's not enough to just provide our own 'operator new' and
// 'operator delete' implementations, because they're going to be in the
// runtime dylib, and the main executable will depend on both the runtime
// dylib and libstdc++, exporting the same symbols. This may cause the
// situations when a malloc'ed object will be deleted by the
// implementation in libstdc++, resulting in a crash. To make sure that
// C++ allocation/deallocation operators are overridden on OS X we need to
// intercept them using their mangled names.
#if !SANITIZER_APPLE
CXX_OPERATOR_ATTRIBUTE
-void *operator new(size_t size)
-{ OPERATOR_NEW_BODY(FROM_NEW, false /*nothrow*/); }
+void *operator new(size_t size) { OPERATOR_NEW_BODY; }
CXX_OPERATOR_ATTRIBUTE
-void *operator new[](size_t size)
-{ OPERATOR_NEW_BODY(FROM_NEW_BR, false /*nothrow*/); }
+void *operator new[](size_t size) { OPERATOR_NEW_BODY_ARRAY; }
CXX_OPERATOR_ATTRIBUTE
-void *operator new(size_t size, std::nothrow_t const&)
-{ OPERATOR_NEW_BODY(FROM_NEW, true /*nothrow*/); }
+void *operator new(size_t size, std::nothrow_t const &) {
+ OPERATOR_NEW_BODY_NOTHROW;
+}
CXX_OPERATOR_ATTRIBUTE
-void *operator new[](size_t size, std::nothrow_t const&)
-{ OPERATOR_NEW_BODY(FROM_NEW_BR, true /*nothrow*/); }
+void *operator new[](size_t size, std::nothrow_t const &) {
+ OPERATOR_NEW_BODY_ARRAY_NOTHROW;
+}
CXX_OPERATOR_ATTRIBUTE
-void *operator new(size_t size, std::align_val_t align)
-{ OPERATOR_NEW_BODY_ALIGN(FROM_NEW, false /*nothrow*/); }
+void *operator new(size_t size, std::align_val_t align) {
+ OPERATOR_NEW_BODY_ALIGN;
+}
CXX_OPERATOR_ATTRIBUTE
-void *operator new[](size_t size, std::align_val_t align)
-{ OPERATOR_NEW_BODY_ALIGN(FROM_NEW_BR, false /*nothrow*/); }
+void *operator new[](size_t size, std::align_val_t align) {
+ OPERATOR_NEW_BODY_ALIGN_ARRAY;
+}
CXX_OPERATOR_ATTRIBUTE
-void *operator new(size_t size, std::align_val_t align, std::nothrow_t const&)
-{ OPERATOR_NEW_BODY_ALIGN(FROM_NEW, true /*nothrow*/); }
+void *operator new(size_t size, std::align_val_t align,
+ std::nothrow_t const &) {
+ OPERATOR_NEW_BODY_ALIGN_NOTHROW;
+}
CXX_OPERATOR_ATTRIBUTE
-void *operator new[](size_t size, std::align_val_t align, std::nothrow_t const&)
-{ OPERATOR_NEW_BODY_ALIGN(FROM_NEW_BR, true /*nothrow*/); }
+void *operator new[](size_t size, std::align_val_t align,
+ std::nothrow_t const &) {
+ OPERATOR_NEW_BODY_ALIGN_ARRAY_NOTHROW;
+}
#else // SANITIZER_APPLE
-INTERCEPTOR(void *, _Znwm, size_t size) {
- OPERATOR_NEW_BODY(FROM_NEW, false /*nothrow*/);
-}
-INTERCEPTOR(void *, _Znam, size_t size) {
- OPERATOR_NEW_BODY(FROM_NEW_BR, false /*nothrow*/);
-}
+INTERCEPTOR(void *, _Znwm, size_t size) { OPERATOR_NEW_BODY; }
+INTERCEPTOR(void *, _Znam, size_t size) { OPERATOR_NEW_BODY_ARRAY; }
INTERCEPTOR(void *, _ZnwmRKSt9nothrow_t, size_t size, std::nothrow_t const&) {
- OPERATOR_NEW_BODY(FROM_NEW, true /*nothrow*/);
+ OPERATOR_NEW_BODY_NOTHROW;
}
INTERCEPTOR(void *, _ZnamRKSt9nothrow_t, size_t size, std::nothrow_t const&) {
- OPERATOR_NEW_BODY(FROM_NEW_BR, true /*nothrow*/);
+ OPERATOR_NEW_BODY_ARRAY_NOTHROW;
}
#endif // !SANITIZER_APPLE
-#define OPERATOR_DELETE_BODY(type) \
+#define OPERATOR_DELETE_BODY \
+ GET_STACK_TRACE_FREE; \
+ asan_delete(ptr, &stack)
+#define OPERATOR_DELETE_BODY_ARRAY \
GET_STACK_TRACE_FREE; \
- asan_delete(ptr, 0, 0, &stack, type);
-
-#define OPERATOR_DELETE_BODY_SIZE(type) \
- GET_STACK_TRACE_FREE; \
- asan_delete(ptr, size, 0, &stack, type);
-
-#define OPERATOR_DELETE_BODY_ALIGN(type) \
+ asan_delete_array(ptr, &stack)
+#define OPERATOR_DELETE_BODY_ALIGN \
+ GET_STACK_TRACE_FREE; \
+ asan_delete_aligned(ptr, static_cast<uptr>(align), &stack)
+#define OPERATOR_DELETE_BODY_ALIGN_ARRAY \
GET_STACK_TRACE_FREE; \
- asan_delete(ptr, 0, static_cast<uptr>(align), &stack, type);
-
-#define OPERATOR_DELETE_BODY_SIZE_ALIGN(type) \
+ asan_delete_array_aligned(ptr, static_cast<uptr>(align), &stack)
+#define OPERATOR_DELETE_BODY_SIZE \
+ GET_STACK_TRACE_FREE; \
+ asan_delete_sized(ptr, size, &stack)
+#define OPERATOR_DELETE_BODY_SIZE_ARRAY \
+ GET_STACK_TRACE_FREE; \
+ asan_delete_array_sized(ptr, size, &stack)
+#define OPERATOR_DELETE_BODY_SIZE_ALIGN \
+ GET_STACK_TRACE_FREE; \
+ asan_delete_sized_aligned(ptr, size, static_cast<uptr>(align), &stack)
+#define OPERATOR_DELETE_BODY_SIZE_ALIGN_ARRAY \
GET_STACK_TRACE_FREE; \
- asan_delete(ptr, size, static_cast<uptr>(align), &stack, type);
+ asan_delete_array_sized_aligned(ptr, size, static_cast<uptr>(align), &stack)
#if !SANITIZER_APPLE
CXX_OPERATOR_ATTRIBUTE
-void operator delete(void *ptr) NOEXCEPT
-{ OPERATOR_DELETE_BODY(FROM_NEW); }
+void operator delete(void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY; }
CXX_OPERATOR_ATTRIBUTE
-void operator delete[](void *ptr) NOEXCEPT
-{ OPERATOR_DELETE_BODY(FROM_NEW_BR); }
+void operator delete[](void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY_ARRAY; }
CXX_OPERATOR_ATTRIBUTE
-void operator delete(void *ptr, std::nothrow_t const&)
-{ OPERATOR_DELETE_BODY(FROM_NEW); }
+void operator delete(void *ptr, std::nothrow_t const &) {
+ OPERATOR_DELETE_BODY;
+}
CXX_OPERATOR_ATTRIBUTE
-void operator delete[](void *ptr, std::nothrow_t const&)
-{ OPERATOR_DELETE_BODY(FROM_NEW_BR); }
+void operator delete[](void *ptr, std::nothrow_t const &) {
+ OPERATOR_DELETE_BODY_ARRAY;
+}
CXX_OPERATOR_ATTRIBUTE
-void operator delete(void *ptr, size_t size) NOEXCEPT
-{ OPERATOR_DELETE_BODY_SIZE(FROM_NEW); }
+void operator delete(void *ptr, size_t size) NOEXCEPT {
+ OPERATOR_DELETE_BODY_SIZE;
+}
CXX_OPERATOR_ATTRIBUTE
-void operator delete[](void *ptr, size_t size) NOEXCEPT
-{ OPERATOR_DELETE_BODY_SIZE(FROM_NEW_BR); }
+void operator delete[](void *ptr, size_t size) NOEXCEPT {
+ OPERATOR_DELETE_BODY_SIZE_ARRAY;
+}
CXX_OPERATOR_ATTRIBUTE
-void operator delete(void *ptr, std::align_val_t align) NOEXCEPT
-{ OPERATOR_DELETE_BODY_ALIGN(FROM_NEW); }
+void operator delete(void *ptr, std::align_val_t align) NOEXCEPT {
+ OPERATOR_DELETE_BODY_ALIGN;
+}
CXX_OPERATOR_ATTRIBUTE
-void operator delete[](void *ptr, std::align_val_t align) NOEXCEPT
-{ OPERATOR_DELETE_BODY_ALIGN(FROM_NEW_BR); }
+void operator delete[](void *ptr, std::align_val_t align) NOEXCEPT {
+ OPERATOR_DELETE_BODY_ALIGN_ARRAY;
+}
CXX_OPERATOR_ATTRIBUTE
-void operator delete(void *ptr, std::align_val_t align, std::nothrow_t const&)
-{ OPERATOR_DELETE_BODY_ALIGN(FROM_NEW); }
+void operator delete(void *ptr, std::align_val_t align,
+ std::nothrow_t const &) {
+ OPERATOR_DELETE_BODY_ALIGN;
+}
CXX_OPERATOR_ATTRIBUTE
-void operator delete[](void *ptr, std::align_val_t align, std::nothrow_t const&)
-{ OPERATOR_DELETE_BODY_ALIGN(FROM_NEW_BR); }
+void operator delete[](void *ptr, std::align_val_t align,
+ std::nothrow_t const &) {
+ OPERATOR_DELETE_BODY_ALIGN_ARRAY;
+}
CXX_OPERATOR_ATTRIBUTE
-void operator delete(void *ptr, size_t size, std::align_val_t align) NOEXCEPT
-{ OPERATOR_DELETE_BODY_SIZE_ALIGN(FROM_NEW); }
+void operator delete(void *ptr, size_t size, std::align_val_t align) NOEXCEPT {
+ OPERATOR_DELETE_BODY_SIZE_ALIGN;
+}
CXX_OPERATOR_ATTRIBUTE
-void operator delete[](void *ptr, size_t size, std::align_val_t align) NOEXCEPT
-{ OPERATOR_DELETE_BODY_SIZE_ALIGN(FROM_NEW_BR); }
+void operator delete[](void *ptr, size_t size,
+ std::align_val_t align) NOEXCEPT {
+ OPERATOR_DELETE_BODY_SIZE_ALIGN_ARRAY;
+}
#else // SANITIZER_APPLE
-INTERCEPTOR(void, _ZdlPv, void *ptr)
-{ OPERATOR_DELETE_BODY(FROM_NEW); }
-INTERCEPTOR(void, _ZdaPv, void *ptr)
-{ OPERATOR_DELETE_BODY(FROM_NEW_BR); }
-INTERCEPTOR(void, _ZdlPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&)
-{ OPERATOR_DELETE_BODY(FROM_NEW); }
-INTERCEPTOR(void, _ZdaPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&)
-{ OPERATOR_DELETE_BODY(FROM_NEW_BR); }
+INTERCEPTOR(void, _ZdlPv, void *ptr) { OPERATOR_DELETE_BODY; }
+INTERCEPTOR(void, _ZdaPv, void *ptr) { OPERATOR_DELETE_BODY_ARRAY; }
+INTERCEPTOR(void, _ZdlPvRKSt9nothrow_t, void *ptr, std::nothrow_t const &) {
+ OPERATOR_DELETE_BODY;
+}
+INTERCEPTOR(void, _ZdaPvRKSt9nothrow_t, void *ptr, std::nothrow_t const &) {
+ OPERATOR_DELETE_BODY_ARRAY;
+}
#endif // !SANITIZER_APPLE
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_interface_internal.h"
#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_ring_buffer.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
namespace __asan {
+using PoisonRecordRingBuffer = RingBuffer<PoisonRecord>;
+
static atomic_uint8_t can_poison_memory;
+static Mutex poison_records_mutex;
+static PoisonRecordRingBuffer *poison_records
+ SANITIZER_GUARDED_BY(poison_records_mutex) = nullptr;
+
+void AddPoisonRecord(const PoisonRecord &new_record) {
+ if (flags()->poison_history_size <= 0)
+ return;
+
+ GenericScopedLock<Mutex> l(&poison_records_mutex);
+
+ if (poison_records == nullptr)
+ poison_records = PoisonRecordRingBuffer::New(flags()->poison_history_size);
+
+ poison_records->push(new_record);
+}
+
+bool FindPoisonRecord(uptr addr, PoisonRecord &match) {
+ if (flags()->poison_history_size <= 0)
+ return false;
+
+ GenericScopedLock<Mutex> l(&poison_records_mutex);
+
+ if (poison_records) {
+ for (unsigned int i = 0; i < poison_records->size(); i++) {
+ PoisonRecord record = (*poison_records)[i];
+ if (record.begin <= addr && addr < record.end) {
+ internal_memcpy(&match, &record, sizeof(record));
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+void SANITIZER_ACQUIRE(poison_records_mutex) AcquirePoisonRecords() {
+ poison_records_mutex.Lock();
+}
+
+void SANITIZER_RELEASE(poison_records_mutex) ReleasePoisonRecords() {
+ poison_records_mutex.Unlock();
+}
+
void SetCanPoisonMemory(bool value) {
atomic_store(&can_poison_memory, value, memory_order_release);
}
uptr end_addr = beg_addr + size;
VPrintf(3, "Trying to poison memory region [%p, %p)\n", (void *)beg_addr,
(void *)end_addr);
+
+ if (flags()->poison_history_size > 0) {
+ GET_STACK_TRACE(/*max_size=*/16, /*fast=*/false);
+ u32 current_tid = GetCurrentTidOrInvalid();
+
+ u32 stack_id = StackDepotPut(stack);
+
+ PoisonRecord record;
+ record.stack_id = stack_id;
+ record.thread_id = current_tid;
+ record.begin = beg_addr;
+ record.end = end_addr;
+ AddPoisonRecord(record);
+ }
+
ShadowSegmentEndpoint beg(beg_addr);
ShadowSegmentEndpoint end(end_addr);
if (beg.chunk == end.chunk) {
uptr end_addr = beg_addr + size;
VPrintf(3, "Trying to unpoison memory region [%p, %p)\n", (void *)beg_addr,
(void *)end_addr);
+
+ // Note: we don't need to update the poison tracking here. Since the shadow
+ // memory will be unpoisoned, the poison tracking ring buffer entries will be
+ // ignored.
+
ShadowSegmentEndpoint beg(beg_addr);
ShadowSegmentEndpoint end(end_addr);
if (beg.chunk == end.chunk) {
// Shadow memory poisoning by ASan RTL and by user application.
//===----------------------------------------------------------------------===//
+#ifndef ASAN_POISONING_H
+#define ASAN_POISONING_H
+
#include "asan_interceptors.h"
#include "asan_internal.h"
#include "asan_mapping.h"
namespace __asan {
+struct PoisonRecord {
+ u32 stack_id;
+ u32 thread_id;
+ uptr begin;
+ uptr end;
+};
+
+void AddPoisonRecord(const PoisonRecord &new_record);
+bool FindPoisonRecord(uptr addr, PoisonRecord &match);
+
+void AcquirePoisonRecords();
+void ReleasePoisonRecords();
+
// Enable/disable memory poisoning.
void SetCanPoisonMemory(bool value);
bool CanPoisonMemory();
void FlushUnneededASanShadowMemory(uptr p, uptr size);
} // namespace __asan
+
+#endif // ASAN_POISONING_H
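(Editorial aside: a minimal sketch, not part of the merge, of how the poison-history machinery above is exercised. `__asan_poison_memory_region` is the public entry point that reaches the recording code; the flag value and program are illustrative.)

// Build with -fsanitize=address; run with ASAN_OPTIONS=poison_history_size=1000
// so that AddPoisonRecord is active.
#include <sanitizer/asan_interface.h>

int main() {
  static char buf[64];
  // Recorded as a PoisonRecord (stack id, thread id, [begin, end)).
  __asan_poison_memory_region(buf, sizeof(buf));
  // Use-after-poison: FindPoisonRecord lets the report attribute the poisoning.
  return buf[10];
}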
// stuff we need.
__lsan::LockThreads();
__lsan::LockAllocator();
+
+ AcquirePoisonRecords();
+
StackDepotLockBeforeFork();
}
static void AfterFork(bool fork_child) {
StackDepotUnlockAfterFork(fork_child);
+
+ ReleasePoisonRecords();
+
// `__lsan` functions are defined regardless of `CAN_SANITIZE_LEAKS` and
// unlock the stuff we need.
__lsan::UnlockAllocator();
void InstallAtForkHandler() {
# if SANITIZER_SOLARIS || SANITIZER_NETBSD || SANITIZER_APPLE || \
- (SANITIZER_LINUX && SANITIZER_SPARC)
+ (SANITIZER_LINUX && SANITIZER_SPARC) || SANITIZER_HAIKU
// While other Linux targets use clone in internal_fork which doesn't
// trigger pthread_atfork handlers, Linux/sparc64 uses __fork, causing a
// hang.
#include "asan_scariness_score.h"
#include "asan_stack.h"
#include "asan_thread.h"
+#include "lsan/lsan_common.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_interface_internal.h"
public:
explicit ScopedInErrorReport(bool fatal = false)
: halt_on_error_(fatal || flags()->halt_on_error) {
+ // Deadlock Prevention Between ASan and LSan
+ //
+ // Background:
+ // - The `dl_iterate_phdr` function requires holding libdl's internal lock
+ // (Lock A).
+ // - LSan acquires the ASan thread registry lock (Lock B) *after* calling
+ // `dl_iterate_phdr`.
+ //
+ // Problem Scenario:
+ // When ASan attempts to call `dl_iterate_phdr` while holding Lock B (e.g.,
+ // during error reporting via `ErrorDescription::Print`), a circular lock
+ // dependency may occur:
+ // 1. Thread 1: Holds Lock B → Requests Lock A (via dl_iterate_phdr)
+ // 2. Thread 2: Holds Lock A → Requests Lock B (via LSan operations)
+ //
+ // Solution:
+ // Proactively load all required modules before acquiring Lock B.
+ // This ensures:
+ // 1. Any `dl_iterate_phdr` calls during module loading complete before
+ // locking.
+ // 2. Subsequent error reporting avoids nested lock acquisition patterns.
+  // 3. The lock-order inversion risk between libdl and ASan's thread
+  //    registry is eliminated.
+#if CAN_SANITIZE_LEAKS && (SANITIZER_LINUX || SANITIZER_NETBSD)
+ Symbolizer::GetOrInit()->GetRefreshedListOfModules();
+#endif
+
// Make sure the registry and sanitizer report mutexes are locked while
// we're printing an error report.
// We can lock them only here to avoid self-deadlock in case of
} // extern "C"
// Provide default implementation of __asan_on_error that does nothing
-// and may be overriden by user.
+// and may be overridden by user.
SANITIZER_INTERFACE_WEAK_DEF(void, __asan_on_error, void) {}
kHighShadowBeg > kMidMemEnd);
}
+// Apply most options specified either through the ASAN_OPTIONS
+// environment variable, or through the `__asan_default_options` user function.
+//
+// This function may be called multiple times, once per weak reference callback
+// on Windows, so it needs to be idempotent.
+//
+// Context:
+// For maximum compatibility on Windows, it is necessary for ASan options to be
+// configured/registered/applied inside this function (instead of in
+// ASanInitInternal, for example). That's because, on Windows, the user-provided
+// definition for `__asan_default_options` may not be bound when `ASanInitInternal`
+// is invoked (it is bound later).
+//
+// To work around the late binding on Windows, `ApplyFlags` will be called
+// again after binding to the user-provided `__asan_default_options` function.
+// Therefore, any flags not configured here are not guaranteed to be
+// configurable through `__asan_default_opts` on Windows.
+//
+//
+// For more details on this issue, see:
+// https://github.com/llvm/llvm-project/issues/117925
+void ApplyFlags() {
+ SetCanPoisonMemory(flags()->poison_heap);
+ SetMallocContextSize(common_flags()->malloc_context_size);
+
+ __asan_option_detect_stack_use_after_return =
+ flags()->detect_stack_use_after_return;
+
+ AllocatorOptions allocator_options;
+ allocator_options.SetFrom(flags(), common_flags());
+ ApplyAllocatorOptions(allocator_options);
+}
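(Editorial aside: a concrete sketch of the user hook discussed above. The flag names are real ASan flags touched by ApplyFlags; the values are examples, not recommendations.)

extern "C" const char *__asan_default_options() {
  return "poison_heap=1:detect_stack_use_after_return=1:malloc_context_size=30";
}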
+
static bool AsanInitInternal() {
if (LIKELY(AsanInited()))
return true;
CacheBinaryName();
- // Initialize flags. This must be done early, because most of the
- // initialization steps look at flags().
+  // Initialize flags. On Windows this also registers weak function callbacks.
+ // This must be done early, because most of the initialization steps look at
+ // flags().
InitializeFlags();
WaitForDebugger(flags()->sleep_before_init, "before init");
AsanCheckDynamicRTPrereqs();
AvoidCVE_2016_2143();
- SetCanPoisonMemory(flags()->poison_heap);
- SetMallocContextSize(common_flags()->malloc_context_size);
-
InitializePlatformExceptionHandlers();
InitializeHighMemEnd();
SetPrintfAndReportCallback(AppendToErrorMessageBuffer);
__sanitizer_set_report_path(common_flags()->log_path);
-
- __asan_option_detect_stack_use_after_return =
- flags()->detect_stack_use_after_return;
-
__sanitizer::InitializePlatformEarly();
// Setup internal allocator callback.
DisableCoreDumperIfNecessary();
+#if SANITIZER_POSIX
+ if (StackSizeIsUnlimited()) {
+ VPrintf(1,
+ "WARNING: Unlimited stack size detected. This may affect "
+ "compatibility with the shadow mappings.\n");
+ // MSan and TSan re-exec with a fixed size stack. We don't do that because
+ // it may break the program. InitializeShadowMemory() will, if needed,
+ // re-exec without ASLR, which solves most shadow mapping compatibility
+ // issues.
+ }
+#endif // SANITIZER_POSIX
+
InitializeShadowMemory();
AsanTSDInit(PlatformTSDDtor);
allocator_options.SetFrom(flags(), common_flags());
InitializeAllocator(allocator_options);
+ // Apply ASan flags.
+ // NOTE: In order for options specified through `__asan_default_options` to be
+ // honored on Windows, it is necessary for those options to be configured
+  // inside the `ApplyFlags` function. See the function-level comment for
+ // `ApplyFlags` for more details.
+ ApplyFlags();
+
if (SANITIZER_START_BACKGROUND_THREAD_IN_ASAN_INTERNAL)
MaybeStartBackgroudThread();
AsanThread *main_thread = CreateMainThread();
CHECK_EQ(0, main_thread->tid());
force_interface_symbols(); // no-op.
- SanitizerInitializeUnwinder();
if (CAN_SANITIZE_LEAKS) {
__lsan::InitCommonLsan();
#include "sanitizer_common/sanitizer_platform.h"
.file "asan_rtl_x86_64.S"
+.att_syntax
#define NAME(n, reg, op, s, i) n##_##op##_##i##_##s##_##reg
ProtectGap(kShadowGap2Beg, kShadowGap2End - kShadowGap2Beg + 1);
ProtectGap(kShadowGap3Beg, kShadowGap3End - kShadowGap3Beg + 1);
} else {
+ // ASan's mappings can usually shadow the entire address space, even with
+ // maximum ASLR entropy. However:
+  //   - On 32-bit systems, the maximum ASLR entropy (currently up to 16 bits
+  //     == 256MB) is a significant chunk of the address space; reclaiming it
+  //     by disabling ASLR might allow larger binaries to run.
+ // - On 64-bit systems, some settings (e.g., for Linux, unlimited stack
+ // size plus 31+ bits of entropy) can lead to an incompatible layout.
+ TryReExecWithoutASLR();
+
Report(
"Shadow memory range interleaves with an existing memory mapping. "
"ASan cannot proceed correctly. ABORTING.\n");
static const char kInterceptorViaFunction[] = "interceptor_via_fun";
static const char kInterceptorViaLibrary[] = "interceptor_via_lib";
static const char kODRViolation[] = "odr_violation";
+static const char kAllocDeallocMismatch[] = "alloc_dealloc_mismatch";
static const char *kSuppressionTypes[] = {
kInterceptorName, kInterceptorViaFunction, kInterceptorViaLibrary,
- kODRViolation};
+ kODRViolation, kAllocDeallocMismatch};
SANITIZER_INTERFACE_WEAK_DEF(const char *, __asan_default_suppressions, void) {
return "";
return suppression_ctx->Match(global_var_name, kODRViolation, &s);
}
+bool IsAddrSuppressed(const char *suppression, Symbolizer *symbolizer,
+ uptr addr) {
+ CHECK(suppression_ctx);
+ CHECK(suppression_ctx->HasSuppressionType(suppression));
+ CHECK(symbolizer);
+ SymbolizedStackHolder symbolized_stack(symbolizer->SymbolizePC(addr));
+ const SymbolizedStack *frames = symbolized_stack.get();
+ CHECK(frames);
+ for (const SymbolizedStack *cur = frames; cur; cur = cur->next) {
+ const char *function_name = cur->info.function;
+ if (!function_name) {
+ continue;
+ }
+ // Match suppressions.
+ Suppression *s;
+ if (suppression_ctx->Match(function_name, suppression, &s)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+bool IsAllocDeallocMismatchSuppressed(const StackTrace *stack) {
+ CHECK(suppression_ctx);
+ if (!suppression_ctx->HasSuppressionType(kAllocDeallocMismatch)) {
+ return false;
+ }
+ Symbolizer *symbolizer = Symbolizer::GetOrInit();
+ for (uptr i = 0; i < stack->size && stack->trace[i]; i++) {
+ uptr addr = stack->trace[i];
+ // Match "alloc_dealloc_mismatch" suppressions.
+ if (IsAddrSuppressed(kAllocDeallocMismatch, symbolizer, addr)) {
+ return true;
+ }
+ }
+ return false;
+}
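(Editorial aside: a hypothetical suppressions file using the new type; the function name is invented. It would be enabled with something like ASAN_OPTIONS=alloc_dealloc_mismatch=1:suppressions=asan.supp.)

# asan.supp
alloc_dealloc_mismatch:LegacyFreeWrapper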
+
bool IsStackTraceSuppressed(const StackTrace *stack) {
if (!HaveStackTraceBasedSuppressions())
return false;
}
if (suppression_ctx->HasSuppressionType(kInterceptorViaFunction)) {
- SymbolizedStackHolder symbolized_stack(symbolizer->SymbolizePC(addr));
- const SymbolizedStack *frames = symbolized_stack.get();
- CHECK(frames);
- for (const SymbolizedStack *cur = frames; cur; cur = cur->next) {
- const char *function_name = cur->info.function;
- if (!function_name) {
- continue;
- }
- // Match "interceptor_via_fun" suppressions.
- if (suppression_ctx->Match(function_name, kInterceptorViaFunction,
- &s)) {
- return true;
- }
+      // Match "interceptor_via_fun" suppressions.
+ if (IsAddrSuppressed(kInterceptorViaFunction, symbolizer, addr)) {
+ return true;
}
}
}
bool HaveStackTraceBasedSuppressions();
bool IsStackTraceSuppressed(const StackTrace *stack);
bool IsODRViolationSuppressed(const char *global_var_name);
+bool IsAllocDeallocMismatchSuppressed(const StackTrace *stack);
} // namespace __asan
if (fake_stack_save)
*fake_stack_save = fake_stack_;
fake_stack_ = nullptr;
- SetTLSFakeStack(nullptr);
+ ResetTLSFakeStack();
// if fake_stack_save is null, the fiber will die, delete the fakestack
if (!fake_stack_save && current_fake_stack)
current_fake_stack->Destroy(this->tid());
}
if (fake_stack_save) {
- SetTLSFakeStack(fake_stack_save);
fake_stack_ = fake_stack_save;
+ ResetTLSFakeStack();
}
if (bottom_old)
Max(stack_size_log, static_cast<uptr>(flags()->min_uar_stack_size_log));
fake_stack_ = FakeStack::Create(stack_size_log);
DCHECK_EQ(GetCurrentThread(), this);
- SetTLSFakeStack(fake_stack_);
+ ResetTLSFakeStack();
return fake_stack_;
}
return nullptr;
// asan_fuchsia.c defines CreateMainThread and SetThreadStackAndTls.
#if !SANITIZER_FUCHSIA
-void AsanThread::ThreadStart(tid_t os_id) {
+void AsanThread::ThreadStart(ThreadID os_id) {
Init();
asanThreadRegistry().StartThread(tid(), os_id, ThreadType::Regular, nullptr);
context->os_id = GetTid();
}
-__asan::AsanThread *GetAsanThreadByOsIDLocked(tid_t os_id) {
+__asan::AsanThread *GetAsanThreadByOsIDLocked(ThreadID os_id) {
__asan::AsanThreadContext *context = static_cast<__asan::AsanThreadContext *>(
__asan::asanThreadRegistry().FindThreadContextByOsIDLocked(os_id));
if (!context)
void EnsureMainThreadIDIsCorrect() { __asan::EnsureMainThreadIDIsCorrect(); }
-bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
+bool GetThreadRangesLocked(ThreadID os_id, uptr *stack_begin, uptr *stack_end,
uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
uptr *cache_end, DTLS **dtls) {
__asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id);
void GetAllThreadAllocatorCachesLocked(InternalMmapVector<uptr> *caches) {}
-void GetThreadExtraStackRangesLocked(tid_t os_id,
+void GetThreadExtraStackRangesLocked(ThreadID os_id,
InternalMmapVector<Range> *ranges) {
__asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id);
if (!t)
__asan::asanThreadArgRetval().GetAllPtrsLocked(ptrs);
}
-void GetRunningThreadsLocked(InternalMmapVector<tid_t> *threads) {
+void GetRunningThreadsLocked(InternalMmapVector<ThreadID> *threads) {
GetAsanThreadRegistryLocked()->RunCallbackForEachThreadLocked(
[](ThreadContextBase *tctx, void *threads) {
if (tctx->status == ThreadStatusRunning)
- reinterpret_cast<InternalMmapVector<tid_t> *>(threads)->push_back(
+ reinterpret_cast<InternalMmapVector<ThreadID> *>(threads)->push_back(
tctx->os_id);
},
threads);
struct InitOptions;
void Init(const InitOptions *options = nullptr);
- void ThreadStart(tid_t os_id);
+ void ThreadStart(ThreadID os_id);
thread_return_t RunThread();
uptr stack_top();
if (!fake_stack_) return;
FakeStack *t = fake_stack_;
fake_stack_ = nullptr;
- SetTLSFakeStack(nullptr);
+ ResetTLSFakeStack();
t->Destroy(tid);
}
t->GetStartData(params);
auto res = (*params.start_routine)(params.arg);
- t->Destroy(); // POSIX calls this from TSD destructor.
return res;
}
thr_flags, tid);
}
+INTERCEPTOR_WINAPI(void, ExitThread, DWORD dwExitCode) {
+ AsanThread *t = (AsanThread *)__asan::GetCurrentThread();
+ if (t)
+ t->Destroy();
+ REAL(ExitThread)(dwExitCode);
+}
+
// }}}
namespace __asan {
(LPCWSTR)&InitializePlatformInterceptors, &pinned));
ASAN_INTERCEPT_FUNC(CreateThread);
+ ASAN_INTERCEPT_FUNC(ExitThread);
ASAN_INTERCEPT_FUNC(SetUnhandledExceptionFilter);
#ifdef _WIN64
GetMmapGranularity());
}
+// Re-exec without ASLR is not used on this platform.
+void TryReExecWithoutASLR() {}
+
void AsanCheckDynamicRTPrereqs() {}
void AsanCheckIncompatibleRT() {}
INTERCEPT_LIBRARY_FUNCTION_ASAN(strspn);
INTERCEPT_LIBRARY_FUNCTION_ASAN(strstr);
INTERCEPT_LIBRARY_FUNCTION_ASAN(strtok);
+INTERCEPT_LIBRARY_FUNCTION_ASAN(wcscat);
+INTERCEPT_LIBRARY_FUNCTION_ASAN(wcscpy);
+INTERCEPT_LIBRARY_FUNCTION_ASAN(wcsncat);
+INTERCEPT_LIBRARY_FUNCTION_ASAN(wcsncpy);
INTERCEPT_LIBRARY_FUNCTION_ASAN(wcslen);
INTERCEPT_LIBRARY_FUNCTION_ASAN(wcsnlen);
#ifndef COMPILERRT_ASSEMBLY_H
#define COMPILERRT_ASSEMBLY_H
-#if defined(__linux__) && defined(__CET__)
+#ifdef __CET__
#if __has_include(<cet.h>)
#include <cet.h>
#endif
#define LOCAL_LABEL(name) .L ## name
#define FILE_LEVEL_DIRECTIVE
#define SYMBOL_IS_FUNC(name) \
- .def name SEPARATOR \
+ .def FUNC_SYMBOL(name) SEPARATOR \
.scl 2 SEPARATOR \
.type 32 SEPARATOR \
.endef
#endif
-#if defined(__arm__) || defined(__aarch64__)
+#if defined(__aarch64__) && defined(__ELF__) && \
+ defined(COMPILER_RT_EXECUTE_ONLY_CODE)
+// The assembler always creates an implicit '.text' section with default flags
+// (SHF_ALLOC | SHF_EXECINSTR), which is incompatible with the execute-only
+// '.text' section we want to create here because of the missing
+// SHF_AARCH64_PURECODE section flag. To solve this, we use 'unique,0' to
+// differentiate the two sections. The output will therefore have two separate
+// sections named '.text', where code will be placed into the execute-only
+// '.text' section, and the implicitly-created one will be empty.
+#define TEXT_SECTION \
+ .section .text,"axy",@progbits,unique,0
+#else
+#define TEXT_SECTION \
+ .text
+#endif
+
+#if defined(__arm__) || defined(__aarch64__) || defined(__arm64ec__)
#define FUNC_ALIGN \
- .text SEPARATOR \
.balign 16 SEPARATOR
#else
#define FUNC_ALIGN
#endif
-// BTI and PAC gnu property note
+// BTI, PAC, and GCS gnu property note
#define NT_GNU_PROPERTY_TYPE_0 5
#define GNU_PROPERTY_AARCH64_FEATURE_1_AND 0xc0000000
#define GNU_PROPERTY_AARCH64_FEATURE_1_BTI 1
#define GNU_PROPERTY_AARCH64_FEATURE_1_PAC 2
+#define GNU_PROPERTY_AARCH64_FEATURE_1_GCS 4
#if defined(__ARM_FEATURE_BTI_DEFAULT)
#define BTI_FLAG GNU_PROPERTY_AARCH64_FEATURE_1_BTI
#define PAC_FLAG 0
#endif
+#if defined(__ARM_FEATURE_GCS_DEFAULT)
+#define GCS_FLAG GNU_PROPERTY_AARCH64_FEATURE_1_GCS
+#else
+#define GCS_FLAG 0
+#endif
+
#define GNU_PROPERTY(type, value) \
.pushsection .note.gnu.property, "a" SEPARATOR \
.p2align 3 SEPARATOR \
#define BTI_J
#endif
-#if (BTI_FLAG | PAC_FLAG) != 0
-#define GNU_PROPERTY_BTI_PAC \
- GNU_PROPERTY(GNU_PROPERTY_AARCH64_FEATURE_1_AND, BTI_FLAG | PAC_FLAG)
+#if (BTI_FLAG | PAC_FLAG | GCS_FLAG) != 0
+#define GNU_PROPERTY_BTI_PAC_GCS \
+ GNU_PROPERTY(GNU_PROPERTY_AARCH64_FEATURE_1_AND, \
+ BTI_FLAG | PAC_FLAG | GCS_FLAG)
#else
-#define GNU_PROPERTY_BTI_PAC
+#define GNU_PROPERTY_BTI_PAC_GCS
#endif
#if defined(__clang__) || defined(__GCC_HAVE_DWARF2_CFI_ASM)
#else
#define WIDE(op) op
#endif
+
+#if defined(__ARM_FEATURE_PAC_DEFAULT) && defined(__ARM_FEATURE_BTI_DEFAULT)
+#define PACBTI_LANDING pacbti r12, lr, sp
+#elif defined(__ARM_FEATURE_PAC_DEFAULT)
+#define PACBTI_LANDING pac r12, lr, sp
+#elif defined(__ARM_FEATURE_BTI_DEFAULT)
+#define PACBTI_LANDING bti
+#else
+#define PACBTI_LANDING
+#endif
+
+#if defined(__ARM_FEATURE_PAUTH)
+#define PAC_RETURN bxaut r12, lr, sp
+#else
+#define PAC_RETURN aut r12, lr, sp SEPARATOR bx lr
+#endif
+
#else // !defined(__arm)
#define DECLARE_FUNC_ENCODING
#define DEFINE_CODE_STATE
#define GLUE4(a, b, c, d) GLUE4_(a, b, c, d)
#define SYMBOL_NAME(name) GLUE(__USER_LABEL_PREFIX__, name)
+#ifndef __arm64ec__
+#define FUNC_SYMBOL(name) name
+#else
+// On ARM64EC, function names and calls (but not address-taking or data symbol
+// references) use symbols prefixed with "#".
+#define QUOTE(a) #a
+#define STR(a) QUOTE(a)
+#define HASH #
+#define FUNC_SYMBOL(name) STR(GLUE2(HASH, name))
+#endif
#ifdef VISIBILITY_HIDDEN
#define DECLARE_SYMBOL_VISIBILITY(name) \
#endif
#define DEFINE_COMPILERRT_FUNCTION(name) \
+ TEXT_SECTION SEPARATOR \
DEFINE_CODE_STATE \
FILE_LEVEL_DIRECTIVE SEPARATOR \
- .globl SYMBOL_NAME(name) SEPARATOR \
+ .globl FUNC_SYMBOL(SYMBOL_NAME(name)) SEPARATOR \
SYMBOL_IS_FUNC(SYMBOL_NAME(name)) SEPARATOR \
DECLARE_SYMBOL_VISIBILITY(name) \
DECLARE_FUNC_ENCODING \
- SYMBOL_NAME(name):
+ FUNC_SYMBOL(SYMBOL_NAME(name)):
#define DEFINE_COMPILERRT_THUMB_FUNCTION(name) \
+ TEXT_SECTION SEPARATOR \
DEFINE_CODE_STATE \
FILE_LEVEL_DIRECTIVE SEPARATOR \
- .globl SYMBOL_NAME(name) SEPARATOR \
+ .globl FUNC_SYMBOL(SYMBOL_NAME(name)) SEPARATOR \
SYMBOL_IS_FUNC(SYMBOL_NAME(name)) SEPARATOR \
DECLARE_SYMBOL_VISIBILITY(name) SEPARATOR \
.thumb_func SEPARATOR \
- SYMBOL_NAME(name):
+ FUNC_SYMBOL(SYMBOL_NAME(name)):
#define DEFINE_COMPILERRT_PRIVATE_FUNCTION(name) \
+ TEXT_SECTION SEPARATOR \
DEFINE_CODE_STATE \
FILE_LEVEL_DIRECTIVE SEPARATOR \
- .globl SYMBOL_NAME(name) SEPARATOR \
+ .globl FUNC_SYMBOL(SYMBOL_NAME(name)) SEPARATOR \
SYMBOL_IS_FUNC(SYMBOL_NAME(name)) SEPARATOR \
HIDDEN(SYMBOL_NAME(name)) SEPARATOR \
DECLARE_FUNC_ENCODING \
- SYMBOL_NAME(name):
+ FUNC_SYMBOL(SYMBOL_NAME(name)):
#define DEFINE_COMPILERRT_PRIVATE_FUNCTION_UNMANGLED(name) \
+ TEXT_SECTION SEPARATOR \
DEFINE_CODE_STATE \
- .globl name SEPARATOR \
+ .globl FUNC_SYMBOL(name) SEPARATOR \
SYMBOL_IS_FUNC(name) SEPARATOR \
HIDDEN(name) SEPARATOR \
DECLARE_FUNC_ENCODING \
- name:
+ FUNC_SYMBOL(name):
#define DEFINE_COMPILERRT_OUTLINE_FUNCTION_UNMANGLED(name) \
+ TEXT_SECTION SEPARATOR \
DEFINE_CODE_STATE \
FUNC_ALIGN \
- .globl name SEPARATOR \
+ .globl FUNC_SYMBOL(name) SEPARATOR \
SYMBOL_IS_FUNC(name) SEPARATOR \
- DECLARE_SYMBOL_VISIBILITY_UNMANGLED(name) SEPARATOR \
+ DECLARE_SYMBOL_VISIBILITY_UNMANGLED(FUNC_SYMBOL(name)) SEPARATOR \
DECLARE_FUNC_ENCODING \
- name: \
+ FUNC_SYMBOL(name): \
SEPARATOR CFI_START \
SEPARATOR BTI_C
#define DEFINE_COMPILERRT_FUNCTION_ALIAS(name, target) \
- .globl SYMBOL_NAME(name) SEPARATOR \
+ .globl FUNC_SYMBOL(SYMBOL_NAME(name)) SEPARATOR \
SYMBOL_IS_FUNC(SYMBOL_NAME(name)) SEPARATOR \
DECLARE_SYMBOL_VISIBILITY(name) SEPARATOR \
- .set SYMBOL_NAME(name), SYMBOL_NAME(target) SEPARATOR
+ .set FUNC_SYMBOL(SYMBOL_NAME(name)), FUNC_SYMBOL(SYMBOL_NAME(target)) SEPARATOR
#if defined(__ARM_EABI__)
#define DEFINE_AEABI_FUNCTION_ALIAS(aeabi_name, name) \
CFI_END
#endif
+#ifdef __arm__
+#include "int_endianness.h"
+
+#if _YUGA_BIG_ENDIAN
+#define VMOV_TO_DOUBLE(dst, src0, src1) vmov dst, src1, src0 SEPARATOR
+#define VMOV_FROM_DOUBLE(dst0, dst1, src) vmov dst1, dst0, src SEPARATOR
+#else
+#define VMOV_TO_DOUBLE(dst, src0, src1) vmov dst, src0, src1 SEPARATOR
+#define VMOV_FROM_DOUBLE(dst0, dst1, src) vmov dst0, dst1, src SEPARATOR
+#endif
+#endif
+
+#if defined(__ASSEMBLER__) && (defined(__i386__) || defined(__amd64__)) && \
+ !defined(__arm64ec__)
+.att_syntax
+#endif
+
#endif // COMPILERRT_ASSEMBLY_H
"HWASAN pid: %d rss: %zd threads: %zd stacks: %zd"
" thr_aux: %zd stack_depot: %zd uniq_stacks: %zd"
" heap: %zd",
- internal_getpid(), GetRSS(), thread_stats.n_live_threads,
+ (int)internal_getpid(), GetRSS(), thread_stats.n_live_threads,
thread_stats.total_stack_size,
thread_stats.n_live_threads * thread_list.MemoryUsedPerThread(),
sds.allocated, sds.n_uniq_ids, asc[AllocatorStatMapped]);
"WARNING: HWASan is ignoring requested __hwasan_handle_longjmp: "
"stack top: %p; target %p; distance: %p (%zd)\n"
"False positive error reports may follow\n",
- (void *)sp, (void *)dst, dst - sp, dst - sp);
+ (void *)sp, (void *)dst, (void *)(dst - sp), dst - sp);
return;
}
TagMemory(sp, dst - sp, 0);
SANITIZER_INTERFACE_ATTRIBUTE
THREADLOCAL uptr __hwasan_tls;
+namespace __sanitizer {
+void EarlySanitizerInit() {
+ // Setup the hwasan runtime before any `__libc_extensions_init`s are called.
+ // This is needed because libraries which define this function (like fdio)
+ // may be instrumented and either access `__hwasan_tls` or make runtime calls.
+ __hwasan_init();
+}
+} // namespace __sanitizer
+
namespace __hwasan {
bool InitShadow() {
continue;
}
- // Only libraries with instrumented globals need to be checked against the
- // code model since they use relocations that aren't checked at link time.
- CheckCodeModel(base, phdr, phnum);
-
auto *global_note = reinterpret_cast<const hwasan_global_note *>(desc);
auto *globals_begin = reinterpret_cast<const hwasan_global *>(
note + global_note->begin_relptr);
auto *globals_end = reinterpret_cast<const hwasan_global *>(
note + global_note->end_relptr);
+ // Only libraries with instrumented globals need to be checked against the
+ // code model since they use relocations that aren't checked at link time.
+ //
+ // There is always a HWASan globals note ("Create the note even if we
+ // aren't instrumenting globals." - HWAddressSanitizer.cpp), but we can
+ // elide the code model check if there are no globals.
+ if (globals_begin != globals_end)
+ CheckCodeModel(base, phdr, phnum);
+
return {globals_begin, globals_end};
}
}
NO_EXEC_STACK_DIRECTIVE
-GNU_PROPERTY_BTI_PAC
+GNU_PROPERTY_BTI_PAC_GCS
SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_set_error_report_callback(void (*callback)(const char *));
+
+// HWASan does not need a fake stack, so the fake-stack arguments are unused.
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_start_switch_fiber(void **, const void *bottom, uptr size);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_finish_switch_fiber(void *, const void **bottom_old,
+ uptr *size_old);
} // extern "C"
#endif // HWASAN_INTERFACE_INTERNAL_H
if (h < left || h > right)
return false;
if (flags()->malloc_bisect_dump) {
- Printf("[alloc] %u %zu\n", h, orig_size);
+ Printf("[alloc] %u %zu\n", (u32)h, orig_size);
stack->Print();
}
return true;
#if HWASAN_REPLACE_OPERATORS_NEW_AND_DELETE
// TODO(alekseys): throw std::bad_alloc instead of dying on OOM.
-# define OPERATOR_NEW_BODY(nothrow) \
+# define OPERATOR_NEW_BODY \
GET_MALLOC_STACK_TRACE; \
void *res = hwasan_malloc(size, &stack); \
- if (!nothrow && UNLIKELY(!res)) \
+ if (UNLIKELY(!res)) \
ReportOutOfMemory(size, &stack); \
return res
-# define OPERATOR_NEW_ALIGN_BODY(nothrow) \
+# define OPERATOR_NEW_BODY_NOTHROW \
+ GET_MALLOC_STACK_TRACE; \
+ return hwasan_malloc(size, &stack)
+# define OPERATOR_NEW_BODY_ARRAY \
+ GET_MALLOC_STACK_TRACE; \
+ void *res = hwasan_malloc(size, &stack); \
+ if (UNLIKELY(!res)) \
+ ReportOutOfMemory(size, &stack); \
+ return res
+# define OPERATOR_NEW_BODY_ARRAY_NOTHROW \
+ GET_MALLOC_STACK_TRACE; \
+ return hwasan_malloc(size, &stack)
+# define OPERATOR_NEW_BODY_ALIGN \
+ GET_MALLOC_STACK_TRACE; \
+ void *res = hwasan_memalign(static_cast<uptr>(align), size, &stack); \
+ if (UNLIKELY(!res)) \
+ ReportOutOfMemory(size, &stack); \
+ return res
+# define OPERATOR_NEW_BODY_ALIGN_NOTHROW \
+ GET_MALLOC_STACK_TRACE; \
+ return hwasan_memalign(static_cast<uptr>(align), size, &stack)
+# define OPERATOR_NEW_BODY_ALIGN_ARRAY \
GET_MALLOC_STACK_TRACE; \
void *res = hwasan_memalign(static_cast<uptr>(align), size, &stack); \
- if (!nothrow && UNLIKELY(!res)) \
+ if (UNLIKELY(!res)) \
ReportOutOfMemory(size, &stack); \
return res
+# define OPERATOR_NEW_BODY_ALIGN_ARRAY_NOTHROW \
+ GET_MALLOC_STACK_TRACE; \
+ return hwasan_memalign(static_cast<uptr>(align), size, &stack)
# define OPERATOR_DELETE_BODY \
GET_MALLOC_STACK_TRACE; \
if (ptr) \
hwasan_free(ptr, &stack)
+# define OPERATOR_DELETE_BODY_ARRAY \
+ GET_MALLOC_STACK_TRACE; \
+ if (ptr) \
+ hwasan_free(ptr, &stack)
+# define OPERATOR_DELETE_BODY_ALIGN \
+ GET_MALLOC_STACK_TRACE; \
+ if (ptr) \
+ hwasan_free(ptr, &stack)
+# define OPERATOR_DELETE_BODY_ALIGN_ARRAY \
+ GET_MALLOC_STACK_TRACE; \
+ if (ptr) \
+ hwasan_free(ptr, &stack)
+# define OPERATOR_DELETE_BODY_SIZE \
+ GET_MALLOC_STACK_TRACE; \
+ if (ptr) \
+ hwasan_free(ptr, &stack)
+# define OPERATOR_DELETE_BODY_SIZE_ARRAY \
+ GET_MALLOC_STACK_TRACE; \
+ if (ptr) \
+ hwasan_free(ptr, &stack)
+# define OPERATOR_DELETE_BODY_SIZE_ALIGN \
+ GET_MALLOC_STACK_TRACE; \
+ if (ptr) \
+ hwasan_free(ptr, &stack)
+# define OPERATOR_DELETE_BODY_SIZE_ALIGN_ARRAY \
+ GET_MALLOC_STACK_TRACE; \
+ if (ptr) \
+ hwasan_free(ptr, &stack)
#elif defined(__ANDROID__)
// since we previously released a runtime that intercepted these functions,
// removing the interceptors would break ABI. Therefore we simply forward to
// malloc and free.
-# define OPERATOR_NEW_BODY(nothrow) return malloc(size)
+# define OPERATOR_NEW_BODY return malloc(size)
+# define OPERATOR_NEW_BODY_NOTHROW return malloc(size)
+# define OPERATOR_NEW_BODY_ARRAY return malloc(size)
+# define OPERATOR_NEW_BODY_ARRAY_NOTHROW return malloc(size)
# define OPERATOR_DELETE_BODY free(ptr)
+# define OPERATOR_DELETE_BODY_ARRAY free(ptr)
+# define OPERATOR_DELETE_BODY_SIZE free(ptr)
+# define OPERATOR_DELETE_BODY_SIZE_ARRAY free(ptr)
#endif
} // namespace std
INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void *operator new(size_t size) {
- OPERATOR_NEW_BODY(false /*nothrow*/);
+ OPERATOR_NEW_BODY;
}
INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void *operator new[](
size_t size) {
- OPERATOR_NEW_BODY(false /*nothrow*/);
+ OPERATOR_NEW_BODY_ARRAY;
}
INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void *operator new(
size_t size, std::nothrow_t const &) {
- OPERATOR_NEW_BODY(true /*nothrow*/);
+ OPERATOR_NEW_BODY_NOTHROW;
}
INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void *operator new[](
size_t size, std::nothrow_t const &) {
- OPERATOR_NEW_BODY(true /*nothrow*/);
+ OPERATOR_NEW_BODY_ARRAY_NOTHROW;
}
INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete(
}
INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete[](
void *ptr) NOEXCEPT {
- OPERATOR_DELETE_BODY;
+ OPERATOR_DELETE_BODY_ARRAY;
}
INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete(
void *ptr, std::nothrow_t const &) {
}
INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete[](
void *ptr, std::nothrow_t const &) {
- OPERATOR_DELETE_BODY;
+ OPERATOR_DELETE_BODY_ARRAY;
}
INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete(
void *ptr, size_t) NOEXCEPT {
- OPERATOR_DELETE_BODY;
+ OPERATOR_DELETE_BODY_SIZE;
}
INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete[](
void *ptr, size_t) NOEXCEPT {
- OPERATOR_DELETE_BODY;
+ OPERATOR_DELETE_BODY_SIZE_ARRAY;
}
#endif // OPERATOR_NEW_BODY
-#ifdef OPERATOR_NEW_ALIGN_BODY
+#ifdef OPERATOR_NEW_BODY_ALIGN
namespace std {
enum class align_val_t : size_t {};
INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void *operator new(
size_t size, std::align_val_t align) {
- OPERATOR_NEW_ALIGN_BODY(false /*nothrow*/);
+ OPERATOR_NEW_BODY_ALIGN;
}
INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void *operator new[](
size_t size, std::align_val_t align) {
- OPERATOR_NEW_ALIGN_BODY(false /*nothrow*/);
+ OPERATOR_NEW_BODY_ALIGN_ARRAY;
}
INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void *operator new(
size_t size, std::align_val_t align, std::nothrow_t const &) {
- OPERATOR_NEW_ALIGN_BODY(true /*nothrow*/);
+ OPERATOR_NEW_BODY_ALIGN_NOTHROW;
}
INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void *operator new[](
size_t size, std::align_val_t align, std::nothrow_t const &) {
- OPERATOR_NEW_ALIGN_BODY(true /*nothrow*/);
+ OPERATOR_NEW_BODY_ALIGN_ARRAY_NOTHROW;
}
INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete(
void *ptr, std::align_val_t align) NOEXCEPT {
- OPERATOR_DELETE_BODY;
+ OPERATOR_DELETE_BODY_ALIGN;
}
INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete[](
void *ptr, std::align_val_t) NOEXCEPT {
- OPERATOR_DELETE_BODY;
+ OPERATOR_DELETE_BODY_ALIGN_ARRAY;
}
INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete(
void *ptr, std::align_val_t, std::nothrow_t const &) NOEXCEPT {
- OPERATOR_DELETE_BODY;
+ OPERATOR_DELETE_BODY_ALIGN;
}
INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete[](
void *ptr, std::align_val_t, std::nothrow_t const &) NOEXCEPT {
- OPERATOR_DELETE_BODY;
+ OPERATOR_DELETE_BODY_ALIGN_ARRAY;
}
INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete(
void *ptr, size_t, std::align_val_t) NOEXCEPT {
- OPERATOR_DELETE_BODY;
+ OPERATOR_DELETE_BODY_SIZE_ALIGN;
}
INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete[](
void *ptr, size_t, std::align_val_t) NOEXCEPT {
- OPERATOR_DELETE_BODY;
+ OPERATOR_DELETE_BODY_SIZE_ALIGN_ARRAY;
}
INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete(
void *ptr, size_t, std::align_val_t, std::nothrow_t const &) NOEXCEPT {
- OPERATOR_DELETE_BODY;
+ OPERATOR_DELETE_BODY_SIZE_ALIGN;
}
INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete[](
void *ptr, size_t, std::align_val_t, std::nothrow_t const &) NOEXCEPT {
- OPERATOR_DELETE_BODY;
+ OPERATOR_DELETE_BODY_SIZE_ALIGN_ARRAY;
}
-#endif // OPERATOR_NEW_ALIGN_BODY
+#endif // OPERATOR_NEW_BODY_ALIGN
#undef SANITIZER_INTERCEPT_CLOCK_GETCPUCLOCKID
#define SANITIZER_INTERCEPT_CLOCK_GETCPUCLOCKID 0
+#undef SANITIZER_INTERCEPT_TIMER_CREATE
+#define SANITIZER_INTERCEPT_TIMER_CREATE 0
+
#undef SANITIZER_INTERCEPT_GETITIMER
#define SANITIZER_INTERCEPT_GETITIMER 0
#undef SANITIZER_INTERCEPT_TIME
#define SANITIZER_INTERCEPT_TIME 0
+#undef SANITIZER_INTERCEPT_TIMESPEC_GET
+#define SANITIZER_INTERCEPT_TIMESPEC_GET 0
+
#undef SANITIZER_INTERCEPT_GLOB
#define SANITIZER_INTERCEPT_GLOB 0
"%p is located %zd bytes %s a %zd-byte local variable %s "
"[%p,%p) "
"in %s %s\n",
- untagged_addr, offset, whence, local.size, local.name, best_beg,
- best_beg + local.size, local.function_name, location.data());
+ (void *)untagged_addr, offset, whence, local.size, local.name,
+ (void *)best_beg, (void *)(best_beg + local.size),
+ local.function_name, location.data());
location.clear();
Printf("%s\n", d.Default());
}
Printf("%s", d.Default());
Printf("%s", d.Location());
Printf("%p is located %zd bytes %s a %zd-byte region [%p,%p)\n",
- untagged_addr, offset, whence,
- candidate.heap.end - candidate.heap.begin, candidate.heap.begin,
- candidate.heap.end);
+         (void *)untagged_addr, offset, whence,
+         candidate.heap.end - candidate.heap.begin,
+         (void *)candidate.heap.begin, (void *)candidate.heap.end);
Printf("%s", d.Allocation());
Printf("allocated by thread T%u here:\n", candidate.heap.thread_id);
Printf("%s", d.Default());
Printf(
"%p is located %zd bytes %s a %zd-byte global variable "
"%s [%p,%p) in %s\n",
- untagged_addr,
+ (void *)untagged_addr,
candidate.after ? untagged_addr - (info.start + info.size)
: info.start - untagged_addr,
candidate.after ? "after" : "before", info.size, info.name,
- info.start, info.start + info.size, module_name);
+ (void *)info.start, (void *)(info.start + info.size), module_name);
} else {
uptr size = GetGlobalSizeFromDescriptor(candidate.untagged_addr);
if (size == 0)
Printf(
"%p is located %s a global variable in "
"\n #0 0x%x (%s+0x%x)\n",
- untagged_addr, candidate.after ? "after" : "before",
- candidate.untagged_addr, module_name, module_address);
+        (void *)untagged_addr, candidate.after ? "after" : "before",
+ (u32)candidate.untagged_addr, module_name, (u32)module_address);
else
Printf(
"%p is located %s a %zd-byte global variable in "
"\n #0 0x%x (%s+0x%x)\n",
- untagged_addr, candidate.after ? "after" : "before", size,
- candidate.untagged_addr, module_name, module_address);
+        (void *)untagged_addr, candidate.after ? "after" : "before", size,
+ (u32)candidate.untagged_addr, module_name, (u32)module_address);
}
Printf("%s", d.Default());
}
int num_descriptions_printed = 0;
if (MemIsShadow(untagged_addr)) {
- Printf("%s%p is HWAsan shadow memory.\n%s", d.Location(), untagged_addr,
- d.Default());
+ Printf("%s%p is HWAsan shadow memory.\n%s", d.Location(),
+ (void *)untagged_addr, d.Default());
return;
}
Printf(
"%s[%p,%p) is a %s %s heap chunk; "
"size: %zd offset: %zd\n%s",
- d.Location(), heap.begin, heap.begin + heap.size,
+ d.Location(), (void *)heap.begin, (void *)(heap.begin + heap.size),
heap.from_small_heap ? "small" : "large",
heap.is_allocated ? "allocated" : "unallocated", heap.size,
untagged_addr - heap.begin, d.Default());
Printf("%s", d.Error());
Printf("\nCause: stack tag-mismatch\n");
Printf("%s", d.Location());
- Printf("Address %p is located in stack of thread T%zd\n", untagged_addr,
- sa.thread_id());
+ Printf("Address %p is located in stack of thread T%zd\n",
+ (void *)untagged_addr, (ssize)sa.thread_id());
Printf("%s", d.Default());
announce_by_id(sa.thread_id());
PrintStackAllocations(sa.get(), ptr_tag, untagged_addr);
Printf("\nCause: use-after-free\n");
Printf("%s", d.Location());
Printf("%p is located %zd bytes inside a %zd-byte region [%p,%p)\n",
- untagged_addr, untagged_addr - UntagAddr(har.tagged_addr),
- har.requested_size, UntagAddr(har.tagged_addr),
- UntagAddr(har.tagged_addr) + har.requested_size);
+         (void *)untagged_addr, untagged_addr - UntagAddr(har.tagged_addr),
+         (ssize)har.requested_size, (void *)UntagAddr(har.tagged_addr),
+         (void *)(UntagAddr(har.tagged_addr) + har.requested_size));
Printf("%s", d.Allocation());
Printf("freed by thread T%u here:\n", ha.free_thread_id);
Printf("%s", d.Default());
// Print a developer note: the index of this heap object
// in the thread's deallocation ring buffer.
Printf("hwasan_dev_note_heap_rb_distance: %zd %zd\n", ha.ring_index + 1,
- flags()->heap_history_size);
+ (ssize)flags()->heap_history_size);
Printf("hwasan_dev_note_num_matching_addrs: %zd\n", ha.num_matching_addrs);
Printf("hwasan_dev_note_num_matching_addrs_4b: %zd\n",
ha.num_matching_addrs_4b);
const Thread *thread = GetCurrentThread();
if (thread) {
Report("ERROR: %s: %s on address %p at pc %p on thread T%zd\n",
- SanitizerToolName, bug_type, untagged_addr, pc, thread->unique_id());
+ SanitizerToolName, bug_type, (void *)untagged_addr, (void *)pc,
+ (ssize)thread->unique_id());
} else {
Report("ERROR: %s: %s on address %p at pc %p on unknown thread\n",
- SanitizerToolName, bug_type, untagged_addr, pc);
+ SanitizerToolName, bug_type, (void *)untagged_addr, (void *)pc);
}
Printf("%s", d.Access());
if (shadow.addr) {
Printf("%s", d.Error());
const char *bug_type = "allocation-tail-overwritten";
Report("ERROR: %s: %s; heap object [%p,%p) of size %zd\n", SanitizerToolName,
- bug_type, untagged_addr, untagged_addr + orig_size, orig_size);
+ bug_type, (void *)untagged_addr, (void *)(untagged_addr + orig_size),
+ orig_size);
Printf("\n%s", d.Default());
Printf(
"Stack of invalid access unknown. Issue detected at deallocation "
uptr pc = GetTopPc(stack);
Printf("%s", d.Error());
Report("ERROR: %s: %s on address %p at pc %p\n", SanitizerToolName, bug_type,
- untagged_addr, pc);
+ (void *)untagged_addr, (void *)pc);
Thread *t = GetCurrentThread();
GetShortTagCopy(MemToShadow(untagged_addr + mismatch_offset));
Printf(
"%s of size %zu at %p tags: %02x/%02x(%02x) (ptr/mem) in thread T%zd\n",
- is_store ? "WRITE" : "READ", access_size, untagged_addr, ptr_tag,
- mem_tag, short_tag, t->unique_id());
+ is_store ? "WRITE" : "READ", access_size, (void *)untagged_addr,
+ ptr_tag, mem_tag, short_tag, (ssize)t->unique_id());
} else {
Printf("%s of size %zu at %p tags: %02x/%02x (ptr/mem) in thread T%zd\n",
- is_store ? "WRITE" : "READ", access_size, untagged_addr, ptr_tag,
- mem_tag, t->unique_id());
+ is_store ? "WRITE" : "READ", access_size, (void *)untagged_addr,
+ ptr_tag, mem_tag, (ssize)t->unique_id());
}
if (mismatch_offset)
Printf("Invalid access starting at offset %zu\n", mismatch_offset);
// See the frame breakdown defined in __hwasan_tag_mismatch (from
// hwasan_tag_mismatch_{aarch64,riscv64}.S).
void ReportRegisters(const uptr *frame, uptr pc) {
- Printf("\nRegisters where the failure occurred (pc %p):\n", pc);
+ Printf("\nRegisters where the failure occurred (pc %p):\n", (void *)pc);
// We explicitly print a single line (4 registers/line) each iteration to
// reduce the amount of logcat error messages printed. Each Printf() will
// stack pointer when compiling a C function.
// Hence we have to write this function in assembly.
-.section .text
+TEXT_SECTION
.file "hwasan_setjmp_aarch64.S"
.global ASM_WRAPPER_NAME(setjmp)
// We do not need executable stack.
NO_EXEC_STACK_DIRECTIVE
-GNU_PROPERTY_BTI_PAC
+GNU_PROPERTY_BTI_PAC_GCS
.section .text
.file "hwasan_setjmp_x86_64.S"
+.att_syntax
.global ASM_WRAPPER_NAME(setjmp)
ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(setjmp))
// clobbering the x17 register in error reports, and that the program will have
// a runtime dependency on the __hwasan_tag_mismatch_v2 symbol; therefore, it
// will fail to start up given an older (i.e. incompatible) runtime.
-.section .text
+TEXT_SECTION
.file "hwasan_tag_mismatch_aarch64.S"
.global __hwasan_tag_mismatch
.type __hwasan_tag_mismatch, %function
// We do not need executable stack.
NO_EXEC_STACK_DIRECTIVE
-GNU_PROPERTY_BTI_PAC
+GNU_PROPERTY_BTI_PAC_GCS
*GetCurrentThreadLongPtr() = 0;
}
+void Thread::StartSwitchFiber(uptr bottom, uptr size) {
+ if (atomic_load(&stack_switching_, memory_order_acquire)) {
+ Report("ERROR: starting fiber switch while in fiber switch\n");
+ Die();
+ }
+
+ next_stack_bottom_ = bottom;
+ next_stack_top_ = bottom + size;
+ atomic_store(&stack_switching_, 1, memory_order_release);
+}
+
+void Thread::FinishSwitchFiber(uptr *bottom_old, uptr *size_old) {
+ if (!atomic_load(&stack_switching_, memory_order_acquire)) {
+ Report("ERROR: finishing a fiber switch that has not started\n");
+ Die();
+ }
+
+ if (bottom_old)
+ *bottom_old = stack_bottom_;
+ if (size_old)
+ *size_old = stack_top_ - stack_bottom_;
+ stack_bottom_ = next_stack_bottom_;
+ stack_top_ = next_stack_top_;
+ atomic_store(&stack_switching_, 0, memory_order_release);
+ next_stack_top_ = 0;
+ next_stack_bottom_ = 0;
+}
+
+inline Thread::StackBounds Thread::GetStackBounds() const {
+ if (!atomic_load(&stack_switching_, memory_order_acquire)) {
+ // Make sure the stack bounds are fully initialized.
+ if (stack_bottom_ >= stack_top_)
+ return {0, 0};
+ return {stack_bottom_, stack_top_};
+ }
+ const uptr cur_stack = (uptr)__builtin_frame_address(0);
+  // Note: we need to check the next stack first, because FinishSwitchFiber
+  // may be in the process of overwriting stack_top_/stack_bottom_. But in
+  // that case we are already running on the next stack.
+ if (cur_stack >= next_stack_bottom_ && cur_stack < next_stack_top_)
+ return {next_stack_bottom_, next_stack_top_};
+ return {stack_bottom_, stack_top_};
+}
+
+uptr Thread::stack_top() { return GetStackBounds().top; }
+
+uptr Thread::stack_bottom() { return GetStackBounds().bottom; }
+
+uptr Thread::stack_size() {
+ const auto bounds = GetStackBounds();
+ return bounds.top - bounds.bottom;
+}
+
void Thread::Print(const char *Prefix) {
- Printf("%sT%zd %p stack: [%p,%p) sz: %zd tls: [%p,%p)\n", Prefix, unique_id_,
- (void *)this, stack_bottom(), stack_top(),
- stack_top() - stack_bottom(), tls_begin(), tls_end());
+ Printf("%sT%zd %p stack: [%p,%p) sz: %zd tls: [%p,%p)\n", Prefix,
+ (ssize)unique_id_, (void *)this, (void *)stack_bottom(),
+ (void *)stack_top(), stack_top() - stack_bottom(), (void *)tls_begin(),
+ (void *)tls_end());
}
static u32 xorshift(u32 state) {
return &tl;
}
-static __hwasan::Thread *GetThreadByOsIDLocked(tid_t os_id) {
+static __hwasan::Thread *GetThreadByOsIDLocked(ThreadID os_id) {
return GetHwasanThreadListLocked()->FindThreadLocked(
[os_id](__hwasan::Thread *t) { return t->os_id() == os_id; });
}
void EnsureMainThreadIDIsCorrect() { __hwasan::EnsureMainThreadIDIsCorrect(); }
-bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
+bool GetThreadRangesLocked(ThreadID os_id, uptr *stack_begin, uptr *stack_end,
uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
uptr *cache_end, DTLS **dtls) {
auto *t = GetThreadByOsIDLocked(os_id);
void GetAllThreadAllocatorCachesLocked(InternalMmapVector<uptr> *caches) {}
-void GetThreadExtraStackRangesLocked(tid_t os_id,
+void GetThreadExtraStackRangesLocked(ThreadID os_id,
InternalMmapVector<Range> *ranges) {}
void GetThreadExtraStackRangesLocked(InternalMmapVector<Range> *ranges) {}
__hwasan::hwasanThreadArgRetval().GetAllPtrsLocked(ptrs);
}
-void GetRunningThreadsLocked(InternalMmapVector<tid_t> *threads) {
+void GetRunningThreadsLocked(InternalMmapVector<ThreadID> *threads) {
// TODO: implement.
}
void PrintThreads() {
}
} // namespace __lsan
+
+// ---------------------- Interface ---------------- {{{1
+using namespace __hwasan;
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_start_switch_fiber(void **, const void *bottom, uptr size) {
+ if (auto *t = GetCurrentThread())
+ t->StartSwitchFiber((uptr)bottom, size);
+ else
+    VReport(1, "__sanitizer_start_switch_fiber called from unknown thread\n");
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_finish_switch_fiber(void *, const void **bottom_old,
+ uptr *size_old) {
+ if (auto *t = GetCurrentThread())
+ t->FinishSwitchFiber((uptr *)bottom_old, size_old);
+ else
+    VReport(1, "__sanitizer_finish_switch_fiber called from unknown thread\n");
+}
+}
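(Editorial aside: a minimal caller-side sketch of the fiber API above, mirroring the established ASan usage pattern; everything except the __sanitizer_* entry points is an assumption, and the public prototypes in sanitizer/common_interface_defs.h take size_t.)

#include <sanitizer/common_interface_defs.h>
#include <ucontext.h>

// fiber_ctx is assumed to have been prepared with getcontext/makecontext.
static ucontext_t main_ctx, fiber_ctx;

void RunFiberOnce(void *fiber_stack_bottom, size_t fiber_stack_size) {
  void *fake_stack = nullptr;  // HWASan has no fake stack; the slot is unused.
  const void *old_bottom;
  size_t old_size;
  // Publish the target stack bounds before leaving the current stack.
  __sanitizer_start_switch_fiber(&fake_stack, fiber_stack_bottom,
                                 fiber_stack_size);
  swapcontext(&main_ctx, &fiber_ctx);  // The fiber runs, then switches back.
  // Restore this thread's own bounds once control returns here.
  __sanitizer_finish_switch_fiber(fake_stack, &old_bottom, &old_size);
}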
void Destroy();
- uptr stack_top() { return stack_top_; }
- uptr stack_bottom() { return stack_bottom_; }
- uptr stack_size() { return stack_top() - stack_bottom(); }
+ uptr stack_top();
+ uptr stack_bottom();
+ uptr stack_size();
uptr tls_begin() { return tls_begin_; }
uptr tls_end() { return tls_end_; }
DTLS *dtls() { return dtls_; }
return addr >= stack_bottom_ && addr < stack_top_;
}
+ void StartSwitchFiber(uptr bottom, uptr size);
+ void FinishSwitchFiber(uptr *bottom_old, uptr *size_old);
+
AllocatorCache *allocator_cache() { return &allocator_cache_; }
HeapAllocationsRingBuffer *heap_allocations() { return heap_allocations_; }
StackAllocationsRingBuffer *stack_allocations() { return stack_allocations_; }
Print("Thread: ");
}
- tid_t os_id() const { return os_id_; }
- void set_os_id(tid_t os_id) { os_id_ = os_id; }
+ ThreadID os_id() const { return os_id_; }
+ void set_os_id(ThreadID os_id) { os_id_ = os_id; }
uptr &vfork_spill() { return vfork_spill_; }
void ClearShadowForThreadStackAndTLS();
void Print(const char *prefix);
void InitRandomState();
+
+ struct StackBounds {
+ uptr bottom;
+ uptr top;
+ };
+ StackBounds GetStackBounds() const;
+
uptr vfork_spill_;
uptr stack_top_;
uptr stack_bottom_;
+  // These variables are used when the thread is about to switch stacks.
+ uptr next_stack_top_;
+ uptr next_stack_bottom_;
+  // True if a stack switch is in progress.
+ atomic_uint8_t stack_switching_;
+
uptr tls_begin_;
uptr tls_end_;
DTLS *dtls_;
u32 unique_id_; // counting from zero.
- tid_t os_id_;
+ ThreadID os_id_;
u32 tagging_disabled_; // if non-zero, malloc uses zero tag in this thread.
/// User-provided default option settings.
///
-/// You can provide your own implementation of this function to return a string
-/// containing MemProf runtime options (for example,
-/// <c>verbosity=1:print_stats=1</c>).
+/// You can set these options via the -memprof-runtime-default-options LLVM flag
+/// or you can provide your own implementation of this function. See
+/// memprof_flags.h for more info.
///
/// \returns Default options string.
const char *SANITIZER_CDECL __memprof_default_options(void);
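(Editorial aside: echoing the example the replaced comment carried, a user override might look like this; the option values are illustrative.)

extern "C" const char *__memprof_default_options(void) {
  return "verbosity=1:print_stats=1";
}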
} __tsan_memory_order;
__tsan_atomic8 SANITIZER_CDECL
-__tsan_atomic8_load(const volatile __tsan_atomic8 *a, __tsan_memory_order mo);
+__tsan_atomic8_load(const volatile __tsan_atomic8 *a, int mo);
__tsan_atomic16 SANITIZER_CDECL
-__tsan_atomic16_load(const volatile __tsan_atomic16 *a, __tsan_memory_order mo);
+__tsan_atomic16_load(const volatile __tsan_atomic16 *a, int mo);
__tsan_atomic32 SANITIZER_CDECL
-__tsan_atomic32_load(const volatile __tsan_atomic32 *a, __tsan_memory_order mo);
+__tsan_atomic32_load(const volatile __tsan_atomic32 *a, int mo);
__tsan_atomic64 SANITIZER_CDECL
-__tsan_atomic64_load(const volatile __tsan_atomic64 *a, __tsan_memory_order mo);
+__tsan_atomic64_load(const volatile __tsan_atomic64 *a, int mo);
#if __TSAN_HAS_INT128
-__tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_load(
- const volatile __tsan_atomic128 *a, __tsan_memory_order mo);
+__tsan_atomic128 SANITIZER_CDECL
+__tsan_atomic128_load(const volatile __tsan_atomic128 *a, int mo);
#endif
void SANITIZER_CDECL __tsan_atomic8_store(volatile __tsan_atomic8 *a,
- __tsan_atomic8 v,
- __tsan_memory_order mo);
+ __tsan_atomic8 v, int mo);
void SANITIZER_CDECL __tsan_atomic16_store(volatile __tsan_atomic16 *a,
- __tsan_atomic16 v,
- __tsan_memory_order mo);
+ __tsan_atomic16 v, int mo);
void SANITIZER_CDECL __tsan_atomic32_store(volatile __tsan_atomic32 *a,
- __tsan_atomic32 v,
- __tsan_memory_order mo);
+ __tsan_atomic32 v, int mo);
void SANITIZER_CDECL __tsan_atomic64_store(volatile __tsan_atomic64 *a,
- __tsan_atomic64 v,
- __tsan_memory_order mo);
+ __tsan_atomic64 v, int mo);
#if __TSAN_HAS_INT128
void SANITIZER_CDECL __tsan_atomic128_store(volatile __tsan_atomic128 *a,
- __tsan_atomic128 v,
- __tsan_memory_order mo);
+ __tsan_atomic128 v, int mo);
#endif
-__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_exchange(
- volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic8 SANITIZER_CDECL
+__tsan_atomic8_exchange(volatile __tsan_atomic8 *a, __tsan_atomic8 v, int mo);
__tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_exchange(
- volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo);
+ volatile __tsan_atomic16 *a, __tsan_atomic16 v, int mo);
__tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_exchange(
- volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo);
+ volatile __tsan_atomic32 *a, __tsan_atomic32 v, int mo);
__tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_exchange(
- volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo);
+ volatile __tsan_atomic64 *a, __tsan_atomic64 v, int mo);
#if __TSAN_HAS_INT128
__tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_exchange(
- volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo);
+ volatile __tsan_atomic128 *a, __tsan_atomic128 v, int mo);
#endif
-__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_fetch_add(
- volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic8 SANITIZER_CDECL
+__tsan_atomic8_fetch_add(volatile __tsan_atomic8 *a, __tsan_atomic8 v, int mo);
__tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_fetch_add(
- volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo);
+ volatile __tsan_atomic16 *a, __tsan_atomic16 v, int mo);
__tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_fetch_add(
- volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo);
+ volatile __tsan_atomic32 *a, __tsan_atomic32 v, int mo);
__tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_fetch_add(
- volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo);
+ volatile __tsan_atomic64 *a, __tsan_atomic64 v, int mo);
#if __TSAN_HAS_INT128
__tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_fetch_add(
- volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo);
+ volatile __tsan_atomic128 *a, __tsan_atomic128 v, int mo);
#endif
-__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_fetch_sub(
- volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic8 SANITIZER_CDECL
+__tsan_atomic8_fetch_sub(volatile __tsan_atomic8 *a, __tsan_atomic8 v, int mo);
__tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_fetch_sub(
- volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo);
+ volatile __tsan_atomic16 *a, __tsan_atomic16 v, int mo);
__tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_fetch_sub(
- volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo);
+ volatile __tsan_atomic32 *a, __tsan_atomic32 v, int mo);
__tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_fetch_sub(
- volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo);
+ volatile __tsan_atomic64 *a, __tsan_atomic64 v, int mo);
#if __TSAN_HAS_INT128
__tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_fetch_sub(
- volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo);
+ volatile __tsan_atomic128 *a, __tsan_atomic128 v, int mo);
#endif
-__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_fetch_and(
- volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic8 SANITIZER_CDECL
+__tsan_atomic8_fetch_and(volatile __tsan_atomic8 *a, __tsan_atomic8 v, int mo);
__tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_fetch_and(
- volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo);
+ volatile __tsan_atomic16 *a, __tsan_atomic16 v, int mo);
__tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_fetch_and(
- volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo);
+ volatile __tsan_atomic32 *a, __tsan_atomic32 v, int mo);
__tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_fetch_and(
- volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo);
+ volatile __tsan_atomic64 *a, __tsan_atomic64 v, int mo);
#if __TSAN_HAS_INT128
__tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_fetch_and(
- volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo);
+ volatile __tsan_atomic128 *a, __tsan_atomic128 v, int mo);
#endif
-__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_fetch_or(
- volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic8 SANITIZER_CDECL
+__tsan_atomic8_fetch_or(volatile __tsan_atomic8 *a, __tsan_atomic8 v, int mo);
__tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_fetch_or(
- volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo);
+ volatile __tsan_atomic16 *a, __tsan_atomic16 v, int mo);
__tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_fetch_or(
- volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo);
+ volatile __tsan_atomic32 *a, __tsan_atomic32 v, int mo);
__tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_fetch_or(
- volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo);
+ volatile __tsan_atomic64 *a, __tsan_atomic64 v, int mo);
#if __TSAN_HAS_INT128
__tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_fetch_or(
- volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo);
+ volatile __tsan_atomic128 *a, __tsan_atomic128 v, int mo);
#endif
-__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_fetch_xor(
- volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic8 SANITIZER_CDECL
+__tsan_atomic8_fetch_xor(volatile __tsan_atomic8 *a, __tsan_atomic8 v, int mo);
__tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_fetch_xor(
- volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo);
+ volatile __tsan_atomic16 *a, __tsan_atomic16 v, int mo);
__tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_fetch_xor(
- volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo);
+ volatile __tsan_atomic32 *a, __tsan_atomic32 v, int mo);
__tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_fetch_xor(
- volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo);
+ volatile __tsan_atomic64 *a, __tsan_atomic64 v, int mo);
#if __TSAN_HAS_INT128
__tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_fetch_xor(
- volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo);
+ volatile __tsan_atomic128 *a, __tsan_atomic128 v, int mo);
#endif
-__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_fetch_nand(
- volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic8 SANITIZER_CDECL
+__tsan_atomic8_fetch_nand(volatile __tsan_atomic8 *a, __tsan_atomic8 v, int mo);
__tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_fetch_nand(
- volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo);
+ volatile __tsan_atomic16 *a, __tsan_atomic16 v, int mo);
__tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_fetch_nand(
- volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo);
+ volatile __tsan_atomic32 *a, __tsan_atomic32 v, int mo);
__tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_fetch_nand(
- volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo);
+ volatile __tsan_atomic64 *a, __tsan_atomic64 v, int mo);
#if __TSAN_HAS_INT128
__tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_fetch_nand(
- volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo);
+ volatile __tsan_atomic128 *a, __tsan_atomic128 v, int mo);
#endif
int SANITIZER_CDECL __tsan_atomic8_compare_exchange_weak(
- volatile __tsan_atomic8 *a, __tsan_atomic8 *c, __tsan_atomic8 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo);
+ volatile __tsan_atomic8 *a, __tsan_atomic8 *c, __tsan_atomic8 v, int mo,
+ int fail_mo);
int SANITIZER_CDECL __tsan_atomic16_compare_exchange_weak(
- volatile __tsan_atomic16 *a, __tsan_atomic16 *c, __tsan_atomic16 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo);
+ volatile __tsan_atomic16 *a, __tsan_atomic16 *c, __tsan_atomic16 v, int mo,
+ int fail_mo);
int SANITIZER_CDECL __tsan_atomic32_compare_exchange_weak(
- volatile __tsan_atomic32 *a, __tsan_atomic32 *c, __tsan_atomic32 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo);
+ volatile __tsan_atomic32 *a, __tsan_atomic32 *c, __tsan_atomic32 v, int mo,
+ int fail_mo);
int SANITIZER_CDECL __tsan_atomic64_compare_exchange_weak(
- volatile __tsan_atomic64 *a, __tsan_atomic64 *c, __tsan_atomic64 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo);
+ volatile __tsan_atomic64 *a, __tsan_atomic64 *c, __tsan_atomic64 v, int mo,
+ int fail_mo);
#if __TSAN_HAS_INT128
int SANITIZER_CDECL __tsan_atomic128_compare_exchange_weak(
volatile __tsan_atomic128 *a, __tsan_atomic128 *c, __tsan_atomic128 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo);
+ int mo, int fail_mo);
#endif
int SANITIZER_CDECL __tsan_atomic8_compare_exchange_strong(
- volatile __tsan_atomic8 *a, __tsan_atomic8 *c, __tsan_atomic8 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo);
+ volatile __tsan_atomic8 *a, __tsan_atomic8 *c, __tsan_atomic8 v, int mo,
+ int fail_mo);
int SANITIZER_CDECL __tsan_atomic16_compare_exchange_strong(
- volatile __tsan_atomic16 *a, __tsan_atomic16 *c, __tsan_atomic16 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo);
+ volatile __tsan_atomic16 *a, __tsan_atomic16 *c, __tsan_atomic16 v, int mo,
+ int fail_mo);
int SANITIZER_CDECL __tsan_atomic32_compare_exchange_strong(
- volatile __tsan_atomic32 *a, __tsan_atomic32 *c, __tsan_atomic32 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo);
+ volatile __tsan_atomic32 *a, __tsan_atomic32 *c, __tsan_atomic32 v, int mo,
+ int fail_mo);
int SANITIZER_CDECL __tsan_atomic64_compare_exchange_strong(
- volatile __tsan_atomic64 *a, __tsan_atomic64 *c, __tsan_atomic64 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo);
+ volatile __tsan_atomic64 *a, __tsan_atomic64 *c, __tsan_atomic64 v, int mo,
+ int fail_mo);
#if __TSAN_HAS_INT128
int SANITIZER_CDECL __tsan_atomic128_compare_exchange_strong(
volatile __tsan_atomic128 *a, __tsan_atomic128 *c, __tsan_atomic128 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo);
+ int mo, int fail_mo);
#endif
__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_compare_exchange_val(
- volatile __tsan_atomic8 *a, __tsan_atomic8 c, __tsan_atomic8 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo);
+ volatile __tsan_atomic8 *a, __tsan_atomic8 c, __tsan_atomic8 v, int mo,
+ int fail_mo);
__tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_compare_exchange_val(
- volatile __tsan_atomic16 *a, __tsan_atomic16 c, __tsan_atomic16 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo);
+ volatile __tsan_atomic16 *a, __tsan_atomic16 c, __tsan_atomic16 v, int mo,
+ int fail_mo);
__tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_compare_exchange_val(
- volatile __tsan_atomic32 *a, __tsan_atomic32 c, __tsan_atomic32 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo);
+ volatile __tsan_atomic32 *a, __tsan_atomic32 c, __tsan_atomic32 v, int mo,
+ int fail_mo);
__tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_compare_exchange_val(
- volatile __tsan_atomic64 *a, __tsan_atomic64 c, __tsan_atomic64 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo);
+ volatile __tsan_atomic64 *a, __tsan_atomic64 c, __tsan_atomic64 v, int mo,
+ int fail_mo);
#if __TSAN_HAS_INT128
__tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_compare_exchange_val(
volatile __tsan_atomic128 *a, __tsan_atomic128 c, __tsan_atomic128 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo);
+ int mo, int fail_mo);
#endif
-void SANITIZER_CDECL __tsan_atomic_thread_fence(__tsan_memory_order mo);
-void SANITIZER_CDECL __tsan_atomic_signal_fence(__tsan_memory_order mo);
+void SANITIZER_CDECL __tsan_atomic_thread_fence(int mo);
+void SANITIZER_CDECL __tsan_atomic_signal_fence(int mo);
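// Illustrative use from instrumented code (assumption: the int memory-order
// values follow the C11 numbering that the old __tsan_memory_order enum
// spelled out, 0 == relaxed ... 5 == seq_cst):
//   __tsan_atomic32 x = 0;
//   __tsan_atomic32 old = __tsan_atomic32_fetch_add(&x, 1, 5); // seq_cst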
#ifdef __cplusplus
} // extern "C"
#if !SANITIZER_LINUX && !SANITIZER_FREEBSD && !SANITIZER_APPLE && \
!SANITIZER_NETBSD && !SANITIZER_WINDOWS && !SANITIZER_FUCHSIA && \
- !SANITIZER_SOLARIS
+ !SANITIZER_SOLARIS && !SANITIZER_HAIKU && !SANITIZER_AIX
# error "Interception doesn't work on this operating system."
#endif
extern "C" ret_type func(__VA_ARGS__);
# define DECLARE_WRAPPER_WINAPI(ret_type, func, ...) \
extern "C" __declspec(dllimport) ret_type __stdcall func(__VA_ARGS__);
+#elif SANITIZER_AIX
+# define WRAP(x) __interceptor_##x
+# define TRAMPOLINE(x) WRAP(x)
+// # define WRAPPER_NAME(x) "__interceptor_" #x
+# define INTERCEPTOR_ATTRIBUTE __attribute__((visibility("default")))
+// AIX's linker will not select the weak symbol, so don't use weak for the
+// interceptors.
+# define DECLARE_WRAPPER(ret_type, func, ...) \
+ extern "C" ret_type func(__VA_ARGS__) \
+ __attribute__((alias("__interceptor_" #func), visibility("default")));
#elif !SANITIZER_FUCHSIA // LINUX, FREEBSD, NETBSD, SOLARIS
# define INTERCEPTOR_ATTRIBUTE __attribute__((visibility("default")))
# if ASM_INTERCEPTOR_TRAMPOLINE_SUPPORT
#define INCLUDED_FROM_INTERCEPTION_LIB
-#if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD || \
- SANITIZER_SOLARIS
+#if SANITIZER_AIX
+# include "interception_aix.h"
+# define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_AIX(func)
+# define INTERCEPT_FUNCTION_VER(func, symver) INTERCEPT_FUNCTION_AIX(func)
-# include "interception_linux.h"
-# define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func)
-# define INTERCEPT_FUNCTION_VER(func, symver) \
+#elif SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD || \
+ SANITIZER_SOLARIS || SANITIZER_HAIKU
+
+# include "interception_linux.h"
+# define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func)
+# define INTERCEPT_FUNCTION_VER(func, symver) \
INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver)
#elif SANITIZER_APPLE
# include "interception_mac.h"
#include "interception.h"
#if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD || \
- SANITIZER_SOLARIS
+ SANITIZER_SOLARIS || SANITIZER_HAIKU
#include <dlfcn.h> // for dlsym() and dlvsym()
} // namespace __interception
#endif // SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD ||
- // SANITIZER_SOLARIS
+ // SANITIZER_SOLARIS || SANITIZER_HAIKU
//===----------------------------------------------------------------------===//
#if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD || \
- SANITIZER_SOLARIS
+ SANITIZER_SOLARIS || SANITIZER_HAIKU
#if !defined(INCLUDED_FROM_INTERCEPTION_LIB)
# error interception_linux.h should be included from interception library only
#endif // INTERCEPTION_LINUX_H
#endif // SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD ||
- // SANITIZER_SOLARIS
+ // SANITIZER_SOLARIS || SANITIZER_HAIKU
return si.dwAllocationGranularity;
}
+UNUSED static uptr RoundDownTo(uptr size, uptr boundary) {
+ return size & ~(boundary - 1);
+}
+
UNUSED static uptr RoundUpTo(uptr size, uptr boundary) {
- return (size + boundary - 1) & ~(boundary - 1);
+ return RoundDownTo(size + boundary - 1, boundary);
}
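// Illustrative values (both helpers assume |boundary| is a power of two):
//   RoundDownTo(0x1234, 0x1000) == 0x1000
//   RoundUpTo(0x1234, 0x1000) == 0x2000
//   RoundUpTo(0x2000, 0x1000) == 0x2000 (already aligned)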
// FIXME: internal_str* and internal_mem* functions should be moved from the
return nullptr;
}
+static int _strcmp(const char *s1, const char *s2) {
+ while (true) {
+ unsigned c1 = *s1;
+ unsigned c2 = *s2;
+ if (c1 != c2) return (c1 < c2) ? -1 : 1;
+ if (c1 == 0) break;
+ s1++;
+ s2++;
+ }
+ return 0;
+}
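+// Behavior mirrors libc strcmp (illustrative):
+//   _strcmp("abc", "abc") == 0
+//   _strcmp("abc", "abd") < 0
+//   _strcmp("ab", "abc") < 0 (the shorter string compares less)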
+
static void _memset(void *p, int value, size_t sz) {
for (size_t i = 0; i < sz; ++i)
((char*)p)[i] = (char)value;
static void WriteShortJumpInstruction(uptr from, uptr target) {
sptr offset = target - from - kShortJumpInstructionLength;
- if (offset < -128 || offset > 127)
+ if (offset < -128 || offset > 127) {
+ ReportError("interception_win: cannot write short jmp from %p to %p\n",
+ (void *)from, (void *)target);
InterceptionFailed();
+ }
*(u8*)from = 0xEB;
*(u8*)(from + 1) = (u8)offset;
}
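// Worked example (illustrative): a short jmp written at 0x1000 targeting
// 0x1005 encodes offset 0x1005 - 0x1000 - 2 == 3, i.e. the bytes EB 03;
// targets outside [-128, +127] of the next instruction take the error path.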
uptr max_size;
};
-UNUSED static const uptr kTrampolineScanLimitRange = 1ull << 31; // 2 gig
+UNUSED static const uptr kTrampolineRangeLimit = 1ull << 31; // 2 gig
static const int kMaxTrampolineRegion = 1024;
static TrampolineMemoryRegion TrampolineRegions[kMaxTrampolineRegion];
-static void *AllocateTrampolineRegion(uptr image_address, size_t granularity) {
-#if SANITIZER_WINDOWS64
- uptr address = image_address;
- uptr scanned = 0;
- while (scanned < kTrampolineScanLimitRange) {
+static void *AllocateTrampolineRegion(uptr min_addr, uptr max_addr,
+ uptr func_addr, size_t granularity) {
+# if SANITIZER_WINDOWS64
+ // Clamp {min,max}_addr to the accessible address space.
+ SYSTEM_INFO system_info;
+ ::GetSystemInfo(&system_info);
+ uptr min_virtual_addr =
+ RoundUpTo((uptr)system_info.lpMinimumApplicationAddress, granularity);
+ uptr max_virtual_addr =
+ RoundDownTo((uptr)system_info.lpMaximumApplicationAddress, granularity);
+ if (min_addr < min_virtual_addr)
+ min_addr = min_virtual_addr;
+ if (max_addr > max_virtual_addr)
+ max_addr = max_virtual_addr;
+
+ // This loop probes the virtual address space to find free memory in the
+ // [min_addr, max_addr] interval. The search starts from func_addr and
+ // proceeds "outwards" towards the interval bounds using two probes, lo_addr
+ // and hi_addr, for addresses lower/higher than func_addr. At each step, it
+ // considers the probe closest to func_addr. If that address is not free, the
+ // probe is advanced (lower or higher depending on the probe) to the next
+ // memory block and the search continues.
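+ // For example (illustrative, assuming each probed block is free memory or
+ // exactly one granularity unit wide): starting from an aligned func_addr,
+ // the loop probes func_addr itself, then one unit below, one unit above,
+ // two units below, and so on, always taking whichever remaining side is
+ // closer to func_addr.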
+ uptr lo_addr = RoundDownTo(func_addr, granularity);
+ uptr hi_addr = RoundUpTo(func_addr, granularity);
+ while (lo_addr >= min_addr || hi_addr <= max_addr) {
+ // Consider the in-range address closest to func_addr.
+ uptr addr;
+ if (lo_addr < min_addr)
+ addr = hi_addr;
+ else if (hi_addr > max_addr)
+ addr = lo_addr;
+ else
+ addr = (hi_addr - func_addr < func_addr - lo_addr) ? hi_addr : lo_addr;
+
MEMORY_BASIC_INFORMATION info;
- if (!::VirtualQuery((void*)address, &info, sizeof(info)))
+ if (!::VirtualQuery((void *)addr, &info, sizeof(info))) {
+ ReportError(
+ "interception_win: VirtualQuery in AllocateTrampolineRegion failed "
+ "for %p\n",
+ (void *)addr);
return nullptr;
+ }
- // Check whether a region can be allocated at |address|.
+ // Check whether a region can be allocated at |addr|.
if (info.State == MEM_FREE && info.RegionSize >= granularity) {
- void *page = ::VirtualAlloc((void*)RoundUpTo(address, granularity),
- granularity,
- MEM_RESERVE | MEM_COMMIT,
- PAGE_EXECUTE_READWRITE);
+ void *page =
+ ::VirtualAlloc((void *)addr, granularity, MEM_RESERVE | MEM_COMMIT,
+ PAGE_EXECUTE_READWRITE);
+ if (page == nullptr)
+ ReportError(
+ "interception_win: VirtualAlloc in AllocateTrampolineRegion failed "
+ "for %p\n",
+ (void *)addr);
return page;
}
- // Move to the next region.
- address = (uptr)info.BaseAddress + info.RegionSize;
- scanned += info.RegionSize;
+ if (addr == lo_addr)
+ lo_addr =
+ RoundDownTo((uptr)info.AllocationBase - granularity, granularity);
+ if (addr == hi_addr)
+ hi_addr =
+ RoundUpTo((uptr)info.BaseAddress + info.RegionSize, granularity);
}
+
+ ReportError(
+ "interception_win: AllocateTrampolineRegion failed to find free memory; "
+ "min_addr: %p, max_addr: %p, func_addr: %p, granularity: %zu\n",
+ (void *)min_addr, (void *)max_addr, (void *)func_addr, granularity);
return nullptr;
#else
return ::VirtualAlloc(nullptr,
}
static uptr AllocateMemoryForTrampoline(uptr func_address, size_t size) {
- uptr image_address = func_address;
+# if SANITIZER_WINDOWS64
+ uptr min_addr = func_address - kTrampolineRangeLimit;
+ uptr max_addr = func_address + kTrampolineRangeLimit - size;
-#if SANITIZER_WINDOWS64
- // Allocate memory after the module (DLL or EXE file), but within 2GB
- // of the start of the module so that any address within the module can be
- // referenced with PC-relative operands.
+ // Allocate memory within 2GB of the module (DLL or EXE file) so that any
+ // address within the module can be referenced with PC-relative operands.
// This allows us to not just jump to the trampoline with a PC-relative
// offset, but to relocate any instructions that we copy to the trampoline
// which have references to the original module. If we can't find the base
// address of the module (e.g. if func_address is in mmap'ed memory), just
- // use func_address as is.
+ // stay within 2GB of func_address.
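+ // (Background, illustrative: an x86-64 rel32 jmp/call encodes a signed
+ // 32-bit displacement, so code at address A can only reach targets within
+ // roughly [A - 2GB, A + 2GB); kTrampolineRangeLimit mirrors that bound.)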
HMODULE module;
if (::GetModuleHandleExW(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS |
GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT,
MODULEINFO module_info;
if (::GetModuleInformation(::GetCurrentProcess(), module,
&module_info, sizeof(module_info))) {
- image_address = (uptr)module_info.lpBaseOfDll;
+ min_addr = (uptr)module_info.lpBaseOfDll + module_info.SizeOfImage -
+ kTrampolineRangeLimit;
+ max_addr = (uptr)module_info.lpBaseOfDll + kTrampolineRangeLimit - size;
}
}
-#endif
- // Find a region within 2G with enough space to allocate |size| bytes.
+ // Check for overflow.
+ if (min_addr > func_address)
+ min_addr = 0;
+ if (max_addr < func_address)
+ max_addr = ~(uptr)0;
+# else
+ uptr min_addr = 0;
+ uptr max_addr = ~min_addr;
+# endif
+
+ // Find a region within [min_addr, max_addr] with enough space to allocate
+ // |size| bytes.
TrampolineMemoryRegion *region = nullptr;
for (size_t bucket = 0; bucket < kMaxTrampolineRegion; ++bucket) {
TrampolineMemoryRegion* current = &TrampolineRegions[bucket];
if (current->content == 0) {
// No valid region found, allocate a new region.
size_t bucket_size = GetMmapGranularity();
- void *content = AllocateTrampolineRegion(image_address, bucket_size);
+ void *content = AllocateTrampolineRegion(min_addr, max_addr, func_address,
+ bucket_size);
if (content == nullptr)
return 0U;
region = current;
break;
} else if (current->max_size - current->allocated_size > size) {
-#if SANITIZER_WINDOWS64
- // In 64-bits, the memory space must be allocated within 2G boundary.
- uptr next_address = current->content + current->allocated_size;
- if (next_address < image_address ||
- next_address - image_address >= 0x7FFF0000)
- continue;
-#endif
+ uptr next_address = current->content + current->allocated_size;
+ if (next_address < min_addr || next_address > max_addr)
+ continue;
// The space can be allocated in the current region.
region = current;
break;
// Returns 0 on error.
static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
+ if (rel_offset) {
+ *rel_offset = 0;
+ }
+
#if SANITIZER_ARM64
// An ARM64 instruction is 4 bytes long.
return 4;
case 0xb8: // b8 XX XX XX XX : mov eax, XX XX XX XX
case 0xB9: // b9 XX XX XX XX : mov ecx, XX XX XX XX
+ case 0xBA: // ba XX XX XX XX : mov edx, XX XX XX XX
return 5;
// Cannot overwrite control-instruction. Return 0 to indicate failure.
case 0xFF8B: // 8B FF : mov edi, edi
case 0xEC8B: // 8B EC : mov ebp, esp
case 0xc889: // 89 C8 : mov eax, ecx
+ case 0xD189: // 89 D1 : mov ecx, edx
case 0xE589: // 89 E5 : mov ebp, esp
case 0xC18B: // 8B C1 : mov eax, ecx
+ case 0xC031: // 31 C0 : xor eax, eax
+ case 0xC931: // 31 C9 : xor ecx, ecx
+ case 0xD231: // 31 D2 : xor edx, edx
case 0xC033: // 33 C0 : xor eax, eax
case 0xC933: // 33 C9 : xor ecx, ecx
case 0xD233: // 33 D2 : xor edx, edx
+ case 0xFF33: // 33 FF : xor edi, edi
+ case 0x9066: // 66 90 : xchg %ax,%ax (Two-byte NOP)
case 0xDB84: // 84 DB : test bl,bl
+ case 0xC084: // 84 C0 : test al,al
case 0xC984: // 84 C9 : test cl,cl
case 0xD284: // 84 D2 : test dl,dl
return 2;
+ case 0x3980: // 80 39 XX : cmp BYTE PTR [rcx], XX
+ case 0x4D8B: // 8B 4D XX : mov ecx, dword ptr [ebp + XX]
+ case 0x558B: // 8B 55 XX : mov edx, dword ptr [ebp + XX]
+ case 0x758B: // 8B 75 XX : mov esi, dword ptr [ebp + XX]
+ case 0xE483: // 83 E4 XX : and esp, XX
+ case 0xEC83: // 83 EC XX : sub esp, XX
+ case 0xC1F6: // F6 C1 XX : test cl, XX
+ return 3;
+
+ case 0x89FF: // FF 89 XX XX XX XX : dec dword ptr [ecx + XX XX XX XX]
+ case 0xEC81: // 81 EC XX XX XX XX : sub esp, XX XX XX XX
+ return 6;
+
// Cannot overwrite control-instruction. Return 0 to indicate failure.
- case 0x25FF: // FF 25 XX XX XX XX : jmp [XXXXXXXX]
+ case 0x25FF: // FF 25 XX YY ZZ WW : jmp dword ptr ds:[WWZZYYXX]
return 0;
}
- switch (0x00FFFFFF & *(u32*)address) {
- case 0xF8E483: // 83 E4 F8 : and esp, 0xFFFFFFF8
- case 0x64EC83: // 83 EC 64 : sub esp, 64h
- return 3;
+ switch (0x00FFFFFF & *(u32 *)address) {
+ case 0x244C8D: // 8D 4C 24 XX : lea ecx, [esp + XX]
+ case 0x2474FF: // FF 74 24 XX : push qword ptr [rsp + XX]
+ return 4;
case 0x24A48D: // 8D A4 24 XX XX XX XX : lea esp, [esp + XX XX XX XX]
return 7;
}
case 0x5541: // push r13
case 0x5641: // push r14
case 0x5741: // push r15
- case 0x9066: // Two-byte NOP
case 0xc084: // test al, al
case 0x018a: // mov al, byte ptr [rcx]
return 2;
- case 0x058A: // 8A 05 XX XX XX XX : mov al, byte ptr [XX XX XX XX]
case 0x7E80: // 80 7E YY XX cmp BYTE PTR [rsi+YY], XX
case 0x7D80: // 80 7D YY XX cmp BYTE PTR [rbp+YY], XX
case 0x7A80: // 80 7A YY XX cmp BYTE PTR [rdx+YY], XX
case 0x7980: // 80 79 YY XX cmp BYTE ptr [rcx+YY], XX
return 4;
+ case 0x058A: // 8A 05 XX XX XX XX : mov al, byte ptr [XX XX XX XX]
case 0x058B: // 8B 05 XX XX XX XX : mov eax, dword ptr [XX XX XX XX]
if (rel_offset)
*rel_offset = 2;
+ FALLTHROUGH;
+ case 0xB841: // 41 B8 XX XX XX XX : mov r8d, XX XX XX XX
return 6;
case 0x7E81: // 81 7E YY XX XX XX XX cmp DWORD PTR [rsi+YY], XX XX XX XX
case 0x7B81: // 81 7B YY XX XX XX XX cmp DWORD PTR [rbx+YY], XX XX XX XX
case 0x7981: // 81 79 YY XX XX XX XX cmp dword ptr [rcx+YY], XX XX XX XX
return 7;
+
+ case 0xb848: // 48 b8 XX XX XX XX XX XX XX XX :
+ // movabsq XX XX XX XX XX XX XX XX, rax
+ case 0xba48: // 48 ba XX XX XX XX XX XX XX XX :
+ // movabsq XX XX XX XX XX XX XX XX, rdx
+ return 10;
}
- switch (0x00FFFFFF & *(u32*)address) {
- case 0x07c1f6: // f6 c1 07 : test cl, 0x7
+ switch (0x00FFFFFF & *(u32 *)address) {
case 0x10b70f: // 0f b7 10 : movzx edx, WORD PTR [rax]
+ case 0x02b70f: // 0f b7 02 : movzx eax, WORD PTR [rdx]
case 0xc00b4d: // 4d 0b c0 : or r8, r8
case 0xc03345: // 45 33 c0 : xor r8d, r8d
case 0xc08548: // 48 85 c0 : test rax, rax
case 0xc1ff48: // 48 ff c1 : inc rcx
case 0xc1ff49: // 49 ff c1 : inc r9
case 0xc28b41: // 41 8b c2 : mov eax, r10d
+ case 0x01b60f: // 0f b6 01 : movzx eax, BYTE PTR [rcx]
+ case 0x09b60f: // 0f b6 09 : movzx ecx, BYTE PTR [rcx]
+ case 0x11b60f: // 0f b6 11 : movzx edx, BYTE PTR [rcx]
case 0xc2b60f: // 0f b6 c2 : movzx eax, dl
case 0xc2ff48: // 48 ff c2 : inc rdx
case 0xc2ff49: // 49 ff c2 : inc r10
case 0xc98548: // 48 85 c9 : test rcx, rcx
case 0xc9854d: // 4d 85 c9 : test r9, r9
case 0xc98b4c: // 4c 8b c9 : mov r9, rcx
+ case 0xd12948: // 48 29 d1 : sub rcx, rdx
+ case 0xc22b4c: // 4c 2b c2 : sub r8, rdx
case 0xca2b48: // 48 2b ca : sub rcx, rdx
case 0xca3b48: // 48 3b ca : cmp rcx, rdx
case 0xd12b48: // 48 2b d1 : sub rdx, rcx
case 0xd2854d: // 4d 85 d2 : test r10, r10
case 0xd28b4c: // 4c 8b d2 : mov r10, rdx
case 0xd2b60f: // 0f b6 d2 : movzx edx, dl
+ case 0xd2be0f: // 0f be d2 : movsx edx, dl
case 0xd98b4c: // 4c 8b d9 : mov r11, rcx
case 0xd9f748: // 48 f7 d9 : neg rcx
+ case 0xc03145: // 45 31 c0 : xor r8d, r8d
+ case 0xc93145: // 45 31 c9 : xor r9d, r9d
+ case 0xd23345: // 45 33 d2 : xor r10d, r10d
case 0xdb3345: // 45 33 db : xor r11d, r11d
+ case 0xc08445: // 45 84 c0 : test r8b, r8b
+ case 0xd28445: // 45 84 d2 : test r10b, r10b
case 0xdb8548: // 48 85 db : test rbx, rbx
case 0xdb854d: // 4d 85 db : test r11, r11
case 0xdc8b4c: // 4c 8b dc : mov r11, rsp
- case 0xe0e483: // 83 e4 e0 : and esp, 0xFFFFFFE0
case 0xe48548: // 48 85 e4 : test rsp, rsp
case 0xe4854d: // 4d 85 e4 : test r12, r12
+ case 0xc88948: // 48 89 c8 : mov rax, rcx
+ case 0xcb8948: // 48 89 cb : mov rbx, rcx
+ case 0xd08948: // 48 89 d0 : mov rax, rdx
+ case 0xd18948: // 48 89 d1 : mov rcx, rdx
+ case 0xd38948: // 48 89 d3 : mov rbx, rdx
case 0xe58948: // 48 89 e5 : mov rbp, rsp
case 0xed8548: // 48 85 ed : test rbp, rbp
+ case 0xc88949: // 49 89 c8 : mov r8, rcx
+ case 0xc98949: // 49 89 c9 : mov r9, rcx
+ case 0xca8949: // 49 89 ca : mov r10, rcx
+ case 0xd08949: // 49 89 d0 : mov r8, rdx
+ case 0xd18949: // 49 89 d1 : mov r9, rdx
+ case 0xd28949: // 49 89 d2 : mov r10, rdx
+ case 0xd38949: // 49 89 d3 : mov r11, rdx
case 0xed854d: // 4d 85 ed : test r13, r13
case 0xf6854d: // 4d 85 f6 : test r14, r14
case 0xff854d: // 4d 85 ff : test r15, r15
case 0x588948: // 48 89 58 XX : mov QWORD PTR[rax + XX], rbx
case 0xec8348: // 48 83 ec XX : sub rsp, XX
case 0xf88349: // 49 83 f8 XX : cmp r8, XX
+ case 0x488d49: // 49 8d 48 XX : lea rcx, [...]
+ case 0x048d4c: // 4c 8d 04 XX : lea r8, [...]
+ case 0x148d4e: // 4e 8d 14 XX : lea r10, [...]
+ case 0x398366: // 66 83 39 XX : cmp WORD PTR [rcx], XX
return 4;
+ case 0x441F0F: // 0F 1F 44 XX XX : nop DWORD PTR [...]
case 0x246483: // 83 64 24 XX YY : and DWORD PTR [rsp+XX], YY
return 5;
return 6;
case 0xec8148: // 48 81 EC XX XX XX XX : sub rsp, XXXXXXXX
+ case 0xc0c748: // 48 C7 C0 XX XX XX XX : mov rax, XX XX XX XX
return 7;
// clang-format off
case 0x798141: // 41 81 79 XX YY YY YY YY : cmp DWORD PTR [r9+YY], XX XX XX XX
case 0x7a8141: // 41 81 7a XX YY YY YY YY : cmp DWORD PTR [r10+YY], XX XX XX XX
case 0x7b8141: // 41 81 7b XX YY YY YY YY : cmp DWORD PTR [r11+YY], XX XX XX XX
- case 0x7c8141: // 41 81 7c XX YY YY YY YY : cmp DWORD PTR [r12+YY], XX XX XX XX
case 0x7d8141: // 41 81 7d XX YY YY YY YY : cmp DWORD PTR [r13+YY], XX XX XX XX
case 0x7e8141: // 41 81 7e XX YY YY YY YY : cmp DWORD PTR [r14+YY], XX XX XX XX
case 0x7f8141: // 41 81 7f YY XX XX XX XX : cmp DWORD PTR [r15+YY], XX XX XX XX
// mov rax, QWORD PTR [rip + XXXXXXXX]
case 0x058d48: // 48 8d 05 XX XX XX XX :
// lea rax, QWORD PTR [rip + XXXXXXXX]
+ case 0x0d8948: // 48 89 0d XX XX XX XX :
+ // mov QWORD PTR [rip + XXXXXXXX], rcx
+ case 0x158948: // 48 89 15 XX XX XX XX :
+ // mov QWORD PTR [rip + XXXXXXXX], rdx
case 0x25ff48: // 48 ff 25 XX XX XX XX :
// rex.W jmp QWORD PTR [rip + XXXXXXXX]
case 0x158D4C: // 4c 8d 15 XX XX XX XX : lea r10, [rip + XX]
case 0x2444c7: // C7 44 24 XX YY YY YY YY
// mov dword ptr [rsp + XX], YYYYYYYY
return 8;
+
+ case 0x7c8141: // 41 81 7c ZZ YY XX XX XX XX
+ // cmp DWORD PTR [reg+reg*n+YY], XX XX XX XX
+ return 9;
}
switch (*(u32*)(address)) {
+ case 0x01b60f44: // 44 0f b6 01 : movzx r8d, BYTE PTR [rcx]
+ case 0x09b60f44: // 44 0f b6 09 : movzx r9d, BYTE PTR [rcx]
+ case 0x0ab60f44: // 44 0f b6 0a : movzx r8d, BYTE PTR [rdx]
+ case 0x11b60f44: // 44 0f b6 11 : movzx r10d, BYTE PTR [rcx]
case 0x1ab60f44: // 44 0f b6 1a : movzx r11d, BYTE PTR [rdx]
return 4;
case 0x24448b48: // 48 8b 44 24 XX : mov rax, QWORD ptr [rsp + XX]
return 5;
case 0x24648348: // 48 83 64 24 XX YY : and QWORD PTR [rsp + XX], YY
return 6;
+ case 0x24A48D48: // 48 8D A4 24 XX XX XX XX : lea rsp, [rsp + XX XX XX XX]
+ return 8;
+ }
+
+ switch (0xFFFFFFFFFFULL & *(u64 *)(address)) {
+ case 0xC07E0F4866: // 66 48 0F 7E C0 : movq rax, xmm0
+ return 5;
}
#else
case 0x5D8B: // 8B 5D XX : mov ebx, dword ptr [ebp + XX]
case 0x7D8B: // 8B 7D XX : mov edi, dword ptr [ebp + XX]
case 0x758B: // 8B 75 XX : mov esi, dword ptr [ebp + XX]
- case 0xEC83: // 83 EC XX : sub esp, XX
case 0x75FF: // FF 75 XX : push dword ptr [ebp + XX]
return 3;
case 0xC1F7: // F7 C1 XX YY ZZ WW : test ecx, WWZZYYXX
- case 0x25FF: // FF 25 XX YY ZZ WW : jmp dword ptr ds:[WWZZYYXX]
return 6;
case 0x3D83: // 83 3D XX YY ZZ WW TT : cmp TT, WWZZYYXX
return 7;
return 0;
}
+size_t TestOnlyGetInstructionSize(uptr address, size_t *rel_offset) {
+ return GetInstructionSize(address, rel_offset);
+}
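+// Illustrative expectations (in the style of the unittests): decoding the
+// bytes 48 89 e5 (mov rbp, rsp) on x86-64 should return 3 with *rel_offset
+// left at 0, while the RIP-relative 48 8b 05 XX XX XX XX should return 7
+// with *rel_offset == 3.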
+
// Returns 0 on error.
static size_t RoundUpToInstrBoundary(size_t size, uptr address) {
size_t cursor = 0;
// this will be untrue if relocated_offset \notin [-2**31, 2**31)
s64 delta = to - from;
s64 relocated_offset = *(s32 *)(to + cursor + rel_offset) - delta;
- if (-0x8000'0000ll > relocated_offset || relocated_offset > 0x7FFF'FFFFll)
+ if (-0x8000'0000ll > relocated_offset ||
+ relocated_offset > 0x7FFF'FFFFll) {
+ ReportError(
+ "interception_win: CopyInstructions relocated_offset %lld outside "
+ "32-bit range\n",
+ (long long)relocated_offset);
return false;
+ }
# else
// on 32-bit, the relative offset will always be correct
s32 delta = to - from;
"libc++.dll", // libc++
"libunwind.dll", // libunwind
# endif
- // NTDLL should go last as it exports some functions that we should
- // override in the CRT [presumably only used internally].
+ // NTDLL must go last as it gets special treatment in OverrideFunction.
"ntdll.dll",
NULL
};
for (DWORD i = 0; i < exports->NumberOfNames; i++) {
RVAPtr<char> name(module, names[i]);
- if (!strcmp(func_name, name)) {
+ if (!_strcmp(func_name, name)) {
DWORD index = ordinals[i];
RVAPtr<char> func(module, functions[index]);
// exported directory.
char function_name[256];
size_t function_name_length = _strlen(func);
- if (function_name_length >= sizeof(function_name) - 1)
+ if (function_name_length >= sizeof(function_name) - 1) {
+ ReportError("interception_win: func too long: '%s'\n", (char *)func);
InterceptionFailed();
+ }
_memcpy(function_name, func, function_name_length);
function_name[function_name_length] = '\0';
char* separator = _strchr(function_name, '.');
- if (!separator)
+ if (!separator) {
+ ReportError("interception_win: no separator in '%s'\n",
+ function_name);
InterceptionFailed();
+ }
*separator = '\0';
void* redirected_module = GetModuleHandleA(function_name);
- if (!redirected_module)
+ if (!redirected_module) {
+ ReportError("interception_win: GetModuleHandleA failed for '%s'\n",
+ function_name);
InterceptionFailed();
+ }
return InternalGetProcAddress(redirected_module, separator + 1);
}
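// Worked example (a real forwarder, used here for illustration): kernel32
// exports "EnterCriticalSection" as the forwarder string
// "NTDLL.RtlEnterCriticalSection"; the code above splits it at '.', resolves
// the "NTDLL" module handle, and recurses to look up
// "RtlEnterCriticalSection" there.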
bool OverrideFunction(
const char *func_name, uptr new_func, uptr *orig_old_func) {
+ static const char *kNtDllIgnore[] = {
+ "memcmp", "memcpy", "memmove", "memset"
+ };
+
bool hooked = false;
void **DLLs = InterestingDLLsAvailable();
for (size_t i = 0; DLLs[i]; ++i) {
+ if (DLLs[i + 1] == nullptr) {
+ // This is the last DLL, i.e. NTDLL. It exports some functions that
+ // we only want to override in the CRT.
+ for (const char *ignored : kNtDllIgnore) {
+ if (_strcmp(func_name, ignored) == 0)
+ return hooked;
+ }
+ }
+
uptr func_addr = InternalGetProcAddress(DLLs[i], func_name);
if (func_addr &&
OverrideFunction(func_addr, new_func, orig_old_func)) {
RVAPtr<IMAGE_IMPORT_BY_NAME> import_by_name(
module, name_table->u1.ForwarderString);
const char *funcname = &import_by_name->Name[0];
- if (strcmp(funcname, function_name) == 0)
+ if (_strcmp(funcname, function_name) == 0)
break;
}
}
// Exposed for unittests
void TestOnlyReleaseTrampolineRegions();
+// Exposed for unittests
+SIZE_T TestOnlyGetInstructionSize(uptr address, SIZE_T *rel_offset);
+
} // namespace __interception
#if defined(INTERCEPTION_DYNAMIC_CRT)
Deallocate(p);
}
+void lsan_free_sized(void *p, uptr) { Deallocate(p); }
+
+void lsan_free_aligned_sized(void *p, uptr, uptr) { Deallocate(p); }
+
void *lsan_realloc(void *p, uptr size, const StackTrace &stack) {
return SetErrnoOnNull(Reallocate(stack, p, size, 1));
}
#if !SANITIZER_CAN_USE_ALLOCATOR64
template <typename AddressSpaceViewTy>
struct AP32 {
- static const uptr kSpaceBeg = 0;
+ static const uptr kSpaceBeg = SANITIZER_MMAP_BEGIN;
static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
static const uptr kMetadataSize = sizeof(ChunkMetadata);
typedef __sanitizer::CompactSizeClassMap SizeClassMap;
const uptr kAllocatorSpace = 0x600000000000ULL;
const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
using LSanSizeClassMap = DefaultSizeClassMap;
+# elif SANITIZER_ANDROID && defined(__aarch64__)
+const uptr kAllocatorSpace = 0x3000000000ULL;
+const uptr kAllocatorSize = 0x2000000000ULL;
+using LSanSizeClassMap = VeryCompactSizeClassMap;
# else
const uptr kAllocatorSpace = 0x500000000000ULL;
const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack);
void *lsan_malloc(uptr size, const StackTrace &stack);
void lsan_free(void *p);
+void lsan_free_sized(void *p, uptr size);
+void lsan_free_aligned_sized(void *p, uptr alignment, uptr size);
void *lsan_realloc(void *p, uptr size, const StackTrace &stack);
void *lsan_reallocarray(void *p, uptr nmemb, uptr size,
const StackTrace &stack);
# endif
// TLS leak in some glibc versions, described in
// https://sourceware.org/bugzilla/show_bug.cgi?id=12650.
- "leak:*tls_get_addr*\n";
+ "leak:*tls_get_addr*\n"
+ "leak:*dlerror*\n";
void InitializeSuppressions() {
CHECK_EQ(nullptr, suppression_ctx);
# if SANITIZER_FUCHSIA
// Fuchsia handles all threads together with its own callback.
-static void ProcessThreads(SuspendedThreadsList const &, Frontier *, tid_t,
+static void ProcessThreads(SuspendedThreadsList const &, Frontier *, ThreadID,
uptr) {}
# else
// Scans thread data (stacks and TLS) for heap pointers.
template <class Accessor>
-static void ProcessThread(tid_t os_id, uptr sp,
+static void ProcessThread(ThreadID os_id, uptr sp,
const InternalMmapVector<uptr> ®isters,
InternalMmapVector<Range> &extra_ranges,
Frontier *frontier, Accessor &accessor) {
}
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
- Frontier *frontier, tid_t caller_tid,
+ Frontier *frontier, ThreadID caller_tid,
uptr caller_sp) {
- InternalMmapVector<tid_t> done_threads;
+ InternalMmapVector<ThreadID> done_threads;
InternalMmapVector<uptr> registers;
InternalMmapVector<Range> extra_ranges;
for (uptr i = 0; i < suspended_threads.ThreadCount(); i++) {
registers.clear();
extra_ranges.clear();
- const tid_t os_id = suspended_threads.GetThreadID(i);
+ const ThreadID os_id = suspended_threads.GetThreadID(i);
uptr sp = 0;
PtraceRegistersStatus have_registers =
suspended_threads.GetRegistersAndSP(i, ®isters, &sp);
if (have_registers != REGISTERS_AVAILABLE) {
- Report("Unable to get registers from thread %llu.\n", os_id);
+ VReport(1, "Unable to get registers from thread %llu.\n", os_id);
// If unable to get SP, consider the entire stack to be reachable unless
// GetRegistersAndSP failed with ESRCH.
if (have_registers == REGISTERS_UNAVAILABLE_FATAL)
if (flags()->use_detached) {
CopyMemoryAccessor accessor;
- InternalMmapVector<tid_t> known_threads;
+ InternalMmapVector<ThreadID> known_threads;
GetRunningThreadsLocked(&known_threads);
Sort(done_threads.data(), done_threads.size());
- for (tid_t os_id : known_threads) {
+ for (ThreadID os_id : known_threads) {
registers.clear();
extra_ranges.clear();
// Sets the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads,
- Frontier *frontier, tid_t caller_tid,
+ Frontier *frontier, ThreadID caller_tid,
uptr caller_sp) {
const InternalMmapVector<u32> &suppressed_stacks =
GetSuppressionContext()->GetSortedSuppressedStacks();
static bool ReportUnsuspendedThreads(
const SuspendedThreadsList &suspended_threads) {
- InternalMmapVector<tid_t> threads(suspended_threads.ThreadCount());
+ InternalMmapVector<ThreadID> threads(suspended_threads.ThreadCount());
for (uptr i = 0; i < suspended_threads.ThreadCount(); ++i)
threads[i] = suspended_threads.GetThreadID(i);
Sort(threads.data(), threads.size());
- InternalMmapVector<tid_t> known_threads;
+ InternalMmapVector<ThreadID> known_threads;
GetRunningThreadsLocked(&known_threads);
bool succeeded = true;
succeeded = false;
Report(
"Running thread %zu was not suspended. False leaks are possible.\n",
- os_id);
+ (usize)os_id);
}
}
return succeeded;
// where leak checking is initiated from a non-main thread).
void EnsureMainThreadIDIsCorrect();
-bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
+bool GetThreadRangesLocked(ThreadID os_id, uptr *stack_begin, uptr *stack_end,
uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
uptr *cache_end, DTLS **dtls);
void GetAllThreadAllocatorCachesLocked(InternalMmapVector<uptr> *caches);
void GetThreadExtraStackRangesLocked(InternalMmapVector<Range> *ranges);
-void GetThreadExtraStackRangesLocked(tid_t os_id,
+void GetThreadExtraStackRangesLocked(ThreadID os_id,
InternalMmapVector<Range> *ranges);
void GetAdditionalThreadContextPtrsLocked(InternalMmapVector<uptr> *ptrs);
-void GetRunningThreadsLocked(InternalMmapVector<tid_t> *threads);
+void GetRunningThreadsLocked(InternalMmapVector<ThreadID> *threads);
void PrintThreads();
//// --------------------------------------------------------------------------
struct CheckForLeaksParam {
Frontier frontier;
LeakedChunks leaks;
- tid_t caller_tid;
+ ThreadID caller_tid;
uptr caller_sp;
bool success = false;
};
return 0;
}
-#if SANITIZER_ANDROID && __ANDROID_API__ < 21
-extern "C" __attribute__((weak)) int dl_iterate_phdr(
- int (*)(struct dl_phdr_info *, size_t, void *), void *);
-#endif
-
// Scans global variables for heap pointers.
void ProcessGlobalRegions(Frontier *frontier) {
if (!flags()->use_globals) return;
using namespace __lsan;
+namespace __sanitizer {
+// LSan doesn't need to do anything else special in the startup hook.
+void EarlySanitizerInit() {}
+} // namespace __sanitizer
+
namespace __lsan {
void LsanOnDeadlySignal(int signo, void *siginfo, void *context) {}
lsan_free(p);
}
+# if SANITIZER_INTERCEPT_FREE_SIZED
+INTERCEPTOR(void, free_sized, void *p, uptr size) {
+ if (UNLIKELY(!p))
+ return;
+ if (DlsymAlloc::PointerIsMine(p))
+ return DlsymAlloc::Free(p);
+ ENSURE_LSAN_INITED;
+ lsan_free_sized(p, size);
+}
+# define LSAN_MAYBE_INTERCEPT_FREE_SIZED INTERCEPT_FUNCTION(free_sized)
+# else
+# define LSAN_MAYBE_INTERCEPT_FREE_SIZED
+# endif
+
+# if SANITIZER_INTERCEPT_FREE_ALIGNED_SIZED
+INTERCEPTOR(void, free_aligned_sized, void *p, uptr alignment, uptr size) {
+ if (UNLIKELY(!p))
+ return;
+ if (DlsymAlloc::PointerIsMine(p))
+ return DlsymAlloc::Free(p);
+ ENSURE_LSAN_INITED;
+ lsan_free_aligned_sized(p, alignment, size);
+}
+# define LSAN_MAYBE_INTERCEPT_FREE_ALIGNED_SIZED \
+ INTERCEPT_FUNCTION(free_aligned_sized)
+# else
+# define LSAN_MAYBE_INTERCEPT_FREE_ALIGNED_SIZED
+# endif
+
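+// Illustrative C23-style call sites the two interceptors above cover
+// (assuming the libc actually provides free_sized / free_aligned_sized):
+//   void *p = malloc(32);
+//   free_sized(p, 32);
+//   void *q = aligned_alloc(64, 128);
+//   free_aligned_sized(q, 64, 128);
+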
INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) {
if (DlsymAlloc::Use())
return DlsymAlloc::Callocate(nmemb, size);
GET_STACK_TRACE_MALLOC;
return lsan_valloc(size, stack);
}
+#else
+# define LSAN_MAYBE_INTERCEPT_FREE_SIZED
+# define LSAN_MAYBE_INTERCEPT_FREE_ALIGNED_SIZED
#endif // !SANITIZER_APPLE
#if SANITIZER_INTERCEPT_MEMALIGN
#endif
#if SANITIZER_INTERCEPT_THR_EXIT
-INTERCEPTOR(void, thr_exit, tid_t *state) {
+INTERCEPTOR(void, thr_exit, ThreadID *state) {
ENSURE_LSAN_INITED;
ThreadFinish();
REAL(thr_exit)(state);
}
-#define LSAN_MAYBE_INTERCEPT_THR_EXIT INTERCEPT_FUNCTION(thr_exit)
+# define LSAN_MAYBE_INTERCEPT_THR_EXIT INTERCEPT_FUNCTION(thr_exit)
#else
#define LSAN_MAYBE_INTERCEPT_THR_EXIT
#endif
INTERCEPT_FUNCTION(malloc);
INTERCEPT_FUNCTION(free);
+ LSAN_MAYBE_INTERCEPT_FREE_SIZED;
+ LSAN_MAYBE_INTERCEPT_FREE_ALIGNED_SIZED;
LSAN_MAYBE_INTERCEPT_CFREE;
INTERCEPT_FUNCTION(calloc);
INTERCEPT_FUNCTION(realloc);
void *p = lsan_valloc(size, stack)
#define COMMON_MALLOC_FREE(ptr) \
lsan_free(ptr)
-#define COMMON_MALLOC_SIZE(ptr) \
- uptr size = lsan_mz_size(ptr)
-#define COMMON_MALLOC_FILL_STATS(zone, stats)
-#define COMMON_MALLOC_REPORT_UNKNOWN_REALLOC(ptr, zone_ptr, zone_name) \
- (void)zone_name; \
- Report("mz_realloc(%p) -- attempting to realloc unallocated memory.\n", ptr);
-#define COMMON_MALLOC_NAMESPACE __lsan
-#define COMMON_MALLOC_HAS_ZONE_ENUMERATOR 0
-#define COMMON_MALLOC_HAS_EXTRA_INTROSPECTION_INIT 0
+# define COMMON_MALLOC_FREE_SIZED(ptr, size) lsan_free_sized(ptr, size)
+# define COMMON_MALLOC_FREE_ALIGNED_SIZED(ptr, alignment, size) \
+ lsan_free_aligned_sized(ptr, alignment, size)
+# define COMMON_MALLOC_SIZE(ptr) uptr size = lsan_mz_size(ptr)
+# define COMMON_MALLOC_FILL_STATS(zone, stats)
+# define COMMON_MALLOC_REPORT_UNKNOWN_REALLOC(ptr, zone_ptr, zone_name) \
+ (void)zone_name; \
+ Report("mz_realloc(%p) -- attempting to realloc unallocated memory.\n", \
+ ptr);
+# define COMMON_MALLOC_NAMESPACE __lsan
+# define COMMON_MALLOC_HAS_ZONE_ENUMERATOR 0
+# define COMMON_MALLOC_HAS_EXTRA_INTROSPECTION_INIT 0
-#include "sanitizer_common/sanitizer_malloc_mac.inc"
+# include "sanitizer_common/sanitizer_malloc_mac.inc"
#endif // SANITIZER_APPLE
dtls_ = args->dtls;
}
-void ThreadStart(u32 tid, tid_t os_id, ThreadType thread_type) {
+void ThreadStart(u32 tid, ThreadID os_id, ThreadType thread_type) {
OnStartedArgs args;
GetThreadStackAndTls(tid == kMainTid, &args.stack_begin, &args.stack_end,
&args.tls_begin, &args.tls_end);
ThreadContextLsanBase::ThreadStart(tid, os_id, thread_type, &args);
}
-bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
+bool GetThreadRangesLocked(ThreadID os_id, uptr *stack_begin, uptr *stack_end,
uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
uptr *cache_end, DTLS **dtls) {
ThreadContext *context = static_cast<ThreadContext *>(
DTLS *dtls_ = nullptr;
};
-void ThreadStart(u32 tid, tid_t os_id,
+void ThreadStart(u32 tid, ThreadID os_id,
ThreadType thread_type = ThreadType::Regular);
} // namespace __lsan
return thread_registry->CreateThread(0, detached, parent_tid, arg);
}
-void ThreadContextLsanBase::ThreadStart(u32 tid, tid_t os_id,
+void ThreadContextLsanBase::ThreadStart(u32 tid, ThreadID os_id,
ThreadType thread_type, void *arg) {
thread_registry->StartThread(tid, os_id, thread_type, arg);
}
///// Interface to the common LSan module. /////
-void GetThreadExtraStackRangesLocked(tid_t os_id,
+void GetThreadExtraStackRangesLocked(ThreadID os_id,
InternalMmapVector<Range> *ranges) {}
void GetThreadExtraStackRangesLocked(InternalMmapVector<Range> *ranges) {}
return thread_registry;
}
-void GetRunningThreadsLocked(InternalMmapVector<tid_t> *threads) {
+void GetRunningThreadsLocked(InternalMmapVector<ThreadID> *threads) {
GetLsanThreadRegistryLocked()->RunCallbackForEachThreadLocked(
[](ThreadContextBase *tctx, void *threads) {
if (tctx->status == ThreadStatusRunning) {
- reinterpret_cast<InternalMmapVector<tid_t> *>(threads)->push_back(
+ reinterpret_cast<InternalMmapVector<ThreadID> *>(threads)->push_back(
tctx->os_id);
}
},
uptr cache_end() { return cache_end_; }
// The argument is passed on to the subclass's OnStarted member function.
- static void ThreadStart(u32 tid, tid_t os_id, ThreadType thread_type,
+ static void ThreadStart(u32 tid, ThreadID os_id, ThreadType thread_type,
void *onstarted_arg);
protected:
static void *Allocate(uptr size_in_bytes, uptr align = kWordSize) {
void *ptr = InternalAlloc(size_in_bytes, nullptr, align);
CHECK(internal_allocator()->FromPrimary(ptr));
- Details::OnAllocate(ptr,
- internal_allocator()->GetActuallyAllocatedSize(ptr));
+ Details::OnAllocate(ptr, GetSize(ptr));
return ptr;
}
static void *Callocate(usize nmemb, usize size) {
void *ptr = InternalCalloc(nmemb, size);
CHECK(internal_allocator()->FromPrimary(ptr));
- Details::OnAllocate(ptr,
- internal_allocator()->GetActuallyAllocatedSize(ptr));
+ Details::OnAllocate(ptr, GetSize(ptr));
return ptr;
}
static void Free(void *ptr) {
- uptr size = internal_allocator()->GetActuallyAllocatedSize(ptr);
+ uptr size = GetSize(ptr);
Details::OnFree(ptr, size);
InternalFree(ptr);
}
Free(ptr);
return nullptr;
}
- uptr size = internal_allocator()->GetActuallyAllocatedSize(ptr);
+ uptr size = GetSize(ptr);
uptr memcpy_size = Min(new_size, size);
void *new_ptr = Allocate(new_size);
if (new_ptr)
return Realloc(ptr, count * size);
}
+ static uptr GetSize(void *ptr) {
+ return internal_allocator()->GetActuallyAllocatedSize(ptr);
+ }
+
static void OnAllocate(const void *ptr, uptr size) {}
static void OnFree(const void *ptr, uptr size) {}
};
typedef CompactSizeClassMap InternalSizeClassMap;
struct AP32 {
- static const uptr kSpaceBeg = 0;
+ static const uptr kSpaceBeg = SANITIZER_MMAP_BEGIN;
static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
static const uptr kMetadataSize = 0;
typedef InternalSizeClassMap SizeClassMap;
DCHECK_GT(c->count, 0);
}
void *res = c->batch[--c->count];
- PREFETCH(c->batch[c->count - 1]);
+ PREFETCH(c->batch[c->count > 0 ? c->count - 1 : 0]);
stats_.Add(AllocatorStatAllocated, c->class_size);
return res;
}
// ~(uptr)0.
void Init(s32 release_to_os_interval_ms, uptr heap_start = 0) {
uptr TotalSpaceSize = kSpaceSize + AdditionalSize();
+
+ uptr MaxAddr = GetMaxUserVirtualAddress();
+ // VReport does not call the sanitizer allocator.
+ VReport(3, "Max user virtual address: 0x%zx\n", MaxAddr);
+ VReport(3, "Total space size for primary allocator: 0x%zx\n",
+ TotalSpaceSize);
+ // TODO: revise the check if we ever configure sanitizers to deliberately
+ // map beyond the 2**48 barrier (note that Linux pretends the VMA is
+ // limited to 48-bit for backwards compatibility, but allows apps to
+ // explicitly specify an address beyond that).
+ if (heap_start + TotalSpaceSize >= MaxAddr) {
+ // We can't easily adjust the requested heap size, because kSpaceSize is
+ // const (for optimization) and used throughout the code.
+ VReport(0, "Error: heap size %zx exceeds max user virtual address %zx\n",
+ TotalSpaceSize, MaxAddr);
+ VReport(
+ 0, "Try using a kernel that allows a larger virtual address space\n");
+ }
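+ // Illustrative arithmetic (values from the common x86-64 Linux layout used
+ // elsewhere in this patch): a 4 TB space at 0x600000000000 ends at
+ // 0x640000000000, comfortably below the 47-bit user-space ceiling of
+ // 0x7fffffffffff, so the error above fires only on smaller-VMA kernels.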
PremappedHeap = heap_start != 0;
if (PremappedHeap) {
CHECK(!kUsingConstantSpaceBeg);
// recoverable.
if (UNLIKELY(!EnsureFreeArraySpace(region, region_beg,
new_num_freed_chunks))) {
- Report("FATAL: Internal error: %s's allocator exhausted the free list "
- "space for size class %zd (%zd bytes).\n", SanitizerToolName,
- class_id, ClassIdToSize(class_id));
+ Report(
+ "FATAL: Internal error: %s's allocator exhausted the free list "
+ "space for size class %zu (%zu bytes).\n",
+ SanitizerToolName, class_id, ClassIdToSize(class_id));
Die();
}
for (uptr i = 0; i < n_chunks; i++)
if (!region->exhausted) {
region->exhausted = true;
Printf("%s: Out of memory. ", SanitizerToolName);
- Printf("The process has exhausted %zuMB for size class %zu.\n",
- kRegionSize >> 20, ClassIdToSize(class_id));
+ Printf(
+ "The process has exhausted %zu MB for size class %zu (%zu bytes).\n",
+ kRegionSize >> 20, class_id, ClassIdToSize(class_id));
}
return true;
}
#ifndef SANITIZER_ATOMIC_CLANG_H
#define SANITIZER_ATOMIC_CLANG_H
+// Helper to suppress warnings related to 8-byte atomic accesses when the target
+// is 32-bit AIX (where such accesses use libatomic).
+#if defined(_AIX) && !defined(__powerpc64__) && defined(__clang__)
+# define SANITIZER_IGNORE_ATOMIC_ALIGNMENT_BEGIN \
+ _Pragma("clang diagnostic push") \
+ _Pragma("clang diagnostic ignored \"-Watomic-alignment\"")
+# define SANITIZER_IGNORE_ATOMIC_ALIGNMENT_END _Pragma("clang diagnostic pop")
+#else
+# define SANITIZER_IGNORE_ATOMIC_ALIGNMENT_BEGIN
+# define SANITIZER_IGNORE_ATOMIC_ALIGNMENT_END
+#endif
+
namespace __sanitizer {
// We use the compiler builtin atomic operations for loads and stores, which
#endif
}
+SANITIZER_IGNORE_ATOMIC_ALIGNMENT_BEGIN
template <typename T>
inline typename T::Type atomic_load(const volatile T *a, memory_order mo) {
DCHECK(mo == memory_order_relaxed || mo == memory_order_consume ||
return atomic_compare_exchange_strong(a, cmp, xchg, mo);
}
+SANITIZER_IGNORE_ATOMIC_ALIGNMENT_END
+
} // namespace __sanitizer
#undef ATOMIC_ORDER
uptr GetMaxVirtualAddress();
uptr GetMaxUserVirtualAddress();
// Threads
-tid_t GetTid();
-int TgKill(pid_t pid, tid_t tid, int sig);
+ThreadID GetTid();
+int TgKill(pid_t pid, ThreadID tid, int sig);
uptr GetThreadSelf();
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
uptr *stack_bottom);
// Used to check if we can map shadow memory to a fixed location.
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end);
-// Releases memory pages entirely within the [beg, end] address range. Noop if
+// Releases memory pages entirely within the [beg, end) address range. Noop if
// the provided range does not contain at least one entire page.
void ReleaseMemoryPagesToOS(uptr beg, uptr end);
void IncreaseTotalMmap(uptr size);
void SetAlternateSignalStack();
void UnsetAlternateSignalStack();
+bool IsSignalHandlerFromSanitizer(int signum);
+bool SetSignalHandlerFromSanitizer(int signum, bool new_state);
+
// Construct a one-line string:
// SUMMARY: SanitizerToolName: error_message
// and pass it to __sanitizer_report_error_summary.
return LeastSignificantSetBitIndex(x);
}
+inline bool IntervalsAreSeparate(uptr start1, uptr end1, uptr start2,
+ uptr end2) {
+ CHECK_LE(start1, end1);
+ CHECK_LE(start2, end2);
+ return (end1 < start2) || (end2 < start1);
+}
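+// Illustrative: IntervalsAreSeparate(0, 10, 11, 20) is true, while
+// IntervalsAreSeparate(0, 10, 10, 20) is false; the endpoints are treated
+// as inclusive, so intervals that merely touch are not separate.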
+
// Don't use std::min, std::max or std::swap, to minimize dependency
// on libstdc++.
template <class T>
kModuleArchARMV7S,
kModuleArchARMV7K,
kModuleArchARM64,
+ kModuleArchARM64E,
kModuleArchLoongArch64,
kModuleArchRISCV64,
kModuleArchHexagon
return "armv7k";
case kModuleArchARM64:
return "arm64";
+ case kModuleArchARM64E:
+ return "arm64e";
case kModuleArchLoongArch64:
return "loongarch64";
case kModuleArchRISCV64:
// Callback type for iterating over a set of memory ranges.
typedef void (*RangeIteratorCallback)(uptr begin, uptr end, void *arg);
-enum AndroidApiLevel {
- ANDROID_NOT_ANDROID = 0,
- ANDROID_KITKAT = 19,
- ANDROID_LOLLIPOP_MR1 = 22,
- ANDROID_POST_LOLLIPOP = 23
-};
-
void WriteToSyslog(const char *buffer);
#if defined(SANITIZER_WINDOWS) && defined(_MSC_VER) && !defined(__clang__)
inline void SetAbortMessage(const char *) {}
#endif
-#if SANITIZER_ANDROID
-void SanitizerInitializeUnwinder();
-AndroidApiLevel AndroidGetApiLevel();
-#else
-inline void AndroidLogWrite(const char *buffer_unused) {}
-inline void SanitizerInitializeUnwinder() {}
-inline AndroidApiLevel AndroidGetApiLevel() { return ANDROID_NOT_ANDROID; }
-#endif
-
inline uptr GetPthreadDestructorIterations() {
-#if SANITIZER_ANDROID
- return (AndroidGetApiLevel() == ANDROID_LOLLIPOP_MR1) ? 8 : 4;
-#elif SANITIZER_POSIX
+#if SANITIZER_POSIX
return 4;
#else
// Unused on Windows.
#endif
#if SANITIZER_INTERCEPT_STRCMP || SANITIZER_INTERCEPT_MEMCMP
-static inline int CharCmpX(unsigned char c1, unsigned char c2) {
+[[maybe_unused]] static inline int CharCmpX(unsigned char c1,
+ unsigned char c2) {
return (c1 == c2) ? 0 : (c1 < c2) ? -1 : 1;
}
#endif
#define INIT_FREXP
#endif // SANITIZER_INTERCEPT_FREXP
-#if SANITIZER_INTERCEPT_FREXPF_FREXPL
+#if SANITIZER_INTERCEPT_FREXPF
INTERCEPTOR(float, frexpf, float x, int *exp) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, frexpf, x, exp);
return res;
}
+# define INIT_FREXPF COMMON_INTERCEPT_FUNCTION(frexpf);
+#else
+# define INIT_FREXPF
+#endif
+
+#if SANITIZER_INTERCEPT_FREXPL
INTERCEPTOR(long double, frexpl, long double x, int *exp) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, frexpl, x, exp);
return res;
}
-#define INIT_FREXPF_FREXPL \
- COMMON_INTERCEPT_FUNCTION(frexpf); \
- COMMON_INTERCEPT_FUNCTION_LDBL(frexpl)
+# define INIT_FREXPL COMMON_INTERCEPT_FUNCTION_LDBL(frexpl)
#else
-#define INIT_FREXPF_FREXPL
-#endif // SANITIZER_INTERCEPT_FREXPF_FREXPL
+# define INIT_FREXPL
+#endif
#if SI_POSIX
static void write_iovec(void *ctx, struct __sanitizer_iovec *iovec,
#endif
#if SANITIZER_INTERCEPT_PRCTL
-INTERCEPTOR(int, prctl, int option, unsigned long arg2, unsigned long arg3,
- unsigned long arg4, unsigned long arg5) {
+
+# if defined(__aarch64__)
+// https://llvm.org/docs/PointerAuth.html
+// AArch64 is currently the only architecture with full PAC support.
+// Avoid adding PAC instructions to prevent crashes caused by
+// prctl(PR_PAC_RESET_KEYS, ...). Since PR_PAC_RESET_KEYS resets the
+// authentication key, using the old key afterward will lead to a crash.
+
+# if defined(__ARM_FEATURE_BTI_DEFAULT)
+# define BRANCH_PROTECTION_ATTRIBUTE \
+ __attribute__((target("branch-protection=bti")))
+# else
+# define BRANCH_PROTECTION_ATTRIBUTE \
+ __attribute__((target("branch-protection=none")))
+# endif
+
+# define PRCTL_INTERCEPTOR(ret_type, func, ...) \
+ DEFINE_REAL(ret_type, func, __VA_ARGS__) \
+ DECLARE_WRAPPER(ret_type, func, __VA_ARGS__) \
+ extern "C" INTERCEPTOR_ATTRIBUTE BRANCH_PROTECTION_ATTRIBUTE ret_type \
+ WRAP(func)(__VA_ARGS__)
+
+# else
+# define PRCTL_INTERCEPTOR INTERCEPTOR
+# endif
+
+PRCTL_INTERCEPTOR(int, prctl, int option, unsigned long arg2,
+ unsigned long arg3, unsigned long arg4, unsigned long arg5) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, prctl, option, arg2, arg3, arg4, arg5);
static const int PR_SET_NAME = 15;
static const int PR_SET_SECCOMP = 22;
static const int SECCOMP_MODE_FILTER = 2;
# endif
- if (option == PR_SET_VMA && arg2 == 0UL) {
+ if (option == PR_SET_VMA && arg2 == 0UL && arg5 != 0UL) {
char *name = (char *)arg5;
COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);
}
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, (u64 *)(arg5), sizeof(u64));
} else if (res != -1 && option == PR_GET_PDEATHSIG) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, (u64 *)(arg2), sizeof(int));
-# if !SANITIZER_ANDROID
+# if SANITIZER_GLIBC
} else if (res != -1 && option == PR_SET_SECCOMP &&
arg2 == SECCOMP_MODE_FILTER) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, (u64 *)(arg3), struct_sock_fprog_sz);
}
return res;
}
-#define INIT_PRCTL COMMON_INTERCEPT_FUNCTION(prctl)
+# define INIT_PRCTL COMMON_INTERCEPT_FUNCTION(prctl)
#else
#define INIT_PRCTL
#endif // SANITIZER_INTERCEPT_PRCTL
#if SANITIZER_INTERCEPT_LOCALTIME_AND_FRIENDS
static void unpoison_tm(void *ctx, __sanitizer_tm *tm) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, tm, sizeof(*tm));
-#if !SANITIZER_SOLARIS
+// AIX tm struct does not have tm_zone field.
+# if !SANITIZER_SOLARIS && !SANITIZER_AIX
if (tm->tm_zone) {
// Cannot use COMMON_INTERCEPTOR_WRITE_RANGE here, because tm->tm_zone
// can point to shared memory and tsan would report a data race.
VSPRINTF_INTERCEPTOR_IMPL(vsprintf, str, format, ap)
#endif
+# if SANITIZER_INTERCEPT_VASPRINTF
INTERCEPTOR(int, vasprintf, char **strp, const char *format, va_list ap)
VASPRINTF_INTERCEPTOR_IMPL(vasprintf, strp, format, ap)
+# endif
-#if SANITIZER_INTERCEPT_ISOC99_PRINTF
+# if SANITIZER_INTERCEPT_ISOC99_PRINTF
INTERCEPTOR(int, __isoc99_vprintf, const char *format, va_list ap)
VPRINTF_INTERCEPTOR_IMPL(__isoc99_vprintf, format, ap)
FORMAT_INTERCEPTOR_IMPL(__snprintf_chk, vsnprintf, str, size, format)
#endif
+# if SANITIZER_INTERCEPT_ASPRINTF
INTERCEPTOR(int, asprintf, char **strp, const char *format, ...)
FORMAT_INTERCEPTOR_IMPL(asprintf, vasprintf, strp, format)
+# endif
-#if SANITIZER_INTERCEPT_ISOC99_PRINTF
+# if SANITIZER_INTERCEPT_ISOC99_PRINTF
INTERCEPTOR(int, __isoc99_printf, const char *format, ...)
FORMAT_INTERCEPTOR_IMPL(__isoc99_printf, __isoc99_vprintf, format)
#endif // SANITIZER_INTERCEPT_PRINTF
#if SANITIZER_INTERCEPT_PRINTF
-#define INIT_PRINTF \
- COMMON_INTERCEPT_FUNCTION_LDBL(printf); \
- COMMON_INTERCEPT_FUNCTION_LDBL(sprintf); \
- COMMON_INTERCEPT_FUNCTION_LDBL(snprintf); \
- COMMON_INTERCEPT_FUNCTION_LDBL(asprintf); \
- COMMON_INTERCEPT_FUNCTION_LDBL(fprintf); \
- COMMON_INTERCEPT_FUNCTION_LDBL(vprintf); \
- COMMON_INTERCEPT_FUNCTION_LDBL(vsprintf); \
- COMMON_INTERCEPT_FUNCTION_LDBL(vsnprintf); \
- COMMON_INTERCEPT_FUNCTION_LDBL(vasprintf); \
- COMMON_INTERCEPT_FUNCTION_LDBL(vfprintf);
+# define INIT_PRINTF_COMMON \
+ COMMON_INTERCEPT_FUNCTION_LDBL(printf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(sprintf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(snprintf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(fprintf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(vprintf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(vsprintf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(vsnprintf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(vfprintf);
+# if !SANITIZER_AIX
+// AIX does not have [v]asprintf.
+# define INIT_PRINTF_EXTRA \
+ COMMON_INTERCEPT_FUNCTION_LDBL(asprintf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(vasprintf);
+# else
+# define INIT_PRINTF_EXTRA
+# endif
+# define INIT_PRINTF INIT_PRINTF_COMMON INIT_PRINTF_EXTRA
#else
#define INIT_PRINTF
#endif
#define INIT_ISOC99_PRINTF
#endif
+#if SANITIZER_INTERCEPT_SETPROCTITLE
+INTERCEPTOR(void, setproctitle, const char *fmt, ...) {
+ void *ctx;
+ va_list ap;
+ va_start(ap, fmt);
+ COMMON_INTERCEPTOR_ENTER(ctx, setproctitle, fmt, ap);
+ if (common_flags()->check_printf)
+ printf_common(ctx, fmt, ap);
+ REAL(setproctitle)(fmt, ap);
+ va_end(ap);
+}
+# define INIT_SETPROCTITLE COMMON_INTERCEPT_FUNCTION(setproctitle);
+#else
+# define INIT_SETPROCTITLE
+#endif
+
#if SANITIZER_INTERCEPT_IOCTL
#include "sanitizer_common_interceptors_ioctl.inc"
#include "sanitizer_interceptors_ioctl_netbsd.inc"
#define INIT_CLOCK_GETCPUCLOCKID
#endif
+#if SANITIZER_INTERCEPT_TIMER_CREATE
+INTERCEPTOR(int, timer_create, __sanitizer_clockid_t clockid, void *sevp,
+ __sanitizer_timer_t *timer) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, timer_create, clockid, sevp, timer);
+ int res = REAL(timer_create)(clockid, sevp, timer);
+ if (!res && timer) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, timer, sizeof *timer);
+ }
+ return res;
+}
+
+INTERCEPTOR(int, timer_delete, __sanitizer_timer_t timer) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, timer_delete, timer);
+ int res = REAL(timer_delete)(timer);
+ return res;
+}
+
+INTERCEPTOR(int, timer_gettime, __sanitizer_timer_t timer,
+ struct __sanitizer_itimerspec *curr_value) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, timer_gettime, timer, curr_value);
+ int res = REAL(timer_gettime)(timer, curr_value);
+ if (!res && curr_value) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, curr_value, sizeof *curr_value);
+ }
+ return res;
+}
+
+INTERCEPTOR(int, timer_settime, __sanitizer_timer_t timer, int flags,
+ const struct __sanitizer_itimerspec *new_value,
+ struct __sanitizer_itimerspec *old_value) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, timer_settime, timer, flags, new_value,
+ old_value);
+ int res = REAL(timer_settime)(timer, flags, new_value, old_value);
+ if (!res) {
+ if (new_value)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, new_value, sizeof *new_value);
+ if (old_value)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, old_value, sizeof *old_value);
+ }
+ return res;
+}
+
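+// glibc gave the POSIX timer_* functions new symbol versions in 2.3.3;
+// binding the interceptors to that version targets the modern
+// implementations rather than the 2.2-era compat symbols.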
+# define INIT_TIMER_CREATE \
+ COMMON_INTERCEPT_FUNCTION_GLIBC_VER_MIN(timer_create, "GLIBC_2.3.3"); \
+ COMMON_INTERCEPT_FUNCTION_GLIBC_VER_MIN(timer_delete, "GLIBC_2.3.3"); \
+ COMMON_INTERCEPT_FUNCTION_GLIBC_VER_MIN(timer_gettime, "GLIBC_2.3.3"); \
+ COMMON_INTERCEPT_FUNCTION_GLIBC_VER_MIN(timer_settime, "GLIBC_2.3.3");
+#else
+# define INIT_TIMER_CREATE
+#endif
+
#if SANITIZER_INTERCEPT_GETITIMER
INTERCEPTOR(int, getitimer, int which, void *curr_value) {
void *ctx;
#define INIT_GETITIMER
#endif
+#if SANITIZER_INTERCEPT_TIMESPEC_GET
+INTERCEPTOR(int, timespec_get, struct __sanitizer_timespec *ts, int base) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, timespec_get, ts, base);
+  // We don't yet know if ts is addressable, so we use our own scratch buffer.
+ struct __sanitizer_timespec ts_local;
+ int res = REAL(timespec_get)(&ts_local, base);
+ if (res) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ts,
+ sizeof(struct __sanitizer_timespec));
+ internal_memcpy(ts, &ts_local, sizeof(struct __sanitizer_timespec));
+ }
+ return res;
+}
+# define INIT_TIMESPEC_GET COMMON_INTERCEPT_FUNCTION(timespec_get);
+#else
+# define INIT_TIMESPEC_GET
+#endif
+
#if SANITIZER_INTERCEPT_GLOB
static void unpoison_glob_t(void *ctx, __sanitizer_glob_t *pglob) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pglob, sizeof(*pglob));
if (res != ((SIZE_T)-1)) {
CHECK_LE(res, sizeof(local_dest));
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dest, res);
- REAL(memcpy)(dest, local_dest, res);
+ internal_memcpy(dest, local_dest, res);
}
return res;
}
if (res != -1) {
CHECK_LE(res, sizeof(local_dest));
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dest, res);
- REAL(memcpy)(dest, local_dest, res);
+ internal_memcpy(dest, local_dest, res);
}
return res;
}
# define INIT_FREADLINK
#endif
+#if SANITIZER_INTERCEPT_GETSERVENT_R || SANITIZER_INTERCEPT_GETSERVBYNAME_R || \
+ SANITIZER_INTERCEPT_GETSERVBYPORT_R
+
+UNUSED static void HandleGetServentReentrantResult(
+ void *ctx, int res, struct __sanitizer_servent *result_buf, char *buf,
+ SIZE_T buflen, struct __sanitizer_servent **result) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, (char *)result, sizeof(void *));
+ if (res)
+ return;
+ if (*result) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, (char *)*result,
+ sizeof(__sanitizer_servent));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, buflen);
+ }
+}
+
+#endif
+
+#if SANITIZER_INTERCEPT_GETSERVENT_R
+INTERCEPTOR(int, getservent_r, struct __sanitizer_servent *result_buf,
+ char *buf, SIZE_T buflen, struct __sanitizer_servent **result) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getservent_r, result_buf, buf, buflen, result);
+ int res = REAL(getservent_r)(result_buf, buf, buflen, result);
+ HandleGetServentReentrantResult(ctx, res, result_buf, buf, buflen, result);
+ return res;
+}
+# define INIT_GETSERVENT_R COMMON_INTERCEPT_FUNCTION(getservent_r)
+#else
+# define INIT_GETSERVENT_R
+#endif
+
+#if SANITIZER_INTERCEPT_GETSERVBYNAME_R
+INTERCEPTOR(int, getservbyname_r, const char *name, const char *proto,
+ struct __sanitizer_servent *result_buf, char *buf, SIZE_T buflen,
+ struct __sanitizer_servent **result) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getservbyname_r, name, proto, result_buf, buf,
+ buflen, result);
+ COMMON_INTERCEPTOR_READ_STRING(ctx, name, internal_strlen(name));
+ int res = REAL(getservbyname_r)(name, proto, result_buf, buf, buflen, result);
+ HandleGetServentReentrantResult(ctx, res, result_buf, buf, buflen, result);
+ return res;
+}
+# define INIT_GETSERVBYNAME_R COMMON_INTERCEPT_FUNCTION(getservbyname_r)
+#else
+# define INIT_GETSERVBYNAME_R
+#endif
+
+#if SANITIZER_INTERCEPT_GETSERVBYPORT_R
+INTERCEPTOR(int, getservbyport_r, int port, const char *proto,
+ struct __sanitizer_servent *result_buf, char *buf, SIZE_T buflen,
+ struct __sanitizer_servent **result) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getservbyport_r, port, proto, result_buf, buf,
+ buflen, result);
+ int res = REAL(getservbyport_r)(port, proto, result_buf, buf, buflen, result);
+ HandleGetServentReentrantResult(ctx, res, result_buf, buf, buflen, result);
+ return res;
+}
+# define INIT_GETSERVBYPORT_R COMMON_INTERCEPT_FUNCTION(getservbyport_r)
+#else
+# define INIT_GETSERVBYPORT_R
+#endif
+
#include "sanitizer_common_interceptors_netbsd_compat.inc"
namespace __sanitizer {
INIT_PRINTF;
INIT_PRINTF_L;
INIT_ISOC99_PRINTF;
+ INIT_SETPROCTITLE;
INIT_FREXP;
- INIT_FREXPF_FREXPL;
+ INIT_FREXPF;
+ INIT_FREXPL;
INIT_GETPWNAM_AND_FRIENDS;
INIT_GETPWNAM_R_AND_FRIENDS;
INIT_GETPWENT;
INIT_SETPWENT;
INIT_CLOCK_GETTIME;
INIT_CLOCK_GETCPUCLOCKID;
+ INIT_TIMER_CREATE;
INIT_GETITIMER;
INIT_TIME;
+ INIT_TIMESPEC_GET;
INIT_GLOB;
INIT_GLOB64;
INIT___B64_TO;
INIT_FREADLINK;
INIT___PRINTF_CHK;
+ INIT_GETSERVENT_R;
+ INIT_GETSERVBYNAME_R;
+ INIT_GETSERVBYPORT_R;
}
// Returns true if the character is an integer conversion specifier.
static bool format_is_integer_conv(char c) {
+#if SANITIZER_GLIBC
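+  // %b/%B are C23 binary conversion specifiers, supported by glibc >= 2.35.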
+ if (char_is_one_of(c, "bB"))
+ return true;
+#endif
return char_is_one_of(c, "diouxXn");
}
++ioctl_table_size; \
}
+ _(FIONBIO, READ, sizeof(int));
+#if !SANITIZER_HAIKU
_(FIOASYNC, READ, sizeof(int));
_(FIOCLEX, NONE, 0);
_(FIOGETOWN, WRITE, sizeof(int));
- _(FIONBIO, READ, sizeof(int));
_(FIONCLEX, NONE, 0);
_(FIOSETOWN, READ, sizeof(int));
+#endif
_(SIOCATMARK, WRITE, sizeof(int));
_(SIOCGIFCONF, CUSTOM, 0);
_(SIOCGPGRP, WRITE, sizeof(int));
_(SIOCSPGRP, READ, sizeof(int));
-#if !SANITIZER_SOLARIS
+#if !SANITIZER_SOLARIS && !SANITIZER_HAIKU
_(TIOCCONS, NONE, 0);
#endif
- _(TIOCEXCL, NONE, 0);
+#if !SANITIZER_HAIKU
_(TIOCGETD, WRITE, sizeof(int));
+ _(TIOCNOTTY, NONE, 0);
+ _(TIOCPKT, READ, sizeof(int));
+ _(TIOCSETD, READ, sizeof(int));
+ _(TIOCSTI, READ, sizeof(char));
+#endif
+ _(TIOCEXCL, NONE, 0);
_(TIOCGPGRP, WRITE, pid_t_sz);
_(TIOCGWINSZ, WRITE, struct_winsize_sz);
_(TIOCMBIC, READ, sizeof(int));
_(TIOCMBIS, READ, sizeof(int));
_(TIOCMGET, WRITE, sizeof(int));
_(TIOCMSET, READ, sizeof(int));
- _(TIOCNOTTY, NONE, 0);
_(TIOCNXCL, NONE, 0);
_(TIOCOUTQ, WRITE, sizeof(int));
- _(TIOCPKT, READ, sizeof(int));
+# if !SANITIZER_AIX
_(TIOCSCTTY, NONE, 0);
- _(TIOCSETD, READ, sizeof(int));
+# endif
_(TIOCSPGRP, READ, pid_t_sz);
- _(TIOCSTI, READ, sizeof(char));
_(TIOCSWINSZ, READ, struct_winsize_sz);
#if !SANITIZER_IOS
_(SOUND_PCM_WRITE_CHANNELS, WRITE, sizeof(int));
_(SOUND_PCM_WRITE_FILTER, WRITE, sizeof(int));
_(TCFLSH, NONE, 0);
+# if SANITIZER_TERMIOS_IOCTL_CONSTANTS
_(TCGETS, WRITE, struct_termios_sz);
+# endif
_(TCSBRK, NONE, 0);
_(TCSBRKP, NONE, 0);
+# if SANITIZER_TERMIOS_IOCTL_CONSTANTS
_(TCSETS, READ, struct_termios_sz);
_(TCSETSF, READ, struct_termios_sz);
_(TCSETSW, READ, struct_termios_sz);
+# endif
_(TCXONC, NONE, 0);
_(TIOCGLCKTRMIOS, WRITE, struct_termios_sz);
_(TIOCGSOFTCAR, WRITE, sizeof(int));
// Platform-specific options.
#if SANITIZER_APPLE
-#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE 0
+# define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE 0
#elif SANITIZER_WINDOWS64
-#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE 0
+# define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE 0
+#elif SANITIZER_AIX
+# define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE 0
#else
-#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE 1
+# define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE 1
#endif // SANITIZER_APPLE
#ifndef COMMON_INTERCEPTOR_MEMSET_IMPL
ASM_HIDDEN(COMMON_INTERCEPTOR_SPILL_AREA)
+TEXT_SECTION
.comm _ZN14__interception10real_vforkE,8,8
.globl ASM_WRAPPER_NAME(vfork)
ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(vfork))
ASM_INTERCEPTOR_TRAMPOLINE(vfork)
ASM_TRAMPOLINE_ALIAS(vfork, vfork)
-GNU_PROPERTY_BTI_PAC
+GNU_PROPERTY_BTI_PAC_GCS
#endif
#include "sanitizer_common/sanitizer_asm.h"
+.att_syntax
+
.comm _ZN14__interception10real_vforkE,4,4
.globl ASM_WRAPPER_NAME(vfork)
ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(vfork))
#include "sanitizer_common/sanitizer_asm.h"
+.att_syntax
+
.comm _ZN14__interception10real_vforkE,8,8
.globl ASM_WRAPPER_NAME(vfork)
ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(vfork))
char sa_data[14];
};
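+// Mirrors the Linux kernel's struct open_how from <linux/openat2.h>.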
+struct sanitizer_kernel_open_how {
+ u64 flags;
+ u64 mode;
+ u64 resolve;
+};
+
// Real sigset size is always passed as a syscall argument.
// Declare it "void" to catch sizeof(kernel_sigset_t).
typedef void kernel_sigset_t;
POST_SYSCALL(openat)
(long res, long dfd, const void *filename, long flags, long mode) {}
+PRE_SYSCALL(openat2)(long dfd, const void* filename,
+ const sanitizer_kernel_open_how* how, uptr howlen) {
+ if (filename)
+ PRE_READ(filename, __sanitizer::internal_strlen((const char*)filename) + 1);
+
+ if (how)
+ PRE_READ(how, howlen);
+}
+
+POST_SYSCALL(openat2)(long res, long dfd, const void* filename,
+ const sanitizer_kernel_open_how* how, uptr howlen) {}
+
PRE_SYSCALL(newfstatat)
(long dfd, const void *filename, void *statbuf, long flag) {
if (filename)
COMMON_SYSCALL_BLOCKING_END();
}
+PRE_SYSCALL(copy_file_range)
+(int fdin, __sanitizer___kernel_off_t *offin, int fdout,
+ __sanitizer___kernel_off_t *offout, SIZE_T size, unsigned int flags) {
+ if (offin != nullptr) {
+ PRE_READ(offin, sizeof(*offin));
+ }
+ if (offout != nullptr) {
+ PRE_READ(offout, sizeof(*offout));
+ }
+}
+
+POST_SYSCALL(copy_file_range)
+(SSIZE_T, int fdin, __sanitizer___kernel_off_t *offin, int fdout,
+ __sanitizer___kernel_off_t *offout, SIZE_T size, unsigned int flags) {
+ if (offin != nullptr) {
+ POST_WRITE(offin, sizeof(*offin));
+ }
+ if (offout != nullptr) {
+ POST_WRITE(offout, sizeof(*offout));
+ }
+}
+
} // extern "C"
# undef PRE_SYSCALL
u32 lock;
u32 stk;
};
- LockWithContext all_locks_with_contexts_[64];
+ LockWithContext all_locks_with_contexts_[128];
uptr n_all_locks_;
};
# define __errno_location ___errno
#elif SANITIZER_WINDOWS
# define __errno_location _errno
+#elif SANITIZER_HAIKU
+# define __errno_location _errnop
#endif
extern "C" int *__errno_location();
namespace __sanitizer {
-#define errno_ENOMEM 12
-#define errno_EBUSY 16
-#define errno_EINVAL 22
-#define errno_ERANGE 34
-#define errno_ENAMETOOLONG 36
-#define errno_ENOSYS 38
+#ifdef __HAIKU__
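+// Haiku errno values are Be-style error codes: general errors are offsets
+// from B_GENERAL_ERROR_BASE (0x80000000), POSIX errors from
+// B_POSIX_ERROR_BASE (0x80007000).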
+# define errno_ENOMEM (0x80000000)
+# define errno_EBUSY (0x80000000 + 14)
+# define errno_EINVAL (0x80000000 + 5)
+# define errno_ERANGE (0x80007000 + 17)
+# define errno_ENAMETOOLONG (0x80000000 + 0x6004)
+# define errno_ENOSYS (0x80007009)
+#else
+# define errno_ENOMEM 12
+# define errno_EBUSY 16
+# define errno_EINVAL 22
+# define errno_ERANGE 34
+# define errno_ENAMETOOLONG 36
+# define errno_ENOSYS 38
+#endif
// Those might not present or their value differ on different platforms.
extern const int errno_EOWNERDEAD;
void ReportFile::ReopenIfNecessary() {
mu->CheckLocked();
- if (fd == kStdoutFd || fd == kStderrFd) return;
-
uptr pid = internal_getpid();
+ if (fallbackToStderrActive && fd_pid != pid) {
+    // If fallbackToStderrActive is set then we fell back to stderr. If this
+    // is a new process, mark fd as invalid so we attempt to open it again.
+ CHECK_EQ(fd, kStderrFd);
+ fd = kInvalidFd;
+ fallbackToStderrActive = false;
+ }
+ if (fd == kStdoutFd || fd == kStderrFd)
+ return;
+
// If in tracer, use the parent's file.
if (pid == stoptheworld_tracer_pid)
pid = stoptheworld_tracer_ppid;
// process, close it now.
if (fd_pid == pid)
return;
- else
- CloseFile(fd);
+ CloseFile(fd);
}
const char *exe_name = GetProcessName();
error_t err;
fd = OpenFile(full_path, WrOnly, &err);
if (fd == kInvalidFd) {
- const char *ErrorMsgPrefix = "ERROR: Can't open file: ";
+ bool fallback = common_flags()->log_fallback_to_stderr;
+ const char *ErrorMsgPrefix =
+ fallback ? "WARNING: Can't open file, falling back to stderr: "
+ : "ERROR: Can't open file: ";
WriteToFile(kStderrFd, ErrorMsgPrefix, internal_strlen(ErrorMsgPrefix));
WriteToFile(kStderrFd, full_path, internal_strlen(full_path));
char errmsg[100];
internal_snprintf(errmsg, sizeof(errmsg), " (reason: %d)\n", err);
WriteToFile(kStderrFd, errmsg, internal_strlen(errmsg));
- Die();
+ if (!fallback)
+ Die();
+ fallbackToStderrActive = true;
+ fd = kStderrFd;
}
fd_pid = pid;
}
-static void RecursiveCreateParentDirs(char *path) {
+static void RecursiveCreateParentDirs(char *path, fd_t &fd) {
if (path[0] == '\0')
return;
for (int i = 1; path[i] != '\0'; ++i) {
continue;
path[i] = '\0';
if (!DirExists(path) && !CreateDir(path)) {
- const char *ErrorMsgPrefix = "ERROR: Can't create directory: ";
+ bool fallback = common_flags()->log_fallback_to_stderr;
+ const char *ErrorMsgPrefix =
+ fallback ? "WARNING: Can't create directory, falling back to stderr: "
+ : "ERROR: Can't create directory: ";
WriteToFile(kStderrFd, ErrorMsgPrefix, internal_strlen(ErrorMsgPrefix));
WriteToFile(kStderrFd, path, internal_strlen(path));
const char *ErrorMsgSuffix = "\n";
WriteToFile(kStderrFd, ErrorMsgSuffix, internal_strlen(ErrorMsgSuffix));
- Die();
+ if (!fallback)
+ Die();
+ path[i] = save;
+ fd = kStderrFd;
+ return;
}
path[i] = save;
}
}
+/// Parse the report path \p pattern and copy the parsed path to \p dest.
+///
+/// * `%%` becomes `%`
+/// * `%H` expands to the environment variable `HOME`
+/// * `%t` expands to the environment variable `TMPDIR`
+/// * `%p` expands to the process ID (PID)
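+///
+/// For example (hypothetical values), with HOME=/home/user and PID 1234, the
+/// pattern `%H/asan.%p.log` expands to `/home/user/asan.1234.log`.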
+static void ParseAndSetPath(const char *pattern, char *dest,
+ const uptr dest_size) {
+ CHECK(pattern);
+ CHECK(dest);
+ CHECK_GE(dest_size, 1);
+ dest[0] = '\0';
+  // Return an empty string if an empty string was passed.
+ if (internal_strlen(pattern) == 0)
+ return;
+ uptr next_substr_start_idx = 0;
+ for (uptr i = 0; i < internal_strlen(pattern) - 1; i++) {
+ if (pattern[i] != '%')
+ continue;
+ int bytes_to_copy = i - next_substr_start_idx;
+ // Copy over previous substring.
+ CHECK_LT(internal_strlcat(dest, pattern + next_substr_start_idx,
+ internal_strlen(dest) + bytes_to_copy + 1),
+ dest_size);
+ const char *str_to_concat;
+ switch (pattern[++i]) {
+ case '%':
+ str_to_concat = "%";
+ break;
+ case 'H':
+ str_to_concat = GetEnv("HOME");
+ break;
+ case 't':
+ str_to_concat = GetEnv("TMPDIR");
+ break;
+ case 'p': {
+ // Use printf directly to write the PID since it's not a static string.
+ int remaining_capacity = dest_size - internal_strlen(dest);
+ int bytes_copied =
+ internal_snprintf(dest + internal_strlen(dest), remaining_capacity,
+ "%ld", internal_getpid());
+ CHECK_GT(bytes_copied, 0);
+ CHECK_LT(bytes_copied, remaining_capacity);
+ str_to_concat = "";
+ break;
+ }
+ default: {
+        // Invalid pattern: fall back to the original pattern.
+ const char *message = "ERROR: Unexpected pattern: ";
+ WriteToFile(kStderrFd, message, internal_strlen(message));
+ WriteToFile(kStderrFd, pattern, internal_strlen(pattern));
+ WriteToFile(kStderrFd, "\n", internal_strlen("\n"));
+ CHECK_LT(internal_strlcpy(dest, pattern, dest_size), dest_size);
+ return;
+ }
+ }
+ CHECK(str_to_concat);
+ CHECK_LT(internal_strlcat(dest, str_to_concat, dest_size), dest_size);
+ next_substr_start_idx = i + 1;
+ }
+ CHECK_LT(internal_strlcat(dest, pattern + next_substr_start_idx, dest_size),
+ dest_size);
+}
+
void ReportFile::SetReportPath(const char *path) {
if (path) {
uptr len = internal_strlen(path);
if (len > sizeof(path_prefix) - 100) {
- Report("ERROR: Path is too long: %c%c%c%c%c%c%c%c...\n", path[0], path[1],
- path[2], path[3], path[4], path[5], path[6], path[7]);
- Die();
+ bool fallback = common_flags()->log_fallback_to_stderr;
+ const char *message =
+ fallback ? "WARNING: Path is too long, falling back to stderr: "
+ : "ERROR: Path is too long: ";
+ WriteToFile(kStderrFd, message, internal_strlen(message));
+ WriteToFile(kStderrFd, path, 8);
+ message = "...\n";
+ WriteToFile(kStderrFd, message, internal_strlen(message));
+ if (!fallback)
+ Die();
+ path = "stderr";
}
}
} else if (internal_strcmp(path, "stdout") == 0) {
fd = kStdoutFd;
} else {
- internal_snprintf(path_prefix, kMaxPathLength, "%s", path);
- RecursiveCreateParentDirs(path_prefix);
+ ParseAndSetPath(path, path_prefix, kMaxPathLength);
+ RecursiveCreateParentDirs(path_prefix, fd);
}
}
// PID of the process that opened fd. If a fork() occurs,
// the PID of child will be different from fd_pid.
uptr fd_pid;
+  // Set to true if the last attempt to open the log file failed, perhaps due
+  // to permission errors.
+ bool fallbackToStderrActive = false;
private:
void ReopenIfNecessary();
bool, log_to_syslog, (bool)SANITIZER_ANDROID || (bool)SANITIZER_APPLE,
"Write all sanitizer output to syslog in addition to other means of "
"logging.")
+COMMON_FLAG(bool, log_fallback_to_stderr, false,
+ "When set, fallback to stderr if we are unable to open log path.")
COMMON_FLAG(
int, verbosity, 0,
"Verbosity level (0 - silent, 1 - a bit of output, 2+ - more output).")
COMMON_FLAG(bool, allow_user_segv_handler, true,
"Deprecated. True has no effect, use handle_sigbus=1. If false, "
"handle_*=1 will be upgraded to handle_*=2.")
+COMMON_FLAG(bool, cloak_sanitizer_signal_handlers, false,
+ "If set, signal/sigaction will pretend that sanitizers did not "
+ "preinstall any signal handlers. If the user subsequently installs "
+ "a signal handler, this will disable cloaking for the respective "
+ "signal.")
COMMON_FLAG(bool, use_sigaltstack, true,
"If set, uses alternate stack for signal handling.")
COMMON_FLAG(bool, detect_deadlocks, true,
#include "sanitizer_fuchsia.h"
#if SANITIZER_FUCHSIA
+# include <limits.h>
# include <pthread.h>
# include <stdlib.h>
# include <unistd.h>
uptr GetThreadSelf() { return reinterpret_cast<uptr>(thrd_current()); }
-tid_t GetTid() { return GetThreadSelf(); }
+ThreadID GetTid() { return GetThreadSelf(); }
void Abort() { abort(); }
sanitizer_shadow_bounds_t ShadowBounds;
+// Any sanitizer that utilizes shadow should explicitly call this function
+// whenever it's appropriate for that sanitizer to reference shadow bounds.
+// For ASan, this is done in `InitializeShadowMemory`, and for HWASan, this is
+// done in `InitShadow`.
void InitShadowBounds() { ShadowBounds = __sanitizer_shadow_bounds(); }
+// TODO(leonardchan): It's not immediately clear from a user perspective if
+// `GetMaxUserVirtualAddress` should be called exactly once on runtime startup
+// or can be called multiple times. Currently it looks like most instances of
+// `GetMaxUserVirtualAddress` are meant to be called once, but if someone
+// decides to call this multiple times in the future, we should have a separate
+// function that's ok to call multiple times. Ideally we would just invoke this
+// syscall once. Also for Fuchsia, this syscall technically gets invoked twice
+// since `__sanitizer_shadow_bounds` also invokes this syscall under the hood.
uptr GetMaxUserVirtualAddress() {
- InitShadowBounds();
- return ShadowBounds.memory_limit - 1;
+ zx_info_vmar_t info;
+ zx_status_t status = _zx_object_get_info(_zx_vmar_root_self(), ZX_INFO_VMAR,
+ &info, sizeof(info), NULL, NULL);
+ CHECK_EQ(status, ZX_OK);
+
+ // Find the top of the accessible address space.
+ uintptr_t top = info.base + info.len;
+
+ // Round it up to a power-of-two size. There may be some pages at
+  // the top that can't actually be mapped, but for purposes of the shadow,
+  // we'll pretend they could be.
+ int bit = (sizeof(uintptr_t) * CHAR_BIT) - __builtin_clzl(top);
+ if (top != (uintptr_t)1 << bit)
+ top = (uintptr_t)1 << (bit + 1);
+
+ return top - 1;
}
uptr GetMaxVirtualAddress() { return GetMaxUserVirtualAddress(); }
__sanitizer::StoredEnviron = envp;
__sanitizer::MainThreadStackBase = reinterpret_cast<uintptr_t>(stack_base);
__sanitizer::MainThreadStackSize = stack_size;
+
+ EarlySanitizerInit();
}
void __sanitizer_set_report_path(const char *path) {
void InitShadowBounds();
+// Individual sanitizers can define this to explicitly run something at the end
+// of `__sanitizer_startup_hook`. This can be useful if a sanitizer needs to do
+// extra work after the common startup hook code is called and before module
+// ctors are invoked. For example, hwasan can explicitly call its initializing
+// function here so it can be set up before libc extensions are initialized.
+void EarlySanitizerInit();
+
} // namespace __sanitizer
#endif // SANITIZER_FUCHSIA
#if SANITIZER_LINUX || SANITIZER_FUCHSIA
-# if (__GLIBC_PREREQ(2, 16) || (SANITIZER_ANDROID && __ANDROID_API__ >= 21) || \
- SANITIZER_FUCHSIA) && \
- !SANITIZER_GO
-# define SANITIZER_USE_GETAUXVAL 1
-# else
-# define SANITIZER_USE_GETAUXVAL 0
-# endif
-
-# if SANITIZER_USE_GETAUXVAL
-# include <sys/auxv.h>
-# else
+# if (__GLIBC_PREREQ(2, 16) || SANITIZER_ANDROID || SANITIZER_FUCHSIA) && \
+ !SANITIZER_GO
+# define SANITIZER_USE_GETAUXVAL 1
+# else
+# define SANITIZER_USE_GETAUXVAL 0
+# endif
+
+# if SANITIZER_USE_GETAUXVAL
+# include <sys/auxv.h>
+# else
// The weak getauxval definition allows checking for the function at runtime.
// This is useful for Android, when compiled at a lower API level yet running
// on a more recent platform that offers the function.
extern "C" SANITIZER_WEAK_ATTRIBUTE unsigned long getauxval(unsigned long type);
-# endif
+# endif
#elif SANITIZER_NETBSD
typedef sptr ssize;
#endif
-typedef u64 tid_t;
+typedef u64 ThreadID;
// ----------- ATTENTION -------------
// This header should NOT include any other headers to avoid portability issues.
#include "sanitizer_platform.h"
#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
- SANITIZER_SOLARIS
+ SANITIZER_SOLARIS || SANITIZER_HAIKU
# include "sanitizer_common.h"
# include "sanitizer_flags.h"
# include <sched.h>
# include <signal.h>
# include <sys/mman.h>
-# if !SANITIZER_SOLARIS
+# if !SANITIZER_SOLARIS && !SANITIZER_HAIKU
# include <sys/ptrace.h>
# endif
# include <sys/resource.h>
# include <sys/stat.h>
-# include <sys/syscall.h>
+# if !SANITIZER_HAIKU
+# include <sys/syscall.h>
+# include <ucontext.h>
+# endif
# include <sys/time.h>
# include <sys/types.h>
-# include <ucontext.h>
# include <unistd.h>
# if SANITIZER_LINUX
# include <sys/personality.h>
# endif
+# if SANITIZER_ANDROID && __ANDROID_API__ < 35
+// The weak `strerrorname_np` (introduced in API level 35) definition
+// allows checking the API level at runtime.
+extern "C" SANITIZER_WEAK_ATTRIBUTE const char *strerrorname_np(int);
+# endif
+
# if SANITIZER_LINUX && defined(__loongarch__)
# include <sys/sysmacros.h>
# endif
# define environ _environ
# endif
+# if SANITIZER_HAIKU
+# include <OS.h>
+# include <elf.h>
+# include <image.h>
+extern "C" char **__libc_argv;
+# endif
+
extern char **environ;
# if SANITIZER_LINUX
// Are we using 32-bit or 64-bit Linux syscalls?
// x32 (which defines __x86_64__) has SANITIZER_WORDSIZE == 32
// but it still needs to use 64-bit syscalls.
-# if SANITIZER_LINUX && (defined(__x86_64__) || defined(__powerpc64__) || \
- SANITIZER_WORDSIZE == 64 || \
- (defined(__mips__) && _MIPS_SIM == _ABIN32))
+# if SANITIZER_LINUX && \
+ (defined(__x86_64__) || defined(__powerpc64__) || \
+ SANITIZER_WORDSIZE == 64 || \
+ (defined(__mips__) && defined(_ABIN32) && _MIPS_SIM == _ABIN32))
# define SANITIZER_LINUX_USES_64BIT_SYSCALLS 1
# else
# define SANITIZER_LINUX_USES_64BIT_SYSCALLS 0
CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, set, oldset));
}
+# if SANITIZER_LINUX
+// Deletes the specified signal from newset if it is not present in oldset.
+// Equivalently: newset[signum] = newset[signum] & oldset[signum]
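+// e.g. if the program had deliberately blocked SIGPIPE before the runtime
+// started, KeepUnblocked(newset, oldset, SIGPIPE) keeps SIGPIPE blocked in
+// newset instead of unblocking it behind the program's back.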
+static void KeepUnblocked(__sanitizer_sigset_t &newset,
+ __sanitizer_sigset_t &oldset, int signum) {
+ // FIXME: https://github.com/google/sanitizers/issues/1816
+ if (SANITIZER_ANDROID || !internal_sigismember(&oldset, signum))
+ internal_sigdelset(&newset, signum);
+}
+# endif
+
// Block asynchronous signals
void BlockSignals(__sanitizer_sigset_t *oldset) {
- __sanitizer_sigset_t set;
- internal_sigfillset(&set);
-# if SANITIZER_LINUX && !SANITIZER_ANDROID
+ __sanitizer_sigset_t newset;
+ internal_sigfillset(&newset);
+
+# if SANITIZER_LINUX
+ __sanitizer_sigset_t currentset;
+
+# if !SANITIZER_ANDROID
+ // FIXME: https://github.com/google/sanitizers/issues/1816
+  SetSigProcMask(NULL, &currentset);
+
// Glibc uses SIGSETXID signal during setuid call. If this signal is blocked
// on any thread, setuid call hangs.
// See test/sanitizer_common/TestCases/Linux/setuid.c.
- internal_sigdelset(&set, 33);
-# endif
-# if SANITIZER_LINUX
+ KeepUnblocked(newset, currentset, 33);
+# endif // !SANITIZER_ANDROID
+
// Seccomp-BPF-sandboxed processes rely on SIGSYS to handle trapped syscalls.
// If this signal is blocked, such calls cannot be handled and the process may
// hang.
- internal_sigdelset(&set, 31);
+ KeepUnblocked(newset, currentset, 31);
+# if !SANITIZER_ANDROID
// Don't block synchronous signals
- internal_sigdelset(&set, SIGSEGV);
- internal_sigdelset(&set, SIGBUS);
- internal_sigdelset(&set, SIGILL);
- internal_sigdelset(&set, SIGTRAP);
- internal_sigdelset(&set, SIGABRT);
- internal_sigdelset(&set, SIGFPE);
- internal_sigdelset(&set, SIGPIPE);
-# endif
+ // but also don't unblock signals that the user had deliberately blocked.
+ // FIXME: https://github.com/google/sanitizers/issues/1816
+ KeepUnblocked(newset, currentset, SIGSEGV);
+ KeepUnblocked(newset, currentset, SIGBUS);
+ KeepUnblocked(newset, currentset, SIGILL);
+ KeepUnblocked(newset, currentset, SIGTRAP);
+ KeepUnblocked(newset, currentset, SIGABRT);
+ KeepUnblocked(newset, currentset, SIGFPE);
+ KeepUnblocked(newset, currentset, SIGPIPE);
+# endif // !SANITIZER_ANDROID
- SetSigProcMask(&set, oldset);
+# endif // SANITIZER_LINUX
+
+ SetSigProcMask(&newset, oldset);
}
ScopedBlockSignals::ScopedBlockSignals(__sanitizer_sigset_t *copy) {
# endif
// --------------- sanitizer_libc.h
-# if !SANITIZER_SOLARIS && !SANITIZER_NETBSD
+# if !SANITIZER_SOLARIS && !SANITIZER_NETBSD && !SANITIZER_HAIKU
# if !SANITIZER_S390
uptr internal_mmap(void *addr, uptr length, int prot, int flags, int fd,
u64 offset) {
AT_NO_AUTOMOUNT, STATX_BASIC_STATS, (uptr)&bufx);
statx_to_stat(&bufx, (struct stat *)buf);
return res;
-# elif (SANITIZER_WORDSIZE == 64 || SANITIZER_X32 || \
- (defined(__mips__) && _MIPS_SIM == _ABIN32)) && \
+# elif ( \
+ SANITIZER_WORDSIZE == 64 || SANITIZER_X32 || \
+ (defined(__mips__) && defined(_ABIN32) && _MIPS_SIM == _ABIN32)) && \
!SANITIZER_SPARC
return internal_syscall(SYSCALL(newfstatat), AT_FDCWD, (uptr)path, (uptr)buf,
0);
STATX_BASIC_STATS, (uptr)&bufx);
statx_to_stat(&bufx, (struct stat *)buf);
return res;
-# elif (defined(_LP64) || SANITIZER_X32 || \
- (defined(__mips__) && _MIPS_SIM == _ABIN32)) && \
+# elif ( \
+ defined(_LP64) || SANITIZER_X32 || \
+ (defined(__mips__) && defined(_ABIN32) && _MIPS_SIM == _ABIN32)) && \
!SANITIZER_SPARC
return internal_syscall(SYSCALL(newfstatat), AT_FDCWD, (uptr)path, (uptr)buf,
AT_SYMLINK_NOFOLLOW);
return internal_syscall(SYSCALL(execve), (uptr)filename, (uptr)argv,
(uptr)envp);
}
-# endif // !SANITIZER_SOLARIS && !SANITIZER_NETBSD
+# endif // !SANITIZER_SOLARIS && !SANITIZER_NETBSD && !SANITIZER_HAIKU
-# if !SANITIZER_NETBSD
+# if !SANITIZER_NETBSD && !SANITIZER_HAIKU
void internal__exit(int exitcode) {
# if SANITIZER_FREEBSD || SANITIZER_SOLARIS
internal_syscall(SYSCALL(exit), exitcode);
# endif
Die(); // Unreachable.
}
-# endif // !SANITIZER_NETBSD
+# endif // !SANITIZER_NETBSD && !SANITIZER_HAIKU
// ----------------- sanitizer_common.h
bool FileExists(const char *filename) {
}
# if !SANITIZER_NETBSD
-tid_t GetTid() {
+ThreadID GetTid() {
# if SANITIZER_FREEBSD
long Tid;
thr_self(&Tid);
return Tid;
# elif SANITIZER_SOLARIS
return thr_self();
+# elif SANITIZER_HAIKU
+ return find_thread(NULL);
# else
return internal_syscall(SYSCALL(gettid));
# endif
}
-int TgKill(pid_t pid, tid_t tid, int sig) {
+int TgKill(pid_t pid, ThreadID tid, int sig) {
# if SANITIZER_LINUX
return internal_syscall(SYSCALL(tgkill), pid, tid, sig);
# elif SANITIZER_FREEBSD
errno = thr_kill(tid, sig);
// TgKill is expected to return -1 on error, not an errno.
return errno != 0 ? -1 : 0;
+# elif SANITIZER_HAIKU
+ return kill_thread(tid);
# endif
}
# endif
// 'environ' array (on some others) and does not use libc. This function
// should be called first inside __asan_init.
const char *GetEnv(const char *name) {
-# if SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_SOLARIS
+# if SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_SOLARIS || \
+ SANITIZER_HAIKU
if (::environ != 0) {
uptr NameLen = internal_strlen(name);
for (char **Env = ::environ; *Env != 0; Env++) {
# endif
}
-# if !SANITIZER_FREEBSD && !SANITIZER_NETBSD && !SANITIZER_GO
+# if !SANITIZER_HAIKU && !SANITIZER_FREEBSD && !SANITIZER_NETBSD && \
+ !SANITIZER_GO
extern "C" {
SANITIZER_WEAK_ATTRIBUTE extern void *__libc_stack_end;
}
# endif
-# if !SANITIZER_FREEBSD && !SANITIZER_NETBSD
+# if !SANITIZER_HAIKU && !SANITIZER_FREEBSD && !SANITIZER_NETBSD
static void ReadNullSepFileToArray(const char *path, char ***arr,
int arr_size) {
char *buff;
# endif
static void GetArgsAndEnv(char ***argv, char ***envp) {
-# if SANITIZER_FREEBSD
+# if SANITIZER_HAIKU
+ *argv = __libc_argv;
+ *envp = environ;
+# elif SANITIZER_FREEBSD
// On FreeBSD, retrieving the argument and environment arrays is done via the
// kern.ps_strings sysctl, which returns a pointer to a structure containing
// this information. See also <sys/exec.h>.
# if !SANITIZER_GO
}
# endif // !SANITIZER_GO
-# endif // SANITIZER_FREEBSD
+# endif // SANITIZER_HAIKU
}
char **GetArgv() {
void FutexWait(atomic_uint32_t *p, u32 cmp) {
# if SANITIZER_FREEBSD
_umtx_op(p, UMTX_OP_WAIT_UINT, cmp, 0, 0);
-# elif SANITIZER_NETBSD
+# elif SANITIZER_NETBSD || SANITIZER_HAIKU
sched_yield(); /* No userspace futex-like synchronization */
# else
internal_syscall(SYSCALL(futex), (uptr)p, FUTEX_WAIT_PRIVATE, cmp, 0, 0, 0);
void FutexWake(atomic_uint32_t *p, u32 count) {
# if SANITIZER_FREEBSD
_umtx_op(p, UMTX_OP_WAKE, count, 0, 0);
-# elif SANITIZER_NETBSD
+# elif SANITIZER_NETBSD || SANITIZER_HAIKU
/* No userspace futex-like synchronization */
# else
internal_syscall(SYSCALL(futex), (uptr)p, FUTEX_WAKE_PRIVATE, count, 0, 0, 0);
};
# endif
-# if !SANITIZER_SOLARIS && !SANITIZER_NETBSD
+# if !SANITIZER_SOLARIS && !SANITIZER_NETBSD && !SANITIZER_HAIKU
// Syscall wrappers.
uptr internal_ptrace(int request, int pid, void *addr, void *data) {
return internal_syscall(SYSCALL(ptrace), request, pid, (uptr)addr,
# endif
# endif // !SANITIZER_SOLARIS
-# if !SANITIZER_NETBSD
+# if !SANITIZER_NETBSD && !SANITIZER_HAIKU
// ThreadLister implementation.
ThreadLister::ThreadLister(pid_t pid) : buffer_(4096) {
task_path_.AppendF("/proc/%d/task", pid);
}
ThreadLister::Result ThreadLister::ListThreads(
- InternalMmapVector<tid_t> *threads) {
+ InternalMmapVector<ThreadID> *threads) {
int descriptor = internal_open(task_path_.data(), O_RDONLY | O_DIRECTORY);
if (internal_iserror(descriptor)) {
Report("Can't open %s for reading.\n", task_path_.data());
}
}
-const char *ThreadLister::LoadStatus(tid_t tid) {
+const char *ThreadLister::LoadStatus(ThreadID tid) {
status_path_.clear();
status_path_.AppendF("%s/%llu/status", task_path_.data(), tid);
auto cleanup = at_scope_exit([&] {
return buffer_.data();
}
-bool ThreadLister::IsAlive(tid_t tid) {
+bool ThreadLister::IsAlive(ThreadID tid) {
// /proc/%d/task/%d/status uses same call to detect alive threads as
// proc_task_readdir. See task_state implementation in Linux.
static const char kPrefix[] = "\nPPid:";
CHECK_EQ(rv, 0);
return (uptr)pz;
# elif SANITIZER_USE_GETAUXVAL
+# if SANITIZER_ANDROID && __ANDROID_API__ < 35
+ // The 16 KB page size was introduced in Android 15 (API level 35), while
+ // earlier versions of Android always used a 4 KB page size.
+ // We are checking the weak definition of `strerrorname_np` (introduced in API
+ // level 35) because some earlier API levels crashed when
+ // `getauxval(AT_PAGESZ)` was called from the `.preinit_array`.
+ if (!strerrorname_np)
+ return 4096;
+# endif
+
return getauxval(AT_PAGESZ);
# else
return sysconf(_SC_PAGESIZE); // EXEC_PAGESIZE may not be trustworthy.
# endif
uptr ReadBinaryName(/*out*/ char *buf, uptr buf_len) {
-# if SANITIZER_SOLARIS
+# if SANITIZER_HAIKU
+ int cookie = 0;
+ image_info info;
+ const char *argv0 = "<UNKNOWN>";
+ while (get_next_image_info(B_CURRENT_TEAM, &cookie, &info) == B_OK) {
+ if (info.type != B_APP_IMAGE)
+ continue;
+ argv0 = info.name;
+ break;
+ }
+ internal_strncpy(buf, argv0, buf_len);
+ return internal_strlen(buf);
+# elif SANITIZER_SOLARIS
const char *default_module_name = getexecname();
CHECK_NE(default_module_name, NULL);
return internal_snprintf(buf, buf_len, "%s", default_module_name);
return (name[base_name_length] == '-' || name[base_name_length] == '.');
}
-# if !SANITIZER_ANDROID
+# if !SANITIZER_ANDROID && !SANITIZER_HAIKU
// Call cb for each region mapped by map.
void ForEachMappedRegion(link_map *map, void (*cb)(const void *, uptr)) {
CHECK_NE(map, nullptr);
-# if !SANITIZER_FREEBSD
+# if !SANITIZER_FREEBSD && !SANITIZER_HAIKU
typedef ElfW(Phdr) Elf_Phdr;
typedef ElfW(Ehdr) Elf_Ehdr;
# endif // !SANITIZER_FREEBSD
}
# endif
-# if SANITIZER_ANDROID
-# if __ANDROID_API__ < 21
-extern "C" __attribute__((weak)) int dl_iterate_phdr(
- int (*)(struct dl_phdr_info *, size_t, void *), void *);
-# endif
-
-static int dl_iterate_phdr_test_cb(struct dl_phdr_info *info, size_t size,
- void *data) {
- // Any name starting with "lib" indicates a bug in L where library base names
- // are returned instead of paths.
- if (info->dlpi_name && info->dlpi_name[0] == 'l' &&
- info->dlpi_name[1] == 'i' && info->dlpi_name[2] == 'b') {
- *(bool *)data = true;
- return 1;
- }
- return 0;
-}
-
-static atomic_uint32_t android_api_level;
-
-static AndroidApiLevel AndroidDetectApiLevelStatic() {
-# if __ANDROID_API__ <= 19
- return ANDROID_KITKAT;
-# elif __ANDROID_API__ <= 22
- return ANDROID_LOLLIPOP_MR1;
-# else
- return ANDROID_POST_LOLLIPOP;
-# endif
-}
-
-static AndroidApiLevel AndroidDetectApiLevel() {
- if (!&dl_iterate_phdr)
- return ANDROID_KITKAT; // K or lower
- bool base_name_seen = false;
- dl_iterate_phdr(dl_iterate_phdr_test_cb, &base_name_seen);
- if (base_name_seen)
- return ANDROID_LOLLIPOP_MR1; // L MR1
- return ANDROID_POST_LOLLIPOP; // post-L
- // Plain L (API level 21) is completely broken wrt ASan and not very
- // interesting to detect.
-}
-
-extern "C" __attribute__((weak)) void *_DYNAMIC;
-
-AndroidApiLevel AndroidGetApiLevel() {
- AndroidApiLevel level =
- (AndroidApiLevel)atomic_load(&android_api_level, memory_order_relaxed);
- if (level)
- return level;
- level = &_DYNAMIC == nullptr ? AndroidDetectApiLevelStatic()
- : AndroidDetectApiLevel();
- atomic_store(&android_api_level, level, memory_order_relaxed);
- return level;
-}
-
-# endif
-
static HandleSignalMode GetHandleSignalModeImpl(int signum) {
switch (signum) {
case SIGABRT:
SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
Context *ucontext = (Context *)context;
# if defined(__x86_64__) || defined(__i386__)
+# if !SANITIZER_HAIKU
static const uptr PF_WRITE = 1U << 1;
+# endif
# if SANITIZER_FREEBSD
uptr err = ucontext->uc_mcontext.mc_err;
# elif SANITIZER_NETBSD
uptr err = ucontext->uc_mcontext.__gregs[_REG_ERR];
+# elif SANITIZER_HAIKU
+ uptr err = ucontext->uc_mcontext.r13;
# elif SANITIZER_SOLARIS && defined(__i386__)
const int Err = 13;
uptr err = ucontext->uc_mcontext.gregs[Err];
*pc = ucontext->uc_mcontext.mc_rip;
*bp = ucontext->uc_mcontext.mc_rbp;
*sp = ucontext->uc_mcontext.mc_rsp;
+# elif SANITIZER_HAIKU
+ ucontext_t *ucontext = (ucontext_t *)context;
+ *pc = ucontext->uc_mcontext.rip;
+ *bp = ucontext->uc_mcontext.rbp;
+ *sp = ucontext->uc_mcontext.rsp;
# else
ucontext_t *ucontext = (ucontext_t *)context;
*pc = ucontext->uc_mcontext.gregs[REG_RIP];
#include "sanitizer_platform.h"
#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
- SANITIZER_SOLARIS
+ SANITIZER_SOLARIS || SANITIZER_HAIKU
# include "sanitizer_common.h"
# include "sanitizer_internal_defs.h"
# include "sanitizer_platform_limits_freebsd.h"
// the one in <dirent.h>, which is used by readdir().
struct linux_dirent;
+# if SANITIZER_HAIKU
+struct MemoryMappingLayoutData {
+ long signed int cookie;
+};
+# else
struct ProcSelfMapsBuff {
char *data;
uptr mmaped_size;
};
void ReadProcMaps(ProcSelfMapsBuff *proc_maps);
+# endif // SANITIZER_HAIKU
// Syscall wrappers.
uptr internal_getdents(fd_t fd, struct linux_dirent *dirp, unsigned int count);
Incomplete,
Ok,
};
- Result ListThreads(InternalMmapVector<tid_t> *threads);
- const char *LoadStatus(tid_t tid);
+ Result ListThreads(InternalMmapVector<ThreadID> *threads);
+ const char *LoadStatus(ThreadID tid);
private:
- bool IsAlive(tid_t tid);
+ bool IsAlive(ThreadID tid);
InternalScopedString task_path_;
InternalScopedString status_path_;
// Call cb for each region mapped by map.
void ForEachMappedRegion(link_map *map, void (*cb)(const void *, uptr));
-// Releases memory pages entirely within the [beg, end] address range.
+// Releases memory pages entirely within the [beg, end) address range.
// The pages no longer count toward RSS; reads are guaranteed to return 0.
// Requires (but does not verify!) that pages are MAP_PRIVATE.
inline void ReleaseMemoryPagesToOSAndZeroFill(uptr beg, uptr end) {
#include "sanitizer_platform.h"
#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
- SANITIZER_SOLARIS
+ SANITIZER_SOLARIS || SANITIZER_HAIKU
# include "sanitizer_allocator_internal.h"
# include "sanitizer_atomic.h"
# include "sanitizer_procmaps.h"
# include "sanitizer_solaris.h"
+# if SANITIZER_HAIKU
+# define _GNU_SOURCE
+# define _DEFAULT_SOURCE
+# endif
+
# if SANITIZER_NETBSD
-# define _RTLD_SOURCE // for __lwp_gettcb_fast() / __lwp_getprivate_fast()
+# // for __lwp_gettcb_fast() / __lwp_getprivate_fast()
+# define _RTLD_SOURCE
+# include <machine/mcontext.h>
+# undef _RTLD_SOURCE
+# include <sys/param.h>
+# if __NetBSD_Version__ >= 1099001200
+# include <machine/lwp_private.h>
+# endif
# endif
# include <dlfcn.h> // for dlsym()
// that, it was never implemented. So just define it to zero.
# undef MAP_NORESERVE
# define MAP_NORESERVE 0
-extern const Elf_Auxinfo *__elf_aux_vector;
+extern const Elf_Auxinfo *__elf_aux_vector __attribute__((weak));
extern "C" int __sys_sigaction(int signum, const struct sigaction *act,
struct sigaction *oldact);
# endif
# include <thread.h>
# endif
-# if SANITIZER_ANDROID
-# include <android/api-level.h>
-# if !defined(CPU_COUNT) && !defined(__aarch64__)
-# include <dirent.h>
-# include <fcntl.h>
-struct __sanitizer::linux_dirent {
- long d_ino;
- off_t d_off;
- unsigned short d_reclen;
- char d_name[];
-};
-# endif
+# if SANITIZER_HAIKU
+# include <kernel/OS.h>
+# include <sys/link_elf.h>
# endif
# if !SANITIZER_ANDROID
*addr = (uptr)tcb->tcb_dtv[1];
}
}
+# elif SANITIZER_HAIKU
# else
# error "Unknown OS"
# endif
if (phdr->p_type == PT_LOAD) {
uptr cur_beg = info->dlpi_addr + phdr->p_vaddr;
uptr cur_end = cur_beg + phdr->p_memsz;
+# if SANITIZER_HAIKU
+ bool executable = phdr->p_flags & PF_EXECUTE;
+ bool writable = phdr->p_flags & PF_WRITE;
+# else
bool executable = phdr->p_flags & PF_X;
bool writable = phdr->p_flags & PF_W;
+# endif
cur_module.addAddressRange(cur_beg, cur_end, executable, writable);
} else if (phdr->p_type == PT_NOTE) {
# ifdef NT_GNU_BUILD_ID
return 0;
}
-# if SANITIZER_ANDROID && __ANDROID_API__ < 21
-extern "C" __attribute__((weak)) int dl_iterate_phdr(
- int (*)(struct dl_phdr_info *, size_t, void *), void *);
-# endif
-
-static bool requiresProcmaps() {
-# if SANITIZER_ANDROID && __ANDROID_API__ <= 22
- // Fall back to /proc/maps if dl_iterate_phdr is unavailable or broken.
- // The runtime check allows the same library to work with
- // both K and L (and future) Android releases.
- return AndroidGetApiLevel() <= ANDROID_LOLLIPOP_MR1;
-# else
- return false;
-# endif
-}
-
-static void procmapsInit(InternalMmapVectorNoCtor<LoadedModule> *modules) {
- MemoryMappingLayout memory_mapping(/*cache_enabled*/ true);
- memory_mapping.DumpListOfModules(modules);
-}
-
void ListOfModules::init() {
clearOrInit();
- if (requiresProcmaps()) {
- procmapsInit(&modules_);
- } else {
- DlIteratePhdrData data = {&modules_, true};
- dl_iterate_phdr(dl_iterate_phdr_cb, &data);
- }
+ DlIteratePhdrData data = {&modules_, true};
+ dl_iterate_phdr(dl_iterate_phdr_cb, &data);
}
-// When a custom loader is used, dl_iterate_phdr may not contain the full
-// list of modules. Allow callers to fall back to using procmaps.
-void ListOfModules::fallbackInit() {
- if (!requiresProcmaps()) {
- clearOrInit();
- procmapsInit(&modules_);
- } else {
- clear();
- }
-}
+void ListOfModules::fallbackInit() { clear(); }
// getrusage does not give us the current RSS, only the max RSS.
// Still, this is better than nothing if /proc/self/statm is not available
int req[2];
uptr len = sizeof(ncpu);
req[0] = CTL_HW;
+# ifdef HW_NCPUONLINE
+ req[1] = HW_NCPUONLINE;
+# else
req[1] = HW_NCPU;
+# endif
CHECK_EQ(internal_sysctl(req, 2, &ncpu, &len, NULL, 0), 0);
return ncpu;
-# elif SANITIZER_ANDROID && !defined(CPU_COUNT) && !defined(__aarch64__)
- // Fall back to /sys/devices/system/cpu on Android when cpu_set_t doesn't
- // exist in sched.h. That is the case for toolchains generated with older
- // NDKs.
- // This code doesn't work on AArch64 because internal_getdents makes use of
- // the 64bit getdents syscall, but cpu_set_t seems to always exist on AArch64.
- uptr fd = internal_open("/sys/devices/system/cpu", O_RDONLY | O_DIRECTORY);
- if (internal_iserror(fd))
- return 0;
- InternalMmapVector<u8> buffer(4096);
- uptr bytes_read = buffer.size();
- uptr n_cpus = 0;
- u8 *d_type;
- struct linux_dirent *entry = (struct linux_dirent *)&buffer[bytes_read];
- while (true) {
- if ((u8 *)entry >= &buffer[bytes_read]) {
- bytes_read = internal_getdents(fd, (struct linux_dirent *)buffer.data(),
- buffer.size());
- if (internal_iserror(bytes_read) || !bytes_read)
- break;
- entry = (struct linux_dirent *)buffer.data();
- }
- d_type = (u8 *)entry + entry->d_reclen - 1;
- if (d_type >= &buffer[bytes_read] ||
- (u8 *)&entry->d_name[3] >= &buffer[bytes_read])
- break;
- if (entry->d_ino != 0 && *d_type == DT_DIR) {
- if (entry->d_name[0] == 'c' && entry->d_name[1] == 'p' &&
- entry->d_name[2] == 'u' && entry->d_name[3] >= '0' &&
- entry->d_name[3] <= '9')
- n_cpus++;
- }
- entry = (struct linux_dirent *)(((u8 *)entry) + entry->d_reclen);
- }
- internal_close(fd);
- return n_cpus;
+# elif SANITIZER_HAIKU
+ system_info info;
+ get_system_info(&info);
+ return info.cpu_count;
# elif SANITIZER_SOLARIS
return sysconf(_SC_NPROCESSORS_ONLN);
# else
-# if defined(CPU_COUNT)
cpu_set_t CPUs;
CHECK_EQ(sched_getaffinity(0, sizeof(cpu_set_t), &CPUs), 0);
return CPU_COUNT(&CPUs);
-# else
- return 1;
-# endif
# endif
}
void WriteOneLineToSyslog(const char *s) {
if (&async_safe_write_log) {
async_safe_write_log(SANITIZER_ANDROID_LOG_INFO, GetProcessName(), s);
- } else if (AndroidGetApiLevel() > ANDROID_KITKAT) {
- syslog(LOG_INFO, "%s", s);
} else {
- CHECK(&__android_log_write);
- __android_log_write(SANITIZER_ANDROID_LOG_INFO, nullptr, s);
+ syslog(LOG_INFO, "%s", s);
}
}
# endif
# include <stdio.h>
+// Start searching for an available memory region past PAGEZERO, which is
+// 4KB on 32-bit and 4GB on 64-bit.
+# define GAP_SEARCH_START_ADDRESS \
+ ((SANITIZER_WORDSIZE == 32) ? 0x000000001000 : 0x000100000000)
+
# include "sanitizer_common.h"
# include "sanitizer_file.h"
# include "sanitizer_flags.h"
extern char **environ;
# endif
-# if defined(__has_include) && __has_include(<os/trace.h>) && defined(__BLOCKS__)
-# define SANITIZER_OS_TRACE 1
-# include <os/trace.h>
-# else
-# define SANITIZER_OS_TRACE 0
-# endif
-
-// import new crash reporting api
+// Integrate with the CrashReporter library if available.
# if defined(__has_include) && __has_include(<CrashReporterClient.h>)
# define HAVE_CRASHREPORTERCLIENT_H 1
# include <CrashReporterClient.h>
# include <dlfcn.h> // for dladdr()
# include <errno.h>
# include <fcntl.h>
+# include <inttypes.h>
# include <libkern/OSAtomic.h>
# include <mach-o/dyld.h>
# include <mach/mach.h>
+# include <mach/mach_error.h>
# include <mach/mach_time.h>
# include <mach/vm_statistics.h>
# include <malloc/malloc.h>
-# if defined(__has_builtin) && __has_builtin(__builtin_os_log_format)
-# include <os/log.h>
-# else
- /* Without support for __builtin_os_log_format, fall back to the older
- method. */
-# define OS_LOG_DEFAULT 0
-# define os_log_error(A,B,C) \
- asl_log(nullptr, nullptr, ASL_LEVEL_ERR, "%s", (C));
-# endif
+# include <os/log.h>
# include <pthread.h>
# include <pthread/introspection.h>
# include <sched.h>
natural_t *nesting_depth,
vm_region_recurse_info_t info,
mach_msg_type_number_t *infoCnt);
+
+ extern const void* _dyld_get_shared_cache_range(size_t* length);
}
+# if !SANITIZER_GO
+// Weak symbol no-op when TSan is not linked
+SANITIZER_WEAK_ATTRIBUTE extern void __tsan_set_in_internal_write_call(
+ bool value) {}
+# endif
+
namespace __sanitizer {
#include "sanitizer_syscall_generic.inc"
}
uptr internal_write(fd_t fd, const void *buf, uptr count) {
+# if SANITIZER_GO
return write(fd, buf, count);
+# else
+ // We need to disable interceptors when writing in TSan
+ __tsan_set_in_internal_write_call(true);
+ uptr res = write(fd, buf, count);
+ __tsan_set_in_internal_write_call(false);
+ return res;
+# endif
}
uptr internal_stat(const char *path, void *buf) {
return S_ISDIR(st.st_mode);
}
-tid_t GetTid() {
- tid_t tid;
+ThreadID GetTid() {
+ ThreadID tid;
pthread_threadid_np(nullptr, &tid);
return tid;
}
static Mutex syslog_lock;
# endif
+# if SANITIZER_DRIVERKIT
+# define SANITIZER_OS_LOG os_log
+# else
+# define SANITIZER_OS_LOG os_log_error
+# endif
+
void WriteOneLineToSyslog(const char *s) {
#if !SANITIZER_GO
syslog_lock.CheckLocked();
if (GetMacosAlignedVersion() >= MacosVersion(10, 12)) {
- os_log_error(OS_LOG_DEFAULT, "%{public}s", s);
+ SANITIZER_OS_LOG(OS_LOG_DEFAULT, "%{public}s", s);
} else {
#pragma clang diagnostic push
// as_log is deprecated.
static Mutex crashreporter_info_mutex;
extern "C" {
-// Integrate with crash reporter libraries.
+
#if HAVE_CRASHREPORTERCLIENT_H
+// Available in CRASHREPORTER_ANNOTATIONS_VERSION 5+
+# ifdef CRASHREPORTER_ANNOTATIONS_INITIALIZER
+CRASHREPORTER_ANNOTATIONS_INITIALIZER()
+# else
+// Support for older CrashReporter annotations.
CRASH_REPORTER_CLIENT_HIDDEN
struct crashreporter_annotations_t gCRAnnotations
__attribute__((section("__DATA," CRASHREPORTER_ANNOTATIONS_SECTION))) = {
0,
0,
0,
-#if CRASHREPORTER_ANNOTATIONS_VERSION > 4
+# if CRASHREPORTER_ANNOTATIONS_VERSION > 4
0,
-#endif
+# endif
};
-
-#else
-// fall back to old crashreporter api
+# endif
+# else
+// Fall back to the previous crash reporter API if the client header is unavailable.
static const char *__crashreporter_info__ __attribute__((__used__)) =
&crashreporter_info_buff[0];
asm(".desc ___crashreporter_info__, 0x10");
-#endif
+#endif // HAVE_CRASHREPORTERCLIENT_H
} // extern "C"
}
void LogFullErrorReport(const char *buffer) {
-#if !SANITIZER_GO
- // Log with os_trace. This will make it into the crash log.
-#if SANITIZER_OS_TRACE
-#pragma clang diagnostic push
-// os_trace is deprecated.
-#pragma clang diagnostic ignored "-Wdeprecated-declarations"
- if (GetMacosAlignedVersion() >= MacosVersion(10, 10)) {
- // os_trace requires the message (format parameter) to be a string literal.
- if (internal_strncmp(SanitizerToolName, "AddressSanitizer",
- sizeof("AddressSanitizer") - 1) == 0)
- os_trace("Address Sanitizer reported a failure.");
- else if (internal_strncmp(SanitizerToolName, "UndefinedBehaviorSanitizer",
- sizeof("UndefinedBehaviorSanitizer") - 1) == 0)
- os_trace("Undefined Behavior Sanitizer reported a failure.");
- else if (internal_strncmp(SanitizerToolName, "ThreadSanitizer",
- sizeof("ThreadSanitizer") - 1) == 0)
- os_trace("Thread Sanitizer reported a failure.");
- else
- os_trace("Sanitizer tool reported a failure.");
-
- if (common_flags()->log_to_syslog)
- os_trace("Consult syslog for more information.");
- }
-#pragma clang diagnostic pop
-#endif
+# if !SANITIZER_GO
+  // When logging with os_log_error, this will make it into the crash log.
+ if (internal_strncmp(SanitizerToolName, "AddressSanitizer",
+ sizeof("AddressSanitizer") - 1) == 0)
+ SANITIZER_OS_LOG(OS_LOG_DEFAULT, "Address Sanitizer reported a failure.");
+ else if (internal_strncmp(SanitizerToolName, "UndefinedBehaviorSanitizer",
+ sizeof("UndefinedBehaviorSanitizer") - 1) == 0)
+ SANITIZER_OS_LOG(OS_LOG_DEFAULT,
+ "Undefined Behavior Sanitizer reported a failure.");
+ else if (internal_strncmp(SanitizerToolName, "ThreadSanitizer",
+ sizeof("ThreadSanitizer") - 1) == 0)
+ SANITIZER_OS_LOG(OS_LOG_DEFAULT, "Thread Sanitizer reported a failure.");
+ else
+ SANITIZER_OS_LOG(OS_LOG_DEFAULT, "Sanitizer tool reported a failure.");
+
+ if (common_flags()->log_to_syslog)
+ SANITIZER_OS_LOG(OS_LOG_DEFAULT, "Consult syslog for more information.");
// Log to syslog.
// The logging on OS X may call pthread_create so we need the threading
WriteToSyslog(buffer);
// The report is added to CrashLog as part of logging all of Printf output.
-#endif
+# endif // !SANITIZER_GO
}
SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
RTLD_DEFAULT, "task_set_exc_guard_behavior");
if (set_behavior == nullptr) return;
const task_exc_guard_behavior_t task_exc_guard_none = 0;
- set_behavior(mach_task_self(), task_exc_guard_none);
+ kern_return_t res = set_behavior(mach_task_self(), task_exc_guard_none);
+ if (res != KERN_SUCCESS) {
+ Report(
+ "WARN: task_set_exc_guard_behavior returned %d (%s), "
+ "mmap may fail unexpectedly.\n",
+ res, mach_error_string(res));
+ if (res == KERN_DENIED)
+ Report(
+ "HINT: Check that task_set_exc_guard_behavior is allowed by "
+ "sandbox.\n");
+ }
}
static void VerifyInterceptorsWorking();
LowLevelAllocator allocator_for_env;
static bool ShouldCheckInterceptors() {
- // Restrict "interceptors working?" check to ASan and TSan.
- const char *sanitizer_names[] = {"AddressSanitizer", "ThreadSanitizer"};
+  // Restrict the "interceptors working?" check to specific sanitizers.
+ const char *sanitizer_names[] = {"AddressSanitizer", "ThreadSanitizer",
+ "RealtimeSanitizer"};
size_t count = sizeof(sanitizer_names) / sizeof(sanitizer_names[0]);
for (size_t i = 0; i < count; i++) {
if (internal_strcmp(sanitizer_names[i], SanitizerToolName) == 0)
}
#endif // SANITIZER_GO
+// Prints out a consolidated memory map: contiguous regions
+// are merged together.
+static void PrintVmmap() {
+ const mach_vm_address_t max_vm_address = GetMaxVirtualAddress() + 1;
+ mach_vm_address_t address = GAP_SEARCH_START_ADDRESS;
+ kern_return_t kr = KERN_SUCCESS;
+
+ Report("Memory map:\n");
+ mach_vm_address_t last = 0;
+ mach_vm_address_t lastsz = 0;
+
+ while (1) {
+ mach_vm_size_t vmsize = 0;
+ natural_t depth = 0;
+ vm_region_submap_short_info_data_64_t vminfo;
+ mach_msg_type_number_t count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
+ kr = mach_vm_region_recurse(mach_task_self(), &address, &vmsize, &depth,
+ (vm_region_info_t)&vminfo, &count);
+
+ if (kr == KERN_DENIED) {
+ Report(
+ "ERROR: mach_vm_region_recurse got KERN_DENIED when printing memory "
+ "map.\n");
+ Report(
+ "HINT: Check whether mach_vm_region_recurse is allowed by "
+ "sandbox.\n");
+ }
+
+ if (kr == KERN_SUCCESS && address < max_vm_address) {
+ if (last + lastsz == address) {
+ // This region is contiguous with the last; merge together.
+ lastsz += vmsize;
+ } else {
+ if (lastsz)
+ Printf("|| `[%p, %p]` || size=0x%016" PRIx64 " ||\n", (void*)last,
+ (void*)(last + lastsz), lastsz);
+
+ last = address;
+ lastsz = vmsize;
+ }
+ address += vmsize;
+ } else {
+ // We've reached the end of the memory map. Print the last remaining
+ // region, if there is one.
+ if (lastsz)
+ Printf("|| `[%p, %p]` || size=0x%016" PRIx64 " ||\n", (void*)last,
+ (void*)(last + lastsz), lastsz);
+
+ break;
+ }
+ }
+}
+
+static void ReportShadowAllocFail(uptr shadow_size_bytes, uptr alignment) {
+ Report(
+ "FATAL: Failed to allocate shadow memory. Tried to allocate %p bytes "
+ "(alignment=%p).\n",
+ (void*)shadow_size_bytes, (void*)alignment);
+ PrintVmmap();
+}
+
char **GetArgv() {
return *_NSGetArgv();
}
const uptr left_padding =
Max<uptr>(granularity, 1ULL << min_shadow_base_alignment);
- uptr space_size = shadow_size_bytes + left_padding;
+ uptr space_size = shadow_size_bytes;
uptr largest_gap_found = 0;
uptr max_occupied_addr = 0;
+
VReport(2, "FindDynamicShadowStart, space_size = %p\n", (void *)space_size);
uptr shadow_start =
- FindAvailableMemoryRange(space_size, alignment, granularity,
+ FindAvailableMemoryRange(space_size, alignment, left_padding,
&largest_gap_found, &max_occupied_addr);
// If the shadow doesn't fit, restrict the address space to make it fit.
if (shadow_start == 0) {
if (new_max_vm < max_occupied_addr) {
Report("Unable to find a memory range for dynamic shadow.\n");
Report(
- "space_size = %p, largest_gap_found = %p, max_occupied_addr = %p, "
- "new_max_vm = %p\n",
- (void *)space_size, (void *)largest_gap_found,
- (void *)max_occupied_addr, (void *)new_max_vm);
+ "\tspace_size = %p\n\tlargest_gap_found = %p\n\tmax_occupied_addr "
+ "= %p\n\tnew_max_vm = %p\n",
+ (void*)space_size, (void*)largest_gap_found, (void*)max_occupied_addr,
+ (void*)new_max_vm);
+ ReportShadowAllocFail(shadow_size_bytes, alignment);
CHECK(0 && "cannot place shadow");
}
RestrictMemoryToMaxAddress(new_max_vm);
high_mem_end = new_max_vm - 1;
- space_size = (high_mem_end >> shadow_scale) + left_padding;
+ space_size = (high_mem_end >> shadow_scale);
VReport(2, "FindDynamicShadowStart, space_size = %p\n", (void *)space_size);
- shadow_start = FindAvailableMemoryRange(space_size, alignment, granularity,
+ shadow_start = FindAvailableMemoryRange(space_size, alignment, left_padding,
nullptr, nullptr);
if (shadow_start == 0) {
Report("Unable to find a memory range after restricting VM.\n");
+ ReportShadowAllocFail(shadow_size_bytes, alignment);
CHECK(0 && "cannot place shadow after restricting vm");
}
}
}
uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
- uptr *largest_gap_found,
- uptr *max_occupied_addr) {
- typedef vm_region_submap_short_info_data_64_t RegionInfo;
- enum { kRegionInfoSize = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64 };
- // Start searching for available memory region past PAGEZERO, which is
- // 4KB on 32-bit and 4GB on 64-bit.
- mach_vm_address_t start_address =
- (SANITIZER_WORDSIZE == 32) ? 0x000000001000 : 0x000100000000;
-
+ uptr* largest_gap_found,
+ uptr* max_occupied_addr) {
const mach_vm_address_t max_vm_address = GetMaxVirtualAddress() + 1;
- mach_vm_address_t address = start_address;
- mach_vm_address_t free_begin = start_address;
+ mach_vm_address_t address = GAP_SEARCH_START_ADDRESS;
+ mach_vm_address_t free_begin = GAP_SEARCH_START_ADDRESS;
kern_return_t kr = KERN_SUCCESS;
if (largest_gap_found) *largest_gap_found = 0;
if (max_occupied_addr) *max_occupied_addr = 0;
while (kr == KERN_SUCCESS) {
mach_vm_size_t vmsize = 0;
natural_t depth = 0;
- RegionInfo vminfo;
- mach_msg_type_number_t count = kRegionInfoSize;
+ vm_region_submap_short_info_data_64_t vminfo;
+ mach_msg_type_number_t count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
kr = mach_vm_region_recurse(mach_task_self(), &address, &vmsize, &depth,
(vm_region_info_t)&vminfo, &count);
- if (kr == KERN_INVALID_ADDRESS) {
+
+ if (kr == KERN_SUCCESS) {
+      // There are cases where going beyond the process's max VM does
+      // not return KERN_INVALID_ADDRESS, so we check for going beyond
+      // that max address as well.
+ if (address > max_vm_address) {
+ address = max_vm_address;
+ kr = -1; // break after this iteration.
+ }
+
+ if (max_occupied_addr)
+ *max_occupied_addr = address + vmsize;
+ } else if (kr == KERN_INVALID_ADDRESS) {
// No more regions beyond "address", consider the gap at the end of VM.
address = max_vm_address;
- vmsize = 0;
+
+ // We will break after this iteration anyway since kr != KERN_SUCCESS
+ } else if (kr == KERN_DENIED) {
+ Report("ERROR: Unable to find a memory range for dynamic shadow.\n");
+ Report("HINT: Ensure mach_vm_region_recurse is allowed under sandbox.\n");
+ Die();
} else {
- if (max_occupied_addr) *max_occupied_addr = address + vmsize;
+ Report(
+ "WARNING: mach_vm_region_recurse returned unexpected code %d (%s)\n",
+ kr, mach_error_string(kr));
+ DCHECK(false && "mach_vm_region_recurse returned unexpected code");
+      break;  // address is only valid when kr == KERN_SUCCESS, so we must
+              // not use it.
}
+
if (free_begin != address) {
// We found a free region [free_begin..address-1].
uptr gap_start = RoundUpTo((uptr)free_begin + left_padding, alignment);
return 0;
}
+// This function (when used during initialization, when there is
+// only a single thread) can be used to verify that a range
+// of memory hasn't already been mapped and won't be mapped
+// later in the shared cache.
+//
+// If the syscall mach_vm_region_recurse fails (due to sandbox),
+// we assume that the memory is not mapped so that execution can continue.
+//
+// NOTE: range_end is inclusive
+//
+// WARNING: This function must NOT allocate memory, since it is
+// used in InitializeShadowMemory between where we search for
+// space for shadow and where we actually allocate it.
+bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
+ mach_vm_size_t vmsize = 0;
+ natural_t depth = 0;
+ vm_region_submap_short_info_data_64_t vminfo;
+ mach_msg_type_number_t count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
+ mach_vm_address_t address = range_start;
+
+ // First, check if the range is already mapped.
+ kern_return_t kr =
+ mach_vm_region_recurse(mach_task_self(), &address, &vmsize, &depth,
+ (vm_region_info_t)&vminfo, &count);
+
+ if (kr == KERN_DENIED) {
+ Report(
+ "WARN: mach_vm_region_recurse returned KERN_DENIED when checking "
+ "whether an address is mapped.\n");
+ Report("HINT: Is mach_vm_region_recurse allowed by sandbox?\n");
+ }
+
+ if (kr == KERN_SUCCESS && !IntervalsAreSeparate(address, address + vmsize - 1,
+ range_start, range_end)) {
+ // Overlaps with already-mapped memory
+ return false;
+ }
+
+ size_t cacheLength;
+ uptr cacheStart = (uptr)_dyld_get_shared_cache_range(&cacheLength);
+
+ if (cacheStart &&
+ !IntervalsAreSeparate(cacheStart, cacheStart + cacheLength - 1,
+ range_start, range_end)) {
+ // Overlaps with shared cache region
+ return false;
+ }
+
+ // We believe this address is available.
+ return true;
+}
+
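// Editorial sketch of the intended call pattern for the function above
// (addresses illustrative; note that range_end is inclusive):
static void CheckCandidateShadowRange() {
  uptr candidate_begin = 0x200000000ULL;
  uptr candidate_size = 0x100000000ULL;
  if (!MemoryRangeIsAvailable(candidate_begin,
                              candidate_begin + candidate_size - 1))
    Report("Candidate range overlaps a mapping or the shared cache.\n");
}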
// FIXME implement on this platform.
void GetMemoryProfile(fill_profile_f cb, uptr *stats) {}
#include "sanitizer_common.h"
#include "sanitizer_platform.h"
-
-/* TARGET_OS_OSX is not present in SDKs before Darwin16 (macOS 10.12) use
- TARGET_OS_MAC (we have no support for iOS in any form for these versions,
- so there's no ambiguity). */
-#if !defined(TARGET_OS_OSX) && TARGET_OS_MAC
-# define TARGET_OS_OSX 1
-#endif
-
-/* Other TARGET_OS_xxx are not present on earlier versions, define them to
- 0 (we have no support for them; they are not valid targets anyway). */
-#ifndef TARGET_OS_IOS
-#define TARGET_OS_IOS 0
-#endif
-#ifndef TARGET_OS_TV
-#define TARGET_OS_TV 0
-#endif
-#ifndef TARGET_OS_WATCH
-#define TARGET_OS_WATCH 0
-#endif
-
#if SANITIZER_APPLE
#include "sanitizer_posix.h"
VersionBase(u16 major, u16 minor) : major(major), minor(minor) {}
- bool operator==(const VersionType &other) const {
- return major == other.major && minor == other.minor;
- }
bool operator>=(const VersionType &other) const {
return major > other.major ||
(major == other.major && minor >= other.minor);
bool operator<(const VersionType &other) const { return !(*this >= other); }
};
+template <typename VersionType>
+bool operator==(const VersionBase<VersionType> &self,
+ const VersionBase<VersionType> &other) {
+ return self.major == other.major && self.minor == other.minor;
+}
+
struct MacosVersion : VersionBase<MacosVersion> {
MacosVersion(u16 major, u16 minor) : VersionBase(major, minor) {}
};
COMMON_MALLOC_FREE(ptr);
}
+#if SANITIZER_INTERCEPT_FREE_SIZED && defined(COMMON_MALLOC_FREE_SIZED)
+INTERCEPTOR(void, free_sized, void *ptr, size_t size) {
+ COMMON_MALLOC_ENTER();
+ COMMON_MALLOC_FREE_SIZED(ptr, size);
+}
+#endif
+
+#if SANITIZER_INTERCEPT_FREE_ALIGNED_SIZED && \
+ defined(COMMON_MALLOC_FREE_ALIGNED_SIZED)
+INTERCEPTOR(void, free_aligned_sized, void *ptr, size_t alignment,
+ size_t size) {
+ COMMON_MALLOC_ENTER();
+ COMMON_MALLOC_FREE_ALIGNED_SIZED(ptr, alignment, size);
+}
+#endif
+
INTERCEPTOR(void *, realloc, void *ptr, size_t size) {
COMMON_MALLOC_ENTER();
COMMON_MALLOC_REALLOC(ptr, size);
return _sys_execve(filename, argv, envp);
}
-tid_t GetTid() {
+ThreadID GetTid() {
DEFINE__REAL(int, _lwp_self);
return _REAL(_lwp_self);
}
-int TgKill(pid_t pid, tid_t tid, int sig) {
+int TgKill(pid_t pid, ThreadID tid, int sig) {
DEFINE__REAL(int, _lwp_kill, int a, int b);
(void)pid;
return _REAL(_lwp_kill, tid, sig);
#if !defined(__linux__) && !defined(__FreeBSD__) && !defined(__NetBSD__) && \
!defined(__APPLE__) && !defined(_WIN32) && !defined(__Fuchsia__) && \
- !(defined(__sun__) && defined(__svr4__))
+ !(defined(__sun__) && defined(__svr4__)) && !defined(__HAIKU__) && \
+ !defined(__wasi__)
# error "This operating system is not supported"
#endif
# define SANITIZER_SOLARIS 0
#endif
+#if defined(__HAIKU__)
+# define SANITIZER_HAIKU 1
+#else
+# define SANITIZER_HAIKU 0
+#endif
+
+#if defined(__wasi__)
+# define SANITIZER_WASI 1
+#else
+# define SANITIZER_WASI 0
+#endif
+
// - SANITIZER_APPLE: all Apple code
// - TARGET_OS_OSX: macOS
// - SANITIZER_IOS: devices (iOS and iOS-like)
# define SANITIZER_MUSL 0
#endif
-#define SANITIZER_POSIX \
+#define SANITIZER_POSIX \
(SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_APPLE || \
- SANITIZER_NETBSD || SANITIZER_SOLARIS)
+ SANITIZER_NETBSD || SANITIZER_SOLARIS || SANITIZER_HAIKU)
#if __LP64__ || defined(_WIN64)
# define SANITIZER_WORDSIZE 64
# endif
#endif
+// The first address that can be returned by mmap.
+#define SANITIZER_MMAP_BEGIN 0
+
// The range of addresses which can be returned by mmap.
// FIXME: this value should be different on different platforms. Larger values
// will still work but will consume more memory for TwoLevelByteMap.
# define SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT 0
#endif
-#if SANITIZER_FREEBSD || SANITIZER_APPLE || SANITIZER_NETBSD || SANITIZER_SOLARIS
+#if SANITIZER_FREEBSD || SANITIZER_APPLE || SANITIZER_NETBSD || \
+ SANITIZER_SOLARIS || SANITIZER_HAIKU
# define SANITIZER_MADVISE_DONTNEED MADV_FREE
#else
# define SANITIZER_MADVISE_DONTNEED MADV_DONTNEED
# define SANITIZER_START_BACKGROUND_THREAD_IN_ASAN_INTERNAL 0
#endif
+#if SANITIZER_LINUX
+# if SANITIZER_GLIBC
+// Workaround for
+// glibc/commit/3d3572f59059e2b19b8541ea648a6172136ec42e
+// Linux: Keep termios ioctl constants strictly internal
+# if __GLIBC_PREREQ(2, 41)
+# define SANITIZER_TERMIOS_IOCTL_CONSTANTS 0
+# else
+# define SANITIZER_TERMIOS_IOCTL_CONSTANTS 1
+# endif
+# else
+# define SANITIZER_TERMIOS_IOCTL_CONSTANTS 1
+# endif
+#endif
+
#endif // SANITIZER_PLATFORM_H
#if SANITIZER_FUCHSIA
#define SI_NOT_FUCHSIA 0
+#define SI_FUCHSIA 1
#else
#define SI_NOT_FUCHSIA 1
+#define SI_FUCHSIA 0
#endif
#if SANITIZER_SOLARIS
#define SI_SOLARIS 0
#endif
+#if SANITIZER_AIX
+# define SI_NOT_AIX 0
+#else
+# define SI_NOT_AIX 1
+#endif
+
#if SANITIZER_SOLARIS32
#define SI_SOLARIS32 1
#else
#define SANITIZER_INTERCEPT_STRLEN SI_NOT_FUCHSIA
#define SANITIZER_INTERCEPT_STRNLEN (SI_NOT_MAC && SI_NOT_FUCHSIA)
-#define SANITIZER_INTERCEPT_STRCMP SI_NOT_FUCHSIA
+#define SANITIZER_INTERCEPT_STRCMP (SI_NOT_FUCHSIA && SI_NOT_AIX)
#define SANITIZER_INTERCEPT_STRSTR SI_NOT_FUCHSIA
-#define SANITIZER_INTERCEPT_STRCASESTR SI_POSIX
+#define SANITIZER_INTERCEPT_STRCASESTR (SI_POSIX && SI_NOT_AIX)
#define SANITIZER_INTERCEPT_STRTOK SI_NOT_FUCHSIA
#define SANITIZER_INTERCEPT_STRCHR SI_NOT_FUCHSIA
-#define SANITIZER_INTERCEPT_STRCHRNUL SI_POSIX_NOT_MAC
+#define SANITIZER_INTERCEPT_STRCHRNUL (SI_POSIX_NOT_MAC && SI_NOT_AIX)
#define SANITIZER_INTERCEPT_STRRCHR SI_NOT_FUCHSIA
#define SANITIZER_INTERCEPT_STRSPN SI_NOT_FUCHSIA
#define SANITIZER_INTERCEPT_STRPBRK SI_NOT_FUCHSIA
#define SANITIZER_INTERCEPT_TEXTDOMAIN SI_LINUX_NOT_ANDROID || SI_SOLARIS
#define SANITIZER_INTERCEPT_STRCASECMP SI_POSIX
#define SANITIZER_INTERCEPT_MEMSET 1
-#define SANITIZER_INTERCEPT_MEMMOVE 1
-#define SANITIZER_INTERCEPT_MEMCPY 1
+#define SANITIZER_INTERCEPT_MEMMOVE SI_NOT_AIX
+#define SANITIZER_INTERCEPT_MEMCPY SI_NOT_AIX
#define SANITIZER_INTERCEPT_MEMCMP SI_NOT_FUCHSIA
#define SANITIZER_INTERCEPT_BCMP \
SANITIZER_INTERCEPT_MEMCMP && \
#define SANITIZER_INTERCEPT_PREAD64 (SI_GLIBC || SI_SOLARIS32)
#define SANITIZER_INTERCEPT_PWRITE64 (SI_GLIBC || SI_SOLARIS32)
+#define SANITIZER_INTERCEPT_LSEEK64 (SI_GLIBC || SI_SOLARIS32)
+
#define SANITIZER_INTERCEPT_READV SI_POSIX
#define SANITIZER_INTERCEPT_WRITEV SI_POSIX
#define SANITIZER_INTERCEPT_ISOC99_SCANF SI_GLIBC
#ifndef SANITIZER_INTERCEPT_PRINTF
-#define SANITIZER_INTERCEPT_PRINTF SI_POSIX
-#define SANITIZER_INTERCEPT_PRINTF_L (SI_FREEBSD || SI_NETBSD)
-#define SANITIZER_INTERCEPT_ISOC99_PRINTF SI_GLIBC
+# define SANITIZER_INTERCEPT_ASPRINTF SI_NOT_AIX
+# define SANITIZER_INTERCEPT_VASPRINTF SI_NOT_AIX
+# define SANITIZER_INTERCEPT_PRINTF SI_POSIX
+# define SANITIZER_INTERCEPT_PRINTF_L (SI_FREEBSD || SI_NETBSD)
+# define SANITIZER_INTERCEPT_ISOC99_PRINTF SI_GLIBC
#endif
+#define SANITIZER_INTERCEPT_SETPROCTITLE (SI_FREEBSD || SI_NETBSD)
+
#define SANITIZER_INTERCEPT___PRINTF_CHK \
(SANITIZER_INTERCEPT_PRINTF && SI_GLIBC)
-#define SANITIZER_INTERCEPT_FREXP SI_NOT_FUCHSIA
-#define SANITIZER_INTERCEPT_FREXPF_FREXPL SI_POSIX
+// AIX libc does not export frexp and frexpf.
+#define SANITIZER_INTERCEPT_FREXP (SI_NOT_FUCHSIA && SI_NOT_AIX)
+#define SANITIZER_INTERCEPT_FREXPF (SI_POSIX && SI_NOT_AIX)
+#define SANITIZER_INTERCEPT_FREXPL SI_POSIX
#define SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS SI_POSIX
#define SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS \
(SI_FREEBSD || SI_NETBSD || SI_LINUX || SI_SOLARIS)
#define SANITIZER_INTERCEPT_CLOCK_GETCPUCLOCKID \
(SI_LINUX || SI_FREEBSD || SI_NETBSD)
+// TODO: This should be SI_POSIX; adding glibc first until I have time
+// to verify all timer_t typedefs on other platforms.
+#define SANITIZER_INTERCEPT_TIMER_CREATE SI_GLIBC
#define SANITIZER_INTERCEPT_GETITIMER SI_POSIX
#define SANITIZER_INTERCEPT_TIME SI_POSIX
+#define SANITIZER_INTERCEPT_TIMESPEC_GET SI_LINUX
#define SANITIZER_INTERCEPT_GLOB (SI_GLIBC || SI_SOLARIS)
#define SANITIZER_INTERCEPT_GLOB64 SI_GLIBC
#define SANITIZER_INTERCEPT___B64_TO SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_ACCEPT4 \
(SI_LINUX_NOT_ANDROID || SI_NETBSD || SI_FREEBSD)
#define SANITIZER_INTERCEPT_PACCEPT SI_NETBSD
-#define SANITIZER_INTERCEPT_MODF SI_POSIX
+#define SANITIZER_INTERCEPT_MODF (SI_POSIX && SI_NOT_AIX)
#define SANITIZER_INTERCEPT_RECVMSG SI_POSIX
#define SANITIZER_INTERCEPT_SENDMSG SI_POSIX
#define SANITIZER_INTERCEPT_RECVMMSG SI_LINUX
#define SANITIZER_INTERCEPT___WCSXFRM_L SI_LINUX
#define SANITIZER_INTERCEPT_WCSNRTOMBS \
(SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
-#define SANITIZER_INTERCEPT_WCRTOMB \
- (SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_WCRTOMB \
+ (SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS || \
+ !SI_NOT_AIX)
#define SANITIZER_INTERCEPT_WCTOMB \
(SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
#define SANITIZER_INTERCEPT_TCGETATTR SI_LINUX_NOT_ANDROID || SI_SOLARIS
#define SANITIZER_INTERCEPT_GETGROUPS SI_POSIX
#define SANITIZER_INTERCEPT_POLL SI_POSIX
#define SANITIZER_INTERCEPT_PPOLL SI_LINUX_NOT_ANDROID || SI_SOLARIS
+#define SANITIZER_INTERCEPT_EPOLL (SI_LINUX)
+#define SANITIZER_INTERCEPT_KQUEUE (SI_FREEBSD || SI_NETBSD || SI_MAC)
#define SANITIZER_INTERCEPT_WORDEXP \
(SI_FREEBSD || SI_NETBSD || (SI_MAC && !SI_IOS) || SI_LINUX_NOT_ANDROID || \
SI_SOLARIS)
#define SANITIZER_INTERCEPT_GETMNTENT_R SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_STATFS \
(SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
-#define SANITIZER_INTERCEPT_STATFS64 SI_GLIBC && SANITIZER_HAS_STATFS64
+#define SANITIZER_INTERCEPT_STATFS64 \
+ ((SI_GLIBC || !SI_NOT_AIX) && SANITIZER_HAS_STATFS64)
#define SANITIZER_INTERCEPT_STATVFS \
(SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_STATVFS64 SI_GLIBC
#define SANITIZER_INTERCEPT_TTYNAME_R SI_POSIX
#define SANITIZER_INTERCEPT_TEMPNAM SI_POSIX
#define SANITIZER_INTERCEPT_SINCOS SI_LINUX || SI_SOLARIS
-#define SANITIZER_INTERCEPT_REMQUO SI_POSIX
-#define SANITIZER_INTERCEPT_REMQUOL (SI_POSIX && !SI_NETBSD)
-#define SANITIZER_INTERCEPT_LGAMMA SI_POSIX
-#define SANITIZER_INTERCEPT_LGAMMAL (SI_POSIX && !SI_NETBSD)
+#define SANITIZER_INTERCEPT_REMQUO (SI_POSIX && SI_NOT_AIX)
+#define SANITIZER_INTERCEPT_REMQUOL (SI_POSIX && !SI_NETBSD && SI_NOT_AIX)
+#define SANITIZER_INTERCEPT_LGAMMA (SI_POSIX && SI_NOT_AIX)
+#define SANITIZER_INTERCEPT_LGAMMAL (SI_POSIX && !SI_NETBSD && SI_NOT_AIX)
#define SANITIZER_INTERCEPT_LGAMMA_R (SI_FREEBSD || SI_LINUX || SI_SOLARIS)
#define SANITIZER_INTERCEPT_LGAMMAL_R SI_LINUX_NOT_ANDROID || SI_SOLARIS
#define SANITIZER_INTERCEPT_DRAND48_R SI_GLIBC
#define SANITIZER_INTERCEPT_EVENTFD_READ_WRITE (SI_LINUX || SI_FREEBSD)
#define SI_STAT_LINUX (SI_LINUX && __GLIBC_PREREQ(2, 33))
-#define SANITIZER_INTERCEPT_STAT \
- (SI_FREEBSD || SI_MAC || SI_ANDROID || SI_NETBSD || SI_SOLARIS || \
- SI_STAT_LINUX)
-#define SANITIZER_INTERCEPT_STAT64 SI_STAT_LINUX && SANITIZER_HAS_STAT64
-#define SANITIZER_INTERCEPT_LSTAT (SI_NETBSD || SI_FREEBSD || SI_STAT_LINUX)
+#define SANITIZER_INTERCEPT_STAT \
+ (SI_FREEBSD || SI_MAC || SI_ANDROID || SI_NETBSD || SI_SOLARIS || \
+ SI_STAT_LINUX || !SI_NOT_AIX)
+#define SANITIZER_INTERCEPT_STAT64 \
+ ((SI_STAT_LINUX || !SI_NOT_AIX) && SANITIZER_HAS_STAT64)
+#define SANITIZER_INTERCEPT_LSTAT \
+ (SI_NETBSD || SI_FREEBSD || SI_STAT_LINUX || !SI_NOT_AIX)
#define SANITIZER_INTERCEPT___XSTAT \
((!SANITIZER_INTERCEPT_STAT && SI_POSIX) || SI_STAT_LINUX)
#define SANITIZER_INTERCEPT___XSTAT64 SI_GLIBC
#define SANITIZER_INTERCEPT_MMAP SI_POSIX
#define SANITIZER_INTERCEPT_MMAP64 SI_GLIBC || SI_SOLARIS
-#define SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO (SI_GLIBC || SI_ANDROID)
+#define SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO \
+ (SI_GLIBC || SI_ANDROID || SI_FUCHSIA)
#define SANITIZER_INTERCEPT_MEMALIGN (!SI_FREEBSD && !SI_MAC && !SI_NETBSD)
#define SANITIZER_INTERCEPT___LIBC_MEMALIGN SI_GLIBC
#define SANITIZER_INTERCEPT_PVALLOC (SI_GLIBC || SI_ANDROID)
#define SANITIZER_INTERCEPT_MALLOC_USABLE_SIZE (!SI_MAC && !SI_NETBSD)
#define SANITIZER_INTERCEPT_MCHECK_MPROBE SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_WCSLEN 1
-#define SANITIZER_INTERCEPT_WCSCAT SI_POSIX
+#define SANITIZER_INTERCEPT_WCSNLEN 1
+#define SANITIZER_INTERCEPT_WCSCAT (SI_POSIX || SI_WINDOWS)
#define SANITIZER_INTERCEPT_WCSDUP SI_POSIX
#define SANITIZER_INTERCEPT_SIGNAL_AND_SIGACTION (!SI_WINDOWS && SI_NOT_FUCHSIA)
#define SANITIZER_INTERCEPT_BSD_SIGNAL SI_ANDROID
#define SANITIZER_INTERCEPT_PROTOENT_R SI_GLIBC
#define SANITIZER_INTERCEPT_NETENT (SI_LINUX || SI_NETBSD || SI_FREEBSD)
#define SANITIZER_INTERCEPT_SETVBUF \
- (SI_NETBSD || SI_FREEBSD || SI_LINUX || SI_MAC)
+ (SI_NETBSD || SI_FREEBSD || SI_LINUX || SI_MAC || !SI_NOT_AIX)
#define SANITIZER_INTERCEPT_GETMNTINFO (SI_NETBSD || SI_FREEBSD || SI_MAC)
#define SANITIZER_INTERCEPT_MI_VECTOR_HASH SI_NETBSD
#define SANITIZER_INTERCEPT_GETVFSSTAT SI_NETBSD
#define SANITIZER_INTERCEPT_SHA1 SI_NETBSD
#define SANITIZER_INTERCEPT_MD4 SI_NETBSD
#define SANITIZER_INTERCEPT_RMD160 SI_NETBSD
-#define SANITIZER_INTERCEPT_FSEEK (SI_NETBSD || SI_FREEBSD)
+#define SANITIZER_INTERCEPT_FSEEK SI_POSIX
#define SANITIZER_INTERCEPT_MD2 SI_NETBSD
#define SANITIZER_INTERCEPT_CDB SI_NETBSD
#define SANITIZER_INTERCEPT_VIS (SI_NETBSD || SI_FREEBSD)
# define SI_MAC_OS_DEPLOYMENT_MIN_13_00 0
#endif
#define SANITIZER_INTERCEPT_FREADLINK (SI_MAC && SI_MAC_OS_DEPLOYMENT_MIN_13_00)
+#define SANITIZER_INTERCEPT_GETSERVENT_R SI_GLIBC
+#define SANITIZER_INTERCEPT_GETSERVBYNAME_R SI_GLIBC
+#define SANITIZER_INTERCEPT_GETSERVBYPORT_R SI_GLIBC
+
+// Until free_sized and free_aligned_sized are more generally available,
+// we can only unconditionally intercept on ELF-based platforms where it
+// is okay to have undefined weak symbols.
+#ifdef __ELF__
+# define SANITIZER_INTERCEPT_FREE_SIZED 1
+# define SANITIZER_INTERCEPT_FREE_ALIGNED_SIZED 1
+#else
+# define SANITIZER_INTERCEPT_FREE_SIZED 0
+# define SANITIZER_INTERCEPT_FREE_ALIGNED_SIZED 0
+#endif
+
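// Editorial sketch of why undefined weak symbols make the unconditional
// ELF interception above safe (helper hypothetical, not the interceptor):
// a weak reference to a symbol the libc lacks resolves to null rather than
// failing at link time, so it can be probed before use.
#include <stdlib.h>
extern "C" __attribute__((weak)) void free_sized(void *ptr, size_t size);
static void FreeSizedOrFallback(void *p, size_t n) {
  if (&free_sized != nullptr)
    free_sized(p, n);  // libc provides the C23 sized deallocator
  else
    free(p);           // older libc: plain free is always valid here
}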
// This macro gives a way for downstream users to override the above
// interceptor macros irrespective of the platform they are on. They have
// to do two things:
#include <sys/mtio.h>
#include <sys/ptrace.h>
#include <sys/resource.h>
+#include <sys/shm.h>
#include <sys/signal.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <semaphore.h>
#include <signal.h>
#include <stddef.h>
-#include <md5.h>
-#include <sha224.h>
-#include <sha256.h>
-#include <sha384.h>
-#include <sha512.h>
#include <stdio.h>
#include <stringlist.h>
-#include <term.h>
#include <termios.h>
#include <time.h>
#include <ttyent.h>
#include <wchar.h>
#include <wordexp.h>
-#define _KERNEL // to declare 'shminfo' structure
-#include <sys/shm.h>
-#undef _KERNEL
-
#undef IOC_DIRMASK
// Include these after system headers to avoid name clashes and ambiguities.
unsigned struct_msqid_ds_sz = sizeof(struct msqid_ds);
unsigned struct_mq_attr_sz = sizeof(struct mq_attr);
unsigned struct_statvfs_sz = sizeof(struct statvfs);
-unsigned struct_shminfo_sz = sizeof(struct shminfo);
-unsigned struct_shm_info_sz = sizeof(struct shm_info);
unsigned struct_regmatch_sz = sizeof(regmatch_t);
unsigned struct_regex_sz = sizeof(regex_t);
unsigned struct_fstab_sz = sizeof(struct fstab);
const uptr sa_siginfo = (uptr)SA_SIGINFO;
int shmctl_ipc_stat = (int)IPC_STAT;
-int shmctl_ipc_info = (int)IPC_INFO;
-int shmctl_shm_info = (int)SHM_INFO;
-int shmctl_shm_stat = (int)SHM_STAT;
unsigned struct_utmpx_sz = sizeof(struct utmpx);
int map_fixed = MAP_FIXED;
const int si_SEGV_ACCERR = SEGV_ACCERR;
const int unvis_valid = UNVIS_VALID;
const int unvis_validpush = UNVIS_VALIDPUSH;
-
-const unsigned MD5_CTX_sz = sizeof(MD5_CTX);
-const unsigned MD5_return_length = MD5_DIGEST_STRING_LENGTH;
-
-#define SHA2_CONST(LEN) \
- const unsigned SHA##LEN##_CTX_sz = sizeof(SHA##LEN##_CTX); \
- const unsigned SHA##LEN##_return_length = SHA##LEN##_DIGEST_STRING_LENGTH; \
- const unsigned SHA##LEN##_block_length = SHA##LEN##_BLOCK_LENGTH; \
- const unsigned SHA##LEN##_digest_length = SHA##LEN##_DIGEST_LENGTH
-
-SHA2_CONST(224);
-SHA2_CONST(256);
-SHA2_CONST(384);
-SHA2_CONST(512);
-
-#undef SHA2_CONST
} // namespace __sanitizer
using namespace __sanitizer;
typedef void __sanitizer_FILE;
-extern unsigned struct_shminfo_sz;
-extern unsigned struct_shm_info_sz;
extern int shmctl_ipc_stat;
-extern int shmctl_ipc_info;
-extern int shmctl_shm_info;
-extern int shmctl_shm_stat;
+
+// These constants are absent on this platform; defining them to -1 lets
+// the generic interceptor code compile unchanged.
+#define struct_shminfo_sz -1
+#define struct_shm_info_sz -1
+#define shmctl_shm_stat -1
+#define shmctl_ipc_info -1
+#define shmctl_shm_info -1
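// Editorial sketch of the simplification the -1 sentinels buy (helper
// hypothetical): generic interceptor code can compare against every shmctl
// command unconditionally, and the -1 placeholders never match a real,
// non-negative command on this platform, so those branches are dead.
static bool ShmctlWritesShmidDs(int cmd) {
  if (cmd == shmctl_ipc_stat) return true;  // real constant here
  if (cmd == shmctl_shm_stat) return true;  // -1 sentinel: never taken
  return false;
}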
extern unsigned struct_utmpx_sz;
extern const int si_SEGV_MAPERR;
extern const int si_SEGV_ACCERR;
-extern const unsigned MD5_CTX_sz;
-extern const unsigned MD5_return_length;
-
-#define SHA2_EXTERN(LEN) \
- extern const unsigned SHA##LEN##_CTX_sz; \
- extern const unsigned SHA##LEN##_return_length; \
- extern const unsigned SHA##LEN##_block_length; \
- extern const unsigned SHA##LEN##_digest_length
-
-SHA2_EXTERN(224);
-SHA2_EXTERN(256);
-SHA2_EXTERN(384);
-SHA2_EXTERN(512);
-
-#undef SHA2_EXTERN
-
struct __sanitizer_cap_rights {
u64 cr_rights[2];
};
// With old kernels (and even new kernels on powerpc) asm/stat.h uses types that
// are not defined anywhere in userspace headers. Fake them. This seems to work
-// fine with newer headers, too. Beware that with <sys/stat.h>, struct stat
-// takes the form of struct stat64 on 32-bit platforms if _FILE_OFFSET_BITS=64.
-// Also, for some platforms (e.g. mips) there are additional members in the
-// <sys/stat.h> struct stat:s.
+// fine with newer headers, too.
#include <linux/posix_types.h>
# if defined(__x86_64__) || defined(__mips__) || defined(__hexagon__)
# include <sys/stat.h>
#include <md5.h>
#include <rmd160.h>
#include <soundcard.h>
-#include <term.h>
#include <termios.h>
#include <time.h>
#include <ttyent.h>
#include <stringlist.h>
#if defined(__x86_64__)
-#include <nvmm.h>
+#include <dev/nvmm/nvmm_ioctl.h>
#endif
// clang-format on
unsigned timeval_sz = sizeof(timeval);
unsigned uid_t_sz = sizeof(uid_t);
unsigned gid_t_sz = sizeof(gid_t);
+unsigned fpos_t_sz = sizeof(fpos_t);
unsigned mbstate_t_sz = sizeof(mbstate_t);
unsigned sigset_t_sz = sizeof(sigset_t);
unsigned struct_timezone_sz = sizeof(struct timezone);
const unsigned MD5_CTX_sz = sizeof(MD5_CTX);
const unsigned MD5_return_length = MD5_DIGEST_STRING_LENGTH;
-const unsigned fpos_t_sz = sizeof(fpos_t);
-
const unsigned MD2_CTX_sz = sizeof(MD2_CTX);
const unsigned MD2_return_length = MD2_DIGEST_STRING_LENGTH;
extern unsigned timeval_sz;
extern unsigned uid_t_sz;
extern unsigned gid_t_sz;
+extern unsigned fpos_t_sz;
extern unsigned mbstate_t_sz;
extern unsigned struct_timezone_sz;
extern unsigned struct_tms_sz;
extern const unsigned MD5_CTX_sz;
extern const unsigned MD5_return_length;
-extern const unsigned fpos_t_sz;
-
extern const unsigned MD2_CTX_sz;
extern const unsigned MD2_return_length;
// Must go after undef _FILE_OFFSET_BITS.
#include "sanitizer_platform.h"
-#if SANITIZER_LINUX || SANITIZER_APPLE
+#if SANITIZER_LINUX || SANITIZER_APPLE || SANITIZER_HAIKU
// Must go after undef _FILE_OFFSET_BITS.
#include "sanitizer_glibc_version.h"
#include <time.h>
#include <wchar.h>
#include <regex.h>
-#if !SANITIZER_APPLE
+#if !SANITIZER_APPLE && !SANITIZER_HAIKU
#include <utmp.h>
#endif
#endif
#if !SANITIZER_ANDROID
+#if !SANITIZER_HAIKU
#include <sys/mount.h>
+#endif
#include <sys/timeb.h>
#include <utmpx.h>
#endif
#if !SANITIZER_ANDROID
#include <ifaddrs.h>
+#if !SANITIZER_HAIKU
#include <sys/ucontext.h>
#include <wordexp.h>
#endif
+#endif
#if SANITIZER_LINUX
#if SANITIZER_GLIBC
#include <sys/vfs.h>
#include <sys/epoll.h>
#include <linux/capability.h>
-#else
+#elif !SANITIZER_HAIKU
#include <fstab.h>
#endif // SANITIZER_LINUX
#include <sys/sockio.h>
#endif
+#if SANITIZER_HAIKU
+#include <sys/sockio.h>
+#include <sys/ioctl.h>
+#endif
+
// Include these after system headers to avoid name clashes and ambiguities.
# include "sanitizer_common.h"
# include "sanitizer_internal_defs.h"
unsigned struct_fstab_sz = sizeof(struct fstab);
#endif // SANITIZER_GLIBC || SANITIZER_FREEBSD || SANITIZER_NETBSD ||
// SANITIZER_APPLE
-#if !SANITIZER_ANDROID
+#if !SANITIZER_ANDROID && !SANITIZER_HAIKU
unsigned struct_statfs_sz = sizeof(struct statfs);
unsigned struct_sockaddr_sz = sizeof(struct sockaddr);
int shmctl_shm_stat = (int)SHM_STAT;
#endif
-#if !SANITIZER_APPLE && !SANITIZER_FREEBSD
+#if !SANITIZER_APPLE && !SANITIZER_FREEBSD && !SANITIZER_HAIKU
unsigned struct_utmp_sz = sizeof(struct utmp);
#endif
#if !SANITIZER_ANDROID
int glob_altdirfunc = GLOB_ALTDIRFUNC;
#endif
-# if !SANITIZER_ANDROID
+# if !SANITIZER_ANDROID && !SANITIZER_HAIKU
const int wordexp_wrde_dooffs = WRDE_DOOFFS;
-# endif // !SANITIZER_ANDROID
+# endif // !SANITIZER_ANDROID && !SANITIZER_HAIKU
# if SANITIZER_LINUX && !SANITIZER_ANDROID && \
(defined(__i386) || defined(__x86_64) || defined(__mips64) || \
unsigned struct_sock_fprog_sz = sizeof(struct sock_fprog);
# endif // SANITIZER_GLIBC
-# if !SANITIZER_ANDROID && !SANITIZER_APPLE
+# if !SANITIZER_ANDROID && !SANITIZER_APPLE && !SANITIZER_HAIKU
unsigned struct_sioc_sg_req_sz = sizeof(struct sioc_sg_req);
unsigned struct_sioc_vif_req_sz = sizeof(struct sioc_vif_req);
#endif
+ unsigned fpos_t_sz = sizeof(fpos_t);
+
const unsigned long __sanitizer_bufsiz = BUFSIZ;
const unsigned IOCTL_NOT_PRESENT = 0;
+ unsigned IOCTL_FIONBIO = FIONBIO;
+#if !SANITIZER_HAIKU
unsigned IOCTL_FIOASYNC = FIOASYNC;
unsigned IOCTL_FIOCLEX = FIOCLEX;
unsigned IOCTL_FIOGETOWN = FIOGETOWN;
- unsigned IOCTL_FIONBIO = FIONBIO;
unsigned IOCTL_FIONCLEX = FIONCLEX;
unsigned IOCTL_FIOSETOWN = FIOSETOWN;
+#endif
unsigned IOCTL_SIOCADDMULTI = SIOCADDMULTI;
unsigned IOCTL_SIOCATMARK = SIOCATMARK;
unsigned IOCTL_SIOCDELMULTI = SIOCDELMULTI;
unsigned IOCTL_SIOCSIFMTU = SIOCSIFMTU;
unsigned IOCTL_SIOCSIFNETMASK = SIOCSIFNETMASK;
unsigned IOCTL_SIOCSPGRP = SIOCSPGRP;
+
+#if !SANITIZER_HAIKU
unsigned IOCTL_TIOCCONS = TIOCCONS;
- unsigned IOCTL_TIOCEXCL = TIOCEXCL;
unsigned IOCTL_TIOCGETD = TIOCGETD;
+ unsigned IOCTL_TIOCNOTTY = TIOCNOTTY;
+ unsigned IOCTL_TIOCPKT = TIOCPKT;
+ unsigned IOCTL_TIOCSETD = TIOCSETD;
+ unsigned IOCTL_TIOCSTI = TIOCSTI;
+#endif
+
+ unsigned IOCTL_TIOCEXCL = TIOCEXCL;
unsigned IOCTL_TIOCGPGRP = TIOCGPGRP;
unsigned IOCTL_TIOCGWINSZ = TIOCGWINSZ;
unsigned IOCTL_TIOCMBIC = TIOCMBIC;
unsigned IOCTL_TIOCMBIS = TIOCMBIS;
unsigned IOCTL_TIOCMGET = TIOCMGET;
unsigned IOCTL_TIOCMSET = TIOCMSET;
- unsigned IOCTL_TIOCNOTTY = TIOCNOTTY;
unsigned IOCTL_TIOCNXCL = TIOCNXCL;
unsigned IOCTL_TIOCOUTQ = TIOCOUTQ;
- unsigned IOCTL_TIOCPKT = TIOCPKT;
unsigned IOCTL_TIOCSCTTY = TIOCSCTTY;
- unsigned IOCTL_TIOCSETD = TIOCSETD;
unsigned IOCTL_TIOCSPGRP = TIOCSPGRP;
- unsigned IOCTL_TIOCSTI = TIOCSTI;
unsigned IOCTL_TIOCSWINSZ = TIOCSWINSZ;
#if SANITIZER_LINUX && !SANITIZER_ANDROID
unsigned IOCTL_SIOCGETSGCNT = SIOCGETSGCNT;
unsigned IOCTL_SOUND_PCM_WRITE_FILTER = SOUND_PCM_WRITE_FILTER;
#endif // SOUND_VERSION
unsigned IOCTL_TCFLSH = TCFLSH;
+# if SANITIZER_TERMIOS_IOCTL_CONSTANTS
unsigned IOCTL_TCGETS = TCGETS;
+# endif
unsigned IOCTL_TCSBRK = TCSBRK;
unsigned IOCTL_TCSBRKP = TCSBRKP;
+# if SANITIZER_TERMIOS_IOCTL_CONSTANTS
unsigned IOCTL_TCSETS = TCSETS;
unsigned IOCTL_TCSETSF = TCSETSF;
unsigned IOCTL_TCSETSW = TCSETSW;
+# endif
unsigned IOCTL_TCXONC = TCXONC;
unsigned IOCTL_TIOCGLCKTRMIOS = TIOCGLCKTRMIOS;
unsigned IOCTL_TIOCGSOFTCAR = TIOCGSOFTCAR;
CHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_level);
CHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_type);
-#if SANITIZER_LINUX && (__ANDROID_API__ >= 21 || __GLIBC_PREREQ (2, 14))
+# if SANITIZER_LINUX && (SANITIZER_ANDROID || __GLIBC_PREREQ(2, 14))
CHECK_TYPE_SIZE(mmsghdr);
CHECK_SIZE_AND_OFFSET(mmsghdr, msg_hdr);
CHECK_SIZE_AND_OFFSET(mmsghdr, msg_len);
CHECK_SIZE_AND_OFFSET(dirent, d_ino);
#if SANITIZER_APPLE
CHECK_SIZE_AND_OFFSET(dirent, d_seekoff);
-#elif SANITIZER_FREEBSD
+#elif SANITIZER_FREEBSD || SANITIZER_HAIKU
// There is no 'd_off' field on FreeBSD or Haiku.
#else
CHECK_SIZE_AND_OFFSET(dirent, d_off);
CHECK_TYPE_SIZE(ifconf);
CHECK_SIZE_AND_OFFSET(ifconf, ifc_len);
+#if !SANITIZER_HAIKU
CHECK_SIZE_AND_OFFSET(ifconf, ifc_ifcu);
+#endif
CHECK_TYPE_SIZE(pollfd);
CHECK_SIZE_AND_OFFSET(pollfd, fd);
CHECK_TYPE_SIZE(__kernel_fd_set);
#endif
-#if !SANITIZER_ANDROID
+#if !SANITIZER_ANDROID && !SANITIZER_HAIKU
CHECK_TYPE_SIZE(wordexp_t);
CHECK_SIZE_AND_OFFSET(wordexp_t, we_wordc);
CHECK_SIZE_AND_OFFSET(wordexp_t, we_wordv);
CHECK_SIZE_AND_OFFSET(mntent, mnt_passno);
#endif
+#if !SANITIZER_HAIKU
CHECK_TYPE_SIZE(ether_addr);
+#endif
#if SANITIZER_GLIBC || SANITIZER_FREEBSD
CHECK_TYPE_SIZE(ipc_perm);
CHECK_TYPE_SIZE(clockid_t);
#endif
-#if !SANITIZER_ANDROID
+#if !SANITIZER_ANDROID && !SANITIZER_HAIKU
CHECK_TYPE_SIZE(ifaddrs);
CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_next);
CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_name);
#ifndef SANITIZER_PLATFORM_LIMITS_POSIX_H
#define SANITIZER_PLATFORM_LIMITS_POSIX_H
-#if SANITIZER_LINUX || SANITIZER_APPLE
-
-#include "sanitizer_internal_defs.h"
-#include "sanitizer_platform.h"
-#include "sanitizer_mallinfo.h"
-
-#if SANITIZER_APPLE
-#include <sys/cdefs.h>
-#if !__DARWIN_ONLY_64_BIT_INO_T
-#define SANITIZER_HAS_STAT64 1
-#define SANITIZER_HAS_STATFS64 1
-#else
-#define SANITIZER_HAS_STAT64 0
-#define SANITIZER_HAS_STATFS64 0
-#endif
-#elif SANITIZER_GLIBC || SANITIZER_ANDROID
-#define SANITIZER_HAS_STAT64 1
-#define SANITIZER_HAS_STATFS64 1
-#endif
+#if SANITIZER_LINUX || SANITIZER_APPLE || SANITIZER_HAIKU
+
+# include "sanitizer_internal_defs.h"
+# include "sanitizer_mallinfo.h"
+# include "sanitizer_platform.h"
+
+# if SANITIZER_APPLE
+# include <sys/cdefs.h>
+# if !__DARWIN_ONLY_64_BIT_INO_T
+# define SANITIZER_HAS_STAT64 1
+# define SANITIZER_HAS_STATFS64 1
+# else
+# define SANITIZER_HAS_STAT64 0
+# define SANITIZER_HAS_STATFS64 0
+# endif
+# elif SANITIZER_GLIBC || SANITIZER_ANDROID
+# define SANITIZER_HAS_STAT64 1
+# define SANITIZER_HAS_STATFS64 1
+# endif
-#if defined(__sparc__)
+# if defined(__sparc__)
// FIXME: This can't be included from tsan which does not support sparc yet.
-#include "sanitizer_glibc_version.h"
-#endif
+# include "sanitizer_glibc_version.h"
+# endif
-# define GET_LINK_MAP_BY_DLOPEN_HANDLE(handle) ((link_map*)(handle))
+# define GET_LINK_MAP_BY_DLOPEN_HANDLE(handle) ((link_map *)(handle))
namespace __sanitizer {
extern unsigned struct_utsname_sz;
extern unsigned struct_stat_sz;
-#if SANITIZER_HAS_STAT64
+# if SANITIZER_HAS_STAT64
extern unsigned struct_stat64_sz;
-#endif
+# endif
extern unsigned struct_rusage_sz;
extern unsigned siginfo_t_sz;
extern unsigned struct_itimerval_sz;
extern unsigned struct_sigevent_sz;
extern unsigned struct_stack_t_sz;
extern unsigned struct_sched_param_sz;
-#if SANITIZER_HAS_STATFS64
+# if SANITIZER_HAS_STATFS64
extern unsigned struct_statfs64_sz;
-#endif
+# endif
extern unsigned struct_regex_sz;
extern unsigned struct_regmatch_sz;
-#if !SANITIZER_ANDROID
+# if !SANITIZER_ANDROID
extern unsigned struct_fstab_sz;
extern unsigned struct_statfs_sz;
extern unsigned struct_sockaddr_sz;
# if defined(__x86_64__)
const unsigned struct_kernel_stat_sz = 144;
const unsigned struct_kernel_stat64_sz = 0;
-#elif defined(__i386__)
+# elif defined(__i386__)
const unsigned struct_kernel_stat_sz = 64;
const unsigned struct_kernel_stat64_sz = 96;
-#elif defined(__arm__)
+# elif defined(__arm__)
const unsigned struct_kernel_stat_sz = 64;
const unsigned struct_kernel_stat64_sz = 104;
-#elif defined(__aarch64__)
+# elif defined(__aarch64__)
const unsigned struct_kernel_stat_sz = 128;
const unsigned struct_kernel_stat64_sz = 104;
-#elif defined(__powerpc__) && !defined(__powerpc64__)
+# elif defined(__powerpc__) && !defined(__powerpc64__)
const unsigned struct_kernel_stat_sz = 72;
const unsigned struct_kernel_stat64_sz = 104;
-#elif defined(__powerpc64__)
+# elif defined(__powerpc64__)
const unsigned struct_kernel_stat_sz = 144;
const unsigned struct_kernel_stat64_sz = 104;
-#elif defined(__mips__)
-const unsigned struct_kernel_stat_sz =
- SANITIZER_ANDROID
- ? FIRST_32_SECOND_64(104, 128)
- : FIRST_32_SECOND_64((_MIPS_SIM == _ABIN32) ? 176 : 160, 216);
+# elif defined(__mips__)
+const unsigned struct_kernel_stat_sz = SANITIZER_ANDROID
+ ? FIRST_32_SECOND_64(104, 128)
+# if defined(_ABIN32) && _MIPS_SIM == _ABIN32
+ : FIRST_32_SECOND_64(176, 216);
+# elif SANITIZER_MUSL
+ : FIRST_32_SECOND_64(160, 208);
+# else
+ : FIRST_32_SECOND_64(160, 216);
+# endif
const unsigned struct_kernel_stat64_sz = 104;
-#elif defined(__s390__) && !defined(__s390x__)
+# elif defined(__s390__) && !defined(__s390x__)
const unsigned struct_kernel_stat_sz = 64;
const unsigned struct_kernel_stat64_sz = 104;
-#elif defined(__s390x__)
+# elif defined(__s390x__)
const unsigned struct_kernel_stat_sz = 144;
const unsigned struct_kernel_stat64_sz = 0;
-#elif defined(__sparc__) && defined(__arch64__)
+# elif defined(__sparc__) && defined(__arch64__)
const unsigned struct___old_kernel_stat_sz = 0;
const unsigned struct_kernel_stat_sz = 104;
const unsigned struct_kernel_stat64_sz = 144;
-#elif defined(__sparc__) && !defined(__arch64__)
+# elif defined(__sparc__) && !defined(__arch64__)
const unsigned struct___old_kernel_stat_sz = 0;
const unsigned struct_kernel_stat_sz = 64;
const unsigned struct_kernel_stat64_sz = 104;
-#elif SANITIZER_RISCV64
+# elif SANITIZER_RISCV64
const unsigned struct_kernel_stat_sz = 128;
const unsigned struct_kernel_stat64_sz = 0; // RISCV64 does not use stat64
# elif defined(__hexagon__)
extern unsigned struct_oldold_utsname_sz;
const unsigned struct_kexec_segment_sz = 4 * sizeof(unsigned long);
-#endif // SANITIZER_LINUX
+# endif // SANITIZER_LINUX
-#if SANITIZER_LINUX
+# if SANITIZER_LINUX
-#if defined(__powerpc64__) || defined(__s390__) || defined(__loongarch__)
+# if defined(__powerpc64__) || defined(__s390__) || defined(__loongarch__)
const unsigned struct___old_kernel_stat_sz = 0;
-#elif !defined(__sparc__)
+# elif !defined(__sparc__)
const unsigned struct___old_kernel_stat_sz = 32;
-#endif
+# endif
extern unsigned struct_rlimit_sz;
extern unsigned struct_utimbuf_sz;
const unsigned old_sigset_t_sz = sizeof(unsigned long);
struct __sanitizer_sem_t {
-#if SANITIZER_ANDROID && defined(_LP64)
+# if SANITIZER_ANDROID && defined(_LP64)
int data[4];
-#elif SANITIZER_ANDROID && !defined(_LP64)
+# elif SANITIZER_ANDROID && !defined(_LP64)
int data;
-#elif SANITIZER_LINUX
+# elif SANITIZER_LINUX
uptr data[4];
-#endif
+# endif
};
-#endif // SANITIZER_LINUX
+# endif // SANITIZER_LINUX
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
+# if SANITIZER_LINUX && !SANITIZER_ANDROID
extern unsigned struct_ustat_sz;
extern unsigned struct_rlimit64_sz;
extern unsigned struct_statvfs64_sz;
int gid;
int cuid;
int cgid;
-#ifdef __powerpc__
+# ifdef __powerpc__
unsigned mode;
unsigned __seq;
u64 __unused1;
u64 __unused2;
-#elif defined(__sparc__)
+# elif defined(__sparc__)
unsigned mode;
unsigned short __pad2;
unsigned short __seq;
unsigned long long __unused1;
unsigned long long __unused2;
-#else
+# else
unsigned int mode;
unsigned short __seq;
unsigned short __pad2;
-#if defined(__x86_64__) && !defined(_LP64)
+# if defined(__x86_64__) && !defined(_LP64)
u64 __unused1;
u64 __unused2;
-#else
+# else
unsigned long __unused1;
unsigned long __unused2;
-#endif
-#endif
+# endif
+# endif
};
struct __sanitizer_shmid_ds {
__sanitizer_ipc_perm shm_perm;
-#if defined(__sparc__)
-#if !defined(__arch64__)
+# if defined(__sparc__)
+# if !defined(__arch64__)
u32 __pad1;
-#endif
+# endif
long shm_atime;
-#if !defined(__arch64__)
+# if !defined(__arch64__)
u32 __pad2;
-#endif
+# endif
long shm_dtime;
-#if !defined(__arch64__)
+# if !defined(__arch64__)
u32 __pad3;
-#endif
+# endif
long shm_ctime;
uptr shm_segsz;
int shm_cpid;
unsigned long shm_nattch;
unsigned long __glibc_reserved1;
unsigned long __glibc_reserved2;
-#else
-#ifndef __powerpc__
+# else
+# ifndef __powerpc__
uptr shm_segsz;
-#elif !defined(__powerpc64__)
+# elif !defined(__powerpc64__)
uptr __unused0;
-#endif
-#if defined(__x86_64__) && !defined(_LP64)
+# endif
+# if defined(__x86_64__) && !defined(_LP64)
u64 shm_atime;
u64 shm_dtime;
u64 shm_ctime;
-#else
+# else
uptr shm_atime;
-#if !defined(_LP64) && !defined(__mips__)
+# if !defined(_LP64) && !defined(__mips__)
uptr __unused1;
-#endif
+# endif
uptr shm_dtime;
-#if !defined(_LP64) && !defined(__mips__)
+# if !defined(_LP64) && !defined(__mips__)
uptr __unused2;
-#endif
+# endif
uptr shm_ctime;
-#if !defined(_LP64) && !defined(__mips__)
+# if !defined(_LP64) && !defined(__mips__)
uptr __unused3;
-#endif
-#endif
-#ifdef __powerpc__
+# endif
+# endif
+# ifdef __powerpc__
uptr shm_segsz;
-#endif
+# endif
int shm_cpid;
int shm_lpid;
-#if defined(__x86_64__) && !defined(_LP64)
+# if defined(__x86_64__) && !defined(_LP64)
u64 shm_nattch;
u64 __unused4;
u64 __unused5;
-#else
+# else
uptr shm_nattch;
uptr __unused4;
uptr __unused5;
-#endif
-#endif
+# endif
+# endif
};
-#endif
+# endif
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
+# if SANITIZER_LINUX && !SANITIZER_ANDROID
extern unsigned struct_msqid_ds_sz;
extern unsigned struct_mq_attr_sz;
extern unsigned struct_timex_sz;
extern unsigned struct_statvfs_sz;
-#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
+# endif // SANITIZER_LINUX && !SANITIZER_ANDROID
struct __sanitizer_iovec {
void *iov_base;
usize iov_len;
};
-#if !SANITIZER_ANDROID
+# if !SANITIZER_ANDROID
struct __sanitizer_ifaddrs {
struct __sanitizer_ifaddrs *ifa_next;
char *ifa_name;
void *ifa_addr; // (struct sockaddr *)
void *ifa_netmask; // (struct sockaddr *)
// This is a union on Linux.
-# ifdef ifa_dstaddr
-# undef ifa_dstaddr
-# endif
+# ifdef ifa_dstaddr
+# undef ifa_dstaddr
+# endif
void *ifa_dstaddr; // (struct sockaddr *)
void *ifa_data;
};
-#endif // !SANITIZER_ANDROID
+# endif // !SANITIZER_ANDROID
-#if SANITIZER_APPLE
+# if SANITIZER_APPLE
typedef unsigned long __sanitizer_pthread_key_t;
-#else
+# else
typedef unsigned __sanitizer_pthread_key_t;
-#endif
+# endif
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
+# if SANITIZER_LINUX && !SANITIZER_ANDROID
struct __sanitizer_XDR {
int x_op;
const int __sanitizer_XDR_ENCODE = 0;
const int __sanitizer_XDR_DECODE = 1;
const int __sanitizer_XDR_FREE = 2;
-#endif
+# endif
struct __sanitizer_passwd {
char *pw_name;
char *pw_passwd;
int pw_uid;
int pw_gid;
-#if SANITIZER_APPLE
+# if SANITIZER_APPLE
long pw_change;
char *pw_class;
-#endif
-#if !(SANITIZER_ANDROID && (SANITIZER_WORDSIZE == 32))
+# endif
+# if !(SANITIZER_ANDROID && (SANITIZER_WORDSIZE == 32)) && !SANITIZER_HAIKU
char *pw_gecos;
-#endif
+# endif
char *pw_dir;
char *pw_shell;
-#if SANITIZER_APPLE
+# if SANITIZER_APPLE
long pw_expire;
-#endif
+# endif
+# if SANITIZER_HAIKU
+ char *pw_gecos;
+# endif
};
struct __sanitizer_group {
# if (SANITIZER_LINUX && !SANITIZER_GLIBC && !SANITIZER_ANDROID) || \
(defined(__x86_64__) && !defined(_LP64)) || defined(__hexagon__)
typedef long long __sanitizer_time_t;
-#else
+# else
typedef long __sanitizer_time_t;
-#endif
+# endif
typedef long __sanitizer_suseconds_t;
+struct __sanitizer_timespec {
+ __sanitizer_time_t tv_sec; /* seconds */
+ u64 tv_nsec; /* nanoseconds */
+};
+
+struct __sanitizer_itimerspec {
+ struct __sanitizer_timespec it_interval; /* timer period */
+ struct __sanitizer_timespec it_value; /* timer expiration */
+};
+
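// Editorial sketch of how the mirrored structs above get consumed; the
// interceptor shown is an assumption modeled on the common-interceptor
// macros (INTERCEPTOR, COMMON_INTERCEPTOR_ENTER/WRITE_RANGE, REAL), not
// the exact call site, which is outside this excerpt:
INTERCEPTOR(int, timer_gettime, void *timerid, void *curr_value) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, timer_gettime, timerid, curr_value);
  int res = REAL(timer_gettime)(timerid, curr_value);
  if (!res && curr_value)
    // Mark exactly the bytes the call wrote, using the mirrored size.
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, curr_value,
                                   sizeof(__sanitizer_itimerspec));
  return res;
}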
struct __sanitizer_timeval {
__sanitizer_time_t tv_sec;
__sanitizer_suseconds_t tv_usec;
int tm_wday;
int tm_yday;
int tm_isdst;
+# if SANITIZER_HAIKU
+ int tm_gmtoff;
+# else
long int tm_gmtoff;
+# endif
const char *tm_zone;
};
-#if SANITIZER_LINUX
+# if SANITIZER_LINUX
struct __sanitizer_mntent {
char *mnt_fsname;
char *mnt_dir;
int handle_type;
unsigned char f_handle[1]; // variable sized
};
-#endif
+# endif
-#if SANITIZER_APPLE
+# if SANITIZER_APPLE || SANITIZER_HAIKU
struct __sanitizer_msghdr {
void *msg_name;
unsigned msg_namelen;
int cmsg_level;
int cmsg_type;
};
-#else
+# elif SANITIZER_MUSL
+struct __sanitizer_msghdr {
+ void *msg_name;
+ unsigned msg_namelen;
+ struct __sanitizer_iovec *msg_iov;
+ int msg_iovlen;
+# if SANITIZER_WORDSIZE == 64
+ int __pad1;
+# endif
+ void *msg_control;
+ unsigned msg_controllen;
+# if SANITIZER_WORDSIZE == 64
+ int __pad2;
+# endif
+ int msg_flags;
+};
+struct __sanitizer_cmsghdr {
+ unsigned cmsg_len;
+# if SANITIZER_WORDSIZE == 64
+ int __pad1;
+# endif
+ int cmsg_level;
+ int cmsg_type;
+};
+# else
// In POSIX, int msg_iovlen; socklen_t msg_controllen; socklen_t cmsg_len; but
// many implementations don't conform to the standard.
struct __sanitizer_msghdr {
int cmsg_level;
int cmsg_type;
};
-#endif
+# endif
-#if SANITIZER_LINUX
+# if SANITIZER_LINUX
struct __sanitizer_mmsghdr {
__sanitizer_msghdr msg_hdr;
unsigned int msg_len;
};
-#endif
+# endif
-#if SANITIZER_APPLE
+# if SANITIZER_APPLE
struct __sanitizer_dirent {
unsigned long long d_ino;
unsigned long long d_seekoff;
unsigned short d_reclen;
// more fields that we don't care about
};
+# elif SANITIZER_HAIKU
+struct __sanitizer_dirent {
+ int d_dev;
+ int d_pdev;
+ unsigned long long d_ino;
+ unsigned long long d_pino;
+ unsigned short d_reclen;
+ // more fields that we don't care about
+};
# elif (SANITIZER_LINUX && !SANITIZER_GLIBC) || defined(__x86_64__) || \
defined(__hexagon__)
struct __sanitizer_dirent {
unsigned short d_reclen;
// more fields that we don't care about
};
-#endif
+extern unsigned struct_sock_fprog_sz;
+# endif
-#if defined(__x86_64__) && !defined(_LP64)
+# if SANITIZER_HAIKU
+typedef int __sanitizer_clock_t;
+# elif defined(__x86_64__) && !defined(_LP64)
typedef long long __sanitizer_clock_t;
-#else
+# else
typedef long __sanitizer_clock_t;
-#endif
+# endif
-#if SANITIZER_LINUX
+# if SANITIZER_LINUX || SANITIZER_HAIKU
typedef int __sanitizer_clockid_t;
typedef unsigned long long __sanitizer_eventfd_t;
-#endif
+# endif
-#if SANITIZER_LINUX
+# if SANITIZER_LINUX
# if defined(_LP64) || defined(__x86_64__) || defined(__powerpc__) || \
defined(__mips__) || defined(__hexagon__)
typedef unsigned __sanitizer___kernel_uid_t;
typedef unsigned __sanitizer___kernel_gid_t;
-#else
+# else
typedef unsigned short __sanitizer___kernel_uid_t;
typedef unsigned short __sanitizer___kernel_gid_t;
-#endif
-#if defined(__x86_64__) && !defined(_LP64)
+# endif
+# if defined(__x86_64__) && !defined(_LP64)
typedef long long __sanitizer___kernel_off_t;
-#else
+# else
typedef long __sanitizer___kernel_off_t;
-#endif
+# endif
-#if defined(__powerpc__) || defined(__mips__)
+# if defined(__powerpc__) || defined(__mips__)
typedef unsigned int __sanitizer___kernel_old_uid_t;
typedef unsigned int __sanitizer___kernel_old_gid_t;
-#else
+# else
typedef unsigned short __sanitizer___kernel_old_uid_t;
typedef unsigned short __sanitizer___kernel_old_gid_t;
-#endif
+# endif
typedef long long __sanitizer___kernel_loff_t;
typedef struct {
unsigned long fds_bits[1024 / (8 * sizeof(long))];
} __sanitizer___kernel_fd_set;
-#endif
+# endif
// This thing depends on the platform. We are only interested in the upper
// limit. Verified with a compiler assert in .cpp.
void *align;
};
-#if SANITIZER_ANDROID
-# if SANITIZER_MIPS
+# if SANITIZER_ANDROID
+# if SANITIZER_MIPS
typedef unsigned long __sanitizer_sigset_t[16 / sizeof(unsigned long)];
-# else
+# else
typedef unsigned long __sanitizer_sigset_t;
-# endif
-#elif SANITIZER_APPLE
+# endif
+# elif SANITIZER_APPLE
typedef unsigned __sanitizer_sigset_t;
-#elif SANITIZER_LINUX
+# elif SANITIZER_HAIKU
+typedef unsigned long __sanitizer_sigset_t;
+# elif SANITIZER_LINUX
struct __sanitizer_sigset_t {
// The size is determined by looking at sizeof of real sigset_t on linux.
uptr val[128 / sizeof(uptr)];
};
-#endif
+# endif
struct __sanitizer_siginfo_pad {
-#if SANITIZER_X32
+# if SANITIZER_X32
// x32 siginfo_t is aligned to 8 bytes.
u64 pad[128 / sizeof(u64)];
-#else
+# else
// Require uptr, because siginfo_t is always pointer-size aligned on Linux.
uptr pad[128 / sizeof(uptr)];
-#endif
+# endif
};
-#if SANITIZER_LINUX
-# define SANITIZER_HAS_SIGINFO 1
+# if SANITIZER_LINUX
+# define SANITIZER_HAS_SIGINFO 1
union __sanitizer_siginfo {
- struct {
+ __extension__ struct {
int si_signo;
-# if SANITIZER_MIPS
+# if SANITIZER_MIPS
int si_code;
int si_errno;
-# else
+# else
int si_errno;
int si_code;
-# endif
+# endif
};
__sanitizer_siginfo_pad pad;
};
-#else
-# define SANITIZER_HAS_SIGINFO 0
+# else
+# define SANITIZER_HAS_SIGINFO 0
typedef __sanitizer_siginfo_pad __sanitizer_siginfo;
-#endif
+# endif
using __sanitizer_sighandler_ptr = void (*)(int sig);
using __sanitizer_sigactionhandler_ptr = void (*)(int sig,
void *uctx);
// Linux system headers define the 'sa_handler' and 'sa_sigaction' macros.
-#if SANITIZER_ANDROID && (SANITIZER_WORDSIZE == 64)
+# if SANITIZER_ANDROID && (SANITIZER_WORDSIZE == 64)
struct __sanitizer_sigaction {
unsigned sa_flags;
union {
__sanitizer_sigset_t sa_mask;
void (*sa_restorer)();
};
-#elif SANITIZER_ANDROID && SANITIZER_MIPS32 // check this before WORDSIZE == 32
+# elif SANITIZER_ANDROID && \
+ SANITIZER_MIPS32 // check this before WORDSIZE == 32
struct __sanitizer_sigaction {
unsigned sa_flags;
union {
};
__sanitizer_sigset_t sa_mask;
};
-#elif SANITIZER_ANDROID && (SANITIZER_WORDSIZE == 32)
+# elif SANITIZER_ANDROID && (SANITIZER_WORDSIZE == 32)
struct __sanitizer_sigaction {
union {
__sanitizer_sigactionhandler_ptr sigaction;
uptr sa_flags;
void (*sa_restorer)();
};
-#else // !SANITIZER_ANDROID
+# else // !SANITIZER_ANDROID
struct __sanitizer_sigaction {
-#if defined(__mips__) && !SANITIZER_FREEBSD
+# if defined(__mips__) && !SANITIZER_FREEBSD && !SANITIZER_MUSL
unsigned int sa_flags;
-#endif
+# endif
union {
__sanitizer_sigactionhandler_ptr sigaction;
__sanitizer_sighandler_ptr handler;
};
-#if SANITIZER_FREEBSD
+# if SANITIZER_FREEBSD
int sa_flags;
__sanitizer_sigset_t sa_mask;
-#else
-#if defined(__s390x__)
+# else
+# if defined(__s390x__)
int sa_resv;
-#else
+# else
__sanitizer_sigset_t sa_mask;
-#endif
-#ifndef __mips__
-#if defined(__sparc__)
-#if __GLIBC_PREREQ (2, 20)
+# endif
+# if !defined(__mips__) || SANITIZER_MUSL
+# if defined(__sparc__)
+# if __GLIBC_PREREQ(2, 20)
// On sparc glibc 2.19 and earlier sa_flags was unsigned long.
-#if defined(__arch64__)
+# if defined(__arch64__)
// To maintain ABI compatibility on sparc64 when switching to an int,
// __glibc_reserved0 was added.
int __glibc_reserved0;
-#endif
+# endif
int sa_flags;
-#else
+# else
unsigned long sa_flags;
-#endif
-#else
+# endif
+# else
int sa_flags;
-#endif
-#endif
-#endif
-#if SANITIZER_LINUX
+# endif
+# endif
+# endif
+# if SANITIZER_LINUX || SANITIZER_HAIKU
void (*sa_restorer)();
-#endif
-#if defined(__mips__) && (SANITIZER_WORDSIZE == 32)
+# endif
+# if defined(__mips__) && (SANITIZER_WORDSIZE == 32) && !SANITIZER_MUSL
int sa_resv[1];
-#endif
-#if defined(__s390x__)
+# endif
+# if defined(__s390x__)
__sanitizer_sigset_t sa_mask;
-#endif
+# endif
};
-#endif // !SANITIZER_ANDROID
+# endif // !SANITIZER_ANDROID
-#if defined(__mips__)
-#define __SANITIZER_KERNEL_NSIG 128
-#else
-#define __SANITIZER_KERNEL_NSIG 64
-#endif
+# if defined(__mips__)
+# define __SANITIZER_KERNEL_NSIG 128
+# else
+# define __SANITIZER_KERNEL_NSIG 64
+# endif
struct __sanitizer_kernel_sigset_t {
uptr sig[__SANITIZER_KERNEL_NSIG / (sizeof(uptr) * 8)];
};
// Linux system headers define the 'sa_handler' and 'sa_sigaction' macros.
-#if SANITIZER_MIPS
+# if SANITIZER_MIPS
struct __sanitizer_kernel_sigaction_t {
unsigned int sa_flags;
union {
__sanitizer_kernel_sigset_t sa_mask;
void (*sa_restorer)(void);
};
-#else
+# else
struct __sanitizer_kernel_sigaction_t {
union {
void (*handler)(int signo);
void (*sa_restorer)(void);
__sanitizer_kernel_sigset_t sa_mask;
};
-#endif
+# endif
extern const uptr sig_ign;
extern const uptr sig_dfl;
extern const uptr sig_err;
extern const uptr sa_siginfo;
-#if SANITIZER_LINUX
+# if SANITIZER_LINUX
extern int e_tabsz;
-#endif
+# endif
extern int af_inet;
extern int af_inet6;
uptr __sanitizer_in_addr_sz(int af);
-#if SANITIZER_LINUX
+# if SANITIZER_LINUX
struct __sanitizer_dl_phdr_info {
uptr dlpi_addr;
const char *dlpi_name;
};
extern unsigned struct_ElfW_Phdr_sz;
-#endif
+# endif
struct __sanitizer_protoent {
char *p_name;
int ai_family;
int ai_socktype;
int ai_protocol;
-#if SANITIZER_ANDROID || SANITIZER_APPLE
+# if SANITIZER_ANDROID || SANITIZER_APPLE || SANITIZER_HAIKU
unsigned ai_addrlen;
char *ai_canonname;
void *ai_addr;
-#else // LINUX
+# else // LINUX
unsigned ai_addrlen;
void *ai_addr;
char *ai_canonname;
-#endif
+# endif
struct __sanitizer_addrinfo *ai_next;
};
short revents;
};
-#if SANITIZER_ANDROID || SANITIZER_APPLE
+# if SANITIZER_ANDROID || SANITIZER_APPLE
typedef unsigned __sanitizer_nfds_t;
-#else
+# else
typedef unsigned long __sanitizer_nfds_t;
-#endif
+# endif
-#if !SANITIZER_ANDROID
-# if SANITIZER_LINUX
+# if !SANITIZER_ANDROID
+# if SANITIZER_LINUX
struct __sanitizer_glob_t {
uptr gl_pathc;
char **gl_pathv;
int (*gl_lstat)(const char *, void *);
int (*gl_stat)(const char *, void *);
};
-# endif // SANITIZER_LINUX
+# endif // SANITIZER_LINUX
-# if SANITIZER_LINUX
+# if SANITIZER_LINUX
extern int glob_nomatch;
extern int glob_altdirfunc;
-# endif
-#endif // !SANITIZER_ANDROID
+# endif
+# endif // !SANITIZER_ANDROID
extern unsigned path_max;
uptr we_offs;
};
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
+# if SANITIZER_LINUX && !SANITIZER_ANDROID
struct __sanitizer_FILE {
int _flags;
char *_IO_read_ptr;
__sanitizer_FILE *_chain;
int _fileno;
};
-# define SANITIZER_HAS_STRUCT_FILE 1
-#else
+# define SANITIZER_HAS_STRUCT_FILE 1
+# else
typedef void __sanitizer_FILE;
-# define SANITIZER_HAS_STRUCT_FILE 0
-#endif
+# define SANITIZER_HAS_STRUCT_FILE 0
+# endif
# if SANITIZER_LINUX && !SANITIZER_ANDROID && \
(defined(__i386) || defined(__x86_64) || defined(__mips64) || \
extern int shmctl_ipc_info;
extern int shmctl_shm_info;
extern int shmctl_shm_stat;
-#endif
+# endif
-#if !SANITIZER_APPLE && !SANITIZER_FREEBSD
+# if !SANITIZER_APPLE && !SANITIZER_FREEBSD
extern unsigned struct_utmp_sz;
-#endif
-#if !SANITIZER_ANDROID
+# endif
+# if !SANITIZER_ANDROID
extern unsigned struct_utmpx_sz;
-#endif
+# endif
extern int map_fixed;
union {
void *ifcu_req;
} ifc_ifcu;
-#if SANITIZER_APPLE
+# if SANITIZER_APPLE
} __attribute__((packed));
-#else
+# else
};
-#endif
+# endif
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
+# if SANITIZER_LINUX && !SANITIZER_ANDROID
struct __sanitizer__obstack_chunk {
char *limit;
struct __sanitizer__obstack_chunk *prev;
__sanitizer_cookie_io_seek seek;
__sanitizer_cookie_io_close close;
};
-#endif
+# endif
-#define IOC_NRBITS 8
-#define IOC_TYPEBITS 8
-#if defined(__powerpc__) || defined(__powerpc64__) || defined(__mips__) || \
- defined(__sparc__)
-#define IOC_SIZEBITS 13
-#define IOC_DIRBITS 3
-#define IOC_NONE 1U
-#define IOC_WRITE 4U
-#define IOC_READ 2U
-#else
-#define IOC_SIZEBITS 14
-#define IOC_DIRBITS 2
-#define IOC_NONE 0U
-#define IOC_WRITE 1U
-#define IOC_READ 2U
-#endif
-#define IOC_NRMASK ((1 << IOC_NRBITS) - 1)
-#define IOC_TYPEMASK ((1 << IOC_TYPEBITS) - 1)
-#define IOC_SIZEMASK ((1 << IOC_SIZEBITS) - 1)
-#if defined(IOC_DIRMASK)
-#undef IOC_DIRMASK
-#endif
-#define IOC_DIRMASK ((1 << IOC_DIRBITS) - 1)
-#define IOC_NRSHIFT 0
-#define IOC_TYPESHIFT (IOC_NRSHIFT + IOC_NRBITS)
-#define IOC_SIZESHIFT (IOC_TYPESHIFT + IOC_TYPEBITS)
-#define IOC_DIRSHIFT (IOC_SIZESHIFT + IOC_SIZEBITS)
-#define EVIOC_EV_MAX 0x1f
-#define EVIOC_ABS_MAX 0x3f
-
-#define IOC_DIR(nr) (((nr) >> IOC_DIRSHIFT) & IOC_DIRMASK)
-#define IOC_TYPE(nr) (((nr) >> IOC_TYPESHIFT) & IOC_TYPEMASK)
-#define IOC_NR(nr) (((nr) >> IOC_NRSHIFT) & IOC_NRMASK)
-
-#if defined(__sparc__)
+# define IOC_NRBITS 8
+# define IOC_TYPEBITS 8
+# if defined(__powerpc__) || defined(__powerpc64__) || defined(__mips__) || \
+ defined(__sparc__)
+# define IOC_SIZEBITS 13
+# define IOC_DIRBITS 3
+# define IOC_NONE 1U
+# define IOC_WRITE 4U
+# define IOC_READ 2U
+# else
+# define IOC_SIZEBITS 14
+# define IOC_DIRBITS 2
+# define IOC_NONE 0U
+# define IOC_WRITE 1U
+# define IOC_READ 2U
+# endif
+# define IOC_NRMASK ((1 << IOC_NRBITS) - 1)
+# define IOC_TYPEMASK ((1 << IOC_TYPEBITS) - 1)
+# define IOC_SIZEMASK ((1 << IOC_SIZEBITS) - 1)
+# if defined(IOC_DIRMASK)
+# undef IOC_DIRMASK
+# endif
+# define IOC_DIRMASK ((1 << IOC_DIRBITS) - 1)
+# define IOC_NRSHIFT 0
+# define IOC_TYPESHIFT (IOC_NRSHIFT + IOC_NRBITS)
+# define IOC_SIZESHIFT (IOC_TYPESHIFT + IOC_TYPEBITS)
+# define IOC_DIRSHIFT (IOC_SIZESHIFT + IOC_SIZEBITS)
+# define EVIOC_EV_MAX 0x1f
+# define EVIOC_ABS_MAX 0x3f
+
+# define IOC_DIR(nr) (((nr) >> IOC_DIRSHIFT) & IOC_DIRMASK)
+# define IOC_TYPE(nr) (((nr) >> IOC_TYPESHIFT) & IOC_TYPEMASK)
+# define IOC_NR(nr) (((nr) >> IOC_NRSHIFT) & IOC_NRMASK)
+
+# if defined(__sparc__)
// In sparc the 14 bits SIZE field overlaps with the
// least significant bit of DIR, so either IOC_READ or
// IOC_WRITE shall be 1 in order to get a non-zero SIZE.
-#define IOC_SIZE(nr) \
- ((((((nr) >> 29) & 0x7) & (4U | 2U)) == 0) ? 0 : (((nr) >> 16) & 0x3fff))
-#else
-#define IOC_SIZE(nr) (((nr) >> IOC_SIZESHIFT) & IOC_SIZEMASK)
-#endif
+# define IOC_SIZE(nr) \
+ ((((((nr) >> 29) & 0x7) & (4U | 2U)) == 0) ? 0 : (((nr) >> 16) & 0x3fff))
+# else
+# define IOC_SIZE(nr) (((nr) >> IOC_SIZESHIFT) & IOC_SIZEMASK)
+# endif
extern unsigned struct_ifreq_sz;
extern unsigned struct_termios_sz;
extern unsigned struct_winsize_sz;
-#if SANITIZER_LINUX
+# if SANITIZER_LINUX
extern unsigned struct_arpreq_sz;
extern unsigned struct_cdrom_msf_sz;
extern unsigned struct_cdrom_multisession_sz;
extern unsigned struct_vt_consize_sz;
extern unsigned struct_vt_sizes_sz;
extern unsigned struct_vt_stat_sz;
-#endif // SANITIZER_LINUX
+# endif // SANITIZER_LINUX
-#if SANITIZER_LINUX
+# if SANITIZER_LINUX
extern unsigned struct_copr_buffer_sz;
extern unsigned struct_copr_debug_buf_sz;
extern unsigned struct_copr_msg_sz;
extern unsigned struct_seq_event_rec_sz;
extern unsigned struct_synth_info_sz;
extern unsigned struct_vt_mode_sz;
-#endif // SANITIZER_LINUX
+# endif // SANITIZER_LINUX
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
+# if SANITIZER_LINUX && !SANITIZER_ANDROID
extern unsigned struct_ax25_parms_struct_sz;
extern unsigned struct_input_keymap_entry_sz;
extern unsigned struct_ipx_config_data_sz;
extern unsigned struct_sockaddr_ax25_sz;
extern unsigned struct_unimapdesc_sz;
extern unsigned struct_unimapinit_sz;
-extern unsigned struct_sock_fprog_sz;
# endif // SANITIZER_LINUX && !SANITIZER_ANDROID
extern const unsigned long __sanitizer_bufsiz;
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
+# if SANITIZER_LINUX && !SANITIZER_ANDROID
extern unsigned struct_audio_buf_info_sz;
extern unsigned struct_ppp_stats_sz;
-#endif // (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
+# endif // (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
-#if !SANITIZER_ANDROID && !SANITIZER_APPLE
+# if !SANITIZER_ANDROID && !SANITIZER_APPLE
extern unsigned struct_sioc_sg_req_sz;
extern unsigned struct_sioc_vif_req_sz;
-#endif
+# endif
+
+extern unsigned fpos_t_sz;
// ioctl request identifiers
extern unsigned IOCTL_SIOCSIFMTU;
extern unsigned IOCTL_SIOCSIFNETMASK;
extern unsigned IOCTL_SIOCSPGRP;
+# if !SANITIZER_HAIKU
extern unsigned IOCTL_TIOCCONS;
-extern unsigned IOCTL_TIOCEXCL;
extern unsigned IOCTL_TIOCGETD;
+extern unsigned IOCTL_TIOCNOTTY;
+extern unsigned IOCTL_TIOCPKT;
+extern unsigned IOCTL_TIOCSETD;
+extern unsigned IOCTL_TIOCSTI;
+# endif
+extern unsigned IOCTL_TIOCEXCL;
extern unsigned IOCTL_TIOCGPGRP;
extern unsigned IOCTL_TIOCGWINSZ;
extern unsigned IOCTL_TIOCMBIC;
extern unsigned IOCTL_TIOCMBIS;
extern unsigned IOCTL_TIOCMGET;
extern unsigned IOCTL_TIOCMSET;
-extern unsigned IOCTL_TIOCNOTTY;
extern unsigned IOCTL_TIOCNXCL;
extern unsigned IOCTL_TIOCOUTQ;
-extern unsigned IOCTL_TIOCPKT;
extern unsigned IOCTL_TIOCSCTTY;
-extern unsigned IOCTL_TIOCSETD;
extern unsigned IOCTL_TIOCSPGRP;
-extern unsigned IOCTL_TIOCSTI;
extern unsigned IOCTL_TIOCSWINSZ;
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
+# if SANITIZER_LINUX && !SANITIZER_ANDROID
extern unsigned IOCTL_SIOCGETSGCNT;
extern unsigned IOCTL_SIOCGETVIFCNT;
-#endif
-#if SANITIZER_LINUX
+# endif
+# if SANITIZER_LINUX
extern unsigned IOCTL_EVIOCGABS;
extern unsigned IOCTL_EVIOCGBIT;
extern unsigned IOCTL_EVIOCGEFFECTS;
extern unsigned IOCTL_SNDCTL_COPR_WCODE;
extern unsigned IOCTL_SNDCTL_COPR_WDATA;
extern unsigned IOCTL_TCFLSH;
-extern unsigned IOCTL_TCGETS;
extern unsigned IOCTL_TCSBRK;
extern unsigned IOCTL_TCSBRKP;
+# if SANITIZER_TERMIOS_IOCTL_CONSTANTS
+extern unsigned IOCTL_TCGETS;
extern unsigned IOCTL_TCSETS;
extern unsigned IOCTL_TCSETSF;
extern unsigned IOCTL_TCSETSW;
+# endif
extern unsigned IOCTL_TCXONC;
extern unsigned IOCTL_TIOCGLCKTRMIOS;
extern unsigned IOCTL_TIOCGSOFTCAR;
extern unsigned IOCTL_VT_RELDISP;
extern unsigned IOCTL_VT_SETMODE;
extern unsigned IOCTL_VT_WAITACTIVE;
-#endif // SANITIZER_LINUX
+# endif // SANITIZER_LINUX
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
+# if SANITIZER_LINUX && !SANITIZER_ANDROID
extern unsigned IOCTL_EQL_EMANCIPATE;
extern unsigned IOCTL_EQL_ENSLAVE;
extern unsigned IOCTL_EQL_GETMASTRCFG;
extern unsigned IOCTL_KDSKBMODE;
extern unsigned IOCTL_KIOCSOUND;
extern unsigned IOCTL_PIO_SCRNMAP;
-#endif
+# endif
+
+# if SANITIZER_GLIBC
+struct __sanitizer_servent {
+ char *s_name;
+ char **s_aliases;
+ int s_port;
+ char *s_proto;
+};
+# endif
extern const int si_SEGV_MAPERR;
extern const int si_SEGV_ACCERR;
} // namespace __sanitizer
-#define CHECK_TYPE_SIZE(TYPE) \
- COMPILER_CHECK(sizeof(__sanitizer_##TYPE) == sizeof(TYPE))
+# define CHECK_TYPE_SIZE(TYPE) \
+ COMPILER_CHECK(sizeof(__sanitizer_##TYPE) == sizeof(TYPE))
-#define CHECK_SIZE_AND_OFFSET(CLASS, MEMBER) \
- COMPILER_CHECK(sizeof(((__sanitizer_##CLASS *)NULL)->MEMBER) == \
- sizeof(((CLASS *)NULL)->MEMBER)); \
- COMPILER_CHECK(offsetof(__sanitizer_##CLASS, MEMBER) == \
- offsetof(CLASS, MEMBER))
+# define CHECK_SIZE_AND_OFFSET(CLASS, MEMBER) \
+ COMPILER_CHECK(sizeof(((__sanitizer_##CLASS *)NULL)->MEMBER) == \
+ sizeof(((CLASS *)NULL)->MEMBER)); \
+ COMPILER_CHECK(offsetof(__sanitizer_##CLASS, MEMBER) == \
+ offsetof(CLASS, MEMBER))
// For sigaction, which is a function and struct at the same time,
// and thus requires explicit "struct" in sizeof() expression.
-#define CHECK_STRUCT_SIZE_AND_OFFSET(CLASS, MEMBER) \
- COMPILER_CHECK(sizeof(((struct __sanitizer_##CLASS *)NULL)->MEMBER) == \
- sizeof(((struct CLASS *)NULL)->MEMBER)); \
- COMPILER_CHECK(offsetof(struct __sanitizer_##CLASS, MEMBER) == \
- offsetof(struct CLASS, MEMBER))
+# define CHECK_STRUCT_SIZE_AND_OFFSET(CLASS, MEMBER) \
+ COMPILER_CHECK(sizeof(((struct __sanitizer_##CLASS *)NULL)->MEMBER) == \
+ sizeof(((struct CLASS *)NULL)->MEMBER)); \
+ COMPILER_CHECK(offsetof(struct __sanitizer_##CLASS, MEMBER) == \
+ offsetof(struct CLASS, MEMBER))
-#define SIGACTION_SYMNAME sigaction
+# define SIGACTION_SYMNAME sigaction
+
+# if SANITIZER_LINUX
+typedef void *__sanitizer_timer_t;
+# endif
-#endif // SANITIZER_LINUX || SANITIZER_APPLE
+#endif // SANITIZER_LINUX || SANITIZER_APPLE || SANITIZER_HAIKU
#endif
#include <semaphore.h>
#include <signal.h>
#include <stddef.h>
+#include <stdio.h>
#include <sys/ethernet.h>
#include <sys/filio.h>
#include <sys/ipc.h>
unsigned struct_sioc_sg_req_sz = sizeof(struct sioc_sg_req);
unsigned struct_sioc_vif_req_sz = sizeof(struct sioc_vif_req);
+ unsigned fpos_t_sz = sizeof(fpos_t);
+
const unsigned IOCTL_NOT_PRESENT = 0;
unsigned IOCTL_FIOASYNC = FIOASYNC;
extern unsigned struct_sioc_sg_req_sz;
extern unsigned struct_sioc_vif_req_sz;
+extern unsigned fpos_t_sz;
+
// ioctl request identifiers
// A special value to mark ioctls that are not present on the target platform,
return (void *)p;
}
-static inline bool IntervalsAreSeparate(uptr start1, uptr end1,
- uptr start2, uptr end2) {
- CHECK(start1 <= end1);
- CHECK(start2 <= end2);
- return (end1 < start2) || (end2 < start1);
-}
-
+# if !SANITIZER_APPLE
// FIXME: this is thread-unsafe, but should not cause problems most of the time.
-// When the shadow is mapped only a single thread usually exists (plus maybe
-// several worker threads on Mac, which aren't expected to map big chunks of
-// memory).
+// When the shadow is mapped, only a single thread usually exists.
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
MemoryMappingLayout proc_maps(/*cache_enabled*/true);
if (proc_maps.Error())
return true;
}
-#if !SANITIZER_APPLE
void DumpProcessMap() {
MemoryMappingLayout proc_maps(/*cache_enabled*/true);
const sptr kBufSize = 4095;
Report("End of process memory map.\n");
UnmapOrDie(filename, kBufSize);
}
-#endif
+# endif
const char *GetPwd() {
return GetEnv("PWD");
namespace __sanitizer {
+[[maybe_unused]] static atomic_uint8_t signal_handler_is_from_sanitizer[64];
+
u32 GetUid() {
return getuid();
}
UnmapOrDie(oldstack.ss_sp, oldstack.ss_size);
}
+bool IsSignalHandlerFromSanitizer(int signum) {
+ return atomic_load(&signal_handler_is_from_sanitizer[signum],
+ memory_order_relaxed);
+}
+
+bool SetSignalHandlerFromSanitizer(int signum, bool new_state) {
+ if (signum < 0 || static_cast<unsigned>(signum) >=
+ ARRAY_SIZE(signal_handler_is_from_sanitizer))
+ return false;
+
+ return atomic_exchange(&signal_handler_is_from_sanitizer[signum], new_state,
+ memory_order_relaxed);
+}
+
static void MaybeInstallSigaction(int signum,
SignalHandlerType handler) {
if (GetHandleSignalMode(signum) == kHandleSignalNo) return;
if (common_flags()->use_sigaltstack) sigact.sa_flags |= SA_ONSTACK;
CHECK_EQ(0, internal_sigaction(signum, &sigact, nullptr));
VReport(1, "Installed the sigaction for signal %d\n", signum);
+
+ if (common_flags()->cloak_sanitizer_signal_handlers)
+ SetSignalHandlerFromSanitizer(signum, true);
}
void InstallDeadlySignalHandlers(SignalHandlerType handler) {
#include "sanitizer_platform.h"
#if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD || \
- SANITIZER_APPLE || SANITIZER_SOLARIS || \
+ SANITIZER_APPLE || SANITIZER_SOLARIS || SANITIZER_HAIKU || \
SANITIZER_FUCHSIA
#include "sanitizer_common.h"
#include <mach/mach.h>
// These are not available in older macOS SDKs.
-#ifndef CPU_SUBTYPE_X86_64_H
-#define CPU_SUBTYPE_X86_64_H ((cpu_subtype_t)8) /* Haswell */
-#endif
-#ifndef CPU_SUBTYPE_ARM_V7S
-#define CPU_SUBTYPE_ARM_V7S ((cpu_subtype_t)11) /* Swift */
-#endif
-#ifndef CPU_SUBTYPE_ARM_V7K
-#define CPU_SUBTYPE_ARM_V7K ((cpu_subtype_t)12)
-#endif
-#ifndef CPU_TYPE_ARM64
-#define CPU_TYPE_ARM64 (CPU_TYPE_ARM | CPU_ARCH_ABI64)
-#endif
+# ifndef CPU_SUBTYPE_X86_64_H
+# define CPU_SUBTYPE_X86_64_H ((cpu_subtype_t)8) /* Haswell */
+# endif
+# ifndef CPU_SUBTYPE_ARM_V7S
+# define CPU_SUBTYPE_ARM_V7S ((cpu_subtype_t)11) /* Swift */
+# endif
+# ifndef CPU_SUBTYPE_ARM_V7K
+# define CPU_SUBTYPE_ARM_V7K ((cpu_subtype_t)12)
+# endif
+# ifndef CPU_TYPE_ARM64
+# define CPU_TYPE_ARM64 (CPU_TYPE_ARM | CPU_ARCH_ABI64)
+# endif
+# ifndef CPU_SUBTYPE_ARM64E
+# define CPU_SUBTYPE_ARM64E ((cpu_subtype_t)2)
+# endif
namespace __sanitizer {
const char *current_load_cmd_addr;
u32 lc_type;
uptr base_virt_addr;
- uptr addr_mask;
};
template <typename Section>
const Section *sc = (const Section *)data->current_load_cmd_addr;
data->current_load_cmd_addr += sizeof(Section);
- uptr sec_start = (sc->addr & data->addr_mask) + data->base_virt_addr;
+ uptr sec_start = sc->addr + data->base_virt_addr;
uptr sec_end = sec_start + sc->size;
module->addAddressRange(sec_start, sec_end, /*executable=*/false, isWritable,
sc->sectname);
}
+static bool VerifyMemoryMapping(MemoryMappingLayout* mapping) {
+ InternalMmapVector<LoadedModule> modules;
+ modules.reserve(128); // matches DumpProcessMap
+ mapping->DumpListOfModules(&modules);
+
+ InternalMmapVector<LoadedModule::AddressRange> segments;
+ for (uptr i = 0; i < modules.size(); ++i) {
+ for (auto& range : modules[i].ranges()) {
+ segments.push_back(range);
+ }
+ }
+
+ // Verify that none of the segments overlap:
+ // 1. Sort the segments by the start address
+ // 2. Check that every segment starts after the previous one ends.
+ Sort(segments.data(), segments.size(),
+ [](LoadedModule::AddressRange& a, LoadedModule::AddressRange& b) {
+ return a.beg < b.beg;
+ });
+
+ // To avoid spam, we only print the report message once per process.
+ static bool invalid_module_map_reported = false;
+ bool well_formed = true;
+
+ for (size_t i = 1; i < segments.size(); i++) {
+ uptr cur_start = segments[i].beg;
+ uptr prev_end = segments[i - 1].end;
+ if (cur_start < prev_end) {
+ well_formed = false;
+ VReport(2, "Overlapping mappings: %s start = %p, %s end = %p\n",
+ segments[i].name, (void*)cur_start, segments[i - 1].name,
+ (void*)prev_end);
+ if (!invalid_module_map_reported) {
+ Report(
+ "WARN: Invalid dyld module map detected. This is most likely a bug "
+ "in the sanitizer.\n");
+ Report("WARN: Backtraces may be unreliable.\n");
+ invalid_module_map_reported = true;
+ }
+ }
+ }
+
+ for (auto& m : modules) m.clear();
+
+ mapping->Reset();
+ return well_formed;
+}
+
void MemoryMappedSegment::AddAddressRanges(LoadedModule *module) {
// Don't iterate over sections when the caller hasn't set up the
// data pointer, when there are no sections, or when the segment
MemoryMappingLayout::MemoryMappingLayout(bool cache_enabled) {
Reset();
+ VerifyMemoryMapping(this);
}
MemoryMappingLayout::~MemoryMappingLayout() {
// until we hit a Mach header matching dyld instead. These recurse
// calls are expensive, but the first memory map generation occurs
// early in the process, when dyld is one of the only images loaded,
-// so it will be hit after only a few iterations. These assumptions don't
-// hold on macOS 13+ anymore (dyld itself has moved into the shared cache).
-
-// FIXME: Unfortunately, the upstream revised version to deal with macOS 13+
-// is incompatible with GCC and also uses APIs not available on earlier
-// systems which we support; backed out for now.
-
+// so it will be hit after only a few iterations. These assumptions don't hold
+// on macOS 13+ anymore (dyld itself has moved into the shared cache).
static mach_header *GetDyldImageHeaderViaVMRegion() {
vm_address_t address = 0;
}
}
+extern "C" {
+struct dyld_shared_cache_dylib_text_info {
+ uint64_t version; // current version 2
+ // following fields all exist in version 1
+ uint64_t loadAddressUnslid;
+ uint64_t textSegmentSize;
+ uuid_t dylibUuid;
+ const char *path; // pointer invalid at end of iterations
+ // following fields all exist in version 2
+ uint64_t textSegmentOffset; // offset from start of cache
+};
+typedef struct dyld_shared_cache_dylib_text_info
+ dyld_shared_cache_dylib_text_info;
+
+extern bool _dyld_get_shared_cache_uuid(uuid_t uuid);
+extern const void *_dyld_get_shared_cache_range(size_t *length);
+extern intptr_t _dyld_get_image_slide(const struct mach_header* mh);
+extern int dyld_shared_cache_iterate_text(
+ const uuid_t cacheUuid,
+ void (^callback)(const dyld_shared_cache_dylib_text_info *info));
+} // extern "C"
+
+static mach_header *GetDyldImageHeaderViaSharedCache() {
+ uuid_t uuid;
+ bool hasCache = _dyld_get_shared_cache_uuid(uuid);
+ if (!hasCache)
+ return nullptr;
+
+ size_t cacheLength;
+ __block uptr cacheStart = (uptr)_dyld_get_shared_cache_range(&cacheLength);
+ CHECK(cacheStart && cacheLength);
+
+ __block mach_header *dyldHdr = nullptr;
+ int res = dyld_shared_cache_iterate_text(
+ uuid, ^(const dyld_shared_cache_dylib_text_info *info) {
+ CHECK_GE(info->version, 2);
+ mach_header *hdr =
+ (mach_header *)(cacheStart + info->textSegmentOffset);
+ if (IsDyldHdr(hdr))
+ dyldHdr = hdr;
+ });
+ CHECK_EQ(res, 0);
+
+ return dyldHdr;
+}
+
const mach_header *get_dyld_hdr() {
if (!dyld_hdr) {
// On macOS 13+, dyld itself has moved into the shared cache. Looking it up
// via vm_region_recurse_64() causes spins/hangs/crashes.
- // FIXME: find a way to do this compatible with GCC.
if (GetMacosAlignedVersion() >= MacosVersion(13, 0)) {
+ dyld_hdr = GetDyldImageHeaderViaSharedCache();
+ if (!dyld_hdr) {
VReport(1,
- "looking up the dyld image header in the shared cache on "
- "macOS 13+ is not yet supported. Falling back to "
+ "Failed to lookup the dyld image header in the shared cache on "
+ "macOS 13+ (or no shared cache in use). Falling back to "
"lookup via vm_region_recurse_64().\n");
dyld_hdr = GetDyldImageHeaderViaVMRegion();
+ }
} else {
dyld_hdr = GetDyldImageHeaderViaVMRegion();
}
layout_data->current_load_cmd_count--;
if (((const load_command *)lc)->cmd == kLCSegment) {
const SegmentCommand* sc = (const SegmentCommand *)lc;
- uptr base_virt_addr, addr_mask;
- if (layout_data->current_image == kDyldImageIdx) {
- base_virt_addr = (uptr)get_dyld_hdr();
- // vmaddr is masked with 0xfffff because on macOS versions < 10.12,
- // it contains an absolute address rather than an offset for dyld.
- // To make matters even more complicated, this absolute address
- // isn't actually the absolute segment address, but the offset portion
- // of the address is accurate when combined with the dyld base address,
- // and the mask will give just this offset.
- addr_mask = 0xfffff;
- } else {
+ if (internal_strcmp(sc->segname, "__LINKEDIT") == 0) {
+ // The LINKEDIT sections are for internal linker use, and may alias
+ // with the LINKEDIT section for other modules. (If we included them,
+ // our memory map would contain overlapping sections.)
+ return false;
+ }
+
+ uptr base_virt_addr;
+ if (layout_data->current_image == kDyldImageIdx)
+ base_virt_addr = (uptr)_dyld_get_image_slide(get_dyld_hdr());
+ else
base_virt_addr =
(uptr)_dyld_get_image_vmaddr_slide(layout_data->current_image);
- addr_mask = ~0;
- }
- segment->start = (sc->vmaddr & addr_mask) + base_virt_addr;
+ segment->start = sc->vmaddr + base_virt_addr;
segment->end = segment->start + sc->vmsize;
// Most callers don't need section information, so only fill this struct
// when required.
(const char *)lc + sizeof(SegmentCommand);
seg_data->lc_type = kLCSegment;
seg_data->base_virt_addr = base_virt_addr;
- seg_data->addr_mask = addr_mask;
internal_strncpy(seg_data->name, sc->segname,
ARRAY_SIZE(seg_data->name));
+ seg_data->name[ARRAY_SIZE(seg_data->name) - 1] = 0;
}
// Return the initial protection.
? kDyldPath
: _dyld_get_image_name(layout_data->current_image);
internal_strncpy(segment->filename, src, segment->filename_size);
+ segment->filename[segment->filename_size - 1] = 0;
}
segment->arch = layout_data->current_arch;
internal_memcpy(segment->uuid, layout_data->current_uuid, kModuleUUIDSize);
case CPU_TYPE_I386:
return kModuleArchI386;
case CPU_TYPE_X86_64:
- if (cpusubtype == CPU_SUBTYPE_X86_64_ALL) return kModuleArchX86_64;
- if (cpusubtype == CPU_SUBTYPE_X86_64_H) return kModuleArchX86_64H;
+ if (cpusubtype == CPU_SUBTYPE_X86_64_ALL)
+ return kModuleArchX86_64;
+ if (cpusubtype == CPU_SUBTYPE_X86_64_H)
+ return kModuleArchX86_64H;
CHECK(0 && "Invalid subtype of x86_64");
return kModuleArchUnknown;
case CPU_TYPE_ARM:
- if (cpusubtype == CPU_SUBTYPE_ARM_V6) return kModuleArchARMV6;
- if (cpusubtype == CPU_SUBTYPE_ARM_V7) return kModuleArchARMV7;
- if (cpusubtype == CPU_SUBTYPE_ARM_V7S) return kModuleArchARMV7S;
- if (cpusubtype == CPU_SUBTYPE_ARM_V7K) return kModuleArchARMV7K;
+ if (cpusubtype == CPU_SUBTYPE_ARM_V6)
+ return kModuleArchARMV6;
+ if (cpusubtype == CPU_SUBTYPE_ARM_V7)
+ return kModuleArchARMV7;
+ if (cpusubtype == CPU_SUBTYPE_ARM_V7S)
+ return kModuleArchARMV7S;
+ if (cpusubtype == CPU_SUBTYPE_ARM_V7K)
+ return kModuleArchARMV7K;
CHECK(0 && "Invalid subtype of ARM");
return kModuleArchUnknown;
case CPU_TYPE_ARM64:
+ if (cpusubtype == CPU_SUBTYPE_ARM64E)
+ return kModuleArchARM64E;
return kModuleArchARM64;
default:
CHECK(0 && "Invalid CPU type");
return (const load_command *)((const char *)lc + lc->cmdsize);
}
-static void FindUUID(const load_command *first_lc, u8 *uuid_output) {
- for (const load_command *lc = first_lc; lc->cmd != 0; lc = NextCommand(lc)) {
- if (lc->cmd != LC_UUID) continue;
+# ifdef MH_MAGIC_64
+static constexpr size_t header_size = sizeof(mach_header_64);
+# else
+static constexpr size_t header_size = sizeof(mach_header);
+# endif
+
+static void FindUUID(const load_command *first_lc, const mach_header *hdr,
+ u8 *uuid_output) {
+ uint32_t curcmd = 0;
+ for (const load_command *lc = first_lc; curcmd < hdr->ncmds;
+ curcmd++, lc = NextCommand(lc)) {
+ CHECK_LT((const char *)lc,
+ (const char *)hdr + header_size + hdr->sizeofcmds);
+
+ if (lc->cmd != LC_UUID)
+ continue;
const uuid_command *uuid_lc = (const uuid_command *)lc;
const uint8_t *uuid = &uuid_lc->uuid[0];
}
}
-static bool IsModuleInstrumented(const load_command *first_lc) {
- for (const load_command *lc = first_lc; lc->cmd != 0; lc = NextCommand(lc)) {
- if (lc->cmd != LC_LOAD_DYLIB) continue;
+static bool IsModuleInstrumented(const load_command *first_lc,
+ const mach_header *hdr) {
+ uint32_t curcmd = 0;
+ for (const load_command *lc = first_lc; curcmd < hdr->ncmds;
+ curcmd++, lc = NextCommand(lc)) {
+ CHECK_LT((const char *)lc,
+ (const char *)hdr + header_size + hdr->sizeofcmds);
+
+ if (lc->cmd != LC_LOAD_DYLIB)
+ continue;
const dylib_command *dylib_lc = (const dylib_command *)lc;
uint32_t dylib_name_offset = dylib_lc->dylib.name.offset;
continue;
}
}
- FindUUID((const load_command *)data_.current_load_cmd_addr,
+ FindUUID((const load_command *)data_.current_load_cmd_addr, hdr,
data_.current_uuid);
data_.current_instrumented = IsModuleInstrumented(
- (const load_command *)data_.current_load_cmd_addr);
+ (const load_command *)data_.current_load_cmd_addr, hdr);
}
while (data_.current_load_cmd_count > 0) {
// Information about the process mappings (Solaris-specific parts).
//===----------------------------------------------------------------------===//
-// Before Solaris 11.4, <procfs.h> doesn't work in a largefile environment.
-#undef _FILE_OFFSET_BITS
-
// Avoid conflict between `_TIME_BITS` defined vs. `_FILE_OFFSET_BITS`
// undefined in some Linux configurations.
#undef _TIME_BITS
# define SANITIZER_REDEFINE_BUILTINS_H
// The asm hack only works with GCC and Clang.
-# if !defined(_WIN32) && defined(HAVE_AS_SYM_ASSIGN)
+# if !defined(_WIN32) && !defined(_AIX) && !defined(__APPLE__)
asm(R"(
.set memcpy, __sanitizer_internal_memcpy
} // namespace std
# endif // __cplusplus
-# endif // !_WIN32 && HAVE_AS_SYM_ASSIGN
+# endif // !_WIN32 && !_AIX && !__APPLE__
# endif // SANITIZER_REDEFINE_BUILTINS_H
#endif // SANITIZER_COMMON_NO_REDEFINE_BUILTINS
INTERCEPTOR(uptr, bsd_signal, int signum, uptr handler) {
SIGNAL_INTERCEPTOR_ENTER();
if (GetHandleSignalMode(signum) == kHandleSignalExclusive) return 0;
+
+ // TODO: support cloak_sanitizer_signal_handlers
SIGNAL_INTERCEPTOR_SIGNAL_IMPL(bsd_signal, signum, handler);
}
#define INIT_BSD_SIGNAL COMMON_INTERCEPT_FUNCTION(bsd_signal)
INTERCEPTOR(uptr, signal, int signum, uptr handler) {
SIGNAL_INTERCEPTOR_ENTER();
if (GetHandleSignalMode(signum) == kHandleSignalExclusive)
+ // The user can neither view nor change the signal handler, regardless of
+ // the cloak_sanitizer_signal_handlers setting. This differs from
+ // sigaction().
return (uptr) nullptr;
- SIGNAL_INTERCEPTOR_SIGNAL_IMPL(signal, signum, handler);
+
+ uptr ret = +[](auto signal, int signum, uptr handler) {
+ SIGNAL_INTERCEPTOR_SIGNAL_IMPL(signal, signum, handler);
+ }(signal, signum, handler);
+
+ if (ret != sig_err && SetSignalHandlerFromSanitizer(signum, false))
+ // If the user sets a signal handler, it becomes uncloaked, even if they
+ // reuse a sanitizer's signal handler.
+ ret = sig_dfl;
+
+ return ret;
}
#define INIT_SIGNAL COMMON_INTERCEPT_FUNCTION(signal)
INTERCEPTOR(int, sigaction_symname, int signum,
const __sanitizer_sigaction *act, __sanitizer_sigaction *oldact) {
SIGNAL_INTERCEPTOR_ENTER();
+
if (GetHandleSignalMode(signum) == kHandleSignalExclusive) {
if (!oldact) return 0;
act = nullptr;
+ // If cloak_sanitizer_signal_handlers=true, the user can neither view nor
+ // change the signal handler.
+ // If false, the user can view but not change the signal handler. This
+ // differs from signal().
}
- SIGNAL_INTERCEPTOR_SIGACTION_IMPL(signum, act, oldact);
+
+ int ret = +[](int signum, const __sanitizer_sigaction* act,
+ __sanitizer_sigaction* oldact) {
+ SIGNAL_INTERCEPTOR_SIGACTION_IMPL(signum, act, oldact);
+ }(signum, act, oldact);
+
+ if (act) {
+ if (ret == 0 && SetSignalHandlerFromSanitizer(signum, false)) {
+ // If the user sets a signal handler, it becomes uncloaked, even if they
+ // reuse a sanitizer's signal handler.
+
+ if (oldact)
+ oldact->handler = reinterpret_cast<__sanitizer_sighandler_ptr>(sig_dfl);
+ }
+ } else if (ret == 0 && oldact && IsSignalHandlerFromSanitizer(signum)) {
+ oldact->handler = reinterpret_cast<__sanitizer_sighandler_ptr>(sig_dfl);
+ }
+
+ return ret;
}
#define INIT_SIGACTION COMMON_INTERCEPT_FUNCTION(sigaction_symname)
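Taken together with the signal() interceptor above, the cloaking logic yields the following observable behavior. A hypothetical user-side sketch (the signal number and handler name are invented; assumes a non-exclusive sanitizer handler is installed on SIGUSR2 and cloak_sanitizer_signal_handlers=1):

// Query only: the cloaked sanitizer handler is reported as SIG_DFL.
struct sigaction old;
sigaction(SIGUSR2, /*act=*/nullptr, &old);  // old.sa_handler == SIG_DFL

// Install: succeeds and uncloaks the signal; the previous (sanitizer)
// handler is still reported as SIG_DFL rather than exposed to the user.
struct sigaction sa = {};
sa.sa_handler = my_handler;                 // my_handler: user's function
sigaction(SIGUSR2, &sa, &old);              // old.sa_handler == SIG_DFL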
// Nope, this does not look right either. This means the frame after next does
// not have a valid frame pointer, but we can still extract the caller PC.
// Unfortunately, there is no way to decide between GCC and LLVM frame
- // layouts. Assume GCC.
- return bp_prev - 1;
+ // layouts. Assume LLVM.
+ return bp_prev;
#else
return (uhwptr*)bp;
#endif
IsAligned((uptr)frame, sizeof(*frame)) &&
size < max_depth) {
#ifdef __powerpc__
- // PowerPC ABIs specify that the return address is saved on the
- // *caller's* stack frame. Thus we must dereference the back chain
- // to find the caller frame before extracting it.
+ // PowerPC ABIs specify that the return address is saved at offset
+ // 16 of the *caller's* stack frame. Thus we must dereference the
+ // back chain to find the caller frame before extracting it.
uhwptr *caller_frame = (uhwptr*)frame[0];
if (!IsValidFrame((uptr)caller_frame, stack_top, bottom) ||
!IsAligned((uptr)caller_frame, sizeof(uhwptr)))
break;
- // For most ABIs the offset where the return address is saved is two
- // register sizes. The exception is the SVR4 ABI, which uses an
- // offset of only one register size.
-#ifdef _CALL_SYSV
- uhwptr pc1 = caller_frame[1];
-#else
uhwptr pc1 = caller_frame[2];
-#endif
#elif defined(__s390__)
uhwptr pc1 = frame[14];
#elif defined(__loongarch__) || defined(__riscv)
}
virtual uptr ThreadCount() const { UNIMPLEMENTED(); }
- virtual tid_t GetThreadID(uptr index) const { UNIMPLEMENTED(); }
+ virtual ThreadID GetThreadID(uptr index) const { UNIMPLEMENTED(); }
protected:
~SuspendedThreadsList() {}
# include <asm/ptrace.h>
#endif
#include <sys/user.h> // for user_regs_struct
-#if SANITIZER_ANDROID && SANITIZER_MIPS
-# include <asm/reg.h> // for mips SP register in sys/user.h
-#endif
-#include <sys/wait.h> // for signal-related stuff
-
-#ifdef sa_handler
-# undef sa_handler
-#endif
-
-#ifdef sa_sigaction
-# undef sa_sigaction
-#endif
-
-#include "sanitizer_common.h"
-#include "sanitizer_flags.h"
-#include "sanitizer_libc.h"
-#include "sanitizer_linux.h"
-#include "sanitizer_mutex.h"
-#include "sanitizer_placement_new.h"
+# if SANITIZER_MIPS
+// clang-format off
+# include <asm/sgidefs.h> // <asm/sgidefs.h> must be included before <asm/reg.h>
+# include <asm/reg.h> // for mips SP register
+// clang-format on
+# endif
+# include <sys/wait.h> // for signal-related stuff
+
+# ifdef sa_handler
+# undef sa_handler
+# endif
+
+# ifdef sa_sigaction
+# undef sa_sigaction
+# endif
+
+# include "sanitizer_common.h"
+# include "sanitizer_flags.h"
+# include "sanitizer_libc.h"
+# include "sanitizer_linux.h"
+# include "sanitizer_mutex.h"
+# include "sanitizer_placement_new.h"
// Sufficiently old kernel headers don't provide this value, but we can still
// call prctl with it. If the runtime kernel is new enough, the prctl call will
public:
SuspendedThreadsListLinux() { thread_ids_.reserve(1024); }
- tid_t GetThreadID(uptr index) const override;
+ ThreadID GetThreadID(uptr index) const override;
uptr ThreadCount() const override;
- bool ContainsTid(tid_t thread_id) const;
- void Append(tid_t tid);
+ bool ContainsTid(ThreadID thread_id) const;
+ void Append(ThreadID tid);
PtraceRegistersStatus GetRegistersAndSP(uptr index,
InternalMmapVector<uptr> *buffer,
uptr *sp) const override;
private:
- InternalMmapVector<tid_t> thread_ids_;
+ InternalMmapVector<ThreadID> thread_ids_;
};
// Structure for passing arguments into the tracer thread.
private:
SuspendedThreadsListLinux suspended_threads_list_;
pid_t pid_;
- bool SuspendThread(tid_t thread_id);
+ bool SuspendThread(ThreadID thread_id);
};
-bool ThreadSuspender::SuspendThread(tid_t tid) {
+bool ThreadSuspender::SuspendThread(ThreadID tid) {
int pterrno;
if (internal_iserror(internal_ptrace(PTRACE_ATTACH, tid, nullptr, nullptr),
&pterrno)) {
bool ThreadSuspender::SuspendAllThreads() {
ThreadLister thread_lister(pid_);
bool retry = true;
- InternalMmapVector<tid_t> threads;
+ InternalMmapVector<ThreadID> threads;
threads.reserve(128);
for (int i = 0; i < 30 && retry; ++i) {
retry = false;
case ThreadLister::Ok:
break;
}
- for (tid_t tid : threads) {
+ for (ThreadID tid : threads) {
// Are we already attached to this thread?
// Currently this check takes linear time, however the number of threads
// is usually small.
}
};
+// This detects whether ptrace is blocked (e.g., by seccomp), by forking and
+// then attempting ptrace.
+// This separate check is necessary because StopTheWorld() creates a thread
+// with a shared virtual address space and shared TLS, and therefore
+// cannot use waitpid() due to the shared errno.
+static void TestPTrace() {
+# if SANITIZER_SPARC
+ // internal_fork() on SPARC actually calls __fork(). We can't safely fork,
+ // because it's possible seccomp has been configured to disallow fork() but
+ // allow clone().
+ VReport(1, "WARNING: skipping TestPTrace() because this is SPARC\n");
+ VReport(1,
+ "If seccomp blocks ptrace, LeakSanitizer may hang without further "
+ "notice\n");
+ VReport(
+ 1,
+ "If seccomp does not block ptrace, you can safely ignore this warning\n");
+# else
+ // Heuristic: only check the first time this is called. This is not always
+ // correct (e.g., user manually triggers leak detection, then updates
+ // seccomp, then leak detection is triggered again).
+ static bool checked = false;
+ if (checked)
+ return;
+ checked = true;
+
+ // Hopefully internal_fork() is not too expensive, thanks to copy-on-write.
+ // Besides, this is only called the first time.
+ // Note that internal_fork() on non-SPARC Linux actually calls
+ // SYSCALL(clone); thus, it is reasonable to use it because if seccomp kills
+ // TestPTrace(), it would have killed StopTheWorld() anyway.
+ int pid = internal_fork();
+
+ if (pid < 0) {
+ int rverrno;
+ if (internal_iserror(pid, &rverrno))
+ VReport(0, "WARNING: TestPTrace() failed to fork (errno %d)\n", rverrno);
+
+ // We don't abort the sanitizer - it's still worth letting the sanitizer
+ // try.
+ return;
+ }
+
+ if (pid == 0) {
+ // Child subprocess
+
+ // TODO: consider checking return value of internal_ptrace, to handle
+ // SCMP_ACT_ERRNO. However, be careful not to consume too many
+ // resources performing a proper ptrace.
+ internal_ptrace(PTRACE_ATTACH, 0, nullptr, nullptr);
+ internal__exit(0);
+ } else {
+ int wstatus;
+ internal_waitpid(pid, &wstatus, 0);
+
+ // Handle SCMP_ACT_KILL
+ if (WIFSIGNALED(wstatus)) {
+ VReport(0,
+ "WARNING: ptrace appears to be blocked (is seccomp enabled?). "
+ "LeakSanitizer may hang.\n");
+ VReport(0, "Child exited with signal %d.\n", WTERMSIG(wstatus));
+ // We don't abort the sanitizer - it's still worth letting the sanitizer
+ // try.
+ }
+ }
+# endif
+}
+
void StopTheWorld(StopTheWorldCallback callback, void *argument) {
+ TestPTrace();
+
StopTheWorldScope in_stoptheworld;
// Prepare the arguments for TracerThread.
struct TracerThreadArgument tracer_thread_argument;
internal_prctl(PR_SET_PTRACER, tracer_pid, 0, 0, 0);
// Allow the tracer thread to start.
tracer_thread_argument.mutex.Unlock();
- // NOTE: errno is shared between this thread and the tracer thread.
+ // NOTE: errno is shared between this thread and the tracer thread
+ // (clone was called without CLONE_SETTLS / newtls).
// internal_waitpid() may call syscall() which can access/spoil errno,
// so we can't call it now. Instead we wait for the tracer thread to finish using
// the spin loop below. Man page for sched_yield() says "In the Linux
#elif defined(__mips__)
typedef struct user regs_struct;
-# if SANITIZER_ANDROID
-# define REG_SP regs[EF_R29]
-# else
-# define REG_SP regs[EF_REG29]
-# endif
+# define REG_SP regs[EF_R29]
#elif defined(__aarch64__)
typedef struct user_pt_regs regs_struct;
#error "Unsupported architecture"
#endif // SANITIZER_ANDROID && defined(__arm__)
-tid_t SuspendedThreadsListLinux::GetThreadID(uptr index) const {
+ThreadID SuspendedThreadsListLinux::GetThreadID(uptr index) const {
CHECK_LT(index, thread_ids_.size());
return thread_ids_[index];
}
return thread_ids_.size();
}
-bool SuspendedThreadsListLinux::ContainsTid(tid_t thread_id) const {
+bool SuspendedThreadsListLinux::ContainsTid(ThreadID thread_id) const {
for (uptr i = 0; i < thread_ids_.size(); i++) {
if (thread_ids_[i] == thread_id) return true;
}
return false;
}
-void SuspendedThreadsListLinux::Append(tid_t tid) {
+void SuspendedThreadsListLinux::Append(ThreadID tid) {
thread_ids_.push_back(tid);
}
namespace __sanitizer {
typedef struct {
- tid_t tid;
+ ThreadID tid;
thread_t thread;
} SuspendedThreadInfo;
public:
SuspendedThreadsListMac() = default;
- tid_t GetThreadID(uptr index) const override;
+ ThreadID GetThreadID(uptr index) const override;
thread_t GetThread(uptr index) const;
uptr ThreadCount() const override;
bool ContainsThread(thread_t thread) const;
#error "Unsupported architecture"
#endif
-tid_t SuspendedThreadsListMac::GetThreadID(uptr index) const {
+ThreadID SuspendedThreadsListMac::GetThreadID(uptr index) const {
CHECK_LT(index, threads_.size());
return threads_[index].tid;
}
public:
SuspendedThreadsListNetBSD() { thread_ids_.reserve(1024); }
- tid_t GetThreadID(uptr index) const;
+ ThreadID GetThreadID(uptr index) const;
uptr ThreadCount() const;
- bool ContainsTid(tid_t thread_id) const;
- void Append(tid_t tid);
+ bool ContainsTid(ThreadID thread_id) const;
+ void Append(ThreadID tid);
PtraceRegistersStatus GetRegistersAndSP(uptr index,
InternalMmapVector<uptr> *buffer,
uptr *sp) const;
private:
- InternalMmapVector<tid_t> thread_ids_;
+ InternalMmapVector<ThreadID> thread_ids_;
};
struct TracerThreadArgument {
}
}
-tid_t SuspendedThreadsListNetBSD::GetThreadID(uptr index) const {
+ThreadID SuspendedThreadsListNetBSD::GetThreadID(uptr index) const {
CHECK_LT(index, thread_ids_.size());
return thread_ids_[index];
}
return thread_ids_.size();
}
-bool SuspendedThreadsListNetBSD::ContainsTid(tid_t thread_id) const {
+bool SuspendedThreadsListNetBSD::ContainsTid(ThreadID thread_id) const {
for (uptr i = 0; i < thread_ids_.size(); i++) {
if (thread_ids_[i] == thread_id)
return true;
return false;
}
-void SuspendedThreadsListNetBSD::Append(tid_t tid) {
+void SuspendedThreadsListNetBSD::Append(ThreadID tid) {
thread_ids_.push_back(tid);
}
InternalMmapVector<uptr> *buffer,
uptr *sp) const override;
- tid_t GetThreadID(uptr index) const override;
+ ThreadID GetThreadID(uptr index) const override;
uptr ThreadCount() const override;
};
# define SP_REG Esp
# elif SANITIZER_ARM | SANITIZER_ARM64
# define SP_REG Sp
+# elif SANITIZER_MIPS32
+# define SP_REG IntSp
# else
# error Architecture not supported!
# endif
return REGISTERS_AVAILABLE;
}
-tid_t SuspendedThreadsListWindows::GetThreadID(uptr index) const {
+ThreadID SuspendedThreadsListWindows::GetThreadID(uptr index) const {
CHECK_LT(index, threadIds.size());
return threadIds[index];
}
const char *ExtractToken(const char *str, const char *delims, char **result) {
uptr prefix_len = internal_strcspn(str, delims);
- *result = (char*)InternalAlloc(prefix_len + 1);
+ *result = (char *)InternalAlloc(prefix_len + 1);
internal_memcpy(*result, str, prefix_len);
(*result)[prefix_len] = '\0';
const char *prefix_end = str + prefix_len;
- if (*prefix_end != '\0') prefix_end++;
+ if (*prefix_end != '\0')
+ prefix_end++;
return prefix_end;
}
internal_memcpy(*result, str, prefix_len);
(*result)[prefix_len] = '\0';
const char *prefix_end = str + prefix_len;
- if (*prefix_end != '\0') prefix_end += internal_strlen(delimiter);
+ if (*prefix_end != '\0')
+ prefix_end += internal_strlen(delimiter);
return prefix_end;
}
modules_were_reloaded = true;
}
const LoadedModule *module = SearchForModule(modules_, address);
- if (module) return module;
+ if (module)
+ return module;
// dlopen/dlclose interceptors invalidate the module list, but when
// interception is disabled, we need to retry if the lookup fails in
// case the module list changed.
-#if !SANITIZER_INTERCEPT_DLOPEN_DLCLOSE
+# if !SANITIZER_INTERCEPT_DLOPEN_DLCLOSE
if (!modules_were_reloaded) {
RefreshModules();
module = SearchForModule(modules_, address);
- if (module) return module;
+ if (module)
+ return module;
}
-#endif
+# endif
if (fallback_modules_.size()) {
module = SearchForModule(fallback_modules_, address);
// script/asan_symbolize.py and sanitizer_common.h.
void GetArgV(const char *path_to_binary,
const char *(&argv)[kArgVMax]) const override {
-#if defined(__x86_64h__)
- const char* const kSymbolizerArch = "--default-arch=x86_64h";
-#elif defined(__x86_64__)
- const char* const kSymbolizerArch = "--default-arch=x86_64";
-#elif defined(__i386__)
- const char* const kSymbolizerArch = "--default-arch=i386";
-#elif SANITIZER_LOONGARCH64
+# if defined(__x86_64h__)
+ const char *const kSymbolizerArch = "--default-arch=x86_64h";
+# elif defined(__x86_64__)
+ const char *const kSymbolizerArch = "--default-arch=x86_64";
+# elif defined(__i386__)
+ const char *const kSymbolizerArch = "--default-arch=i386";
+# elif SANITIZER_LOONGARCH64
const char *const kSymbolizerArch = "--default-arch=loongarch64";
-#elif SANITIZER_RISCV64
+# elif SANITIZER_RISCV64
const char *const kSymbolizerArch = "--default-arch=riscv64";
-#elif defined(__aarch64__)
- const char* const kSymbolizerArch = "--default-arch=arm64";
-#elif defined(__arm__)
- const char* const kSymbolizerArch = "--default-arch=arm";
-#elif defined(__powerpc64__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
- const char* const kSymbolizerArch = "--default-arch=powerpc64";
-#elif defined(__powerpc64__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
- const char* const kSymbolizerArch = "--default-arch=powerpc64le";
-#elif defined(__s390x__)
- const char* const kSymbolizerArch = "--default-arch=s390x";
-#elif defined(__s390__)
- const char* const kSymbolizerArch = "--default-arch=s390";
-#else
- const char* const kSymbolizerArch = "--default-arch=unknown";
-#endif
+# elif defined(__aarch64__)
+ const char *const kSymbolizerArch = "--default-arch=arm64";
+# elif defined(__arm__)
+ const char *const kSymbolizerArch = "--default-arch=arm";
+# elif defined(__powerpc64__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ const char *const kSymbolizerArch = "--default-arch=powerpc64";
+# elif defined(__powerpc64__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ const char *const kSymbolizerArch = "--default-arch=powerpc64le";
+# elif defined(__s390x__)
+ const char *const kSymbolizerArch = "--default-arch=s390x";
+# elif defined(__s390__)
+ const char *const kSymbolizerArch = "--default-arch=s390";
+# else
+ const char *const kSymbolizerArch = "--default-arch=unknown";
+# endif
const char *const demangle_flag =
common_flags()->demangle ? "--demangle" : "--no-demangle";
char *back = file_line_info + size - 1;
for (int i = 0; i < 2; ++i) {
while (back > file_line_info && IsDigit(*back)) --back;
- if (*back != ':' || !IsDigit(back[1])) break;
+ if (*back != ':' || !IsDigit(back[1]))
+ break;
info->column = info->line;
info->line = internal_atoll(back + 1);
// Truncate the string at the colon to keep only filename.
if (!buf)
return false;
ParseSymbolizeDataOutput(buf, info);
- info->start += (addr - info->module_offset); // Add the base address.
+ info->start += (addr - info->module_offset); // Add the base address.
return true;
}
size_needed = internal_snprintf(buffer_, kBufferSize, "%s \"%s\" 0x%zx\n",
command_prefix, module_name, module_offset);
else
- size_needed = internal_snprintf(buffer_, kBufferSize,
- "%s \"%s:%s\" 0x%zx\n", command_prefix,
- module_name, ModuleArchToString(arch),
- module_offset);
+ size_needed = internal_snprintf(
+ buffer_, kBufferSize, "%s \"%s:%s\" 0x%zx\n", command_prefix,
+ module_name, ModuleArchToString(arch), module_offset);
if (size_needed >= static_cast<int>(kBufferSize)) {
Report("WARNING: Command buffer too small");
CHECK_NE(path_[0], '\0');
}
-static bool IsSameModule(const char* path) {
- if (const char* ProcessName = GetProcessName()) {
- if (const char* SymbolizerName = StripModuleName(path)) {
+static bool IsSameModule(const char *path) {
+ if (const char *ProcessName = GetProcessName()) {
+ if (const char *SymbolizerName = StripModuleName(path)) {
return !internal_strcmp(ProcessName, SymbolizerName);
}
}
const char *SymbolizerProcess::SendCommandImpl(const char *command) {
if (input_fd_ == kInvalidFd || output_fd_ == kInvalidFd)
- return nullptr;
+ return nullptr;
if (!WriteToSymbolizer(command, internal_strlen(command)))
- return nullptr;
+ return nullptr;
if (!ReadFromSymbolizer())
return nullptr;
return buffer_.data();
// because we do not require a C++ ABI library to be linked to a program
// using sanitizers; if it's not present, we'll just use the mangled name.
namespace __cxxabiv1 {
- extern "C" SANITIZER_WEAK_ATTRIBUTE
- char *__cxa_demangle(const char *mangled, char *buffer,
- size_t *length, int *status);
+extern "C" SANITIZER_WEAK_ATTRIBUTE char *__cxa_demangle(const char *mangled,
+ char *buffer,
+ size_t *length,
+ int *status);
}
namespace __sanitizer {
// it does not allocate). For now, we just call it anyway, and we leak
// the returned value.
if (&__cxxabiv1::__cxa_demangle)
- if (const char *demangled_name =
- __cxxabiv1::__cxa_demangle(name, 0, 0, 0))
+ if (const char *demangled_name = __cxxabiv1::__cxa_demangle(name, 0, 0, 0))
return demangled_name;
return nullptr;
}
const char *DemangleSwiftAndCXX(const char *name) {
- if (!name) return nullptr;
+ if (!name)
+ return nullptr;
if (const char *swift_demangled_name = DemangleSwift(name))
return swift_demangled_name;
return DemangleCXXABI(name);
} else {
outfd = sock_pair[i];
for (int j = 0; j < i; j++) {
- if (sock_pair[j] == infd) continue;
+ if (sock_pair[j] == infd)
+ continue;
internal_close(sock_pair[j][0]);
internal_close(sock_pair[j][1]);
}
}
if (use_posix_spawn_) {
-#if SANITIZER_APPLE
+# if SANITIZER_APPLE
fd_t fd = internal_spawn(argv, const_cast<const char **>(GetEnvP()), &pid);
if (fd == kInvalidFd) {
Report("WARNING: failed to spawn external symbolizer (errno: %d)\n",
input_fd_ = fd;
output_fd_ = fd;
-#else // SANITIZER_APPLE
+# else // SANITIZER_APPLE
UNIMPLEMENTED();
-#endif // SANITIZER_APPLE
+# endif // SANITIZER_APPLE
} else {
fd_t infd[2] = {}, outfd[2] = {};
if (!CreateTwoHighNumberedPipes(infd, outfd)) {
- Report("WARNING: Can't create a socket pair to start "
- "external symbolizer (errno: %d)\n", errno);
+ Report(
+ "WARNING: Can't create a socket pair to start "
+ "external symbolizer (errno: %d)\n",
+ errno);
return false;
}
// 1. First one, corresponding to given offset to be symbolized
// (may be equal to output_terminator_, if offset is not valid).
// 2. Second one for output_terminator_, itself to mark the end of output.
- if (length <= kTerminatorLen) return false;
+ if (length <= kTerminatorLen)
+ return false;
// Addr2Line output should end up with output_terminator_.
- return !internal_memcmp(buffer + length - kTerminatorLen,
- output_terminator_, kTerminatorLen);
+ return !internal_memcmp(buffer + length - kTerminatorLen, output_terminator_,
+ kTerminatorLen);
}
class Addr2LinePool final : public SymbolizerTool {
return false;
}
- bool SymbolizeData(uptr addr, DataInfo *info) override {
- return false;
- }
+ bool SymbolizeData(uptr addr, DataInfo *info) override { return false; }
private:
const char *SendCommand(const char *module_name, uptr module_offset) {
}
if (!addr2line) {
addr2line =
- new(*allocator_) Addr2LineProcess(addr2line_path_, module_name);
+ new (*allocator_) Addr2LineProcess(addr2line_path_, module_name);
addr2line_pool_.push_back(addr2line);
}
CHECK_EQ(0, internal_strcmp(module_name, addr2line->module_name()));
char buffer[kBufferSize];
- internal_snprintf(buffer, kBufferSize, "0x%zx\n0x%zx\n",
- module_offset, dummy_address_);
+ internal_snprintf(buffer, kBufferSize, "0x%zx\n0x%zx\n", module_offset,
+ dummy_address_);
return addr2line->SendCommand(buffer);
}
static const uptr kBufferSize = 64;
const char *addr2line_path_;
LowLevelAllocator *allocator_;
- InternalMmapVector<Addr2LineProcess*> addr2line_pool_;
- static const uptr dummy_address_ =
- FIRST_32_SECOND_64(UINT32_MAX, UINT64_MAX);
+ InternalMmapVector<Addr2LineProcess *> addr2line_pool_;
+ static const uptr dummy_address_ = FIRST_32_SECOND_64(UINT32_MAX, UINT64_MAX);
};
# if SANITIZER_SUPPORTS_WEAK_HOOKS
}
bool SymbolizePC(uptr addr, SymbolizedStack *stack) override {
- bool result = __sanitizer_symbolize_code(
- stack->info.module, stack->info.module_offset, buffer_, sizeof(buffer_));
+ bool result = __sanitizer_symbolize_code(stack->info.module,
+ stack->info.module_offset, buffer_,
+ sizeof(buffer_));
if (result)
ParseSymbolizePCOutput(buffer_, stack);
return result;
} else if (!internal_strncmp(binary_name, kLLVMSymbolizerPrefix,
internal_strlen(kLLVMSymbolizerPrefix))) {
VReport(2, "Using llvm-symbolizer at user-specified path: %s\n", path);
- return new(*allocator) LLVMSymbolizer(path, allocator);
+ return new (*allocator) LLVMSymbolizer(path, allocator);
} else if (!internal_strcmp(binary_name, "atos")) {
-#if SANITIZER_APPLE
+# if SANITIZER_APPLE
VReport(2, "Using atos at user-specified path: %s\n", path);
- return new(*allocator) AtosSymbolizer(path, allocator);
-#else // SANITIZER_APPLE
+ return new (*allocator) AtosSymbolizer(path, allocator);
+# else // SANITIZER_APPLE
Report("ERROR: Using `atos` is only supported on Darwin.\n");
Die();
-#endif // SANITIZER_APPLE
+# endif // SANITIZER_APPLE
} else if (!internal_strcmp(binary_name, "addr2line")) {
VReport(2, "Using addr2line at user-specified path: %s\n", path);
- return new(*allocator) Addr2LinePool(path, allocator);
+ return new (*allocator) Addr2LinePool(path, allocator);
} else if (path) {
- Report("ERROR: External symbolizer path is set to '%s' which isn't "
- "a known symbolizer. Please set the path to the llvm-symbolizer "
- "binary or other known tool.\n", path);
+ Report(
+ "ERROR: External symbolizer path is set to '%s' which isn't "
+ "a known symbolizer. Please set the path to the llvm-symbolizer "
+ "binary or other known tool.\n",
+ path);
Die();
}
// Otherwise symbolizer program is unknown, let's search $PATH
+# ifdef SANITIZER_DISABLE_SYMBOLIZER_PATH_SEARCH
+ VReport(2,
+ "Symbolizer path search is disabled in the runtime "
+ "build configuration.\n");
+ return nullptr;
+# else
CHECK(path == nullptr);
-#if SANITIZER_APPLE
+# if SANITIZER_APPLE
if (const char *found_path = FindPathToBinary("atos")) {
VReport(2, "Using atos found at: %s\n", found_path);
- return new(*allocator) AtosSymbolizer(found_path, allocator);
+ return new (*allocator) AtosSymbolizer(found_path, allocator);
}
-#endif // SANITIZER_APPLE
+# endif // SANITIZER_APPLE
if (const char *found_path = FindPathToBinary("llvm-symbolizer")) {
VReport(2, "Using llvm-symbolizer found at: %s\n", found_path);
- return new(*allocator) LLVMSymbolizer(found_path, allocator);
+ return new (*allocator) LLVMSymbolizer(found_path, allocator);
}
if (common_flags()->allow_addr2line) {
if (const char *found_path = FindPathToBinary("addr2line")) {
VReport(2, "Using addr2line found at: %s\n", found_path);
- return new(*allocator) Addr2LinePool(found_path, allocator);
+ return new (*allocator) Addr2LinePool(found_path, allocator);
}
}
return nullptr;
+# endif // SANITIZER_DISABLE_SYMBOLIZER_PATH_SEARCH
}
static void ChooseSymbolizerTools(IntrusiveList<SymbolizerTool> *list,
list->push_back(tool);
}
-#if SANITIZER_APPLE
+# if SANITIZER_APPLE
+ if (list->empty()) {
+ Report(
+ "WARN: No external symbolizers found. Symbols may be missing or "
+ "unreliable.\n");
+ Report(
+ "HINT: Is PATH set? Does sandbox allow file-read of /usr/bin/atos?\n");
+ }
VReport(2, "Using dladdr symbolizer.\n");
- list->push_back(new(*allocator) DlAddrSymbolizer());
-#endif // SANITIZER_APPLE
+ list->push_back(new (*allocator) DlAddrSymbolizer());
+# endif // SANITIZER_APPLE
}
Symbolizer *Symbolizer::PlatformInit() {
IntrusiveList<SymbolizerTool> list;
list.clear();
ChooseSymbolizerTools(&list, &symbolizer_allocator_);
- return new(symbolizer_allocator_) Symbolizer(list);
+ return new (symbolizer_allocator_) Symbolizer(list);
}
void Symbolizer::LateInitialize() {
return true;
if (file && internal_strstr(file, "\\compiler-rt\\lib\\"))
return true;
- if (file && internal_strstr(file, "\\libsanitizer\\"))
- return true;
if (module && (internal_strstr(module, "libclang_rt.")))
return true;
if (module && (internal_strstr(module, "clang_rt.")))
return true;
- if (module && (internal_strstr(module, "libtsan.")
- || internal_strstr(module, "libhwasan.")
- || internal_strstr(module, "liblsan.")
- || internal_strstr(module, "libasan.")
- || internal_strstr(module, "libubsan.")))
- return true;
return false;
}
OnFinished();
}
-void ThreadContextBase::SetStarted(tid_t _os_id, ThreadType _thread_type,
+void ThreadContextBase::SetStarted(ThreadID _os_id, ThreadType _thread_type,
void *arg) {
status = ThreadStatusRunning;
os_id = _os_id;
tctx->status != ThreadStatusDead);
}
-ThreadContextBase *ThreadRegistry::FindThreadContextByOsIDLocked(tid_t os_id) {
+ThreadContextBase *ThreadRegistry::FindThreadContextByOsIDLocked(
+ ThreadID os_id) {
return FindThreadContextLocked(FindThreadContextByOsIdCallback,
(void *)os_id);
}
return prev_status;
}
-void ThreadRegistry::StartThread(u32 tid, tid_t os_id, ThreadType thread_type,
- void *arg) {
+void ThreadRegistry::StartThread(u32 tid, ThreadID os_id,
+ ThreadType thread_type, void *arg) {
ThreadRegistryLock l(this);
running_threads_++;
ThreadContextBase *tctx = threads_[tid];
const u32 tid; // Thread ID. Main thread should have tid = 0.
u64 unique_id; // Unique thread ID.
u32 reuse_count; // Number of times this tid was reused.
- tid_t os_id; // PID (used for reporting).
+ ThreadID os_id; // PID (used for reporting).
uptr user_id; // Some opaque user thread id (e.g. pthread_t).
char name[64]; // As annotated by user.
void SetDead();
void SetJoined(void *arg);
void SetFinished();
- void SetStarted(tid_t _os_id, ThreadType _thread_type, void *arg);
+ void SetStarted(ThreadID _os_id, ThreadType _thread_type, void *arg);
void SetCreated(uptr _user_id, u64 _unique_id, bool _detached,
u32 _parent_tid, u32 _stack_tid, void *arg);
void Reset();
// is found.
ThreadContextBase *FindThreadContextLocked(FindThreadCallback cb,
void *arg);
- ThreadContextBase *FindThreadContextByOsIDLocked(tid_t os_id);
+ ThreadContextBase *FindThreadContextByOsIDLocked(ThreadID os_id);
void SetThreadName(u32 tid, const char *name);
void SetThreadNameByUserId(uptr user_id, const char *name);
void JoinThread(u32 tid, void *arg);
// Finishes thread and returns previous status.
ThreadStatus FinishThread(u32 tid);
- void StartThread(u32 tid, tid_t os_id, ThreadType thread_type, void *arg);
+ void StartThread(u32 tid, ThreadID os_id, ThreadType thread_type, void *arg);
u32 ConsumeThreadUserId(uptr user_id);
void SetThreadUserId(u32 tid, uptr user_id);
#include "sanitizer_platform.h"
#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
- SANITIZER_SOLARIS
+ SANITIZER_SOLARIS || SANITIZER_HAIKU
#include "sanitizer_common.h"
#include "sanitizer_stacktrace.h"
} // namespace
-#if SANITIZER_ANDROID
-void SanitizerInitializeUnwinder() {
- if (AndroidGetApiLevel() >= ANDROID_LOLLIPOP_MR1) return;
-
- // Pre-lollipop Android can not unwind through signal handler frames with
- // libgcc unwinder, but it has a libcorkscrew.so library with the necessary
- // workarounds.
- void *p = dlopen("libcorkscrew.so", RTLD_LAZY);
- if (!p) {
- VReport(1,
- "Failed to open libcorkscrew.so. You may see broken stack traces "
- "in SEGV reports.");
- return;
- }
- acquire_my_map_info_list =
- (acquire_my_map_info_list_func)(uptr)dlsym(p, "acquire_my_map_info_list");
- release_my_map_info_list =
- (release_my_map_info_list_func)(uptr)dlsym(p, "release_my_map_info_list");
- unwind_backtrace_signal_arch = (unwind_backtrace_signal_arch_func)(uptr)dlsym(
- p, "unwind_backtrace_signal_arch");
- if (!acquire_my_map_info_list || !release_my_map_info_list ||
- !unwind_backtrace_signal_arch) {
- VReport(1,
- "Failed to find one of the required symbols in libcorkscrew.so. "
- "You may see broken stack traces in SEGV reports.");
- acquire_my_map_info_list = 0;
- unwind_backtrace_signal_arch = 0;
- release_my_map_info_list = 0;
- }
-}
-#endif
-
void BufferedStackTrace::UnwindSlow(uptr pc, u32 max_depth) {
CHECK_GE(max_depth, 2);
size = 0;
} // namespace __sanitizer
#endif // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD ||
- // SANITIZER_SOLARIS
+ // SANITIZER_SOLARIS || SANITIZER_HAIKU
stack_frame.AddrPC.Offset = ctx.Pc;
stack_frame.AddrFrame.Offset = ctx.R11;
stack_frame.AddrStack.Offset = ctx.Sp;
+# elif SANITIZER_MIPS32
+ int machine_type = IMAGE_FILE_MACHINE_R4000;
+ stack_frame.AddrPC.Offset = ctx.Fir;
+ stack_frame.AddrFrame.Offset = ctx.IntS8;
+ stack_frame.AddrStack.Offset = ctx.IntSp;
# else
int machine_type = IMAGE_FILE_MACHINE_I386;
stack_frame.AddrPC.Offset = ctx.Eip;
// In contrast to POSIX, on Windows GetCurrentThreadId()
// returns a system-unique identifier.
-tid_t GetTid() {
- return GetCurrentThreadId();
-}
+ThreadID GetTid() { return GetCurrentThreadId(); }
uptr GetThreadSelf() {
return GetTid();
static void *ReturnNullptrOnOOMOrDie(uptr size, const char *mem_type,
const char *mmap_type) {
error_t last_error = GetLastError();
- if (last_error == ERROR_NOT_ENOUGH_MEMORY)
+
+ // Assumption: VirtualAlloc is the last system call that was invoked before
+ // this method.
+ // VirtualAlloc emits one of 3 error codes when running out of memory
+ // 1. ERROR_NOT_ENOUGH_MEMORY:
+ // There's not enough memory to execute the command
+ // 2. ERROR_INVALID_PARAMETER:
+ // VirtualAlloc will return this if the request would allocate memory at an
+ // address exceeding or being very close to the maximum application address
+ // (the `lpMaximumApplicationAddress` field within the `SystemInfo` struct).
+ // This does not seem to be officially documented, but is corroborated here:
+ // https://stackoverflow.com/questions/45833674/why-does-virtualalloc-fail-for-lpaddress-greater-than-0x6ffffffffff
+ // 3. ERROR_COMMITMENT_LIMIT:
+ // VirtualAlloc will return this if e.g. the pagefile is too small to commit
+ // the requested amount of memory.
+ if (last_error == ERROR_NOT_ENOUGH_MEMORY ||
+ last_error == ERROR_INVALID_PARAMETER ||
+ last_error == ERROR_COMMITMENT_LIMIT)
return nullptr;
ReportMmapFailureAndDie(size, mem_type, mmap_type, last_error);
}
# if SANITIZER_ARM
bp = (uptr)context_record->R11;
sp = (uptr)context_record->Sp;
+# elif SANITIZER_MIPS32
+ bp = (uptr)context_record->IntS8;
+ sp = (uptr)context_record->IntSp;
# else
bp = (uptr)context_record->Ebp;
sp = (uptr)context_record->Esp;
}
SANITIZER_INTERFACE_ATTRIBUTE
-int __tsan_get_report_thread(void *report, uptr idx, int *tid, tid_t *os_id,
+int __tsan_get_report_thread(void *report, uptr idx, int *tid, ThreadID *os_id,
int *running, const char **name, int *parent_tid,
void **trace, uptr trace_size) {
const ReportDesc *rep = (ReportDesc *)report;
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_get_alloc_stack(uptr addr, uptr *trace, uptr size, int *thread_id,
- tid_t *os_id) {
+ ThreadID *os_id) {
MBlock *b = 0;
Allocator *a = allocator();
if (a->PointerIsMine((void *)addr)) {
#include "tsan_rtl.h"
#include "ubsan/ubsan_flags.h"
+#if SANITIZER_APPLE && !SANITIZER_GO
+namespace __sanitizer {
+
+template <>
+inline bool FlagHandler<LockDuringWriteSetting>::Parse(const char *value) {
+ if (internal_strcmp(value, "on") == 0) {
+ *t_ = kLockDuringAllWrites;
+ return true;
+ }
+ if (internal_strcmp(value, "disable_for_current_process") == 0) {
+ *t_ = kNoLockDuringWritesCurrentProcess;
+ return true;
+ }
+ if (internal_strcmp(value, "disable_for_all_processes") == 0) {
+ *t_ = kNoLockDuringWritesAllProcesses;
+ return true;
+ }
+ Printf("ERROR: Invalid value for signal handler option: '%s'\n", value);
+ return false;
+}
+
+template <>
+inline bool FlagHandler<LockDuringWriteSetting>::Format(char *buffer,
+ uptr size) {
+ switch (*t_) {
+ case kLockDuringAllWrites:
+ return FormatString(buffer, size, "on");
+ case kNoLockDuringWritesCurrentProcess:
+ return FormatString(buffer, size, "disable_for_current_process");
+ case kNoLockDuringWritesAllProcesses:
+ return FormatString(buffer, size, "disable_for_all_processes");
+ }
+}
+
+} // namespace __sanitizer
+#endif // SANITIZER_APPLE && !SANITIZER_GO
+
namespace __tsan {
// Can be overridden in frontend.
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
+#if SANITIZER_APPLE && !SANITIZER_GO
+enum LockDuringWriteSetting {
+ kLockDuringAllWrites,
+ kNoLockDuringWritesCurrentProcess,
+ kNoLockDuringWritesAllProcesses,
+};
+#endif
+
namespace __tsan {
struct Flags : DDFlags {
TSAN_FLAG(bool, print_full_thread_history, false,
"If set, prints thread creation stacks for the threads involved in "
"the report and their ancestors up to the main thread.")
+
+#if SANITIZER_APPLE && !SANITIZER_GO
+TSAN_FLAG(LockDuringWriteSetting, lock_during_write, kLockDuringAllWrites,
+ "Determines whether to obtain a lock while writing logs or error "
+ "reports. "
+ "\"on\" - [default] lock during all writes. "
+ "\"disable_for_current_process\" - don't lock during all writes in "
+ "the current process, but do lock for all writes in child "
+ "processes."
+ "\"disable_for_all_processes\" - don't lock during all writes in "
+ "the current process and it's children processes.")
+#endif
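A usage sketch for the flag (value strings match the parser earlier in this patch): launching a process with

  TSAN_OPTIONS=lock_during_write=disable_for_current_process ./app

selects kNoLockDuringWritesCurrentProcess, and the runtime's own write() calls (marked by thr->in_internal_write_call) then bypass interception via MustIgnoreInterceptor(), as shown in the hunk below.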
#ifndef TSAN_INTERCEPTORS_H
#define TSAN_INTERCEPTORS_H
+#if SANITIZER_APPLE && !SANITIZER_GO
+# include "sanitizer_common/sanitizer_mac.h"
+#endif
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "tsan_rtl.h"
#endif
inline bool MustIgnoreInterceptor(ThreadState *thr) {
- return !thr->is_inited || thr->ignore_interceptors || thr->in_ignored_lib;
+ return !thr->is_inited || thr->ignore_interceptors || thr->in_ignored_lib
+#if SANITIZER_APPLE && !SANITIZER_GO
+ || (flags()->lock_during_write != kLockDuringAllWrites &&
+ thr->in_internal_write_call)
+#endif
+ ;
}
} // namespace __tsan
#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_APPLE
-#include "interception/interception.h"
-#include "tsan_interceptors.h"
-#include "tsan_interface.h"
-#include "tsan_interface_ann.h"
-#include "tsan_spinlock_defs_mac.h"
-#include "sanitizer_common/sanitizer_addrhashmap.h"
-
-#include <errno.h>
-#include <libkern/OSAtomic.h>
-#include <objc/objc-sync.h>
-#include <os/lock.h>
-#include <sys/ucontext.h>
-
-#if defined(__has_include) && __has_include(<xpc/xpc.h>)
-#include <xpc/xpc.h>
-#endif // #if defined(__has_include) && __has_include(<xpc/xpc.h>)
+# include <errno.h>
+# include <libkern/OSAtomic.h>
+# include <objc/objc-sync.h>
+# include <os/lock.h>
+# include <sys/ucontext.h>
+
+# include "interception/interception.h"
+# include "sanitizer_common/sanitizer_addrhashmap.h"
+# include "tsan_interceptors.h"
+# include "tsan_interface.h"
+# include "tsan_interface_ann.h"
+
+# if defined(__has_include) && __has_include(<xpc/xpc.h>)
+# include <xpc/xpc.h>
+# endif // #if defined(__has_include) && __has_include(<xpc/xpc.h>)
typedef long long_t;
static constexpr morder kMacOrderNonBarrier = mo_acq_rel;
static constexpr morder kMacFailureOrder = mo_relaxed;
-#define OSATOMIC_INTERCEPTOR(return_t, t, tsan_t, f, tsan_atomic_f, mo) \
- TSAN_INTERCEPTOR(return_t, f, t x, volatile t *ptr) { \
- SCOPED_TSAN_INTERCEPTOR(f, x, ptr); \
- return tsan_atomic_f((volatile tsan_t *)ptr, x, mo); \
- }
+# define OSATOMIC_INTERCEPTOR(return_t, t, tsan_t, f, tsan_atomic_f, mo) \
+ TSAN_INTERCEPTOR(return_t, f, t x, volatile t *ptr) { \
+ SCOPED_TSAN_INTERCEPTOR(f, x, ptr); \
+ return tsan_atomic_f((volatile tsan_t *)ptr, x, mo); \
+ }
-#define OSATOMIC_INTERCEPTOR_PLUS_X(return_t, t, tsan_t, f, tsan_atomic_f, mo) \
- TSAN_INTERCEPTOR(return_t, f, t x, volatile t *ptr) { \
- SCOPED_TSAN_INTERCEPTOR(f, x, ptr); \
- return tsan_atomic_f((volatile tsan_t *)ptr, x, mo) + x; \
- }
+# define OSATOMIC_INTERCEPTOR_PLUS_X(return_t, t, tsan_t, f, tsan_atomic_f, \
+ mo) \
+ TSAN_INTERCEPTOR(return_t, f, t x, volatile t *ptr) { \
+ SCOPED_TSAN_INTERCEPTOR(f, x, ptr); \
+ return tsan_atomic_f((volatile tsan_t *)ptr, x, mo) + x; \
+ }
-#define OSATOMIC_INTERCEPTOR_PLUS_1(return_t, t, tsan_t, f, tsan_atomic_f, mo) \
- TSAN_INTERCEPTOR(return_t, f, volatile t *ptr) { \
- SCOPED_TSAN_INTERCEPTOR(f, ptr); \
- return tsan_atomic_f((volatile tsan_t *)ptr, 1, mo) + 1; \
- }
+# define OSATOMIC_INTERCEPTOR_PLUS_1(return_t, t, tsan_t, f, tsan_atomic_f, \
+ mo) \
+ TSAN_INTERCEPTOR(return_t, f, volatile t *ptr) { \
+ SCOPED_TSAN_INTERCEPTOR(f, ptr); \
+ return tsan_atomic_f((volatile tsan_t *)ptr, 1, mo) + 1; \
+ }
-#define OSATOMIC_INTERCEPTOR_MINUS_1(return_t, t, tsan_t, f, tsan_atomic_f, \
- mo) \
- TSAN_INTERCEPTOR(return_t, f, volatile t *ptr) { \
- SCOPED_TSAN_INTERCEPTOR(f, ptr); \
- return tsan_atomic_f((volatile tsan_t *)ptr, 1, mo) - 1; \
- }
+# define OSATOMIC_INTERCEPTOR_MINUS_1(return_t, t, tsan_t, f, tsan_atomic_f, \
+ mo) \
+ TSAN_INTERCEPTOR(return_t, f, volatile t *ptr) { \
+ SCOPED_TSAN_INTERCEPTOR(f, ptr); \
+ return tsan_atomic_f((volatile tsan_t *)ptr, 1, mo) - 1; \
+ }
-#define OSATOMIC_INTERCEPTORS_ARITHMETIC(f, tsan_atomic_f, m) \
- m(int32_t, int32_t, a32, f##32, __tsan_atomic32_##tsan_atomic_f, \
- kMacOrderNonBarrier) \
- m(int32_t, int32_t, a32, f##32##Barrier, __tsan_atomic32_##tsan_atomic_f, \
- kMacOrderBarrier) \
- m(int64_t, int64_t, a64, f##64, __tsan_atomic64_##tsan_atomic_f, \
- kMacOrderNonBarrier) \
- m(int64_t, int64_t, a64, f##64##Barrier, __tsan_atomic64_##tsan_atomic_f, \
- kMacOrderBarrier)
-
-#define OSATOMIC_INTERCEPTORS_BITWISE(f, tsan_atomic_f, m, m_orig) \
- m(int32_t, uint32_t, a32, f##32, __tsan_atomic32_##tsan_atomic_f, \
- kMacOrderNonBarrier) \
- m(int32_t, uint32_t, a32, f##32##Barrier, __tsan_atomic32_##tsan_atomic_f, \
- kMacOrderBarrier) \
- m_orig(int32_t, uint32_t, a32, f##32##Orig, __tsan_atomic32_##tsan_atomic_f, \
- kMacOrderNonBarrier) \
- m_orig(int32_t, uint32_t, a32, f##32##OrigBarrier, \
- __tsan_atomic32_##tsan_atomic_f, kMacOrderBarrier)
-
-
-#pragma clang diagnostic push
-// OSAtomic* functions are deprecated.
-#pragma clang diagnostic ignored "-Wdeprecated-declarations"
+# define OSATOMIC_INTERCEPTORS_ARITHMETIC(f, tsan_atomic_f, m) \
+ m(int32_t, int32_t, a32, f##32, __tsan_atomic32_##tsan_atomic_f, \
+ kMacOrderNonBarrier) \
+ m(int32_t, int32_t, a32, f##32##Barrier, \
+ __tsan_atomic32_##tsan_atomic_f, kMacOrderBarrier) \
+ m(int64_t, int64_t, a64, f##64, __tsan_atomic64_##tsan_atomic_f, \
+ kMacOrderNonBarrier) \
+ m(int64_t, int64_t, a64, f##64##Barrier, \
+ __tsan_atomic64_##tsan_atomic_f, kMacOrderBarrier)
+
+# define OSATOMIC_INTERCEPTORS_BITWISE(f, tsan_atomic_f, m, m_orig) \
+ m(int32_t, uint32_t, a32, f##32, __tsan_atomic32_##tsan_atomic_f, \
+ kMacOrderNonBarrier) \
+ m(int32_t, uint32_t, a32, f##32##Barrier, \
+ __tsan_atomic32_##tsan_atomic_f, kMacOrderBarrier) \
+ m_orig(int32_t, uint32_t, a32, f##32##Orig, \
+ __tsan_atomic32_##tsan_atomic_f, kMacOrderNonBarrier) \
+ m_orig(int32_t, uint32_t, a32, f##32##OrigBarrier, \
+ __tsan_atomic32_##tsan_atomic_f, kMacOrderBarrier)
+
+# pragma clang diagnostic push // OSAtomic* deprecation
+# pragma clang diagnostic ignored "-Wdeprecated-declarations"
OSATOMIC_INTERCEPTORS_ARITHMETIC(OSAtomicAdd, fetch_add,
OSATOMIC_INTERCEPTOR_PLUS_X)
OSATOMIC_INTERCEPTORS_ARITHMETIC(OSAtomicIncrement, fetch_add,
OSATOMIC_INTERCEPTOR_PLUS_X, OSATOMIC_INTERCEPTOR)
OSATOMIC_INTERCEPTORS_BITWISE(OSAtomicXor, fetch_xor,
OSATOMIC_INTERCEPTOR_PLUS_X, OSATOMIC_INTERCEPTOR)
+# pragma clang diagnostic pop // OSAtomic* deprecation
+
+# define OSATOMIC_INTERCEPTORS_CAS(f, tsan_atomic_f, tsan_t, t) \
+ TSAN_INTERCEPTOR(bool, f, t old_value, t new_value, t volatile *ptr) { \
+ SCOPED_TSAN_INTERCEPTOR(f, old_value, new_value, ptr); \
+ return tsan_atomic_f##_compare_exchange_strong( \
+ (volatile tsan_t *)ptr, (tsan_t *)&old_value, (tsan_t)new_value, \
+ kMacOrderNonBarrier, kMacFailureOrder); \
+ } \
+ \
+ TSAN_INTERCEPTOR(bool, f##Barrier, t old_value, t new_value, \
+ t volatile *ptr) { \
+ SCOPED_TSAN_INTERCEPTOR(f##Barrier, old_value, new_value, ptr); \
+ return tsan_atomic_f##_compare_exchange_strong( \
+ (volatile tsan_t *)ptr, (tsan_t *)&old_value, (tsan_t)new_value, \
+ kMacOrderBarrier, kMacFailureOrder); \
+ }
-#define OSATOMIC_INTERCEPTORS_CAS(f, tsan_atomic_f, tsan_t, t) \
- TSAN_INTERCEPTOR(bool, f, t old_value, t new_value, t volatile *ptr) { \
- SCOPED_TSAN_INTERCEPTOR(f, old_value, new_value, ptr); \
- return tsan_atomic_f##_compare_exchange_strong( \
- (volatile tsan_t *)ptr, (tsan_t *)&old_value, (tsan_t)new_value, \
- kMacOrderNonBarrier, kMacFailureOrder); \
- } \
- \
- TSAN_INTERCEPTOR(bool, f##Barrier, t old_value, t new_value, \
- t volatile *ptr) { \
- SCOPED_TSAN_INTERCEPTOR(f##Barrier, old_value, new_value, ptr); \
- return tsan_atomic_f##_compare_exchange_strong( \
- (volatile tsan_t *)ptr, (tsan_t *)&old_value, (tsan_t)new_value, \
- kMacOrderBarrier, kMacFailureOrder); \
- }
-
-#pragma clang diagnostic push
-// OSAtomicCompareAndSwap* functions are deprecated.
-#pragma clang diagnostic ignored "-Wdeprecated-declarations"
+# pragma clang diagnostic push // OSAtomicCompareAndSwap* deprecation
+# pragma clang diagnostic ignored "-Wdeprecated-declarations"
OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwapInt, __tsan_atomic32, a32, int)
OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwapLong, __tsan_atomic64, a64,
long_t)
OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwap32, __tsan_atomic32, a32,
                          int32_t)
OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwap64, __tsan_atomic64, a64,
int64_t)
-#pragma clang diagnostic pop
-
-#define OSATOMIC_INTERCEPTOR_BITOP(f, op, clear, mo) \
- TSAN_INTERCEPTOR(bool, f, uint32_t n, volatile void *ptr) { \
- SCOPED_TSAN_INTERCEPTOR(f, n, ptr); \
- volatile char *byte_ptr = ((volatile char *)ptr) + (n >> 3); \
- char bit = 0x80u >> (n & 7); \
- char mask = clear ? ~bit : bit; \
- char orig_byte = op((volatile a8 *)byte_ptr, mask, mo); \
- return orig_byte & bit; \
- }
+# pragma clang diagnostic pop // OSAtomicCompareAndSwap* deprecation
+
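A caller-side note on the CAS bridge above: OSAtomicCompareAndSwap* reports only a bool, while the C11-style __tsan_atomic*_compare_exchange_strong also writes the observed value through its expected-value pointer; the macro passes a pointer to the by-value old_value copy and discards the update. A minimal sketch of the same bridge, with the plain __atomic builtin standing in for the TSan entry point:

#include <cstdint>
#include <cstdio>

// Hypothetical standalone analogue of the interceptor body: on failure the
// builtin writes the observed value into the local old_value copy, but the
// OSAtomic-style API reports only the bool and drops that copy.
static bool CompareAndSwap32(int32_t old_value, int32_t new_value,
                             volatile int32_t *ptr) {
  return __atomic_compare_exchange_n(ptr, &old_value, new_value,
                                     /*weak=*/false, __ATOMIC_RELAXED,
                                     __ATOMIC_RELAXED);
}

int main() {
  volatile int32_t x = 1;
  printf("%d\n", CompareAndSwap32(1, 2, &x));  // 1: swapped, x is now 2
  printf("%d\n", CompareAndSwap32(1, 3, &x));  // 0: x was 2, not 1
}
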
+# define OSATOMIC_INTERCEPTOR_BITOP(f, op, clear, mo) \
+ TSAN_INTERCEPTOR(bool, f, uint32_t n, volatile void *ptr) { \
+ SCOPED_TSAN_INTERCEPTOR(f, n, ptr); \
+ volatile char *byte_ptr = ((volatile char *)ptr) + (n >> 3); \
+ char bit = 0x80u >> (n & 7); \
+ char mask = clear ? ~bit : bit; \
+ char orig_byte = op((volatile a8 *)byte_ptr, mask, mo); \
+ return orig_byte & bit; \
+ }
-#define OSATOMIC_INTERCEPTORS_BITOP(f, op, clear) \
- OSATOMIC_INTERCEPTOR_BITOP(f, op, clear, kMacOrderNonBarrier) \
- OSATOMIC_INTERCEPTOR_BITOP(f##Barrier, op, clear, kMacOrderBarrier)
+# define OSATOMIC_INTERCEPTORS_BITOP(f, op, clear) \
+ OSATOMIC_INTERCEPTOR_BITOP(f, op, clear, kMacOrderNonBarrier) \
+ OSATOMIC_INTERCEPTOR_BITOP(f##Barrier, op, clear, kMacOrderBarrier)
+# pragma clang diagnostic push // OSAtomicTestAnd* deprecation
+# pragma clang diagnostic ignored "-Wdeprecated-declarations"
OSATOMIC_INTERCEPTORS_BITOP(OSAtomicTestAndSet, __tsan_atomic8_fetch_or, false)
OSATOMIC_INTERCEPTORS_BITOP(OSAtomicTestAndClear, __tsan_atomic8_fetch_and,
true)
+# pragma clang diagnostic pop // OSAtomicTestAnd* deprecation
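OSAtomicTestAndSet uses an unusual bit numbering: bit n lives in byte n >> 3, at the most-significant-first position 0x80 >> (n & 7). A self-contained sketch of that index arithmetic, assuming the __atomic builtin as a stand-in for __tsan_atomic8_fetch_or:

#include <cstdint>
#include <cstdio>

// TestAndSetBit is a hypothetical helper mirroring the macro body above.
static bool TestAndSetBit(uint32_t n, volatile void *ptr) {
  volatile char *byte_ptr = ((volatile char *)ptr) + (n >> 3);  // byte index
  char bit = 0x80u >> (n & 7);  // bit 0 is the MSB of the first byte
  char orig = __atomic_fetch_or(byte_ptr, bit, __ATOMIC_SEQ_CST);
  return orig & bit;  // prior state of the bit, as the OSAtomic API returns
}

int main() {
  char buf[2] = {0, 0};
  TestAndSetBit(9, buf);  // second byte, second-highest bit
  printf("%02x %02x\n", (unsigned char)buf[0], (unsigned char)buf[1]);  // 00 40
}
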
TSAN_INTERCEPTOR(void, OSAtomicEnqueue, OSQueueHead *list, void *item,
size_t offset) {
TSAN_INTERCEPTOR(void *, OSAtomicDequeue, OSQueueHead *list, size_t offset) {
SCOPED_TSAN_INTERCEPTOR(OSAtomicDequeue, list, offset);
void *item = REAL(OSAtomicDequeue)(list, offset);
- if (item) __tsan_acquire(item);
+ if (item)
+ __tsan_acquire(item);
return item;
}
// OSAtomicFifoEnqueue and OSAtomicFifoDequeue are only on OS X.
-#if !SANITIZER_IOS
+# if !SANITIZER_IOS
TSAN_INTERCEPTOR(void, OSAtomicFifoEnqueue, OSFifoQueueHead *list, void *item,
size_t offset) {
TSAN_INTERCEPTOR(void *, OSAtomicFifoDequeue, OSFifoQueueHead *list,
                 size_t offset) {
SCOPED_TSAN_INTERCEPTOR(OSAtomicFifoDequeue, list, offset);
void *item = REAL(OSAtomicFifoDequeue)(list, offset);
- if (item) __tsan_acquire(item);
+ if (item)
+ __tsan_acquire(item);
return item;
}
-#endif
+# endif
+
+// If `OSSPINLOCK_USE_INLINED=1` is set, the SDK headers don't declare these
+// as functions but as macros that call non-deprecated APIs. Undefine the
+// macros so they don't interfere with the interceptor machinery.
+# undef OSSpinLockLock
+# undef OSSpinLockTry
+# undef OSSpinLockUnlock
+
+# pragma clang diagnostic push // OSSpinLock* deprecation
+# pragma clang diagnostic ignored "-Wdeprecated-declarations"
TSAN_INTERCEPTOR(void, OSSpinLockLock, volatile OSSpinLock *lock) {
CHECK(!cur_thread()->is_dead);
Release(thr, pc, (uptr)lock);
REAL(OSSpinLockUnlock)(lock);
}
+# pragma clang diagnostic pop // OSSpinLock* deprecation
TSAN_INTERCEPTOR(void, os_lock_lock, void *lock) {
CHECK(!cur_thread()->is_dead);
Acquire(thr, pc, (uptr)lock);
}
+// os_unfair_lock_lock_with_flags was introduced in macOS 15
+# if defined(__MAC_15_0) || defined(__IPHONE_18_0) || defined(__TVOS_18_0) || \
+ defined(__VISIONOS_2_0) || defined(__WATCHOS_11_0)
+# pragma clang diagnostic push
+# pragma clang diagnostic ignored "-Wunguarded-availability-new"
+// We're just intercepting this; if it doesn't exist on the platform, the
+// process shouldn't have called it in the first place.
+TSAN_INTERCEPTOR(void, os_unfair_lock_lock_with_flags, os_unfair_lock_t lock,
+ os_unfair_lock_flags_t flags) {
+ if (!cur_thread()->is_inited || cur_thread()->is_dead) {
+ return REAL(os_unfair_lock_lock_with_flags)(lock, flags);
+ }
+ SCOPED_TSAN_INTERCEPTOR(os_unfair_lock_lock_with_flags, lock, flags);
+ REAL(os_unfair_lock_lock_with_flags)(lock, flags);
+ Acquire(thr, pc, (uptr)lock);
+}
+# pragma clang diagnostic pop
+# endif
+
TSAN_INTERCEPTOR(void, os_unfair_lock_lock_with_options, os_unfair_lock_t lock,
u32 options) {
if (!cur_thread()->is_inited || cur_thread()->is_dead) {
REAL(os_unfair_lock_unlock)(lock);
}
-#if defined(__has_include) && __has_include(<xpc/xpc.h>)
+# if defined(__has_include) && __has_include(<xpc/xpc.h>)
TSAN_INTERCEPTOR(void, xpc_connection_set_event_handler,
xpc_connection_t connection, xpc_handler_t handler) {
REAL(xpc_connection_cancel)(connection);
}
-#endif // #if defined(__has_include) && __has_include(<xpc/xpc.h>)
+# endif // #if defined(__has_include) && __has_include(<xpc/xpc.h>)
// Determines whether the Obj-C object pointer is a tagged pointer. Tagged
// pointers encode the object data directly in their pointer bits and do not
Map::Handle h(&Addresses, addr);
if (h.created()) {
ThreadIgnoreBegin(thr, pc);
- *h = (uptr) user_alloc(thr, pc, /*size=*/1);
+ *h = (uptr)user_alloc(thr, pc, /*size=*/1);
ThreadIgnoreEnd(thr);
}
return *h;
TSAN_INTERCEPTOR(int, objc_sync_enter, id obj) {
SCOPED_TSAN_INTERCEPTOR(objc_sync_enter, obj);
- if (!obj) return REAL(objc_sync_enter)(obj);
+ if (!obj)
+ return REAL(objc_sync_enter)(obj);
uptr addr = SyncAddressForObjCObject(obj, thr, pc);
MutexPreLock(thr, pc, addr, MutexFlagWriteReentrant);
int result = REAL(objc_sync_enter)(obj);
TSAN_INTERCEPTOR(int, objc_sync_exit, id obj) {
SCOPED_TSAN_INTERCEPTOR(objc_sync_exit, obj);
- if (!obj) return REAL(objc_sync_exit)(obj);
+ if (!obj)
+ return REAL(objc_sync_exit)(obj);
uptr addr = SyncAddressForObjCObject(obj, thr, pc);
MutexUnlock(thr, pc, addr);
int result = REAL(objc_sync_exit)(obj);
- if (result != OBJC_SYNC_SUCCESS) MutexInvalidAccess(thr, pc, addr);
+ if (result != OBJC_SYNC_SUCCESS)
+ MutexInvalidAccess(thr, pc, addr);
return result;
}
// On macOS, libc++ is always linked dynamically, so intercepting works the
// usual way.
-#define STDCXX_INTERCEPTOR TSAN_INTERCEPTOR
+# define STDCXX_INTERCEPTOR TSAN_INTERCEPTOR
namespace {
struct fake_shared_weak_count {
// sanitizer_common/sanitizer_common_interceptors.inc
//===----------------------------------------------------------------------===//
+#include <stdarg.h>
+
+#include "interception/interception.h"
#include "sanitizer_common/sanitizer_allocator_dlsym.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_linux.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_platform_interceptors.h"
#include "sanitizer_common/sanitizer_platform_limits_netbsd.h"
#include "sanitizer_common/sanitizer_platform_limits_posix.h"
#include "sanitizer_common/sanitizer_posix.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"
-#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_vector.h"
+#include "tsan_fd.h"
+#if SANITIZER_APPLE && !SANITIZER_GO
+# include "tsan_flags.h"
+#endif
#include "tsan_interceptors.h"
#include "tsan_interface.h"
+#include "tsan_mman.h"
#include "tsan_platform.h"
-#include "tsan_suppressions.h"
#include "tsan_rtl.h"
-#include "tsan_mman.h"
-#include "tsan_fd.h"
-
-#include <stdarg.h>
+#include "tsan_suppressions.h"
using namespace __tsan;
};
#endif
-#if defined(__x86_64__) || defined(__mips__) || SANITIZER_PPC64V1 || \
- defined(__s390x__)
-#define PTHREAD_ABI_BASE "GLIBC_2.3.2"
-#elif defined(__aarch64__) || SANITIZER_PPC64V2
-#define PTHREAD_ABI_BASE "GLIBC_2.17"
-#elif SANITIZER_LOONGARCH64
-#define PTHREAD_ABI_BASE "GLIBC_2.36"
-#elif SANITIZER_RISCV64
-# define PTHREAD_ABI_BASE "GLIBC_2.27"
-#endif
-
extern "C" int pthread_attr_init(void *attr);
extern "C" int pthread_attr_destroy(void *attr);
DECLARE_REAL(int, pthread_attr_getdetachstate, void *, void *)
SignalDesc pending_signals[kSigCount];
// emptyset and oldset are too big for stack.
__sanitizer_sigset_t emptyset;
- __sanitizer_sigset_t oldset;
+ __sanitizer::Vector<__sanitizer_sigset_t> oldset;
};
void EnterBlockingFunc(ThreadState *thr) {
}
#define TSAN_INTERCEPT(func) INTERCEPT_FUNCTION(func)
-#if SANITIZER_FREEBSD || SANITIZER_NETBSD
-# define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION(func)
-#else
-# define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION_VER(func, ver)
-#endif
#if SANITIZER_FREEBSD
# define TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(func) \
INTERCEPT_FUNCTION(_pthread_##func)
buf->shadow_stack_pos = thr->shadow_stack_pos;
ThreadSignalContext *sctx = SigCtx(thr);
buf->int_signal_send = sctx ? sctx->int_signal_send : 0;
+ buf->oldset_stack_size = sctx ? sctx->oldset.Size() : 0;
buf->in_blocking_func = atomic_load(&thr->in_blocking_func, memory_order_relaxed);
buf->in_signal_handler = atomic_load(&thr->in_signal_handler,
memory_order_relaxed);
while (thr->shadow_stack_pos > buf->shadow_stack_pos)
FuncExit(thr);
ThreadSignalContext *sctx = SigCtx(thr);
- if (sctx)
+ if (sctx) {
sctx->int_signal_send = buf->int_signal_send;
+ while (sctx->oldset.Size() > buf->oldset_stack_size)
+ sctx->oldset.PopBack();
+ }
atomic_store(&thr->in_blocking_func, buf->in_blocking_func,
memory_order_relaxed);
atomic_store(&thr->in_signal_handler, buf->in_signal_handler,
user_free(thr, pc, p);
}
+# if SANITIZER_INTERCEPT_FREE_SIZED
+TSAN_INTERCEPTOR(void, free_sized, void *p, uptr size) {
+ if (UNLIKELY(!p))
+ return;
+ if (in_symbolizer())
+ return InternalFree(p);
+ if (DlsymAlloc::PointerIsMine(p))
+ return DlsymAlloc::Free(p);
+ invoke_free_hook(p);
+ SCOPED_INTERCEPTOR_RAW(free_sized, p, size);
+ user_free(thr, pc, p);
+}
+# define TSAN_MAYBE_INTERCEPT_FREE_SIZED INTERCEPT_FUNCTION(free_sized)
+# else
+# define TSAN_MAYBE_INTERCEPT_FREE_SIZED
+# endif
+
+# if SANITIZER_INTERCEPT_FREE_ALIGNED_SIZED
+TSAN_INTERCEPTOR(void, free_aligned_sized, void *p, uptr alignment, uptr size) {
+ if (UNLIKELY(!p))
+ return;
+ if (in_symbolizer())
+ return InternalFree(p);
+ if (DlsymAlloc::PointerIsMine(p))
+ return DlsymAlloc::Free(p);
+ invoke_free_hook(p);
+ SCOPED_INTERCEPTOR_RAW(free_aligned_sized, p, alignment, size);
+ user_free(thr, pc, p);
+}
+# define TSAN_MAYBE_INTERCEPT_FREE_ALIGNED_SIZED \
+ INTERCEPT_FUNCTION(free_aligned_sized)
+# else
+# define TSAN_MAYBE_INTERCEPT_FREE_ALIGNED_SIZED
+# endif
+
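These two interceptors cover the C23 sized-deallocation functions. A minimal caller-side sketch of the pairing they observe; free_sized is declared by hand here for illustration, so linking this requires a libc that actually provides it:

#include <cstdlib>

// C23 declaration written out because most toolchains do not expose it yet.
extern "C" void free_sized(void *p, size_t size);

void demo() {
  void *p = malloc(64);
  // The size must match the original request; under TSan the interceptor
  // routes this through the same user_free path as plain free().
  free_sized(p, 64);
}
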
TSAN_INTERCEPTOR(void, cfree, void *p) {
if (UNLIKELY(!p))
return;
SCOPED_INTERCEPTOR_RAW(malloc_usable_size, p);
return user_alloc_usable_size(p);
}
+#else
+# define TSAN_MAYBE_INTERCEPT_FREE_SIZED
+# define TSAN_MAYBE_INTERCEPT_FREE_ALIGNED_SIZED
#endif
TSAN_INTERCEPTOR(char *, strcpy, char *dst, const char *src) {
static int guard_acquire(ThreadState *thr, uptr pc, atomic_uint32_t *g,
bool blocking_hooks = true) {
- if (blocking_hooks)
- OnPotentiallyBlockingRegionBegin();
- auto on_exit = at_scope_exit([blocking_hooks] {
- if (blocking_hooks)
+ bool in_potentially_blocking_region = false;
+ auto on_exit = at_scope_exit([&] {
+ if (in_potentially_blocking_region)
OnPotentiallyBlockingRegionEnd();
});
} else {
if ((cmp & kGuardWaiter) ||
atomic_compare_exchange_strong(g, &cmp, cmp | kGuardWaiter,
- memory_order_relaxed))
+ memory_order_relaxed)) {
+ if (blocking_hooks && !in_potentially_blocking_region) {
+ in_potentially_blocking_region = true;
+ OnPotentiallyBlockingRegionBegin();
+ }
FutexWait(g, cmp | kGuardWaiter);
+ }
}
}
}
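The reworked guard_acquire defers OnPotentiallyBlockingRegionBegin until the thread is actually about to wait, and the scope-exit lambda captures by reference so the matching End fires only if Begin ran. A generic sketch of that deferred begin/end pattern, with at_scope_exit re-implemented here as a stand-in for the sanitizer_common utility:

#include <cstdio>
#include <utility>

template <typename F>
struct ScopeExit {
  F fn;
  ~ScopeExit() { fn(); }
};

// Relies on C++17 guaranteed copy elision so the destructor runs exactly once.
template <typename F>
static ScopeExit<F> at_scope_exit(F fn) {
  return ScopeExit<F>{std::move(fn)};
}

void maybe_blocking(bool will_wait) {
  bool began = false;
  auto guard = at_scope_exit([&] {
    if (began)  // decided after the guard was constructed
      puts("end blocking region");
  });
  if (will_wait) {
    began = true;  // flipped only on the path that actually blocks
    puts("begin blocking region");
    // ... FutexWait(...) would run here ...
  }
}
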
&thr->signal_ctx, memory_order_relaxed);
if (sctx) {
atomic_store(&thr->signal_ctx, 0, memory_order_relaxed);
+ sctx->oldset.Reset();
UnmapOrDie(sctx, sizeof(*sctx));
}
}
TSAN_INTERCEPTOR(int, pthread_join, void *th, void **ret) {
SCOPED_INTERCEPTOR_RAW(pthread_join, th, ret);
+#if SANITIZER_ANDROID
+ {
+ // In Bionic, if the target thread has already exited when pthread_detach is
+ // called, pthread_detach will call pthread_join internally to clean it up.
+ // In that case, the thread has already been consumed by the pthread_detach
+ // interceptor.
+ Tid tid = ctx->thread_registry.FindThread(
+ [](ThreadContextBase* tctx, void* arg) {
+ return tctx->user_id == (uptr)arg;
+ },
+ th);
+ if (tid == kInvalidTid) {
+ return REAL(pthread_join)(th, ret);
+ }
+ }
+#endif
Tid tid = ThreadConsumeTid(thr, pc, (uptr)th);
ThreadIgnoreBegin(thr, pc);
int res = BLOCK_REAL(pthread_join)(th, ret);
TSAN_INTERCEPTOR(int, pthread_once, void *o, void (*f)()) {
SCOPED_INTERCEPTOR_RAW(pthread_once, o, f);
+#if SANITIZER_APPLE && !SANITIZER_GO
+ if (flags()->lock_during_write != kLockDuringAllWrites &&
+ cur_thread_init()->in_internal_write_call) {
+ // This is needed to make it through process launch without hanging
+ f();
+ return 0;
+ }
+#endif
if (o == 0 || f == 0)
return errno_EINVAL;
atomic_uint32_t *a;
  // StackTrace::GetNextInstructionPc(pc) is used because a return address is
  // expected; OutputReport() will undo this.
ObtainCurrentStack(thr, StackTrace::GetNextInstructionPc(pc), &stack);
- ThreadRegistryLock l(&ctx->thread_registry);
- ScopedReport rep(ReportTypeErrnoInSignal);
- rep.SetSigNum(sig);
- if (!IsFiredSuppression(ctx, ReportTypeErrnoInSignal, stack)) {
- rep.AddStack(stack, true);
- OutputReport(thr, rep);
+ // Use alloca, because malloc during signal handling can deadlock
+ ScopedReport *rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
+ bool suppressed;
+ // Open a new scope, as Apple platforms require the locks below to be
+ // released before symbolizing in order to avoid a deadlock
+ {
+ ThreadRegistryLock l(&ctx->thread_registry);
+ new (rep) ScopedReport(ReportTypeErrnoInSignal);
+ rep->SetSigNum(sig);
+ suppressed = IsFiredSuppression(ctx, ReportTypeErrnoInSignal, stack);
+ if (!suppressed)
+ rep->AddStack(stack, true);
+#if SANITIZER_APPLE
+ } // Close this scope to release the locks before writing the report
+#endif
+ if (!suppressed)
+ OutputReport(thr, *rep);
+
+ // Need to destroy this manually because it was created with placement new
+ rep->~ScopedReport();
+#if !SANITIZER_APPLE
}
+#endif
}
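The report object is now built in alloca'd stack memory with placement new, since malloc from a signal handler can deadlock, and the destructor then has to be invoked by hand. A minimal sketch of the pattern with a stand-in Report type (the printf/puts calls are illustration only and are not async-signal-safe themselves):

#include <cstdio>
#include <new>  // placement new

struct Report {
  explicit Report(int sig) { printf("report for signal %d\n", sig); }
  ~Report() { puts("report destroyed"); }
};

void emit(int sig) {
  void *mem = __builtin_alloca(sizeof(Report));  // no heap allocation
  Report *rep = new (mem) Report(sig);
  // ... add stacks, output the report ...
  rep->~Report();  // placement new means nothing else destroys it
}
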
static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire,
return;
atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed);
internal_sigfillset(&sctx->emptyset);
- int res = REAL(pthread_sigmask)(SIG_SETMASK, &sctx->emptyset, &sctx->oldset);
+ __sanitizer_sigset_t *oldset = sctx->oldset.PushBack();
+ int res = REAL(pthread_sigmask)(SIG_SETMASK, &sctx->emptyset, oldset);
CHECK_EQ(res, 0);
for (int sig = 0; sig < kSigCount; sig++) {
SignalDesc *signal = &sctx->pending_signals[sig];
&signal->ctx);
}
}
- res = REAL(pthread_sigmask)(SIG_SETMASK, &sctx->oldset, 0);
+ res = REAL(pthread_sigmask)(SIG_SETMASK, oldset, 0);
CHECK_EQ(res, 0);
+ sctx->oldset.PopBack();
atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed);
}
}
#endif
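Saving the previous signal mask into a per-call stack slot, rather than the single sctx->oldset field it replaces, makes the block/restore sequence safe against re-entry, e.g. when another signal arrives while pending signals are being processed. A sketch of the same push/pop discipline, with std::vector standing in for __sanitizer::Vector (illustration only; std::vector is itself not async-signal-safe):

#include <pthread.h>
#include <signal.h>

#include <vector>

static std::vector<sigset_t> oldset_stack;

void with_signals_blocked(void (*body)()) {
  sigset_t all;
  sigfillset(&all);
  oldset_stack.emplace_back();  // each nesting level gets its own slot
  pthread_sigmask(SIG_SETMASK, &all, &oldset_stack.back());
  body();  // may recurse into with_signals_blocked without clobbering us
  pthread_sigmask(SIG_SETMASK, &oldset_stack.back(), nullptr);
  oldset_stack.pop_back();
}
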
-#if SANITIZER_LINUX
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+// Bionic's pthread_create internally calls clone; with the CLONE_THREAD flag
+// set, clone creates a new thread rather than a new process. As a workaround
+// on Android, we do not intercept clone, which avoids the problem in most
+// scenarios.
TSAN_INTERCEPTOR(int, clone, int (*fn)(void *), void *stack, int flags,
void *arg, int *parent_tid, void *tls, pid_t *child_tid) {
SCOPED_INTERCEPTOR_RAW(clone, fn, stack, flags, arg, parent_tid, tls,
#endif
#if SANITIZER_FREEBSD
-TSAN_INTERCEPTOR(void, thr_exit, tid_t *state) {
+TSAN_INTERCEPTOR(void, thr_exit, ThreadID *state) {
SCOPED_TSAN_INTERCEPTOR(thr_exit, state);
DestroyThreadState();
REAL(thr_exit(state));
}
-#define TSAN_MAYBE_INTERCEPT_THR_EXIT TSAN_INTERCEPT(thr_exit)
+# define TSAN_MAYBE_INTERCEPT_THR_EXIT TSAN_INTERCEPT(thr_exit)
#else
#define TSAN_MAYBE_INTERCEPT_THR_EXIT
#endif
TSAN_INTERCEPT(realloc);
TSAN_INTERCEPT(reallocarray);
TSAN_INTERCEPT(free);
+ TSAN_MAYBE_INTERCEPT_FREE_SIZED;
+ TSAN_MAYBE_INTERCEPT_FREE_ALIGNED_SIZED;
TSAN_INTERCEPT(cfree);
TSAN_INTERCEPT(munmap);
TSAN_MAYBE_INTERCEPT_MEMALIGN;
TSAN_INTERCEPT(pthread_timedjoin_np);
#endif
- TSAN_INTERCEPT_VER(pthread_cond_init, PTHREAD_ABI_BASE);
- TSAN_INTERCEPT_VER(pthread_cond_signal, PTHREAD_ABI_BASE);
- TSAN_INTERCEPT_VER(pthread_cond_broadcast, PTHREAD_ABI_BASE);
- TSAN_INTERCEPT_VER(pthread_cond_wait, PTHREAD_ABI_BASE);
- TSAN_INTERCEPT_VER(pthread_cond_timedwait, PTHREAD_ABI_BASE);
- TSAN_INTERCEPT_VER(pthread_cond_destroy, PTHREAD_ABI_BASE);
+  // In glibc versions older than 2.36, dlsym(RTLD_NEXT, "pthread_cond_init")
+  // may return an outdated symbol version (max(2.2, base_version)) if the
+  // port predates glibc 2.3.2, when the new pthread_cond_t was introduced.
+#if SANITIZER_GLIBC && !__GLIBC_PREREQ(2, 36) && \
+ (defined(__x86_64__) || defined(__mips__) || SANITIZER_PPC64V1 || \
+ defined(__s390x__))
+ INTERCEPT_FUNCTION_VER(pthread_cond_init, "GLIBC_2.3.2");
+ INTERCEPT_FUNCTION_VER(pthread_cond_signal, "GLIBC_2.3.2");
+ INTERCEPT_FUNCTION_VER(pthread_cond_broadcast, "GLIBC_2.3.2");
+ INTERCEPT_FUNCTION_VER(pthread_cond_wait, "GLIBC_2.3.2");
+ INTERCEPT_FUNCTION_VER(pthread_cond_timedwait, "GLIBC_2.3.2");
+ INTERCEPT_FUNCTION_VER(pthread_cond_destroy, "GLIBC_2.3.2");
+#else
+ INTERCEPT_FUNCTION(pthread_cond_init);
+ INTERCEPT_FUNCTION(pthread_cond_signal);
+ INTERCEPT_FUNCTION(pthread_cond_broadcast);
+ INTERCEPT_FUNCTION(pthread_cond_wait);
+ INTERCEPT_FUNCTION(pthread_cond_timedwait);
+ INTERCEPT_FUNCTION(pthread_cond_destroy);
+#endif
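INTERCEPT_FUNCTION_VER pins the interceptor's real function to a specific versioned symbol; on glibc that is the distinction between dlsym, which returns whatever version the dynamic linker considers the default, and dlvsym, which asks for one by name. A hedged sketch of the difference (dlvsym and RTLD_NEXT are GNU extensions; the void* parameter types are simplifications):

#include <dlfcn.h>  // needs _GNU_SOURCE, which g++ on glibc predefines

using cond_wait_fn = int (*)(void *cond, void *mutex);

// Ask for the post-2.3.2 symbol explicitly, falling back to the default
// version; this mirrors what the versioned interception above guards against
// on old glibc ports that still export both versions.
static cond_wait_fn resolve_cond_wait() {
  if (void *v = dlvsym(RTLD_NEXT, "pthread_cond_wait", "GLIBC_2.3.2"))
    return reinterpret_cast<cond_wait_fn>(v);
  return reinterpret_cast<cond_wait_fn>(dlsym(RTLD_NEXT, "pthread_cond_wait"));
}
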
TSAN_MAYBE_PTHREAD_COND_CLOCKWAIT;
TSAN_INTERCEPT(fork);
TSAN_INTERCEPT(vfork);
-#if SANITIZER_LINUX
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
TSAN_INTERCEPT(clone);
#endif
#if !SANITIZER_ANDROID
TSAN_INTERCEPT(dl_iterate_phdr);
#endif
+
+ // Symbolization indirectly calls dl_iterate_phdr
+ ready_to_symbolize = true;
+
TSAN_MAYBE_INTERCEPT_ON_EXIT;
TSAN_INTERCEPT(__cxa_atexit);
TSAN_INTERCEPT(_exit);
#define TSAN_INTERFACE_H
#include <sanitizer_common/sanitizer_internal_defs.h>
+using __sanitizer::ThreadID;
using __sanitizer::uptr;
-using __sanitizer::tid_t;
// This header should NOT include any other headers.
// All functions in this header are extern "C" and start with __tsan_.
// Returns information about threads included in the report.
SANITIZER_INTERFACE_ATTRIBUTE
-int __tsan_get_report_thread(void *report, uptr idx, int *tid, tid_t *os_id,
+int __tsan_get_report_thread(void *report, uptr idx, int *tid, ThreadID *os_id,
int *running, const char **name, int *parent_tid,
void **trace, uptr trace_size);
// Returns the allocation stack for a heap pointer.
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_get_alloc_stack(uptr addr, uptr *trace, uptr size, int *thread_id,
- tid_t *os_id);
+ ThreadID *os_id);
#endif // SANITIZER_GO
namespace __tsan {
// These should match declarations from public tsan_interface_atomic.h header.
-typedef unsigned char a8;
+typedef unsigned char a8;
typedef unsigned short a16;
-typedef unsigned int a32;
+typedef unsigned int a32;
typedef unsigned long long a64;
-#if !SANITIZER_GO && (defined(__SIZEOF_INT128__) \
- || (__clang_major__ * 100 + __clang_minor__ >= 302)) && \
+#if !SANITIZER_GO && \
+ (defined(__SIZEOF_INT128__) || \
+ (__clang_major__ * 100 + __clang_minor__ >= 302)) && \
!defined(__mips64) && !defined(__s390x__)
__extension__ typedef __int128 a128;
-# define __TSAN_HAS_INT128 1
+# define __TSAN_HAS_INT128 1
#else
-# define __TSAN_HAS_INT128 0
+# define __TSAN_HAS_INT128 0
#endif
// Part of ABI, do not change.
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
-a8 __tsan_atomic8_load(const volatile a8 *a, morder mo);
+a8 __tsan_atomic8_load(const volatile a8 *a, int mo);
SANITIZER_INTERFACE_ATTRIBUTE
-a16 __tsan_atomic16_load(const volatile a16 *a, morder mo);
+a16 __tsan_atomic16_load(const volatile a16 *a, int mo);
SANITIZER_INTERFACE_ATTRIBUTE
-a32 __tsan_atomic32_load(const volatile a32 *a, morder mo);
+a32 __tsan_atomic32_load(const volatile a32 *a, int mo);
SANITIZER_INTERFACE_ATTRIBUTE
-a64 __tsan_atomic64_load(const volatile a64 *a, morder mo);
+a64 __tsan_atomic64_load(const volatile a64 *a, int mo);
#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
-a128 __tsan_atomic128_load(const volatile a128 *a, morder mo);
+a128 __tsan_atomic128_load(const volatile a128 *a, int mo);
#endif
SANITIZER_INTERFACE_ATTRIBUTE
-void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo);
+void __tsan_atomic8_store(volatile a8 *a, a8 v, int mo);
SANITIZER_INTERFACE_ATTRIBUTE
-void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo);
+void __tsan_atomic16_store(volatile a16 *a, a16 v, int mo);
SANITIZER_INTERFACE_ATTRIBUTE
-void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo);
+void __tsan_atomic32_store(volatile a32 *a, a32 v, int mo);
SANITIZER_INTERFACE_ATTRIBUTE
-void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo);
+void __tsan_atomic64_store(volatile a64 *a, a64 v, int mo);
#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
-void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo);
+void __tsan_atomic128_store(volatile a128 *a, a128 v, int mo);
#endif
SANITIZER_INTERFACE_ATTRIBUTE
-a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo);
+a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, int mo);
SANITIZER_INTERFACE_ATTRIBUTE
-a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo);
+a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, int mo);
SANITIZER_INTERFACE_ATTRIBUTE
-a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo);
+a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, int mo);
SANITIZER_INTERFACE_ATTRIBUTE
-a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo);
+a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, int mo);
#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
-a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo);
+a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, int mo);
#endif
SANITIZER_INTERFACE_ATTRIBUTE
-a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo);
+a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, int mo);
SANITIZER_INTERFACE_ATTRIBUTE
-a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo);
+a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, int mo);
SANITIZER_INTERFACE_ATTRIBUTE
-a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo);
+a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, int mo);
SANITIZER_INTERFACE_ATTRIBUTE
-a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo);
+a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, int mo);
#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
-a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo);
+a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, int mo);
#endif
SANITIZER_INTERFACE_ATTRIBUTE
-a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo);
+a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, int mo);
SANITIZER_INTERFACE_ATTRIBUTE
-a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo);
+a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, int mo);
SANITIZER_INTERFACE_ATTRIBUTE
-a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo);
+a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, int mo);
SANITIZER_INTERFACE_ATTRIBUTE
-a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo);
+a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, int mo);
#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
-a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo);
+a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, int mo);
#endif
SANITIZER_INTERFACE_ATTRIBUTE
-a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo);
+a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, int mo);
SANITIZER_INTERFACE_ATTRIBUTE
-a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo);
+a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, int mo);
SANITIZER_INTERFACE_ATTRIBUTE
-a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo);
+a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, int mo);
SANITIZER_INTERFACE_ATTRIBUTE
-a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo);
+a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, int mo);
#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
-a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo);
+a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, int mo);
#endif
SANITIZER_INTERFACE_ATTRIBUTE
-a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo);
+a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, int mo);
SANITIZER_INTERFACE_ATTRIBUTE
-a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo);
+a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, int mo);
SANITIZER_INTERFACE_ATTRIBUTE
-a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo);
+a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, int mo);
SANITIZER_INTERFACE_ATTRIBUTE
-a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo);
+a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, int mo);
#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
-a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo);
+a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, int mo);
#endif
SANITIZER_INTERFACE_ATTRIBUTE
-a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo);
+a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, int mo);
SANITIZER_INTERFACE_ATTRIBUTE
-a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo);
+a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, int mo);
SANITIZER_INTERFACE_ATTRIBUTE
-a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo);
+a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, int mo);
SANITIZER_INTERFACE_ATTRIBUTE
-a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo);
+a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, int mo);
#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
-a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo);
+a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, int mo);
#endif
SANITIZER_INTERFACE_ATTRIBUTE
-a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo);
+a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, int mo);
SANITIZER_INTERFACE_ATTRIBUTE
-a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo);
+a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, int mo);
SANITIZER_INTERFACE_ATTRIBUTE
-a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo);
+a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, int mo);
SANITIZER_INTERFACE_ATTRIBUTE
-a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo);
+a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, int mo);
#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
-a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo);
+a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, int mo);
#endif
SANITIZER_INTERFACE_ATTRIBUTE
-int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
- morder mo, morder fmo);
+int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v, int mo,
+ int fmo);
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
- morder mo, morder fmo);
+ int mo, int fmo);
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
- morder mo, morder fmo);
+ int mo, int fmo);
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
- morder mo, morder fmo);
+ int mo, int fmo);
#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
- morder mo, morder fmo);
+ int mo, int fmo);
#endif
SANITIZER_INTERFACE_ATTRIBUTE
-int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v, morder mo,
- morder fmo);
+int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v, int mo,
+ int fmo);
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
- morder mo, morder fmo);
+ int mo, int fmo);
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
- morder mo, morder fmo);
+ int mo, int fmo);
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
- morder mo, morder fmo);
+ int mo, int fmo);
#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
- morder mo, morder fmo);
+ int mo, int fmo);
#endif
SANITIZER_INTERFACE_ATTRIBUTE
-a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v, morder mo,
- morder fmo);
+a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v, int mo,
+ int fmo);
SANITIZER_INTERFACE_ATTRIBUTE
-a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
- morder mo, morder fmo);
+a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v, int mo,
+ int fmo);
SANITIZER_INTERFACE_ATTRIBUTE
-a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
- morder mo, morder fmo);
+a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v, int mo,
+ int fmo);
SANITIZER_INTERFACE_ATTRIBUTE
-a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
- morder mo, morder fmo);
+a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v, int mo,
+ int fmo);
#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
- morder mo, morder fmo);
+ int mo, int fmo);
#endif
SANITIZER_INTERFACE_ATTRIBUTE
-void __tsan_atomic_thread_fence(morder mo);
+void __tsan_atomic_thread_fence(int mo);
SANITIZER_INTERFACE_ATTRIBUTE
-void __tsan_atomic_signal_fence(morder mo);
+void __tsan_atomic_signal_fence(int mo);
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
}
static void ReportMutexHeldWrongContext(ThreadState *thr, uptr pc) {
- ThreadRegistryLock l(&ctx->thread_registry);
- ScopedReport rep(ReportTypeMutexHeldWrongContext);
- for (uptr i = 0; i < thr->mset.Size(); ++i) {
- MutexSet::Desc desc = thr->mset.Get(i);
- rep.AddMutex(desc.addr, desc.stack_id);
+ // Use alloca, because malloc during signal handling can deadlock
+ ScopedReport *rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
+ // Open a new scope, as Apple platforms require the locks below to be
+ // released before symbolizing in order to avoid a deadlock
+ {
+ ThreadRegistryLock l(&ctx->thread_registry);
+ new (rep) ScopedReport(ReportTypeMutexHeldWrongContext);
+ for (uptr i = 0; i < thr->mset.Size(); ++i) {
+ MutexSet::Desc desc = thr->mset.Get(i);
+ rep->AddMutex(desc.addr, desc.stack_id);
+ }
+ VarSizeStackTrace trace;
+ ObtainCurrentStack(thr, pc, &trace);
+ rep->AddStack(trace, true);
+#if SANITIZER_APPLE
+ } // Close this scope to release the locks
+#endif
+ OutputReport(thr, *rep);
+
+ // Need to destroy this manually because it was created with placement new
+ rep->~ScopedReport();
+#if !SANITIZER_APPLE
}
- VarSizeStackTrace trace;
- ObtainCurrentStack(thr, pc, &trace);
- rep.AddStack(trace, true);
- OutputReport(thr, rep);
+#endif
}
INTERFACE_ATTRIBUTE
// The following page contains more background information:
// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/
+#include "sanitizer_common/sanitizer_mutex.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
-#include "sanitizer_common/sanitizer_mutex.h"
#include "tsan_flags.h"
#include "tsan_interface.h"
#include "tsan_rtl.h"
#if SANITIZER_DEBUG
static bool IsLoadOrder(morder mo) {
- return mo == mo_relaxed || mo == mo_consume
- || mo == mo_acquire || mo == mo_seq_cst;
+ return mo == mo_relaxed || mo == mo_consume || mo == mo_acquire ||
+ mo == mo_seq_cst;
}
static bool IsStoreOrder(morder mo) {
}
static bool IsAcquireOrder(morder mo) {
- return mo == mo_consume || mo == mo_acquire
- || mo == mo_acq_rel || mo == mo_seq_cst;
+ return mo == mo_consume || mo == mo_acquire || mo == mo_acq_rel ||
+ mo == mo_seq_cst;
}
static bool IsAcqRelOrder(morder mo) {
return mo == mo_acq_rel || mo == mo_seq_cst;
}
-template<typename T> T func_xchg(volatile T *v, T op) {
+template <typename T>
+T func_xchg(volatile T *v, T op) {
T res = __sync_lock_test_and_set(v, op);
// __sync_lock_test_and_set does not contain full barrier.
__sync_synchronize();
return res;
}
-template<typename T> T func_add(volatile T *v, T op) {
+template <typename T>
+T func_add(volatile T *v, T op) {
return __sync_fetch_and_add(v, op);
}
-template<typename T> T func_sub(volatile T *v, T op) {
+template <typename T>
+T func_sub(volatile T *v, T op) {
return __sync_fetch_and_sub(v, op);
}
-template<typename T> T func_and(volatile T *v, T op) {
+template <typename T>
+T func_and(volatile T *v, T op) {
return __sync_fetch_and_and(v, op);
}
-template<typename T> T func_or(volatile T *v, T op) {
+template <typename T>
+T func_or(volatile T *v, T op) {
return __sync_fetch_and_or(v, op);
}
-template<typename T> T func_xor(volatile T *v, T op) {
+template <typename T>
+T func_xor(volatile T *v, T op) {
return __sync_fetch_and_xor(v, op);
}
-template<typename T> T func_nand(volatile T *v, T op) {
+template <typename T>
+T func_nand(volatile T *v, T op) {
// clang does not support __sync_fetch_and_nand.
T cmp = *v;
for (;;) {
}
}
-template<typename T> T func_cas(volatile T *v, T cmp, T xch) {
+template <typename T>
+T func_cas(volatile T *v, T cmp, T xch) {
return __sync_val_compare_and_swap(v, cmp, xch);
}
// Atomic ops are executed under tsan internal mutex,
// here we assume that the atomic variables are not accessed
// from non-instrumented code.
-#if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) && !SANITIZER_GO \
- && __TSAN_HAS_INT128
+#if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) && !SANITIZER_GO && \
+ __TSAN_HAS_INT128
a128 func_xchg(volatile a128 *v, a128 op) {
SpinMutexLock lock(&mutex128);
a128 cmp = *v;
static memory_order to_mo(morder mo) {
switch (mo) {
- case mo_relaxed: return memory_order_relaxed;
- case mo_consume: return memory_order_consume;
- case mo_acquire: return memory_order_acquire;
- case mo_release: return memory_order_release;
- case mo_acq_rel: return memory_order_acq_rel;
- case mo_seq_cst: return memory_order_seq_cst;
+ case mo_relaxed:
+ return memory_order_relaxed;
+ case mo_consume:
+ return memory_order_consume;
+ case mo_acquire:
+ return memory_order_acquire;
+ case mo_release:
+ return memory_order_release;
+ case mo_acq_rel:
+ return memory_order_acq_rel;
+ case mo_seq_cst:
+ return memory_order_seq_cst;
}
DCHECK(0);
return memory_order_seq_cst;
}
-template<typename T>
-static T NoTsanAtomicLoad(const volatile T *a, morder mo) {
- return atomic_load(to_atomic(a), to_mo(mo));
-}
-
-#if __TSAN_HAS_INT128 && !SANITIZER_GO
-static a128 NoTsanAtomicLoad(const volatile a128 *a, morder mo) {
- SpinMutexLock lock(&mutex128);
- return *a;
-}
-#endif
-
-template <typename T>
-static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a, morder mo) {
- DCHECK(IsLoadOrder(mo));
- // This fast-path is critical for performance.
- // Assume the access is atomic.
- if (!IsAcquireOrder(mo)) {
- MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(),
- kAccessRead | kAccessAtomic);
- return NoTsanAtomicLoad(a, mo);
- }
- // Don't create sync object if it does not exist yet. For example, an atomic
- // pointer is initialized to nullptr and then periodically acquire-loaded.
- T v = NoTsanAtomicLoad(a, mo);
- SyncVar *s = ctx->metamap.GetSyncIfExists((uptr)a);
- if (s) {
- SlotLocker locker(thr);
- ReadLock lock(&s->mtx);
- thr->clock.Acquire(s->clock);
- // Re-read under sync mutex because we need a consistent snapshot
- // of the value and the clock we acquire.
- v = NoTsanAtomicLoad(a, mo);
- }
- MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessRead | kAccessAtomic);
- return v;
-}
-
-template<typename T>
-static void NoTsanAtomicStore(volatile T *a, T v, morder mo) {
- atomic_store(to_atomic(a), v, to_mo(mo));
-}
-
-#if __TSAN_HAS_INT128 && !SANITIZER_GO
-static void NoTsanAtomicStore(volatile a128 *a, a128 v, morder mo) {
- SpinMutexLock lock(&mutex128);
- *a = v;
-}
-#endif
-
-template <typename T>
-static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
- morder mo) {
- DCHECK(IsStoreOrder(mo));
- MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessWrite | kAccessAtomic);
- // This fast-path is critical for performance.
- // Assume the access is atomic.
- // Strictly saying even relaxed store cuts off release sequence,
- // so must reset the clock.
- if (!IsReleaseOrder(mo)) {
- NoTsanAtomicStore(a, v, mo);
- return;
- }
- SlotLocker locker(thr);
- {
- auto s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false);
- Lock lock(&s->mtx);
- thr->clock.ReleaseStore(&s->clock);
- NoTsanAtomicStore(a, v, mo);
- }
- IncrementEpoch(thr);
-}
+namespace {
template <typename T, T (*F)(volatile T *v, T op)>
static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
return v;
}
-template<typename T>
-static T NoTsanAtomicExchange(volatile T *a, T v, morder mo) {
- return func_xchg(a, v);
-}
+struct OpLoad {
+ template <typename T>
+ static T NoTsanAtomic(morder mo, const volatile T *a) {
+ return atomic_load(to_atomic(a), to_mo(mo));
+ }
-template<typename T>
-static T NoTsanAtomicFetchAdd(volatile T *a, T v, morder mo) {
- return func_add(a, v);
-}
+#if __TSAN_HAS_INT128 && !SANITIZER_GO
+ static a128 NoTsanAtomic(morder mo, const volatile a128 *a) {
+ SpinMutexLock lock(&mutex128);
+ return *a;
+ }
+#endif
-template<typename T>
-static T NoTsanAtomicFetchSub(volatile T *a, T v, morder mo) {
- return func_sub(a, v);
-}
+ template <typename T>
+ static T Atomic(ThreadState *thr, uptr pc, morder mo, const volatile T *a) {
+ DCHECK(IsLoadOrder(mo));
+ // This fast-path is critical for performance.
+ // Assume the access is atomic.
+ if (!IsAcquireOrder(mo)) {
+ MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(),
+ kAccessRead | kAccessAtomic);
+ return NoTsanAtomic(mo, a);
+ }
+ // Don't create sync object if it does not exist yet. For example, an atomic
+ // pointer is initialized to nullptr and then periodically acquire-loaded.
+ T v = NoTsanAtomic(mo, a);
+ SyncVar *s = ctx->metamap.GetSyncIfExists((uptr)a);
+ if (s) {
+ SlotLocker locker(thr);
+ ReadLock lock(&s->mtx);
+ thr->clock.Acquire(s->clock);
+ // Re-read under sync mutex because we need a consistent snapshot
+ // of the value and the clock we acquire.
+ v = NoTsanAtomic(mo, a);
+ }
+ MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(),
+ kAccessRead | kAccessAtomic);
+ return v;
+ }
+};
-template<typename T>
-static T NoTsanAtomicFetchAnd(volatile T *a, T v, morder mo) {
- return func_and(a, v);
-}
+struct OpStore {
+ template <typename T>
+ static void NoTsanAtomic(morder mo, volatile T *a, T v) {
+ atomic_store(to_atomic(a), v, to_mo(mo));
+ }
-template<typename T>
-static T NoTsanAtomicFetchOr(volatile T *a, T v, morder mo) {
- return func_or(a, v);
-}
+#if __TSAN_HAS_INT128 && !SANITIZER_GO
+ static void NoTsanAtomic(morder mo, volatile a128 *a, a128 v) {
+ SpinMutexLock lock(&mutex128);
+ *a = v;
+ }
+#endif
-template<typename T>
-static T NoTsanAtomicFetchXor(volatile T *a, T v, morder mo) {
- return func_xor(a, v);
-}
+ template <typename T>
+ static void Atomic(ThreadState *thr, uptr pc, morder mo, volatile T *a, T v) {
+ DCHECK(IsStoreOrder(mo));
+ MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(),
+ kAccessWrite | kAccessAtomic);
+ // This fast-path is critical for performance.
+ // Assume the access is atomic.
+    // Strictly speaking, even a relaxed store cuts off the release sequence,
+    // so we must reset the clock.
+ if (!IsReleaseOrder(mo)) {
+ NoTsanAtomic(mo, a, v);
+ return;
+ }
+ SlotLocker locker(thr);
+ {
+ auto s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false);
+ Lock lock(&s->mtx);
+ thr->clock.ReleaseStore(&s->clock);
+ NoTsanAtomic(mo, a, v);
+ }
+ IncrementEpoch(thr);
+ }
+};
-template<typename T>
-static T NoTsanAtomicFetchNand(volatile T *a, T v, morder mo) {
- return func_nand(a, v);
-}
+struct OpExchange {
+ template <typename T>
+ static T NoTsanAtomic(morder mo, volatile T *a, T v) {
+ return func_xchg(a, v);
+ }
+ template <typename T>
+ static T Atomic(ThreadState *thr, uptr pc, morder mo, volatile T *a, T v) {
+ return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo);
+ }
+};
-template<typename T>
-static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v,
- morder mo) {
- return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo);
-}
+struct OpFetchAdd {
+ template <typename T>
+ static T NoTsanAtomic(morder mo, volatile T *a, T v) {
+ return func_add(a, v);
+ }
-template<typename T>
-static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v,
- morder mo) {
- return AtomicRMW<T, func_add>(thr, pc, a, v, mo);
-}
+ template <typename T>
+ static T Atomic(ThreadState *thr, uptr pc, morder mo, volatile T *a, T v) {
+ return AtomicRMW<T, func_add>(thr, pc, a, v, mo);
+ }
+};
-template<typename T>
-static T AtomicFetchSub(ThreadState *thr, uptr pc, volatile T *a, T v,
- morder mo) {
- return AtomicRMW<T, func_sub>(thr, pc, a, v, mo);
-}
+struct OpFetchSub {
+ template <typename T>
+ static T NoTsanAtomic(morder mo, volatile T *a, T v) {
+ return func_sub(a, v);
+ }
-template<typename T>
-static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v,
- morder mo) {
- return AtomicRMW<T, func_and>(thr, pc, a, v, mo);
-}
+ template <typename T>
+ static T Atomic(ThreadState *thr, uptr pc, morder mo, volatile T *a, T v) {
+ return AtomicRMW<T, func_sub>(thr, pc, a, v, mo);
+ }
+};
-template<typename T>
-static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v,
- morder mo) {
- return AtomicRMW<T, func_or>(thr, pc, a, v, mo);
-}
+struct OpFetchAnd {
+ template <typename T>
+ static T NoTsanAtomic(morder mo, volatile T *a, T v) {
+ return func_and(a, v);
+ }
-template<typename T>
-static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v,
- morder mo) {
- return AtomicRMW<T, func_xor>(thr, pc, a, v, mo);
-}
+ template <typename T>
+ static T Atomic(ThreadState *thr, uptr pc, morder mo, volatile T *a, T v) {
+ return AtomicRMW<T, func_and>(thr, pc, a, v, mo);
+ }
+};
-template<typename T>
-static T AtomicFetchNand(ThreadState *thr, uptr pc, volatile T *a, T v,
- morder mo) {
- return AtomicRMW<T, func_nand>(thr, pc, a, v, mo);
-}
+struct OpFetchOr {
+ template <typename T>
+ static T NoTsanAtomic(morder mo, volatile T *a, T v) {
+ return func_or(a, v);
+ }
-template<typename T>
-static bool NoTsanAtomicCAS(volatile T *a, T *c, T v, morder mo, morder fmo) {
- return atomic_compare_exchange_strong(to_atomic(a), c, v, to_mo(mo));
-}
+ template <typename T>
+ static T Atomic(ThreadState *thr, uptr pc, morder mo, volatile T *a, T v) {
+ return AtomicRMW<T, func_or>(thr, pc, a, v, mo);
+ }
+};
-#if __TSAN_HAS_INT128
-static bool NoTsanAtomicCAS(volatile a128 *a, a128 *c, a128 v,
- morder mo, morder fmo) {
- a128 old = *c;
- a128 cur = func_cas(a, old, v);
- if (cur == old)
- return true;
- *c = cur;
- return false;
-}
-#endif
+struct OpFetchXor {
+ template <typename T>
+ static T NoTsanAtomic(morder mo, volatile T *a, T v) {
+ return func_xor(a, v);
+ }
-template<typename T>
-static T NoTsanAtomicCAS(volatile T *a, T c, T v, morder mo, morder fmo) {
- NoTsanAtomicCAS(a, &c, v, mo, fmo);
- return c;
-}
+ template <typename T>
+ static T Atomic(ThreadState *thr, uptr pc, morder mo, volatile T *a, T v) {
+ return AtomicRMW<T, func_xor>(thr, pc, a, v, mo);
+ }
+};
-template <typename T>
-static bool AtomicCAS(ThreadState *thr, uptr pc, volatile T *a, T *c, T v,
- morder mo, morder fmo) {
- // 31.7.2.18: "The failure argument shall not be memory_order_release
- // nor memory_order_acq_rel". LLVM (2021-05) fallbacks to Monotonic
- // (mo_relaxed) when those are used.
- DCHECK(IsLoadOrder(fmo));
+struct OpFetchNand {
+ template <typename T>
+ static T NoTsanAtomic(morder mo, volatile T *a, T v) {
+ return func_nand(a, v);
+ }
- MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessWrite | kAccessAtomic);
- if (LIKELY(mo == mo_relaxed && fmo == mo_relaxed)) {
- T cc = *c;
- T pr = func_cas(a, cc, v);
- if (pr == cc)
+ template <typename T>
+ static T Atomic(ThreadState *thr, uptr pc, morder mo, volatile T *a, T v) {
+ return AtomicRMW<T, func_nand>(thr, pc, a, v, mo);
+ }
+};
+
+struct OpCAS {
+ template <typename T>
+ static bool NoTsanAtomic(morder mo, morder fmo, volatile T *a, T *c, T v) {
+ return atomic_compare_exchange_strong(to_atomic(a), c, v, to_mo(mo));
+ }
+
+#if __TSAN_HAS_INT128
+ static bool NoTsanAtomic(morder mo, morder fmo, volatile a128 *a, a128 *c,
+ a128 v) {
+ a128 old = *c;
+ a128 cur = func_cas(a, old, v);
+ if (cur == old)
return true;
- *c = pr;
+ *c = cur;
return false;
}
- SlotLocker locker(thr);
- bool release = IsReleaseOrder(mo);
- bool success;
- {
- auto s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false);
- RWLock lock(&s->mtx, release);
- T cc = *c;
- T pr = func_cas(a, cc, v);
- success = pr == cc;
- if (!success) {
+#endif
+
+ template <typename T>
+ static T NoTsanAtomic(morder mo, morder fmo, volatile T *a, T c, T v) {
+ NoTsanAtomic(mo, fmo, a, &c, v);
+ return c;
+ }
+
+ template <typename T>
+ static bool Atomic(ThreadState *thr, uptr pc, morder mo, morder fmo,
+ volatile T *a, T *c, T v) {
+ // 31.7.2.18: "The failure argument shall not be memory_order_release
+ // nor memory_order_acq_rel". LLVM (2021-05) fallbacks to Monotonic
+ // (mo_relaxed) when those are used.
+ DCHECK(IsLoadOrder(fmo));
+
+ MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(),
+ kAccessWrite | kAccessAtomic);
+ if (LIKELY(mo == mo_relaxed && fmo == mo_relaxed)) {
+ T cc = *c;
+ T pr = func_cas(a, cc, v);
+ if (pr == cc)
+ return true;
*c = pr;
- mo = fmo;
+ return false;
}
- if (success && IsAcqRelOrder(mo))
- thr->clock.ReleaseAcquire(&s->clock);
- else if (success && IsReleaseOrder(mo))
- thr->clock.Release(&s->clock);
- else if (IsAcquireOrder(mo))
- thr->clock.Acquire(s->clock);
+ SlotLocker locker(thr);
+ bool release = IsReleaseOrder(mo);
+ bool success;
+ {
+ auto s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false);
+ RWLock lock(&s->mtx, release);
+ T cc = *c;
+ T pr = func_cas(a, cc, v);
+ success = pr == cc;
+ if (!success) {
+ *c = pr;
+ mo = fmo;
+ }
+ if (success && IsAcqRelOrder(mo))
+ thr->clock.ReleaseAcquire(&s->clock);
+ else if (success && IsReleaseOrder(mo))
+ thr->clock.Release(&s->clock);
+ else if (IsAcquireOrder(mo))
+ thr->clock.Acquire(s->clock);
+ }
+ if (success && release)
+ IncrementEpoch(thr);
+ return success;
}
- if (success && release)
- IncrementEpoch(thr);
- return success;
-}
-template<typename T>
-static T AtomicCAS(ThreadState *thr, uptr pc,
- volatile T *a, T c, T v, morder mo, morder fmo) {
- AtomicCAS(thr, pc, a, &c, v, mo, fmo);
- return c;
-}
+ template <typename T>
+ static T Atomic(ThreadState *thr, uptr pc, morder mo, morder fmo,
+ volatile T *a, T c, T v) {
+ Atomic(thr, pc, mo, fmo, a, &c, v);
+ return c;
+ }
+};
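OpCAS::Atomic takes separate success and failure orders and downgrades to fmo when the exchange fails; C++ (31.7.2.18, quoted above) forbids release and acq_rel as failure orders, which is why the DCHECK admits only load orders. The std::atomic API has the same shape:

#include <atomic>
#include <cstdio>

int main() {
  std::atomic<int> a{5};
  int expected = 7;
  // Success order acq_rel, failure order acquire: on failure only the
  // acquire side applies, matching the mo = fmo downgrade above.
  bool ok = a.compare_exchange_strong(expected, 9, std::memory_order_acq_rel,
                                      std::memory_order_acquire);
  printf("ok=%d expected=%d\n", ok, expected);  // ok=0 expected=5
}
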
#if !SANITIZER_GO
-static void NoTsanAtomicFence(morder mo) {
- __sync_synchronize();
-}
+struct OpFence {
+ static void NoTsanAtomic(morder mo) { __sync_synchronize(); }
-static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
- // FIXME(dvyukov): not implemented.
- __sync_synchronize();
-}
+ static void Atomic(ThreadState *thr, uptr pc, morder mo) {
+ // FIXME(dvyukov): not implemented.
+ __sync_synchronize();
+ }
+};
#endif
+} // namespace
+
// Interface functions follow.
#if !SANITIZER_GO
// C/C++
static morder convert_morder(morder mo) {
- if (flags()->force_seq_cst_atomics)
- return (morder)mo_seq_cst;
+ return flags()->force_seq_cst_atomics ? mo_seq_cst : mo;
+}
+static morder to_morder(int mo) {
// Filter out additional memory order flags:
// MEMMODEL_SYNC = 1 << 15
// __ATOMIC_HLE_ACQUIRE = 1 << 16
// since we use __sync_ atomics for actual atomic operations,
// we can safely ignore it as well. It also subtly affects semantics,
// but we don't model the difference.
- return (morder)(mo & 0x7fff);
+ morder res = static_cast<morder>(static_cast<u8>(mo));
+ DCHECK_LE(res, mo_seq_cst);
+ return res;
}
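The truncation to the low byte is what strips the flag bits listed above. A worked example with the constants written out (values per the comment; real code would use the compiler's __ATOMIC_* macros):

#include <cstdio>

int main() {
  const int kAcquire = 2;           // __ATOMIC_ACQUIRE == mo_acquire
  const int kHleAcquire = 1 << 16;  // __ATOMIC_HLE_ACQUIRE
  int mo = kAcquire | kHleAcquire;  // 65538, as passed by HLE-using code
  // Keeping only the low byte drops the hint bit and leaves mo_acquire.
  printf("%d\n", static_cast<unsigned char>(mo));  // prints 2
}
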
-# define ATOMIC_IMPL(func, ...) \
- ThreadState *const thr = cur_thread(); \
- ProcessPendingSignals(thr); \
- if (UNLIKELY(thr->ignore_sync || thr->ignore_interceptors)) \
- return NoTsanAtomic##func(__VA_ARGS__); \
- mo = convert_morder(mo); \
- return Atomic##func(thr, GET_CALLER_PC(), __VA_ARGS__);
+template <class Op, class... Types>
+ALWAYS_INLINE auto AtomicImpl(morder mo, Types... args) {
+ ThreadState *const thr = cur_thread();
+ ProcessPendingSignals(thr);
+ if (UNLIKELY(thr->ignore_sync || thr->ignore_interceptors))
+ return Op::NoTsanAtomic(mo, args...);
+ return Op::Atomic(thr, GET_CALLER_PC(), convert_morder(mo), args...);
+}
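AtomicImpl replaces the old ATOMIC_IMPL macro with a variadic template: each operation is a struct exposing NoTsanAtomic/Atomic overloads, and a single dispatcher chooses between them with ordinary overload resolution instead of token pasting. A miniature of the shape (OpNop and Dispatch are hypothetical):

struct OpNop {
  template <typename T>
  static T NoTsanAtomic(int mo, volatile T *a) {
    return *a;
  }
  template <typename T>
  static T Atomic(void *thr, unsigned long pc, int mo, volatile T *a) {
    return *a;
  }
};

template <class Op, class... Ts>
auto Dispatch(bool bypass, int mo, Ts... args) {
  if (bypass)  // e.g. thr->ignore_sync in the real code
    return Op::NoTsanAtomic(mo, args...);
  return Op::Atomic(nullptr, 0, mo, args...);
}
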
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
-a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
- ATOMIC_IMPL(Load, a, mo);
+a8 __tsan_atomic8_load(const volatile a8 *a, int mo) {
+ return AtomicImpl<OpLoad>(to_morder(mo), a);
}
SANITIZER_INTERFACE_ATTRIBUTE
-a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
- ATOMIC_IMPL(Load, a, mo);
+a16 __tsan_atomic16_load(const volatile a16 *a, int mo) {
+ return AtomicImpl<OpLoad>(to_morder(mo), a);
}
SANITIZER_INTERFACE_ATTRIBUTE
-a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
- ATOMIC_IMPL(Load, a, mo);
+a32 __tsan_atomic32_load(const volatile a32 *a, int mo) {
+ return AtomicImpl<OpLoad>(to_morder(mo), a);
}
SANITIZER_INTERFACE_ATTRIBUTE
-a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
- ATOMIC_IMPL(Load, a, mo);
+a64 __tsan_atomic64_load(const volatile a64 *a, int mo) {
+ return AtomicImpl<OpLoad>(to_morder(mo), a);
}
-#if __TSAN_HAS_INT128
+# if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
-a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
- ATOMIC_IMPL(Load, a, mo);
+a128 __tsan_atomic128_load(const volatile a128 *a, int mo) {
+ return AtomicImpl<OpLoad>(to_morder(mo), a);
}
-#endif
+# endif
SANITIZER_INTERFACE_ATTRIBUTE
-void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
- ATOMIC_IMPL(Store, a, v, mo);
+void __tsan_atomic8_store(volatile a8 *a, a8 v, int mo) {
+ return AtomicImpl<OpStore>(to_morder(mo), a, v);
}
SANITIZER_INTERFACE_ATTRIBUTE
-void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
- ATOMIC_IMPL(Store, a, v, mo);
+void __tsan_atomic16_store(volatile a16 *a, a16 v, int mo) {
+ return AtomicImpl<OpStore>(to_morder(mo), a, v);
}
SANITIZER_INTERFACE_ATTRIBUTE
-void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
- ATOMIC_IMPL(Store, a, v, mo);
+void __tsan_atomic32_store(volatile a32 *a, a32 v, int mo) {
+ return AtomicImpl<OpStore>(to_morder(mo), a, v);
}
SANITIZER_INTERFACE_ATTRIBUTE
-void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
- ATOMIC_IMPL(Store, a, v, mo);
+void __tsan_atomic64_store(volatile a64 *a, a64 v, int mo) {
+ return AtomicImpl<OpStore>(to_morder(mo), a, v);
}
-#if __TSAN_HAS_INT128
+# if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
-void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
- ATOMIC_IMPL(Store, a, v, mo);
+void __tsan_atomic128_store(volatile a128 *a, a128 v, int mo) {
+ return AtomicImpl<OpStore>(to_morder(mo), a, v);
}
-#endif
+# endif
SANITIZER_INTERFACE_ATTRIBUTE
-a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
- ATOMIC_IMPL(Exchange, a, v, mo);
+a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, int mo) {
+ return AtomicImpl<OpExchange>(to_morder(mo), a, v);
}
SANITIZER_INTERFACE_ATTRIBUTE
-a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
- ATOMIC_IMPL(Exchange, a, v, mo);
+a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, int mo) {
+ return AtomicImpl<OpExchange>(to_morder(mo), a, v);
}
SANITIZER_INTERFACE_ATTRIBUTE
-a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
- ATOMIC_IMPL(Exchange, a, v, mo);
+a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, int mo) {
+ return AtomicImpl<OpExchange>(to_morder(mo), a, v);
}
SANITIZER_INTERFACE_ATTRIBUTE
-a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
- ATOMIC_IMPL(Exchange, a, v, mo);
+a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, int mo) {
+ return AtomicImpl<OpExchange>(to_morder(mo), a, v);
}
-#if __TSAN_HAS_INT128
+# if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
-a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
- ATOMIC_IMPL(Exchange, a, v, mo);
+a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, int mo) {
+ return AtomicImpl<OpExchange>(to_morder(mo), a, v);
}
-#endif
+# endif
SANITIZER_INTERFACE_ATTRIBUTE
-a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
- ATOMIC_IMPL(FetchAdd, a, v, mo);
+a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, int mo) {
+ return AtomicImpl<OpFetchAdd>(to_morder(mo), a, v);
}
SANITIZER_INTERFACE_ATTRIBUTE
-a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
- ATOMIC_IMPL(FetchAdd, a, v, mo);
+a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, int mo) {
+ return AtomicImpl<OpFetchAdd>(to_morder(mo), a, v);
}
SANITIZER_INTERFACE_ATTRIBUTE
-a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
- ATOMIC_IMPL(FetchAdd, a, v, mo);
+a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, int mo) {
+ return AtomicImpl<OpFetchAdd>(to_morder(mo), a, v);
}
SANITIZER_INTERFACE_ATTRIBUTE
-a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
- ATOMIC_IMPL(FetchAdd, a, v, mo);
+a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, int mo) {
+ return AtomicImpl<OpFetchAdd>(to_morder(mo), a, v);
}
-#if __TSAN_HAS_INT128
+# if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
-a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
- ATOMIC_IMPL(FetchAdd, a, v, mo);
+a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, int mo) {
+ return AtomicImpl<OpFetchAdd>(to_morder(mo), a, v);
}
-#endif
+# endif
SANITIZER_INTERFACE_ATTRIBUTE
-a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
- ATOMIC_IMPL(FetchSub, a, v, mo);
+a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, int mo) {
+ return AtomicImpl<OpFetchSub>(to_morder(mo), a, v);
}
SANITIZER_INTERFACE_ATTRIBUTE
-a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
- ATOMIC_IMPL(FetchSub, a, v, mo);
+a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, int mo) {
+ return AtomicImpl<OpFetchSub>(to_morder(mo), a, v);
}
SANITIZER_INTERFACE_ATTRIBUTE
-a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
- ATOMIC_IMPL(FetchSub, a, v, mo);
+a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, int mo) {
+ return AtomicImpl<OpFetchSub>(to_morder(mo), a, v);
}
SANITIZER_INTERFACE_ATTRIBUTE
-a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
- ATOMIC_IMPL(FetchSub, a, v, mo);
+a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, int mo) {
+ return AtomicImpl<OpFetchSub>(to_morder(mo), a, v);
}
-#if __TSAN_HAS_INT128
+# if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
-a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
- ATOMIC_IMPL(FetchSub, a, v, mo);
+a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, int mo) {
+ return AtomicImpl<OpFetchSub>(to_morder(mo), a, v);
}
-#endif
+# endif
SANITIZER_INTERFACE_ATTRIBUTE
-a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
- ATOMIC_IMPL(FetchAnd, a, v, mo);
+a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, int mo) {
+ return AtomicImpl<OpFetchAnd>(to_morder(mo), a, v);
}
SANITIZER_INTERFACE_ATTRIBUTE
-a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
- ATOMIC_IMPL(FetchAnd, a, v, mo);
+a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, int mo) {
+ return AtomicImpl<OpFetchAnd>(to_morder(mo), a, v);
}
SANITIZER_INTERFACE_ATTRIBUTE
-a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
- ATOMIC_IMPL(FetchAnd, a, v, mo);
+a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, int mo) {
+ return AtomicImpl<OpFetchAnd>(to_morder(mo), a, v);
}
SANITIZER_INTERFACE_ATTRIBUTE
-a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
- ATOMIC_IMPL(FetchAnd, a, v, mo);
+a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, int mo) {
+ return AtomicImpl<OpFetchAnd>(to_morder(mo), a, v);
}
-#if __TSAN_HAS_INT128
+# if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
-a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
- ATOMIC_IMPL(FetchAnd, a, v, mo);
+a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, int mo) {
+ return AtomicImpl<OpFetchAnd>(to_morder(mo), a, v);
}
-#endif
+# endif
SANITIZER_INTERFACE_ATTRIBUTE
-a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
- ATOMIC_IMPL(FetchOr, a, v, mo);
+a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, int mo) {
+ return AtomicImpl<OpFetchOr>(to_morder(mo), a, v);
}
SANITIZER_INTERFACE_ATTRIBUTE
-a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
- ATOMIC_IMPL(FetchOr, a, v, mo);
+a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, int mo) {
+ return AtomicImpl<OpFetchOr>(to_morder(mo), a, v);
}
SANITIZER_INTERFACE_ATTRIBUTE
-a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
- ATOMIC_IMPL(FetchOr, a, v, mo);
+a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, int mo) {
+ return AtomicImpl<OpFetchOr>(to_morder(mo), a, v);
}
SANITIZER_INTERFACE_ATTRIBUTE
-a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
- ATOMIC_IMPL(FetchOr, a, v, mo);
+a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, int mo) {
+ return AtomicImpl<OpFetchOr>(to_morder(mo), a, v);
}
-#if __TSAN_HAS_INT128
+# if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
-a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
- ATOMIC_IMPL(FetchOr, a, v, mo);
+a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, int mo) {
+ return AtomicImpl<OpFetchOr>(to_morder(mo), a, v);
}
-#endif
+# endif
SANITIZER_INTERFACE_ATTRIBUTE
-a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
- ATOMIC_IMPL(FetchXor, a, v, mo);
+a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, int mo) {
+ return AtomicImpl<OpFetchXor>(to_morder(mo), a, v);
}
SANITIZER_INTERFACE_ATTRIBUTE
-a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
- ATOMIC_IMPL(FetchXor, a, v, mo);
+a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, int mo) {
+ return AtomicImpl<OpFetchXor>(to_morder(mo), a, v);
}
SANITIZER_INTERFACE_ATTRIBUTE
-a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
- ATOMIC_IMPL(FetchXor, a, v, mo);
+a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, int mo) {
+ return AtomicImpl<OpFetchXor>(to_morder(mo), a, v);
}
SANITIZER_INTERFACE_ATTRIBUTE
-a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
- ATOMIC_IMPL(FetchXor, a, v, mo);
+a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, int mo) {
+ return AtomicImpl<OpFetchXor>(to_morder(mo), a, v);
}
-#if __TSAN_HAS_INT128
+# if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
-a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
- ATOMIC_IMPL(FetchXor, a, v, mo);
+a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, int mo) {
+ return AtomicImpl<OpFetchXor>(to_morder(mo), a, v);
}
-#endif
+# endif
SANITIZER_INTERFACE_ATTRIBUTE
-a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
- ATOMIC_IMPL(FetchNand, a, v, mo);
+a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, int mo) {
+ return AtomicImpl<OpFetchNand>(to_morder(mo), a, v);
}
SANITIZER_INTERFACE_ATTRIBUTE
-a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
- ATOMIC_IMPL(FetchNand, a, v, mo);
+a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, int mo) {
+ return AtomicImpl<OpFetchNand>(to_morder(mo), a, v);
}
SANITIZER_INTERFACE_ATTRIBUTE
-a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
- ATOMIC_IMPL(FetchNand, a, v, mo);
+a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, int mo) {
+ return AtomicImpl<OpFetchNand>(to_morder(mo), a, v);
}
SANITIZER_INTERFACE_ATTRIBUTE
-a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
- ATOMIC_IMPL(FetchNand, a, v, mo);
+a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, int mo) {
+ return AtomicImpl<OpFetchNand>(to_morder(mo), a, v);
}
-#if __TSAN_HAS_INT128
+# if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
-a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
- ATOMIC_IMPL(FetchNand, a, v, mo);
+a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, int mo) {
+ return AtomicImpl<OpFetchNand>(to_morder(mo), a, v);
}
-#endif
+# endif
SANITIZER_INTERFACE_ATTRIBUTE
-int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
- morder mo, morder fmo) {
- ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
+int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v, int mo,
+ int fmo) {
+ return AtomicImpl<OpCAS>(to_morder(mo), to_morder(fmo), a, c, v);
}
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
- morder mo, morder fmo) {
- ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
+ int mo, int fmo) {
+ return AtomicImpl<OpCAS>(to_morder(mo), to_morder(fmo), a, c, v);
}
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
- morder mo, morder fmo) {
- ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
+ int mo, int fmo) {
+ return AtomicImpl<OpCAS>(to_morder(mo), to_morder(fmo), a, c, v);
}
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
- morder mo, morder fmo) {
- ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
+ int mo, int fmo) {
+ return AtomicImpl<OpCAS>(to_morder(mo), to_morder(fmo), a, c, v);
}
-#if __TSAN_HAS_INT128
+# if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
- morder mo, morder fmo) {
- ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
+ int mo, int fmo) {
+ return AtomicImpl<OpCAS>(to_morder(mo), to_morder(fmo), a, c, v);
}
-#endif
+# endif
SANITIZER_INTERFACE_ATTRIBUTE
-int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
- morder mo, morder fmo) {
- ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
+int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v, int mo,
+ int fmo) {
+ return AtomicImpl<OpCAS>(to_morder(mo), to_morder(fmo), a, c, v);
}
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
- morder mo, morder fmo) {
- ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
+ int mo, int fmo) {
+ return AtomicImpl<OpCAS>(to_morder(mo), to_morder(fmo), a, c, v);
}
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
- morder mo, morder fmo) {
- ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
+ int mo, int fmo) {
+ return AtomicImpl<OpCAS>(to_morder(mo), to_morder(fmo), a, c, v);
}
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
- morder mo, morder fmo) {
- ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
+ int mo, int fmo) {
+ return AtomicImpl<OpCAS>(to_morder(mo), to_morder(fmo), a, c, v);
}
-#if __TSAN_HAS_INT128
+# if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
- morder mo, morder fmo) {
- ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
+ int mo, int fmo) {
+ return AtomicImpl<OpCAS>(to_morder(mo), to_morder(fmo), a, c, v);
}
-#endif
+# endif
SANITIZER_INTERFACE_ATTRIBUTE
-a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
- morder mo, morder fmo) {
- ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
+a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v, int mo,
+ int fmo) {
+ return AtomicImpl<OpCAS>(to_morder(mo), to_morder(fmo), a, c, v);
}
SANITIZER_INTERFACE_ATTRIBUTE
-a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
- morder mo, morder fmo) {
- ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
+a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v, int mo,
+ int fmo) {
+ return AtomicImpl<OpCAS>(to_morder(mo), to_morder(fmo), a, c, v);
}
SANITIZER_INTERFACE_ATTRIBUTE
-a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
- morder mo, morder fmo) {
- ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
+a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v, int mo,
+ int fmo) {
+ return AtomicImpl<OpCAS>(to_morder(mo), to_morder(fmo), a, c, v);
}
SANITIZER_INTERFACE_ATTRIBUTE
-a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
- morder mo, morder fmo) {
- ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
+a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v, int mo,
+ int fmo) {
+ return AtomicImpl<OpCAS>(to_morder(mo), to_morder(fmo), a, c, v);
}
-#if __TSAN_HAS_INT128
+# if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
- morder mo, morder fmo) {
- ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
+ int mo, int fmo) {
+ return AtomicImpl<OpCAS>(to_morder(mo), to_morder(fmo), a, c, v);
}
-#endif
+# endif
SANITIZER_INTERFACE_ATTRIBUTE
-void __tsan_atomic_thread_fence(morder mo) { ATOMIC_IMPL(Fence, mo); }
+void __tsan_atomic_thread_fence(int mo) {
+ return AtomicImpl<OpFence>(to_morder(mo));
+}
SANITIZER_INTERFACE_ATTRIBUTE
-void __tsan_atomic_signal_fence(morder mo) {
-}
+void __tsan_atomic_signal_fence(int mo) {}
} // extern "C"
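// Editor's note: the wrappers above now take a plain int at the ABI boundary
// and narrow it via to_morder(). A minimal sketch of what such a helper is
// assumed to look like (the real one lives elsewhere in the TSan sources):
//
//   static morder to_morder(int mo) {
//     // The C/C++ memory-order constants are contiguous: relaxed (0)
//     // through seq_cst (5); anything outside that range is a caller bug.
//     DCHECK_GE(mo, 0);
//     DCHECK_LE(mo, static_cast<int>(mo_seq_cst));
//     return static_cast<morder>(mo);
//   }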
#else // #if !SANITIZER_GO
// Go
-# define ATOMIC(func, ...) \
- if (thr->ignore_sync) { \
- NoTsanAtomic##func(__VA_ARGS__); \
- } else { \
- FuncEntry(thr, cpc); \
- Atomic##func(thr, pc, __VA_ARGS__); \
- FuncExit(thr); \
- }
+template <class Op, class... Types>
+void AtomicGo(ThreadState *thr, uptr cpc, uptr pc, Types... args) {
+ if (thr->ignore_sync) {
+ (void)Op::NoTsanAtomic(args...);
+ } else {
+ FuncEntry(thr, cpc);
+ (void)Op::Atomic(thr, pc, args...);
+ FuncExit(thr);
+ }
+}
-# define ATOMIC_RET(func, ret, ...) \
- if (thr->ignore_sync) { \
- (ret) = NoTsanAtomic##func(__VA_ARGS__); \
- } else { \
- FuncEntry(thr, cpc); \
- (ret) = Atomic##func(thr, pc, __VA_ARGS__); \
- FuncExit(thr); \
- }
+template <class Op, class... Types>
+auto AtomicGoRet(ThreadState *thr, uptr cpc, uptr pc, Types... args) {
+ if (thr->ignore_sync) {
+ return Op::NoTsanAtomic(args...);
+ } else {
+ FuncEntry(thr, cpc);
+ auto ret = Op::Atomic(thr, pc, args...);
+ FuncExit(thr);
+ return ret;
+ }
+}
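// Editor's sketch: the Op parameters plugged into AtomicGo/AtomicGoRet (and
// into AtomicImpl above) are assumed to pair an uninstrumented and an
// instrumented implementation behind two static entry points, e.g.:
//
//   struct OpFetchAddSketch {
//     template <typename T>
//     static T NoTsanAtomic(morder mo, volatile T *a, T v) {
//       return __atomic_fetch_add(a, v, static_cast<int>(mo));
//     }
//     template <typename T>
//     static T Atomic(ThreadState *thr, uptr pc, morder mo, volatile T *a,
//                     T v) {
//       // The real op would also update race-detection state here.
//       return NoTsanAtomic(mo, a, v);
//     }
//   };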
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
- ATOMIC_RET(Load, *(a32*)(a+8), *(a32**)a, mo_acquire);
+ *(a32 *)(a + 8) = AtomicGoRet<OpLoad>(thr, cpc, pc, mo_acquire, *(a32 **)a);
}
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
- ATOMIC_RET(Load, *(a64*)(a+8), *(a64**)a, mo_acquire);
+ *(a64 *)(a + 8) = AtomicGoRet<OpLoad>(thr, cpc, pc, mo_acquire, *(a64 **)a);
}
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
- ATOMIC(Store, *(a32**)a, *(a32*)(a+8), mo_release);
+ AtomicGo<OpStore>(thr, cpc, pc, mo_release, *(a32 **)a, *(a32 *)(a + 8));
}
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
- ATOMIC(Store, *(a64**)a, *(a64*)(a+8), mo_release);
+ AtomicGo<OpStore>(thr, cpc, pc, mo_release, *(a64 **)a, *(a64 *)(a + 8));
}
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
- ATOMIC_RET(FetchAdd, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
+ *(a32 *)(a + 16) = AtomicGoRet<OpFetchAdd>(thr, cpc, pc, mo_acq_rel,
+ *(a32 **)a, *(a32 *)(a + 8));
}
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
- ATOMIC_RET(FetchAdd, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
+ *(a64 *)(a + 16) = AtomicGoRet<OpFetchAdd>(thr, cpc, pc, mo_acq_rel,
+ *(a64 **)a, *(a64 *)(a + 8));
}
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_fetch_and(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
- ATOMIC_RET(FetchAnd, *(a32 *)(a + 16), *(a32 **)a, *(a32 *)(a + 8),
- mo_acq_rel);
+ *(a32 *)(a + 16) = AtomicGoRet<OpFetchAnd>(thr, cpc, pc, mo_acq_rel,
+ *(a32 **)a, *(a32 *)(a + 8));
}
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_fetch_and(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
- ATOMIC_RET(FetchAnd, *(a64 *)(a + 16), *(a64 **)a, *(a64 *)(a + 8),
- mo_acq_rel);
+ *(a64 *)(a + 16) = AtomicGoRet<OpFetchAnd>(thr, cpc, pc, mo_acq_rel,
+ *(a64 **)a, *(a64 *)(a + 8));
}
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_fetch_or(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
- ATOMIC_RET(FetchOr, *(a32 *)(a + 16), *(a32 **)a, *(a32 *)(a + 8),
- mo_acq_rel);
+ *(a32 *)(a + 16) = AtomicGoRet<OpFetchOr>(thr, cpc, pc, mo_acq_rel,
+ *(a32 **)a, *(a32 *)(a + 8));
}
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_fetch_or(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
- ATOMIC_RET(FetchOr, *(a64 *)(a + 16), *(a64 **)a, *(a64 *)(a + 8),
- mo_acq_rel);
+ *(a64 *)(a + 16) = AtomicGoRet<OpFetchOr>(thr, cpc, pc, mo_acq_rel,
+ *(a64 **)a, *(a64 *)(a + 8));
}
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
- ATOMIC_RET(Exchange, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
+ *(a32 *)(a + 16) = AtomicGoRet<OpExchange>(thr, cpc, pc, mo_acq_rel,
+ *(a32 **)a, *(a32 *)(a + 8));
}
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
- ATOMIC_RET(Exchange, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
+ *(a64 *)(a + 16) = AtomicGoRet<OpExchange>(thr, cpc, pc, mo_acq_rel,
+ *(a64 **)a, *(a64 *)(a + 8));
}
SANITIZER_INTERFACE_ATTRIBUTE
-void __tsan_go_atomic32_compare_exchange(
- ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
- a32 cur = 0;
- a32 cmp = *(a32*)(a+8);
- ATOMIC_RET(CAS, cur, *(a32**)a, cmp, *(a32*)(a+12), mo_acq_rel, mo_acquire);
- *(bool*)(a+16) = (cur == cmp);
+void __tsan_go_atomic32_compare_exchange(ThreadState *thr, uptr cpc, uptr pc,
+ u8 *a) {
+ a32 cmp = *(a32 *)(a + 8);
+ a32 cur = AtomicGoRet<OpCAS>(thr, cpc, pc, mo_acq_rel, mo_acquire, *(a32 **)a,
+ cmp, *(a32 *)(a + 12));
+ *(bool *)(a + 16) = (cur == cmp);
}
SANITIZER_INTERFACE_ATTRIBUTE
-void __tsan_go_atomic64_compare_exchange(
- ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
- a64 cur = 0;
- a64 cmp = *(a64*)(a+8);
- ATOMIC_RET(CAS, cur, *(a64**)a, cmp, *(a64*)(a+16), mo_acq_rel, mo_acquire);
- *(bool*)(a+24) = (cur == cmp);
+void __tsan_go_atomic64_compare_exchange(ThreadState *thr, uptr cpc, uptr pc,
+ u8 *a) {
+ a64 cmp = *(a64 *)(a + 8);
+ a64 cur = AtomicGoRet<OpCAS>(thr, cpc, pc, mo_acq_rel, mo_acquire, *(a64 **)a,
+ cmp, *(a64 *)(a + 16));
+ *(bool *)(a + 24) = (cur == cmp);
}
} // extern "C"
#endif // #if !SANITIZER_GO
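// Editor's sketch of the Go argument frame the __tsan_go_atomic* wrappers
// decode; the layout is inferred from the offsets used above and is
// illustrative only:
//
//   struct GoAtomic64FetchAddArgs {
//     a64 *addr;   // *(a64 **)a       : target address
//     a64 value;   // *(a64 *)(a + 8)  : operand
//     a64 result;  // *(a64 *)(a + 16) : written back to the Go caller
//   };
//
// The CAS wrappers differ only in that the success flag lands after the
// operands (offset 16 for 32-bit, 24 for 64-bit).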
DCHECK_GE(dst, jctx->heap_begin);
DCHECK_LE(dst + size, jctx->heap_begin + jctx->heap_size);
DCHECK_NE(dst, src);
- DCHECK_NE(size, 0);
// Assuming it's not running concurrently with threads that do
// memory accesses and mutex operations (stop-the-world phase).
invoke_free_hook(ptr); \
SCOPED_INTERCEPTOR_RAW(free, ptr); \
user_free(thr, pc, ptr)
-#define COMMON_MALLOC_SIZE(ptr) uptr size = user_alloc_usable_size(ptr);
-#define COMMON_MALLOC_FILL_STATS(zone, stats)
-#define COMMON_MALLOC_REPORT_UNKNOWN_REALLOC(ptr, zone_ptr, zone_name) \
- (void)zone_name; \
- Report("mz_realloc(%p) -- attempting to realloc unallocated memory.\n", ptr);
-#define COMMON_MALLOC_NAMESPACE __tsan
-#define COMMON_MALLOC_HAS_ZONE_ENUMERATOR 0
-#define COMMON_MALLOC_HAS_EXTRA_INTROSPECTION_INIT 0
+# define COMMON_MALLOC_FREE_SIZED(ptr, size) COMMON_MALLOC_FREE(ptr)
+# define COMMON_MALLOC_FREE_ALIGNED_SIZED(ptr, alignment, size) \
+ COMMON_MALLOC_FREE(ptr)
+# define COMMON_MALLOC_SIZE(ptr) uptr size = user_alloc_usable_size(ptr);
+# define COMMON_MALLOC_FILL_STATS(zone, stats)
+# define COMMON_MALLOC_REPORT_UNKNOWN_REALLOC(ptr, zone_ptr, zone_name) \
+ (void)zone_name; \
+ Report("mz_realloc(%p) -- attempting to realloc unallocated memory.\n", \
+ ptr);
+# define COMMON_MALLOC_NAMESPACE __tsan
+# define COMMON_MALLOC_HAS_ZONE_ENUMERATOR 0
+# define COMMON_MALLOC_HAS_EXTRA_INTROSPECTION_INIT 0
-#include "sanitizer_common/sanitizer_malloc_mac.inc"
+# include "sanitizer_common/sanitizer_malloc_mac.inc"
#endif
ObtainCurrentStack(thr, pc, &stack);
if (IsFiredSuppression(ctx, ReportTypeSignalUnsafe, stack))
return;
- ThreadRegistryLock l(&ctx->thread_registry);
- ScopedReport rep(ReportTypeSignalUnsafe);
- rep.AddStack(stack, true);
- OutputReport(thr, rep);
+ // Use alloca, because malloc during signal handling deadlocks
+ ScopedReport *rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
+  // Take a new scope, as Apple platforms require the locks below to be
+  // released before symbolizing in order to avoid a deadlock
+ {
+ ThreadRegistryLock l(&ctx->thread_registry);
+ new (rep) ScopedReport(ReportTypeSignalUnsafe);
+ rep->AddStack(stack, true);
+#if SANITIZER_APPLE
+ } // Close this scope to release the locks
+#endif
+ OutputReport(thr, *rep);
+
+ // Need to manually destroy this because we used placement new to allocate
+ rep->~ScopedReport();
+#if !SANITIZER_APPLE
+ }
+#endif
}
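// Editor's note: the pattern above (repeated later in this diff) separates
// construction from reporting so the locks can be dropped before
// symbolization on Apple platforms. In isolation, with locking elided:
//
//   ScopedReport *rep =
//       (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
//   {
//     // ... acquire registry/slot locks ...
//     new (rep) ScopedReport(ReportTypeSignalUnsafe);  // construct in place
//     // ... populate *rep ...
//   }  // on Apple the scope (and locks) end here, before symbolization
//   OutputReport(thr, *rep);
//   rep->~ScopedReport();  // placement new requires manual destruction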
static const uptr kShadowAdd = 0x200000000000ull;
};
+/* Go on linux/riscv64 (39-bit VMA)
+0000 0001 0000 - 000f 0000 0000: executable and heap (60 GiB)
+000f 0000 0000 - 0010 0000 0000: -
+0010 0000 0000 - 0030 0000 0000: shadow - 128 GiB ( ~ 2 * app)
+0030 0000 0000 - 0038 0000 0000: metainfo - 32 GiB ( ~ 0.5 * app)
+0038 0000 0000 - 0040 0000 0000: -
+*/
+struct MappingGoRiscv64_39 {
+ static const uptr kMetaShadowBeg = 0x003000000000ull;
+ static const uptr kMetaShadowEnd = 0x003800000000ull;
+ static const uptr kShadowBeg = 0x001000000000ull;
+ static const uptr kShadowEnd = 0x003000000000ull;
+ static const uptr kLoAppMemBeg = 0x000000010000ull;
+ static const uptr kLoAppMemEnd = 0x000f00000000ull;
+ static const uptr kMidAppMemBeg = 0;
+ static const uptr kMidAppMemEnd = 0;
+ static const uptr kHiAppMemBeg = 0;
+ static const uptr kHiAppMemEnd = 0;
+ static const uptr kHeapMemBeg = 0;
+ static const uptr kHeapMemEnd = 0;
+ static const uptr kVdsoBeg = 0;
+ static const uptr kShadowMsk = 0;
+ static const uptr kShadowXor = 0;
+ static const uptr kShadowAdd = 0x001000000000ull;
+};
+
+/* Go on linux/riscv64 (48-bit VMA)
+0000 0001 0000 - 00e0 0000 0000: executable and heap (896 GiB)
+00e0 0000 0000 - 2000 0000 0000: -
+2000 0000 0000 - 2400 0000 0000: shadow - 4 TiB ( ~ 4 * app)
+2400 0000 0000 - 3000 0000 0000: -
+3000 0000 0000 - 3100 0000 0000: metainfo - 1 TiB ( ~ 1 * app)
+3100 0000 0000 - 8000 0000 0000: -
+*/
+struct MappingGoRiscv64_48 {
+ static const uptr kMetaShadowBeg = 0x300000000000ull;
+ static const uptr kMetaShadowEnd = 0x310000000000ull;
+ static const uptr kShadowBeg = 0x200000000000ull;
+ static const uptr kShadowEnd = 0x240000000000ull;
+ static const uptr kLoAppMemBeg = 0x000000010000ull;
+ static const uptr kLoAppMemEnd = 0x00e000000000ull;
+ static const uptr kMidAppMemBeg = 0;
+ static const uptr kMidAppMemEnd = 0;
+ static const uptr kHiAppMemBeg = 0;
+ static const uptr kHiAppMemEnd = 0;
+ static const uptr kHeapMemBeg = 0;
+ static const uptr kHeapMemEnd = 0;
+ static const uptr kVdsoBeg = 0;
+ static const uptr kShadowMsk = 0;
+ static const uptr kShadowXor = 0;
+ static const uptr kShadowAdd = 0x200000000000ull;
+};
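// Editor's worked example (not part of the patch), assuming TSan's usual Go
// shadow formula shadow = ((app & ~(kShadowCell - 1)) ^ kShadowXor) *
// kShadowMultiplier + kShadowAdd, with kShadowMultiplier == 2 (hence the
// "~ 2 * app" ratios above), for MappingGoRiscv64_39:
//
//   app    = 0x000000010000                  // kLoAppMemBeg
//   cell   = app & ~7   = 0x000000010000
//   xorred = cell ^ 0   = 0x000000010000     // kShadowXor == 0
//   shadow = 2 * xorred + 0x001000000000     // kShadowAdd
//          = 0x001000020000                  // inside [kShadowBeg, kShadowEnd)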
+
/*
Go on linux/s390x
0000 0000 1000 - 1000 0000 0000: executable and heap - 16 TiB
return Func::template Apply<MappingGoAarch64>(arg);
# elif defined(__loongarch_lp64)
return Func::template Apply<MappingGoLoongArch64_47>(arg);
+# elif SANITIZER_RISCV64
+ switch (vmaSize) {
+ case 39:
+ return Func::template Apply<MappingGoRiscv64_39>(arg);
+ case 48:
+ return Func::template Apply<MappingGoRiscv64_48>(arg);
+ }
# elif SANITIZER_WINDOWS
return Func::template Apply<MappingGoWindows>(arg);
# else
Func::template Apply<MappingGoAarch64>();
Func::template Apply<MappingGoLoongArch64_47>();
Func::template Apply<MappingGoMips64_47>();
+ Func::template Apply<MappingGoRiscv64_39>();
+ Func::template Apply<MappingGoRiscv64_48>();
Func::template Apply<MappingGoS390x>();
}
struct IsShadowMemImpl {
template <typename Mapping>
static bool Apply(uptr mem) {
- return mem >= Mapping::kShadowBeg && mem <= Mapping::kShadowEnd;
+ return mem >= Mapping::kShadowBeg && mem < Mapping::kShadowEnd;
}
};
struct IsMetaMemImpl {
template <typename Mapping>
static bool Apply(uptr mem) {
- return mem >= Mapping::kMetaShadowBeg && mem <= Mapping::kMetaShadowEnd;
+ return mem >= Mapping::kMetaShadowBeg && mem < Mapping::kMetaShadowEnd;
}
};
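// Editor's note: both predicates are now half-open, [Beg, End), so the first
// byte past a region is no longer misclassified. This matters because
// regions can be adjacent, e.g. in the mappings added above:
//
//   static_assert(MappingGoRiscv64_39::kShadowEnd ==
//                     MappingGoRiscv64_39::kMetaShadowBeg,
//                 "kShadowEnd is the start of the metainfo region, so it "
//                 "must not itself be classified as shadow");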
"WARNING: Program is run with randomized virtual address "
"space, which wouldn't work with ThreadSanitizer on Android.\n"
"Re-execing with fixed virtual address space.\n");
- CHECK_NE(personality(old_personality | ADDR_NO_RANDOMIZE), -1);
+
+ if (personality(old_personality | ADDR_NO_RANDOMIZE) == -1) {
+ Printf(
+ "FATAL: ThreadSanitizer: unable to disable ASLR (perhaps "
+ "sandboxing is enabled?).\n");
+ Printf("FATAL: Please rerun without sandboxing and/or ASLR.\n");
+ Die();
+ }
+
reexec = true;
}
# endif
"possibly due to high-entropy ASLR.\n"
"Re-execing with fixed virtual address space.\n"
"N.B. reducing ASLR entropy is preferable.\n");
- CHECK_NE(personality(old_personality | ADDR_NO_RANDOMIZE), -1);
+
+ if (personality(old_personality | ADDR_NO_RANDOMIZE) == -1) {
+ Printf(
+ "FATAL: ThreadSanitizer: encountered an incompatible memory "
+ "layout but was unable to disable ASLR (perhaps sandboxing is "
+ "enabled?).\n");
+ Printf(
+ "FATAL: Please rerun with lower ASLR entropy, ASLR disabled, "
+ "and/or sandboxing disabled.\n");
+ Die();
+ }
+
reexec = true;
} else {
Printf(
Printf("FATAL: Found %zd - Supported 39 and 48\n", vmaSize);
Die();
}
+# else
+ if (vmaSize != 39 && vmaSize != 48) {
+ Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
+ Printf("FATAL: Found %zd - Supported 39 and 48\n", vmaSize);
+ Die();
+ }
# endif
# endif
// is not compiled with -pie.
#if !SANITIZER_GO
{
-# if SANITIZER_LINUX && (defined(__aarch64__) || defined(__loongarch_lp64))
+# if INIT_LONGJMP_XOR_KEY
// Initialize the xor key used in {sig}{set,long}jump.
InitializeLongjmpXorKey();
# endif
// Reverse operation of libc stack pointer mangling
static uptr UnmangleLongJmpSp(uptr mangled_sp) {
-#if defined(__x86_64__)
-# if SANITIZER_LINUX
+# if SANITIZER_ANDROID && INIT_LONGJMP_XOR_KEY
+ if (longjmp_xor_key == 0) {
+ // bionic libc initialization process: __libc_init_globals ->
+ // __libc_init_vdso (calls strcmp) -> __libc_init_setjmp_cookie. strcmp is
+ // intercepted by TSan, so during TSan initialization the setjmp_cookie
+ // remains uninitialized. On Android, longjmp_xor_key must be set on first
+ // use.
+ InitializeLongjmpXorKey();
+ CHECK_NE(longjmp_xor_key, 0);
+ }
+# endif
+
+# if defined(__x86_64__)
+# if SANITIZER_LINUX
// Reverse of:
// xor %fs:0x30, %rsi
// rol $0x11, %rsi
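// Editor's sketch (illustrative, 64-bit): undoing this mangling applies the
// inverse operations in reverse order, i.e. rotate right by 0x11, then xor
// with the per-process pointer guard glibc keeps at %fs:0x30:
//
//   static uptr UnmangleSketch(uptr mangled_sp, uptr pointer_guard) {
//     const uptr kRot = 0x11;
//     uptr sp = (mangled_sp >> kRot) | (mangled_sp << (64 - kRot));  // ror
//     return sp ^ pointer_guard;  // undo the xor
//   }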
# else
# define LONG_JMP_SP_ENV_SLOT 2
# endif
-#elif SANITIZER_LINUX
-# ifdef __aarch64__
-# define LONG_JMP_SP_ENV_SLOT 13
-# elif defined(__loongarch__)
-# define LONG_JMP_SP_ENV_SLOT 1
-# elif defined(__mips64)
-# define LONG_JMP_SP_ENV_SLOT 1
+# elif SANITIZER_ANDROID
+# ifdef __aarch64__
+# define LONG_JMP_SP_ENV_SLOT 3
+# elif SANITIZER_RISCV64
+# define LONG_JMP_SP_ENV_SLOT 3
+# elif defined(__x86_64__)
+# define LONG_JMP_SP_ENV_SLOT 6
+# else
+# error unsupported
+# endif
+# elif SANITIZER_LINUX
+# ifdef __aarch64__
+# define LONG_JMP_SP_ENV_SLOT 13
+# elif defined(__loongarch__)
+# define LONG_JMP_SP_ENV_SLOT 1
+# elif defined(__mips64)
+# define LONG_JMP_SP_ENV_SLOT 1
# elif SANITIZER_RISCV64
# define LONG_JMP_SP_ENV_SLOT 13
# elif defined(__s390x__)
# else
# define LONG_JMP_SP_ENV_SLOT 6
# endif
-#endif
+# endif
uptr ExtractLongJmpSp(uptr *env) {
uptr mangled_sp = env[LONG_JMP_SP_ENV_SLOT];
}
CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &oldset, nullptr));
}
- return thr;
+
+  // Skia calls mallopt(M_THREAD_DISABLE_MEM_INIT, 1), which sets the least
+  // significant bit of TLS_SLOT_SANITIZER to 1. The Scudo allocator uses this
+  // bit as a flag to disable memory initialization. This is a workaround to
+  // get the correct ThreadState pointer.
+ uptr addr = reinterpret_cast<uptr>(thr);
+ return reinterpret_cast<ThreadState*>(addr & ~1ULL);
}
void set_cur_thread(ThreadState *thr) {
void InitializePlatformEarly() {
# if !SANITIZER_GO && SANITIZER_IOS
uptr max_vm = GetMaxUserVirtualAddress() + 1;
- if (max_vm != HiAppMemEnd()) {
- Printf("ThreadSanitizer: unsupported vm address limit %p, expected %p.\n",
- (void *)max_vm, (void *)HiAppMemEnd());
+ if (max_vm < HiAppMemEnd()) {
+ Report(
+ "ThreadSanitizer: Unsupported virtual memory layout:\n\tVM address "
+ "limit = %p\n\tExpected %p.\n",
+ (void*)max_vm, (void*)HiAppMemEnd());
+ Die();
+ }
+  // In some configurations, max_vm is expanded, but much of this space is
+  // already mapped. TSan will not work in such configurations.
+ if (!MemoryRangeIsAvailable(HiAppMemEnd() - 1, HiAppMemEnd())) {
+ Report(
+ "ThreadSanitizer: Unsupported virtual memory layout: Address %p is "
+ "already mapped.\n",
+ (void*)(HiAppMemEnd() - 1));
Die();
}
#endif
ThreadEventCallbacks callbacks = {
.create = ThreadCreateCallback,
+ .start = nullptr,
.terminate = ThreadTerminateCallback,
+ .destroy = nullptr,
};
InstallPthreadIntrospectionHook(callbacks);
#endif
#ifndef TSAN_REPORT_H
#define TSAN_REPORT_H
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
#include "sanitizer_common/sanitizer_vector.h"
bool atomic;
uptr external_tag;
Vector<ReportMopMutex> mset;
+ StackTrace stack_trace;
ReportStack *stack;
ReportMop();
int fd = 0;
bool fd_closed = false;
bool suppressable = false;
+ StackID stack_id = 0;
ReportStack *stack = nullptr;
};
struct ReportThread {
Tid id;
- tid_t os_id;
+ ThreadID os_id;
bool running;
ThreadType thread_type;
char *name;
Tid parent_tid;
+ StackID stack_id;
ReportStack *stack;
+ bool suppressable;
};
struct ReportMutex {
int id;
uptr addr;
+ StackID stack_id;
ReportStack *stack;
};
+struct AddedLocationAddr {
+ uptr addr;
+ usize locs_idx;
+};
+
class ReportDesc {
public:
ReportType typ;
Vector<ReportStack*> stacks;
Vector<ReportMop*> mops;
Vector<ReportLocation*> locs;
+ Vector<AddedLocationAddr> added_location_addrs;
Vector<ReportMutex*> mutexes;
Vector<ReportThread*> threads;
Vector<Tid> unique_tids;
void __tsan_test_only_on_fork() {}
#endif
+#if SANITIZER_APPLE && !SANITIZER_GO
+// Override weak symbol from sanitizer_common
+extern void __tsan_set_in_internal_write_call(bool value) {
+ __tsan::cur_thread_init()->in_internal_write_call = value;
+}
+#endif
+
namespace __tsan {
#if !SANITIZER_GO
return false;
}
-void UnmapShadow(ThreadState *thr, uptr addr, uptr size) {
+void UnmapShadow(ThreadState* thr, uptr addr, uptr size) {
if (size == 0 || !IsValidMmapRange(addr, size))
return;
- DontNeedShadowFor(addr, size);
+  // UnmapShadow follows the semantics of mmap/munmap, so we
+  // should clear the whole shadow range, including the tail shadow,
+  // when (addr + size) % kShadowCell != 0.
+ uptr rounded_size_shadow = RoundUp(addr + size, kShadowCell) - addr;
+ DontNeedShadowFor(addr, rounded_size_shadow);
ScopedGlobalProcessor sgp;
SlotLocker locker(thr, true);
- ctx->metamap.ResetRange(thr->proc(), addr, size, true);
+ uptr rounded_size_meta = RoundUp(addr + size, kMetaShadowCell) - addr;
+ ctx->metamap.ResetRange(thr->proc(), addr, rounded_size_meta, true);
}
#endif
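// Editor's worked example for the rounding above (kShadowCell == 8, and
// kMetaShadowCell == 8 per the MoveMemory comment later in this diff):
// for addr = 0x1000, size = 0x1001,
//   rounded_size_shadow = RoundUp(0x2001, 8) - 0x1000 = 0x1008
// so the partially covered trailing cell [0x2000, 0x2008) is cleared as
// well, matching munmap semantics for the memory that backed it.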
void MapShadow(uptr addr, uptr size) {
+  // Although named MapShadow, this function's semantics are unrelated to
+  // UnmapShadow. It is currently only used for Go's lazy allocation of
+  // shadow, whose targets are program sections (e.g., bss, data, etc.).
+  // Therefore, the following assertions can guarantee that addr and size
+  // are aligned to kShadowCell and kMetaShadowCell.
+ DCHECK_EQ(addr % kShadowCell, 0);
+ DCHECK_EQ(size % kShadowCell, 0);
+ DCHECK_EQ(addr % kMetaShadowCell, 0);
+ DCHECK_EQ(size % kMetaShadowCell, 0);
+
// Ensure thead registry lock held, so as to synchronize
// with DoReset, which also access the mapped_shadow_* ctxt fields.
ThreadRegistryLock lock0(&ctx->thread_registry);
static uptr mapped_meta_end = 0;
uptr meta_begin = (uptr)MemToMeta(addr);
uptr meta_end = (uptr)MemToMeta(addr + size);
+ // Windows wants 64K alignment.
meta_begin = RoundDownTo(meta_begin, 64 << 10);
meta_end = RoundUpTo(meta_end, 64 << 10);
if (!data_mapped) {
Die();
} else {
// Mapping continuous heap.
- // Windows wants 64K alignment.
- meta_begin = RoundDownTo(meta_begin, 64 << 10);
- meta_end = RoundUpTo(meta_end, 64 << 10);
CHECK_GT(meta_end, mapped_meta_end);
if (meta_begin < mapped_meta_end)
meta_begin = mapped_meta_end;
thr->ignore_reads_and_writes++;
atomic_store_relaxed(&thr->in_signal_handler, 0);
#endif
- PrintCurrentStackSlow(StackTrace::GetCurrentPc());
+ PrintCurrentStack(StackTrace::GetCurrentPc(),
+ common_flags()->fast_unwind_on_fatal);
}
bool is_initialized;
+// Symbolization indirectly calls dl_iterate_phdr. If a CHECK() fails early on
+// (prior to the dl_iterate_phdr interceptor setup) and triggers
+// symbolization, the process will segfault.
+// dl_iterate_phdr is not intercepted for Android.
+bool ready_to_symbolize = SANITIZER_ANDROID;
+
void Initialize(ThreadState *thr) {
// Thread safe because done before all threads exist.
if (is_initialized)
ThreadIgnoreBegin(thr, pc);
ThreadIgnoreSyncBegin(thr, pc);
}
+
+# if SANITIZER_APPLE && !SANITIZER_GO
+ // This flag can have inheritance disabled - we are the child so act
+ // accordingly
+ if (flags()->lock_during_write == kNoLockDuringWritesCurrentProcess)
+ flags()->lock_during_write = kLockDuringAllWrites;
+# endif
}
#endif
namespace __tsan {
+extern bool ready_to_symbolize;
+
#if !SANITIZER_GO
struct MapUnmapCallback;
# if defined(__mips64) || defined(__aarch64__) || defined(__loongarch__) || \
defined(__powerpc__) || SANITIZER_RISCV64
struct AP32 {
- static const uptr kSpaceBeg = 0;
+ static const uptr kSpaceBeg = SANITIZER_MMAP_BEGIN;
static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
static const uptr kMetadataSize = 0;
typedef __sanitizer::CompactSizeClassMap SizeClassMap;
uptr sp;
int int_signal_send;
bool in_blocking_func;
+ uptr oldset_stack_size;
uptr in_signal_handler;
uptr *shadow_stack_pos;
};
const ReportDesc *current_report;
+#if SANITIZER_APPLE && !SANITIZER_GO
+ bool in_internal_write_call;
+#endif
+
explicit ThreadState(Tid tid);
};
void AddSleep(StackID stack_id);
void SetCount(int count);
void SetSigNum(int sig);
+ void SymbolizeStackElems(void);
const ReportDesc *GetReport() const;
void ReportRace(ThreadState *thr, RawShadow *shadow_mem, Shadow cur, Shadow old,
AccessType typ);
-bool OutputReport(ThreadState *thr, const ScopedReport &srep);
+bool OutputReport(ThreadState *thr, ScopedReport &srep);
bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace);
bool IsExpectedReport(uptr addr, uptr size);
StackID CurrentStackId(ThreadState *thr, uptr pc);
ReportStack *SymbolizeStackId(StackID stack_id);
void PrintCurrentStack(ThreadState *thr, uptr pc);
-void PrintCurrentStackSlow(uptr pc); // uses libunwind
+void PrintCurrentStack(uptr pc, bool fast); // may use libunwind
MBlock *JavaHeapBlock(uptr addr, uptr *start);
void Initialize(ThreadState *thr);
void ThreadIgnoreSyncEnd(ThreadState *thr);
Tid ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached);
-void ThreadStart(ThreadState *thr, Tid tid, tid_t os_id,
+void ThreadStart(ThreadState *thr, Tid tid, ThreadID os_id,
ThreadType thread_type);
void ThreadFinish(ThreadState *thr);
Tid ThreadConsumeTid(ThreadState *thr, uptr pc, uptr uid);
#include "sanitizer_common/sanitizer_asm.h"
#include "builtins/assembly.h"
-#if !defined(__APPLE__)
-.section .text
-#else
-.section __TEXT,__text
+TEXT_SECTION
+#if defined(__APPLE__)
.align 3
#endif
NO_EXEC_STACK_DIRECTIVE
-GNU_PROPERTY_BTI_PAC
+GNU_PROPERTY_BTI_PAC_GCS
#endif
ALWAYS_INLINE USED void MemoryAccess(ThreadState* thr, uptr pc, uptr addr,
uptr size, AccessType typ) {
+#if SANITIZER_APPLE && !SANITIZER_GO
+ // Swift symbolizer can be intercepted and deadlock without this
+ if (thr->in_symbolizer)
+ return;
+#endif
RawShadow* shadow_mem = MemToShadow(addr);
UNUSED char memBuf[4][64];
DPrintf2("#%d: Access: %d@%d %p/%zd typ=0x%x {%s, %s, %s, %s}\n", thr->tid,
}
void ShadowSet(RawShadow* p, RawShadow* end, RawShadow v) {
- DCHECK_LE(p, end);
+ DCHECK_LT(p, end);
DCHECK(IsShadowMem(p));
- DCHECK(IsShadowMem(end));
+ DCHECK(IsShadowMem(end - 1));
UNUSED const uptr kAlign = kShadowCnt * kShadowSize;
DCHECK_EQ(reinterpret_cast<uptr>(p) % kAlign, 0);
DCHECK_EQ(reinterpret_cast<uptr>(end) % kAlign, 0);
RawShadow* mid1 =
Min(end, reinterpret_cast<RawShadow*>(RoundUp(
reinterpret_cast<uptr>(begin) + kPageSize / 2, kPageSize)));
+  // begin must be < mid1
ShadowSet(begin, mid1, val);
// Reset middle part.
RawShadow* mid2 = RoundDown(end, kPageSize);
Die();
}
// Set the ending.
- ShadowSet(mid2, end, val);
+ if (mid2 < end)
+ ShadowSet(mid2, end, val);
+ else
+ DCHECK_EQ(mid2, end);
}
void MemoryResetRange(ThreadState* thr, uptr pc, uptr addr, uptr size) {
RawShadow* shadow_mem = MemToShadow(addr);
DPrintf2("#%d: MemoryAccessRange: @%p %p size=%d is_read=%d\n", thr->tid,
(void*)pc, (void*)addr, (int)size, is_read);
-
+ DCHECK_NE(size, 0);
#if SANITIZER_DEBUG
if (!IsAppMem(addr)) {
Printf("Access to non app mem start: %p\n", (void*)addr);
DCHECK(IsAppMem(addr + size - 1));
}
if (!IsShadowMem(shadow_mem)) {
- Printf("Bad shadow start addr: %p (%p)\n", shadow_mem, (void*)addr);
+ Printf("Bad shadow start addr: %p (%p)\n", (void*)shadow_mem, (void*)addr);
DCHECK(IsShadowMem(shadow_mem));
}
- RawShadow* shadow_mem_end = reinterpret_cast<RawShadow*>(
- reinterpret_cast<uptr>(shadow_mem) + size * kShadowMultiplier - 1);
- if (!IsShadowMem(shadow_mem_end)) {
- Printf("Bad shadow end addr: %p (%p)\n", shadow_mem_end,
+ uptr rounded_size =
+ (RoundUpTo(addr + size, kShadowCell) - RoundDownTo(addr, kShadowCell));
+ RawShadow* shadow_mem_end =
+ shadow_mem + rounded_size / kShadowCell * kShadowCnt;
+ if (!IsShadowMem(shadow_mem_end - 1)) {
+ Printf("Bad shadow end addr: %p (%p)\n", (void*)(shadow_mem_end - 1),
(void*)(addr + size - 1));
Printf(
- "Shadow start addr (ok): %p (%p); size: 0x%zx; kShadowMultiplier: "
- "%zx\n",
- shadow_mem, (void*)addr, size, kShadowMultiplier);
- DCHECK(IsShadowMem(shadow_mem_end));
+ "Shadow start addr (ok): %p (%p); size: 0x%zx; rounded_size: 0x%zx; "
+ "kShadowMultiplier: %zx\n",
+ (void*)shadow_mem, (void*)addr, size, rounded_size, kShadowMultiplier);
+ DCHECK(IsShadowMem(shadow_mem_end - 1));
}
#endif
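// Editor's worked example for the bound computation above (assuming
// kShadowCell == 8, kShadowCnt == 4): addr = 0x7f4, size = 0x10 touches the
// cells [0x7f0, 0x808), so
//   rounded_size = RoundUpTo(0x804, 8) - RoundDownTo(0x7f4, 8) = 0x18
//   shadow slots = 0x18 / 8 * 4 = 12
// and the check now probes the last valid slot (shadow_mem_end - 1) rather
// than the one-past-the-end pointer.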
#include "sanitizer_common/sanitizer_asm.h"
+.att_syntax
+
#if !defined(__APPLE__)
.section .text
#else
//===----------------------------------------------------------------------===//
#include <sanitizer_common/sanitizer_deadlock_detector_interface.h>
+#include <sanitizer_common/sanitizer_placement_new.h>
#include <sanitizer_common/sanitizer_stackdepot.h>
-#include "tsan_rtl.h"
#include "tsan_flags.h"
-#include "tsan_sync.h"
+#include "tsan_platform.h"
#include "tsan_report.h"
+#include "tsan_rtl.h"
#include "tsan_symbolize.h"
-#include "tsan_platform.h"
+#include "tsan_sync.h"
namespace __tsan {
return;
if (!ShouldReport(thr, typ))
return;
- ThreadRegistryLock l(&ctx->thread_registry);
- ScopedReport rep(typ);
- rep.AddMutex(addr, creation_stack_id);
- VarSizeStackTrace trace;
- ObtainCurrentStack(thr, pc, &trace);
- rep.AddStack(trace, true);
- rep.AddLocation(addr, 1);
- OutputReport(thr, rep);
+ // Use alloca, because malloc during signal handling deadlocks
+ ScopedReport *rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
+  // Take a new scope, as Apple platforms require the locks below to be
+  // released before symbolizing in order to avoid a deadlock
+ {
+ ThreadRegistryLock l(&ctx->thread_registry);
+ new (rep) ScopedReport(typ);
+ rep->AddMutex(addr, creation_stack_id);
+ VarSizeStackTrace trace;
+ ObtainCurrentStack(thr, pc, &trace);
+ rep->AddStack(trace, true);
+ rep->AddLocation(addr, 1);
+#if SANITIZER_APPLE
+ } // Close this scope to release the locks
+#endif
+ OutputReport(thr, *rep);
+
+ // Need to manually destroy this because we used placement new to allocate
+ rep->~ScopedReport();
+#if !SANITIZER_APPLE
+ }
+#endif
}
static void RecordMutexLock(ThreadState *thr, uptr pc, uptr addr,
void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
if (r == 0 || !ShouldReport(thr, ReportTypeDeadlock))
return;
- ThreadRegistryLock l(&ctx->thread_registry);
- ScopedReport rep(ReportTypeDeadlock);
- for (int i = 0; i < r->n; i++) {
- rep.AddMutex(r->loop[i].mtx_ctx0, r->loop[i].stk[0]);
- rep.AddUniqueTid((int)r->loop[i].thr_ctx);
- rep.AddThread((int)r->loop[i].thr_ctx);
- }
- uptr dummy_pc = 0x42;
- for (int i = 0; i < r->n; i++) {
- for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) {
- u32 stk = r->loop[i].stk[j];
- if (stk && stk != kInvalidStackID) {
- rep.AddStack(StackDepotGet(stk), true);
- } else {
- // Sometimes we fail to extract the stack trace (FIXME: investigate),
- // but we should still produce some stack trace in the report.
- rep.AddStack(StackTrace(&dummy_pc, 1), true);
+ // Use alloca, because malloc during signal handling deadlocks
+ ScopedReport *rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
+  // Take a new scope, as Apple platforms require the locks below to be
+  // released before symbolizing in order to avoid a deadlock
+ {
+ ThreadRegistryLock l(&ctx->thread_registry);
+ new (rep) ScopedReport(ReportTypeDeadlock);
+ for (int i = 0; i < r->n; i++) {
+ rep->AddMutex(r->loop[i].mtx_ctx0, r->loop[i].stk[0]);
+ rep->AddUniqueTid((int)r->loop[i].thr_ctx);
+ rep->AddThread((int)r->loop[i].thr_ctx);
+ }
+ uptr dummy_pc = 0x42;
+ for (int i = 0; i < r->n; i++) {
+ for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) {
+ u32 stk = r->loop[i].stk[j];
+ StackTrace stack;
+ if (stk && stk != kInvalidStackID) {
+ stack = StackDepotGet(stk);
+ } else {
+ // Sometimes we fail to extract the stack trace (FIXME: investigate),
+ // but we should still produce some stack trace in the report.
+ stack = StackTrace(&dummy_pc, 1);
+ }
+ rep->AddStack(stack, true);
}
}
+#if SANITIZER_APPLE
+ } // Close this scope to release the locks
+#endif
+ OutputReport(thr, *rep);
+
+ // Need to manually destroy this because we used placement new to allocate
+ rep->~ScopedReport();
+#if !SANITIZER_APPLE
}
- OutputReport(thr, rep);
+#endif
}
void ReportDestroyLocked(ThreadState *thr, uptr pc, uptr addr,
FastState last_lock, StackID creation_stack_id) {
- // We need to lock the slot during RestoreStack because it protects
- // the slot journal.
- Lock slot_lock(&ctx->slots[static_cast<uptr>(last_lock.sid())].mtx);
- ThreadRegistryLock l0(&ctx->thread_registry);
- Lock slots_lock(&ctx->slot_mtx);
- ScopedReport rep(ReportTypeMutexDestroyLocked);
- rep.AddMutex(addr, creation_stack_id);
- VarSizeStackTrace trace;
- ObtainCurrentStack(thr, pc, &trace);
- rep.AddStack(trace, true);
-
- Tid tid;
- DynamicMutexSet mset;
- uptr tag;
- if (!RestoreStack(EventType::kLock, last_lock.sid(), last_lock.epoch(), addr,
- 0, kAccessWrite, &tid, &trace, mset, &tag))
- return;
- rep.AddStack(trace, true);
- rep.AddLocation(addr, 1);
- OutputReport(thr, rep);
+ // Use alloca, because malloc during signal handling deadlocks
+ ScopedReport *rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
+  // Take a new scope, as Apple platforms require the locks below to be
+  // released before symbolizing in order to avoid a deadlock
+ {
+ // We need to lock the slot during RestoreStack because it protects
+ // the slot journal.
+ Lock slot_lock(&ctx->slots[static_cast<uptr>(last_lock.sid())].mtx);
+ ThreadRegistryLock l0(&ctx->thread_registry);
+ Lock slots_lock(&ctx->slot_mtx);
+ new (rep) ScopedReport(ReportTypeMutexDestroyLocked);
+ rep->AddMutex(addr, creation_stack_id);
+ VarSizeStackTrace trace;
+ ObtainCurrentStack(thr, pc, &trace);
+ rep->AddStack(trace, true);
+
+ Tid tid;
+ DynamicMutexSet mset;
+ uptr tag;
+ if (!RestoreStack(EventType::kLock, last_lock.sid(), last_lock.epoch(),
+ addr, 0, kAccessWrite, &tid, &trace, mset, &tag))
+ return;
+ rep->AddStack(trace, true);
+ rep->AddLocation(addr, 1);
+#if SANITIZER_APPLE
+ } // Close this scope to release the locks
+#endif
+ OutputReport(thr, *rep);
+
+ // Need to manually destroy this because we used placement new to allocate
+ rep->~ScopedReport();
+#if !SANITIZER_APPLE
+ }
+#endif
}
} // namespace __tsan
#include "tsan_ppc_regs.h"
- .machine altivec
.section .text
.hidden __tsan_setjmp
.globl _setjmp
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "tsan_defs.h"
#include "tsan_fd.h"
#include "tsan_flags.h"
#include "tsan_mman.h"
mop->size = size;
mop->write = !(typ & kAccessRead);
mop->atomic = typ & kAccessAtomic;
- mop->stack = SymbolizeStack(stack);
mop->external_tag = external_tag;
- if (mop->stack)
- mop->stack->suppressable = true;
+ mop->stack_trace = stack;
for (uptr i = 0; i < mset->Size(); i++) {
MutexSet::Desc d = mset->Get(i);
int id = this->AddMutex(d.addr, d.stack_id);
}
}
+void ScopedReportBase::SymbolizeStackElems() {
+ // symbolize memory ops
+ for (usize i = 0, size = rep_->mops.Size(); i < size; i++) {
+ ReportMop *mop = rep_->mops[i];
+ mop->stack = SymbolizeStack(mop->stack_trace);
+ if (mop->stack)
+ mop->stack->suppressable = true;
+ }
+
+ // symbolize locations
+ for (usize i = 0, size = rep_->locs.Size(); i < size; i++) {
+ // added locations have a NULL placeholder - don't dereference them
+ if (ReportLocation *loc = rep_->locs[i])
+ loc->stack = SymbolizeStackId(loc->stack_id);
+ }
+
+ // symbolize any added locations
+ for (usize i = 0, size = rep_->added_location_addrs.Size(); i < size; i++) {
+ AddedLocationAddr *added_loc = &rep_->added_location_addrs[i];
+ if (ReportLocation *loc = SymbolizeData(added_loc->addr)) {
+ loc->suppressable = true;
+ rep_->locs[added_loc->locs_idx] = loc;
+ }
+ }
+
+ // Filter out any added location placeholders that could not be symbolized
+ usize j = 0;
+ for (usize i = 0, size = rep_->locs.Size(); i < size; i++) {
+ if (rep_->locs[i] != nullptr) {
+ rep_->locs[j] = rep_->locs[i];
+ j++;
+ }
+ }
+ rep_->locs.Resize(j);
+
+ // symbolize threads
+ for (usize i = 0, size = rep_->threads.Size(); i < size; i++) {
+ ReportThread *rt = rep_->threads[i];
+ rt->stack = SymbolizeStackId(rt->stack_id);
+ if (rt->stack)
+ rt->stack->suppressable = rt->suppressable;
+ }
+
+ // symbolize mutexes
+ for (usize i = 0, size = rep_->mutexes.Size(); i < size; i++) {
+ ReportMutex *rm = rep_->mutexes[i];
+ rm->stack = SymbolizeStackId(rm->stack_id);
+ }
+}
+
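// Editor's summary of the two-phase flow introduced here, as a sketch
// (names from the patch; control flow simplified):
//
//   // Phase 1, under registry/slot locks:
//   rep->AddLocation(addr, size);  // records {addr, idx} and a nullptr slot
//   // Phase 2, on Apple after the locks are dropped:
//   OutputReport(thr, *rep);       // calls SymbolizeStackElems() first
//
// Placeholders that SymbolizeData() cannot resolve are compacted away above,
// so printers never see a null ReportLocation.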
void ScopedReportBase::AddUniqueTid(Tid unique_tid) {
rep_->unique_tids.PushBack(unique_tid);
}
rt->name = internal_strdup(tctx->name);
rt->parent_tid = tctx->parent_tid;
rt->thread_type = tctx->thread_type;
- rt->stack = 0;
- rt->stack = SymbolizeStackId(tctx->creation_stack_id);
- if (rt->stack)
- rt->stack->suppressable = suppressable;
+ rt->stack_id = tctx->creation_stack_id;
+ rt->suppressable = suppressable;
}
#if !SANITIZER_GO
rep_->mutexes.PushBack(rm);
rm->id = rep_->mutexes.Size() - 1;
rm->addr = addr;
- rm->stack = SymbolizeStackId(creation_stack_id);
+ rm->stack_id = creation_stack_id;
return rm->id;
}
loc->fd_closed = closed;
loc->fd = fd;
loc->tid = creat_tid;
- loc->stack = SymbolizeStackId(creat_stack);
+ loc->stack_id = creat_stack;
rep_->locs.PushBack(loc);
AddThread(creat_tid);
return;
loc->heap_chunk_size = b->siz;
loc->external_tag = b->tag;
loc->tid = b->tid;
- loc->stack = SymbolizeStackId(b->stk);
+ loc->stack_id = b->stk;
rep_->locs.PushBack(loc);
AddThread(b->tid);
return;
AddThread(tctx);
}
#endif
- if (ReportLocation *loc = SymbolizeData(addr)) {
- loc->suppressable = true;
- rep_->locs.PushBack(loc);
- return;
- }
+ rep_->added_location_addrs.PushBack({addr, rep_->locs.Size()});
+ rep_->locs.PushBack(nullptr);
}
#if !SANITIZER_GO
return false;
}
-bool OutputReport(ThreadState *thr, const ScopedReport &srep) {
+bool OutputReport(ThreadState *thr, ScopedReport &srep) {
// These should have been checked in ShouldReport.
// It's too late to check them here, we have already taken locks.
CHECK(flags()->report_bugs);
CHECK(!thr->suppress_reports);
+ srep.SymbolizeStackElems();
atomic_store_relaxed(&ctx->last_symbolize_time_ns, NanoTime());
const ReportDesc *rep = srep.GetReport();
CHECK_EQ(thr->current_report, nullptr);
DynamicMutexSet mset1;
MutexSet *mset[kMop] = {&thr->mset, mset1};
- // We need to lock the slot during RestoreStack because it protects
- // the slot journal.
- Lock slot_lock(&ctx->slots[static_cast<uptr>(s[1].sid())].mtx);
- ThreadRegistryLock l0(&ctx->thread_registry);
- Lock slots_lock(&ctx->slot_mtx);
- if (SpuriousRace(old))
- return;
- if (!RestoreStack(EventType::kAccessExt, s[1].sid(), s[1].epoch(), addr1,
- size1, typ1, &tids[1], &traces[1], mset[1], &tags[1])) {
- StoreShadow(&ctx->last_spurious_race, old.raw());
- return;
- }
+ // Use alloca, because malloc during signal handling deadlocks
+ ScopedReport *rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
+  // Take a new scope, as Apple platforms require the locks below to be
+  // released before symbolizing in order to avoid a deadlock
+ {
+ // We need to lock the slot during RestoreStack because it protects
+ // the slot journal.
+ Lock slot_lock(&ctx->slots[static_cast<uptr>(s[1].sid())].mtx);
+ ThreadRegistryLock l0(&ctx->thread_registry);
+ Lock slots_lock(&ctx->slot_mtx);
+ if (SpuriousRace(old))
+ return;
+ if (!RestoreStack(EventType::kAccessExt, s[1].sid(), s[1].epoch(), addr1,
+ size1, typ1, &tids[1], &traces[1], mset[1], &tags[1])) {
+ StoreShadow(&ctx->last_spurious_race, old.raw());
+ return;
+ }
- if (IsFiredSuppression(ctx, rep_typ, traces[1]))
- return;
+ if (IsFiredSuppression(ctx, rep_typ, traces[1]))
+ return;
- if (HandleRacyStacks(thr, traces))
- return;
+ if (HandleRacyStacks(thr, traces))
+ return;
- // If any of the accesses has a tag, treat this as an "external" race.
- uptr tag = kExternalTagNone;
- for (uptr i = 0; i < kMop; i++) {
- if (tags[i] != kExternalTagNone) {
- rep_typ = ReportTypeExternalRace;
- tag = tags[i];
- break;
+ // If any of the accesses has a tag, treat this as an "external" race.
+ uptr tag = kExternalTagNone;
+ for (uptr i = 0; i < kMop; i++) {
+ if (tags[i] != kExternalTagNone) {
+ rep_typ = ReportTypeExternalRace;
+ tag = tags[i];
+ break;
+ }
}
- }
- ScopedReport rep(rep_typ, tag);
- for (uptr i = 0; i < kMop; i++)
- rep.AddMemoryAccess(addr, tags[i], s[i], tids[i], traces[i], mset[i]);
+ new (rep) ScopedReport(rep_typ, tag);
+ for (uptr i = 0; i < kMop; i++)
+ rep->AddMemoryAccess(addr, tags[i], s[i], tids[i], traces[i], mset[i]);
- for (uptr i = 0; i < kMop; i++) {
- ThreadContext *tctx = static_cast<ThreadContext *>(
- ctx->thread_registry.GetThreadLocked(tids[i]));
- rep.AddThread(tctx);
- }
+ for (uptr i = 0; i < kMop; i++) {
+ ThreadContext *tctx = static_cast<ThreadContext *>(
+ ctx->thread_registry.GetThreadLocked(tids[i]));
+ rep->AddThread(tctx);
+ }
- rep.AddLocation(addr_min, addr_max - addr_min);
-
- if (flags()->print_full_thread_history) {
- const ReportDesc *rep_desc = rep.GetReport();
- for (uptr i = 0; i < rep_desc->threads.Size(); i++) {
- Tid parent_tid = rep_desc->threads[i]->parent_tid;
- if (parent_tid == kMainTid || parent_tid == kInvalidTid)
- continue;
- ThreadContext *parent_tctx = static_cast<ThreadContext *>(
- ctx->thread_registry.GetThreadLocked(parent_tid));
- rep.AddThread(parent_tctx);
+ rep->AddLocation(addr_min, addr_max - addr_min);
+
+ if (flags()->print_full_thread_history) {
+ const ReportDesc *rep_desc = rep->GetReport();
+ for (uptr i = 0; i < rep_desc->threads.Size(); i++) {
+ Tid parent_tid = rep_desc->threads[i]->parent_tid;
+ if (parent_tid == kMainTid || parent_tid == kInvalidTid)
+ continue;
+ ThreadContext *parent_tctx = static_cast<ThreadContext *>(
+ ctx->thread_registry.GetThreadLocked(parent_tid));
+ rep->AddThread(parent_tctx);
+ }
}
- }
#if !SANITIZER_GO
- if (!((typ0 | typ1) & kAccessFree) &&
- s[1].epoch() <= thr->last_sleep_clock.Get(s[1].sid()))
- rep.AddSleep(thr->last_sleep_stack_id);
+ if (!((typ0 | typ1) & kAccessFree) &&
+ s[1].epoch() <= thr->last_sleep_clock.Get(s[1].sid()))
+ rep->AddSleep(thr->last_sleep_stack_id);
+#endif
+
+#if SANITIZER_APPLE
+ } // Close this scope to release the locks
+#endif
+ OutputReport(thr, *rep);
+
+ // Need to manually destroy this because we used placement new to allocate
+ rep->~ScopedReport();
+#if !SANITIZER_APPLE
+ }
#endif
- OutputReport(thr, rep);
}
void PrintCurrentStack(ThreadState *thr, uptr pc) {
PrintStack(SymbolizeStack(trace));
}
-// Always inlining PrintCurrentStackSlow, because LocatePcInTrace assumes
+// Always inlining PrintCurrentStack, because LocatePcInTrace assumes
// __sanitizer_print_stack_trace exists in the actual unwound stack, but
-// tail-call to PrintCurrentStackSlow breaks this assumption because
+// tail-call to PrintCurrentStack breaks this assumption because
// __sanitizer_print_stack_trace disappears after tail-call.
// However, this solution is not reliable enough, please see dvyukov's comment
// http://reviews.llvm.org/D19148#406208
// Also see PR27280 comment 2 and 3 for breaking examples and analysis.
-ALWAYS_INLINE USED void PrintCurrentStackSlow(uptr pc) {
+ALWAYS_INLINE USED void PrintCurrentStack(uptr pc, bool fast) {
#if !SANITIZER_GO
uptr bp = GET_CURRENT_FRAME();
auto *ptrace = New<BufferedStackTrace>();
- ptrace->Unwind(pc, bp, nullptr, false);
+ ptrace->Unwind(pc, bp, nullptr, fast);
for (uptr i = 0; i < ptrace->size / 2; i++) {
uptr tmp = ptrace->trace_buffer[i];
ptrace->trace_buffer[i] = ptrace->trace_buffer[ptrace->size - i - 1];
ptrace->trace_buffer[ptrace->size - i - 1] = tmp;
}
- PrintStack(SymbolizeStack(*ptrace));
+
+ if (ready_to_symbolize) {
+ PrintStack(SymbolizeStack(*ptrace));
+ } else {
+ Printf(
+ "WARNING: PrintCurrentStack() has been called too early, before "
+ "symbolization is possible. Printing unsymbolized stack trace:\n");
+ for (unsigned int i = 0; i < ptrace->size; i++)
+ Printf(" #%u: 0x%zx\n", i, ptrace->trace[i]);
+ }
#endif
}
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_print_stack_trace() {
- PrintCurrentStackSlow(StackTrace::GetCurrentPc());
+ PrintCurrentStack(StackTrace::GetCurrentPc(), false);
}
} // extern "C"
#if !SANITIZER_GO
if (!ShouldReport(thr, ReportTypeThreadLeak))
return;
- ThreadRegistryLock l(&ctx->thread_registry);
Vector<ThreadLeak> leaks;
- ctx->thread_registry.RunCallbackForEachThreadLocked(CollectThreadLeaks,
- &leaks);
+ {
+ ThreadRegistryLock l(&ctx->thread_registry);
+ ctx->thread_registry.RunCallbackForEachThreadLocked(CollectThreadLeaks,
+ &leaks);
+ }
+
for (uptr i = 0; i < leaks.Size(); i++) {
- ScopedReport rep(ReportTypeThreadLeak);
- rep.AddThread(leaks[i].tctx, true);
- rep.SetCount(leaks[i].count);
- OutputReport(thr, rep);
+ // Use alloca, because malloc during signal handling deadlocks
+ ScopedReport *rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
+    // Take a new scope, as Apple platforms require the locks below to be
+    // released before symbolizing in order to avoid a deadlock
+ {
+ ThreadRegistryLock l(&ctx->thread_registry);
+ new (rep) ScopedReport(ReportTypeThreadLeak);
+ rep->AddThread(leaks[i].tctx, true);
+ rep->SetCount(leaks[i].count);
+# if SANITIZER_APPLE
+ } // Close this scope to release the locks
+# endif
+ OutputReport(thr, *rep);
+
+ // Need to manually destroy this because we used placement new to allocate
+ rep->~ScopedReport();
+# if !SANITIZER_APPLE
+ }
+# endif
}
#endif
}
uptr tls_size;
};
-void ThreadStart(ThreadState *thr, Tid tid, tid_t os_id,
+void ThreadStart(ThreadState *thr, Tid tid, ThreadID os_id,
ThreadType thread_type) {
ctx->thread_registry.StartThread(tid, os_id, thread_type, thr);
if (!thr->ignore_sync) {
}
#endif
-#if !SANITIZER_GO
+#if !SANITIZER_GO && !SANITIZER_ANDROID
// Don't imitate stack/TLS writes for the main thread,
// because its initialization is synchronized with all
// subsequent threads anyway.
+  // Because thr is created by MmapOrDie, the thr object
+  // is not in TLS; the pointer to it is stored in the
+  // TLS_SLOT_SANITIZER slot. So skip this check on
+  // Android.
if (tid != kMainTid) {
if (stk_addr && stk_size) {
const uptr pc = StackTrace::GetNextInstructionPc(
// there are no concurrent accesses to the regions (e.g. stop-the-world).
CHECK_NE(src, dst);
CHECK_NE(sz, 0);
+
+ // The current MoveMemory implementation behaves incorrectly when src, dst,
+ // and sz are not aligned to kMetaShadowCell.
+ // For example, with kMetaShadowCell == 8:
+ // - src = 4: unexpectedly clears the metadata for the range [0, 4).
+ // - src = 16, dst = 4, size = 8: A sync variable for addr = 20, which should
+ // be moved to the metadata for address 8, is incorrectly moved to the
+ // metadata for address 0 instead.
+ // - src = 0, sz = 4: fails to move the tail metadata.
+  // Therefore, the following assertions are needed.
+ DCHECK_EQ(src % kMetaShadowCell, 0);
+ DCHECK_EQ(dst % kMetaShadowCell, 0);
+ DCHECK_EQ(sz % kMetaShadowCell, 0);
+
uptr diff = dst - src;
- u32 *src_meta = MemToMeta(src);
- u32 *dst_meta = MemToMeta(dst);
- u32 *src_meta_end = MemToMeta(src + sz);
- uptr inc = 1;
- if (dst > src) {
+ u32 *src_meta, *dst_meta, *src_meta_end;
+ uptr inc;
+ if (dst < src) {
+ src_meta = MemToMeta(src);
+ dst_meta = MemToMeta(dst);
+ src_meta_end = MemToMeta(src + sz);
+ inc = 1;
+ } else {
src_meta = MemToMeta(src + sz) - 1;
dst_meta = MemToMeta(dst + sz) - 1;
src_meta_end = MemToMeta(src) - 1;
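// Editor's note: the direction choice mirrors memmove, copying forward when
// dst < src and backward otherwise, so overlapping source cells are read
// before they are overwritten. The same rule in isolation:
//
//   void MoveCellsSketch(u32 *dst, u32 *src, uptr n) {
//     if (dst < src)
//       for (uptr i = 0; i != n; i++) dst[i] = src[i];          // forward
//     else
//       for (uptr i = n; i != 0; i--) dst[i - 1] = src[i - 1];  // backward
//   }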
UBSAN_CHECK(InvalidShiftBase, "invalid-shift-base", "shift-base")
UBSAN_CHECK(InvalidShiftExponent, "invalid-shift-exponent", "shift-exponent")
UBSAN_CHECK(OutOfBoundsIndex, "out-of-bounds-index", "bounds")
+UBSAN_CHECK(LocalOutOfBounds, "local-out-of-bounds", "local-bounds")
UBSAN_CHECK(UnreachableCall, "unreachable-call", "unreachable")
UBSAN_CHECK(MissingReturn, "missing-return", "return")
UBSAN_CHECK(NonPositiveVLAIndex, "non-positive-vla-index", "vla-bound")
{
CommonFlags cf;
cf.CopyFrom(*common_flags());
- cf.print_summary = false;
cf.external_symbolizer_path = GetFlag("UBSAN_SYMBOLIZER_PATH");
OverrideCommonFlags(cf);
}
Die();
}
+static void handleLocalOutOfBoundsImpl(ReportOptions Opts) {
+ // FIXME: Pass more diagnostic info.
+ SymbolizedStackHolder CallerLoc;
+ CallerLoc.reset(getCallerLocation(Opts.pc));
+ Location Loc;
+ Loc = CallerLoc;
+ ErrorType ET = ErrorType::LocalOutOfBounds;
+ ScopedReport R(Opts, Loc, ET);
+ Diag(Loc, DL_Error, ET, "access out of bounds");
+}
+
+void __ubsan::__ubsan_handle_local_out_of_bounds() {
+ GET_REPORT_OPTIONS(false);
+ handleLocalOutOfBoundsImpl(Opts);
+}
+
+void __ubsan::__ubsan_handle_local_out_of_bounds_abort() {
+ GET_REPORT_OPTIONS(true);
+ handleLocalOutOfBoundsImpl(Opts);
+ Die();
+}
+
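// Editor's usage sketch: code that can reach this handler when compiled with
// -fsanitize=local-bounds (the check name registered above):
//
//   int local_oob(int i) {
//     int buf[4] = {0, 1, 2, 3};
//     return buf[i];  // i >= 4 at runtime -> __ubsan_handle_local_out_of_bounds
//   }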
static void handleBuiltinUnreachableImpl(UnreachableData *Data,
ReportOptions Opts) {
ErrorType ET = ErrorType::UnreachableCall;
namespace __ubsan {
-#ifdef UBSAN_CAN_USE_CXXABI
-
#ifdef _WIN32
-
extern "C" void __ubsan_handle_cfi_bad_type_default(CFICheckFailData *Data,
ValueHandle Vtable,
bool ValidVtable,
}
WIN_WEAK_ALIAS(__ubsan_handle_cfi_bad_type, __ubsan_handle_cfi_bad_type_default)
-#else
-SANITIZER_WEAK_ATTRIBUTE
-#endif
void __ubsan_handle_cfi_bad_type(CFICheckFailData *Data, ValueHandle Vtable,
bool ValidVtable, ReportOptions Opts);
-
#else
+SANITIZER_WEAK_ATTRIBUTE
void __ubsan_handle_cfi_bad_type(CFICheckFailData *Data, ValueHandle Vtable,
bool ValidVtable, ReportOptions Opts) {
Die();
}
#endif
-} // namespace __ubsan
-
-void __ubsan::__ubsan_handle_cfi_bad_icall(CFIBadIcallData *CallData,
- ValueHandle Function) {
- GET_REPORT_OPTIONS(false);
- CFICheckFailData Data = {CFITCK_ICall, CallData->Loc, CallData->Type};
- handleCFIBadIcall(&Data, Function, Opts);
-}
-
-void __ubsan::__ubsan_handle_cfi_bad_icall_abort(CFIBadIcallData *CallData,
- ValueHandle Function) {
- GET_REPORT_OPTIONS(true);
- CFICheckFailData Data = {CFITCK_ICall, CallData->Loc, CallData->Type};
- handleCFIBadIcall(&Data, Function, Opts);
- Die();
-}
+} // namespace __ubsan
void __ubsan::__ubsan_handle_cfi_check_fail(CFICheckFailData *Data,
ValueHandle Value,
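The non-Windows branch above uses the usual weak-default idiom: the Die()ing weak definition survives linking only when no strong definition (such as the one built where C++ ABI support is available) is present. A generic, self-contained sketch with hypothetical names:

#include <cstdio>
#include <cstdlib>

// Weak fallback: the linker keeps it only if no strong definition of the
// same symbol is provided by another translation unit.
__attribute__((weak)) void HandleBadType() {
  std::fprintf(stderr, "no type-hashing support linked in\n");
  std::abort();
}

int main() { HandleBadType(); }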
/// \brief Handle an array index out of bounds error.
RECOVERABLE(out_of_bounds, OutOfBoundsData *Data, ValueHandle Index)
+/// \brief Handle a local object out-of-bounds access error.
+RECOVERABLE(local_out_of_bounds)
+
struct UnreachableData {
SourceLocation Loc;
};
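For reference, the zero-argument RECOVERABLE form above should expand to declarations matching the handler definitions earlier in this change; a sketch of the expected expansion (the real macro may differ in detail):

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __ubsan_handle_local_out_of_bounds();
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __ubsan_handle_local_out_of_bounds_abort();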
CFITCK_VMFCall,
};
-struct CFIBadIcallData {
- SourceLocation Loc;
- const TypeDescriptor &Type;
-};
-
struct CFICheckFailData {
CFITypeCheckKind CheckKind;
SourceLocation Loc;
const TypeDescriptor &Type;
};
-/// \brief Handle control flow integrity failure for indirect function calls.
-RECOVERABLE(cfi_bad_icall, CFIBadIcallData *Data, ValueHandle Function)
-
/// \brief Handle control flow integrity failures.
RECOVERABLE(cfi_check_fail, CFICheckFailData *Data, ValueHandle Function,
uptr VtableIsValid)
Diag(Loc, DL_Note, ET, "check failed in %0, vtable located in %1")
<< SrcModule << DstModule;
}
-
-static bool handleFunctionTypeMismatch(FunctionTypeMismatchData *Data,
- ValueHandle Function,
- ValueHandle calleeRTTI,
- ValueHandle fnRTTI, ReportOptions Opts) {
- if (checkTypeInfoEquality(reinterpret_cast<void *>(calleeRTTI),
- reinterpret_cast<void *>(fnRTTI)))
- return false;
-
- SourceLocation CallLoc = Data->Loc.acquire();
- ErrorType ET = ErrorType::FunctionTypeMismatch;
-
- if (ignoreReport(CallLoc, Opts, ET))
- return true;
-
- ScopedReport R(Opts, CallLoc, ET);
-
- SymbolizedStackHolder FLoc(getSymbolizedLocation(Function));
- const char *FName = FLoc.get()->info.function;
- if (!FName)
- FName = "(unknown)";
-
- Diag(CallLoc, DL_Error, ET,
- "call to function %0 through pointer to incorrect function type %1")
- << FName << Data->Type;
- Diag(FLoc, DL_Note, ET, "%0 defined here") << FName;
- return true;
-}
-
-void __ubsan_handle_function_type_mismatch_v1(FunctionTypeMismatchData *Data,
- ValueHandle Function,
- ValueHandle calleeRTTI,
- ValueHandle fnRTTI) {
- GET_REPORT_OPTIONS(false);
- handleFunctionTypeMismatch(Data, Function, calleeRTTI, fnRTTI, Opts);
-}
-
-void __ubsan_handle_function_type_mismatch_v1_abort(
- FunctionTypeMismatchData *Data, ValueHandle Function,
- ValueHandle calleeRTTI, ValueHandle fnRTTI) {
- GET_REPORT_OPTIONS(true);
- if (handleFunctionTypeMismatch(Data, Function, calleeRTTI, fnRTTI, Opts))
- Die();
-}
} // namespace __ubsan
#endif // CAN_SANITIZE_UB
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __ubsan_handle_dynamic_type_cache_miss_abort(
DynamicTypeCacheMissData *Data, ValueHandle Pointer, ValueHandle Hash);
-
-struct FunctionTypeMismatchData;
-
-extern "C" SANITIZER_INTERFACE_ATTRIBUTE void
-__ubsan_handle_function_type_mismatch_v1(FunctionTypeMismatchData *Data,
- ValueHandle Val,
- ValueHandle calleeRTTI,
- ValueHandle fnRTTI);
-extern "C" SANITIZER_INTERFACE_ATTRIBUTE void
-__ubsan_handle_function_type_mismatch_v1_abort(FunctionTypeMismatchData *Data,
- ValueHandle Val,
- ValueHandle calleeRTTI,
- ValueHandle fnRTTI);
}
#endif // UBSAN_HANDLERS_CXX_H
#include "ubsan_init.h"
#include "ubsan_signals_standalone.h"
+#if SANITIZER_FUCHSIA
+namespace __sanitizer {
+// UBSan doesn't need to do anything else special in the startup hook.
+void EarlySanitizerInit() {}
+} // namespace __sanitizer
+#endif // SANITIZER_FUCHSIA
+
namespace __ubsan {
class UbsanStandaloneInitializer {
INTERFACE_FUNCTION(__ubsan_handle_dynamic_type_cache_miss_abort)
INTERFACE_FUNCTION(__ubsan_handle_float_cast_overflow)
INTERFACE_FUNCTION(__ubsan_handle_float_cast_overflow_abort)
-INTERFACE_FUNCTION(__ubsan_handle_function_type_mismatch_v1)
-INTERFACE_FUNCTION(__ubsan_handle_function_type_mismatch_v1_abort)
INTERFACE_FUNCTION(__ubsan_handle_function_type_mismatch)
INTERFACE_FUNCTION(__ubsan_handle_function_type_mismatch_abort)
INTERFACE_FUNCTION(__ubsan_handle_implicit_conversion)
+INTERFACE_FUNCTION(__ubsan_handle_local_out_of_bounds)
+INTERFACE_FUNCTION(__ubsan_handle_local_out_of_bounds_abort)
INTERFACE_FUNCTION(__ubsan_handle_nullability_return_v1_abort)
INTERFACE_FUNCTION(__ubsan_handle_out_of_bounds)
INTERFACE_FUNCTION(__ubsan_handle_out_of_bounds_abort)
INTERFACE_FUNCTION(__ubsan_handle_pointer_overflow)
INTERFACE_FUNCTION(__ubsan_handle_pointer_overflow_abort)
INTERFACE_FUNCTION(__ubsan_handle_shift_out_of_bounds)
#ifndef UBSAN_PLATFORM_H
#define UBSAN_PLATFORM_H
-#ifndef CAN_SANITIZE_UB
// Other platforms should be easy to add, and probably work as-is.
#if defined(__linux__) || defined(__FreeBSD__) || defined(__APPLE__) || \
defined(__NetBSD__) || defined(__DragonFly__) || \
(defined(__sun__) && defined(__svr4__)) || defined(_WIN32) || \
- defined(__Fuchsia__)
+ defined(__Fuchsia__) || defined(__HAIKU__)
#define CAN_SANITIZE_UB 1
#else
# define CAN_SANITIZE_UB 0
#endif
-#endif //CAN_SANITIZE_UB
#endif
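UBSan translation units consume this macro as a whole-file guard, as the handler file earlier in this change does; for example:

#include "ubsan_platform.h"
#if CAN_SANITIZE_UB
// UBSan runtime code goes here; it compiles away on unsupported platforms.
#endif  // CAN_SANITIZE_UB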
unsigned getIntegerBitCount() const {
DCHECK(isIntegerTy());
- if (isSignedBitIntTy())
- return *reinterpret_cast<const u32 *>(getBitIntBitCountPointer());
- else
+ if (isSignedBitIntTy()) {
+ u32 BitCountValue;
+ internal_memcpy(&BitCountValue, getBitIntBitCountPointer(),
+ sizeof(BitCountValue));
+ return BitCountValue;
+ } else
return getIntegerBitWidth();
}
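The internal_memcpy above is the portable way to load a u32 through a possibly misaligned pointer; the reinterpret_cast dereference it replaces is undefined behavior and a real fault on strict-alignment targets. The same pattern in isolation:

#include <cstdint>
#include <cstring>

// Safe unaligned load: compilers turn this into a single load where the
// target allows it, and into byte loads where it does not.
static uint32_t LoadU32(const void *p) {
  uint32_t v;
  std::memcpy(&v, p, sizeof(v));
  return v;
}

int main() {
  unsigned char buf[8] = {1, 0, 0, 0, 2, 0, 0, 0};
  // Misaligned read at buf + 1; expected value assumes little-endian.
  return LoadU32(buf + 1) == 0x02000000u ? 0 : 1;
}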