From: Jakub Jelinek Date: Tue, 2 Dec 2025 12:03:28 +0000 (+0100) Subject: libsanitizer: Merge from upstream (8723fe5606de6dfb344afacd667c20f55bb2f5e0) X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=8bf309c5a17ac471e24099ac1cdb0b2343cb87b0;p=thirdparty%2Fgcc.git libsanitizer: Merge from upstream (8723fe5606de6dfb344afacd667c20f55bb2f5e0) * MERGE: Merge from master. --- diff --git a/libsanitizer/MERGE b/libsanitizer/MERGE index a223feef2f54..a42116a0b4fe 100644 --- a/libsanitizer/MERGE +++ b/libsanitizer/MERGE @@ -1,4 +1,4 @@ -61a6439f35b6de28ff4aff4450d6fca970292fd5 +8723fe5606de6dfb344afacd667c20f55bb2f5e0 The first line of this file holds the git revision number of the last merge done from the master library sources. diff --git a/libsanitizer/asan/asan_activation.cpp b/libsanitizer/asan/asan_activation.cpp index 1757838600ca..5796a7cb06ef 100644 --- a/libsanitizer/asan/asan_activation.cpp +++ b/libsanitizer/asan/asan_activation.cpp @@ -58,7 +58,7 @@ static struct AsanDeactivatedFlags { cf.verbosity = Verbosity(); cf.help = false; // this is activation-specific help - // Check if activation flags need to be overriden. + // Check if activation flags need to be overridden. 
if (const char *env = GetEnv("ASAN_ACTIVATION_OPTIONS")) { parser.ParseString(env); } diff --git a/libsanitizer/asan/asan_allocator.cpp b/libsanitizer/asan/asan_allocator.cpp index 9e66f77217ec..752ba9ab32c7 100644 --- a/libsanitizer/asan/asan_allocator.cpp +++ b/libsanitizer/asan/asan_allocator.cpp @@ -21,6 +21,7 @@ #include "asan_poisoning.h" #include "asan_report.h" #include "asan_stack.h" +#include "asan_suppressions.h" #include "asan_thread.h" #include "lsan/lsan_common.h" #include "sanitizer_common/sanitizer_allocator_checks.h" @@ -423,10 +424,15 @@ struct Allocator { PoisonShadow(chunk, allocated_size, kAsanHeapLeftRedzoneMagic); } - void ReInitialize(const AllocatorOptions &options) { + // Apply provided AllocatorOptions to an Allocator + void ApplyOptions(const AllocatorOptions &options) { SetAllocatorMayReturnNull(options.may_return_null); allocator.SetReleaseToOSIntervalMs(options.release_to_os_interval_ms); SharedInitCode(options); + } + + void ReInitialize(const AllocatorOptions &options) { + ApplyOptions(options); // Poison all existing allocation's redzones. if (CanPoisonMemory()) { @@ -541,6 +547,7 @@ struct Allocator { ComputeUserRequestedAlignmentLog(alignment); if (alignment < min_alignment) alignment = min_alignment; + bool upgraded_from_zero = false; if (size == 0) { // We'd be happy to avoid allocating memory for zero-size requests, but // some programs/tests depend on this behavior and assume that malloc @@ -549,6 +556,7 @@ struct Allocator { // consecutive "new" calls must be different even if the allocated size // is zero. size = 1; + upgraded_from_zero = true; } CHECK(IsPowerOfTwo(alignment)); uptr rz_log = ComputeRZLog(size); @@ -631,6 +639,10 @@ struct Allocator { *shadow = fl.poison_partial ? 
(size & (ASAN_SHADOW_GRANULARITY - 1)) : 0; } + if (upgraded_from_zero) + PoisonShadow(user_beg, ASAN_SHADOW_GRANULARITY, + kAsanHeapLeftRedzoneMagic); + AsanStats &thread_stats = GetCurrentThreadStats(); thread_stats.mallocs++; thread_stats.malloced += size; @@ -732,7 +744,8 @@ struct Allocator { if (!AtomicallySetQuarantineFlagIfAllocated(m, ptr, stack)) return; if (m->alloc_type != alloc_type) { - if (atomic_load(&alloc_dealloc_mismatch, memory_order_acquire)) { + if (atomic_load(&alloc_dealloc_mismatch, memory_order_acquire) && + !IsAllocDeallocMismatchSuppressed(stack)) { ReportAllocTypeMismatch((uptr)ptr, stack, (AllocType)m->alloc_type, (AllocType)alloc_type); } @@ -975,6 +988,11 @@ void ReInitializeAllocator(const AllocatorOptions &options) { instance.ReInitialize(options); } +// Apply provided AllocatorOptions to an Allocator +void ApplyAllocatorOptions(const AllocatorOptions &options) { + instance.ApplyOptions(options); +} + void GetAllocatorOptions(AllocatorOptions *options) { instance.GetOptions(options); } @@ -995,13 +1013,8 @@ void PrintInternalAllocatorStats() { instance.PrintStats(); } -void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) { - instance.Deallocate(ptr, 0, 0, stack, alloc_type); -} - -void asan_delete(void *ptr, uptr size, uptr alignment, - BufferedStackTrace *stack, AllocType alloc_type) { - instance.Deallocate(ptr, size, alignment, stack, alloc_type); +void asan_free(void *ptr, BufferedStackTrace *stack) { + instance.Deallocate(ptr, 0, 0, stack, FROM_MALLOC); } void *asan_malloc(uptr size, BufferedStackTrace *stack) { @@ -1056,8 +1069,7 @@ void *asan_pvalloc(uptr size, BufferedStackTrace *stack) { instance.Allocate(size, PageSize, stack, FROM_MALLOC, true)); } -void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack, - AllocType alloc_type) { +void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack) { if (UNLIKELY(!IsPowerOfTwo(alignment))) { errno = errno_EINVAL; if 
(AllocatorMayReturnNull()) @@ -1065,7 +1077,7 @@ void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack, ReportInvalidAllocationAlignment(alignment, stack); } return SetErrnoOnNull( - instance.Allocate(size, alignment, stack, alloc_type, true)); + instance.Allocate(size, alignment, stack, FROM_MALLOC, true)); } void *asan_aligned_alloc(uptr alignment, uptr size, BufferedStackTrace *stack) { @@ -1105,6 +1117,99 @@ uptr asan_malloc_usable_size(const void *ptr, uptr pc, uptr bp) { return usable_size; } +namespace { + +void *asan_new(uptr size, BufferedStackTrace *stack, bool array) { + return SetErrnoOnNull( + instance.Allocate(size, 0, stack, array ? FROM_NEW_BR : FROM_NEW, true)); +} + +void *asan_new_aligned(uptr size, uptr alignment, BufferedStackTrace *stack, + bool array) { + if (UNLIKELY(alignment == 0 || !IsPowerOfTwo(alignment))) { + errno = errno_EINVAL; + if (AllocatorMayReturnNull()) + return nullptr; + ReportInvalidAllocationAlignment(alignment, stack); + } + return SetErrnoOnNull(instance.Allocate( + size, alignment, stack, array ? FROM_NEW_BR : FROM_NEW, true)); +} + +void asan_delete(void *ptr, BufferedStackTrace *stack, bool array) { + instance.Deallocate(ptr, 0, 0, stack, array ? FROM_NEW_BR : FROM_NEW); +} + +void asan_delete_aligned(void *ptr, uptr alignment, BufferedStackTrace *stack, + bool array) { + instance.Deallocate(ptr, 0, alignment, stack, array ? FROM_NEW_BR : FROM_NEW); +} + +void asan_delete_sized(void *ptr, uptr size, BufferedStackTrace *stack, + bool array) { + instance.Deallocate(ptr, size, 0, stack, array ? FROM_NEW_BR : FROM_NEW); +} + +void asan_delete_sized_aligned(void *ptr, uptr size, uptr alignment, + BufferedStackTrace *stack, bool array) { + instance.Deallocate(ptr, size, alignment, stack, + array ? 
FROM_NEW_BR : FROM_NEW); +} + +} // namespace + +void *asan_new(uptr size, BufferedStackTrace *stack) { + return asan_new(size, stack, /*array=*/false); +} + +void *asan_new_aligned(uptr size, uptr alignment, BufferedStackTrace *stack) { + return asan_new_aligned(size, alignment, stack, /*array=*/false); +} + +void *asan_new_array(uptr size, BufferedStackTrace *stack) { + return asan_new(size, stack, /*array=*/true); +} + +void *asan_new_array_aligned(uptr size, uptr alignment, + BufferedStackTrace *stack) { + return asan_new_aligned(size, alignment, stack, /*array=*/true); +} + +void asan_delete(void *ptr, BufferedStackTrace *stack) { + asan_delete(ptr, stack, /*array=*/false); +} + +void asan_delete_aligned(void *ptr, uptr alignment, BufferedStackTrace *stack) { + asan_delete_aligned(ptr, alignment, stack, /*array=*/false); +} + +void asan_delete_sized(void *ptr, uptr size, BufferedStackTrace *stack) { + asan_delete_sized(ptr, size, stack, /*array=*/false); +} + +void asan_delete_sized_aligned(void *ptr, uptr size, uptr alignment, + BufferedStackTrace *stack) { + asan_delete_sized_aligned(ptr, size, alignment, stack, /*array=*/false); +} + +void asan_delete_array(void *ptr, BufferedStackTrace *stack) { + asan_delete(ptr, stack, /*array=*/true); +} + +void asan_delete_array_aligned(void *ptr, uptr alignment, + BufferedStackTrace *stack) { + asan_delete_aligned(ptr, alignment, stack, /*array=*/true); +} + +void asan_delete_array_sized(void *ptr, uptr size, BufferedStackTrace *stack) { + asan_delete_sized(ptr, size, stack, /*array=*/true); +} + +void asan_delete_array_sized_aligned(void *ptr, uptr size, uptr alignment, + BufferedStackTrace *stack) { + asan_delete_sized_aligned(ptr, size, alignment, stack, /*array=*/true); +} + uptr asan_mz_size(const void *ptr) { return instance.AllocationSize(reinterpret_cast(ptr)); } diff --git a/libsanitizer/asan/asan_allocator.h b/libsanitizer/asan/asan_allocator.h index db8dc3bebfc6..fdf456473fb0 100644 --- 
a/libsanitizer/asan/asan_allocator.h +++ b/libsanitizer/asan/asan_allocator.h @@ -47,6 +47,7 @@ struct AllocatorOptions { void InitializeAllocator(const AllocatorOptions &options); void ReInitializeAllocator(const AllocatorOptions &options); void GetAllocatorOptions(AllocatorOptions *options); +void ApplyAllocatorOptions(const AllocatorOptions &options); class AsanChunkView { public: @@ -238,7 +239,7 @@ using PrimaryAllocator = PrimaryAllocatorASVT; typedef CompactSizeClassMap SizeClassMap; template struct AP32 { - static const uptr kSpaceBeg = 0; + static const uptr kSpaceBeg = SANITIZER_MMAP_BEGIN; static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE; static const uptr kMetadataSize = 0; typedef __asan::SizeClassMap SizeClassMap; @@ -269,11 +270,8 @@ struct AsanThreadLocalMallocStorage { AsanThreadLocalMallocStorage() {} }; -void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack, - AllocType alloc_type); -void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type); -void asan_delete(void *ptr, uptr size, uptr alignment, - BufferedStackTrace *stack, AllocType alloc_type); +void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack); +void asan_free(void *ptr, BufferedStackTrace *stack); void *asan_malloc(uptr size, BufferedStackTrace *stack); void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack); @@ -288,6 +286,23 @@ int asan_posix_memalign(void **memptr, uptr alignment, uptr size, BufferedStackTrace *stack); uptr asan_malloc_usable_size(const void *ptr, uptr pc, uptr bp); +void *asan_new(uptr size, BufferedStackTrace *stack); +void *asan_new_aligned(uptr size, uptr alignment, BufferedStackTrace *stack); +void *asan_new_array(uptr size, BufferedStackTrace *stack); +void *asan_new_array_aligned(uptr size, uptr alignment, + BufferedStackTrace *stack); +void asan_delete(void *ptr, BufferedStackTrace *stack); +void asan_delete_aligned(void *ptr, uptr alignment, BufferedStackTrace *stack); +void 
asan_delete_sized(void *ptr, uptr size, BufferedStackTrace *stack); +void asan_delete_sized_aligned(void *ptr, uptr size, uptr alignment, + BufferedStackTrace *stack); +void asan_delete_array(void *ptr, BufferedStackTrace *stack); +void asan_delete_array_aligned(void *ptr, uptr alignment, + BufferedStackTrace *stack); +void asan_delete_array_sized(void *ptr, uptr size, BufferedStackTrace *stack); +void asan_delete_array_sized_aligned(void *ptr, uptr size, uptr alignment, + BufferedStackTrace *stack); + uptr asan_mz_size(const void *ptr); void asan_mz_force_lock(); void asan_mz_force_unlock(); diff --git a/libsanitizer/asan/asan_descriptions.cpp b/libsanitizer/asan/asan_descriptions.cpp index caec79313e22..18c2a6c571c1 100644 --- a/libsanitizer/asan/asan_descriptions.cpp +++ b/libsanitizer/asan/asan_descriptions.cpp @@ -45,6 +45,9 @@ void DescribeThread(AsanThreadContext *context) { } context->announced = true; + InternalScopedString str; + str.AppendF("Thread %s", AsanThreadIdAndName(context).c_str()); + AsanThreadContext *parent_context = context->parent_tid == kInvalidTid ? nullptr @@ -52,12 +55,7 @@ void DescribeThread(AsanThreadContext *context) { // `context->parent_tid` may point to reused slot. Check `unique_id` which // is always smaller for the parent, always greater for a new user. 
- if (context->unique_id <= parent_context->unique_id) - parent_context = nullptr; - - InternalScopedString str; - str.AppendF("Thread %s", AsanThreadIdAndName(context).c_str()); - if (!parent_context) { + if (!parent_context || context->unique_id <= parent_context->unique_id) { str.Append(" created by unknown thread\n"); Printf("%s", str.data()); return; @@ -213,10 +211,10 @@ bool GetStackAddressInformation(uptr addr, uptr access_size, descr->frame_pc = access.frame_pc; descr->frame_descr = access.frame_descr; -#if SANITIZER_PPC64V1 - // On PowerPC64 ELFv1, the address of a function actually points to a - // three-doubleword data structure with the first field containing - // the address of the function's code. +#if SANITIZER_PPC64V1 || SANITIZER_AIX + // On PowerPC64 ELFv1 or AIX, the address of a function actually points to a + // three-doubleword (or three-word for 32-bit AIX) data structure with + // the first field containing the address of the function's code. descr->frame_pc = *reinterpret_cast(descr->frame_pc); #endif descr->frame_pc += 16; @@ -446,6 +444,18 @@ AddressDescription::AddressDescription(uptr addr, uptr access_size, data.kind = kAddressKindShadow; return; } + + // Check global first. On AIX, some global data defined in shared libraries + // are put to the STACK region for unknown reasons. Check global first can + // workaround this issue. + // TODO: Look into whether there's a different solution to this problem. 
+#if SANITIZER_AIX + if (GetGlobalAddressInformation(addr, access_size, &data.global)) { + data.kind = kAddressKindGlobal; + return; + } +#endif + if (GetHeapAddressInformation(addr, access_size, &data.heap)) { data.kind = kAddressKindHeap; return; @@ -463,10 +473,14 @@ AddressDescription::AddressDescription(uptr addr, uptr access_size, return; } +// GetGlobalAddressInformation is called earlier on AIX due to a workaround +#if !SANITIZER_AIX if (GetGlobalAddressInformation(addr, access_size, &data.global)) { data.kind = kAddressKindGlobal; return; } +#endif + data.kind = kAddressKindWild; data.wild.addr = addr; data.wild.access_size = access_size; diff --git a/libsanitizer/asan/asan_errors.cpp b/libsanitizer/asan/asan_errors.cpp index 4f112cc5d1bc..2a207cd06cca 100644 --- a/libsanitizer/asan/asan_errors.cpp +++ b/libsanitizer/asan/asan_errors.cpp @@ -12,8 +12,10 @@ //===----------------------------------------------------------------------===// #include "asan_errors.h" + #include "asan_descriptions.h" #include "asan_mapping.h" +#include "asan_poisoning.h" #include "asan_report.h" #include "asan_stack.h" #include "sanitizer_common/sanitizer_stackdepot.h" @@ -600,6 +602,44 @@ static void PrintShadowMemoryForAddress(uptr addr) { Printf("%s", str.data()); } +static void CheckPoisonRecords(uptr addr) { + if (!AddrIsInMem(addr)) + return; + + u8 *shadow_addr = (u8 *)MemToShadow(addr); + // If we are in the partial right redzone, look at the next shadow byte. 
+ if (*shadow_addr > 0 && *shadow_addr < 128) + shadow_addr++; + u8 shadow_val = *shadow_addr; + + if (shadow_val != kAsanUserPoisonedMemoryMagic) + return; + + Printf("\n"); + + if (flags()->poison_history_size <= 0) { + Printf( + "NOTE: the stack trace above identifies the code that *accessed* " + "the poisoned memory.\n"); + Printf( + "To identify the code that *poisoned* the memory, try the " + "experimental setting ASAN_OPTIONS=poison_history_size=.\n"); + return; + } + + PoisonRecord record; + if (FindPoisonRecord(addr, record)) { + StackTrace poison_stack = StackDepotGet(record.stack_id); + if (poison_stack.size > 0) { + Printf("Memory was manually poisoned by thread T%u:\n", record.thread_id); + poison_stack.Print(); + } + } else { + Printf("ERROR: no matching poison tracking record found.\n"); + Printf("Try a larger value for ASAN_OPTIONS=poison_history_size=.\n"); + } +} + void ErrorGeneric::Print() { Decorator d; Printf("%s", d.Error()); @@ -623,6 +663,9 @@ void ErrorGeneric::Print() { PrintContainerOverflowHint(); ReportErrorSummary(bug_descr, &stack); PrintShadowMemoryForAddress(addr); + + // This is an experimental flag, hence we don't make a special handler. 
+ CheckPoisonRecords(addr); } } // namespace __asan diff --git a/libsanitizer/asan/asan_errors.h b/libsanitizer/asan/asan_errors.h index b3af655e6663..f339b35d2a76 100644 --- a/libsanitizer/asan/asan_errors.h +++ b/libsanitizer/asan/asan_errors.h @@ -362,7 +362,7 @@ struct ErrorBadParamsToCopyContiguousContainerAnnotations : ErrorBase { u32 tid, BufferedStackTrace *stack_, uptr old_storage_beg_, uptr old_storage_end_, uptr new_storage_beg_, uptr new_storage_end_) : ErrorBase(tid, 10, - "bad-__sanitizer_annotate_double_ended_contiguous_container"), + "bad-__sanitizer_copy_contiguous_container_annotations"), stack(stack_), old_storage_beg(old_storage_beg_), old_storage_end(old_storage_end_), diff --git a/libsanitizer/asan/asan_fake_stack.cpp b/libsanitizer/asan/asan_fake_stack.cpp index 7443ff166984..d3fa953f3100 100644 --- a/libsanitizer/asan/asan_fake_stack.cpp +++ b/libsanitizer/asan/asan_fake_stack.cpp @@ -27,7 +27,8 @@ static const u64 kAllocaRedzoneMask = 31UL; // For small size classes inline PoisonShadow for better performance. ALWAYS_INLINE void SetShadow(uptr ptr, uptr size, uptr class_id, u64 magic) { - u64 *shadow = reinterpret_cast(MemToShadow(ptr)); + CHECK(AddrIsAlignedByGranularity(ptr + size)); + u64* shadow = reinterpret_cast(MemToShadow(ptr)); if (ASAN_SHADOW_SCALE == 3 && class_id <= 6) { // This code expects ASAN_SHADOW_SCALE=3. for (uptr i = 0; i < (((uptr)1) << class_id); i++) { @@ -39,27 +40,48 @@ ALWAYS_INLINE void SetShadow(uptr ptr, uptr size, uptr class_id, u64 magic) { // The size class is too big, it's cheaper to poison only size bytes. 
PoisonShadow(ptr, size, static_cast(magic)); } + + if (magic == 0) { + uptr redzone_size = FakeStack::BytesInSizeClass(class_id) - size; + PoisonShadow(ptr + size, redzone_size, kAsanStackRightRedzoneMagic); + } } -FakeStack *FakeStack::Create(uptr stack_size_log) { +FakeStack* FakeStack::Create(uptr stack_size_log) { static uptr kMinStackSizeLog = 16; static uptr kMaxStackSizeLog = FIRST_32_SECOND_64(24, 28); if (stack_size_log < kMinStackSizeLog) stack_size_log = kMinStackSizeLog; if (stack_size_log > kMaxStackSizeLog) stack_size_log = kMaxStackSizeLog; + CHECK_LE(kMaxStackFrameSizeLog, stack_size_log); uptr size = RequiredSize(stack_size_log); - FakeStack *res = reinterpret_cast( - flags()->uar_noreserve ? MmapNoReserveOrDie(size, "FakeStack") - : MmapOrDie(size, "FakeStack")); + uptr padded_size = size + kMaxStackFrameSize; + void* true_res = reinterpret_cast( + flags()->uar_noreserve ? MmapNoReserveOrDie(padded_size, "FakeStack") + : MmapOrDie(padded_size, "FakeStack")); + // GetFrame() requires the property that + // (res + kFlagsOffset + SizeRequiredForFlags(stack_size_log)) is aligned to + // kMaxStackFrameSize. + // We didn't use MmapAlignedOrDieOnFatalError, because it requires that the + // *size* is a power of 2, which is an overly strong condition. 
+ static_assert(alignof(FakeStack) <= kMaxStackFrameSize); + FakeStack* res = reinterpret_cast( + RoundUpTo( + (uptr)true_res + kFlagsOffset + SizeRequiredForFlags(stack_size_log), + kMaxStackFrameSize) - + kFlagsOffset - SizeRequiredForFlags(stack_size_log)); + res->true_start = true_res; res->stack_size_log_ = stack_size_log; - u8 *p = reinterpret_cast(res); + u8* p = reinterpret_cast(res); VReport(1, "T%d: FakeStack created: %p -- %p stack_size_log: %zd; " - "mmapped %zdK, noreserve=%d \n", - GetCurrentTidOrInvalid(), (void *)p, - (void *)(p + FakeStack::RequiredSize(stack_size_log)), stack_size_log, - size >> 10, flags()->uar_noreserve); + "mmapped %zdK, noreserve=%d, true_start: %p, start of first frame: " + "0x%zx\n", + GetCurrentTidOrInvalid(), (void*)p, + (void*)(p + FakeStack::RequiredSize(stack_size_log)), stack_size_log, + size >> 10, flags()->uar_noreserve, res->true_start, + res->GetFrame(stack_size_log, /*class_id*/ 0, /*pos*/ 0)); return res; } @@ -73,8 +95,10 @@ void FakeStack::Destroy(int tid) { Report("T%d: FakeStack destroyed: %s\n", tid, str.data()); } uptr size = RequiredSize(stack_size_log_); - FlushUnneededASanShadowMemory(reinterpret_cast(this), size); - UnmapOrDie(this, size); + uptr padded_size = size + kMaxStackFrameSize; + FlushUnneededASanShadowMemory(reinterpret_cast(true_start), + padded_size); + UnmapOrDie(true_start, padded_size); } void FakeStack::PoisonAll(u8 magic) { @@ -85,14 +109,14 @@ void FakeStack::PoisonAll(u8 magic) { #if !defined(_MSC_VER) || defined(__clang__) ALWAYS_INLINE USED #endif -FakeFrame *FakeStack::Allocate(uptr stack_size_log, uptr class_id, - uptr real_stack) { + FakeFrame* FakeStack::Allocate(uptr stack_size_log, uptr class_id, + uptr real_stack) { CHECK_LT(class_id, kNumberOfSizeClasses); if (needs_gc_) GC(real_stack); - uptr &hint_position = hint_position_[class_id]; + uptr& hint_position = hint_position_[class_id]; const int num_iter = NumberOfFrames(stack_size_log, class_id); - u8 *flags = 
GetFlags(stack_size_log, class_id); + u8* flags = GetFlags(stack_size_log, class_id); for (int i = 0; i < num_iter; i++) { uptr pos = ModuloNumberOfFrames(stack_size_log, class_id, hint_position++); // This part is tricky. On one hand, checking and setting flags[pos] @@ -102,22 +126,24 @@ FakeFrame *FakeStack::Allocate(uptr stack_size_log, uptr class_id, // and so will not touch this particular byte. So, it is safe to do this // with regular non-atomic load and store (at least I was not able to make // this code crash). - if (flags[pos]) continue; + if (flags[pos]) + continue; flags[pos] = 1; - FakeFrame *res = reinterpret_cast( - GetFrame(stack_size_log, class_id, pos)); + FakeFrame* res = + reinterpret_cast(GetFrame(stack_size_log, class_id, pos)); res->real_stack = real_stack; *SavedFlagPtr(reinterpret_cast(res), class_id) = &flags[pos]; return res; } - return nullptr; // We are out of fake stack. + return nullptr; // We are out of fake stack. } -uptr FakeStack::AddrIsInFakeStack(uptr ptr, uptr *frame_beg, uptr *frame_end) { +uptr FakeStack::AddrIsInFakeStack(uptr ptr, uptr* frame_beg, uptr* frame_end) { uptr stack_size_log = this->stack_size_log(); uptr beg = reinterpret_cast(GetFrame(stack_size_log, 0, 0)); uptr end = reinterpret_cast(this) + RequiredSize(stack_size_log); - if (ptr < beg || ptr >= end) return 0; + if (ptr < beg || ptr >= end) + return 0; uptr class_id = (ptr - beg) >> stack_size_log; uptr base = beg + (class_id << stack_size_log); CHECK_LE(base, ptr); @@ -129,9 +155,7 @@ uptr FakeStack::AddrIsInFakeStack(uptr ptr, uptr *frame_beg, uptr *frame_end) { return res; } -void FakeStack::HandleNoReturn() { - needs_gc_ = true; -} +void FakeStack::HandleNoReturn() { needs_gc_ = true; } // Hack: The statement below is not true if we take into account sigaltstack or // makecontext. 
It should be possible to make GC to discard wrong stack frame if @@ -146,7 +170,7 @@ void FakeStack::HandleNoReturn() { // We do it based on their 'real_stack' values -- everything that is lower // than the current real_stack is garbage. NOINLINE void FakeStack::GC(uptr real_stack) { - AsanThread *curr_thread = GetCurrentThread(); + AsanThread* curr_thread = GetCurrentThread(); if (!curr_thread) return; // Try again when we have a thread. auto top = curr_thread->stack_top(); @@ -155,12 +179,13 @@ NOINLINE void FakeStack::GC(uptr real_stack) { return; // Not the default stack. for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++) { - u8 *flags = GetFlags(stack_size_log(), class_id); + u8* flags = GetFlags(stack_size_log(), class_id); for (uptr i = 0, n = NumberOfFrames(stack_size_log(), class_id); i < n; i++) { - if (flags[i] == 0) continue; // not allocated. - FakeFrame *ff = reinterpret_cast( - GetFrame(stack_size_log(), class_id, i)); + if (flags[i] == 0) + continue; // not allocated. + FakeFrame* ff = + reinterpret_cast(GetFrame(stack_size_log(), class_id, i)); // GC only on the default stack. if (bottom < ff->real_stack && ff->real_stack < real_stack) { flags[i] = 0; @@ -173,14 +198,15 @@ NOINLINE void FakeStack::GC(uptr real_stack) { needs_gc_ = false; } -void FakeStack::ForEachFakeFrame(RangeIteratorCallback callback, void *arg) { +void FakeStack::ForEachFakeFrame(RangeIteratorCallback callback, void* arg) { for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++) { - u8 *flags = GetFlags(stack_size_log(), class_id); + u8* flags = GetFlags(stack_size_log(), class_id); for (uptr i = 0, n = NumberOfFrames(stack_size_log(), class_id); i < n; i++) { - if (flags[i] == 0) continue; // not allocated. - FakeFrame *ff = reinterpret_cast( - GetFrame(stack_size_log(), class_id, i)); + if (flags[i] == 0) + continue; // not allocated. 
+ FakeFrame* ff = + reinterpret_cast(GetFrame(stack_size_log(), class_id, i)); uptr begin = reinterpret_cast(ff); callback(begin, begin + FakeStack::BytesInSizeClass(class_id), arg); } @@ -188,44 +214,51 @@ void FakeStack::ForEachFakeFrame(RangeIteratorCallback callback, void *arg) { } #if (SANITIZER_LINUX && !SANITIZER_ANDROID) || SANITIZER_FUCHSIA -static THREADLOCAL FakeStack *fake_stack_tls; +static THREADLOCAL FakeStack* fake_stack_tls; -FakeStack *GetTLSFakeStack() { - return fake_stack_tls; -} -void SetTLSFakeStack(FakeStack *fs) { - fake_stack_tls = fs; -} +static FakeStack* GetTLSFakeStack() { return fake_stack_tls; } +static void SetTLSFakeStack(FakeStack* fs) { fake_stack_tls = fs; } +void ResetTLSFakeStack() { fake_stack_tls = nullptr; } #else -FakeStack *GetTLSFakeStack() { return 0; } -void SetTLSFakeStack(FakeStack *fs) { } +static FakeStack* GetTLSFakeStack() { return nullptr; } +static void SetTLSFakeStack(FakeStack*) {} +void ResetTLSFakeStack() {} #endif // (SANITIZER_LINUX && !SANITIZER_ANDROID) || SANITIZER_FUCHSIA -static FakeStack *GetFakeStack() { - AsanThread *t = GetCurrentThread(); - if (!t) return nullptr; +static FakeStack* GetFakeStack() { + AsanThread* t = GetCurrentThread(); + if (!t) + return nullptr; return t->get_or_create_fake_stack(); } -static FakeStack *GetFakeStackFast() { - if (FakeStack *fs = GetTLSFakeStack()) +static FakeStack* GetFakeStackFast() { + FakeStack* fs = GetTLSFakeStack(); + if (LIKELY(fs)) return fs; if (!__asan_option_detect_stack_use_after_return) return nullptr; - return GetFakeStack(); + fs = GetFakeStack(); + if (LIKELY(fs)) + SetTLSFakeStack(fs); + return fs; } -static FakeStack *GetFakeStackFastAlways() { - if (FakeStack *fs = GetTLSFakeStack()) +static FakeStack* GetFakeStackFastAlways() { + FakeStack* fs = GetTLSFakeStack(); + if (LIKELY(fs)) return fs; - return GetFakeStack(); + fs = GetFakeStack(); + if (LIKELY(fs)) + SetTLSFakeStack(fs); + return fs; } static ALWAYS_INLINE uptr OnMalloc(uptr 
class_id, uptr size) { - FakeStack *fs = GetFakeStackFast(); + FakeStack* fs = GetFakeStackFast(); if (!fs) return 0; - FakeFrame *ff = + FakeFrame* ff = fs->Allocate(fs->stack_size_log(), class_id, GET_CURRENT_FRAME()); if (!ff) return 0; // Out of fake stack. @@ -235,10 +268,10 @@ static ALWAYS_INLINE uptr OnMalloc(uptr class_id, uptr size) { } static ALWAYS_INLINE uptr OnMallocAlways(uptr class_id, uptr size) { - FakeStack *fs = GetFakeStackFastAlways(); + FakeStack* fs = GetFakeStackFastAlways(); if (!fs) return 0; - FakeFrame *ff = + FakeFrame* ff = fs->Allocate(fs->stack_size_log(), class_id, GET_CURRENT_FRAME()); if (!ff) return 0; // Out of fake stack. @@ -252,17 +285,17 @@ static ALWAYS_INLINE void OnFree(uptr ptr, uptr class_id, uptr size) { SetShadow(ptr, size, class_id, kMagic8); } -} // namespace __asan +} // namespace __asan // ---------------------- Interface ---------------- {{{1 using namespace __asan; #define DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(class_id) \ extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr \ - __asan_stack_malloc_##class_id(uptr size) { \ + __asan_stack_malloc_##class_id(uptr size) { \ return OnMalloc(class_id, size); \ } \ extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr \ - __asan_stack_malloc_always_##class_id(uptr size) { \ + __asan_stack_malloc_always_##class_id(uptr size) { \ return OnMallocAlways(class_id, size); \ } \ extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __asan_stack_free_##class_id( \ @@ -287,21 +320,25 @@ extern "C" { // -asan-use-after-return=never, after modal UAR flag lands // (https://github.com/google/sanitizers/issues/1394) SANITIZER_INTERFACE_ATTRIBUTE -void *__asan_get_current_fake_stack() { return GetFakeStackFast(); } +void* __asan_get_current_fake_stack() { return GetFakeStackFast(); } SANITIZER_INTERFACE_ATTRIBUTE -void *__asan_addr_is_in_fake_stack(void *fake_stack, void *addr, void **beg, - void **end) { - FakeStack *fs = reinterpret_cast(fake_stack); - if (!fs) return nullptr; +void* 
__asan_addr_is_in_fake_stack(void* fake_stack, void* addr, void** beg, + void** end) { + FakeStack* fs = reinterpret_cast(fake_stack); + if (!fs) + return nullptr; uptr frame_beg, frame_end; - FakeFrame *frame = reinterpret_cast(fs->AddrIsInFakeStack( + FakeFrame* frame = reinterpret_cast(fs->AddrIsInFakeStack( reinterpret_cast(addr), &frame_beg, &frame_end)); - if (!frame) return nullptr; + if (!frame) + return nullptr; if (frame->magic != kCurrentStackFrameMagic) return nullptr; - if (beg) *beg = reinterpret_cast(frame_beg); - if (end) *end = reinterpret_cast(frame_end); + if (beg) + *beg = reinterpret_cast(frame_beg); + if (end) + *end = reinterpret_cast(frame_end); return reinterpret_cast(frame->real_stack); } @@ -320,9 +357,9 @@ void __asan_alloca_poison(uptr addr, uptr size) { SANITIZER_INTERFACE_ATTRIBUTE void __asan_allocas_unpoison(uptr top, uptr bottom) { - if ((!top) || (top > bottom)) return; - REAL(memset) - (reinterpret_cast(MemToShadow(top)), 0, - (bottom - top) / ASAN_SHADOW_GRANULARITY); + if ((!top) || (top > bottom)) + return; + REAL(memset)(reinterpret_cast(MemToShadow(top)), 0, + (bottom - top) / ASAN_SHADOW_GRANULARITY); } -} // extern "C" +} // extern "C" diff --git a/libsanitizer/asan/asan_fake_stack.h b/libsanitizer/asan/asan_fake_stack.h index 270a19816d6e..593c1373c8ff 100644 --- a/libsanitizer/asan/asan_fake_stack.h +++ b/libsanitizer/asan/asan_fake_stack.h @@ -32,12 +32,12 @@ struct FakeFrame { // is not popped but remains there for quite some time until gets used again. // So, we poison the objects on the fake stack when function returns. // It helps us find use-after-return bugs. -// // The FakeStack objects is allocated by a single mmap call and has no other // pointers. The size of the fake stack depends on the actual thread stack size // and thus can not be a constant. // stack_size is a power of two greater or equal to the thread's stack size; // we store it as its logarithm (stack_size_log). 
+// FakeStack is padded such that GetFrame() is aligned to BytesInSizeClass(). // FakeStack has kNumberOfSizeClasses (11) size classes, each size class // is a power of two, starting from 64 bytes. Each size class occupies // stack_size bytes and thus can allocate @@ -56,6 +56,9 @@ struct FakeFrame { class FakeStack { static const uptr kMinStackFrameSizeLog = 6; // Min frame is 64B. static const uptr kMaxStackFrameSizeLog = 16; // Max stack frame is 64K. + static_assert(kMaxStackFrameSizeLog >= kMinStackFrameSizeLog); + + static const u64 kMaxStackFrameSize = 1 << kMaxStackFrameSizeLog; public: static const uptr kNumberOfSizeClasses = @@ -66,7 +69,7 @@ class FakeStack { void Destroy(int tid); - // stack_size_log is at least 15 (stack_size >= 32K). + // min_uar_stack_size_log is 16 (stack_size >= 64KB) static uptr SizeRequiredForFlags(uptr stack_size_log) { return ((uptr)1) << (stack_size_log + 1 - kMinStackFrameSizeLog); } @@ -110,6 +113,28 @@ class FakeStack { } // Get frame by class_id and pos. + // Return values are guaranteed to be aligned to BytesInSizeClass(class_id), + // which is useful in combination with + // ASanStackFrameLayout::ComputeASanStackFrameLayout(). + // + // Note that alignment to 1<= kMaxStackFrameSizeLog (otherwise you + // couldn't store a single frame of that size in the entire stack) + // hence (1<(this) + kFlagsOffset + SizeRequiredForFlags(stack_size_log) + @@ -156,19 +181,21 @@ class FakeStack { private: FakeStack() { } - static const uptr kFlagsOffset = 4096; // This is were the flags begin. + static const uptr kFlagsOffset = 4096; // This is where the flags begin. // Must match the number of uses of DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID COMPILER_CHECK(kNumberOfSizeClasses == 11); static const uptr kMaxStackMallocSize = ((uptr)1) << kMaxStackFrameSizeLog; uptr hint_position_[kNumberOfSizeClasses]; uptr stack_size_log_; - // a bit is set if something was allocated from the corresponding size class. 
bool needs_gc_; + // We allocated more memory than needed to ensure the FakeStack (and, by + // extension, each of the fake stack frames) is aligned. We keep track of the + // true start so that we can unmap it. + void *true_start; }; -FakeStack *GetTLSFakeStack(); -void SetTLSFakeStack(FakeStack *fs); +void ResetTLSFakeStack(); } // namespace __asan diff --git a/libsanitizer/asan/asan_flags.cpp b/libsanitizer/asan/asan_flags.cpp index 56deb1b0d082..190a89345dd1 100644 --- a/libsanitizer/asan/asan_flags.cpp +++ b/libsanitizer/asan/asan_flags.cpp @@ -144,6 +144,7 @@ static void InitializeDefaultFlags() { DisplayHelpMessages(&asan_parser); } +// Validate flags and report incompatible configurations static void ProcessFlags() { Flags *f = flags(); @@ -217,11 +218,12 @@ void InitializeFlags() { ProcessFlags(); #if SANITIZER_WINDOWS - // On Windows, weak symbols are emulated by having the user program - // register which weak functions are defined. - // The ASAN DLL will initialize flags prior to user module initialization, - // so __asan_default_options will not point to the user definition yet. - // We still want to ensure we capture when options are passed via + // On Windows, weak symbols (such as the `__asan_default_options` function) + // are emulated by having the user program register which weak functions are + // defined. The ASAN DLL will initialize flags prior to user module + // initialization, so __asan_default_options will not point to the user + // definition yet. We still want to ensure we capture when options are passed + // via // __asan_default_options, so we add a callback to be run // when it is registered with the runtime. @@ -232,14 +234,13 @@ void InitializeFlags() { // __sanitizer_register_weak_function. 
AddRegisterWeakFunctionCallback( reinterpret_cast(__asan_default_options), []() { - FlagParser asan_parser; - - RegisterAsanFlags(&asan_parser, flags()); - RegisterCommonFlags(&asan_parser); - asan_parser.ParseString(__asan_default_options()); - - DisplayHelpMessages(&asan_parser); + // We call `InitializeDefaultFlags` again, instead of just parsing + // `__asan_default_options` directly, to ensure that flags set through + // `ASAN_OPTS` take precedence over those set through + // `__asan_default_options`. + InitializeDefaultFlags(); ProcessFlags(); + ApplyFlags(); }); # if CAN_SANITIZE_UB diff --git a/libsanitizer/asan/asan_flags.h b/libsanitizer/asan/asan_flags.h index b55c81f07d4b..6f38a62a25d8 100644 --- a/libsanitizer/asan/asan_flags.h +++ b/libsanitizer/asan/asan_flags.h @@ -19,12 +19,12 @@ // ASan flag values can be defined in four ways: // 1) initialized with default values at startup. -// 2) overriden during compilation of ASan runtime by providing +// 2) overridden during compilation of ASan runtime by providing // compile definition ASAN_DEFAULT_OPTIONS. -// 3) overriden from string returned by user-specified function +// 3) overridden from string returned by user-specified function // __asan_default_options(). -// 4) overriden from env variable ASAN_OPTIONS. -// 5) overriden during ASan activation (for now used on Android only). +// 4) overridden from env variable ASAN_OPTIONS. +// 5) overridden during ASan activation (for now used on Android only). 
namespace __asan { diff --git a/libsanitizer/asan/asan_flags.inc b/libsanitizer/asan/asan_flags.inc index fad1577d912a..32e6d3405533 100644 --- a/libsanitizer/asan/asan_flags.inc +++ b/libsanitizer/asan/asan_flags.inc @@ -116,6 +116,9 @@ ASAN_FLAG(bool, poison_partial, true, "stack buffers.") ASAN_FLAG(bool, poison_array_cookie, true, "Poison (or not) the array cookie after operator new[].") +ASAN_FLAG(int, poison_history_size, 0, + "[EXPERIMENTAL] Number of most recent memory poisoning calls for " + "which the stack traces will be recorded.") // Turn off alloc/dealloc mismatch checker on Mac and Windows for now. // https://github.com/google/sanitizers/issues/131 diff --git a/libsanitizer/asan/asan_fuchsia.cpp b/libsanitizer/asan/asan_fuchsia.cpp index 96c41e9d42ba..a9e5dad91b97 100644 --- a/libsanitizer/asan/asan_fuchsia.cpp +++ b/libsanitizer/asan/asan_fuchsia.cpp @@ -25,13 +25,18 @@ # include "asan_thread.h" # include "lsan/lsan_common.h" +namespace __sanitizer { +// ASan doesn't need to do anything else special in the startup hook. +void EarlySanitizerInit() {} +} // namespace __sanitizer + namespace __asan { -// The system already set up the shadow memory for us. -// __sanitizer::GetMaxUserVirtualAddress has already been called by -// AsanInitInternal->InitializeHighMemEnd (asan_rtl.cpp). -// Just do some additional sanity checks here. void InitializeShadowMemory() { + // Explicitly setup shadow here right beforer any of the ShadowBounds members + // are used. + InitShadowBounds(); + if (Verbosity()) PrintAddressSpaceLayout(); diff --git a/libsanitizer/asan/asan_globals.cpp b/libsanitizer/asan/asan_globals.cpp index d1794ad96e2a..c83b782cb85f 100644 --- a/libsanitizer/asan/asan_globals.cpp +++ b/libsanitizer/asan/asan_globals.cpp @@ -226,6 +226,25 @@ static void CheckODRViolationViaIndicator(const Global *g) AddGlobalToList(relevant_globals, g); } +// Check ODR violation for given global G by checking if it's already poisoned. 
+// We use this method in case compiler doesn't use private aliases for global +// variables. +static void CheckODRViolationViaPoisoning(const Global *g) + SANITIZER_REQUIRES(mu_for_globals) { + if (__asan_region_is_poisoned(g->beg, g->size_with_redzone)) { + // This check may not be enough: if the first global is much larger + // the entire redzone of the second global may be within the first global. + for (const auto &l : list_of_all_globals) { + if (g->beg == l.g->beg && + (flags()->detect_odr_violation >= 2 || g->size != l.g->size) && + !IsODRViolationSuppressed(g->name)) { + ReportODRViolation(g, FindRegistrationSite(g), l.g, + FindRegistrationSite(l.g)); + } + } + } +} + // Clang provides two different ways for global variables protection: // it can poison the global itself or its private alias. In former // case we may poison same symbol multiple times, that can help us to @@ -271,6 +290,8 @@ static void RegisterGlobal(const Global *g) SANITIZER_REQUIRES(mu_for_globals) { // where two globals with the same name are defined in different modules. 
if (UseODRIndicator(g)) CheckODRViolationViaIndicator(g); + else + CheckODRViolationViaPoisoning(g); } if (CanPoisonMemory()) PoisonRedZones(*g); diff --git a/libsanitizer/asan/asan_interceptors.cpp b/libsanitizer/asan/asan_interceptors.cpp index 0239e0cdf537..8643271e89d7 100644 --- a/libsanitizer/asan/asan_interceptors.cpp +++ b/libsanitizer/asan/asan_interceptors.cpp @@ -58,13 +58,20 @@ namespace __asan { static inline uptr MaybeRealStrnlen(const char *s, uptr maxlen) { #if SANITIZER_INTERCEPT_STRNLEN - if (REAL(strnlen)) { + if (static_cast(REAL(strnlen))) return REAL(strnlen)(s, maxlen); - } -#endif +# endif return internal_strnlen(s, maxlen); } +static inline uptr MaybeRealWcsnlen(const wchar_t* s, uptr maxlen) { +# if SANITIZER_INTERCEPT_WCSNLEN + if (static_cast(REAL(wcsnlen))) + return REAL(wcsnlen)(s, maxlen); +# endif + return internal_wcsnlen(s, maxlen); +} + void SetThreadName(const char *name) { AsanThread *t = GetCurrentThread(); if (t) @@ -365,7 +372,7 @@ INTERCEPTOR(void, makecontext, struct ucontext_t *ucp, void (*func)(), int argc, va_list ap; uptr args[64]; // We don't know a better way to forward ... into REAL function. We can - // increase args size if neccecary. + // increase args size if necessary. 
CHECK_LE(argc, ARRAY_SIZE(args)); internal_memset(args, 0, sizeof(args)); va_start(ap, argc); @@ -570,6 +577,20 @@ INTERCEPTOR(char *, strcpy, char *to, const char *from) { return REAL(strcpy)(to, from); } +INTERCEPTOR(wchar_t*, wcscpy, wchar_t* to, const wchar_t* from) { + void* ctx; + ASAN_INTERCEPTOR_ENTER(ctx, wcscpy); + if (!TryAsanInitFromRtl()) + return REAL(wcscpy)(to, from); + if (flags()->replace_str) { + uptr size = (internal_wcslen(from) + 1) * sizeof(wchar_t); + CHECK_RANGES_OVERLAP("wcscpy", to, size, from, size); + ASAN_READ_RANGE(ctx, from, size); + ASAN_WRITE_RANGE(ctx, to, size); + } + return REAL(wcscpy)(to, from); +} + // Windows doesn't always define the strdup identifier, // and when it does it's a macro defined to either _strdup // or _strdup_dbg, _strdup_dbg ends up calling _strdup, so @@ -584,6 +605,9 @@ INTERCEPTOR(char *, strcpy, char *to, const char *from) { INTERCEPTOR(char*, strdup, const char *s) { void *ctx; ASAN_INTERCEPTOR_ENTER(ctx, strdup); + // Allowing null input is Windows-specific + if (SANITIZER_WINDOWS && UNLIKELY(!s)) + return nullptr; if (UNLIKELY(!TryAsanInitFromRtl())) return internal_strdup(s); uptr length = internal_strlen(s); @@ -630,6 +654,20 @@ INTERCEPTOR(char*, strncpy, char *to, const char *from, usize size) { return REAL(strncpy)(to, from, size); } +INTERCEPTOR(wchar_t*, wcsncpy, wchar_t* to, const wchar_t* from, uptr size) { + void* ctx; + ASAN_INTERCEPTOR_ENTER(ctx, wcsncpy); + AsanInitFromRtl(); + if (flags()->replace_str) { + uptr from_size = + Min(size, MaybeRealWcsnlen(from, size) + 1) * sizeof(wchar_t); + CHECK_RANGES_OVERLAP("wcsncpy", to, from_size, from, from_size); + ASAN_READ_RANGE(ctx, from, from_size); + ASAN_WRITE_RANGE(ctx, to, size * sizeof(wchar_t)); + } + return REAL(wcsncpy)(to, from, size); +} + template static ALWAYS_INLINE auto StrtolImpl(void *ctx, Fn real, const char *nptr, char **endptr, int base) @@ -806,6 +844,11 @@ void InitializeAsanInterceptors() { ASAN_INTERCEPT_FUNC(strncat); 
ASAN_INTERCEPT_FUNC(strncpy); ASAN_INTERCEPT_FUNC(strdup); + + // Intercept wcs* functions. + ASAN_INTERCEPT_FUNC(wcscpy); + ASAN_INTERCEPT_FUNC(wcsncpy); + # if ASAN_INTERCEPT___STRDUP ASAN_INTERCEPT_FUNC(__strdup); #endif @@ -823,7 +866,7 @@ void InitializeAsanInterceptors() { ASAN_INTERCEPT_FUNC(__isoc23_strtoll); # endif - // Intecept jump-related functions. + // Intercept jump-related functions. ASAN_INTERCEPT_FUNC(longjmp); # if ASAN_INTERCEPT_SWAPCONTEXT diff --git a/libsanitizer/asan/asan_interceptors.h b/libsanitizer/asan/asan_interceptors.h index 85cde07ca7ee..2d551cfafd1f 100644 --- a/libsanitizer/asan/asan_interceptors.h +++ b/libsanitizer/asan/asan_interceptors.h @@ -71,12 +71,7 @@ void InitializePlatformInterceptors(); #if ASAN_HAS_EXCEPTIONS && !SANITIZER_SOLARIS && !SANITIZER_NETBSD && \ (!SANITIZER_WINDOWS || (defined(__MINGW32__) && defined(__i386__))) # define ASAN_INTERCEPT___CXA_THROW 1 -# if ! defined(ASAN_HAS_CXA_RETHROW_PRIMARY_EXCEPTION) \ - || ASAN_HAS_CXA_RETHROW_PRIMARY_EXCEPTION -# define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 1 -# else -# define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 0 -# endif +# define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 1 # if defined(_GLIBCXX_SJLJ_EXCEPTIONS) || (SANITIZER_IOS && defined(__arm__)) # define ASAN_INTERCEPT__UNWIND_SJLJ_RAISEEXCEPTION 1 # else @@ -134,6 +129,7 @@ DECLARE_REAL(char*, strchr, const char *str, int c) DECLARE_REAL(SIZE_T, strlen, const char *s) DECLARE_REAL(char*, strncpy, char *to, const char *from, SIZE_T size) DECLARE_REAL(SIZE_T, strnlen, const char *s, SIZE_T maxlen) +DECLARE_REAL(SIZE_T, wcsnlen, const wchar_t* s, SIZE_T maxlen) DECLARE_REAL(char*, strstr, const char *s1, const char *s2) # if !SANITIZER_APPLE diff --git a/libsanitizer/asan/asan_interceptors_memintrinsics.cpp b/libsanitizer/asan/asan_interceptors_memintrinsics.cpp index bdf328f89206..f52ae9ae8d17 100644 --- a/libsanitizer/asan/asan_interceptors_memintrinsics.cpp +++ 
b/libsanitizer/asan/asan_interceptors_memintrinsics.cpp @@ -55,8 +55,10 @@ using namespace __asan; if (LIKELY(replace_intrin_cached)) { \ ASAN_READ_RANGE(ctx, from, size); \ ASAN_WRITE_RANGE(ctx, to, size); \ + } else if (UNLIKELY(!AsanInited())) { \ + return internal_memmove(to, from, size); \ } \ - return internal_memmove(to, from, size); \ + return REAL(memmove)(to, from, size); \ } while (0) void *__asan_memcpy(void *to, const void *from, uptr size) { diff --git a/libsanitizer/asan/asan_interceptors_memintrinsics.h b/libsanitizer/asan/asan_interceptors_memintrinsics.h index 14727a5d665e..ec988cff51c5 100644 --- a/libsanitizer/asan/asan_interceptors_memintrinsics.h +++ b/libsanitizer/asan/asan_interceptors_memintrinsics.h @@ -20,6 +20,7 @@ DECLARE_REAL(void *, memcpy, void *to, const void *from, SIZE_T size) DECLARE_REAL(void *, memset, void *block, int c, SIZE_T size) +DECLARE_REAL(void *, memmove, void *to, const void *from, SIZE_T size) namespace __asan { diff --git a/libsanitizer/asan/asan_internal.h b/libsanitizer/asan/asan_internal.h index 06dfc4b17733..35c887eaf851 100644 --- a/libsanitizer/asan/asan_internal.h +++ b/libsanitizer/asan/asan_internal.h @@ -61,6 +61,7 @@ using __sanitizer::StackTrace; void AsanInitFromRtl(); bool TryAsanInitFromRtl(); +void ApplyFlags(); // asan_win.cpp void InitializePlatformExceptionHandlers(); @@ -82,6 +83,7 @@ void ReplaceSystemMalloc(); uptr FindDynamicShadowStart(); void AsanCheckDynamicRTPrereqs(); void AsanCheckIncompatibleRT(); +void TryReExecWithoutASLR(); // Unpoisons platform-specific stacks. // Returns true if all stacks have been unpoisoned. 
diff --git a/libsanitizer/asan/asan_linux.cpp b/libsanitizer/asan/asan_linux.cpp index 4cabca388ca9..d2c2a394b06c 100644 --- a/libsanitizer/asan/asan_linux.cpp +++ b/libsanitizer/asan/asan_linux.cpp @@ -13,7 +13,11 @@ #include "sanitizer_common/sanitizer_platform.h" #if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \ - SANITIZER_SOLARIS + SANITIZER_SOLARIS || SANITIZER_HAIKU + +# if SANITIZER_HAIKU +# define _DEFAULT_SOURCE +# endif # include # include @@ -22,7 +26,9 @@ # include # include # include -# include +# if !SANITIZER_HAIKU +# include +# endif # include # include # include @@ -37,10 +43,14 @@ # include "sanitizer_common/sanitizer_libc.h" # include "sanitizer_common/sanitizer_procmaps.h" -# if SANITIZER_FREEBSD +# if SANITIZER_FREEBSD || SANITIZER_HAIKU # include # endif +# if SANITIZER_LINUX +# include +# endif + # if SANITIZER_SOLARIS # include # endif @@ -50,6 +60,8 @@ # elif SANITIZER_NETBSD # include # include +# elif SANITIZER_HAIKU +extern "C" void *_DYNAMIC; # else # include # include @@ -107,6 +119,39 @@ void FlushUnneededASanShadowMemory(uptr p, uptr size) { ReleaseMemoryPagesToOS(MemToShadow(p), MemToShadow(p + size)); } +void TryReExecWithoutASLR() { +# if SANITIZER_LINUX + // ASLR personality check. + // Caution: 'personality' is sometimes forbidden by sandboxes, so only call + // this function as a last resort (when the memory mapping is incompatible + // and ASan would fail anyway). + int old_personality = personality(0xffffffff); + if (old_personality == -1) { + VReport(1, "WARNING: unable to run personality check.\n"); + return; + } + + bool aslr_on = (old_personality & ADDR_NO_RANDOMIZE) == 0; + + if (aslr_on) { + // Disable ASLR if the memory layout was incompatible. 
+ // Alternatively, we could just keep re-execing until we get lucky + // with a compatible randomized layout, but the risk is that if it's + // not an ASLR-related issue, we will be stuck in an infinite loop of + // re-execing (unless we change ReExec to pass a parameter of the + // number of retries allowed.) + VReport(1, + "WARNING: AddressSanitizer: memory layout is incompatible, " + "possibly due to high-entropy ASLR.\n" + "Re-execing with fixed virtual address space.\n" + "N.B. reducing ASLR entropy is preferable.\n"); + CHECK_NE(personality(old_personality | ADDR_NO_RANDOMIZE), -1); + + ReExec(); + } +# endif +} + # if SANITIZER_ANDROID // FIXME: should we do anything for Android? void AsanCheckDynamicRTPrereqs() {} @@ -125,6 +170,12 @@ static int FindFirstDSOCallback(struct dl_phdr_info *info, size_t size, return 0; } +# if SANITIZER_HAIKU + if (!info->dlpi_name[0] || + internal_strncmp(info->dlpi_name, "/boot/system/runtime_loader", + sizeof("/boot/system/runtime_loader") - 1) == 0) + return 0; +# endif # if SANITIZER_LINUX // Ignore vDSO. glibc versions earlier than 2.15 (and some patched // by distributors) return an empty name for the vDSO entry, so @@ -237,4 +288,4 @@ bool HandleDlopenInit() { } // namespace __asan #endif // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || - // SANITIZER_SOLARIS + // SANITIZER_SOLARIS || SANITIZER_HAIKU diff --git a/libsanitizer/asan/asan_mac.cpp b/libsanitizer/asan/asan_mac.cpp index bfc349223258..a68e362e07d4 100644 --- a/libsanitizer/asan/asan_mac.cpp +++ b/libsanitizer/asan/asan_mac.cpp @@ -55,6 +55,9 @@ uptr FindDynamicShadowStart() { GetMmapGranularity()); } +// Not used. +void TryReExecWithoutASLR() {} + // No-op. Mac does not support static linkage anyway. 
void AsanCheckDynamicRTPrereqs() {} @@ -100,6 +103,8 @@ void FlushUnneededASanShadowMemory(uptr p, uptr size) { // dispatch_after() // dispatch_group_async_f() // dispatch_group_async() +// dispatch_apply() +// dispatch_apply_f() // TODO(glider): libdispatch API contains other functions that we don't support // yet. // @@ -125,6 +130,7 @@ typedef void* dispatch_queue_t; typedef void* dispatch_source_t; typedef u64 dispatch_time_t; typedef void (*dispatch_function_t)(void *block); +typedef void (*dispatch_apply_function_t)(void *, size_t); typedef void* (*worker_t)(void *block); typedef unsigned long dispatch_mach_reason; typedef void *dispatch_mach_msg_t; @@ -144,7 +150,11 @@ typedef void (^dispatch_mach_handler_t)(dispatch_mach_reason reason, // A wrapper for the ObjC blocks used to support libdispatch. typedef struct { void *block; - dispatch_function_t func; + union { + dispatch_function_t dispatch_func; + dispatch_apply_function_t dispatch_apply_func; + static_assert(sizeof(dispatch_func) == sizeof(dispatch_apply_func)); + }; u32 parent_tid; } asan_block_context_t; @@ -172,8 +182,8 @@ void asan_dispatch_call_block_and_release(void *block) { block, (void*)pthread_self()); asan_register_worker_thread(context->parent_tid, &stack); // Call the original dispatcher for the block. 
- context->func(context->block); - asan_free(context, &stack, FROM_MALLOC); + context->dispatch_func(context->block); + asan_free(context, &stack); } } // namespace __asan @@ -188,7 +198,7 @@ asan_block_context_t *alloc_asan_context(void *ctxt, dispatch_function_t func, asan_block_context_t *asan_ctxt = (asan_block_context_t*) asan_malloc(sizeof(asan_block_context_t), stack); asan_ctxt->block = ctxt; - asan_ctxt->func = func; + asan_ctxt->dispatch_func = func; asan_ctxt->parent_tid = GetCurrentTidOrInvalid(); return asan_ctxt; } @@ -240,13 +250,34 @@ INTERCEPTOR(void, dispatch_group_async_f, dispatch_group_t group, asan_dispatch_call_block_and_release); } -#if !defined(MISSING_BLOCKS_SUPPORT) +extern "C" void asan_dispatch_apply_f_work(void *context, size_t iteration) { + GET_STACK_TRACE_THREAD; + asan_block_context_t *asan_ctxt = (asan_block_context_t *)context; + asan_register_worker_thread(asan_ctxt->parent_tid, &stack); + asan_ctxt->dispatch_apply_func(asan_ctxt->block, iteration); +} + +INTERCEPTOR(void, dispatch_apply_f, size_t iterations, dispatch_queue_t queue, + void *ctxt, dispatch_apply_function_t work) { + GET_STACK_TRACE_THREAD; + asan_block_context_t *asan_ctxt = + (asan_block_context_t *)asan_malloc(sizeof(asan_block_context_t), &stack); + asan_ctxt->block = ctxt; + asan_ctxt->dispatch_apply_func = work; + asan_ctxt->parent_tid = GetCurrentTidOrInvalid(); + REAL(dispatch_apply_f)(iterations, queue, (void *)asan_ctxt, + asan_dispatch_apply_f_work); +} + +# if !defined(MISSING_BLOCKS_SUPPORT) extern "C" { void dispatch_async(dispatch_queue_t dq, void(^work)(void)); void dispatch_group_async(dispatch_group_t dg, dispatch_queue_t dq, void(^work)(void)); void dispatch_after(dispatch_time_t when, dispatch_queue_t queue, void(^work)(void)); +void dispatch_apply(size_t iterations, dispatch_queue_t queue, + void (^block)(size_t iteration)); void dispatch_source_set_cancel_handler(dispatch_source_t ds, void(^work)(void)); void 
dispatch_source_set_event_handler(dispatch_source_t ds, void(^work)(void)); @@ -329,6 +360,20 @@ INTERCEPTOR(void *, dispatch_mach_create_f, const char *label, }); } -#endif +INTERCEPTOR(void, dispatch_apply, size_t iterations, dispatch_queue_t queue, + void (^block)(size_t iteration)) { + ENABLE_FRAME_POINTER; + int parent_tid = GetCurrentTidOrInvalid(); + + void (^asan_block)(size_t) = ^(size_t iteration) { + GET_STACK_TRACE_THREAD; + asan_register_worker_thread(parent_tid, &stack); + block(iteration); + }; + + REAL(dispatch_apply)(iterations, queue, asan_block); +} + +# endif #endif // SANITIZER_APPLE diff --git a/libsanitizer/asan/asan_malloc_linux.cpp b/libsanitizer/asan/asan_malloc_linux.cpp index 3d6b03fefab7..add57318785b 100644 --- a/libsanitizer/asan/asan_malloc_linux.cpp +++ b/libsanitizer/asan/asan_malloc_linux.cpp @@ -15,7 +15,7 @@ #include "sanitizer_common/sanitizer_platform.h" #if SANITIZER_FREEBSD || SANITIZER_FUCHSIA || SANITIZER_LINUX || \ - SANITIZER_NETBSD || SANITIZER_SOLARIS + SANITIZER_NETBSD || SANITIZER_SOLARIS || SANITIZER_HAIKU # include "asan_allocator.h" # include "asan_interceptors.h" @@ -49,7 +49,7 @@ INTERCEPTOR(void, free, void *ptr) { if (DlsymAlloc::PointerIsMine(ptr)) return DlsymAlloc::Free(ptr); GET_STACK_TRACE_FREE; - asan_free(ptr, &stack, FROM_MALLOC); + asan_free(ptr, &stack); } #if SANITIZER_INTERCEPT_CFREE @@ -57,7 +57,7 @@ INTERCEPTOR(void, cfree, void *ptr) { if (DlsymAlloc::PointerIsMine(ptr)) return DlsymAlloc::Free(ptr); GET_STACK_TRACE_FREE; - asan_free(ptr, &stack, FROM_MALLOC); + asan_free(ptr, &stack); } #endif // SANITIZER_INTERCEPT_CFREE @@ -93,12 +93,12 @@ INTERCEPTOR(void*, reallocarray, void *ptr, uptr nmemb, uptr size) { #if SANITIZER_INTERCEPT_MEMALIGN INTERCEPTOR(void*, memalign, uptr boundary, uptr size) { GET_STACK_TRACE_MALLOC; - return asan_memalign(boundary, size, &stack, FROM_MALLOC); + return asan_memalign(boundary, size, &stack); } INTERCEPTOR(void*, __libc_memalign, uptr boundary, uptr size) { 
GET_STACK_TRACE_MALLOC; - return asan_memalign(boundary, size, &stack, FROM_MALLOC); + return asan_memalign(boundary, size, &stack); } #endif // SANITIZER_INTERCEPT_MEMALIGN @@ -217,4 +217,4 @@ void ReplaceSystemMalloc() { #endif // SANITIZER_ANDROID #endif // SANITIZER_FREEBSD || SANITIZER_FUCHSIA || SANITIZER_LINUX || - // SANITIZER_NETBSD || SANITIZER_SOLARIS + // SANITIZER_NETBSD || SANITIZER_SOLARIS || SANITIZER_HAIKU diff --git a/libsanitizer/asan/asan_malloc_mac.cpp b/libsanitizer/asan/asan_malloc_mac.cpp index f25d7e190153..a442bdbbaa4d 100644 --- a/libsanitizer/asan/asan_malloc_mac.cpp +++ b/libsanitizer/asan/asan_malloc_mac.cpp @@ -31,7 +31,7 @@ using namespace __asan; # define COMMON_MALLOC_FORCE_UNLOCK() asan_mz_force_unlock() # define COMMON_MALLOC_MEMALIGN(alignment, size) \ GET_STACK_TRACE_MALLOC; \ - void *p = asan_memalign(alignment, size, &stack, FROM_MALLOC) + void *p = asan_memalign(alignment, size, &stack) # define COMMON_MALLOC_MALLOC(size) \ GET_STACK_TRACE_MALLOC; \ void *p = asan_malloc(size, &stack) @@ -46,10 +46,10 @@ using namespace __asan; int res = asan_posix_memalign(memptr, alignment, size, &stack); # define COMMON_MALLOC_VALLOC(size) \ GET_STACK_TRACE_MALLOC; \ - void *p = asan_memalign(GetPageSizeCached(), size, &stack, FROM_MALLOC); + void *p = asan_memalign(GetPageSizeCached(), size, &stack); # define COMMON_MALLOC_FREE(ptr) \ GET_STACK_TRACE_FREE; \ - asan_free(ptr, &stack, FROM_MALLOC); + asan_free(ptr, &stack); # define COMMON_MALLOC_SIZE(ptr) uptr size = asan_mz_size(ptr); # define COMMON_MALLOC_FILL_STATS(zone, stats) \ AsanMallocStats malloc_stats; \ diff --git a/libsanitizer/asan/asan_malloc_win.cpp b/libsanitizer/asan/asan_malloc_win.cpp index 3278f0721987..ea6f7dfaa08c 100644 --- a/libsanitizer/asan/asan_malloc_win.cpp +++ b/libsanitizer/asan/asan_malloc_win.cpp @@ -69,7 +69,7 @@ __declspec(noinline) size_t _msize_base(void *ptr) { return _msize(ptr); } __declspec(noinline) void free(void *ptr) { GET_STACK_TRACE_FREE; - 
return asan_free(ptr, &stack, FROM_MALLOC); + return asan_free(ptr, &stack); } __declspec(noinline) void _free_dbg(void *ptr, int) { free(ptr); } @@ -252,7 +252,7 @@ INTERCEPTOR_WINAPI(BOOL, HeapFree, HANDLE hHeap, DWORD dwFlags, LPVOID lpMem) { CHECK((HEAP_FREE_UNSUPPORTED_FLAGS & dwFlags) != 0 && "unsupported flags"); } GET_STACK_TRACE_FREE; - asan_free(lpMem, &stack, FROM_MALLOC); + asan_free(lpMem, &stack); return true; } @@ -306,7 +306,7 @@ void *SharedReAlloc(ReAllocFunction reallocFunc, SizeFunction heapSizeFunc, if (replacement_alloc) { size_t old_size = heapSizeFunc(hHeap, dwFlags, lpMem); if (old_size == ((size_t)0) - 1) { - asan_free(replacement_alloc, &stack, FROM_MALLOC); + asan_free(replacement_alloc, &stack); return nullptr; } REAL(memcpy)(replacement_alloc, lpMem, old_size); @@ -322,6 +322,22 @@ void *SharedReAlloc(ReAllocFunction reallocFunc, SizeFunction heapSizeFunc, } } + if (dwFlags & HEAP_REALLOC_IN_PLACE_ONLY) { + size_t old_usable_size = asan_malloc_usable_size(lpMem, pc, bp); + if (dwBytes == old_usable_size) { + // Nothing to change, return the current pointer. + return lpMem; + } else if (dwBytes >= old_usable_size) { + // Growing with HEAP_REALLOC_IN_PLACE_ONLY is not supported. + return nullptr; + } else { + // Shrinking with HEAP_REALLOC_IN_PLACE_ONLY is not yet supported. + // For now return the current pointer and + // leave the allocation size as it is. + return lpMem; + } + } + if (ownershipState == ASAN && !only_asan_supported_flags) { // Conversion to unsupported flags allocation, // transfer this allocation back to the original allocator. 
@@ -331,7 +347,7 @@ void *SharedReAlloc(ReAllocFunction reallocFunc, SizeFunction heapSizeFunc, old_usable_size = asan_malloc_usable_size(lpMem, pc, bp); REAL(memcpy)(replacement_alloc, lpMem, Min(dwBytes, old_usable_size)); - asan_free(lpMem, &stack, FROM_MALLOC); + asan_free(lpMem, &stack); } return replacement_alloc; } @@ -429,7 +445,7 @@ INTERCEPTOR_WINAPI(BOOL, RtlFreeHeap, HANDLE HeapHandle, DWORD Flags, return REAL(RtlFreeHeap)(HeapHandle, Flags, BaseAddress); } GET_STACK_TRACE_FREE; - asan_free(BaseAddress, &stack, FROM_MALLOC); + asan_free(BaseAddress, &stack); return true; } diff --git a/libsanitizer/asan/asan_mapping.h b/libsanitizer/asan/asan_mapping.h index 54890ca1789b..bddae9a07405 100644 --- a/libsanitizer/asan/asan_mapping.h +++ b/libsanitizer/asan/asan_mapping.h @@ -83,18 +83,29 @@ // || `[0x0000000000, 0x0d5554ffff]` || LowMem || // // Default Linux/AArch64 (39-bit VMA) mapping: -// || `[0x2000000000, 0x7fffffffff]` || highmem || -// || `[0x1400000000, 0x1fffffffff]` || highshadow || -// || `[0x1200000000, 0x13ffffffff]` || shadowgap || -// || `[0x1000000000, 0x11ffffffff]` || lowshadow || -// || `[0x0000000000, 0x0fffffffff]` || lowmem || +// TODO: this mapping is ok, but the allocator size is too large on non-Android +// AArch64 platforms (see asan_allocator.h) +// || `[0x2000000000, 0x7fffffffff]` || highmem || 384GB +// || `[0x1400000000, 0x1fffffffff]` || highshadow || 48GB +// || `[0x1200000000, 0x13ffffffff]` || shadowgap || 8GB +// || `[0x1000000000, 0x11ffffffff]` || lowshadow || 8GB +// || `[0x0000000000, 0x0fffffffff]` || lowmem || 64GB // // Default Linux/AArch64 (42-bit VMA) mapping: -// || `[0x10000000000, 0x3ffffffffff]` || highmem || -// || `[0x0a000000000, 0x0ffffffffff]` || highshadow || -// || `[0x09000000000, 0x09fffffffff]` || shadowgap || -// || `[0x08000000000, 0x08fffffffff]` || lowshadow || -// || `[0x00000000000, 0x07fffffffff]` || lowmem || +// TODO: this mapping is ok, but the allocator size is too large on non-Android 
+// AArch64 platforms (see asan_allocator.h) +// || `[0x09000000000, 0x03ffffffffff]` || highmem || 3520GB +// || `[0x02200000000, 0x008fffffffff]` || highshadow || 440GB +// || `[0x01200000000, 0x0021ffffffff]` || shadowgap || 64GB +// || `[0x01000000000, 0x0011ffffffff]` || lowshadow || 8GB +// || `[0x00000000000, 0x000fffffffff]` || lowmem || 64GB +// +// Default Linux/AArch64 (48-bit VMA) mapping: +// || `[0x201000000000, 0xffffffffffff]` || HighMem || 229312GB +// || `[0x041200000000, 0x200fffffffff]` || HighShadow || 28664GB +// || `[0x001200000000, 0x0411ffffffff]` || ShadowGap || 4096GB +// || `[0x001000000000, 0x0011ffffffff]` || LowShadow || 8GB +// || `[0x000000000000, 0x000fffffffff]` || LowMem || 64GB // // Default Linux/S390 mapping: // || `[0x30000000, 0x7fffffff]` || HighMem || @@ -193,7 +204,7 @@ # elif defined(__aarch64__) # define ASAN_SHADOW_OFFSET_CONST 0x0000001000000000 # elif defined(__powerpc64__) -# define ASAN_SHADOW_OFFSET_CONST 0x0000020000000000 +# define ASAN_SHADOW_OFFSET_CONST 0x0000100000000000 # elif defined(__s390x__) # define ASAN_SHADOW_OFFSET_CONST 0x0010000000000000 # elif SANITIZER_FREEBSD diff --git a/libsanitizer/asan/asan_new_delete.cpp b/libsanitizer/asan/asan_new_delete.cpp index b5b1ced8ac5e..d7ed5b570728 100644 --- a/libsanitizer/asan/asan_new_delete.cpp +++ b/libsanitizer/asan/asan_new_delete.cpp @@ -60,18 +60,42 @@ enum class align_val_t: size_t {}; // TODO(alekseyshl): throw std::bad_alloc instead of dying on OOM. // For local pool allocation, align to SHADOW_GRANULARITY to match asan // allocator behavior. 
-#define OPERATOR_NEW_BODY(type, nothrow) \ - GET_STACK_TRACE_MALLOC; \ - void *res = asan_memalign(0, size, &stack, type); \ - if (!nothrow && UNLIKELY(!res)) \ - ReportOutOfMemory(size, &stack); \ - return res; -#define OPERATOR_NEW_BODY_ALIGN(type, nothrow) \ - GET_STACK_TRACE_MALLOC; \ - void *res = asan_memalign((uptr)align, size, &stack, type); \ - if (!nothrow && UNLIKELY(!res)) \ - ReportOutOfMemory(size, &stack); \ - return res; +#define OPERATOR_NEW_BODY \ + GET_STACK_TRACE_MALLOC; \ + void *res = asan_new(size, &stack); \ + if (UNLIKELY(!res)) \ + ReportOutOfMemory(size, &stack); \ + return res +#define OPERATOR_NEW_BODY_NOTHROW \ + GET_STACK_TRACE_MALLOC; \ + return asan_new(size, &stack) +#define OPERATOR_NEW_BODY_ARRAY \ + GET_STACK_TRACE_MALLOC; \ + void *res = asan_new_array(size, &stack); \ + if (UNLIKELY(!res)) \ + ReportOutOfMemory(size, &stack); \ + return res +#define OPERATOR_NEW_BODY_ARRAY_NOTHROW \ + GET_STACK_TRACE_MALLOC; \ + return asan_new_array(size, &stack) +#define OPERATOR_NEW_BODY_ALIGN \ + GET_STACK_TRACE_MALLOC; \ + void *res = asan_new_aligned(size, static_cast(align), &stack); \ + if (UNLIKELY(!res)) \ + ReportOutOfMemory(size, &stack); \ + return res +#define OPERATOR_NEW_BODY_ALIGN_NOTHROW \ + GET_STACK_TRACE_MALLOC; \ + return asan_new_aligned(size, static_cast(align), &stack) +#define OPERATOR_NEW_BODY_ALIGN_ARRAY \ + GET_STACK_TRACE_MALLOC; \ + void *res = asan_new_array_aligned(size, static_cast(align), &stack); \ + if (UNLIKELY(!res)) \ + ReportOutOfMemory(size, &stack); \ + return res +#define OPERATOR_NEW_BODY_ALIGN_ARRAY_NOTHROW \ + GET_STACK_TRACE_MALLOC; \ + return asan_new_array_aligned(size, static_cast(align), &stack) // On OS X it's not enough to just provide our own 'operator new' and // 'operator delete' implementations, because they're going to be in the @@ -82,106 +106,128 @@ enum class align_val_t: size_t {}; // OS X we need to intercept them using their mangled names. 
#if !SANITIZER_APPLE CXX_OPERATOR_ATTRIBUTE -void *operator new(size_t size) -{ OPERATOR_NEW_BODY(FROM_NEW, false /*nothrow*/); } +void *operator new(size_t size) { OPERATOR_NEW_BODY; } CXX_OPERATOR_ATTRIBUTE -void *operator new[](size_t size) -{ OPERATOR_NEW_BODY(FROM_NEW_BR, false /*nothrow*/); } +void *operator new[](size_t size) { OPERATOR_NEW_BODY_ARRAY; } CXX_OPERATOR_ATTRIBUTE -void *operator new(size_t size, std::nothrow_t const&) -{ OPERATOR_NEW_BODY(FROM_NEW, true /*nothrow*/); } +void *operator new(size_t size, std::nothrow_t const &) { + OPERATOR_NEW_BODY_NOTHROW; +} CXX_OPERATOR_ATTRIBUTE -void *operator new[](size_t size, std::nothrow_t const&) -{ OPERATOR_NEW_BODY(FROM_NEW_BR, true /*nothrow*/); } +void *operator new[](size_t size, std::nothrow_t const &) { + OPERATOR_NEW_BODY_ARRAY_NOTHROW; +} CXX_OPERATOR_ATTRIBUTE -void *operator new(size_t size, std::align_val_t align) -{ OPERATOR_NEW_BODY_ALIGN(FROM_NEW, false /*nothrow*/); } +void *operator new(size_t size, std::align_val_t align) { + OPERATOR_NEW_BODY_ALIGN; +} CXX_OPERATOR_ATTRIBUTE -void *operator new[](size_t size, std::align_val_t align) -{ OPERATOR_NEW_BODY_ALIGN(FROM_NEW_BR, false /*nothrow*/); } +void *operator new[](size_t size, std::align_val_t align) { + OPERATOR_NEW_BODY_ALIGN_ARRAY; +} CXX_OPERATOR_ATTRIBUTE -void *operator new(size_t size, std::align_val_t align, std::nothrow_t const&) -{ OPERATOR_NEW_BODY_ALIGN(FROM_NEW, true /*nothrow*/); } +void *operator new(size_t size, std::align_val_t align, + std::nothrow_t const &) { + OPERATOR_NEW_BODY_ALIGN_NOTHROW; +} CXX_OPERATOR_ATTRIBUTE -void *operator new[](size_t size, std::align_val_t align, std::nothrow_t const&) -{ OPERATOR_NEW_BODY_ALIGN(FROM_NEW_BR, true /*nothrow*/); } +void *operator new[](size_t size, std::align_val_t align, + std::nothrow_t const &) { + OPERATOR_NEW_BODY_ALIGN_ARRAY_NOTHROW; +} #else // SANITIZER_APPLE -INTERCEPTOR(void *, _Znwm, size_t size) { - OPERATOR_NEW_BODY(FROM_NEW, false /*nothrow*/); -} 
-INTERCEPTOR(void *, _Znam, size_t size) { - OPERATOR_NEW_BODY(FROM_NEW_BR, false /*nothrow*/); -} +INTERCEPTOR(void *, _Znwm, size_t size) { OPERATOR_NEW_BODY; } +INTERCEPTOR(void *, _Znam, size_t size) { OPERATOR_NEW_BODY_ARRAY; } INTERCEPTOR(void *, _ZnwmRKSt9nothrow_t, size_t size, std::nothrow_t const&) { - OPERATOR_NEW_BODY(FROM_NEW, true /*nothrow*/); + OPERATOR_NEW_BODY_NOTHROW; } INTERCEPTOR(void *, _ZnamRKSt9nothrow_t, size_t size, std::nothrow_t const&) { - OPERATOR_NEW_BODY(FROM_NEW_BR, true /*nothrow*/); + OPERATOR_NEW_BODY_ARRAY_NOTHROW; } #endif // !SANITIZER_APPLE -#define OPERATOR_DELETE_BODY(type) \ +#define OPERATOR_DELETE_BODY \ + GET_STACK_TRACE_FREE; \ + asan_delete(ptr, &stack) +#define OPERATOR_DELETE_BODY_ARRAY \ GET_STACK_TRACE_FREE; \ - asan_delete(ptr, 0, 0, &stack, type); - -#define OPERATOR_DELETE_BODY_SIZE(type) \ - GET_STACK_TRACE_FREE; \ - asan_delete(ptr, size, 0, &stack, type); - -#define OPERATOR_DELETE_BODY_ALIGN(type) \ + asan_delete_array(ptr, &stack) +#define OPERATOR_DELETE_BODY_ALIGN \ + GET_STACK_TRACE_FREE; \ + asan_delete_aligned(ptr, static_cast(align), &stack) +#define OPERATOR_DELETE_BODY_ALIGN_ARRAY \ GET_STACK_TRACE_FREE; \ - asan_delete(ptr, 0, static_cast(align), &stack, type); - -#define OPERATOR_DELETE_BODY_SIZE_ALIGN(type) \ + asan_delete_array_aligned(ptr, static_cast(align), &stack) +#define OPERATOR_DELETE_BODY_SIZE \ + GET_STACK_TRACE_FREE; \ + asan_delete_sized(ptr, size, &stack) +#define OPERATOR_DELETE_BODY_SIZE_ARRAY \ + GET_STACK_TRACE_FREE; \ + asan_delete_array_sized(ptr, size, &stack) +#define OPERATOR_DELETE_BODY_SIZE_ALIGN \ + GET_STACK_TRACE_FREE; \ + asan_delete_sized_aligned(ptr, size, static_cast(align), &stack) +#define OPERATOR_DELETE_BODY_SIZE_ALIGN_ARRAY \ GET_STACK_TRACE_FREE; \ - asan_delete(ptr, size, static_cast(align), &stack, type); + asan_delete_array_sized_aligned(ptr, size, static_cast(align), &stack) #if !SANITIZER_APPLE CXX_OPERATOR_ATTRIBUTE -void operator delete(void *ptr) 
NOEXCEPT -{ OPERATOR_DELETE_BODY(FROM_NEW); } +void operator delete(void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY; } CXX_OPERATOR_ATTRIBUTE -void operator delete[](void *ptr) NOEXCEPT -{ OPERATOR_DELETE_BODY(FROM_NEW_BR); } +void operator delete[](void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY_ARRAY; } CXX_OPERATOR_ATTRIBUTE -void operator delete(void *ptr, std::nothrow_t const&) -{ OPERATOR_DELETE_BODY(FROM_NEW); } +void operator delete(void *ptr, std::nothrow_t const &) { + OPERATOR_DELETE_BODY; +} CXX_OPERATOR_ATTRIBUTE -void operator delete[](void *ptr, std::nothrow_t const&) -{ OPERATOR_DELETE_BODY(FROM_NEW_BR); } +void operator delete[](void *ptr, std::nothrow_t const &) { + OPERATOR_DELETE_BODY_ARRAY; +} CXX_OPERATOR_ATTRIBUTE -void operator delete(void *ptr, size_t size) NOEXCEPT -{ OPERATOR_DELETE_BODY_SIZE(FROM_NEW); } +void operator delete(void *ptr, size_t size) NOEXCEPT { + OPERATOR_DELETE_BODY_SIZE; +} CXX_OPERATOR_ATTRIBUTE -void operator delete[](void *ptr, size_t size) NOEXCEPT -{ OPERATOR_DELETE_BODY_SIZE(FROM_NEW_BR); } +void operator delete[](void *ptr, size_t size) NOEXCEPT { + OPERATOR_DELETE_BODY_SIZE_ARRAY; +} CXX_OPERATOR_ATTRIBUTE -void operator delete(void *ptr, std::align_val_t align) NOEXCEPT -{ OPERATOR_DELETE_BODY_ALIGN(FROM_NEW); } +void operator delete(void *ptr, std::align_val_t align) NOEXCEPT { + OPERATOR_DELETE_BODY_ALIGN; +} CXX_OPERATOR_ATTRIBUTE -void operator delete[](void *ptr, std::align_val_t align) NOEXCEPT -{ OPERATOR_DELETE_BODY_ALIGN(FROM_NEW_BR); } +void operator delete[](void *ptr, std::align_val_t align) NOEXCEPT { + OPERATOR_DELETE_BODY_ALIGN_ARRAY; +} CXX_OPERATOR_ATTRIBUTE -void operator delete(void *ptr, std::align_val_t align, std::nothrow_t const&) -{ OPERATOR_DELETE_BODY_ALIGN(FROM_NEW); } +void operator delete(void *ptr, std::align_val_t align, + std::nothrow_t const &) { + OPERATOR_DELETE_BODY_ALIGN; +} CXX_OPERATOR_ATTRIBUTE -void operator delete[](void *ptr, std::align_val_t align, std::nothrow_t const&) -{ 
OPERATOR_DELETE_BODY_ALIGN(FROM_NEW_BR); } +void operator delete[](void *ptr, std::align_val_t align, + std::nothrow_t const &) { + OPERATOR_DELETE_BODY_ALIGN_ARRAY; +} CXX_OPERATOR_ATTRIBUTE -void operator delete(void *ptr, size_t size, std::align_val_t align) NOEXCEPT -{ OPERATOR_DELETE_BODY_SIZE_ALIGN(FROM_NEW); } +void operator delete(void *ptr, size_t size, std::align_val_t align) NOEXCEPT { + OPERATOR_DELETE_BODY_SIZE_ALIGN; +} CXX_OPERATOR_ATTRIBUTE -void operator delete[](void *ptr, size_t size, std::align_val_t align) NOEXCEPT -{ OPERATOR_DELETE_BODY_SIZE_ALIGN(FROM_NEW_BR); } +void operator delete[](void *ptr, size_t size, + std::align_val_t align) NOEXCEPT { + OPERATOR_DELETE_BODY_SIZE_ALIGN_ARRAY; +} #else // SANITIZER_APPLE -INTERCEPTOR(void, _ZdlPv, void *ptr) -{ OPERATOR_DELETE_BODY(FROM_NEW); } -INTERCEPTOR(void, _ZdaPv, void *ptr) -{ OPERATOR_DELETE_BODY(FROM_NEW_BR); } -INTERCEPTOR(void, _ZdlPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&) -{ OPERATOR_DELETE_BODY(FROM_NEW); } -INTERCEPTOR(void, _ZdaPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&) -{ OPERATOR_DELETE_BODY(FROM_NEW_BR); } +INTERCEPTOR(void, _ZdlPv, void *ptr) { OPERATOR_DELETE_BODY; } +INTERCEPTOR(void, _ZdaPv, void *ptr) { OPERATOR_DELETE_BODY_ARRAY; } +INTERCEPTOR(void, _ZdlPvRKSt9nothrow_t, void *ptr, std::nothrow_t const &) { + OPERATOR_DELETE_BODY; +} +INTERCEPTOR(void, _ZdaPvRKSt9nothrow_t, void *ptr, std::nothrow_t const &) { + OPERATOR_DELETE_BODY_ARRAY; +} #endif // !SANITIZER_APPLE diff --git a/libsanitizer/asan/asan_poisoning.cpp b/libsanitizer/asan/asan_poisoning.cpp index 762670632f4e..897a2be13e24 100644 --- a/libsanitizer/asan/asan_poisoning.cpp +++ b/libsanitizer/asan/asan_poisoning.cpp @@ -20,11 +20,58 @@ #include "sanitizer_common/sanitizer_flags.h" #include "sanitizer_common/sanitizer_interface_internal.h" #include "sanitizer_common/sanitizer_libc.h" +#include "sanitizer_common/sanitizer_ring_buffer.h" +#include "sanitizer_common/sanitizer_stackdepot.h" 
namespace __asan { +using PoisonRecordRingBuffer = RingBuffer; + static atomic_uint8_t can_poison_memory; +static Mutex poison_records_mutex; +static PoisonRecordRingBuffer *poison_records + SANITIZER_GUARDED_BY(poison_records_mutex) = nullptr; + +void AddPoisonRecord(const PoisonRecord &new_record) { + if (flags()->poison_history_size <= 0) + return; + + GenericScopedLock l(&poison_records_mutex); + + if (poison_records == nullptr) + poison_records = PoisonRecordRingBuffer::New(flags()->poison_history_size); + + poison_records->push(new_record); +} + +bool FindPoisonRecord(uptr addr, PoisonRecord &match) { + if (flags()->poison_history_size <= 0) + return false; + + GenericScopedLock l(&poison_records_mutex); + + if (poison_records) { + for (unsigned int i = 0; i < poison_records->size(); i++) { + PoisonRecord record = (*poison_records)[i]; + if (record.begin <= addr && addr < record.end) { + internal_memcpy(&match, &record, sizeof(record)); + return true; + } + } + } + + return false; +} + +void SANITIZER_ACQUIRE(poison_records_mutex) AcquirePoisonRecords() { + poison_records_mutex.Lock(); +} + +void SANITIZER_RELEASE(poison_records_mutex) ReleasePoisonRecords() { + poison_records_mutex.Unlock(); +} + void SetCanPoisonMemory(bool value) { atomic_store(&can_poison_memory, value, memory_order_release); } @@ -107,6 +154,21 @@ void __asan_poison_memory_region(void const volatile *addr, uptr size) { uptr end_addr = beg_addr + size; VPrintf(3, "Trying to poison memory region [%p, %p)\n", (void *)beg_addr, (void *)end_addr); + + if (flags()->poison_history_size > 0) { + GET_STACK_TRACE(/*max_size=*/16, /*fast=*/false); + u32 current_tid = GetCurrentTidOrInvalid(); + + u32 stack_id = StackDepotPut(stack); + + PoisonRecord record; + record.stack_id = stack_id; + record.thread_id = current_tid; + record.begin = beg_addr; + record.end = end_addr; + AddPoisonRecord(record); + } + ShadowSegmentEndpoint beg(beg_addr); ShadowSegmentEndpoint end(end_addr); if (beg.chunk == 
end.chunk) { @@ -147,6 +209,11 @@ void __asan_unpoison_memory_region(void const volatile *addr, uptr size) { uptr end_addr = beg_addr + size; VPrintf(3, "Trying to unpoison memory region [%p, %p)\n", (void *)beg_addr, (void *)end_addr); + + // Note: we don't need to update the poison tracking here. Since the shadow + // memory will be unpoisoned, the poison tracking ring buffer entries will be + // ignored. + ShadowSegmentEndpoint beg(beg_addr); ShadowSegmentEndpoint end(end_addr); if (beg.chunk == end.chunk) { diff --git a/libsanitizer/asan/asan_poisoning.h b/libsanitizer/asan/asan_poisoning.h index 600bd011f304..4b2d6220b1b1 100644 --- a/libsanitizer/asan/asan_poisoning.h +++ b/libsanitizer/asan/asan_poisoning.h @@ -11,6 +11,9 @@ // Shadow memory poisoning by ASan RTL and by user application. //===----------------------------------------------------------------------===// +#ifndef ASAN_POISONING_H +#define ASAN_POISONING_H + #include "asan_interceptors.h" #include "asan_internal.h" #include "asan_mapping.h" @@ -19,6 +22,19 @@ namespace __asan { +struct PoisonRecord { + u32 stack_id; + u32 thread_id; + uptr begin; + uptr end; +}; + +void AddPoisonRecord(const PoisonRecord& new_record); +bool FindPoisonRecord(uptr addr, PoisonRecord& match); + +void AcquirePoisonRecords(); +void ReleasePoisonRecords(); + // Enable/disable memory poisoning. void SetCanPoisonMemory(bool value); bool CanPoisonMemory(); @@ -96,3 +112,5 @@ ALWAYS_INLINE void FastPoisonShadowPartialRightRedzone( void FlushUnneededASanShadowMemory(uptr p, uptr size); } // namespace __asan + +#endif // ASAN_POISONING_H diff --git a/libsanitizer/asan/asan_posix.cpp b/libsanitizer/asan/asan_posix.cpp index 39685696a0d0..fb66c871ea8a 100644 --- a/libsanitizer/asan/asan_posix.cpp +++ b/libsanitizer/asan/asan_posix.cpp @@ -157,11 +157,17 @@ static void BeforeFork() { // stuff we need. 
__lsan::LockThreads(); __lsan::LockAllocator(); + + AcquirePoisonRecords(); + StackDepotLockBeforeFork(); } static void AfterFork(bool fork_child) { StackDepotUnlockAfterFork(fork_child); + + ReleasePoisonRecords(); + // `_lsan` functions defined regardless of `CAN_SANITIZE_LEAKS` and unlock // the stuff we need. __lsan::UnlockAllocator(); @@ -174,7 +180,7 @@ static void AfterFork(bool fork_child) { void InstallAtForkHandler() { # if SANITIZER_SOLARIS || SANITIZER_NETBSD || SANITIZER_APPLE || \ - (SANITIZER_LINUX && SANITIZER_SPARC) + (SANITIZER_LINUX && SANITIZER_SPARC) || SANITIZER_HAIKU // While other Linux targets use clone in internal_fork which doesn't // trigger pthread_atfork handlers, Linux/sparc64 uses __fork, causing a // hang. diff --git a/libsanitizer/asan/asan_report.cpp b/libsanitizer/asan/asan_report.cpp index 45aa607dcda0..e049a21e4e16 100644 --- a/libsanitizer/asan/asan_report.cpp +++ b/libsanitizer/asan/asan_report.cpp @@ -21,6 +21,7 @@ #include "asan_scariness_score.h" #include "asan_stack.h" #include "asan_thread.h" +#include "lsan/lsan_common.h" #include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_flags.h" #include "sanitizer_common/sanitizer_interface_internal.h" @@ -126,6 +127,33 @@ class ScopedInErrorReport { public: explicit ScopedInErrorReport(bool fatal = false) : halt_on_error_(fatal || flags()->halt_on_error) { + // Deadlock Prevention Between ASan and LSan + // + // Background: + // - The `dl_iterate_phdr` function requires holding libdl's internal lock + // (Lock A). + // - LSan acquires the ASan thread registry lock (Lock B) *after* calling + // `dl_iterate_phdr`. + // + // Problem Scenario: + // When ASan attempts to call `dl_iterate_phdr` while holding Lock B (e.g., + // during error reporting via `ErrorDescription::Print`), a circular lock + // dependency may occur: + // 1. Thread 1: Holds Lock B → Requests Lock A (via dl_iterate_phdr) + // 2. 
Thread 2: Holds Lock A → Requests Lock B (via LSan operations) + // + // Solution: + // Proactively load all required modules before acquiring Lock B. + // This ensures: + // 1. Any `dl_iterate_phdr` calls during module loading complete before + // locking. + // 2. Subsequent error reporting avoids nested lock acquisition patterns. + // 3. Eliminates the lock order inversion risk between libdl and ASan's + // thread registry. +#if CAN_SANITIZE_LEAKS && (SANITIZER_LINUX || SANITIZER_NETBSD) + Symbolizer::GetOrInit()->GetRefreshedListOfModules(); +#endif + // Make sure the registry and sanitizer report mutexes are locked while // we're printing an error report. // We can lock them only here to avoid self-deadlock in case of @@ -592,5 +620,5 @@ void __sanitizer_ptr_cmp(void *a, void *b) { } // extern "C" // Provide default implementation of __asan_on_error that does nothing -// and may be overriden by user. +// and may be overridden by user. SANITIZER_INTERFACE_WEAK_DEF(void, __asan_on_error, void) {} diff --git a/libsanitizer/asan/asan_rtl.cpp b/libsanitizer/asan/asan_rtl.cpp index 19c6c210b564..b9ba250f5bcd 100644 --- a/libsanitizer/asan/asan_rtl.cpp +++ b/libsanitizer/asan/asan_rtl.cpp @@ -390,6 +390,39 @@ void PrintAddressSpaceLayout() { kHighShadowBeg > kMidMemEnd); } +// Apply most options specified either through the ASAN_OPTIONS +// environment variable, or through the `__asan_default_options` user function. +// +// This function may be called multiple times, once per weak reference callback +// on Windows, so it needs to be idempotent. +// +// Context: +// For maximum compatibility on Windows, it is necessary for ASan options to be +// configured/registered/applied inside this method (instead of in +// ASanInitInternal, for example). That's because, on Windows, the user-provided +// definition for `__asan_default_opts` may not be bound when `ASanInitInternal` +// is invoked (it is bound later). 
+// +// To work around the late binding on windows, `ApplyOptions` will be called, +// again, after binding to the user-provided `__asan_default_opts` function. +// Therefore, any flags not configured here are not guaranteed to be +// configurable through `__asan_default_opts` on Windows. +// +// +// For more details on this issue, see: +// https://github.com/llvm/llvm-project/issues/117925 +void ApplyFlags() { + SetCanPoisonMemory(flags()->poison_heap); + SetMallocContextSize(common_flags()->malloc_context_size); + + __asan_option_detect_stack_use_after_return = + flags()->detect_stack_use_after_return; + + AllocatorOptions allocator_options; + allocator_options.SetFrom(flags(), common_flags()); + ApplyAllocatorOptions(allocator_options); +} + static bool AsanInitInternal() { if (LIKELY(AsanInited())) return true; @@ -397,8 +430,9 @@ static bool AsanInitInternal() { CacheBinaryName(); - // Initialize flags. This must be done early, because most of the - // initialization steps look at flags(). + // Initialize flags. On Windows it also registers weak function callbacks. + // This must be done early, because most of the initialization steps look at + // flags(). InitializeFlags(); WaitForDebugger(flags()->sleep_before_init, "before init"); @@ -416,9 +450,6 @@ static bool AsanInitInternal() { AsanCheckDynamicRTPrereqs(); AvoidCVE_2016_2143(); - SetCanPoisonMemory(flags()->poison_heap); - SetMallocContextSize(common_flags()->malloc_context_size); - InitializePlatformExceptionHandlers(); InitializeHighMemEnd(); @@ -429,10 +460,6 @@ static bool AsanInitInternal() { SetPrintfAndReportCallback(AppendToErrorMessageBuffer); __sanitizer_set_report_path(common_flags()->log_path); - - __asan_option_detect_stack_use_after_return = - flags()->detect_stack_use_after_return; - __sanitizer::InitializePlatformEarly(); // Setup internal allocator callback. 
@@ -451,6 +478,18 @@ static bool AsanInitInternal() { DisableCoreDumperIfNecessary(); +#if SANITIZER_POSIX + if (StackSizeIsUnlimited()) { + VPrintf(1, + "WARNING: Unlimited stack size detected. This may affect " + "compatibility with the shadow mappings.\n"); + // MSan and TSan re-exec with a fixed size stack. We don't do that because + // it may break the program. InitializeShadowMemory() will, if needed, + // re-exec without ASLR, which solves most shadow mapping compatibility + // issues. + } +#endif // SANITIZER_POSIX + InitializeShadowMemory(); AsanTSDInit(PlatformTSDDtor); @@ -460,6 +499,13 @@ static bool AsanInitInternal() { allocator_options.SetFrom(flags(), common_flags()); InitializeAllocator(allocator_options); + // Apply ASan flags. + // NOTE: In order for options specified through `__asan_default_options` to be + // honored on Windows, it is necessary for those options to be configured + // inside the `ApplyOptions` method. See the function-level comment for + // `ApplyFlags` for more details. + ApplyFlags(); + if (SANITIZER_START_BACKGROUND_THREAD_IN_ASAN_INTERNAL) MaybeStartBackgroudThread(); @@ -482,7 +528,6 @@ static bool AsanInitInternal() { AsanThread *main_thread = CreateMainThread(); CHECK_EQ(0, main_thread->tid()); force_interface_symbols(); // no-op. 
- SanitizerInitializeUnwinder(); if (CAN_SANITIZE_LEAKS) { __lsan::InitCommonLsan(); diff --git a/libsanitizer/asan/asan_rtl_x86_64.S b/libsanitizer/asan/asan_rtl_x86_64.S index 9c5289856d8a..5ee830d3afc5 100644 --- a/libsanitizer/asan/asan_rtl_x86_64.S +++ b/libsanitizer/asan/asan_rtl_x86_64.S @@ -5,6 +5,7 @@ #include "sanitizer_common/sanitizer_platform.h" .file "asan_rtl_x86_64.S" +.att_syntax #define NAME(n, reg, op, s, i) n##_##op##_##i##_##s##_##reg diff --git a/libsanitizer/asan/asan_shadow_setup.cpp b/libsanitizer/asan/asan_shadow_setup.cpp index fc6de39622b5..5b3591da067b 100644 --- a/libsanitizer/asan/asan_shadow_setup.cpp +++ b/libsanitizer/asan/asan_shadow_setup.cpp @@ -109,6 +109,15 @@ void InitializeShadowMemory() { ProtectGap(kShadowGap2Beg, kShadowGap2End - kShadowGap2Beg + 1); ProtectGap(kShadowGap3Beg, kShadowGap3End - kShadowGap3Beg + 1); } else { + // ASan's mappings can usually shadow the entire address space, even with + // maximum ASLR entropy. However: + // - On 32-bit systems, the maximum ASLR entropy (currently up to 16-bits + // == 256MB) is a significant chunk of the address space; reclaiming it + // by disabling ASLR might allow chonky binaries to run. + // - On 64-bit systems, some settings (e.g., for Linux, unlimited stack + // size plus 31+ bits of entropy) can lead to an incompatible layout. + TryReExecWithoutASLR(); + Report( "Shadow memory range interleaves with an existing memory mapping. " "ASan cannot proceed correctly. 
ABORTING.\n"); diff --git a/libsanitizer/asan/asan_suppressions.cpp b/libsanitizer/asan/asan_suppressions.cpp index 94289d14d7e7..30de77523580 100644 --- a/libsanitizer/asan/asan_suppressions.cpp +++ b/libsanitizer/asan/asan_suppressions.cpp @@ -26,9 +26,10 @@ static const char kInterceptorName[] = "interceptor_name"; static const char kInterceptorViaFunction[] = "interceptor_via_fun"; static const char kInterceptorViaLibrary[] = "interceptor_via_lib"; static const char kODRViolation[] = "odr_violation"; +static const char kAllocDeallocMismatch[] = "alloc_dealloc_mismatch"; static const char *kSuppressionTypes[] = { kInterceptorName, kInterceptorViaFunction, kInterceptorViaLibrary, - kODRViolation}; + kODRViolation, kAllocDeallocMismatch}; SANITIZER_INTERFACE_WEAK_DEF(const char *, __asan_default_suppressions, void) { return ""; @@ -62,6 +63,44 @@ bool IsODRViolationSuppressed(const char *global_var_name) { return suppression_ctx->Match(global_var_name, kODRViolation, &s); } +bool IsAddrSuppressed(const char *suppression, Symbolizer *symbolizer, + uptr addr) { + CHECK(suppression_ctx); + CHECK(suppression_ctx->HasSuppressionType(suppression)); + CHECK(symbolizer); + SymbolizedStackHolder symbolized_stack(symbolizer->SymbolizePC(addr)); + const SymbolizedStack *frames = symbolized_stack.get(); + CHECK(frames); + for (const SymbolizedStack *cur = frames; cur; cur = cur->next) { + const char *function_name = cur->info.function; + if (!function_name) { + continue; + } + // Match suppressions. 
+ Suppression *s; + if (suppression_ctx->Match(function_name, suppression, &s)) { + return true; + } + } + return false; +} + +bool IsAllocDeallocMismatchSuppressed(const StackTrace *stack) { + CHECK(suppression_ctx); + if (!suppression_ctx->HasSuppressionType(kAllocDeallocMismatch)) { + return false; + } + Symbolizer *symbolizer = Symbolizer::GetOrInit(); + for (uptr i = 0; i < stack->size && stack->trace[i]; i++) { + uptr addr = stack->trace[i]; + // Match "alloc_dealloc_mismatch" suppressions. + if (IsAddrSuppressed(kAllocDeallocMismatch, symbolizer, addr)) { + return true; + } + } + return false; +} + bool IsStackTraceSuppressed(const StackTrace *stack) { if (!HaveStackTraceBasedSuppressions()) return false; @@ -80,19 +119,9 @@ bool IsStackTraceSuppressed(const StackTrace *stack) { } if (suppression_ctx->HasSuppressionType(kInterceptorViaFunction)) { - SymbolizedStackHolder symbolized_stack(symbolizer->SymbolizePC(addr)); - const SymbolizedStack *frames = symbolized_stack.get(); - CHECK(frames); - for (const SymbolizedStack *cur = frames; cur; cur = cur->next) { - const char *function_name = cur->info.function; - if (!function_name) { - continue; - } - // Match "interceptor_via_fun" suppressions. - if (suppression_ctx->Match(function_name, kInterceptorViaFunction, - &s)) { - return true; - } + // Match "interceptor_via_fun" suppressions. 
+ if (IsAddrSuppressed(kInterceptorViaFunction, symbolizer, addr)) { + return true; } } } diff --git a/libsanitizer/asan/asan_suppressions.h b/libsanitizer/asan/asan_suppressions.h index 121d4ddf1875..4613a362f28e 100644 --- a/libsanitizer/asan/asan_suppressions.h +++ b/libsanitizer/asan/asan_suppressions.h @@ -23,6 +23,7 @@ bool IsInterceptorSuppressed(const char *interceptor_name); bool HaveStackTraceBasedSuppressions(); bool IsStackTraceSuppressed(const StackTrace *stack); bool IsODRViolationSuppressed(const char *global_var_name); +bool IsAllocDeallocMismatchSuppressed(const StackTrace *stack); } // namespace __asan diff --git a/libsanitizer/asan/asan_thread.cpp b/libsanitizer/asan/asan_thread.cpp index 37fb6f2b07f2..0ed58bbe2a73 100644 --- a/libsanitizer/asan/asan_thread.cpp +++ b/libsanitizer/asan/asan_thread.cpp @@ -163,7 +163,7 @@ void AsanThread::StartSwitchFiber(FakeStack **fake_stack_save, uptr bottom, if (fake_stack_save) *fake_stack_save = fake_stack_; fake_stack_ = nullptr; - SetTLSFakeStack(nullptr); + ResetTLSFakeStack(); // if fake_stack_save is null, the fiber will die, delete the fakestack if (!fake_stack_save && current_fake_stack) current_fake_stack->Destroy(this->tid()); @@ -177,8 +177,8 @@ void AsanThread::FinishSwitchFiber(FakeStack *fake_stack_save, uptr *bottom_old, } if (fake_stack_save) { - SetTLSFakeStack(fake_stack_save); fake_stack_ = fake_stack_save; + ResetTLSFakeStack(); } if (bottom_old) @@ -242,7 +242,7 @@ FakeStack *AsanThread::AsyncSignalSafeLazyInitFakeStack() { Max(stack_size_log, static_cast(flags()->min_uar_stack_size_log)); fake_stack_ = FakeStack::Create(stack_size_log); DCHECK_EQ(GetCurrentThread(), this); - SetTLSFakeStack(fake_stack_); + ResetTLSFakeStack(); return fake_stack_; } return nullptr; @@ -282,7 +282,7 @@ void AsanThread::Init(const InitOptions *options) { // asan_fuchsia.c definies CreateMainThread and SetThreadStackAndTls. 
#if !SANITIZER_FUCHSIA -void AsanThread::ThreadStart(tid_t os_id) { +void AsanThread::ThreadStart(ThreadID os_id) { Init(); asanThreadRegistry().StartThread(tid(), os_id, ThreadType::Regular, nullptr); @@ -469,7 +469,7 @@ void EnsureMainThreadIDIsCorrect() { context->os_id = GetTid(); } -__asan::AsanThread *GetAsanThreadByOsIDLocked(tid_t os_id) { +__asan::AsanThread *GetAsanThreadByOsIDLocked(ThreadID os_id) { __asan::AsanThreadContext *context = static_cast<__asan::AsanThreadContext *>( __asan::asanThreadRegistry().FindThreadContextByOsIDLocked(os_id)); if (!context) @@ -497,7 +497,7 @@ static ThreadRegistry *GetAsanThreadRegistryLocked() { void EnsureMainThreadIDIsCorrect() { __asan::EnsureMainThreadIDIsCorrect(); } -bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end, +bool GetThreadRangesLocked(ThreadID os_id, uptr *stack_begin, uptr *stack_end, uptr *tls_begin, uptr *tls_end, uptr *cache_begin, uptr *cache_end, DTLS **dtls) { __asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id); @@ -516,7 +516,7 @@ bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end, void GetAllThreadAllocatorCachesLocked(InternalMmapVector *caches) {} -void GetThreadExtraStackRangesLocked(tid_t os_id, +void GetThreadExtraStackRangesLocked(ThreadID os_id, InternalMmapVector *ranges) { __asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id); if (!t) @@ -546,11 +546,11 @@ void GetAdditionalThreadContextPtrsLocked(InternalMmapVector *ptrs) { __asan::asanThreadArgRetval().GetAllPtrsLocked(ptrs); } -void GetRunningThreadsLocked(InternalMmapVector *threads) { +void GetRunningThreadsLocked(InternalMmapVector *threads) { GetAsanThreadRegistryLocked()->RunCallbackForEachThreadLocked( [](ThreadContextBase *tctx, void *threads) { if (tctx->status == ThreadStatusRunning) - reinterpret_cast *>(threads)->push_back( + reinterpret_cast *>(threads)->push_back( tctx->os_id); }, threads); diff --git a/libsanitizer/asan/asan_thread.h 
b/libsanitizer/asan/asan_thread.h index ad9e03d68fe9..19b7f342e171 100644 --- a/libsanitizer/asan/asan_thread.h +++ b/libsanitizer/asan/asan_thread.h @@ -75,7 +75,7 @@ class AsanThread { struct InitOptions; void Init(const InitOptions *options = nullptr); - void ThreadStart(tid_t os_id); + void ThreadStart(ThreadID os_id); thread_return_t RunThread(); uptr stack_top(); @@ -104,7 +104,7 @@ class AsanThread { if (!fake_stack_) return; FakeStack *t = fake_stack_; fake_stack_ = nullptr; - SetTLSFakeStack(nullptr); + ResetTLSFakeStack(); t->Destroy(tid); } diff --git a/libsanitizer/asan/asan_win.cpp b/libsanitizer/asan/asan_win.cpp index 09a13b11cff1..845408ac38ab 100644 --- a/libsanitizer/asan/asan_win.cpp +++ b/libsanitizer/asan/asan_win.cpp @@ -145,7 +145,6 @@ static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) { t->GetStartData(params); auto res = (*params.start_routine)(params.arg); - t->Destroy(); // POSIX calls this from TSD destructor. return res; } @@ -166,6 +165,13 @@ INTERCEPTOR_WINAPI(HANDLE, CreateThread, LPSECURITY_ATTRIBUTES security, thr_flags, tid); } +INTERCEPTOR_WINAPI(void, ExitThread, DWORD dwExitCode) { + AsanThread *t = (AsanThread *)__asan::GetCurrentThread(); + if (t) + t->Destroy(); + REAL(ExitThread)(dwExitCode); +} + // }}} namespace __asan { @@ -181,6 +187,7 @@ void InitializePlatformInterceptors() { (LPCWSTR)&InitializePlatformInterceptors, &pinned)); ASAN_INTERCEPT_FUNC(CreateThread); + ASAN_INTERCEPT_FUNC(ExitThread); ASAN_INTERCEPT_FUNC(SetUnhandledExceptionFilter); #ifdef _WIN64 @@ -272,6 +279,9 @@ uptr FindDynamicShadowStart() { GetMmapGranularity()); } +// Not used +void TryReExecWithoutASLR() {} + void AsanCheckDynamicRTPrereqs() {} void AsanCheckIncompatibleRT() {} diff --git a/libsanitizer/asan/asan_win_static_runtime_thunk.cpp b/libsanitizer/asan/asan_win_static_runtime_thunk.cpp index 4a69b6657403..46e0e90738f2 100644 --- a/libsanitizer/asan/asan_win_static_runtime_thunk.cpp +++ 
b/libsanitizer/asan/asan_win_static_runtime_thunk.cpp @@ -63,6 +63,10 @@ INTERCEPT_LIBRARY_FUNCTION_ASAN(strpbrk); INTERCEPT_LIBRARY_FUNCTION_ASAN(strspn); INTERCEPT_LIBRARY_FUNCTION_ASAN(strstr); INTERCEPT_LIBRARY_FUNCTION_ASAN(strtok); +INTERCEPT_LIBRARY_FUNCTION_ASAN(wcscat); +INTERCEPT_LIBRARY_FUNCTION_ASAN(wcscpy); +INTERCEPT_LIBRARY_FUNCTION_ASAN(wcsncat); +INTERCEPT_LIBRARY_FUNCTION_ASAN(wcsncpy); INTERCEPT_LIBRARY_FUNCTION_ASAN(wcslen); INTERCEPT_LIBRARY_FUNCTION_ASAN(wcsnlen); diff --git a/libsanitizer/builtins/assembly.h b/libsanitizer/builtins/assembly.h index 8c42fc773483..2eddbf468c14 100644 --- a/libsanitizer/builtins/assembly.h +++ b/libsanitizer/builtins/assembly.h @@ -14,7 +14,7 @@ #ifndef COMPILERRT_ASSEMBLY_H #define COMPILERRT_ASSEMBLY_H -#if defined(__linux__) && defined(__CET__) +#ifdef __CET__ #if __has_include() #include #endif @@ -61,7 +61,7 @@ #define LOCAL_LABEL(name) .L ## name #define FILE_LEVEL_DIRECTIVE #define SYMBOL_IS_FUNC(name) \ - .def name SEPARATOR \ + .def FUNC_SYMBOL(name) SEPARATOR \ .scl 2 SEPARATOR \ .type 32 SEPARATOR \ .endef @@ -71,19 +71,35 @@ #endif -#if defined(__arm__) || defined(__aarch64__) +#if defined(__aarch64__) && defined(__ELF__) && \ + defined(COMPILER_RT_EXECUTE_ONLY_CODE) +// The assembler always creates an implicit '.text' section with default flags +// (SHF_ALLOC | SHF_EXECINSTR), which is incompatible with the execute-only +// '.text' section we want to create here because of the missing +// SHF_AARCH64_PURECODE section flag. To solve this, we use 'unique,0' to +// differentiate the two sections. The output will therefore have two separate +// sections named '.text', where code will be placed into the execute-only +// '.text' section, and the implicitly-created one will be empty. 
+#define TEXT_SECTION \ + .section .text,"axy",@progbits,unique,0 +#else +#define TEXT_SECTION \ + .text +#endif + +#if defined(__arm__) || defined(__aarch64__) || defined(__arm64ec__) #define FUNC_ALIGN \ - .text SEPARATOR \ .balign 16 SEPARATOR #else #define FUNC_ALIGN #endif -// BTI and PAC gnu property note +// BTI, PAC, and GCS gnu property note #define NT_GNU_PROPERTY_TYPE_0 5 #define GNU_PROPERTY_AARCH64_FEATURE_1_AND 0xc0000000 #define GNU_PROPERTY_AARCH64_FEATURE_1_BTI 1 #define GNU_PROPERTY_AARCH64_FEATURE_1_PAC 2 +#define GNU_PROPERTY_AARCH64_FEATURE_1_GCS 4 #if defined(__ARM_FEATURE_BTI_DEFAULT) #define BTI_FLAG GNU_PROPERTY_AARCH64_FEATURE_1_BTI @@ -97,6 +113,12 @@ #define PAC_FLAG 0 #endif +#if defined(__ARM_FEATURE_GCS_DEFAULT) +#define GCS_FLAG GNU_PROPERTY_AARCH64_FEATURE_1_GCS +#else +#define GCS_FLAG 0 +#endif + #define GNU_PROPERTY(type, value) \ .pushsection .note.gnu.property, "a" SEPARATOR \ .p2align 3 SEPARATOR \ @@ -118,11 +140,12 @@ #define BTI_J #endif -#if (BTI_FLAG | PAC_FLAG) != 0 -#define GNU_PROPERTY_BTI_PAC \ - GNU_PROPERTY(GNU_PROPERTY_AARCH64_FEATURE_1_AND, BTI_FLAG | PAC_FLAG) +#if (BTI_FLAG | PAC_FLAG | GCS_FLAG) != 0 +#define GNU_PROPERTY_BTI_PAC_GCS \ + GNU_PROPERTY(GNU_PROPERTY_AARCH64_FEATURE_1_AND, \ + BTI_FLAG | PAC_FLAG | GCS_FLAG) #else -#define GNU_PROPERTY_BTI_PAC +#define GNU_PROPERTY_BTI_PAC_GCS #endif #if defined(__clang__) || defined(__GCC_HAVE_DWARF2_CFI_ASM) @@ -194,6 +217,23 @@ #else #define WIDE(op) op #endif + +#if defined(__ARM_FEATURE_PAC_DEFAULT) && defined(__ARM_FEATURE_BTI_DEFAULT) +#define PACBTI_LANDING pacbti r12, lr, sp +#elif defined(__ARM_FEATURE_PAC_DEFAULT) +#define PACBTI_LANDING pac r12, lr, sp +#elif defined(__ARM_FEATURE_BTI_DEFAULT) +#define PACBTI_LANDING bti +#else +#define PACBTI_LANDING +#endif + +#if defined(__ARM_FEATURE_PAUTH) +#define PAC_RETURN bxaut r12, lr, sp +#else +#define PAC_RETURN aut r12, lr, sp SEPARATOR bx lr +#endif + #else // !defined(__arm) #define DECLARE_FUNC_ENCODING 
#define DEFINE_CODE_STATE @@ -208,6 +248,16 @@ #define GLUE4(a, b, c, d) GLUE4_(a, b, c, d) #define SYMBOL_NAME(name) GLUE(__USER_LABEL_PREFIX__, name) +#ifndef __arm64ec__ +#define FUNC_SYMBOL(name) name +#else +// On ARM64EC, function names and calls (but not address-taking or data symbol +// references) use symbols prefixed with "#". +#define QUOTE(a) #a +#define STR(a) QUOTE(a) +#define HASH # +#define FUNC_SYMBOL(name) STR(GLUE2(HASH, name)) +#endif #ifdef VISIBILITY_HIDDEN #define DECLARE_SYMBOL_VISIBILITY(name) \ @@ -220,56 +270,61 @@ #endif #define DEFINE_COMPILERRT_FUNCTION(name) \ + TEXT_SECTION SEPARATOR \ DEFINE_CODE_STATE \ FILE_LEVEL_DIRECTIVE SEPARATOR \ - .globl SYMBOL_NAME(name) SEPARATOR \ + .globl FUNC_SYMBOL(SYMBOL_NAME(name)) SEPARATOR \ SYMBOL_IS_FUNC(SYMBOL_NAME(name)) SEPARATOR \ DECLARE_SYMBOL_VISIBILITY(name) \ DECLARE_FUNC_ENCODING \ - SYMBOL_NAME(name): + FUNC_SYMBOL(SYMBOL_NAME(name)): #define DEFINE_COMPILERRT_THUMB_FUNCTION(name) \ + TEXT_SECTION SEPARATOR \ DEFINE_CODE_STATE \ FILE_LEVEL_DIRECTIVE SEPARATOR \ - .globl SYMBOL_NAME(name) SEPARATOR \ + .globl FUNC_SYMBOL(SYMBOL_NAME(name)) SEPARATOR \ SYMBOL_IS_FUNC(SYMBOL_NAME(name)) SEPARATOR \ DECLARE_SYMBOL_VISIBILITY(name) SEPARATOR \ .thumb_func SEPARATOR \ - SYMBOL_NAME(name): + FUNC_SYMBOL(SYMBOL_NAME(name)): #define DEFINE_COMPILERRT_PRIVATE_FUNCTION(name) \ + TEXT_SECTION SEPARATOR \ DEFINE_CODE_STATE \ FILE_LEVEL_DIRECTIVE SEPARATOR \ - .globl SYMBOL_NAME(name) SEPARATOR \ + .globl FUNC_SYMBOL(SYMBOL_NAME(name)) SEPARATOR \ SYMBOL_IS_FUNC(SYMBOL_NAME(name)) SEPARATOR \ HIDDEN(SYMBOL_NAME(name)) SEPARATOR \ DECLARE_FUNC_ENCODING \ - SYMBOL_NAME(name): + FUNC_SYMBOL(SYMBOL_NAME(name)): #define DEFINE_COMPILERRT_PRIVATE_FUNCTION_UNMANGLED(name) \ + TEXT_SECTION SEPARATOR \ DEFINE_CODE_STATE \ - .globl name SEPARATOR \ + .globl FUNC_SYMBOL(name) SEPARATOR \ SYMBOL_IS_FUNC(name) SEPARATOR \ HIDDEN(name) SEPARATOR \ DECLARE_FUNC_ENCODING \ - name: + FUNC_SYMBOL(name): #define 
DEFINE_COMPILERRT_OUTLINE_FUNCTION_UNMANGLED(name) \ + TEXT_SECTION SEPARATOR \ DEFINE_CODE_STATE \ FUNC_ALIGN \ - .globl name SEPARATOR \ + .globl FUNC_SYMBOL(name) SEPARATOR \ SYMBOL_IS_FUNC(name) SEPARATOR \ - DECLARE_SYMBOL_VISIBILITY_UNMANGLED(name) SEPARATOR \ + DECLARE_SYMBOL_VISIBILITY_UNMANGLED(FUNC_SYMBOL(name)) SEPARATOR \ DECLARE_FUNC_ENCODING \ - name: \ + FUNC_SYMBOL(name): \ SEPARATOR CFI_START \ SEPARATOR BTI_C #define DEFINE_COMPILERRT_FUNCTION_ALIAS(name, target) \ - .globl SYMBOL_NAME(name) SEPARATOR \ + .globl FUNC_SYMBOL(SYMBOL_NAME(name)) SEPARATOR \ SYMBOL_IS_FUNC(SYMBOL_NAME(name)) SEPARATOR \ DECLARE_SYMBOL_VISIBILITY(name) SEPARATOR \ - .set SYMBOL_NAME(name), SYMBOL_NAME(target) SEPARATOR + .set FUNC_SYMBOL(SYMBOL_NAME(name)), FUNC_SYMBOL(SYMBOL_NAME(target)) SEPARATOR #if defined(__ARM_EABI__) #define DEFINE_AEABI_FUNCTION_ALIAS(aeabi_name, name) \ @@ -290,4 +345,21 @@ CFI_END #endif +#ifdef __arm__ +#include "int_endianness.h" + +#if _YUGA_BIG_ENDIAN +#define VMOV_TO_DOUBLE(dst, src0, src1) vmov dst, src1, src0 SEPARATOR +#define VMOV_FROM_DOUBLE(dst0, dst1, src) vmov dst1, dst0, src SEPARATOR +#else +#define VMOV_TO_DOUBLE(dst, src0, src1) vmov dst, src0, src1 SEPARATOR +#define VMOV_FROM_DOUBLE(dst0, dst1, src) vmov dst0, dst1, src SEPARATOR +#endif +#endif + +#if defined(__ASSEMBLER__) && (defined(__i386__) || defined(__amd64__)) && \ + !defined(__arm64ec__) +.att_syntax +#endif + #endif // COMPILERRT_ASSEMBLY_H diff --git a/libsanitizer/hwasan/hwasan.cpp b/libsanitizer/hwasan/hwasan.cpp index 24384d8b4d2c..615bae4b3a3f 100644 --- a/libsanitizer/hwasan/hwasan.cpp +++ b/libsanitizer/hwasan/hwasan.cpp @@ -176,7 +176,7 @@ static void HwasanFormatMemoryUsage(InternalScopedString &s) { "HWASAN pid: %d rss: %zd threads: %zd stacks: %zd" " thr_aux: %zd stack_depot: %zd uniq_stacks: %zd" " heap: %zd", - internal_getpid(), GetRSS(), thread_stats.n_live_threads, + (int)internal_getpid(), GetRSS(), thread_stats.n_live_threads, 
thread_stats.total_stack_size, thread_stats.n_live_threads * thread_list.MemoryUsedPerThread(), sds.allocated, sds.n_uniq_ids, asc[AllocatorStatMapped]); @@ -692,7 +692,7 @@ void __hwasan_handle_longjmp(const void *sp_dst) { "WARNING: HWASan is ignoring requested __hwasan_handle_longjmp: " "stack top: %p; target %p; distance: %p (%zd)\n" "False positive error reports may follow\n", - (void *)sp, (void *)dst, dst - sp, dst - sp); + (void *)sp, (void *)dst, (void *)(dst - sp), dst - sp); return; } TagMemory(sp, dst - sp, 0); diff --git a/libsanitizer/hwasan/hwasan_fuchsia.cpp b/libsanitizer/hwasan/hwasan_fuchsia.cpp index d1696f8aa796..647211bf199e 100644 --- a/libsanitizer/hwasan/hwasan_fuchsia.cpp +++ b/libsanitizer/hwasan/hwasan_fuchsia.cpp @@ -31,6 +31,15 @@ SANITIZER_INTERFACE_ATTRIBUTE THREADLOCAL uptr __hwasan_tls; +namespace __sanitizer { +void EarlySanitizerInit() { + // Setup the hwasan runtime before any `__libc_extensions_init`s are called. + // This is needed because libraries which define this function (like fdio) + // may be instrumented and either access `__hwasan_tls` or make runtime calls. + __hwasan_init(); +} +} // namespace __sanitizer + namespace __hwasan { bool InitShadow() { diff --git a/libsanitizer/hwasan/hwasan_globals.cpp b/libsanitizer/hwasan/hwasan_globals.cpp index 7e0f3df20dd0..9e059ce3c19c 100644 --- a/libsanitizer/hwasan/hwasan_globals.cpp +++ b/libsanitizer/hwasan/hwasan_globals.cpp @@ -73,16 +73,21 @@ ArrayRef HwasanGlobalsFor(ElfW(Addr) base, continue; } - // Only libraries with instrumented globals need to be checked against the - // code model since they use relocations that aren't checked at link time. 
- CheckCodeModel(base, phdr, phnum); - auto *global_note = reinterpret_cast(desc); auto *globals_begin = reinterpret_cast( note + global_note->begin_relptr); auto *globals_end = reinterpret_cast( note + global_note->end_relptr); + // Only libraries with instrumented globals need to be checked against the + // code model since they use relocations that aren't checked at link time. + // + // There is always a HWASan globals note ("Create the note even if we + // aren't instrumenting globals." - HWAddressSanitizer.cpp), but we can + // elide the code model check if there are no globals. + if (globals_begin != globals_end) + CheckCodeModel(base, phdr, phnum); + return {globals_begin, globals_end}; } } diff --git a/libsanitizer/hwasan/hwasan_interceptors_vfork.S b/libsanitizer/hwasan/hwasan_interceptors_vfork.S index fd20825e3dac..825f41156509 100644 --- a/libsanitizer/hwasan/hwasan_interceptors_vfork.S +++ b/libsanitizer/hwasan/hwasan_interceptors_vfork.S @@ -11,4 +11,4 @@ NO_EXEC_STACK_DIRECTIVE -GNU_PROPERTY_BTI_PAC +GNU_PROPERTY_BTI_PAC_GCS diff --git a/libsanitizer/hwasan/hwasan_interface_internal.h b/libsanitizer/hwasan/hwasan_interface_internal.h index 8f2f77dad917..86ddfea5bf82 100644 --- a/libsanitizer/hwasan/hwasan_interface_internal.h +++ b/libsanitizer/hwasan/hwasan_interface_internal.h @@ -247,6 +247,13 @@ void *__hwasan_memmove_match_all(void *dest, const void *src, uptr n, u8); SANITIZER_INTERFACE_ATTRIBUTE void __hwasan_set_error_report_callback(void (*callback)(const char *)); + +// hwasan does not need fake stack, so we leave it empty here. 
+SANITIZER_INTERFACE_ATTRIBUTE +void __sanitizer_start_switch_fiber(void **, const void *bottom, uptr size); +SANITIZER_INTERFACE_ATTRIBUTE +void __sanitizer_finish_switch_fiber(void *, const void **bottom_old, + uptr *size_old); } // extern "C" #endif // HWASAN_INTERFACE_INTERNAL_H diff --git a/libsanitizer/hwasan/hwasan_malloc_bisect.h b/libsanitizer/hwasan/hwasan_malloc_bisect.h index 7d134e8c4b7f..52a28438f3a9 100644 --- a/libsanitizer/hwasan/hwasan_malloc_bisect.h +++ b/libsanitizer/hwasan/hwasan_malloc_bisect.h @@ -41,7 +41,7 @@ static inline bool malloc_bisect(StackTrace *stack, uptr orig_size) { if (h < left || h > right) return false; if (flags()->malloc_bisect_dump) { - Printf("[alloc] %u %zu\n", h, orig_size); + Printf("[alloc] %u %zu\n", (u32)h, orig_size); stack->Print(); } return true; diff --git a/libsanitizer/hwasan/hwasan_new_delete.cpp b/libsanitizer/hwasan/hwasan_new_delete.cpp index f0fd3726ef1b..232eb0eb6da6 100644 --- a/libsanitizer/hwasan/hwasan_new_delete.cpp +++ b/libsanitizer/hwasan/hwasan_new_delete.cpp @@ -22,23 +22,75 @@ #if HWASAN_REPLACE_OPERATORS_NEW_AND_DELETE // TODO(alekseys): throw std::bad_alloc instead of dying on OOM. 
-# define OPERATOR_NEW_BODY(nothrow) \ +# define OPERATOR_NEW_BODY \ GET_MALLOC_STACK_TRACE; \ void *res = hwasan_malloc(size, &stack); \ - if (!nothrow && UNLIKELY(!res)) \ + if (UNLIKELY(!res)) \ ReportOutOfMemory(size, &stack); \ return res -# define OPERATOR_NEW_ALIGN_BODY(nothrow) \ +# define OPERATOR_NEW_BODY_NOTHROW \ + GET_MALLOC_STACK_TRACE; \ + return hwasan_malloc(size, &stack) +# define OPERATOR_NEW_BODY_ARRAY \ + GET_MALLOC_STACK_TRACE; \ + void *res = hwasan_malloc(size, &stack); \ + if (UNLIKELY(!res)) \ + ReportOutOfMemory(size, &stack); \ + return res +# define OPERATOR_NEW_BODY_ARRAY_NOTHROW \ + GET_MALLOC_STACK_TRACE; \ + return hwasan_malloc(size, &stack) +# define OPERATOR_NEW_BODY_ALIGN \ + GET_MALLOC_STACK_TRACE; \ + void *res = hwasan_memalign(static_cast(align), size, &stack); \ + if (UNLIKELY(!res)) \ + ReportOutOfMemory(size, &stack); \ + return res +# define OPERATOR_NEW_BODY_ALIGN_NOTHROW \ + GET_MALLOC_STACK_TRACE; \ + return hwasan_memalign(static_cast(align), size, &stack) +# define OPERATOR_NEW_BODY_ALIGN_ARRAY \ GET_MALLOC_STACK_TRACE; \ void *res = hwasan_memalign(static_cast(align), size, &stack); \ - if (!nothrow && UNLIKELY(!res)) \ + if (UNLIKELY(!res)) \ ReportOutOfMemory(size, &stack); \ return res +# define OPERATOR_NEW_BODY_ALIGN_ARRAY_NOTHROW \ + GET_MALLOC_STACK_TRACE; \ + return hwasan_memalign(static_cast(align), size, &stack) # define OPERATOR_DELETE_BODY \ GET_MALLOC_STACK_TRACE; \ if (ptr) \ hwasan_free(ptr, &stack) +# define OPERATOR_DELETE_BODY_ARRAY \ + GET_MALLOC_STACK_TRACE; \ + if (ptr) \ + hwasan_free(ptr, &stack) +# define OPERATOR_DELETE_BODY_ALIGN \ + GET_MALLOC_STACK_TRACE; \ + if (ptr) \ + hwasan_free(ptr, &stack) +# define OPERATOR_DELETE_BODY_ALIGN_ARRAY \ + GET_MALLOC_STACK_TRACE; \ + if (ptr) \ + hwasan_free(ptr, &stack) +# define OPERATOR_DELETE_BODY_SIZE \ + GET_MALLOC_STACK_TRACE; \ + if (ptr) \ + hwasan_free(ptr, &stack) +# define OPERATOR_DELETE_BODY_SIZE_ARRAY \ + GET_MALLOC_STACK_TRACE; \ + if 
(ptr) \ + hwasan_free(ptr, &stack) +# define OPERATOR_DELETE_BODY_SIZE_ALIGN \ + GET_MALLOC_STACK_TRACE; \ + if (ptr) \ + hwasan_free(ptr, &stack) +# define OPERATOR_DELETE_BODY_SIZE_ALIGN_ARRAY \ + GET_MALLOC_STACK_TRACE; \ + if (ptr) \ + hwasan_free(ptr, &stack) #elif defined(__ANDROID__) @@ -46,8 +98,14 @@ // since we previously released a runtime that intercepted these functions, // removing the interceptors would break ABI. Therefore we simply forward to // malloc and free. -# define OPERATOR_NEW_BODY(nothrow) return malloc(size) +# define OPERATOR_NEW_BODY return malloc(size) +# define OPERATOR_NEW_BODY_NOTHROW return malloc(size) +# define OPERATOR_NEW_BODY_ARRAY return malloc(size) +# define OPERATOR_NEW_BODY_ARRAY_NOTHROW return malloc(size) # define OPERATOR_DELETE_BODY free(ptr) +# define OPERATOR_DELETE_BODY_ARRAY free(ptr) +# define OPERATOR_DELETE_BODY_SIZE free(ptr) +# define OPERATOR_DELETE_BODY_SIZE_ARRAY free(ptr) #endif @@ -61,19 +119,19 @@ struct nothrow_t {}; } // namespace std INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void *operator new(size_t size) { - OPERATOR_NEW_BODY(false /*nothrow*/); + OPERATOR_NEW_BODY; } INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void *operator new[]( size_t size) { - OPERATOR_NEW_BODY(false /*nothrow*/); + OPERATOR_NEW_BODY_ARRAY; } INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void *operator new( size_t size, std::nothrow_t const &) { - OPERATOR_NEW_BODY(true /*nothrow*/); + OPERATOR_NEW_BODY_NOTHROW; } INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void *operator new[]( size_t size, std::nothrow_t const &) { - OPERATOR_NEW_BODY(true /*nothrow*/); + OPERATOR_NEW_BODY_ARRAY_NOTHROW; } INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete( @@ -82,7 +140,7 @@ INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete( } INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete[]( void *ptr) NOEXCEPT { - OPERATOR_DELETE_BODY; + OPERATOR_DELETE_BODY_ARRAY; } 
INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete( void *ptr, std::nothrow_t const &) { @@ -90,20 +148,20 @@ INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete( } INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete[]( void *ptr, std::nothrow_t const &) { - OPERATOR_DELETE_BODY; + OPERATOR_DELETE_BODY_ARRAY; } INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete( void *ptr, size_t) NOEXCEPT { - OPERATOR_DELETE_BODY; + OPERATOR_DELETE_BODY_SIZE; } INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete[]( void *ptr, size_t) NOEXCEPT { - OPERATOR_DELETE_BODY; + OPERATOR_DELETE_BODY_SIZE_ARRAY; } #endif // OPERATOR_NEW_BODY -#ifdef OPERATOR_NEW_ALIGN_BODY +#ifdef OPERATOR_NEW_BODY_ALIGN namespace std { enum class align_val_t : size_t {}; @@ -111,52 +169,52 @@ enum class align_val_t : size_t {}; INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void *operator new( size_t size, std::align_val_t align) { - OPERATOR_NEW_ALIGN_BODY(false /*nothrow*/); + OPERATOR_NEW_BODY_ALIGN; } INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void *operator new[]( size_t size, std::align_val_t align) { - OPERATOR_NEW_ALIGN_BODY(false /*nothrow*/); + OPERATOR_NEW_BODY_ALIGN_ARRAY; } INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void *operator new( size_t size, std::align_val_t align, std::nothrow_t const &) { - OPERATOR_NEW_ALIGN_BODY(true /*nothrow*/); + OPERATOR_NEW_BODY_ALIGN_NOTHROW; } INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void *operator new[]( size_t size, std::align_val_t align, std::nothrow_t const &) { - OPERATOR_NEW_ALIGN_BODY(true /*nothrow*/); + OPERATOR_NEW_BODY_ALIGN_ARRAY_NOTHROW; } INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete( void *ptr, std::align_val_t align) NOEXCEPT { - OPERATOR_DELETE_BODY; + OPERATOR_DELETE_BODY_ALIGN; } INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete[]( void *ptr, std::align_val_t) NOEXCEPT { - OPERATOR_DELETE_BODY; + 
OPERATOR_DELETE_BODY_ALIGN_ARRAY; } INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete( void *ptr, std::align_val_t, std::nothrow_t const &) NOEXCEPT { - OPERATOR_DELETE_BODY; + OPERATOR_DELETE_BODY_ALIGN; } INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete[]( void *ptr, std::align_val_t, std::nothrow_t const &) NOEXCEPT { - OPERATOR_DELETE_BODY; + OPERATOR_DELETE_BODY_ALIGN_ARRAY; } INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete( void *ptr, size_t, std::align_val_t) NOEXCEPT { - OPERATOR_DELETE_BODY; + OPERATOR_DELETE_BODY_SIZE_ALIGN; } INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete[]( void *ptr, size_t, std::align_val_t) NOEXCEPT { - OPERATOR_DELETE_BODY; + OPERATOR_DELETE_BODY_SIZE_ALIGN_ARRAY; } INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete( void *ptr, size_t, std::align_val_t, std::nothrow_t const &) NOEXCEPT { - OPERATOR_DELETE_BODY; + OPERATOR_DELETE_BODY_SIZE_ALIGN; } INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete[]( void *ptr, size_t, std::align_val_t, std::nothrow_t const &) NOEXCEPT { - OPERATOR_DELETE_BODY; + OPERATOR_DELETE_BODY_SIZE_ALIGN_ARRAY; } -#endif // OPERATOR_NEW_ALIGN_BODY +#endif // OPERATOR_NEW_BODY_ALIGN diff --git a/libsanitizer/hwasan/hwasan_platform_interceptors.h b/libsanitizer/hwasan/hwasan_platform_interceptors.h index d92b51052194..8a653d83dec6 100644 --- a/libsanitizer/hwasan/hwasan_platform_interceptors.h +++ b/libsanitizer/hwasan/hwasan_platform_interceptors.h @@ -200,12 +200,18 @@ #undef SANITIZER_INTERCEPT_CLOCK_GETCPUCLOCKID #define SANITIZER_INTERCEPT_CLOCK_GETCPUCLOCKID 0 +#undef SANITIZER_INTERCEPT_TIMER_CREATE +#define SANITIZER_INTERCEPT_TIMER_CREATE 0 + #undef SANITIZER_INTERCEPT_GETITIMER #define SANITIZER_INTERCEPT_GETITIMER 0 #undef SANITIZER_INTERCEPT_TIME #define SANITIZER_INTERCEPT_TIME 0 +#undef SANITIZER_INTERCEPT_TIMESPEC_GET +#define SANITIZER_INTERCEPT_TIMESPEC_GET 0 + #undef 
SANITIZER_INTERCEPT_GLOB #define SANITIZER_INTERCEPT_GLOB 0 diff --git a/libsanitizer/hwasan/hwasan_report.cpp b/libsanitizer/hwasan/hwasan_report.cpp index bc66e6e805c9..871aa74f99c4 100644 --- a/libsanitizer/hwasan/hwasan_report.cpp +++ b/libsanitizer/hwasan/hwasan_report.cpp @@ -306,8 +306,9 @@ static void PrintStackAllocations(const StackAllocationsRingBuffer *sa, "%p is located %zd bytes %s a %zd-byte local variable %s " "[%p,%p) " "in %s %s\n", - untagged_addr, offset, whence, local.size, local.name, best_beg, - best_beg + local.size, local.function_name, location.data()); + (void *)untagged_addr, offset, whence, local.size, local.name, + (void *)best_beg, (void *)(best_beg + local.size), + local.function_name, location.data()); location.clear(); Printf("%s\n", d.Default()); } @@ -737,9 +738,9 @@ void BaseReport::PrintHeapOrGlobalCandidate() const { Printf("%s", d.Default()); Printf("%s", d.Location()); Printf("%p is located %zd bytes %s a %zd-byte region [%p,%p)\n", - untagged_addr, offset, whence, - candidate.heap.end - candidate.heap.begin, candidate.heap.begin, - candidate.heap.end); + (void*)untagged_addr, offset, whence, + candidate.heap.end - candidate.heap.begin, + (void*)candidate.heap.begin, (void*)candidate.heap.end); Printf("%s", d.Allocation()); Printf("allocated by thread T%u here:\n", candidate.heap.thread_id); Printf("%s", d.Default()); @@ -762,11 +763,11 @@ void BaseReport::PrintHeapOrGlobalCandidate() const { Printf( "%p is located %zd bytes %s a %zd-byte global variable " "%s [%p,%p) in %s\n", - untagged_addr, + (void *)untagged_addr, candidate.after ? untagged_addr - (info.start + info.size) : info.start - untagged_addr, candidate.after ? 
"after" : "before", info.size, info.name, - info.start, info.start + info.size, module_name); + (void *)info.start, (void *)(info.start + info.size), module_name); } else { uptr size = GetGlobalSizeFromDescriptor(candidate.untagged_addr); if (size == 0) @@ -774,14 +775,14 @@ void BaseReport::PrintHeapOrGlobalCandidate() const { Printf( "%p is located %s a global variable in " "\n #0 0x%x (%s+0x%x)\n", - untagged_addr, candidate.after ? "after" : "before", - candidate.untagged_addr, module_name, module_address); + (void*)untagged_addr, candidate.after ? "after" : "before", + (u32)candidate.untagged_addr, module_name, (u32)module_address); else Printf( "%p is located %s a %zd-byte global variable in " "\n #0 0x%x (%s+0x%x)\n", - untagged_addr, candidate.after ? "after" : "before", size, - candidate.untagged_addr, module_name, module_address); + (void*)untagged_addr, candidate.after ? "after" : "before", size, + (u32)candidate.untagged_addr, module_name, (u32)module_address); } Printf("%s", d.Default()); } @@ -792,8 +793,8 @@ void BaseReport::PrintAddressDescription() const { int num_descriptions_printed = 0; if (MemIsShadow(untagged_addr)) { - Printf("%s%p is HWAsan shadow memory.\n%s", d.Location(), untagged_addr, - d.Default()); + Printf("%s%p is HWAsan shadow memory.\n%s", d.Location(), + (void *)untagged_addr, d.Default()); return; } @@ -802,7 +803,7 @@ void BaseReport::PrintAddressDescription() const { Printf( "%s[%p,%p) is a %s %s heap chunk; " "size: %zd offset: %zd\n%s", - d.Location(), heap.begin, heap.begin + heap.size, + d.Location(), (void *)heap.begin, (void *)(heap.begin + heap.size), heap.from_small_heap ? "small" : "large", heap.is_allocated ? 
"allocated" : "unallocated", heap.size, untagged_addr - heap.begin, d.Default()); @@ -821,8 +822,8 @@ void BaseReport::PrintAddressDescription() const { Printf("%s", d.Error()); Printf("\nCause: stack tag-mismatch\n"); Printf("%s", d.Location()); - Printf("Address %p is located in stack of thread T%zd\n", untagged_addr, - sa.thread_id()); + Printf("Address %p is located in stack of thread T%zd\n", + (void *)untagged_addr, (ssize)sa.thread_id()); Printf("%s", d.Default()); announce_by_id(sa.thread_id()); PrintStackAllocations(sa.get(), ptr_tag, untagged_addr); @@ -842,9 +843,9 @@ void BaseReport::PrintAddressDescription() const { Printf("\nCause: use-after-free\n"); Printf("%s", d.Location()); Printf("%p is located %zd bytes inside a %zd-byte region [%p,%p)\n", - untagged_addr, untagged_addr - UntagAddr(har.tagged_addr), - har.requested_size, UntagAddr(har.tagged_addr), - UntagAddr(har.tagged_addr) + har.requested_size); + (void*)untagged_addr, untagged_addr - UntagAddr(har.tagged_addr), + (ssize)har.requested_size, (void*)UntagAddr(har.tagged_addr), + (void*)(UntagAddr(har.tagged_addr) + har.requested_size)); Printf("%s", d.Allocation()); Printf("freed by thread T%u here:\n", ha.free_thread_id); Printf("%s", d.Default()); @@ -858,7 +859,7 @@ void BaseReport::PrintAddressDescription() const { // Print a developer note: the index of this heap object // in the thread's deallocation ring buffer. 
Printf("hwasan_dev_note_heap_rb_distance: %zd %zd\n", ha.ring_index + 1, - flags()->heap_history_size); + (ssize)flags()->heap_history_size); Printf("hwasan_dev_note_num_matching_addrs: %zd\n", ha.num_matching_addrs); Printf("hwasan_dev_note_num_matching_addrs_4b: %zd\n", ha.num_matching_addrs_4b); @@ -915,10 +916,11 @@ InvalidFreeReport::~InvalidFreeReport() { const Thread *thread = GetCurrentThread(); if (thread) { Report("ERROR: %s: %s on address %p at pc %p on thread T%zd\n", - SanitizerToolName, bug_type, untagged_addr, pc, thread->unique_id()); + SanitizerToolName, bug_type, (void *)untagged_addr, (void *)pc, + (ssize)thread->unique_id()); } else { Report("ERROR: %s: %s on address %p at pc %p on unknown thread\n", - SanitizerToolName, bug_type, untagged_addr, pc); + SanitizerToolName, bug_type, (void *)untagged_addr, (void *)pc); } Printf("%s", d.Access()); if (shadow.addr) { @@ -967,7 +969,8 @@ TailOverwrittenReport::~TailOverwrittenReport() { Printf("%s", d.Error()); const char *bug_type = "allocation-tail-overwritten"; Report("ERROR: %s: %s; heap object [%p,%p) of size %zd\n", SanitizerToolName, - bug_type, untagged_addr, untagged_addr + orig_size, orig_size); + bug_type, (void *)untagged_addr, (void *)(untagged_addr + orig_size), + orig_size); Printf("\n%s", d.Default()); Printf( "Stack of invalid access unknown. Issue detected at deallocation " @@ -1037,7 +1040,7 @@ TagMismatchReport::~TagMismatchReport() { uptr pc = GetTopPc(stack); Printf("%s", d.Error()); Report("ERROR: %s: %s on address %p at pc %p\n", SanitizerToolName, bug_type, - untagged_addr, pc); + (void *)untagged_addr, (void *)pc); Thread *t = GetCurrentThread(); @@ -1049,12 +1052,12 @@ TagMismatchReport::~TagMismatchReport() { GetShortTagCopy(MemToShadow(untagged_addr + mismatch_offset)); Printf( "%s of size %zu at %p tags: %02x/%02x(%02x) (ptr/mem) in thread T%zd\n", - is_store ? "WRITE" : "READ", access_size, untagged_addr, ptr_tag, - mem_tag, short_tag, t->unique_id()); + is_store ? 
"WRITE" : "READ", access_size, (void *)untagged_addr, + ptr_tag, mem_tag, short_tag, (ssize)t->unique_id()); } else { Printf("%s of size %zu at %p tags: %02x/%02x (ptr/mem) in thread T%zd\n", - is_store ? "WRITE" : "READ", access_size, untagged_addr, ptr_tag, - mem_tag, t->unique_id()); + is_store ? "WRITE" : "READ", access_size, (void *)untagged_addr, + ptr_tag, mem_tag, (ssize)t->unique_id()); } if (mismatch_offset) Printf("Invalid access starting at offset %zu\n", mismatch_offset); @@ -1093,7 +1096,7 @@ void ReportTagMismatch(StackTrace *stack, uptr tagged_addr, uptr access_size, // See the frame breakdown defined in __hwasan_tag_mismatch (from // hwasan_tag_mismatch_{aarch64,riscv64}.S). void ReportRegisters(const uptr *frame, uptr pc) { - Printf("\nRegisters where the failure occurred (pc %p):\n", pc); + Printf("\nRegisters where the failure occurred (pc %p):\n", (void *)pc); // We explicitly print a single line (4 registers/line) each iteration to // reduce the amount of logcat error messages printed. Each Printf() will diff --git a/libsanitizer/hwasan/hwasan_setjmp_aarch64.S b/libsanitizer/hwasan/hwasan_setjmp_aarch64.S index 0c0abb6de861..80d680017cfe 100644 --- a/libsanitizer/hwasan/hwasan_setjmp_aarch64.S +++ b/libsanitizer/hwasan/hwasan_setjmp_aarch64.S @@ -28,7 +28,7 @@ // stack pointer when compiling a C function. // Hence we have to write this function in assembly. -.section .text +TEXT_SECTION .file "hwasan_setjmp_aarch64.S" .global ASM_WRAPPER_NAME(setjmp) @@ -99,4 +99,4 @@ ASM_TRAMPOLINE_ALIAS(_setjmp, setjmp) // We do not need executable stack. 
NO_EXEC_STACK_DIRECTIVE -GNU_PROPERTY_BTI_PAC +GNU_PROPERTY_BTI_PAC_GCS diff --git a/libsanitizer/hwasan/hwasan_setjmp_x86_64.S b/libsanitizer/hwasan/hwasan_setjmp_x86_64.S index 9804e8d7ceca..a5379d39c6e7 100644 --- a/libsanitizer/hwasan/hwasan_setjmp_x86_64.S +++ b/libsanitizer/hwasan/hwasan_setjmp_x86_64.S @@ -30,6 +30,7 @@ .section .text .file "hwasan_setjmp_x86_64.S" +.att_syntax .global ASM_WRAPPER_NAME(setjmp) ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(setjmp)) diff --git a/libsanitizer/hwasan/hwasan_tag_mismatch_aarch64.S b/libsanitizer/hwasan/hwasan_tag_mismatch_aarch64.S index fd060c51cd8e..1631d3257a26 100644 --- a/libsanitizer/hwasan/hwasan_tag_mismatch_aarch64.S +++ b/libsanitizer/hwasan/hwasan_tag_mismatch_aarch64.S @@ -70,7 +70,7 @@ // clobbering the x17 register in error reports, and that the program will have // a runtime dependency on the __hwasan_tag_mismatch_v2 symbol therefore it will // fail to start up given an older (i.e. incompatible) runtime. -.section .text +TEXT_SECTION .file "hwasan_tag_mismatch_aarch64.S" .global __hwasan_tag_mismatch .type __hwasan_tag_mismatch, %function @@ -157,4 +157,4 @@ mismatch: // We do not need executable stack. 
NO_EXEC_STACK_DIRECTIVE -GNU_PROPERTY_BTI_PAC +GNU_PROPERTY_BTI_PAC_GCS diff --git a/libsanitizer/hwasan/hwasan_thread.cpp b/libsanitizer/hwasan/hwasan_thread.cpp index 8b32e4e760e2..bebc11d082ca 100644 --- a/libsanitizer/hwasan/hwasan_thread.cpp +++ b/libsanitizer/hwasan/hwasan_thread.cpp @@ -119,10 +119,64 @@ void Thread::Destroy() { *GetCurrentThreadLongPtr() = 0; } +void Thread::StartSwitchFiber(uptr bottom, uptr size) { + if (atomic_load(&stack_switching_, memory_order_acquire)) { + Report("ERROR: starting fiber switch while in fiber switch\n"); + Die(); + } + + next_stack_bottom_ = bottom; + next_stack_top_ = bottom + size; + atomic_store(&stack_switching_, 1, memory_order_release); +} + +void Thread::FinishSwitchFiber(uptr *bottom_old, uptr *size_old) { + if (!atomic_load(&stack_switching_, memory_order_acquire)) { + Report("ERROR: finishing a fiber switch that has not started\n"); + Die(); + } + + if (bottom_old) + *bottom_old = stack_bottom_; + if (size_old) + *size_old = stack_top_ - stack_bottom_; + stack_bottom_ = next_stack_bottom_; + stack_top_ = next_stack_top_; + atomic_store(&stack_switching_, 0, memory_order_release); + next_stack_top_ = 0; + next_stack_bottom_ = 0; +} + +inline Thread::StackBounds Thread::GetStackBounds() const { + if (!atomic_load(&stack_switching_, memory_order_acquire)) { + // Make sure the stack bounds are fully initialized. + if (stack_bottom_ >= stack_top_) + return {0, 0}; + return {stack_bottom_, stack_top_}; + } + const uptr cur_stack = (uptr)__builtin_frame_address(0); + // Note: need to check next stack first, because FinishSwitchFiber + // may be in process of overwriting stack_top_/bottom_. But in such case + // we are already on the next stack. 
+ if (cur_stack >= next_stack_bottom_ && cur_stack < next_stack_top_) + return {next_stack_bottom_, next_stack_top_}; + return {stack_bottom_, stack_top_}; +} + +uptr Thread::stack_top() { return GetStackBounds().top; } + +uptr Thread::stack_bottom() { return GetStackBounds().bottom; } + +uptr Thread::stack_size() { + const auto bounds = GetStackBounds(); + return bounds.top - bounds.bottom; +} + void Thread::Print(const char *Prefix) { - Printf("%sT%zd %p stack: [%p,%p) sz: %zd tls: [%p,%p)\n", Prefix, unique_id_, - (void *)this, stack_bottom(), stack_top(), - stack_top() - stack_bottom(), tls_begin(), tls_end()); + Printf("%sT%zd %p stack: [%p,%p) sz: %zd tls: [%p,%p)\n", Prefix, + (ssize)unique_id_, (void *)this, (void *)stack_bottom(), + (void *)stack_top(), stack_top() - stack_bottom(), (void *)tls_begin(), + (void *)tls_end()); } static u32 xorshift(u32 state) { @@ -174,7 +228,7 @@ static __hwasan::HwasanThreadList *GetHwasanThreadListLocked() { return &tl; } -static __hwasan::Thread *GetThreadByOsIDLocked(tid_t os_id) { +static __hwasan::Thread *GetThreadByOsIDLocked(ThreadID os_id) { return GetHwasanThreadListLocked()->FindThreadLocked( [os_id](__hwasan::Thread *t) { return t->os_id() == os_id; }); } @@ -191,7 +245,7 @@ void UnlockThreads() { void EnsureMainThreadIDIsCorrect() { __hwasan::EnsureMainThreadIDIsCorrect(); } -bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end, +bool GetThreadRangesLocked(ThreadID os_id, uptr *stack_begin, uptr *stack_end, uptr *tls_begin, uptr *tls_end, uptr *cache_begin, uptr *cache_end, DTLS **dtls) { auto *t = GetThreadByOsIDLocked(os_id); @@ -210,7 +264,7 @@ bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end, void GetAllThreadAllocatorCachesLocked(InternalMmapVector *caches) {} -void GetThreadExtraStackRangesLocked(tid_t os_id, +void GetThreadExtraStackRangesLocked(ThreadID os_id, InternalMmapVector *ranges) {} void GetThreadExtraStackRangesLocked(InternalMmapVector *ranges) 
{} @@ -218,7 +272,7 @@ void GetAdditionalThreadContextPtrsLocked(InternalMmapVector *ptrs) { __hwasan::hwasanThreadArgRetval().GetAllPtrsLocked(ptrs); } -void GetRunningThreadsLocked(InternalMmapVector *threads) { +void GetRunningThreadsLocked(InternalMmapVector *threads) { // TODO: implement. } void PrintThreads() { @@ -226,3 +280,25 @@ void PrintThreads() { } } // namespace __lsan + +// ---------------------- Interface ---------------- {{{1 +using namespace __hwasan; + +extern "C" { +SANITIZER_INTERFACE_ATTRIBUTE +void __sanitizer_start_switch_fiber(void **, const void *bottom, uptr size) { + if (auto *t = GetCurrentThread()) + t->StartSwitchFiber((uptr)bottom, size); + else + VReport(1, "__hwasan_start_switch_fiber called from unknown thread\n"); +} + +SANITIZER_INTERFACE_ATTRIBUTE +void __sanitizer_finish_switch_fiber(void *, const void **bottom_old, + uptr *size_old) { + if (auto *t = GetCurrentThread()) + t->FinishSwitchFiber((uptr *)bottom_old, size_old); + else + VReport(1, "__hwasan_finish_switch_fiber called from unknown thread\n"); +} +} diff --git a/libsanitizer/hwasan/hwasan_thread.h b/libsanitizer/hwasan/hwasan_thread.h index 9e1b438e48f7..8ef282fd7b10 100644 --- a/libsanitizer/hwasan/hwasan_thread.h +++ b/libsanitizer/hwasan/hwasan_thread.h @@ -41,9 +41,9 @@ class Thread { void Destroy(); - uptr stack_top() { return stack_top_; } - uptr stack_bottom() { return stack_bottom_; } - uptr stack_size() { return stack_top() - stack_bottom(); } + uptr stack_top(); + uptr stack_bottom(); + uptr stack_size(); uptr tls_begin() { return tls_begin_; } uptr tls_end() { return tls_end_; } DTLS *dtls() { return dtls_; } @@ -53,6 +53,9 @@ class Thread { return addr >= stack_bottom_ && addr < stack_top_; } + void StartSwitchFiber(uptr bottom, uptr size); + void FinishSwitchFiber(uptr *bottom_old, uptr *size_old); + AllocatorCache *allocator_cache() { return &allocator_cache_; } HeapAllocationsRingBuffer *heap_allocations() { return heap_allocations_; } 
StackAllocationsRingBuffer *stack_allocations() { return stack_allocations_; } @@ -69,8 +72,8 @@ class Thread { Print("Thread: "); } - tid_t os_id() const { return os_id_; } - void set_os_id(tid_t os_id) { os_id_ = os_id; } + ThreadID os_id() const { return os_id_; } + void set_os_id(ThreadID os_id) { os_id_ = os_id; } uptr &vfork_spill() { return vfork_spill_; } @@ -80,9 +83,22 @@ class Thread { void ClearShadowForThreadStackAndTLS(); void Print(const char *prefix); void InitRandomState(); + + struct StackBounds { + uptr bottom; + uptr top; + }; + StackBounds GetStackBounds() const; + uptr vfork_spill_; uptr stack_top_; uptr stack_bottom_; + // these variables are used when the thread is about to switch stack + uptr next_stack_top_; + uptr next_stack_bottom_; + // true if switching is in progress + atomic_uint8_t stack_switching_; + uptr tls_begin_; uptr tls_end_; DTLS *dtls_; @@ -96,7 +112,7 @@ class Thread { u32 unique_id_; // counting from zero. - tid_t os_id_; + ThreadID os_id_; u32 tagging_disabled_; // if non-zero, malloc uses zero tag in this thread. diff --git a/libsanitizer/include/sanitizer/memprof_interface.h b/libsanitizer/include/sanitizer/memprof_interface.h index 4660a7818c92..6d9b2a2394f4 100644 --- a/libsanitizer/include/sanitizer/memprof_interface.h +++ b/libsanitizer/include/sanitizer/memprof_interface.h @@ -47,9 +47,9 @@ void SANITIZER_CDECL __memprof_print_accumulated_stats(void); /// User-provided default option settings. /// -/// You can provide your own implementation of this function to return a string -/// containing MemProf runtime options (for example, -/// verbosity=1:print_stats=1). +/// You can set these options via the -memprof-runtime-default-options LLVM flag +/// or you can provide your own implementation of this function. See +/// memprof_flags.h for more info. /// /// \returns Default options string. 
const char *SANITIZER_CDECL __memprof_default_options(void); diff --git a/libsanitizer/include/sanitizer/tsan_interface_atomic.h b/libsanitizer/include/sanitizer/tsan_interface_atomic.h index de3a1c393609..74ed91efade0 100644 --- a/libsanitizer/include/sanitizer/tsan_interface_atomic.h +++ b/libsanitizer/include/sanitizer/tsan_interface_atomic.h @@ -43,183 +43,178 @@ typedef enum { } __tsan_memory_order; __tsan_atomic8 SANITIZER_CDECL -__tsan_atomic8_load(const volatile __tsan_atomic8 *a, __tsan_memory_order mo); +__tsan_atomic8_load(const volatile __tsan_atomic8 *a, int mo); __tsan_atomic16 SANITIZER_CDECL -__tsan_atomic16_load(const volatile __tsan_atomic16 *a, __tsan_memory_order mo); +__tsan_atomic16_load(const volatile __tsan_atomic16 *a, int mo); __tsan_atomic32 SANITIZER_CDECL -__tsan_atomic32_load(const volatile __tsan_atomic32 *a, __tsan_memory_order mo); +__tsan_atomic32_load(const volatile __tsan_atomic32 *a, int mo); __tsan_atomic64 SANITIZER_CDECL -__tsan_atomic64_load(const volatile __tsan_atomic64 *a, __tsan_memory_order mo); +__tsan_atomic64_load(const volatile __tsan_atomic64 *a, int mo); #if __TSAN_HAS_INT128 -__tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_load( - const volatile __tsan_atomic128 *a, __tsan_memory_order mo); +__tsan_atomic128 SANITIZER_CDECL +__tsan_atomic128_load(const volatile __tsan_atomic128 *a, int mo); #endif void SANITIZER_CDECL __tsan_atomic8_store(volatile __tsan_atomic8 *a, - __tsan_atomic8 v, - __tsan_memory_order mo); + __tsan_atomic8 v, int mo); void SANITIZER_CDECL __tsan_atomic16_store(volatile __tsan_atomic16 *a, - __tsan_atomic16 v, - __tsan_memory_order mo); + __tsan_atomic16 v, int mo); void SANITIZER_CDECL __tsan_atomic32_store(volatile __tsan_atomic32 *a, - __tsan_atomic32 v, - __tsan_memory_order mo); + __tsan_atomic32 v, int mo); void SANITIZER_CDECL __tsan_atomic64_store(volatile __tsan_atomic64 *a, - __tsan_atomic64 v, - __tsan_memory_order mo); + __tsan_atomic64 v, int mo); #if __TSAN_HAS_INT128 void 
SANITIZER_CDECL __tsan_atomic128_store(volatile __tsan_atomic128 *a, - __tsan_atomic128 v, - __tsan_memory_order mo); + __tsan_atomic128 v, int mo); #endif -__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_exchange( - volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo); +__tsan_atomic8 SANITIZER_CDECL +__tsan_atomic8_exchange(volatile __tsan_atomic8 *a, __tsan_atomic8 v, int mo); __tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_exchange( - volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo); + volatile __tsan_atomic16 *a, __tsan_atomic16 v, int mo); __tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_exchange( - volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo); + volatile __tsan_atomic32 *a, __tsan_atomic32 v, int mo); __tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_exchange( - volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo); + volatile __tsan_atomic64 *a, __tsan_atomic64 v, int mo); #if __TSAN_HAS_INT128 __tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_exchange( - volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo); + volatile __tsan_atomic128 *a, __tsan_atomic128 v, int mo); #endif -__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_fetch_add( - volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo); +__tsan_atomic8 SANITIZER_CDECL +__tsan_atomic8_fetch_add(volatile __tsan_atomic8 *a, __tsan_atomic8 v, int mo); __tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_fetch_add( - volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo); + volatile __tsan_atomic16 *a, __tsan_atomic16 v, int mo); __tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_fetch_add( - volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo); + volatile __tsan_atomic32 *a, __tsan_atomic32 v, int mo); __tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_fetch_add( - volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo); + volatile __tsan_atomic64 *a, 
__tsan_atomic64 v, int mo); #if __TSAN_HAS_INT128 __tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_fetch_add( - volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo); + volatile __tsan_atomic128 *a, __tsan_atomic128 v, int mo); #endif -__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_fetch_sub( - volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo); +__tsan_atomic8 SANITIZER_CDECL +__tsan_atomic8_fetch_sub(volatile __tsan_atomic8 *a, __tsan_atomic8 v, int mo); __tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_fetch_sub( - volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo); + volatile __tsan_atomic16 *a, __tsan_atomic16 v, int mo); __tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_fetch_sub( - volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo); + volatile __tsan_atomic32 *a, __tsan_atomic32 v, int mo); __tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_fetch_sub( - volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo); + volatile __tsan_atomic64 *a, __tsan_atomic64 v, int mo); #if __TSAN_HAS_INT128 __tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_fetch_sub( - volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo); + volatile __tsan_atomic128 *a, __tsan_atomic128 v, int mo); #endif -__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_fetch_and( - volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo); +__tsan_atomic8 SANITIZER_CDECL +__tsan_atomic8_fetch_and(volatile __tsan_atomic8 *a, __tsan_atomic8 v, int mo); __tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_fetch_and( - volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo); + volatile __tsan_atomic16 *a, __tsan_atomic16 v, int mo); __tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_fetch_and( - volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo); + volatile __tsan_atomic32 *a, __tsan_atomic32 v, int mo); __tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_fetch_and( - 
volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo); + volatile __tsan_atomic64 *a, __tsan_atomic64 v, int mo); #if __TSAN_HAS_INT128 __tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_fetch_and( - volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo); + volatile __tsan_atomic128 *a, __tsan_atomic128 v, int mo); #endif -__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_fetch_or( - volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo); +__tsan_atomic8 SANITIZER_CDECL +__tsan_atomic8_fetch_or(volatile __tsan_atomic8 *a, __tsan_atomic8 v, int mo); __tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_fetch_or( - volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo); + volatile __tsan_atomic16 *a, __tsan_atomic16 v, int mo); __tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_fetch_or( - volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo); + volatile __tsan_atomic32 *a, __tsan_atomic32 v, int mo); __tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_fetch_or( - volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo); + volatile __tsan_atomic64 *a, __tsan_atomic64 v, int mo); #if __TSAN_HAS_INT128 __tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_fetch_or( - volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo); + volatile __tsan_atomic128 *a, __tsan_atomic128 v, int mo); #endif -__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_fetch_xor( - volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo); +__tsan_atomic8 SANITIZER_CDECL +__tsan_atomic8_fetch_xor(volatile __tsan_atomic8 *a, __tsan_atomic8 v, int mo); __tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_fetch_xor( - volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo); + volatile __tsan_atomic16 *a, __tsan_atomic16 v, int mo); __tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_fetch_xor( - volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo); + volatile 
__tsan_atomic32 *a, __tsan_atomic32 v, int mo); __tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_fetch_xor( - volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo); + volatile __tsan_atomic64 *a, __tsan_atomic64 v, int mo); #if __TSAN_HAS_INT128 __tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_fetch_xor( - volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo); + volatile __tsan_atomic128 *a, __tsan_atomic128 v, int mo); #endif -__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_fetch_nand( - volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo); +__tsan_atomic8 SANITIZER_CDECL +__tsan_atomic8_fetch_nand(volatile __tsan_atomic8 *a, __tsan_atomic8 v, int mo); __tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_fetch_nand( - volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo); + volatile __tsan_atomic16 *a, __tsan_atomic16 v, int mo); __tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_fetch_nand( - volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo); + volatile __tsan_atomic32 *a, __tsan_atomic32 v, int mo); __tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_fetch_nand( - volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo); + volatile __tsan_atomic64 *a, __tsan_atomic64 v, int mo); #if __TSAN_HAS_INT128 __tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_fetch_nand( - volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo); + volatile __tsan_atomic128 *a, __tsan_atomic128 v, int mo); #endif int SANITIZER_CDECL __tsan_atomic8_compare_exchange_weak( - volatile __tsan_atomic8 *a, __tsan_atomic8 *c, __tsan_atomic8 v, - __tsan_memory_order mo, __tsan_memory_order fail_mo); + volatile __tsan_atomic8 *a, __tsan_atomic8 *c, __tsan_atomic8 v, int mo, + int fail_mo); int SANITIZER_CDECL __tsan_atomic16_compare_exchange_weak( - volatile __tsan_atomic16 *a, __tsan_atomic16 *c, __tsan_atomic16 v, - __tsan_memory_order mo, __tsan_memory_order fail_mo); + volatile 
__tsan_atomic16 *a, __tsan_atomic16 *c, __tsan_atomic16 v, int mo, + int fail_mo); int SANITIZER_CDECL __tsan_atomic32_compare_exchange_weak( - volatile __tsan_atomic32 *a, __tsan_atomic32 *c, __tsan_atomic32 v, - __tsan_memory_order mo, __tsan_memory_order fail_mo); + volatile __tsan_atomic32 *a, __tsan_atomic32 *c, __tsan_atomic32 v, int mo, + int fail_mo); int SANITIZER_CDECL __tsan_atomic64_compare_exchange_weak( - volatile __tsan_atomic64 *a, __tsan_atomic64 *c, __tsan_atomic64 v, - __tsan_memory_order mo, __tsan_memory_order fail_mo); + volatile __tsan_atomic64 *a, __tsan_atomic64 *c, __tsan_atomic64 v, int mo, + int fail_mo); #if __TSAN_HAS_INT128 int SANITIZER_CDECL __tsan_atomic128_compare_exchange_weak( volatile __tsan_atomic128 *a, __tsan_atomic128 *c, __tsan_atomic128 v, - __tsan_memory_order mo, __tsan_memory_order fail_mo); + int mo, int fail_mo); #endif int SANITIZER_CDECL __tsan_atomic8_compare_exchange_strong( - volatile __tsan_atomic8 *a, __tsan_atomic8 *c, __tsan_atomic8 v, - __tsan_memory_order mo, __tsan_memory_order fail_mo); + volatile __tsan_atomic8 *a, __tsan_atomic8 *c, __tsan_atomic8 v, int mo, + int fail_mo); int SANITIZER_CDECL __tsan_atomic16_compare_exchange_strong( - volatile __tsan_atomic16 *a, __tsan_atomic16 *c, __tsan_atomic16 v, - __tsan_memory_order mo, __tsan_memory_order fail_mo); + volatile __tsan_atomic16 *a, __tsan_atomic16 *c, __tsan_atomic16 v, int mo, + int fail_mo); int SANITIZER_CDECL __tsan_atomic32_compare_exchange_strong( - volatile __tsan_atomic32 *a, __tsan_atomic32 *c, __tsan_atomic32 v, - __tsan_memory_order mo, __tsan_memory_order fail_mo); + volatile __tsan_atomic32 *a, __tsan_atomic32 *c, __tsan_atomic32 v, int mo, + int fail_mo); int SANITIZER_CDECL __tsan_atomic64_compare_exchange_strong( - volatile __tsan_atomic64 *a, __tsan_atomic64 *c, __tsan_atomic64 v, - __tsan_memory_order mo, __tsan_memory_order fail_mo); + volatile __tsan_atomic64 *a, __tsan_atomic64 *c, __tsan_atomic64 v, int mo, + int fail_mo); 
#if __TSAN_HAS_INT128 int SANITIZER_CDECL __tsan_atomic128_compare_exchange_strong( volatile __tsan_atomic128 *a, __tsan_atomic128 *c, __tsan_atomic128 v, - __tsan_memory_order mo, __tsan_memory_order fail_mo); + int mo, int fail_mo); #endif __tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_compare_exchange_val( - volatile __tsan_atomic8 *a, __tsan_atomic8 c, __tsan_atomic8 v, - __tsan_memory_order mo, __tsan_memory_order fail_mo); + volatile __tsan_atomic8 *a, __tsan_atomic8 c, __tsan_atomic8 v, int mo, + int fail_mo); __tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_compare_exchange_val( - volatile __tsan_atomic16 *a, __tsan_atomic16 c, __tsan_atomic16 v, - __tsan_memory_order mo, __tsan_memory_order fail_mo); + volatile __tsan_atomic16 *a, __tsan_atomic16 c, __tsan_atomic16 v, int mo, + int fail_mo); __tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_compare_exchange_val( - volatile __tsan_atomic32 *a, __tsan_atomic32 c, __tsan_atomic32 v, - __tsan_memory_order mo, __tsan_memory_order fail_mo); + volatile __tsan_atomic32 *a, __tsan_atomic32 c, __tsan_atomic32 v, int mo, + int fail_mo); __tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_compare_exchange_val( - volatile __tsan_atomic64 *a, __tsan_atomic64 c, __tsan_atomic64 v, - __tsan_memory_order mo, __tsan_memory_order fail_mo); + volatile __tsan_atomic64 *a, __tsan_atomic64 c, __tsan_atomic64 v, int mo, + int fail_mo); #if __TSAN_HAS_INT128 __tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_compare_exchange_val( volatile __tsan_atomic128 *a, __tsan_atomic128 c, __tsan_atomic128 v, - __tsan_memory_order mo, __tsan_memory_order fail_mo); + int mo, int fail_mo); #endif -void SANITIZER_CDECL __tsan_atomic_thread_fence(__tsan_memory_order mo); -void SANITIZER_CDECL __tsan_atomic_signal_fence(__tsan_memory_order mo); +void SANITIZER_CDECL __tsan_atomic_thread_fence(int mo); +void SANITIZER_CDECL __tsan_atomic_signal_fence(int mo); #ifdef __cplusplus } // extern "C" diff --git a/libsanitizer/interception/interception.h 
b/libsanitizer/interception/interception.h index 3cb6b446638e..9fe7d3db308b 100644 --- a/libsanitizer/interception/interception.h +++ b/libsanitizer/interception/interception.h @@ -19,7 +19,7 @@ #if !SANITIZER_LINUX && !SANITIZER_FREEBSD && !SANITIZER_APPLE && \ !SANITIZER_NETBSD && !SANITIZER_WINDOWS && !SANITIZER_FUCHSIA && \ - !SANITIZER_SOLARIS + !SANITIZER_SOLARIS && !SANITIZER_HAIKU && !SANITIZER_AIX # error "Interception doesn't work on this operating system." #endif @@ -168,6 +168,16 @@ const interpose_substitution substitution_##func_name[] \ extern "C" ret_type func(__VA_ARGS__); # define DECLARE_WRAPPER_WINAPI(ret_type, func, ...) \ extern "C" __declspec(dllimport) ret_type __stdcall func(__VA_ARGS__); +#elif SANITIZER_AIX +# define WRAP(x) __interceptor_##x +# define TRAMPOLINE(x) WRAP(x) +// # define WRAPPER_NAME(x) "__interceptor_" #x +# define INTERCEPTOR_ATTRIBUTE __attribute__((visibility("default"))) +// AIX's linker will not select the weak symbol, so don't use weak for the +// interceptors. +# define DECLARE_WRAPPER(ret_type, func, ...) 
\ + extern "C" ret_type func(__VA_ARGS__) \ + __attribute__((alias("__interceptor_" #func), visibility("default"))); #elif !SANITIZER_FUCHSIA // LINUX, FREEBSD, NETBSD, SOLARIS # define INTERCEPTOR_ATTRIBUTE __attribute__((visibility("default"))) # if ASM_INTERCEPTOR_TRAMPOLINE_SUPPORT @@ -367,12 +377,17 @@ inline void DoesNotSupportStaticLinking() {} #define INCLUDED_FROM_INTERCEPTION_LIB -#if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD || \ - SANITIZER_SOLARIS +#if SANITIZER_AIX +# include "interception_aix.h" +# define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_AIX(func) +# define INTERCEPT_FUNCTION_VER(func, symver) INTERCEPT_FUNCTION_AIX(func) -# include "interception_linux.h" -# define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func) -# define INTERCEPT_FUNCTION_VER(func, symver) \ +#elif SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD || \ + SANITIZER_SOLARIS || SANITIZER_HAIKU + +# include "interception_linux.h" +# define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func) +# define INTERCEPT_FUNCTION_VER(func, symver) \ INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver) #elif SANITIZER_APPLE # include "interception_mac.h" diff --git a/libsanitizer/interception/interception_linux.cpp b/libsanitizer/interception/interception_linux.cpp index ef8136eb4fc7..f900ae6a593b 100644 --- a/libsanitizer/interception/interception_linux.cpp +++ b/libsanitizer/interception/interception_linux.cpp @@ -14,7 +14,7 @@ #include "interception.h" #if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD || \ - SANITIZER_SOLARIS + SANITIZER_SOLARIS || SANITIZER_HAIKU #include // for dlsym() and dlvsym() @@ -80,4 +80,4 @@ bool InterceptFunction(const char *name, const char *ver, uptr *ptr_to_real, } // namespace __interception #endif // SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD || - // SANITIZER_SOLARIS + // SANITIZER_SOLARIS || SANITIZER_HAIKU diff --git a/libsanitizer/interception/interception_linux.h 
b/libsanitizer/interception/interception_linux.h index 2e01ff44578c..0958ffe3fecc 100644 --- a/libsanitizer/interception/interception_linux.h +++ b/libsanitizer/interception/interception_linux.h @@ -12,7 +12,7 @@ //===----------------------------------------------------------------------===// #if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD || \ - SANITIZER_SOLARIS + SANITIZER_SOLARIS || SANITIZER_HAIKU #if !defined(INCLUDED_FROM_INTERCEPTION_LIB) # error interception_linux.h should be included from interception library only @@ -52,4 +52,4 @@ bool InterceptFunction(const char *name, const char *ver, uptr *ptr_to_real, #endif // INTERCEPTION_LINUX_H #endif // SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD || - // SANITIZER_SOLARIS + // SANITIZER_SOLARIS || SANITIZER_HAIKU diff --git a/libsanitizer/interception/interception_win.cpp b/libsanitizer/interception/interception_win.cpp index 077a536dd2a3..856872425117 100644 --- a/libsanitizer/interception/interception_win.cpp +++ b/libsanitizer/interception/interception_win.cpp @@ -187,8 +187,12 @@ static uptr GetMmapGranularity() { return si.dwAllocationGranularity; } +UNUSED static uptr RoundDownTo(uptr size, uptr boundary) { + return size & ~(boundary - 1); +} + UNUSED static uptr RoundUpTo(uptr size, uptr boundary) { - return (size + boundary - 1) & ~(boundary - 1); + return RoundDownTo(size + boundary - 1, boundary); } // FIXME: internal_str* and internal_mem* functions should be moved from the @@ -209,6 +213,18 @@ static char* _strchr(char* str, char c) { return nullptr; } +static int _strcmp(const char *s1, const char *s2) { + while (true) { + unsigned c1 = *s1; + unsigned c2 = *s2; + if (c1 != c2) return (c1 < c2) ? 
-1 : 1; + if (c1 == 0) break; + s1++; + s2++; + } + return 0; +} + static void _memset(void *p, int value, size_t sz) { for (size_t i = 0; i < sz; ++i) ((char*)p)[i] = (char)value; @@ -285,8 +301,11 @@ static void WriteJumpInstruction(uptr from, uptr target) { static void WriteShortJumpInstruction(uptr from, uptr target) { sptr offset = target - from - kShortJumpInstructionLength; - if (offset < -128 || offset > 127) + if (offset < -128 || offset > 127) { + ReportError("interception_win: cannot write short jmp from %p to %p\n", + (void *)from, (void *)target); InterceptionFailed(); + } *(u8*)from = 0xEB; *(u8*)(from + 1) = (u8)offset; } @@ -340,32 +359,78 @@ struct TrampolineMemoryRegion { uptr max_size; }; -UNUSED static const uptr kTrampolineScanLimitRange = 1ull << 31; // 2 gig +UNUSED static const uptr kTrampolineRangeLimit = 1ull << 31; // 2 gig static const int kMaxTrampolineRegion = 1024; static TrampolineMemoryRegion TrampolineRegions[kMaxTrampolineRegion]; -static void *AllocateTrampolineRegion(uptr image_address, size_t granularity) { -#if SANITIZER_WINDOWS64 - uptr address = image_address; - uptr scanned = 0; - while (scanned < kTrampolineScanLimitRange) { +static void *AllocateTrampolineRegion(uptr min_addr, uptr max_addr, + uptr func_addr, size_t granularity) { +# if SANITIZER_WINDOWS64 + // Clamp {min,max}_addr to the accessible address space. + SYSTEM_INFO system_info; + ::GetSystemInfo(&system_info); + uptr min_virtual_addr = + RoundUpTo((uptr)system_info.lpMinimumApplicationAddress, granularity); + uptr max_virtual_addr = + RoundDownTo((uptr)system_info.lpMaximumApplicationAddress, granularity); + if (min_addr < min_virtual_addr) + min_addr = min_virtual_addr; + if (max_addr > max_virtual_addr) + max_addr = max_virtual_addr; + + // This loop probes the virtual address space to find free memory in the + // [min_addr, max_addr] interval. 
The search starts from func_addr and + // proceeds "outwards" towards the interval bounds using two probes, lo_addr + // and hi_addr, for addresses lower/higher than func_addr. At each step, it + // considers the probe closest to func_addr. If that address is not free, the + // probe is advanced (lower or higher depending on the probe) to the next + // memory block and the search continues. + uptr lo_addr = RoundDownTo(func_addr, granularity); + uptr hi_addr = RoundUpTo(func_addr, granularity); + while (lo_addr >= min_addr || hi_addr <= max_addr) { + // Consider the in-range address closest to func_addr. + uptr addr; + if (lo_addr < min_addr) + addr = hi_addr; + else if (hi_addr > max_addr) + addr = lo_addr; + else + addr = (hi_addr - func_addr < func_addr - lo_addr) ? hi_addr : lo_addr; + MEMORY_BASIC_INFORMATION info; - if (!::VirtualQuery((void*)address, &info, sizeof(info))) + if (!::VirtualQuery((void *)addr, &info, sizeof(info))) { + ReportError( + "interception_win: VirtualQuery in AllocateTrampolineRegion failed " + "for %p\n", + (void *)addr); return nullptr; + } - // Check whether a region can be allocated at |address|. + // Check whether a region can be allocated at |addr|. if (info.State == MEM_FREE && info.RegionSize >= granularity) { - void *page = ::VirtualAlloc((void*)RoundUpTo(address, granularity), - granularity, - MEM_RESERVE | MEM_COMMIT, - PAGE_EXECUTE_READWRITE); + void *page = + ::VirtualAlloc((void *)addr, granularity, MEM_RESERVE | MEM_COMMIT, + PAGE_EXECUTE_READWRITE); + if (page == nullptr) + ReportError( + "interception_win: VirtualAlloc in AllocateTrampolineRegion failed " + "for %p\n", + (void *)addr); return page; } - // Move to the next region. 
- address = (uptr)info.BaseAddress + info.RegionSize; - scanned += info.RegionSize; + if (addr == lo_addr) + lo_addr = + RoundDownTo((uptr)info.AllocationBase - granularity, granularity); + if (addr == hi_addr) + hi_addr = + RoundUpTo((uptr)info.BaseAddress + info.RegionSize, granularity); } + + ReportError( + "interception_win: AllocateTrampolineRegion failed to find free memory; " + "min_addr: %p, max_addr: %p, func_addr: %p, granularity: %zu\n", + (void *)min_addr, (void *)max_addr, (void *)func_addr, granularity); return nullptr; #else return ::VirtualAlloc(nullptr, @@ -387,17 +452,17 @@ void TestOnlyReleaseTrampolineRegions() { } static uptr AllocateMemoryForTrampoline(uptr func_address, size_t size) { - uptr image_address = func_address; +# if SANITIZER_WINDOWS64 + uptr min_addr = func_address - kTrampolineRangeLimit; + uptr max_addr = func_address + kTrampolineRangeLimit - size; -#if SANITIZER_WINDOWS64 - // Allocate memory after the module (DLL or EXE file), but within 2GB - // of the start of the module so that any address within the module can be - // referenced with PC-relative operands. + // Allocate memory within 2GB of the module (DLL or EXE file) so that any + // address within the module can be referenced with PC-relative operands. // This allows us to not just jump to the trampoline with a PC-relative // offset, but to relocate any instructions that we copy to the trampoline // which have references to the original module. If we can't find the base // address of the module (e.g. if func_address is in mmap'ed memory), just - // use func_address as is. + // stay within 2GB of func_address. 
HMODULE module; if (::GetModuleHandleExW(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS | GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT, @@ -405,19 +470,32 @@ static uptr AllocateMemoryForTrampoline(uptr func_address, size_t size) { MODULEINFO module_info; if (::GetModuleInformation(::GetCurrentProcess(), module, &module_info, sizeof(module_info))) { - image_address = (uptr)module_info.lpBaseOfDll; + min_addr = (uptr)module_info.lpBaseOfDll + module_info.SizeOfImage - + kTrampolineRangeLimit; + max_addr = (uptr)module_info.lpBaseOfDll + kTrampolineRangeLimit - size; } } -#endif - // Find a region within 2G with enough space to allocate |size| bytes. + // Check for overflow. + if (min_addr > func_address) + min_addr = 0; + if (max_addr < func_address) + max_addr = ~(uptr)0; +# else + uptr min_addr = 0; + uptr max_addr = ~min_addr; +# endif + + // Find a region within [min_addr,max_addr] with enough space to allocate + // |size| bytes. TrampolineMemoryRegion *region = nullptr; for (size_t bucket = 0; bucket < kMaxTrampolineRegion; ++bucket) { TrampolineMemoryRegion* current = &TrampolineRegions[bucket]; if (current->content == 0) { // No valid region found, allocate a new region. size_t bucket_size = GetMmapGranularity(); - void *content = AllocateTrampolineRegion(image_address, bucket_size); + void *content = AllocateTrampolineRegion(min_addr, max_addr, func_address, + bucket_size); if (content == nullptr) return 0U; @@ -427,13 +505,9 @@ static uptr AllocateMemoryForTrampoline(uptr func_address, size_t size) { region = current; break; } else if (current->max_size - current->allocated_size > size) { -#if SANITIZER_WINDOWS64 - // In 64-bits, the memory space must be allocated within 2G boundary. 
- uptr next_address = current->content + current->allocated_size; - if (next_address < image_address || - next_address - image_address >= 0x7FFF0000) - continue; -#endif + uptr next_address = current->content + current->allocated_size; + if (next_address < min_addr || next_address > max_addr) + continue; // The space can be allocated in the current region. region = current; break; @@ -482,6 +556,10 @@ static const u8 kPrologueWithShortJump2[] = { // Returns 0 on error. static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) { + if (rel_offset) { + *rel_offset = 0; + } + #if SANITIZER_ARM64 // An ARM64 instruction is 4 bytes long. return 4; @@ -528,6 +606,7 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) { case 0xb8: // b8 XX XX XX XX : mov eax, XX XX XX XX case 0xB9: // b9 XX XX XX XX : mov ecx, XX XX XX XX + case 0xBA: // ba XX XX XX XX : mov edx, XX XX XX XX return 5; // Cannot overwrite control-instruction. Return 0 to indicate failure. 
@@ -558,25 +637,45 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) { case 0xFF8B: // 8B FF : mov edi, edi case 0xEC8B: // 8B EC : mov ebp, esp case 0xc889: // 89 C8 : mov eax, ecx + case 0xD189: // 89 D1 : mov ecx, edx case 0xE589: // 89 E5 : mov ebp, esp case 0xC18B: // 8B C1 : mov eax, ecx + case 0xC031: // 31 C0 : xor eax, eax + case 0xC931: // 31 C9 : xor ecx, ecx + case 0xD231: // 31 D2 : xor edx, edx case 0xC033: // 33 C0 : xor eax, eax case 0xC933: // 33 C9 : xor ecx, ecx case 0xD233: // 33 D2 : xor edx, edx + case 0xFF33: // 33 FF : xor edi, edi + case 0x9066: // 66 90 : xchg %ax,%ax (Two-byte NOP) case 0xDB84: // 84 DB : test bl,bl + case 0xC084: // 84 C0 : test al,al case 0xC984: // 84 C9 : test cl,cl case 0xD284: // 84 D2 : test dl,dl return 2; + case 0x3980: // 80 39 XX : cmp BYTE PTR [rcx], XX + case 0x4D8B: // 8B 4D XX : mov XX(%ebp), ecx + case 0x558B: // 8B 55 XX : mov XX(%ebp), edx + case 0x758B: // 8B 75 XX : mov XX(%ebp), esp + case 0xE483: // 83 E4 XX : and esp, XX + case 0xEC83: // 83 EC XX : sub esp, XX + case 0xC1F6: // F6 C1 XX : test cl, XX + return 3; + + case 0x89FF: // FF 89 XX XX XX XX : dec dword ptr [ecx + XX XX XX XX] + case 0xEC81: // 81 EC XX XX XX XX : sub esp, XX XX XX XX + return 6; + // Cannot overwrite control-instruction. Return 0 to indicate failure. 
- case 0x25FF: // FF 25 XX XX XX XX : jmp [XXXXXXXX] + case 0x25FF: // FF 25 XX YY ZZ WW : jmp dword ptr ds:[WWZZYYXX] return 0; } - switch (0x00FFFFFF & *(u32*)address) { - case 0xF8E483: // 83 E4 F8 : and esp, 0xFFFFFFF8 - case 0x64EC83: // 83 EC 64 : sub esp, 64h - return 3; + switch (0x00FFFFFF & *(u32 *)address) { + case 0x244C8D: // 8D 4C 24 XX : lea ecx, [esp + XX] + case 0x2474FF: // FF 74 24 XX : push qword ptr [rsp + XX] + return 4; case 0x24A48D: // 8D A4 24 XX XX XX XX : lea esp, [esp + XX XX XX XX] return 7; } @@ -629,12 +728,10 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) { case 0x5541: // push r13 case 0x5641: // push r14 case 0x5741: // push r15 - case 0x9066: // Two-byte NOP case 0xc084: // test al, al case 0x018a: // mov al, byte ptr [rcx] return 2; - case 0x058A: // 8A 05 XX XX XX XX : mov al, byte ptr [XX XX XX XX] case 0x7E80: // 80 7E YY XX cmp BYTE PTR [rsi+YY], XX case 0x7D80: // 80 7D YY XX cmp BYTE PTR [rbp+YY], XX case 0x7A80: // 80 7A YY XX cmp BYTE PTR [rdx+YY], XX @@ -643,9 +740,12 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) { case 0x7980: // 80 79 YY XX cmp BYTE ptr [rcx+YY], XX return 4; + case 0x058A: // 8A 05 XX XX XX XX : mov al, byte ptr [XX XX XX XX] case 0x058B: // 8B 05 XX XX XX XX : mov eax, dword ptr [XX XX XX XX] if (rel_offset) *rel_offset = 2; + FALLTHROUGH; + case 0xB841: // 41 B8 XX XX XX XX : mov r8d, XX XX XX XX return 6; case 0x7E81: // 81 7E YY XX XX XX XX cmp DWORD PTR [rsi+YY], XX XX XX XX @@ -655,11 +755,17 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) { case 0x7B81: // 81 7B YY XX XX XX XX cmp DWORD PTR [rbx+YY], XX XX XX XX case 0x7981: // 81 79 YY XX XX XX XX cmp dword ptr [rcx+YY], XX XX XX XX return 7; + + case 0xb848: // 48 b8 XX XX XX XX XX XX XX XX : + // movabsq XX XX XX XX XX XX XX XX, rax + case 0xba48: // 48 ba XX XX XX XX XX XX XX XX : + // movabsq XX XX XX XX XX XX XX XX, rdx + return 10; } - 
switch (0x00FFFFFF & *(u32*)address) { - case 0x07c1f6: // f6 c1 07 : test cl, 0x7 + switch (0x00FFFFFF & *(u32 *)address) { case 0x10b70f: // 0f b7 10 : movzx edx, WORD PTR [rax] + case 0x02b70f: // 0f b7 02 : movzx eax, WORD PTR [rdx] case 0xc00b4d: // 4d 0b c0 : or r8, r8 case 0xc03345: // 45 33 c0 : xor r8d, r8d case 0xc08548: // 48 85 c0 : test rax, rax @@ -673,6 +779,9 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) { case 0xc1ff48: // 48 ff c1 : inc rcx case 0xc1ff49: // 49 ff c1 : inc r9 case 0xc28b41: // 41 8b c2 : mov eax, r10d + case 0x01b60f: // 0f b6 01 : movzx eax, BYTE PTR [rcx] + case 0x09b60f: // 0f b6 09 : movzx ecx, BYTE PTR [rcx] + case 0x11b60f: // 0f b6 11 : movzx edx, BYTE PTR [rcx] case 0xc2b60f: // 0f b6 c2 : movzx eax, dl case 0xc2ff48: // 48 ff c2 : inc rdx case 0xc2ff49: // 49 ff c2 : inc r10 @@ -691,6 +800,8 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) { case 0xc98548: // 48 85 c9 : test rcx, rcx case 0xc9854d: // 4d 85 c9 : test r9, r9 case 0xc98b4c: // 4c 8b c9 : mov r9, rcx + case 0xd12948: // 48 29 d1 : sub rcx, rdx + case 0xc22b4c: // 4c 2b c2 : sub r8, rdx case 0xca2b48: // 48 2b ca : sub rcx, rdx case 0xca3b48: // 48 3b ca : cmp rcx, rdx case 0xd12b48: // 48 2b d1 : sub rdx, rcx @@ -700,17 +811,34 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) { case 0xd2854d: // 4d 85 d2 : test r10, r10 case 0xd28b4c: // 4c 8b d2 : mov r10, rdx case 0xd2b60f: // 0f b6 d2 : movzx edx, dl + case 0xd2be0f: // 0f be d2 : movsx edx, dl case 0xd98b4c: // 4c 8b d9 : mov r11, rcx case 0xd9f748: // 48 f7 d9 : neg rcx + case 0xc03145: // 45 31 c0 : xor r8d,r8d + case 0xc93145: // 45 31 c9 : xor r9d,r9d + case 0xd23345: // 45 33 d2 : xor r10d, r10d case 0xdb3345: // 45 33 db : xor r11d, r11d + case 0xc08445: // 45 84 c0 : test r8b,r8b + case 0xd28445: // 45 84 d2 : test r10b,r10b case 0xdb8548: // 48 85 db : test rbx, rbx case 0xdb854d: // 4d 85 db : test 
r11, r11 case 0xdc8b4c: // 4c 8b dc : mov r11, rsp - case 0xe0e483: // 83 e4 e0 : and esp, 0xFFFFFFE0 case 0xe48548: // 48 85 e4 : test rsp, rsp case 0xe4854d: // 4d 85 e4 : test r12, r12 + case 0xc88948: // 48 89 c8 : mov rax,rcx + case 0xcb8948: // 48 89 cb : mov rbx,rcx + case 0xd08948: // 48 89 d0 : mov rax,rdx + case 0xd18948: // 48 89 d1 : mov rcx,rdx + case 0xd38948: // 48 89 d3 : mov rbx,rdx case 0xe58948: // 48 89 e5 : mov rbp, rsp case 0xed8548: // 48 85 ed : test rbp, rbp + case 0xc88949: // 49 89 c8 : mov r8, rcx + case 0xc98949: // 49 89 c9 : mov r9, rcx + case 0xca8949: // 49 89 ca : mov r10,rcx + case 0xd08949: // 49 89 d0 : mov r8, rdx + case 0xd18949: // 49 89 d1 : mov r9, rdx + case 0xd28949: // 49 89 d2 : mov r10, rdx + case 0xd38949: // 49 89 d3 : mov r11, rdx case 0xed854d: // 4d 85 ed : test r13, r13 case 0xf6854d: // 4d 85 f6 : test r14, r14 case 0xff854d: // 4d 85 ff : test r15, r15 @@ -721,8 +849,13 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) { case 0x588948: // 48 89 58 XX : mov QWORD PTR[rax + XX], rbx case 0xec8348: // 48 83 ec XX : sub rsp, XX case 0xf88349: // 49 83 f8 XX : cmp r8, XX + case 0x488d49: // 49 8d 48 XX : lea rcx, [...] + case 0x048d4c: // 4c 8d 04 XX : lea r8, [...] + case 0x148d4e: // 4e 8d 14 XX : lea r10, [...] + case 0x398366: // 66 83 39 XX : cmp WORD PTR [rcx], XX return 4; + case 0x441F0F: // 0F 1F 44 XX XX : nop DWORD PTR [...] 
case 0x246483: // 83 64 24 XX YY : and DWORD PTR [rsp+XX], YY return 5; @@ -735,6 +868,7 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) { return 6; case 0xec8148: // 48 81 EC XX XX XX XX : sub rsp, XXXXXXXX + case 0xc0c748: // 48 C7 C0 XX XX XX XX : mov rax, XX XX XX XX return 7; // clang-format off @@ -742,7 +876,6 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) { case 0x798141: // 41 81 79 XX YY YY YY YY : cmp DWORD PTR [r9+YY], XX XX XX XX case 0x7a8141: // 41 81 7a XX YY YY YY YY : cmp DWORD PTR [r10+YY], XX XX XX XX case 0x7b8141: // 41 81 7b XX YY YY YY YY : cmp DWORD PTR [r11+YY], XX XX XX XX - case 0x7c8141: // 41 81 7c XX YY YY YY YY : cmp DWORD PTR [r12+YY], XX XX XX XX case 0x7d8141: // 41 81 7d XX YY YY YY YY : cmp DWORD PTR [r13+YY], XX XX XX XX case 0x7e8141: // 41 81 7e XX YY YY YY YY : cmp DWORD PTR [r14+YY], XX XX XX XX case 0x7f8141: // 41 81 7f YY XX XX XX XX : cmp DWORD PTR [r15+YY], XX XX XX XX @@ -754,6 +887,10 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) { // mov rax, QWORD PTR [rip + XXXXXXXX] case 0x058d48: // 48 8d 05 XX XX XX XX : // lea rax, QWORD PTR [rip + XXXXXXXX] + case 0x0d8948: // 48 89 0d XX XX XX XX : + // mov QWORD PTR [rip + XXXXXXXX], rcx + case 0x158948: // 48 89 15 XX XX XX XX : + // mov QWORD PTR [rip + XXXXXXXX], rdx case 0x25ff48: // 48 ff 25 XX XX XX XX : // rex.W jmp QWORD PTR [rip + XXXXXXXX] case 0x158D4C: // 4c 8d 15 XX XX XX XX : lea r10, [rip + XX] @@ -765,9 +902,17 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) { case 0x2444c7: // C7 44 24 XX YY YY YY YY // mov dword ptr [rsp + XX], YYYYYYYY return 8; + + case 0x7c8141: // 41 81 7c ZZ YY XX XX XX XX + // cmp DWORD PTR [reg+reg*n+YY], XX XX XX XX + return 9; } switch (*(u32*)(address)) { + case 0x01b60f44: // 44 0f b6 01 : movzx r8d, BYTE PTR [rcx] + case 0x09b60f44: // 44 0f b6 09 : movzx r9d, BYTE PTR [rcx] + case 0x0ab60f44: // 44 
0f b6 0a : movzx r8d, BYTE PTR [rdx] + case 0x11b60f44: // 44 0f b6 11 : movzx r10d, BYTE PTR [rcx] case 0x1ab60f44: // 44 0f b6 1a : movzx r11d, BYTE PTR [rdx] return 4; case 0x24448b48: // 48 8b 44 24 XX : mov rax, QWORD ptr [rsp + XX] @@ -785,6 +930,13 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) { return 5; case 0x24648348: // 48 83 64 24 XX YY : and QWORD PTR [rsp + XX], YY return 6; + case 0x24A48D48: // 48 8D A4 24 XX XX XX XX : lea rsp, [rsp + XX XX XX XX] + return 8; + } + + switch (0xFFFFFFFFFFULL & *(u64 *)(address)) { + case 0xC07E0F4866: // 66 48 0F 7E C0 : movq rax, xmm0 + return 5; } #else @@ -798,11 +950,9 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) { case 0x5D8B: // 8B 5D XX : mov ebx, dword ptr [ebp + XX] case 0x7D8B: // 8B 7D XX : mov edi, dword ptr [ebp + XX] case 0x758B: // 8B 75 XX : mov esi, dword ptr [ebp + XX] - case 0xEC83: // 83 EC XX : sub esp, XX case 0x75FF: // FF 75 XX : push dword ptr [ebp + XX] return 3; case 0xC1F7: // F7 C1 XX YY ZZ WW : test ecx, WWZZYYXX - case 0x25FF: // FF 25 XX YY ZZ WW : jmp dword ptr ds:[WWZZYYXX] return 6; case 0x3D83: // 83 3D XX YY ZZ WW TT : cmp TT, WWZZYYXX return 7; @@ -845,6 +995,10 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) { return 0; } +size_t TestOnlyGetInstructionSize(uptr address, size_t *rel_offset) { + return GetInstructionSize(address, rel_offset); +} + // Returns 0 on error. 
static size_t RoundUpToInstrBoundary(size_t size, uptr address) { size_t cursor = 0; @@ -872,8 +1026,14 @@ static bool CopyInstructions(uptr to, uptr from, size_t size) { // this will be untrue if relocated_offset \notin [-2**31, 2**31) s64 delta = to - from; s64 relocated_offset = *(s32 *)(to + cursor + rel_offset) - delta; - if (-0x8000'0000ll > relocated_offset || relocated_offset > 0x7FFF'FFFFll) + if (-0x8000'0000ll > relocated_offset || + relocated_offset > 0x7FFF'FFFFll) { + ReportError( + "interception_win: CopyInstructions relocated_offset %lld outside " + "32-bit range\n", + (long long)relocated_offset); return false; + } # else // on 32-bit, the relative offset will always be correct s32 delta = to - from; @@ -1096,8 +1256,7 @@ static void **InterestingDLLsAvailable() { "libc++.dll", // libc++ "libunwind.dll", // libunwind # endif - // NTDLL should go last as it exports some functions that we should - // override in the CRT [presumably only used internally]. + // NTDLL must go last as it gets special treatment in OverrideFunction. "ntdll.dll", NULL }; @@ -1154,7 +1313,7 @@ uptr InternalGetProcAddress(void *module, const char *func_name) { for (DWORD i = 0; i < exports->NumberOfNames; i++) { RVAPtr name(module, names[i]); - if (!strcmp(func_name, name)) { + if (!_strcmp(func_name, name)) { DWORD index = ordinals[i]; RVAPtr func(module, functions[index]); @@ -1167,19 +1326,27 @@ uptr InternalGetProcAddress(void *module, const char *func_name) { // exported directory. 
char function_name[256]; size_t funtion_name_length = _strlen(func); - if (funtion_name_length >= sizeof(function_name) - 1) + if (funtion_name_length >= sizeof(function_name) - 1) { + ReportError("interception_win: func too long: '%s'\n", (char *)func); InterceptionFailed(); + } _memcpy(function_name, func, funtion_name_length); function_name[funtion_name_length] = '\0'; char* separator = _strchr(function_name, '.'); - if (!separator) + if (!separator) { + ReportError("interception_win: no separator in '%s'\n", + function_name); InterceptionFailed(); + } *separator = '\0'; void* redirected_module = GetModuleHandleA(function_name); - if (!redirected_module) + if (!redirected_module) { + ReportError("interception_win: GetModuleHandleA failed for '%s'\n", + function_name); InterceptionFailed(); + } return InternalGetProcAddress(redirected_module, separator + 1); } @@ -1192,9 +1359,22 @@ uptr InternalGetProcAddress(void *module, const char *func_name) { bool OverrideFunction( const char *func_name, uptr new_func, uptr *orig_old_func) { + static const char *kNtDllIgnore[] = { + "memcmp", "memcpy", "memmove", "memset" + }; + bool hooked = false; void **DLLs = InterestingDLLsAvailable(); for (size_t i = 0; DLLs[i]; ++i) { + if (DLLs[i + 1] == nullptr) { + // This is the last DLL, i.e. NTDLL. It exports some functions that + // we only want to override in the CRT. 
+ for (const char *ignored : kNtDllIgnore) { + if (_strcmp(func_name, ignored) == 0) + return hooked; + } + } + uptr func_addr = InternalGetProcAddress(DLLs[i], func_name); if (func_addr && OverrideFunction(func_addr, new_func, orig_old_func)) { @@ -1248,7 +1428,7 @@ bool OverrideImportedFunction(const char *module_to_patch, RVAPtr import_by_name( module, name_table->u1.ForwarderString); const char *funcname = &import_by_name->Name[0]; - if (strcmp(funcname, function_name) == 0) + if (_strcmp(funcname, function_name) == 0) break; } } diff --git a/libsanitizer/interception/interception_win.h b/libsanitizer/interception/interception_win.h index f6eca82191cb..91c7e38bfe7d 100644 --- a/libsanitizer/interception/interception_win.h +++ b/libsanitizer/interception/interception_win.h @@ -63,6 +63,9 @@ bool OverrideFunctionWithTrampoline( // Exposed for unittests void TestOnlyReleaseTrampolineRegions(); +// Exposed for unittests +SIZE_T TestOnlyGetInstructionSize(uptr address, SIZE_T *rel_offset); + } // namespace __interception #if defined(INTERCEPTION_DYNAMIC_CRT) diff --git a/libsanitizer/lsan/lsan_allocator.cpp b/libsanitizer/lsan/lsan_allocator.cpp index 493bf5f9efc5..a436d9c07ac6 100644 --- a/libsanitizer/lsan/lsan_allocator.cpp +++ b/libsanitizer/lsan/lsan_allocator.cpp @@ -220,6 +220,10 @@ void lsan_free(void *p) { Deallocate(p); } +void lsan_free_sized(void *p, uptr) { Deallocate(p); } + +void lsan_free_aligned_sized(void *p, uptr, uptr) { Deallocate(p); } + void *lsan_realloc(void *p, uptr size, const StackTrace &stack) { return SetErrnoOnNull(Reallocate(stack, p, size, 1)); } diff --git a/libsanitizer/lsan/lsan_allocator.h b/libsanitizer/lsan/lsan_allocator.h index 5eed0cbdb309..2d0ea0b46fe0 100644 --- a/libsanitizer/lsan/lsan_allocator.h +++ b/libsanitizer/lsan/lsan_allocator.h @@ -53,7 +53,7 @@ struct ChunkMetadata { #if !SANITIZER_CAN_USE_ALLOCATOR64 template struct AP32 { - static const uptr kSpaceBeg = 0; + static const uptr kSpaceBeg = SANITIZER_MMAP_BEGIN; 
static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE; static const uptr kMetadataSize = sizeof(ChunkMetadata); typedef __sanitizer::CompactSizeClassMap SizeClassMap; @@ -93,6 +93,10 @@ using LSanSizeClassMap = DefaultSizeClassMap; const uptr kAllocatorSpace = 0x600000000000ULL; const uptr kAllocatorSize = 0x40000000000ULL; // 4T. using LSanSizeClassMap = DefaultSizeClassMap; +# elif SANITIZER_ANDROID && defined(__aarch64__) +const uptr kAllocatorSpace = 0x3000000000ULL; +const uptr kAllocatorSize = 0x2000000000ULL; +using LSanSizeClassMap = VeryCompactSizeClassMap; # else const uptr kAllocatorSpace = 0x500000000000ULL; const uptr kAllocatorSize = 0x40000000000ULL; // 4T. @@ -127,6 +131,8 @@ void *lsan_aligned_alloc(uptr alignment, uptr size, const StackTrace &stack); void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack); void *lsan_malloc(uptr size, const StackTrace &stack); void lsan_free(void *p); +void lsan_free_sized(void *p, uptr size); +void lsan_free_aligned_sized(void *p, uptr alignment, uptr size); void *lsan_realloc(void *p, uptr size, const StackTrace &stack); void *lsan_reallocarray(void *p, uptr nmemb, uptr size, const StackTrace &stack); diff --git a/libsanitizer/lsan/lsan_common.cpp b/libsanitizer/lsan/lsan_common.cpp index 5c44c000ae57..39537b67b681 100644 --- a/libsanitizer/lsan/lsan_common.cpp +++ b/libsanitizer/lsan/lsan_common.cpp @@ -124,7 +124,8 @@ static const char kStdSuppressions[] = # endif // TLS leak in some glibc versions, described in // https://sourceware.org/bugzilla/show_bug.cgi?id=12650. - "leak:*tls_get_addr*\n"; + "leak:*tls_get_addr*\n" + "leak:*dlerror*\n"; void InitializeSuppressions() { CHECK_EQ(nullptr, suppression_ctx); @@ -411,7 +412,7 @@ void ScanExtraStackRanges(const InternalMmapVector &ranges, # if SANITIZER_FUCHSIA // Fuchsia handles all threads together with its own callback. 
-static void ProcessThreads(SuspendedThreadsList const &, Frontier *, tid_t, +static void ProcessThreads(SuspendedThreadsList const &, Frontier *, ThreadID, uptr) {} # else @@ -444,7 +445,7 @@ static void ProcessThreadRegistry(Frontier *frontier) { // Scans thread data (stacks and TLS) for heap pointers. template -static void ProcessThread(tid_t os_id, uptr sp, +static void ProcessThread(ThreadID os_id, uptr sp, const InternalMmapVector ®isters, InternalMmapVector &extra_ranges, Frontier *frontier, Accessor &accessor) { @@ -555,21 +556,21 @@ static void ProcessThread(tid_t os_id, uptr sp, } static void ProcessThreads(SuspendedThreadsList const &suspended_threads, - Frontier *frontier, tid_t caller_tid, + Frontier *frontier, ThreadID caller_tid, uptr caller_sp) { - InternalMmapVector done_threads; + InternalMmapVector done_threads; InternalMmapVector registers; InternalMmapVector extra_ranges; for (uptr i = 0; i < suspended_threads.ThreadCount(); i++) { registers.clear(); extra_ranges.clear(); - const tid_t os_id = suspended_threads.GetThreadID(i); + const ThreadID os_id = suspended_threads.GetThreadID(i); uptr sp = 0; PtraceRegistersStatus have_registers = suspended_threads.GetRegistersAndSP(i, ®isters, &sp); if (have_registers != REGISTERS_AVAILABLE) { - Report("Unable to get registers from thread %llu.\n", os_id); + VReport(1, "Unable to get registers from thread %llu.\n", os_id); // If unable to get SP, consider the entire stack to be reachable unless // GetRegistersAndSP failed with ESRCH. 
if (have_registers == REGISTERS_UNAVAILABLE_FATAL) @@ -588,10 +589,10 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads, if (flags()->use_detached) { CopyMemoryAccessor accessor; - InternalMmapVector known_threads; + InternalMmapVector known_threads; GetRunningThreadsLocked(&known_threads); Sort(done_threads.data(), done_threads.size()); - for (tid_t os_id : known_threads) { + for (ThreadID os_id : known_threads) { registers.clear(); extra_ranges.clear(); @@ -711,7 +712,7 @@ static void CollectIgnoredCb(uptr chunk, void *arg) { // Sets the appropriate tag on each chunk. static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads, - Frontier *frontier, tid_t caller_tid, + Frontier *frontier, ThreadID caller_tid, uptr caller_sp) { const InternalMmapVector &suppressed_stacks = GetSuppressionContext()->GetSortedSuppressedStacks(); @@ -789,13 +790,13 @@ static bool ReportUnsuspendedThreads(const SuspendedThreadsList &) { static bool ReportUnsuspendedThreads( const SuspendedThreadsList &suspended_threads) { - InternalMmapVector threads(suspended_threads.ThreadCount()); + InternalMmapVector threads(suspended_threads.ThreadCount()); for (uptr i = 0; i < suspended_threads.ThreadCount(); ++i) threads[i] = suspended_threads.GetThreadID(i); Sort(threads.data(), threads.size()); - InternalMmapVector known_threads; + InternalMmapVector known_threads; GetRunningThreadsLocked(&known_threads); bool succeded = true; @@ -805,7 +806,7 @@ static bool ReportUnsuspendedThreads( succeded = false; Report( "Running thread %zu was not suspended. 
False leaks are possible.\n", - os_id); + (usize)os_id); } } return succeded; diff --git a/libsanitizer/lsan/lsan_common.h b/libsanitizer/lsan/lsan_common.h index f990c7850497..39530c2e027f 100644 --- a/libsanitizer/lsan/lsan_common.h +++ b/libsanitizer/lsan/lsan_common.h @@ -102,15 +102,15 @@ void UnlockThreads() SANITIZER_NO_THREAD_SAFETY_ANALYSIS; // where leak checking is initiated from a non-main thread). void EnsureMainThreadIDIsCorrect(); -bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end, +bool GetThreadRangesLocked(ThreadID os_id, uptr *stack_begin, uptr *stack_end, uptr *tls_begin, uptr *tls_end, uptr *cache_begin, uptr *cache_end, DTLS **dtls); void GetAllThreadAllocatorCachesLocked(InternalMmapVector *caches); void GetThreadExtraStackRangesLocked(InternalMmapVector *ranges); -void GetThreadExtraStackRangesLocked(tid_t os_id, +void GetThreadExtraStackRangesLocked(ThreadID os_id, InternalMmapVector *ranges); void GetAdditionalThreadContextPtrsLocked(InternalMmapVector *ptrs); -void GetRunningThreadsLocked(InternalMmapVector *threads); +void GetRunningThreadsLocked(InternalMmapVector *threads); void PrintThreads(); //// -------------------------------------------------------------------------- @@ -247,7 +247,7 @@ void ProcessPlatformSpecificAllocations(Frontier *frontier); struct CheckForLeaksParam { Frontier frontier; LeakedChunks leaks; - tid_t caller_tid; + ThreadID caller_tid; uptr caller_sp; bool success = false; }; diff --git a/libsanitizer/lsan/lsan_common_linux.cpp b/libsanitizer/lsan/lsan_common_linux.cpp index 7a0b2f038be0..6fd54bbea3c7 100644 --- a/libsanitizer/lsan/lsan_common_linux.cpp +++ b/libsanitizer/lsan/lsan_common_linux.cpp @@ -93,11 +93,6 @@ static int ProcessGlobalRegionsCallback(struct dl_phdr_info *info, size_t size, return 0; } -#if SANITIZER_ANDROID && __ANDROID_API__ < 21 -extern "C" __attribute__((weak)) int dl_iterate_phdr( - int (*)(struct dl_phdr_info *, size_t, void *), void *); -#endif - // Scans 
global variables for heap pointers. void ProcessGlobalRegions(Frontier *frontier) { if (!flags()->use_globals) return; diff --git a/libsanitizer/lsan/lsan_fuchsia.cpp b/libsanitizer/lsan/lsan_fuchsia.cpp index ba59bc9b71e3..bb5de89b5672 100644 --- a/libsanitizer/lsan/lsan_fuchsia.cpp +++ b/libsanitizer/lsan/lsan_fuchsia.cpp @@ -21,6 +21,11 @@ using namespace __lsan; +namespace __sanitizer { +// LSan doesn't need to do anything else special in the startup hook. +void EarlySanitizerInit() {} +} // namespace __sanitizer + namespace __lsan { void LsanOnDeadlySignal(int signo, void *siginfo, void *context) {} diff --git a/libsanitizer/lsan/lsan_interceptors.cpp b/libsanitizer/lsan/lsan_interceptors.cpp index a8252cddacf2..5340c6ffba60 100644 --- a/libsanitizer/lsan/lsan_interceptors.cpp +++ b/libsanitizer/lsan/lsan_interceptors.cpp @@ -84,6 +84,35 @@ INTERCEPTOR(void, free, void *p) { lsan_free(p); } +# if SANITIZER_INTERCEPT_FREE_SIZED +INTERCEPTOR(void, free_sized, void *p, uptr size) { + if (UNLIKELY(!p)) + return; + if (DlsymAlloc::PointerIsMine(p)) + return DlsymAlloc::Free(p); + ENSURE_LSAN_INITED; + lsan_free_sized(p, size); +} +# define LSAN_MAYBE_INTERCEPT_FREE_SIZED INTERCEPT_FUNCTION(free_sized) +# else +# define LSAN_MAYBE_INTERCEPT_FREE_SIZED +# endif + +# if SANITIZER_INTERCEPT_FREE_ALIGNED_SIZED +INTERCEPTOR(void, free_aligned_sized, void *p, uptr alignment, uptr size) { + if (UNLIKELY(!p)) + return; + if (DlsymAlloc::PointerIsMine(p)) + return DlsymAlloc::Free(p); + ENSURE_LSAN_INITED; + lsan_free_aligned_sized(p, alignment, size); +} +# define LSAN_MAYBE_INTERCEPT_FREE_ALIGNED_SIZED \ + INTERCEPT_FUNCTION(free_aligned_sized) +# else +# define LSAN_MAYBE_INTERCEPT_FREE_ALIGNED_SIZED +# endif + INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) { if (DlsymAlloc::Use()) return DlsymAlloc::Callocate(nmemb, size); @@ -117,6 +146,9 @@ INTERCEPTOR(void*, valloc, uptr size) { GET_STACK_TRACE_MALLOC; return lsan_valloc(size, stack); } +#else +# define 
LSAN_MAYBE_INTERCEPT_FREE_SIZED +# define LSAN_MAYBE_INTERCEPT_FREE_ALIGNED_SIZED #endif // !SANITIZER_APPLE #if SANITIZER_INTERCEPT_MEMALIGN @@ -353,12 +385,12 @@ INTERCEPTOR(void, _lwp_exit) { #endif #if SANITIZER_INTERCEPT_THR_EXIT -INTERCEPTOR(void, thr_exit, tid_t *state) { +INTERCEPTOR(void, thr_exit, ThreadID *state) { ENSURE_LSAN_INITED; ThreadFinish(); REAL(thr_exit)(state); } -#define LSAN_MAYBE_INTERCEPT_THR_EXIT INTERCEPT_FUNCTION(thr_exit) +# define LSAN_MAYBE_INTERCEPT_THR_EXIT INTERCEPT_FUNCTION(thr_exit) #else #define LSAN_MAYBE_INTERCEPT_THR_EXIT #endif @@ -547,6 +579,8 @@ void InitializeInterceptors() { INTERCEPT_FUNCTION(malloc); INTERCEPT_FUNCTION(free); + LSAN_MAYBE_INTERCEPT_FREE_SIZED; + LSAN_MAYBE_INTERCEPT_FREE_ALIGNED_SIZED; LSAN_MAYBE_INTERCEPT_CFREE; INTERCEPT_FUNCTION(calloc); INTERCEPT_FUNCTION(realloc); diff --git a/libsanitizer/lsan/lsan_malloc_mac.cpp b/libsanitizer/lsan/lsan_malloc_mac.cpp index 525c30272ccc..8a16c053da23 100644 --- a/libsanitizer/lsan/lsan_malloc_mac.cpp +++ b/libsanitizer/lsan/lsan_malloc_mac.cpp @@ -44,16 +44,19 @@ using namespace __lsan; void *p = lsan_valloc(size, stack) #define COMMON_MALLOC_FREE(ptr) \ lsan_free(ptr) -#define COMMON_MALLOC_SIZE(ptr) \ - uptr size = lsan_mz_size(ptr) -#define COMMON_MALLOC_FILL_STATS(zone, stats) -#define COMMON_MALLOC_REPORT_UNKNOWN_REALLOC(ptr, zone_ptr, zone_name) \ - (void)zone_name; \ - Report("mz_realloc(%p) -- attempting to realloc unallocated memory.\n", ptr); -#define COMMON_MALLOC_NAMESPACE __lsan -#define COMMON_MALLOC_HAS_ZONE_ENUMERATOR 0 -#define COMMON_MALLOC_HAS_EXTRA_INTROSPECTION_INIT 0 +# define COMMON_MALLOC_FREE_SIZED(ptr, size) lsan_free_sized(ptr, size) +# define COMMON_MALLOC_FREE_ALIGNED_SIZED(ptr, alignment, size) \ + lsan_free_aligned_sized(ptr, alignment, size) +# define COMMON_MALLOC_SIZE(ptr) uptr size = lsan_mz_size(ptr) +# define COMMON_MALLOC_FILL_STATS(zone, stats) +# define COMMON_MALLOC_REPORT_UNKNOWN_REALLOC(ptr, zone_ptr, zone_name) \ + 
(void)zone_name; \ + Report("mz_realloc(%p) -- attempting to realloc unallocated memory.\n", \ + ptr); +# define COMMON_MALLOC_NAMESPACE __lsan +# define COMMON_MALLOC_HAS_ZONE_ENUMERATOR 0 +# define COMMON_MALLOC_HAS_EXTRA_INTROSPECTION_INIT 0 -#include "sanitizer_common/sanitizer_malloc_mac.inc" +# include "sanitizer_common/sanitizer_malloc_mac.inc" #endif // SANITIZER_APPLE diff --git a/libsanitizer/lsan/lsan_posix.cpp b/libsanitizer/lsan/lsan_posix.cpp index 593000b9eef9..ae1590b9d6fc 100644 --- a/libsanitizer/lsan/lsan_posix.cpp +++ b/libsanitizer/lsan/lsan_posix.cpp @@ -48,7 +48,7 @@ void ThreadContext::OnStarted(void *arg) { dtls_ = args->dtls; } -void ThreadStart(u32 tid, tid_t os_id, ThreadType thread_type) { +void ThreadStart(u32 tid, ThreadID os_id, ThreadType thread_type) { OnStartedArgs args; GetThreadStackAndTls(tid == kMainTid, &args.stack_begin, &args.stack_end, &args.tls_begin, &args.tls_end); @@ -57,7 +57,7 @@ void ThreadStart(u32 tid, tid_t os_id, ThreadType thread_type) { ThreadContextLsanBase::ThreadStart(tid, os_id, thread_type, &args); } -bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end, +bool GetThreadRangesLocked(ThreadID os_id, uptr *stack_begin, uptr *stack_end, uptr *tls_begin, uptr *tls_end, uptr *cache_begin, uptr *cache_end, DTLS **dtls) { ThreadContext *context = static_cast( diff --git a/libsanitizer/lsan/lsan_posix.h b/libsanitizer/lsan/lsan_posix.h index b1265f233f36..7587a07c40f6 100644 --- a/libsanitizer/lsan/lsan_posix.h +++ b/libsanitizer/lsan/lsan_posix.h @@ -41,7 +41,7 @@ class ThreadContext final : public ThreadContextLsanBase { DTLS *dtls_ = nullptr; }; -void ThreadStart(u32 tid, tid_t os_id, +void ThreadStart(u32 tid, ThreadID os_id, ThreadType thread_type = ThreadType::Regular); } // namespace __lsan diff --git a/libsanitizer/lsan/lsan_thread.cpp b/libsanitizer/lsan/lsan_thread.cpp index b66ea61a2de4..22eb9ee7680f 100644 --- a/libsanitizer/lsan/lsan_thread.cpp +++ 
b/libsanitizer/lsan/lsan_thread.cpp @@ -66,7 +66,7 @@ u32 ThreadCreate(u32 parent_tid, bool detached, void *arg) { return thread_registry->CreateThread(0, detached, parent_tid, arg); } -void ThreadContextLsanBase::ThreadStart(u32 tid, tid_t os_id, +void ThreadContextLsanBase::ThreadStart(u32 tid, ThreadID os_id, ThreadType thread_type, void *arg) { thread_registry->StartThread(tid, os_id, thread_type, arg); } @@ -80,7 +80,7 @@ void EnsureMainThreadIDIsCorrect() { ///// Interface to the common LSan module. ///// -void GetThreadExtraStackRangesLocked(tid_t os_id, +void GetThreadExtraStackRangesLocked(ThreadID os_id, InternalMmapVector *ranges) {} void GetThreadExtraStackRangesLocked(InternalMmapVector *ranges) {} @@ -99,11 +99,11 @@ ThreadRegistry *GetLsanThreadRegistryLocked() { return thread_registry; } -void GetRunningThreadsLocked(InternalMmapVector *threads) { +void GetRunningThreadsLocked(InternalMmapVector *threads) { GetLsanThreadRegistryLocked()->RunCallbackForEachThreadLocked( [](ThreadContextBase *tctx, void *threads) { if (tctx->status == ThreadStatusRunning) { - reinterpret_cast *>(threads)->push_back( + reinterpret_cast *>(threads)->push_back( tctx->os_id); } }, diff --git a/libsanitizer/lsan/lsan_thread.h b/libsanitizer/lsan/lsan_thread.h index 222066ee93cd..b7262a9ef3b2 100644 --- a/libsanitizer/lsan/lsan_thread.h +++ b/libsanitizer/lsan/lsan_thread.h @@ -30,7 +30,7 @@ class ThreadContextLsanBase : public ThreadContextBase { uptr cache_end() { return cache_end_; } // The argument is passed on to the subclass's OnStarted member function. 
- static void ThreadStart(u32 tid, tid_t os_id, ThreadType thread_type, + static void ThreadStart(u32 tid, ThreadID os_id, ThreadType thread_type, void *onstarted_arg); protected: diff --git a/libsanitizer/sanitizer_common/sanitizer_allocator_dlsym.h b/libsanitizer/sanitizer_common/sanitizer_allocator_dlsym.h index b360478a058a..6e6cdbd9eeae 100644 --- a/libsanitizer/sanitizer_common/sanitizer_allocator_dlsym.h +++ b/libsanitizer/sanitizer_common/sanitizer_allocator_dlsym.h @@ -36,21 +36,19 @@ struct DlSymAllocator { static void *Allocate(uptr size_in_bytes, uptr align = kWordSize) { void *ptr = InternalAlloc(size_in_bytes, nullptr, align); CHECK(internal_allocator()->FromPrimary(ptr)); - Details::OnAllocate(ptr, - internal_allocator()->GetActuallyAllocatedSize(ptr)); + Details::OnAllocate(ptr, GetSize(ptr)); return ptr; } static void *Callocate(usize nmemb, usize size) { void *ptr = InternalCalloc(nmemb, size); CHECK(internal_allocator()->FromPrimary(ptr)); - Details::OnAllocate(ptr, - internal_allocator()->GetActuallyAllocatedSize(ptr)); + Details::OnAllocate(ptr, GetSize(ptr)); return ptr; } static void Free(void *ptr) { - uptr size = internal_allocator()->GetActuallyAllocatedSize(ptr); + uptr size = GetSize(ptr); Details::OnFree(ptr, size); InternalFree(ptr); } @@ -63,7 +61,7 @@ struct DlSymAllocator { Free(ptr); return nullptr; } - uptr size = internal_allocator()->GetActuallyAllocatedSize(ptr); + uptr size = GetSize(ptr); uptr memcpy_size = Min(new_size, size); void *new_ptr = Allocate(new_size); if (new_ptr) @@ -77,6 +75,10 @@ struct DlSymAllocator { return Realloc(ptr, count * size); } + static uptr GetSize(void *ptr) { + return internal_allocator()->GetActuallyAllocatedSize(ptr); + } + static void OnAllocate(const void *ptr, uptr size) {} static void OnFree(const void *ptr, uptr size) {} }; diff --git a/libsanitizer/sanitizer_common/sanitizer_allocator_internal.h b/libsanitizer/sanitizer_common/sanitizer_allocator_internal.h index 
62523c7ae187..6c2020e6373c 100644 --- a/libsanitizer/sanitizer_common/sanitizer_allocator_internal.h +++ b/libsanitizer/sanitizer_common/sanitizer_allocator_internal.h @@ -23,7 +23,7 @@ namespace __sanitizer { typedef CompactSizeClassMap InternalSizeClassMap; struct AP32 { - static const uptr kSpaceBeg = 0; + static const uptr kSpaceBeg = SANITIZER_MMAP_BEGIN; static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE; static const uptr kMetadataSize = 0; typedef InternalSizeClassMap SizeClassMap; diff --git a/libsanitizer/sanitizer_common/sanitizer_allocator_local_cache.h b/libsanitizer/sanitizer_common/sanitizer_allocator_local_cache.h index e495c56f0377..6e54c4852fbb 100644 --- a/libsanitizer/sanitizer_common/sanitizer_allocator_local_cache.h +++ b/libsanitizer/sanitizer_common/sanitizer_allocator_local_cache.h @@ -166,7 +166,7 @@ struct SizeClassAllocator32LocalCache { DCHECK_GT(c->count, 0); } void *res = c->batch[--c->count]; - PREFETCH(c->batch[c->count - 1]); + PREFETCH(c->batch[c->count > 0 ? c->count - 1 : 0]); stats_.Add(AllocatorStatAllocated, c->class_size); return res; } diff --git a/libsanitizer/sanitizer_common/sanitizer_allocator_primary64.h b/libsanitizer/sanitizer_common/sanitizer_allocator_primary64.h index 0b0bdb07041e..b39eb1538cbc 100644 --- a/libsanitizer/sanitizer_common/sanitizer_allocator_primary64.h +++ b/libsanitizer/sanitizer_common/sanitizer_allocator_primary64.h @@ -113,6 +113,24 @@ class SizeClassAllocator64 { // ~(uptr)0. void Init(s32 release_to_os_interval_ms, uptr heap_start = 0) { uptr TotalSpaceSize = kSpaceSize + AdditionalSize(); + + uptr MaxAddr = GetMaxUserVirtualAddress(); + // VReport does not call the sanitizer allocator. 
+ VReport(3, "Max user virtual address: 0x%zx\n", MaxAddr); + VReport(3, "Total space size for primary allocator: 0x%zx\n", + TotalSpaceSize); + // TODO: revise the check if we ever configure sanitizers to deliberately + // map beyond the 2**48 barrier (note that Linux pretends the VMA is + // limited to 48-bit for backwards compatibility, but allows apps to + // explicitly specify an address beyond that). + if (heap_start + TotalSpaceSize >= MaxAddr) { + // We can't easily adjust the requested heap size, because kSpaceSize is + // const (for optimization) and used throughout the code. + VReport(0, "Error: heap size %zx exceeds max user virtual address %zx\n", + TotalSpaceSize, MaxAddr); + VReport( + 0, "Try using a kernel that allows a larger virtual address space\n"); + } PremappedHeap = heap_start != 0; if (PremappedHeap) { CHECK(!kUsingConstantSpaceBeg); @@ -185,9 +203,10 @@ class SizeClassAllocator64 { // recoverable. if (UNLIKELY(!EnsureFreeArraySpace(region, region_beg, new_num_freed_chunks))) { - Report("FATAL: Internal error: %s's allocator exhausted the free list " - "space for size class %zd (%zd bytes).\n", SanitizerToolName, - class_id, ClassIdToSize(class_id)); + Report( + "FATAL: Internal error: %s's allocator exhausted the free list " + "space for size class %zu (%zu bytes).\n", + SanitizerToolName, class_id, ClassIdToSize(class_id)); Die(); } for (uptr i = 0; i < n_chunks; i++) @@ -763,8 +782,9 @@ class SizeClassAllocator64 { if (!region->exhausted) { region->exhausted = true; Printf("%s: Out of memory. 
", SanitizerToolName); - Printf("The process has exhausted %zuMB for size class %zu.\n", - kRegionSize >> 20, ClassIdToSize(class_id)); + Printf( + "The process has exhausted %zu MB for size class %zu (%zu bytes).\n", + kRegionSize >> 20, class_id, ClassIdToSize(class_id)); } return true; } diff --git a/libsanitizer/sanitizer_common/sanitizer_atomic_clang.h b/libsanitizer/sanitizer_common/sanitizer_atomic_clang.h index 1414092e38d7..fded66546f81 100644 --- a/libsanitizer/sanitizer_common/sanitizer_atomic_clang.h +++ b/libsanitizer/sanitizer_common/sanitizer_atomic_clang.h @@ -14,6 +14,18 @@ #ifndef SANITIZER_ATOMIC_CLANG_H #define SANITIZER_ATOMIC_CLANG_H +// Helper to suppress warnings related to 8-byte atomic accesses when the target +// is 32-bit AIX (where such accesses use libatomic). +#if defined(_AIX) && !defined(__powerpc64__) && defined(__clang__) +# define SANITIZER_IGNORE_ATOMIC_ALIGNMENT_BEGIN \ + _Pragma("clang diagnostic push") \ + _Pragma("clang diagnostic ignored \"-Watomic-alignment\"") +# define SANITIZER_IGNORE_ATOMIC_ALIGNMENT_END _Pragma("clang diagnostic pop") +#else +# define SANITIZER_IGNORE_ATOMIC_ALIGNMENT_BEGIN +# define SANITIZER_IGNORE_ATOMIC_ALIGNMENT_END +#endif + namespace __sanitizer { // We use the compiler builtin atomic operations for loads and stores, which @@ -35,6 +47,7 @@ inline void proc_yield(int cnt) { #endif } +SANITIZER_IGNORE_ATOMIC_ALIGNMENT_BEGIN template inline typename T::Type atomic_load(const volatile T *a, memory_order mo) { DCHECK(mo == memory_order_relaxed || mo == memory_order_consume || @@ -92,6 +105,8 @@ inline bool atomic_compare_exchange_weak(volatile T *a, typename T::Type *cmp, return atomic_compare_exchange_strong(a, cmp, xchg, mo); } +SANITIZER_IGNORE_ATOMIC_ALIGNMENT_END + } // namespace __sanitizer #undef ATOMIC_ORDER diff --git a/libsanitizer/sanitizer_common/sanitizer_common.h b/libsanitizer/sanitizer_common/sanitizer_common.h index 0b5e68c5fd79..515a7c9cdf60 100644 --- 
a/libsanitizer/sanitizer_common/sanitizer_common.h +++ b/libsanitizer/sanitizer_common/sanitizer_common.h @@ -78,8 +78,8 @@ uptr GetMmapGranularity(); uptr GetMaxVirtualAddress(); uptr GetMaxUserVirtualAddress(); // Threads -tid_t GetTid(); -int TgKill(pid_t pid, tid_t tid, int sig); +ThreadID GetTid(); +int TgKill(pid_t pid, ThreadID tid, int sig); uptr GetThreadSelf(); void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top, uptr *stack_bottom); @@ -166,7 +166,7 @@ uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding, // Used to check if we can map shadow memory to a fixed location. bool MemoryRangeIsAvailable(uptr range_start, uptr range_end); -// Releases memory pages entirely within the [beg, end] address range. Noop if +// Releases memory pages entirely within the [beg, end) address range. Noop if // the provided range does not contain at least one entire page. void ReleaseMemoryPagesToOS(uptr beg, uptr end); void IncreaseTotalMmap(uptr size); @@ -390,6 +390,9 @@ void ReportDeadlySignal(const SignalContext &sig, u32 tid, void SetAlternateSignalStack(); void UnsetAlternateSignalStack(); +bool IsSignalHandlerFromSanitizer(int signum); +bool SetSignalHandlerFromSanitizer(int signum, bool new_state); + // Construct a one-line string: // SUMMARY: SanitizerToolName: error_message // and pass it to __sanitizer_report_error_summary. @@ -484,6 +487,13 @@ inline uptr Log2(uptr x) { return LeastSignificantSetBitIndex(x); } +inline bool IntervalsAreSeparate(uptr start1, uptr end1, uptr start2, + uptr end2) { + CHECK_LE(start1, end1); + CHECK_LE(start2, end2); + return (end1 < start2) || (end2 < start1); +} + // Don't use std::min, std::max or std::swap, to minimize dependency // on libstdc++. 
template @@ -734,6 +744,7 @@ enum ModuleArch { kModuleArchARMV7S, kModuleArchARMV7K, kModuleArchARM64, + kModuleArchARM64E, kModuleArchLoongArch64, kModuleArchRISCV64, kModuleArchHexagon @@ -807,6 +818,8 @@ inline const char *ModuleArchToString(ModuleArch arch) { return "armv7k"; case kModuleArchARM64: return "arm64"; + case kModuleArchARM64E: + return "arm64e"; case kModuleArchLoongArch64: return "loongarch64"; case kModuleArchRISCV64: @@ -925,13 +938,6 @@ class ListOfModules { // Callback type for iterating over a set of memory ranges. typedef void (*RangeIteratorCallback)(uptr begin, uptr end, void *arg); -enum AndroidApiLevel { - ANDROID_NOT_ANDROID = 0, - ANDROID_KITKAT = 19, - ANDROID_LOLLIPOP_MR1 = 22, - ANDROID_POST_LOLLIPOP = 23 -}; - void WriteToSyslog(const char *buffer); #if defined(SANITIZER_WINDOWS) && defined(_MSC_VER) && !defined(__clang__) @@ -964,19 +970,8 @@ inline void AndroidLogInit() {} inline void SetAbortMessage(const char *) {} #endif -#if SANITIZER_ANDROID -void SanitizerInitializeUnwinder(); -AndroidApiLevel AndroidGetApiLevel(); -#else -inline void AndroidLogWrite(const char *buffer_unused) {} -inline void SanitizerInitializeUnwinder() {} -inline AndroidApiLevel AndroidGetApiLevel() { return ANDROID_NOT_ANDROID; } -#endif - inline uptr GetPthreadDestructorIterations() { -#if SANITIZER_ANDROID - return (AndroidGetApiLevel() == ANDROID_LOLLIPOP_MR1) ? 8 : 4; -#elif SANITIZER_POSIX +#if SANITIZER_POSIX return 4; #else // Unused on Windows. 
diff --git a/libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc b/libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc index f2a9348217c8..b10ce7fa44af 100644 --- a/libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc +++ b/libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc @@ -481,7 +481,8 @@ INTERCEPTOR(char*, textdomain, const char *domainname) { #endif #if SANITIZER_INTERCEPT_STRCMP || SANITIZER_INTERCEPT_MEMCMP -static inline int CharCmpX(unsigned char c1, unsigned char c2) { +[[maybe_unused]] static inline int CharCmpX(unsigned char c1, + unsigned char c2) { return (c1 == c2) ? 0 : (c1 < c2) ? -1 : 1; } #endif @@ -953,7 +954,7 @@ INTERCEPTOR(double, frexp, double x, int *exp) { #define INIT_FREXP #endif // SANITIZER_INTERCEPT_FREXP -#if SANITIZER_INTERCEPT_FREXPF_FREXPL +#if SANITIZER_INTERCEPT_FREXPF INTERCEPTOR(float, frexpf, float x, int *exp) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, frexpf, x, exp); @@ -963,6 +964,12 @@ INTERCEPTOR(float, frexpf, float x, int *exp) { return res; } +# define INIT_FREXPF COMMON_INTERCEPT_FUNCTION(frexpf); +#else +# define INIT_FREXPF +#endif + +#if SANITIZER_INTERCEPT_FREXPL INTERCEPTOR(long double, frexpl, long double x, int *exp) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, frexpl, x, exp); @@ -972,12 +979,10 @@ INTERCEPTOR(long double, frexpl, long double x, int *exp) { return res; } -#define INIT_FREXPF_FREXPL \ - COMMON_INTERCEPT_FUNCTION(frexpf); \ - COMMON_INTERCEPT_FUNCTION_LDBL(frexpl) +# define INIT_FREXPL COMMON_INTERCEPT_FUNCTION_LDBL(frexpl) #else -#define INIT_FREXPF_FREXPL -#endif // SANITIZER_INTERCEPT_FREXPF_FREXPL +# define INIT_FREXPL +#endif #if SI_POSIX static void write_iovec(void *ctx, struct __sanitizer_iovec *iovec, @@ -1280,8 +1285,34 @@ INTERCEPTOR(int, puts, char *s) { #endif #if SANITIZER_INTERCEPT_PRCTL -INTERCEPTOR(int, prctl, int option, unsigned long arg2, unsigned long arg3, - unsigned long arg4, unsigned long arg5) { + +# if 
defined(__aarch64__) +// https://llvm.org/docs/PointerAuth.html +// AArch64 is currently the only architecture with full PAC support. +// Avoid adding PAC instructions to prevent crashes caused by +// prctl(PR_PAC_RESET_KEYS, ...). Since PR_PAC_RESET_KEYS resets the +// authentication key, using the old key afterward will lead to a crash. + +# if defined(__ARM_FEATURE_BTI_DEFAULT) +# define BRANCH_PROTECTION_ATTRIBUTE \ + __attribute__((target("branch-protection=bti"))) +# else +# define BRANCH_PROTECTION_ATTRIBUTE \ + __attribute__((target("branch-protection=none"))) +# endif + +# define PRCTL_INTERCEPTOR(ret_type, func, ...) \ + DEFINE_REAL(ret_type, func, __VA_ARGS__) \ + DECLARE_WRAPPER(ret_type, func, __VA_ARGS__) \ + extern "C" INTERCEPTOR_ATTRIBUTE BRANCH_PROTECTION_ATTRIBUTE ret_type \ + WRAP(func)(__VA_ARGS__) + +# else +# define PRCTL_INTERCEPTOR INTERCEPTOR +# endif + +PRCTL_INTERCEPTOR(int, prctl, int option, unsigned long arg2, + unsigned long arg3, unsigned long arg4, unsigned long arg5) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, prctl, option, arg2, arg3, arg4, arg5); static const int PR_SET_NAME = 15; @@ -1295,7 +1326,7 @@ INTERCEPTOR(int, prctl, int option, unsigned long arg2, unsigned long arg3, static const int PR_SET_SECCOMP = 22; static const int SECCOMP_MODE_FILTER = 2; # endif - if (option == PR_SET_VMA && arg2 == 0UL) { + if (option == PR_SET_VMA && arg2 == 0UL && arg5 != 0UL) { char *name = (char *)arg5; COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1); } @@ -1313,7 +1344,7 @@ INTERCEPTOR(int, prctl, int option, unsigned long arg2, unsigned long arg3, COMMON_INTERCEPTOR_WRITE_RANGE(ctx, (u64 *)(arg5), sizeof(u64)); } else if (res != -1 && option == PR_GET_PDEATHSIG) { COMMON_INTERCEPTOR_WRITE_RANGE(ctx, (u64 *)(arg2), sizeof(int)); -# if !SANITIZER_ANDROID +# if SANITIZER_GLIBC } else if (res != -1 && option == PR_SET_SECCOMP && arg2 == SECCOMP_MODE_FILTER) { COMMON_INTERCEPTOR_WRITE_RANGE(ctx, (u64 *)(arg3), 
struct_sock_fprog_sz); @@ -1321,7 +1352,7 @@ INTERCEPTOR(int, prctl, int option, unsigned long arg2, unsigned long arg3, } return res; } -#define INIT_PRCTL COMMON_INTERCEPT_FUNCTION(prctl) +# define INIT_PRCTL COMMON_INTERCEPT_FUNCTION(prctl) #else #define INIT_PRCTL #endif // SANITIZER_INTERCEPT_PRCTL @@ -1346,7 +1377,8 @@ INTERCEPTOR(unsigned long, time, unsigned long *t) { #if SANITIZER_INTERCEPT_LOCALTIME_AND_FRIENDS static void unpoison_tm(void *ctx, __sanitizer_tm *tm) { COMMON_INTERCEPTOR_WRITE_RANGE(ctx, tm, sizeof(*tm)); -#if !SANITIZER_SOLARIS +// AIX tm struct does not have tm_zone field. +# if !SANITIZER_SOLARIS && !SANITIZER_AIX if (tm->tm_zone) { // Can not use COMMON_INTERCEPTOR_WRITE_RANGE here, because tm->tm_zone // can point to shared memory and tsan would report a data race. @@ -1731,10 +1763,12 @@ INTERCEPTOR(int, __vsprintf_chk, char *str, int flag, SIZE_T size_to, VSPRINTF_INTERCEPTOR_IMPL(vsprintf, str, format, ap) #endif +# if SANITIZER_INTERCEPT_VASPRINTF INTERCEPTOR(int, vasprintf, char **strp, const char *format, va_list ap) VASPRINTF_INTERCEPTOR_IMPL(vasprintf, strp, format, ap) +# endif -#if SANITIZER_INTERCEPT_ISOC99_PRINTF +# if SANITIZER_INTERCEPT_ISOC99_PRINTF INTERCEPTOR(int, __isoc99_vprintf, const char *format, va_list ap) VPRINTF_INTERCEPTOR_IMPL(__isoc99_vprintf, format, ap) @@ -1783,10 +1817,12 @@ INTERCEPTOR(int, __snprintf_chk, char *str, SIZE_T size, int flag, FORMAT_INTERCEPTOR_IMPL(__snprintf_chk, vsnprintf, str, size, format) #endif +# if SANITIZER_INTERCEPT_ASPRINTF INTERCEPTOR(int, asprintf, char **strp, const char *format, ...) FORMAT_INTERCEPTOR_IMPL(asprintf, vasprintf, strp, format) +# endif -#if SANITIZER_INTERCEPT_ISOC99_PRINTF +# if SANITIZER_INTERCEPT_ISOC99_PRINTF INTERCEPTOR(int, __isoc99_printf, const char *format, ...) 
FORMAT_INTERCEPTOR_IMPL(__isoc99_printf, __isoc99_vprintf, format) @@ -1807,17 +1843,24 @@ FORMAT_INTERCEPTOR_IMPL(__isoc99_snprintf, __isoc99_vsnprintf, str, size, #endif // SANITIZER_INTERCEPT_PRINTF #if SANITIZER_INTERCEPT_PRINTF -#define INIT_PRINTF \ - COMMON_INTERCEPT_FUNCTION_LDBL(printf); \ - COMMON_INTERCEPT_FUNCTION_LDBL(sprintf); \ - COMMON_INTERCEPT_FUNCTION_LDBL(snprintf); \ - COMMON_INTERCEPT_FUNCTION_LDBL(asprintf); \ - COMMON_INTERCEPT_FUNCTION_LDBL(fprintf); \ - COMMON_INTERCEPT_FUNCTION_LDBL(vprintf); \ - COMMON_INTERCEPT_FUNCTION_LDBL(vsprintf); \ - COMMON_INTERCEPT_FUNCTION_LDBL(vsnprintf); \ - COMMON_INTERCEPT_FUNCTION_LDBL(vasprintf); \ - COMMON_INTERCEPT_FUNCTION_LDBL(vfprintf); +# define INIT_PRINTF_COMMON \ + COMMON_INTERCEPT_FUNCTION_LDBL(printf); \ + COMMON_INTERCEPT_FUNCTION_LDBL(sprintf); \ + COMMON_INTERCEPT_FUNCTION_LDBL(snprintf); \ + COMMON_INTERCEPT_FUNCTION_LDBL(fprintf); \ + COMMON_INTERCEPT_FUNCTION_LDBL(vprintf); \ + COMMON_INTERCEPT_FUNCTION_LDBL(vsprintf); \ + COMMON_INTERCEPT_FUNCTION_LDBL(vsnprintf); \ + COMMON_INTERCEPT_FUNCTION_LDBL(vfprintf); +# if !SANITIZER_AIX +// AIX does not have [v]asprintf. +# define INIT_PRINTF_EXTRA \ + COMMON_INTERCEPT_FUNCTION_LDBL(asprintf); \ + COMMON_INTERCEPT_FUNCTION_LDBL(vasprintf); +# else +# define INIT_PRINTF_EXTRA +# endif +# define INIT_PRINTF INIT_PRINTF_COMMON INIT_PRINTF_EXTRA #else #define INIT_PRINTF #endif @@ -1855,6 +1898,22 @@ FORMAT_INTERCEPTOR_IMPL(__isoc99_snprintf, __isoc99_vsnprintf, str, size, #define INIT_ISOC99_PRINTF #endif +#if SANITIZER_INTERCEPT_SETPROCTITLE +INTERCEPTOR(void, setproctitle, const char *fmt, ...) 
{ + void *ctx; + va_list ap; + va_start(ap, fmt); + COMMON_INTERCEPTOR_ENTER(ctx, setproctitle, fmt, ap); + if (common_flags()->check_printf) + printf_common(ctx, fmt, ap); + REAL(setproctitle)(fmt, ap); + va_end(ap); +} +# define INIT_SETPROCTITLE COMMON_INTERCEPT_FUNCTION(setproctitle); +#else +# define INIT_SETPROCTITLE +#endif + #if SANITIZER_INTERCEPT_IOCTL #include "sanitizer_common_interceptors_ioctl.inc" #include "sanitizer_interceptors_ioctl_netbsd.inc" @@ -2289,6 +2348,61 @@ INTERCEPTOR(int, pthread_getcpuclockid, uptr thread, #define INIT_CLOCK_GETCPUCLOCKID #endif +#if SANITIZER_INTERCEPT_TIMER_CREATE +INTERCEPTOR(int, timer_create, __sanitizer_clockid_t clockid, void *sevp, + __sanitizer_timer_t *timer) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, timer_create, clockid, sevp, timer); + int res = REAL(timer_create)(clockid, sevp, timer); + if (!res && timer) { + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, timer, sizeof *timer); + } + return res; +} + +INTERCEPTOR(int, timer_delete, __sanitizer_timer_t timer) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, timer_delete, timer); + int res = REAL(timer_delete)(timer); + return res; +} + +INTERCEPTOR(int, timer_gettime, __sanitizer_timer_t timer, + struct __sanitizer_itimerspec *curr_value) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, timer_gettime, timer, curr_value); + int res = REAL(timer_gettime)(timer, curr_value); + if (!res && curr_value) { + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, curr_value, sizeof *curr_value); + } + return res; +} + +INTERCEPTOR(int, timer_settime, __sanitizer_timer_t timer, int flags, + const struct __sanitizer_itimerspec *new_value, + struct __sanitizer_itimerspec *old_value) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, timer_settime, timer, flags, new_value, + old_value); + int res = REAL(timer_settime)(timer, flags, new_value, old_value); + if (!res) { + if (new_value) + COMMON_INTERCEPTOR_READ_RANGE(ctx, new_value, sizeof *new_value); + if (old_value) + 
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, old_value, sizeof *old_value); + } + return res; +} + +# define INIT_TIMER_CREATE \ + COMMON_INTERCEPT_FUNCTION_GLIBC_VER_MIN(timer_create, "GLIBC_2.3.3"); \ + COMMON_INTERCEPT_FUNCTION_GLIBC_VER_MIN(timer_delete, "GLIBC_2.3.3"); \ + COMMON_INTERCEPT_FUNCTION_GLIBC_VER_MIN(timer_gettime, "GLIBC_2.3.3"); \ + COMMON_INTERCEPT_FUNCTION_GLIBC_VER_MIN(timer_settime, "GLIBC_2.3.3"); +#else +# define INIT_TIMER_CREATE +#endif + #if SANITIZER_INTERCEPT_GETITIMER INTERCEPTOR(int, getitimer, int which, void *curr_value) { void *ctx; @@ -2334,6 +2448,25 @@ INTERCEPTOR(int, setitimer, int which, const void *new_value, void *old_value) { #define INIT_GETITIMER #endif +#if SANITIZER_INTERCEPT_TIMESPEC_GET +INTERCEPTOR(int, timespec_get, struct __sanitizer_timespec *ts, int base) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, timespec_get, ts, base); + // We don't yet know if ts is addressable, so we use our own scratch buffer + struct __sanitizer_timespec ts_local; + int res = REAL(timespec_get)(&ts_local, base); + if (res) { + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ts, + sizeof(struct __sanitizer_timespec)); + internal_memcpy(ts, &ts_local, sizeof(struct __sanitizer_timespec)); + } + return res; +} +# define INIT_TIMESPEC_GET COMMON_INTERCEPT_FUNCTION(timespec_get); +#else +# define INIT_TIMESPEC_GET +#endif + #if SANITIZER_INTERCEPT_GLOB static void unpoison_glob_t(void *ctx, __sanitizer_glob_t *pglob) { COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pglob, sizeof(*pglob)); @@ -3827,7 +3960,7 @@ INTERCEPTOR(SIZE_T, wcrtomb, char *dest, wchar_t src, void *ps) { if (res != ((SIZE_T)-1)) { CHECK_LE(res, sizeof(local_dest)); COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dest, res); - REAL(memcpy)(dest, local_dest, res); + internal_memcpy(dest, local_dest, res); } return res; } @@ -3849,7 +3982,7 @@ INTERCEPTOR(int, wctomb, char *dest, wchar_t src) { if (res != -1) { CHECK_LE(res, sizeof(local_dest)); COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dest, res); - REAL(memcpy)(dest, 
local_dest, res); + internal_memcpy(dest, local_dest, res); } return res; } @@ -10189,6 +10322,71 @@ INTERCEPTOR(SSIZE_T, freadlink, int fd, char *buf, SIZE_T bufsiz) { # define INIT_FREADLINK #endif +#if SANITIZER_INTERCEPT_GETSERVENT_R || SANITIZER_INTERCEPT_GETSERVBYNAME_R || \ + SANITIZER_INTERCEPT_GETSERVBYPORT_R + +UNUSED static void HandleGetServentReentrantResult( + void *ctx, int res, struct __sanitizer_servent *result_buf, char *buf, + SIZE_T buflen, struct __sanitizer_servent **result) { + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, (char *)result, sizeof(void *)); + if (res) + return; + if (*result) { + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, (char *)*result, + sizeof(__sanitizer_servent)); + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, buflen); + } +} + +#endif + +#if SANITIZER_INTERCEPT_GETSERVENT_R +INTERCEPTOR(int, getservent_r, struct __sanitizer_servent *result_buf, + char *buf, SIZE_T buflen, struct __sanitizer_servent **result) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, getservent_r, result_buf, buf, buflen, result); + int res = REAL(getservent_r)(result_buf, buf, buflen, result); + HandleGetServentReentrantResult(ctx, res, result_buf, buf, buflen, result); + return res; +} +# define INIT_GETSERVENT_R COMMON_INTERCEPT_FUNCTION(getservent_r) +#else +# define INIT_GETSERVENT_R +#endif + +#if SANITIZER_INTERCEPT_GETSERVBYNAME_R +INTERCEPTOR(int, getservbyname_r, const char *name, const char *proto, + struct __sanitizer_servent *result_buf, char *buf, SIZE_T buflen, + struct __sanitizer_servent **result) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, getservbyname_r, name, proto, result_buf, buf, + buflen, result); + COMMON_INTERCEPTOR_READ_STRING(ctx, name, internal_strlen(name)); + int res = REAL(getservbyname_r)(name, proto, result_buf, buf, buflen, result); + HandleGetServentReentrantResult(ctx, res, result_buf, buf, buflen, result); + return res; +} +# define INIT_GETSERVBYNAME_R COMMON_INTERCEPT_FUNCTION(getservbyname_r) +#else +# define 
INIT_GETSERVBYNAME_R +#endif + +#if SANITIZER_INTERCEPT_GETSERVBYPORT_R +INTERCEPTOR(int, getservbyport_r, int port, const char *proto, + struct __sanitizer_servent *result_buf, char *buf, SIZE_T buflen, + struct __sanitizer_servent **result) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, getservbyport_r, port, proto, result_buf, buf, + buflen, result); + int res = REAL(getservbyport_r)(port, proto, result_buf, buf, buflen, result); + HandleGetServentReentrantResult(ctx, res, result_buf, buf, buflen, result); + return res; +} +# define INIT_GETSERVBYPORT_R COMMON_INTERCEPT_FUNCTION(getservbyport_r) +#else +# define INIT_GETSERVBYPORT_R +#endif + #include "sanitizer_common_interceptors_netbsd_compat.inc" namespace __sanitizer { @@ -10254,8 +10452,10 @@ static void InitializeCommonInterceptors() { INIT_PRINTF; INIT_PRINTF_L; INIT_ISOC99_PRINTF; + INIT_SETPROCTITLE; INIT_FREXP; - INIT_FREXPF_FREXPL; + INIT_FREXPF; + INIT_FREXPL; INIT_GETPWNAM_AND_FRIENDS; INIT_GETPWNAM_R_AND_FRIENDS; INIT_GETPWENT; @@ -10266,8 +10466,10 @@ static void InitializeCommonInterceptors() { INIT_SETPWENT; INIT_CLOCK_GETTIME; INIT_CLOCK_GETCPUCLOCKID; + INIT_TIMER_CREATE; INIT_GETITIMER; INIT_TIME; + INIT_TIMESPEC_GET; INIT_GLOB; INIT_GLOB64; INIT___B64_TO; @@ -10511,4 +10713,7 @@ static void InitializeCommonInterceptors() { INIT_FREADLINK; INIT___PRINTF_CHK; + INIT_GETSERVENT_R; + INIT_GETSERVBYNAME_R; + INIT_GETSERVBYPORT_R; } diff --git a/libsanitizer/sanitizer_common/sanitizer_common_interceptors_format.inc b/libsanitizer/sanitizer_common/sanitizer_common_interceptors_format.inc index 24e5dc0fb22f..dd4dab07b3c6 100644 --- a/libsanitizer/sanitizer_common/sanitizer_common_interceptors_format.inc +++ b/libsanitizer/sanitizer_common/sanitizer_common_interceptors_format.inc @@ -67,6 +67,10 @@ static const char *maybe_parse_length_modifier(const char *p, char ll[2]) { // Returns true if the character is an integer conversion specifier. 
static bool format_is_integer_conv(char c) { +#if SANITIZER_GLIBC + if (char_is_one_of(c, "bB")) + return true; +#endif return char_is_one_of(c, "diouxXn"); } diff --git a/libsanitizer/sanitizer_common/sanitizer_common_interceptors_ioctl.inc b/libsanitizer/sanitizer_common/sanitizer_common_interceptors_ioctl.inc index dda11daa77f4..673f284b6a04 100644 --- a/libsanitizer/sanitizer_common/sanitizer_common_interceptors_ioctl.inc +++ b/libsanitizer/sanitizer_common/sanitizer_common_interceptors_ioctl.inc @@ -48,35 +48,41 @@ static void ioctl_table_fill() { ++ioctl_table_size; \ } + _(FIONBIO, READ, sizeof(int)); +#if !SANITIZER_HAIKU _(FIOASYNC, READ, sizeof(int)); _(FIOCLEX, NONE, 0); _(FIOGETOWN, WRITE, sizeof(int)); - _(FIONBIO, READ, sizeof(int)); _(FIONCLEX, NONE, 0); _(FIOSETOWN, READ, sizeof(int)); +#endif _(SIOCATMARK, WRITE, sizeof(int)); _(SIOCGIFCONF, CUSTOM, 0); _(SIOCGPGRP, WRITE, sizeof(int)); _(SIOCSPGRP, READ, sizeof(int)); -#if !SANITIZER_SOLARIS +#if !SANITIZER_SOLARIS && !SANITIZER_HAIKU _(TIOCCONS, NONE, 0); #endif - _(TIOCEXCL, NONE, 0); +#if !SANITIZER_HAIKU _(TIOCGETD, WRITE, sizeof(int)); + _(TIOCNOTTY, NONE, 0); + _(TIOCPKT, READ, sizeof(int)); + _(TIOCSETD, READ, sizeof(int)); + _(TIOCSTI, READ, sizeof(char)); +#endif + _(TIOCEXCL, NONE, 0); _(TIOCGPGRP, WRITE, pid_t_sz); _(TIOCGWINSZ, WRITE, struct_winsize_sz); _(TIOCMBIC, READ, sizeof(int)); _(TIOCMBIS, READ, sizeof(int)); _(TIOCMGET, WRITE, sizeof(int)); _(TIOCMSET, READ, sizeof(int)); - _(TIOCNOTTY, NONE, 0); _(TIOCNXCL, NONE, 0); _(TIOCOUTQ, WRITE, sizeof(int)); - _(TIOCPKT, READ, sizeof(int)); +# if !SANITIZER_AIX _(TIOCSCTTY, NONE, 0); - _(TIOCSETD, READ, sizeof(int)); +# endif _(TIOCSPGRP, READ, pid_t_sz); - _(TIOCSTI, READ, sizeof(char)); _(TIOCSWINSZ, READ, struct_winsize_sz); #if !SANITIZER_IOS @@ -338,12 +344,16 @@ static void ioctl_table_fill() { _(SOUND_PCM_WRITE_CHANNELS, WRITE, sizeof(int)); _(SOUND_PCM_WRITE_FILTER, WRITE, sizeof(int)); _(TCFLSH, NONE, 0); +# if 
SANITIZER_TERMIOS_IOCTL_CONSTANTS _(TCGETS, WRITE, struct_termios_sz); +# endif _(TCSBRK, NONE, 0); _(TCSBRKP, NONE, 0); +# if SANITIZER_TERMIOS_IOCTL_CONSTANTS _(TCSETS, READ, struct_termios_sz); _(TCSETSF, READ, struct_termios_sz); _(TCSETSW, READ, struct_termios_sz); +# endif _(TCXONC, NONE, 0); _(TIOCGLCKTRMIOS, WRITE, struct_termios_sz); _(TIOCGSOFTCAR, WRITE, sizeof(int)); diff --git a/libsanitizer/sanitizer_common/sanitizer_common_interceptors_memintrinsics.inc b/libsanitizer/sanitizer_common/sanitizer_common_interceptors_memintrinsics.inc index 1565a494140f..0b6731c89950 100644 --- a/libsanitizer/sanitizer_common/sanitizer_common_interceptors_memintrinsics.inc +++ b/libsanitizer/sanitizer_common/sanitizer_common_interceptors_memintrinsics.inc @@ -33,11 +33,13 @@ // Platform-specific options. #if SANITIZER_APPLE -#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE 0 +# define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE 0 #elif SANITIZER_WINDOWS64 -#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE 0 +# define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE 0 +#elif SANITIZER_AIX +# define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE 0 #else -#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE 1 +# define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE 1 #endif // SANITIZER_APPLE #ifndef COMMON_INTERCEPTOR_MEMSET_IMPL diff --git a/libsanitizer/sanitizer_common/sanitizer_common_interceptors_vfork_aarch64.inc.S b/libsanitizer/sanitizer_common/sanitizer_common_interceptors_vfork_aarch64.inc.S index cdfa6f1d7f53..c5c2180e0de9 100644 --- a/libsanitizer/sanitizer_common/sanitizer_common_interceptors_vfork_aarch64.inc.S +++ b/libsanitizer/sanitizer_common/sanitizer_common_interceptors_vfork_aarch64.inc.S @@ -5,6 +5,7 @@ ASM_HIDDEN(COMMON_INTERCEPTOR_SPILL_AREA) +TEXT_SECTION .comm _ZN14__interception10real_vforkE,8,8 .globl ASM_WRAPPER_NAME(vfork) ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(vfork)) @@ -43,6 +44,6 @@ ASM_SIZE(vfork) ASM_INTERCEPTOR_TRAMPOLINE(vfork) ASM_TRAMPOLINE_ALIAS(vfork, 
vfork) -GNU_PROPERTY_BTI_PAC +GNU_PROPERTY_BTI_PAC_GCS #endif diff --git a/libsanitizer/sanitizer_common/sanitizer_common_interceptors_vfork_i386.inc.S b/libsanitizer/sanitizer_common/sanitizer_common_interceptors_vfork_i386.inc.S index c633014e2daa..5ef090c003dc 100644 --- a/libsanitizer/sanitizer_common/sanitizer_common_interceptors_vfork_i386.inc.S +++ b/libsanitizer/sanitizer_common/sanitizer_common_interceptors_vfork_i386.inc.S @@ -2,6 +2,8 @@ #include "sanitizer_common/sanitizer_asm.h" +.att_syntax + .comm _ZN14__interception10real_vforkE,4,4 .globl ASM_WRAPPER_NAME(vfork) ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(vfork)) diff --git a/libsanitizer/sanitizer_common/sanitizer_common_interceptors_vfork_x86_64.inc.S b/libsanitizer/sanitizer_common/sanitizer_common_interceptors_vfork_x86_64.inc.S index 5500f817aec5..9c85407fe022 100644 --- a/libsanitizer/sanitizer_common/sanitizer_common_interceptors_vfork_x86_64.inc.S +++ b/libsanitizer/sanitizer_common/sanitizer_common_interceptors_vfork_x86_64.inc.S @@ -2,6 +2,8 @@ #include "sanitizer_common/sanitizer_asm.h" +.att_syntax + .comm _ZN14__interception10real_vforkE,8,8 .globl ASM_WRAPPER_NAME(vfork) ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(vfork)) diff --git a/libsanitizer/sanitizer_common/sanitizer_common_syscalls.inc b/libsanitizer/sanitizer_common/sanitizer_common_syscalls.inc index 29fe4721ba40..ee3ac723e366 100644 --- a/libsanitizer/sanitizer_common/sanitizer_common_syscalls.inc +++ b/libsanitizer/sanitizer_common/sanitizer_common_syscalls.inc @@ -143,6 +143,12 @@ struct sanitizer_kernel_sockaddr { char sa_data[14]; }; +struct sanitizer_kernel_open_how { + u64 flags; + u64 mode; + u64 resolve; +}; + // Real sigset size is always passed as a syscall argument. // Declare it "void" to catch sizeof(kernel_sigset_t). 
typedef void kernel_sigset_t; @@ -2843,6 +2849,18 @@ PRE_SYSCALL(openat)(long dfd, const void *filename, long flags, long mode) { POST_SYSCALL(openat) (long res, long dfd, const void *filename, long flags, long mode) {} +PRE_SYSCALL(openat2)(long dfd, const void* filename, + const sanitizer_kernel_open_how* how, uptr howlen) { + if (filename) + PRE_READ(filename, __sanitizer::internal_strlen((const char*)filename) + 1); + + if (how) + PRE_READ(how, howlen); +} + +POST_SYSCALL(openat2)(long res, long dfd, const void* filename, + const sanitizer_kernel_open_how* how, uptr howlen) {} + PRE_SYSCALL(newfstatat) (long dfd, const void *filename, void *statbuf, long flag) { if (filename) @@ -3205,6 +3223,28 @@ POST_SYSCALL(futex) COMMON_SYSCALL_BLOCKING_END(); } +PRE_SYSCALL(copy_file_range) +(int fdin, __sanitizer___kernel_off_t *offin, int fdout, + __sanitizer___kernel_off_t *offout, SIZE_T size, unsigned int flags) { + if (offin != nullptr) { + PRE_READ(offin, sizeof(*offin)); + } + if (offout != nullptr) { + PRE_READ(offout, sizeof(*offout)); + } +} + +POST_SYSCALL(copy_file_range) +(SSIZE_T, int fdin, __sanitizer___kernel_off_t *offin, int fdout, + __sanitizer___kernel_off_t *offout, SIZE_T size, unsigned int flags) { + if (offin != nullptr) { + POST_WRITE(offin, sizeof(*offin)); + } + if (offout != nullptr) { + POST_WRITE(offout, sizeof(*offout)); + } +} + } // extern "C" # undef PRE_SYSCALL diff --git a/libsanitizer/sanitizer_common/sanitizer_deadlock_detector.h b/libsanitizer/sanitizer_common/sanitizer_deadlock_detector.h index 0749f633b4bc..1664b92b2136 100644 --- a/libsanitizer/sanitizer_common/sanitizer_deadlock_detector.h +++ b/libsanitizer/sanitizer_common/sanitizer_deadlock_detector.h @@ -120,7 +120,7 @@ class DeadlockDetectorTLS { u32 lock; u32 stk; }; - LockWithContext all_locks_with_contexts_[64]; + LockWithContext all_locks_with_contexts_[128]; uptr n_all_locks_; }; diff --git a/libsanitizer/sanitizer_common/sanitizer_errno.h 
b/libsanitizer/sanitizer_common/sanitizer_errno.h index 46c85364cef5..76919da57d94 100644 --- a/libsanitizer/sanitizer_common/sanitizer_errno.h +++ b/libsanitizer/sanitizer_common/sanitizer_errno.h @@ -29,6 +29,8 @@ # define __errno_location ___errno #elif SANITIZER_WINDOWS # define __errno_location _errno +#elif SANITIZER_HAIKU +# define __errno_location _errnop #endif extern "C" int *__errno_location(); diff --git a/libsanitizer/sanitizer_common/sanitizer_errno_codes.h b/libsanitizer/sanitizer_common/sanitizer_errno_codes.h index 9e6e71ec80c1..10a98a9b4e4d 100644 --- a/libsanitizer/sanitizer_common/sanitizer_errno_codes.h +++ b/libsanitizer/sanitizer_common/sanitizer_errno_codes.h @@ -21,12 +21,21 @@ namespace __sanitizer { -#define errno_ENOMEM 12 -#define errno_EBUSY 16 -#define errno_EINVAL 22 -#define errno_ERANGE 34 -#define errno_ENAMETOOLONG 36 -#define errno_ENOSYS 38 +#ifdef __HAIKU__ +# define errno_ENOMEM (0x80000000) +# define errno_EBUSY (0x80000000 + 14) +# define errno_EINVAL (0x80000000 + 5) +# define errno_ERANGE (0x80007000 + 17) +# define errno_ENAMETOOLONG (0x80000000 + 0x6004) +# define errno_ENOSYS (0x80007009) +#else +# define errno_ENOMEM 12 +# define errno_EBUSY 16 +# define errno_EINVAL 22 +# define errno_ERANGE 34 +# define errno_ENAMETOOLONG 36 +# define errno_ENOSYS 38 +#endif // Those might not present or their value differ on different platforms. 
extern const int errno_EOWNERDEAD; diff --git a/libsanitizer/sanitizer_common/sanitizer_file.cpp b/libsanitizer/sanitizer_common/sanitizer_file.cpp index 96af270f9d8b..e8f219b94115 100644 --- a/libsanitizer/sanitizer_common/sanitizer_file.cpp +++ b/libsanitizer/sanitizer_common/sanitizer_file.cpp @@ -36,9 +36,17 @@ void RawWrite(const char *buffer) { void ReportFile::ReopenIfNecessary() { mu->CheckLocked(); - if (fd == kStdoutFd || fd == kStderrFd) return; - uptr pid = internal_getpid(); + if (fallbackToStderrActive && fd_pid != pid) { + // If fallbackToStderrActive is set then we fellback to stderr. If this is a + // new process, mark fd as invalid so we attempt to open again. + CHECK_EQ(fd, kStderrFd); + fd = kInvalidFd; + fallbackToStderrActive = false; + } + if (fd == kStdoutFd || fd == kStderrFd) + return; + // If in tracer, use the parent's file. if (pid == stoptheworld_tracer_pid) pid = stoptheworld_tracer_ppid; @@ -48,8 +56,7 @@ void ReportFile::ReopenIfNecessary() { // process, close it now. if (fd_pid == pid) return; - else - CloseFile(fd); + CloseFile(fd); } const char *exe_name = GetProcessName(); @@ -65,18 +72,24 @@ void ReportFile::ReopenIfNecessary() { error_t err; fd = OpenFile(full_path, WrOnly, &err); if (fd == kInvalidFd) { - const char *ErrorMsgPrefix = "ERROR: Can't open file: "; + bool fallback = common_flags()->log_fallback_to_stderr; + const char *ErrorMsgPrefix = + fallback ? 
"WARNING: Can't open file, falling back to stderr: " + : "ERROR: Can't open file: "; WriteToFile(kStderrFd, ErrorMsgPrefix, internal_strlen(ErrorMsgPrefix)); WriteToFile(kStderrFd, full_path, internal_strlen(full_path)); char errmsg[100]; internal_snprintf(errmsg, sizeof(errmsg), " (reason: %d)\n", err); WriteToFile(kStderrFd, errmsg, internal_strlen(errmsg)); - Die(); + if (!fallback) + Die(); + fallbackToStderrActive = true; + fd = kStderrFd; } fd_pid = pid; } -static void RecursiveCreateParentDirs(char *path) { +static void RecursiveCreateParentDirs(char *path, fd_t &fd) { if (path[0] == '\0') return; for (int i = 1; path[i] != '\0'; ++i) { @@ -85,24 +98,103 @@ static void RecursiveCreateParentDirs(char *path) { continue; path[i] = '\0'; if (!DirExists(path) && !CreateDir(path)) { - const char *ErrorMsgPrefix = "ERROR: Can't create directory: "; + bool fallback = common_flags()->log_fallback_to_stderr; + const char *ErrorMsgPrefix = + fallback ? "WARNING: Can't create directory, falling back to stderr: " + : "ERROR: Can't create directory: "; WriteToFile(kStderrFd, ErrorMsgPrefix, internal_strlen(ErrorMsgPrefix)); WriteToFile(kStderrFd, path, internal_strlen(path)); const char *ErrorMsgSuffix = "\n"; WriteToFile(kStderrFd, ErrorMsgSuffix, internal_strlen(ErrorMsgSuffix)); - Die(); + if (!fallback) + Die(); + path[i] = save; + fd = kStderrFd; + return; } path[i] = save; } } +/// Parse the report path \p pattern and copy the parsed path to \p dest. 
+/// +/// * `%%` becomes `%` +/// * `%H` expands to the environment variable `HOME` +/// * `%t` expands to the environment variable `TMPDIR` +/// * `%p` expands to the process ID (PID) +static void ParseAndSetPath(const char *pattern, char *dest, + const uptr dest_size) { + CHECK(pattern); + CHECK(dest); + CHECK_GE(dest_size, 1); + dest[0] = '\0'; + // Return empty string if empty string was passed + if (internal_strlen(pattern) == 0) + return; + uptr next_substr_start_idx = 0; + for (uptr i = 0; i < internal_strlen(pattern) - 1; i++) { + if (pattern[i] != '%') + continue; + int bytes_to_copy = i - next_substr_start_idx; + // Copy over previous substring. + CHECK_LT(internal_strlcat(dest, pattern + next_substr_start_idx, + internal_strlen(dest) + bytes_to_copy + 1), + dest_size); + const char *str_to_concat; + switch (pattern[++i]) { + case '%': + str_to_concat = "%"; + break; + case 'H': + str_to_concat = GetEnv("HOME"); + break; + case 't': + str_to_concat = GetEnv("TMPDIR"); + break; + case 'p': { + // Use printf directly to write the PID since it's not a static string. + int remaining_capacity = dest_size - internal_strlen(dest); + int bytes_copied = + internal_snprintf(dest + internal_strlen(dest), remaining_capacity, + "%ld", internal_getpid()); + CHECK_GT(bytes_copied, 0); + CHECK_LT(bytes_copied, remaining_capacity); + str_to_concat = ""; + break; + } + default: { + // Invalid pattern: fallback to original pattern. 
+ const char *message = "ERROR: Unexpected pattern: "; + WriteToFile(kStderrFd, message, internal_strlen(message)); + WriteToFile(kStderrFd, pattern, internal_strlen(pattern)); + WriteToFile(kStderrFd, "\n", internal_strlen("\n")); + CHECK_LT(internal_strlcpy(dest, pattern, dest_size), dest_size); + return; + } + } + CHECK(str_to_concat); + CHECK_LT(internal_strlcat(dest, str_to_concat, dest_size), dest_size); + next_substr_start_idx = i + 1; + } + CHECK_LT(internal_strlcat(dest, pattern + next_substr_start_idx, dest_size), + dest_size); +} + void ReportFile::SetReportPath(const char *path) { if (path) { uptr len = internal_strlen(path); if (len > sizeof(path_prefix) - 100) { - Report("ERROR: Path is too long: %c%c%c%c%c%c%c%c...\n", path[0], path[1], - path[2], path[3], path[4], path[5], path[6], path[7]); - Die(); + bool fallback = common_flags()->log_fallback_to_stderr; + const char *message = + fallback ? "WARNING: Path is too long, falling back to stderr: " + : "ERROR: Path is too long: "; + WriteToFile(kStderrFd, message, internal_strlen(message)); + WriteToFile(kStderrFd, path, 8); + message = "...\n"; + WriteToFile(kStderrFd, message, internal_strlen(message)); + if (!fallback) + Die(); + path = "stderr"; } } @@ -115,8 +207,8 @@ void ReportFile::SetReportPath(const char *path) { } else if (internal_strcmp(path, "stdout") == 0) { fd = kStdoutFd; } else { - internal_snprintf(path_prefix, kMaxPathLength, "%s", path); - RecursiveCreateParentDirs(path_prefix); + ParseAndSetPath(path, path_prefix, kMaxPathLength); + RecursiveCreateParentDirs(path_prefix, fd); } } diff --git a/libsanitizer/sanitizer_common/sanitizer_file.h b/libsanitizer/sanitizer_common/sanitizer_file.h index bef2c842d9f2..b3a5fed922da 100644 --- a/libsanitizer/sanitizer_common/sanitizer_file.h +++ b/libsanitizer/sanitizer_common/sanitizer_file.h @@ -43,6 +43,9 @@ struct ReportFile { // PID of the process that opened fd. If a fork() occurs, // the PID of child will be different from fd_pid. 
uptr fd_pid; + // Set to true if the last attempt to open the logfile failed, perhaps due to + // permission errors + bool fallbackToStderrActive = false; private: void ReopenIfNecessary(); diff --git a/libsanitizer/sanitizer_common/sanitizer_flags.inc b/libsanitizer/sanitizer_common/sanitizer_flags.inc index c1e3530618c2..5f449907f601 100644 --- a/libsanitizer/sanitizer_common/sanitizer_flags.inc +++ b/libsanitizer/sanitizer_common/sanitizer_flags.inc @@ -65,6 +65,8 @@ COMMON_FLAG( bool, log_to_syslog, (bool)SANITIZER_ANDROID || (bool)SANITIZER_APPLE, "Write all sanitizer output to syslog in addition to other means of " "logging.") +COMMON_FLAG(bool, log_fallback_to_stderr, false, + "When set, fallback to stderr if we are unable to open log path.") COMMON_FLAG( int, verbosity, 0, "Verbosity level (0 - silent, 1 - a bit of output, 2+ - more output).") @@ -111,6 +113,11 @@ COMMON_FLAG(HandleSignalMode, handle_sigfpe, kHandleSignalYes, COMMON_FLAG(bool, allow_user_segv_handler, true, "Deprecated. True has no effect, use handle_sigbus=1. If false, " "handle_*=1 will be upgraded to handle_*=2.") +COMMON_FLAG(bool, cloak_sanitizer_signal_handlers, false, + "If set, signal/sigaction will pretend that sanitizers did not " + "preinstall any signal handlers. 
If the user subsequently installs " + "a signal handler, this will disable cloaking for the respective " + "signal.") COMMON_FLAG(bool, use_sigaltstack, true, "If set, uses alternate stack for signal handling.") COMMON_FLAG(bool, detect_deadlocks, true, diff --git a/libsanitizer/sanitizer_common/sanitizer_fuchsia.cpp b/libsanitizer/sanitizer_common/sanitizer_fuchsia.cpp index acbf3ebfc95c..3c61b6080299 100644 --- a/libsanitizer/sanitizer_common/sanitizer_fuchsia.cpp +++ b/libsanitizer/sanitizer_common/sanitizer_fuchsia.cpp @@ -14,6 +14,7 @@ #include "sanitizer_fuchsia.h" #if SANITIZER_FUCHSIA +# include # include # include # include @@ -68,7 +69,7 @@ int internal_dlinfo(void *handle, int request, void *p) { UNIMPLEMENTED(); } uptr GetThreadSelf() { return reinterpret_cast(thrd_current()); } -tid_t GetTid() { return GetThreadSelf(); } +ThreadID GetTid() { return GetThreadSelf(); } void Abort() { abort(); } @@ -117,11 +118,37 @@ uptr GetMmapGranularity() { return _zx_system_get_page_size(); } sanitizer_shadow_bounds_t ShadowBounds; +// Any sanitizer that utilizes shadow should explicitly call whenever it's +// appropriate for that sanitizer to reference shadow bounds. For ASan, this is +// done in `InitializeShadowMemory` and for HWASan, this is done in +// `InitShadow`. void InitShadowBounds() { ShadowBounds = __sanitizer_shadow_bounds(); } +// TODO(leonardchan): It's not immediately clear from a user perspective if +// `GetMaxUserVirtualAddress` should be called exatly once on runtime startup +// or can be called multiple times. Currently it looks like most instances of +// `GetMaxUserVirtualAddress` are meant to be called once, but if someone +// decides to call this multiple times in the future, we should have a separate +// function that's ok to call multiple times. Ideally we would just invoke this +// syscall once. Also for Fuchsia, this syscall technically gets invoked twice +// since `__sanitizer_shadow_bounds` also invokes this syscall under the hood. 
uptr GetMaxUserVirtualAddress() { - InitShadowBounds(); - return ShadowBounds.memory_limit - 1; + zx_info_vmar_t info; + zx_status_t status = _zx_object_get_info(_zx_vmar_root_self(), ZX_INFO_VMAR, + &info, sizeof(info), NULL, NULL); + CHECK_EQ(status, ZX_OK); + + // Find the top of the accessible address space. + uintptr_t top = info.base + info.len; + + // Round it up to a power-of-two size. There may be some pages at + // the top that can't actually be mapped, but for purposes of the + // the shadow, we'll pretend they could be. + int bit = (sizeof(uintptr_t) * CHAR_BIT) - __builtin_clzl(top); + if (top != (uintptr_t)1 << bit) + top = (uintptr_t)1 << (bit + 1); + + return top - 1; } uptr GetMaxVirtualAddress() { return GetMaxUserVirtualAddress(); } @@ -547,6 +574,8 @@ void __sanitizer_startup_hook(int argc, char **argv, char **envp, __sanitizer::StoredEnviron = envp; __sanitizer::MainThreadStackBase = reinterpret_cast(stack_base); __sanitizer::MainThreadStackSize = stack_size; + + EarlySanitizerInit(); } void __sanitizer_set_report_path(const char *path) { diff --git a/libsanitizer/sanitizer_common/sanitizer_fuchsia.h b/libsanitizer/sanitizer_common/sanitizer_fuchsia.h index 26c1deab9e5f..47e7537c1bdf 100644 --- a/libsanitizer/sanitizer_common/sanitizer_fuchsia.h +++ b/libsanitizer/sanitizer_common/sanitizer_fuchsia.h @@ -32,6 +32,13 @@ struct MemoryMappingLayoutData { void InitShadowBounds(); +// Individual sanitizers can define this to explicitly run something at the end +// of `__sanitizer_startup_hook`. This can be useful if a sanitizer needs to do +// extra work after the common startup hook code is called and before module +// ctors are invoked. For example, hwasan can explicitly call its initializing +// function here so it can be set up before libc extensions are initialized. 
+void EarlySanitizerInit(); + } // namespace __sanitizer #endif // SANITIZER_FUCHSIA diff --git a/libsanitizer/sanitizer_common/sanitizer_getauxval.h b/libsanitizer/sanitizer_common/sanitizer_getauxval.h index 38439e44f611..910590b627c2 100644 --- a/libsanitizer/sanitizer_common/sanitizer_getauxval.h +++ b/libsanitizer/sanitizer_common/sanitizer_getauxval.h @@ -21,22 +21,21 @@ #if SANITIZER_LINUX || SANITIZER_FUCHSIA -# if (__GLIBC_PREREQ(2, 16) || (SANITIZER_ANDROID && __ANDROID_API__ >= 21) || \ - SANITIZER_FUCHSIA) && \ - !SANITIZER_GO -# define SANITIZER_USE_GETAUXVAL 1 -# else -# define SANITIZER_USE_GETAUXVAL 0 -# endif - -# if SANITIZER_USE_GETAUXVAL -# include -# else +# if (__GLIBC_PREREQ(2, 16) || SANITIZER_ANDROID || SANITIZER_FUCHSIA) && \ + !SANITIZER_GO +# define SANITIZER_USE_GETAUXVAL 1 +# else +# define SANITIZER_USE_GETAUXVAL 0 +# endif + +# if SANITIZER_USE_GETAUXVAL +# include +# else // The weak getauxval definition allows to check for the function at runtime. // This is useful for Android, when compiled at a lower API level yet running // on a more recent platform that offers the function. extern "C" SANITIZER_WEAK_ATTRIBUTE unsigned long getauxval(unsigned long type); -# endif +# endif #elif SANITIZER_NETBSD diff --git a/libsanitizer/sanitizer_common/sanitizer_internal_defs.h b/libsanitizer/sanitizer_common/sanitizer_internal_defs.h index fff60c96f632..c719e2a8ef60 100644 --- a/libsanitizer/sanitizer_common/sanitizer_internal_defs.h +++ b/libsanitizer/sanitizer_common/sanitizer_internal_defs.h @@ -209,7 +209,7 @@ typedef long ssize; typedef sptr ssize; #endif -typedef u64 tid_t; +typedef u64 ThreadID; // ----------- ATTENTION ------------- // This header should NOT include any other headers to avoid portability issues. 
diff --git a/libsanitizer/sanitizer_common/sanitizer_linux.cpp b/libsanitizer/sanitizer_common/sanitizer_linux.cpp index 6331c26c0a92..87a18b1120af 100644 --- a/libsanitizer/sanitizer_common/sanitizer_linux.cpp +++ b/libsanitizer/sanitizer_common/sanitizer_linux.cpp @@ -14,7 +14,7 @@ #include "sanitizer_platform.h" #if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \ - SANITIZER_SOLARIS + SANITIZER_SOLARIS || SANITIZER_HAIKU # include "sanitizer_common.h" # include "sanitizer_flags.h" @@ -63,15 +63,17 @@ # include # include # include -# if !SANITIZER_SOLARIS +# if !SANITIZER_SOLARIS && !SANITIZER_HAIKU # include # endif # include # include -# include +# if !SANITIZER_HAIKU +# include +# include +# endif # include # include -# include # include # if SANITIZER_LINUX @@ -82,6 +84,12 @@ # include # endif +# if SANITIZER_ANDROID && __ANDROID_API__ < 35 +// The weak `strerrorname_np` (introduced in API level 35) definition, +// allows for checking the API level at runtime. +extern "C" SANITIZER_WEAK_ATTRIBUTE const char *strerrorname_np(int); +# endif + # if SANITIZER_LINUX && defined(__loongarch__) # include # endif @@ -118,6 +126,13 @@ extern struct ps_strings *__ps_strings; # define environ _environ # endif +# if SANITIZER_HAIKU +# include +# include +# include +extern "C" char **__libc_argv; +# endif + extern char **environ; # if SANITIZER_LINUX @@ -138,9 +153,10 @@ const int FUTEX_WAKE_PRIVATE = FUTEX_WAKE | FUTEX_PRIVATE_FLAG; // Are we using 32-bit or 64-bit Linux syscalls? // x32 (which defines __x86_64__) has SANITIZER_WORDSIZE == 32 // but it still needs to use 64-bit syscalls. 
-# if SANITIZER_LINUX && (defined(__x86_64__) || defined(__powerpc64__) || \ - SANITIZER_WORDSIZE == 64 || \ - (defined(__mips__) && _MIPS_SIM == _ABIN32)) +# if SANITIZER_LINUX && \ + (defined(__x86_64__) || defined(__powerpc64__) || \ + SANITIZER_WORDSIZE == 64 || \ + (defined(__mips__) && defined(_ABIN32) && _MIPS_SIM == _ABIN32)) # define SANITIZER_LINUX_USES_64BIT_SYSCALLS 1 # else # define SANITIZER_LINUX_USES_64BIT_SYSCALLS 0 @@ -168,33 +184,56 @@ void SetSigProcMask(__sanitizer_sigset_t *set, __sanitizer_sigset_t *oldset) { CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, set, oldset)); } +# if SANITIZER_LINUX +// Deletes the specified signal from newset, if it is not present in oldset +// Equivalently: newset[signum] = newset[signum] & oldset[signum] +static void KeepUnblocked(__sanitizer_sigset_t &newset, + __sanitizer_sigset_t &oldset, int signum) { + // FIXME: https://github.com/google/sanitizers/issues/1816 + if (SANITIZER_ANDROID || !internal_sigismember(&oldset, signum)) + internal_sigdelset(&newset, signum); +} +# endif + // Block asynchronous signals void BlockSignals(__sanitizer_sigset_t *oldset) { - __sanitizer_sigset_t set; - internal_sigfillset(&set); -# if SANITIZER_LINUX && !SANITIZER_ANDROID + __sanitizer_sigset_t newset; + internal_sigfillset(&newset); + +# if SANITIZER_LINUX + __sanitizer_sigset_t currentset; + +# if !SANITIZER_ANDROID + // FIXME: https://github.com/google/sanitizers/issues/1816 + SetSigProcMask(NULL, ¤tset); + // Glibc uses SIGSETXID signal during setuid call. If this signal is blocked // on any thread, setuid call hangs. // See test/sanitizer_common/TestCases/Linux/setuid.c. - internal_sigdelset(&set, 33); -# endif -# if SANITIZER_LINUX + KeepUnblocked(newset, currentset, 33); +# endif // !SANITIZER_ANDROID + // Seccomp-BPF-sandboxed processes rely on SIGSYS to handle trapped syscalls. // If this signal is blocked, such calls cannot be handled and the process may // hang. 
- internal_sigdelset(&set, 31); + KeepUnblocked(newset, currentset, 31); +# if !SANITIZER_ANDROID // Don't block synchronous signals - internal_sigdelset(&set, SIGSEGV); - internal_sigdelset(&set, SIGBUS); - internal_sigdelset(&set, SIGILL); - internal_sigdelset(&set, SIGTRAP); - internal_sigdelset(&set, SIGABRT); - internal_sigdelset(&set, SIGFPE); - internal_sigdelset(&set, SIGPIPE); -# endif + // but also don't unblock signals that the user had deliberately blocked. + // FIXME: https://github.com/google/sanitizers/issues/1816 + KeepUnblocked(newset, currentset, SIGSEGV); + KeepUnblocked(newset, currentset, SIGBUS); + KeepUnblocked(newset, currentset, SIGILL); + KeepUnblocked(newset, currentset, SIGTRAP); + KeepUnblocked(newset, currentset, SIGABRT); + KeepUnblocked(newset, currentset, SIGFPE); + KeepUnblocked(newset, currentset, SIGPIPE); +# endif //! SANITIZER_ANDROID - SetSigProcMask(&set, oldset); +# endif // SANITIZER_LINUX + + SetSigProcMask(&newset, oldset); } ScopedBlockSignals::ScopedBlockSignals(__sanitizer_sigset_t *copy) { @@ -222,7 +261,7 @@ ScopedBlockSignals::~ScopedBlockSignals() { SetSigProcMask(&saved_, nullptr); } # endif // --------------- sanitizer_libc.h -# if !SANITIZER_SOLARIS && !SANITIZER_NETBSD +# if !SANITIZER_SOLARIS && !SANITIZER_NETBSD && !SANITIZER_HAIKU # if !SANITIZER_S390 uptr internal_mmap(void *addr, uptr length, int prot, int flags, int fd, u64 offset) { @@ -410,8 +449,9 @@ uptr internal_stat(const char *path, void *buf) { AT_NO_AUTOMOUNT, STATX_BASIC_STATS, (uptr)&bufx); statx_to_stat(&bufx, (struct stat *)buf); return res; -# elif (SANITIZER_WORDSIZE == 64 || SANITIZER_X32 || \ - (defined(__mips__) && _MIPS_SIM == _ABIN32)) && \ +# elif ( \ + SANITIZER_WORDSIZE == 64 || SANITIZER_X32 || \ + (defined(__mips__) && defined(_ABIN32) && _MIPS_SIM == _ABIN32)) && \ !SANITIZER_SPARC return internal_syscall(SYSCALL(newfstatat), AT_FDCWD, (uptr)path, (uptr)buf, 0); @@ -448,8 +488,9 @@ uptr internal_lstat(const char *path, void *buf) 
{ STATX_BASIC_STATS, (uptr)&bufx); statx_to_stat(&bufx, (struct stat *)buf); return res; -# elif (defined(_LP64) || SANITIZER_X32 || \ - (defined(__mips__) && _MIPS_SIM == _ABIN32)) && \ +# elif ( \ + defined(_LP64) || SANITIZER_X32 || \ + (defined(__mips__) && defined(_ABIN32) && _MIPS_SIM == _ABIN32)) && \ !SANITIZER_SPARC return internal_syscall(SYSCALL(newfstatat), AT_FDCWD, (uptr)path, (uptr)buf, AT_SYMLINK_NOFOLLOW); @@ -565,9 +606,9 @@ uptr internal_execve(const char *filename, char *const argv[], return internal_syscall(SYSCALL(execve), (uptr)filename, (uptr)argv, (uptr)envp); } -# endif // !SANITIZER_SOLARIS && !SANITIZER_NETBSD +# endif // !SANITIZER_SOLARIS && !SANITIZER_NETBSD && !SANITIZER_HAIKU -# if !SANITIZER_NETBSD +# if !SANITIZER_NETBSD && !SANITIZER_HAIKU void internal__exit(int exitcode) { # if SANITIZER_FREEBSD || SANITIZER_SOLARIS internal_syscall(SYSCALL(exit), exitcode); @@ -576,7 +617,7 @@ void internal__exit(int exitcode) { # endif Die(); // Unreachable. } -# endif // !SANITIZER_NETBSD +# endif // !SANITIZER_NETBSD && !SANITIZER_HAIKU // ----------------- sanitizer_common.h bool FileExists(const char *filename) { @@ -597,19 +638,21 @@ bool DirExists(const char *path) { } # if !SANITIZER_NETBSD -tid_t GetTid() { +ThreadID GetTid() { # if SANITIZER_FREEBSD long Tid; thr_self(&Tid); return Tid; # elif SANITIZER_SOLARIS return thr_self(); +# elif SANITIZER_HAIKU + return find_thread(NULL); # else return internal_syscall(SYSCALL(gettid)); # endif } -int TgKill(pid_t pid, tid_t tid, int sig) { +int TgKill(pid_t pid, ThreadID tid, int sig) { # if SANITIZER_LINUX return internal_syscall(SYSCALL(tgkill), pid, tid, sig); # elif SANITIZER_FREEBSD @@ -619,6 +662,8 @@ int TgKill(pid_t pid, tid_t tid, int sig) { errno = thr_kill(tid, sig); // TgKill is expected to return -1 on error, not an errno. return errno != 0 ? 
-1 : 0; +# elif SANITIZER_HAIKU + return kill_thread(tid); # endif } # endif @@ -646,7 +691,8 @@ u64 NanoTime() { // 'environ' array (on some others) and does not use libc. This function // should be called first inside __asan_init. const char *GetEnv(const char *name) { -# if SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_SOLARIS +# if SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_SOLARIS || \ + SANITIZER_HAIKU if (::environ != 0) { uptr NameLen = internal_strlen(name); for (char **Env = ::environ; *Env != 0; Env++) { @@ -684,13 +730,14 @@ const char *GetEnv(const char *name) { # endif } -# if !SANITIZER_FREEBSD && !SANITIZER_NETBSD && !SANITIZER_GO +# if !SANITIZER_HAIKU && !SANITIZER_FREEBSD && !SANITIZER_NETBSD && \ + !SANITIZER_GO extern "C" { SANITIZER_WEAK_ATTRIBUTE extern void *__libc_stack_end; } # endif -# if !SANITIZER_FREEBSD && !SANITIZER_NETBSD +# if !SANITIZER_HAIKU && !SANITIZER_FREEBSD && !SANITIZER_NETBSD static void ReadNullSepFileToArray(const char *path, char ***arr, int arr_size) { char *buff; @@ -717,7 +764,10 @@ static void ReadNullSepFileToArray(const char *path, char ***arr, # endif static void GetArgsAndEnv(char ***argv, char ***envp) { -# if SANITIZER_FREEBSD +# if SANITIZER_HAIKU + *argv = __libc_argv; + *envp = environ; +# elif SANITIZER_FREEBSD // On FreeBSD, retrieving the argument and environment arrays is done via the // kern.ps_strings sysctl, which returns a pointer to a structure containing // this information. See also . 
@@ -757,7 +807,7 @@ static void GetArgsAndEnv(char ***argv, char ***envp) { # if !SANITIZER_GO } # endif // !SANITIZER_GO -# endif // SANITIZER_FREEBSD +# endif // SANITIZER_HAIKU } char **GetArgv() { @@ -776,7 +826,7 @@ char **GetEnviron() { void FutexWait(atomic_uint32_t *p, u32 cmp) { # if SANITIZER_FREEBSD _umtx_op(p, UMTX_OP_WAIT_UINT, cmp, 0, 0); -# elif SANITIZER_NETBSD +# elif SANITIZER_NETBSD || SANITIZER_HAIKU sched_yield(); /* No userspace futex-like synchronization */ # else internal_syscall(SYSCALL(futex), (uptr)p, FUTEX_WAIT_PRIVATE, cmp, 0, 0, 0); @@ -786,7 +836,7 @@ void FutexWait(atomic_uint32_t *p, u32 cmp) { void FutexWake(atomic_uint32_t *p, u32 count) { # if SANITIZER_FREEBSD _umtx_op(p, UMTX_OP_WAKE, count, 0, 0); -# elif SANITIZER_NETBSD +# elif SANITIZER_NETBSD || SANITIZER_HAIKU /* No userspace futex-like synchronization */ # else internal_syscall(SYSCALL(futex), (uptr)p, FUTEX_WAKE_PRIVATE, count, 0, 0, 0); @@ -818,7 +868,7 @@ struct linux_dirent { }; # endif -# if !SANITIZER_SOLARIS && !SANITIZER_NETBSD +# if !SANITIZER_SOLARIS && !SANITIZER_NETBSD && !SANITIZER_HAIKU // Syscall wrappers. uptr internal_ptrace(int request, int pid, void *addr, void *data) { return internal_syscall(SYSCALL(ptrace), request, pid, (uptr)addr, @@ -1032,14 +1082,14 @@ bool internal_sigismember(__sanitizer_sigset_t *set, int signum) { # endif # endif // !SANITIZER_SOLARIS -# if !SANITIZER_NETBSD +# if !SANITIZER_NETBSD && !SANITIZER_HAIKU // ThreadLister implementation. 
ThreadLister::ThreadLister(pid_t pid) : buffer_(4096) { task_path_.AppendF("/proc/%d/task", pid); } ThreadLister::Result ThreadLister::ListThreads( - InternalMmapVector *threads) { + InternalMmapVector *threads) { int descriptor = internal_open(task_path_.data(), O_RDONLY | O_DIRECTORY); if (internal_iserror(descriptor)) { Report("Can't open %s for reading.\n", task_path_.data()); @@ -1094,7 +1144,7 @@ ThreadLister::Result ThreadLister::ListThreads( } } -const char *ThreadLister::LoadStatus(tid_t tid) { +const char *ThreadLister::LoadStatus(ThreadID tid) { status_path_.clear(); status_path_.AppendF("%s/%llu/status", task_path_.data(), tid); auto cleanup = at_scope_exit([&] { @@ -1107,7 +1157,7 @@ const char *ThreadLister::LoadStatus(tid_t tid) { return buffer_.data(); } -bool ThreadLister::IsAlive(tid_t tid) { +bool ThreadLister::IsAlive(ThreadID tid) { // /proc/%d/task/%d/status uses same call to detect alive threads as // proc_task_readdir. See task_state implementation in Linux. static const char kPrefix[] = "\nPPid:"; @@ -1218,6 +1268,16 @@ uptr GetPageSize() { CHECK_EQ(rv, 0); return (uptr)pz; # elif SANITIZER_USE_GETAUXVAL +# if SANITIZER_ANDROID && __ANDROID_API__ < 35 + // The 16 KB page size was introduced in Android 15 (API level 35), while + // earlier versions of Android always used a 4 KB page size. + // We are checking the weak definition of `strerrorname_np` (introduced in API + // level 35) because some earlier API levels crashed when + // `getauxval(AT_PAGESZ)` was called from the `.preinit_array`. + if (!strerrorname_np) + return 4096; +# endif + return getauxval(AT_PAGESZ); # else return sysconf(_SC_PAGESIZE); // EXEC_PAGESIZE may not be trustworthy. 
@@ -1226,7 +1286,19 @@ uptr GetPageSize() { # endif uptr ReadBinaryName(/*out*/ char *buf, uptr buf_len) { -# if SANITIZER_SOLARIS +# if SANITIZER_HAIKU + int cookie = 0; + image_info info; + const char *argv0 = ""; + while (get_next_image_info(B_CURRENT_TEAM, &cookie, &info) == B_OK) { + if (info.type != B_APP_IMAGE) + continue; + argv0 = info.name; + break; + } + internal_strncpy(buf, argv0, buf_len); + return internal_strlen(buf); +# elif SANITIZER_SOLARIS const char *default_module_name = getexecname(); CHECK_NE(default_module_name, NULL); return internal_snprintf(buf, buf_len, "%s", default_module_name); @@ -1292,11 +1364,11 @@ bool LibraryNameIs(const char *full_name, const char *base_name) { return (name[base_name_length] == '-' || name[base_name_length] == '.'); } -# if !SANITIZER_ANDROID +# if !SANITIZER_ANDROID && !SANITIZER_HAIKU // Call cb for each region mapped by map. void ForEachMappedRegion(link_map *map, void (*cb)(const void *, uptr)) { CHECK_NE(map, nullptr); -# if !SANITIZER_FREEBSD +# if !SANITIZER_FREEBSD && !SANITIZER_HAIKU typedef ElfW(Phdr) Elf_Phdr; typedef ElfW(Ehdr) Elf_Ehdr; # endif // !SANITIZER_FREEBSD @@ -1829,63 +1901,6 @@ int internal_uname(struct utsname *buf) { } # endif -# if SANITIZER_ANDROID -# if __ANDROID_API__ < 21 -extern "C" __attribute__((weak)) int dl_iterate_phdr( - int (*)(struct dl_phdr_info *, size_t, void *), void *); -# endif - -static int dl_iterate_phdr_test_cb(struct dl_phdr_info *info, size_t size, - void *data) { - // Any name starting with "lib" indicates a bug in L where library base names - // are returned instead of paths. 
- if (info->dlpi_name && info->dlpi_name[0] == 'l' && - info->dlpi_name[1] == 'i' && info->dlpi_name[2] == 'b') { - *(bool *)data = true; - return 1; - } - return 0; -} - -static atomic_uint32_t android_api_level; - -static AndroidApiLevel AndroidDetectApiLevelStatic() { -# if __ANDROID_API__ <= 19 - return ANDROID_KITKAT; -# elif __ANDROID_API__ <= 22 - return ANDROID_LOLLIPOP_MR1; -# else - return ANDROID_POST_LOLLIPOP; -# endif -} - -static AndroidApiLevel AndroidDetectApiLevel() { - if (!&dl_iterate_phdr) - return ANDROID_KITKAT; // K or lower - bool base_name_seen = false; - dl_iterate_phdr(dl_iterate_phdr_test_cb, &base_name_seen); - if (base_name_seen) - return ANDROID_LOLLIPOP_MR1; // L MR1 - return ANDROID_POST_LOLLIPOP; // post-L - // Plain L (API level 21) is completely broken wrt ASan and not very - // interesting to detect. -} - -extern "C" __attribute__((weak)) void *_DYNAMIC; - -AndroidApiLevel AndroidGetApiLevel() { - AndroidApiLevel level = - (AndroidApiLevel)atomic_load(&android_api_level, memory_order_relaxed); - if (level) - return level; - level = &_DYNAMIC == nullptr ? 
AndroidDetectApiLevelStatic() - : AndroidDetectApiLevel(); - atomic_store(&android_api_level, level, memory_order_relaxed); - return level; -} - -# endif - static HandleSignalMode GetHandleSignalModeImpl(int signum) { switch (signum) { case SIGABRT: @@ -1964,11 +1979,15 @@ using Context = ucontext_t; SignalContext::WriteFlag SignalContext::GetWriteFlag() const { Context *ucontext = (Context *)context; # if defined(__x86_64__) || defined(__i386__) +# if !SANITIZER_HAIKU static const uptr PF_WRITE = 1U << 1; +# endif # if SANITIZER_FREEBSD uptr err = ucontext->uc_mcontext.mc_err; # elif SANITIZER_NETBSD uptr err = ucontext->uc_mcontext.__gregs[_REG_ERR]; +# elif SANITIZER_HAIKU + uptr err = ucontext->uc_mcontext.r13; # elif SANITIZER_SOLARIS && defined(__i386__) const int Err = 13; uptr err = ucontext->uc_mcontext.gregs[Err]; @@ -2581,6 +2600,11 @@ static void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) { *pc = ucontext->uc_mcontext.mc_rip; *bp = ucontext->uc_mcontext.mc_rbp; *sp = ucontext->uc_mcontext.mc_rsp; +# elif SANITIZER_HAIKU + ucontext_t *ucontext = (ucontext_t *)context; + *pc = ucontext->uc_mcontext.rip; + *bp = ucontext->uc_mcontext.rbp; + *sp = ucontext->uc_mcontext.rsp; # else ucontext_t *ucontext = (ucontext_t *)context; *pc = ucontext->uc_mcontext.gregs[REG_RIP]; diff --git a/libsanitizer/sanitizer_common/sanitizer_linux.h b/libsanitizer/sanitizer_common/sanitizer_linux.h index 8b7874bb5a34..e621799c4bdf 100644 --- a/libsanitizer/sanitizer_common/sanitizer_linux.h +++ b/libsanitizer/sanitizer_common/sanitizer_linux.h @@ -14,7 +14,7 @@ #include "sanitizer_platform.h" #if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \ - SANITIZER_SOLARIS + SANITIZER_SOLARIS || SANITIZER_HAIKU # include "sanitizer_common.h" # include "sanitizer_internal_defs.h" # include "sanitizer_platform_limits_freebsd.h" @@ -31,6 +31,11 @@ namespace __sanitizer { // the one in , which is used by readdir(). 
struct linux_dirent; +# if SANITIZER_HAIKU +struct MemoryMappingLayoutData { + long signed int cookie; +}; +# else struct ProcSelfMapsBuff { char *data; uptr mmaped_size; @@ -43,6 +48,7 @@ struct MemoryMappingLayoutData { }; void ReadProcMaps(ProcSelfMapsBuff *proc_maps); +# endif // SANITIZER_HAIKU // Syscall wrappers. uptr internal_getdents(fd_t fd, struct linux_dirent *dirp, unsigned int count); @@ -102,11 +108,11 @@ class ThreadLister { Incomplete, Ok, }; - Result ListThreads(InternalMmapVector *threads); - const char *LoadStatus(tid_t tid); + Result ListThreads(InternalMmapVector *threads); + const char *LoadStatus(ThreadID tid); private: - bool IsAlive(tid_t tid); + bool IsAlive(ThreadID tid); InternalScopedString task_path_; InternalScopedString status_path_; @@ -124,7 +130,7 @@ bool LibraryNameIs(const char *full_name, const char *base_name); // Call cb for each region mapped by map. void ForEachMappedRegion(link_map *map, void (*cb)(const void *, uptr)); -// Releases memory pages entirely within the [beg, end] address range. +// Releases memory pages entirely within the [beg, end) address range. // The pages no longer count toward RSS; reads are guaranteed to return 0. // Requires (but does not verify!) that pages are MAP_PRIVATE. 
inline void ReleaseMemoryPagesToOSAndZeroFill(uptr beg, uptr end) { diff --git a/libsanitizer/sanitizer_common/sanitizer_linux_libcdep.cpp b/libsanitizer/sanitizer_common/sanitizer_linux_libcdep.cpp index 9d01a97af5f4..530ff90c4cd1 100644 --- a/libsanitizer/sanitizer_common/sanitizer_linux_libcdep.cpp +++ b/libsanitizer/sanitizer_common/sanitizer_linux_libcdep.cpp @@ -14,7 +14,7 @@ #include "sanitizer_platform.h" #if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \ - SANITIZER_SOLARIS + SANITIZER_SOLARIS || SANITIZER_HAIKU # include "sanitizer_allocator_internal.h" # include "sanitizer_atomic.h" @@ -28,8 +28,20 @@ # include "sanitizer_procmaps.h" # include "sanitizer_solaris.h" +# if SANITIZER_HAIKU +# define _GNU_SOURCE +# define _DEFAULT_SOURCE +# endif + # if SANITIZER_NETBSD -# define _RTLD_SOURCE // for __lwp_gettcb_fast() / __lwp_getprivate_fast() +# // for __lwp_gettcb_fast() / __lwp_getprivate_fast() +# define _RTLD_SOURCE +# include +# undef _RTLD_SOURCE +# include +# if __NetBSD_Version__ >= 1099001200 +# include +# endif # endif # include // for dlsym() @@ -57,7 +69,7 @@ // that, it was never implemented. So just define it to zero. 
# undef MAP_NORESERVE # define MAP_NORESERVE 0 -extern const Elf_Auxinfo *__elf_aux_vector; +extern const Elf_Auxinfo *__elf_aux_vector __attribute__((weak)); extern "C" int __sys_sigaction(int signum, const struct sigaction *act, struct sigaction *oldact); # endif @@ -74,18 +86,9 @@ extern "C" int __sys_sigaction(int signum, const struct sigaction *act, # include # endif -# if SANITIZER_ANDROID -# include -# if !defined(CPU_COUNT) && !defined(__aarch64__) -# include -# include -struct __sanitizer::linux_dirent { - long d_ino; - off_t d_off; - unsigned short d_reclen; - char d_name[]; -}; -# endif +# if SANITIZER_HAIKU +# include +# include # endif # if !SANITIZER_ANDROID @@ -651,6 +654,7 @@ static void GetTls(uptr *addr, uptr *size) { *addr = (uptr)tcb->tcb_dtv[1]; } } +# elif SANITIZER_HAIKU # else # error "Unknown OS" # endif @@ -721,8 +725,13 @@ static int AddModuleSegments(const char *module_name, dl_phdr_info *info, if (phdr->p_type == PT_LOAD) { uptr cur_beg = info->dlpi_addr + phdr->p_vaddr; uptr cur_end = cur_beg + phdr->p_memsz; +# if SANITIZER_HAIKU + bool executable = phdr->p_flags & PF_EXECUTE; + bool writable = phdr->p_flags & PF_WRITE; +# else bool executable = phdr->p_flags & PF_X; bool writable = phdr->p_flags & PF_W; +# endif cur_module.addAddressRange(cur_beg, cur_end, executable, writable); } else if (phdr->p_type == PT_NOTE) { # ifdef NT_GNU_BUILD_ID @@ -774,47 +783,13 @@ static int dl_iterate_phdr_cb(dl_phdr_info *info, size_t size, void *arg) { return 0; } -# if SANITIZER_ANDROID && __ANDROID_API__ < 21 -extern "C" __attribute__((weak)) int dl_iterate_phdr( - int (*)(struct dl_phdr_info *, size_t, void *), void *); -# endif - -static bool requiresProcmaps() { -# if SANITIZER_ANDROID && __ANDROID_API__ <= 22 - // Fall back to /proc/maps if dl_iterate_phdr is unavailable or broken. - // The runtime check allows the same library to work with - // both K and L (and future) Android releases. 
- return AndroidGetApiLevel() <= ANDROID_LOLLIPOP_MR1; -# else - return false; -# endif -} - -static void procmapsInit(InternalMmapVectorNoCtor *modules) { - MemoryMappingLayout memory_mapping(/*cache_enabled*/ true); - memory_mapping.DumpListOfModules(modules); -} - void ListOfModules::init() { clearOrInit(); - if (requiresProcmaps()) { - procmapsInit(&modules_); - } else { - DlIteratePhdrData data = {&modules_, true}; - dl_iterate_phdr(dl_iterate_phdr_cb, &data); - } + DlIteratePhdrData data = {&modules_, true}; + dl_iterate_phdr(dl_iterate_phdr_cb, &data); } -// When a custom loader is used, dl_iterate_phdr may not contain the full -// list of modules. Allow callers to fall back to using procmaps. -void ListOfModules::fallbackInit() { - if (!requiresProcmaps()) { - clearOrInit(); - procmapsInit(&modules_); - } else { - clear(); - } -} +void ListOfModules::fallbackInit() { clear(); } // getrusage does not give us the current RSS, only the max RSS. // Still, this is better than nothing if /proc/self/statm is not available @@ -860,55 +835,23 @@ u32 GetNumberOfCPUs() { int req[2]; uptr len = sizeof(ncpu); req[0] = CTL_HW; +# ifdef HW_NCPUONLINE + req[1] = HW_NCPUONLINE; +# else req[1] = HW_NCPU; +# endif CHECK_EQ(internal_sysctl(req, 2, &ncpu, &len, NULL, 0), 0); return ncpu; -# elif SANITIZER_ANDROID && !defined(CPU_COUNT) && !defined(__aarch64__) - // Fall back to /sys/devices/system/cpu on Android when cpu_set_t doesn't - // exist in sched.h. That is the case for toolchains generated with older - // NDKs. - // This code doesn't work on AArch64 because internal_getdents makes use of - // the 64bit getdents syscall, but cpu_set_t seems to always exist on AArch64. 
- uptr fd = internal_open("/sys/devices/system/cpu", O_RDONLY | O_DIRECTORY); - if (internal_iserror(fd)) - return 0; - InternalMmapVector buffer(4096); - uptr bytes_read = buffer.size(); - uptr n_cpus = 0; - u8 *d_type; - struct linux_dirent *entry = (struct linux_dirent *)&buffer[bytes_read]; - while (true) { - if ((u8 *)entry >= &buffer[bytes_read]) { - bytes_read = internal_getdents(fd, (struct linux_dirent *)buffer.data(), - buffer.size()); - if (internal_iserror(bytes_read) || !bytes_read) - break; - entry = (struct linux_dirent *)buffer.data(); - } - d_type = (u8 *)entry + entry->d_reclen - 1; - if (d_type >= &buffer[bytes_read] || - (u8 *)&entry->d_name[3] >= &buffer[bytes_read]) - break; - if (entry->d_ino != 0 && *d_type == DT_DIR) { - if (entry->d_name[0] == 'c' && entry->d_name[1] == 'p' && - entry->d_name[2] == 'u' && entry->d_name[3] >= '0' && - entry->d_name[3] <= '9') - n_cpus++; - } - entry = (struct linux_dirent *)(((u8 *)entry) + entry->d_reclen); - } - internal_close(fd); - return n_cpus; +# elif SANITIZER_HAIKU + system_info info; + get_system_info(&info); + return info.cpu_count; # elif SANITIZER_SOLARIS return sysconf(_SC_NPROCESSORS_ONLN); # else -# if defined(CPU_COUNT) cpu_set_t CPUs; CHECK_EQ(sched_getaffinity(0, sizeof(cpu_set_t), &CPUs), 0); return CPU_COUNT(&CPUs); -# else - return 1; -# endif # endif } @@ -945,11 +888,8 @@ extern "C" SANITIZER_WEAK_ATTRIBUTE int __android_log_write(int prio, void WriteOneLineToSyslog(const char *s) { if (&async_safe_write_log) { async_safe_write_log(SANITIZER_ANDROID_LOG_INFO, GetProcessName(), s); - } else if (AndroidGetApiLevel() > ANDROID_KITKAT) { - syslog(LOG_INFO, "%s", s); } else { - CHECK(&__android_log_write); - __android_log_write(SANITIZER_ANDROID_LOG_INFO, nullptr, s); + syslog(LOG_INFO, "%s", s); } } diff --git a/libsanitizer/sanitizer_common/sanitizer_mac.cpp b/libsanitizer/sanitizer_common/sanitizer_mac.cpp index d2144546cb0f..a6f757173728 100644 --- 
a/libsanitizer/sanitizer_common/sanitizer_mac.cpp +++ b/libsanitizer/sanitizer_common/sanitizer_mac.cpp @@ -22,6 +22,11 @@ # endif # include +// Start searching for available memory region past PAGEZERO, which is +// 4KB on 32-bit and 4GB on 64-bit. +# define GAP_SEARCH_START_ADDRESS \ + ((SANITIZER_WORDSIZE == 32) ? 0x000000001000 : 0x000100000000) + # include "sanitizer_common.h" # include "sanitizer_file.h" # include "sanitizer_flags.h" @@ -38,14 +43,7 @@ extern char **environ; # endif -# if defined(__has_include) && __has_include() && defined(__BLOCKS__) -# define SANITIZER_OS_TRACE 1 -# include -# else -# define SANITIZER_OS_TRACE 0 -# endif - -// import new crash reporting api +// Integrate with CrashReporter library if available # if defined(__has_include) && __has_include() # define HAVE_CRASHREPORTERCLIENT_H 1 # include @@ -65,21 +63,15 @@ extern char ***_NSGetArgv(void); # include // for dladdr() # include # include +# include # include # include # include +# include # include # include # include -# if defined(__has_builtin) && __has_builtin(__builtin_os_log_format) -# include -# else - /* Without support for __builtin_os_log_format, fall back to the older - method. 
*/ -# define OS_LOG_DEFAULT 0 -# define os_log_error(A,B,C) \ - asl_log(nullptr, nullptr, ASL_LEVEL_ERR, "%s", (C)); -# endif +# include # include # include # include @@ -111,8 +103,16 @@ extern "C" { natural_t *nesting_depth, vm_region_recurse_info_t info, mach_msg_type_number_t *infoCnt); + + extern const void* _dyld_get_shared_cache_range(size_t* length); } +# if !SANITIZER_GO +// Weak symbol no-op when TSan is not linked +SANITIZER_WEAK_ATTRIBUTE extern void __tsan_set_in_internal_write_call( + bool value) {} +# endif + namespace __sanitizer { #include "sanitizer_syscall_generic.inc" @@ -183,7 +183,15 @@ uptr internal_read(fd_t fd, void *buf, uptr count) { } uptr internal_write(fd_t fd, const void *buf, uptr count) { +# if SANITIZER_GO return write(fd, buf, count); +# else + // We need to disable interceptors when writing in TSan + __tsan_set_in_internal_write_call(true); + uptr res = write(fd, buf, count); + __tsan_set_in_internal_write_call(false); + return res; +# endif } uptr internal_stat(const char *path, void *buf) { @@ -409,8 +417,8 @@ bool DirExists(const char *path) { return S_ISDIR(st.st_mode); } -tid_t GetTid() { - tid_t tid; +ThreadID GetTid() { + ThreadID tid; pthread_threadid_np(nullptr, &tid); return tid; } @@ -784,11 +792,17 @@ void internal_join_thread(void *th) { pthread_join((pthread_t)th, 0); } static Mutex syslog_lock; # endif +# if SANITIZER_DRIVERKIT +# define SANITIZER_OS_LOG os_log +# else +# define SANITIZER_OS_LOG os_log_error +# endif + void WriteOneLineToSyslog(const char *s) { #if !SANITIZER_GO syslog_lock.CheckLocked(); if (GetMacosAlignedVersion() >= MacosVersion(10, 12)) { - os_log_error(OS_LOG_DEFAULT, "%{public}s", s); + SANITIZER_OS_LOG(OS_LOG_DEFAULT, "%{public}s", s); } else { #pragma clang diagnostic push // as_log is deprecated. 
@@ -804,8 +818,13 @@ static char crashreporter_info_buff[__sanitizer::kErrorMessageBufferSize] = {}; static Mutex crashreporter_info_mutex; extern "C" { -// Integrate with crash reporter libraries. + #if HAVE_CRASHREPORTERCLIENT_H +// Available in CRASHREPORTER_ANNOTATIONS_VERSION 5+ +# ifdef CRASHREPORTER_ANNOTATIONS_INITIALIZER +CRASHREPORTER_ANNOTATIONS_INITIALIZER() +# else +// Support for older CrashRerporter annotiations CRASH_REPORTER_CLIENT_HIDDEN struct crashreporter_annotations_t gCRAnnotations __attribute__((section("__DATA," CRASHREPORTER_ANNOTATIONS_SECTION))) = { @@ -816,17 +835,17 @@ struct crashreporter_annotations_t gCRAnnotations 0, 0, 0, -#if CRASHREPORTER_ANNOTATIONS_VERSION > 4 +# if CRASHREPORTER_ANNOTATIONS_VERSION > 4 0, -#endif +# endif }; - -#else -// fall back to old crashreporter api +# endif +# else +// Revert to previous crash reporter API if client header is not available static const char *__crashreporter_info__ __attribute__((__used__)) = &crashreporter_info_buff[0]; asm(".desc ___crashreporter_info__, 0x10"); -#endif +#endif // HAVE_CRASHREPORTERCLIENT_H } // extern "C" @@ -846,31 +865,23 @@ void LogMessageOnPrintf(const char *str) { } void LogFullErrorReport(const char *buffer) { -#if !SANITIZER_GO - // Log with os_trace. This will make it into the crash log. -#if SANITIZER_OS_TRACE -#pragma clang diagnostic push -// os_trace is deprecated. -#pragma clang diagnostic ignored "-Wdeprecated-declarations" - if (GetMacosAlignedVersion() >= MacosVersion(10, 10)) { - // os_trace requires the message (format parameter) to be a string literal. 
- if (internal_strncmp(SanitizerToolName, "AddressSanitizer", - sizeof("AddressSanitizer") - 1) == 0) - os_trace("Address Sanitizer reported a failure."); - else if (internal_strncmp(SanitizerToolName, "UndefinedBehaviorSanitizer", - sizeof("UndefinedBehaviorSanitizer") - 1) == 0) - os_trace("Undefined Behavior Sanitizer reported a failure."); - else if (internal_strncmp(SanitizerToolName, "ThreadSanitizer", - sizeof("ThreadSanitizer") - 1) == 0) - os_trace("Thread Sanitizer reported a failure."); - else - os_trace("Sanitizer tool reported a failure."); - - if (common_flags()->log_to_syslog) - os_trace("Consult syslog for more information."); - } -#pragma clang diagnostic pop -#endif +# if !SANITIZER_GO + // When logging with os_log_error this will make it into the crash log. + if (internal_strncmp(SanitizerToolName, "AddressSanitizer", + sizeof("AddressSanitizer") - 1) == 0) + SANITIZER_OS_LOG(OS_LOG_DEFAULT, "Address Sanitizer reported a failure."); + else if (internal_strncmp(SanitizerToolName, "UndefinedBehaviorSanitizer", + sizeof("UndefinedBehaviorSanitizer") - 1) == 0) + SANITIZER_OS_LOG(OS_LOG_DEFAULT, + "Undefined Behavior Sanitizer reported a failure."); + else if (internal_strncmp(SanitizerToolName, "ThreadSanitizer", + sizeof("ThreadSanitizer") - 1) == 0) + SANITIZER_OS_LOG(OS_LOG_DEFAULT, "Thread Sanitizer reported a failure."); + else + SANITIZER_OS_LOG(OS_LOG_DEFAULT, "Sanitizer tool reported a failure."); + + if (common_flags()->log_to_syslog) + SANITIZER_OS_LOG(OS_LOG_DEFAULT, "Consult syslog for more information."); // Log to syslog. // The logging on OS X may call pthread_create so we need the threading @@ -884,7 +895,7 @@ void LogFullErrorReport(const char *buffer) { WriteToSyslog(buffer); // The report is added to CrashLog as part of logging all of Printf output. 
-#endif +# endif // !SANITIZER_GO } SignalContext::WriteFlag SignalContext::GetWriteFlag() const { @@ -951,7 +962,17 @@ static void DisableMmapExcGuardExceptions() { RTLD_DEFAULT, "task_set_exc_guard_behavior"); if (set_behavior == nullptr) return; const task_exc_guard_behavior_t task_exc_guard_none = 0; - set_behavior(mach_task_self(), task_exc_guard_none); + kern_return_t res = set_behavior(mach_task_self(), task_exc_guard_none); + if (res != KERN_SUCCESS) { + Report( + "WARN: task_set_exc_guard_behavior returned %d (%s), " + "mmap may fail unexpectedly.\n", + res, mach_error_string(res)); + if (res == KERN_DENIED) + Report( + "HINT: Check that task_set_exc_guard_behavior is allowed by " + "sandbox.\n"); + } } static void VerifyInterceptorsWorking(); @@ -980,8 +1001,9 @@ static const char kDyldInsertLibraries[] = "DYLD_INSERT_LIBRARIES"; LowLevelAllocator allocator_for_env; static bool ShouldCheckInterceptors() { - // Restrict "interceptors working?" check to ASan and TSan. - const char *sanitizer_names[] = {"AddressSanitizer", "ThreadSanitizer"}; + // Restrict "interceptors working?" check + const char *sanitizer_names[] = {"AddressSanitizer", "ThreadSanitizer", + "RealtimeSanitizer"}; size_t count = sizeof(sanitizer_names) / sizeof(sanitizer_names[0]); for (size_t i = 0; i < count; i++) { if (internal_strcmp(sanitizer_names[i], SanitizerToolName) == 0) @@ -1117,6 +1139,67 @@ static void StripEnv() { } #endif // SANITIZER_GO +// Prints out a consolidated memory map: contiguous regions +// are merged together. 
+static void PrintVmmap() { + const mach_vm_address_t max_vm_address = GetMaxVirtualAddress() + 1; + mach_vm_address_t address = GAP_SEARCH_START_ADDRESS; + kern_return_t kr = KERN_SUCCESS; + + Report("Memory map:\n"); + mach_vm_address_t last = 0; + mach_vm_address_t lastsz = 0; + + while (1) { + mach_vm_size_t vmsize = 0; + natural_t depth = 0; + vm_region_submap_short_info_data_64_t vminfo; + mach_msg_type_number_t count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64; + kr = mach_vm_region_recurse(mach_task_self(), &address, &vmsize, &depth, + (vm_region_info_t)&vminfo, &count); + + if (kr == KERN_DENIED) { + Report( + "ERROR: mach_vm_region_recurse got KERN_DENIED when printing memory " + "map.\n"); + Report( + "HINT: Check whether mach_vm_region_recurse is allowed by " + "sandbox.\n"); + } + + if (kr == KERN_SUCCESS && address < max_vm_address) { + if (last + lastsz == address) { + // This region is contiguous with the last; merge together. + lastsz += vmsize; + } else { + if (lastsz) + Printf("|| `[%p, %p]` || size=0x%016" PRIx64 " ||\n", (void*)last, + (void*)(last + lastsz), lastsz); + + last = address; + lastsz = vmsize; + } + address += vmsize; + } else { + // We've reached the end of the memory map. Print the last remaining + // region, if there is one. + if (lastsz) + Printf("|| `[%p, %p]` || size=0x%016" PRIx64 " ||\n", (void*)last, + (void*)(last + lastsz), lastsz); + + break; + } + } +} + +static void ReportShadowAllocFail(uptr shadow_size_bytes, uptr alignment) { + Report( + "FATAL: Failed to allocate shadow memory. 
Tried to allocate %p bytes " + "(alignment=%p).\n", + (void*)shadow_size_bytes, (void*)alignment); + PrintVmmap(); +} + char **GetArgv() { return *_NSGetArgv(); } @@ -1205,13 +1288,14 @@ uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale, const uptr left_padding = Max(granularity, 1ULL << min_shadow_base_alignment); - uptr space_size = shadow_size_bytes + left_padding; + uptr space_size = shadow_size_bytes; uptr largest_gap_found = 0; uptr max_occupied_addr = 0; + VReport(2, "FindDynamicShadowStart, space_size = %p\n", (void *)space_size); uptr shadow_start = - FindAvailableMemoryRange(space_size, alignment, granularity, + FindAvailableMemoryRange(space_size, alignment, left_padding, &largest_gap_found, &max_occupied_addr); // If the shadow doesn't fit, restrict the address space to make it fit. if (shadow_start == 0) { @@ -1223,20 +1307,22 @@ uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale, if (new_max_vm < max_occupied_addr) { Report("Unable to find a memory range for dynamic shadow.\n"); Report( - "space_size = %p, largest_gap_found = %p, max_occupied_addr = %p, " - "new_max_vm = %p\n", - (void *)space_size, (void *)largest_gap_found, - (void *)max_occupied_addr, (void *)new_max_vm); + "\tspace_size = %p\n\tlargest_gap_found = %p\n\tmax_occupied_addr " + "= %p\n\tnew_max_vm = %p\n", + (void*)space_size, (void*)largest_gap_found, (void*)max_occupied_addr, + (void*)new_max_vm); + ReportShadowAllocFail(shadow_size_bytes, alignment); CHECK(0 && "cannot place shadow"); } RestrictMemoryToMaxAddress(new_max_vm); high_mem_end = new_max_vm - 1; - space_size = (high_mem_end >> shadow_scale) + left_padding; + space_size = (high_mem_end >> shadow_scale); VReport(2, "FindDynamicShadowStart, space_size = %p\n", (void *)space_size); - shadow_start = FindAvailableMemoryRange(space_size, alignment, granularity, + shadow_start = FindAvailableMemoryRange(space_size, alignment, left_padding, nullptr, nullptr); if (shadow_start == 0) { Report("Unable to 
find a memory range after restricting VM.\n"); + ReportShadowAllocFail(shadow_size_bytes, alignment); CHECK(0 && "cannot place shadow after restricting vm"); } } @@ -1252,35 +1338,51 @@ uptr MapDynamicShadowAndAliases(uptr shadow_size, uptr alias_size, } uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding, - uptr *largest_gap_found, - uptr *max_occupied_addr) { - typedef vm_region_submap_short_info_data_64_t RegionInfo; - enum { kRegionInfoSize = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64 }; - // Start searching for available memory region past PAGEZERO, which is - // 4KB on 32-bit and 4GB on 64-bit. - mach_vm_address_t start_address = - (SANITIZER_WORDSIZE == 32) ? 0x000000001000 : 0x000100000000; - + uptr* largest_gap_found, + uptr* max_occupied_addr) { const mach_vm_address_t max_vm_address = GetMaxVirtualAddress() + 1; - mach_vm_address_t address = start_address; - mach_vm_address_t free_begin = start_address; + mach_vm_address_t address = GAP_SEARCH_START_ADDRESS; + mach_vm_address_t free_begin = GAP_SEARCH_START_ADDRESS; kern_return_t kr = KERN_SUCCESS; if (largest_gap_found) *largest_gap_found = 0; if (max_occupied_addr) *max_occupied_addr = 0; while (kr == KERN_SUCCESS) { mach_vm_size_t vmsize = 0; natural_t depth = 0; - RegionInfo vminfo; - mach_msg_type_number_t count = kRegionInfoSize; + vm_region_submap_short_info_data_64_t vminfo; + mach_msg_type_number_t count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64; kr = mach_vm_region_recurse(mach_task_self(), &address, &vmsize, &depth, (vm_region_info_t)&vminfo, &count); - if (kr == KERN_INVALID_ADDRESS) { + + if (kr == KERN_SUCCESS) { + // There are cases where going beyond the processes' max vm does + // not return KERN_INVALID_ADDRESS so we check for going beyond that + // max address as well. + if (address > max_vm_address) { + address = max_vm_address; + kr = -1; // break after this iteration. 
+ } + + if (max_occupied_addr) + *max_occupied_addr = address + vmsize; + } else if (kr == KERN_INVALID_ADDRESS) { // No more regions beyond "address", consider the gap at the end of VM. address = max_vm_address; - vmsize = 0; + + // We will break after this iteration anyway since kr != KERN_SUCCESS + } else if (kr == KERN_DENIED) { + Report("ERROR: Unable to find a memory range for dynamic shadow.\n"); + Report("HINT: Ensure mach_vm_region_recurse is allowed under sandbox.\n"); + Die(); } else { - if (max_occupied_addr) *max_occupied_addr = address + vmsize; + Report( + "WARNING: mach_vm_region_recurse returned unexpected code %d (%s)\n", + kr, mach_error_string(kr)); + DCHECK(false && "mach_vm_region_recurse returned unexpected code"); + break; // address is not valid unless KERN_SUCCESS, therefore we must not + // use it. } + if (free_begin != address) { // We found a free region [free_begin..address-1]. uptr gap_start = RoundUpTo((uptr)free_begin + left_padding, alignment); @@ -1303,6 +1405,58 @@ uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding, return 0; } +// This function (when used during initialization when there is +// only a single thread), can be used to verify that a range +// of memory hasn't already been mapped, and won't be mapped +// later in the shared cache. +// +// If the syscall mach_vm_region_recurse fails (due to sandbox), +// we assume that the memory is not mapped so that execution can continue. +// +// NOTE: range_end is inclusive +// +// WARNING: This function must NOT allocate memory, since it is +// used in InitializeShadowMemory between where we search for +// space for shadow and where we actually allocate it. 
+bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) { + mach_vm_size_t vmsize = 0; + natural_t depth = 0; + vm_region_submap_short_info_data_64_t vminfo; + mach_msg_type_number_t count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64; + mach_vm_address_t address = range_start; + + // First, check if the range is already mapped. + kern_return_t kr = + mach_vm_region_recurse(mach_task_self(), &address, &vmsize, &depth, + (vm_region_info_t)&vminfo, &count); + + if (kr == KERN_DENIED) { + Report( + "WARN: mach_vm_region_recurse returned KERN_DENIED when checking " + "whether an address is mapped.\n"); + Report("HINT: Is mach_vm_region_recurse allowed by sandbox?\n"); + } + + if (kr == KERN_SUCCESS && !IntervalsAreSeparate(address, address + vmsize - 1, + range_start, range_end)) { + // Overlaps with already-mapped memory + return false; + } + + size_t cacheLength; + uptr cacheStart = (uptr)_dyld_get_shared_cache_range(&cacheLength); + + if (cacheStart && + !IntervalsAreSeparate(cacheStart, cacheStart + cacheLength - 1, + range_start, range_end)) { + // Overlaps with shared cache region + return false; + } + + // We believe this address is available. + return true; +} + // FIXME implement on this platform. void GetMemoryProfile(fill_profile_f cb, uptr *stats) {} diff --git a/libsanitizer/sanitizer_common/sanitizer_mac.h b/libsanitizer/sanitizer_common/sanitizer_mac.h index 1cf2e298cc91..b0e4ac7f4074 100644 --- a/libsanitizer/sanitizer_common/sanitizer_mac.h +++ b/libsanitizer/sanitizer_common/sanitizer_mac.h @@ -14,26 +14,6 @@ #include "sanitizer_common.h" #include "sanitizer_platform.h" - -/* TARGET_OS_OSX is not present in SDKs before Darwin16 (macOS 10.12) use - TARGET_OS_MAC (we have no support for iOS in any form for these versions, - so there's no ambiguity). 
*/ -#if !defined(TARGET_OS_OSX) && TARGET_OS_MAC -# define TARGET_OS_OSX 1 -#endif - -/* Other TARGET_OS_xxx are not present on earlier versions, define them to - 0 (we have no support for them; they are not valid targets anyway). */ -#ifndef TARGET_OS_IOS -#define TARGET_OS_IOS 0 -#endif -#ifndef TARGET_OS_TV -#define TARGET_OS_TV 0 -#endif -#ifndef TARGET_OS_WATCH -#define TARGET_OS_WATCH 0 -#endif - #if SANITIZER_APPLE #include "sanitizer_posix.h" @@ -57,9 +37,6 @@ struct VersionBase { VersionBase(u16 major, u16 minor) : major(major), minor(minor) {} - bool operator==(const VersionType &other) const { - return major == other.major && minor == other.minor; - } bool operator>=(const VersionType &other) const { return major > other.major || (major == other.major && minor >= other.minor); @@ -67,6 +44,12 @@ struct VersionBase { bool operator<(const VersionType &other) const { return !(*this >= other); } }; +template +bool operator==(const VersionBase &self, + const VersionBase &other) { + return self.major == other.major && self.minor == other.minor; +} + struct MacosVersion : VersionBase { MacosVersion(u16 major, u16 minor) : VersionBase(major, minor) {} }; diff --git a/libsanitizer/sanitizer_common/sanitizer_malloc_mac.inc b/libsanitizer/sanitizer_common/sanitizer_malloc_mac.inc index 6343eb284afb..be27584f2053 100644 --- a/libsanitizer/sanitizer_common/sanitizer_malloc_mac.inc +++ b/libsanitizer/sanitizer_common/sanitizer_malloc_mac.inc @@ -144,6 +144,22 @@ INTERCEPTOR(void, free, void *ptr) { COMMON_MALLOC_FREE(ptr); } +#if SANITIZER_INTERCEPT_FREE_SIZED && defined(COMMON_MALLOC_FREE_SIZED) +INTERCEPTOR(void, free_sized, void *ptr, size_t size) { + COMMON_MALLOC_ENTER(); + COMMON_MALLOC_FREE_SIZED(ptr, size); +} +#endif + +#if SANITIZER_INTERCEPT_FREE_ALIGNED_SIZED && \ + defined(COMMON_MALLOC_FREE_ALIGNED_SIZED) +INTERCEPTOR(void, free_aligned_sized, void *ptr, size_t alignment, + size_t size) { + COMMON_MALLOC_ENTER(); + COMMON_MALLOC_FREE_ALIGNED_SIZED(ptr, 
alignment, size); +} +#endif + INTERCEPTOR(void *, realloc, void *ptr, size_t size) { COMMON_MALLOC_ENTER(); COMMON_MALLOC_REALLOC(ptr, size); diff --git a/libsanitizer/sanitizer_common/sanitizer_netbsd.cpp b/libsanitizer/sanitizer_common/sanitizer_netbsd.cpp index 5e601bdcde1e..737e336dfbe8 100644 --- a/libsanitizer/sanitizer_common/sanitizer_netbsd.cpp +++ b/libsanitizer/sanitizer_common/sanitizer_netbsd.cpp @@ -229,12 +229,12 @@ uptr internal_execve(const char *filename, char *const argv[], return _sys_execve(filename, argv, envp); } -tid_t GetTid() { +ThreadID GetTid() { DEFINE__REAL(int, _lwp_self); return _REAL(_lwp_self); } -int TgKill(pid_t pid, tid_t tid, int sig) { +int TgKill(pid_t pid, ThreadID tid, int sig) { DEFINE__REAL(int, _lwp_kill, int a, int b); (void)pid; return _REAL(_lwp_kill, tid, sig); diff --git a/libsanitizer/sanitizer_common/sanitizer_platform.h b/libsanitizer/sanitizer_common/sanitizer_platform.h index 57966403c92a..13099fe84b0a 100644 --- a/libsanitizer/sanitizer_common/sanitizer_platform.h +++ b/libsanitizer/sanitizer_common/sanitizer_platform.h @@ -14,7 +14,8 @@ #if !defined(__linux__) && !defined(__FreeBSD__) && !defined(__NetBSD__) && \ !defined(__APPLE__) && !defined(_WIN32) && !defined(__Fuchsia__) && \ - !(defined(__sun__) && defined(__svr4__)) + !(defined(__sun__) && defined(__svr4__)) && !defined(__HAIKU__) && \ + !defined(__wasi__) # error "This operating system is not supported" #endif @@ -55,6 +56,18 @@ # define SANITIZER_SOLARIS 0 #endif +#if defined(__HAIKU__) +# define SANITIZER_HAIKU 1 +#else +# define SANITIZER_HAIKU 0 +#endif + +#if defined(__wasi__) +# define SANITIZER_WASI 1 +#else +# define SANITIZER_WASI 0 +#endif + // - SANITIZER_APPLE: all Apple code // - TARGET_OS_OSX: macOS // - SANITIZER_IOS: devices (iOS and iOS-like) @@ -136,9 +149,9 @@ # define SANITIZER_MUSL 0 #endif -#define SANITIZER_POSIX \ +#define SANITIZER_POSIX \ (SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_APPLE || \ - SANITIZER_NETBSD || 
SANITIZER_SOLARIS) + SANITIZER_NETBSD || SANITIZER_SOLARIS || SANITIZER_HAIKU) #if __LP64__ || defined(_WIN64) # define SANITIZER_WORDSIZE 64 @@ -305,6 +318,9 @@ # endif #endif +// The first address that can be returned by mmap. +#define SANITIZER_MMAP_BEGIN 0 + // The range of addresses which can be returned my mmap. // FIXME: this value should be different on different platforms. Larger values // will still work but will consume more memory for TwoLevelByteMap. @@ -410,7 +426,8 @@ # define SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT 0 #endif -#if SANITIZER_FREEBSD || SANITIZER_APPLE || SANITIZER_NETBSD || SANITIZER_SOLARIS +#if SANITIZER_FREEBSD || SANITIZER_APPLE || SANITIZER_NETBSD || \ + SANITIZER_SOLARIS || SANITIZER_HAIKU # define SANITIZER_MADVISE_DONTNEED MADV_FREE #else # define SANITIZER_MADVISE_DONTNEED MADV_DONTNEED @@ -465,4 +482,19 @@ # define SANITIZER_START_BACKGROUND_THREAD_IN_ASAN_INTERNAL 0 #endif +#if SANITIZER_LINUX +# if SANITIZER_GLIBC +// Workaround for +// glibc/commit/3d3572f59059e2b19b8541ea648a6172136ec42e +// Linux: Keep termios ioctl constants strictly internal +# if __GLIBC_PREREQ(2, 41) +# define SANITIZER_TERMIOS_IOCTL_CONSTANTS 0 +# else +# define SANITIZER_TERMIOS_IOCTL_CONSTANTS 1 +# endif +# else +# define SANITIZER_TERMIOS_IOCTL_CONSTANTS 1 +# endif +#endif + #endif // SANITIZER_PLATFORM_H diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h b/libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h index 3fd6b595ef19..ed60671014d5 100644 --- a/libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h +++ b/libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h @@ -129,8 +129,10 @@ SANITIZER_WEAK_IMPORT void *aligned_alloc(__sanitizer::usize __alignment, #if SANITIZER_FUCHSIA #define SI_NOT_FUCHSIA 0 +#define SI_FUCHSIA 1 #else #define SI_NOT_FUCHSIA 1 +#define SI_FUCHSIA 0 #endif #if SANITIZER_SOLARIS @@ -139,6 +141,12 @@ SANITIZER_WEAK_IMPORT void *aligned_alloc(__sanitizer::usize 
__alignment, #define SI_SOLARIS 0 #endif +#if SANITIZER_AIX +# define SI_NOT_AIX 0 +#else +# define SI_NOT_AIX 1 +#endif + #if SANITIZER_SOLARIS32 #define SI_SOLARIS32 1 #else @@ -159,20 +167,20 @@ SANITIZER_WEAK_IMPORT void *aligned_alloc(__sanitizer::usize __alignment, #define SANITIZER_INTERCEPT_STRLEN SI_NOT_FUCHSIA #define SANITIZER_INTERCEPT_STRNLEN (SI_NOT_MAC && SI_NOT_FUCHSIA) -#define SANITIZER_INTERCEPT_STRCMP SI_NOT_FUCHSIA +#define SANITIZER_INTERCEPT_STRCMP (SI_NOT_FUCHSIA && SI_NOT_AIX) #define SANITIZER_INTERCEPT_STRSTR SI_NOT_FUCHSIA -#define SANITIZER_INTERCEPT_STRCASESTR SI_POSIX +#define SANITIZER_INTERCEPT_STRCASESTR (SI_POSIX && SI_NOT_AIX) #define SANITIZER_INTERCEPT_STRTOK SI_NOT_FUCHSIA #define SANITIZER_INTERCEPT_STRCHR SI_NOT_FUCHSIA -#define SANITIZER_INTERCEPT_STRCHRNUL SI_POSIX_NOT_MAC +#define SANITIZER_INTERCEPT_STRCHRNUL (SI_POSIX_NOT_MAC && SI_NOT_AIX) #define SANITIZER_INTERCEPT_STRRCHR SI_NOT_FUCHSIA #define SANITIZER_INTERCEPT_STRSPN SI_NOT_FUCHSIA #define SANITIZER_INTERCEPT_STRPBRK SI_NOT_FUCHSIA #define SANITIZER_INTERCEPT_TEXTDOMAIN SI_LINUX_NOT_ANDROID || SI_SOLARIS #define SANITIZER_INTERCEPT_STRCASECMP SI_POSIX #define SANITIZER_INTERCEPT_MEMSET 1 -#define SANITIZER_INTERCEPT_MEMMOVE 1 -#define SANITIZER_INTERCEPT_MEMCPY 1 +#define SANITIZER_INTERCEPT_MEMMOVE SI_NOT_AIX +#define SANITIZER_INTERCEPT_MEMCPY SI_NOT_AIX #define SANITIZER_INTERCEPT_MEMCMP SI_NOT_FUCHSIA #define SANITIZER_INTERCEPT_BCMP \ SANITIZER_INTERCEPT_MEMCMP && \ @@ -210,6 +218,8 @@ SANITIZER_WEAK_IMPORT void *aligned_alloc(__sanitizer::usize __alignment, #define SANITIZER_INTERCEPT_PREAD64 (SI_GLIBC || SI_SOLARIS32) #define SANITIZER_INTERCEPT_PWRITE64 (SI_GLIBC || SI_SOLARIS32) +#define SANITIZER_INTERCEPT_LSEEK64 (SI_GLIBC || SI_SOLARIS32) + #define SANITIZER_INTERCEPT_READV SI_POSIX #define SANITIZER_INTERCEPT_WRITEV SI_POSIX @@ -229,16 +239,22 @@ SANITIZER_WEAK_IMPORT void *aligned_alloc(__sanitizer::usize __alignment, #define 
SANITIZER_INTERCEPT_ISOC99_SCANF SI_GLIBC #ifndef SANITIZER_INTERCEPT_PRINTF -#define SANITIZER_INTERCEPT_PRINTF SI_POSIX -#define SANITIZER_INTERCEPT_PRINTF_L (SI_FREEBSD || SI_NETBSD) -#define SANITIZER_INTERCEPT_ISOC99_PRINTF SI_GLIBC +# define SANITIZER_INTERCEPT_ASPRINTF SI_NOT_AIX +# define SANITIZER_INTERCEPT_VASPRINTF SI_NOT_AIX +# define SANITIZER_INTERCEPT_PRINTF SI_POSIX +# define SANITIZER_INTERCEPT_PRINTF_L (SI_FREEBSD || SI_NETBSD) +# define SANITIZER_INTERCEPT_ISOC99_PRINTF SI_GLIBC #endif +#define SANITIZER_INTERCEPT_SETPROCTITLE (SI_FREEBSD || SI_NETBSD) + #define SANITIZER_INTERCEPT___PRINTF_CHK \ (SANITIZER_INTERCEPT_PRINTF && SI_GLIBC) -#define SANITIZER_INTERCEPT_FREXP SI_NOT_FUCHSIA -#define SANITIZER_INTERCEPT_FREXPF_FREXPL SI_POSIX +// AIX libc does not export FREXP and FREXPF. +#define SANITIZER_INTERCEPT_FREXP (SI_NOT_FUCHSIA && SI_NOT_AIX) +#define SANITIZER_INTERCEPT_FREXPF (SI_POSIX && SI_NOT_AIX) +#define SANITIZER_INTERCEPT_FREXPL SI_POSIX #define SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS SI_POSIX #define SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS \ @@ -256,8 +272,12 @@ SANITIZER_WEAK_IMPORT void *aligned_alloc(__sanitizer::usize __alignment, (SI_FREEBSD || SI_NETBSD || SI_LINUX || SI_SOLARIS) #define SANITIZER_INTERCEPT_CLOCK_GETCPUCLOCKID \ (SI_LINUX || SI_FREEBSD || SI_NETBSD) +// TODO: This should be SI_POSIX, adding glibc first until I have time +// to verify all timer_t typedefs on other platforms. 
+#define SANITIZER_INTERCEPT_TIMER_CREATE SI_GLIBC #define SANITIZER_INTERCEPT_GETITIMER SI_POSIX #define SANITIZER_INTERCEPT_TIME SI_POSIX +#define SANITIZER_INTERCEPT_TIMESPEC_GET SI_LINUX #define SANITIZER_INTERCEPT_GLOB (SI_GLIBC || SI_SOLARIS) #define SANITIZER_INTERCEPT_GLOB64 SI_GLIBC #define SANITIZER_INTERCEPT___B64_TO SI_LINUX_NOT_ANDROID @@ -283,7 +303,7 @@ SANITIZER_WEAK_IMPORT void *aligned_alloc(__sanitizer::usize __alignment, #define SANITIZER_INTERCEPT_ACCEPT4 \ (SI_LINUX_NOT_ANDROID || SI_NETBSD || SI_FREEBSD) #define SANITIZER_INTERCEPT_PACCEPT SI_NETBSD -#define SANITIZER_INTERCEPT_MODF SI_POSIX +#define SANITIZER_INTERCEPT_MODF (SI_POSIX && SI_NOT_AIX) #define SANITIZER_INTERCEPT_RECVMSG SI_POSIX #define SANITIZER_INTERCEPT_SENDMSG SI_POSIX #define SANITIZER_INTERCEPT_RECVMMSG SI_LINUX @@ -318,8 +338,9 @@ SANITIZER_WEAK_IMPORT void *aligned_alloc(__sanitizer::usize __alignment, #define SANITIZER_INTERCEPT___WCSXFRM_L SI_LINUX #define SANITIZER_INTERCEPT_WCSNRTOMBS \ (SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS) -#define SANITIZER_INTERCEPT_WCRTOMB \ - (SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS) +#define SANITIZER_INTERCEPT_WCRTOMB \ + (SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS || \ + !SI_NOT_AIX) #define SANITIZER_INTERCEPT_WCTOMB \ (SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS) #define SANITIZER_INTERCEPT_TCGETATTR SI_LINUX_NOT_ANDROID || SI_SOLARIS @@ -339,6 +360,8 @@ SANITIZER_WEAK_IMPORT void *aligned_alloc(__sanitizer::usize __alignment, #define SANITIZER_INTERCEPT_GETGROUPS SI_POSIX #define SANITIZER_INTERCEPT_POLL SI_POSIX #define SANITIZER_INTERCEPT_PPOLL SI_LINUX_NOT_ANDROID || SI_SOLARIS +#define SANITIZER_INTERCEPT_EPOLL (SI_LINUX) +#define SANITIZER_INTERCEPT_KQUEUE (SI_FREEBSD || SI_NETBSD || SI_MAC) #define SANITIZER_INTERCEPT_WORDEXP \ (SI_FREEBSD || SI_NETBSD || (SI_MAC && !SI_IOS) || SI_LINUX_NOT_ANDROID || \ 
SI_SOLARIS) @@ -357,7 +380,8 @@ SANITIZER_WEAK_IMPORT void *aligned_alloc(__sanitizer::usize __alignment, #define SANITIZER_INTERCEPT_GETMNTENT_R SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_STATFS \ (SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS) -#define SANITIZER_INTERCEPT_STATFS64 SI_GLIBC && SANITIZER_HAS_STATFS64 +#define SANITIZER_INTERCEPT_STATFS64 \ + ((SI_GLIBC || !SI_NOT_AIX) && SANITIZER_HAS_STATFS64) #define SANITIZER_INTERCEPT_STATVFS \ (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID) #define SANITIZER_INTERCEPT_STATVFS64 SI_GLIBC @@ -406,10 +430,10 @@ SANITIZER_WEAK_IMPORT void *aligned_alloc(__sanitizer::usize __alignment, #define SANITIZER_INTERCEPT_TTYNAME_R SI_POSIX #define SANITIZER_INTERCEPT_TEMPNAM SI_POSIX #define SANITIZER_INTERCEPT_SINCOS SI_LINUX || SI_SOLARIS -#define SANITIZER_INTERCEPT_REMQUO SI_POSIX -#define SANITIZER_INTERCEPT_REMQUOL (SI_POSIX && !SI_NETBSD) -#define SANITIZER_INTERCEPT_LGAMMA SI_POSIX -#define SANITIZER_INTERCEPT_LGAMMAL (SI_POSIX && !SI_NETBSD) +#define SANITIZER_INTERCEPT_REMQUO (SI_POSIX && SI_NOT_AIX) +#define SANITIZER_INTERCEPT_REMQUOL (SI_POSIX && !SI_NETBSD && SI_NOT_AIX) +#define SANITIZER_INTERCEPT_LGAMMA (SI_POSIX && SI_NOT_AIX) +#define SANITIZER_INTERCEPT_LGAMMAL (SI_POSIX && !SI_NETBSD && SI_NOT_AIX) #define SANITIZER_INTERCEPT_LGAMMA_R (SI_FREEBSD || SI_LINUX || SI_SOLARIS) #define SANITIZER_INTERCEPT_LGAMMAL_R SI_LINUX_NOT_ANDROID || SI_SOLARIS #define SANITIZER_INTERCEPT_DRAND48_R SI_GLIBC @@ -492,11 +516,13 @@ SANITIZER_WEAK_IMPORT void *aligned_alloc(__sanitizer::usize __alignment, #define SANITIZER_INTERCEPT_EVENTFD_READ_WRITE (SI_LINUX || SI_FREEBSD) #define SI_STAT_LINUX (SI_LINUX && __GLIBC_PREREQ(2, 33)) -#define SANITIZER_INTERCEPT_STAT \ - (SI_FREEBSD || SI_MAC || SI_ANDROID || SI_NETBSD || SI_SOLARIS || \ - SI_STAT_LINUX) -#define SANITIZER_INTERCEPT_STAT64 SI_STAT_LINUX && SANITIZER_HAS_STAT64 -#define SANITIZER_INTERCEPT_LSTAT (SI_NETBSD || SI_FREEBSD || SI_STAT_LINUX) 
+#define SANITIZER_INTERCEPT_STAT \ + (SI_FREEBSD || SI_MAC || SI_ANDROID || SI_NETBSD || SI_SOLARIS || \ + SI_STAT_LINUX || !SI_NOT_AIX) +#define SANITIZER_INTERCEPT_STAT64 \ + ((SI_STAT_LINUX || !SI_NOT_AIX) && SANITIZER_HAS_STAT64) +#define SANITIZER_INTERCEPT_LSTAT \ + (SI_NETBSD || SI_FREEBSD || SI_STAT_LINUX || !SI_NOT_AIX) #define SANITIZER_INTERCEPT___XSTAT \ ((!SANITIZER_INTERCEPT_STAT && SI_POSIX) || SI_STAT_LINUX) #define SANITIZER_INTERCEPT___XSTAT64 SI_GLIBC @@ -513,7 +539,8 @@ SANITIZER_WEAK_IMPORT void *aligned_alloc(__sanitizer::usize __alignment, #define SANITIZER_INTERCEPT_MMAP SI_POSIX #define SANITIZER_INTERCEPT_MMAP64 SI_GLIBC || SI_SOLARIS -#define SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO (SI_GLIBC || SI_ANDROID) +#define SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO \ + (SI_GLIBC || SI_ANDROID || SI_FUCHSIA) #define SANITIZER_INTERCEPT_MEMALIGN (!SI_FREEBSD && !SI_MAC && !SI_NETBSD) #define SANITIZER_INTERCEPT___LIBC_MEMALIGN SI_GLIBC #define SANITIZER_INTERCEPT_PVALLOC (SI_GLIBC || SI_ANDROID) @@ -524,7 +551,8 @@ SANITIZER_WEAK_IMPORT void *aligned_alloc(__sanitizer::usize __alignment, #define SANITIZER_INTERCEPT_MALLOC_USABLE_SIZE (!SI_MAC && !SI_NETBSD) #define SANITIZER_INTERCEPT_MCHECK_MPROBE SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_WCSLEN 1 -#define SANITIZER_INTERCEPT_WCSCAT SI_POSIX +#define SANITIZER_INTERCEPT_WCSNLEN 1 +#define SANITIZER_INTERCEPT_WCSCAT (SI_POSIX || SI_WINDOWS) #define SANITIZER_INTERCEPT_WCSDUP SI_POSIX #define SANITIZER_INTERCEPT_SIGNAL_AND_SIGACTION (!SI_WINDOWS && SI_NOT_FUCHSIA) #define SANITIZER_INTERCEPT_BSD_SIGNAL SI_ANDROID @@ -564,7 +592,7 @@ SANITIZER_WEAK_IMPORT void *aligned_alloc(__sanitizer::usize __alignment, #define SANITIZER_INTERCEPT_PROTOENT_R SI_GLIBC #define SANITIZER_INTERCEPT_NETENT (SI_LINUX || SI_NETBSD || SI_FREEBSD) #define SANITIZER_INTERCEPT_SETVBUF \ - (SI_NETBSD || SI_FREEBSD || SI_LINUX || SI_MAC) + (SI_NETBSD || SI_FREEBSD || SI_LINUX || SI_MAC || !SI_NOT_AIX) #define 
SANITIZER_INTERCEPT_GETMNTINFO (SI_NETBSD || SI_FREEBSD || SI_MAC) #define SANITIZER_INTERCEPT_MI_VECTOR_HASH SI_NETBSD #define SANITIZER_INTERCEPT_GETVFSSTAT SI_NETBSD @@ -585,7 +613,7 @@ SANITIZER_WEAK_IMPORT void *aligned_alloc(__sanitizer::usize __alignment, #define SANITIZER_INTERCEPT_SHA1 SI_NETBSD #define SANITIZER_INTERCEPT_MD4 SI_NETBSD #define SANITIZER_INTERCEPT_RMD160 SI_NETBSD -#define SANITIZER_INTERCEPT_FSEEK (SI_NETBSD || SI_FREEBSD) +#define SANITIZER_INTERCEPT_FSEEK SI_POSIX #define SANITIZER_INTERCEPT_MD2 SI_NETBSD #define SANITIZER_INTERCEPT_CDB SI_NETBSD #define SANITIZER_INTERCEPT_VIS (SI_NETBSD || SI_FREEBSD) @@ -632,6 +660,21 @@ SANITIZER_WEAK_IMPORT void *aligned_alloc(__sanitizer::usize __alignment, # define SI_MAC_OS_DEPLOYMENT_MIN_13_00 0 #endif #define SANITIZER_INTERCEPT_FREADLINK (SI_MAC && SI_MAC_OS_DEPLOYMENT_MIN_13_00) +#define SANITIZER_INTERCEPT_GETSERVENT_R SI_GLIBC +#define SANITIZER_INTERCEPT_GETSERVBYNAME_R SI_GLIBC +#define SANITIZER_INTERCEPT_GETSERVBYPORT_R SI_GLIBC + +// Until free_sized and free_aligned_sized are more generally available, +// we can only unconditionally intercept on ELF-based platforms where it +// is okay to have undefined weak symbols. +#ifdef __ELF__ +# define SANITIZER_INTERCEPT_FREE_SIZED 1 +# define SANITIZER_INTERCEPT_FREE_ALIGNED_SIZED 1 +#else +# define SANITIZER_INTERCEPT_FREE_SIZED 0 +# define SANITIZER_INTERCEPT_FREE_ALIGNED_SIZED 0 +#endif + // This macro gives a way for downstream users to override the above // interceptor macros irrespective of the platform they are on. 
They have // to do two things: diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_limits_freebsd.cpp b/libsanitizer/sanitizer_common/sanitizer_platform_limits_freebsd.cpp index 4940062eeae4..c4fa1e3c1f6f 100644 --- a/libsanitizer/sanitizer_common/sanitizer_platform_limits_freebsd.cpp +++ b/libsanitizer/sanitizer_common/sanitizer_platform_limits_freebsd.cpp @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include @@ -70,14 +71,8 @@ #include #include #include -#include -#include -#include -#include -#include #include #include -#include #include #include #include @@ -87,10 +82,6 @@ #include #include -#define _KERNEL // to declare 'shminfo' structure -#include -#undef _KERNEL - #undef IOC_DIRMASK // Include these after system headers to avoid name clashes and ambiguities. @@ -141,8 +132,6 @@ unsigned struct_timeb_sz = sizeof(struct timeb); unsigned struct_msqid_ds_sz = sizeof(struct msqid_ds); unsigned struct_mq_attr_sz = sizeof(struct mq_attr); unsigned struct_statvfs_sz = sizeof(struct statvfs); -unsigned struct_shminfo_sz = sizeof(struct shminfo); -unsigned struct_shm_info_sz = sizeof(struct shm_info); unsigned struct_regmatch_sz = sizeof(regmatch_t); unsigned struct_regex_sz = sizeof(regex_t); unsigned struct_fstab_sz = sizeof(struct fstab); @@ -156,9 +145,6 @@ const uptr sig_err = (uptr)SIG_ERR; const uptr sa_siginfo = (uptr)SA_SIGINFO; int shmctl_ipc_stat = (int)IPC_STAT; -int shmctl_ipc_info = (int)IPC_INFO; -int shmctl_shm_info = (int)SHM_INFO; -int shmctl_shm_stat = (int)SHM_STAT; unsigned struct_utmpx_sz = sizeof(struct utmpx); int map_fixed = MAP_FIXED; @@ -378,22 +364,6 @@ const int si_SEGV_MAPERR = SEGV_MAPERR; const int si_SEGV_ACCERR = SEGV_ACCERR; const int unvis_valid = UNVIS_VALID; const int unvis_validpush = UNVIS_VALIDPUSH; - -const unsigned MD5_CTX_sz = sizeof(MD5_CTX); -const unsigned MD5_return_length = MD5_DIGEST_STRING_LENGTH; - -#define SHA2_CONST(LEN) \ - const unsigned SHA##LEN##_CTX_sz = 
sizeof(SHA##LEN##_CTX); \ - const unsigned SHA##LEN##_return_length = SHA##LEN##_DIGEST_STRING_LENGTH; \ - const unsigned SHA##LEN##_block_length = SHA##LEN##_BLOCK_LENGTH; \ - const unsigned SHA##LEN##_digest_length = SHA##LEN##_DIGEST_LENGTH - -SHA2_CONST(224); -SHA2_CONST(256); -SHA2_CONST(384); -SHA2_CONST(512); - -#undef SHA2_CONST } // namespace __sanitizer using namespace __sanitizer; diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_limits_freebsd.h b/libsanitizer/sanitizer_common/sanitizer_platform_limits_freebsd.h index 8ce73f206fd8..1cbb40e0b2ff 100644 --- a/libsanitizer/sanitizer_common/sanitizer_platform_limits_freebsd.h +++ b/libsanitizer/sanitizer_common/sanitizer_platform_limits_freebsd.h @@ -419,12 +419,14 @@ struct __sanitizer_wordexp_t { typedef void __sanitizer_FILE; -extern unsigned struct_shminfo_sz; -extern unsigned struct_shm_info_sz; extern int shmctl_ipc_stat; -extern int shmctl_ipc_info; -extern int shmctl_shm_info; -extern int shmctl_shm_stat; + +// This simplifies generic code +#define struct_shminfo_sz -1 +#define struct_shm_info_sz -1 +#define shmctl_shm_stat -1 +#define shmctl_ipc_info -1 +#define shmctl_shm_info -1 extern unsigned struct_utmpx_sz; @@ -708,22 +710,6 @@ extern unsigned IOCTL_KDSKBMODE; extern const int si_SEGV_MAPERR; extern const int si_SEGV_ACCERR; -extern const unsigned MD5_CTX_sz; -extern const unsigned MD5_return_length; - -#define SHA2_EXTERN(LEN) \ - extern const unsigned SHA##LEN##_CTX_sz; \ - extern const unsigned SHA##LEN##_return_length; \ - extern const unsigned SHA##LEN##_block_length; \ - extern const unsigned SHA##LEN##_digest_length - -SHA2_EXTERN(224); -SHA2_EXTERN(256); -SHA2_EXTERN(384); -SHA2_EXTERN(512); - -#undef SHA2_EXTERN - struct __sanitizer_cap_rights { u64 cr_rights[2]; }; diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cpp b/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cpp index c278c8797f75..bf0f355847cb 100644 --- 
a/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cpp +++ b/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cpp @@ -26,10 +26,7 @@ // With old kernels (and even new kernels on powerpc) asm/stat.h uses types that // are not defined anywhere in userspace headers. Fake them. This seems to work -// fine with newer headers, too. Beware that with , struct stat -// takes the form of struct stat64 on 32-bit platforms if _FILE_OFFSET_BITS=64. -// Also, for some platforms (e.g. mips) there are additional members in the -// struct stat:s. +// fine with newer headers, too. #include # if defined(__x86_64__) || defined(__mips__) || defined(__hexagon__) # include diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_limits_netbsd.cpp b/libsanitizer/sanitizer_common/sanitizer_platform_limits_netbsd.cpp index c40877ba48d0..435f3b2861dc 100644 --- a/libsanitizer/sanitizer_common/sanitizer_platform_limits_netbsd.cpp +++ b/libsanitizer/sanitizer_common/sanitizer_platform_limits_netbsd.cpp @@ -498,7 +498,6 @@ struct urio_command { #include #include #include -#include #include #include #include @@ -515,7 +514,7 @@ struct urio_command { #include #if defined(__x86_64__) -#include +#include #endif // clang-format on @@ -547,6 +546,7 @@ unsigned pid_t_sz = sizeof(pid_t); unsigned timeval_sz = sizeof(timeval); unsigned uid_t_sz = sizeof(uid_t); unsigned gid_t_sz = sizeof(gid_t); +unsigned fpos_t_sz = sizeof(fpos_t); unsigned mbstate_t_sz = sizeof(mbstate_t); unsigned sigset_t_sz = sizeof(sigset_t); unsigned struct_timezone_sz = sizeof(struct timezone); @@ -2487,8 +2487,6 @@ const unsigned RMD160_return_length = RMD160_DIGEST_STRING_LENGTH; const unsigned MD5_CTX_sz = sizeof(MD5_CTX); const unsigned MD5_return_length = MD5_DIGEST_STRING_LENGTH; -const unsigned fpos_t_sz = sizeof(fpos_t); - const unsigned MD2_CTX_sz = sizeof(MD2_CTX); const unsigned MD2_return_length = MD2_DIGEST_STRING_LENGTH; diff --git 
a/libsanitizer/sanitizer_common/sanitizer_platform_limits_netbsd.h b/libsanitizer/sanitizer_common/sanitizer_platform_limits_netbsd.h index 4c697b4d107d..3758a9101c2a 100644 --- a/libsanitizer/sanitizer_common/sanitizer_platform_limits_netbsd.h +++ b/libsanitizer/sanitizer_common/sanitizer_platform_limits_netbsd.h @@ -36,6 +36,7 @@ extern unsigned pid_t_sz; extern unsigned timeval_sz; extern unsigned uid_t_sz; extern unsigned gid_t_sz; +extern unsigned fpos_t_sz; extern unsigned mbstate_t_sz; extern unsigned struct_timezone_sz; extern unsigned struct_tms_sz; @@ -2335,8 +2336,6 @@ extern const unsigned RMD160_return_length; extern const unsigned MD5_CTX_sz; extern const unsigned MD5_return_length; -extern const unsigned fpos_t_sz; - extern const unsigned MD2_CTX_sz; extern const unsigned MD2_return_length; diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.cpp b/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.cpp index 9856ac3c3ec4..ea8cc306268c 100644 --- a/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.cpp +++ b/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.cpp @@ -24,7 +24,7 @@ // Must go after undef _FILE_OFFSET_BITS. #include "sanitizer_platform.h" -#if SANITIZER_LINUX || SANITIZER_APPLE +#if SANITIZER_LINUX || SANITIZER_APPLE || SANITIZER_HAIKU // Must go after undef _FILE_OFFSET_BITS. 
#include "sanitizer_glibc_version.h" @@ -52,7 +52,7 @@ #include #include #include -#if !SANITIZER_APPLE +#if !SANITIZER_APPLE && !SANITIZER_HAIKU #include #endif @@ -61,7 +61,9 @@ #endif #if !SANITIZER_ANDROID +#if !SANITIZER_HAIKU #include +#endif #include #include #endif @@ -111,9 +113,11 @@ typedef struct user_fpregs elf_fpregset_t; #if !SANITIZER_ANDROID #include +#if !SANITIZER_HAIKU #include #include #endif +#endif #if SANITIZER_LINUX #if SANITIZER_GLIBC @@ -163,7 +167,7 @@ typedef struct user_fpregs elf_fpregset_t; #include #include #include -#else +#elif !SANITIZER_HAIKU #include #endif // SANITIZER_LINUX @@ -173,6 +177,11 @@ typedef struct user_fpregs elf_fpregset_t; #include #endif +#if SANITIZER_HAIKU +#include +#include +#endif + // Include these after system headers to avoid name clashes and ambiguities. # include "sanitizer_common.h" # include "sanitizer_internal_defs.h" @@ -217,7 +226,7 @@ namespace __sanitizer { unsigned struct_fstab_sz = sizeof(struct fstab); #endif // SANITIZER_GLIBC || SANITIZER_FREEBSD || SANITIZER_NETBSD || // SANITIZER_APPLE -#if !SANITIZER_ANDROID +#if !SANITIZER_ANDROID && !SANITIZER_HAIKU unsigned struct_statfs_sz = sizeof(struct statfs); unsigned struct_sockaddr_sz = sizeof(struct sockaddr); @@ -324,7 +333,7 @@ namespace __sanitizer { int shmctl_shm_stat = (int)SHM_STAT; #endif -#if !SANITIZER_APPLE && !SANITIZER_FREEBSD +#if !SANITIZER_APPLE && !SANITIZER_FREEBSD && !SANITIZER_HAIKU unsigned struct_utmp_sz = sizeof(struct utmp); #endif #if !SANITIZER_ANDROID @@ -356,9 +365,9 @@ unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr); int glob_altdirfunc = GLOB_ALTDIRFUNC; #endif -# if !SANITIZER_ANDROID +# if !SANITIZER_ANDROID && !SANITIZER_HAIKU const int wordexp_wrde_dooffs = WRDE_DOOFFS; -# endif // !SANITIZER_ANDROID +# endif // !SANITIZER_ANDROID && !SANITIZER_HAIKU # if SANITIZER_LINUX && !SANITIZER_ANDROID && \ (defined(__i386) || defined(__x86_64) || defined(__mips64) || \ @@ -537,21 +546,25 @@ unsigned 
struct_ElfW_Phdr_sz = sizeof(Elf_Phdr); unsigned struct_sock_fprog_sz = sizeof(struct sock_fprog); # endif // SANITIZER_GLIBC -# if !SANITIZER_ANDROID && !SANITIZER_APPLE +# if !SANITIZER_ANDROID && !SANITIZER_APPLE && !SANITIZER_HAIKU unsigned struct_sioc_sg_req_sz = sizeof(struct sioc_sg_req); unsigned struct_sioc_vif_req_sz = sizeof(struct sioc_vif_req); #endif + unsigned fpos_t_sz = sizeof(fpos_t); + const unsigned long __sanitizer_bufsiz = BUFSIZ; const unsigned IOCTL_NOT_PRESENT = 0; + unsigned IOCTL_FIONBIO = FIONBIO; +#if !SANITIZER_HAIKU unsigned IOCTL_FIOASYNC = FIOASYNC; unsigned IOCTL_FIOCLEX = FIOCLEX; unsigned IOCTL_FIOGETOWN = FIOGETOWN; - unsigned IOCTL_FIONBIO = FIONBIO; unsigned IOCTL_FIONCLEX = FIONCLEX; unsigned IOCTL_FIOSETOWN = FIOSETOWN; +#endif unsigned IOCTL_SIOCADDMULTI = SIOCADDMULTI; unsigned IOCTL_SIOCATMARK = SIOCATMARK; unsigned IOCTL_SIOCDELMULTI = SIOCDELMULTI; @@ -572,23 +585,27 @@ unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr); unsigned IOCTL_SIOCSIFMTU = SIOCSIFMTU; unsigned IOCTL_SIOCSIFNETMASK = SIOCSIFNETMASK; unsigned IOCTL_SIOCSPGRP = SIOCSPGRP; + +#if !SANITIZER_HAIKU unsigned IOCTL_TIOCCONS = TIOCCONS; - unsigned IOCTL_TIOCEXCL = TIOCEXCL; unsigned IOCTL_TIOCGETD = TIOCGETD; + unsigned IOCTL_TIOCNOTTY = TIOCNOTTY; + unsigned IOCTL_TIOCPKT = TIOCPKT; + unsigned IOCTL_TIOCSETD = TIOCSETD; + unsigned IOCTL_TIOCSTI = TIOCSTI; +#endif + + unsigned IOCTL_TIOCEXCL = TIOCEXCL; unsigned IOCTL_TIOCGPGRP = TIOCGPGRP; unsigned IOCTL_TIOCGWINSZ = TIOCGWINSZ; unsigned IOCTL_TIOCMBIC = TIOCMBIC; unsigned IOCTL_TIOCMBIS = TIOCMBIS; unsigned IOCTL_TIOCMGET = TIOCMGET; unsigned IOCTL_TIOCMSET = TIOCMSET; - unsigned IOCTL_TIOCNOTTY = TIOCNOTTY; unsigned IOCTL_TIOCNXCL = TIOCNXCL; unsigned IOCTL_TIOCOUTQ = TIOCOUTQ; - unsigned IOCTL_TIOCPKT = TIOCPKT; unsigned IOCTL_TIOCSCTTY = TIOCSCTTY; - unsigned IOCTL_TIOCSETD = TIOCSETD; unsigned IOCTL_TIOCSPGRP = TIOCSPGRP; - unsigned IOCTL_TIOCSTI = TIOCSTI; unsigned IOCTL_TIOCSWINSZ = TIOCSWINSZ; #if 
SANITIZER_LINUX && !SANITIZER_ANDROID unsigned IOCTL_SIOCGETSGCNT = SIOCGETSGCNT; @@ -762,12 +779,16 @@ unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr); unsigned IOCTL_SOUND_PCM_WRITE_FILTER = SOUND_PCM_WRITE_FILTER; #endif // SOUND_VERSION unsigned IOCTL_TCFLSH = TCFLSH; +# if SANITIZER_TERMIOS_IOCTL_CONSTANTS unsigned IOCTL_TCGETS = TCGETS; +# endif unsigned IOCTL_TCSBRK = TCSBRK; unsigned IOCTL_TCSBRKP = TCSBRKP; +# if SANITIZER_TERMIOS_IOCTL_CONSTANTS unsigned IOCTL_TCSETS = TCSETS; unsigned IOCTL_TCSETSF = TCSETSF; unsigned IOCTL_TCSETSW = TCSETSW; +# endif unsigned IOCTL_TCXONC = TCXONC; unsigned IOCTL_TIOCGLCKTRMIOS = TIOCGLCKTRMIOS; unsigned IOCTL_TIOCGSOFTCAR = TIOCGSOFTCAR; @@ -1084,7 +1105,7 @@ CHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_len); CHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_level); CHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_type); -#if SANITIZER_LINUX && (__ANDROID_API__ >= 21 || __GLIBC_PREREQ (2, 14)) +# if SANITIZER_LINUX && (SANITIZER_ANDROID || __GLIBC_PREREQ(2, 14)) CHECK_TYPE_SIZE(mmsghdr); CHECK_SIZE_AND_OFFSET(mmsghdr, msg_hdr); CHECK_SIZE_AND_OFFSET(mmsghdr, msg_len); @@ -1094,7 +1115,7 @@ COMPILER_CHECK(sizeof(__sanitizer_dirent) <= sizeof(dirent)); CHECK_SIZE_AND_OFFSET(dirent, d_ino); #if SANITIZER_APPLE CHECK_SIZE_AND_OFFSET(dirent, d_seekoff); -#elif SANITIZER_FREEBSD +#elif SANITIZER_FREEBSD || SANITIZER_HAIKU // There is no 'd_off' field on FreeBSD. 
#else CHECK_SIZE_AND_OFFSET(dirent, d_off); @@ -1110,7 +1131,9 @@ CHECK_SIZE_AND_OFFSET(dirent64, d_reclen); CHECK_TYPE_SIZE(ifconf); CHECK_SIZE_AND_OFFSET(ifconf, ifc_len); +#if !SANITIZER_HAIKU CHECK_SIZE_AND_OFFSET(ifconf, ifc_ifcu); +#endif CHECK_TYPE_SIZE(pollfd); CHECK_SIZE_AND_OFFSET(pollfd, fd); @@ -1165,7 +1188,7 @@ CHECK_TYPE_SIZE(__kernel_loff_t); CHECK_TYPE_SIZE(__kernel_fd_set); #endif -#if !SANITIZER_ANDROID +#if !SANITIZER_ANDROID && !SANITIZER_HAIKU CHECK_TYPE_SIZE(wordexp_t); CHECK_SIZE_AND_OFFSET(wordexp_t, we_wordc); CHECK_SIZE_AND_OFFSET(wordexp_t, we_wordv); @@ -1195,7 +1218,9 @@ CHECK_SIZE_AND_OFFSET(mntent, mnt_freq); CHECK_SIZE_AND_OFFSET(mntent, mnt_passno); #endif +#if !SANITIZER_HAIKU CHECK_TYPE_SIZE(ether_addr); +#endif #if SANITIZER_GLIBC || SANITIZER_FREEBSD CHECK_TYPE_SIZE(ipc_perm); @@ -1233,7 +1258,7 @@ CHECK_TYPE_SIZE(clock_t); CHECK_TYPE_SIZE(clockid_t); #endif -#if !SANITIZER_ANDROID +#if !SANITIZER_ANDROID && !SANITIZER_HAIKU CHECK_TYPE_SIZE(ifaddrs); CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_next); CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_name); diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h b/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h index cfac4903b76b..24966523f3a0 100644 --- a/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h +++ b/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h @@ -14,39 +14,39 @@ #ifndef SANITIZER_PLATFORM_LIMITS_POSIX_H #define SANITIZER_PLATFORM_LIMITS_POSIX_H -#if SANITIZER_LINUX || SANITIZER_APPLE - -#include "sanitizer_internal_defs.h" -#include "sanitizer_platform.h" -#include "sanitizer_mallinfo.h" - -#if SANITIZER_APPLE -#include -#if !__DARWIN_ONLY_64_BIT_INO_T -#define SANITIZER_HAS_STAT64 1 -#define SANITIZER_HAS_STATFS64 1 -#else -#define SANITIZER_HAS_STAT64 0 -#define SANITIZER_HAS_STATFS64 0 -#endif -#elif SANITIZER_GLIBC || SANITIZER_ANDROID -#define SANITIZER_HAS_STAT64 1 -#define SANITIZER_HAS_STATFS64 1 -#endif +#if 
SANITIZER_LINUX || SANITIZER_APPLE || SANITIZER_HAIKU + +# include "sanitizer_internal_defs.h" +# include "sanitizer_mallinfo.h" +# include "sanitizer_platform.h" + +# if SANITIZER_APPLE +# include +# if !__DARWIN_ONLY_64_BIT_INO_T +# define SANITIZER_HAS_STAT64 1 +# define SANITIZER_HAS_STATFS64 1 +# else +# define SANITIZER_HAS_STAT64 0 +# define SANITIZER_HAS_STATFS64 0 +# endif +# elif SANITIZER_GLIBC || SANITIZER_ANDROID +# define SANITIZER_HAS_STAT64 1 +# define SANITIZER_HAS_STATFS64 1 +# endif -#if defined(__sparc__) +# if defined(__sparc__) // FIXME: This can't be included from tsan which does not support sparc yet. -#include "sanitizer_glibc_version.h" -#endif +# include "sanitizer_glibc_version.h" +# endif -# define GET_LINK_MAP_BY_DLOPEN_HANDLE(handle) ((link_map*)(handle)) +# define GET_LINK_MAP_BY_DLOPEN_HANDLE(handle) ((link_map *)(handle)) namespace __sanitizer { extern unsigned struct_utsname_sz; extern unsigned struct_stat_sz; -#if SANITIZER_HAS_STAT64 +# if SANITIZER_HAS_STAT64 extern unsigned struct_stat64_sz; -#endif +# endif extern unsigned struct_rusage_sz; extern unsigned siginfo_t_sz; extern unsigned struct_itimerval_sz; @@ -64,13 +64,13 @@ extern unsigned struct_itimerspec_sz; extern unsigned struct_sigevent_sz; extern unsigned struct_stack_t_sz; extern unsigned struct_sched_param_sz; -#if SANITIZER_HAS_STATFS64 +# if SANITIZER_HAS_STATFS64 extern unsigned struct_statfs64_sz; -#endif +# endif extern unsigned struct_regex_sz; extern unsigned struct_regmatch_sz; -#if !SANITIZER_ANDROID +# if !SANITIZER_ANDROID extern unsigned struct_fstab_sz; extern unsigned struct_statfs_sz; extern unsigned struct_sockaddr_sz; @@ -82,42 +82,47 @@ unsigned ucontext_t_sz(void *uctx); # if defined(__x86_64__) const unsigned struct_kernel_stat_sz = 144; const unsigned struct_kernel_stat64_sz = 0; -#elif defined(__i386__) +# elif defined(__i386__) const unsigned struct_kernel_stat_sz = 64; const unsigned struct_kernel_stat64_sz = 96; -#elif defined(__arm__) +# 
elif defined(__arm__) const unsigned struct_kernel_stat_sz = 64; const unsigned struct_kernel_stat64_sz = 104; -#elif defined(__aarch64__) +# elif defined(__aarch64__) const unsigned struct_kernel_stat_sz = 128; const unsigned struct_kernel_stat64_sz = 104; -#elif defined(__powerpc__) && !defined(__powerpc64__) +# elif defined(__powerpc__) && !defined(__powerpc64__) const unsigned struct_kernel_stat_sz = 72; const unsigned struct_kernel_stat64_sz = 104; -#elif defined(__powerpc64__) +# elif defined(__powerpc64__) const unsigned struct_kernel_stat_sz = 144; const unsigned struct_kernel_stat64_sz = 104; -#elif defined(__mips__) -const unsigned struct_kernel_stat_sz = - SANITIZER_ANDROID - ? FIRST_32_SECOND_64(104, 128) - : FIRST_32_SECOND_64((_MIPS_SIM == _ABIN32) ? 176 : 160, 216); +# elif defined(__mips__) +const unsigned struct_kernel_stat_sz = SANITIZER_ANDROID + ? FIRST_32_SECOND_64(104, 128) +# if defined(_ABIN32) && _MIPS_SIM == _ABIN32 + : FIRST_32_SECOND_64(176, 216); +# elif SANITIZER_MUSL + : FIRST_32_SECOND_64(160, 208); +# else + : FIRST_32_SECOND_64(160, 216); +# endif const unsigned struct_kernel_stat64_sz = 104; -#elif defined(__s390__) && !defined(__s390x__) +# elif defined(__s390__) && !defined(__s390x__) const unsigned struct_kernel_stat_sz = 64; const unsigned struct_kernel_stat64_sz = 104; -#elif defined(__s390x__) +# elif defined(__s390x__) const unsigned struct_kernel_stat_sz = 144; const unsigned struct_kernel_stat64_sz = 0; -#elif defined(__sparc__) && defined(__arch64__) +# elif defined(__sparc__) && defined(__arch64__) const unsigned struct___old_kernel_stat_sz = 0; const unsigned struct_kernel_stat_sz = 104; const unsigned struct_kernel_stat64_sz = 144; -#elif defined(__sparc__) && !defined(__arch64__) +# elif defined(__sparc__) && !defined(__arch64__) const unsigned struct___old_kernel_stat_sz = 0; const unsigned struct_kernel_stat_sz = 64; const unsigned struct_kernel_stat64_sz = 104; -#elif SANITIZER_RISCV64 +# elif SANITIZER_RISCV64 
const unsigned struct_kernel_stat_sz = 128; const unsigned struct_kernel_stat64_sz = 0; // RISCV64 does not use stat64 # elif defined(__hexagon__) @@ -142,15 +147,15 @@ extern unsigned struct_old_utsname_sz; extern unsigned struct_oldold_utsname_sz; const unsigned struct_kexec_segment_sz = 4 * sizeof(unsigned long); -#endif // SANITIZER_LINUX +# endif // SANITIZER_LINUX -#if SANITIZER_LINUX +# if SANITIZER_LINUX -#if defined(__powerpc64__) || defined(__s390__) || defined(__loongarch__) +# if defined(__powerpc64__) || defined(__s390__) || defined(__loongarch__) const unsigned struct___old_kernel_stat_sz = 0; -#elif !defined(__sparc__) +# elif !defined(__sparc__) const unsigned struct___old_kernel_stat_sz = 32; -#endif +# endif extern unsigned struct_rlimit_sz; extern unsigned struct_utimbuf_sz; @@ -195,17 +200,17 @@ struct __sanitizer___sysctl_args { const unsigned old_sigset_t_sz = sizeof(unsigned long); struct __sanitizer_sem_t { -#if SANITIZER_ANDROID && defined(_LP64) +# if SANITIZER_ANDROID && defined(_LP64) int data[4]; -#elif SANITIZER_ANDROID && !defined(_LP64) +# elif SANITIZER_ANDROID && !defined(_LP64) int data; -#elif SANITIZER_LINUX +# elif SANITIZER_LINUX uptr data[4]; -#endif +# endif }; -#endif // SANITIZER_LINUX +# endif // SANITIZER_LINUX -#if SANITIZER_LINUX && !SANITIZER_ANDROID +# if SANITIZER_LINUX && !SANITIZER_ANDROID extern unsigned struct_ustat_sz; extern unsigned struct_rlimit64_sz; extern unsigned struct_statvfs64_sz; @@ -216,45 +221,45 @@ struct __sanitizer_ipc_perm { int gid; int cuid; int cgid; -#ifdef __powerpc__ +# ifdef __powerpc__ unsigned mode; unsigned __seq; u64 __unused1; u64 __unused2; -#elif defined(__sparc__) +# elif defined(__sparc__) unsigned mode; unsigned short __pad2; unsigned short __seq; unsigned long long __unused1; unsigned long long __unused2; -#else +# else unsigned int mode; unsigned short __seq; unsigned short __pad2; -#if defined(__x86_64__) && !defined(_LP64) +# if defined(__x86_64__) && !defined(_LP64) u64 
__unused1; u64 __unused2; -#else +# else unsigned long __unused1; unsigned long __unused2; -#endif -#endif +# endif +# endif }; struct __sanitizer_shmid_ds { __sanitizer_ipc_perm shm_perm; -#if defined(__sparc__) -#if !defined(__arch64__) +# if defined(__sparc__) +# if !defined(__arch64__) u32 __pad1; -#endif +# endif long shm_atime; -#if !defined(__arch64__) +# if !defined(__arch64__) u32 __pad2; -#endif +# endif long shm_dtime; -#if !defined(__arch64__) +# if !defined(__arch64__) u32 __pad3; -#endif +# endif long shm_ctime; uptr shm_segsz; int shm_cpid; @@ -262,61 +267,61 @@ struct __sanitizer_shmid_ds { unsigned long shm_nattch; unsigned long __glibc_reserved1; unsigned long __glibc_reserved2; -#else -#ifndef __powerpc__ +# else +# ifndef __powerpc__ uptr shm_segsz; -#elif !defined(__powerpc64__) +# elif !defined(__powerpc64__) uptr __unused0; -#endif -#if defined(__x86_64__) && !defined(_LP64) +# endif +# if defined(__x86_64__) && !defined(_LP64) u64 shm_atime; u64 shm_dtime; u64 shm_ctime; -#else +# else uptr shm_atime; -#if !defined(_LP64) && !defined(__mips__) +# if !defined(_LP64) && !defined(__mips__) uptr __unused1; -#endif +# endif uptr shm_dtime; -#if !defined(_LP64) && !defined(__mips__) +# if !defined(_LP64) && !defined(__mips__) uptr __unused2; -#endif +# endif uptr shm_ctime; -#if !defined(_LP64) && !defined(__mips__) +# if !defined(_LP64) && !defined(__mips__) uptr __unused3; -#endif -#endif -#ifdef __powerpc__ +# endif +# endif +# ifdef __powerpc__ uptr shm_segsz; -#endif +# endif int shm_cpid; int shm_lpid; -#if defined(__x86_64__) && !defined(_LP64) +# if defined(__x86_64__) && !defined(_LP64) u64 shm_nattch; u64 __unused4; u64 __unused5; -#else +# else uptr shm_nattch; uptr __unused4; uptr __unused5; -#endif -#endif +# endif +# endif }; -#endif +# endif -#if SANITIZER_LINUX && !SANITIZER_ANDROID +# if SANITIZER_LINUX && !SANITIZER_ANDROID extern unsigned struct_msqid_ds_sz; extern unsigned struct_mq_attr_sz; extern unsigned struct_timex_sz; 
extern unsigned struct_statvfs_sz; -#endif // SANITIZER_LINUX && !SANITIZER_ANDROID +# endif // SANITIZER_LINUX && !SANITIZER_ANDROID struct __sanitizer_iovec { void *iov_base; usize iov_len; }; -#if !SANITIZER_ANDROID +# if !SANITIZER_ANDROID struct __sanitizer_ifaddrs { struct __sanitizer_ifaddrs *ifa_next; char *ifa_name; @@ -324,21 +329,21 @@ struct __sanitizer_ifaddrs { void *ifa_addr; // (struct sockaddr *) void *ifa_netmask; // (struct sockaddr *) // This is a union on Linux. -# ifdef ifa_dstaddr -# undef ifa_dstaddr -# endif +# ifdef ifa_dstaddr +# undef ifa_dstaddr +# endif void *ifa_dstaddr; // (struct sockaddr *) void *ifa_data; }; -#endif // !SANITIZER_ANDROID +# endif // !SANITIZER_ANDROID -#if SANITIZER_APPLE +# if SANITIZER_APPLE typedef unsigned long __sanitizer_pthread_key_t; -#else +# else typedef unsigned __sanitizer_pthread_key_t; -#endif +# endif -#if SANITIZER_LINUX && !SANITIZER_ANDROID +# if SANITIZER_LINUX && !SANITIZER_ANDROID struct __sanitizer_XDR { int x_op; @@ -352,25 +357,28 @@ struct __sanitizer_XDR { const int __sanitizer_XDR_ENCODE = 0; const int __sanitizer_XDR_DECODE = 1; const int __sanitizer_XDR_FREE = 2; -#endif +# endif struct __sanitizer_passwd { char *pw_name; char *pw_passwd; int pw_uid; int pw_gid; -#if SANITIZER_APPLE +# if SANITIZER_APPLE long pw_change; char *pw_class; -#endif -#if !(SANITIZER_ANDROID && (SANITIZER_WORDSIZE == 32)) +# endif +# if !(SANITIZER_ANDROID && (SANITIZER_WORDSIZE == 32)) && !SANITIZER_HAIKU char *pw_gecos; -#endif +# endif char *pw_dir; char *pw_shell; -#if SANITIZER_APPLE +# if SANITIZER_APPLE long pw_expire; -#endif +# endif +# if SANITIZER_HAIKU + char *pw_gecos; +# endif }; struct __sanitizer_group { @@ -383,12 +391,22 @@ struct __sanitizer_group { # if (SANITIZER_LINUX && !SANITIZER_GLIBC && !SANITIZER_ANDROID) || \ (defined(__x86_64__) && !defined(_LP64)) || defined(__hexagon__) typedef long long __sanitizer_time_t; -#else +# else typedef long __sanitizer_time_t; -#endif +# endif typedef 
long __sanitizer_suseconds_t; +struct __sanitizer_timespec { + __sanitizer_time_t tv_sec; /* seconds */ + u64 tv_nsec; /* nanoseconds */ +}; + +struct __sanitizer_itimerspec { + struct __sanitizer_timespec it_interval; /* timer period */ + struct __sanitizer_timespec it_value; /* timer expiration */ +}; + struct __sanitizer_timeval { __sanitizer_time_t tv_sec; __sanitizer_suseconds_t tv_usec; @@ -420,11 +438,15 @@ struct __sanitizer_tm { int tm_wday; int tm_yday; int tm_isdst; +# if SANITIZER_HAIKU + int tm_gmtoff; +# else long int tm_gmtoff; +# endif const char *tm_zone; }; -#if SANITIZER_LINUX +# if SANITIZER_LINUX struct __sanitizer_mntent { char *mnt_fsname; char *mnt_dir; @@ -439,9 +461,9 @@ struct __sanitizer_file_handle { int handle_type; unsigned char f_handle[1]; // variable sized }; -#endif +# endif -#if SANITIZER_APPLE +# if SANITIZER_APPLE || SANITIZER_HAIKU struct __sanitizer_msghdr { void *msg_name; unsigned msg_namelen; @@ -456,7 +478,31 @@ struct __sanitizer_cmsghdr { int cmsg_level; int cmsg_type; }; -#else +# elif SANITIZER_MUSL +struct __sanitizer_msghdr { + void *msg_name; + unsigned msg_namelen; + struct __sanitizer_iovec *msg_iov; + int msg_iovlen; +# if SANITIZER_WORDSIZE == 64 + int __pad1; +# endif + void *msg_control; + unsigned msg_controllen; +# if SANITIZER_WORDSIZE == 64 + int __pad2; +# endif + int msg_flags; +}; +struct __sanitizer_cmsghdr { + unsigned cmsg_len; +# if SANITIZER_WORDSIZE == 64 + int __pad1; +# endif + int cmsg_level; + int cmsg_type; +}; +# else // In POSIX, int msg_iovlen; socklen_t msg_controllen; socklen_t cmsg_len; but // many implementations don't conform to the standard. 
struct __sanitizer_msghdr { @@ -473,22 +519,31 @@ struct __sanitizer_cmsghdr { int cmsg_level; int cmsg_type; }; -#endif +# endif -#if SANITIZER_LINUX +# if SANITIZER_LINUX struct __sanitizer_mmsghdr { __sanitizer_msghdr msg_hdr; unsigned int msg_len; }; -#endif +# endif -#if SANITIZER_APPLE +# if SANITIZER_APPLE struct __sanitizer_dirent { unsigned long long d_ino; unsigned long long d_seekoff; unsigned short d_reclen; // more fields that we don't care about }; +# elif SANITIZER_HAIKU +struct __sanitizer_dirent { + int d_dev; + int d_pdev; + unsigned long long d_ino; + unsigned long long d_pino; + unsigned short d_reclen; + // more fields that we don't care about +}; # elif (SANITIZER_LINUX && !SANITIZER_GLIBC) || defined(__x86_64__) || \ defined(__hexagon__) struct __sanitizer_dirent { @@ -513,47 +568,50 @@ struct __sanitizer_dirent64 { unsigned short d_reclen; // more fields that we don't care about }; -#endif +extern unsigned struct_sock_fprog_sz; +# endif -#if defined(__x86_64__) && !defined(_LP64) +# if SANITIZER_HAIKU +typedef int __sanitizer_clock_t; +# elif defined(__x86_64__) && !defined(_LP64) typedef long long __sanitizer_clock_t; -#else +# else typedef long __sanitizer_clock_t; -#endif +# endif -#if SANITIZER_LINUX +# if SANITIZER_LINUX || SANITIZER_HAIKU typedef int __sanitizer_clockid_t; typedef unsigned long long __sanitizer_eventfd_t; -#endif +# endif -#if SANITIZER_LINUX +# if SANITIZER_LINUX # if defined(_LP64) || defined(__x86_64__) || defined(__powerpc__) || \ defined(__mips__) || defined(__hexagon__) typedef unsigned __sanitizer___kernel_uid_t; typedef unsigned __sanitizer___kernel_gid_t; -#else +# else typedef unsigned short __sanitizer___kernel_uid_t; typedef unsigned short __sanitizer___kernel_gid_t; -#endif -#if defined(__x86_64__) && !defined(_LP64) +# endif +# if defined(__x86_64__) && !defined(_LP64) typedef long long __sanitizer___kernel_off_t; -#else +# else typedef long __sanitizer___kernel_off_t; -#endif +# endif -#if 
defined(__powerpc__) || defined(__mips__) +# if defined(__powerpc__) || defined(__mips__) typedef unsigned int __sanitizer___kernel_old_uid_t; typedef unsigned int __sanitizer___kernel_old_gid_t; -#else +# else typedef unsigned short __sanitizer___kernel_old_uid_t; typedef unsigned short __sanitizer___kernel_old_gid_t; -#endif +# endif typedef long long __sanitizer___kernel_loff_t; typedef struct { unsigned long fds_bits[1024 / (8 * sizeof(long))]; } __sanitizer___kernel_fd_set; -#endif +# endif // This thing depends on the platform. We are only interested in the upper // limit. Verified with a compiler assert in .cpp. @@ -562,50 +620,52 @@ union __sanitizer_pthread_attr_t { void *align; }; -#if SANITIZER_ANDROID -# if SANITIZER_MIPS +# if SANITIZER_ANDROID +# if SANITIZER_MIPS typedef unsigned long __sanitizer_sigset_t[16 / sizeof(unsigned long)]; -# else +# else typedef unsigned long __sanitizer_sigset_t; -# endif -#elif SANITIZER_APPLE +# endif +# elif SANITIZER_APPLE typedef unsigned __sanitizer_sigset_t; -#elif SANITIZER_LINUX +# elif SANITIZER_HAIKU +typedef unsigned long __sanitizer_sigset_t; +# elif SANITIZER_LINUX struct __sanitizer_sigset_t { // The size is determined by looking at sizeof of real sigset_t on linux. uptr val[128 / sizeof(uptr)]; }; -#endif +# endif struct __sanitizer_siginfo_pad { -#if SANITIZER_X32 +# if SANITIZER_X32 // x32 siginfo_t is aligned to 8 bytes. u64 pad[128 / sizeof(u64)]; -#else +# else // Require uptr, because siginfo_t is always pointer-size aligned on Linux. 
uptr pad[128 / sizeof(uptr)]; -#endif +# endif }; -#if SANITIZER_LINUX -# define SANITIZER_HAS_SIGINFO 1 +# if SANITIZER_LINUX +# define SANITIZER_HAS_SIGINFO 1 union __sanitizer_siginfo { - struct { + __extension__ struct { int si_signo; -# if SANITIZER_MIPS +# if SANITIZER_MIPS int si_code; int si_errno; -# else +# else int si_errno; int si_code; -# endif +# endif }; __sanitizer_siginfo_pad pad; }; -#else -# define SANITIZER_HAS_SIGINFO 0 +# else +# define SANITIZER_HAS_SIGINFO 0 typedef __sanitizer_siginfo_pad __sanitizer_siginfo; -#endif +# endif using __sanitizer_sighandler_ptr = void (*)(int sig); using __sanitizer_sigactionhandler_ptr = void (*)(int sig, @@ -613,7 +673,7 @@ using __sanitizer_sigactionhandler_ptr = void (*)(int sig, void *uctx); // Linux system headers define the 'sa_handler' and 'sa_sigaction' macros. -#if SANITIZER_ANDROID && (SANITIZER_WORDSIZE == 64) +# if SANITIZER_ANDROID && (SANITIZER_WORDSIZE == 64) struct __sanitizer_sigaction { unsigned sa_flags; union { @@ -623,7 +683,8 @@ struct __sanitizer_sigaction { __sanitizer_sigset_t sa_mask; void (*sa_restorer)(); }; -#elif SANITIZER_ANDROID && SANITIZER_MIPS32 // check this before WORDSIZE == 32 +# elif SANITIZER_ANDROID && \ + SANITIZER_MIPS32 // check this before WORDSIZE == 32 struct __sanitizer_sigaction { unsigned sa_flags; union { @@ -632,7 +693,7 @@ struct __sanitizer_sigaction { }; __sanitizer_sigset_t sa_mask; }; -#elif SANITIZER_ANDROID && (SANITIZER_WORDSIZE == 32) +# elif SANITIZER_ANDROID && (SANITIZER_WORDSIZE == 32) struct __sanitizer_sigaction { union { __sanitizer_sigactionhandler_ptr sigaction; @@ -642,66 +703,66 @@ struct __sanitizer_sigaction { uptr sa_flags; void (*sa_restorer)(); }; -#else // !SANITIZER_ANDROID +# else // !SANITIZER_ANDROID struct __sanitizer_sigaction { -#if defined(__mips__) && !SANITIZER_FREEBSD +# if defined(__mips__) && !SANITIZER_FREEBSD && !SANITIZER_MUSL unsigned int sa_flags; -#endif +# endif union { __sanitizer_sigactionhandler_ptr 
sigaction; __sanitizer_sighandler_ptr handler; }; -#if SANITIZER_FREEBSD +# if SANITIZER_FREEBSD int sa_flags; __sanitizer_sigset_t sa_mask; -#else -#if defined(__s390x__) +# else +# if defined(__s390x__) int sa_resv; -#else +# else __sanitizer_sigset_t sa_mask; -#endif -#ifndef __mips__ -#if defined(__sparc__) -#if __GLIBC_PREREQ (2, 20) +# endif +# if !defined(__mips__) || SANITIZER_MUSL +# if defined(__sparc__) +# if __GLIBC_PREREQ(2, 20) // On sparc glibc 2.19 and earlier sa_flags was unsigned long. -#if defined(__arch64__) +# if defined(__arch64__) // To maintain ABI compatibility on sparc64 when switching to an int, // __glibc_reserved0 was added. int __glibc_reserved0; -#endif +# endif int sa_flags; -#else +# else unsigned long sa_flags; -#endif -#else +# endif +# else int sa_flags; -#endif -#endif -#endif -#if SANITIZER_LINUX +# endif +# endif +# endif +# if SANITIZER_LINUX || SANITIZER_HAIKU void (*sa_restorer)(); -#endif -#if defined(__mips__) && (SANITIZER_WORDSIZE == 32) +# endif +# if defined(__mips__) && (SANITIZER_WORDSIZE == 32) && !SANITIZER_MUSL int sa_resv[1]; -#endif -#if defined(__s390x__) +# endif +# if defined(__s390x__) __sanitizer_sigset_t sa_mask; -#endif +# endif }; -#endif // !SANITIZER_ANDROID +# endif // !SANITIZER_ANDROID -#if defined(__mips__) -#define __SANITIZER_KERNEL_NSIG 128 -#else -#define __SANITIZER_KERNEL_NSIG 64 -#endif +# if defined(__mips__) +# define __SANITIZER_KERNEL_NSIG 128 +# else +# define __SANITIZER_KERNEL_NSIG 64 +# endif struct __sanitizer_kernel_sigset_t { uptr sig[__SANITIZER_KERNEL_NSIG / (sizeof(uptr) * 8)]; }; // Linux system headers define the 'sa_handler' and 'sa_sigaction' macros. 
-#if SANITIZER_MIPS +# if SANITIZER_MIPS struct __sanitizer_kernel_sigaction_t { unsigned int sa_flags; union { @@ -711,7 +772,7 @@ struct __sanitizer_kernel_sigaction_t { __sanitizer_kernel_sigset_t sa_mask; void (*sa_restorer)(void); }; -#else +# else struct __sanitizer_kernel_sigaction_t { union { void (*handler)(int signo); @@ -721,22 +782,22 @@ struct __sanitizer_kernel_sigaction_t { void (*sa_restorer)(void); __sanitizer_kernel_sigset_t sa_mask; }; -#endif +# endif extern const uptr sig_ign; extern const uptr sig_dfl; extern const uptr sig_err; extern const uptr sa_siginfo; -#if SANITIZER_LINUX +# if SANITIZER_LINUX extern int e_tabsz; -#endif +# endif extern int af_inet; extern int af_inet6; uptr __sanitizer_in_addr_sz(int af); -#if SANITIZER_LINUX +# if SANITIZER_LINUX struct __sanitizer_dl_phdr_info { uptr dlpi_addr; const char *dlpi_name; @@ -745,7 +806,7 @@ struct __sanitizer_dl_phdr_info { }; extern unsigned struct_ElfW_Phdr_sz; -#endif +# endif struct __sanitizer_protoent { char *p_name; @@ -765,15 +826,15 @@ struct __sanitizer_addrinfo { int ai_family; int ai_socktype; int ai_protocol; -#if SANITIZER_ANDROID || SANITIZER_APPLE +# if SANITIZER_ANDROID || SANITIZER_APPLE || SANITIZER_HAIKU unsigned ai_addrlen; char *ai_canonname; void *ai_addr; -#else // LINUX +# else // LINUX unsigned ai_addrlen; void *ai_addr; char *ai_canonname; -#endif +# endif struct __sanitizer_addrinfo *ai_next; }; @@ -791,14 +852,14 @@ struct __sanitizer_pollfd { short revents; }; -#if SANITIZER_ANDROID || SANITIZER_APPLE +# if SANITIZER_ANDROID || SANITIZER_APPLE typedef unsigned __sanitizer_nfds_t; -#else +# else typedef unsigned long __sanitizer_nfds_t; -#endif +# endif -#if !SANITIZER_ANDROID -# if SANITIZER_LINUX +# if !SANITIZER_ANDROID +# if SANITIZER_LINUX struct __sanitizer_glob_t { uptr gl_pathc; char **gl_pathv; @@ -811,13 +872,13 @@ struct __sanitizer_glob_t { int (*gl_lstat)(const char *, void *); int (*gl_stat)(const char *, void *); }; -# endif // SANITIZER_LINUX 
+# endif // SANITIZER_LINUX -# if SANITIZER_LINUX +# if SANITIZER_LINUX extern int glob_nomatch; extern int glob_altdirfunc; -# endif -#endif // !SANITIZER_ANDROID +# endif +# endif // !SANITIZER_ANDROID extern unsigned path_max; @@ -831,7 +892,7 @@ struct __sanitizer_wordexp_t { uptr we_offs; }; -#if SANITIZER_LINUX && !SANITIZER_ANDROID +# if SANITIZER_LINUX && !SANITIZER_ANDROID struct __sanitizer_FILE { int _flags; char *_IO_read_ptr; @@ -849,11 +910,11 @@ struct __sanitizer_FILE { __sanitizer_FILE *_chain; int _fileno; }; -# define SANITIZER_HAS_STRUCT_FILE 1 -#else +# define SANITIZER_HAS_STRUCT_FILE 1 +# else typedef void __sanitizer_FILE; -# define SANITIZER_HAS_STRUCT_FILE 0 -#endif +# define SANITIZER_HAS_STRUCT_FILE 0 +# endif # if SANITIZER_LINUX && !SANITIZER_ANDROID && \ (defined(__i386) || defined(__x86_64) || defined(__mips64) || \ @@ -905,14 +966,14 @@ extern int shmctl_ipc_stat; extern int shmctl_ipc_info; extern int shmctl_shm_info; extern int shmctl_shm_stat; -#endif +# endif -#if !SANITIZER_APPLE && !SANITIZER_FREEBSD +# if !SANITIZER_APPLE && !SANITIZER_FREEBSD extern unsigned struct_utmp_sz; -#endif -#if !SANITIZER_ANDROID +# endif +# if !SANITIZER_ANDROID extern unsigned struct_utmpx_sz; -#endif +# endif extern int map_fixed; @@ -922,13 +983,13 @@ struct __sanitizer_ifconf { union { void *ifcu_req; } ifc_ifcu; -#if SANITIZER_APPLE +# if SANITIZER_APPLE } __attribute__((packed)); -#else +# else }; -#endif +# endif -#if SANITIZER_LINUX && !SANITIZER_ANDROID +# if SANITIZER_LINUX && !SANITIZER_ANDROID struct __sanitizer__obstack_chunk { char *limit; struct __sanitizer__obstack_chunk *prev; @@ -955,57 +1016,57 @@ struct __sanitizer_cookie_io_functions_t { __sanitizer_cookie_io_seek seek; __sanitizer_cookie_io_close close; }; -#endif +# endif -#define IOC_NRBITS 8 -#define IOC_TYPEBITS 8 -#if defined(__powerpc__) || defined(__powerpc64__) || defined(__mips__) || \ - defined(__sparc__) -#define IOC_SIZEBITS 13 -#define IOC_DIRBITS 3 -#define 
IOC_NONE 1U -#define IOC_WRITE 4U -#define IOC_READ 2U -#else -#define IOC_SIZEBITS 14 -#define IOC_DIRBITS 2 -#define IOC_NONE 0U -#define IOC_WRITE 1U -#define IOC_READ 2U -#endif -#define IOC_NRMASK ((1 << IOC_NRBITS) - 1) -#define IOC_TYPEMASK ((1 << IOC_TYPEBITS) - 1) -#define IOC_SIZEMASK ((1 << IOC_SIZEBITS) - 1) -#if defined(IOC_DIRMASK) -#undef IOC_DIRMASK -#endif -#define IOC_DIRMASK ((1 << IOC_DIRBITS) - 1) -#define IOC_NRSHIFT 0 -#define IOC_TYPESHIFT (IOC_NRSHIFT + IOC_NRBITS) -#define IOC_SIZESHIFT (IOC_TYPESHIFT + IOC_TYPEBITS) -#define IOC_DIRSHIFT (IOC_SIZESHIFT + IOC_SIZEBITS) -#define EVIOC_EV_MAX 0x1f -#define EVIOC_ABS_MAX 0x3f - -#define IOC_DIR(nr) (((nr) >> IOC_DIRSHIFT) & IOC_DIRMASK) -#define IOC_TYPE(nr) (((nr) >> IOC_TYPESHIFT) & IOC_TYPEMASK) -#define IOC_NR(nr) (((nr) >> IOC_NRSHIFT) & IOC_NRMASK) - -#if defined(__sparc__) +# define IOC_NRBITS 8 +# define IOC_TYPEBITS 8 +# if defined(__powerpc__) || defined(__powerpc64__) || defined(__mips__) || \ + defined(__sparc__) +# define IOC_SIZEBITS 13 +# define IOC_DIRBITS 3 +# define IOC_NONE 1U +# define IOC_WRITE 4U +# define IOC_READ 2U +# else +# define IOC_SIZEBITS 14 +# define IOC_DIRBITS 2 +# define IOC_NONE 0U +# define IOC_WRITE 1U +# define IOC_READ 2U +# endif +# define IOC_NRMASK ((1 << IOC_NRBITS) - 1) +# define IOC_TYPEMASK ((1 << IOC_TYPEBITS) - 1) +# define IOC_SIZEMASK ((1 << IOC_SIZEBITS) - 1) +# if defined(IOC_DIRMASK) +# undef IOC_DIRMASK +# endif +# define IOC_DIRMASK ((1 << IOC_DIRBITS) - 1) +# define IOC_NRSHIFT 0 +# define IOC_TYPESHIFT (IOC_NRSHIFT + IOC_NRBITS) +# define IOC_SIZESHIFT (IOC_TYPESHIFT + IOC_TYPEBITS) +# define IOC_DIRSHIFT (IOC_SIZESHIFT + IOC_SIZEBITS) +# define EVIOC_EV_MAX 0x1f +# define EVIOC_ABS_MAX 0x3f + +# define IOC_DIR(nr) (((nr) >> IOC_DIRSHIFT) & IOC_DIRMASK) +# define IOC_TYPE(nr) (((nr) >> IOC_TYPESHIFT) & IOC_TYPEMASK) +# define IOC_NR(nr) (((nr) >> IOC_NRSHIFT) & IOC_NRMASK) + +# if defined(__sparc__) // In sparc the 14 bits SIZE field 
overlaps with the // least significant bit of DIR, so either IOC_READ or // IOC_WRITE shall be 1 in order to get a non-zero SIZE. -#define IOC_SIZE(nr) \ - ((((((nr) >> 29) & 0x7) & (4U | 2U)) == 0) ? 0 : (((nr) >> 16) & 0x3fff)) -#else -#define IOC_SIZE(nr) (((nr) >> IOC_SIZESHIFT) & IOC_SIZEMASK) -#endif +# define IOC_SIZE(nr) \ + ((((((nr) >> 29) & 0x7) & (4U | 2U)) == 0) ? 0 : (((nr) >> 16) & 0x3fff)) +# else +# define IOC_SIZE(nr) (((nr) >> IOC_SIZESHIFT) & IOC_SIZEMASK) +# endif extern unsigned struct_ifreq_sz; extern unsigned struct_termios_sz; extern unsigned struct_winsize_sz; -#if SANITIZER_LINUX +# if SANITIZER_LINUX extern unsigned struct_arpreq_sz; extern unsigned struct_cdrom_msf_sz; extern unsigned struct_cdrom_multisession_sz; @@ -1032,9 +1093,9 @@ extern unsigned struct_mtpos_sz; extern unsigned struct_vt_consize_sz; extern unsigned struct_vt_sizes_sz; extern unsigned struct_vt_stat_sz; -#endif // SANITIZER_LINUX +# endif // SANITIZER_LINUX -#if SANITIZER_LINUX +# if SANITIZER_LINUX extern unsigned struct_copr_buffer_sz; extern unsigned struct_copr_debug_buf_sz; extern unsigned struct_copr_msg_sz; @@ -1046,9 +1107,9 @@ extern unsigned struct_sbi_instrument_sz; extern unsigned struct_seq_event_rec_sz; extern unsigned struct_synth_info_sz; extern unsigned struct_vt_mode_sz; -#endif // SANITIZER_LINUX +# endif // SANITIZER_LINUX -#if SANITIZER_LINUX && !SANITIZER_ANDROID +# if SANITIZER_LINUX && !SANITIZER_ANDROID extern unsigned struct_ax25_parms_struct_sz; extern unsigned struct_input_keymap_entry_sz; extern unsigned struct_ipx_config_data_sz; @@ -1065,20 +1126,21 @@ extern unsigned struct_serial_struct_sz; extern unsigned struct_sockaddr_ax25_sz; extern unsigned struct_unimapdesc_sz; extern unsigned struct_unimapinit_sz; -extern unsigned struct_sock_fprog_sz; # endif // SANITIZER_LINUX && !SANITIZER_ANDROID extern const unsigned long __sanitizer_bufsiz; -#if SANITIZER_LINUX && !SANITIZER_ANDROID +# if SANITIZER_LINUX && !SANITIZER_ANDROID extern 
unsigned struct_audio_buf_info_sz; extern unsigned struct_ppp_stats_sz; -#endif // (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID +# endif // (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID -#if !SANITIZER_ANDROID && !SANITIZER_APPLE +# if !SANITIZER_ANDROID && !SANITIZER_APPLE extern unsigned struct_sioc_sg_req_sz; extern unsigned struct_sioc_vif_req_sz; -#endif +# endif + +extern unsigned fpos_t_sz; // ioctl request identifiers @@ -1112,29 +1174,31 @@ extern unsigned IOCTL_SIOCSIFMETRIC; extern unsigned IOCTL_SIOCSIFMTU; extern unsigned IOCTL_SIOCSIFNETMASK; extern unsigned IOCTL_SIOCSPGRP; +# if !SANITIZER_HAIKU extern unsigned IOCTL_TIOCCONS; -extern unsigned IOCTL_TIOCEXCL; extern unsigned IOCTL_TIOCGETD; +extern unsigned IOCTL_TIOCNOTTY; +extern unsigned IOCTL_TIOCPKT; +extern unsigned IOCTL_TIOCSETD; +extern unsigned IOCTL_TIOCSTI; +# endif +extern unsigned IOCTL_TIOCEXCL; extern unsigned IOCTL_TIOCGPGRP; extern unsigned IOCTL_TIOCGWINSZ; extern unsigned IOCTL_TIOCMBIC; extern unsigned IOCTL_TIOCMBIS; extern unsigned IOCTL_TIOCMGET; extern unsigned IOCTL_TIOCMSET; -extern unsigned IOCTL_TIOCNOTTY; extern unsigned IOCTL_TIOCNXCL; extern unsigned IOCTL_TIOCOUTQ; -extern unsigned IOCTL_TIOCPKT; extern unsigned IOCTL_TIOCSCTTY; -extern unsigned IOCTL_TIOCSETD; extern unsigned IOCTL_TIOCSPGRP; -extern unsigned IOCTL_TIOCSTI; extern unsigned IOCTL_TIOCSWINSZ; -#if SANITIZER_LINUX && !SANITIZER_ANDROID +# if SANITIZER_LINUX && !SANITIZER_ANDROID extern unsigned IOCTL_SIOCGETSGCNT; extern unsigned IOCTL_SIOCGETVIFCNT; -#endif -#if SANITIZER_LINUX +# endif +# if SANITIZER_LINUX extern unsigned IOCTL_EVIOCGABS; extern unsigned IOCTL_EVIOCGBIT; extern unsigned IOCTL_EVIOCGEFFECTS; @@ -1274,12 +1338,14 @@ extern unsigned IOCTL_SNDCTL_COPR_SENDMSG; extern unsigned IOCTL_SNDCTL_COPR_WCODE; extern unsigned IOCTL_SNDCTL_COPR_WDATA; extern unsigned IOCTL_TCFLSH; -extern unsigned IOCTL_TCGETS; extern unsigned IOCTL_TCSBRK; extern unsigned 
IOCTL_TCSBRKP; +# if SANITIZER_TERMIOS_IOCTL_CONSTANTS +extern unsigned IOCTL_TCGETS; extern unsigned IOCTL_TCSETS; extern unsigned IOCTL_TCSETSF; extern unsigned IOCTL_TCSETSW; +# endif extern unsigned IOCTL_TCXONC; extern unsigned IOCTL_TIOCGLCKTRMIOS; extern unsigned IOCTL_TIOCGSOFTCAR; @@ -1396,9 +1462,9 @@ extern unsigned IOCTL_VT_OPENQRY; extern unsigned IOCTL_VT_RELDISP; extern unsigned IOCTL_VT_SETMODE; extern unsigned IOCTL_VT_WAITACTIVE; -#endif // SANITIZER_LINUX +# endif // SANITIZER_LINUX -#if SANITIZER_LINUX && !SANITIZER_ANDROID +# if SANITIZER_LINUX && !SANITIZER_ANDROID extern unsigned IOCTL_EQL_EMANCIPATE; extern unsigned IOCTL_EQL_ENSLAVE; extern unsigned IOCTL_EQL_GETMASTRCFG; @@ -1487,31 +1553,44 @@ extern unsigned IOCTL_KDSETMODE; extern unsigned IOCTL_KDSKBMODE; extern unsigned IOCTL_KIOCSOUND; extern unsigned IOCTL_PIO_SCRNMAP; -#endif +# endif + +# if SANITIZER_GLIBC +struct __sanitizer_servent { + char *s_name; + char **s_aliases; + int s_port; + char *s_proto; +}; +# endif extern const int si_SEGV_MAPERR; extern const int si_SEGV_ACCERR; } // namespace __sanitizer -#define CHECK_TYPE_SIZE(TYPE) \ - COMPILER_CHECK(sizeof(__sanitizer_##TYPE) == sizeof(TYPE)) +# define CHECK_TYPE_SIZE(TYPE) \ + COMPILER_CHECK(sizeof(__sanitizer_##TYPE) == sizeof(TYPE)) -#define CHECK_SIZE_AND_OFFSET(CLASS, MEMBER) \ - COMPILER_CHECK(sizeof(((__sanitizer_##CLASS *)NULL)->MEMBER) == \ - sizeof(((CLASS *)NULL)->MEMBER)); \ - COMPILER_CHECK(offsetof(__sanitizer_##CLASS, MEMBER) == \ - offsetof(CLASS, MEMBER)) +# define CHECK_SIZE_AND_OFFSET(CLASS, MEMBER) \ + COMPILER_CHECK(sizeof(((__sanitizer_##CLASS *)NULL)->MEMBER) == \ + sizeof(((CLASS *)NULL)->MEMBER)); \ + COMPILER_CHECK(offsetof(__sanitizer_##CLASS, MEMBER) == \ + offsetof(CLASS, MEMBER)) // For sigaction, which is a function and struct at the same time, // and thus requires explicit "struct" in sizeof() expression. 
-#define CHECK_STRUCT_SIZE_AND_OFFSET(CLASS, MEMBER) \ - COMPILER_CHECK(sizeof(((struct __sanitizer_##CLASS *)NULL)->MEMBER) == \ - sizeof(((struct CLASS *)NULL)->MEMBER)); \ - COMPILER_CHECK(offsetof(struct __sanitizer_##CLASS, MEMBER) == \ - offsetof(struct CLASS, MEMBER)) +# define CHECK_STRUCT_SIZE_AND_OFFSET(CLASS, MEMBER) \ + COMPILER_CHECK(sizeof(((struct __sanitizer_##CLASS *)NULL)->MEMBER) == \ + sizeof(((struct CLASS *)NULL)->MEMBER)); \ + COMPILER_CHECK(offsetof(struct __sanitizer_##CLASS, MEMBER) == \ + offsetof(struct CLASS, MEMBER)) -#define SIGACTION_SYMNAME sigaction +# define SIGACTION_SYMNAME sigaction + +# if SANITIZER_LINUX +typedef void *__sanitizer_timer_t; +# endif -#endif // SANITIZER_LINUX || SANITIZER_APPLE +#endif // SANITIZER_LINUX || SANITIZER_APPLE || SANITIZER_HAIKU #endif diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_limits_solaris.cpp b/libsanitizer/sanitizer_common/sanitizer_platform_limits_solaris.cpp index dad7bde1498a..7ea6134b702b 100644 --- a/libsanitizer/sanitizer_common/sanitizer_platform_limits_solaris.cpp +++ b/libsanitizer/sanitizer_common/sanitizer_platform_limits_solaris.cpp @@ -32,6 +32,7 @@ #include #include #include +#include #include #include #include @@ -135,6 +136,8 @@ namespace __sanitizer { unsigned struct_sioc_sg_req_sz = sizeof(struct sioc_sg_req); unsigned struct_sioc_vif_req_sz = sizeof(struct sioc_vif_req); + unsigned fpos_t_sz = sizeof(fpos_t); + const unsigned IOCTL_NOT_PRESENT = 0; unsigned IOCTL_FIOASYNC = FIOASYNC; diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_limits_solaris.h b/libsanitizer/sanitizer_common/sanitizer_platform_limits_solaris.h index 84a81265162c..bf6586d27228 100644 --- a/libsanitizer/sanitizer_common/sanitizer_platform_limits_solaris.h +++ b/libsanitizer/sanitizer_common/sanitizer_platform_limits_solaris.h @@ -418,6 +418,8 @@ extern unsigned struct_winsize_sz; extern unsigned struct_sioc_sg_req_sz; extern unsigned struct_sioc_vif_req_sz; +extern 
unsigned fpos_t_sz; + // ioctl request identifiers // A special value to mark ioctls that are not present on the target platform, diff --git a/libsanitizer/sanitizer_common/sanitizer_posix.cpp b/libsanitizer/sanitizer_common/sanitizer_posix.cpp index 69af6465a62c..5b2c4e668ca8 100644 --- a/libsanitizer/sanitizer_common/sanitizer_posix.cpp +++ b/libsanitizer/sanitizer_common/sanitizer_posix.cpp @@ -225,17 +225,9 @@ void *MapWritableFileToMemory(void *addr, uptr size, fd_t fd, OFF_T offset) { return (void *)p; } -static inline bool IntervalsAreSeparate(uptr start1, uptr end1, - uptr start2, uptr end2) { - CHECK(start1 <= end1); - CHECK(start2 <= end2); - return (end1 < start2) || (end2 < start1); -} - +# if !SANITIZER_APPLE // FIXME: this is thread-unsafe, but should not cause problems most of the time. -// When the shadow is mapped only a single thread usually exists (plus maybe -// several worker threads on Mac, which aren't expected to map big chunks of -// memory). +// When the shadow is mapped only a single thread usually exists bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) { MemoryMappingLayout proc_maps(/*cache_enabled*/true); if (proc_maps.Error()) @@ -251,7 +243,6 @@ bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) { return true; } -#if !SANITIZER_APPLE void DumpProcessMap() { MemoryMappingLayout proc_maps(/*cache_enabled*/true); const sptr kBufSize = 4095; @@ -265,7 +256,7 @@ void DumpProcessMap() { Report("End of process memory map.\n"); UnmapOrDie(filename, kBufSize); } -#endif +# endif const char *GetPwd() { return GetEnv("PWD"); diff --git a/libsanitizer/sanitizer_common/sanitizer_posix_libcdep.cpp b/libsanitizer/sanitizer_common/sanitizer_posix_libcdep.cpp index b1eb2009cf15..8e5e87938c37 100644 --- a/libsanitizer/sanitizer_common/sanitizer_posix_libcdep.cpp +++ b/libsanitizer/sanitizer_common/sanitizer_posix_libcdep.cpp @@ -47,6 +47,8 @@ typedef void (*sa_sigaction_t)(int, siginfo_t *, void *); namespace __sanitizer { 
+[[maybe_unused]] static atomic_uint8_t signal_handler_is_from_sanitizer[64]; + u32 GetUid() { return getuid(); } @@ -210,6 +212,20 @@ void UnsetAlternateSignalStack() { UnmapOrDie(oldstack.ss_sp, oldstack.ss_size); } +bool IsSignalHandlerFromSanitizer(int signum) { + return atomic_load(&signal_handler_is_from_sanitizer[signum], + memory_order_relaxed); +} + +bool SetSignalHandlerFromSanitizer(int signum, bool new_state) { + if (signum < 0 || static_cast(signum) >= + ARRAY_SIZE(signal_handler_is_from_sanitizer)) + return false; + + return atomic_exchange(&signal_handler_is_from_sanitizer[signum], new_state, + memory_order_relaxed); +} + static void MaybeInstallSigaction(int signum, SignalHandlerType handler) { if (GetHandleSignalMode(signum) == kHandleSignalNo) return; @@ -223,6 +239,9 @@ static void MaybeInstallSigaction(int signum, if (common_flags()->use_sigaltstack) sigact.sa_flags |= SA_ONSTACK; CHECK_EQ(0, internal_sigaction(signum, &sigact, nullptr)); VReport(1, "Installed the sigaction for signal %d\n", signum); + + if (common_flags()->cloak_sanitizer_signal_handlers) + SetSignalHandlerFromSanitizer(signum, true); } void InstallDeadlySignalHandlers(SignalHandlerType handler) { diff --git a/libsanitizer/sanitizer_common/sanitizer_procmaps.h b/libsanitizer/sanitizer_common/sanitizer_procmaps.h index bf3c2c28e32e..d713ddf847df 100644 --- a/libsanitizer/sanitizer_common/sanitizer_procmaps.h +++ b/libsanitizer/sanitizer_common/sanitizer_procmaps.h @@ -16,7 +16,7 @@ #include "sanitizer_platform.h" #if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD || \ - SANITIZER_APPLE || SANITIZER_SOLARIS || \ + SANITIZER_APPLE || SANITIZER_SOLARIS || SANITIZER_HAIKU || \ SANITIZER_FUCHSIA #include "sanitizer_common.h" diff --git a/libsanitizer/sanitizer_common/sanitizer_procmaps_mac.cpp b/libsanitizer/sanitizer_common/sanitizer_procmaps_mac.cpp index 64e9c4858b69..f40fba6bf715 100644 --- a/libsanitizer/sanitizer_common/sanitizer_procmaps_mac.cpp +++ 
b/libsanitizer/sanitizer_common/sanitizer_procmaps_mac.cpp @@ -20,18 +20,21 @@ #include // These are not available in older macOS SDKs. -#ifndef CPU_SUBTYPE_X86_64_H -#define CPU_SUBTYPE_X86_64_H ((cpu_subtype_t)8) /* Haswell */ -#endif -#ifndef CPU_SUBTYPE_ARM_V7S -#define CPU_SUBTYPE_ARM_V7S ((cpu_subtype_t)11) /* Swift */ -#endif -#ifndef CPU_SUBTYPE_ARM_V7K -#define CPU_SUBTYPE_ARM_V7K ((cpu_subtype_t)12) -#endif -#ifndef CPU_TYPE_ARM64 -#define CPU_TYPE_ARM64 (CPU_TYPE_ARM | CPU_ARCH_ABI64) -#endif +# ifndef CPU_SUBTYPE_X86_64_H +# define CPU_SUBTYPE_X86_64_H ((cpu_subtype_t)8) /* Haswell */ +# endif +# ifndef CPU_SUBTYPE_ARM_V7S +# define CPU_SUBTYPE_ARM_V7S ((cpu_subtype_t)11) /* Swift */ +# endif +# ifndef CPU_SUBTYPE_ARM_V7K +# define CPU_SUBTYPE_ARM_V7K ((cpu_subtype_t)12) +# endif +# ifndef CPU_TYPE_ARM64 +# define CPU_TYPE_ARM64 (CPU_TYPE_ARM | CPU_ARCH_ABI64) +# endif +# ifndef CPU_SUBTYPE_ARM64E +# define CPU_SUBTYPE_ARM64E ((cpu_subtype_t)2) +# endif namespace __sanitizer { @@ -42,7 +45,6 @@ struct MemoryMappedSegmentData { const char *current_load_cmd_addr; u32 lc_type; uptr base_virt_addr; - uptr addr_mask; }; template @@ -51,12 +53,60 @@ static void NextSectionLoad(LoadedModule *module, MemoryMappedSegmentData *data, const Section *sc = (const Section *)data->current_load_cmd_addr; data->current_load_cmd_addr += sizeof(Section); - uptr sec_start = (sc->addr & data->addr_mask) + data->base_virt_addr; + uptr sec_start = sc->addr + data->base_virt_addr; uptr sec_end = sec_start + sc->size; module->addAddressRange(sec_start, sec_end, /*executable=*/false, isWritable, sc->sectname); } +static bool VerifyMemoryMapping(MemoryMappingLayout* mapping) { + InternalMmapVector modules; + modules.reserve(128); // matches DumpProcessMap + mapping->DumpListOfModules(&modules); + + InternalMmapVector segments; + for (uptr i = 0; i < modules.size(); ++i) { + for (auto& range : modules[i].ranges()) { + segments.push_back(range); + } + } + + // Verify that none of 
the segments overlap: + // 1. Sort the segments by the start address + // 2. Check that every segment starts after the previous one ends. + Sort(segments.data(), segments.size(), + [](LoadedModule::AddressRange& a, LoadedModule::AddressRange& b) { + return a.beg < b.beg; + }); + + // To avoid spam, we only print the report message once-per-process. + static bool invalid_module_map_reported = false; + bool well_formed = true; + + for (size_t i = 1; i < segments.size(); i++) { + uptr cur_start = segments[i].beg; + uptr prev_end = segments[i - 1].end; + if (cur_start < prev_end) { + well_formed = false; + VReport(2, "Overlapping mappings: %s start = %p, %s end = %p\n", + segments[i].name, (void*)cur_start, segments[i - 1].name, + (void*)prev_end); + if (!invalid_module_map_reported) { + Report( + "WARN: Invalid dyld module map detected. This is most likely a bug " + "in the sanitizer.\n"); + Report("WARN: Backtraces may be unreliable.\n"); + invalid_module_map_reported = true; + } + } + } + + for (auto& m : modules) m.clear(); + + mapping->Reset(); + return well_formed; +} + void MemoryMappedSegment::AddAddressRanges(LoadedModule *module) { // Don't iterate over sections when the caller hasn't set up the // data pointer, when there are no sections, or when the segment @@ -82,6 +132,7 @@ void MemoryMappedSegment::AddAddressRanges(LoadedModule *module) { MemoryMappingLayout::MemoryMappingLayout(bool cache_enabled) { Reset(); + VerifyMemoryMapping(this); } MemoryMappingLayout::~MemoryMappingLayout() { @@ -146,13 +197,8 @@ static bool IsDyldHdr(const mach_header *hdr) { // until we hit a Mach header matching dyld instead. These recurse // calls are expensive, but the first memory map generation occurs // early in the process, when dyld is one of the only images loaded, -// so it will be hit after only a few iterations. These assumptions don't -// hold on macOS 13+ anymore (dyld itself has moved into the shared cache). 
- -// FIXME: Unfortunately, the upstream revised version to deal with macOS 13+ -// is incompatible with GCC and also uses APIs not available on earlier -// systems which we support; backed out for now. - +// so it will be hit after only a few iterations. These assumptions don't hold +// on macOS 13+ anymore (dyld itself has moved into the shared cache). static mach_header *GetDyldImageHeaderViaVMRegion() { vm_address_t address = 0; @@ -176,17 +222,65 @@ static mach_header *GetDyldImageHeaderViaVMRegion() { } } +extern "C" { +struct dyld_shared_cache_dylib_text_info { + uint64_t version; // current version 2 + // following fields all exist in version 1 + uint64_t loadAddressUnslid; + uint64_t textSegmentSize; + uuid_t dylibUuid; + const char *path; // pointer invalid at end of iterations + // following fields all exist in version 2 + uint64_t textSegmentOffset; // offset from start of cache +}; +typedef struct dyld_shared_cache_dylib_text_info + dyld_shared_cache_dylib_text_info; + +extern bool _dyld_get_shared_cache_uuid(uuid_t uuid); +extern const void *_dyld_get_shared_cache_range(size_t *length); +extern intptr_t _dyld_get_image_slide(const struct mach_header* mh); +extern int dyld_shared_cache_iterate_text( + const uuid_t cacheUuid, + void (^callback)(const dyld_shared_cache_dylib_text_info *info)); +} // extern "C" + +static mach_header *GetDyldImageHeaderViaSharedCache() { + uuid_t uuid; + bool hasCache = _dyld_get_shared_cache_uuid(uuid); + if (!hasCache) + return nullptr; + + size_t cacheLength; + __block uptr cacheStart = (uptr)_dyld_get_shared_cache_range(&cacheLength); + CHECK(cacheStart && cacheLength); + + __block mach_header *dyldHdr = nullptr; + int res = dyld_shared_cache_iterate_text( + uuid, ^(const dyld_shared_cache_dylib_text_info *info) { + CHECK_GE(info->version, 2); + mach_header *hdr = + (mach_header *)(cacheStart + info->textSegmentOffset); + if (IsDyldHdr(hdr)) + dyldHdr = hdr; + }); + CHECK_EQ(res, 0); + + return dyldHdr; +} + const 
mach_header *get_dyld_hdr() { if (!dyld_hdr) { // On macOS 13+, dyld itself has moved into the shared cache. Looking it up // via vm_region_recurse_64() causes spins/hangs/crashes. - // FIXME: find a way to do this compatible with GCC. if (GetMacosAlignedVersion() >= MacosVersion(13, 0)) { + dyld_hdr = GetDyldImageHeaderViaSharedCache(); + if (!dyld_hdr) { VReport(1, - "looking up the dyld image header in the shared cache on " - "macOS 13+ is not yet supported. Falling back to " + "Failed to lookup the dyld image header in the shared cache on " + "macOS 13+ (or no shared cache in use). Falling back to " "lookup via vm_region_recurse_64().\n"); dyld_hdr = GetDyldImageHeaderViaVMRegion(); + } } else { dyld_hdr = GetDyldImageHeaderViaVMRegion(); } @@ -213,23 +307,21 @@ static bool NextSegmentLoad(MemoryMappedSegment *segment, layout_data->current_load_cmd_count--; if (((const load_command *)lc)->cmd == kLCSegment) { const SegmentCommand* sc = (const SegmentCommand *)lc; - uptr base_virt_addr, addr_mask; - if (layout_data->current_image == kDyldImageIdx) { - base_virt_addr = (uptr)get_dyld_hdr(); - // vmaddr is masked with 0xfffff because on macOS versions < 10.12, - // it contains an absolute address rather than an offset for dyld. - // To make matters even more complicated, this absolute address - // isn't actually the absolute segment address, but the offset portion - // of the address is accurate when combined with the dyld base address, - // and the mask will give just this offset. - addr_mask = 0xfffff; - } else { + if (internal_strcmp(sc->segname, "__LINKEDIT") == 0) { + // The LINKEDIT sections are for internal linker use, and may alias + // with the LINKEDIT section for other modules. (If we included them, + // our memory map would contain overlappping sections.) 
+ return false; + } + + uptr base_virt_addr; + if (layout_data->current_image == kDyldImageIdx) + base_virt_addr = (uptr)_dyld_get_image_slide(get_dyld_hdr()); + else base_virt_addr = (uptr)_dyld_get_image_vmaddr_slide(layout_data->current_image); - addr_mask = ~0; - } - segment->start = (sc->vmaddr & addr_mask) + base_virt_addr; + segment->start = sc->vmaddr + base_virt_addr; segment->end = segment->start + sc->vmsize; // Most callers don't need section information, so only fill this struct // when required. @@ -239,9 +331,9 @@ static bool NextSegmentLoad(MemoryMappedSegment *segment, (const char *)lc + sizeof(SegmentCommand); seg_data->lc_type = kLCSegment; seg_data->base_virt_addr = base_virt_addr; - seg_data->addr_mask = addr_mask; internal_strncpy(seg_data->name, sc->segname, ARRAY_SIZE(seg_data->name)); + seg_data->name[ARRAY_SIZE(seg_data->name) - 1] = 0; } // Return the initial protection. @@ -255,6 +347,7 @@ static bool NextSegmentLoad(MemoryMappedSegment *segment, ? kDyldPath : _dyld_get_image_name(layout_data->current_image); internal_strncpy(segment->filename, src, segment->filename_size); + segment->filename[segment->filename_size - 1] = 0; } segment->arch = layout_data->current_arch; internal_memcpy(segment->uuid, layout_data->current_uuid, kModuleUUIDSize); @@ -269,18 +362,26 @@ ModuleArch ModuleArchFromCpuType(cpu_type_t cputype, cpu_subtype_t cpusubtype) { case CPU_TYPE_I386: return kModuleArchI386; case CPU_TYPE_X86_64: - if (cpusubtype == CPU_SUBTYPE_X86_64_ALL) return kModuleArchX86_64; - if (cpusubtype == CPU_SUBTYPE_X86_64_H) return kModuleArchX86_64H; + if (cpusubtype == CPU_SUBTYPE_X86_64_ALL) + return kModuleArchX86_64; + if (cpusubtype == CPU_SUBTYPE_X86_64_H) + return kModuleArchX86_64H; CHECK(0 && "Invalid subtype of x86_64"); return kModuleArchUnknown; case CPU_TYPE_ARM: - if (cpusubtype == CPU_SUBTYPE_ARM_V6) return kModuleArchARMV6; - if (cpusubtype == CPU_SUBTYPE_ARM_V7) return kModuleArchARMV7; - if (cpusubtype == 
CPU_SUBTYPE_ARM_V7S) return kModuleArchARMV7S; - if (cpusubtype == CPU_SUBTYPE_ARM_V7K) return kModuleArchARMV7K; + if (cpusubtype == CPU_SUBTYPE_ARM_V6) + return kModuleArchARMV6; + if (cpusubtype == CPU_SUBTYPE_ARM_V7) + return kModuleArchARMV7; + if (cpusubtype == CPU_SUBTYPE_ARM_V7S) + return kModuleArchARMV7S; + if (cpusubtype == CPU_SUBTYPE_ARM_V7K) + return kModuleArchARMV7K; CHECK(0 && "Invalid subtype of ARM"); return kModuleArchUnknown; case CPU_TYPE_ARM64: + if (cpusubtype == CPU_SUBTYPE_ARM64E) + return kModuleArchARM64E; return kModuleArchARM64; default: CHECK(0 && "Invalid CPU type"); @@ -292,9 +393,22 @@ static const load_command *NextCommand(const load_command *lc) { return (const load_command *)((const char *)lc + lc->cmdsize); } -static void FindUUID(const load_command *first_lc, u8 *uuid_output) { - for (const load_command *lc = first_lc; lc->cmd != 0; lc = NextCommand(lc)) { - if (lc->cmd != LC_UUID) continue; +# ifdef MH_MAGIC_64 +static constexpr size_t header_size = sizeof(mach_header_64); +# else +static constexpr size_t header_size = sizeof(mach_header); +# endif + +static void FindUUID(const load_command *first_lc, const mach_header *hdr, + u8 *uuid_output) { + uint32_t curcmd = 0; + for (const load_command *lc = first_lc; curcmd < hdr->ncmds; + curcmd++, lc = NextCommand(lc)) { + CHECK_LT((const char *)lc, + (const char *)hdr + header_size + hdr->sizeofcmds); + + if (lc->cmd != LC_UUID) + continue; const uuid_command *uuid_lc = (const uuid_command *)lc; const uint8_t *uuid = &uuid_lc->uuid[0]; @@ -303,9 +417,16 @@ static void FindUUID(const load_command *first_lc, u8 *uuid_output) { } } -static bool IsModuleInstrumented(const load_command *first_lc) { - for (const load_command *lc = first_lc; lc->cmd != 0; lc = NextCommand(lc)) { - if (lc->cmd != LC_LOAD_DYLIB) continue; +static bool IsModuleInstrumented(const load_command *first_lc, + const mach_header *hdr) { + uint32_t curcmd = 0; + for (const load_command *lc = first_lc; curcmd < 
hdr->ncmds; + curcmd++, lc = NextCommand(lc)) { + CHECK_LT((const char *)lc, + (const char *)hdr + header_size + hdr->sizeofcmds); + + if (lc->cmd != LC_LOAD_DYLIB) + continue; const dylib_command *dylib_lc = (const dylib_command *)lc; uint32_t dylib_name_offset = dylib_lc->dylib.name.offset; @@ -351,10 +472,10 @@ bool MemoryMappingLayout::Next(MemoryMappedSegment *segment) { continue; } } - FindUUID((const load_command *)data_.current_load_cmd_addr, + FindUUID((const load_command *)data_.current_load_cmd_addr, hdr, data_.current_uuid); data_.current_instrumented = IsModuleInstrumented( - (const load_command *)data_.current_load_cmd_addr); + (const load_command *)data_.current_load_cmd_addr, hdr); } while (data_.current_load_cmd_count > 0) { diff --git a/libsanitizer/sanitizer_common/sanitizer_procmaps_solaris.cpp b/libsanitizer/sanitizer_common/sanitizer_procmaps_solaris.cpp index 80b8158f43db..452b30308be9 100644 --- a/libsanitizer/sanitizer_common/sanitizer_procmaps_solaris.cpp +++ b/libsanitizer/sanitizer_common/sanitizer_procmaps_solaris.cpp @@ -9,9 +9,6 @@ // Information about the process mappings (Solaris-specific parts). //===----------------------------------------------------------------------===// -// Before Solaris 11.4, doesn't work in a largefile environment. -#undef _FILE_OFFSET_BITS - // Avoid conflict between `_TIME_BITS` defined vs. `_FILE_OFFSET_BITS` // undefined in some Linux configurations. #undef _TIME_BITS diff --git a/libsanitizer/sanitizer_common/sanitizer_redefine_builtins.h b/libsanitizer/sanitizer_common/sanitizer_redefine_builtins.h index 7b34e781b6d2..7d8891117657 100644 --- a/libsanitizer/sanitizer_common/sanitizer_redefine_builtins.h +++ b/libsanitizer/sanitizer_common/sanitizer_redefine_builtins.h @@ -15,7 +15,7 @@ # define SANITIZER_REDEFINE_BUILTINS_H // The asm hack only works with GCC and Clang. 
-# if !defined(_WIN32) && defined(HAVE_AS_SYM_ASSIGN) +# if !defined(_WIN32) && !defined(_AIX) && !defined(__APPLE__) asm(R"( .set memcpy, __sanitizer_internal_memcpy @@ -52,7 +52,7 @@ using vector = Define_SANITIZER_COMMON_NO_REDEFINE_BUILTINS_in_cpp_file; } // namespace std # endif // __cpluplus -# endif // !_WIN32 && HAVE_AS_SYM_ASSIGN +# endif // !_WIN32 # endif // SANITIZER_REDEFINE_BUILTINS_H #endif // SANITIZER_COMMON_NO_REDEFINE_BUILTINS diff --git a/libsanitizer/sanitizer_common/sanitizer_signal_interceptors.inc b/libsanitizer/sanitizer_common/sanitizer_signal_interceptors.inc index 94e4e2954a3b..8511e4d55fa9 100644 --- a/libsanitizer/sanitizer_common/sanitizer_signal_interceptors.inc +++ b/libsanitizer/sanitizer_common/sanitizer_signal_interceptors.inc @@ -45,6 +45,8 @@ using namespace __sanitizer; INTERCEPTOR(uptr, bsd_signal, int signum, uptr handler) { SIGNAL_INTERCEPTOR_ENTER(); if (GetHandleSignalMode(signum) == kHandleSignalExclusive) return 0; + + // TODO: support cloak_sanitizer_signal_handlers SIGNAL_INTERCEPTOR_SIGNAL_IMPL(bsd_signal, signum, handler); } #define INIT_BSD_SIGNAL COMMON_INTERCEPT_FUNCTION(bsd_signal) @@ -56,19 +58,55 @@ INTERCEPTOR(uptr, bsd_signal, int signum, uptr handler) { INTERCEPTOR(uptr, signal, int signum, uptr handler) { SIGNAL_INTERCEPTOR_ENTER(); if (GetHandleSignalMode(signum) == kHandleSignalExclusive) + // The user can neither view nor change the signal handler, regardless of + // the cloak_sanitizer_signal_handlers setting. This differs from + // sigaction(). return (uptr) nullptr; - SIGNAL_INTERCEPTOR_SIGNAL_IMPL(signal, signum, handler); + + uptr ret = +[](auto signal, int signum, uptr handler) { + SIGNAL_INTERCEPTOR_SIGNAL_IMPL(signal, signum, handler); + }(signal, signum, handler); + + if (ret != sig_err && SetSignalHandlerFromSanitizer(signum, false)) + // If the user sets a signal handler, it becomes uncloaked, even if they + // reuse a sanitizer's signal handler. 
+ ret = sig_dfl; + + return ret; } #define INIT_SIGNAL COMMON_INTERCEPT_FUNCTION(signal) INTERCEPTOR(int, sigaction_symname, int signum, const __sanitizer_sigaction *act, __sanitizer_sigaction *oldact) { SIGNAL_INTERCEPTOR_ENTER(); + if (GetHandleSignalMode(signum) == kHandleSignalExclusive) { if (!oldact) return 0; act = nullptr; + // If cloak_sanitizer_signal_handlers=true, the user can neither view nor + // change the signal handle. + // If false, the user can view but not change the signal handler. This + // differs from signal(). } - SIGNAL_INTERCEPTOR_SIGACTION_IMPL(signum, act, oldact); + + int ret = +[](int signum, const __sanitizer_sigaction* act, + __sanitizer_sigaction* oldact) { + SIGNAL_INTERCEPTOR_SIGACTION_IMPL(signum, act, oldact); + }(signum, act, oldact); + + if (act) { + if (ret == 0 && SetSignalHandlerFromSanitizer(signum, false)) { + // If the user sets a signal handler, it becomes uncloaked, even if they + // reuse a sanitizer's signal handler. + + if (oldact) + oldact->handler = reinterpret_cast<__sanitizer_sighandler_ptr>(sig_dfl); + } + } else if (ret == 0 && oldact && IsSignalHandlerFromSanitizer(signum)) { + oldact->handler = reinterpret_cast<__sanitizer_sighandler_ptr>(sig_dfl); + } + + return ret; } #define INIT_SIGACTION COMMON_INTERCEPT_FUNCTION(sigaction_symname) diff --git a/libsanitizer/sanitizer_common/sanitizer_stacktrace.cpp b/libsanitizer/sanitizer_common/sanitizer_stacktrace.cpp index 661495e23405..d24fae98213a 100644 --- a/libsanitizer/sanitizer_common/sanitizer_stacktrace.cpp +++ b/libsanitizer/sanitizer_common/sanitizer_stacktrace.cpp @@ -87,8 +87,8 @@ static inline uhwptr *GetCanonicFrame(uptr bp, // Nope, this does not look right either. This means the frame after next does // not have a valid frame pointer, but we can still extract the caller PC. // Unfortunately, there is no way to decide between GCC and LLVM frame - // layouts. Assume GCC. - return bp_prev - 1; + // layouts. Assume LLVM. 
+ return bp_prev; #else return (uhwptr*)bp; #endif @@ -111,21 +111,14 @@ void BufferedStackTrace::UnwindFast(uptr pc, uptr bp, uptr stack_top, IsAligned((uptr)frame, sizeof(*frame)) && size < max_depth) { #ifdef __powerpc__ - // PowerPC ABIs specify that the return address is saved on the - // *caller's* stack frame. Thus we must dereference the back chain - // to find the caller frame before extracting it. + // PowerPC ABIs specify that the return address is saved at offset + // 16 of the *caller's* stack frame. Thus we must dereference the + // back chain to find the caller frame before extracting it. uhwptr *caller_frame = (uhwptr*)frame[0]; if (!IsValidFrame((uptr)caller_frame, stack_top, bottom) || !IsAligned((uptr)caller_frame, sizeof(uhwptr))) break; - // For most ABIs the offset where the return address is saved is two - // register sizes. The exception is the SVR4 ABI, which uses an - // offset of only one register size. -#ifdef _CALL_SYSV - uhwptr pc1 = caller_frame[1]; -#else uhwptr pc1 = caller_frame[2]; -#endif #elif defined(__s390__) uhwptr pc1 = frame[14]; #elif defined(__loongarch__) || defined(__riscv) diff --git a/libsanitizer/sanitizer_common/sanitizer_stoptheworld.h b/libsanitizer/sanitizer_common/sanitizer_stoptheworld.h index 7891c1081fe7..b4ed23abb964 100644 --- a/libsanitizer/sanitizer_common/sanitizer_stoptheworld.h +++ b/libsanitizer/sanitizer_common/sanitizer_stoptheworld.h @@ -38,7 +38,7 @@ class SuspendedThreadsList { } virtual uptr ThreadCount() const { UNIMPLEMENTED(); } - virtual tid_t GetThreadID(uptr index) const { UNIMPLEMENTED(); } + virtual ThreadID GetThreadID(uptr index) const { UNIMPLEMENTED(); } protected: ~SuspendedThreadsList() {} diff --git a/libsanitizer/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cpp b/libsanitizer/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cpp index 58d17d90c343..2bf547f4a721 100644 --- a/libsanitizer/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cpp +++ 
b/libsanitizer/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cpp @@ -38,25 +38,28 @@ # include #endif #include // for user_regs_struct -#if SANITIZER_ANDROID && SANITIZER_MIPS -# include // for mips SP register in sys/user.h -#endif -#include // for signal-related stuff - -#ifdef sa_handler -# undef sa_handler -#endif - -#ifdef sa_sigaction -# undef sa_sigaction -#endif - -#include "sanitizer_common.h" -#include "sanitizer_flags.h" -#include "sanitizer_libc.h" -#include "sanitizer_linux.h" -#include "sanitizer_mutex.h" -#include "sanitizer_placement_new.h" +# if SANITIZER_MIPS +// clang-format off +# include // must be included before +# include // for mips SP register +// clang-format on +# endif +# include // for signal-related stuff + +# ifdef sa_handler +# undef sa_handler +# endif + +# ifdef sa_sigaction +# undef sa_sigaction +# endif + +# include "sanitizer_common.h" +# include "sanitizer_flags.h" +# include "sanitizer_libc.h" +# include "sanitizer_linux.h" +# include "sanitizer_mutex.h" +# include "sanitizer_placement_new.h" // Sufficiently old kernel headers don't provide this value, but we can still // call prctl with it. If the runtime kernel is new enough, the prctl call will @@ -91,17 +94,17 @@ class SuspendedThreadsListLinux final : public SuspendedThreadsList { public: SuspendedThreadsListLinux() { thread_ids_.reserve(1024); } - tid_t GetThreadID(uptr index) const override; + ThreadID GetThreadID(uptr index) const override; uptr ThreadCount() const override; - bool ContainsTid(tid_t thread_id) const; - void Append(tid_t tid); + bool ContainsTid(ThreadID thread_id) const; + void Append(ThreadID tid); PtraceRegistersStatus GetRegistersAndSP(uptr index, InternalMmapVector *buffer, uptr *sp) const override; private: - InternalMmapVector thread_ids_; + InternalMmapVector thread_ids_; }; // Structure for passing arguments into the tracer thread. 
@@ -134,10 +137,10 @@ class ThreadSuspender { private: SuspendedThreadsListLinux suspended_threads_list_; pid_t pid_; - bool SuspendThread(tid_t thread_id); + bool SuspendThread(ThreadID thread_id); }; -bool ThreadSuspender::SuspendThread(tid_t tid) { +bool ThreadSuspender::SuspendThread(ThreadID tid) { int pterrno; if (internal_iserror(internal_ptrace(PTRACE_ATTACH, tid, nullptr, nullptr), &pterrno)) { @@ -207,7 +210,7 @@ void ThreadSuspender::KillAllThreads() { bool ThreadSuspender::SuspendAllThreads() { ThreadLister thread_lister(pid_); bool retry = true; - InternalMmapVector threads; + InternalMmapVector threads; threads.reserve(128); for (int i = 0; i < 30 && retry; ++i) { retry = false; @@ -223,7 +226,7 @@ bool ThreadSuspender::SuspendAllThreads() { case ThreadLister::Ok: break; } - for (tid_t tid : threads) { + for (ThreadID tid : threads) { // Are we already attached to this thread? // Currently this check takes linear time, however the number of threads // is usually small. @@ -400,7 +403,77 @@ struct ScopedSetTracerPID { } }; +// This detects whether ptrace is blocked (e.g., by seccomp), by forking and +// then attempting ptrace. +// This separate check is necessary because StopTheWorld() creates a thread +// with a shared virtual address space and shared TLS, and therefore +// cannot use waitpid() due to the shared errno. +static void TestPTrace() { +# if SANITIZER_SPARC + // internal_fork() on SPARC actually calls __fork(). We can't safely fork, + // because it's possible seccomp has been configured to disallow fork() but + // allow clone(). + VReport(1, "WARNING: skipping TestPTrace() because this is SPARC\n"); + VReport(1, + "If seccomp blocks ptrace, LeakSanitizer may hang without further " + "notice\n"); + VReport( + 1, + "If seccomp does not block ptrace, you can safely ignore this warning\n"); +# else + // Heuristic: only check the first time this is called. 
This is not always + // correct (e.g., user manually triggers leak detection, then updates + // seccomp, then leak detection is triggered again). + static bool checked = false; + if (checked) + return; + checked = true; + + // Hopefully internal_fork() is not too expensive, thanks to copy-on-write. + // Besides, this is only called the first time. + // Note that internal_fork() on non-SPARC Linux actually calls + // SYSCALL(clone); thus, it is reasonable to use it because if seccomp kills + // TestPTrace(), it would have killed StopTheWorld() anyway. + int pid = internal_fork(); + + if (pid < 0) { + int rverrno; + if (internal_iserror(pid, &rverrno)) + VReport(0, "WARNING: TestPTrace() failed to fork (errno %d)\n", rverrno); + + // We don't abort the sanitizer - it's still worth letting the sanitizer + // try. + return; + } + + if (pid == 0) { + // Child subprocess + + // TODO: consider checking return value of internal_ptrace, to handle + // SCMP_ACT_ERRNO. However, be careful not to consume too many + // resources performing a proper ptrace. + internal_ptrace(PTRACE_ATTACH, 0, nullptr, nullptr); + internal__exit(0); + } else { + int wstatus; + internal_waitpid(pid, &wstatus, 0); + + // Handle SCMP_ACT_KILL + if (WIFSIGNALED(wstatus)) { + VReport(0, + "WARNING: ptrace appears to be blocked (is seccomp enabled?). " + "LeakSanitizer may hang.\n"); + VReport(0, "Child exited with signal %d.\n", WTERMSIG(wstatus)); + // We don't abort the sanitizer - it's still worth letting the sanitizer + // try. + } + } +# endif +} + void StopTheWorld(StopTheWorldCallback callback, void *argument) { + TestPTrace(); + StopTheWorldScope in_stoptheworld; // Prepare the arguments for TracerThread. struct TracerThreadArgument tracer_thread_argument; @@ -454,7 +527,8 @@ void StopTheWorld(StopTheWorldCallback callback, void *argument) { internal_prctl(PR_SET_PTRACER, tracer_pid, 0, 0, 0); // Allow the tracer thread to start. 
tracer_thread_argument.mutex.Unlock(); - // NOTE: errno is shared between this thread and the tracer thread. + // NOTE: errno is shared between this thread and the tracer thread + // (clone was called without CLONE_SETTLS / newtls). // internal_waitpid() may call syscall() which can access/spoil errno, // so we can't call it now. Instead we for the tracer thread to finish using // the spin loop below. Man page for sched_yield() says "In the Linux @@ -511,11 +585,7 @@ typedef pt_regs regs_struct; #elif defined(__mips__) typedef struct user regs_struct; -# if SANITIZER_ANDROID -# define REG_SP regs[EF_R29] -# else -# define REG_SP regs[EF_REG29] -# endif +# define REG_SP regs[EF_R29] #elif defined(__aarch64__) typedef struct user_pt_regs regs_struct; @@ -547,7 +617,7 @@ static constexpr uptr kExtraRegs[] = {0}; #error "Unsupported architecture" #endif // SANITIZER_ANDROID && defined(__arm__) -tid_t SuspendedThreadsListLinux::GetThreadID(uptr index) const { +ThreadID SuspendedThreadsListLinux::GetThreadID(uptr index) const { CHECK_LT(index, thread_ids_.size()); return thread_ids_[index]; } @@ -556,14 +626,14 @@ uptr SuspendedThreadsListLinux::ThreadCount() const { return thread_ids_.size(); } -bool SuspendedThreadsListLinux::ContainsTid(tid_t thread_id) const { +bool SuspendedThreadsListLinux::ContainsTid(ThreadID thread_id) const { for (uptr i = 0; i < thread_ids_.size(); i++) { if (thread_ids_[i] == thread_id) return true; } return false; } -void SuspendedThreadsListLinux::Append(tid_t tid) { +void SuspendedThreadsListLinux::Append(ThreadID tid) { thread_ids_.push_back(tid); } diff --git a/libsanitizer/sanitizer_common/sanitizer_stoptheworld_mac.cpp b/libsanitizer/sanitizer_common/sanitizer_stoptheworld_mac.cpp index 813616467656..d6ef37ac847c 100644 --- a/libsanitizer/sanitizer_common/sanitizer_stoptheworld_mac.cpp +++ b/libsanitizer/sanitizer_common/sanitizer_stoptheworld_mac.cpp @@ -23,7 +23,7 @@ namespace __sanitizer { typedef struct { - tid_t tid; + ThreadID 
tid; thread_t thread; } SuspendedThreadInfo; @@ -31,7 +31,7 @@ class SuspendedThreadsListMac final : public SuspendedThreadsList { public: SuspendedThreadsListMac() = default; - tid_t GetThreadID(uptr index) const override; + ThreadID GetThreadID(uptr index) const override; thread_t GetThread(uptr index) const; uptr ThreadCount() const override; bool ContainsThread(thread_t thread) const; @@ -111,7 +111,7 @@ typedef x86_thread_state32_t regs_struct; #error "Unsupported architecture" #endif -tid_t SuspendedThreadsListMac::GetThreadID(uptr index) const { +ThreadID SuspendedThreadsListMac::GetThreadID(uptr index) const { CHECK_LT(index, threads_.size()); return threads_[index].tid; } diff --git a/libsanitizer/sanitizer_common/sanitizer_stoptheworld_netbsd_libcdep.cpp b/libsanitizer/sanitizer_common/sanitizer_stoptheworld_netbsd_libcdep.cpp index 58a0cfdbf9d4..33d603fec800 100644 --- a/libsanitizer/sanitizer_common/sanitizer_stoptheworld_netbsd_libcdep.cpp +++ b/libsanitizer/sanitizer_common/sanitizer_stoptheworld_netbsd_libcdep.cpp @@ -52,17 +52,17 @@ class SuspendedThreadsListNetBSD final : public SuspendedThreadsList { public: SuspendedThreadsListNetBSD() { thread_ids_.reserve(1024); } - tid_t GetThreadID(uptr index) const; + ThreadID GetThreadID(uptr index) const; uptr ThreadCount() const; - bool ContainsTid(tid_t thread_id) const; - void Append(tid_t tid); + bool ContainsTid(ThreadID thread_id) const; + void Append(ThreadID tid); PtraceRegistersStatus GetRegistersAndSP(uptr index, InternalMmapVector *buffer, uptr *sp) const; private: - InternalMmapVector thread_ids_; + InternalMmapVector thread_ids_; }; struct TracerThreadArgument { @@ -313,7 +313,7 @@ void StopTheWorld(StopTheWorldCallback callback, void *argument) { } } -tid_t SuspendedThreadsListNetBSD::GetThreadID(uptr index) const { +ThreadID SuspendedThreadsListNetBSD::GetThreadID(uptr index) const { CHECK_LT(index, thread_ids_.size()); return thread_ids_[index]; } @@ -322,7 +322,7 @@ uptr 
SuspendedThreadsListNetBSD::ThreadCount() const { return thread_ids_.size(); } -bool SuspendedThreadsListNetBSD::ContainsTid(tid_t thread_id) const { +bool SuspendedThreadsListNetBSD::ContainsTid(ThreadID thread_id) const { for (uptr i = 0; i < thread_ids_.size(); i++) { if (thread_ids_[i] == thread_id) return true; @@ -330,7 +330,7 @@ bool SuspendedThreadsListNetBSD::ContainsTid(tid_t thread_id) const { return false; } -void SuspendedThreadsListNetBSD::Append(tid_t tid) { +void SuspendedThreadsListNetBSD::Append(ThreadID tid) { thread_ids_.push_back(tid); } diff --git a/libsanitizer/sanitizer_common/sanitizer_stoptheworld_win.cpp b/libsanitizer/sanitizer_common/sanitizer_stoptheworld_win.cpp index f114acea79c9..43df59544d30 100644 --- a/libsanitizer/sanitizer_common/sanitizer_stoptheworld_win.cpp +++ b/libsanitizer/sanitizer_common/sanitizer_stoptheworld_win.cpp @@ -38,7 +38,7 @@ struct SuspendedThreadsListWindows final : public SuspendedThreadsList { InternalMmapVector *buffer, uptr *sp) const override; - tid_t GetThreadID(uptr index) const override; + ThreadID GetThreadID(uptr index) const override; uptr ThreadCount() const override; }; @@ -49,6 +49,8 @@ struct SuspendedThreadsListWindows final : public SuspendedThreadsList { # define SP_REG Esp # elif SANITIZER_ARM | SANITIZER_ARM64 # define SP_REG Sp +# elif SANITIZER_MIPS32 +# define SP_REG IntSp # else # error Architecture not supported! 
# endif @@ -66,7 +68,7 @@ PtraceRegistersStatus SuspendedThreadsListWindows::GetRegistersAndSP( return REGISTERS_AVAILABLE; } -tid_t SuspendedThreadsListWindows::GetThreadID(uptr index) const { +ThreadID SuspendedThreadsListWindows::GetThreadID(uptr index) const { CHECK_LT(index, threadIds.size()); return threadIds[index]; } diff --git a/libsanitizer/sanitizer_common/sanitizer_symbolizer_libcdep.cpp b/libsanitizer/sanitizer_common/sanitizer_symbolizer_libcdep.cpp index 74458028ae8f..565701c85d97 100644 --- a/libsanitizer/sanitizer_common/sanitizer_symbolizer_libcdep.cpp +++ b/libsanitizer/sanitizer_common/sanitizer_symbolizer_libcdep.cpp @@ -31,11 +31,12 @@ Symbolizer *Symbolizer::GetOrInit() { const char *ExtractToken(const char *str, const char *delims, char **result) { uptr prefix_len = internal_strcspn(str, delims); - *result = (char*)InternalAlloc(prefix_len + 1); + *result = (char *)InternalAlloc(prefix_len + 1); internal_memcpy(*result, str, prefix_len); (*result)[prefix_len] = '\0'; const char *prefix_end = str + prefix_len; - if (*prefix_end != '\0') prefix_end++; + if (*prefix_end != '\0') + prefix_end++; return prefix_end; } @@ -78,7 +79,8 @@ const char *ExtractTokenUpToDelimiter(const char *str, const char *delimiter, internal_memcpy(*result, str, prefix_len); (*result)[prefix_len] = '\0'; const char *prefix_end = str + prefix_len; - if (*prefix_end != '\0') prefix_end += internal_strlen(delimiter); + if (*prefix_end != '\0') + prefix_end += internal_strlen(delimiter); return prefix_end; } @@ -215,18 +217,20 @@ const LoadedModule *Symbolizer::FindModuleForAddress(uptr address) { modules_were_reloaded = true; } const LoadedModule *module = SearchForModule(modules_, address); - if (module) return module; + if (module) + return module; // dlopen/dlclose interceptors invalidate the module list, but when // interception is disabled, we need to retry if the lookup fails in // case the module list changed. 
-#if !SANITIZER_INTERCEPT_DLOPEN_DLCLOSE +# if !SANITIZER_INTERCEPT_DLOPEN_DLCLOSE if (!modules_were_reloaded) { RefreshModules(); module = SearchForModule(modules_, address); - if (module) return module; + if (module) + return module; } -#endif +# endif if (fallback_modules_.size()) { module = SearchForModule(fallback_modules_, address); @@ -260,31 +264,31 @@ class LLVMSymbolizerProcess final : public SymbolizerProcess { // script/asan_symbolize.py and sanitizer_common.h. void GetArgV(const char *path_to_binary, const char *(&argv)[kArgVMax]) const override { -#if defined(__x86_64h__) - const char* const kSymbolizerArch = "--default-arch=x86_64h"; -#elif defined(__x86_64__) - const char* const kSymbolizerArch = "--default-arch=x86_64"; -#elif defined(__i386__) - const char* const kSymbolizerArch = "--default-arch=i386"; -#elif SANITIZER_LOONGARCH64 +# if defined(__x86_64h__) + const char *const kSymbolizerArch = "--default-arch=x86_64h"; +# elif defined(__x86_64__) + const char *const kSymbolizerArch = "--default-arch=x86_64"; +# elif defined(__i386__) + const char *const kSymbolizerArch = "--default-arch=i386"; +# elif SANITIZER_LOONGARCH64 const char *const kSymbolizerArch = "--default-arch=loongarch64"; -#elif SANITIZER_RISCV64 +# elif SANITIZER_RISCV64 const char *const kSymbolizerArch = "--default-arch=riscv64"; -#elif defined(__aarch64__) - const char* const kSymbolizerArch = "--default-arch=arm64"; -#elif defined(__arm__) - const char* const kSymbolizerArch = "--default-arch=arm"; -#elif defined(__powerpc64__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ - const char* const kSymbolizerArch = "--default-arch=powerpc64"; -#elif defined(__powerpc64__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ - const char* const kSymbolizerArch = "--default-arch=powerpc64le"; -#elif defined(__s390x__) - const char* const kSymbolizerArch = "--default-arch=s390x"; -#elif defined(__s390__) - const char* const kSymbolizerArch = "--default-arch=s390"; -#else - const char* const 
kSymbolizerArch = "--default-arch=unknown"; -#endif +# elif defined(__aarch64__) + const char *const kSymbolizerArch = "--default-arch=arm64"; +# elif defined(__arm__) + const char *const kSymbolizerArch = "--default-arch=arm"; +# elif defined(__powerpc64__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + const char *const kSymbolizerArch = "--default-arch=powerpc64"; +# elif defined(__powerpc64__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + const char *const kSymbolizerArch = "--default-arch=powerpc64le"; +# elif defined(__s390x__) + const char *const kSymbolizerArch = "--default-arch=s390x"; +# elif defined(__s390__) + const char *const kSymbolizerArch = "--default-arch=s390"; +# else + const char *const kSymbolizerArch = "--default-arch=unknown"; +# endif const char *const demangle_flag = common_flags()->demangle ? "--demangle" : "--no-demangle"; @@ -315,7 +319,8 @@ static const char *ParseFileLineInfo(AddressInfo *info, const char *str) { char *back = file_line_info + size - 1; for (int i = 0; i < 2; ++i) { while (back > file_line_info && IsDigit(*back)) --back; - if (*back != ':' || !IsDigit(back[1])) break; + if (*back != ':' || !IsDigit(back[1])) + break; info->column = info->line; info->line = internal_atoll(back + 1); // Truncate the string at the colon to keep only filename. @@ -436,7 +441,7 @@ bool LLVMSymbolizer::SymbolizeData(uptr addr, DataInfo *info) { if (!buf) return false; ParseSymbolizeDataOutput(buf, info); - info->start += (addr - info->module_offset); // Add the base address. + info->start += (addr - info->module_offset); // Add the base address. 
return true; } @@ -459,10 +464,9 @@ const char *LLVMSymbolizer::FormatAndSendCommand(const char *command_prefix, size_needed = internal_snprintf(buffer_, kBufferSize, "%s \"%s\" 0x%zx\n", command_prefix, module_name, module_offset); else - size_needed = internal_snprintf(buffer_, kBufferSize, - "%s \"%s:%s\" 0x%zx\n", command_prefix, - module_name, ModuleArchToString(arch), - module_offset); + size_needed = internal_snprintf( + buffer_, kBufferSize, "%s \"%s:%s\" 0x%zx\n", command_prefix, + module_name, ModuleArchToString(arch), module_offset); if (size_needed >= static_cast(kBufferSize)) { Report("WARNING: Command buffer too small"); @@ -484,9 +488,9 @@ SymbolizerProcess::SymbolizerProcess(const char *path, bool use_posix_spawn) CHECK_NE(path_[0], '\0'); } -static bool IsSameModule(const char* path) { - if (const char* ProcessName = GetProcessName()) { - if (const char* SymbolizerName = StripModuleName(path)) { +static bool IsSameModule(const char *path) { + if (const char *ProcessName = GetProcessName()) { + if (const char *SymbolizerName = StripModuleName(path)) { return !internal_strcmp(ProcessName, SymbolizerName); } } @@ -516,9 +520,9 @@ const char *SymbolizerProcess::SendCommand(const char *command) { const char *SymbolizerProcess::SendCommandImpl(const char *command) { if (input_fd_ == kInvalidFd || output_fd_ == kInvalidFd) - return nullptr; + return nullptr; if (!WriteToSymbolizer(command, internal_strlen(command))) - return nullptr; + return nullptr; if (!ReadFromSymbolizer()) return nullptr; return buffer_.data(); diff --git a/libsanitizer/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cpp b/libsanitizer/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cpp index 0ddc24802d21..7eb0c9756d64 100644 --- a/libsanitizer/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cpp +++ b/libsanitizer/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cpp @@ -38,9 +38,10 @@ // because we do not require a C++ ABI library to be linked to a program // using 
sanitizers; if it's not present, we'll just use the mangled name. namespace __cxxabiv1 { - extern "C" SANITIZER_WEAK_ATTRIBUTE - char *__cxa_demangle(const char *mangled, char *buffer, - size_t *length, int *status); +extern "C" SANITIZER_WEAK_ATTRIBUTE char *__cxa_demangle(const char *mangled, + char *buffer, + size_t *length, + int *status); } namespace __sanitizer { @@ -53,8 +54,7 @@ const char *DemangleCXXABI(const char *name) { // it does not allocate). For now, we just call it anyway, and we leak // the returned value. if (&__cxxabiv1::__cxa_demangle) - if (const char *demangled_name = - __cxxabiv1::__cxa_demangle(name, 0, 0, 0)) + if (const char *demangled_name = __cxxabiv1::__cxa_demangle(name, 0, 0, 0)) return demangled_name; return nullptr; @@ -85,7 +85,8 @@ const char *DemangleSwift(const char *name) { } const char *DemangleSwiftAndCXX(const char *name) { - if (!name) return nullptr; + if (!name) + return nullptr; if (const char *swift_demangled_name = DemangleSwift(name)) return swift_demangled_name; return DemangleCXXABI(name); @@ -114,7 +115,8 @@ static bool CreateTwoHighNumberedPipes(int *infd_, int *outfd_) { } else { outfd = sock_pair[i]; for (int j = 0; j < i; j++) { - if (sock_pair[j] == infd) continue; + if (sock_pair[j] == infd) + continue; internal_close(sock_pair[j][0]); internal_close(sock_pair[j][1]); } @@ -155,7 +157,7 @@ bool SymbolizerProcess::StartSymbolizerSubprocess() { } if (use_posix_spawn_) { -#if SANITIZER_APPLE +# if SANITIZER_APPLE fd_t fd = internal_spawn(argv, const_cast(GetEnvP()), &pid); if (fd == kInvalidFd) { Report("WARNING: failed to spawn external symbolizer (errno: %d)\n", @@ -165,14 +167,16 @@ bool SymbolizerProcess::StartSymbolizerSubprocess() { input_fd_ = fd; output_fd_ = fd; -#else // SANITIZER_APPLE +# else // SANITIZER_APPLE UNIMPLEMENTED(); -#endif // SANITIZER_APPLE +# endif // SANITIZER_APPLE } else { fd_t infd[2] = {}, outfd[2] = {}; if (!CreateTwoHighNumberedPipes(infd, outfd)) { - Report("WARNING: Can't 
create a socket pair to start " - "external symbolizer (errno: %d)\n", errno); + Report( + "WARNING: Can't create a socket pair to start " + "external symbolizer (errno: %d)\n", + errno); return false; } @@ -260,10 +264,11 @@ bool Addr2LineProcess::ReachedEndOfOutput(const char *buffer, // 1. First one, corresponding to given offset to be symbolized // (may be equal to output_terminator_, if offset is not valid). // 2. Second one for output_terminator_, itself to mark the end of output. - if (length <= kTerminatorLen) return false; + if (length <= kTerminatorLen) + return false; // Addr2Line output should end up with output_terminator_. - return !internal_memcmp(buffer + length - kTerminatorLen, - output_terminator_, kTerminatorLen); + return !internal_memcmp(buffer + length - kTerminatorLen, output_terminator_, + kTerminatorLen); } class Addr2LinePool final : public SymbolizerTool { @@ -283,9 +288,7 @@ class Addr2LinePool final : public SymbolizerTool { return false; } - bool SymbolizeData(uptr addr, DataInfo *info) override { - return false; - } + bool SymbolizeData(uptr addr, DataInfo *info) override { return false; } private: const char *SendCommand(const char *module_name, uptr module_offset) { @@ -299,22 +302,21 @@ class Addr2LinePool final : public SymbolizerTool { } if (!addr2line) { addr2line = - new(*allocator_) Addr2LineProcess(addr2line_path_, module_name); + new (*allocator_) Addr2LineProcess(addr2line_path_, module_name); addr2line_pool_.push_back(addr2line); } CHECK_EQ(0, internal_strcmp(module_name, addr2line->module_name())); char buffer[kBufferSize]; - internal_snprintf(buffer, kBufferSize, "0x%zx\n0x%zx\n", - module_offset, dummy_address_); + internal_snprintf(buffer, kBufferSize, "0x%zx\n0x%zx\n", module_offset, + dummy_address_); return addr2line->SendCommand(buffer); } static const uptr kBufferSize = 64; const char *addr2line_path_; LowLevelAllocator *allocator_; - InternalMmapVector addr2line_pool_; - static const uptr dummy_address_ = - 
FIRST_32_SECOND_64(UINT32_MAX, UINT64_MAX); + InternalMmapVector addr2line_pool_; + static const uptr dummy_address_ = FIRST_32_SECOND_64(UINT32_MAX, UINT64_MAX); }; # if SANITIZER_SUPPORTS_WEAK_HOOKS @@ -352,8 +354,9 @@ class InternalSymbolizer final : public SymbolizerTool { } bool SymbolizePC(uptr addr, SymbolizedStack *stack) override { - bool result = __sanitizer_symbolize_code( - stack->info.module, stack->info.module_offset, buffer_, sizeof(buffer_)); + bool result = __sanitizer_symbolize_code(stack->info.module, + stack->info.module_offset, buffer_, + sizeof(buffer_)); if (result) ParseSymbolizePCOutput(buffer_, stack); return result; @@ -423,44 +426,53 @@ static SymbolizerTool *ChooseExternalSymbolizer(LowLevelAllocator *allocator) { } else if (!internal_strncmp(binary_name, kLLVMSymbolizerPrefix, internal_strlen(kLLVMSymbolizerPrefix))) { VReport(2, "Using llvm-symbolizer at user-specified path: %s\n", path); - return new(*allocator) LLVMSymbolizer(path, allocator); + return new (*allocator) LLVMSymbolizer(path, allocator); } else if (!internal_strcmp(binary_name, "atos")) { -#if SANITIZER_APPLE +# if SANITIZER_APPLE VReport(2, "Using atos at user-specified path: %s\n", path); - return new(*allocator) AtosSymbolizer(path, allocator); -#else // SANITIZER_APPLE + return new (*allocator) AtosSymbolizer(path, allocator); +# else // SANITIZER_APPLE Report("ERROR: Using `atos` is only supported on Darwin.\n"); Die(); -#endif // SANITIZER_APPLE +# endif // SANITIZER_APPLE } else if (!internal_strcmp(binary_name, "addr2line")) { VReport(2, "Using addr2line at user-specified path: %s\n", path); - return new(*allocator) Addr2LinePool(path, allocator); + return new (*allocator) Addr2LinePool(path, allocator); } else if (path) { - Report("ERROR: External symbolizer path is set to '%s' which isn't " - "a known symbolizer. 
Please set the path to the llvm-symbolizer " - "binary or other known tool.\n", path); + Report( + "ERROR: External symbolizer path is set to '%s' which isn't " + "a known symbolizer. Please set the path to the llvm-symbolizer " + "binary or other known tool.\n", + path); Die(); } // Otherwise symbolizer program is unknown, let's search $PATH +# ifdef SANITIZER_DISABLE_SYMBOLIZER_PATH_SEARCH + VReport(2, + "Symbolizer path search is disabled in the runtime " + "build configuration.\n"); + return nullptr; +# else CHECK(path == nullptr); -#if SANITIZER_APPLE +# if SANITIZER_APPLE if (const char *found_path = FindPathToBinary("atos")) { VReport(2, "Using atos found at: %s\n", found_path); - return new(*allocator) AtosSymbolizer(found_path, allocator); + return new (*allocator) AtosSymbolizer(found_path, allocator); } -#endif // SANITIZER_APPLE +# endif // SANITIZER_APPLE if (const char *found_path = FindPathToBinary("llvm-symbolizer")) { VReport(2, "Using llvm-symbolizer found at: %s\n", found_path); - return new(*allocator) LLVMSymbolizer(found_path, allocator); + return new (*allocator) LLVMSymbolizer(found_path, allocator); } if (common_flags()->allow_addr2line) { if (const char *found_path = FindPathToBinary("addr2line")) { VReport(2, "Using addr2line found at: %s\n", found_path); - return new(*allocator) Addr2LinePool(found_path, allocator); + return new (*allocator) Addr2LinePool(found_path, allocator); } } return nullptr; +# endif // SANITIZER_DISABLE_SYMBOLIZER_PATH_SEARCH } static void ChooseSymbolizerTools(IntrusiveList *list, @@ -492,17 +504,24 @@ static void ChooseSymbolizerTools(IntrusiveList *list, list->push_back(tool); } -#if SANITIZER_APPLE +# if SANITIZER_APPLE + if (list->empty()) { + Report( + "WARN: No external symbolizers found. Symbols may be missing or " + "unreliable.\n"); + Report( + "HINT: Is PATH set? 
Does sandbox allow file-read of /usr/bin/atos?\n"); + } VReport(2, "Using dladdr symbolizer.\n"); - list->push_back(new(*allocator) DlAddrSymbolizer()); -#endif // SANITIZER_APPLE + list->push_back(new (*allocator) DlAddrSymbolizer()); +# endif // SANITIZER_APPLE } Symbolizer *Symbolizer::PlatformInit() { IntrusiveList list; list.clear(); ChooseSymbolizerTools(&list, &symbolizer_allocator_); - return new(symbolizer_allocator_) Symbolizer(list); + return new (symbolizer_allocator_) Symbolizer(list); } void Symbolizer::LateInitialize() { diff --git a/libsanitizer/sanitizer_common/sanitizer_symbolizer_report.cpp b/libsanitizer/sanitizer_common/sanitizer_symbolizer_report.cpp index 80ae31e938ae..351e00db6fb2 100644 --- a/libsanitizer/sanitizer_common/sanitizer_symbolizer_report.cpp +++ b/libsanitizer/sanitizer_common/sanitizer_symbolizer_report.cpp @@ -41,18 +41,10 @@ static bool FrameIsInternal(const SymbolizedStack *frame) { return true; if (file && internal_strstr(file, "\\compiler-rt\\lib\\")) return true; - if (file && internal_strstr(file, "\\libsanitizer\\")) - return true; if (module && (internal_strstr(module, "libclang_rt."))) return true; if (module && (internal_strstr(module, "clang_rt."))) return true; - if (module && (internal_strstr(module, "libtsan.") - || internal_strstr(module, "libhwasan.") - || internal_strstr(module, "liblsan.") - || internal_strstr(module, "libasan.") - || internal_strstr(module, "libubsan."))) - return true; return false; } diff --git a/libsanitizer/sanitizer_common/sanitizer_thread_registry.cpp b/libsanitizer/sanitizer_common/sanitizer_thread_registry.cpp index cdc24f4a8869..d726d282437c 100644 --- a/libsanitizer/sanitizer_common/sanitizer_thread_registry.cpp +++ b/libsanitizer/sanitizer_common/sanitizer_thread_registry.cpp @@ -80,7 +80,7 @@ void ThreadContextBase::SetFinished() { OnFinished(); } -void ThreadContextBase::SetStarted(tid_t _os_id, ThreadType _thread_type, +void ThreadContextBase::SetStarted(ThreadID _os_id, 
ThreadType _thread_type, void *arg) { status = ThreadStatusRunning; os_id = _os_id; @@ -228,7 +228,8 @@ static bool FindThreadContextByOsIdCallback(ThreadContextBase *tctx, tctx->status != ThreadStatusDead); } -ThreadContextBase *ThreadRegistry::FindThreadContextByOsIDLocked(tid_t os_id) { +ThreadContextBase *ThreadRegistry::FindThreadContextByOsIDLocked( + ThreadID os_id) { return FindThreadContextLocked(FindThreadContextByOsIdCallback, (void *)os_id); } @@ -322,8 +323,8 @@ ThreadStatus ThreadRegistry::FinishThread(u32 tid) { return prev_status; } -void ThreadRegistry::StartThread(u32 tid, tid_t os_id, ThreadType thread_type, - void *arg) { +void ThreadRegistry::StartThread(u32 tid, ThreadID os_id, + ThreadType thread_type, void *arg) { ThreadRegistryLock l(this); running_threads_++; ThreadContextBase *tctx = threads_[tid]; diff --git a/libsanitizer/sanitizer_common/sanitizer_thread_registry.h b/libsanitizer/sanitizer_common/sanitizer_thread_registry.h index e06abb3932da..8adc420c8cce 100644 --- a/libsanitizer/sanitizer_common/sanitizer_thread_registry.h +++ b/libsanitizer/sanitizer_common/sanitizer_thread_registry.h @@ -43,7 +43,7 @@ class ThreadContextBase { const u32 tid; // Thread ID. Main thread should have tid = 0. u64 unique_id; // Unique thread ID. u32 reuse_count; // Number of times this tid was reused. - tid_t os_id; // PID (used for reporting). + ThreadID os_id; // PID (used for reporting). uptr user_id; // Some opaque user thread id (e.g. pthread_t). char name[64]; // As annotated by user. @@ -62,7 +62,7 @@ class ThreadContextBase { void SetDead(); void SetJoined(void *arg); void SetFinished(); - void SetStarted(tid_t _os_id, ThreadType _thread_type, void *arg); + void SetStarted(ThreadID _os_id, ThreadType _thread_type, void *arg); void SetCreated(uptr _user_id, u64 _unique_id, bool _detached, u32 _parent_tid, u32 _stack_tid, void *arg); void Reset(); @@ -126,7 +126,7 @@ class SANITIZER_MUTEX ThreadRegistry { // is found. 
ThreadContextBase *FindThreadContextLocked(FindThreadCallback cb, void *arg); - ThreadContextBase *FindThreadContextByOsIDLocked(tid_t os_id); + ThreadContextBase *FindThreadContextByOsIDLocked(ThreadID os_id); void SetThreadName(u32 tid, const char *name); void SetThreadNameByUserId(uptr user_id, const char *name); @@ -134,7 +134,7 @@ class SANITIZER_MUTEX ThreadRegistry { void JoinThread(u32 tid, void *arg); // Finishes thread and returns previous status. ThreadStatus FinishThread(u32 tid); - void StartThread(u32 tid, tid_t os_id, ThreadType thread_type, void *arg); + void StartThread(u32 tid, ThreadID os_id, ThreadType thread_type, void *arg); u32 ConsumeThreadUserId(uptr user_id); void SetThreadUserId(u32 tid, uptr user_id); diff --git a/libsanitizer/sanitizer_common/sanitizer_unwind_linux_libcdep.cpp b/libsanitizer/sanitizer_common/sanitizer_unwind_linux_libcdep.cpp index 6a8e82e2e213..4f1538eeb9c5 100644 --- a/libsanitizer/sanitizer_common/sanitizer_unwind_linux_libcdep.cpp +++ b/libsanitizer/sanitizer_common/sanitizer_unwind_linux_libcdep.cpp @@ -12,7 +12,7 @@ #include "sanitizer_platform.h" #if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \ - SANITIZER_SOLARIS + SANITIZER_SOLARIS || SANITIZER_HAIKU #include "sanitizer_common.h" #include "sanitizer_stacktrace.h" @@ -91,38 +91,6 @@ _Unwind_Reason_Code Unwind_Trace(struct _Unwind_Context *ctx, void *param) { } // namespace -#if SANITIZER_ANDROID -void SanitizerInitializeUnwinder() { - if (AndroidGetApiLevel() >= ANDROID_LOLLIPOP_MR1) return; - - // Pre-lollipop Android can not unwind through signal handler frames with - // libgcc unwinder, but it has a libcorkscrew.so library with the necessary - // workarounds. - void *p = dlopen("libcorkscrew.so", RTLD_LAZY); - if (!p) { - VReport(1, - "Failed to open libcorkscrew.so. 
You may see broken stack traces " - "in SEGV reports."); - return; - } - acquire_my_map_info_list = - (acquire_my_map_info_list_func)(uptr)dlsym(p, "acquire_my_map_info_list"); - release_my_map_info_list = - (release_my_map_info_list_func)(uptr)dlsym(p, "release_my_map_info_list"); - unwind_backtrace_signal_arch = (unwind_backtrace_signal_arch_func)(uptr)dlsym( - p, "unwind_backtrace_signal_arch"); - if (!acquire_my_map_info_list || !release_my_map_info_list || - !unwind_backtrace_signal_arch) { - VReport(1, - "Failed to find one of the required symbols in libcorkscrew.so. " - "You may see broken stack traces in SEGV reports."); - acquire_my_map_info_list = 0; - unwind_backtrace_signal_arch = 0; - release_my_map_info_list = 0; - } -} -#endif - void BufferedStackTrace::UnwindSlow(uptr pc, u32 max_depth) { CHECK_GE(max_depth, 2); size = 0; @@ -171,4 +139,4 @@ void BufferedStackTrace::UnwindSlow(uptr pc, void *context, u32 max_depth) { } // namespace __sanitizer #endif // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || - // SANITIZER_SOLARIS + // SANITIZER_SOLARIS || SANITIZER_HAIKU diff --git a/libsanitizer/sanitizer_common/sanitizer_unwind_win.cpp b/libsanitizer/sanitizer_common/sanitizer_unwind_win.cpp index 6fc18396ca63..30ba812afc4b 100644 --- a/libsanitizer/sanitizer_common/sanitizer_unwind_win.cpp +++ b/libsanitizer/sanitizer_common/sanitizer_unwind_win.cpp @@ -75,6 +75,11 @@ void BufferedStackTrace::UnwindSlow(uptr pc, void *context, u32 max_depth) { stack_frame.AddrPC.Offset = ctx.Pc; stack_frame.AddrFrame.Offset = ctx.R11; stack_frame.AddrStack.Offset = ctx.Sp; +# elif SANITIZER_MIPS32 + int machine_type = IMAGE_FILE_MACHINE_R4000; + stack_frame.AddrPC.Offset = ctx.Fir; + stack_frame.AddrFrame.Offset = ctx.IntS8; + stack_frame.AddrStack.Offset = ctx.IntSp; # else int machine_type = IMAGE_FILE_MACHINE_I386; stack_frame.AddrPC.Offset = ctx.Eip; diff --git a/libsanitizer/sanitizer_common/sanitizer_win.cpp 
b/libsanitizer/sanitizer_common/sanitizer_win.cpp index ea513d5f263f..ed4f60deeffc 100644 --- a/libsanitizer/sanitizer_common/sanitizer_win.cpp +++ b/libsanitizer/sanitizer_common/sanitizer_win.cpp @@ -108,9 +108,7 @@ int internal_dlinfo(void *handle, int request, void *p) { // In contrast to POSIX, on Windows GetCurrentThreadId() // returns a system-unique identifier. -tid_t GetTid() { - return GetCurrentThreadId(); -} +ThreadID GetTid() { return GetCurrentThreadId(); } uptr GetThreadSelf() { return GetTid(); @@ -164,7 +162,24 @@ void UnmapOrDie(void *addr, uptr size, bool raw_report) { static void *ReturnNullptrOnOOMOrDie(uptr size, const char *mem_type, const char *mmap_type) { error_t last_error = GetLastError(); - if (last_error == ERROR_NOT_ENOUGH_MEMORY) + + // Assumption: VirtualAlloc is the last system call that was invoked before + // this method. + // VirtualAlloc emits one of 3 error codes when running out of memory + // 1. ERROR_NOT_ENOUGH_MEMORY: + // There's not enough memory to execute the command + // 2. ERROR_INVALID_PARAMETER: + // VirtualAlloc will return this if the request would allocate memory at an + // address exceeding or being very close to the maximum application address + // (the `lpMaximumApplicationAddress` field within the `SystemInfo` struct). + // This does not seem to be officially documented, but is corroborated here: + // https://stackoverflow.com/questions/45833674/why-does-virtualalloc-fail-for-lpaddress-greater-than-0x6ffffffffff + // 3. ERROR_COMMITMENT_LIMIT: + // VirtualAlloc will return this if e.g. the pagefile is too small to commit + // the requested amount of memory. 
+ if (last_error == ERROR_NOT_ENOUGH_MEMORY || + last_error == ERROR_INVALID_PARAMETER || + last_error == ERROR_COMMITMENT_LIMIT) return nullptr; ReportMmapFailureAndDie(size, mem_type, mmap_type, last_error); } @@ -994,6 +1009,9 @@ void SignalContext::InitPcSpBp() { # if SANITIZER_ARM bp = (uptr)context_record->R11; sp = (uptr)context_record->Sp; +# elif SANITIZER_MIPS32 + bp = (uptr)context_record->IntS8; + sp = (uptr)context_record->IntSp; # else bp = (uptr)context_record->Ebp; sp = (uptr)context_record->Esp; diff --git a/libsanitizer/tsan/tsan_debugging.cpp b/libsanitizer/tsan/tsan_debugging.cpp index 41fa293dbaaa..b3422af75606 100644 --- a/libsanitizer/tsan/tsan_debugging.cpp +++ b/libsanitizer/tsan/tsan_debugging.cpp @@ -165,7 +165,7 @@ int __tsan_get_report_mutex(void *report, uptr idx, uptr *mutex_id, void **addr, } SANITIZER_INTERFACE_ATTRIBUTE -int __tsan_get_report_thread(void *report, uptr idx, int *tid, tid_t *os_id, +int __tsan_get_report_thread(void *report, uptr idx, int *tid, ThreadID *os_id, int *running, const char **name, int *parent_tid, void **trace, uptr trace_size) { const ReportDesc *rep = (ReportDesc *)report; @@ -242,7 +242,7 @@ const char *__tsan_locate_address(uptr addr, char *name, uptr name_size, SANITIZER_INTERFACE_ATTRIBUTE int __tsan_get_alloc_stack(uptr addr, uptr *trace, uptr size, int *thread_id, - tid_t *os_id) { + ThreadID *os_id) { MBlock *b = 0; Allocator *a = allocator(); if (a->PointerIsMine((void *)addr)) { diff --git a/libsanitizer/tsan/tsan_flags.cpp b/libsanitizer/tsan/tsan_flags.cpp index 3fd58f46983f..efaaef8b7ae9 100644 --- a/libsanitizer/tsan/tsan_flags.cpp +++ b/libsanitizer/tsan/tsan_flags.cpp @@ -20,6 +20,43 @@ #include "tsan_rtl.h" #include "ubsan/ubsan_flags.h" +#if SANITIZER_APPLE && !SANITIZER_GO +namespace __sanitizer { + +template <> +inline bool FlagHandler::Parse(const char *value) { + if (internal_strcmp(value, "on") == 0) { + *t_ = kLockDuringAllWrites; + return true; + } + if (internal_strcmp(value, 
"disable_for_current_process") == 0) { + *t_ = kNoLockDuringWritesCurrentProcess; + return true; + } + if (internal_strcmp(value, "disable_for_all_processes") == 0) { + *t_ = kNoLockDuringWritesAllProcesses; + return true; + } + Printf("ERROR: Invalid value for signal handler option: '%s'\n", value); + return false; +} + +template <> +inline bool FlagHandler::Format(char *buffer, + uptr size) { + switch (*t_) { + case kLockDuringAllWrites: + return FormatString(buffer, size, "on"); + case kNoLockDuringWritesCurrentProcess: + return FormatString(buffer, size, "disable_for_current_process"); + case kNoLockDuringWritesAllProcesses: + return FormatString(buffer, size, "disable_for_all_processes"); + } +} + +} // namespace __sanitizer +#endif // SANITIZER_APPLE && !SANITIZER_GO + namespace __tsan { // Can be overriden in frontend. diff --git a/libsanitizer/tsan/tsan_flags.h b/libsanitizer/tsan/tsan_flags.h index da27d5b992bc..e63d7c405a6c 100644 --- a/libsanitizer/tsan/tsan_flags.h +++ b/libsanitizer/tsan/tsan_flags.h @@ -16,6 +16,14 @@ #include "sanitizer_common/sanitizer_flags.h" #include "sanitizer_common/sanitizer_deadlock_detector_interface.h" +#if SANITIZER_APPLE && !SANITIZER_GO +enum LockDuringWriteSetting { + kLockDuringAllWrites, + kNoLockDuringWritesCurrentProcess, + kNoLockDuringWritesAllProcesses, +}; +#endif + namespace __tsan { struct Flags : DDFlags { diff --git a/libsanitizer/tsan/tsan_flags.inc b/libsanitizer/tsan/tsan_flags.inc index 731d776cc893..77ab910f08fb 100644 --- a/libsanitizer/tsan/tsan_flags.inc +++ b/libsanitizer/tsan/tsan_flags.inc @@ -80,3 +80,15 @@ TSAN_FLAG(bool, shared_ptr_interceptor, true, TSAN_FLAG(bool, print_full_thread_history, false, "If set, prints thread creation stacks for the threads involved in " "the report and their ancestors up to the main thread.") + +#if SANITIZER_APPLE && !SANITIZER_GO +TSAN_FLAG(LockDuringWriteSetting, lock_during_write, kLockDuringAllWrites, + "Determines whether to obtain a lock while writing logs 
or error " + "reports. " + "\"on\" - [default] lock during all writes. " + "\"disable_for_current_process\" - don't lock during all writes in " + "the current process, but do lock for all writes in child " + "processes." + "\"disable_for_all_processes\" - don't lock during all writes in " + "the current process and it's children processes.") +#endif diff --git a/libsanitizer/tsan/tsan_interceptors.h b/libsanitizer/tsan/tsan_interceptors.h index a357a870fdf8..f8cc8ff3b406 100644 --- a/libsanitizer/tsan/tsan_interceptors.h +++ b/libsanitizer/tsan/tsan_interceptors.h @@ -1,6 +1,9 @@ #ifndef TSAN_INTERCEPTORS_H #define TSAN_INTERCEPTORS_H +#if SANITIZER_APPLE && !SANITIZER_GO +# include "sanitizer_common/sanitizer_mac.h" +#endif #include "sanitizer_common/sanitizer_stacktrace.h" #include "tsan_rtl.h" @@ -43,7 +46,12 @@ inline bool in_symbolizer() { #endif inline bool MustIgnoreInterceptor(ThreadState *thr) { - return !thr->is_inited || thr->ignore_interceptors || thr->in_ignored_lib; + return !thr->is_inited || thr->ignore_interceptors || thr->in_ignored_lib +#if SANITIZER_APPLE && !SANITIZER_GO + || (flags()->lock_during_write != kLockDuringAllWrites && + thr->in_internal_write_call) +#endif + ; } } // namespace __tsan diff --git a/libsanitizer/tsan/tsan_interceptors_mac.cpp b/libsanitizer/tsan/tsan_interceptors_mac.cpp index 9db0eebd9236..c5e12b472aac 100644 --- a/libsanitizer/tsan/tsan_interceptors_mac.cpp +++ b/libsanitizer/tsan/tsan_interceptors_mac.cpp @@ -14,22 +14,21 @@ #include "sanitizer_common/sanitizer_platform.h" #if SANITIZER_APPLE -#include "interception/interception.h" -#include "tsan_interceptors.h" -#include "tsan_interface.h" -#include "tsan_interface_ann.h" -#include "tsan_spinlock_defs_mac.h" -#include "sanitizer_common/sanitizer_addrhashmap.h" - -#include -#include -#include -#include -#include - -#if defined(__has_include) && __has_include() -#include -#endif // #if defined(__has_include) && __has_include() +# include +# include +# include +# 
include +# include + +# include "interception/interception.h" +# include "sanitizer_common/sanitizer_addrhashmap.h" +# include "tsan_interceptors.h" +# include "tsan_interface.h" +# include "tsan_interface_ann.h" + +# if defined(__has_include) && __has_include() +# include +# endif // #if defined(__has_include) && __has_include() typedef long long_t; @@ -49,55 +48,55 @@ static constexpr morder kMacOrderBarrier = mo_acq_rel; static constexpr morder kMacOrderNonBarrier = mo_acq_rel; static constexpr morder kMacFailureOrder = mo_relaxed; -#define OSATOMIC_INTERCEPTOR(return_t, t, tsan_t, f, tsan_atomic_f, mo) \ - TSAN_INTERCEPTOR(return_t, f, t x, volatile t *ptr) { \ - SCOPED_TSAN_INTERCEPTOR(f, x, ptr); \ - return tsan_atomic_f((volatile tsan_t *)ptr, x, mo); \ - } +# define OSATOMIC_INTERCEPTOR(return_t, t, tsan_t, f, tsan_atomic_f, mo) \ + TSAN_INTERCEPTOR(return_t, f, t x, volatile t *ptr) { \ + SCOPED_TSAN_INTERCEPTOR(f, x, ptr); \ + return tsan_atomic_f((volatile tsan_t *)ptr, x, mo); \ + } -#define OSATOMIC_INTERCEPTOR_PLUS_X(return_t, t, tsan_t, f, tsan_atomic_f, mo) \ - TSAN_INTERCEPTOR(return_t, f, t x, volatile t *ptr) { \ - SCOPED_TSAN_INTERCEPTOR(f, x, ptr); \ - return tsan_atomic_f((volatile tsan_t *)ptr, x, mo) + x; \ - } +# define OSATOMIC_INTERCEPTOR_PLUS_X(return_t, t, tsan_t, f, tsan_atomic_f, \ + mo) \ + TSAN_INTERCEPTOR(return_t, f, t x, volatile t *ptr) { \ + SCOPED_TSAN_INTERCEPTOR(f, x, ptr); \ + return tsan_atomic_f((volatile tsan_t *)ptr, x, mo) + x; \ + } -#define OSATOMIC_INTERCEPTOR_PLUS_1(return_t, t, tsan_t, f, tsan_atomic_f, mo) \ - TSAN_INTERCEPTOR(return_t, f, volatile t *ptr) { \ - SCOPED_TSAN_INTERCEPTOR(f, ptr); \ - return tsan_atomic_f((volatile tsan_t *)ptr, 1, mo) + 1; \ - } +# define OSATOMIC_INTERCEPTOR_PLUS_1(return_t, t, tsan_t, f, tsan_atomic_f, \ + mo) \ + TSAN_INTERCEPTOR(return_t, f, volatile t *ptr) { \ + SCOPED_TSAN_INTERCEPTOR(f, ptr); \ + return tsan_atomic_f((volatile tsan_t *)ptr, 1, mo) + 1; \ + } -#define 
OSATOMIC_INTERCEPTOR_MINUS_1(return_t, t, tsan_t, f, tsan_atomic_f, \ - mo) \ - TSAN_INTERCEPTOR(return_t, f, volatile t *ptr) { \ - SCOPED_TSAN_INTERCEPTOR(f, ptr); \ - return tsan_atomic_f((volatile tsan_t *)ptr, 1, mo) - 1; \ - } +# define OSATOMIC_INTERCEPTOR_MINUS_1(return_t, t, tsan_t, f, tsan_atomic_f, \ + mo) \ + TSAN_INTERCEPTOR(return_t, f, volatile t *ptr) { \ + SCOPED_TSAN_INTERCEPTOR(f, ptr); \ + return tsan_atomic_f((volatile tsan_t *)ptr, 1, mo) - 1; \ + } -#define OSATOMIC_INTERCEPTORS_ARITHMETIC(f, tsan_atomic_f, m) \ - m(int32_t, int32_t, a32, f##32, __tsan_atomic32_##tsan_atomic_f, \ - kMacOrderNonBarrier) \ - m(int32_t, int32_t, a32, f##32##Barrier, __tsan_atomic32_##tsan_atomic_f, \ - kMacOrderBarrier) \ - m(int64_t, int64_t, a64, f##64, __tsan_atomic64_##tsan_atomic_f, \ - kMacOrderNonBarrier) \ - m(int64_t, int64_t, a64, f##64##Barrier, __tsan_atomic64_##tsan_atomic_f, \ - kMacOrderBarrier) - -#define OSATOMIC_INTERCEPTORS_BITWISE(f, tsan_atomic_f, m, m_orig) \ - m(int32_t, uint32_t, a32, f##32, __tsan_atomic32_##tsan_atomic_f, \ - kMacOrderNonBarrier) \ - m(int32_t, uint32_t, a32, f##32##Barrier, __tsan_atomic32_##tsan_atomic_f, \ - kMacOrderBarrier) \ - m_orig(int32_t, uint32_t, a32, f##32##Orig, __tsan_atomic32_##tsan_atomic_f, \ - kMacOrderNonBarrier) \ - m_orig(int32_t, uint32_t, a32, f##32##OrigBarrier, \ - __tsan_atomic32_##tsan_atomic_f, kMacOrderBarrier) - - -#pragma clang diagnostic push -// OSAtomic* functions are deprecated. 
-#pragma clang diagnostic ignored "-Wdeprecated-declarations" +# define OSATOMIC_INTERCEPTORS_ARITHMETIC(f, tsan_atomic_f, m) \ + m(int32_t, int32_t, a32, f##32, __tsan_atomic32_##tsan_atomic_f, \ + kMacOrderNonBarrier) \ + m(int32_t, int32_t, a32, f##32##Barrier, \ + __tsan_atomic32_##tsan_atomic_f, kMacOrderBarrier) \ + m(int64_t, int64_t, a64, f##64, __tsan_atomic64_##tsan_atomic_f, \ + kMacOrderNonBarrier) \ + m(int64_t, int64_t, a64, f##64##Barrier, \ + __tsan_atomic64_##tsan_atomic_f, kMacOrderBarrier) + +# define OSATOMIC_INTERCEPTORS_BITWISE(f, tsan_atomic_f, m, m_orig) \ + m(int32_t, uint32_t, a32, f##32, __tsan_atomic32_##tsan_atomic_f, \ + kMacOrderNonBarrier) \ + m(int32_t, uint32_t, a32, f##32##Barrier, \ + __tsan_atomic32_##tsan_atomic_f, kMacOrderBarrier) \ + m_orig(int32_t, uint32_t, a32, f##32##Orig, \ + __tsan_atomic32_##tsan_atomic_f, kMacOrderNonBarrier) \ + m_orig(int32_t, uint32_t, a32, f##32##OrigBarrier, \ + __tsan_atomic32_##tsan_atomic_f, kMacOrderBarrier) + +# pragma clang diagnostic push // OSAtomic* deprecation +# pragma clang diagnostic ignored "-Wdeprecated-declarations" OSATOMIC_INTERCEPTORS_ARITHMETIC(OSAtomicAdd, fetch_add, OSATOMIC_INTERCEPTOR_PLUS_X) OSATOMIC_INTERCEPTORS_ARITHMETIC(OSAtomicIncrement, fetch_add, @@ -110,26 +109,26 @@ OSATOMIC_INTERCEPTORS_BITWISE(OSAtomicAnd, fetch_and, OSATOMIC_INTERCEPTOR_PLUS_X, OSATOMIC_INTERCEPTOR) OSATOMIC_INTERCEPTORS_BITWISE(OSAtomicXor, fetch_xor, OSATOMIC_INTERCEPTOR_PLUS_X, OSATOMIC_INTERCEPTOR) +# pragma clang diagnostic pop // OSAtomic* deprecation + +# define OSATOMIC_INTERCEPTORS_CAS(f, tsan_atomic_f, tsan_t, t) \ + TSAN_INTERCEPTOR(bool, f, t old_value, t new_value, t volatile *ptr) { \ + SCOPED_TSAN_INTERCEPTOR(f, old_value, new_value, ptr); \ + return tsan_atomic_f##_compare_exchange_strong( \ + (volatile tsan_t *)ptr, (tsan_t *)&old_value, (tsan_t)new_value, \ + kMacOrderNonBarrier, kMacFailureOrder); \ + } \ + \ + TSAN_INTERCEPTOR(bool, f##Barrier, t old_value, t new_value, \ 
+ t volatile *ptr) { \ + SCOPED_TSAN_INTERCEPTOR(f##Barrier, old_value, new_value, ptr); \ + return tsan_atomic_f##_compare_exchange_strong( \ + (volatile tsan_t *)ptr, (tsan_t *)&old_value, (tsan_t)new_value, \ + kMacOrderBarrier, kMacFailureOrder); \ + } -#define OSATOMIC_INTERCEPTORS_CAS(f, tsan_atomic_f, tsan_t, t) \ - TSAN_INTERCEPTOR(bool, f, t old_value, t new_value, t volatile *ptr) { \ - SCOPED_TSAN_INTERCEPTOR(f, old_value, new_value, ptr); \ - return tsan_atomic_f##_compare_exchange_strong( \ - (volatile tsan_t *)ptr, (tsan_t *)&old_value, (tsan_t)new_value, \ - kMacOrderNonBarrier, kMacFailureOrder); \ - } \ - \ - TSAN_INTERCEPTOR(bool, f##Barrier, t old_value, t new_value, \ - t volatile *ptr) { \ - SCOPED_TSAN_INTERCEPTOR(f##Barrier, old_value, new_value, ptr); \ - return tsan_atomic_f##_compare_exchange_strong( \ - (volatile tsan_t *)ptr, (tsan_t *)&old_value, (tsan_t)new_value, \ - kMacOrderBarrier, kMacFailureOrder); \ - } - -#pragma clang diagnostic push -// OSAtomicCompareAndSwap* functions are deprecated. -#pragma clang diagnostic ignored "-Wdeprecated-declarations" +# pragma clang diagnostic push // OSAtomicCompareAndSwap* deprecation +# pragma clang diagnostic ignored "-Wdeprecated-declarations" OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwapInt, __tsan_atomic32, a32, int) OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwapLong, __tsan_atomic64, a64, long_t) @@ -139,25 +138,28 @@ OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwap32, __tsan_atomic32, a32, int32_t) OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwap64, __tsan_atomic64, a64, int64_t) -#pragma clang diagnostic pop - -#define OSATOMIC_INTERCEPTOR_BITOP(f, op, clear, mo) \ - TSAN_INTERCEPTOR(bool, f, uint32_t n, volatile void *ptr) { \ - SCOPED_TSAN_INTERCEPTOR(f, n, ptr); \ - volatile char *byte_ptr = ((volatile char *)ptr) + (n >> 3); \ - char bit = 0x80u >> (n & 7); \ - char mask = clear ? 
~bit : bit; \ - char orig_byte = op((volatile a8 *)byte_ptr, mask, mo); \ - return orig_byte & bit; \ - } +# pragma clang diagnostic pop // OSAtomicCompareAndSwap* deprecation + +# define OSATOMIC_INTERCEPTOR_BITOP(f, op, clear, mo) \ + TSAN_INTERCEPTOR(bool, f, uint32_t n, volatile void *ptr) { \ + SCOPED_TSAN_INTERCEPTOR(f, n, ptr); \ + volatile char *byte_ptr = ((volatile char *)ptr) + (n >> 3); \ + char bit = 0x80u >> (n & 7); \ + char mask = clear ? ~bit : bit; \ + char orig_byte = op((volatile a8 *)byte_ptr, mask, mo); \ + return orig_byte & bit; \ + } -#define OSATOMIC_INTERCEPTORS_BITOP(f, op, clear) \ - OSATOMIC_INTERCEPTOR_BITOP(f, op, clear, kMacOrderNonBarrier) \ - OSATOMIC_INTERCEPTOR_BITOP(f##Barrier, op, clear, kMacOrderBarrier) +# define OSATOMIC_INTERCEPTORS_BITOP(f, op, clear) \ + OSATOMIC_INTERCEPTOR_BITOP(f, op, clear, kMacOrderNonBarrier) \ + OSATOMIC_INTERCEPTOR_BITOP(f##Barrier, op, clear, kMacOrderBarrier) +# pragma clang diagnostic push // OSAtomicTestAnd* deprecation +# pragma clang diagnostic ignored "-Wdeprecated-declarations" OSATOMIC_INTERCEPTORS_BITOP(OSAtomicTestAndSet, __tsan_atomic8_fetch_or, false) OSATOMIC_INTERCEPTORS_BITOP(OSAtomicTestAndClear, __tsan_atomic8_fetch_and, true) +# pragma clang diagnostic pop // OSAtomicTestAnd* deprecation TSAN_INTERCEPTOR(void, OSAtomicEnqueue, OSQueueHead *list, void *item, size_t offset) { @@ -169,12 +171,13 @@ TSAN_INTERCEPTOR(void, OSAtomicEnqueue, OSQueueHead *list, void *item, TSAN_INTERCEPTOR(void *, OSAtomicDequeue, OSQueueHead *list, size_t offset) { SCOPED_TSAN_INTERCEPTOR(OSAtomicDequeue, list, offset); void *item = REAL(OSAtomicDequeue)(list, offset); - if (item) __tsan_acquire(item); + if (item) + __tsan_acquire(item); return item; } // OSAtomicFifoEnqueue and OSAtomicFifoDequeue are only on OS X. 
-#if !SANITIZER_IOS +# if !SANITIZER_IOS TSAN_INTERCEPTOR(void, OSAtomicFifoEnqueue, OSFifoQueueHead *list, void *item, size_t offset) { @@ -187,11 +190,22 @@ TSAN_INTERCEPTOR(void *, OSAtomicFifoDequeue, OSFifoQueueHead *list, size_t offset) { SCOPED_TSAN_INTERCEPTOR(OSAtomicFifoDequeue, list, offset); void *item = REAL(OSAtomicFifoDequeue)(list, offset); - if (item) __tsan_acquire(item); + if (item) + __tsan_acquire(item); return item; } -#endif +# endif + +// If `OSSPINLOCK_USE_INLINED=1` is set, then SDK headers don't declare these +// as functions, but macros that call non-deprecated APIs. Undefine these +// macros so they don't interfere with the interceptor machinery. +# undef OSSpinLockLock +# undef OSSpinLockTry +# undef OSSpinLockUnlock + +# pragma clang diagnostic push // OSSpinLock* deprecation +# pragma clang diagnostic ignored "-Wdeprecated-declarations" TSAN_INTERCEPTOR(void, OSSpinLockLock, volatile OSSpinLock *lock) { CHECK(!cur_thread()->is_dead); @@ -224,6 +238,7 @@ TSAN_INTERCEPTOR(void, OSSpinLockUnlock, volatile OSSpinLock *lock) { Release(thr, pc, (uptr)lock); REAL(OSSpinLockUnlock)(lock); } +# pragma clang diagnostic pop // OSSpinLock* deprecation TSAN_INTERCEPTOR(void, os_lock_lock, void *lock) { CHECK(!cur_thread()->is_dead); @@ -266,6 +281,25 @@ TSAN_INTERCEPTOR(void, os_unfair_lock_lock, os_unfair_lock_t lock) { Acquire(thr, pc, (uptr)lock); } +// os_unfair_lock_lock_with_flags was introduced in macOS 15 +# if defined(__MAC_15_0) || defined(__IPHONE_18_0) || defined(__TVOS_18_0) || \ + defined(__VISIONOS_2_0) || defined(__WATCHOS_11_0) +# pragma clang diagnostic push +# pragma clang diagnostic ignored "-Wunguarded-availability-new" +// We're just intercepting this - if it doesn't exist on the platform, then the +// process shouldn't have called it in the first place. 
+TSAN_INTERCEPTOR(void, os_unfair_lock_lock_with_flags, os_unfair_lock_t lock, + os_unfair_lock_flags_t flags) { + if (!cur_thread()->is_inited || cur_thread()->is_dead) { + return REAL(os_unfair_lock_lock_with_flags)(lock, flags); + } + SCOPED_TSAN_INTERCEPTOR(os_unfair_lock_lock_with_flags, lock, flags); + REAL(os_unfair_lock_lock_with_flags)(lock, flags); + Acquire(thr, pc, (uptr)lock); +} +# pragma clang diagnostic pop +# endif + TSAN_INTERCEPTOR(void, os_unfair_lock_lock_with_options, os_unfair_lock_t lock, u32 options) { if (!cur_thread()->is_inited || cur_thread()->is_dead) { @@ -296,7 +330,7 @@ TSAN_INTERCEPTOR(void, os_unfair_lock_unlock, os_unfair_lock_t lock) { REAL(os_unfair_lock_unlock)(lock); } -#if defined(__has_include) && __has_include() +# if defined(__has_include) && __has_include() TSAN_INTERCEPTOR(void, xpc_connection_set_event_handler, xpc_connection_t connection, xpc_handler_t handler) { @@ -350,7 +384,7 @@ TSAN_INTERCEPTOR(void, xpc_connection_cancel, xpc_connection_t connection) { REAL(xpc_connection_cancel)(connection); } -#endif // #if defined(__has_include) && __has_include() +# endif // #if defined(__has_include) && __has_include() // Determines whether the Obj-C object pointer is a tagged pointer. 
Tagged // pointers encode the object data directly in their pointer bits and do not @@ -373,7 +407,7 @@ static uptr GetOrCreateSyncAddress(uptr addr, ThreadState *thr, uptr pc) { Map::Handle h(&Addresses, addr); if (h.created()) { ThreadIgnoreBegin(thr, pc); - *h = (uptr) user_alloc(thr, pc, /*size=*/1); + *h = (uptr)user_alloc(thr, pc, /*size=*/1); ThreadIgnoreEnd(thr); } return *h; @@ -391,7 +425,8 @@ static uptr SyncAddressForObjCObject(id obj, ThreadState *thr, uptr pc) { TSAN_INTERCEPTOR(int, objc_sync_enter, id obj) { SCOPED_TSAN_INTERCEPTOR(objc_sync_enter, obj); - if (!obj) return REAL(objc_sync_enter)(obj); + if (!obj) + return REAL(objc_sync_enter)(obj); uptr addr = SyncAddressForObjCObject(obj, thr, pc); MutexPreLock(thr, pc, addr, MutexFlagWriteReentrant); int result = REAL(objc_sync_enter)(obj); @@ -402,11 +437,13 @@ TSAN_INTERCEPTOR(int, objc_sync_enter, id obj) { TSAN_INTERCEPTOR(int, objc_sync_exit, id obj) { SCOPED_TSAN_INTERCEPTOR(objc_sync_exit, obj); - if (!obj) return REAL(objc_sync_exit)(obj); + if (!obj) + return REAL(objc_sync_exit)(obj); uptr addr = SyncAddressForObjCObject(obj, thr, pc); MutexUnlock(thr, pc, addr); int result = REAL(objc_sync_exit)(obj); - if (result != OBJC_SYNC_SUCCESS) MutexInvalidAccess(thr, pc, addr); + if (result != OBJC_SYNC_SUCCESS) + MutexInvalidAccess(thr, pc, addr); return result; } @@ -437,7 +474,7 @@ TSAN_INTERCEPTOR(int, swapcontext, ucontext_t *oucp, const ucontext_t *ucp) { // On macOS, libc++ is always linked dynamically, so intercepting works the // usual way. 
-#define STDCXX_INTERCEPTOR TSAN_INTERCEPTOR +# define STDCXX_INTERCEPTOR TSAN_INTERCEPTOR namespace { struct fake_shared_weak_count { diff --git a/libsanitizer/tsan/tsan_interceptors_posix.cpp b/libsanitizer/tsan/tsan_interceptors_posix.cpp index f671c8167a3c..714220a0109a 100644 --- a/libsanitizer/tsan/tsan_interceptors_posix.cpp +++ b/libsanitizer/tsan/tsan_interceptors_posix.cpp @@ -12,6 +12,9 @@ // sanitizer_common/sanitizer_common_interceptors.inc //===----------------------------------------------------------------------===// +#include + +#include "interception/interception.h" #include "sanitizer_common/sanitizer_allocator_dlsym.h" #include "sanitizer_common/sanitizer_atomic.h" #include "sanitizer_common/sanitizer_errno.h" @@ -19,21 +22,24 @@ #include "sanitizer_common/sanitizer_internal_defs.h" #include "sanitizer_common/sanitizer_libc.h" #include "sanitizer_common/sanitizer_linux.h" +#include "sanitizer_common/sanitizer_placement_new.h" +#include "sanitizer_common/sanitizer_platform_interceptors.h" #include "sanitizer_common/sanitizer_platform_limits_netbsd.h" #include "sanitizer_common/sanitizer_platform_limits_posix.h" #include "sanitizer_common/sanitizer_posix.h" #include "sanitizer_common/sanitizer_stacktrace.h" #include "sanitizer_common/sanitizer_tls_get_addr.h" -#include "interception/interception.h" +#include "sanitizer_common/sanitizer_vector.h" +#include "tsan_fd.h" +#if SANITIZER_APPLE && !SANITIZER_GO +# include "tsan_flags.h" +#endif #include "tsan_interceptors.h" #include "tsan_interface.h" +#include "tsan_mman.h" #include "tsan_platform.h" -#include "tsan_suppressions.h" #include "tsan_rtl.h" -#include "tsan_mman.h" -#include "tsan_fd.h" - -#include +#include "tsan_suppressions.h" using namespace __tsan; @@ -76,17 +82,6 @@ struct ucontext_t { }; #endif -#if defined(__x86_64__) || defined(__mips__) || SANITIZER_PPC64V1 || \ - defined(__s390x__) -#define PTHREAD_ABI_BASE "GLIBC_2.3.2" -#elif defined(__aarch64__) || SANITIZER_PPC64V2 -#define 
PTHREAD_ABI_BASE "GLIBC_2.17" -#elif SANITIZER_LOONGARCH64 -#define PTHREAD_ABI_BASE "GLIBC_2.36" -#elif SANITIZER_RISCV64 -# define PTHREAD_ABI_BASE "GLIBC_2.27" -#endif - extern "C" int pthread_attr_init(void *attr); extern "C" int pthread_attr_destroy(void *attr); DECLARE_REAL(int, pthread_attr_getdetachstate, void *, void *) @@ -177,7 +172,7 @@ struct ThreadSignalContext { SignalDesc pending_signals[kSigCount]; // emptyset and oldset are too big for stack. __sanitizer_sigset_t emptyset; - __sanitizer_sigset_t oldset; + __sanitizer::Vector<__sanitizer_sigset_t> oldset; }; void EnterBlockingFunc(ThreadState *thr) { @@ -338,11 +333,6 @@ void ScopedInterceptor::DisableIgnoresImpl() { } #define TSAN_INTERCEPT(func) INTERCEPT_FUNCTION(func) -#if SANITIZER_FREEBSD || SANITIZER_NETBSD -# define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION(func) -#else -# define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION_VER(func, ver) -#endif #if SANITIZER_FREEBSD # define TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(func) \ INTERCEPT_FUNCTION(_pthread_##func) @@ -558,6 +548,7 @@ static void SetJmp(ThreadState *thr, uptr sp) { buf->shadow_stack_pos = thr->shadow_stack_pos; ThreadSignalContext *sctx = SigCtx(thr); buf->int_signal_send = sctx ? sctx->int_signal_send : 0; + buf->oldset_stack_size = sctx ? 
sctx->oldset.Size() : 0; buf->in_blocking_func = atomic_load(&thr->in_blocking_func, memory_order_relaxed); buf->in_signal_handler = atomic_load(&thr->in_signal_handler, memory_order_relaxed); @@ -574,8 +565,11 @@ static void LongJmp(ThreadState *thr, uptr *env) { while (thr->shadow_stack_pos > buf->shadow_stack_pos) FuncExit(thr); ThreadSignalContext *sctx = SigCtx(thr); - if (sctx) + if (sctx) { sctx->int_signal_send = buf->int_signal_send; + while (sctx->oldset.Size() > buf->oldset_stack_size) + sctx->oldset.PopBack(); + } atomic_store(&thr->in_blocking_func, buf->in_blocking_func, memory_order_relaxed); atomic_store(&thr->in_signal_handler, buf->in_signal_handler, @@ -742,6 +736,41 @@ TSAN_INTERCEPTOR(void, free, void *p) { user_free(thr, pc, p); } +# if SANITIZER_INTERCEPT_FREE_SIZED +TSAN_INTERCEPTOR(void, free_sized, void *p, uptr size) { + if (UNLIKELY(!p)) + return; + if (in_symbolizer()) + return InternalFree(p); + if (DlsymAlloc::PointerIsMine(p)) + return DlsymAlloc::Free(p); + invoke_free_hook(p); + SCOPED_INTERCEPTOR_RAW(free_sized, p, size); + user_free(thr, pc, p); +} +# define TSAN_MAYBE_INTERCEPT_FREE_SIZED INTERCEPT_FUNCTION(free_sized) +# else +# define TSAN_MAYBE_INTERCEPT_FREE_SIZED +# endif + +# if SANITIZER_INTERCEPT_FREE_ALIGNED_SIZED +TSAN_INTERCEPTOR(void, free_aligned_sized, void *p, uptr alignment, uptr size) { + if (UNLIKELY(!p)) + return; + if (in_symbolizer()) + return InternalFree(p); + if (DlsymAlloc::PointerIsMine(p)) + return DlsymAlloc::Free(p); + invoke_free_hook(p); + SCOPED_INTERCEPTOR_RAW(free_aligned_sized, p, alignment, size); + user_free(thr, pc, p); +} +# define TSAN_MAYBE_INTERCEPT_FREE_ALIGNED_SIZED \ + INTERCEPT_FUNCTION(free_aligned_sized) +# else +# define TSAN_MAYBE_INTERCEPT_FREE_ALIGNED_SIZED +# endif + TSAN_INTERCEPTOR(void, cfree, void *p) { if (UNLIKELY(!p)) return; @@ -758,6 +787,9 @@ TSAN_INTERCEPTOR(uptr, malloc_usable_size, void *p) { SCOPED_INTERCEPTOR_RAW(malloc_usable_size, p); return 
user_alloc_usable_size(p); } +#else +# define TSAN_MAYBE_INTERCEPT_FREE_SIZED +# define TSAN_MAYBE_INTERCEPT_FREE_ALIGNED_SIZED #endif TSAN_INTERCEPTOR(char *, strcpy, char *dst, const char *src) { @@ -892,10 +924,9 @@ constexpr u32 kGuardWaiter = 1 << 17; static int guard_acquire(ThreadState *thr, uptr pc, atomic_uint32_t *g, bool blocking_hooks = true) { - if (blocking_hooks) - OnPotentiallyBlockingRegionBegin(); - auto on_exit = at_scope_exit([blocking_hooks] { - if (blocking_hooks) + bool in_potentially_blocking_region = false; + auto on_exit = at_scope_exit([&] { + if (in_potentially_blocking_region) OnPotentiallyBlockingRegionEnd(); }); @@ -912,8 +943,13 @@ static int guard_acquire(ThreadState *thr, uptr pc, atomic_uint32_t *g, } else { if ((cmp & kGuardWaiter) || atomic_compare_exchange_strong(g, &cmp, cmp | kGuardWaiter, - memory_order_relaxed)) + memory_order_relaxed)) { + if (blocking_hooks && !in_potentially_blocking_region) { + in_potentially_blocking_region = true; + OnPotentiallyBlockingRegionBegin(); + } FutexWait(g, cmp | kGuardWaiter); + } } } } @@ -976,6 +1012,7 @@ void PlatformCleanUpThreadState(ThreadState *thr) { &thr->signal_ctx, memory_order_relaxed); if (sctx) { atomic_store(&thr->signal_ctx, 0, memory_order_relaxed); + sctx->oldset.Reset(); UnmapOrDie(sctx, sizeof(*sctx)); } } @@ -1096,6 +1133,22 @@ TSAN_INTERCEPTOR(int, pthread_create, TSAN_INTERCEPTOR(int, pthread_join, void *th, void **ret) { SCOPED_INTERCEPTOR_RAW(pthread_join, th, ret); +#if SANITIZER_ANDROID + { + // In Bionic, if the target thread has already exited when pthread_detach is + // called, pthread_detach will call pthread_join internally to clean it up. + // In that case, the thread has already been consumed by the pthread_detach + // interceptor. 
+ Tid tid = ctx->thread_registry.FindThread( + [](ThreadContextBase* tctx, void* arg) { + return tctx->user_id == (uptr)arg; + }, + th); + if (tid == kInvalidTid) { + return REAL(pthread_join)(th, ret); + } + } +#endif Tid tid = ThreadConsumeTid(thr, pc, (uptr)th); ThreadIgnoreBegin(thr, pc); int res = BLOCK_REAL(pthread_join)(th, ret); @@ -1615,6 +1668,14 @@ TSAN_INTERCEPTOR(int, pthread_barrier_wait, void *b) { TSAN_INTERCEPTOR(int, pthread_once, void *o, void (*f)()) { SCOPED_INTERCEPTOR_RAW(pthread_once, o, f); +#if SANITIZER_APPLE && !SANITIZER_GO + if (flags()->lock_during_write != kLockDuringAllWrites && + cur_thread_init()->in_internal_write_call) { + // This is needed to make it through process launch without hanging + f(); + return 0; + } +#endif if (o == 0 || f == 0) return errno_EINVAL; atomic_uint32_t *a; @@ -2092,13 +2153,29 @@ static void ReportErrnoSpoiling(ThreadState *thr, uptr pc, int sig) { // StackTrace::GetNestInstructionPc(pc) is used because return address is // expected, OutputReport() will undo this. 
ObtainCurrentStack(thr, StackTrace::GetNextInstructionPc(pc), &stack); - ThreadRegistryLock l(&ctx->thread_registry); - ScopedReport rep(ReportTypeErrnoInSignal); - rep.SetSigNum(sig); - if (!IsFiredSuppression(ctx, ReportTypeErrnoInSignal, stack)) { - rep.AddStack(stack, true); - OutputReport(thr, rep); + // Use alloca, because malloc during signal handling deadlocks + ScopedReport *rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport)); + bool suppressed; + // Take a new scope as Apple platforms require the below locks released + // before symbolizing in order to avoid a deadlock + { + ThreadRegistryLock l(&ctx->thread_registry); + new (rep) ScopedReport(ReportTypeErrnoInSignal); + rep->SetSigNum(sig); + suppressed = IsFiredSuppression(ctx, ReportTypeErrnoInSignal, stack); + if (!suppressed) + rep->AddStack(stack, true); +#if SANITIZER_APPLE + } // Close this scope to release the locks before writing report +#endif + if (!suppressed) + OutputReport(thr, *rep); + + // Need to manually destroy this because we used placement new to allocate + rep->~ScopedReport(); +#if !SANITIZER_APPLE } +#endif } static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire, @@ -2172,7 +2249,8 @@ void ProcessPendingSignalsImpl(ThreadState *thr) { return; atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed); internal_sigfillset(&sctx->emptyset); - int res = REAL(pthread_sigmask)(SIG_SETMASK, &sctx->emptyset, &sctx->oldset); + __sanitizer_sigset_t *oldset = sctx->oldset.PushBack(); + int res = REAL(pthread_sigmask)(SIG_SETMASK, &sctx->emptyset, oldset); CHECK_EQ(res, 0); for (int sig = 0; sig < kSigCount; sig++) { SignalDesc *signal = &sctx->pending_signals[sig]; @@ -2182,8 +2260,9 @@ void ProcessPendingSignalsImpl(ThreadState *thr) { &signal->ctx); } } - res = REAL(pthread_sigmask)(SIG_SETMASK, &sctx->oldset, 0); + res = REAL(pthread_sigmask)(SIG_SETMASK, oldset, 0); CHECK_EQ(res, 0); + sctx->oldset.PopBack(); 
atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed); } @@ -2360,7 +2439,11 @@ TSAN_INTERCEPTOR(int, vfork, int fake) { } #endif -#if SANITIZER_LINUX +#if SANITIZER_LINUX && !SANITIZER_ANDROID +// Bionic's pthread_create internally calls clone. When the CLONE_THREAD flag is +// set, clone does not create a new process but a new thread. This is a +// workaround for Android. Disabling the interception of clone solves the +// problem in most scenarios. TSAN_INTERCEPTOR(int, clone, int (*fn)(void *), void *stack, int flags, void *arg, int *parent_tid, void *tls, pid_t *child_tid) { SCOPED_INTERCEPTOR_RAW(clone, fn, stack, flags, arg, parent_tid, tls, @@ -2837,12 +2920,12 @@ TSAN_INTERCEPTOR(void, _lwp_exit) { #endif #if SANITIZER_FREEBSD -TSAN_INTERCEPTOR(void, thr_exit, tid_t *state) { +TSAN_INTERCEPTOR(void, thr_exit, ThreadID *state) { SCOPED_TSAN_INTERCEPTOR(thr_exit, state); DestroyThreadState(); REAL(thr_exit(state)); } -#define TSAN_MAYBE_INTERCEPT_THR_EXIT TSAN_INTERCEPT(thr_exit) +# define TSAN_MAYBE_INTERCEPT_THR_EXIT TSAN_INTERCEPT(thr_exit) #else #define TSAN_MAYBE_INTERCEPT_THR_EXIT #endif @@ -2951,6 +3034,8 @@ void InitializeInterceptors() { TSAN_INTERCEPT(realloc); TSAN_INTERCEPT(reallocarray); TSAN_INTERCEPT(free); + TSAN_MAYBE_INTERCEPT_FREE_SIZED; + TSAN_MAYBE_INTERCEPT_FREE_ALIGNED_SIZED; TSAN_INTERCEPT(cfree); TSAN_INTERCEPT(munmap); TSAN_MAYBE_INTERCEPT_MEMALIGN; @@ -2971,12 +3056,26 @@ void InitializeInterceptors() { TSAN_INTERCEPT(pthread_timedjoin_np); #endif - TSAN_INTERCEPT_VER(pthread_cond_init, PTHREAD_ABI_BASE); - TSAN_INTERCEPT_VER(pthread_cond_signal, PTHREAD_ABI_BASE); - TSAN_INTERCEPT_VER(pthread_cond_broadcast, PTHREAD_ABI_BASE); - TSAN_INTERCEPT_VER(pthread_cond_wait, PTHREAD_ABI_BASE); - TSAN_INTERCEPT_VER(pthread_cond_timedwait, PTHREAD_ABI_BASE); - TSAN_INTERCEPT_VER(pthread_cond_destroy, PTHREAD_ABI_BASE); + // In glibc versions older than 2.36, dlsym(RTLD_NEXT, "pthread_cond_init") + // may return an outdated 
symbol (max(2.2,base_version)) if the port was + // introduced before 2.3.2 (when the new pthread_cond_t was introduced). +#if SANITIZER_GLIBC && !__GLIBC_PREREQ(2, 36) && \ + (defined(__x86_64__) || defined(__mips__) || SANITIZER_PPC64V1 || \ + defined(__s390x__)) + INTERCEPT_FUNCTION_VER(pthread_cond_init, "GLIBC_2.3.2"); + INTERCEPT_FUNCTION_VER(pthread_cond_signal, "GLIBC_2.3.2"); + INTERCEPT_FUNCTION_VER(pthread_cond_broadcast, "GLIBC_2.3.2"); + INTERCEPT_FUNCTION_VER(pthread_cond_wait, "GLIBC_2.3.2"); + INTERCEPT_FUNCTION_VER(pthread_cond_timedwait, "GLIBC_2.3.2"); + INTERCEPT_FUNCTION_VER(pthread_cond_destroy, "GLIBC_2.3.2"); +#else + INTERCEPT_FUNCTION(pthread_cond_init); + INTERCEPT_FUNCTION(pthread_cond_signal); + INTERCEPT_FUNCTION(pthread_cond_broadcast); + INTERCEPT_FUNCTION(pthread_cond_wait); + INTERCEPT_FUNCTION(pthread_cond_timedwait); + INTERCEPT_FUNCTION(pthread_cond_destroy); +#endif TSAN_MAYBE_PTHREAD_COND_CLOCKWAIT; @@ -3067,12 +3166,16 @@ void InitializeInterceptors() { TSAN_INTERCEPT(fork); TSAN_INTERCEPT(vfork); -#if SANITIZER_LINUX +#if SANITIZER_LINUX && !SANITIZER_ANDROID TSAN_INTERCEPT(clone); #endif #if !SANITIZER_ANDROID TSAN_INTERCEPT(dl_iterate_phdr); #endif + + // Symbolization indirectly calls dl_iterate_phdr + ready_to_symbolize = true; + TSAN_MAYBE_INTERCEPT_ON_EXIT; TSAN_INTERCEPT(__cxa_atexit); TSAN_INTERCEPT(_exit); diff --git a/libsanitizer/tsan/tsan_interface.h b/libsanitizer/tsan/tsan_interface.h index 3731c90d4591..db94cf48f9c2 100644 --- a/libsanitizer/tsan/tsan_interface.h +++ b/libsanitizer/tsan/tsan_interface.h @@ -16,8 +16,8 @@ #define TSAN_INTERFACE_H #include +using __sanitizer::ThreadID; using __sanitizer::uptr; -using __sanitizer::tid_t; // This header should NOT include any other headers. // All functions in this header are extern "C" and start with __tsan_. 
@@ -175,7 +175,7 @@ int __tsan_get_report_mutex(void *report, uptr idx, uptr *mutex_id, void **addr, // Returns information about threads included in the report. SANITIZER_INTERFACE_ATTRIBUTE -int __tsan_get_report_thread(void *report, uptr idx, int *tid, tid_t *os_id, +int __tsan_get_report_thread(void *report, uptr idx, int *tid, ThreadID *os_id, int *running, const char **name, int *parent_tid, void **trace, uptr trace_size); @@ -192,7 +192,7 @@ const char *__tsan_locate_address(uptr addr, char *name, uptr name_size, // Returns the allocation stack for a heap pointer. SANITIZER_INTERFACE_ATTRIBUTE int __tsan_get_alloc_stack(uptr addr, uptr *trace, uptr size, int *thread_id, - tid_t *os_id); + ThreadID *os_id); #endif // SANITIZER_GO @@ -203,17 +203,18 @@ int __tsan_get_alloc_stack(uptr addr, uptr *trace, uptr size, int *thread_id, namespace __tsan { // These should match declarations from public tsan_interface_atomic.h header. -typedef unsigned char a8; +typedef unsigned char a8; typedef unsigned short a16; -typedef unsigned int a32; +typedef unsigned int a32; typedef unsigned long long a64; -#if !SANITIZER_GO && (defined(__SIZEOF_INT128__) \ - || (__clang_major__ * 100 + __clang_minor__ >= 302)) && \ +#if !SANITIZER_GO && \ + (defined(__SIZEOF_INT128__) || \ + (__clang_major__ * 100 + __clang_minor__ >= 302)) && \ !defined(__mips64) && !defined(__s390x__) __extension__ typedef __int128 a128; -# define __TSAN_HAS_INT128 1 +# define __TSAN_HAS_INT128 1 #else -# define __TSAN_HAS_INT128 0 +# define __TSAN_HAS_INT128 0 #endif // Part of ABI, do not change. 
@@ -231,180 +232,180 @@ struct ThreadState; extern "C" { SANITIZER_INTERFACE_ATTRIBUTE -a8 __tsan_atomic8_load(const volatile a8 *a, morder mo); +a8 __tsan_atomic8_load(const volatile a8 *a, int mo); SANITIZER_INTERFACE_ATTRIBUTE -a16 __tsan_atomic16_load(const volatile a16 *a, morder mo); +a16 __tsan_atomic16_load(const volatile a16 *a, int mo); SANITIZER_INTERFACE_ATTRIBUTE -a32 __tsan_atomic32_load(const volatile a32 *a, morder mo); +a32 __tsan_atomic32_load(const volatile a32 *a, int mo); SANITIZER_INTERFACE_ATTRIBUTE -a64 __tsan_atomic64_load(const volatile a64 *a, morder mo); +a64 __tsan_atomic64_load(const volatile a64 *a, int mo); #if __TSAN_HAS_INT128 SANITIZER_INTERFACE_ATTRIBUTE -a128 __tsan_atomic128_load(const volatile a128 *a, morder mo); +a128 __tsan_atomic128_load(const volatile a128 *a, int mo); #endif SANITIZER_INTERFACE_ATTRIBUTE -void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo); +void __tsan_atomic8_store(volatile a8 *a, a8 v, int mo); SANITIZER_INTERFACE_ATTRIBUTE -void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo); +void __tsan_atomic16_store(volatile a16 *a, a16 v, int mo); SANITIZER_INTERFACE_ATTRIBUTE -void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo); +void __tsan_atomic32_store(volatile a32 *a, a32 v, int mo); SANITIZER_INTERFACE_ATTRIBUTE -void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo); +void __tsan_atomic64_store(volatile a64 *a, a64 v, int mo); #if __TSAN_HAS_INT128 SANITIZER_INTERFACE_ATTRIBUTE -void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo); +void __tsan_atomic128_store(volatile a128 *a, a128 v, int mo); #endif SANITIZER_INTERFACE_ATTRIBUTE -a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo); +a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, int mo); SANITIZER_INTERFACE_ATTRIBUTE -a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo); +a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, int mo); SANITIZER_INTERFACE_ATTRIBUTE -a32 
__tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo); +a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, int mo); SANITIZER_INTERFACE_ATTRIBUTE -a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo); +a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, int mo); #if __TSAN_HAS_INT128 SANITIZER_INTERFACE_ATTRIBUTE -a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo); +a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, int mo); #endif SANITIZER_INTERFACE_ATTRIBUTE -a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo); +a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, int mo); SANITIZER_INTERFACE_ATTRIBUTE -a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo); +a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, int mo); SANITIZER_INTERFACE_ATTRIBUTE -a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo); +a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, int mo); SANITIZER_INTERFACE_ATTRIBUTE -a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo); +a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, int mo); #if __TSAN_HAS_INT128 SANITIZER_INTERFACE_ATTRIBUTE -a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo); +a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, int mo); #endif SANITIZER_INTERFACE_ATTRIBUTE -a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo); +a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, int mo); SANITIZER_INTERFACE_ATTRIBUTE -a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo); +a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, int mo); SANITIZER_INTERFACE_ATTRIBUTE -a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo); +a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, int mo); SANITIZER_INTERFACE_ATTRIBUTE -a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo); +a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, int mo); #if __TSAN_HAS_INT128 
SANITIZER_INTERFACE_ATTRIBUTE -a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo); +a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, int mo); #endif SANITIZER_INTERFACE_ATTRIBUTE -a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo); +a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, int mo); SANITIZER_INTERFACE_ATTRIBUTE -a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo); +a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, int mo); SANITIZER_INTERFACE_ATTRIBUTE -a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo); +a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, int mo); SANITIZER_INTERFACE_ATTRIBUTE -a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo); +a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, int mo); #if __TSAN_HAS_INT128 SANITIZER_INTERFACE_ATTRIBUTE -a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo); +a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, int mo); #endif SANITIZER_INTERFACE_ATTRIBUTE -a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo); +a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, int mo); SANITIZER_INTERFACE_ATTRIBUTE -a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo); +a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, int mo); SANITIZER_INTERFACE_ATTRIBUTE -a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo); +a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, int mo); SANITIZER_INTERFACE_ATTRIBUTE -a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo); +a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, int mo); #if __TSAN_HAS_INT128 SANITIZER_INTERFACE_ATTRIBUTE -a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo); +a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, int mo); #endif SANITIZER_INTERFACE_ATTRIBUTE -a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo); +a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, int mo); 
SANITIZER_INTERFACE_ATTRIBUTE -a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo); +a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, int mo); SANITIZER_INTERFACE_ATTRIBUTE -a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo); +a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, int mo); SANITIZER_INTERFACE_ATTRIBUTE -a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo); +a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, int mo); #if __TSAN_HAS_INT128 SANITIZER_INTERFACE_ATTRIBUTE -a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo); +a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, int mo); #endif SANITIZER_INTERFACE_ATTRIBUTE -a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo); +a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, int mo); SANITIZER_INTERFACE_ATTRIBUTE -a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo); +a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, int mo); SANITIZER_INTERFACE_ATTRIBUTE -a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo); +a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, int mo); SANITIZER_INTERFACE_ATTRIBUTE -a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo); +a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, int mo); #if __TSAN_HAS_INT128 SANITIZER_INTERFACE_ATTRIBUTE -a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo); +a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, int mo); #endif SANITIZER_INTERFACE_ATTRIBUTE -int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v, - morder mo, morder fmo); +int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v, int mo, + int fmo); SANITIZER_INTERFACE_ATTRIBUTE int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v, - morder mo, morder fmo); + int mo, int fmo); SANITIZER_INTERFACE_ATTRIBUTE int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, 
a32 v, - morder mo, morder fmo); + int mo, int fmo); SANITIZER_INTERFACE_ATTRIBUTE int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v, - morder mo, morder fmo); + int mo, int fmo); #if __TSAN_HAS_INT128 SANITIZER_INTERFACE_ATTRIBUTE int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v, - morder mo, morder fmo); + int mo, int fmo); #endif SANITIZER_INTERFACE_ATTRIBUTE -int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v, morder mo, - morder fmo); +int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v, int mo, + int fmo); SANITIZER_INTERFACE_ATTRIBUTE int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v, - morder mo, morder fmo); + int mo, int fmo); SANITIZER_INTERFACE_ATTRIBUTE int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v, - morder mo, morder fmo); + int mo, int fmo); SANITIZER_INTERFACE_ATTRIBUTE int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v, - morder mo, morder fmo); + int mo, int fmo); #if __TSAN_HAS_INT128 SANITIZER_INTERFACE_ATTRIBUTE int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v, - morder mo, morder fmo); + int mo, int fmo); #endif SANITIZER_INTERFACE_ATTRIBUTE -a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v, morder mo, - morder fmo); +a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v, int mo, + int fmo); SANITIZER_INTERFACE_ATTRIBUTE -a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v, - morder mo, morder fmo); +a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v, int mo, + int fmo); SANITIZER_INTERFACE_ATTRIBUTE -a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v, - morder mo, morder fmo); +a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v, int mo, + int fmo); SANITIZER_INTERFACE_ATTRIBUTE -a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v, 
- morder mo, morder fmo); +a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v, int mo, + int fmo); #if __TSAN_HAS_INT128 SANITIZER_INTERFACE_ATTRIBUTE a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v, - morder mo, morder fmo); + int mo, int fmo); #endif SANITIZER_INTERFACE_ATTRIBUTE -void __tsan_atomic_thread_fence(morder mo); +void __tsan_atomic_thread_fence(int mo); SANITIZER_INTERFACE_ATTRIBUTE -void __tsan_atomic_signal_fence(morder mo); +void __tsan_atomic_signal_fence(int mo); SANITIZER_INTERFACE_ATTRIBUTE void __tsan_go_atomic32_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a); diff --git a/libsanitizer/tsan/tsan_interface_ann.cpp b/libsanitizer/tsan/tsan_interface_ann.cpp index befd6a369026..02ca82369aae 100644 --- a/libsanitizer/tsan/tsan_interface_ann.cpp +++ b/libsanitizer/tsan/tsan_interface_ann.cpp @@ -437,16 +437,30 @@ void __tsan_mutex_post_divert(void *addr, unsigned flagz) { } static void ReportMutexHeldWrongContext(ThreadState *thr, uptr pc) { - ThreadRegistryLock l(&ctx->thread_registry); - ScopedReport rep(ReportTypeMutexHeldWrongContext); - for (uptr i = 0; i < thr->mset.Size(); ++i) { - MutexSet::Desc desc = thr->mset.Get(i); - rep.AddMutex(desc.addr, desc.stack_id); + // Use alloca, because malloc during signal handling deadlocks + ScopedReport *rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport)); + // Take a new scope as Apple platforms require the below locks released + // before symbolizing in order to avoid a deadlock + { + ThreadRegistryLock l(&ctx->thread_registry); + new (rep) ScopedReport(ReportTypeMutexHeldWrongContext); + for (uptr i = 0; i < thr->mset.Size(); ++i) { + MutexSet::Desc desc = thr->mset.Get(i); + rep->AddMutex(desc.addr, desc.stack_id); + } + VarSizeStackTrace trace; + ObtainCurrentStack(thr, pc, &trace); + rep->AddStack(trace, true); +#if SANITIZER_APPLE + } // Close this scope to release the locks +#endif + OutputReport(thr, *rep); + + // Need to manually 
destroy this because we used placement new to allocate + rep->~ScopedReport(); +#if !SANITIZER_APPLE } - VarSizeStackTrace trace; - ObtainCurrentStack(thr, pc, &trace); - rep.AddStack(trace, true); - OutputReport(thr, rep); +#endif } INTERFACE_ATTRIBUTE diff --git a/libsanitizer/tsan/tsan_interface_atomic.cpp b/libsanitizer/tsan/tsan_interface_atomic.cpp index 2b5a2c6ef79b..527e5a9b4a8d 100644 --- a/libsanitizer/tsan/tsan_interface_atomic.cpp +++ b/libsanitizer/tsan/tsan_interface_atomic.cpp @@ -18,9 +18,9 @@ // The following page contains more background information: // http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/ +#include "sanitizer_common/sanitizer_mutex.h" #include "sanitizer_common/sanitizer_placement_new.h" #include "sanitizer_common/sanitizer_stacktrace.h" -#include "sanitizer_common/sanitizer_mutex.h" #include "tsan_flags.h" #include "tsan_interface.h" #include "tsan_rtl.h" @@ -34,8 +34,8 @@ static StaticSpinMutex mutex128; #if SANITIZER_DEBUG static bool IsLoadOrder(morder mo) { - return mo == mo_relaxed || mo == mo_consume - || mo == mo_acquire || mo == mo_seq_cst; + return mo == mo_relaxed || mo == mo_consume || mo == mo_acquire || + mo == mo_seq_cst; } static bool IsStoreOrder(morder mo) { @@ -48,42 +48,49 @@ static bool IsReleaseOrder(morder mo) { } static bool IsAcquireOrder(morder mo) { - return mo == mo_consume || mo == mo_acquire - || mo == mo_acq_rel || mo == mo_seq_cst; + return mo == mo_consume || mo == mo_acquire || mo == mo_acq_rel || + mo == mo_seq_cst; } static bool IsAcqRelOrder(morder mo) { return mo == mo_acq_rel || mo == mo_seq_cst; } -template T func_xchg(volatile T *v, T op) { +template +T func_xchg(volatile T *v, T op) { T res = __sync_lock_test_and_set(v, op); // __sync_lock_test_and_set does not contain full barrier. 
__sync_synchronize(); return res; } -template T func_add(volatile T *v, T op) { +template +T func_add(volatile T *v, T op) { return __sync_fetch_and_add(v, op); } -template T func_sub(volatile T *v, T op) { +template +T func_sub(volatile T *v, T op) { return __sync_fetch_and_sub(v, op); } -template T func_and(volatile T *v, T op) { +template +T func_and(volatile T *v, T op) { return __sync_fetch_and_and(v, op); } -template T func_or(volatile T *v, T op) { +template +T func_or(volatile T *v, T op) { return __sync_fetch_and_or(v, op); } -template T func_xor(volatile T *v, T op) { +template +T func_xor(volatile T *v, T op) { return __sync_fetch_and_xor(v, op); } -template T func_nand(volatile T *v, T op) { +template +T func_nand(volatile T *v, T op) { // clang does not support __sync_fetch_and_nand. T cmp = *v; for (;;) { @@ -95,7 +102,8 @@ template T func_nand(volatile T *v, T op) { } } -template T func_cas(volatile T *v, T cmp, T xch) { +template +T func_cas(volatile T *v, T cmp, T xch) { return __sync_val_compare_and_swap(v, cmp, xch); } @@ -103,8 +111,8 @@ template T func_cas(volatile T *v, T cmp, T xch) { // Atomic ops are executed under tsan internal mutex, // here we assume that the atomic variables are not accessed // from non-instrumented code. 
-#if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) && !SANITIZER_GO \ - && __TSAN_HAS_INT128 +#if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) && !SANITIZER_GO && \ + __TSAN_HAS_INT128 a128 func_xchg(volatile a128 *v, a128 op) { SpinMutexLock lock(&mutex128); a128 cmp = *v; @@ -197,89 +205,24 @@ static atomic_uint64_t *to_atomic(const volatile a64 *a) { static memory_order to_mo(morder mo) { switch (mo) { - case mo_relaxed: return memory_order_relaxed; - case mo_consume: return memory_order_consume; - case mo_acquire: return memory_order_acquire; - case mo_release: return memory_order_release; - case mo_acq_rel: return memory_order_acq_rel; - case mo_seq_cst: return memory_order_seq_cst; + case mo_relaxed: + return memory_order_relaxed; + case mo_consume: + return memory_order_consume; + case mo_acquire: + return memory_order_acquire; + case mo_release: + return memory_order_release; + case mo_acq_rel: + return memory_order_acq_rel; + case mo_seq_cst: + return memory_order_seq_cst; } DCHECK(0); return memory_order_seq_cst; } -template -static T NoTsanAtomicLoad(const volatile T *a, morder mo) { - return atomic_load(to_atomic(a), to_mo(mo)); -} - -#if __TSAN_HAS_INT128 && !SANITIZER_GO -static a128 NoTsanAtomicLoad(const volatile a128 *a, morder mo) { - SpinMutexLock lock(&mutex128); - return *a; -} -#endif - -template -static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a, morder mo) { - DCHECK(IsLoadOrder(mo)); - // This fast-path is critical for performance. - // Assume the access is atomic. - if (!IsAcquireOrder(mo)) { - MemoryAccess(thr, pc, (uptr)a, AccessSize(), - kAccessRead | kAccessAtomic); - return NoTsanAtomicLoad(a, mo); - } - // Don't create sync object if it does not exist yet. For example, an atomic - // pointer is initialized to nullptr and then periodically acquire-loaded. 
- T v = NoTsanAtomicLoad(a, mo); - SyncVar *s = ctx->metamap.GetSyncIfExists((uptr)a); - if (s) { - SlotLocker locker(thr); - ReadLock lock(&s->mtx); - thr->clock.Acquire(s->clock); - // Re-read under sync mutex because we need a consistent snapshot - // of the value and the clock we acquire. - v = NoTsanAtomicLoad(a, mo); - } - MemoryAccess(thr, pc, (uptr)a, AccessSize(), kAccessRead | kAccessAtomic); - return v; -} - -template -static void NoTsanAtomicStore(volatile T *a, T v, morder mo) { - atomic_store(to_atomic(a), v, to_mo(mo)); -} - -#if __TSAN_HAS_INT128 && !SANITIZER_GO -static void NoTsanAtomicStore(volatile a128 *a, a128 v, morder mo) { - SpinMutexLock lock(&mutex128); - *a = v; -} -#endif - -template -static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v, - morder mo) { - DCHECK(IsStoreOrder(mo)); - MemoryAccess(thr, pc, (uptr)a, AccessSize(), kAccessWrite | kAccessAtomic); - // This fast-path is critical for performance. - // Assume the access is atomic. - // Strictly saying even relaxed store cuts off release sequence, - // so must reset the clock. 
- if (!IsReleaseOrder(mo)) { - NoTsanAtomicStore(a, v, mo); - return; - } - SlotLocker locker(thr); - { - auto s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false); - Lock lock(&s->mtx); - thr->clock.ReleaseStore(&s->clock); - NoTsanAtomicStore(a, v, mo); - } - IncrementEpoch(thr); -} +namespace { template static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) { @@ -303,175 +246,265 @@ static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) { return v; } -template -static T NoTsanAtomicExchange(volatile T *a, T v, morder mo) { - return func_xchg(a, v); -} +struct OpLoad { + template + static T NoTsanAtomic(morder mo, const volatile T *a) { + return atomic_load(to_atomic(a), to_mo(mo)); + } -template -static T NoTsanAtomicFetchAdd(volatile T *a, T v, morder mo) { - return func_add(a, v); -} +#if __TSAN_HAS_INT128 && !SANITIZER_GO + static a128 NoTsanAtomic(morder mo, const volatile a128 *a) { + SpinMutexLock lock(&mutex128); + return *a; + } +#endif -template -static T NoTsanAtomicFetchSub(volatile T *a, T v, morder mo) { - return func_sub(a, v); -} + template + static T Atomic(ThreadState *thr, uptr pc, morder mo, const volatile T *a) { + DCHECK(IsLoadOrder(mo)); + // This fast-path is critical for performance. + // Assume the access is atomic. + if (!IsAcquireOrder(mo)) { + MemoryAccess(thr, pc, (uptr)a, AccessSize(), + kAccessRead | kAccessAtomic); + return NoTsanAtomic(mo, a); + } + // Don't create sync object if it does not exist yet. For example, an atomic + // pointer is initialized to nullptr and then periodically acquire-loaded. + T v = NoTsanAtomic(mo, a); + SyncVar *s = ctx->metamap.GetSyncIfExists((uptr)a); + if (s) { + SlotLocker locker(thr); + ReadLock lock(&s->mtx); + thr->clock.Acquire(s->clock); + // Re-read under sync mutex because we need a consistent snapshot + // of the value and the clock we acquire. 
+ v = NoTsanAtomic(mo, a); + } + MemoryAccess(thr, pc, (uptr)a, AccessSize(), + kAccessRead | kAccessAtomic); + return v; + } +}; -template -static T NoTsanAtomicFetchAnd(volatile T *a, T v, morder mo) { - return func_and(a, v); -} +struct OpStore { + template + static void NoTsanAtomic(morder mo, volatile T *a, T v) { + atomic_store(to_atomic(a), v, to_mo(mo)); + } -template -static T NoTsanAtomicFetchOr(volatile T *a, T v, morder mo) { - return func_or(a, v); -} +#if __TSAN_HAS_INT128 && !SANITIZER_GO + static void NoTsanAtomic(morder mo, volatile a128 *a, a128 v) { + SpinMutexLock lock(&mutex128); + *a = v; + } +#endif -template -static T NoTsanAtomicFetchXor(volatile T *a, T v, morder mo) { - return func_xor(a, v); -} + template + static void Atomic(ThreadState *thr, uptr pc, morder mo, volatile T *a, T v) { + DCHECK(IsStoreOrder(mo)); + MemoryAccess(thr, pc, (uptr)a, AccessSize(), + kAccessWrite | kAccessAtomic); + // This fast-path is critical for performance. + // Assume the access is atomic. + // Strictly saying even relaxed store cuts off release sequence, + // so must reset the clock. 
+ if (!IsReleaseOrder(mo)) { + NoTsanAtomic(mo, a, v); + return; + } + SlotLocker locker(thr); + { + auto s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false); + Lock lock(&s->mtx); + thr->clock.ReleaseStore(&s->clock); + NoTsanAtomic(mo, a, v); + } + IncrementEpoch(thr); + } +}; -template -static T NoTsanAtomicFetchNand(volatile T *a, T v, morder mo) { - return func_nand(a, v); -} +struct OpExchange { + template + static T NoTsanAtomic(morder mo, volatile T *a, T v) { + return func_xchg(a, v); + } + template + static T Atomic(ThreadState *thr, uptr pc, morder mo, volatile T *a, T v) { + return AtomicRMW(thr, pc, a, v, mo); + } +}; -template -static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v, - morder mo) { - return AtomicRMW(thr, pc, a, v, mo); -} +struct OpFetchAdd { + template + static T NoTsanAtomic(morder mo, volatile T *a, T v) { + return func_add(a, v); + } -template -static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v, - morder mo) { - return AtomicRMW(thr, pc, a, v, mo); -} + template + static T Atomic(ThreadState *thr, uptr pc, morder mo, volatile T *a, T v) { + return AtomicRMW(thr, pc, a, v, mo); + } +}; -template -static T AtomicFetchSub(ThreadState *thr, uptr pc, volatile T *a, T v, - morder mo) { - return AtomicRMW(thr, pc, a, v, mo); -} +struct OpFetchSub { + template + static T NoTsanAtomic(morder mo, volatile T *a, T v) { + return func_sub(a, v); + } -template -static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v, - morder mo) { - return AtomicRMW(thr, pc, a, v, mo); -} + template + static T Atomic(ThreadState *thr, uptr pc, morder mo, volatile T *a, T v) { + return AtomicRMW(thr, pc, a, v, mo); + } +}; -template -static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v, - morder mo) { - return AtomicRMW(thr, pc, a, v, mo); -} +struct OpFetchAnd { + template + static T NoTsanAtomic(morder mo, volatile T *a, T v) { + return func_and(a, v); + } -template -static T 
AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v, - morder mo) { - return AtomicRMW(thr, pc, a, v, mo); -} + template + static T Atomic(ThreadState *thr, uptr pc, morder mo, volatile T *a, T v) { + return AtomicRMW(thr, pc, a, v, mo); + } +}; -template -static T AtomicFetchNand(ThreadState *thr, uptr pc, volatile T *a, T v, - morder mo) { - return AtomicRMW(thr, pc, a, v, mo); -} +struct OpFetchOr { + template + static T NoTsanAtomic(morder mo, volatile T *a, T v) { + return func_or(a, v); + } -template -static bool NoTsanAtomicCAS(volatile T *a, T *c, T v, morder mo, morder fmo) { - return atomic_compare_exchange_strong(to_atomic(a), c, v, to_mo(mo)); -} + template + static T Atomic(ThreadState *thr, uptr pc, morder mo, volatile T *a, T v) { + return AtomicRMW(thr, pc, a, v, mo); + } +}; -#if __TSAN_HAS_INT128 -static bool NoTsanAtomicCAS(volatile a128 *a, a128 *c, a128 v, - morder mo, morder fmo) { - a128 old = *c; - a128 cur = func_cas(a, old, v); - if (cur == old) - return true; - *c = cur; - return false; -} -#endif +struct OpFetchXor { + template + static T NoTsanAtomic(morder mo, volatile T *a, T v) { + return func_xor(a, v); + } -template -static T NoTsanAtomicCAS(volatile T *a, T c, T v, morder mo, morder fmo) { - NoTsanAtomicCAS(a, &c, v, mo, fmo); - return c; -} + template + static T Atomic(ThreadState *thr, uptr pc, morder mo, volatile T *a, T v) { + return AtomicRMW(thr, pc, a, v, mo); + } +}; -template -static bool AtomicCAS(ThreadState *thr, uptr pc, volatile T *a, T *c, T v, - morder mo, morder fmo) { - // 31.7.2.18: "The failure argument shall not be memory_order_release - // nor memory_order_acq_rel". LLVM (2021-05) fallbacks to Monotonic - // (mo_relaxed) when those are used. 
- DCHECK(IsLoadOrder(fmo)); +struct OpFetchNand { + template + static T NoTsanAtomic(morder mo, volatile T *a, T v) { + return func_nand(a, v); + } - MemoryAccess(thr, pc, (uptr)a, AccessSize(), kAccessWrite | kAccessAtomic); - if (LIKELY(mo == mo_relaxed && fmo == mo_relaxed)) { - T cc = *c; - T pr = func_cas(a, cc, v); - if (pr == cc) + template + static T Atomic(ThreadState *thr, uptr pc, morder mo, volatile T *a, T v) { + return AtomicRMW(thr, pc, a, v, mo); + } +}; + +struct OpCAS { + template + static bool NoTsanAtomic(morder mo, morder fmo, volatile T *a, T *c, T v) { + return atomic_compare_exchange_strong(to_atomic(a), c, v, to_mo(mo)); + } + +#if __TSAN_HAS_INT128 + static bool NoTsanAtomic(morder mo, morder fmo, volatile a128 *a, a128 *c, + a128 v) { + a128 old = *c; + a128 cur = func_cas(a, old, v); + if (cur == old) return true; - *c = pr; + *c = cur; return false; } - SlotLocker locker(thr); - bool release = IsReleaseOrder(mo); - bool success; - { - auto s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false); - RWLock lock(&s->mtx, release); - T cc = *c; - T pr = func_cas(a, cc, v); - success = pr == cc; - if (!success) { +#endif + + template + static T NoTsanAtomic(morder mo, morder fmo, volatile T *a, T c, T v) { + NoTsanAtomic(mo, fmo, a, &c, v); + return c; + } + + template + static bool Atomic(ThreadState *thr, uptr pc, morder mo, morder fmo, + volatile T *a, T *c, T v) { + // 31.7.2.18: "The failure argument shall not be memory_order_release + // nor memory_order_acq_rel". LLVM (2021-05) fallbacks to Monotonic + // (mo_relaxed) when those are used. 
+ DCHECK(IsLoadOrder(fmo)); + + MemoryAccess(thr, pc, (uptr)a, AccessSize(), + kAccessWrite | kAccessAtomic); + if (LIKELY(mo == mo_relaxed && fmo == mo_relaxed)) { + T cc = *c; + T pr = func_cas(a, cc, v); + if (pr == cc) + return true; *c = pr; - mo = fmo; + return false; } - if (success && IsAcqRelOrder(mo)) - thr->clock.ReleaseAcquire(&s->clock); - else if (success && IsReleaseOrder(mo)) - thr->clock.Release(&s->clock); - else if (IsAcquireOrder(mo)) - thr->clock.Acquire(s->clock); + SlotLocker locker(thr); + bool release = IsReleaseOrder(mo); + bool success; + { + auto s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false); + RWLock lock(&s->mtx, release); + T cc = *c; + T pr = func_cas(a, cc, v); + success = pr == cc; + if (!success) { + *c = pr; + mo = fmo; + } + if (success && IsAcqRelOrder(mo)) + thr->clock.ReleaseAcquire(&s->clock); + else if (success && IsReleaseOrder(mo)) + thr->clock.Release(&s->clock); + else if (IsAcquireOrder(mo)) + thr->clock.Acquire(s->clock); + } + if (success && release) + IncrementEpoch(thr); + return success; } - if (success && release) - IncrementEpoch(thr); - return success; -} -template -static T AtomicCAS(ThreadState *thr, uptr pc, - volatile T *a, T c, T v, morder mo, morder fmo) { - AtomicCAS(thr, pc, a, &c, v, mo, fmo); - return c; -} + template + static T Atomic(ThreadState *thr, uptr pc, morder mo, morder fmo, + volatile T *a, T c, T v) { + Atomic(thr, pc, mo, fmo, a, &c, v); + return c; + } +}; #if !SANITIZER_GO -static void NoTsanAtomicFence(morder mo) { - __sync_synchronize(); -} +struct OpFence { + static void NoTsanAtomic(morder mo) { __sync_synchronize(); } -static void AtomicFence(ThreadState *thr, uptr pc, morder mo) { - // FIXME(dvyukov): not implemented. - __sync_synchronize(); -} + static void Atomic(ThreadState *thr, uptr pc, morder mo) { + // FIXME(dvyukov): not implemented. + __sync_synchronize(); + } +}; #endif +} // namespace + // Interface functions follow. 
#if !SANITIZER_GO // C/C++ static morder convert_morder(morder mo) { - if (flags()->force_seq_cst_atomics) - return (morder)mo_seq_cst; + return flags()->force_seq_cst_atomics ? mo_seq_cst : mo; +} +static morder to_morder(int mo) { // Filter out additional memory order flags: // MEMMODEL_SYNC = 1 << 15 // __ATOMIC_HLE_ACQUIRE = 1 << 16 @@ -482,468 +515,481 @@ static morder convert_morder(morder mo) { // since we use __sync_ atomics for actual atomic operations, // we can safely ignore it as well. It also subtly affects semantics, // but we don't model the difference. - return (morder)(mo & 0x7fff); + morder res = static_cast(static_cast(mo)); + DCHECK_LE(res, mo_seq_cst); + return res; } -# define ATOMIC_IMPL(func, ...) \ - ThreadState *const thr = cur_thread(); \ - ProcessPendingSignals(thr); \ - if (UNLIKELY(thr->ignore_sync || thr->ignore_interceptors)) \ - return NoTsanAtomic##func(__VA_ARGS__); \ - mo = convert_morder(mo); \ - return Atomic##func(thr, GET_CALLER_PC(), __VA_ARGS__); +template +ALWAYS_INLINE auto AtomicImpl(morder mo, Types... 
args) { + ThreadState *const thr = cur_thread(); + ProcessPendingSignals(thr); + if (UNLIKELY(thr->ignore_sync || thr->ignore_interceptors)) + return Op::NoTsanAtomic(mo, args...); + return Op::Atomic(thr, GET_CALLER_PC(), convert_morder(mo), args...); +} extern "C" { SANITIZER_INTERFACE_ATTRIBUTE -a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) { - ATOMIC_IMPL(Load, a, mo); +a8 __tsan_atomic8_load(const volatile a8 *a, int mo) { + return AtomicImpl(to_morder(mo), a); } SANITIZER_INTERFACE_ATTRIBUTE -a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) { - ATOMIC_IMPL(Load, a, mo); +a16 __tsan_atomic16_load(const volatile a16 *a, int mo) { + return AtomicImpl(to_morder(mo), a); } SANITIZER_INTERFACE_ATTRIBUTE -a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) { - ATOMIC_IMPL(Load, a, mo); +a32 __tsan_atomic32_load(const volatile a32 *a, int mo) { + return AtomicImpl(to_morder(mo), a); } SANITIZER_INTERFACE_ATTRIBUTE -a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) { - ATOMIC_IMPL(Load, a, mo); +a64 __tsan_atomic64_load(const volatile a64 *a, int mo) { + return AtomicImpl(to_morder(mo), a); } -#if __TSAN_HAS_INT128 +# if __TSAN_HAS_INT128 SANITIZER_INTERFACE_ATTRIBUTE -a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) { - ATOMIC_IMPL(Load, a, mo); +a128 __tsan_atomic128_load(const volatile a128 *a, int mo) { + return AtomicImpl(to_morder(mo), a); } -#endif +# endif SANITIZER_INTERFACE_ATTRIBUTE -void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) { - ATOMIC_IMPL(Store, a, v, mo); +void __tsan_atomic8_store(volatile a8 *a, a8 v, int mo) { + return AtomicImpl(to_morder(mo), a, v); } SANITIZER_INTERFACE_ATTRIBUTE -void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) { - ATOMIC_IMPL(Store, a, v, mo); +void __tsan_atomic16_store(volatile a16 *a, a16 v, int mo) { + return AtomicImpl(to_morder(mo), a, v); } SANITIZER_INTERFACE_ATTRIBUTE -void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) { - 
ATOMIC_IMPL(Store, a, v, mo); +void __tsan_atomic32_store(volatile a32 *a, a32 v, int mo) { + return AtomicImpl(to_morder(mo), a, v); } SANITIZER_INTERFACE_ATTRIBUTE -void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) { - ATOMIC_IMPL(Store, a, v, mo); +void __tsan_atomic64_store(volatile a64 *a, a64 v, int mo) { + return AtomicImpl(to_morder(mo), a, v); } -#if __TSAN_HAS_INT128 +# if __TSAN_HAS_INT128 SANITIZER_INTERFACE_ATTRIBUTE -void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) { - ATOMIC_IMPL(Store, a, v, mo); +void __tsan_atomic128_store(volatile a128 *a, a128 v, int mo) { + return AtomicImpl(to_morder(mo), a, v); } -#endif +# endif SANITIZER_INTERFACE_ATTRIBUTE -a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) { - ATOMIC_IMPL(Exchange, a, v, mo); +a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, int mo) { + return AtomicImpl(to_morder(mo), a, v); } SANITIZER_INTERFACE_ATTRIBUTE -a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) { - ATOMIC_IMPL(Exchange, a, v, mo); +a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, int mo) { + return AtomicImpl(to_morder(mo), a, v); } SANITIZER_INTERFACE_ATTRIBUTE -a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) { - ATOMIC_IMPL(Exchange, a, v, mo); +a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, int mo) { + return AtomicImpl(to_morder(mo), a, v); } SANITIZER_INTERFACE_ATTRIBUTE -a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) { - ATOMIC_IMPL(Exchange, a, v, mo); +a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, int mo) { + return AtomicImpl(to_morder(mo), a, v); } -#if __TSAN_HAS_INT128 +# if __TSAN_HAS_INT128 SANITIZER_INTERFACE_ATTRIBUTE -a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) { - ATOMIC_IMPL(Exchange, a, v, mo); +a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, int mo) { + return AtomicImpl(to_morder(mo), a, v); } -#endif +# endif SANITIZER_INTERFACE_ATTRIBUTE -a8 
__tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) { - ATOMIC_IMPL(FetchAdd, a, v, mo); +a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, int mo) { + return AtomicImpl(to_morder(mo), a, v); } SANITIZER_INTERFACE_ATTRIBUTE -a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) { - ATOMIC_IMPL(FetchAdd, a, v, mo); +a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, int mo) { + return AtomicImpl(to_morder(mo), a, v); } SANITIZER_INTERFACE_ATTRIBUTE -a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) { - ATOMIC_IMPL(FetchAdd, a, v, mo); +a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, int mo) { + return AtomicImpl(to_morder(mo), a, v); } SANITIZER_INTERFACE_ATTRIBUTE -a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) { - ATOMIC_IMPL(FetchAdd, a, v, mo); +a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, int mo) { + return AtomicImpl(to_morder(mo), a, v); } -#if __TSAN_HAS_INT128 +# if __TSAN_HAS_INT128 SANITIZER_INTERFACE_ATTRIBUTE -a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) { - ATOMIC_IMPL(FetchAdd, a, v, mo); +a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, int mo) { + return AtomicImpl(to_morder(mo), a, v); } -#endif +# endif SANITIZER_INTERFACE_ATTRIBUTE -a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) { - ATOMIC_IMPL(FetchSub, a, v, mo); +a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, int mo) { + return AtomicImpl(to_morder(mo), a, v); } SANITIZER_INTERFACE_ATTRIBUTE -a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) { - ATOMIC_IMPL(FetchSub, a, v, mo); +a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, int mo) { + return AtomicImpl(to_morder(mo), a, v); } SANITIZER_INTERFACE_ATTRIBUTE -a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) { - ATOMIC_IMPL(FetchSub, a, v, mo); +a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, int mo) { + return AtomicImpl(to_morder(mo), a, v); } SANITIZER_INTERFACE_ATTRIBUTE -a64 
__tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) { - ATOMIC_IMPL(FetchSub, a, v, mo); +a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, int mo) { + return AtomicImpl(to_morder(mo), a, v); } -#if __TSAN_HAS_INT128 +# if __TSAN_HAS_INT128 SANITIZER_INTERFACE_ATTRIBUTE -a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) { - ATOMIC_IMPL(FetchSub, a, v, mo); +a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, int mo) { + return AtomicImpl(to_morder(mo), a, v); } -#endif +# endif SANITIZER_INTERFACE_ATTRIBUTE -a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) { - ATOMIC_IMPL(FetchAnd, a, v, mo); +a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, int mo) { + return AtomicImpl(to_morder(mo), a, v); } SANITIZER_INTERFACE_ATTRIBUTE -a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) { - ATOMIC_IMPL(FetchAnd, a, v, mo); +a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, int mo) { + return AtomicImpl(to_morder(mo), a, v); } SANITIZER_INTERFACE_ATTRIBUTE -a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) { - ATOMIC_IMPL(FetchAnd, a, v, mo); +a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, int mo) { + return AtomicImpl(to_morder(mo), a, v); } SANITIZER_INTERFACE_ATTRIBUTE -a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) { - ATOMIC_IMPL(FetchAnd, a, v, mo); +a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, int mo) { + return AtomicImpl(to_morder(mo), a, v); } -#if __TSAN_HAS_INT128 +# if __TSAN_HAS_INT128 SANITIZER_INTERFACE_ATTRIBUTE -a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) { - ATOMIC_IMPL(FetchAnd, a, v, mo); +a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, int mo) { + return AtomicImpl(to_morder(mo), a, v); } -#endif +# endif SANITIZER_INTERFACE_ATTRIBUTE -a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) { - ATOMIC_IMPL(FetchOr, a, v, mo); +a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, int mo) { + return 
AtomicImpl(to_morder(mo), a, v); } SANITIZER_INTERFACE_ATTRIBUTE -a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) { - ATOMIC_IMPL(FetchOr, a, v, mo); +a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, int mo) { + return AtomicImpl(to_morder(mo), a, v); } SANITIZER_INTERFACE_ATTRIBUTE -a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) { - ATOMIC_IMPL(FetchOr, a, v, mo); +a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, int mo) { + return AtomicImpl(to_morder(mo), a, v); } SANITIZER_INTERFACE_ATTRIBUTE -a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) { - ATOMIC_IMPL(FetchOr, a, v, mo); +a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, int mo) { + return AtomicImpl(to_morder(mo), a, v); } -#if __TSAN_HAS_INT128 +# if __TSAN_HAS_INT128 SANITIZER_INTERFACE_ATTRIBUTE -a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) { - ATOMIC_IMPL(FetchOr, a, v, mo); +a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, int mo) { + return AtomicImpl(to_morder(mo), a, v); } -#endif +# endif SANITIZER_INTERFACE_ATTRIBUTE -a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) { - ATOMIC_IMPL(FetchXor, a, v, mo); +a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, int mo) { + return AtomicImpl(to_morder(mo), a, v); } SANITIZER_INTERFACE_ATTRIBUTE -a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) { - ATOMIC_IMPL(FetchXor, a, v, mo); +a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, int mo) { + return AtomicImpl(to_morder(mo), a, v); } SANITIZER_INTERFACE_ATTRIBUTE -a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) { - ATOMIC_IMPL(FetchXor, a, v, mo); +a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, int mo) { + return AtomicImpl(to_morder(mo), a, v); } SANITIZER_INTERFACE_ATTRIBUTE -a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) { - ATOMIC_IMPL(FetchXor, a, v, mo); +a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, int mo) { + return 
AtomicImpl(to_morder(mo), a, v); } -#if __TSAN_HAS_INT128 +# if __TSAN_HAS_INT128 SANITIZER_INTERFACE_ATTRIBUTE -a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) { - ATOMIC_IMPL(FetchXor, a, v, mo); +a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, int mo) { + return AtomicImpl(to_morder(mo), a, v); } -#endif +# endif SANITIZER_INTERFACE_ATTRIBUTE -a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) { - ATOMIC_IMPL(FetchNand, a, v, mo); +a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, int mo) { + return AtomicImpl(to_morder(mo), a, v); } SANITIZER_INTERFACE_ATTRIBUTE -a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) { - ATOMIC_IMPL(FetchNand, a, v, mo); +a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, int mo) { + return AtomicImpl(to_morder(mo), a, v); } SANITIZER_INTERFACE_ATTRIBUTE -a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) { - ATOMIC_IMPL(FetchNand, a, v, mo); +a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, int mo) { + return AtomicImpl(to_morder(mo), a, v); } SANITIZER_INTERFACE_ATTRIBUTE -a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) { - ATOMIC_IMPL(FetchNand, a, v, mo); +a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, int mo) { + return AtomicImpl(to_morder(mo), a, v); } -#if __TSAN_HAS_INT128 +# if __TSAN_HAS_INT128 SANITIZER_INTERFACE_ATTRIBUTE -a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) { - ATOMIC_IMPL(FetchNand, a, v, mo); +a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, int mo) { + return AtomicImpl(to_morder(mo), a, v); } -#endif +# endif SANITIZER_INTERFACE_ATTRIBUTE -int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v, - morder mo, morder fmo) { - ATOMIC_IMPL(CAS, a, c, v, mo, fmo); +int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v, int mo, + int fmo) { + return AtomicImpl(to_morder(mo), to_morder(fmo), a, c, v); } SANITIZER_INTERFACE_ATTRIBUTE int 
__tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v, - morder mo, morder fmo) { - ATOMIC_IMPL(CAS, a, c, v, mo, fmo); + int mo, int fmo) { + return AtomicImpl(to_morder(mo), to_morder(fmo), a, c, v); } SANITIZER_INTERFACE_ATTRIBUTE int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v, - morder mo, morder fmo) { - ATOMIC_IMPL(CAS, a, c, v, mo, fmo); + int mo, int fmo) { + return AtomicImpl(to_morder(mo), to_morder(fmo), a, c, v); } SANITIZER_INTERFACE_ATTRIBUTE int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v, - morder mo, morder fmo) { - ATOMIC_IMPL(CAS, a, c, v, mo, fmo); + int mo, int fmo) { + return AtomicImpl(to_morder(mo), to_morder(fmo), a, c, v); } -#if __TSAN_HAS_INT128 +# if __TSAN_HAS_INT128 SANITIZER_INTERFACE_ATTRIBUTE int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v, - morder mo, morder fmo) { - ATOMIC_IMPL(CAS, a, c, v, mo, fmo); + int mo, int fmo) { + return AtomicImpl(to_morder(mo), to_morder(fmo), a, c, v); } -#endif +# endif SANITIZER_INTERFACE_ATTRIBUTE -int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v, - morder mo, morder fmo) { - ATOMIC_IMPL(CAS, a, c, v, mo, fmo); +int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v, int mo, + int fmo) { + return AtomicImpl(to_morder(mo), to_morder(fmo), a, c, v); } SANITIZER_INTERFACE_ATTRIBUTE int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v, - morder mo, morder fmo) { - ATOMIC_IMPL(CAS, a, c, v, mo, fmo); + int mo, int fmo) { + return AtomicImpl(to_morder(mo), to_morder(fmo), a, c, v); } SANITIZER_INTERFACE_ATTRIBUTE int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v, - morder mo, morder fmo) { - ATOMIC_IMPL(CAS, a, c, v, mo, fmo); + int mo, int fmo) { + return AtomicImpl(to_morder(mo), to_morder(fmo), a, c, v); } SANITIZER_INTERFACE_ATTRIBUTE int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v, - morder mo, 
morder fmo) { - ATOMIC_IMPL(CAS, a, c, v, mo, fmo); + int mo, int fmo) { + return AtomicImpl(to_morder(mo), to_morder(fmo), a, c, v); } -#if __TSAN_HAS_INT128 +# if __TSAN_HAS_INT128 SANITIZER_INTERFACE_ATTRIBUTE int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v, - morder mo, morder fmo) { - ATOMIC_IMPL(CAS, a, c, v, mo, fmo); + int mo, int fmo) { + return AtomicImpl(to_morder(mo), to_morder(fmo), a, c, v); } -#endif +# endif SANITIZER_INTERFACE_ATTRIBUTE -a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v, - morder mo, morder fmo) { - ATOMIC_IMPL(CAS, a, c, v, mo, fmo); +a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v, int mo, + int fmo) { + return AtomicImpl(to_morder(mo), to_morder(fmo), a, c, v); } SANITIZER_INTERFACE_ATTRIBUTE -a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v, - morder mo, morder fmo) { - ATOMIC_IMPL(CAS, a, c, v, mo, fmo); +a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v, int mo, + int fmo) { + return AtomicImpl(to_morder(mo), to_morder(fmo), a, c, v); } SANITIZER_INTERFACE_ATTRIBUTE -a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v, - morder mo, morder fmo) { - ATOMIC_IMPL(CAS, a, c, v, mo, fmo); +a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v, int mo, + int fmo) { + return AtomicImpl(to_morder(mo), to_morder(fmo), a, c, v); } SANITIZER_INTERFACE_ATTRIBUTE -a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v, - morder mo, morder fmo) { - ATOMIC_IMPL(CAS, a, c, v, mo, fmo); +a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v, int mo, + int fmo) { + return AtomicImpl(to_morder(mo), to_morder(fmo), a, c, v); } -#if __TSAN_HAS_INT128 +# if __TSAN_HAS_INT128 SANITIZER_INTERFACE_ATTRIBUTE a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v, - morder mo, morder fmo) { - ATOMIC_IMPL(CAS, a, c, v, mo, fmo); + int mo, int fmo) { + return 
AtomicImpl(to_morder(mo), to_morder(fmo), a, c, v); } -#endif +# endif SANITIZER_INTERFACE_ATTRIBUTE -void __tsan_atomic_thread_fence(morder mo) { ATOMIC_IMPL(Fence, mo); } +void __tsan_atomic_thread_fence(int mo) { + return AtomicImpl(to_morder(mo)); +} SANITIZER_INTERFACE_ATTRIBUTE -void __tsan_atomic_signal_fence(morder mo) { -} +void __tsan_atomic_signal_fence(int mo) {} } // extern "C" #else // #if !SANITIZER_GO // Go -# define ATOMIC(func, ...) \ - if (thr->ignore_sync) { \ - NoTsanAtomic##func(__VA_ARGS__); \ - } else { \ - FuncEntry(thr, cpc); \ - Atomic##func(thr, pc, __VA_ARGS__); \ - FuncExit(thr); \ - } +template +void AtomicGo(ThreadState *thr, uptr cpc, uptr pc, Types... args) { + if (thr->ignore_sync) { + (void)Op::NoTsanAtomic(args...); + } else { + FuncEntry(thr, cpc); + (void)Op::Atomic(thr, pc, args...); + FuncExit(thr); + } +} -# define ATOMIC_RET(func, ret, ...) \ - if (thr->ignore_sync) { \ - (ret) = NoTsanAtomic##func(__VA_ARGS__); \ - } else { \ - FuncEntry(thr, cpc); \ - (ret) = Atomic##func(thr, pc, __VA_ARGS__); \ - FuncExit(thr); \ - } +template +auto AtomicGoRet(ThreadState *thr, uptr cpc, uptr pc, Types... 
args) { + if (thr->ignore_sync) { + return Op::NoTsanAtomic(args...); + } else { + FuncEntry(thr, cpc); + auto ret = Op::Atomic(thr, pc, args...); + FuncExit(thr); + return ret; + } +} extern "C" { SANITIZER_INTERFACE_ATTRIBUTE void __tsan_go_atomic32_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) { - ATOMIC_RET(Load, *(a32*)(a+8), *(a32**)a, mo_acquire); + *(a32 *)(a + 8) = AtomicGoRet(thr, cpc, pc, mo_acquire, *(a32 **)a); } SANITIZER_INTERFACE_ATTRIBUTE void __tsan_go_atomic64_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) { - ATOMIC_RET(Load, *(a64*)(a+8), *(a64**)a, mo_acquire); + *(a64 *)(a + 8) = AtomicGoRet(thr, cpc, pc, mo_acquire, *(a64 **)a); } SANITIZER_INTERFACE_ATTRIBUTE void __tsan_go_atomic32_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) { - ATOMIC(Store, *(a32**)a, *(a32*)(a+8), mo_release); + AtomicGo(thr, cpc, pc, mo_release, *(a32 **)a, *(a32 *)(a + 8)); } SANITIZER_INTERFACE_ATTRIBUTE void __tsan_go_atomic64_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) { - ATOMIC(Store, *(a64**)a, *(a64*)(a+8), mo_release); + AtomicGo(thr, cpc, pc, mo_release, *(a64 **)a, *(a64 *)(a + 8)); } SANITIZER_INTERFACE_ATTRIBUTE void __tsan_go_atomic32_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) { - ATOMIC_RET(FetchAdd, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel); + *(a32 *)(a + 16) = AtomicGoRet(thr, cpc, pc, mo_acq_rel, + *(a32 **)a, *(a32 *)(a + 8)); } SANITIZER_INTERFACE_ATTRIBUTE void __tsan_go_atomic64_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) { - ATOMIC_RET(FetchAdd, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel); + *(a64 *)(a + 16) = AtomicGoRet(thr, cpc, pc, mo_acq_rel, + *(a64 **)a, *(a64 *)(a + 8)); } SANITIZER_INTERFACE_ATTRIBUTE void __tsan_go_atomic32_fetch_and(ThreadState *thr, uptr cpc, uptr pc, u8 *a) { - ATOMIC_RET(FetchAnd, *(a32 *)(a + 16), *(a32 **)a, *(a32 *)(a + 8), - mo_acq_rel); + *(a32 *)(a + 16) = AtomicGoRet(thr, cpc, pc, mo_acq_rel, + *(a32 **)a, *(a32 *)(a + 8)); } 
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_go_atomic64_fetch_and(ThreadState *thr, uptr cpc, uptr pc, u8 *a) { - ATOMIC_RET(FetchAnd, *(a64 *)(a + 16), *(a64 **)a, *(a64 *)(a + 8), - mo_acq_rel); + *(a64 *)(a + 16) = AtomicGoRet(thr, cpc, pc, mo_acq_rel, + *(a64 **)a, *(a64 *)(a + 8)); } SANITIZER_INTERFACE_ATTRIBUTE void __tsan_go_atomic32_fetch_or(ThreadState *thr, uptr cpc, uptr pc, u8 *a) { - ATOMIC_RET(FetchOr, *(a32 *)(a + 16), *(a32 **)a, *(a32 *)(a + 8), - mo_acq_rel); + *(a32 *)(a + 16) = AtomicGoRet(thr, cpc, pc, mo_acq_rel, + *(a32 **)a, *(a32 *)(a + 8)); } SANITIZER_INTERFACE_ATTRIBUTE void __tsan_go_atomic64_fetch_or(ThreadState *thr, uptr cpc, uptr pc, u8 *a) { - ATOMIC_RET(FetchOr, *(a64 *)(a + 16), *(a64 **)a, *(a64 *)(a + 8), - mo_acq_rel); + *(a64 *)(a + 16) = AtomicGoRet(thr, cpc, pc, mo_acq_rel, + *(a64 **)a, *(a64 *)(a + 8)); } SANITIZER_INTERFACE_ATTRIBUTE void __tsan_go_atomic32_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) { - ATOMIC_RET(Exchange, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel); + *(a32 *)(a + 16) = AtomicGoRet(thr, cpc, pc, mo_acq_rel, + *(a32 **)a, *(a32 *)(a + 8)); } SANITIZER_INTERFACE_ATTRIBUTE void __tsan_go_atomic64_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) { - ATOMIC_RET(Exchange, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel); + *(a64 *)(a + 16) = AtomicGoRet(thr, cpc, pc, mo_acq_rel, + *(a64 **)a, *(a64 *)(a + 8)); } SANITIZER_INTERFACE_ATTRIBUTE -void __tsan_go_atomic32_compare_exchange( - ThreadState *thr, uptr cpc, uptr pc, u8 *a) { - a32 cur = 0; - a32 cmp = *(a32*)(a+8); - ATOMIC_RET(CAS, cur, *(a32**)a, cmp, *(a32*)(a+12), mo_acq_rel, mo_acquire); - *(bool*)(a+16) = (cur == cmp); +void __tsan_go_atomic32_compare_exchange(ThreadState *thr, uptr cpc, uptr pc, + u8 *a) { + a32 cmp = *(a32 *)(a + 8); + a32 cur = AtomicGoRet(thr, cpc, pc, mo_acq_rel, mo_acquire, *(a32 **)a, + cmp, *(a32 *)(a + 12)); + *(bool *)(a + 16) = (cur == cmp); } SANITIZER_INTERFACE_ATTRIBUTE -void 
__tsan_go_atomic64_compare_exchange( - ThreadState *thr, uptr cpc, uptr pc, u8 *a) { - a64 cur = 0; - a64 cmp = *(a64*)(a+8); - ATOMIC_RET(CAS, cur, *(a64**)a, cmp, *(a64*)(a+16), mo_acq_rel, mo_acquire); - *(bool*)(a+24) = (cur == cmp); +void __tsan_go_atomic64_compare_exchange(ThreadState *thr, uptr cpc, uptr pc, + u8 *a) { + a64 cmp = *(a64 *)(a + 8); + a64 cur = AtomicGoRet(thr, cpc, pc, mo_acq_rel, mo_acquire, *(a64 **)a, + cmp, *(a64 *)(a + 16)); + *(bool *)(a + 24) = (cur == cmp); } } // extern "C" #endif // #if !SANITIZER_GO diff --git a/libsanitizer/tsan/tsan_interface_java.cpp b/libsanitizer/tsan/tsan_interface_java.cpp index 7c15a1638826..cb4d767d903d 100644 --- a/libsanitizer/tsan/tsan_interface_java.cpp +++ b/libsanitizer/tsan/tsan_interface_java.cpp @@ -122,7 +122,6 @@ void __tsan_java_move(jptr src, jptr dst, jptr size) { DCHECK_GE(dst, jctx->heap_begin); DCHECK_LE(dst + size, jctx->heap_begin + jctx->heap_size); DCHECK_NE(dst, src); - DCHECK_NE(size, 0); // Assuming it's not running concurrently with threads that do // memory accesses and mutex operations (stop-the-world phase). 
diff --git a/libsanitizer/tsan/tsan_malloc_mac.cpp b/libsanitizer/tsan/tsan_malloc_mac.cpp index e973be963e57..9d097806fa9b 100644 --- a/libsanitizer/tsan/tsan_malloc_mac.cpp +++ b/libsanitizer/tsan/tsan_malloc_mac.cpp @@ -73,15 +73,19 @@ using namespace __tsan; invoke_free_hook(ptr); \ SCOPED_INTERCEPTOR_RAW(free, ptr); \ user_free(thr, pc, ptr) -#define COMMON_MALLOC_SIZE(ptr) uptr size = user_alloc_usable_size(ptr); -#define COMMON_MALLOC_FILL_STATS(zone, stats) -#define COMMON_MALLOC_REPORT_UNKNOWN_REALLOC(ptr, zone_ptr, zone_name) \ - (void)zone_name; \ - Report("mz_realloc(%p) -- attempting to realloc unallocated memory.\n", ptr); -#define COMMON_MALLOC_NAMESPACE __tsan -#define COMMON_MALLOC_HAS_ZONE_ENUMERATOR 0 -#define COMMON_MALLOC_HAS_EXTRA_INTROSPECTION_INIT 0 +# define COMMON_MALLOC_FREE_SIZED(ptr, size) COMMON_MALLOC_FREE(ptr) +# define COMMON_MALLOC_FREE_ALIGNED_SIZED(ptr, alignment, size) \ + COMMON_MALLOC_FREE(ptr) +# define COMMON_MALLOC_SIZE(ptr) uptr size = user_alloc_usable_size(ptr); +# define COMMON_MALLOC_FILL_STATS(zone, stats) +# define COMMON_MALLOC_REPORT_UNKNOWN_REALLOC(ptr, zone_ptr, zone_name) \ + (void)zone_name; \ + Report("mz_realloc(%p) -- attempting to realloc unallocated memory.\n", \ + ptr); +# define COMMON_MALLOC_NAMESPACE __tsan +# define COMMON_MALLOC_HAS_ZONE_ENUMERATOR 0 +# define COMMON_MALLOC_HAS_EXTRA_INTROSPECTION_INIT 0 -#include "sanitizer_common/sanitizer_malloc_mac.inc" +# include "sanitizer_common/sanitizer_malloc_mac.inc" #endif diff --git a/libsanitizer/tsan/tsan_mman.cpp b/libsanitizer/tsan/tsan_mman.cpp index 0ea83fb3b598..caacb3675825 100644 --- a/libsanitizer/tsan/tsan_mman.cpp +++ b/libsanitizer/tsan/tsan_mman.cpp @@ -182,10 +182,24 @@ static void SignalUnsafeCall(ThreadState *thr, uptr pc) { ObtainCurrentStack(thr, pc, &stack); if (IsFiredSuppression(ctx, ReportTypeSignalUnsafe, stack)) return; - ThreadRegistryLock l(&ctx->thread_registry); - ScopedReport rep(ReportTypeSignalUnsafe); - 
rep.AddStack(stack, true); - OutputReport(thr, rep); + // Use alloca, because malloc during signal handling deadlocks + ScopedReport *rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport)); + // Take a new scope as Apple platforms require the below locks released + // before symbolizing in order to avoid a deadlock + { + ThreadRegistryLock l(&ctx->thread_registry); + new (rep) ScopedReport(ReportTypeSignalUnsafe); + rep->AddStack(stack, true); +#if SANITIZER_APPLE + } // Close this scope to release the locks +#endif + OutputReport(thr, *rep); + + // Need to manually destroy this because we used placement new to allocate + rep->~ScopedReport(); +#if !SANITIZER_APPLE + } +#endif } diff --git a/libsanitizer/tsan/tsan_platform.h b/libsanitizer/tsan/tsan_platform.h index 377f8aeb8d66..00b493bf2d93 100644 --- a/libsanitizer/tsan/tsan_platform.h +++ b/libsanitizer/tsan/tsan_platform.h @@ -681,6 +681,59 @@ struct MappingGoMips64_47 { static const uptr kShadowAdd = 0x200000000000ull; }; +/* Go on linux/riscv64 (39-bit VMA) +0000 0001 0000 - 000f 0000 0000: executable and heap (60 GiB) +000f 0000 0000 - 0010 0000 0000: - +0010 0000 0000 - 0030 0000 0000: shadow - 128 GiB ( ~ 2 * app) +0030 0000 0000 - 0038 0000 0000: metainfo - 32 GiB ( ~ 0.5 * app) +0038 0000 0000 - 0040 0000 0000: - +*/ +struct MappingGoRiscv64_39 { + static const uptr kMetaShadowBeg = 0x003000000000ull; + static const uptr kMetaShadowEnd = 0x003800000000ull; + static const uptr kShadowBeg = 0x001000000000ull; + static const uptr kShadowEnd = 0x003000000000ull; + static const uptr kLoAppMemBeg = 0x000000010000ull; + static const uptr kLoAppMemEnd = 0x000f00000000ull; + static const uptr kMidAppMemBeg = 0; + static const uptr kMidAppMemEnd = 0; + static const uptr kHiAppMemBeg = 0; + static const uptr kHiAppMemEnd = 0; + static const uptr kHeapMemBeg = 0; + static const uptr kHeapMemEnd = 0; + static const uptr kVdsoBeg = 0; + static const uptr kShadowMsk = 0; + static const uptr kShadowXor = 0; + 
static const uptr kShadowAdd = 0x001000000000ull; +}; + +/* Go on linux/riscv64 (48-bit VMA) +0000 0001 0000 - 00e0 0000 0000: executable and heap (896 GiB) +00e0 0000 0000 - 2000 0000 0000: - +2000 0000 0000 - 2400 0000 0000: shadow - 4 TiB ( ~ 4 * app) +2400 0000 0000 - 3000 0000 0000: - +3000 0000 0000 - 3100 0000 0000: metainfo - 1 TiB ( ~ 1 * app) +3100 0000 0000 - 8000 0000 0000: - +*/ +struct MappingGoRiscv64_48 { + static const uptr kMetaShadowBeg = 0x300000000000ull; + static const uptr kMetaShadowEnd = 0x310000000000ull; + static const uptr kShadowBeg = 0x200000000000ull; + static const uptr kShadowEnd = 0x240000000000ull; + static const uptr kLoAppMemBeg = 0x000000010000ull; + static const uptr kLoAppMemEnd = 0x00e000000000ull; + static const uptr kMidAppMemBeg = 0; + static const uptr kMidAppMemEnd = 0; + static const uptr kHiAppMemBeg = 0; + static const uptr kHiAppMemEnd = 0; + static const uptr kHeapMemBeg = 0; + static const uptr kHeapMemEnd = 0; + static const uptr kVdsoBeg = 0; + static const uptr kShadowMsk = 0; + static const uptr kShadowXor = 0; + static const uptr kShadowAdd = 0x200000000000ull; +}; + /* Go on linux/s390x 0000 0000 1000 - 1000 0000 0000: executable and heap - 16 TiB @@ -728,6 +781,13 @@ ALWAYS_INLINE auto SelectMapping(Arg arg) { return Func::template Apply(arg); # elif defined(__loongarch_lp64) return Func::template Apply(arg); +# elif SANITIZER_RISCV64 + switch (vmaSize) { + case 39: + return Func::template Apply(arg); + case 48: + return Func::template Apply(arg); + } # elif SANITIZER_WINDOWS return Func::template Apply(arg); # else @@ -798,6 +858,8 @@ void ForEachMapping() { Func::template Apply(); Func::template Apply(); Func::template Apply(); + Func::template Apply(); + Func::template Apply(); Func::template Apply(); } @@ -901,7 +963,7 @@ bool IsAppMem(uptr mem) { return SelectMapping(mem); } struct IsShadowMemImpl { template static bool Apply(uptr mem) { - return mem >= Mapping::kShadowBeg && mem <= 
Mapping::kShadowEnd; + return mem >= Mapping::kShadowBeg && mem < Mapping::kShadowEnd; } }; @@ -913,7 +975,7 @@ bool IsShadowMem(RawShadow *p) { struct IsMetaMemImpl { template static bool Apply(uptr mem) { - return mem >= Mapping::kMetaShadowBeg && mem <= Mapping::kMetaShadowEnd; + return mem >= Mapping::kMetaShadowBeg && mem < Mapping::kMetaShadowEnd; } }; diff --git a/libsanitizer/tsan/tsan_platform_linux.cpp b/libsanitizer/tsan/tsan_platform_linux.cpp index 3e08a1bece98..c974f549acbc 100644 --- a/libsanitizer/tsan/tsan_platform_linux.cpp +++ b/libsanitizer/tsan/tsan_platform_linux.cpp @@ -259,7 +259,15 @@ static void ReExecIfNeeded(bool ignore_heap) { "WARNING: Program is run with randomized virtual address " "space, which wouldn't work with ThreadSanitizer on Android.\n" "Re-execing with fixed virtual address space.\n"); - CHECK_NE(personality(old_personality | ADDR_NO_RANDOMIZE), -1); + + if (personality(old_personality | ADDR_NO_RANDOMIZE) == -1) { + Printf( + "FATAL: ThreadSanitizer: unable to disable ASLR (perhaps " + "sandboxing is enabled?).\n"); + Printf("FATAL: Please rerun without sandboxing and/or ASLR.\n"); + Die(); + } + reexec = true; } # endif @@ -287,7 +295,18 @@ static void ReExecIfNeeded(bool ignore_heap) { "possibly due to high-entropy ASLR.\n" "Re-execing with fixed virtual address space.\n" "N.B. 
reducing ASLR entropy is preferable.\n"); - CHECK_NE(personality(old_personality | ADDR_NO_RANDOMIZE), -1); + + if (personality(old_personality | ADDR_NO_RANDOMIZE) == -1) { + Printf( + "FATAL: ThreadSanitizer: encountered an incompatible memory " + "layout but was unable to disable ASLR (perhaps sandboxing is " + "enabled?).\n"); + Printf( + "FATAL: Please rerun with lower ASLR entropy, ASLR disabled, " + "and/or sandboxing disabled.\n"); + Die(); + } + reexec = true; } else { Printf( @@ -373,6 +392,12 @@ void InitializePlatformEarly() { Printf("FATAL: Found %zd - Supported 39 and 48\n", vmaSize); Die(); } +# else + if (vmaSize != 39 && vmaSize != 48) { + Printf("FATAL: ThreadSanitizer: unsupported VMA range\n"); + Printf("FATAL: Found %zd - Supported 39 and 48\n", vmaSize); + Die(); + } # endif # endif @@ -390,7 +415,7 @@ void InitializePlatform() { // is not compiled with -pie. #if !SANITIZER_GO { -# if SANITIZER_LINUX && (defined(__aarch64__) || defined(__loongarch_lp64)) +# if INIT_LONGJMP_XOR_KEY // Initialize the xor key used in {sig}{set,long}jump. InitializeLongjmpXorKey(); # endif @@ -461,8 +486,20 @@ int ExtractRecvmsgFDs(void *msgp, int *fds, int nfd) { // Reverse operation of libc stack pointer mangling static uptr UnmangleLongJmpSp(uptr mangled_sp) { -#if defined(__x86_64__) -# if SANITIZER_LINUX +# if SANITIZER_ANDROID && INIT_LONGJMP_XOR_KEY + if (longjmp_xor_key == 0) { + // bionic libc initialization process: __libc_init_globals -> + // __libc_init_vdso (calls strcmp) -> __libc_init_setjmp_cookie. strcmp is + // intercepted by TSan, so during TSan initialization the setjmp_cookie + // remains uninitialized. On Android, longjmp_xor_key must be set on first + // use. 
+ InitializeLongjmpXorKey(); + CHECK_NE(longjmp_xor_key, 0); + } +# endif + +# if defined(__x86_64__) +# if SANITIZER_LINUX // Reverse of: // xor %fs:0x30, %rsi // rol $0x11, %rsi @@ -517,13 +554,23 @@ static uptr UnmangleLongJmpSp(uptr mangled_sp) { # else # define LONG_JMP_SP_ENV_SLOT 2 # endif -#elif SANITIZER_LINUX -# ifdef __aarch64__ -# define LONG_JMP_SP_ENV_SLOT 13 -# elif defined(__loongarch__) -# define LONG_JMP_SP_ENV_SLOT 1 -# elif defined(__mips64) -# define LONG_JMP_SP_ENV_SLOT 1 +# elif SANITIZER_ANDROID +# ifdef __aarch64__ +# define LONG_JMP_SP_ENV_SLOT 3 +# elif SANITIZER_RISCV64 +# define LONG_JMP_SP_ENV_SLOT 3 +# elif defined(__x86_64__) +# define LONG_JMP_SP_ENV_SLOT 6 +# else +# error unsupported +# endif +# elif SANITIZER_LINUX +# ifdef __aarch64__ +# define LONG_JMP_SP_ENV_SLOT 13 +# elif defined(__loongarch__) +# define LONG_JMP_SP_ENV_SLOT 1 +# elif defined(__mips64) +# define LONG_JMP_SP_ENV_SLOT 1 # elif SANITIZER_RISCV64 # define LONG_JMP_SP_ENV_SLOT 13 # elif defined(__s390x__) @@ -531,7 +578,7 @@ static uptr UnmangleLongJmpSp(uptr mangled_sp) { # else # define LONG_JMP_SP_ENV_SLOT 6 # endif -#endif +# endif uptr ExtractLongJmpSp(uptr *env) { uptr mangled_sp = env[LONG_JMP_SP_ENV_SLOT]; @@ -628,7 +675,13 @@ ThreadState *cur_thread() { } CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &oldset, nullptr)); } - return thr; + + // Skia calls mallopt(M_THREAD_DISABLE_MEM_INIT, 1), which sets the least + // significant bit of TLS_SLOT_SANITIZER to 1. Scudo allocator uses this bit + // as a flag to disable memory initialization. This is a workaround to get the + // correct ThreadState pointer. 
+ uptr addr = reinterpret_cast(thr); + return reinterpret_cast(addr & ~1ULL); } void set_cur_thread(ThreadState *thr) { diff --git a/libsanitizer/tsan/tsan_platform_mac.cpp b/libsanitizer/tsan/tsan_platform_mac.cpp index eb344df168ab..f6fe2405254e 100644 --- a/libsanitizer/tsan/tsan_platform_mac.cpp +++ b/libsanitizer/tsan/tsan_platform_mac.cpp @@ -226,9 +226,20 @@ static void ThreadTerminateCallback(uptr thread) { void InitializePlatformEarly() { # if !SANITIZER_GO && SANITIZER_IOS uptr max_vm = GetMaxUserVirtualAddress() + 1; - if (max_vm != HiAppMemEnd()) { - Printf("ThreadSanitizer: unsupported vm address limit %p, expected %p.\n", - (void *)max_vm, (void *)HiAppMemEnd()); + if (max_vm < HiAppMemEnd()) { + Report( + "ThreadSanitizer: Unsupported virtual memory layout:\n\tVM address " + "limit = %p\n\tExpected %p.\n", + (void*)max_vm, (void*)HiAppMemEnd()); + Die(); + } + // In some configurations, the max_vm is expanded, but much of this space is + // already mapped. TSAN will not work in this configuration. 
+ if (!MemoryRangeIsAvailable(HiAppMemEnd() - 1, HiAppMemEnd())) { + Report( + "ThreadSanitizer: Unsupported virtual memory layout: Address %p is " + "already mapped.\n", + (void*)(HiAppMemEnd() - 1)); Die(); } #endif @@ -248,7 +259,9 @@ void InitializePlatform() { ThreadEventCallbacks callbacks = { .create = ThreadCreateCallback, + .start = nullptr, .terminate = ThreadTerminateCallback, + .destroy = nullptr, }; InstallPthreadIntrospectionHook(callbacks); #endif diff --git a/libsanitizer/tsan/tsan_report.h b/libsanitizer/tsan/tsan_report.h index bfe470797f8f..53bb21964dbb 100644 --- a/libsanitizer/tsan/tsan_report.h +++ b/libsanitizer/tsan/tsan_report.h @@ -12,6 +12,8 @@ #ifndef TSAN_REPORT_H #define TSAN_REPORT_H +#include "sanitizer_common/sanitizer_internal_defs.h" +#include "sanitizer_common/sanitizer_stacktrace.h" #include "sanitizer_common/sanitizer_symbolizer.h" #include "sanitizer_common/sanitizer_thread_registry.h" #include "sanitizer_common/sanitizer_vector.h" @@ -56,6 +58,7 @@ struct ReportMop { bool atomic; uptr external_tag; Vector mset; + StackTrace stack_trace; ReportStack *stack; ReportMop(); @@ -79,25 +82,34 @@ struct ReportLocation { int fd = 0; bool fd_closed = false; bool suppressable = false; + StackID stack_id = 0; ReportStack *stack = nullptr; }; struct ReportThread { Tid id; - tid_t os_id; + ThreadID os_id; bool running; ThreadType thread_type; char *name; Tid parent_tid; + StackID stack_id; ReportStack *stack; + bool suppressable; }; struct ReportMutex { int id; uptr addr; + StackID stack_id; ReportStack *stack; }; +struct AddedLocationAddr { + uptr addr; + usize locs_idx; +}; + class ReportDesc { public: ReportType typ; @@ -105,6 +117,7 @@ class ReportDesc { Vector stacks; Vector mops; Vector locs; + Vector added_location_addrs; Vector mutexes; Vector threads; Vector unique_tids; diff --git a/libsanitizer/tsan/tsan_rtl.cpp b/libsanitizer/tsan/tsan_rtl.cpp index 5a2d39cd3060..feee566f4482 100644 --- a/libsanitizer/tsan/tsan_rtl.cpp +++ 
b/libsanitizer/tsan/tsan_rtl.cpp @@ -40,6 +40,13 @@ SANITIZER_WEAK_DEFAULT_IMPL void __tsan_test_only_on_fork() {} #endif +#if SANITIZER_APPLE && !SANITIZER_GO +// Override weak symbol from sanitizer_common +extern void __tsan_set_in_internal_write_call(bool value) { + __tsan::cur_thread_init()->in_internal_write_call = value; +} +#endif + namespace __tsan { #if !SANITIZER_GO @@ -566,17 +573,32 @@ static bool IsValidMmapRange(uptr addr, uptr size) { return false; } -void UnmapShadow(ThreadState *thr, uptr addr, uptr size) { +void UnmapShadow(ThreadState* thr, uptr addr, uptr size) { if (size == 0 || !IsValidMmapRange(addr, size)) return; - DontNeedShadowFor(addr, size); + // unmap shadow is related to semantic of mmap/munmap, so we + // should clear the whole shadow range, including the tail shadow + // while addr + size % kShadowCell != 0. + uptr rounded_size_shadow = RoundUp(addr + size, kShadowCell) - addr; + DontNeedShadowFor(addr, rounded_size_shadow); ScopedGlobalProcessor sgp; SlotLocker locker(thr, true); - ctx->metamap.ResetRange(thr->proc(), addr, size, true); + uptr rounded_size_meta = RoundUp(addr + size, kMetaShadowCell) - addr; + ctx->metamap.ResetRange(thr->proc(), addr, rounded_size_meta, true); } #endif void MapShadow(uptr addr, uptr size) { + // Although named MapShadow, this function's semantic is unrelated to + // UnmapShadow. This function currently only used for Go's lazy allocation + // of shadow, whose targets are program section (e.g., bss, data, etc.). + // Therefore, we can guarantee that the addr and size align to kShadowCell + // and kMetaShadowCell by the following assertions. + DCHECK_EQ(addr % kShadowCell, 0); + DCHECK_EQ(size % kShadowCell, 0); + DCHECK_EQ(addr % kMetaShadowCell, 0); + DCHECK_EQ(size % kMetaShadowCell, 0); + // Ensure thead registry lock held, so as to synchronize // with DoReset, which also access the mapped_shadow_* ctxt fields. 
ThreadRegistryLock lock0(&ctx->thread_registry); @@ -624,6 +646,7 @@ void MapShadow(uptr addr, uptr size) { static uptr mapped_meta_end = 0; uptr meta_begin = (uptr)MemToMeta(addr); uptr meta_end = (uptr)MemToMeta(addr + size); + // Windows wants 64K alignment. meta_begin = RoundDownTo(meta_begin, 64 << 10); meta_end = RoundUpTo(meta_end, 64 << 10); if (!data_mapped) { @@ -634,9 +657,6 @@ void MapShadow(uptr addr, uptr size) { Die(); } else { // Mapping continuous heap. - // Windows wants 64K alignment. - meta_begin = RoundDownTo(meta_begin, 64 << 10); - meta_end = RoundUpTo(meta_end, 64 << 10); CHECK_GT(meta_end, mapped_meta_end); if (meta_begin < mapped_meta_end) meta_begin = mapped_meta_end; @@ -673,11 +693,18 @@ void CheckUnwind() { thr->ignore_reads_and_writes++; atomic_store_relaxed(&thr->in_signal_handler, 0); #endif - PrintCurrentStackSlow(StackTrace::GetCurrentPc()); + PrintCurrentStack(StackTrace::GetCurrentPc(), + common_flags()->fast_unwind_on_fatal); } bool is_initialized; +// Symbolization indirectly calls dl_iterate_phdr. If a CHECK() fails early on +// (prior to the dl_iterate_phdr interceptor setup), resulting in an attempted +// symbolization, it will segfault. +// dl_iterate_phdr is not intercepted for Android. +bool ready_to_symbolize = SANITIZER_ANDROID; + void Initialize(ThreadState *thr) { // Thread safe because done before all threads exist. 
if (is_initialized) @@ -873,6 +900,13 @@ void ForkChildAfter(ThreadState* thr, uptr pc, bool start_thread) { ThreadIgnoreBegin(thr, pc); ThreadIgnoreSyncBegin(thr, pc); } + +# if SANITIZER_APPLE && !SANITIZER_GO + // This flag can have inheritance disabled - we are the child so act + // accordingly + if (flags()->lock_during_write == kNoLockDuringWritesCurrentProcess) + flags()->lock_during_write = kLockDuringAllWrites; +# endif } #endif diff --git a/libsanitizer/tsan/tsan_rtl.h b/libsanitizer/tsan/tsan_rtl.h index f48be8e0a4fe..635654616b78 100644 --- a/libsanitizer/tsan/tsan_rtl.h +++ b/libsanitizer/tsan/tsan_rtl.h @@ -54,13 +54,15 @@ namespace __tsan { +extern bool ready_to_symbolize; + #if !SANITIZER_GO struct MapUnmapCallback; # if defined(__mips64) || defined(__aarch64__) || defined(__loongarch__) || \ defined(__powerpc__) || SANITIZER_RISCV64 struct AP32 { - static const uptr kSpaceBeg = 0; + static const uptr kSpaceBeg = SANITIZER_MMAP_BEGIN; static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE; static const uptr kMetadataSize = 0; typedef __sanitizer::CompactSizeClassMap SizeClassMap; @@ -98,6 +100,7 @@ struct JmpBuf { uptr sp; int int_signal_send; bool in_blocking_func; + uptr oldset_stack_size; uptr in_signal_handler; uptr *shadow_stack_pos; }; @@ -233,6 +236,10 @@ struct alignas(SANITIZER_CACHE_LINE_SIZE) ThreadState { const ReportDesc *current_report; +#if SANITIZER_APPLE && !SANITIZER_GO + bool in_internal_write_call; +#endif + explicit ThreadState(Tid tid); }; @@ -417,6 +424,7 @@ class ScopedReportBase { void AddSleep(StackID stack_id); void SetCount(int count); void SetSigNum(int sig); + void SymbolizeStackElems(void); const ReportDesc *GetReport() const; @@ -495,7 +503,7 @@ void ForkChildAfter(ThreadState *thr, uptr pc, bool start_thread); void ReportRace(ThreadState *thr, RawShadow *shadow_mem, Shadow cur, Shadow old, AccessType typ); -bool OutputReport(ThreadState *thr, const ScopedReport &srep); +bool OutputReport(ThreadState *thr, 
ScopedReport &srep); bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace); bool IsExpectedReport(uptr addr, uptr size); @@ -514,7 +522,7 @@ bool IsExpectedReport(uptr addr, uptr size); StackID CurrentStackId(ThreadState *thr, uptr pc); ReportStack *SymbolizeStackId(StackID stack_id); void PrintCurrentStack(ThreadState *thr, uptr pc); -void PrintCurrentStackSlow(uptr pc); // uses libunwind +void PrintCurrentStack(uptr pc, bool fast); // may uses libunwind MBlock *JavaHeapBlock(uptr addr, uptr *start); void Initialize(ThreadState *thr); @@ -556,7 +564,7 @@ void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc); void ThreadIgnoreSyncEnd(ThreadState *thr); Tid ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached); -void ThreadStart(ThreadState *thr, Tid tid, tid_t os_id, +void ThreadStart(ThreadState *thr, Tid tid, ThreadID os_id, ThreadType thread_type); void ThreadFinish(ThreadState *thr); Tid ThreadConsumeTid(ThreadState *thr, uptr pc, uptr uid); diff --git a/libsanitizer/tsan/tsan_rtl_aarch64.S b/libsanitizer/tsan/tsan_rtl_aarch64.S index 7d920bee4a2d..124bd59a91f0 100644 --- a/libsanitizer/tsan/tsan_rtl_aarch64.S +++ b/libsanitizer/tsan/tsan_rtl_aarch64.S @@ -4,10 +4,8 @@ #include "sanitizer_common/sanitizer_asm.h" #include "builtins/assembly.h" -#if !defined(__APPLE__) -.section .text -#else -.section __TEXT,__text +TEXT_SECTION +#if defined(__APPLE__) .align 3 #endif @@ -222,6 +220,6 @@ ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(__sigsetjmp)) NO_EXEC_STACK_DIRECTIVE -GNU_PROPERTY_BTI_PAC +GNU_PROPERTY_BTI_PAC_GCS #endif diff --git a/libsanitizer/tsan/tsan_rtl_access.cpp b/libsanitizer/tsan/tsan_rtl_access.cpp index cf07686d968d..b2e70475e0b7 100644 --- a/libsanitizer/tsan/tsan_rtl_access.cpp +++ b/libsanitizer/tsan/tsan_rtl_access.cpp @@ -419,6 +419,11 @@ NOINLINE void TraceRestartMemoryAccess(ThreadState* thr, uptr pc, uptr addr, ALWAYS_INLINE USED void MemoryAccess(ThreadState* thr, uptr pc, uptr addr, uptr size, AccessType typ) { +#if 
SANITIZER_APPLE && !SANITIZER_GO + // Swift symbolizer can be intercepted and deadlock without this + if (thr->in_symbolizer) + return; +#endif RawShadow* shadow_mem = MemToShadow(addr); UNUSED char memBuf[4][64]; DPrintf2("#%d: Access: %d@%d %p/%zd typ=0x%x {%s, %s, %s, %s}\n", thr->tid, @@ -523,9 +528,9 @@ SECOND: } void ShadowSet(RawShadow* p, RawShadow* end, RawShadow v) { - DCHECK_LE(p, end); + DCHECK_LT(p, end); DCHECK(IsShadowMem(p)); - DCHECK(IsShadowMem(end)); + DCHECK(IsShadowMem(end - 1)); UNUSED const uptr kAlign = kShadowCnt * kShadowSize; DCHECK_EQ(reinterpret_cast(p) % kAlign, 0); DCHECK_EQ(reinterpret_cast(end) % kAlign, 0); @@ -569,6 +574,7 @@ static void MemoryRangeSet(uptr addr, uptr size, RawShadow val) { RawShadow* mid1 = Min(end, reinterpret_cast(RoundUp( reinterpret_cast(begin) + kPageSize / 2, kPageSize))); + // begin must < mid1 ShadowSet(begin, mid1, val); // Reset middle part. RawShadow* mid2 = RoundDown(end, kPageSize); @@ -577,7 +583,10 @@ static void MemoryRangeSet(uptr addr, uptr size, RawShadow val) { Die(); } // Set the ending. 
- ShadowSet(mid2, end, val); + if (mid2 < end) + ShadowSet(mid2, end, val); + else + DCHECK_EQ(mid2, end); } void MemoryResetRange(ThreadState* thr, uptr pc, uptr addr, uptr size) { @@ -669,7 +678,7 @@ void MemoryAccessRangeT(ThreadState* thr, uptr pc, uptr addr, uptr size) { RawShadow* shadow_mem = MemToShadow(addr); DPrintf2("#%d: MemoryAccessRange: @%p %p size=%d is_read=%d\n", thr->tid, (void*)pc, (void*)addr, (int)size, is_read); - + DCHECK_NE(size, 0); #if SANITIZER_DEBUG if (!IsAppMem(addr)) { Printf("Access to non app mem start: %p\n", (void*)addr); @@ -680,20 +689,22 @@ void MemoryAccessRangeT(ThreadState* thr, uptr pc, uptr addr, uptr size) { DCHECK(IsAppMem(addr + size - 1)); } if (!IsShadowMem(shadow_mem)) { - Printf("Bad shadow start addr: %p (%p)\n", shadow_mem, (void*)addr); + Printf("Bad shadow start addr: %p (%p)\n", (void*)shadow_mem, (void*)addr); DCHECK(IsShadowMem(shadow_mem)); } - RawShadow* shadow_mem_end = reinterpret_cast( - reinterpret_cast(shadow_mem) + size * kShadowMultiplier - 1); - if (!IsShadowMem(shadow_mem_end)) { - Printf("Bad shadow end addr: %p (%p)\n", shadow_mem_end, + uptr rounded_size = + (RoundUpTo(addr + size, kShadowCell) - RoundDownTo(addr, kShadowCell)); + RawShadow* shadow_mem_end = + shadow_mem + rounded_size / kShadowCell * kShadowCnt; + if (!IsShadowMem(shadow_mem_end - 1)) { + Printf("Bad shadow end addr: %p (%p)\n", (void*)(shadow_mem_end - 1), (void*)(addr + size - 1)); Printf( - "Shadow start addr (ok): %p (%p); size: 0x%zx; kShadowMultiplier: " - "%zx\n", - shadow_mem, (void*)addr, size, kShadowMultiplier); - DCHECK(IsShadowMem(shadow_mem_end)); + "Shadow start addr (ok): %p (%p); size: 0x%zx; rounded_size: 0x%zx; " + "kShadowMultiplier: %zx\n", + (void*)shadow_mem, (void*)addr, size, rounded_size, kShadowMultiplier); + DCHECK(IsShadowMem(shadow_mem_end - 1)); } #endif diff --git a/libsanitizer/tsan/tsan_rtl_amd64.S b/libsanitizer/tsan/tsan_rtl_amd64.S index f848be9dd46c..8b9b706a822d 100644 --- 
a/libsanitizer/tsan/tsan_rtl_amd64.S +++ b/libsanitizer/tsan/tsan_rtl_amd64.S @@ -3,6 +3,8 @@ #include "sanitizer_common/sanitizer_asm.h" +.att_syntax + #if !defined(__APPLE__) .section .text #else diff --git a/libsanitizer/tsan/tsan_rtl_mutex.cpp b/libsanitizer/tsan/tsan_rtl_mutex.cpp index 2a8aa1915c9a..30f5e964939d 100644 --- a/libsanitizer/tsan/tsan_rtl_mutex.cpp +++ b/libsanitizer/tsan/tsan_rtl_mutex.cpp @@ -11,14 +11,15 @@ //===----------------------------------------------------------------------===// #include +#include #include -#include "tsan_rtl.h" #include "tsan_flags.h" -#include "tsan_sync.h" +#include "tsan_platform.h" #include "tsan_report.h" +#include "tsan_rtl.h" #include "tsan_symbolize.h" -#include "tsan_platform.h" +#include "tsan_sync.h" namespace __tsan { @@ -55,14 +56,28 @@ static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ, return; if (!ShouldReport(thr, typ)) return; - ThreadRegistryLock l(&ctx->thread_registry); - ScopedReport rep(typ); - rep.AddMutex(addr, creation_stack_id); - VarSizeStackTrace trace; - ObtainCurrentStack(thr, pc, &trace); - rep.AddStack(trace, true); - rep.AddLocation(addr, 1); - OutputReport(thr, rep); + // Use alloca, because malloc during signal handling deadlocks + ScopedReport *rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport)); + // Take a new scope as Apple platforms require the below locks released + // before symbolizing in order to avoid a deadlock + { + ThreadRegistryLock l(&ctx->thread_registry); + new (rep) ScopedReport(typ); + rep->AddMutex(addr, creation_stack_id); + VarSizeStackTrace trace; + ObtainCurrentStack(thr, pc, &trace); + rep->AddStack(trace, true); + rep->AddLocation(addr, 1); +#if SANITIZER_APPLE + } // Close this scope to release the locks +#endif + OutputReport(thr, *rep); + + // Need to manually destroy this because we used placement new to allocate + rep->~ScopedReport(); +#if !SANITIZER_APPLE + } +#endif } static void RecordMutexLock(ThreadState *thr, uptr 
pc, uptr addr, @@ -528,51 +543,81 @@ void AfterSleep(ThreadState *thr, uptr pc) { void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) { if (r == 0 || !ShouldReport(thr, ReportTypeDeadlock)) return; - ThreadRegistryLock l(&ctx->thread_registry); - ScopedReport rep(ReportTypeDeadlock); - for (int i = 0; i < r->n; i++) { - rep.AddMutex(r->loop[i].mtx_ctx0, r->loop[i].stk[0]); - rep.AddUniqueTid((int)r->loop[i].thr_ctx); - rep.AddThread((int)r->loop[i].thr_ctx); - } - uptr dummy_pc = 0x42; - for (int i = 0; i < r->n; i++) { - for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) { - u32 stk = r->loop[i].stk[j]; - if (stk && stk != kInvalidStackID) { - rep.AddStack(StackDepotGet(stk), true); - } else { - // Sometimes we fail to extract the stack trace (FIXME: investigate), - // but we should still produce some stack trace in the report. - rep.AddStack(StackTrace(&dummy_pc, 1), true); + // Use alloca, because malloc during signal handling deadlocks + ScopedReport *rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport)); + // Take a new scope as Apple platforms require the below locks released + // before symbolizing in order to avoid a deadlock + { + ThreadRegistryLock l(&ctx->thread_registry); + new (rep) ScopedReport(ReportTypeDeadlock); + for (int i = 0; i < r->n; i++) { + rep->AddMutex(r->loop[i].mtx_ctx0, r->loop[i].stk[0]); + rep->AddUniqueTid((int)r->loop[i].thr_ctx); + rep->AddThread((int)r->loop[i].thr_ctx); + } + uptr dummy_pc = 0x42; + for (int i = 0; i < r->n; i++) { + for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) { + u32 stk = r->loop[i].stk[j]; + StackTrace stack; + if (stk && stk != kInvalidStackID) { + stack = StackDepotGet(stk); + } else { + // Sometimes we fail to extract the stack trace (FIXME: investigate), + // but we should still produce some stack trace in the report. 
+ stack = StackTrace(&dummy_pc, 1); + } + rep->AddStack(stack, true); } } +#if SANITIZER_APPLE + } // Close this scope to release the locks +#endif + OutputReport(thr, *rep); + + // Need to manually destroy this because we used placement new to allocate + rep->~ScopedReport(); +#if !SANITIZER_APPLE } - OutputReport(thr, rep); +#endif } void ReportDestroyLocked(ThreadState *thr, uptr pc, uptr addr, FastState last_lock, StackID creation_stack_id) { - // We need to lock the slot during RestoreStack because it protects - // the slot journal. - Lock slot_lock(&ctx->slots[static_cast(last_lock.sid())].mtx); - ThreadRegistryLock l0(&ctx->thread_registry); - Lock slots_lock(&ctx->slot_mtx); - ScopedReport rep(ReportTypeMutexDestroyLocked); - rep.AddMutex(addr, creation_stack_id); - VarSizeStackTrace trace; - ObtainCurrentStack(thr, pc, &trace); - rep.AddStack(trace, true); - - Tid tid; - DynamicMutexSet mset; - uptr tag; - if (!RestoreStack(EventType::kLock, last_lock.sid(), last_lock.epoch(), addr, - 0, kAccessWrite, &tid, &trace, mset, &tag)) - return; - rep.AddStack(trace, true); - rep.AddLocation(addr, 1); - OutputReport(thr, rep); + // Use alloca, because malloc during signal handling deadlocks + ScopedReport *rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport)); + // Take a new scope as Apple platforms require the below locks released + // before symbolizing in order to avoid a deadlock + { + // We need to lock the slot during RestoreStack because it protects + // the slot journal. 
+ Lock slot_lock(&ctx->slots[static_cast(last_lock.sid())].mtx); + ThreadRegistryLock l0(&ctx->thread_registry); + Lock slots_lock(&ctx->slot_mtx); + new (rep) ScopedReport(ReportTypeMutexDestroyLocked); + rep->AddMutex(addr, creation_stack_id); + VarSizeStackTrace trace; + ObtainCurrentStack(thr, pc, &trace); + rep->AddStack(trace, true); + + Tid tid; + DynamicMutexSet mset; + uptr tag; + if (!RestoreStack(EventType::kLock, last_lock.sid(), last_lock.epoch(), + addr, 0, kAccessWrite, &tid, &trace, mset, &tag)) + return; + rep->AddStack(trace, true); + rep->AddLocation(addr, 1); +#if SANITIZER_APPLE + } // Close this scope to release the locks +#endif + OutputReport(thr, *rep); + + // Need to manually destroy this because we used placement new to allocate + rep->~ScopedReport(); +#if !SANITIZER_APPLE + } +#endif } } // namespace __tsan diff --git a/libsanitizer/tsan/tsan_rtl_ppc64.S b/libsanitizer/tsan/tsan_rtl_ppc64.S index 9e533a71a9c4..8285e21aa1ec 100644 --- a/libsanitizer/tsan/tsan_rtl_ppc64.S +++ b/libsanitizer/tsan/tsan_rtl_ppc64.S @@ -1,6 +1,5 @@ #include "tsan_ppc_regs.h" - .machine altivec .section .text .hidden __tsan_setjmp .globl _setjmp diff --git a/libsanitizer/tsan/tsan_rtl_report.cpp b/libsanitizer/tsan/tsan_rtl_report.cpp index 0311df553fdd..43aef30d2f3b 100644 --- a/libsanitizer/tsan/tsan_rtl_report.cpp +++ b/libsanitizer/tsan/tsan_rtl_report.cpp @@ -11,10 +11,12 @@ //===----------------------------------------------------------------------===// #include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_internal_defs.h" #include "sanitizer_common/sanitizer_libc.h" #include "sanitizer_common/sanitizer_placement_new.h" #include "sanitizer_common/sanitizer_stackdepot.h" #include "sanitizer_common/sanitizer_stacktrace.h" +#include "tsan_defs.h" #include "tsan_fd.h" #include "tsan_flags.h" #include "tsan_mman.h" @@ -187,10 +189,8 @@ void ScopedReportBase::AddMemoryAccess(uptr addr, uptr external_tag, Shadow s, mop->size = 
size; mop->write = !(typ & kAccessRead); mop->atomic = typ & kAccessAtomic; - mop->stack = SymbolizeStack(stack); mop->external_tag = external_tag; - if (mop->stack) - mop->stack->suppressable = true; + mop->stack_trace = stack; for (uptr i = 0; i < mset->Size(); i++) { MutexSet::Desc d = mset->Get(i); int id = this->AddMutex(d.addr, d.stack_id); @@ -199,6 +199,56 @@ void ScopedReportBase::AddMemoryAccess(uptr addr, uptr external_tag, Shadow s, } } +void ScopedReportBase::SymbolizeStackElems() { + // symbolize memory ops + for (usize i = 0, size = rep_->mops.Size(); i < size; i++) { + ReportMop *mop = rep_->mops[i]; + mop->stack = SymbolizeStack(mop->stack_trace); + if (mop->stack) + mop->stack->suppressable = true; + } + + // symbolize locations + for (usize i = 0, size = rep_->locs.Size(); i < size; i++) { + // added locations have a NULL placeholder - don't dereference them + if (ReportLocation *loc = rep_->locs[i]) + loc->stack = SymbolizeStackId(loc->stack_id); + } + + // symbolize any added locations + for (usize i = 0, size = rep_->added_location_addrs.Size(); i < size; i++) { + AddedLocationAddr *added_loc = &rep_->added_location_addrs[i]; + if (ReportLocation *loc = SymbolizeData(added_loc->addr)) { + loc->suppressable = true; + rep_->locs[added_loc->locs_idx] = loc; + } + } + + // Filter out any added location placeholders that could not be symbolized + usize j = 0; + for (usize i = 0, size = rep_->locs.Size(); i < size; i++) { + if (rep_->locs[i] != nullptr) { + rep_->locs[j] = rep_->locs[i]; + j++; + } + } + rep_->locs.Resize(j); + + // symbolize threads + for (usize i = 0, size = rep_->threads.Size(); i < size; i++) { + ReportThread *rt = rep_->threads[i]; + rt->stack = SymbolizeStackId(rt->stack_id); + if (rt->stack) + rt->stack->suppressable = rt->suppressable; + } + + // symbolize mutexes + for (usize i = 0, size = rep_->mutexes.Size(); i < size; i++) { + ReportMutex *rm = rep_->mutexes[i]; + rm->stack = SymbolizeStackId(rm->stack_id); + } +} + void 
ScopedReportBase::AddUniqueTid(Tid unique_tid) { rep_->unique_tids.PushBack(unique_tid); } @@ -216,10 +266,8 @@ void ScopedReportBase::AddThread(const ThreadContext *tctx, bool suppressable) { rt->name = internal_strdup(tctx->name); rt->parent_tid = tctx->parent_tid; rt->thread_type = tctx->thread_type; - rt->stack = 0; - rt->stack = SymbolizeStackId(tctx->creation_stack_id); - if (rt->stack) - rt->stack->suppressable = suppressable; + rt->stack_id = tctx->creation_stack_id; + rt->suppressable = suppressable; } #if !SANITIZER_GO @@ -270,7 +318,7 @@ int ScopedReportBase::AddMutex(uptr addr, StackID creation_stack_id) { rep_->mutexes.PushBack(rm); rm->id = rep_->mutexes.Size() - 1; rm->addr = addr; - rm->stack = SymbolizeStackId(creation_stack_id); + rm->stack_id = creation_stack_id; return rm->id; } @@ -288,7 +336,7 @@ void ScopedReportBase::AddLocation(uptr addr, uptr size) { loc->fd_closed = closed; loc->fd = fd; loc->tid = creat_tid; - loc->stack = SymbolizeStackId(creat_stack); + loc->stack_id = creat_stack; rep_->locs.PushBack(loc); AddThread(creat_tid); return; @@ -310,7 +358,7 @@ void ScopedReportBase::AddLocation(uptr addr, uptr size) { loc->heap_chunk_size = b->siz; loc->external_tag = b->tag; loc->tid = b->tid; - loc->stack = SymbolizeStackId(b->stk); + loc->stack_id = b->stk; rep_->locs.PushBack(loc); AddThread(b->tid); return; @@ -324,11 +372,8 @@ void ScopedReportBase::AddLocation(uptr addr, uptr size) { AddThread(tctx); } #endif - if (ReportLocation *loc = SymbolizeData(addr)) { - loc->suppressable = true; - rep_->locs.PushBack(loc); - return; - } + rep_->added_location_addrs.PushBack({addr, rep_->locs.Size()}); + rep_->locs.PushBack(nullptr); } #if !SANITIZER_GO @@ -628,11 +673,12 @@ static bool HandleRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2]) { return false; } -bool OutputReport(ThreadState *thr, const ScopedReport &srep) { +bool OutputReport(ThreadState *thr, ScopedReport &srep) { // These should have been checked in ShouldReport. 
// It's too late to check them here, we have already taken locks. CHECK(flags()->report_bugs); CHECK(!thr->suppress_reports); + srep.SymbolizeStackElems(); atomic_store_relaxed(&ctx->last_symbolize_time_ns, NanoTime()); const ReportDesc *rep = srep.GetReport(); CHECK_EQ(thr->current_report, nullptr); @@ -761,65 +807,80 @@ void ReportRace(ThreadState *thr, RawShadow *shadow_mem, Shadow cur, Shadow old, DynamicMutexSet mset1; MutexSet *mset[kMop] = {&thr->mset, mset1}; - // We need to lock the slot during RestoreStack because it protects - // the slot journal. - Lock slot_lock(&ctx->slots[static_cast(s[1].sid())].mtx); - ThreadRegistryLock l0(&ctx->thread_registry); - Lock slots_lock(&ctx->slot_mtx); - if (SpuriousRace(old)) - return; - if (!RestoreStack(EventType::kAccessExt, s[1].sid(), s[1].epoch(), addr1, - size1, typ1, &tids[1], &traces[1], mset[1], &tags[1])) { - StoreShadow(&ctx->last_spurious_race, old.raw()); - return; - } + // Use alloca, because malloc during signal handling deadlocks + ScopedReport *rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport)); + // Take a new scope as Apple platforms require the below locks released + // before symbolizing in order to avoid a deadlock + { + // We need to lock the slot during RestoreStack because it protects + // the slot journal. + Lock slot_lock(&ctx->slots[static_cast(s[1].sid())].mtx); + ThreadRegistryLock l0(&ctx->thread_registry); + Lock slots_lock(&ctx->slot_mtx); + if (SpuriousRace(old)) + return; + if (!RestoreStack(EventType::kAccessExt, s[1].sid(), s[1].epoch(), addr1, + size1, typ1, &tids[1], &traces[1], mset[1], &tags[1])) { + StoreShadow(&ctx->last_spurious_race, old.raw()); + return; + } - if (IsFiredSuppression(ctx, rep_typ, traces[1])) - return; + if (IsFiredSuppression(ctx, rep_typ, traces[1])) + return; - if (HandleRacyStacks(thr, traces)) - return; + if (HandleRacyStacks(thr, traces)) + return; - // If any of the accesses has a tag, treat this as an "external" race. 
- uptr tag = kExternalTagNone; - for (uptr i = 0; i < kMop; i++) { - if (tags[i] != kExternalTagNone) { - rep_typ = ReportTypeExternalRace; - tag = tags[i]; - break; + // If any of the accesses has a tag, treat this as an "external" race. + uptr tag = kExternalTagNone; + for (uptr i = 0; i < kMop; i++) { + if (tags[i] != kExternalTagNone) { + rep_typ = ReportTypeExternalRace; + tag = tags[i]; + break; + } } - } - ScopedReport rep(rep_typ, tag); - for (uptr i = 0; i < kMop; i++) - rep.AddMemoryAccess(addr, tags[i], s[i], tids[i], traces[i], mset[i]); + new (rep) ScopedReport(rep_typ, tag); + for (uptr i = 0; i < kMop; i++) + rep->AddMemoryAccess(addr, tags[i], s[i], tids[i], traces[i], mset[i]); - for (uptr i = 0; i < kMop; i++) { - ThreadContext *tctx = static_cast( - ctx->thread_registry.GetThreadLocked(tids[i])); - rep.AddThread(tctx); - } + for (uptr i = 0; i < kMop; i++) { + ThreadContext *tctx = static_cast( + ctx->thread_registry.GetThreadLocked(tids[i])); + rep->AddThread(tctx); + } - rep.AddLocation(addr_min, addr_max - addr_min); - - if (flags()->print_full_thread_history) { - const ReportDesc *rep_desc = rep.GetReport(); - for (uptr i = 0; i < rep_desc->threads.Size(); i++) { - Tid parent_tid = rep_desc->threads[i]->parent_tid; - if (parent_tid == kMainTid || parent_tid == kInvalidTid) - continue; - ThreadContext *parent_tctx = static_cast( - ctx->thread_registry.GetThreadLocked(parent_tid)); - rep.AddThread(parent_tctx); + rep->AddLocation(addr_min, addr_max - addr_min); + + if (flags()->print_full_thread_history) { + const ReportDesc *rep_desc = rep->GetReport(); + for (uptr i = 0; i < rep_desc->threads.Size(); i++) { + Tid parent_tid = rep_desc->threads[i]->parent_tid; + if (parent_tid == kMainTid || parent_tid == kInvalidTid) + continue; + ThreadContext *parent_tctx = static_cast( + ctx->thread_registry.GetThreadLocked(parent_tid)); + rep->AddThread(parent_tctx); + } } - } #if !SANITIZER_GO - if (!((typ0 | typ1) & kAccessFree) && - s[1].epoch() <= 
thr->last_sleep_clock.Get(s[1].sid())) - rep.AddSleep(thr->last_sleep_stack_id); + if (!((typ0 | typ1) & kAccessFree) && + s[1].epoch() <= thr->last_sleep_clock.Get(s[1].sid())) + rep->AddSleep(thr->last_sleep_stack_id); +#endif + +#if SANITIZER_APPLE + } // Close this scope to release the locks +#endif + OutputReport(thr, *rep); + + // Need to manually destroy this because we used placement new to allocate + rep->~ScopedReport(); +#if !SANITIZER_APPLE + } #endif - OutputReport(thr, rep); } void PrintCurrentStack(ThreadState *thr, uptr pc) { @@ -828,25 +889,34 @@ void PrintCurrentStack(ThreadState *thr, uptr pc) { PrintStack(SymbolizeStack(trace)); } -// Always inlining PrintCurrentStackSlow, because LocatePcInTrace assumes +// Always inlining PrintCurrentStack, because LocatePcInTrace assumes // __sanitizer_print_stack_trace exists in the actual unwinded stack, but -// tail-call to PrintCurrentStackSlow breaks this assumption because +// tail-call to PrintCurrentStack breaks this assumption because // __sanitizer_print_stack_trace disappears after tail-call. // However, this solution is not reliable enough, please see dvyukov's comment // http://reviews.llvm.org/D19148#406208 // Also see PR27280 comment 2 and 3 for breaking examples and analysis. -ALWAYS_INLINE USED void PrintCurrentStackSlow(uptr pc) { +ALWAYS_INLINE USED void PrintCurrentStack(uptr pc, bool fast) { #if !SANITIZER_GO uptr bp = GET_CURRENT_FRAME(); auto *ptrace = New(); - ptrace->Unwind(pc, bp, nullptr, false); + ptrace->Unwind(pc, bp, nullptr, fast); for (uptr i = 0; i < ptrace->size / 2; i++) { uptr tmp = ptrace->trace_buffer[i]; ptrace->trace_buffer[i] = ptrace->trace_buffer[ptrace->size - i - 1]; ptrace->trace_buffer[ptrace->size - i - 1] = tmp; } - PrintStack(SymbolizeStack(*ptrace)); + + if (ready_to_symbolize) { + PrintStack(SymbolizeStack(*ptrace)); + } else { + Printf( + "WARNING: PrintCurrentStack() has been called too early, before " + "symbolization is possible. 
Printing unsymbolized stack trace:\n"); + for (unsigned int i = 0; i < ptrace->size; i++) + Printf(" #%u: 0x%zx\n", i, ptrace->trace[i]); + } #endif } @@ -857,6 +927,6 @@ using namespace __tsan; extern "C" { SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_print_stack_trace() { - PrintCurrentStackSlow(StackTrace::GetCurrentPc()); + PrintCurrentStack(StackTrace::GetCurrentPc(), false); } } // extern "C" diff --git a/libsanitizer/tsan/tsan_rtl_thread.cpp b/libsanitizer/tsan/tsan_rtl_thread.cpp index 8d29e25a6dd2..978d853b0bc7 100644 --- a/libsanitizer/tsan/tsan_rtl_thread.cpp +++ b/libsanitizer/tsan/tsan_rtl_thread.cpp @@ -88,15 +88,33 @@ void ThreadFinalize(ThreadState *thr) { #if !SANITIZER_GO if (!ShouldReport(thr, ReportTypeThreadLeak)) return; - ThreadRegistryLock l(&ctx->thread_registry); Vector leaks; - ctx->thread_registry.RunCallbackForEachThreadLocked(CollectThreadLeaks, - &leaks); + { + ThreadRegistryLock l(&ctx->thread_registry); + ctx->thread_registry.RunCallbackForEachThreadLocked(CollectThreadLeaks, + &leaks); + } + for (uptr i = 0; i < leaks.Size(); i++) { - ScopedReport rep(ReportTypeThreadLeak); - rep.AddThread(leaks[i].tctx, true); - rep.SetCount(leaks[i].count); - OutputReport(thr, rep); + // Use alloca, because malloc during signal handling deadlocks + ScopedReport *rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport)); + // Take a new scope as Apple platforms require the below locks released + // before symbolizing in order to avoid a deadlock + { + ThreadRegistryLock l(&ctx->thread_registry); + new (rep) ScopedReport(ReportTypeThreadLeak); + rep->AddThread(leaks[i].tctx, true); + rep->SetCount(leaks[i].count); +# if SANITIZER_APPLE + } // Close this scope to release the locks +# endif + OutputReport(thr, *rep); + + // Need to manually destroy this because we used placement new to allocate + rep->~ScopedReport(); +# if !SANITIZER_APPLE + } +# endif } #endif } @@ -149,7 +167,7 @@ struct OnStartedArgs { uptr tls_size; }; -void 
ThreadStart(ThreadState *thr, Tid tid, tid_t os_id, +void ThreadStart(ThreadState *thr, Tid tid, ThreadID os_id, ThreadType thread_type) { ctx->thread_registry.StartThread(tid, os_id, thread_type, thr); if (!thr->ignore_sync) { @@ -188,10 +206,14 @@ void ThreadStart(ThreadState *thr, Tid tid, tid_t os_id, } #endif -#if !SANITIZER_GO +#if !SANITIZER_GO && !SANITIZER_ANDROID // Don't imitate stack/TLS writes for the main thread, // because its initialization is synchronized with all // subsequent threads anyway. + // Because thr is created by MmapOrDie, the thr object + // is not in tls, the pointer to the thr object is in + // TLS_SLOT_SANITIZER slot. So skip this check on + // Android platform. if (tid != kMainTid) { if (stk_addr && stk_size) { const uptr pc = StackTrace::GetNextInstructionPc( diff --git a/libsanitizer/tsan/tsan_sync.cpp b/libsanitizer/tsan/tsan_sync.cpp index 09d41780d188..be5829bc823d 100644 --- a/libsanitizer/tsan/tsan_sync.cpp +++ b/libsanitizer/tsan/tsan_sync.cpp @@ -246,12 +246,29 @@ void MetaMap::MoveMemory(uptr src, uptr dst, uptr sz) { // there are no concurrent accesses to the regions (e.g. stop-the-world). CHECK_NE(src, dst); CHECK_NE(sz, 0); + + // The current MoveMemory implementation behaves incorrectly when src, dst, + // and sz are not aligned to kMetaShadowCell. + // For example, with kMetaShadowCell == 8: + // - src = 4: unexpectedly clears the metadata for the range [0, 4). + // - src = 16, dst = 4, size = 8: A sync variable for addr = 20, which should + // be moved to the metadata for address 8, is incorrectly moved to the + // metadata for address 0 instead. + // - src = 0, sz = 4: fails to move the tail metadata. + // Therefore, the following assertions is needed. 
+ DCHECK_EQ(src % kMetaShadowCell, 0); + DCHECK_EQ(dst % kMetaShadowCell, 0); + DCHECK_EQ(sz % kMetaShadowCell, 0); + uptr diff = dst - src; - u32 *src_meta = MemToMeta(src); - u32 *dst_meta = MemToMeta(dst); - u32 *src_meta_end = MemToMeta(src + sz); - uptr inc = 1; - if (dst > src) { + u32 *src_meta, *dst_meta, *src_meta_end; + uptr inc; + if (dst < src) { + src_meta = MemToMeta(src); + dst_meta = MemToMeta(dst); + src_meta_end = MemToMeta(src + sz); + inc = 1; + } else { src_meta = MemToMeta(src + sz) - 1; dst_meta = MemToMeta(dst + sz) - 1; src_meta_end = MemToMeta(src) - 1; diff --git a/libsanitizer/ubsan/ubsan_checks.inc b/libsanitizer/ubsan/ubsan_checks.inc index 846cd89ee19f..b1d09a9024e7 100644 --- a/libsanitizer/ubsan/ubsan_checks.inc +++ b/libsanitizer/ubsan/ubsan_checks.inc @@ -53,6 +53,7 @@ UBSAN_CHECK(ImplicitSignedIntegerTruncationOrSignChange, UBSAN_CHECK(InvalidShiftBase, "invalid-shift-base", "shift-base") UBSAN_CHECK(InvalidShiftExponent, "invalid-shift-exponent", "shift-exponent") UBSAN_CHECK(OutOfBoundsIndex, "out-of-bounds-index", "bounds") +UBSAN_CHECK(LocalOutOfBounds, "local-out-of-bounds", "local-bounds") UBSAN_CHECK(UnreachableCall, "unreachable-call", "unreachable") UBSAN_CHECK(MissingReturn, "missing-return", "return") UBSAN_CHECK(NonPositiveVLAIndex, "non-positive-vla-index", "vla-bound") diff --git a/libsanitizer/ubsan/ubsan_flags.cpp b/libsanitizer/ubsan/ubsan_flags.cpp index 9a66bd37518b..25cefd46ce27 100644 --- a/libsanitizer/ubsan/ubsan_flags.cpp +++ b/libsanitizer/ubsan/ubsan_flags.cpp @@ -50,7 +50,6 @@ void InitializeFlags() { { CommonFlags cf; cf.CopyFrom(*common_flags()); - cf.print_summary = false; cf.external_symbolizer_path = GetFlag("UBSAN_SYMBOLIZER_PATH"); OverrideCommonFlags(cf); } diff --git a/libsanitizer/ubsan/ubsan_handlers.cpp b/libsanitizer/ubsan/ubsan_handlers.cpp index ea332013a50d..63319f46734a 100644 --- a/libsanitizer/ubsan/ubsan_handlers.cpp +++ b/libsanitizer/ubsan/ubsan_handlers.cpp @@ -405,6 +405,28 @@ 
void __ubsan::__ubsan_handle_out_of_bounds_abort(OutOfBoundsData *Data, Die(); } +static void handleLocalOutOfBoundsImpl(ReportOptions Opts) { + // FIXME: Pass more diagnostic info. + SymbolizedStackHolder CallerLoc; + CallerLoc.reset(getCallerLocation(Opts.pc)); + Location Loc; + Loc = CallerLoc; + ErrorType ET = ErrorType::LocalOutOfBounds; + ScopedReport R(Opts, Loc, ET); + Diag(Loc, DL_Error, ET, "access out of bounds"); +} + +void __ubsan::__ubsan_handle_local_out_of_bounds() { + GET_REPORT_OPTIONS(false); + handleLocalOutOfBoundsImpl(Opts); +} + +void __ubsan::__ubsan_handle_local_out_of_bounds_abort() { + GET_REPORT_OPTIONS(true); + handleLocalOutOfBoundsImpl(Opts); + Die(); +} + static void handleBuiltinUnreachableImpl(UnreachableData *Data, ReportOptions Opts) { ErrorType ET = ErrorType::UnreachableCall; @@ -877,10 +899,7 @@ static void handleCFIBadIcall(CFICheckFailData *Data, ValueHandle Function, namespace __ubsan { -#ifdef UBSAN_CAN_USE_CXXABI - #ifdef _WIN32 - extern "C" void __ubsan_handle_cfi_bad_type_default(CFICheckFailData *Data, ValueHandle Vtable, bool ValidVtable, @@ -889,35 +908,17 @@ extern "C" void __ubsan_handle_cfi_bad_type_default(CFICheckFailData *Data, } WIN_WEAK_ALIAS(__ubsan_handle_cfi_bad_type, __ubsan_handle_cfi_bad_type_default) -#else -SANITIZER_WEAK_ATTRIBUTE -#endif void __ubsan_handle_cfi_bad_type(CFICheckFailData *Data, ValueHandle Vtable, bool ValidVtable, ReportOptions Opts); - #else +SANITIZER_WEAK_ATTRIBUTE void __ubsan_handle_cfi_bad_type(CFICheckFailData *Data, ValueHandle Vtable, bool ValidVtable, ReportOptions Opts) { Die(); } #endif -} // namespace __ubsan - -void __ubsan::__ubsan_handle_cfi_bad_icall(CFIBadIcallData *CallData, - ValueHandle Function) { - GET_REPORT_OPTIONS(false); - CFICheckFailData Data = {CFITCK_ICall, CallData->Loc, CallData->Type}; - handleCFIBadIcall(&Data, Function, Opts); -} - -void __ubsan::__ubsan_handle_cfi_bad_icall_abort(CFIBadIcallData *CallData, - ValueHandle Function) { - 
GET_REPORT_OPTIONS(true); - CFICheckFailData Data = {CFITCK_ICall, CallData->Loc, CallData->Type}; - handleCFIBadIcall(&Data, Function, Opts); - Die(); -} +} // namespace __ubsan void __ubsan::__ubsan_handle_cfi_check_fail(CFICheckFailData *Data, ValueHandle Value, diff --git a/libsanitizer/ubsan/ubsan_handlers.h b/libsanitizer/ubsan/ubsan_handlers.h index f99948ff498f..521caa96bc77 100644 --- a/libsanitizer/ubsan/ubsan_handlers.h +++ b/libsanitizer/ubsan/ubsan_handlers.h @@ -90,6 +90,9 @@ struct OutOfBoundsData { /// \brief Handle an array index out of bounds error. RECOVERABLE(out_of_bounds, OutOfBoundsData *Data, ValueHandle Index) +/// \brief Handle an local object access out of bounds error. +RECOVERABLE(local_out_of_bounds) + struct UnreachableData { SourceLocation Loc; }; @@ -217,20 +220,12 @@ enum CFITypeCheckKind : unsigned char { CFITCK_VMFCall, }; -struct CFIBadIcallData { - SourceLocation Loc; - const TypeDescriptor &Type; -}; - struct CFICheckFailData { CFITypeCheckKind CheckKind; SourceLocation Loc; const TypeDescriptor &Type; }; -/// \brief Handle control flow integrity failure for indirect function calls. -RECOVERABLE(cfi_bad_icall, CFIBadIcallData *Data, ValueHandle Function) - /// \brief Handle control flow integrity failures. 
RECOVERABLE(cfi_check_fail, CFICheckFailData *Data, ValueHandle Function, uptr VtableIsValid) diff --git a/libsanitizer/ubsan/ubsan_handlers_cxx.cpp b/libsanitizer/ubsan/ubsan_handlers_cxx.cpp index 0317a3d1428c..206a0bb485a9 100644 --- a/libsanitizer/ubsan/ubsan_handlers_cxx.cpp +++ b/libsanitizer/ubsan/ubsan_handlers_cxx.cpp @@ -156,50 +156,6 @@ void __ubsan_handle_cfi_bad_type(CFICheckFailData *Data, ValueHandle Vtable, Diag(Loc, DL_Note, ET, "check failed in %0, vtable located in %1") << SrcModule << DstModule; } - -static bool handleFunctionTypeMismatch(FunctionTypeMismatchData *Data, - ValueHandle Function, - ValueHandle calleeRTTI, - ValueHandle fnRTTI, ReportOptions Opts) { - if (checkTypeInfoEquality(reinterpret_cast(calleeRTTI), - reinterpret_cast(fnRTTI))) - return false; - - SourceLocation CallLoc = Data->Loc.acquire(); - ErrorType ET = ErrorType::FunctionTypeMismatch; - - if (ignoreReport(CallLoc, Opts, ET)) - return true; - - ScopedReport R(Opts, CallLoc, ET); - - SymbolizedStackHolder FLoc(getSymbolizedLocation(Function)); - const char *FName = FLoc.get()->info.function; - if (!FName) - FName = "(unknown)"; - - Diag(CallLoc, DL_Error, ET, - "call to function %0 through pointer to incorrect function type %1") - << FName << Data->Type; - Diag(FLoc, DL_Note, ET, "%0 defined here") << FName; - return true; -} - -void __ubsan_handle_function_type_mismatch_v1(FunctionTypeMismatchData *Data, - ValueHandle Function, - ValueHandle calleeRTTI, - ValueHandle fnRTTI) { - GET_REPORT_OPTIONS(false); - handleFunctionTypeMismatch(Data, Function, calleeRTTI, fnRTTI, Opts); -} - -void __ubsan_handle_function_type_mismatch_v1_abort( - FunctionTypeMismatchData *Data, ValueHandle Function, - ValueHandle calleeRTTI, ValueHandle fnRTTI) { - GET_REPORT_OPTIONS(true); - if (handleFunctionTypeMismatch(Data, Function, calleeRTTI, fnRTTI, Opts)) - Die(); -} } // namespace __ubsan #endif // CAN_SANITIZE_UB diff --git a/libsanitizer/ubsan/ubsan_handlers_cxx.h 
b/libsanitizer/ubsan/ubsan_handlers_cxx.h index f6f24e8d63ca..71695cbdc090 100644 --- a/libsanitizer/ubsan/ubsan_handlers_cxx.h +++ b/libsanitizer/ubsan/ubsan_handlers_cxx.h @@ -33,19 +33,6 @@ void __ubsan_handle_dynamic_type_cache_miss( extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __ubsan_handle_dynamic_type_cache_miss_abort( DynamicTypeCacheMissData *Data, ValueHandle Pointer, ValueHandle Hash); - -struct FunctionTypeMismatchData; - -extern "C" SANITIZER_INTERFACE_ATTRIBUTE void -__ubsan_handle_function_type_mismatch_v1(FunctionTypeMismatchData *Data, - ValueHandle Val, - ValueHandle calleeRTTI, - ValueHandle fnRTTI); -extern "C" SANITIZER_INTERFACE_ATTRIBUTE void -__ubsan_handle_function_type_mismatch_v1_abort(FunctionTypeMismatchData *Data, - ValueHandle Val, - ValueHandle calleeRTTI, - ValueHandle fnRTTI); } #endif // UBSAN_HANDLERS_CXX_H diff --git a/libsanitizer/ubsan/ubsan_init_standalone.cpp b/libsanitizer/ubsan/ubsan_init_standalone.cpp index 91c3f57b424b..5083246b8b7f 100644 --- a/libsanitizer/ubsan/ubsan_init_standalone.cpp +++ b/libsanitizer/ubsan/ubsan_init_standalone.cpp @@ -19,6 +19,13 @@ #include "ubsan_init.h" #include "ubsan_signals_standalone.h" +#if SANITIZER_FUCHSIA +namespace __sanitizer { +// UBSan doesn't need to do anything else special in the startup hook. 
+void EarlySanitizerInit() {} +} // namespace __sanitizer +#endif // SANITIZER_FUCHSIA + namespace __ubsan { class UbsanStandaloneInitializer { diff --git a/libsanitizer/ubsan/ubsan_interface.inc b/libsanitizer/ubsan/ubsan_interface.inc index f95f71af3ed1..0eb109f37d44 100644 --- a/libsanitizer/ubsan/ubsan_interface.inc +++ b/libsanitizer/ubsan/ubsan_interface.inc @@ -21,8 +21,6 @@ INTERFACE_FUNCTION(__ubsan_handle_dynamic_type_cache_miss) INTERFACE_FUNCTION(__ubsan_handle_dynamic_type_cache_miss_abort) INTERFACE_FUNCTION(__ubsan_handle_float_cast_overflow) INTERFACE_FUNCTION(__ubsan_handle_float_cast_overflow_abort) -INTERFACE_FUNCTION(__ubsan_handle_function_type_mismatch_v1) -INTERFACE_FUNCTION(__ubsan_handle_function_type_mismatch_v1_abort) INTERFACE_FUNCTION(__ubsan_handle_function_type_mismatch) INTERFACE_FUNCTION(__ubsan_handle_function_type_mismatch_abort) INTERFACE_FUNCTION(__ubsan_handle_implicit_conversion) @@ -48,6 +46,8 @@ INTERFACE_FUNCTION(__ubsan_handle_nullability_return_v1) INTERFACE_FUNCTION(__ubsan_handle_nullability_return_v1_abort) INTERFACE_FUNCTION(__ubsan_handle_out_of_bounds) INTERFACE_FUNCTION(__ubsan_handle_out_of_bounds_abort) +INTERFACE_FUNCTION(__ubsan_handle_local_out_of_bounds) +INTERFACE_FUNCTION(__ubsan_handle_local_out_of_bounds_abort) INTERFACE_FUNCTION(__ubsan_handle_pointer_overflow) INTERFACE_FUNCTION(__ubsan_handle_pointer_overflow_abort) INTERFACE_FUNCTION(__ubsan_handle_shift_out_of_bounds) diff --git a/libsanitizer/ubsan/ubsan_platform.h b/libsanitizer/ubsan/ubsan_platform.h index ad3e883f0f35..c7eb1a967e53 100644 --- a/libsanitizer/ubsan/ubsan_platform.h +++ b/libsanitizer/ubsan/ubsan_platform.h @@ -12,16 +12,14 @@ #ifndef UBSAN_PLATFORM_H #define UBSAN_PLATFORM_H -#ifndef CAN_SANITIZE_UB // Other platforms should be easy to add, and probably work as-is. 
#if defined(__linux__) || defined(__FreeBSD__) || defined(__APPLE__) || \ defined(__NetBSD__) || defined(__DragonFly__) || \ (defined(__sun__) && defined(__svr4__)) || defined(_WIN32) || \ - defined(__Fuchsia__) + defined(__Fuchsia__) || defined(__HAIKU__) #define CAN_SANITIZE_UB 1 #else # define CAN_SANITIZE_UB 0 #endif -#endif //CAN_SANITIZE_UB #endif diff --git a/libsanitizer/ubsan/ubsan_value.h b/libsanitizer/ubsan/ubsan_value.h index 430c9ea0dc8d..ee523cf5ddda 100644 --- a/libsanitizer/ubsan/ubsan_value.h +++ b/libsanitizer/ubsan/ubsan_value.h @@ -150,9 +150,12 @@ public: unsigned getIntegerBitCount() const { DCHECK(isIntegerTy()); - if (isSignedBitIntTy()) - return *reinterpret_cast(getBitIntBitCountPointer()); - else + if (isSignedBitIntTy()) { + u32 BitCountValue; + internal_memcpy(&BitCountValue, getBitIntBitCountPointer(), + sizeof(BitCountValue)); + return BitCountValue; + } else return getIntegerBitWidth(); }