git.ipfire.org Git - thirdparty/gcc.git/commitdiff
libsanitizer: Merge with upstream
author: H.J. Lu <hjl.tools@gmail.com>
Mon, 27 Sep 2021 17:43:33 +0000 (10:43 -0700)
committer: H.J. Lu <hjl.tools@gmail.com>
Fri, 1 Oct 2021 16:02:54 +0000 (09:02 -0700)
Merged revision: 1c2e5fd66ea27d0c51360ba4e22099124a915562

155 files changed:
libsanitizer/MERGE
libsanitizer/asan/asan_fuchsia.cpp
libsanitizer/asan/asan_globals.cpp
libsanitizer/asan/asan_interceptors.cpp
libsanitizer/asan/asan_interceptors.h
libsanitizer/asan/asan_mapping.h
libsanitizer/asan/asan_report.cpp
libsanitizer/asan/asan_rtl.cpp
libsanitizer/asan/asan_stats.cpp
libsanitizer/asan/asan_thread.cpp
libsanitizer/hwasan/Makefile.am
libsanitizer/hwasan/Makefile.in
libsanitizer/hwasan/hwasan.cpp
libsanitizer/hwasan/hwasan.h
libsanitizer/hwasan/hwasan_allocation_functions.cpp
libsanitizer/hwasan/hwasan_allocator.cpp
libsanitizer/hwasan/hwasan_dynamic_shadow.cpp
libsanitizer/hwasan/hwasan_fuchsia.cpp
libsanitizer/hwasan/hwasan_interceptors.cpp
libsanitizer/hwasan/hwasan_interface_internal.h
libsanitizer/hwasan/hwasan_linux.cpp
libsanitizer/hwasan/hwasan_report.cpp
libsanitizer/hwasan/hwasan_setjmp_aarch64.S [moved from libsanitizer/hwasan/hwasan_setjmp.S with 87% similarity]
libsanitizer/hwasan/hwasan_setjmp_x86_64.S [new file with mode: 0644]
libsanitizer/hwasan/hwasan_thread.cpp
libsanitizer/hwasan/hwasan_type_test.cpp
libsanitizer/include/sanitizer/asan_interface.h
libsanitizer/include/sanitizer/common_interface_defs.h
libsanitizer/include/sanitizer/dfsan_interface.h
libsanitizer/include/sanitizer/linux_syscall_hooks.h
libsanitizer/include/sanitizer/tsan_interface.h
libsanitizer/interception/interception_win.cpp
libsanitizer/lsan/lsan_allocator.h
libsanitizer/lsan/lsan_common.cpp
libsanitizer/sanitizer_common/sancov_flags.inc
libsanitizer/sanitizer_common/sanitizer_addrhashmap.h
libsanitizer/sanitizer_common/sanitizer_allocator_primary64.h
libsanitizer/sanitizer_common/sanitizer_allocator_size_class_map.h
libsanitizer/sanitizer_common/sanitizer_asm.h
libsanitizer/sanitizer_common/sanitizer_atomic_clang_mips.h
libsanitizer/sanitizer_common/sanitizer_common.h
libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc
libsanitizer/sanitizer_common/sanitizer_common_interceptors_format.inc
libsanitizer/sanitizer_common/sanitizer_common_interceptors_netbsd_compat.inc
libsanitizer/sanitizer_common/sanitizer_common_nolibc.cpp
libsanitizer/sanitizer_common/sanitizer_common_syscalls.inc
libsanitizer/sanitizer_common/sanitizer_coverage_fuchsia.cpp
libsanitizer/sanitizer_common/sanitizer_coverage_libcdep_new.cpp
libsanitizer/sanitizer_common/sanitizer_file.cpp
libsanitizer/sanitizer_common/sanitizer_file.h
libsanitizer/sanitizer_common/sanitizer_flag_parser.h
libsanitizer/sanitizer_common/sanitizer_flags.inc
libsanitizer/sanitizer_common/sanitizer_fuchsia.cpp
libsanitizer/sanitizer_common/sanitizer_interceptors_ioctl_netbsd.inc
libsanitizer/sanitizer_common/sanitizer_interface_internal.h
libsanitizer/sanitizer_common/sanitizer_internal_defs.h
libsanitizer/sanitizer_common/sanitizer_libc.cpp
libsanitizer/sanitizer_common/sanitizer_libc.h
libsanitizer/sanitizer_common/sanitizer_libignore.cpp
libsanitizer/sanitizer_common/sanitizer_libignore.h
libsanitizer/sanitizer_common/sanitizer_linux.cpp
libsanitizer/sanitizer_common/sanitizer_linux_libcdep.cpp
libsanitizer/sanitizer_common/sanitizer_local_address_space_view.h
libsanitizer/sanitizer_common/sanitizer_mac.cpp
libsanitizer/sanitizer_common/sanitizer_mac.h
libsanitizer/sanitizer_common/sanitizer_mutex.cpp
libsanitizer/sanitizer_common/sanitizer_mutex.h
libsanitizer/sanitizer_common/sanitizer_platform.h
libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h
libsanitizer/sanitizer_common/sanitizer_platform_limits_freebsd.cpp
libsanitizer/sanitizer_common/sanitizer_platform_limits_freebsd.h
libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cpp
libsanitizer/sanitizer_common/sanitizer_platform_limits_netbsd.cpp
libsanitizer/sanitizer_common/sanitizer_platform_limits_netbsd.h
libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.cpp
libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h
libsanitizer/sanitizer_common/sanitizer_platform_limits_solaris.cpp
libsanitizer/sanitizer_common/sanitizer_platform_limits_solaris.h
libsanitizer/sanitizer_common/sanitizer_posix.h
libsanitizer/sanitizer_common/sanitizer_posix_libcdep.cpp
libsanitizer/sanitizer_common/sanitizer_printf.cpp
libsanitizer/sanitizer_common/sanitizer_signal_interceptors.inc
libsanitizer/sanitizer_common/sanitizer_solaris.cpp
libsanitizer/sanitizer_common/sanitizer_stacktrace.cpp
libsanitizer/sanitizer_common/sanitizer_stacktrace_libcdep.cpp
libsanitizer/sanitizer_common/sanitizer_stacktrace_printer.cpp
libsanitizer/sanitizer_common/sanitizer_stacktrace_sparc.cpp
libsanitizer/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cpp
libsanitizer/sanitizer_common/sanitizer_stoptheworld_netbsd_libcdep.cpp
libsanitizer/sanitizer_common/sanitizer_symbolizer.h
libsanitizer/sanitizer_common/sanitizer_symbolizer_internal.h
libsanitizer/sanitizer_common/sanitizer_symbolizer_libcdep.cpp
libsanitizer/sanitizer_common/sanitizer_syscall_linux_hexagon.inc [new file with mode: 0644]
libsanitizer/sanitizer_common/sanitizer_thread_registry.cpp
libsanitizer/sanitizer_common/sanitizer_thread_registry.h
libsanitizer/sanitizer_common/sanitizer_tls_get_addr.cpp
libsanitizer/sanitizer_common/sanitizer_win.cpp
libsanitizer/tsan/Makefile.am
libsanitizer/tsan/Makefile.in
libsanitizer/tsan/tsan_clock.cpp
libsanitizer/tsan/tsan_clock.h
libsanitizer/tsan/tsan_debugging.cpp
libsanitizer/tsan/tsan_defs.h
libsanitizer/tsan/tsan_dense_alloc.h
libsanitizer/tsan/tsan_external.cpp
libsanitizer/tsan/tsan_fd.cpp
libsanitizer/tsan/tsan_fd.h
libsanitizer/tsan/tsan_flags.cpp
libsanitizer/tsan/tsan_flags.inc
libsanitizer/tsan/tsan_ignoreset.cpp
libsanitizer/tsan/tsan_ignoreset.h
libsanitizer/tsan/tsan_ilist.h [new file with mode: 0644]
libsanitizer/tsan/tsan_interceptors.h
libsanitizer/tsan/tsan_interceptors_mac.cpp
libsanitizer/tsan/tsan_interceptors_posix.cpp
libsanitizer/tsan/tsan_interface.cpp
libsanitizer/tsan/tsan_interface.h
libsanitizer/tsan/tsan_interface.inc [new file with mode: 0644]
libsanitizer/tsan/tsan_interface_ann.cpp
libsanitizer/tsan/tsan_interface_atomic.cpp
libsanitizer/tsan/tsan_interface_inl.h [deleted file]
libsanitizer/tsan/tsan_interface_java.cpp
libsanitizer/tsan/tsan_mman.cpp
libsanitizer/tsan/tsan_mman.h
libsanitizer/tsan/tsan_mutex.cpp [deleted file]
libsanitizer/tsan/tsan_mutex.h [deleted file]
libsanitizer/tsan/tsan_mutexset.cpp
libsanitizer/tsan/tsan_mutexset.h
libsanitizer/tsan/tsan_platform.h
libsanitizer/tsan/tsan_platform_linux.cpp
libsanitizer/tsan/tsan_platform_mac.cpp
libsanitizer/tsan/tsan_platform_posix.cpp
libsanitizer/tsan/tsan_platform_windows.cpp
libsanitizer/tsan/tsan_report.cpp
libsanitizer/tsan/tsan_report.h
libsanitizer/tsan/tsan_rtl.cpp
libsanitizer/tsan/tsan_rtl.h
libsanitizer/tsan/tsan_rtl_mutex.cpp
libsanitizer/tsan/tsan_rtl_ppc64.S
libsanitizer/tsan/tsan_rtl_report.cpp
libsanitizer/tsan/tsan_rtl_thread.cpp
libsanitizer/tsan/tsan_shadow.h [new file with mode: 0644]
libsanitizer/tsan/tsan_stack_trace.cpp
libsanitizer/tsan/tsan_symbolize.cpp
libsanitizer/tsan/tsan_sync.cpp
libsanitizer/tsan/tsan_sync.h
libsanitizer/tsan/tsan_trace.h
libsanitizer/tsan/tsan_update_shadow_word.inc [moved from libsanitizer/tsan/tsan_update_shadow_word_inl.h with 96% similarity]
libsanitizer/tsan/tsan_vector_clock.cpp [new file with mode: 0644]
libsanitizer/tsan/tsan_vector_clock.h [new file with mode: 0644]
libsanitizer/ubsan/ubsan_diag.cpp
libsanitizer/ubsan/ubsan_flags.cpp
libsanitizer/ubsan/ubsan_handlers.cpp
libsanitizer/ubsan/ubsan_handlers.h
libsanitizer/ubsan/ubsan_platform.h

index 81d00f27de45653f7783663a169bb7b2f0f18a1f..2094a8beb3e0610243a2e09abe768d54a76f66d2 100644 (file)
@@ -1,4 +1,4 @@
-7704fedfff6ef5676adb6415f3be0ac927d1a746
+1c2e5fd66ea27d0c51360ba4e22099124a915562
 
 The first line of this file holds the git revision number of the
 last merge done from the master library sources.
index b0c7255144ac8ea32adb1a0149424398cf3d6f92..b419019d137d4f17ab981c059ebb23e4b93831fd 100644 (file)
@@ -31,7 +31,8 @@ namespace __asan {
 // AsanInitInternal->InitializeHighMemEnd (asan_rtl.cpp).
 // Just do some additional sanity checks here.
 void InitializeShadowMemory() {
-  if (Verbosity()) PrintAddressSpaceLayout();
+  if (Verbosity())
+    PrintAddressSpaceLayout();
 
   // Make sure SHADOW_OFFSET doesn't use __asan_shadow_memory_dynamic_address.
   __asan_shadow_memory_dynamic_address = kDefaultShadowSentinel;
@@ -62,7 +63,34 @@ void AsanOnDeadlySignal(int signo, void *siginfo, void *context) {
   UNIMPLEMENTED();
 }
 
-bool PlatformUnpoisonStacks() { return false; }
+bool PlatformUnpoisonStacks() {
+  // The current sp might not point to the default stack. This
+  // could be because we are in a crash stack from fuzzing for example.
+  // Unpoison the default stack and the current stack page.
+  AsanThread *curr_thread = GetCurrentThread();
+  CHECK(curr_thread != nullptr);
+  uptr top = curr_thread->stack_top();
+  uptr bottom = curr_thread->stack_bottom();
+  // The default stack grows from top to bottom. (bottom < top).
+
+  uptr local_stack = reinterpret_cast<uptr>(__builtin_frame_address(0));
+  if (local_stack >= bottom && local_stack <= top) {
+    // The current stack is the default stack.
+    // We only need to unpoison from where we are using until the end.
+    bottom = RoundDownTo(local_stack, GetPageSize());
+    UnpoisonStack(bottom, top, "default");
+  } else {
+    // The current stack is not the default stack.
+    // Unpoison the entire default stack and the current stack page.
+    UnpoisonStack(bottom, top, "default");
+    bottom = RoundDownTo(local_stack, GetPageSize());
+    top = bottom + GetPageSize();
+    UnpoisonStack(bottom, top, "unknown");
+    return true;
+  }
+
+  return false;
+}
 
 // We can use a plain thread_local variable for TSD.
 static thread_local void *per_thread;
@@ -148,7 +176,8 @@ static void *BeforeThreadCreateHook(uptr user_id, bool detached,
                                     uptr stack_size) {
   EnsureMainThreadIDIsCorrect();
   // Strict init-order checking is thread-hostile.
-  if (flags()->strict_init_order) StopInitOrderChecking();
+  if (flags()->strict_init_order)
+    StopInitOrderChecking();
 
   GET_STACK_TRACE_THREAD;
   u32 parent_tid = GetCurrentTidOrInvalid();
index e045c31cd1c38ec4258a1857ced145934b2aa90c..9bf378f62071da89513da0cf9ddf4d00dbefb9d6 100644 (file)
@@ -35,7 +35,7 @@ struct ListOfGlobals {
   ListOfGlobals *next;
 };
 
-static BlockingMutex mu_for_globals(LINKER_INITIALIZED);
+static Mutex mu_for_globals;
 static LowLevelAllocator allocator_for_globals;
 static ListOfGlobals *list_of_all_globals;
 
@@ -108,7 +108,7 @@ static u32 FindRegistrationSite(const Global *g) {
 int GetGlobalsForAddress(uptr addr, Global *globals, u32 *reg_sites,
                          int max_globals) {
   if (!flags()->report_globals) return 0;
-  BlockingMutexLock lock(&mu_for_globals);
+  Lock lock(&mu_for_globals);
   int res = 0;
   for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
     const Global &g = *l->g;
@@ -154,6 +154,23 @@ static void CheckODRViolationViaIndicator(const Global *g) {
   }
 }
 
+// Check ODR violation for given global G by checking if it's already poisoned.
+// We use this method in case compiler doesn't use private aliases for global
+// variables.
+static void CheckODRViolationViaPoisoning(const Global *g) {
+  if (__asan_region_is_poisoned(g->beg, g->size_with_redzone)) {
+    // This check may not be enough: if the first global is much larger
+    // the entire redzone of the second global may be within the first global.
+    for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
+      if (g->beg == l->g->beg &&
+          (flags()->detect_odr_violation >= 2 || g->size != l->g->size) &&
+          !IsODRViolationSuppressed(g->name))
+        ReportODRViolation(g, FindRegistrationSite(g),
+                           l->g, FindRegistrationSite(l->g));
+    }
+  }
+}
+
 // Clang provides two different ways for global variables protection:
 // it can poison the global itself or its private alias. In former
 // case we may poison same symbol multiple times, that can help us to
@@ -199,6 +216,8 @@ static void RegisterGlobal(const Global *g) {
     // where two globals with the same name are defined in different modules.
     if (UseODRIndicator(g))
       CheckODRViolationViaIndicator(g);
+    else
+      CheckODRViolationViaPoisoning(g);
   }
   if (CanPoisonMemory())
     PoisonRedZones(*g);
@@ -238,7 +257,7 @@ static void UnregisterGlobal(const Global *g) {
 }
 
 void StopInitOrderChecking() {
-  BlockingMutexLock lock(&mu_for_globals);
+  Lock lock(&mu_for_globals);
   if (!flags()->check_initialization_order || !dynamic_init_globals)
     return;
   flags()->check_initialization_order = false;
@@ -340,7 +359,7 @@ void __asan_register_globals(__asan_global *globals, uptr n) {
   if (!flags()->report_globals) return;
   GET_STACK_TRACE_MALLOC;
   u32 stack_id = StackDepotPut(stack);
-  BlockingMutexLock lock(&mu_for_globals);
+  Lock lock(&mu_for_globals);
   if (!global_registration_site_vector) {
     global_registration_site_vector =
         new (allocator_for_globals) GlobalRegistrationSiteVector;
@@ -379,7 +398,7 @@ void __asan_register_globals(__asan_global *globals, uptr n) {
 // We must do this when a shared objects gets dlclosed.
 void __asan_unregister_globals(__asan_global *globals, uptr n) {
   if (!flags()->report_globals) return;
-  BlockingMutexLock lock(&mu_for_globals);
+  Lock lock(&mu_for_globals);
   for (uptr i = 0; i < n; i++) {
     if (SANITIZER_WINDOWS && globals[i].beg == 0) {
       // Skip globals that look like padding from the MSVC incremental linker.
@@ -405,7 +424,7 @@ void __asan_before_dynamic_init(const char *module_name) {
   bool strict_init_order = flags()->strict_init_order;
   CHECK(module_name);
   CHECK(asan_inited);
-  BlockingMutexLock lock(&mu_for_globals);
+  Lock lock(&mu_for_globals);
   if (flags()->report_globals >= 3)
     Printf("DynInitPoison module: %s\n", module_name);
   for (uptr i = 0, n = dynamic_init_globals->size(); i < n; ++i) {
@@ -429,7 +448,7 @@ void __asan_after_dynamic_init() {
       !dynamic_init_globals)
     return;
   CHECK(asan_inited);
-  BlockingMutexLock lock(&mu_for_globals);
+  Lock lock(&mu_for_globals);
   // FIXME: Optionally report that we're unpoisoning globals from a module.
   for (uptr i = 0, n = dynamic_init_globals->size(); i < n; ++i) {
     DynInitGlobal &dyn_g = (*dynamic_init_globals)[i];
index d0a6dd48a74860b7fdffede2075e8c3d623d9967..b28909152e208e7a2ee953971be91a663275afc4 100644 (file)
@@ -49,8 +49,8 @@ namespace __asan {
   ASAN_READ_RANGE((ctx), (s),                                   \
     common_flags()->strict_string_checks ? (len) + 1 : (n))
 
-#define ASAN_READ_STRING(ctx, s, n)                             \
-  ASAN_READ_STRING_OF_LEN((ctx), (s), REAL(strlen)(s), (n))
+#  define ASAN_READ_STRING(ctx, s, n) \
+    ASAN_READ_STRING_OF_LEN((ctx), (s), internal_strlen(s), (n))
 
 static inline uptr MaybeRealStrnlen(const char *s, uptr maxlen) {
 #if SANITIZER_INTERCEPT_STRNLEN
@@ -370,9 +370,9 @@ DEFINE_REAL(char*, index, const char *string, int c)
     ASAN_INTERCEPTOR_ENTER(ctx, strcat);
     ENSURE_ASAN_INITED();
     if (flags()->replace_str) {
-      uptr from_length = REAL(strlen)(from);
+      uptr from_length = internal_strlen(from);
       ASAN_READ_RANGE(ctx, from, from_length + 1);
-      uptr to_length = REAL(strlen)(to);
+      uptr to_length = internal_strlen(to);
       ASAN_READ_STRING_OF_LEN(ctx, to, to_length, to_length);
       ASAN_WRITE_RANGE(ctx, to + to_length, from_length + 1);
       // If the copying actually happens, the |from| string should not overlap
@@ -394,7 +394,7 @@ INTERCEPTOR(char*, strncat, char *to, const char *from, uptr size) {
     uptr from_length = MaybeRealStrnlen(from, size);
     uptr copy_length = Min(size, from_length + 1);
     ASAN_READ_RANGE(ctx, from, copy_length);
-    uptr to_length = REAL(strlen)(to);
+    uptr to_length = internal_strlen(to);
     ASAN_READ_STRING_OF_LEN(ctx, to, to_length, to_length);
     ASAN_WRITE_RANGE(ctx, to + to_length, from_length + 1);
     if (from_length > 0) {
@@ -419,7 +419,7 @@ INTERCEPTOR(char *, strcpy, char *to, const char *from) {
   }
   ENSURE_ASAN_INITED();
   if (flags()->replace_str) {
-    uptr from_size = REAL(strlen)(from) + 1;
+    uptr from_size = internal_strlen(from) + 1;
     CHECK_RANGES_OVERLAP("strcpy", to, from_size, from, from_size);
     ASAN_READ_RANGE(ctx, from, from_size);
     ASAN_WRITE_RANGE(ctx, to, from_size);
@@ -432,7 +432,7 @@ INTERCEPTOR(char*, strdup, const char *s) {
   ASAN_INTERCEPTOR_ENTER(ctx, strdup);
   if (UNLIKELY(!asan_inited)) return internal_strdup(s);
   ENSURE_ASAN_INITED();
-  uptr length = REAL(strlen)(s);
+  uptr length = internal_strlen(s);
   if (flags()->replace_str) {
     ASAN_READ_RANGE(ctx, s, length + 1);
   }
@@ -448,7 +448,7 @@ INTERCEPTOR(char*, __strdup, const char *s) {
   ASAN_INTERCEPTOR_ENTER(ctx, strdup);
   if (UNLIKELY(!asan_inited)) return internal_strdup(s);
   ENSURE_ASAN_INITED();
-  uptr length = REAL(strlen)(s);
+  uptr length = internal_strlen(s);
   if (flags()->replace_str) {
     ASAN_READ_RANGE(ctx, s, length + 1);
   }
@@ -581,7 +581,7 @@ INTERCEPTOR(int, atexit, void (*func)()) {
 #if CAN_SANITIZE_LEAKS
   __lsan::ScopedInterceptorDisabler disabler;
 #endif
-  // Avoid calling real atexit as it is unrechable on at least on Linux.
+  // Avoid calling real atexit as it is unreachable on at least on Linux.
   int res = REAL(__cxa_atexit)((void (*)(void *a))func, nullptr, nullptr);
   REAL(__cxa_atexit)(AtCxaAtexit, nullptr, nullptr);
   return res;
index 25e05e458bed53f986d5e18c5abbee23ac1f3923..047b044c8bf47da51e74436dc088b2740fd162dc 100644 (file)
@@ -81,12 +81,7 @@ void InitializePlatformInterceptors();
 #if ASAN_HAS_EXCEPTIONS && !SANITIZER_WINDOWS && !SANITIZER_SOLARIS && \
     !SANITIZER_NETBSD
 # define ASAN_INTERCEPT___CXA_THROW 1
-# if ! defined(ASAN_HAS_CXA_RETHROW_PRIMARY_EXCEPTION) \
-     || ASAN_HAS_CXA_RETHROW_PRIMARY_EXCEPTION
-#   define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 1
-# else
-#   define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 0
-# endif
+# define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 1
 # if defined(_GLIBCXX_SJLJ_EXCEPTIONS) || (SANITIZER_IOS && defined(__arm__))
 #  define ASAN_INTERCEPT__UNWIND_SJLJ_RAISEEXCEPTION 1
 # else
@@ -138,29 +133,30 @@ DECLARE_REAL(char*, strncpy, char *to, const char *from, uptr size)
 DECLARE_REAL(uptr, strnlen, const char *s, uptr maxlen)
 DECLARE_REAL(char*, strstr, const char *s1, const char *s2)
 
-#if !SANITIZER_MAC
-#define ASAN_INTERCEPT_FUNC(name)                                        \
-  do {                                                                   \
-    if (!INTERCEPT_FUNCTION(name))                                       \
-      VReport(1, "AddressSanitizer: failed to intercept '%s'\n", #name); \
-  } while (0)
-#define ASAN_INTERCEPT_FUNC_VER(name, ver)                                  \
-  do {                                                                      \
-    if (!INTERCEPT_FUNCTION_VER(name, ver))                                 \
-      VReport(1, "AddressSanitizer: failed to intercept '%s@@%s'\n", #name, \
-              #ver);                                                        \
-  } while (0)
-#define ASAN_INTERCEPT_FUNC_VER_UNVERSIONED_FALLBACK(name, ver)              \
-  do {                                                                       \
-    if (!INTERCEPT_FUNCTION_VER(name, ver) && !INTERCEPT_FUNCTION(name))     \
-      VReport(1, "AddressSanitizer: failed to intercept '%s@@%s' or '%s'\n", \
-              #name, #ver, #name);                                           \
-  } while (0)
-
-#else
+#  if !SANITIZER_MAC
+#    define ASAN_INTERCEPT_FUNC(name)                                        \
+      do {                                                                   \
+        if (!INTERCEPT_FUNCTION(name))                                       \
+          VReport(1, "AddressSanitizer: failed to intercept '%s'\n", #name); \
+      } while (0)
+#    define ASAN_INTERCEPT_FUNC_VER(name, ver)                           \
+      do {                                                               \
+        if (!INTERCEPT_FUNCTION_VER(name, ver))                          \
+          VReport(1, "AddressSanitizer: failed to intercept '%s@@%s'\n", \
+                  #name, ver);                                           \
+      } while (0)
+#    define ASAN_INTERCEPT_FUNC_VER_UNVERSIONED_FALLBACK(name, ver)           \
+      do {                                                                    \
+        if (!INTERCEPT_FUNCTION_VER(name, ver) && !INTERCEPT_FUNCTION(name))  \
+          VReport(1,                                                          \
+                  "AddressSanitizer: failed to intercept '%s@@%s' or '%s'\n", \
+                  #name, ver, #name);                                         \
+      } while (0)
+
+#  else
 // OS X interceptors don't need to be initialized with INTERCEPT_FUNCTION.
-#define ASAN_INTERCEPT_FUNC(name)
-#endif  // SANITIZER_MAC
+#    define ASAN_INTERCEPT_FUNC(name)
+#  endif  // SANITIZER_MAC
 
 #endif  // !SANITIZER_FUCHSIA
 
index 4b0037fced388ff2739e294c94aa50b014c28f89..e5a7f2007aea8b8208e929e08ee675a55cd252eb 100644 (file)
@@ -165,7 +165,7 @@ static const u64 kAArch64_ShadowOffset64 = 1ULL << 36;
 static const u64 kRiscv64_ShadowOffset64 = 0xd55550000;
 static const u64 kMIPS32_ShadowOffset32 = 0x0aaa0000;
 static const u64 kMIPS64_ShadowOffset64 = 1ULL << 37;
-static const u64 kPPC64_ShadowOffset64 = 1ULL << 41;
+static const u64 kPPC64_ShadowOffset64 = 1ULL << 44;
 static const u64 kSystemZ_ShadowOffset64 = 1ULL << 52;
 static const u64 kSPARC64_ShadowOffset64 = 1ULL << 43;  // 0x80000000000
 static const u64 kFreeBSD_ShadowOffset32 = 1ULL << 30;  // 0x40000000
index 03f1ed2b0186642551251ec95e75c8cdb9452f45..271d8964038399ac941b0d78b883937a4c3c8696 100644 (file)
@@ -32,12 +32,12 @@ namespace __asan {
 static void (*error_report_callback)(const char*);
 static char *error_message_buffer = nullptr;
 static uptr error_message_buffer_pos = 0;
-static BlockingMutex error_message_buf_mutex(LINKER_INITIALIZED);
+static Mutex error_message_buf_mutex;
 static const unsigned kAsanBuggyPcPoolSize = 25;
 static __sanitizer::atomic_uintptr_t AsanBuggyPcPool[kAsanBuggyPcPoolSize];
 
 void AppendToErrorMessageBuffer(const char *buffer) {
-  BlockingMutexLock l(&error_message_buf_mutex);
+  Lock l(&error_message_buf_mutex);
   if (!error_message_buffer) {
     error_message_buffer =
       (char*)MmapOrDieQuietly(kErrorMessageBufferSize, __func__);
@@ -155,10 +155,10 @@ class ScopedInErrorReport {
       DumpProcessMap();
 
     // Copy the message buffer so that we could start logging without holding a
-    // lock that gets aquired during printing.
+    // lock that gets acquired during printing.
     InternalMmapVector<char> buffer_copy(kErrorMessageBufferSize);
     {
-      BlockingMutexLock l(&error_message_buf_mutex);
+      Lock l(&error_message_buf_mutex);
       internal_memcpy(buffer_copy.data(),
                       error_message_buffer, kErrorMessageBufferSize);
       // Clear error_message_buffer so that if we find other errors
@@ -490,7 +490,7 @@ void __asan_report_error(uptr pc, uptr bp, uptr sp, uptr addr, int is_write,
 }
 
 void NOINLINE __asan_set_error_report_callback(void (*callback)(const char*)) {
-  BlockingMutexLock l(&error_message_buf_mutex);
+  Lock l(&error_message_buf_mutex);
   error_report_callback = callback;
 }
 
index e06a1113f4edc35e25e98d691bab75d1bf3435ee..bfaa3bc270274c82ee52e9b3367ee7a6203e6c51 100644 (file)
@@ -82,6 +82,17 @@ void ShowStatsAndAbort() {
   Die();
 }
 
+NOINLINE
+static void ReportGenericErrorWrapper(uptr addr, bool is_write, int size,
+                                      int exp_arg, bool fatal) {
+  if (__asan_test_only_reported_buggy_pointer) {
+    *__asan_test_only_reported_buggy_pointer = addr;
+  } else {
+    GET_CALLER_PC_BP_SP;
+    ReportGenericError(pc, bp, sp, addr, is_write, size, exp_arg, fatal);
+  }
+}
+
 // --------------- LowLevelAllocateCallbac ---------- {{{1
 static void OnLowLevelAllocate(uptr ptr, uptr size) {
   PoisonShadow(ptr, size, kAsanInternalHeapMagic);
@@ -145,12 +156,7 @@ ASAN_REPORT_ERROR_N(store, true)
     if (UNLIKELY(size >= SHADOW_GRANULARITY ||                                 \
                  ((s8)((addr & (SHADOW_GRANULARITY - 1)) + size - 1)) >=       \
                      (s8)s)) {                                                 \
-      if (__asan_test_only_reported_buggy_pointer) {                           \
-        *__asan_test_only_reported_buggy_pointer = addr;                       \
-      } else {                                                                 \
-        GET_CALLER_PC_BP_SP;                                                   \
-        ReportGenericError(pc, bp, sp, addr, is_write, size, exp_arg, fatal);  \
-      }                                                                        \
+      ReportGenericErrorWrapper(addr, is_write, size, exp_arg, fatal);         \
     }                                                                          \
   }
 
index 00ded8f5ef50700c5814ba7d4c3b646dac7086b5..4182761083337efa8e05788204ea9227ebff2d5b 100644 (file)
@@ -62,11 +62,11 @@ void AsanStats::MergeFrom(const AsanStats *stats) {
     dst_ptr[i] += src_ptr[i];
 }
 
-static BlockingMutex print_lock(LINKER_INITIALIZED);
+static Mutex print_lock;
 
 static AsanStats unknown_thread_stats(LINKER_INITIALIZED);
 static AsanStats dead_threads_stats(LINKER_INITIALIZED);
-static BlockingMutex dead_threads_stats_lock(LINKER_INITIALIZED);
+static Mutex dead_threads_stats_lock;
 // Required for malloc_zone_statistics() on OS X. This can't be stored in
 // per-thread AsanStats.
 static uptr max_malloced_memory;
@@ -87,7 +87,7 @@ static void GetAccumulatedStats(AsanStats *stats) {
   }
   stats->MergeFrom(&unknown_thread_stats);
   {
-    BlockingMutexLock lock(&dead_threads_stats_lock);
+    Lock lock(&dead_threads_stats_lock);
     stats->MergeFrom(&dead_threads_stats);
   }
   // This is not very accurate: we may miss allocation peaks that happen
@@ -99,7 +99,7 @@ static void GetAccumulatedStats(AsanStats *stats) {
 }
 
 void FlushToDeadThreadStats(AsanStats *stats) {
-  BlockingMutexLock lock(&dead_threads_stats_lock);
+  Lock lock(&dead_threads_stats_lock);
   dead_threads_stats.MergeFrom(stats);
   stats->Clear();
 }
@@ -122,7 +122,7 @@ static void PrintAccumulatedStats() {
   AsanStats stats;
   GetAccumulatedStats(&stats);
   // Use lock to keep reports from mixing up.
-  BlockingMutexLock lock(&print_lock);
+  Lock lock(&print_lock);
   stats.Print();
   StackDepotStats *stack_depot_stats = StackDepotGetStats();
   Printf("Stats: StackDepot: %zd ids; %zdM allocated\n",
index 35d4467e7b53a010bb325557fb55ebf49892b342..d25e8ee4f45fca98a39efdf7ec1a2d8a98f66141 100644 (file)
@@ -43,11 +43,11 @@ void AsanThreadContext::OnFinished() {
 static ALIGNED(16) char thread_registry_placeholder[sizeof(ThreadRegistry)];
 static ThreadRegistry *asan_thread_registry;
 
-static BlockingMutex mu_for_thread_context(LINKER_INITIALIZED);
+static Mutex mu_for_thread_context;
 static LowLevelAllocator allocator_for_thread_context;
 
 static ThreadContextBase *GetAsanThreadContext(u32 tid) {
-  BlockingMutexLock lock(&mu_for_thread_context);
+  Lock lock(&mu_for_thread_context);
   return new(allocator_for_thread_context) AsanThreadContext(tid);
 }
 
index 5e3a0f1b0a10128710c5e723040ad960dd55f0c2..cfc1bfe8f011aaef62396fd604a0f4957ce03df7 100644 (file)
@@ -28,7 +28,8 @@ hwasan_files = \
        hwasan_new_delete.cpp \
        hwasan_poisoning.cpp \
        hwasan_report.cpp \
-       hwasan_setjmp.S \
+       hwasan_setjmp_aarch64.S \
+       hwasan_setjmp_x86_64.S \
        hwasan_tag_mismatch_aarch64.S \
        hwasan_thread.cpp \
        hwasan_thread_list.cpp \
index 22c5266a120c020e042e63a2a8387fb0966da0e2..f63670b50d1617ea836e00d2bd33074db2d4b973 100644 (file)
@@ -151,9 +151,9 @@ am__objects_1 = hwasan_allocation_functions.lo hwasan_allocator.lo \
        hwasan_fuchsia.lo hwasan_globals.lo hwasan_interceptors.lo \
        hwasan_interceptors_vfork.lo hwasan_linux.lo \
        hwasan_memintrinsics.lo hwasan_new_delete.lo \
-       hwasan_poisoning.lo hwasan_report.lo hwasan_setjmp.lo \
-       hwasan_tag_mismatch_aarch64.lo hwasan_thread.lo \
-       hwasan_thread_list.lo hwasan_type_test.lo
+       hwasan_poisoning.lo hwasan_report.lo hwasan_setjmp_aarch64.lo \
+       hwasan_setjmp_x86_64.lo hwasan_tag_mismatch_aarch64.lo \
+       hwasan_thread.lo hwasan_thread_list.lo hwasan_type_test.lo
 am_libhwasan_la_OBJECTS = $(am__objects_1)
 libhwasan_la_OBJECTS = $(am_libhwasan_la_OBJECTS)
 AM_V_lt = $(am__v_lt_@AM_V@)
@@ -427,7 +427,8 @@ hwasan_files = \
        hwasan_new_delete.cpp \
        hwasan_poisoning.cpp \
        hwasan_report.cpp \
-       hwasan_setjmp.S \
+       hwasan_setjmp_aarch64.S \
+       hwasan_setjmp_x86_64.S \
        hwasan_tag_mismatch_aarch64.S \
        hwasan_thread.cpp \
        hwasan_thread_list.cpp \
@@ -570,7 +571,8 @@ distclean-compile:
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/hwasan_new_delete.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/hwasan_poisoning.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/hwasan_report.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/hwasan_setjmp.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/hwasan_setjmp_aarch64.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/hwasan_setjmp_x86_64.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/hwasan_tag_mismatch_aarch64.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/hwasan_thread.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/hwasan_thread_list.Plo@am__quote@
index cbe0dee66dcda7c36d59572ea8355b67b33f5f90..465419022123fe916b40a67c7952b24dfdcce959 100644 (file)
@@ -319,7 +319,7 @@ void __hwasan_init_static() {
     InitializeSingleGlobal(global);
 }
 
-void __hwasan_init() {
+__attribute__((constructor(0))) void __hwasan_init() {
   CHECK(!hwasan_init_is_running);
   if (hwasan_inited) return;
   hwasan_init_is_running = 1;
@@ -360,6 +360,7 @@ void __hwasan_init() {
   HwasanTSDThreadInit();
 
   HwasanAllocatorInit();
+  HwasanInstallAtForkHandler();
 
 #if HWASAN_CONTAINS_UBSAN
   __ubsan::InitAsPlugin();
index 7338b696ad341d844776c491984a1a0b21ce9b58..371c43f3cbde7fe3fa2690683214f00484242e57 100644 (file)
@@ -107,6 +107,8 @@ void InitThreads();
 void InitializeInterceptors();
 
 void HwasanAllocatorInit();
+void HwasanAllocatorLock();
+void HwasanAllocatorUnlock();
 
 void *hwasan_malloc(uptr size, StackTrace *stack);
 void *hwasan_calloc(uptr nmemb, uptr size, StackTrace *stack);
@@ -140,6 +142,8 @@ void HwasanAtExit();
 
 void HwasanOnDeadlySignal(int signo, void *info, void *context);
 
+void HwasanInstallAtForkHandler();
+
 void UpdateMemoryUsage();
 
 void AppendToErrorMessageBuffer(const char *buffer);
@@ -183,25 +187,34 @@ void HwasanTagMismatch(uptr addr, uptr access_info, uptr *registers_frame,
     RunFreeHooks(ptr);            \
   } while (false)
 
-#if HWASAN_WITH_INTERCEPTORS && defined(__aarch64__)
+#if HWASAN_WITH_INTERCEPTORS
 // For both bionic and glibc __sigset_t is an unsigned long.
 typedef unsigned long __hw_sigset_t;
 // Setjmp and longjmp implementations are platform specific, and hence the
-// interception code is platform specific too.  As yet we've only implemented
-// the interception for AArch64.
-typedef unsigned long long __hw_register_buf[22];
+// interception code is platform specific too.
+#  if defined(__aarch64__)
+constexpr size_t kHwRegisterBufSize = 22;
+#  elif defined(__x86_64__)
+constexpr size_t kHwRegisterBufSize = 8;
+#  endif
+typedef unsigned long long __hw_register_buf[kHwRegisterBufSize];
 struct __hw_jmp_buf_struct {
   // NOTE: The machine-dependent definition of `__sigsetjmp'
   // assume that a `__hw_jmp_buf' begins with a `__hw_register_buf' and that
   // `__mask_was_saved' follows it.  Do not move these members or add others
   // before it.
+  //
+  // We add a __magic field to our struct to catch cases where libc's setjmp
+  // populated the jmp_buf instead of our interceptor.
   __hw_register_buf __jmpbuf; // Calling environment.
-  int __mask_was_saved;       // Saved the signal mask?
+  unsigned __mask_was_saved : 1;  // Saved the signal mask?
+  unsigned __magic : 31;      // Used to distinguish __hw_jmp_buf from jmp_buf.
   __hw_sigset_t __saved_mask; // Saved signal mask.
 };
 typedef struct __hw_jmp_buf_struct __hw_jmp_buf[1];
 typedef struct __hw_jmp_buf_struct __hw_sigjmp_buf[1];
-#endif // HWASAN_WITH_INTERCEPTORS && __aarch64__
+constexpr unsigned kHwJmpBufMagic = 0x248ACE77;
+#endif  // HWASAN_WITH_INTERCEPTORS
 
 #define ENSURE_HWASAN_INITED()      \
   do {                              \
index 6c2a6077866fb572d766d27424096c6206d1bfdc..850daedd0b0ea959e7af2a644608b0152751c643 100644 (file)
@@ -17,6 +17,8 @@
 #include "sanitizer_common/sanitizer_allocator_interface.h"
 #include "sanitizer_common/sanitizer_tls_get_addr.h"
 
+#if !SANITIZER_FUCHSIA
+
 using namespace __hwasan;
 
 static uptr allocated_for_dlsym;
@@ -36,6 +38,9 @@ static void *AllocateFromLocalPool(uptr size_in_bytes) {
   return mem;
 }
 
+extern "C" {
+
+SANITIZER_INTERFACE_ATTRIBUTE
 int __sanitizer_posix_memalign(void **memptr, uptr alignment, uptr size) {
   GET_MALLOC_STACK_TRACE;
   CHECK_NE(memptr, 0);
@@ -43,16 +48,19 @@ int __sanitizer_posix_memalign(void **memptr, uptr alignment, uptr size) {
   return res;
 }
 
+SANITIZER_INTERFACE_ATTRIBUTE
 void *__sanitizer_memalign(uptr alignment, uptr size) {
   GET_MALLOC_STACK_TRACE;
   return hwasan_memalign(alignment, size, &stack);
 }
 
+SANITIZER_INTERFACE_ATTRIBUTE
 void *__sanitizer_aligned_alloc(uptr alignment, uptr size) {
   GET_MALLOC_STACK_TRACE;
   return hwasan_aligned_alloc(alignment, size, &stack);
 }
 
+SANITIZER_INTERFACE_ATTRIBUTE
 void *__sanitizer___libc_memalign(uptr alignment, uptr size) {
   GET_MALLOC_STACK_TRACE;
   void *ptr = hwasan_memalign(alignment, size, &stack);
@@ -61,16 +69,19 @@ void *__sanitizer___libc_memalign(uptr alignment, uptr size) {
   return ptr;
 }
 
+SANITIZER_INTERFACE_ATTRIBUTE
 void *__sanitizer_valloc(uptr size) {
   GET_MALLOC_STACK_TRACE;
   return hwasan_valloc(size, &stack);
 }
 
+SANITIZER_INTERFACE_ATTRIBUTE
 void *__sanitizer_pvalloc(uptr size) {
   GET_MALLOC_STACK_TRACE;
   return hwasan_pvalloc(size, &stack);
 }
 
+SANITIZER_INTERFACE_ATTRIBUTE
 void __sanitizer_free(void *ptr) {
   GET_MALLOC_STACK_TRACE;
   if (!ptr || UNLIKELY(IsInDlsymAllocPool(ptr)))
@@ -78,6 +89,7 @@ void __sanitizer_free(void *ptr) {
   hwasan_free(ptr, &stack);
 }
 
+SANITIZER_INTERFACE_ATTRIBUTE
 void __sanitizer_cfree(void *ptr) {
   GET_MALLOC_STACK_TRACE;
   if (!ptr || UNLIKELY(IsInDlsymAllocPool(ptr)))
@@ -85,22 +97,27 @@ void __sanitizer_cfree(void *ptr) {
   hwasan_free(ptr, &stack);
 }
 
+SANITIZER_INTERFACE_ATTRIBUTE
 uptr __sanitizer_malloc_usable_size(const void *ptr) {
   return __sanitizer_get_allocated_size(ptr);
 }
 
+SANITIZER_INTERFACE_ATTRIBUTE
 struct __sanitizer_struct_mallinfo __sanitizer_mallinfo() {
   __sanitizer_struct_mallinfo sret;
   internal_memset(&sret, 0, sizeof(sret));
   return sret;
 }
 
+SANITIZER_INTERFACE_ATTRIBUTE
 int __sanitizer_mallopt(int cmd, int value) { return 0; }
 
+SANITIZER_INTERFACE_ATTRIBUTE
 void __sanitizer_malloc_stats(void) {
   // FIXME: implement, but don't call REAL(malloc_stats)!
 }
 
+SANITIZER_INTERFACE_ATTRIBUTE
 void *__sanitizer_calloc(uptr nmemb, uptr size) {
   GET_MALLOC_STACK_TRACE;
   if (UNLIKELY(!hwasan_inited))
@@ -109,6 +126,7 @@ void *__sanitizer_calloc(uptr nmemb, uptr size) {
   return hwasan_calloc(nmemb, size, &stack);
 }
 
+SANITIZER_INTERFACE_ATTRIBUTE
 void *__sanitizer_realloc(void *ptr, uptr size) {
   GET_MALLOC_STACK_TRACE;
   if (UNLIKELY(IsInDlsymAllocPool(ptr))) {
@@ -127,11 +145,13 @@ void *__sanitizer_realloc(void *ptr, uptr size) {
   return hwasan_realloc(ptr, size, &stack);
 }
 
+SANITIZER_INTERFACE_ATTRIBUTE
 void *__sanitizer_reallocarray(void *ptr, uptr nmemb, uptr size) {
   GET_MALLOC_STACK_TRACE;
   return hwasan_reallocarray(ptr, nmemb, size, &stack);
 }
 
+SANITIZER_INTERFACE_ATTRIBUTE
 void *__sanitizer_malloc(uptr size) {
   GET_MALLOC_STACK_TRACE;
   if (UNLIKELY(!hwasan_init_is_running))
@@ -142,6 +162,8 @@ void *__sanitizer_malloc(uptr size) {
   return hwasan_malloc(size, &stack);
 }
 
+}  // extern "C"
+
 #if HWASAN_WITH_INTERCEPTORS
 #  define INTERCEPTOR_ALIAS(RET, FN, ARGS...)                                 \
     extern "C" SANITIZER_INTERFACE_ATTRIBUTE RET WRAP(FN)(ARGS)               \
@@ -170,3 +192,5 @@ INTERCEPTOR_ALIAS(int, mallopt, int cmd, int value);
 INTERCEPTOR_ALIAS(void, malloc_stats, void);
 #  endif
 #endif  // #if HWASAN_WITH_INTERCEPTORS
+
+#endif  // SANITIZER_FUCHSIA
index ef6d4d6c7678eb5726de2699369e3f6a051bffec..9e1729964e27703b0b0aeeeb7a6b06031b82490d 100644 (file)
@@ -107,6 +107,10 @@ void HwasanAllocatorInit() {
     tail_magic[i] = GetCurrentThread()->GenerateRandomTag();
 }
 
+void HwasanAllocatorLock() { allocator.ForceLock(); }
+
+void HwasanAllocatorUnlock() { allocator.ForceUnlock(); }
+
 void AllocatorSwallowThreadLocalCache(AllocatorCache *cache) {
   allocator.SwallowCache(cache);
 }
@@ -158,8 +162,11 @@ static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment,
     internal_memset(allocated, flags()->malloc_fill_byte, fill_size);
   }
   if (size != orig_size) {
-    internal_memcpy(reinterpret_cast<u8 *>(allocated) + orig_size, tail_magic,
-                    size - orig_size - 1);
+    u8 *tail = reinterpret_cast<u8 *>(allocated) + orig_size;
+    uptr tail_length = size - orig_size;
+    internal_memcpy(tail, tail_magic, tail_length - 1);
+    // Short granule is excluded from magic tail, so we explicitly untag.
+    tail[tail_length - 1] = 0;
   }
 
   void *user_ptr = allocated;
@@ -201,21 +208,37 @@ static bool PointerAndMemoryTagsMatch(void *tagged_ptr) {
   return PossiblyShortTagMatches(mem_tag, tagged_uptr, 1);
 }
 
+static bool CheckInvalidFree(StackTrace *stack, void *untagged_ptr,
+                             void *tagged_ptr) {
+  // This function can return true if halt_on_error is false.
+  if (!MemIsApp(reinterpret_cast<uptr>(untagged_ptr)) ||
+      !PointerAndMemoryTagsMatch(tagged_ptr)) {
+    ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr));
+    return true;
+  }
+  return false;
+}
+
 static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
   CHECK(tagged_ptr);
   HWASAN_FREE_HOOK(tagged_ptr);
 
-  if (!PointerAndMemoryTagsMatch(tagged_ptr))
-    ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr));
+  bool in_taggable_region =
+      InTaggableRegion(reinterpret_cast<uptr>(tagged_ptr));
+  void *untagged_ptr = in_taggable_region ? UntagPtr(tagged_ptr) : tagged_ptr;
+
+  if (CheckInvalidFree(stack, untagged_ptr, tagged_ptr))
+    return;
 
-  void *untagged_ptr = InTaggableRegion(reinterpret_cast<uptr>(tagged_ptr))
-                           ? UntagPtr(tagged_ptr)
-                           : tagged_ptr;
   void *aligned_ptr = reinterpret_cast<void *>(
       RoundDownTo(reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment));
   tag_t pointer_tag = GetTagFromPointer(reinterpret_cast<uptr>(tagged_ptr));
   Metadata *meta =
       reinterpret_cast<Metadata *>(allocator.GetMetaData(aligned_ptr));
+  if (!meta) {
+    ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr));
+    return;
+  }
   uptr orig_size = meta->get_requested_size();
   u32 free_context_id = StackDepotPut(*stack);
   u32 alloc_context_id = meta->alloc_context_id;
@@ -228,7 +251,11 @@ static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
     CHECK_LT(tail_size, kShadowAlignment);
     void *tail_beg = reinterpret_cast<void *>(
         reinterpret_cast<uptr>(aligned_ptr) + orig_size);
-    if (tail_size && internal_memcmp(tail_beg, tail_magic, tail_size))
+    tag_t short_granule_memtag = *(reinterpret_cast<tag_t *>(
+        reinterpret_cast<uptr>(tail_beg) + tail_size));
+    if (tail_size &&
+        (internal_memcmp(tail_beg, tail_magic, tail_size) ||
+         (in_taggable_region && pointer_tag != short_granule_memtag)))
       ReportTailOverwritten(stack, reinterpret_cast<uptr>(tagged_ptr),
                             orig_size, tail_magic);
   }
@@ -243,8 +270,7 @@ static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
         Min(TaggedSize(orig_size), (uptr)flags()->max_free_fill_size);
     internal_memset(aligned_ptr, flags()->free_fill_byte, fill_size);
   }
-  if (InTaggableRegion(reinterpret_cast<uptr>(tagged_ptr)) &&
-      flags()->tag_in_free && malloc_bisect(stack, 0) &&
+  if (in_taggable_region && flags()->tag_in_free && malloc_bisect(stack, 0) &&
       atomic_load_relaxed(&hwasan_allocator_tagging_enabled)) {
     // Always store full 8-bit tags on free to maximize UAF detection.
     tag_t tag;
@@ -278,13 +304,15 @@ static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
 
 static void *HwasanReallocate(StackTrace *stack, void *tagged_ptr_old,
                               uptr new_size, uptr alignment) {
-  if (!PointerAndMemoryTagsMatch(tagged_ptr_old))
-    ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr_old));
-
+  void *untagged_ptr_old =
+      InTaggableRegion(reinterpret_cast<uptr>(tagged_ptr_old))
+          ? UntagPtr(tagged_ptr_old)
+          : tagged_ptr_old;
+  if (CheckInvalidFree(stack, untagged_ptr_old, tagged_ptr_old))
+    return nullptr;
   void *tagged_ptr_new =
       HwasanAllocate(stack, new_size, alignment, false /*zeroise*/);
   if (tagged_ptr_old && tagged_ptr_new) {
-    void *untagged_ptr_old =  UntagPtr(tagged_ptr_old);
     Metadata *meta =
         reinterpret_cast<Metadata *>(allocator.GetMetaData(untagged_ptr_old));
     internal_memcpy(
@@ -305,6 +333,8 @@ static void *HwasanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
 }
 
 HwasanChunkView FindHeapChunkByAddress(uptr address) {
+  if (!allocator.PointerIsMine(reinterpret_cast<void *>(address)))
+    return HwasanChunkView();
   void *block = allocator.GetBlockBegin(reinterpret_cast<void*>(address));
   if (!block)
     return HwasanChunkView();
index bde22dfa4bc4c03028c78baa0422b1394bcbdd04..7642ba6c0bf08fddee2f820a2b4016b6deec0488 100644 (file)
@@ -113,6 +113,15 @@ uptr FindDynamicShadowStart(uptr shadow_size_bytes) {
 }
 
 }  // namespace __hwasan
+
+#elif SANITIZER_FUCHSIA
+
+namespace __hwasan {
+
+void InitShadowGOT() {}
+
+}  // namespace __hwasan
+
 #else
 namespace __hwasan {
 
index e61f6ada72fc47284f9fd8b1e4730d8f465dba81..f51e148197b91468055636c663c98e06d95533ee 100644 (file)
@@ -34,6 +34,15 @@ bool InitShadow() {
   __sanitizer::InitShadowBounds();
   CHECK_NE(__sanitizer::ShadowBounds.shadow_limit, 0);
 
+  // These variables are used by MemIsShadow for asserting we have a correct
+  // shadow address. On Fuchsia, we only have one region of shadow, so the
+  // bounds of Low shadow can be zero while High shadow represents the true
+  // bounds. Note that these are inclusive ranges.
+  kLowShadowStart = 0;
+  kLowShadowEnd = 0;
+  kHighShadowStart = __sanitizer::ShadowBounds.shadow_base;
+  kHighShadowEnd = __sanitizer::ShadowBounds.shadow_limit - 1;
+
   return true;
 }
 
@@ -143,6 +152,14 @@ static void ThreadExitHook(void *hook, thrd_t self) {
   hwasanThreadList().ReleaseThread(thread);
 }
 
+uptr TagMemoryAligned(uptr p, uptr size, tag_t tag) {
+  CHECK(IsAligned(p, kShadowAlignment));
+  CHECK(IsAligned(size, kShadowAlignment));
+  __sanitizer_fill_shadow(p, size, tag,
+                          common_flags()->clear_shadow_mmap_threshold);
+  return AddTagToPointer(p, tag);
+}
+
 // Not implemented because Fuchsia does not use signal handlers.
 void HwasanOnDeadlySignal(int signo, void *info, void *context) {}
 
@@ -163,6 +180,12 @@ void HwasanTSDThreadInit() {}
 // function is unneeded.
 void InstallAtExitHandler() {}
 
+void HwasanInstallAtForkHandler() {}
+
+// TODO(fxbug.dev/81499): Once we finalize the tagged pointer ABI in zircon, we should come back
+// here and implement the appropriate check that TBI is enabled.
+void InitializeOsSupport() {}
+
 }  // namespace __hwasan
 
 extern "C" {
index 68f8adec0776a7cbfad090cb3e62d7297ef786f0..f96ed880410269e69854a4cc2b1c4814242f87cd 100644 (file)
@@ -49,15 +49,14 @@ INTERCEPTOR(int, pthread_create, void *th, void *attr, void *(*callback)(void*),
 
 DEFINE_REAL(int, vfork)
 DECLARE_EXTERN_INTERCEPTOR_AND_WRAPPER(int, vfork)
-#endif // HWASAN_WITH_INTERCEPTORS
 
-#if HWASAN_WITH_INTERCEPTORS && defined(__aarch64__)
 // Get and/or change the set of blocked signals.
 extern "C" int sigprocmask(int __how, const __hw_sigset_t *__restrict __set,
                            __hw_sigset_t *__restrict __oset);
 #define SIG_BLOCK 0
 #define SIG_SETMASK 2
 extern "C" int __sigjmp_save(__hw_sigjmp_buf env, int savemask) {
+  env[0].__magic = kHwJmpBufMagic;
   env[0].__mask_was_saved =
       (savemask && sigprocmask(SIG_BLOCK, (__hw_sigset_t *)0,
                                &env[0].__saved_mask) == 0);
@@ -66,8 +65,14 @@ extern "C" int __sigjmp_save(__hw_sigjmp_buf env, int savemask) {
 
 static void __attribute__((always_inline))
 InternalLongjmp(__hw_register_buf env, int retval) {
+#    if defined(__aarch64__)
+  constexpr size_t kSpIndex = 13;
+#    elif defined(__x86_64__)
+  constexpr size_t kSpIndex = 6;
+#    endif
+
   // Clear all memory tags on the stack between here and where we're going.
-  unsigned long long stack_pointer = env[13];
+  unsigned long long stack_pointer = env[kSpIndex];
   // The stack pointer should never be tagged, so we don't need to clear the
   // tag for this function call.
   __hwasan_handle_longjmp((void *)stack_pointer);
@@ -78,6 +83,7 @@ InternalLongjmp(__hw_register_buf env, int retval) {
   // Must implement this ourselves, since we don't know the order of registers
   // in different libc implementations and many implementations mangle the
   // stack pointer so we can't use it without knowing the demangling scheme.
+#    if defined(__aarch64__)
   register long int retval_tmp asm("x1") = retval;
   register void *env_address asm("x0") = &env[0];
   asm volatile("ldp    x19, x20, [%0, #0<<3];"
@@ -100,9 +106,36 @@ InternalLongjmp(__hw_register_buf env, int retval) {
                "br     x30;"
                : "+r"(env_address)
                : "r"(retval_tmp));
+#    elif defined(__x86_64__)
+  register long int retval_tmp asm("%rsi") = retval;
+  register void *env_address asm("%rdi") = &env[0];
+  asm volatile(
+      // Restore registers.
+      "mov (0*8)(%0),%%rbx;"
+      "mov (1*8)(%0),%%rbp;"
+      "mov (2*8)(%0),%%r12;"
+      "mov (3*8)(%0),%%r13;"
+      "mov (4*8)(%0),%%r14;"
+      "mov (5*8)(%0),%%r15;"
+      "mov (6*8)(%0),%%rsp;"
+      "mov (7*8)(%0),%%rdx;"
+      // Return 1 if retval is 0.
+      "mov $1,%%rax;"
+      "test %1,%1;"
+      "cmovnz %1,%%rax;"
+      "jmp *%%rdx;" ::"r"(env_address),
+      "r"(retval_tmp));
+#    endif
 }
 
 INTERCEPTOR(void, siglongjmp, __hw_sigjmp_buf env, int val) {
+  if (env[0].__magic != kHwJmpBufMagic) {
+    Printf(
+        "WARNING: Unexpected bad jmp_buf. Either setjmp was not called or "
+        "there is a bug in HWASan.\n");
+    return REAL(siglongjmp)(env, val);
+  }
+
   if (env[0].__mask_was_saved)
     // Restore the saved signal mask.
     (void)sigprocmask(SIG_SETMASK, &env[0].__saved_mask,
@@ -114,32 +147,24 @@ INTERCEPTOR(void, siglongjmp, __hw_sigjmp_buf env, int val) {
 // _setjmp on start_thread.  Hence we have to intercept the longjmp on
 // pthread_exit so the __hw_jmp_buf order matches.
 INTERCEPTOR(void, __libc_longjmp, __hw_jmp_buf env, int val) {
+  if (env[0].__magic != kHwJmpBufMagic)
+    return REAL(__libc_longjmp)(env, val);
   InternalLongjmp(env[0].__jmpbuf, val);
 }
 
 INTERCEPTOR(void, longjmp, __hw_jmp_buf env, int val) {
+  if (env[0].__magic != kHwJmpBufMagic) {
+    Printf(
+        "WARNING: Unexpected bad jmp_buf. Either setjmp was not called or "
+        "there is a bug in HWASan.\n");
+    return REAL(longjmp)(env, val);
+  }
   InternalLongjmp(env[0].__jmpbuf, val);
 }
 #undef SIG_BLOCK
 #undef SIG_SETMASK
 
-#endif // HWASAN_WITH_INTERCEPTORS && __aarch64__
-
-static void BeforeFork() {
-  StackDepotLockAll();
-}
-
-static void AfterFork() {
-  StackDepotUnlockAll();
-}
-
-INTERCEPTOR(int, fork, void) {
-  ENSURE_HWASAN_INITED();
-  BeforeFork();
-  int pid = REAL(fork)();
-  AfterFork();
-  return pid;
-}
+#  endif  // HWASAN_WITH_INTERCEPTORS
 
 namespace __hwasan {
 
@@ -156,10 +181,11 @@ void InitializeInterceptors() {
   static int inited = 0;
   CHECK_EQ(inited, 0);
 
-  INTERCEPT_FUNCTION(fork);
-
 #if HWASAN_WITH_INTERCEPTORS
 #if defined(__linux__)
+  INTERCEPT_FUNCTION(__libc_longjmp);
+  INTERCEPT_FUNCTION(longjmp);
+  INTERCEPT_FUNCTION(siglongjmp);
   INTERCEPT_FUNCTION(vfork);
 #endif  // __linux__
   INTERCEPT_FUNCTION(pthread_create);
index 25c0f94fe51f542432b1cc3c76ae925241fbc9ac..ef771add411c9cc2d064c7f408c403b02073fc74 100644 (file)
@@ -168,54 +168,6 @@ void __hwasan_thread_exit();
 SANITIZER_INTERFACE_ATTRIBUTE
 void __hwasan_print_memory_usage();
 
-SANITIZER_INTERFACE_ATTRIBUTE
-int __sanitizer_posix_memalign(void **memptr, uptr alignment, uptr size);
-
-SANITIZER_INTERFACE_ATTRIBUTE
-void * __sanitizer_memalign(uptr alignment, uptr size);
-
-SANITIZER_INTERFACE_ATTRIBUTE
-void * __sanitizer_aligned_alloc(uptr alignment, uptr size);
-
-SANITIZER_INTERFACE_ATTRIBUTE
-void * __sanitizer___libc_memalign(uptr alignment, uptr size);
-
-SANITIZER_INTERFACE_ATTRIBUTE
-void * __sanitizer_valloc(uptr size);
-
-SANITIZER_INTERFACE_ATTRIBUTE
-void * __sanitizer_pvalloc(uptr size);
-
-SANITIZER_INTERFACE_ATTRIBUTE
-void __sanitizer_free(void *ptr);
-
-SANITIZER_INTERFACE_ATTRIBUTE
-void __sanitizer_cfree(void *ptr);
-
-SANITIZER_INTERFACE_ATTRIBUTE
-uptr __sanitizer_malloc_usable_size(const void *ptr);
-
-SANITIZER_INTERFACE_ATTRIBUTE
-__hwasan::__sanitizer_struct_mallinfo __sanitizer_mallinfo();
-
-SANITIZER_INTERFACE_ATTRIBUTE
-int __sanitizer_mallopt(int cmd, int value);
-
-SANITIZER_INTERFACE_ATTRIBUTE
-void __sanitizer_malloc_stats(void);
-
-SANITIZER_INTERFACE_ATTRIBUTE
-void * __sanitizer_calloc(uptr nmemb, uptr size);
-
-SANITIZER_INTERFACE_ATTRIBUTE
-void * __sanitizer_realloc(void *ptr, uptr size);
-
-SANITIZER_INTERFACE_ATTRIBUTE
-void * __sanitizer_reallocarray(void *ptr, uptr nmemb, uptr size);
-
-SANITIZER_INTERFACE_ATTRIBUTE
-void * __sanitizer_malloc(uptr size);
-
 SANITIZER_INTERFACE_ATTRIBUTE
 void *__hwasan_memcpy(void *dst, const void *src, uptr size);
 SANITIZER_INTERFACE_ATTRIBUTE
index e22723529f449e9a8d49c94664fd3b0c32a2960d..a86ec28507f305b2eea76694b28ee7b2fbbf04b3 100644 (file)
 #include "sanitizer_common/sanitizer_platform.h"
 #if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD
 
-#include "hwasan.h"
-#include "hwasan_dynamic_shadow.h"
-#include "hwasan_interface_internal.h"
-#include "hwasan_mapping.h"
-#include "hwasan_report.h"
-#include "hwasan_thread.h"
-#include "hwasan_thread_list.h"
-
-#include <dlfcn.h>
-#include <elf.h>
-#include <link.h>
-#include <pthread.h>
-#include <signal.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <sys/resource.h>
-#include <sys/time.h>
-#include <unistd.h>
-#include <unwind.h>
-#include <sys/prctl.h>
-#include <errno.h>
-
-#include "sanitizer_common/sanitizer_common.h"
-#include "sanitizer_common/sanitizer_procmaps.h"
+#  include <dlfcn.h>
+#  include <elf.h>
+#  include <errno.h>
+#  include <link.h>
+#  include <pthread.h>
+#  include <signal.h>
+#  include <stdio.h>
+#  include <stdlib.h>
+#  include <sys/prctl.h>
+#  include <sys/resource.h>
+#  include <sys/time.h>
+#  include <unistd.h>
+#  include <unwind.h>
+
+#  include "hwasan.h"
+#  include "hwasan_dynamic_shadow.h"
+#  include "hwasan_interface_internal.h"
+#  include "hwasan_mapping.h"
+#  include "hwasan_report.h"
+#  include "hwasan_thread.h"
+#  include "hwasan_thread_list.h"
+#  include "sanitizer_common/sanitizer_common.h"
+#  include "sanitizer_common/sanitizer_procmaps.h"
+#  include "sanitizer_common/sanitizer_stackdepot.h"
 
 // Configurations of HWASAN_WITH_INTERCEPTORS and SANITIZER_ANDROID.
 //
 //    Tested with check-hwasan on x86_64-linux.
 // HWASAN_WITH_INTERCEPTORS=ON, SANITIZER_ANDROID=ON
 //    Tested with check-hwasan on aarch64-linux-android.
-#if !SANITIZER_ANDROID
+#  if !SANITIZER_ANDROID
 SANITIZER_INTERFACE_ATTRIBUTE
 THREADLOCAL uptr __hwasan_tls;
-#endif
+#  endif
 
 namespace __hwasan {
 
@@ -111,9 +111,9 @@ static void InitializeShadowBaseAddress(uptr shadow_size_bytes) {
 }
 
 void InitializeOsSupport() {
-#define PR_SET_TAGGED_ADDR_CTRL 55
-#define PR_GET_TAGGED_ADDR_CTRL 56
-#define PR_TAGGED_ADDR_ENABLE (1UL << 0)
+#  define PR_SET_TAGGED_ADDR_CTRL 55
+#  define PR_GET_TAGGED_ADDR_CTRL 56
+#  define PR_TAGGED_ADDR_ENABLE (1UL << 0)
   // Check we're running on a kernel that can use the tagged address ABI.
   int local_errno = 0;
   if (internal_iserror(internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0),
@@ -164,9 +164,9 @@ void InitializeOsSupport() {
       Die();
     }
   }
-#undef PR_SET_TAGGED_ADDR_CTRL
-#undef PR_GET_TAGGED_ADDR_CTRL
-#undef PR_TAGGED_ADDR_ENABLE
+#  undef PR_SET_TAGGED_ADDR_CTRL
+#  undef PR_GET_TAGGED_ADDR_CTRL
+#  undef PR_TAGGED_ADDR_ENABLE
 }
 
 bool InitShadow() {
@@ -241,12 +241,11 @@ bool MemIsApp(uptr p) {
   CHECK(GetTagFromPointer(p) == 0);
 #  endif
 
-  return p >= kHighMemStart || (p >= kLowMemStart && p <= kLowMemEnd);
+  return (p >= kHighMemStart && p <= kHighMemEnd) ||
+         (p >= kLowMemStart && p <= kLowMemEnd);
 }
 
-void InstallAtExitHandler() {
-  atexit(HwasanAtExit);
-}
+void InstallAtExitHandler() { atexit(HwasanAtExit); }
 
 // ---------------------- TSD ---------------- {{{1
 
@@ -262,7 +261,7 @@ extern "C" void __hwasan_thread_exit() {
     hwasanThreadList().ReleaseThread(t);
 }
 
-#if HWASAN_WITH_INTERCEPTORS
+#  if HWASAN_WITH_INTERCEPTORS
 static pthread_key_t tsd_key;
 static bool tsd_key_inited = false;
 
@@ -286,22 +285,18 @@ void HwasanTSDInit() {
   tsd_key_inited = true;
   CHECK_EQ(0, pthread_key_create(&tsd_key, HwasanTSDDtor));
 }
-#else
+#  else
 void HwasanTSDInit() {}
 void HwasanTSDThreadInit() {}
-#endif
+#  endif
 
-#if SANITIZER_ANDROID
-uptr *GetCurrentThreadLongPtr() {
-  return (uptr *)get_android_tls_ptr();
-}
-#else
-uptr *GetCurrentThreadLongPtr() {
-  return &__hwasan_tls;
-}
-#endif
+#  if SANITIZER_ANDROID
+uptr *GetCurrentThreadLongPtr() { return (uptr *)get_android_tls_ptr(); }
+#  else
+uptr *GetCurrentThreadLongPtr() { return &__hwasan_tls; }
+#  endif
 
-#if SANITIZER_ANDROID
+#  if SANITIZER_ANDROID
 void AndroidTestTlsSlot() {
   uptr kMagicValue = 0x010203040A0B0C0D;
   uptr *tls_ptr = GetCurrentThreadLongPtr();
@@ -316,9 +311,9 @@ void AndroidTestTlsSlot() {
   }
   *tls_ptr = old_value;
 }
-#else
+#  else
 void AndroidTestTlsSlot() {}
-#endif
+#  endif
 
 static AccessInfo GetAccessInfo(siginfo_t *info, ucontext_t *uc) {
   // Access type is passed in a platform dependent way (see below) and encoded
@@ -326,32 +321,32 @@ static AccessInfo GetAccessInfo(siginfo_t *info, ucontext_t *uc) {
   // recoverable. Valid values of Y are 0 to 4, which are interpreted as
   // log2(access_size), and 0xF, which means that access size is passed via
   // platform dependent register (see below).
-#if defined(__aarch64__)
+#  if defined(__aarch64__)
   // Access type is encoded in BRK immediate as 0x900 + 0xXY. For Y == 0xF,
   // access size is stored in X1 register. Access address is always in X0
   // register.
   uptr pc = (uptr)info->si_addr;
   const unsigned code = ((*(u32 *)pc) >> 5) & 0xffff;
   if ((code & 0xff00) != 0x900)
-    return AccessInfo{}; // Not ours.
+    return AccessInfo{};  // Not ours.
 
   const bool is_store = code & 0x10;
   const bool recover = code & 0x20;
   const uptr addr = uc->uc_mcontext.regs[0];
   const unsigned size_log = code & 0xf;
   if (size_log > 4 && size_log != 0xf)
-    return AccessInfo{}; // Not ours.
+    return AccessInfo{};  // Not ours.
   const uptr size = size_log == 0xf ? uc->uc_mcontext.regs[1] : 1U << size_log;
 
-#elif defined(__x86_64__)
+#  elif defined(__x86_64__)
   // Access type is encoded in the instruction following INT3 as
   // NOP DWORD ptr [EAX + 0x40 + 0xXY]. For Y == 0xF, access size is stored in
   // RSI register. Access address is always in RDI register.
   uptr pc = (uptr)uc->uc_mcontext.gregs[REG_RIP];
-  uint8_t *nop = (uint8_t*)pc;
-  if (*nop != 0x0f || *(nop + 1) != 0x1f || *(nop + 2) != 0x40  ||
+  uint8_t *nop = (uint8_t *)pc;
+  if (*nop != 0x0f || *(nop + 1) != 0x1f || *(nop + 2) != 0x40 ||
       *(nop + 3) < 0x40)
-    return AccessInfo{}; // Not ours.
+    return AccessInfo{};  // Not ours.
   const unsigned code = *(nop + 3);
 
   const bool is_store = code & 0x10;
@@ -359,13 +354,13 @@ static AccessInfo GetAccessInfo(siginfo_t *info, ucontext_t *uc) {
   const uptr addr = uc->uc_mcontext.gregs[REG_RDI];
   const unsigned size_log = code & 0xf;
   if (size_log > 4 && size_log != 0xf)
-    return AccessInfo{}; // Not ours.
+    return AccessInfo{};  // Not ours.
   const uptr size =
       size_log == 0xf ? uc->uc_mcontext.gregs[REG_RSI] : 1U << size_log;
 
-#else
-# error Unsupported architecture
-#endif
+#  else
+#    error Unsupported architecture
+#  endif
 
   return AccessInfo{addr, size, is_store, !is_store, recover};
 }
@@ -378,12 +373,12 @@ static bool HwasanOnSIGTRAP(int signo, siginfo_t *info, ucontext_t *uc) {
   SignalContext sig{info, uc};
   HandleTagMismatch(ai, StackTrace::GetNextInstructionPc(sig.pc), sig.bp, uc);
 
-#if defined(__aarch64__)
+#  if defined(__aarch64__)
   uc->uc_mcontext.pc += 4;
-#elif defined(__x86_64__)
-#else
-# error Unsupported architecture
-#endif
+#  elif defined(__x86_64__)
+#  else
+#    error Unsupported architecture
+#  endif
   return true;
 }
 
@@ -396,7 +391,7 @@ static void OnStackUnwind(const SignalContext &sig, const void *,
 void HwasanOnDeadlySignal(int signo, void *info, void *context) {
   // Probably a tag mismatch.
   if (signo == SIGTRAP)
-    if (HwasanOnSIGTRAP(signo, (siginfo_t *)info, (ucontext_t*)context))
+    if (HwasanOnSIGTRAP(signo, (siginfo_t *)info, (ucontext_t *)context))
       return;
 
   HandleDeadlySignal(info, context, GetTid(), &OnStackUnwind, nullptr);
@@ -435,6 +430,18 @@ uptr TagMemoryAligned(uptr p, uptr size, tag_t tag) {
   return AddTagToPointer(p, tag);
 }
 
-} // namespace __hwasan
+void HwasanInstallAtForkHandler() {
+  auto before = []() {
+    HwasanAllocatorLock();
+    StackDepotLockAll();
+  };
+  auto after = []() {
+    StackDepotUnlockAll();
+    HwasanAllocatorUnlock();
+  };
+  pthread_atfork(before, after, after);
+}
+
+}  // namespace __hwasan
 
-#endif // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD
+#endif  // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD
index 44047c9fdaf8ec3171c0c50bc2831ead348b9887..9b3b661b74bdf3ee973d3af418e4ef388af3782d 100644 (file)
@@ -37,7 +37,7 @@ namespace __hwasan {
 class ScopedReport {
  public:
   ScopedReport(bool fatal = false) : error_message_(1), fatal(fatal) {
-    BlockingMutexLock lock(&error_message_lock_);
+    Lock lock(&error_message_lock_);
     error_message_ptr_ = fatal ? &error_message_ : nullptr;
     ++hwasan_report_count;
   }
@@ -45,7 +45,7 @@ class ScopedReport {
   ~ScopedReport() {
     void (*report_cb)(const char *);
     {
-      BlockingMutexLock lock(&error_message_lock_);
+      Lock lock(&error_message_lock_);
       report_cb = error_report_callback_;
       error_message_ptr_ = nullptr;
     }
@@ -61,7 +61,7 @@ class ScopedReport {
   }
 
   static void MaybeAppendToErrorMessage(const char *msg) {
-    BlockingMutexLock lock(&error_message_lock_);
+    Lock lock(&error_message_lock_);
     if (!error_message_ptr_)
       return;
     uptr len = internal_strlen(msg);
@@ -72,7 +72,7 @@ class ScopedReport {
   }
 
   static void SetErrorReportCallback(void (*callback)(const char *)) {
-    BlockingMutexLock lock(&error_message_lock_);
+    Lock lock(&error_message_lock_);
     error_report_callback_ = callback;
   }
 
@@ -82,12 +82,12 @@ class ScopedReport {
   bool fatal;
 
   static InternalMmapVector<char> *error_message_ptr_;
-  static BlockingMutex error_message_lock_;
+  static Mutex error_message_lock_;
   static void (*error_report_callback_)(const char *);
 };
 
 InternalMmapVector<char> *ScopedReport::error_message_ptr_;
-BlockingMutex ScopedReport::error_message_lock_;
+Mutex ScopedReport::error_message_lock_;
 void (*ScopedReport::error_report_callback_)(const char *);
 
 // If there is an active ScopedReport, append to its error message.
@@ -351,14 +351,16 @@ static void ShowHeapOrGlobalCandidate(uptr untagged_addr, tag_t *candidate,
       uptr size = GetGlobalSizeFromDescriptor(mem);
       if (size == 0)
         // We couldn't find the size of the global from the descriptors.
-        Printf("%p is located to the %s of a global variable in (%s+0x%x)\n",
-               untagged_addr, candidate == left ? "right" : "left", module_name,
-               module_address);
+        Printf(
+            "%p is located to the %s of a global variable in "
+            "\n    #0 0x%x (%s+0x%x)\n",
+            untagged_addr, candidate == left ? "right" : "left", mem,
+            module_name, module_address);
       else
         Printf(
             "%p is located to the %s of a %zd-byte global variable in "
-            "(%s+0x%x)\n",
-            untagged_addr, candidate == left ? "right" : "left", size,
+            "\n    #0 0x%x (%s+0x%x)\n",
+            untagged_addr, candidate == left ? "right" : "left", size, mem,
             module_name, module_address);
     }
     Printf("%s", d.Default());
@@ -372,6 +374,12 @@ void PrintAddressDescription(
   int num_descriptions_printed = 0;
   uptr untagged_addr = UntagAddr(tagged_addr);
 
+  if (MemIsShadow(untagged_addr)) {
+    Printf("%s%p is HWAsan shadow memory.\n%s", d.Location(), untagged_addr,
+           d.Default());
+    return;
+  }
+
   // Print some very basic information about the address, if it's a heap.
   HwasanChunkView chunk = FindHeapChunkByAddress(untagged_addr);
   if (uptr beg = chunk.Beg()) {
@@ -549,28 +557,48 @@ static void PrintTagsAroundAddr(tag_t *tag_ptr) {
       "description of short granule tags\n");
 }
 
+uptr GetTopPc(StackTrace *stack) {
+  return stack->size ? StackTrace::GetPreviousInstructionPc(stack->trace[0])
+                     : 0;
+}
+
 void ReportInvalidFree(StackTrace *stack, uptr tagged_addr) {
   ScopedReport R(flags()->halt_on_error);
 
   uptr untagged_addr = UntagAddr(tagged_addr);
   tag_t ptr_tag = GetTagFromPointer(tagged_addr);
-  tag_t *tag_ptr = reinterpret_cast<tag_t*>(MemToShadow(untagged_addr));
-  tag_t mem_tag = *tag_ptr;
+  tag_t *tag_ptr = nullptr;
+  tag_t mem_tag = 0;
+  if (MemIsApp(untagged_addr)) {
+    tag_ptr = reinterpret_cast<tag_t *>(MemToShadow(untagged_addr));
+    if (MemIsShadow(reinterpret_cast<uptr>(tag_ptr)))
+      mem_tag = *tag_ptr;
+    else
+      tag_ptr = nullptr;
+  }
   Decorator d;
   Printf("%s", d.Error());
-  uptr pc = stack->size ? stack->trace[0] : 0;
+  uptr pc = GetTopPc(stack);
   const char *bug_type = "invalid-free";
-  Report("ERROR: %s: %s on address %p at pc %p\n", SanitizerToolName, bug_type,
-         untagged_addr, pc);
+  const Thread *thread = GetCurrentThread();
+  if (thread) {
+    Report("ERROR: %s: %s on address %p at pc %p on thread T%zd\n",
+           SanitizerToolName, bug_type, untagged_addr, pc, thread->unique_id());
+  } else {
+    Report("ERROR: %s: %s on address %p at pc %p on unknown thread\n",
+           SanitizerToolName, bug_type, untagged_addr, pc);
+  }
   Printf("%s", d.Access());
-  Printf("tags: %02x/%02x (ptr/mem)\n", ptr_tag, mem_tag);
+  if (tag_ptr)
+    Printf("tags: %02x/%02x (ptr/mem)\n", ptr_tag, mem_tag);
   Printf("%s", d.Default());
 
   stack->Print();
 
   PrintAddressDescription(tagged_addr, 0, nullptr);
 
-  PrintTagsAroundAddr(tag_ptr);
+  if (tag_ptr)
+    PrintTagsAroundAddr(tag_ptr);
 
   ReportErrorSummary(bug_type, stack);
 }
@@ -578,6 +606,15 @@ void ReportInvalidFree(StackTrace *stack, uptr tagged_addr) {
 void ReportTailOverwritten(StackTrace *stack, uptr tagged_addr, uptr orig_size,
                            const u8 *expected) {
   uptr tail_size = kShadowAlignment - (orig_size % kShadowAlignment);
+  u8 actual_expected[kShadowAlignment];
+  internal_memcpy(actual_expected, expected, tail_size);
+  tag_t ptr_tag = GetTagFromPointer(tagged_addr);
+  // Short granule is stashed in the last byte of the magic string. To avoid
+  // confusion, make the expected magic string contain the short granule tag.
+  if (orig_size % kShadowAlignment != 0) {
+    actual_expected[tail_size - 1] = ptr_tag;
+  }
+
   ScopedReport R(flags()->halt_on_error);
   Decorator d;
   uptr untagged_addr = UntagAddr(tagged_addr);
@@ -614,14 +651,13 @@ void ReportTailOverwritten(StackTrace *stack, uptr tagged_addr, uptr orig_size,
   s.append("Expected:      ");
   for (uptr i = 0; i < kShadowAlignment - tail_size; i++)
     s.append(".. ");
-  for (uptr i = 0; i < tail_size; i++)
-    s.append("%02x ", expected[i]);
+  for (uptr i = 0; i < tail_size; i++) s.append("%02x ", actual_expected[i]);
   s.append("\n");
   s.append("               ");
   for (uptr i = 0; i < kShadowAlignment - tail_size; i++)
     s.append("   ");
   for (uptr i = 0; i < tail_size; i++)
-    s.append("%s ", expected[i] != tail[i] ? "^^" : "  ");
+    s.append("%s ", actual_expected[i] != tail[i] ? "^^" : "  ");
 
   s.append("\nThis error occurs when a buffer overflow overwrites memory\n"
     "to the right of a heap object, but within the %zd-byte granule, e.g.\n"
@@ -647,11 +683,11 @@ void ReportTagMismatch(StackTrace *stack, uptr tagged_addr, uptr access_size,
       GetCurrentThread()->stack_allocations());
 
   Decorator d;
-  Printf("%s", d.Error());
   uptr untagged_addr = UntagAddr(tagged_addr);
   // TODO: when possible, try to print heap-use-after-free, etc.
   const char *bug_type = "tag-mismatch";
-  uptr pc = stack->size ? stack->trace[0] : 0;
+  uptr pc = GetTopPc(stack);
+  Printf("%s", d.Error());
   Report("ERROR: %s: %s on address %p at pc %p\n", SanitizerToolName, bug_type,
          untagged_addr, pc);
 
similarity index 87%
rename from libsanitizer/hwasan/hwasan_setjmp.S
rename to libsanitizer/hwasan/hwasan_setjmp_aarch64.S
index 381af63363cc3b96f2dd63f1cca706dedbb55e03..744748a5101f5e37d72216c79dddadd3d37612bd 100644 (file)
@@ -1,4 +1,4 @@
-//===-- hwasan_setjmp.S --------------------------------------------------------===//
+//===-- hwasan_setjmp_aarch64.S -------------------------------------------===//
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
@@ -29,7 +29,7 @@
 // Hence we have to write this function in assembly.
 
 .section .text
-.file "hwasan_setjmp.S"
+.file "hwasan_setjmp_aarch64.S"
 
 .global __interceptor_setjmp
 ASM_TYPE_FUNCTION(__interceptor_setjmp)
@@ -80,24 +80,19 @@ __interceptor_sigsetjmp:
 ASM_SIZE(__interceptor_sigsetjmp)
 
 
-.macro ALIAS first second
-  .globl \second
+.macro WEAK_ALIAS first second
+  .weak \second
   .equ \second\(), \first
 .endm
 
 #if SANITIZER_ANDROID
-ALIAS __interceptor_sigsetjmp, sigsetjmp
-.weak sigsetjmp
-
-ALIAS __interceptor_setjmp_bionic, setjmp
-.weak setjmp
+WEAK_ALIAS __interceptor_sigsetjmp, sigsetjmp
+WEAK_ALIAS __interceptor_setjmp_bionic, setjmp
 #else
-ALIAS __interceptor_sigsetjmp, __sigsetjmp
-.weak __sigsetjmp
+WEAK_ALIAS __interceptor_sigsetjmp, __sigsetjmp
 #endif
 
-ALIAS __interceptor_setjmp, _setjmp
-.weak _setjmp
+WEAK_ALIAS __interceptor_setjmp, _setjmp
 #endif
 
 // We do not need executable stack.
diff --git a/libsanitizer/hwasan/hwasan_setjmp_x86_64.S b/libsanitizer/hwasan/hwasan_setjmp_x86_64.S
new file mode 100644 (file)
index 0000000..84512d1
--- /dev/null
@@ -0,0 +1,80 @@
+//===-- hwasan_setjmp_x86_64.S --------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// setjmp interceptor for x86_64.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_asm.h"
+
+#if HWASAN_WITH_INTERCEPTORS && defined(__x86_64__)
+#include "sanitizer_common/sanitizer_platform.h"
+
+// We want to save the context of the calling function.
+// That requires
+// 1) No modification of the return address by this function.
+// 2) No modification of the stack pointer by this function.
+// 3) (no modification of any other saved register, but that's not really going
+// to occur, and hence isn't as much of a worry).
+//
+// There's essentially no way to ensure that the compiler will not modify the
+// stack pointer when compiling a C function.
+// Hence we have to write this function in assembly.
+//
+// TODO: Handle Intel CET.
+
+.section .text
+.file "hwasan_setjmp_x86_64.S"
+
+.global __interceptor_setjmp
+ASM_TYPE_FUNCTION(__interceptor_setjmp)
+__interceptor_setjmp:
+  CFI_STARTPROC
+  xorl %esi, %esi
+  jmp  __interceptor_sigsetjmp
+  CFI_ENDPROC
+ASM_SIZE(__interceptor_setjmp)
+
+.global __interceptor_sigsetjmp
+ASM_TYPE_FUNCTION(__interceptor_sigsetjmp)
+__interceptor_sigsetjmp:
+  CFI_STARTPROC
+
+  // Save callee save registers.
+  mov %rbx, (0*8)(%rdi)
+  mov %rbp, (1*8)(%rdi)
+  mov %r12, (2*8)(%rdi)
+  mov %r13, (3*8)(%rdi)
+  mov %r14, (4*8)(%rdi)
+  mov %r15, (5*8)(%rdi)
+
+  // Save SP as it was in caller's frame.
+  lea 8(%rsp), %rdx
+  mov %rdx, (6*8)(%rdi)
+
+  // Save return address.
+  mov (%rsp), %rax
+  mov %rax, (7*8)(%rdi)
+
+  jmp __sigjmp_save
+
+  CFI_ENDPROC
+ASM_SIZE(__interceptor_sigsetjmp)
+
+
+.macro WEAK_ALIAS first second
+  .weak \second
+  .equ \second\(), \first
+.endm
+
+WEAK_ALIAS __interceptor_sigsetjmp, __sigsetjmp
+WEAK_ALIAS __interceptor_setjmp, _setjmp
+#endif
+
+// We do not need executable stack.
+NO_EXEC_STACK_DIRECTIVE
index ee747a3beea5e58d7c2203d3de85ca2008d989d8..5b65718c4d3b2587354df9e289153303ad2137b9 100644 (file)
@@ -45,13 +45,13 @@ void Thread::Init(uptr stack_buffer_start, uptr stack_buffer_size,
   if (auto sz = flags()->heap_history_size)
     heap_allocations_ = HeapAllocationsRingBuffer::New(sz);
 
-  InitStackAndTls(state);
 #if !SANITIZER_FUCHSIA
   // Do not initialize the stack ring buffer just yet on Fuchsia. Threads will
   // be initialized before we enter the thread itself, so we will instead call
   // this later.
   InitStackRingBuffer(stack_buffer_start, stack_buffer_size);
 #endif
+  InitStackAndTls(state);
 }
 
 void Thread::InitStackRingBuffer(uptr stack_buffer_start,
index 8cff495bae153eb728a9dc7d12e80be3bc976a85..5307073fb40b64220d8f2ec92519678ad2789521 100644 (file)
@@ -19,7 +19,7 @@
 #define CHECK_TYPE_SIZE_FITS(TYPE) \
   COMPILER_CHECK(sizeof(__hw_##TYPE) <= sizeof(TYPE))
 
-#if HWASAN_WITH_INTERCEPTORS && defined(__aarch64__)
+#if HWASAN_WITH_INTERCEPTORS
 CHECK_TYPE_SIZE_FITS(jmp_buf);
 CHECK_TYPE_SIZE_FITS(sigjmp_buf);
 #endif
index 792ef9cfaa32b08fd45be87e583250503076513f..9bff21c117b39a211f0d7b5de041363285897ec8 100644 (file)
@@ -316,7 +316,7 @@ void *__asan_addr_is_in_fake_stack(void *fake_stack, void *addr, void **beg,
 void __asan_handle_no_return(void);
 
 /// Update allocation stack trace for the given allocation to the current stack
-/// trace. Returns 1 if successfull, 0 if not.
+/// trace. Returns 1 if successful, 0 if not.
 int __asan_update_allocation_context(void* addr);
 
 #ifdef __cplusplus
index cd69285b8d4afec5f5615eff88129811db497912..692b8f70c9697ece8558f75967e840020736a77a 100644 (file)
@@ -28,7 +28,7 @@ typedef struct {
   // Enable sandbox support in sanitizer coverage.
   int coverage_sandboxed;
   // File descriptor to write coverage data to. If -1 is passed, a file will
-  // be pre-opened by __sanitizer_sandobx_on_notify(). This field has no
+  // be pre-opened by __sanitizer_sandbox_on_notify(). This field has no
   // effect if coverage_sandboxed == 0.
   intptr_t coverage_fd;
   // If non-zero, split the coverage data into well-formed blocks. This is
index cd3b6d6e2b163f3be0dce16713c1944f06206765..d6209a3ea2b29dd8daa9275167ddde3b6f617573 100644 (file)
@@ -150,8 +150,7 @@ int dfsan_get_track_origins(void);
 #ifdef __cplusplus
 }  // extern "C"
 
-template <typename T>
-void dfsan_set_label(dfsan_label label, T &data) { // NOLINT
+template <typename T> void dfsan_set_label(dfsan_label label, T &data) {
   dfsan_set_label(label, (void *)&data, sizeof(T));
 }
 
index 56eae3d40f968f6bd5f07e34c25856d9d0be3767..3f3f1e78dfb8594835935aa0473ad459adc808bc 100644 (file)
 #ifndef SANITIZER_LINUX_SYSCALL_HOOKS_H
 #define SANITIZER_LINUX_SYSCALL_HOOKS_H
 
-#define __sanitizer_syscall_pre_time(tloc) \
+#define __sanitizer_syscall_pre_time(tloc)                                     \
   __sanitizer_syscall_pre_impl_time((long)(tloc))
-#define __sanitizer_syscall_post_time(res, tloc) \
+#define __sanitizer_syscall_post_time(res, tloc)                               \
   __sanitizer_syscall_post_impl_time(res, (long)(tloc))
-#define __sanitizer_syscall_pre_stime(tptr) \
+#define __sanitizer_syscall_pre_stime(tptr)                                    \
   __sanitizer_syscall_pre_impl_stime((long)(tptr))
-#define __sanitizer_syscall_post_stime(res, tptr) \
+#define __sanitizer_syscall_post_stime(res, tptr)                              \
   __sanitizer_syscall_post_impl_stime(res, (long)(tptr))
-#define __sanitizer_syscall_pre_gettimeofday(tv, tz) \
+#define __sanitizer_syscall_pre_gettimeofday(tv, tz)                           \
   __sanitizer_syscall_pre_impl_gettimeofday((long)(tv), (long)(tz))
-#define __sanitizer_syscall_post_gettimeofday(res, tv, tz) \
+#define __sanitizer_syscall_post_gettimeofday(res, tv, tz)                     \
   __sanitizer_syscall_post_impl_gettimeofday(res, (long)(tv), (long)(tz))
-#define __sanitizer_syscall_pre_settimeofday(tv, tz) \
+#define __sanitizer_syscall_pre_settimeofday(tv, tz)                           \
   __sanitizer_syscall_pre_impl_settimeofday((long)(tv), (long)(tz))
-#define __sanitizer_syscall_post_settimeofday(res, tv, tz) \
+#define __sanitizer_syscall_post_settimeofday(res, tv, tz)                     \
   __sanitizer_syscall_post_impl_settimeofday(res, (long)(tv), (long)(tz))
-#define __sanitizer_syscall_pre_adjtimex(txc_p) \
+#define __sanitizer_syscall_pre_adjtimex(txc_p)                                \
   __sanitizer_syscall_pre_impl_adjtimex((long)(txc_p))
-#define __sanitizer_syscall_post_adjtimex(res, txc_p) \
+#define __sanitizer_syscall_post_adjtimex(res, txc_p)                          \
   __sanitizer_syscall_post_impl_adjtimex(res, (long)(txc_p))
-#define __sanitizer_syscall_pre_times(tbuf) \
+#define __sanitizer_syscall_pre_times(tbuf)                                    \
   __sanitizer_syscall_pre_impl_times((long)(tbuf))
-#define __sanitizer_syscall_post_times(res, tbuf) \
+#define __sanitizer_syscall_post_times(res, tbuf)                              \
   __sanitizer_syscall_post_impl_times(res, (long)(tbuf))
 #define __sanitizer_syscall_pre_gettid() __sanitizer_syscall_pre_impl_gettid()
-#define __sanitizer_syscall_post_gettid(res) \
+#define __sanitizer_syscall_post_gettid(res)                                   \
   __sanitizer_syscall_post_impl_gettid(res)
-#define __sanitizer_syscall_pre_nanosleep(rqtp, rmtp) \
+#define __sanitizer_syscall_pre_nanosleep(rqtp, rmtp)                          \
   __sanitizer_syscall_pre_impl_nanosleep((long)(rqtp), (long)(rmtp))
-#define __sanitizer_syscall_post_nanosleep(res, rqtp, rmtp) \
+#define __sanitizer_syscall_post_nanosleep(res, rqtp, rmtp)                    \
   __sanitizer_syscall_post_impl_nanosleep(res, (long)(rqtp), (long)(rmtp))
-#define __sanitizer_syscall_pre_alarm(seconds) \
+#define __sanitizer_syscall_pre_alarm(seconds)                                 \
   __sanitizer_syscall_pre_impl_alarm((long)(seconds))
-#define __sanitizer_syscall_post_alarm(res, seconds) \
+#define __sanitizer_syscall_post_alarm(res, seconds)                           \
   __sanitizer_syscall_post_impl_alarm(res, (long)(seconds))
 #define __sanitizer_syscall_pre_getpid() __sanitizer_syscall_pre_impl_getpid()
-#define __sanitizer_syscall_post_getpid(res) \
+#define __sanitizer_syscall_post_getpid(res)                                   \
   __sanitizer_syscall_post_impl_getpid(res)
 #define __sanitizer_syscall_pre_getppid() __sanitizer_syscall_pre_impl_getppid()
-#define __sanitizer_syscall_post_getppid(res) \
+#define __sanitizer_syscall_post_getppid(res)                                  \
   __sanitizer_syscall_post_impl_getppid(res)
 #define __sanitizer_syscall_pre_getuid() __sanitizer_syscall_pre_impl_getuid()
-#define __sanitizer_syscall_post_getuid(res) \
+#define __sanitizer_syscall_post_getuid(res)                                   \
   __sanitizer_syscall_post_impl_getuid(res)
 #define __sanitizer_syscall_pre_geteuid() __sanitizer_syscall_pre_impl_geteuid()
-#define __sanitizer_syscall_post_geteuid(res) \
+#define __sanitizer_syscall_post_geteuid(res)                                  \
   __sanitizer_syscall_post_impl_geteuid(res)
 #define __sanitizer_syscall_pre_getgid() __sanitizer_syscall_pre_impl_getgid()
-#define __sanitizer_syscall_post_getgid(res) \
+#define __sanitizer_syscall_post_getgid(res)                                   \
   __sanitizer_syscall_post_impl_getgid(res)
 #define __sanitizer_syscall_pre_getegid() __sanitizer_syscall_pre_impl_getegid()
-#define __sanitizer_syscall_post_getegid(res) \
+#define __sanitizer_syscall_post_getegid(res)                                  \
   __sanitizer_syscall_post_impl_getegid(res)
-#define __sanitizer_syscall_pre_getresuid(ruid, euid, suid)          \
-  __sanitizer_syscall_pre_impl_getresuid((long)(ruid), (long)(euid), \
+#define __sanitizer_syscall_pre_getresuid(ruid, euid, suid)                    \
+  __sanitizer_syscall_pre_impl_getresuid((long)(ruid), (long)(euid),           \
                                          (long)(suid))
-#define __sanitizer_syscall_post_getresuid(res, ruid, euid, suid)          \
-  __sanitizer_syscall_post_impl_getresuid(res, (long)(ruid), (long)(euid), \
+#define __sanitizer_syscall_post_getresuid(res, ruid, euid, suid)              \
+  __sanitizer_syscall_post_impl_getresuid(res, (long)(ruid), (long)(euid),     \
                                           (long)(suid))
-#define __sanitizer_syscall_pre_getresgid(rgid, egid, sgid)          \
-  __sanitizer_syscall_pre_impl_getresgid((long)(rgid), (long)(egid), \
+#define __sanitizer_syscall_pre_getresgid(rgid, egid, sgid)                    \
+  __sanitizer_syscall_pre_impl_getresgid((long)(rgid), (long)(egid),           \
                                          (long)(sgid))
-#define __sanitizer_syscall_post_getresgid(res, rgid, egid, sgid)          \
-  __sanitizer_syscall_post_impl_getresgid(res, (long)(rgid), (long)(egid), \
+#define __sanitizer_syscall_post_getresgid(res, rgid, egid, sgid)              \
+  __sanitizer_syscall_post_impl_getresgid(res, (long)(rgid), (long)(egid),     \
                                           (long)(sgid))
-#define __sanitizer_syscall_pre_getpgid(pid) \
+#define __sanitizer_syscall_pre_getpgid(pid)                                   \
   __sanitizer_syscall_pre_impl_getpgid((long)(pid))
-#define __sanitizer_syscall_post_getpgid(res, pid) \
+#define __sanitizer_syscall_post_getpgid(res, pid)                             \
   __sanitizer_syscall_post_impl_getpgid(res, (long)(pid))
 #define __sanitizer_syscall_pre_getpgrp() __sanitizer_syscall_pre_impl_getpgrp()
-#define __sanitizer_syscall_post_getpgrp(res) \
+#define __sanitizer_syscall_post_getpgrp(res)                                  \
   __sanitizer_syscall_post_impl_getpgrp(res)
-#define __sanitizer_syscall_pre_getsid(pid) \
+#define __sanitizer_syscall_pre_getsid(pid)                                    \
   __sanitizer_syscall_pre_impl_getsid((long)(pid))
-#define __sanitizer_syscall_post_getsid(res, pid) \
+#define __sanitizer_syscall_post_getsid(res, pid)                              \
   __sanitizer_syscall_post_impl_getsid(res, (long)(pid))
-#define __sanitizer_syscall_pre_getgroups(gidsetsize, grouplist) \
+#define __sanitizer_syscall_pre_getgroups(gidsetsize, grouplist)               \
   __sanitizer_syscall_pre_impl_getgroups((long)(gidsetsize), (long)(grouplist))
-#define __sanitizer_syscall_post_getgroups(res, gidsetsize, grouplist) \
-  __sanitizer_syscall_post_impl_getgroups(res, (long)(gidsetsize),     \
+#define __sanitizer_syscall_post_getgroups(res, gidsetsize, grouplist)         \
+  __sanitizer_syscall_post_impl_getgroups(res, (long)(gidsetsize),             \
                                           (long)(grouplist))
-#define __sanitizer_syscall_pre_setregid(rgid, egid) \
+#define __sanitizer_syscall_pre_setregid(rgid, egid)                           \
   __sanitizer_syscall_pre_impl_setregid((long)(rgid), (long)(egid))
-#define __sanitizer_syscall_post_setregid(res, rgid, egid) \
+#define __sanitizer_syscall_post_setregid(res, rgid, egid)                     \
   __sanitizer_syscall_post_impl_setregid(res, (long)(rgid), (long)(egid))
-#define __sanitizer_syscall_pre_setgid(gid) \
+#define __sanitizer_syscall_pre_setgid(gid)                                    \
   __sanitizer_syscall_pre_impl_setgid((long)(gid))
-#define __sanitizer_syscall_post_setgid(res, gid) \
+#define __sanitizer_syscall_post_setgid(res, gid)                              \
   __sanitizer_syscall_post_impl_setgid(res, (long)(gid))
-#define __sanitizer_syscall_pre_setreuid(ruid, euid) \
+#define __sanitizer_syscall_pre_setreuid(ruid, euid)                           \
   __sanitizer_syscall_pre_impl_setreuid((long)(ruid), (long)(euid))
-#define __sanitizer_syscall_post_setreuid(res, ruid, euid) \
+#define __sanitizer_syscall_post_setreuid(res, ruid, euid)                     \
   __sanitizer_syscall_post_impl_setreuid(res, (long)(ruid), (long)(euid))
-#define __sanitizer_syscall_pre_setuid(uid) \
+#define __sanitizer_syscall_pre_setuid(uid)                                    \
   __sanitizer_syscall_pre_impl_setuid((long)(uid))
-#define __sanitizer_syscall_post_setuid(res, uid) \
+#define __sanitizer_syscall_post_setuid(res, uid)                              \
   __sanitizer_syscall_post_impl_setuid(res, (long)(uid))
-#define __sanitizer_syscall_pre_setresuid(ruid, euid, suid)          \
-  __sanitizer_syscall_pre_impl_setresuid((long)(ruid), (long)(euid), \
+#define __sanitizer_syscall_pre_setresuid(ruid, euid, suid)                    \
+  __sanitizer_syscall_pre_impl_setresuid((long)(ruid), (long)(euid),           \
                                          (long)(suid))
-#define __sanitizer_syscall_post_setresuid(res, ruid, euid, suid)          \
-  __sanitizer_syscall_post_impl_setresuid(res, (long)(ruid), (long)(euid), \
+#define __sanitizer_syscall_post_setresuid(res, ruid, euid, suid)              \
+  __sanitizer_syscall_post_impl_setresuid(res, (long)(ruid), (long)(euid),     \
                                           (long)(suid))
-#define __sanitizer_syscall_pre_setresgid(rgid, egid, sgid)          \
-  __sanitizer_syscall_pre_impl_setresgid((long)(rgid), (long)(egid), \
+#define __sanitizer_syscall_pre_setresgid(rgid, egid, sgid)                    \
+  __sanitizer_syscall_pre_impl_setresgid((long)(rgid), (long)(egid),           \
                                          (long)(sgid))
-#define __sanitizer_syscall_post_setresgid(res, rgid, egid, sgid)          \
-  __sanitizer_syscall_post_impl_setresgid(res, (long)(rgid), (long)(egid), \
+#define __sanitizer_syscall_post_setresgid(res, rgid, egid, sgid)              \
+  __sanitizer_syscall_post_impl_setresgid(res, (long)(rgid), (long)(egid),     \
                                           (long)(sgid))
-#define __sanitizer_syscall_pre_setfsuid(uid) \
+#define __sanitizer_syscall_pre_setfsuid(uid)                                  \
   __sanitizer_syscall_pre_impl_setfsuid((long)(uid))
-#define __sanitizer_syscall_post_setfsuid(res, uid) \
+#define __sanitizer_syscall_post_setfsuid(res, uid)                            \
   __sanitizer_syscall_post_impl_setfsuid(res, (long)(uid))
-#define __sanitizer_syscall_pre_setfsgid(gid) \
+#define __sanitizer_syscall_pre_setfsgid(gid)                                  \
   __sanitizer_syscall_pre_impl_setfsgid((long)(gid))
-#define __sanitizer_syscall_post_setfsgid(res, gid) \
+#define __sanitizer_syscall_post_setfsgid(res, gid)                            \
   __sanitizer_syscall_post_impl_setfsgid(res, (long)(gid))
-#define __sanitizer_syscall_pre_setpgid(pid, pgid) \
+#define __sanitizer_syscall_pre_setpgid(pid, pgid)                             \
   __sanitizer_syscall_pre_impl_setpgid((long)(pid), (long)(pgid))
-#define __sanitizer_syscall_post_setpgid(res, pid, pgid) \
+#define __sanitizer_syscall_post_setpgid(res, pid, pgid)                       \
   __sanitizer_syscall_post_impl_setpgid(res, (long)(pid), (long)(pgid))
 #define __sanitizer_syscall_pre_setsid() __sanitizer_syscall_pre_impl_setsid()
-#define __sanitizer_syscall_post_setsid(res) \
+#define __sanitizer_syscall_post_setsid(res)                                   \
   __sanitizer_syscall_post_impl_setsid(res)
-#define __sanitizer_syscall_pre_setgroups(gidsetsize, grouplist) \
+#define __sanitizer_syscall_pre_setgroups(gidsetsize, grouplist)               \
   __sanitizer_syscall_pre_impl_setgroups((long)(gidsetsize), (long)(grouplist))
-#define __sanitizer_syscall_post_setgroups(res, gidsetsize, grouplist) \
-  __sanitizer_syscall_post_impl_setgroups(res, (long)(gidsetsize),     \
+#define __sanitizer_syscall_post_setgroups(res, gidsetsize, grouplist)         \
+  __sanitizer_syscall_post_impl_setgroups(res, (long)(gidsetsize),             \
                                           (long)(grouplist))
-#define __sanitizer_syscall_pre_acct(name) \
+#define __sanitizer_syscall_pre_acct(name)                                     \
   __sanitizer_syscall_pre_impl_acct((long)(name))
-#define __sanitizer_syscall_post_acct(res, name) \
+#define __sanitizer_syscall_post_acct(res, name)                               \
   __sanitizer_syscall_post_impl_acct(res, (long)(name))
-#define __sanitizer_syscall_pre_capget(header, dataptr) \
+#define __sanitizer_syscall_pre_capget(header, dataptr)                        \
   __sanitizer_syscall_pre_impl_capget((long)(header), (long)(dataptr))
-#define __sanitizer_syscall_post_capget(res, header, dataptr) \
+#define __sanitizer_syscall_post_capget(res, header, dataptr)                  \
   __sanitizer_syscall_post_impl_capget(res, (long)(header), (long)(dataptr))
-#define __sanitizer_syscall_pre_capset(header, data) \
+#define __sanitizer_syscall_pre_capset(header, data)                           \
   __sanitizer_syscall_pre_impl_capset((long)(header), (long)(data))
-#define __sanitizer_syscall_post_capset(res, header, data) \
+#define __sanitizer_syscall_post_capset(res, header, data)                     \
   __sanitizer_syscall_post_impl_capset(res, (long)(header), (long)(data))
-#define __sanitizer_syscall_pre_personality(personality) \
+#define __sanitizer_syscall_pre_personality(personality)                       \
   __sanitizer_syscall_pre_impl_personality((long)(personality))
-#define __sanitizer_syscall_post_personality(res, personality) \
+#define __sanitizer_syscall_post_personality(res, personality)                 \
   __sanitizer_syscall_post_impl_personality(res, (long)(personality))
-#define __sanitizer_syscall_pre_sigpending(set) \
+#define __sanitizer_syscall_pre_sigpending(set)                                \
   __sanitizer_syscall_pre_impl_sigpending((long)(set))
-#define __sanitizer_syscall_post_sigpending(res, set) \
+#define __sanitizer_syscall_post_sigpending(res, set)                          \
   __sanitizer_syscall_post_impl_sigpending(res, (long)(set))
-#define __sanitizer_syscall_pre_sigprocmask(how, set, oset)          \
-  __sanitizer_syscall_pre_impl_sigprocmask((long)(how), (long)(set), \
+#define __sanitizer_syscall_pre_sigprocmask(how, set, oset)                    \
+  __sanitizer_syscall_pre_impl_sigprocmask((long)(how), (long)(set),           \
                                            (long)(oset))
-#define __sanitizer_syscall_post_sigprocmask(res, how, set, oset)          \
-  __sanitizer_syscall_post_impl_sigprocmask(res, (long)(how), (long)(set), \
+#define __sanitizer_syscall_post_sigprocmask(res, how, set, oset)              \
+  __sanitizer_syscall_post_impl_sigprocmask(res, (long)(how), (long)(set),     \
                                             (long)(oset))
-#define __sanitizer_syscall_pre_getitimer(which, value) \
+#define __sanitizer_syscall_pre_getitimer(which, value)                        \
   __sanitizer_syscall_pre_impl_getitimer((long)(which), (long)(value))
-#define __sanitizer_syscall_post_getitimer(res, which, value) \
+#define __sanitizer_syscall_post_getitimer(res, which, value)                  \
   __sanitizer_syscall_post_impl_getitimer(res, (long)(which), (long)(value))
-#define __sanitizer_syscall_pre_setitimer(which, value, ovalue)        \
-  __sanitizer_syscall_pre_impl_setitimer((long)(which), (long)(value), \
+#define __sanitizer_syscall_pre_setitimer(which, value, ovalue)                \
+  __sanitizer_syscall_pre_impl_setitimer((long)(which), (long)(value),         \
                                          (long)(ovalue))
-#define __sanitizer_syscall_post_setitimer(res, which, value, ovalue)        \
-  __sanitizer_syscall_post_impl_setitimer(res, (long)(which), (long)(value), \
+#define __sanitizer_syscall_post_setitimer(res, which, value, ovalue)          \
+  __sanitizer_syscall_post_impl_setitimer(res, (long)(which), (long)(value),   \
                                           (long)(ovalue))
-#define __sanitizer_syscall_pre_timer_create(which_clock, timer_event_spec, \
-                                             created_timer_id)              \
-  __sanitizer_syscall_pre_impl_timer_create(                                \
+#define __sanitizer_syscall_pre_timer_create(which_clock, timer_event_spec,    \
+                                             created_timer_id)                 \
+  __sanitizer_syscall_pre_impl_timer_create(                                   \
       (long)(which_clock), (long)(timer_event_spec), (long)(created_timer_id))
-#define __sanitizer_syscall_post_timer_create(                         \
-    res, which_clock, timer_event_spec, created_timer_id)              \
-  __sanitizer_syscall_post_impl_timer_create(res, (long)(which_clock), \
-                                             (long)(timer_event_spec), \
+#define __sanitizer_syscall_post_timer_create(                                 \
+    res, which_clock, timer_event_spec, created_timer_id)                      \
+  __sanitizer_syscall_post_impl_timer_create(res, (long)(which_clock),         \
+                                             (long)(timer_event_spec),         \
                                              (long)(created_timer_id))
-#define __sanitizer_syscall_pre_timer_gettime(timer_id, setting) \
+#define __sanitizer_syscall_pre_timer_gettime(timer_id, setting)               \
   __sanitizer_syscall_pre_impl_timer_gettime((long)(timer_id), (long)(setting))
-#define __sanitizer_syscall_post_timer_gettime(res, timer_id, setting) \
-  __sanitizer_syscall_post_impl_timer_gettime(res, (long)(timer_id),   \
+#define __sanitizer_syscall_post_timer_gettime(res, timer_id, setting)         \
+  __sanitizer_syscall_post_impl_timer_gettime(res, (long)(timer_id),           \
                                               (long)(setting))
-#define __sanitizer_syscall_pre_timer_getoverrun(timer_id) \
+#define __sanitizer_syscall_pre_timer_getoverrun(timer_id)                     \
   __sanitizer_syscall_pre_impl_timer_getoverrun((long)(timer_id))
-#define __sanitizer_syscall_post_timer_getoverrun(res, timer_id) \
+#define __sanitizer_syscall_post_timer_getoverrun(res, timer_id)               \
   __sanitizer_syscall_post_impl_timer_getoverrun(res, (long)(timer_id))
-#define __sanitizer_syscall_pre_timer_settime(timer_id, flags, new_setting,   \
-                                              old_setting)                    \
-  __sanitizer_syscall_pre_impl_timer_settime((long)(timer_id), (long)(flags), \
-                                             (long)(new_setting),             \
+#define __sanitizer_syscall_pre_timer_settime(timer_id, flags, new_setting,    \
+                                              old_setting)                     \
+  __sanitizer_syscall_pre_impl_timer_settime((long)(timer_id), (long)(flags),  \
+                                             (long)(new_setting),              \
                                              (long)(old_setting))
-#define __sanitizer_syscall_post_timer_settime(res, timer_id, flags,     \
-                                               new_setting, old_setting) \
-  __sanitizer_syscall_post_impl_timer_settime(                           \
-      res, (long)(timer_id), (long)(flags), (long)(new_setting),         \
+#define __sanitizer_syscall_post_timer_settime(res, timer_id, flags,           \
+                                               new_setting, old_setting)       \
+  __sanitizer_syscall_post_impl_timer_settime(                                 \
+      res, (long)(timer_id), (long)(flags), (long)(new_setting),               \
       (long)(old_setting))
-#define __sanitizer_syscall_pre_timer_delete(timer_id) \
+#define __sanitizer_syscall_pre_timer_delete(timer_id)                         \
   __sanitizer_syscall_pre_impl_timer_delete((long)(timer_id))
-#define __sanitizer_syscall_post_timer_delete(res, timer_id) \
+#define __sanitizer_syscall_post_timer_delete(res, timer_id)                   \
   __sanitizer_syscall_post_impl_timer_delete(res, (long)(timer_id))
-#define __sanitizer_syscall_pre_clock_settime(which_clock, tp) \
+#define __sanitizer_syscall_pre_clock_settime(which_clock, tp)                 \
   __sanitizer_syscall_pre_impl_clock_settime((long)(which_clock), (long)(tp))
-#define __sanitizer_syscall_post_clock_settime(res, which_clock, tp)    \
-  __sanitizer_syscall_post_impl_clock_settime(res, (long)(which_clock), \
+#define __sanitizer_syscall_post_clock_settime(res, which_clock, tp)           \
+  __sanitizer_syscall_post_impl_clock_settime(res, (long)(which_clock),        \
                                               (long)(tp))
-#define __sanitizer_syscall_pre_clock_gettime(which_clock, tp) \
+#define __sanitizer_syscall_pre_clock_gettime(which_clock, tp)                 \
   __sanitizer_syscall_pre_impl_clock_gettime((long)(which_clock), (long)(tp))
-#define __sanitizer_syscall_post_clock_gettime(res, which_clock, tp)    \
-  __sanitizer_syscall_post_impl_clock_gettime(res, (long)(which_clock), \
+#define __sanitizer_syscall_post_clock_gettime(res, which_clock, tp)           \
+  __sanitizer_syscall_post_impl_clock_gettime(res, (long)(which_clock),        \
                                               (long)(tp))
-#define __sanitizer_syscall_pre_clock_adjtime(which_clock, tx) \
+#define __sanitizer_syscall_pre_clock_adjtime(which_clock, tx)                 \
   __sanitizer_syscall_pre_impl_clock_adjtime((long)(which_clock), (long)(tx))
-#define __sanitizer_syscall_post_clock_adjtime(res, which_clock, tx)    \
-  __sanitizer_syscall_post_impl_clock_adjtime(res, (long)(which_clock), \
+#define __sanitizer_syscall_post_clock_adjtime(res, which_clock, tx)           \
+  __sanitizer_syscall_post_impl_clock_adjtime(res, (long)(which_clock),        \
                                               (long)(tx))
-#define __sanitizer_syscall_pre_clock_getres(which_clock, tp) \
+#define __sanitizer_syscall_pre_clock_getres(which_clock, tp)                  \
   __sanitizer_syscall_pre_impl_clock_getres((long)(which_clock), (long)(tp))
-#define __sanitizer_syscall_post_clock_getres(res, which_clock, tp)    \
-  __sanitizer_syscall_post_impl_clock_getres(res, (long)(which_clock), \
+#define __sanitizer_syscall_post_clock_getres(res, which_clock, tp)            \
+  __sanitizer_syscall_post_impl_clock_getres(res, (long)(which_clock),         \
                                              (long)(tp))
-#define __sanitizer_syscall_pre_clock_nanosleep(which_clock, flags, rqtp, \
-                                                rmtp)                     \
-  __sanitizer_syscall_pre_impl_clock_nanosleep(                           \
+#define __sanitizer_syscall_pre_clock_nanosleep(which_clock, flags, rqtp,      \
+                                                rmtp)                          \
+  __sanitizer_syscall_pre_impl_clock_nanosleep(                                \
       (long)(which_clock), (long)(flags), (long)(rqtp), (long)(rmtp))
-#define __sanitizer_syscall_post_clock_nanosleep(res, which_clock, flags, \
-                                                 rqtp, rmtp)              \
-  __sanitizer_syscall_post_impl_clock_nanosleep(                          \
+#define __sanitizer_syscall_post_clock_nanosleep(res, which_clock, flags,      \
+                                                 rqtp, rmtp)                   \
+  __sanitizer_syscall_post_impl_clock_nanosleep(                               \
       res, (long)(which_clock), (long)(flags), (long)(rqtp), (long)(rmtp))
-#define __sanitizer_syscall_pre_nice(increment) \
+#define __sanitizer_syscall_pre_nice(increment)                                \
   __sanitizer_syscall_pre_impl_nice((long)(increment))
-#define __sanitizer_syscall_post_nice(res, increment) \
+#define __sanitizer_syscall_post_nice(res, increment)                          \
   __sanitizer_syscall_post_impl_nice(res, (long)(increment))
 #define __sanitizer_syscall_pre_sched_setscheduler(pid, policy, param)         \
   __sanitizer_syscall_pre_impl_sched_setscheduler((long)(pid), (long)(policy), \
                                                   (long)(param))
-#define __sanitizer_syscall_post_sched_setscheduler(res, pid, policy, param) \
-  __sanitizer_syscall_post_impl_sched_setscheduler(                          \
+#define __sanitizer_syscall_post_sched_setscheduler(res, pid, policy, param)   \
+  __sanitizer_syscall_post_impl_sched_setscheduler(                            \
       res, (long)(pid), (long)(policy), (long)(param))
-#define __sanitizer_syscall_pre_sched_setparam(pid, param) \
+#define __sanitizer_syscall_pre_sched_setparam(pid, param)                     \
   __sanitizer_syscall_pre_impl_sched_setparam((long)(pid), (long)(param))
-#define __sanitizer_syscall_post_sched_setparam(res, pid, param) \
+#define __sanitizer_syscall_post_sched_setparam(res, pid, param)               \
   __sanitizer_syscall_post_impl_sched_setparam(res, (long)(pid), (long)(param))
-#define __sanitizer_syscall_pre_sched_getscheduler(pid) \
+#define __sanitizer_syscall_pre_sched_getscheduler(pid)                        \
   __sanitizer_syscall_pre_impl_sched_getscheduler((long)(pid))
-#define __sanitizer_syscall_post_sched_getscheduler(res, pid) \
+#define __sanitizer_syscall_post_sched_getscheduler(res, pid)                  \
   __sanitizer_syscall_post_impl_sched_getscheduler(res, (long)(pid))
-#define __sanitizer_syscall_pre_sched_getparam(pid, param) \
+#define __sanitizer_syscall_pre_sched_getparam(pid, param)                     \
   __sanitizer_syscall_pre_impl_sched_getparam((long)(pid), (long)(param))
-#define __sanitizer_syscall_post_sched_getparam(res, pid, param) \
+#define __sanitizer_syscall_post_sched_getparam(res, pid, param)               \
   __sanitizer_syscall_post_impl_sched_getparam(res, (long)(pid), (long)(param))
-#define __sanitizer_syscall_pre_sched_setaffinity(pid, len, user_mask_ptr) \
-  __sanitizer_syscall_pre_impl_sched_setaffinity((long)(pid), (long)(len), \
+#define __sanitizer_syscall_pre_sched_setaffinity(pid, len, user_mask_ptr)     \
+  __sanitizer_syscall_pre_impl_sched_setaffinity((long)(pid), (long)(len),     \
                                                  (long)(user_mask_ptr))
-#define __sanitizer_syscall_post_sched_setaffinity(res, pid, len, \
-                                                   user_mask_ptr) \
-  __sanitizer_syscall_post_impl_sched_setaffinity(                \
+#define __sanitizer_syscall_post_sched_setaffinity(res, pid, len,              \
+                                                   user_mask_ptr)              \
+  __sanitizer_syscall_post_impl_sched_setaffinity(                             \
       res, (long)(pid), (long)(len), (long)(user_mask_ptr))
-#define __sanitizer_syscall_pre_sched_getaffinity(pid, len, user_mask_ptr) \
-  __sanitizer_syscall_pre_impl_sched_getaffinity((long)(pid), (long)(len), \
+#define __sanitizer_syscall_pre_sched_getaffinity(pid, len, user_mask_ptr)     \
+  __sanitizer_syscall_pre_impl_sched_getaffinity((long)(pid), (long)(len),     \
                                                  (long)(user_mask_ptr))
-#define __sanitizer_syscall_post_sched_getaffinity(res, pid, len, \
-                                                   user_mask_ptr) \
-  __sanitizer_syscall_post_impl_sched_getaffinity(                \
+#define __sanitizer_syscall_post_sched_getaffinity(res, pid, len,              \
+                                                   user_mask_ptr)              \
+  __sanitizer_syscall_post_impl_sched_getaffinity(                             \
       res, (long)(pid), (long)(len), (long)(user_mask_ptr))
-#define __sanitizer_syscall_pre_sched_yield() \
+#define __sanitizer_syscall_pre_sched_yield()                                  \
   __sanitizer_syscall_pre_impl_sched_yield()
-#define __sanitizer_syscall_post_sched_yield(res) \
+#define __sanitizer_syscall_post_sched_yield(res)                              \
   __sanitizer_syscall_post_impl_sched_yield(res)
-#define __sanitizer_syscall_pre_sched_get_priority_max(policy) \
+#define __sanitizer_syscall_pre_sched_get_priority_max(policy)                 \
   __sanitizer_syscall_pre_impl_sched_get_priority_max((long)(policy))
-#define __sanitizer_syscall_post_sched_get_priority_max(res, policy) \
+#define __sanitizer_syscall_post_sched_get_priority_max(res, policy)           \
   __sanitizer_syscall_post_impl_sched_get_priority_max(res, (long)(policy))
-#define __sanitizer_syscall_pre_sched_get_priority_min(policy) \
+#define __sanitizer_syscall_pre_sched_get_priority_min(policy)                 \
   __sanitizer_syscall_pre_impl_sched_get_priority_min((long)(policy))
-#define __sanitizer_syscall_post_sched_get_priority_min(res, policy) \
+#define __sanitizer_syscall_post_sched_get_priority_min(res, policy)           \
   __sanitizer_syscall_post_impl_sched_get_priority_min(res, (long)(policy))
-#define __sanitizer_syscall_pre_sched_rr_get_interval(pid, interval) \
-  __sanitizer_syscall_pre_impl_sched_rr_get_interval((long)(pid),    \
+#define __sanitizer_syscall_pre_sched_rr_get_interval(pid, interval)           \
+  __sanitizer_syscall_pre_impl_sched_rr_get_interval((long)(pid),              \
                                                      (long)(interval))
-#define __sanitizer_syscall_post_sched_rr_get_interval(res, pid, interval) \
-  __sanitizer_syscall_post_impl_sched_rr_get_interval(res, (long)(pid),    \
+#define __sanitizer_syscall_post_sched_rr_get_interval(res, pid, interval)     \
+  __sanitizer_syscall_post_impl_sched_rr_get_interval(res, (long)(pid),        \
                                                       (long)(interval))
-#define __sanitizer_syscall_pre_setpriority(which, who, niceval)       \
-  __sanitizer_syscall_pre_impl_setpriority((long)(which), (long)(who), \
+#define __sanitizer_syscall_pre_setpriority(which, who, niceval)               \
+  __sanitizer_syscall_pre_impl_setpriority((long)(which), (long)(who),         \
                                            (long)(niceval))
-#define __sanitizer_syscall_post_setpriority(res, which, who, niceval)       \
-  __sanitizer_syscall_post_impl_setpriority(res, (long)(which), (long)(who), \
+#define __sanitizer_syscall_post_setpriority(res, which, who, niceval)         \
+  __sanitizer_syscall_post_impl_setpriority(res, (long)(which), (long)(who),   \
                                             (long)(niceval))
-#define __sanitizer_syscall_pre_getpriority(which, who) \
+#define __sanitizer_syscall_pre_getpriority(which, who)                        \
   __sanitizer_syscall_pre_impl_getpriority((long)(which), (long)(who))
-#define __sanitizer_syscall_post_getpriority(res, which, who) \
+#define __sanitizer_syscall_post_getpriority(res, which, who)                  \
   __sanitizer_syscall_post_impl_getpriority(res, (long)(which), (long)(who))
-#define __sanitizer_syscall_pre_shutdown(arg0, arg1) \
+#define __sanitizer_syscall_pre_shutdown(arg0, arg1)                           \
   __sanitizer_syscall_pre_impl_shutdown((long)(arg0), (long)(arg1))
-#define __sanitizer_syscall_post_shutdown(res, arg0, arg1) \
+#define __sanitizer_syscall_post_shutdown(res, arg0, arg1)                     \
   __sanitizer_syscall_post_impl_shutdown(res, (long)(arg0), (long)(arg1))
-#define __sanitizer_syscall_pre_reboot(magic1, magic2, cmd, arg)      \
-  __sanitizer_syscall_pre_impl_reboot((long)(magic1), (long)(magic2), \
+#define __sanitizer_syscall_pre_reboot(magic1, magic2, cmd, arg)               \
+  __sanitizer_syscall_pre_impl_reboot((long)(magic1), (long)(magic2),          \
                                       (long)(cmd), (long)(arg))
-#define __sanitizer_syscall_post_reboot(res, magic1, magic2, cmd, arg)      \
-  __sanitizer_syscall_post_impl_reboot(res, (long)(magic1), (long)(magic2), \
+#define __sanitizer_syscall_post_reboot(res, magic1, magic2, cmd, arg)         \
+  __sanitizer_syscall_post_impl_reboot(res, (long)(magic1), (long)(magic2),    \
                                        (long)(cmd), (long)(arg))
-#define __sanitizer_syscall_pre_restart_syscall() \
+#define __sanitizer_syscall_pre_restart_syscall()                              \
   __sanitizer_syscall_pre_impl_restart_syscall()
-#define __sanitizer_syscall_post_restart_syscall(res) \
+#define __sanitizer_syscall_post_restart_syscall(res)                          \
   __sanitizer_syscall_post_impl_restart_syscall(res)
-#define __sanitizer_syscall_pre_kexec_load(entry, nr_segments, segments,      \
-                                           flags)                             \
-  __sanitizer_syscall_pre_impl_kexec_load((long)(entry), (long)(nr_segments), \
+#define __sanitizer_syscall_pre_kexec_load(entry, nr_segments, segments,       \
+                                           flags)                              \
+  __sanitizer_syscall_pre_impl_kexec_load((long)(entry), (long)(nr_segments),  \
                                           (long)(segments), (long)(flags))
 #define __sanitizer_syscall_post_kexec_load(res, entry, nr_segments, segments, \
                                             flags)                             \
   __sanitizer_syscall_post_impl_kexec_load(res, (long)(entry),                 \
                                            (long)(nr_segments),                \
                                            (long)(segments), (long)(flags))
-#define __sanitizer_syscall_pre_exit(error_code) \
+#define __sanitizer_syscall_pre_exit(error_code)                               \
   __sanitizer_syscall_pre_impl_exit((long)(error_code))
-#define __sanitizer_syscall_post_exit(res, error_code) \
+#define __sanitizer_syscall_post_exit(res, error_code)                         \
   __sanitizer_syscall_post_impl_exit(res, (long)(error_code))
-#define __sanitizer_syscall_pre_exit_group(error_code) \
+#define __sanitizer_syscall_pre_exit_group(error_code)                         \
   __sanitizer_syscall_pre_impl_exit_group((long)(error_code))
-#define __sanitizer_syscall_post_exit_group(res, error_code) \
+#define __sanitizer_syscall_post_exit_group(res, error_code)                   \
   __sanitizer_syscall_post_impl_exit_group(res, (long)(error_code))
-#define __sanitizer_syscall_pre_wait4(pid, stat_addr, options, ru)   \
-  __sanitizer_syscall_pre_impl_wait4((long)(pid), (long)(stat_addr), \
+#define __sanitizer_syscall_pre_wait4(pid, stat_addr, options, ru)             \
+  __sanitizer_syscall_pre_impl_wait4((long)(pid), (long)(stat_addr),           \
                                      (long)(options), (long)(ru))
-#define __sanitizer_syscall_post_wait4(res, pid, stat_addr, options, ru)   \
-  __sanitizer_syscall_post_impl_wait4(res, (long)(pid), (long)(stat_addr), \
+#define __sanitizer_syscall_post_wait4(res, pid, stat_addr, options, ru)       \
+  __sanitizer_syscall_post_impl_wait4(res, (long)(pid), (long)(stat_addr),     \
                                       (long)(options), (long)(ru))
-#define __sanitizer_syscall_pre_waitid(which, pid, infop, options, ru) \
-  __sanitizer_syscall_pre_impl_waitid(                                 \
+#define __sanitizer_syscall_pre_waitid(which, pid, infop, options, ru)         \
+  __sanitizer_syscall_pre_impl_waitid(                                         \
       (long)(which), (long)(pid), (long)(infop), (long)(options), (long)(ru))
-#define __sanitizer_syscall_post_waitid(res, which, pid, infop, options, ru) \
-  __sanitizer_syscall_post_impl_waitid(res, (long)(which), (long)(pid),      \
-                                       (long)(infop), (long)(options),       \
+#define __sanitizer_syscall_post_waitid(res, which, pid, infop, options, ru)   \
+  __sanitizer_syscall_post_impl_waitid(res, (long)(which), (long)(pid),        \
+                                       (long)(infop), (long)(options),         \
                                        (long)(ru))
-#define __sanitizer_syscall_pre_waitpid(pid, stat_addr, options)       \
-  __sanitizer_syscall_pre_impl_waitpid((long)(pid), (long)(stat_addr), \
+#define __sanitizer_syscall_pre_waitpid(pid, stat_addr, options)               \
+  __sanitizer_syscall_pre_impl_waitpid((long)(pid), (long)(stat_addr),         \
                                        (long)(options))
-#define __sanitizer_syscall_post_waitpid(res, pid, stat_addr, options)       \
-  __sanitizer_syscall_post_impl_waitpid(res, (long)(pid), (long)(stat_addr), \
+#define __sanitizer_syscall_post_waitpid(res, pid, stat_addr, options)         \
+  __sanitizer_syscall_post_impl_waitpid(res, (long)(pid), (long)(stat_addr),   \
                                         (long)(options))
-#define __sanitizer_syscall_pre_set_tid_address(tidptr) \
+#define __sanitizer_syscall_pre_set_tid_address(tidptr)                        \
   __sanitizer_syscall_pre_impl_set_tid_address((long)(tidptr))
-#define __sanitizer_syscall_post_set_tid_address(res, tidptr) \
+#define __sanitizer_syscall_post_set_tid_address(res, tidptr)                  \
   __sanitizer_syscall_post_impl_set_tid_address(res, (long)(tidptr))
-#define __sanitizer_syscall_pre_init_module(umod, len, uargs)         \
-  __sanitizer_syscall_pre_impl_init_module((long)(umod), (long)(len), \
+#define __sanitizer_syscall_pre_init_module(umod, len, uargs)                  \
+  __sanitizer_syscall_pre_impl_init_module((long)(umod), (long)(len),          \
                                            (long)(uargs))
-#define __sanitizer_syscall_post_init_module(res, umod, len, uargs)         \
-  __sanitizer_syscall_post_impl_init_module(res, (long)(umod), (long)(len), \
+#define __sanitizer_syscall_post_init_module(res, umod, len, uargs)            \
+  __sanitizer_syscall_post_impl_init_module(res, (long)(umod), (long)(len),    \
                                             (long)(uargs))
-#define __sanitizer_syscall_pre_delete_module(name_user, flags) \
+#define __sanitizer_syscall_pre_delete_module(name_user, flags)                \
   __sanitizer_syscall_pre_impl_delete_module((long)(name_user), (long)(flags))
-#define __sanitizer_syscall_post_delete_module(res, name_user, flags) \
-  __sanitizer_syscall_post_impl_delete_module(res, (long)(name_user), \
+#define __sanitizer_syscall_post_delete_module(res, name_user, flags)          \
+  __sanitizer_syscall_post_impl_delete_module(res, (long)(name_user),          \
                                               (long)(flags))
-#define __sanitizer_syscall_pre_rt_sigprocmask(how, set, oset, sigsetsize) \
-  __sanitizer_syscall_pre_impl_rt_sigprocmask(                             \
+#define __sanitizer_syscall_pre_rt_sigprocmask(how, set, oset, sigsetsize)     \
+  __sanitizer_syscall_pre_impl_rt_sigprocmask(                                 \
       (long)(how), (long)(set), (long)(oset), (long)(sigsetsize))
-#define __sanitizer_syscall_post_rt_sigprocmask(res, how, set, oset, \
-                                                sigsetsize)          \
-  __sanitizer_syscall_post_impl_rt_sigprocmask(                      \
+#define __sanitizer_syscall_post_rt_sigprocmask(res, how, set, oset,           \
+                                                sigsetsize)                    \
+  __sanitizer_syscall_post_impl_rt_sigprocmask(                                \
       res, (long)(how), (long)(set), (long)(oset), (long)(sigsetsize))
-#define __sanitizer_syscall_pre_rt_sigpending(set, sigsetsize) \
+#define __sanitizer_syscall_pre_rt_sigpending(set, sigsetsize)                 \
   __sanitizer_syscall_pre_impl_rt_sigpending((long)(set), (long)(sigsetsize))
-#define __sanitizer_syscall_post_rt_sigpending(res, set, sigsetsize) \
-  __sanitizer_syscall_post_impl_rt_sigpending(res, (long)(set),      \
+#define __sanitizer_syscall_post_rt_sigpending(res, set, sigsetsize)           \
+  __sanitizer_syscall_post_impl_rt_sigpending(res, (long)(set),                \
                                               (long)(sigsetsize))
-#define __sanitizer_syscall_pre_rt_sigtimedwait(uthese, uinfo, uts, \
-                                                sigsetsize)         \
-  __sanitizer_syscall_pre_impl_rt_sigtimedwait(                     \
+#define __sanitizer_syscall_pre_rt_sigtimedwait(uthese, uinfo, uts,            \
+                                                sigsetsize)                    \
+  __sanitizer_syscall_pre_impl_rt_sigtimedwait(                                \
       (long)(uthese), (long)(uinfo), (long)(uts), (long)(sigsetsize))
-#define __sanitizer_syscall_post_rt_sigtimedwait(res, uthese, uinfo, uts, \
-                                                 sigsetsize)              \
-  __sanitizer_syscall_post_impl_rt_sigtimedwait(                          \
+#define __sanitizer_syscall_post_rt_sigtimedwait(res, uthese, uinfo, uts,      \
+                                                 sigsetsize)                   \
+  __sanitizer_syscall_post_impl_rt_sigtimedwait(                               \
       res, (long)(uthese), (long)(uinfo), (long)(uts), (long)(sigsetsize))
-#define __sanitizer_syscall_pre_rt_tgsigqueueinfo(tgid, pid, sig, uinfo)    \
-  __sanitizer_syscall_pre_impl_rt_tgsigqueueinfo((long)(tgid), (long)(pid), \
+#define __sanitizer_syscall_pre_rt_tgsigqueueinfo(tgid, pid, sig, uinfo)       \
+  __sanitizer_syscall_pre_impl_rt_tgsigqueueinfo((long)(tgid), (long)(pid),    \
                                                  (long)(sig), (long)(uinfo))
 #define __sanitizer_syscall_post_rt_tgsigqueueinfo(res, tgid, pid, sig, uinfo) \
   __sanitizer_syscall_post_impl_rt_tgsigqueueinfo(                             \
       res, (long)(tgid), (long)(pid), (long)(sig), (long)(uinfo))
-#define __sanitizer_syscall_pre_kill(pid, sig) \
+#define __sanitizer_syscall_pre_kill(pid, sig)                                 \
   __sanitizer_syscall_pre_impl_kill((long)(pid), (long)(sig))
-#define __sanitizer_syscall_post_kill(res, pid, sig) \
+#define __sanitizer_syscall_post_kill(res, pid, sig)                           \
   __sanitizer_syscall_post_impl_kill(res, (long)(pid), (long)(sig))
-#define __sanitizer_syscall_pre_tgkill(tgid, pid, sig) \
+#define __sanitizer_syscall_pre_tgkill(tgid, pid, sig)                         \
   __sanitizer_syscall_pre_impl_tgkill((long)(tgid), (long)(pid), (long)(sig))
-#define __sanitizer_syscall_post_tgkill(res, tgid, pid, sig)           \
-  __sanitizer_syscall_post_impl_tgkill(res, (long)(tgid), (long)(pid), \
+#define __sanitizer_syscall_post_tgkill(res, tgid, pid, sig)                   \
+  __sanitizer_syscall_post_impl_tgkill(res, (long)(tgid), (long)(pid),         \
                                        (long)(sig))
-#define __sanitizer_syscall_pre_tkill(pid, sig) \
+#define __sanitizer_syscall_pre_tkill(pid, sig)                                \
   __sanitizer_syscall_pre_impl_tkill((long)(pid), (long)(sig))
-#define __sanitizer_syscall_post_tkill(res, pid, sig) \
+#define __sanitizer_syscall_post_tkill(res, pid, sig)                          \
   __sanitizer_syscall_post_impl_tkill(res, (long)(pid), (long)(sig))
-#define __sanitizer_syscall_pre_rt_sigqueueinfo(pid, sig, uinfo)         \
-  __sanitizer_syscall_pre_impl_rt_sigqueueinfo((long)(pid), (long)(sig), \
+#define __sanitizer_syscall_pre_rt_sigqueueinfo(pid, sig, uinfo)               \
+  __sanitizer_syscall_pre_impl_rt_sigqueueinfo((long)(pid), (long)(sig),       \
                                                (long)(uinfo))
 #define __sanitizer_syscall_post_rt_sigqueueinfo(res, pid, sig, uinfo)         \
   __sanitizer_syscall_post_impl_rt_sigqueueinfo(res, (long)(pid), (long)(sig), \
                                                 (long)(uinfo))
-#define __sanitizer_syscall_pre_sgetmask() \
+#define __sanitizer_syscall_pre_sgetmask()                                     \
   __sanitizer_syscall_pre_impl_sgetmask()
-#define __sanitizer_syscall_post_sgetmask(res) \
+#define __sanitizer_syscall_post_sgetmask(res)                                 \
   __sanitizer_syscall_post_impl_sgetmask(res)
-#define __sanitizer_syscall_pre_ssetmask(newmask) \
+#define __sanitizer_syscall_pre_ssetmask(newmask)                              \
   __sanitizer_syscall_pre_impl_ssetmask((long)(newmask))
-#define __sanitizer_syscall_post_ssetmask(res, newmask) \
+#define __sanitizer_syscall_post_ssetmask(res, newmask)                        \
   __sanitizer_syscall_post_impl_ssetmask(res, (long)(newmask))
-#define __sanitizer_syscall_pre_signal(sig, handler) \
+#define __sanitizer_syscall_pre_signal(sig, handler)                           \
   __sanitizer_syscall_pre_impl_signal((long)(sig), (long)(handler))
-#define __sanitizer_syscall_post_signal(res, sig, handler) \
+#define __sanitizer_syscall_post_signal(res, sig, handler)                     \
   __sanitizer_syscall_post_impl_signal(res, (long)(sig), (long)(handler))
 #define __sanitizer_syscall_pre_pause() __sanitizer_syscall_pre_impl_pause()
-#define __sanitizer_syscall_post_pause(res) \
+#define __sanitizer_syscall_post_pause(res)                                    \
   __sanitizer_syscall_post_impl_pause(res)
 #define __sanitizer_syscall_pre_sync() __sanitizer_syscall_pre_impl_sync()
-#define __sanitizer_syscall_post_sync(res) \
+#define __sanitizer_syscall_post_sync(res)                                     \
   __sanitizer_syscall_post_impl_sync(res)
-#define __sanitizer_syscall_pre_fsync(fd) \
+#define __sanitizer_syscall_pre_fsync(fd)                                      \
   __sanitizer_syscall_pre_impl_fsync((long)(fd))
-#define __sanitizer_syscall_post_fsync(res, fd) \
+#define __sanitizer_syscall_post_fsync(res, fd)                                \
   __sanitizer_syscall_post_impl_fsync(res, (long)(fd))
-#define __sanitizer_syscall_pre_fdatasync(fd) \
+#define __sanitizer_syscall_pre_fdatasync(fd)                                  \
   __sanitizer_syscall_pre_impl_fdatasync((long)(fd))
-#define __sanitizer_syscall_post_fdatasync(res, fd) \
+#define __sanitizer_syscall_post_fdatasync(res, fd)                            \
   __sanitizer_syscall_post_impl_fdatasync(res, (long)(fd))
-#define __sanitizer_syscall_pre_bdflush(func, data) \
+#define __sanitizer_syscall_pre_bdflush(func, data)                            \
   __sanitizer_syscall_pre_impl_bdflush((long)(func), (long)(data))
-#define __sanitizer_syscall_post_bdflush(res, func, data) \
+#define __sanitizer_syscall_post_bdflush(res, func, data)                      \
   __sanitizer_syscall_post_impl_bdflush(res, (long)(func), (long)(data))
-#define __sanitizer_syscall_pre_mount(dev_name, dir_name, type, flags, data) \
-  __sanitizer_syscall_pre_impl_mount((long)(dev_name), (long)(dir_name),     \
-                                     (long)(type), (long)(flags),            \
+#define __sanitizer_syscall_pre_mount(dev_name, dir_name, type, flags, data)   \
+  __sanitizer_syscall_pre_impl_mount((long)(dev_name), (long)(dir_name),       \
+                                     (long)(type), (long)(flags),              \
                                      (long)(data))
 #define __sanitizer_syscall_post_mount(res, dev_name, dir_name, type, flags,   \
                                        data)                                   \
   __sanitizer_syscall_post_impl_mount(res, (long)(dev_name), (long)(dir_name), \
                                       (long)(type), (long)(flags),             \
                                       (long)(data))
-#define __sanitizer_syscall_pre_umount(name, flags) \
+#define __sanitizer_syscall_pre_umount(name, flags)                            \
   __sanitizer_syscall_pre_impl_umount((long)(name), (long)(flags))
-#define __sanitizer_syscall_post_umount(res, name, flags) \
+#define __sanitizer_syscall_post_umount(res, name, flags)                      \
   __sanitizer_syscall_post_impl_umount(res, (long)(name), (long)(flags))
-#define __sanitizer_syscall_pre_oldumount(name) \
+#define __sanitizer_syscall_pre_oldumount(name)                                \
   __sanitizer_syscall_pre_impl_oldumount((long)(name))
-#define __sanitizer_syscall_post_oldumount(res, name) \
+#define __sanitizer_syscall_post_oldumount(res, name)                          \
   __sanitizer_syscall_post_impl_oldumount(res, (long)(name))
-#define __sanitizer_syscall_pre_truncate(path, length) \
+#define __sanitizer_syscall_pre_truncate(path, length)                         \
   __sanitizer_syscall_pre_impl_truncate((long)(path), (long)(length))
-#define __sanitizer_syscall_post_truncate(res, path, length) \
+#define __sanitizer_syscall_post_truncate(res, path, length)                   \
   __sanitizer_syscall_post_impl_truncate(res, (long)(path), (long)(length))
-#define __sanitizer_syscall_pre_ftruncate(fd, length) \
+#define __sanitizer_syscall_pre_ftruncate(fd, length)                          \
   __sanitizer_syscall_pre_impl_ftruncate((long)(fd), (long)(length))
-#define __sanitizer_syscall_post_ftruncate(res, fd, length) \
+#define __sanitizer_syscall_post_ftruncate(res, fd, length)                    \
   __sanitizer_syscall_post_impl_ftruncate(res, (long)(fd), (long)(length))
-#define __sanitizer_syscall_pre_stat(filename, statbuf) \
+#define __sanitizer_syscall_pre_stat(filename, statbuf)                        \
   __sanitizer_syscall_pre_impl_stat((long)(filename), (long)(statbuf))
-#define __sanitizer_syscall_post_stat(res, filename, statbuf) \
+#define __sanitizer_syscall_post_stat(res, filename, statbuf)                  \
   __sanitizer_syscall_post_impl_stat(res, (long)(filename), (long)(statbuf))
-#define __sanitizer_syscall_pre_statfs(path, buf) \
+#define __sanitizer_syscall_pre_statfs(path, buf)                              \
   __sanitizer_syscall_pre_impl_statfs((long)(path), (long)(buf))
-#define __sanitizer_syscall_post_statfs(res, path, buf) \
+#define __sanitizer_syscall_post_statfs(res, path, buf)                        \
   __sanitizer_syscall_post_impl_statfs(res, (long)(path), (long)(buf))
-#define __sanitizer_syscall_pre_statfs64(path, sz, buf) \
+#define __sanitizer_syscall_pre_statfs64(path, sz, buf)                        \
   __sanitizer_syscall_pre_impl_statfs64((long)(path), (long)(sz), (long)(buf))
-#define __sanitizer_syscall_post_statfs64(res, path, sz, buf)           \
-  __sanitizer_syscall_post_impl_statfs64(res, (long)(path), (long)(sz), \
+#define __sanitizer_syscall_post_statfs64(res, path, sz, buf)                  \
+  __sanitizer_syscall_post_impl_statfs64(res, (long)(path), (long)(sz),        \
                                          (long)(buf))
-#define __sanitizer_syscall_pre_fstatfs(fd, buf) \
+#define __sanitizer_syscall_pre_fstatfs(fd, buf)                               \
   __sanitizer_syscall_pre_impl_fstatfs((long)(fd), (long)(buf))
-#define __sanitizer_syscall_post_fstatfs(res, fd, buf) \
+#define __sanitizer_syscall_post_fstatfs(res, fd, buf)                         \
   __sanitizer_syscall_post_impl_fstatfs(res, (long)(fd), (long)(buf))
-#define __sanitizer_syscall_pre_fstatfs64(fd, sz, buf) \
+#define __sanitizer_syscall_pre_fstatfs64(fd, sz, buf)                         \
   __sanitizer_syscall_pre_impl_fstatfs64((long)(fd), (long)(sz), (long)(buf))
-#define __sanitizer_syscall_post_fstatfs64(res, fd, sz, buf)           \
-  __sanitizer_syscall_post_impl_fstatfs64(res, (long)(fd), (long)(sz), \
+#define __sanitizer_syscall_post_fstatfs64(res, fd, sz, buf)                   \
+  __sanitizer_syscall_post_impl_fstatfs64(res, (long)(fd), (long)(sz),         \
                                           (long)(buf))
-#define __sanitizer_syscall_pre_lstat(filename, statbuf) \
+#define __sanitizer_syscall_pre_lstat(filename, statbuf)                       \
   __sanitizer_syscall_pre_impl_lstat((long)(filename), (long)(statbuf))
-#define __sanitizer_syscall_post_lstat(res, filename, statbuf) \
+#define __sanitizer_syscall_post_lstat(res, filename, statbuf)                 \
   __sanitizer_syscall_post_impl_lstat(res, (long)(filename), (long)(statbuf))
-#define __sanitizer_syscall_pre_fstat(fd, statbuf) \
+#define __sanitizer_syscall_pre_fstat(fd, statbuf)                             \
   __sanitizer_syscall_pre_impl_fstat((long)(fd), (long)(statbuf))
-#define __sanitizer_syscall_post_fstat(res, fd, statbuf) \
+#define __sanitizer_syscall_post_fstat(res, fd, statbuf)                       \
   __sanitizer_syscall_post_impl_fstat(res, (long)(fd), (long)(statbuf))
-#define __sanitizer_syscall_pre_newstat(filename, statbuf) \
+#define __sanitizer_syscall_pre_newstat(filename, statbuf)                     \
   __sanitizer_syscall_pre_impl_newstat((long)(filename), (long)(statbuf))
-#define __sanitizer_syscall_post_newstat(res, filename, statbuf) \
+#define __sanitizer_syscall_post_newstat(res, filename, statbuf)               \
   __sanitizer_syscall_post_impl_newstat(res, (long)(filename), (long)(statbuf))
-#define __sanitizer_syscall_pre_newlstat(filename, statbuf) \
+#define __sanitizer_syscall_pre_newlstat(filename, statbuf)                    \
   __sanitizer_syscall_pre_impl_newlstat((long)(filename), (long)(statbuf))
-#define __sanitizer_syscall_post_newlstat(res, filename, statbuf) \
+#define __sanitizer_syscall_post_newlstat(res, filename, statbuf)              \
   __sanitizer_syscall_post_impl_newlstat(res, (long)(filename), (long)(statbuf))
-#define __sanitizer_syscall_pre_newfstat(fd, statbuf) \
+#define __sanitizer_syscall_pre_newfstat(fd, statbuf)                          \
   __sanitizer_syscall_pre_impl_newfstat((long)(fd), (long)(statbuf))
-#define __sanitizer_syscall_post_newfstat(res, fd, statbuf) \
+#define __sanitizer_syscall_post_newfstat(res, fd, statbuf)                    \
   __sanitizer_syscall_post_impl_newfstat(res, (long)(fd), (long)(statbuf))
-#define __sanitizer_syscall_pre_ustat(dev, ubuf) \
+#define __sanitizer_syscall_pre_ustat(dev, ubuf)                               \
   __sanitizer_syscall_pre_impl_ustat((long)(dev), (long)(ubuf))
-#define __sanitizer_syscall_post_ustat(res, dev, ubuf) \
+#define __sanitizer_syscall_post_ustat(res, dev, ubuf)                         \
   __sanitizer_syscall_post_impl_ustat(res, (long)(dev), (long)(ubuf))
-#define __sanitizer_syscall_pre_stat64(filename, statbuf) \
+#define __sanitizer_syscall_pre_stat64(filename, statbuf)                      \
   __sanitizer_syscall_pre_impl_stat64((long)(filename), (long)(statbuf))
-#define __sanitizer_syscall_post_stat64(res, filename, statbuf) \
+#define __sanitizer_syscall_post_stat64(res, filename, statbuf)                \
   __sanitizer_syscall_post_impl_stat64(res, (long)(filename), (long)(statbuf))
-#define __sanitizer_syscall_pre_fstat64(fd, statbuf) \
+#define __sanitizer_syscall_pre_fstat64(fd, statbuf)                           \
   __sanitizer_syscall_pre_impl_fstat64((long)(fd), (long)(statbuf))
-#define __sanitizer_syscall_post_fstat64(res, fd, statbuf) \
+#define __sanitizer_syscall_post_fstat64(res, fd, statbuf)                     \
   __sanitizer_syscall_post_impl_fstat64(res, (long)(fd), (long)(statbuf))
-#define __sanitizer_syscall_pre_lstat64(filename, statbuf) \
+#define __sanitizer_syscall_pre_lstat64(filename, statbuf)                     \
   __sanitizer_syscall_pre_impl_lstat64((long)(filename), (long)(statbuf))
-#define __sanitizer_syscall_post_lstat64(res, filename, statbuf) \
+#define __sanitizer_syscall_post_lstat64(res, filename, statbuf)               \
   __sanitizer_syscall_post_impl_lstat64(res, (long)(filename), (long)(statbuf))
-#define __sanitizer_syscall_pre_setxattr(path, name, value, size, flags) \
-  __sanitizer_syscall_pre_impl_setxattr(                                 \
+#define __sanitizer_syscall_pre_setxattr(path, name, value, size, flags)       \
+  __sanitizer_syscall_pre_impl_setxattr(                                       \
       (long)(path), (long)(name), (long)(value), (long)(size), (long)(flags))
 #define __sanitizer_syscall_post_setxattr(res, path, name, value, size, flags) \
   __sanitizer_syscall_post_impl_setxattr(res, (long)(path), (long)(name),      \
                                          (long)(value), (long)(size),          \
                                          (long)(flags))
-#define __sanitizer_syscall_pre_lsetxattr(path, name, value, size, flags) \
-  __sanitizer_syscall_pre_impl_lsetxattr(                                 \
+#define __sanitizer_syscall_pre_lsetxattr(path, name, value, size, flags)      \
+  __sanitizer_syscall_pre_impl_lsetxattr(                                      \
       (long)(path), (long)(name), (long)(value), (long)(size), (long)(flags))
-#define __sanitizer_syscall_post_lsetxattr(res, path, name, value, size,   \
-                                           flags)                          \
-  __sanitizer_syscall_post_impl_lsetxattr(res, (long)(path), (long)(name), \
-                                          (long)(value), (long)(size),     \
+#define __sanitizer_syscall_post_lsetxattr(res, path, name, value, size,       \
+                                           flags)                              \
+  __sanitizer_syscall_post_impl_lsetxattr(res, (long)(path), (long)(name),     \
+                                          (long)(value), (long)(size),         \
                                           (long)(flags))
-#define __sanitizer_syscall_pre_fsetxattr(fd, name, value, size, flags) \
-  __sanitizer_syscall_pre_impl_fsetxattr(                               \
+#define __sanitizer_syscall_pre_fsetxattr(fd, name, value, size, flags)        \
+  __sanitizer_syscall_pre_impl_fsetxattr(                                      \
       (long)(fd), (long)(name), (long)(value), (long)(size), (long)(flags))
-#define __sanitizer_syscall_post_fsetxattr(res, fd, name, value, size, flags) \
-  __sanitizer_syscall_post_impl_fsetxattr(res, (long)(fd), (long)(name),      \
-                                          (long)(value), (long)(size),        \
+#define __sanitizer_syscall_post_fsetxattr(res, fd, name, value, size, flags)  \
+  __sanitizer_syscall_post_impl_fsetxattr(res, (long)(fd), (long)(name),       \
+                                          (long)(value), (long)(size),         \
                                           (long)(flags))
-#define __sanitizer_syscall_pre_getxattr(path, name, value, size)   \
-  __sanitizer_syscall_pre_impl_getxattr((long)(path), (long)(name), \
+#define __sanitizer_syscall_pre_getxattr(path, name, value, size)              \
+  __sanitizer_syscall_pre_impl_getxattr((long)(path), (long)(name),            \
                                         (long)(value), (long)(size))
-#define __sanitizer_syscall_post_getxattr(res, path, name, value, size)   \
-  __sanitizer_syscall_post_impl_getxattr(res, (long)(path), (long)(name), \
+#define __sanitizer_syscall_post_getxattr(res, path, name, value, size)        \
+  __sanitizer_syscall_post_impl_getxattr(res, (long)(path), (long)(name),      \
                                          (long)(value), (long)(size))
-#define __sanitizer_syscall_pre_lgetxattr(path, name, value, size)   \
-  __sanitizer_syscall_pre_impl_lgetxattr((long)(path), (long)(name), \
+#define __sanitizer_syscall_pre_lgetxattr(path, name, value, size)             \
+  __sanitizer_syscall_pre_impl_lgetxattr((long)(path), (long)(name),           \
                                          (long)(value), (long)(size))
-#define __sanitizer_syscall_post_lgetxattr(res, path, name, value, size)   \
-  __sanitizer_syscall_post_impl_lgetxattr(res, (long)(path), (long)(name), \
+#define __sanitizer_syscall_post_lgetxattr(res, path, name, value, size)       \
+  __sanitizer_syscall_post_impl_lgetxattr(res, (long)(path), (long)(name),     \
                                           (long)(value), (long)(size))
-#define __sanitizer_syscall_pre_fgetxattr(fd, name, value, size)   \
-  __sanitizer_syscall_pre_impl_fgetxattr((long)(fd), (long)(name), \
+#define __sanitizer_syscall_pre_fgetxattr(fd, name, value, size)               \
+  __sanitizer_syscall_pre_impl_fgetxattr((long)(fd), (long)(name),             \
                                          (long)(value), (long)(size))
-#define __sanitizer_syscall_post_fgetxattr(res, fd, name, value, size)   \
-  __sanitizer_syscall_post_impl_fgetxattr(res, (long)(fd), (long)(name), \
+#define __sanitizer_syscall_post_fgetxattr(res, fd, name, value, size)         \
+  __sanitizer_syscall_post_impl_fgetxattr(res, (long)(fd), (long)(name),       \
                                           (long)(value), (long)(size))
-#define __sanitizer_syscall_pre_listxattr(path, list, size)          \
-  __sanitizer_syscall_pre_impl_listxattr((long)(path), (long)(list), \
+#define __sanitizer_syscall_pre_listxattr(path, list, size)                    \
+  __sanitizer_syscall_pre_impl_listxattr((long)(path), (long)(list),           \
                                          (long)(size))
-#define __sanitizer_syscall_post_listxattr(res, path, list, size)          \
-  __sanitizer_syscall_post_impl_listxattr(res, (long)(path), (long)(list), \
+#define __sanitizer_syscall_post_listxattr(res, path, list, size)              \
+  __sanitizer_syscall_post_impl_listxattr(res, (long)(path), (long)(list),     \
                                           (long)(size))
-#define __sanitizer_syscall_pre_llistxattr(path, list, size)          \
-  __sanitizer_syscall_pre_impl_llistxattr((long)(path), (long)(list), \
+#define __sanitizer_syscall_pre_llistxattr(path, list, size)                   \
+  __sanitizer_syscall_pre_impl_llistxattr((long)(path), (long)(list),          \
                                           (long)(size))
-#define __sanitizer_syscall_post_llistxattr(res, path, list, size)          \
-  __sanitizer_syscall_post_impl_llistxattr(res, (long)(path), (long)(list), \
+#define __sanitizer_syscall_post_llistxattr(res, path, list, size)             \
+  __sanitizer_syscall_post_impl_llistxattr(res, (long)(path), (long)(list),    \
                                            (long)(size))
-#define __sanitizer_syscall_pre_flistxattr(fd, list, size)          \
-  __sanitizer_syscall_pre_impl_flistxattr((long)(fd), (long)(list), \
+#define __sanitizer_syscall_pre_flistxattr(fd, list, size)                     \
+  __sanitizer_syscall_pre_impl_flistxattr((long)(fd), (long)(list),            \
                                           (long)(size))
-#define __sanitizer_syscall_post_flistxattr(res, fd, list, size)          \
-  __sanitizer_syscall_post_impl_flistxattr(res, (long)(fd), (long)(list), \
+#define __sanitizer_syscall_post_flistxattr(res, fd, list, size)               \
+  __sanitizer_syscall_post_impl_flistxattr(res, (long)(fd), (long)(list),      \
                                            (long)(size))
-#define __sanitizer_syscall_pre_removexattr(path, name) \
+#define __sanitizer_syscall_pre_removexattr(path, name)                        \
   __sanitizer_syscall_pre_impl_removexattr((long)(path), (long)(name))
-#define __sanitizer_syscall_post_removexattr(res, path, name) \
+#define __sanitizer_syscall_post_removexattr(res, path, name)                  \
   __sanitizer_syscall_post_impl_removexattr(res, (long)(path), (long)(name))
-#define __sanitizer_syscall_pre_lremovexattr(path, name) \
+#define __sanitizer_syscall_pre_lremovexattr(path, name)                       \
   __sanitizer_syscall_pre_impl_lremovexattr((long)(path), (long)(name))
-#define __sanitizer_syscall_post_lremovexattr(res, path, name) \
+#define __sanitizer_syscall_post_lremovexattr(res, path, name)                 \
   __sanitizer_syscall_post_impl_lremovexattr(res, (long)(path), (long)(name))
-#define __sanitizer_syscall_pre_fremovexattr(fd, name) \
+#define __sanitizer_syscall_pre_fremovexattr(fd, name)                         \
   __sanitizer_syscall_pre_impl_fremovexattr((long)(fd), (long)(name))
-#define __sanitizer_syscall_post_fremovexattr(res, fd, name) \
+#define __sanitizer_syscall_post_fremovexattr(res, fd, name)                   \
   __sanitizer_syscall_post_impl_fremovexattr(res, (long)(fd), (long)(name))
-#define __sanitizer_syscall_pre_brk(brk) \
+#define __sanitizer_syscall_pre_brk(brk)                                       \
   __sanitizer_syscall_pre_impl_brk((long)(brk))
-#define __sanitizer_syscall_post_brk(res, brk) \
+#define __sanitizer_syscall_post_brk(res, brk)                                 \
   __sanitizer_syscall_post_impl_brk(res, (long)(brk))
-#define __sanitizer_syscall_pre_mprotect(start, len, prot)          \
-  __sanitizer_syscall_pre_impl_mprotect((long)(start), (long)(len), \
+#define __sanitizer_syscall_pre_mprotect(start, len, prot)                     \
+  __sanitizer_syscall_pre_impl_mprotect((long)(start), (long)(len),            \
                                         (long)(prot))
-#define __sanitizer_syscall_post_mprotect(res, start, len, prot)          \
-  __sanitizer_syscall_post_impl_mprotect(res, (long)(start), (long)(len), \
+#define __sanitizer_syscall_post_mprotect(res, start, len, prot)               \
+  __sanitizer_syscall_post_impl_mprotect(res, (long)(start), (long)(len),      \
                                          (long)(prot))
-#define __sanitizer_syscall_pre_mremap(addr, old_len, new_len, flags, \
-                                       new_addr)                      \
-  __sanitizer_syscall_pre_impl_mremap((long)(addr), (long)(old_len),  \
-                                      (long)(new_len), (long)(flags), \
+#define __sanitizer_syscall_pre_mremap(addr, old_len, new_len, flags,          \
+                                       new_addr)                               \
+  __sanitizer_syscall_pre_impl_mremap((long)(addr), (long)(old_len),           \
+                                      (long)(new_len), (long)(flags),          \
                                       (long)(new_addr))
-#define __sanitizer_syscall_post_mremap(res, addr, old_len, new_len, flags, \
-                                        new_addr)                           \
-  __sanitizer_syscall_post_impl_mremap(res, (long)(addr), (long)(old_len),  \
-                                       (long)(new_len), (long)(flags),      \
+#define __sanitizer_syscall_post_mremap(res, addr, old_len, new_len, flags,    \
+                                        new_addr)                              \
+  __sanitizer_syscall_post_impl_mremap(res, (long)(addr), (long)(old_len),     \
+                                       (long)(new_len), (long)(flags),         \
                                        (long)(new_addr))
-#define __sanitizer_syscall_pre_remap_file_pages(start, size, prot, pgoff, \
-                                                 flags)                    \
-  __sanitizer_syscall_pre_impl_remap_file_pages(                           \
+#define __sanitizer_syscall_pre_remap_file_pages(start, size, prot, pgoff,     \
+                                                 flags)                        \
+  __sanitizer_syscall_pre_impl_remap_file_pages(                               \
       (long)(start), (long)(size), (long)(prot), (long)(pgoff), (long)(flags))
-#define __sanitizer_syscall_post_remap_file_pages(res, start, size, prot,    \
-                                                  pgoff, flags)              \
-  __sanitizer_syscall_post_impl_remap_file_pages(res, (long)(start),         \
-                                                 (long)(size), (long)(prot), \
+#define __sanitizer_syscall_post_remap_file_pages(res, start, size, prot,      \
+                                                  pgoff, flags)                \
+  __sanitizer_syscall_post_impl_remap_file_pages(res, (long)(start),           \
+                                                 (long)(size), (long)(prot),   \
                                                  (long)(pgoff), (long)(flags))
-#define __sanitizer_syscall_pre_msync(start, len, flags) \
+#define __sanitizer_syscall_pre_msync(start, len, flags)                       \
   __sanitizer_syscall_pre_impl_msync((long)(start), (long)(len), (long)(flags))
-#define __sanitizer_syscall_post_msync(res, start, len, flags)         \
-  __sanitizer_syscall_post_impl_msync(res, (long)(start), (long)(len), \
+#define __sanitizer_syscall_post_msync(res, start, len, flags)                 \
+  __sanitizer_syscall_post_impl_msync(res, (long)(start), (long)(len),         \
                                       (long)(flags))
-#define __sanitizer_syscall_pre_munmap(addr, len) \
+#define __sanitizer_syscall_pre_munmap(addr, len)                              \
   __sanitizer_syscall_pre_impl_munmap((long)(addr), (long)(len))
-#define __sanitizer_syscall_post_munmap(res, addr, len) \
+#define __sanitizer_syscall_post_munmap(res, addr, len)                        \
   __sanitizer_syscall_post_impl_munmap(res, (long)(addr), (long)(len))
-#define __sanitizer_syscall_pre_mlock(start, len) \
+#define __sanitizer_syscall_pre_mlock(start, len)                              \
   __sanitizer_syscall_pre_impl_mlock((long)(start), (long)(len))
-#define __sanitizer_syscall_post_mlock(res, start, len) \
+#define __sanitizer_syscall_post_mlock(res, start, len)                        \
   __sanitizer_syscall_post_impl_mlock(res, (long)(start), (long)(len))
-#define __sanitizer_syscall_pre_munlock(start, len) \
+#define __sanitizer_syscall_pre_munlock(start, len)                            \
   __sanitizer_syscall_pre_impl_munlock((long)(start), (long)(len))
-#define __sanitizer_syscall_post_munlock(res, start, len) \
+#define __sanitizer_syscall_post_munlock(res, start, len)                      \
   __sanitizer_syscall_post_impl_munlock(res, (long)(start), (long)(len))
-#define __sanitizer_syscall_pre_mlockall(flags) \
+#define __sanitizer_syscall_pre_mlockall(flags)                                \
   __sanitizer_syscall_pre_impl_mlockall((long)(flags))
-#define __sanitizer_syscall_post_mlockall(res, flags) \
+#define __sanitizer_syscall_post_mlockall(res, flags)                          \
   __sanitizer_syscall_post_impl_mlockall(res, (long)(flags))
-#define __sanitizer_syscall_pre_munlockall() \
+#define __sanitizer_syscall_pre_munlockall()                                   \
   __sanitizer_syscall_pre_impl_munlockall()
-#define __sanitizer_syscall_post_munlockall(res) \
+#define __sanitizer_syscall_post_munlockall(res)                               \
   __sanitizer_syscall_post_impl_munlockall(res)
-#define __sanitizer_syscall_pre_madvise(start, len, behavior)      \
-  __sanitizer_syscall_pre_impl_madvise((long)(start), (long)(len), \
+#define __sanitizer_syscall_pre_madvise(start, len, behavior)                  \
+  __sanitizer_syscall_pre_impl_madvise((long)(start), (long)(len),             \
                                        (long)(behavior))
-#define __sanitizer_syscall_post_madvise(res, start, len, behavior)      \
-  __sanitizer_syscall_post_impl_madvise(res, (long)(start), (long)(len), \
+#define __sanitizer_syscall_post_madvise(res, start, len, behavior)            \
+  __sanitizer_syscall_post_impl_madvise(res, (long)(start), (long)(len),       \
                                         (long)(behavior))
-#define __sanitizer_syscall_pre_mincore(start, len, vec) \
+#define __sanitizer_syscall_pre_mincore(start, len, vec)                       \
   __sanitizer_syscall_pre_impl_mincore((long)(start), (long)(len), (long)(vec))
-#define __sanitizer_syscall_post_mincore(res, start, len, vec)           \
-  __sanitizer_syscall_post_impl_mincore(res, (long)(start), (long)(len), \
+#define __sanitizer_syscall_post_mincore(res, start, len, vec)                 \
+  __sanitizer_syscall_post_impl_mincore(res, (long)(start), (long)(len),       \
                                         (long)(vec))
-#define __sanitizer_syscall_pre_pivot_root(new_root, put_old) \
+#define __sanitizer_syscall_pre_pivot_root(new_root, put_old)                  \
   __sanitizer_syscall_pre_impl_pivot_root((long)(new_root), (long)(put_old))
-#define __sanitizer_syscall_post_pivot_root(res, new_root, put_old) \
-  __sanitizer_syscall_post_impl_pivot_root(res, (long)(new_root),   \
+#define __sanitizer_syscall_post_pivot_root(res, new_root, put_old)            \
+  __sanitizer_syscall_post_impl_pivot_root(res, (long)(new_root),              \
                                            (long)(put_old))
-#define __sanitizer_syscall_pre_chroot(filename) \
+#define __sanitizer_syscall_pre_chroot(filename)                               \
   __sanitizer_syscall_pre_impl_chroot((long)(filename))
-#define __sanitizer_syscall_post_chroot(res, filename) \
+#define __sanitizer_syscall_post_chroot(res, filename)                         \
   __sanitizer_syscall_post_impl_chroot(res, (long)(filename))
-#define __sanitizer_syscall_pre_mknod(filename, mode, dev)           \
-  __sanitizer_syscall_pre_impl_mknod((long)(filename), (long)(mode), \
+#define __sanitizer_syscall_pre_mknod(filename, mode, dev)                     \
+  __sanitizer_syscall_pre_impl_mknod((long)(filename), (long)(mode),           \
                                      (long)(dev))
-#define __sanitizer_syscall_post_mknod(res, filename, mode, dev)           \
-  __sanitizer_syscall_post_impl_mknod(res, (long)(filename), (long)(mode), \
+#define __sanitizer_syscall_post_mknod(res, filename, mode, dev)               \
+  __sanitizer_syscall_post_impl_mknod(res, (long)(filename), (long)(mode),     \
                                       (long)(dev))
-#define __sanitizer_syscall_pre_link(oldname, newname) \
+#define __sanitizer_syscall_pre_link(oldname, newname)                         \
   __sanitizer_syscall_pre_impl_link((long)(oldname), (long)(newname))
-#define __sanitizer_syscall_post_link(res, oldname, newname) \
+#define __sanitizer_syscall_post_link(res, oldname, newname)                   \
   __sanitizer_syscall_post_impl_link(res, (long)(oldname), (long)(newname))
-#define __sanitizer_syscall_pre_symlink(old, new_) \
+#define __sanitizer_syscall_pre_symlink(old, new_)                             \
   __sanitizer_syscall_pre_impl_symlink((long)(old), (long)(new_))
-#define __sanitizer_syscall_post_symlink(res, old, new_) \
+#define __sanitizer_syscall_post_symlink(res, old, new_)                       \
   __sanitizer_syscall_post_impl_symlink(res, (long)(old), (long)(new_))
-#define __sanitizer_syscall_pre_unlink(pathname) \
+#define __sanitizer_syscall_pre_unlink(pathname)                               \
   __sanitizer_syscall_pre_impl_unlink((long)(pathname))
-#define __sanitizer_syscall_post_unlink(res, pathname) \
+#define __sanitizer_syscall_post_unlink(res, pathname)                         \
   __sanitizer_syscall_post_impl_unlink(res, (long)(pathname))
-#define __sanitizer_syscall_pre_rename(oldname, newname) \
+#define __sanitizer_syscall_pre_rename(oldname, newname)                       \
   __sanitizer_syscall_pre_impl_rename((long)(oldname), (long)(newname))
-#define __sanitizer_syscall_post_rename(res, oldname, newname) \
+#define __sanitizer_syscall_post_rename(res, oldname, newname)                 \
   __sanitizer_syscall_post_impl_rename(res, (long)(oldname), (long)(newname))
-#define __sanitizer_syscall_pre_chmod(filename, mode) \
+#define __sanitizer_syscall_pre_chmod(filename, mode)                          \
   __sanitizer_syscall_pre_impl_chmod((long)(filename), (long)(mode))
-#define __sanitizer_syscall_post_chmod(res, filename, mode) \
+#define __sanitizer_syscall_post_chmod(res, filename, mode)                    \
   __sanitizer_syscall_post_impl_chmod(res, (long)(filename), (long)(mode))
-#define __sanitizer_syscall_pre_fchmod(fd, mode) \
+#define __sanitizer_syscall_pre_fchmod(fd, mode)                               \
   __sanitizer_syscall_pre_impl_fchmod((long)(fd), (long)(mode))
-#define __sanitizer_syscall_post_fchmod(res, fd, mode) \
+#define __sanitizer_syscall_post_fchmod(res, fd, mode)                         \
   __sanitizer_syscall_post_impl_fchmod(res, (long)(fd), (long)(mode))
-#define __sanitizer_syscall_pre_fcntl(fd, cmd, arg) \
+#define __sanitizer_syscall_pre_fcntl(fd, cmd, arg)                            \
   __sanitizer_syscall_pre_impl_fcntl((long)(fd), (long)(cmd), (long)(arg))
-#define __sanitizer_syscall_post_fcntl(res, fd, cmd, arg) \
+#define __sanitizer_syscall_post_fcntl(res, fd, cmd, arg)                      \
   __sanitizer_syscall_post_impl_fcntl(res, (long)(fd), (long)(cmd), (long)(arg))
-#define __sanitizer_syscall_pre_fcntl64(fd, cmd, arg) \
+#define __sanitizer_syscall_pre_fcntl64(fd, cmd, arg)                          \
   __sanitizer_syscall_pre_impl_fcntl64((long)(fd), (long)(cmd), (long)(arg))
-#define __sanitizer_syscall_post_fcntl64(res, fd, cmd, arg)           \
-  __sanitizer_syscall_post_impl_fcntl64(res, (long)(fd), (long)(cmd), \
+#define __sanitizer_syscall_post_fcntl64(res, fd, cmd, arg)                    \
+  __sanitizer_syscall_post_impl_fcntl64(res, (long)(fd), (long)(cmd),          \
                                         (long)(arg))
-#define __sanitizer_syscall_pre_pipe(fildes) \
+#define __sanitizer_syscall_pre_pipe(fildes)                                   \
   __sanitizer_syscall_pre_impl_pipe((long)(fildes))
-#define __sanitizer_syscall_post_pipe(res, fildes) \
+#define __sanitizer_syscall_post_pipe(res, fildes)                             \
   __sanitizer_syscall_post_impl_pipe(res, (long)(fildes))
-#define __sanitizer_syscall_pre_pipe2(fildes, flags) \
+#define __sanitizer_syscall_pre_pipe2(fildes, flags)                           \
   __sanitizer_syscall_pre_impl_pipe2((long)(fildes), (long)(flags))
-#define __sanitizer_syscall_post_pipe2(res, fildes, flags) \
+#define __sanitizer_syscall_post_pipe2(res, fildes, flags)                     \
   __sanitizer_syscall_post_impl_pipe2(res, (long)(fildes), (long)(flags))
-#define __sanitizer_syscall_pre_dup(fildes) \
+#define __sanitizer_syscall_pre_dup(fildes)                                    \
   __sanitizer_syscall_pre_impl_dup((long)(fildes))
-#define __sanitizer_syscall_post_dup(res, fildes) \
+#define __sanitizer_syscall_post_dup(res, fildes)                              \
   __sanitizer_syscall_post_impl_dup(res, (long)(fildes))
-#define __sanitizer_syscall_pre_dup2(oldfd, newfd) \
+#define __sanitizer_syscall_pre_dup2(oldfd, newfd)                             \
   __sanitizer_syscall_pre_impl_dup2((long)(oldfd), (long)(newfd))
-#define __sanitizer_syscall_post_dup2(res, oldfd, newfd) \
+#define __sanitizer_syscall_post_dup2(res, oldfd, newfd)                       \
   __sanitizer_syscall_post_impl_dup2(res, (long)(oldfd), (long)(newfd))
-#define __sanitizer_syscall_pre_dup3(oldfd, newfd, flags) \
+#define __sanitizer_syscall_pre_dup3(oldfd, newfd, flags)                      \
   __sanitizer_syscall_pre_impl_dup3((long)(oldfd), (long)(newfd), (long)(flags))
-#define __sanitizer_syscall_post_dup3(res, oldfd, newfd, flags)         \
-  __sanitizer_syscall_post_impl_dup3(res, (long)(oldfd), (long)(newfd), \
+#define __sanitizer_syscall_post_dup3(res, oldfd, newfd, flags)                \
+  __sanitizer_syscall_post_impl_dup3(res, (long)(oldfd), (long)(newfd),        \
                                      (long)(flags))
-#define __sanitizer_syscall_pre_ioperm(from, num, on) \
+#define __sanitizer_syscall_pre_ioperm(from, num, on)                          \
   __sanitizer_syscall_pre_impl_ioperm((long)(from), (long)(num), (long)(on))
-#define __sanitizer_syscall_post_ioperm(res, from, num, on)            \
-  __sanitizer_syscall_post_impl_ioperm(res, (long)(from), (long)(num), \
+#define __sanitizer_syscall_post_ioperm(res, from, num, on)                    \
+  __sanitizer_syscall_post_impl_ioperm(res, (long)(from), (long)(num),         \
                                        (long)(on))
-#define __sanitizer_syscall_pre_ioctl(fd, cmd, arg) \
+#define __sanitizer_syscall_pre_ioctl(fd, cmd, arg)                            \
   __sanitizer_syscall_pre_impl_ioctl((long)(fd), (long)(cmd), (long)(arg))
-#define __sanitizer_syscall_post_ioctl(res, fd, cmd, arg) \
+#define __sanitizer_syscall_post_ioctl(res, fd, cmd, arg)                      \
   __sanitizer_syscall_post_impl_ioctl(res, (long)(fd), (long)(cmd), (long)(arg))
-#define __sanitizer_syscall_pre_flock(fd, cmd) \
+#define __sanitizer_syscall_pre_flock(fd, cmd)                                 \
   __sanitizer_syscall_pre_impl_flock((long)(fd), (long)(cmd))
-#define __sanitizer_syscall_post_flock(res, fd, cmd) \
+#define __sanitizer_syscall_post_flock(res, fd, cmd)                           \
   __sanitizer_syscall_post_impl_flock(res, (long)(fd), (long)(cmd))
-#define __sanitizer_syscall_pre_io_setup(nr_reqs, ctx) \
+#define __sanitizer_syscall_pre_io_setup(nr_reqs, ctx)                         \
   __sanitizer_syscall_pre_impl_io_setup((long)(nr_reqs), (long)(ctx))
-#define __sanitizer_syscall_post_io_setup(res, nr_reqs, ctx) \
+#define __sanitizer_syscall_post_io_setup(res, nr_reqs, ctx)                   \
   __sanitizer_syscall_post_impl_io_setup(res, (long)(nr_reqs), (long)(ctx))
-#define __sanitizer_syscall_pre_io_destroy(ctx) \
+#define __sanitizer_syscall_pre_io_destroy(ctx)                                \
   __sanitizer_syscall_pre_impl_io_destroy((long)(ctx))
-#define __sanitizer_syscall_post_io_destroy(res, ctx) \
+#define __sanitizer_syscall_post_io_destroy(res, ctx)                          \
   __sanitizer_syscall_post_impl_io_destroy(res, (long)(ctx))
-#define __sanitizer_syscall_pre_io_getevents(ctx_id, min_nr, nr, events,    \
-                                             timeout)                       \
-  __sanitizer_syscall_pre_impl_io_getevents((long)(ctx_id), (long)(min_nr), \
-                                            (long)(nr), (long)(events),     \
+#define __sanitizer_syscall_pre_io_getevents(ctx_id, min_nr, nr, events,       \
+                                             timeout)                          \
+  __sanitizer_syscall_pre_impl_io_getevents((long)(ctx_id), (long)(min_nr),    \
+                                            (long)(nr), (long)(events),        \
                                             (long)(timeout))
 #define __sanitizer_syscall_post_io_getevents(res, ctx_id, min_nr, nr, events, \
                                               timeout)                         \
   __sanitizer_syscall_post_impl_io_getevents(res, (long)(ctx_id),              \
                                              (long)(min_nr), (long)(nr),       \
                                              (long)(events), (long)(timeout))
-#define __sanitizer_syscall_pre_io_submit(ctx_id, arg1, arg2)          \
-  __sanitizer_syscall_pre_impl_io_submit((long)(ctx_id), (long)(arg1), \
+#define __sanitizer_syscall_pre_io_submit(ctx_id, arg1, arg2)                  \
+  __sanitizer_syscall_pre_impl_io_submit((long)(ctx_id), (long)(arg1),         \
                                          (long)(arg2))
-#define __sanitizer_syscall_post_io_submit(res, ctx_id, arg1, arg2)          \
-  __sanitizer_syscall_post_impl_io_submit(res, (long)(ctx_id), (long)(arg1), \
+#define __sanitizer_syscall_post_io_submit(res, ctx_id, arg1, arg2)            \
+  __sanitizer_syscall_post_impl_io_submit(res, (long)(ctx_id), (long)(arg1),   \
                                           (long)(arg2))
-#define __sanitizer_syscall_pre_io_cancel(ctx_id, iocb, result)        \
-  __sanitizer_syscall_pre_impl_io_cancel((long)(ctx_id), (long)(iocb), \
+#define __sanitizer_syscall_pre_io_cancel(ctx_id, iocb, result)                \
+  __sanitizer_syscall_pre_impl_io_cancel((long)(ctx_id), (long)(iocb),         \
                                          (long)(result))
-#define __sanitizer_syscall_post_io_cancel(res, ctx_id, iocb, result)        \
-  __sanitizer_syscall_post_impl_io_cancel(res, (long)(ctx_id), (long)(iocb), \
+#define __sanitizer_syscall_post_io_cancel(res, ctx_id, iocb, result)          \
+  __sanitizer_syscall_post_impl_io_cancel(res, (long)(ctx_id), (long)(iocb),   \
                                           (long)(result))
-#define __sanitizer_syscall_pre_sendfile(out_fd, in_fd, offset, count) \
-  __sanitizer_syscall_pre_impl_sendfile((long)(out_fd), (long)(in_fd), \
+#define __sanitizer_syscall_pre_sendfile(out_fd, in_fd, offset, count)         \
+  __sanitizer_syscall_pre_impl_sendfile((long)(out_fd), (long)(in_fd),         \
                                         (long)(offset), (long)(count))
-#define __sanitizer_syscall_post_sendfile(res, out_fd, in_fd, offset, count) \
-  __sanitizer_syscall_post_impl_sendfile(res, (long)(out_fd), (long)(in_fd), \
+#define __sanitizer_syscall_post_sendfile(res, out_fd, in_fd, offset, count)   \
+  __sanitizer_syscall_post_impl_sendfile(res, (long)(out_fd), (long)(in_fd),   \
                                          (long)(offset), (long)(count))
-#define __sanitizer_syscall_pre_sendfile64(out_fd, in_fd, offset, count) \
-  __sanitizer_syscall_pre_impl_sendfile64((long)(out_fd), (long)(in_fd), \
+#define __sanitizer_syscall_pre_sendfile64(out_fd, in_fd, offset, count)       \
+  __sanitizer_syscall_pre_impl_sendfile64((long)(out_fd), (long)(in_fd),       \
                                           (long)(offset), (long)(count))
 #define __sanitizer_syscall_post_sendfile64(res, out_fd, in_fd, offset, count) \
   __sanitizer_syscall_post_impl_sendfile64(res, (long)(out_fd), (long)(in_fd), \
                                            (long)(offset), (long)(count))
-#define __sanitizer_syscall_pre_readlink(path, buf, bufsiz)        \
-  __sanitizer_syscall_pre_impl_readlink((long)(path), (long)(buf), \
+#define __sanitizer_syscall_pre_readlink(path, buf, bufsiz)                    \
+  __sanitizer_syscall_pre_impl_readlink((long)(path), (long)(buf),             \
                                         (long)(bufsiz))
-#define __sanitizer_syscall_post_readlink(res, path, buf, bufsiz)        \
-  __sanitizer_syscall_post_impl_readlink(res, (long)(path), (long)(buf), \
+#define __sanitizer_syscall_post_readlink(res, path, buf, bufsiz)              \
+  __sanitizer_syscall_post_impl_readlink(res, (long)(path), (long)(buf),       \
                                          (long)(bufsiz))
-#define __sanitizer_syscall_pre_creat(pathname, mode) \
+#define __sanitizer_syscall_pre_creat(pathname, mode)                          \
   __sanitizer_syscall_pre_impl_creat((long)(pathname), (long)(mode))
-#define __sanitizer_syscall_post_creat(res, pathname, mode) \
+#define __sanitizer_syscall_post_creat(res, pathname, mode)                    \
   __sanitizer_syscall_post_impl_creat(res, (long)(pathname), (long)(mode))
-#define __sanitizer_syscall_pre_open(filename, flags, mode)          \
-  __sanitizer_syscall_pre_impl_open((long)(filename), (long)(flags), \
+#define __sanitizer_syscall_pre_open(filename, flags, mode)                    \
+  __sanitizer_syscall_pre_impl_open((long)(filename), (long)(flags),           \
                                     (long)(mode))
-#define __sanitizer_syscall_post_open(res, filename, flags, mode)          \
-  __sanitizer_syscall_post_impl_open(res, (long)(filename), (long)(flags), \
+#define __sanitizer_syscall_post_open(res, filename, flags, mode)              \
+  __sanitizer_syscall_post_impl_open(res, (long)(filename), (long)(flags),     \
                                      (long)(mode))
-#define __sanitizer_syscall_pre_close(fd) \
+#define __sanitizer_syscall_pre_close(fd)                                      \
   __sanitizer_syscall_pre_impl_close((long)(fd))
-#define __sanitizer_syscall_post_close(res, fd) \
+#define __sanitizer_syscall_post_close(res, fd)                                \
   __sanitizer_syscall_post_impl_close(res, (long)(fd))
-#define __sanitizer_syscall_pre_access(filename, mode) \
+#define __sanitizer_syscall_pre_access(filename, mode)                         \
   __sanitizer_syscall_pre_impl_access((long)(filename), (long)(mode))
-#define __sanitizer_syscall_post_access(res, filename, mode) \
+#define __sanitizer_syscall_post_access(res, filename, mode)                   \
   __sanitizer_syscall_post_impl_access(res, (long)(filename), (long)(mode))
 #define __sanitizer_syscall_pre_vhangup() __sanitizer_syscall_pre_impl_vhangup()
-#define __sanitizer_syscall_post_vhangup(res) \
+#define __sanitizer_syscall_post_vhangup(res)                                  \
   __sanitizer_syscall_post_impl_vhangup(res)
-#define __sanitizer_syscall_pre_chown(filename, user, group)         \
-  __sanitizer_syscall_pre_impl_chown((long)(filename), (long)(user), \
+#define __sanitizer_syscall_pre_chown(filename, user, group)                   \
+  __sanitizer_syscall_pre_impl_chown((long)(filename), (long)(user),           \
                                      (long)(group))
-#define __sanitizer_syscall_post_chown(res, filename, user, group)         \
-  __sanitizer_syscall_post_impl_chown(res, (long)(filename), (long)(user), \
+#define __sanitizer_syscall_post_chown(res, filename, user, group)             \
+  __sanitizer_syscall_post_impl_chown(res, (long)(filename), (long)(user),     \
                                       (long)(group))
-#define __sanitizer_syscall_pre_lchown(filename, user, group)         \
-  __sanitizer_syscall_pre_impl_lchown((long)(filename), (long)(user), \
+#define __sanitizer_syscall_pre_lchown(filename, user, group)                  \
+  __sanitizer_syscall_pre_impl_lchown((long)(filename), (long)(user),          \
                                       (long)(group))
-#define __sanitizer_syscall_post_lchown(res, filename, user, group)         \
-  __sanitizer_syscall_post_impl_lchown(res, (long)(filename), (long)(user), \
+#define __sanitizer_syscall_post_lchown(res, filename, user, group)            \
+  __sanitizer_syscall_post_impl_lchown(res, (long)(filename), (long)(user),    \
                                        (long)(group))
-#define __sanitizer_syscall_pre_fchown(fd, user, group) \
+#define __sanitizer_syscall_pre_fchown(fd, user, group)                        \
   __sanitizer_syscall_pre_impl_fchown((long)(fd), (long)(user), (long)(group))
-#define __sanitizer_syscall_post_fchown(res, fd, user, group)         \
-  __sanitizer_syscall_post_impl_fchown(res, (long)(fd), (long)(user), \
+#define __sanitizer_syscall_post_fchown(res, fd, user, group)                  \
+  __sanitizer_syscall_post_impl_fchown(res, (long)(fd), (long)(user),          \
                                        (long)(group))
-#define __sanitizer_syscall_pre_chown16(filename, user, group)       \
-  __sanitizer_syscall_pre_impl_chown16((long)(filename), (long)user, \
+#define __sanitizer_syscall_pre_chown16(filename, user, group)                 \
+  __sanitizer_syscall_pre_impl_chown16((long)(filename), (long)user,           \
                                        (long)group)
-#define __sanitizer_syscall_post_chown16(res, filename, user, group)       \
-  __sanitizer_syscall_post_impl_chown16(res, (long)(filename), (long)user, \
+#define __sanitizer_syscall_post_chown16(res, filename, user, group)           \
+  __sanitizer_syscall_post_impl_chown16(res, (long)(filename), (long)user,     \
                                         (long)group)
-#define __sanitizer_syscall_pre_lchown16(filename, user, group)       \
-  __sanitizer_syscall_pre_impl_lchown16((long)(filename), (long)user, \
+#define __sanitizer_syscall_pre_lchown16(filename, user, group)                \
+  __sanitizer_syscall_pre_impl_lchown16((long)(filename), (long)user,          \
                                         (long)group)
-#define __sanitizer_syscall_post_lchown16(res, filename, user, group)       \
-  __sanitizer_syscall_post_impl_lchown16(res, (long)(filename), (long)user, \
+#define __sanitizer_syscall_post_lchown16(res, filename, user, group)          \
+  __sanitizer_syscall_post_impl_lchown16(res, (long)(filename), (long)user,    \
                                          (long)group)
-#define __sanitizer_syscall_pre_fchown16(fd, user, group) \
+#define __sanitizer_syscall_pre_fchown16(fd, user, group)                      \
   __sanitizer_syscall_pre_impl_fchown16((long)(fd), (long)user, (long)group)
-#define __sanitizer_syscall_post_fchown16(res, fd, user, group)       \
-  __sanitizer_syscall_post_impl_fchown16(res, (long)(fd), (long)user, \
+#define __sanitizer_syscall_post_fchown16(res, fd, user, group)                \
+  __sanitizer_syscall_post_impl_fchown16(res, (long)(fd), (long)user,          \
                                          (long)group)
-#define __sanitizer_syscall_pre_setregid16(rgid, egid) \
+#define __sanitizer_syscall_pre_setregid16(rgid, egid)                         \
   __sanitizer_syscall_pre_impl_setregid16((long)rgid, (long)egid)
-#define __sanitizer_syscall_post_setregid16(res, rgid, egid) \
+#define __sanitizer_syscall_post_setregid16(res, rgid, egid)                   \
   __sanitizer_syscall_post_impl_setregid16(res, (long)rgid, (long)egid)
-#define __sanitizer_syscall_pre_setgid16(gid) \
+#define __sanitizer_syscall_pre_setgid16(gid)                                  \
   __sanitizer_syscall_pre_impl_setgid16((long)gid)
-#define __sanitizer_syscall_post_setgid16(res, gid) \
+#define __sanitizer_syscall_post_setgid16(res, gid)                            \
   __sanitizer_syscall_post_impl_setgid16(res, (long)gid)
-#define __sanitizer_syscall_pre_setreuid16(ruid, euid) \
+#define __sanitizer_syscall_pre_setreuid16(ruid, euid)                         \
   __sanitizer_syscall_pre_impl_setreuid16((long)ruid, (long)euid)
-#define __sanitizer_syscall_post_setreuid16(res, ruid, euid) \
+#define __sanitizer_syscall_post_setreuid16(res, ruid, euid)                   \
   __sanitizer_syscall_post_impl_setreuid16(res, (long)ruid, (long)euid)
-#define __sanitizer_syscall_pre_setuid16(uid) \
+#define __sanitizer_syscall_pre_setuid16(uid)                                  \
   __sanitizer_syscall_pre_impl_setuid16((long)uid)
-#define __sanitizer_syscall_post_setuid16(res, uid) \
+#define __sanitizer_syscall_post_setuid16(res, uid)                            \
   __sanitizer_syscall_post_impl_setuid16(res, (long)uid)
-#define __sanitizer_syscall_pre_setresuid16(ruid, euid, suid) \
+#define __sanitizer_syscall_pre_setresuid16(ruid, euid, suid)                  \
   __sanitizer_syscall_pre_impl_setresuid16((long)ruid, (long)euid, (long)suid)
-#define __sanitizer_syscall_post_setresuid16(res, ruid, euid, suid)      \
-  __sanitizer_syscall_post_impl_setresuid16(res, (long)ruid, (long)euid, \
+#define __sanitizer_syscall_post_setresuid16(res, ruid, euid, suid)            \
+  __sanitizer_syscall_post_impl_setresuid16(res, (long)ruid, (long)euid,       \
                                             (long)suid)
-#define __sanitizer_syscall_pre_getresuid16(ruid, euid, suid)          \
-  __sanitizer_syscall_pre_impl_getresuid16((long)(ruid), (long)(euid), \
+#define __sanitizer_syscall_pre_getresuid16(ruid, euid, suid)                  \
+  __sanitizer_syscall_pre_impl_getresuid16((long)(ruid), (long)(euid),         \
                                            (long)(suid))
-#define __sanitizer_syscall_post_getresuid16(res, ruid, euid, suid)          \
-  __sanitizer_syscall_post_impl_getresuid16(res, (long)(ruid), (long)(euid), \
+#define __sanitizer_syscall_post_getresuid16(res, ruid, euid, suid)            \
+  __sanitizer_syscall_post_impl_getresuid16(res, (long)(ruid), (long)(euid),   \
                                             (long)(suid))
-#define __sanitizer_syscall_pre_setresgid16(rgid, egid, sgid) \
+#define __sanitizer_syscall_pre_setresgid16(rgid, egid, sgid)                  \
   __sanitizer_syscall_pre_impl_setresgid16((long)rgid, (long)egid, (long)sgid)
-#define __sanitizer_syscall_post_setresgid16(res, rgid, egid, sgid)      \
-  __sanitizer_syscall_post_impl_setresgid16(res, (long)rgid, (long)egid, \
+#define __sanitizer_syscall_post_setresgid16(res, rgid, egid, sgid)            \
+  __sanitizer_syscall_post_impl_setresgid16(res, (long)rgid, (long)egid,       \
                                             (long)sgid)
-#define __sanitizer_syscall_pre_getresgid16(rgid, egid, sgid)          \
-  __sanitizer_syscall_pre_impl_getresgid16((long)(rgid), (long)(egid), \
+#define __sanitizer_syscall_pre_getresgid16(rgid, egid, sgid)                  \
+  __sanitizer_syscall_pre_impl_getresgid16((long)(rgid), (long)(egid),         \
                                            (long)(sgid))
-#define __sanitizer_syscall_post_getresgid16(res, rgid, egid, sgid)          \
-  __sanitizer_syscall_post_impl_getresgid16(res, (long)(rgid), (long)(egid), \
+#define __sanitizer_syscall_post_getresgid16(res, rgid, egid, sgid)            \
+  __sanitizer_syscall_post_impl_getresgid16(res, (long)(rgid), (long)(egid),   \
                                             (long)(sgid))
-#define __sanitizer_syscall_pre_setfsuid16(uid) \
+#define __sanitizer_syscall_pre_setfsuid16(uid)                                \
   __sanitizer_syscall_pre_impl_setfsuid16((long)uid)
-#define __sanitizer_syscall_post_setfsuid16(res, uid) \
+#define __sanitizer_syscall_post_setfsuid16(res, uid)                          \
   __sanitizer_syscall_post_impl_setfsuid16(res, (long)uid)
-#define __sanitizer_syscall_pre_setfsgid16(gid) \
+#define __sanitizer_syscall_pre_setfsgid16(gid)                                \
   __sanitizer_syscall_pre_impl_setfsgid16((long)gid)
-#define __sanitizer_syscall_post_setfsgid16(res, gid) \
+#define __sanitizer_syscall_post_setfsgid16(res, gid)                          \
   __sanitizer_syscall_post_impl_setfsgid16(res, (long)gid)
-#define __sanitizer_syscall_pre_getgroups16(gidsetsize, grouplist) \
-  __sanitizer_syscall_pre_impl_getgroups16((long)(gidsetsize),     \
+#define __sanitizer_syscall_pre_getgroups16(gidsetsize, grouplist)             \
+  __sanitizer_syscall_pre_impl_getgroups16((long)(gidsetsize),                 \
                                            (long)(grouplist))
-#define __sanitizer_syscall_post_getgroups16(res, gidsetsize, grouplist) \
-  __sanitizer_syscall_post_impl_getgroups16(res, (long)(gidsetsize),     \
+#define __sanitizer_syscall_post_getgroups16(res, gidsetsize, grouplist)       \
+  __sanitizer_syscall_post_impl_getgroups16(res, (long)(gidsetsize),           \
                                             (long)(grouplist))
-#define __sanitizer_syscall_pre_setgroups16(gidsetsize, grouplist) \
-  __sanitizer_syscall_pre_impl_setgroups16((long)(gidsetsize),     \
+#define __sanitizer_syscall_pre_setgroups16(gidsetsize, grouplist)             \
+  __sanitizer_syscall_pre_impl_setgroups16((long)(gidsetsize),                 \
                                            (long)(grouplist))
-#define __sanitizer_syscall_post_setgroups16(res, gidsetsize, grouplist) \
-  __sanitizer_syscall_post_impl_setgroups16(res, (long)(gidsetsize),     \
+#define __sanitizer_syscall_post_setgroups16(res, gidsetsize, grouplist)       \
+  __sanitizer_syscall_post_impl_setgroups16(res, (long)(gidsetsize),           \
                                             (long)(grouplist))
-#define __sanitizer_syscall_pre_getuid16() \
+#define __sanitizer_syscall_pre_getuid16()                                     \
   __sanitizer_syscall_pre_impl_getuid16()
-#define __sanitizer_syscall_post_getuid16(res) \
+#define __sanitizer_syscall_post_getuid16(res)                                 \
   __sanitizer_syscall_post_impl_getuid16(res)
-#define __sanitizer_syscall_pre_geteuid16() \
+#define __sanitizer_syscall_pre_geteuid16()                                    \
   __sanitizer_syscall_pre_impl_geteuid16()
-#define __sanitizer_syscall_post_geteuid16(res) \
+#define __sanitizer_syscall_post_geteuid16(res)                                \
   __sanitizer_syscall_post_impl_geteuid16(res)
-#define __sanitizer_syscall_pre_getgid16() \
+#define __sanitizer_syscall_pre_getgid16()                                     \
   __sanitizer_syscall_pre_impl_getgid16()
-#define __sanitizer_syscall_post_getgid16(res) \
+#define __sanitizer_syscall_post_getgid16(res)                                 \
   __sanitizer_syscall_post_impl_getgid16(res)
-#define __sanitizer_syscall_pre_getegid16() \
+#define __sanitizer_syscall_pre_getegid16()                                    \
   __sanitizer_syscall_pre_impl_getegid16()
-#define __sanitizer_syscall_post_getegid16(res) \
+#define __sanitizer_syscall_post_getegid16(res)                                \
   __sanitizer_syscall_post_impl_getegid16(res)
-#define __sanitizer_syscall_pre_utime(filename, times) \
+#define __sanitizer_syscall_pre_utime(filename, times)                         \
   __sanitizer_syscall_pre_impl_utime((long)(filename), (long)(times))
-#define __sanitizer_syscall_post_utime(res, filename, times) \
+#define __sanitizer_syscall_post_utime(res, filename, times)                   \
   __sanitizer_syscall_post_impl_utime(res, (long)(filename), (long)(times))
-#define __sanitizer_syscall_pre_utimes(filename, utimes) \
+#define __sanitizer_syscall_pre_utimes(filename, utimes)                       \
   __sanitizer_syscall_pre_impl_utimes((long)(filename), (long)(utimes))
-#define __sanitizer_syscall_post_utimes(res, filename, utimes) \
+#define __sanitizer_syscall_post_utimes(res, filename, utimes)                 \
   __sanitizer_syscall_post_impl_utimes(res, (long)(filename), (long)(utimes))
-#define __sanitizer_syscall_pre_lseek(fd, offset, origin) \
+#define __sanitizer_syscall_pre_lseek(fd, offset, origin)                      \
   __sanitizer_syscall_pre_impl_lseek((long)(fd), (long)(offset), (long)(origin))
-#define __sanitizer_syscall_post_lseek(res, fd, offset, origin)        \
-  __sanitizer_syscall_post_impl_lseek(res, (long)(fd), (long)(offset), \
+#define __sanitizer_syscall_post_lseek(res, fd, offset, origin)                \
+  __sanitizer_syscall_post_impl_lseek(res, (long)(fd), (long)(offset),         \
                                       (long)(origin))
-#define __sanitizer_syscall_pre_llseek(fd, offset_high, offset_low, result, \
-                                       origin)                              \
-  __sanitizer_syscall_pre_impl_llseek((long)(fd), (long)(offset_high),      \
-                                      (long)(offset_low), (long)(result),   \
+#define __sanitizer_syscall_pre_llseek(fd, offset_high, offset_low, result,    \
+                                       origin)                                 \
+  __sanitizer_syscall_pre_impl_llseek((long)(fd), (long)(offset_high),         \
+                                      (long)(offset_low), (long)(result),      \
                                       (long)(origin))
-#define __sanitizer_syscall_post_llseek(res, fd, offset_high, offset_low,    \
-                                        result, origin)                      \
-  __sanitizer_syscall_post_impl_llseek(res, (long)(fd), (long)(offset_high), \
-                                       (long)(offset_low), (long)(result),   \
+#define __sanitizer_syscall_post_llseek(res, fd, offset_high, offset_low,      \
+                                        result, origin)                        \
+  __sanitizer_syscall_post_impl_llseek(res, (long)(fd), (long)(offset_high),   \
+                                       (long)(offset_low), (long)(result),     \
                                        (long)(origin))
-#define __sanitizer_syscall_pre_read(fd, buf, count) \
+#define __sanitizer_syscall_pre_read(fd, buf, count)                           \
   __sanitizer_syscall_pre_impl_read((long)(fd), (long)(buf), (long)(count))
-#define __sanitizer_syscall_post_read(res, fd, buf, count)         \
-  __sanitizer_syscall_post_impl_read(res, (long)(fd), (long)(buf), \
+#define __sanitizer_syscall_post_read(res, fd, buf, count)                     \
+  __sanitizer_syscall_post_impl_read(res, (long)(fd), (long)(buf),             \
                                      (long)(count))
-#define __sanitizer_syscall_pre_readv(fd, vec, vlen) \
+#define __sanitizer_syscall_pre_readv(fd, vec, vlen)                           \
   __sanitizer_syscall_pre_impl_readv((long)(fd), (long)(vec), (long)(vlen))
-#define __sanitizer_syscall_post_readv(res, fd, vec, vlen)          \
-  __sanitizer_syscall_post_impl_readv(res, (long)(fd), (long)(vec), \
+#define __sanitizer_syscall_post_readv(res, fd, vec, vlen)                     \
+  __sanitizer_syscall_post_impl_readv(res, (long)(fd), (long)(vec),            \
                                       (long)(vlen))
-#define __sanitizer_syscall_pre_write(fd, buf, count) \
+#define __sanitizer_syscall_pre_write(fd, buf, count)                          \
   __sanitizer_syscall_pre_impl_write((long)(fd), (long)(buf), (long)(count))
-#define __sanitizer_syscall_post_write(res, fd, buf, count)         \
-  __sanitizer_syscall_post_impl_write(res, (long)(fd), (long)(buf), \
+#define __sanitizer_syscall_post_write(res, fd, buf, count)                    \
+  __sanitizer_syscall_post_impl_write(res, (long)(fd), (long)(buf),            \
                                       (long)(count))
-#define __sanitizer_syscall_pre_writev(fd, vec, vlen) \
+#define __sanitizer_syscall_pre_writev(fd, vec, vlen)                          \
   __sanitizer_syscall_pre_impl_writev((long)(fd), (long)(vec), (long)(vlen))
-#define __sanitizer_syscall_post_writev(res, fd, vec, vlen)          \
-  __sanitizer_syscall_post_impl_writev(res, (long)(fd), (long)(vec), \
+#define __sanitizer_syscall_post_writev(res, fd, vec, vlen)                    \
+  __sanitizer_syscall_post_impl_writev(res, (long)(fd), (long)(vec),           \
                                        (long)(vlen))
 
 #ifdef _LP64
 #define __sanitizer_syscall_pre_pread64(fd, buf, count, pos)                   \
   __sanitizer_syscall_pre_impl_pread64((long)(fd), (long)(buf), (long)(count), \
                                        (long)(pos))
-#define __sanitizer_syscall_post_pread64(res, fd, buf, count, pos)    \
-  __sanitizer_syscall_post_impl_pread64(res, (long)(fd), (long)(buf), \
+#define __sanitizer_syscall_post_pread64(res, fd, buf, count, pos)             \
+  __sanitizer_syscall_post_impl_pread64(res, (long)(fd), (long)(buf),          \
                                         (long)(count), (long)(pos))
-#define __sanitizer_syscall_pre_pwrite64(fd, buf, count, pos)    \
-  __sanitizer_syscall_pre_impl_pwrite64((long)(fd), (long)(buf), \
+#define __sanitizer_syscall_pre_pwrite64(fd, buf, count, pos)                  \
+  __sanitizer_syscall_pre_impl_pwrite64((long)(fd), (long)(buf),               \
                                         (long)(count), (long)(pos))
-#define __sanitizer_syscall_post_pwrite64(res, fd, buf, count, pos)    \
-  __sanitizer_syscall_post_impl_pwrite64(res, (long)(fd), (long)(buf), \
+#define __sanitizer_syscall_post_pwrite64(res, fd, buf, count, pos)            \
+  __sanitizer_syscall_post_impl_pwrite64(res, (long)(fd), (long)(buf),         \
                                          (long)(count), (long)(pos))
 #else
 #define __sanitizer_syscall_pre_pread64(fd, buf, count, pos0, pos1)            \
   __sanitizer_syscall_pre_impl_pread64((long)(fd), (long)(buf), (long)(count), \
                                        (long)(pos0), (long)(pos1))
-#define __sanitizer_syscall_post_pread64(res, fd, buf, count, pos0, pos1) \
-  __sanitizer_syscall_post_impl_pread64(res, (long)(fd), (long)(buf),     \
-                                        (long)(count), (long)(pos0), \
-                                        (long)(pos1))
-#define __sanitizer_syscall_pre_pwrite64(fd, buf, count, pos0, pos1) \
-  __sanitizer_syscall_pre_impl_pwrite64(                             \
+#define __sanitizer_syscall_post_pread64(res, fd, buf, count, pos0, pos1)      \
+  __sanitizer_syscall_post_impl_pread64(                                       \
+      res, (long)(fd), (long)(buf), (long)(count), (long)(pos0), (long)(pos1))
+#define __sanitizer_syscall_pre_pwrite64(fd, buf, count, pos0, pos1)           \
+  __sanitizer_syscall_pre_impl_pwrite64(                                       \
       (long)(fd), (long)(buf), (long)(count), (long)(pos0), (long)(pos1))
-#define __sanitizer_syscall_post_pwrite64(res, fd, buf, count, pos0, pos1) \
-  __sanitizer_syscall_post_impl_pwrite64(                                  \
+#define __sanitizer_syscall_post_pwrite64(res, fd, buf, count, pos0, pos1)     \
+  __sanitizer_syscall_post_impl_pwrite64(                                      \
       res, (long)(fd), (long)(buf), (long)(count), (long)(pos0), (long)(pos1))
 #endif
 
-#define __sanitizer_syscall_pre_preadv(fd, vec, vlen, pos_l, pos_h)          \
-  __sanitizer_syscall_pre_impl_preadv((long)(fd), (long)(vec), (long)(vlen), \
+#define __sanitizer_syscall_pre_preadv(fd, vec, vlen, pos_l, pos_h)            \
+  __sanitizer_syscall_pre_impl_preadv((long)(fd), (long)(vec), (long)(vlen),   \
                                       (long)(pos_l), (long)(pos_h))
-#define __sanitizer_syscall_post_preadv(res, fd, vec, vlen, pos_l, pos_h) \
-  __sanitizer_syscall_post_impl_preadv(res, (long)(fd), (long)(vec),      \
-                                       (long)(vlen), (long)(pos_l),       \
+#define __sanitizer_syscall_post_preadv(res, fd, vec, vlen, pos_l, pos_h)      \
+  __sanitizer_syscall_post_impl_preadv(res, (long)(fd), (long)(vec),           \
+                                       (long)(vlen), (long)(pos_l),            \
                                        (long)(pos_h))
-#define __sanitizer_syscall_pre_pwritev(fd, vec, vlen, pos_l, pos_h)          \
-  __sanitizer_syscall_pre_impl_pwritev((long)(fd), (long)(vec), (long)(vlen), \
+#define __sanitizer_syscall_pre_pwritev(fd, vec, vlen, pos_l, pos_h)           \
+  __sanitizer_syscall_pre_impl_pwritev((long)(fd), (long)(vec), (long)(vlen),  \
                                        (long)(pos_l), (long)(pos_h))
-#define __sanitizer_syscall_post_pwritev(res, fd, vec, vlen, pos_l, pos_h) \
-  __sanitizer_syscall_post_impl_pwritev(res, (long)(fd), (long)(vec),      \
-                                        (long)(vlen), (long)(pos_l),       \
+#define __sanitizer_syscall_post_pwritev(res, fd, vec, vlen, pos_l, pos_h)     \
+  __sanitizer_syscall_post_impl_pwritev(res, (long)(fd), (long)(vec),          \
+                                        (long)(vlen), (long)(pos_l),           \
                                         (long)(pos_h))
-#define __sanitizer_syscall_pre_getcwd(buf, size) \
+#define __sanitizer_syscall_pre_getcwd(buf, size)                              \
   __sanitizer_syscall_pre_impl_getcwd((long)(buf), (long)(size))
-#define __sanitizer_syscall_post_getcwd(res, buf, size) \
+#define __sanitizer_syscall_post_getcwd(res, buf, size)                        \
   __sanitizer_syscall_post_impl_getcwd(res, (long)(buf), (long)(size))
-#define __sanitizer_syscall_pre_mkdir(pathname, mode) \
+#define __sanitizer_syscall_pre_mkdir(pathname, mode)                          \
   __sanitizer_syscall_pre_impl_mkdir((long)(pathname), (long)(mode))
-#define __sanitizer_syscall_post_mkdir(res, pathname, mode) \
+#define __sanitizer_syscall_post_mkdir(res, pathname, mode)                    \
   __sanitizer_syscall_post_impl_mkdir(res, (long)(pathname), (long)(mode))
-#define __sanitizer_syscall_pre_chdir(filename) \
+#define __sanitizer_syscall_pre_chdir(filename)                                \
   __sanitizer_syscall_pre_impl_chdir((long)(filename))
-#define __sanitizer_syscall_post_chdir(res, filename) \
+#define __sanitizer_syscall_post_chdir(res, filename)                          \
   __sanitizer_syscall_post_impl_chdir(res, (long)(filename))
-#define __sanitizer_syscall_pre_fchdir(fd) \
+#define __sanitizer_syscall_pre_fchdir(fd)                                     \
   __sanitizer_syscall_pre_impl_fchdir((long)(fd))
-#define __sanitizer_syscall_post_fchdir(res, fd) \
+#define __sanitizer_syscall_post_fchdir(res, fd)                               \
   __sanitizer_syscall_post_impl_fchdir(res, (long)(fd))
-#define __sanitizer_syscall_pre_rmdir(pathname) \
+#define __sanitizer_syscall_pre_rmdir(pathname)                                \
   __sanitizer_syscall_pre_impl_rmdir((long)(pathname))
-#define __sanitizer_syscall_post_rmdir(res, pathname) \
+#define __sanitizer_syscall_post_rmdir(res, pathname)                          \
   __sanitizer_syscall_post_impl_rmdir(res, (long)(pathname))
-#define __sanitizer_syscall_pre_lookup_dcookie(cookie64, buf, len)           \
-  __sanitizer_syscall_pre_impl_lookup_dcookie((long)(cookie64), (long)(buf), \
+#define __sanitizer_syscall_pre_lookup_dcookie(cookie64, buf, len)             \
+  __sanitizer_syscall_pre_impl_lookup_dcookie((long)(cookie64), (long)(buf),   \
                                               (long)(len))
-#define __sanitizer_syscall_post_lookup_dcookie(res, cookie64, buf, len) \
-  __sanitizer_syscall_post_impl_lookup_dcookie(res, (long)(cookie64),    \
+#define __sanitizer_syscall_post_lookup_dcookie(res, cookie64, buf, len)       \
+  __sanitizer_syscall_post_impl_lookup_dcookie(res, (long)(cookie64),          \
                                                (long)(buf), (long)(len))
-#define __sanitizer_syscall_pre_quotactl(cmd, special, id, addr)      \
-  __sanitizer_syscall_pre_impl_quotactl((long)(cmd), (long)(special), \
+#define __sanitizer_syscall_pre_quotactl(cmd, special, id, addr)               \
+  __sanitizer_syscall_pre_impl_quotactl((long)(cmd), (long)(special),          \
                                         (long)(id), (long)(addr))
-#define __sanitizer_syscall_post_quotactl(res, cmd, special, id, addr)      \
-  __sanitizer_syscall_post_impl_quotactl(res, (long)(cmd), (long)(special), \
+#define __sanitizer_syscall_post_quotactl(res, cmd, special, id, addr)         \
+  __sanitizer_syscall_post_impl_quotactl(res, (long)(cmd), (long)(special),    \
                                          (long)(id), (long)(addr))
-#define __sanitizer_syscall_pre_getdents(fd, dirent, count)         \
-  __sanitizer_syscall_pre_impl_getdents((long)(fd), (long)(dirent), \
+#define __sanitizer_syscall_pre_getdents(fd, dirent, count)                    \
+  __sanitizer_syscall_pre_impl_getdents((long)(fd), (long)(dirent),            \
                                         (long)(count))
-#define __sanitizer_syscall_post_getdents(res, fd, dirent, count)         \
-  __sanitizer_syscall_post_impl_getdents(res, (long)(fd), (long)(dirent), \
+#define __sanitizer_syscall_post_getdents(res, fd, dirent, count)              \
+  __sanitizer_syscall_post_impl_getdents(res, (long)(fd), (long)(dirent),      \
                                          (long)(count))
-#define __sanitizer_syscall_pre_getdents64(fd, dirent, count)         \
-  __sanitizer_syscall_pre_impl_getdents64((long)(fd), (long)(dirent), \
+#define __sanitizer_syscall_pre_getdents64(fd, dirent, count)                  \
+  __sanitizer_syscall_pre_impl_getdents64((long)(fd), (long)(dirent),          \
                                           (long)(count))
-#define __sanitizer_syscall_post_getdents64(res, fd, dirent, count)         \
-  __sanitizer_syscall_post_impl_getdents64(res, (long)(fd), (long)(dirent), \
+#define __sanitizer_syscall_post_getdents64(res, fd, dirent, count)            \
+  __sanitizer_syscall_post_impl_getdents64(res, (long)(fd), (long)(dirent),    \
                                            (long)(count))
 #define __sanitizer_syscall_pre_setsockopt(fd, level, optname, optval, optlen) \
   __sanitizer_syscall_pre_impl_setsockopt((long)(fd), (long)(level),           \
                                           (long)(optname), (long)(optval),     \
                                           (long)(optlen))
-#define __sanitizer_syscall_post_setsockopt(res, fd, level, optname, optval, \
-                                            optlen)                          \
-  __sanitizer_syscall_post_impl_setsockopt(res, (long)(fd), (long)(level),   \
-                                           (long)(optname), (long)(optval),  \
+#define __sanitizer_syscall_post_setsockopt(res, fd, level, optname, optval,   \
+                                            optlen)                            \
+  __sanitizer_syscall_post_impl_setsockopt(res, (long)(fd), (long)(level),     \
+                                           (long)(optname), (long)(optval),    \
                                            (long)(optlen))
 #define __sanitizer_syscall_pre_getsockopt(fd, level, optname, optval, optlen) \
   __sanitizer_syscall_pre_impl_getsockopt((long)(fd), (long)(level),           \
                                           (long)(optname), (long)(optval),     \
                                           (long)(optlen))
-#define __sanitizer_syscall_post_getsockopt(res, fd, level, optname, optval, \
-                                            optlen)                          \
-  __sanitizer_syscall_post_impl_getsockopt(res, (long)(fd), (long)(level),   \
-                                           (long)(optname), (long)(optval),  \
+#define __sanitizer_syscall_post_getsockopt(res, fd, level, optname, optval,   \
+                                            optlen)                            \
+  __sanitizer_syscall_post_impl_getsockopt(res, (long)(fd), (long)(level),     \
+                                           (long)(optname), (long)(optval),    \
                                            (long)(optlen))
-#define __sanitizer_syscall_pre_bind(arg0, arg1, arg2) \
+#define __sanitizer_syscall_pre_bind(arg0, arg1, arg2)                         \
   __sanitizer_syscall_pre_impl_bind((long)(arg0), (long)(arg1), (long)(arg2))
-#define __sanitizer_syscall_post_bind(res, arg0, arg1, arg2)          \
-  __sanitizer_syscall_post_impl_bind(res, (long)(arg0), (long)(arg1), \
+#define __sanitizer_syscall_post_bind(res, arg0, arg1, arg2)                   \
+  __sanitizer_syscall_post_impl_bind(res, (long)(arg0), (long)(arg1),          \
                                      (long)(arg2))
-#define __sanitizer_syscall_pre_connect(arg0, arg1, arg2) \
+#define __sanitizer_syscall_pre_connect(arg0, arg1, arg2)                      \
   __sanitizer_syscall_pre_impl_connect((long)(arg0), (long)(arg1), (long)(arg2))
-#define __sanitizer_syscall_post_connect(res, arg0, arg1, arg2)          \
-  __sanitizer_syscall_post_impl_connect(res, (long)(arg0), (long)(arg1), \
+#define __sanitizer_syscall_post_connect(res, arg0, arg1, arg2)                \
+  __sanitizer_syscall_post_impl_connect(res, (long)(arg0), (long)(arg1),       \
                                         (long)(arg2))
-#define __sanitizer_syscall_pre_accept(arg0, arg1, arg2) \
+#define __sanitizer_syscall_pre_accept(arg0, arg1, arg2)                       \
   __sanitizer_syscall_pre_impl_accept((long)(arg0), (long)(arg1), (long)(arg2))
-#define __sanitizer_syscall_post_accept(res, arg0, arg1, arg2)          \
-  __sanitizer_syscall_post_impl_accept(res, (long)(arg0), (long)(arg1), \
+#define __sanitizer_syscall_post_accept(res, arg0, arg1, arg2)                 \
+  __sanitizer_syscall_post_impl_accept(res, (long)(arg0), (long)(arg1),        \
                                        (long)(arg2))
-#define __sanitizer_syscall_pre_accept4(arg0, arg1, arg2, arg3)    \
-  __sanitizer_syscall_pre_impl_accept4((long)(arg0), (long)(arg1), \
+#define __sanitizer_syscall_pre_accept4(arg0, arg1, arg2, arg3)                \
+  __sanitizer_syscall_pre_impl_accept4((long)(arg0), (long)(arg1),             \
                                        (long)(arg2), (long)(arg3))
-#define __sanitizer_syscall_post_accept4(res, arg0, arg1, arg2, arg3)    \
-  __sanitizer_syscall_post_impl_accept4(res, (long)(arg0), (long)(arg1), \
+#define __sanitizer_syscall_post_accept4(res, arg0, arg1, arg2, arg3)          \
+  __sanitizer_syscall_post_impl_accept4(res, (long)(arg0), (long)(arg1),       \
                                         (long)(arg2), (long)(arg3))
-#define __sanitizer_syscall_pre_getsockname(arg0, arg1, arg2)          \
-  __sanitizer_syscall_pre_impl_getsockname((long)(arg0), (long)(arg1), \
+#define __sanitizer_syscall_pre_getsockname(arg0, arg1, arg2)                  \
+  __sanitizer_syscall_pre_impl_getsockname((long)(arg0), (long)(arg1),         \
                                            (long)(arg2))
-#define __sanitizer_syscall_post_getsockname(res, arg0, arg1, arg2)          \
-  __sanitizer_syscall_post_impl_getsockname(res, (long)(arg0), (long)(arg1), \
+#define __sanitizer_syscall_post_getsockname(res, arg0, arg1, arg2)            \
+  __sanitizer_syscall_post_impl_getsockname(res, (long)(arg0), (long)(arg1),   \
                                             (long)(arg2))
-#define __sanitizer_syscall_pre_getpeername(arg0, arg1, arg2)          \
-  __sanitizer_syscall_pre_impl_getpeername((long)(arg0), (long)(arg1), \
+#define __sanitizer_syscall_pre_getpeername(arg0, arg1, arg2)                  \
+  __sanitizer_syscall_pre_impl_getpeername((long)(arg0), (long)(arg1),         \
                                            (long)(arg2))
-#define __sanitizer_syscall_post_getpeername(res, arg0, arg1, arg2)          \
-  __sanitizer_syscall_post_impl_getpeername(res, (long)(arg0), (long)(arg1), \
+#define __sanitizer_syscall_post_getpeername(res, arg0, arg1, arg2)            \
+  __sanitizer_syscall_post_impl_getpeername(res, (long)(arg0), (long)(arg1),   \
                                             (long)(arg2))
-#define __sanitizer_syscall_pre_send(arg0, arg1, arg2, arg3)                  \
-  __sanitizer_syscall_pre_impl_send((long)(arg0), (long)(arg1), (long)(arg2), \
+#define __sanitizer_syscall_pre_send(arg0, arg1, arg2, arg3)                   \
+  __sanitizer_syscall_pre_impl_send((long)(arg0), (long)(arg1), (long)(arg2),  \
                                     (long)(arg3))
-#define __sanitizer_syscall_post_send(res, arg0, arg1, arg2, arg3)    \
-  __sanitizer_syscall_post_impl_send(res, (long)(arg0), (long)(arg1), \
+#define __sanitizer_syscall_post_send(res, arg0, arg1, arg2, arg3)             \
+  __sanitizer_syscall_post_impl_send(res, (long)(arg0), (long)(arg1),          \
                                      (long)(arg2), (long)(arg3))
-#define __sanitizer_syscall_pre_sendto(arg0, arg1, arg2, arg3, arg4, arg5) \
-  __sanitizer_syscall_pre_impl_sendto((long)(arg0), (long)(arg1),          \
-                                      (long)(arg2), (long)(arg3),          \
+#define __sanitizer_syscall_pre_sendto(arg0, arg1, arg2, arg3, arg4, arg5)     \
+  __sanitizer_syscall_pre_impl_sendto((long)(arg0), (long)(arg1),              \
+                                      (long)(arg2), (long)(arg3),              \
                                       (long)(arg4), (long)(arg5))
-#define __sanitizer_syscall_post_sendto(res, arg0, arg1, arg2, arg3, arg4, \
-                                        arg5)                              \
-  __sanitizer_syscall_post_impl_sendto(res, (long)(arg0), (long)(arg1),    \
-                                       (long)(arg2), (long)(arg3),         \
+#define __sanitizer_syscall_post_sendto(res, arg0, arg1, arg2, arg3, arg4,     \
+                                        arg5)                                  \
+  __sanitizer_syscall_post_impl_sendto(res, (long)(arg0), (long)(arg1),        \
+                                       (long)(arg2), (long)(arg3),             \
                                        (long)(arg4), (long)(arg5))
-#define __sanitizer_syscall_pre_sendmsg(fd, msg, flags) \
+#define __sanitizer_syscall_pre_sendmsg(fd, msg, flags)                        \
   __sanitizer_syscall_pre_impl_sendmsg((long)(fd), (long)(msg), (long)(flags))
-#define __sanitizer_syscall_post_sendmsg(res, fd, msg, flags)         \
-  __sanitizer_syscall_post_impl_sendmsg(res, (long)(fd), (long)(msg), \
+#define __sanitizer_syscall_post_sendmsg(res, fd, msg, flags)                  \
+  __sanitizer_syscall_post_impl_sendmsg(res, (long)(fd), (long)(msg),          \
                                         (long)(flags))
 #define __sanitizer_syscall_pre_sendmmsg(fd, msg, vlen, flags)                 \
   __sanitizer_syscall_pre_impl_sendmmsg((long)(fd), (long)(msg), (long)(vlen), \
                                         (long)(flags))
-#define __sanitizer_syscall_post_sendmmsg(res, fd, msg, vlen, flags)   \
-  __sanitizer_syscall_post_impl_sendmmsg(res, (long)(fd), (long)(msg), \
+#define __sanitizer_syscall_post_sendmmsg(res, fd, msg, vlen, flags)           \
+  __sanitizer_syscall_post_impl_sendmmsg(res, (long)(fd), (long)(msg),         \
                                          (long)(vlen), (long)(flags))
-#define __sanitizer_syscall_pre_recv(arg0, arg1, arg2, arg3)                  \
-  __sanitizer_syscall_pre_impl_recv((long)(arg0), (long)(arg1), (long)(arg2), \
+#define __sanitizer_syscall_pre_recv(arg0, arg1, arg2, arg3)                   \
+  __sanitizer_syscall_pre_impl_recv((long)(arg0), (long)(arg1), (long)(arg2),  \
                                     (long)(arg3))
-#define __sanitizer_syscall_post_recv(res, arg0, arg1, arg2, arg3)    \
-  __sanitizer_syscall_post_impl_recv(res, (long)(arg0), (long)(arg1), \
+#define __sanitizer_syscall_post_recv(res, arg0, arg1, arg2, arg3)             \
+  __sanitizer_syscall_post_impl_recv(res, (long)(arg0), (long)(arg1),          \
                                      (long)(arg2), (long)(arg3))
-#define __sanitizer_syscall_pre_recvfrom(arg0, arg1, arg2, arg3, arg4, arg5) \
-  __sanitizer_syscall_pre_impl_recvfrom((long)(arg0), (long)(arg1),          \
-                                        (long)(arg2), (long)(arg3),          \
+#define __sanitizer_syscall_pre_recvfrom(arg0, arg1, arg2, arg3, arg4, arg5)   \
+  __sanitizer_syscall_pre_impl_recvfrom((long)(arg0), (long)(arg1),            \
+                                        (long)(arg2), (long)(arg3),            \
                                         (long)(arg4), (long)(arg5))
-#define __sanitizer_syscall_post_recvfrom(res, arg0, arg1, arg2, arg3, arg4, \
-                                          arg5)                              \
-  __sanitizer_syscall_post_impl_recvfrom(res, (long)(arg0), (long)(arg1),    \
-                                         (long)(arg2), (long)(arg3),         \
+#define __sanitizer_syscall_post_recvfrom(res, arg0, arg1, arg2, arg3, arg4,   \
+                                          arg5)                                \
+  __sanitizer_syscall_post_impl_recvfrom(res, (long)(arg0), (long)(arg1),      \
+                                         (long)(arg2), (long)(arg3),           \
                                          (long)(arg4), (long)(arg5))
-#define __sanitizer_syscall_pre_recvmsg(fd, msg, flags) \
+#define __sanitizer_syscall_pre_recvmsg(fd, msg, flags)                        \
   __sanitizer_syscall_pre_impl_recvmsg((long)(fd), (long)(msg), (long)(flags))
-#define __sanitizer_syscall_post_recvmsg(res, fd, msg, flags)         \
-  __sanitizer_syscall_post_impl_recvmsg(res, (long)(fd), (long)(msg), \
+#define __sanitizer_syscall_post_recvmsg(res, fd, msg, flags)                  \
+  __sanitizer_syscall_post_impl_recvmsg(res, (long)(fd), (long)(msg),          \
                                         (long)(flags))
 #define __sanitizer_syscall_pre_recvmmsg(fd, msg, vlen, flags, timeout)        \
   __sanitizer_syscall_pre_impl_recvmmsg((long)(fd), (long)(msg), (long)(vlen), \
                                         (long)(flags), (long)(timeout))
-#define __sanitizer_syscall_post_recvmmsg(res, fd, msg, vlen, flags, timeout) \
-  __sanitizer_syscall_post_impl_recvmmsg(res, (long)(fd), (long)(msg),        \
-                                         (long)(vlen), (long)(flags),         \
+#define __sanitizer_syscall_post_recvmmsg(res, fd, msg, vlen, flags, timeout)  \
+  __sanitizer_syscall_post_impl_recvmmsg(res, (long)(fd), (long)(msg),         \
+                                         (long)(vlen), (long)(flags),          \
                                          (long)(timeout))
-#define __sanitizer_syscall_pre_socket(arg0, arg1, arg2) \
+#define __sanitizer_syscall_pre_socket(arg0, arg1, arg2)                       \
   __sanitizer_syscall_pre_impl_socket((long)(arg0), (long)(arg1), (long)(arg2))
-#define __sanitizer_syscall_post_socket(res, arg0, arg1, arg2)          \
-  __sanitizer_syscall_post_impl_socket(res, (long)(arg0), (long)(arg1), \
+#define __sanitizer_syscall_post_socket(res, arg0, arg1, arg2)                 \
+  __sanitizer_syscall_post_impl_socket(res, (long)(arg0), (long)(arg1),        \
                                        (long)(arg2))
-#define __sanitizer_syscall_pre_socketpair(arg0, arg1, arg2, arg3)    \
-  __sanitizer_syscall_pre_impl_socketpair((long)(arg0), (long)(arg1), \
+#define __sanitizer_syscall_pre_socketpair(arg0, arg1, arg2, arg3)             \
+  __sanitizer_syscall_pre_impl_socketpair((long)(arg0), (long)(arg1),          \
                                           (long)(arg2), (long)(arg3))
-#define __sanitizer_syscall_post_socketpair(res, arg0, arg1, arg2, arg3)    \
-  __sanitizer_syscall_post_impl_socketpair(res, (long)(arg0), (long)(arg1), \
+#define __sanitizer_syscall_post_socketpair(res, arg0, arg1, arg2, arg3)       \
+  __sanitizer_syscall_post_impl_socketpair(res, (long)(arg0), (long)(arg1),    \
                                            (long)(arg2), (long)(arg3))
-#define __sanitizer_syscall_pre_socketcall(call, args) \
+#define __sanitizer_syscall_pre_socketcall(call, args)                         \
   __sanitizer_syscall_pre_impl_socketcall((long)(call), (long)(args))
-#define __sanitizer_syscall_post_socketcall(res, call, args) \
+#define __sanitizer_syscall_post_socketcall(res, call, args)                   \
   __sanitizer_syscall_post_impl_socketcall(res, (long)(call), (long)(args))
-#define __sanitizer_syscall_pre_listen(arg0, arg1) \
+#define __sanitizer_syscall_pre_listen(arg0, arg1)                             \
   __sanitizer_syscall_pre_impl_listen((long)(arg0), (long)(arg1))
-#define __sanitizer_syscall_post_listen(res, arg0, arg1) \
+#define __sanitizer_syscall_post_listen(res, arg0, arg1)                       \
   __sanitizer_syscall_post_impl_listen(res, (long)(arg0), (long)(arg1))
-#define __sanitizer_syscall_pre_poll(ufds, nfds, timeout) \
+#define __sanitizer_syscall_pre_poll(ufds, nfds, timeout)                      \
   __sanitizer_syscall_pre_impl_poll((long)(ufds), (long)(nfds), (long)(timeout))
-#define __sanitizer_syscall_post_poll(res, ufds, nfds, timeout)       \
-  __sanitizer_syscall_post_impl_poll(res, (long)(ufds), (long)(nfds), \
+#define __sanitizer_syscall_post_poll(res, ufds, nfds, timeout)                \
+  __sanitizer_syscall_post_impl_poll(res, (long)(ufds), (long)(nfds),          \
                                      (long)(timeout))
-#define __sanitizer_syscall_pre_select(n, inp, outp, exp, tvp)              \
-  __sanitizer_syscall_pre_impl_select((long)(n), (long)(inp), (long)(outp), \
+#define __sanitizer_syscall_pre_select(n, inp, outp, exp, tvp)                 \
+  __sanitizer_syscall_pre_impl_select((long)(n), (long)(inp), (long)(outp),    \
                                       (long)(exp), (long)(tvp))
-#define __sanitizer_syscall_post_select(res, n, inp, outp, exp, tvp) \
-  __sanitizer_syscall_post_impl_select(res, (long)(n), (long)(inp),  \
+#define __sanitizer_syscall_post_select(res, n, inp, outp, exp, tvp)           \
+  __sanitizer_syscall_post_impl_select(res, (long)(n), (long)(inp),            \
                                        (long)(outp), (long)(exp), (long)(tvp))
-#define __sanitizer_syscall_pre_old_select(arg) \
+#define __sanitizer_syscall_pre_old_select(arg)                                \
   __sanitizer_syscall_pre_impl_old_select((long)(arg))
-#define __sanitizer_syscall_post_old_select(res, arg) \
+#define __sanitizer_syscall_post_old_select(res, arg)                          \
   __sanitizer_syscall_post_impl_old_select(res, (long)(arg))
-#define __sanitizer_syscall_pre_epoll_create(size) \
+#define __sanitizer_syscall_pre_epoll_create(size)                             \
   __sanitizer_syscall_pre_impl_epoll_create((long)(size))
-#define __sanitizer_syscall_post_epoll_create(res, size) \
+#define __sanitizer_syscall_post_epoll_create(res, size)                       \
   __sanitizer_syscall_post_impl_epoll_create(res, (long)(size))
-#define __sanitizer_syscall_pre_epoll_create1(flags) \
+#define __sanitizer_syscall_pre_epoll_create1(flags)                           \
   __sanitizer_syscall_pre_impl_epoll_create1((long)(flags))
-#define __sanitizer_syscall_post_epoll_create1(res, flags) \
+#define __sanitizer_syscall_post_epoll_create1(res, flags)                     \
   __sanitizer_syscall_post_impl_epoll_create1(res, (long)(flags))
 #define __sanitizer_syscall_pre_epoll_ctl(epfd, op, fd, event)                 \
   __sanitizer_syscall_pre_impl_epoll_ctl((long)(epfd), (long)(op), (long)(fd), \
                                          (long)(event))
-#define __sanitizer_syscall_post_epoll_ctl(res, epfd, op, fd, event)     \
-  __sanitizer_syscall_post_impl_epoll_ctl(res, (long)(epfd), (long)(op), \
+#define __sanitizer_syscall_post_epoll_ctl(res, epfd, op, fd, event)           \
+  __sanitizer_syscall_post_impl_epoll_ctl(res, (long)(epfd), (long)(op),       \
                                           (long)(fd), (long)(event))
-#define __sanitizer_syscall_pre_epoll_wait(epfd, events, maxevents, timeout) \
-  __sanitizer_syscall_pre_impl_epoll_wait((long)(epfd), (long)(events),      \
+#define __sanitizer_syscall_pre_epoll_wait(epfd, events, maxevents, timeout)   \
+  __sanitizer_syscall_pre_impl_epoll_wait((long)(epfd), (long)(events),        \
                                           (long)(maxevents), (long)(timeout))
-#define __sanitizer_syscall_post_epoll_wait(res, epfd, events, maxevents,     \
-                                            timeout)                          \
-  __sanitizer_syscall_post_impl_epoll_wait(res, (long)(epfd), (long)(events), \
+#define __sanitizer_syscall_post_epoll_wait(res, epfd, events, maxevents,      \
+                                            timeout)                           \
+  __sanitizer_syscall_post_impl_epoll_wait(res, (long)(epfd), (long)(events),  \
                                            (long)(maxevents), (long)(timeout))
-#define __sanitizer_syscall_pre_epoll_pwait(epfd, events, maxevents, timeout, \
-                                            sigmask, sigsetsize)              \
-  __sanitizer_syscall_pre_impl_epoll_pwait(                                   \
-      (long)(epfd), (long)(events), (long)(maxevents), (long)(timeout),       \
+#define __sanitizer_syscall_pre_epoll_pwait(epfd, events, maxevents, timeout,  \
+                                            sigmask, sigsetsize)               \
+  __sanitizer_syscall_pre_impl_epoll_pwait(                                    \
+      (long)(epfd), (long)(events), (long)(maxevents), (long)(timeout),        \
+      (long)(sigmask), (long)(sigsetsize))
+#define __sanitizer_syscall_post_epoll_pwait(res, epfd, events, maxevents,     \
+                                             timeout, sigmask, sigsetsize)     \
+  __sanitizer_syscall_post_impl_epoll_pwait(                                   \
+      res, (long)(epfd), (long)(events), (long)(maxevents), (long)(timeout),   \
       (long)(sigmask), (long)(sigsetsize))
-#define __sanitizer_syscall_post_epoll_pwait(res, epfd, events, maxevents,   \
-                                             timeout, sigmask, sigsetsize)   \
-  __sanitizer_syscall_post_impl_epoll_pwait(                                 \
-      res, (long)(epfd), (long)(events), (long)(maxevents), (long)(timeout), \
+#define __sanitizer_syscall_pre_epoll_pwait2(epfd, events, maxevents, timeout, \
+                                             sigmask, sigsetsize)              \
+  __sanitizer_syscall_pre_impl_epoll_pwait2(                                   \
+      (long)(epfd), (long)(events), (long)(maxevents), (long)(timeout),        \
       (long)(sigmask), (long)(sigsetsize))
-#define __sanitizer_syscall_pre_gethostname(name, len) \
+#define __sanitizer_syscall_post_epoll_pwait2(res, epfd, events, maxevents,    \
+                                              timeout, sigmask, sigsetsize)    \
+  __sanitizer_syscall_post_impl_epoll_pwait2(                                  \
+      res, (long)(epfd), (long)(events), (long)(maxevents), (long)(timeout),   \
+      (long)(sigmask), (long)(sigsetsize))
+#define __sanitizer_syscall_pre_gethostname(name, len)                         \
   __sanitizer_syscall_pre_impl_gethostname((long)(name), (long)(len))
-#define __sanitizer_syscall_post_gethostname(res, name, len) \
+#define __sanitizer_syscall_post_gethostname(res, name, len)                   \
   __sanitizer_syscall_post_impl_gethostname(res, (long)(name), (long)(len))
-#define __sanitizer_syscall_pre_sethostname(name, len) \
+#define __sanitizer_syscall_pre_sethostname(name, len)                         \
   __sanitizer_syscall_pre_impl_sethostname((long)(name), (long)(len))
-#define __sanitizer_syscall_post_sethostname(res, name, len) \
+#define __sanitizer_syscall_post_sethostname(res, name, len)                   \
   __sanitizer_syscall_post_impl_sethostname(res, (long)(name), (long)(len))
-#define __sanitizer_syscall_pre_setdomainname(name, len) \
+#define __sanitizer_syscall_pre_setdomainname(name, len)                       \
   __sanitizer_syscall_pre_impl_setdomainname((long)(name), (long)(len))
-#define __sanitizer_syscall_post_setdomainname(res, name, len) \
+#define __sanitizer_syscall_post_setdomainname(res, name, len)                 \
   __sanitizer_syscall_post_impl_setdomainname(res, (long)(name), (long)(len))
-#define __sanitizer_syscall_pre_newuname(name) \
+#define __sanitizer_syscall_pre_newuname(name)                                 \
   __sanitizer_syscall_pre_impl_newuname((long)(name))
-#define __sanitizer_syscall_post_newuname(res, name) \
+#define __sanitizer_syscall_post_newuname(res, name)                           \
   __sanitizer_syscall_post_impl_newuname(res, (long)(name))
-#define __sanitizer_syscall_pre_uname(arg0) \
+#define __sanitizer_syscall_pre_uname(arg0)                                    \
   __sanitizer_syscall_pre_impl_uname((long)(arg0))
-#define __sanitizer_syscall_post_uname(res, arg0) \
+#define __sanitizer_syscall_post_uname(res, arg0)                              \
   __sanitizer_syscall_post_impl_uname(res, (long)(arg0))
-#define __sanitizer_syscall_pre_olduname(arg0) \
+#define __sanitizer_syscall_pre_olduname(arg0)                                 \
   __sanitizer_syscall_pre_impl_olduname((long)(arg0))
-#define __sanitizer_syscall_post_olduname(res, arg0) \
+#define __sanitizer_syscall_post_olduname(res, arg0)                           \
   __sanitizer_syscall_post_impl_olduname(res, (long)(arg0))
-#define __sanitizer_syscall_pre_getrlimit(resource, rlim) \
+#define __sanitizer_syscall_pre_getrlimit(resource, rlim)                      \
   __sanitizer_syscall_pre_impl_getrlimit((long)(resource), (long)(rlim))
-#define __sanitizer_syscall_post_getrlimit(res, resource, rlim) \
+#define __sanitizer_syscall_post_getrlimit(res, resource, rlim)                \
   __sanitizer_syscall_post_impl_getrlimit(res, (long)(resource), (long)(rlim))
-#define __sanitizer_syscall_pre_old_getrlimit(resource, rlim) \
+#define __sanitizer_syscall_pre_old_getrlimit(resource, rlim)                  \
   __sanitizer_syscall_pre_impl_old_getrlimit((long)(resource), (long)(rlim))
-#define __sanitizer_syscall_post_old_getrlimit(res, resource, rlim)  \
-  __sanitizer_syscall_post_impl_old_getrlimit(res, (long)(resource), \
+#define __sanitizer_syscall_post_old_getrlimit(res, resource, rlim)            \
+  __sanitizer_syscall_post_impl_old_getrlimit(res, (long)(resource),           \
                                               (long)(rlim))
-#define __sanitizer_syscall_pre_setrlimit(resource, rlim) \
+#define __sanitizer_syscall_pre_setrlimit(resource, rlim)                      \
   __sanitizer_syscall_pre_impl_setrlimit((long)(resource), (long)(rlim))
-#define __sanitizer_syscall_post_setrlimit(res, resource, rlim) \
+#define __sanitizer_syscall_post_setrlimit(res, resource, rlim)                \
   __sanitizer_syscall_post_impl_setrlimit(res, (long)(resource), (long)(rlim))
-#define __sanitizer_syscall_pre_prlimit64(pid, resource, new_rlim, old_rlim) \
-  __sanitizer_syscall_pre_impl_prlimit64((long)(pid), (long)(resource),      \
+#define __sanitizer_syscall_pre_prlimit64(pid, resource, new_rlim, old_rlim)   \
+  __sanitizer_syscall_pre_impl_prlimit64((long)(pid), (long)(resource),        \
                                          (long)(new_rlim), (long)(old_rlim))
-#define __sanitizer_syscall_post_prlimit64(res, pid, resource, new_rlim,      \
-                                           old_rlim)                          \
-  __sanitizer_syscall_post_impl_prlimit64(res, (long)(pid), (long)(resource), \
+#define __sanitizer_syscall_post_prlimit64(res, pid, resource, new_rlim,       \
+                                           old_rlim)                           \
+  __sanitizer_syscall_post_impl_prlimit64(res, (long)(pid), (long)(resource),  \
                                           (long)(new_rlim), (long)(old_rlim))
-#define __sanitizer_syscall_pre_getrusage(who, ru) \
+#define __sanitizer_syscall_pre_getrusage(who, ru)                             \
   __sanitizer_syscall_pre_impl_getrusage((long)(who), (long)(ru))
-#define __sanitizer_syscall_post_getrusage(res, who, ru) \
+#define __sanitizer_syscall_post_getrusage(res, who, ru)                       \
   __sanitizer_syscall_post_impl_getrusage(res, (long)(who), (long)(ru))
-#define __sanitizer_syscall_pre_umask(mask) \
+#define __sanitizer_syscall_pre_umask(mask)                                    \
   __sanitizer_syscall_pre_impl_umask((long)(mask))
-#define __sanitizer_syscall_post_umask(res, mask) \
+#define __sanitizer_syscall_post_umask(res, mask)                              \
   __sanitizer_syscall_post_impl_umask(res, (long)(mask))
-#define __sanitizer_syscall_pre_msgget(key, msgflg) \
+#define __sanitizer_syscall_pre_msgget(key, msgflg)                            \
   __sanitizer_syscall_pre_impl_msgget((long)(key), (long)(msgflg))
-#define __sanitizer_syscall_post_msgget(res, key, msgflg) \
+#define __sanitizer_syscall_post_msgget(res, key, msgflg)                      \
   __sanitizer_syscall_post_impl_msgget(res, (long)(key), (long)(msgflg))
-#define __sanitizer_syscall_pre_msgsnd(msqid, msgp, msgsz, msgflg) \
-  __sanitizer_syscall_pre_impl_msgsnd((long)(msqid), (long)(msgp), \
+#define __sanitizer_syscall_pre_msgsnd(msqid, msgp, msgsz, msgflg)             \
+  __sanitizer_syscall_pre_impl_msgsnd((long)(msqid), (long)(msgp),             \
                                       (long)(msgsz), (long)(msgflg))
-#define __sanitizer_syscall_post_msgsnd(res, msqid, msgp, msgsz, msgflg) \
-  __sanitizer_syscall_post_impl_msgsnd(res, (long)(msqid), (long)(msgp), \
+#define __sanitizer_syscall_post_msgsnd(res, msqid, msgp, msgsz, msgflg)       \
+  __sanitizer_syscall_post_impl_msgsnd(res, (long)(msqid), (long)(msgp),       \
                                        (long)(msgsz), (long)(msgflg))
-#define __sanitizer_syscall_pre_msgrcv(msqid, msgp, msgsz, msgtyp, msgflg) \
-  __sanitizer_syscall_pre_impl_msgrcv((long)(msqid), (long)(msgp),         \
-                                      (long)(msgsz), (long)(msgtyp),       \
+#define __sanitizer_syscall_pre_msgrcv(msqid, msgp, msgsz, msgtyp, msgflg)     \
+  __sanitizer_syscall_pre_impl_msgrcv((long)(msqid), (long)(msgp),             \
+                                      (long)(msgsz), (long)(msgtyp),           \
                                       (long)(msgflg))
-#define __sanitizer_syscall_post_msgrcv(res, msqid, msgp, msgsz, msgtyp, \
-                                        msgflg)                          \
-  __sanitizer_syscall_post_impl_msgrcv(res, (long)(msqid), (long)(msgp), \
-                                       (long)(msgsz), (long)(msgtyp),    \
+#define __sanitizer_syscall_post_msgrcv(res, msqid, msgp, msgsz, msgtyp,       \
+                                        msgflg)                                \
+  __sanitizer_syscall_post_impl_msgrcv(res, (long)(msqid), (long)(msgp),       \
+                                       (long)(msgsz), (long)(msgtyp),          \
                                        (long)(msgflg))
-#define __sanitizer_syscall_pre_msgctl(msqid, cmd, buf) \
+#define __sanitizer_syscall_pre_msgctl(msqid, cmd, buf)                        \
   __sanitizer_syscall_pre_impl_msgctl((long)(msqid), (long)(cmd), (long)(buf))
-#define __sanitizer_syscall_post_msgctl(res, msqid, cmd, buf)           \
-  __sanitizer_syscall_post_impl_msgctl(res, (long)(msqid), (long)(cmd), \
+#define __sanitizer_syscall_post_msgctl(res, msqid, cmd, buf)                  \
+  __sanitizer_syscall_post_impl_msgctl(res, (long)(msqid), (long)(cmd),        \
                                        (long)(buf))
-#define __sanitizer_syscall_pre_semget(key, nsems, semflg)        \
-  __sanitizer_syscall_pre_impl_semget((long)(key), (long)(nsems), \
+#define __sanitizer_syscall_pre_semget(key, nsems, semflg)                     \
+  __sanitizer_syscall_pre_impl_semget((long)(key), (long)(nsems),              \
                                       (long)(semflg))
-#define __sanitizer_syscall_post_semget(res, key, nsems, semflg)        \
-  __sanitizer_syscall_post_impl_semget(res, (long)(key), (long)(nsems), \
+#define __sanitizer_syscall_post_semget(res, key, nsems, semflg)               \
+  __sanitizer_syscall_post_impl_semget(res, (long)(key), (long)(nsems),        \
                                        (long)(semflg))
-#define __sanitizer_syscall_pre_semop(semid, sops, nsops) \
+#define __sanitizer_syscall_pre_semop(semid, sops, nsops)                      \
   __sanitizer_syscall_pre_impl_semop((long)(semid), (long)(sops), (long)(nsops))
-#define __sanitizer_syscall_post_semop(res, semid, sops, nsops)         \
-  __sanitizer_syscall_post_impl_semop(res, (long)(semid), (long)(sops), \
+#define __sanitizer_syscall_post_semop(res, semid, sops, nsops)                \
+  __sanitizer_syscall_post_impl_semop(res, (long)(semid), (long)(sops),        \
                                       (long)(nsops))
-#define __sanitizer_syscall_pre_semctl(semid, semnum, cmd, arg)      \
-  __sanitizer_syscall_pre_impl_semctl((long)(semid), (long)(semnum), \
+#define __sanitizer_syscall_pre_semctl(semid, semnum, cmd, arg)                \
+  __sanitizer_syscall_pre_impl_semctl((long)(semid), (long)(semnum),           \
                                       (long)(cmd), (long)(arg))
-#define __sanitizer_syscall_post_semctl(res, semid, semnum, cmd, arg)      \
-  __sanitizer_syscall_post_impl_semctl(res, (long)(semid), (long)(semnum), \
+#define __sanitizer_syscall_post_semctl(res, semid, semnum, cmd, arg)          \
+  __sanitizer_syscall_post_impl_semctl(res, (long)(semid), (long)(semnum),     \
                                        (long)(cmd), (long)(arg))
-#define __sanitizer_syscall_pre_semtimedop(semid, sops, nsops, timeout) \
-  __sanitizer_syscall_pre_impl_semtimedop((long)(semid), (long)(sops),  \
+#define __sanitizer_syscall_pre_semtimedop(semid, sops, nsops, timeout)        \
+  __sanitizer_syscall_pre_impl_semtimedop((long)(semid), (long)(sops),         \
                                           (long)(nsops), (long)(timeout))
-#define __sanitizer_syscall_post_semtimedop(res, semid, sops, nsops, timeout) \
-  __sanitizer_syscall_post_impl_semtimedop(res, (long)(semid), (long)(sops),  \
+#define __sanitizer_syscall_post_semtimedop(res, semid, sops, nsops, timeout)  \
+  __sanitizer_syscall_post_impl_semtimedop(res, (long)(semid), (long)(sops),   \
                                            (long)(nsops), (long)(timeout))
-#define __sanitizer_syscall_pre_shmat(shmid, shmaddr, shmflg)        \
-  __sanitizer_syscall_pre_impl_shmat((long)(shmid), (long)(shmaddr), \
+#define __sanitizer_syscall_pre_shmat(shmid, shmaddr, shmflg)                  \
+  __sanitizer_syscall_pre_impl_shmat((long)(shmid), (long)(shmaddr),           \
                                      (long)(shmflg))
-#define __sanitizer_syscall_post_shmat(res, shmid, shmaddr, shmflg)        \
-  __sanitizer_syscall_post_impl_shmat(res, (long)(shmid), (long)(shmaddr), \
+#define __sanitizer_syscall_post_shmat(res, shmid, shmaddr, shmflg)            \
+  __sanitizer_syscall_post_impl_shmat(res, (long)(shmid), (long)(shmaddr),     \
                                       (long)(shmflg))
-#define __sanitizer_syscall_pre_shmget(key, size, flag) \
+#define __sanitizer_syscall_pre_shmget(key, size, flag)                        \
   __sanitizer_syscall_pre_impl_shmget((long)(key), (long)(size), (long)(flag))
-#define __sanitizer_syscall_post_shmget(res, key, size, flag)          \
-  __sanitizer_syscall_post_impl_shmget(res, (long)(key), (long)(size), \
+#define __sanitizer_syscall_post_shmget(res, key, size, flag)                  \
+  __sanitizer_syscall_post_impl_shmget(res, (long)(key), (long)(size),         \
                                        (long)(flag))
-#define __sanitizer_syscall_pre_shmdt(shmaddr) \
+#define __sanitizer_syscall_pre_shmdt(shmaddr)                                 \
   __sanitizer_syscall_pre_impl_shmdt((long)(shmaddr))
-#define __sanitizer_syscall_post_shmdt(res, shmaddr) \
+#define __sanitizer_syscall_post_shmdt(res, shmaddr)                           \
   __sanitizer_syscall_post_impl_shmdt(res, (long)(shmaddr))
-#define __sanitizer_syscall_pre_shmctl(shmid, cmd, buf) \
+#define __sanitizer_syscall_pre_shmctl(shmid, cmd, buf)                        \
   __sanitizer_syscall_pre_impl_shmctl((long)(shmid), (long)(cmd), (long)(buf))
-#define __sanitizer_syscall_post_shmctl(res, shmid, cmd, buf)           \
-  __sanitizer_syscall_post_impl_shmctl(res, (long)(shmid), (long)(cmd), \
+#define __sanitizer_syscall_post_shmctl(res, shmid, cmd, buf)                  \
+  __sanitizer_syscall_post_impl_shmctl(res, (long)(shmid), (long)(cmd),        \
                                        (long)(buf))
 #define __sanitizer_syscall_pre_ipc(call, first, second, third, ptr, fifth)    \
   __sanitizer_syscall_pre_impl_ipc((long)(call), (long)(first),                \
                                    (long)(second), (long)(third), (long)(ptr), \
                                    (long)(fifth))
-#define __sanitizer_syscall_post_ipc(res, call, first, second, third, ptr, \
-                                     fifth)                                \
-  __sanitizer_syscall_post_impl_ipc(res, (long)(call), (long)(first),      \
-                                    (long)(second), (long)(third),         \
+#define __sanitizer_syscall_post_ipc(res, call, first, second, third, ptr,     \
+                                     fifth)                                    \
+  __sanitizer_syscall_post_impl_ipc(res, (long)(call), (long)(first),          \
+                                    (long)(second), (long)(third),             \
                                     (long)(ptr), (long)(fifth))
-#define __sanitizer_syscall_pre_mq_open(name, oflag, mode, attr)    \
-  __sanitizer_syscall_pre_impl_mq_open((long)(name), (long)(oflag), \
+#define __sanitizer_syscall_pre_mq_open(name, oflag, mode, attr)               \
+  __sanitizer_syscall_pre_impl_mq_open((long)(name), (long)(oflag),            \
                                        (long)(mode), (long)(attr))
-#define __sanitizer_syscall_post_mq_open(res, name, oflag, mode, attr)    \
-  __sanitizer_syscall_post_impl_mq_open(res, (long)(name), (long)(oflag), \
+#define __sanitizer_syscall_post_mq_open(res, name, oflag, mode, attr)         \
+  __sanitizer_syscall_post_impl_mq_open(res, (long)(name), (long)(oflag),      \
                                         (long)(mode), (long)(attr))
-#define __sanitizer_syscall_pre_mq_unlink(name) \
+#define __sanitizer_syscall_pre_mq_unlink(name)                                \
   __sanitizer_syscall_pre_impl_mq_unlink((long)(name))
-#define __sanitizer_syscall_post_mq_unlink(res, name) \
+#define __sanitizer_syscall_post_mq_unlink(res, name)                          \
   __sanitizer_syscall_post_impl_mq_unlink(res, (long)(name))
 #define __sanitizer_syscall_pre_mq_timedsend(mqdes, msg_ptr, msg_len,          \
                                              msg_prio, abs_timeout)            \
   __sanitizer_syscall_pre_impl_mq_timedsend((long)(mqdes), (long)(msg_ptr),    \
                                             (long)(msg_len), (long)(msg_prio), \
                                             (long)(abs_timeout))
-#define __sanitizer_syscall_post_mq_timedsend(res, mqdes, msg_ptr, msg_len,   \
-                                              msg_prio, abs_timeout)          \
-  __sanitizer_syscall_post_impl_mq_timedsend(                                 \
-      res, (long)(mqdes), (long)(msg_ptr), (long)(msg_len), (long)(msg_prio), \
+#define __sanitizer_syscall_post_mq_timedsend(res, mqdes, msg_ptr, msg_len,    \
+                                              msg_prio, abs_timeout)           \
+  __sanitizer_syscall_post_impl_mq_timedsend(                                  \
+      res, (long)(mqdes), (long)(msg_ptr), (long)(msg_len), (long)(msg_prio),  \
       (long)(abs_timeout))
-#define __sanitizer_syscall_pre_mq_timedreceive(mqdes, msg_ptr, msg_len, \
-                                                msg_prio, abs_timeout)   \
-  __sanitizer_syscall_pre_impl_mq_timedreceive(                          \
-      (long)(mqdes), (long)(msg_ptr), (long)(msg_len), (long)(msg_prio), \
+#define __sanitizer_syscall_pre_mq_timedreceive(mqdes, msg_ptr, msg_len,       \
+                                                msg_prio, abs_timeout)         \
+  __sanitizer_syscall_pre_impl_mq_timedreceive(                                \
+      (long)(mqdes), (long)(msg_ptr), (long)(msg_len), (long)(msg_prio),       \
       (long)(abs_timeout))
 #define __sanitizer_syscall_post_mq_timedreceive(res, mqdes, msg_ptr, msg_len, \
                                                  msg_prio, abs_timeout)        \
   __sanitizer_syscall_post_impl_mq_timedreceive(                               \
       res, (long)(mqdes), (long)(msg_ptr), (long)(msg_len), (long)(msg_prio),  \
       (long)(abs_timeout))
-#define __sanitizer_syscall_pre_mq_notify(mqdes, notification) \
+#define __sanitizer_syscall_pre_mq_notify(mqdes, notification)                 \
   __sanitizer_syscall_pre_impl_mq_notify((long)(mqdes), (long)(notification))
-#define __sanitizer_syscall_post_mq_notify(res, mqdes, notification) \
-  __sanitizer_syscall_post_impl_mq_notify(res, (long)(mqdes),        \
+#define __sanitizer_syscall_post_mq_notify(res, mqdes, notification)           \
+  __sanitizer_syscall_post_impl_mq_notify(res, (long)(mqdes),                  \
                                           (long)(notification))
-#define __sanitizer_syscall_pre_mq_getsetattr(mqdes, mqstat, omqstat)       \
-  __sanitizer_syscall_pre_impl_mq_getsetattr((long)(mqdes), (long)(mqstat), \
+#define __sanitizer_syscall_pre_mq_getsetattr(mqdes, mqstat, omqstat)          \
+  __sanitizer_syscall_pre_impl_mq_getsetattr((long)(mqdes), (long)(mqstat),    \
                                              (long)(omqstat))
-#define __sanitizer_syscall_post_mq_getsetattr(res, mqdes, mqstat, omqstat) \
-  __sanitizer_syscall_post_impl_mq_getsetattr(res, (long)(mqdes),           \
+#define __sanitizer_syscall_post_mq_getsetattr(res, mqdes, mqstat, omqstat)    \
+  __sanitizer_syscall_post_impl_mq_getsetattr(res, (long)(mqdes),              \
                                               (long)(mqstat), (long)(omqstat))
-#define __sanitizer_syscall_pre_pciconfig_iobase(which, bus, devfn)         \
-  __sanitizer_syscall_pre_impl_pciconfig_iobase((long)(which), (long)(bus), \
+#define __sanitizer_syscall_pre_pciconfig_iobase(which, bus, devfn)            \
+  __sanitizer_syscall_pre_impl_pciconfig_iobase((long)(which), (long)(bus),    \
                                                 (long)(devfn))
-#define __sanitizer_syscall_post_pciconfig_iobase(res, which, bus, devfn) \
-  __sanitizer_syscall_post_impl_pciconfig_iobase(res, (long)(which),      \
+#define __sanitizer_syscall_post_pciconfig_iobase(res, which, bus, devfn)      \
+  __sanitizer_syscall_post_impl_pciconfig_iobase(res, (long)(which),           \
                                                  (long)(bus), (long)(devfn))
-#define __sanitizer_syscall_pre_pciconfig_read(bus, dfn, off, len, buf) \
-  __sanitizer_syscall_pre_impl_pciconfig_read(                          \
+#define __sanitizer_syscall_pre_pciconfig_read(bus, dfn, off, len, buf)        \
+  __sanitizer_syscall_pre_impl_pciconfig_read(                                 \
       (long)(bus), (long)(dfn), (long)(off), (long)(len), (long)(buf))
-#define __sanitizer_syscall_post_pciconfig_read(res, bus, dfn, off, len, buf) \
-  __sanitizer_syscall_post_impl_pciconfig_read(                               \
+#define __sanitizer_syscall_post_pciconfig_read(res, bus, dfn, off, len, buf)  \
+  __sanitizer_syscall_post_impl_pciconfig_read(                                \
       res, (long)(bus), (long)(dfn), (long)(off), (long)(len), (long)(buf))
-#define __sanitizer_syscall_pre_pciconfig_write(bus, dfn, off, len, buf) \
-  __sanitizer_syscall_pre_impl_pciconfig_write(                          \
+#define __sanitizer_syscall_pre_pciconfig_write(bus, dfn, off, len, buf)       \
+  __sanitizer_syscall_pre_impl_pciconfig_write(                                \
       (long)(bus), (long)(dfn), (long)(off), (long)(len), (long)(buf))
 #define __sanitizer_syscall_post_pciconfig_write(res, bus, dfn, off, len, buf) \
   __sanitizer_syscall_post_impl_pciconfig_write(                               \
       res, (long)(bus), (long)(dfn), (long)(off), (long)(len), (long)(buf))
-#define __sanitizer_syscall_pre_swapon(specialfile, swap_flags) \
+#define __sanitizer_syscall_pre_swapon(specialfile, swap_flags)                \
   __sanitizer_syscall_pre_impl_swapon((long)(specialfile), (long)(swap_flags))
-#define __sanitizer_syscall_post_swapon(res, specialfile, swap_flags) \
-  __sanitizer_syscall_post_impl_swapon(res, (long)(specialfile),      \
+#define __sanitizer_syscall_post_swapon(res, specialfile, swap_flags)          \
+  __sanitizer_syscall_post_impl_swapon(res, (long)(specialfile),               \
                                        (long)(swap_flags))
-#define __sanitizer_syscall_pre_swapoff(specialfile) \
+#define __sanitizer_syscall_pre_swapoff(specialfile)                           \
   __sanitizer_syscall_pre_impl_swapoff((long)(specialfile))
-#define __sanitizer_syscall_post_swapoff(res, specialfile) \
+#define __sanitizer_syscall_post_swapoff(res, specialfile)                     \
   __sanitizer_syscall_post_impl_swapoff(res, (long)(specialfile))
-#define __sanitizer_syscall_pre_sysctl(args) \
+#define __sanitizer_syscall_pre_sysctl(args)                                   \
   __sanitizer_syscall_pre_impl_sysctl((long)(args))
-#define __sanitizer_syscall_post_sysctl(res, args) \
+#define __sanitizer_syscall_post_sysctl(res, args)                             \
   __sanitizer_syscall_post_impl_sysctl(res, (long)(args))
-#define __sanitizer_syscall_pre_sysinfo(info) \
+#define __sanitizer_syscall_pre_sysinfo(info)                                  \
   __sanitizer_syscall_pre_impl_sysinfo((long)(info))
-#define __sanitizer_syscall_post_sysinfo(res, info) \
+#define __sanitizer_syscall_post_sysinfo(res, info)                            \
   __sanitizer_syscall_post_impl_sysinfo(res, (long)(info))
-#define __sanitizer_syscall_pre_sysfs(option, arg1, arg2) \
+#define __sanitizer_syscall_pre_sysfs(option, arg1, arg2)                      \
   __sanitizer_syscall_pre_impl_sysfs((long)(option), (long)(arg1), (long)(arg2))
-#define __sanitizer_syscall_post_sysfs(res, option, arg1, arg2)          \
-  __sanitizer_syscall_post_impl_sysfs(res, (long)(option), (long)(arg1), \
+#define __sanitizer_syscall_post_sysfs(res, option, arg1, arg2)                \
+  __sanitizer_syscall_post_impl_sysfs(res, (long)(option), (long)(arg1),       \
                                       (long)(arg2))
-#define __sanitizer_syscall_pre_syslog(type, buf, len) \
+#define __sanitizer_syscall_pre_syslog(type, buf, len)                         \
   __sanitizer_syscall_pre_impl_syslog((long)(type), (long)(buf), (long)(len))
-#define __sanitizer_syscall_post_syslog(res, type, buf, len)           \
-  __sanitizer_syscall_post_impl_syslog(res, (long)(type), (long)(buf), \
+#define __sanitizer_syscall_post_syslog(res, type, buf, len)                   \
+  __sanitizer_syscall_post_impl_syslog(res, (long)(type), (long)(buf),         \
                                        (long)(len))
-#define __sanitizer_syscall_pre_uselib(library) \
+#define __sanitizer_syscall_pre_uselib(library)                                \
   __sanitizer_syscall_pre_impl_uselib((long)(library))
-#define __sanitizer_syscall_post_uselib(res, library) \
+#define __sanitizer_syscall_post_uselib(res, library)                          \
   __sanitizer_syscall_post_impl_uselib(res, (long)(library))
-#define __sanitizer_syscall_pre_ni_syscall() \
+#define __sanitizer_syscall_pre_ni_syscall()                                   \
   __sanitizer_syscall_pre_impl_ni_syscall()
-#define __sanitizer_syscall_post_ni_syscall(res) \
+#define __sanitizer_syscall_post_ni_syscall(res)                               \
   __sanitizer_syscall_post_impl_ni_syscall(res)
-#define __sanitizer_syscall_pre_ptrace(request, pid, addr, data)    \
-  __sanitizer_syscall_pre_impl_ptrace((long)(request), (long)(pid), \
+#define __sanitizer_syscall_pre_ptrace(request, pid, addr, data)               \
+  __sanitizer_syscall_pre_impl_ptrace((long)(request), (long)(pid),            \
                                       (long)(addr), (long)(data))
-#define __sanitizer_syscall_post_ptrace(res, request, pid, addr, data)    \
-  __sanitizer_syscall_post_impl_ptrace(res, (long)(request), (long)(pid), \
+#define __sanitizer_syscall_post_ptrace(res, request, pid, addr, data)         \
+  __sanitizer_syscall_post_impl_ptrace(res, (long)(request), (long)(pid),      \
                                        (long)(addr), (long)(data))
-#define __sanitizer_syscall_pre_add_key(_type, _description, _payload, plen, \
-                                        destringid)                          \
-  __sanitizer_syscall_pre_impl_add_key((long)(_type), (long)(_description),  \
-                                       (long)(_payload), (long)(plen),       \
+#define __sanitizer_syscall_pre_add_key(_type, _description, _payload, plen,   \
+                                        destringid)                            \
+  __sanitizer_syscall_pre_impl_add_key((long)(_type), (long)(_description),    \
+                                       (long)(_payload), (long)(plen),         \
                                        (long)(destringid))
-#define __sanitizer_syscall_post_add_key(res, _type, _description, _payload, \
-                                         plen, destringid)                   \
-  __sanitizer_syscall_post_impl_add_key(                                     \
-      res, (long)(_type), (long)(_description), (long)(_payload),            \
+#define __sanitizer_syscall_post_add_key(res, _type, _description, _payload,   \
+                                         plen, destringid)                     \
+  __sanitizer_syscall_post_impl_add_key(                                       \
+      res, (long)(_type), (long)(_description), (long)(_payload),              \
       (long)(plen), (long)(destringid))
-#define __sanitizer_syscall_pre_request_key(_type, _description,       \
-                                            _callout_info, destringid) \
-  __sanitizer_syscall_pre_impl_request_key(                            \
-      (long)(_type), (long)(_description), (long)(_callout_info),      \
+#define __sanitizer_syscall_pre_request_key(_type, _description,               \
+                                            _callout_info, destringid)         \
+  __sanitizer_syscall_pre_impl_request_key(                                    \
+      (long)(_type), (long)(_description), (long)(_callout_info),              \
       (long)(destringid))
-#define __sanitizer_syscall_post_request_key(res, _type, _description,  \
-                                             _callout_info, destringid) \
-  __sanitizer_syscall_post_impl_request_key(                            \
-      res, (long)(_type), (long)(_description), (long)(_callout_info),  \
+#define __sanitizer_syscall_post_request_key(res, _type, _description,         \
+                                             _callout_info, destringid)        \
+  __sanitizer_syscall_post_impl_request_key(                                   \
+      res, (long)(_type), (long)(_description), (long)(_callout_info),         \
       (long)(destringid))
 #define __sanitizer_syscall_pre_keyctl(cmd, arg2, arg3, arg4, arg5)            \
   __sanitizer_syscall_pre_impl_keyctl((long)(cmd), (long)(arg2), (long)(arg3), \
                                       (long)(arg4), (long)(arg5))
-#define __sanitizer_syscall_post_keyctl(res, cmd, arg2, arg3, arg4, arg5) \
-  __sanitizer_syscall_post_impl_keyctl(res, (long)(cmd), (long)(arg2),    \
-                                       (long)(arg3), (long)(arg4),        \
+#define __sanitizer_syscall_post_keyctl(res, cmd, arg2, arg3, arg4, arg5)      \
+  __sanitizer_syscall_post_impl_keyctl(res, (long)(cmd), (long)(arg2),         \
+                                       (long)(arg3), (long)(arg4),             \
                                        (long)(arg5))
-#define __sanitizer_syscall_pre_ioprio_set(which, who, ioprio)        \
-  __sanitizer_syscall_pre_impl_ioprio_set((long)(which), (long)(who), \
+#define __sanitizer_syscall_pre_ioprio_set(which, who, ioprio)                 \
+  __sanitizer_syscall_pre_impl_ioprio_set((long)(which), (long)(who),          \
                                           (long)(ioprio))
-#define __sanitizer_syscall_post_ioprio_set(res, which, who, ioprio)        \
-  __sanitizer_syscall_post_impl_ioprio_set(res, (long)(which), (long)(who), \
+#define __sanitizer_syscall_post_ioprio_set(res, which, who, ioprio)           \
+  __sanitizer_syscall_post_impl_ioprio_set(res, (long)(which), (long)(who),    \
                                            (long)(ioprio))
-#define __sanitizer_syscall_pre_ioprio_get(which, who) \
+#define __sanitizer_syscall_pre_ioprio_get(which, who)                         \
   __sanitizer_syscall_pre_impl_ioprio_get((long)(which), (long)(who))
-#define __sanitizer_syscall_post_ioprio_get(res, which, who) \
+#define __sanitizer_syscall_post_ioprio_get(res, which, who)                   \
   __sanitizer_syscall_post_impl_ioprio_get(res, (long)(which), (long)(who))
-#define __sanitizer_syscall_pre_set_mempolicy(mode, nmask, maxnode)       \
-  __sanitizer_syscall_pre_impl_set_mempolicy((long)(mode), (long)(nmask), \
+#define __sanitizer_syscall_pre_set_mempolicy(mode, nmask, maxnode)            \
+  __sanitizer_syscall_pre_impl_set_mempolicy((long)(mode), (long)(nmask),      \
                                              (long)(maxnode))
-#define __sanitizer_syscall_post_set_mempolicy(res, mode, nmask, maxnode) \
-  __sanitizer_syscall_post_impl_set_mempolicy(res, (long)(mode),          \
+#define __sanitizer_syscall_post_set_mempolicy(res, mode, nmask, maxnode)      \
+  __sanitizer_syscall_post_impl_set_mempolicy(res, (long)(mode),               \
                                               (long)(nmask), (long)(maxnode))
-#define __sanitizer_syscall_pre_migrate_pages(pid, maxnode, from, to)      \
-  __sanitizer_syscall_pre_impl_migrate_pages((long)(pid), (long)(maxnode), \
+#define __sanitizer_syscall_pre_migrate_pages(pid, maxnode, from, to)          \
+  __sanitizer_syscall_pre_impl_migrate_pages((long)(pid), (long)(maxnode),     \
                                              (long)(from), (long)(to))
-#define __sanitizer_syscall_post_migrate_pages(res, pid, maxnode, from, to) \
-  __sanitizer_syscall_post_impl_migrate_pages(                              \
+#define __sanitizer_syscall_post_migrate_pages(res, pid, maxnode, from, to)    \
+  __sanitizer_syscall_post_impl_migrate_pages(                                 \
       res, (long)(pid), (long)(maxnode), (long)(from), (long)(to))
-#define __sanitizer_syscall_pre_move_pages(pid, nr_pages, pages, nodes,  \
-                                           status, flags)                \
-  __sanitizer_syscall_pre_impl_move_pages((long)(pid), (long)(nr_pages), \
-                                          (long)(pages), (long)(nodes),  \
+#define __sanitizer_syscall_pre_move_pages(pid, nr_pages, pages, nodes,        \
+                                           status, flags)                      \
+  __sanitizer_syscall_pre_impl_move_pages((long)(pid), (long)(nr_pages),       \
+                                          (long)(pages), (long)(nodes),        \
                                           (long)(status), (long)(flags))
 #define __sanitizer_syscall_post_move_pages(res, pid, nr_pages, pages, nodes,  \
                                             status, flags)                     \
   __sanitizer_syscall_pre_impl_mbind((long)(start), (long)(len), (long)(mode), \
                                      (long)(nmask), (long)(maxnode),           \
                                      (long)(flags))
-#define __sanitizer_syscall_post_mbind(res, start, len, mode, nmask, maxnode, \
-                                       flags)                                 \
-  __sanitizer_syscall_post_impl_mbind(res, (long)(start), (long)(len),        \
-                                      (long)(mode), (long)(nmask),            \
+#define __sanitizer_syscall_post_mbind(res, start, len, mode, nmask, maxnode,  \
+                                       flags)                                  \
+  __sanitizer_syscall_post_impl_mbind(res, (long)(start), (long)(len),         \
+                                      (long)(mode), (long)(nmask),             \
                                       (long)(maxnode), (long)(flags))
-#define __sanitizer_syscall_pre_get_mempolicy(policy, nmask, maxnode, addr, \
-                                              flags)                        \
-  __sanitizer_syscall_pre_impl_get_mempolicy((long)(policy), (long)(nmask), \
-                                             (long)(maxnode), (long)(addr), \
+#define __sanitizer_syscall_pre_get_mempolicy(policy, nmask, maxnode, addr,    \
+                                              flags)                           \
+  __sanitizer_syscall_pre_impl_get_mempolicy((long)(policy), (long)(nmask),    \
+                                             (long)(maxnode), (long)(addr),    \
                                              (long)(flags))
-#define __sanitizer_syscall_post_get_mempolicy(res, policy, nmask, maxnode,   \
-                                               addr, flags)                   \
-  __sanitizer_syscall_post_impl_get_mempolicy(res, (long)(policy),            \
-                                              (long)(nmask), (long)(maxnode), \
+#define __sanitizer_syscall_post_get_mempolicy(res, policy, nmask, maxnode,    \
+                                               addr, flags)                    \
+  __sanitizer_syscall_post_impl_get_mempolicy(res, (long)(policy),             \
+                                              (long)(nmask), (long)(maxnode),  \
                                               (long)(addr), (long)(flags))
-#define __sanitizer_syscall_pre_inotify_init() \
+#define __sanitizer_syscall_pre_inotify_init()                                 \
   __sanitizer_syscall_pre_impl_inotify_init()
-#define __sanitizer_syscall_post_inotify_init(res) \
+#define __sanitizer_syscall_post_inotify_init(res)                             \
   __sanitizer_syscall_post_impl_inotify_init(res)
-#define __sanitizer_syscall_pre_inotify_init1(flags) \
+#define __sanitizer_syscall_pre_inotify_init1(flags)                           \
   __sanitizer_syscall_pre_impl_inotify_init1((long)(flags))
-#define __sanitizer_syscall_post_inotify_init1(res, flags) \
+#define __sanitizer_syscall_post_inotify_init1(res, flags)                     \
   __sanitizer_syscall_post_impl_inotify_init1(res, (long)(flags))
-#define __sanitizer_syscall_pre_inotify_add_watch(fd, path, mask)          \
-  __sanitizer_syscall_pre_impl_inotify_add_watch((long)(fd), (long)(path), \
+#define __sanitizer_syscall_pre_inotify_add_watch(fd, path, mask)              \
+  __sanitizer_syscall_pre_impl_inotify_add_watch((long)(fd), (long)(path),     \
                                                  (long)(mask))
-#define __sanitizer_syscall_post_inotify_add_watch(res, fd, path, mask) \
-  __sanitizer_syscall_post_impl_inotify_add_watch(res, (long)(fd),      \
+#define __sanitizer_syscall_post_inotify_add_watch(res, fd, path, mask)        \
+  __sanitizer_syscall_post_impl_inotify_add_watch(res, (long)(fd),             \
                                                   (long)(path), (long)(mask))
-#define __sanitizer_syscall_pre_inotify_rm_watch(fd, wd) \
+#define __sanitizer_syscall_pre_inotify_rm_watch(fd, wd)                       \
   __sanitizer_syscall_pre_impl_inotify_rm_watch((long)(fd), (long)(wd))
-#define __sanitizer_syscall_post_inotify_rm_watch(res, fd, wd) \
+#define __sanitizer_syscall_post_inotify_rm_watch(res, fd, wd)                 \
   __sanitizer_syscall_post_impl_inotify_rm_watch(res, (long)(fd), (long)(wd))
-#define __sanitizer_syscall_pre_spu_run(fd, unpc, ustatus)       \
-  __sanitizer_syscall_pre_impl_spu_run((long)(fd), (long)(unpc), \
+#define __sanitizer_syscall_pre_spu_run(fd, unpc, ustatus)                     \
+  __sanitizer_syscall_pre_impl_spu_run((long)(fd), (long)(unpc),               \
                                        (long)(ustatus))
-#define __sanitizer_syscall_post_spu_run(res, fd, unpc, ustatus)       \
-  __sanitizer_syscall_post_impl_spu_run(res, (long)(fd), (long)(unpc), \
+#define __sanitizer_syscall_post_spu_run(res, fd, unpc, ustatus)               \
+  __sanitizer_syscall_post_impl_spu_run(res, (long)(fd), (long)(unpc),         \
                                         (long)(ustatus))
-#define __sanitizer_syscall_pre_spu_create(name, flags, mode, fd)      \
-  __sanitizer_syscall_pre_impl_spu_create((long)(name), (long)(flags), \
+#define __sanitizer_syscall_pre_spu_create(name, flags, mode, fd)              \
+  __sanitizer_syscall_pre_impl_spu_create((long)(name), (long)(flags),         \
                                           (long)(mode), (long)(fd))
-#define __sanitizer_syscall_post_spu_create(res, name, flags, mode, fd)      \
-  __sanitizer_syscall_post_impl_spu_create(res, (long)(name), (long)(flags), \
+#define __sanitizer_syscall_post_spu_create(res, name, flags, mode, fd)        \
+  __sanitizer_syscall_post_impl_spu_create(res, (long)(name), (long)(flags),   \
                                            (long)(mode), (long)(fd))
-#define __sanitizer_syscall_pre_mknodat(dfd, filename, mode, dev)     \
-  __sanitizer_syscall_pre_impl_mknodat((long)(dfd), (long)(filename), \
+#define __sanitizer_syscall_pre_mknodat(dfd, filename, mode, dev)              \
+  __sanitizer_syscall_pre_impl_mknodat((long)(dfd), (long)(filename),          \
                                        (long)(mode), (long)(dev))
-#define __sanitizer_syscall_post_mknodat(res, dfd, filename, mode, dev)     \
-  __sanitizer_syscall_post_impl_mknodat(res, (long)(dfd), (long)(filename), \
+#define __sanitizer_syscall_post_mknodat(res, dfd, filename, mode, dev)        \
+  __sanitizer_syscall_post_impl_mknodat(res, (long)(dfd), (long)(filename),    \
                                         (long)(mode), (long)(dev))
-#define __sanitizer_syscall_pre_mkdirat(dfd, pathname, mode)          \
-  __sanitizer_syscall_pre_impl_mkdirat((long)(dfd), (long)(pathname), \
+#define __sanitizer_syscall_pre_mkdirat(dfd, pathname, mode)                   \
+  __sanitizer_syscall_pre_impl_mkdirat((long)(dfd), (long)(pathname),          \
                                        (long)(mode))
-#define __sanitizer_syscall_post_mkdirat(res, dfd, pathname, mode)          \
-  __sanitizer_syscall_post_impl_mkdirat(res, (long)(dfd), (long)(pathname), \
+#define __sanitizer_syscall_post_mkdirat(res, dfd, pathname, mode)             \
+  __sanitizer_syscall_post_impl_mkdirat(res, (long)(dfd), (long)(pathname),    \
                                         (long)(mode))
-#define __sanitizer_syscall_pre_unlinkat(dfd, pathname, flag)          \
-  __sanitizer_syscall_pre_impl_unlinkat((long)(dfd), (long)(pathname), \
+#define __sanitizer_syscall_pre_unlinkat(dfd, pathname, flag)                  \
+  __sanitizer_syscall_pre_impl_unlinkat((long)(dfd), (long)(pathname),         \
                                         (long)(flag))
-#define __sanitizer_syscall_post_unlinkat(res, dfd, pathname, flag)          \
-  __sanitizer_syscall_post_impl_unlinkat(res, (long)(dfd), (long)(pathname), \
+#define __sanitizer_syscall_post_unlinkat(res, dfd, pathname, flag)            \
+  __sanitizer_syscall_post_impl_unlinkat(res, (long)(dfd), (long)(pathname),   \
                                          (long)(flag))
-#define __sanitizer_syscall_pre_symlinkat(oldname, newdfd, newname)       \
-  __sanitizer_syscall_pre_impl_symlinkat((long)(oldname), (long)(newdfd), \
+#define __sanitizer_syscall_pre_symlinkat(oldname, newdfd, newname)            \
+  __sanitizer_syscall_pre_impl_symlinkat((long)(oldname), (long)(newdfd),      \
                                          (long)(newname))
-#define __sanitizer_syscall_post_symlinkat(res, oldname, newdfd, newname) \
-  __sanitizer_syscall_post_impl_symlinkat(res, (long)(oldname),           \
+#define __sanitizer_syscall_post_symlinkat(res, oldname, newdfd, newname)      \
+  __sanitizer_syscall_post_impl_symlinkat(res, (long)(oldname),                \
                                           (long)(newdfd), (long)(newname))
-#define __sanitizer_syscall_pre_linkat(olddfd, oldname, newdfd, newname, \
-                                       flags)                            \
-  __sanitizer_syscall_pre_impl_linkat((long)(olddfd), (long)(oldname),   \
-                                      (long)(newdfd), (long)(newname),   \
+#define __sanitizer_syscall_pre_linkat(olddfd, oldname, newdfd, newname,       \
+                                       flags)                                  \
+  __sanitizer_syscall_pre_impl_linkat((long)(olddfd), (long)(oldname),         \
+                                      (long)(newdfd), (long)(newname),         \
                                       (long)(flags))
 #define __sanitizer_syscall_post_linkat(res, olddfd, oldname, newdfd, newname, \
                                         flags)                                 \
   __sanitizer_syscall_post_impl_linkat(res, (long)(olddfd), (long)(oldname),   \
                                        (long)(newdfd), (long)(newname),        \
                                        (long)(flags))
-#define __sanitizer_syscall_pre_renameat(olddfd, oldname, newdfd, newname) \
-  __sanitizer_syscall_pre_impl_renameat((long)(olddfd), (long)(oldname),   \
+#define __sanitizer_syscall_pre_renameat(olddfd, oldname, newdfd, newname)     \
+  __sanitizer_syscall_pre_impl_renameat((long)(olddfd), (long)(oldname),       \
                                         (long)(newdfd), (long)(newname))
 #define __sanitizer_syscall_post_renameat(res, olddfd, oldname, newdfd,        \
                                           newname)                             \
   __sanitizer_syscall_post_impl_renameat(res, (long)(olddfd), (long)(oldname), \
                                          (long)(newdfd), (long)(newname))
-#define __sanitizer_syscall_pre_futimesat(dfd, filename, utimes)        \
-  __sanitizer_syscall_pre_impl_futimesat((long)(dfd), (long)(filename), \
+#define __sanitizer_syscall_pre_futimesat(dfd, filename, utimes)               \
+  __sanitizer_syscall_pre_impl_futimesat((long)(dfd), (long)(filename),        \
                                          (long)(utimes))
-#define __sanitizer_syscall_post_futimesat(res, dfd, filename, utimes)        \
-  __sanitizer_syscall_post_impl_futimesat(res, (long)(dfd), (long)(filename), \
+#define __sanitizer_syscall_post_futimesat(res, dfd, filename, utimes)         \
+  __sanitizer_syscall_post_impl_futimesat(res, (long)(dfd), (long)(filename),  \
                                           (long)(utimes))
-#define __sanitizer_syscall_pre_faccessat(dfd, filename, mode)          \
-  __sanitizer_syscall_pre_impl_faccessat((long)(dfd), (long)(filename), \
+#define __sanitizer_syscall_pre_faccessat(dfd, filename, mode)                 \
+  __sanitizer_syscall_pre_impl_faccessat((long)(dfd), (long)(filename),        \
                                          (long)(mode))
-#define __sanitizer_syscall_post_faccessat(res, dfd, filename, mode)          \
-  __sanitizer_syscall_post_impl_faccessat(res, (long)(dfd), (long)(filename), \
+#define __sanitizer_syscall_post_faccessat(res, dfd, filename, mode)           \
+  __sanitizer_syscall_post_impl_faccessat(res, (long)(dfd), (long)(filename),  \
                                           (long)(mode))
-#define __sanitizer_syscall_pre_fchmodat(dfd, filename, mode)          \
-  __sanitizer_syscall_pre_impl_fchmodat((long)(dfd), (long)(filename), \
+#define __sanitizer_syscall_pre_fchmodat(dfd, filename, mode)                  \
+  __sanitizer_syscall_pre_impl_fchmodat((long)(dfd), (long)(filename),         \
                                         (long)(mode))
-#define __sanitizer_syscall_post_fchmodat(res, dfd, filename, mode)          \
-  __sanitizer_syscall_post_impl_fchmodat(res, (long)(dfd), (long)(filename), \
+#define __sanitizer_syscall_post_fchmodat(res, dfd, filename, mode)            \
+  __sanitizer_syscall_post_impl_fchmodat(res, (long)(dfd), (long)(filename),   \
                                          (long)(mode))
-#define __sanitizer_syscall_pre_fchownat(dfd, filename, user, group, flag) \
-  __sanitizer_syscall_pre_impl_fchownat((long)(dfd), (long)(filename),     \
-                                        (long)(user), (long)(group),       \
+#define __sanitizer_syscall_pre_fchownat(dfd, filename, user, group, flag)     \
+  __sanitizer_syscall_pre_impl_fchownat((long)(dfd), (long)(filename),         \
+                                        (long)(user), (long)(group),           \
                                         (long)(flag))
-#define __sanitizer_syscall_post_fchownat(res, dfd, filename, user, group,   \
-                                          flag)                              \
-  __sanitizer_syscall_post_impl_fchownat(res, (long)(dfd), (long)(filename), \
-                                         (long)(user), (long)(group),        \
+#define __sanitizer_syscall_post_fchownat(res, dfd, filename, user, group,     \
+                                          flag)                                \
+  __sanitizer_syscall_post_impl_fchownat(res, (long)(dfd), (long)(filename),   \
+                                         (long)(user), (long)(group),          \
                                          (long)(flag))
-#define __sanitizer_syscall_pre_openat(dfd, filename, flags, mode)   \
-  __sanitizer_syscall_pre_impl_openat((long)(dfd), (long)(filename), \
+#define __sanitizer_syscall_pre_openat(dfd, filename, flags, mode)             \
+  __sanitizer_syscall_pre_impl_openat((long)(dfd), (long)(filename),           \
                                       (long)(flags), (long)(mode))
-#define __sanitizer_syscall_post_openat(res, dfd, filename, flags, mode)   \
-  __sanitizer_syscall_post_impl_openat(res, (long)(dfd), (long)(filename), \
+#define __sanitizer_syscall_post_openat(res, dfd, filename, flags, mode)       \
+  __sanitizer_syscall_post_impl_openat(res, (long)(dfd), (long)(filename),     \
                                        (long)(flags), (long)(mode))
-#define __sanitizer_syscall_pre_newfstatat(dfd, filename, statbuf, flag) \
-  __sanitizer_syscall_pre_impl_newfstatat((long)(dfd), (long)(filename), \
+#define __sanitizer_syscall_pre_newfstatat(dfd, filename, statbuf, flag)       \
+  __sanitizer_syscall_pre_impl_newfstatat((long)(dfd), (long)(filename),       \
                                           (long)(statbuf), (long)(flag))
 #define __sanitizer_syscall_post_newfstatat(res, dfd, filename, statbuf, flag) \
   __sanitizer_syscall_post_impl_newfstatat(res, (long)(dfd), (long)(filename), \
                                            (long)(statbuf), (long)(flag))
-#define __sanitizer_syscall_pre_fstatat64(dfd, filename, statbuf, flag) \
-  __sanitizer_syscall_pre_impl_fstatat64((long)(dfd), (long)(filename), \
+#define __sanitizer_syscall_pre_fstatat64(dfd, filename, statbuf, flag)        \
+  __sanitizer_syscall_pre_impl_fstatat64((long)(dfd), (long)(filename),        \
                                          (long)(statbuf), (long)(flag))
-#define __sanitizer_syscall_post_fstatat64(res, dfd, filename, statbuf, flag) \
-  __sanitizer_syscall_post_impl_fstatat64(res, (long)(dfd), (long)(filename), \
+#define __sanitizer_syscall_post_fstatat64(res, dfd, filename, statbuf, flag)  \
+  __sanitizer_syscall_post_impl_fstatat64(res, (long)(dfd), (long)(filename),  \
                                           (long)(statbuf), (long)(flag))
-#define __sanitizer_syscall_pre_readlinkat(dfd, path, buf, bufsiz)   \
-  __sanitizer_syscall_pre_impl_readlinkat((long)(dfd), (long)(path), \
+#define __sanitizer_syscall_pre_readlinkat(dfd, path, buf, bufsiz)             \
+  __sanitizer_syscall_pre_impl_readlinkat((long)(dfd), (long)(path),           \
                                           (long)(buf), (long)(bufsiz))
-#define __sanitizer_syscall_post_readlinkat(res, dfd, path, buf, bufsiz)   \
-  __sanitizer_syscall_post_impl_readlinkat(res, (long)(dfd), (long)(path), \
+#define __sanitizer_syscall_post_readlinkat(res, dfd, path, buf, bufsiz)       \
+  __sanitizer_syscall_post_impl_readlinkat(res, (long)(dfd), (long)(path),     \
                                            (long)(buf), (long)(bufsiz))
-#define __sanitizer_syscall_pre_utimensat(dfd, filename, utimes, flags) \
-  __sanitizer_syscall_pre_impl_utimensat((long)(dfd), (long)(filename), \
+#define __sanitizer_syscall_pre_utimensat(dfd, filename, utimes, flags)        \
+  __sanitizer_syscall_pre_impl_utimensat((long)(dfd), (long)(filename),        \
                                          (long)(utimes), (long)(flags))
-#define __sanitizer_syscall_post_utimensat(res, dfd, filename, utimes, flags) \
-  __sanitizer_syscall_post_impl_utimensat(res, (long)(dfd), (long)(filename), \
+#define __sanitizer_syscall_post_utimensat(res, dfd, filename, utimes, flags)  \
+  __sanitizer_syscall_post_impl_utimensat(res, (long)(dfd), (long)(filename),  \
                                           (long)(utimes), (long)(flags))
-#define __sanitizer_syscall_pre_unshare(unshare_flags) \
+#define __sanitizer_syscall_pre_unshare(unshare_flags)                         \
   __sanitizer_syscall_pre_impl_unshare((long)(unshare_flags))
-#define __sanitizer_syscall_post_unshare(res, unshare_flags) \
+#define __sanitizer_syscall_post_unshare(res, unshare_flags)                   \
   __sanitizer_syscall_post_impl_unshare(res, (long)(unshare_flags))
-#define __sanitizer_syscall_pre_splice(fd_in, off_in, fd_out, off_out, len, \
-                                       flags)                               \
-  __sanitizer_syscall_pre_impl_splice((long)(fd_in), (long)(off_in),        \
-                                      (long)(fd_out), (long)(off_out),      \
+#define __sanitizer_syscall_pre_splice(fd_in, off_in, fd_out, off_out, len,    \
+                                       flags)                                  \
+  __sanitizer_syscall_pre_impl_splice((long)(fd_in), (long)(off_in),           \
+                                      (long)(fd_out), (long)(off_out),         \
                                       (long)(len), (long)(flags))
-#define __sanitizer_syscall_post_splice(res, fd_in, off_in, fd_out, off_out, \
-                                        len, flags)                          \
-  __sanitizer_syscall_post_impl_splice(res, (long)(fd_in), (long)(off_in),   \
-                                       (long)(fd_out), (long)(off_out),      \
+#define __sanitizer_syscall_post_splice(res, fd_in, off_in, fd_out, off_out,   \
+                                        len, flags)                            \
+  __sanitizer_syscall_post_impl_splice(res, (long)(fd_in), (long)(off_in),     \
+                                       (long)(fd_out), (long)(off_out),        \
                                        (long)(len), (long)(flags))
-#define __sanitizer_syscall_pre_vmsplice(fd, iov, nr_segs, flags) \
-  __sanitizer_syscall_pre_impl_vmsplice((long)(fd), (long)(iov),  \
+#define __sanitizer_syscall_pre_vmsplice(fd, iov, nr_segs, flags)              \
+  __sanitizer_syscall_pre_impl_vmsplice((long)(fd), (long)(iov),               \
                                         (long)(nr_segs), (long)(flags))
-#define __sanitizer_syscall_post_vmsplice(res, fd, iov, nr_segs, flags) \
-  __sanitizer_syscall_post_impl_vmsplice(res, (long)(fd), (long)(iov),  \
+#define __sanitizer_syscall_post_vmsplice(res, fd, iov, nr_segs, flags)        \
+  __sanitizer_syscall_post_impl_vmsplice(res, (long)(fd), (long)(iov),         \
                                          (long)(nr_segs), (long)(flags))
-#define __sanitizer_syscall_pre_tee(fdin, fdout, len, flags)                 \
-  __sanitizer_syscall_pre_impl_tee((long)(fdin), (long)(fdout), (long)(len), \
+#define __sanitizer_syscall_pre_tee(fdin, fdout, len, flags)                   \
+  __sanitizer_syscall_pre_impl_tee((long)(fdin), (long)(fdout), (long)(len),   \
                                    (long)(flags))
-#define __sanitizer_syscall_post_tee(res, fdin, fdout, len, flags)    \
-  __sanitizer_syscall_post_impl_tee(res, (long)(fdin), (long)(fdout), \
+#define __sanitizer_syscall_post_tee(res, fdin, fdout, len, flags)             \
+  __sanitizer_syscall_post_impl_tee(res, (long)(fdin), (long)(fdout),          \
                                     (long)(len), (long)(flags))
-#define __sanitizer_syscall_pre_get_robust_list(pid, head_ptr, len_ptr)       \
-  __sanitizer_syscall_pre_impl_get_robust_list((long)(pid), (long)(head_ptr), \
+#define __sanitizer_syscall_pre_get_robust_list(pid, head_ptr, len_ptr)        \
+  __sanitizer_syscall_pre_impl_get_robust_list((long)(pid), (long)(head_ptr),  \
                                                (long)(len_ptr))
-#define __sanitizer_syscall_post_get_robust_list(res, pid, head_ptr, len_ptr) \
-  __sanitizer_syscall_post_impl_get_robust_list(                              \
+#define __sanitizer_syscall_post_get_robust_list(res, pid, head_ptr, len_ptr)  \
+  __sanitizer_syscall_post_impl_get_robust_list(                               \
       res, (long)(pid), (long)(head_ptr), (long)(len_ptr))
-#define __sanitizer_syscall_pre_set_robust_list(head, len) \
+#define __sanitizer_syscall_pre_set_robust_list(head, len)                     \
   __sanitizer_syscall_pre_impl_set_robust_list((long)(head), (long)(len))
-#define __sanitizer_syscall_post_set_robust_list(res, head, len) \
+#define __sanitizer_syscall_post_set_robust_list(res, head, len)               \
   __sanitizer_syscall_post_impl_set_robust_list(res, (long)(head), (long)(len))
-#define __sanitizer_syscall_pre_getcpu(cpu, node, cache) \
+#define __sanitizer_syscall_pre_getcpu(cpu, node, cache)                       \
   __sanitizer_syscall_pre_impl_getcpu((long)(cpu), (long)(node), (long)(cache))
-#define __sanitizer_syscall_post_getcpu(res, cpu, node, cache)         \
-  __sanitizer_syscall_post_impl_getcpu(res, (long)(cpu), (long)(node), \
+#define __sanitizer_syscall_post_getcpu(res, cpu, node, cache)                 \
+  __sanitizer_syscall_post_impl_getcpu(res, (long)(cpu), (long)(node),         \
                                        (long)(cache))
-#define __sanitizer_syscall_pre_signalfd(ufd, user_mask, sizemask)      \
-  __sanitizer_syscall_pre_impl_signalfd((long)(ufd), (long)(user_mask), \
+#define __sanitizer_syscall_pre_signalfd(ufd, user_mask, sizemask)             \
+  __sanitizer_syscall_pre_impl_signalfd((long)(ufd), (long)(user_mask),        \
                                         (long)(sizemask))
-#define __sanitizer_syscall_post_signalfd(res, ufd, user_mask, sizemask)      \
-  __sanitizer_syscall_post_impl_signalfd(res, (long)(ufd), (long)(user_mask), \
+#define __sanitizer_syscall_post_signalfd(res, ufd, user_mask, sizemask)       \
+  __sanitizer_syscall_post_impl_signalfd(res, (long)(ufd), (long)(user_mask),  \
                                          (long)(sizemask))
-#define __sanitizer_syscall_pre_signalfd4(ufd, user_mask, sizemask, flags) \
-  __sanitizer_syscall_pre_impl_signalfd4((long)(ufd), (long)(user_mask),   \
+#define __sanitizer_syscall_pre_signalfd4(ufd, user_mask, sizemask, flags)     \
+  __sanitizer_syscall_pre_impl_signalfd4((long)(ufd), (long)(user_mask),       \
                                          (long)(sizemask), (long)(flags))
 #define __sanitizer_syscall_post_signalfd4(res, ufd, user_mask, sizemask,      \
                                            flags)                              \
   __sanitizer_syscall_post_impl_signalfd4(res, (long)(ufd), (long)(user_mask), \
                                           (long)(sizemask), (long)(flags))
-#define __sanitizer_syscall_pre_timerfd_create(clockid, flags) \
+#define __sanitizer_syscall_pre_timerfd_create(clockid, flags)                 \
   __sanitizer_syscall_pre_impl_timerfd_create((long)(clockid), (long)(flags))
-#define __sanitizer_syscall_post_timerfd_create(res, clockid, flags) \
-  __sanitizer_syscall_post_impl_timerfd_create(res, (long)(clockid), \
+#define __sanitizer_syscall_post_timerfd_create(res, clockid, flags)           \
+  __sanitizer_syscall_post_impl_timerfd_create(res, (long)(clockid),           \
                                                (long)(flags))
-#define __sanitizer_syscall_pre_timerfd_settime(ufd, flags, utmr, otmr)    \
-  __sanitizer_syscall_pre_impl_timerfd_settime((long)(ufd), (long)(flags), \
+#define __sanitizer_syscall_pre_timerfd_settime(ufd, flags, utmr, otmr)        \
+  __sanitizer_syscall_pre_impl_timerfd_settime((long)(ufd), (long)(flags),     \
                                                (long)(utmr), (long)(otmr))
-#define __sanitizer_syscall_post_timerfd_settime(res, ufd, flags, utmr, otmr) \
-  __sanitizer_syscall_post_impl_timerfd_settime(                              \
+#define __sanitizer_syscall_post_timerfd_settime(res, ufd, flags, utmr, otmr)  \
+  __sanitizer_syscall_post_impl_timerfd_settime(                               \
       res, (long)(ufd), (long)(flags), (long)(utmr), (long)(otmr))
-#define __sanitizer_syscall_pre_timerfd_gettime(ufd, otmr) \
+#define __sanitizer_syscall_pre_timerfd_gettime(ufd, otmr)                     \
   __sanitizer_syscall_pre_impl_timerfd_gettime((long)(ufd), (long)(otmr))
-#define __sanitizer_syscall_post_timerfd_gettime(res, ufd, otmr) \
+#define __sanitizer_syscall_post_timerfd_gettime(res, ufd, otmr)               \
   __sanitizer_syscall_post_impl_timerfd_gettime(res, (long)(ufd), (long)(otmr))
-#define __sanitizer_syscall_pre_eventfd(count) \
+#define __sanitizer_syscall_pre_eventfd(count)                                 \
   __sanitizer_syscall_pre_impl_eventfd((long)(count))
-#define __sanitizer_syscall_post_eventfd(res, count) \
+#define __sanitizer_syscall_post_eventfd(res, count)                           \
   __sanitizer_syscall_post_impl_eventfd(res, (long)(count))
-#define __sanitizer_syscall_pre_eventfd2(count, flags) \
+#define __sanitizer_syscall_pre_eventfd2(count, flags)                         \
   __sanitizer_syscall_pre_impl_eventfd2((long)(count), (long)(flags))
-#define __sanitizer_syscall_post_eventfd2(res, count, flags) \
+#define __sanitizer_syscall_post_eventfd2(res, count, flags)                   \
   __sanitizer_syscall_post_impl_eventfd2(res, (long)(count), (long)(flags))
-#define __sanitizer_syscall_pre_old_readdir(arg0, arg1, arg2)          \
-  __sanitizer_syscall_pre_impl_old_readdir((long)(arg0), (long)(arg1), \
+#define __sanitizer_syscall_pre_old_readdir(arg0, arg1, arg2)                  \
+  __sanitizer_syscall_pre_impl_old_readdir((long)(arg0), (long)(arg1),         \
                                            (long)(arg2))
-#define __sanitizer_syscall_post_old_readdir(res, arg0, arg1, arg2)          \
-  __sanitizer_syscall_post_impl_old_readdir(res, (long)(arg0), (long)(arg1), \
+#define __sanitizer_syscall_post_old_readdir(res, arg0, arg1, arg2)            \
+  __sanitizer_syscall_post_impl_old_readdir(res, (long)(arg0), (long)(arg1),   \
                                             (long)(arg2))
-#define __sanitizer_syscall_pre_pselect6(arg0, arg1, arg2, arg3, arg4, arg5) \
-  __sanitizer_syscall_pre_impl_pselect6((long)(arg0), (long)(arg1),          \
-                                        (long)(arg2), (long)(arg3),          \
+#define __sanitizer_syscall_pre_pselect6(arg0, arg1, arg2, arg3, arg4, arg5)   \
+  __sanitizer_syscall_pre_impl_pselect6((long)(arg0), (long)(arg1),            \
+                                        (long)(arg2), (long)(arg3),            \
                                         (long)(arg4), (long)(arg5))
-#define __sanitizer_syscall_post_pselect6(res, arg0, arg1, arg2, arg3, arg4, \
-                                          arg5)                              \
-  __sanitizer_syscall_post_impl_pselect6(res, (long)(arg0), (long)(arg1),    \
-                                         (long)(arg2), (long)(arg3),         \
+#define __sanitizer_syscall_post_pselect6(res, arg0, arg1, arg2, arg3, arg4,   \
+                                          arg5)                                \
+  __sanitizer_syscall_post_impl_pselect6(res, (long)(arg0), (long)(arg1),      \
+                                         (long)(arg2), (long)(arg3),           \
                                          (long)(arg4), (long)(arg5))
 #define __sanitizer_syscall_pre_ppoll(arg0, arg1, arg2, arg3, arg4)            \
   __sanitizer_syscall_pre_impl_ppoll((long)(arg0), (long)(arg1), (long)(arg2), \
                                      (long)(arg3), (long)(arg4))
-#define __sanitizer_syscall_post_ppoll(res, arg0, arg1, arg2, arg3, arg4) \
-  __sanitizer_syscall_post_impl_ppoll(res, (long)(arg0), (long)(arg1),    \
-                                      (long)(arg2), (long)(arg3),         \
+#define __sanitizer_syscall_post_ppoll(res, arg0, arg1, arg2, arg3, arg4)      \
+  __sanitizer_syscall_post_impl_ppoll(res, (long)(arg0), (long)(arg1),         \
+                                      (long)(arg2), (long)(arg3),              \
                                       (long)(arg4))
-#define __sanitizer_syscall_pre_syncfs(fd) \
+#define __sanitizer_syscall_pre_syncfs(fd)                                     \
   __sanitizer_syscall_pre_impl_syncfs((long)(fd))
-#define __sanitizer_syscall_post_syncfs(res, fd) \
+#define __sanitizer_syscall_post_syncfs(res, fd)                               \
   __sanitizer_syscall_post_impl_syncfs(res, (long)(fd))
 #define __sanitizer_syscall_pre_perf_event_open(attr_uptr, pid, cpu, group_fd, \
                                                 flags)                         \
   __sanitizer_syscall_pre_impl_perf_event_open((long)(attr_uptr), (long)(pid), \
                                                (long)(cpu), (long)(group_fd),  \
                                                (long)(flags))
-#define __sanitizer_syscall_post_perf_event_open(res, attr_uptr, pid, cpu, \
-                                                 group_fd, flags)          \
-  __sanitizer_syscall_post_impl_perf_event_open(                           \
-      res, (long)(attr_uptr), (long)(pid), (long)(cpu), (long)(group_fd),  \
+#define __sanitizer_syscall_post_perf_event_open(res, attr_uptr, pid, cpu,     \
+                                                 group_fd, flags)              \
+  __sanitizer_syscall_post_impl_perf_event_open(                               \
+      res, (long)(attr_uptr), (long)(pid), (long)(cpu), (long)(group_fd),      \
       (long)(flags))
-#define __sanitizer_syscall_pre_mmap_pgoff(addr, len, prot, flags, fd, pgoff) \
-  __sanitizer_syscall_pre_impl_mmap_pgoff((long)(addr), (long)(len),          \
-                                          (long)(prot), (long)(flags),        \
+#define __sanitizer_syscall_pre_mmap_pgoff(addr, len, prot, flags, fd, pgoff)  \
+  __sanitizer_syscall_pre_impl_mmap_pgoff((long)(addr), (long)(len),           \
+                                          (long)(prot), (long)(flags),         \
                                           (long)(fd), (long)(pgoff))
-#define __sanitizer_syscall_post_mmap_pgoff(res, addr, len, prot, flags, fd, \
-                                            pgoff)                           \
-  __sanitizer_syscall_post_impl_mmap_pgoff(res, (long)(addr), (long)(len),   \
-                                           (long)(prot), (long)(flags),      \
+#define __sanitizer_syscall_post_mmap_pgoff(res, addr, len, prot, flags, fd,   \
+                                            pgoff)                             \
+  __sanitizer_syscall_post_impl_mmap_pgoff(res, (long)(addr), (long)(len),     \
+                                           (long)(prot), (long)(flags),        \
                                            (long)(fd), (long)(pgoff))
-#define __sanitizer_syscall_pre_old_mmap(arg) \
+#define __sanitizer_syscall_pre_old_mmap(arg)                                  \
   __sanitizer_syscall_pre_impl_old_mmap((long)(arg))
-#define __sanitizer_syscall_post_old_mmap(res, arg) \
+#define __sanitizer_syscall_post_old_mmap(res, arg)                            \
   __sanitizer_syscall_post_impl_old_mmap(res, (long)(arg))
-#define __sanitizer_syscall_pre_name_to_handle_at(dfd, name, handle, mnt_id, \
-                                                  flag)                      \
-  __sanitizer_syscall_pre_impl_name_to_handle_at(                            \
+#define __sanitizer_syscall_pre_name_to_handle_at(dfd, name, handle, mnt_id,   \
+                                                  flag)                        \
+  __sanitizer_syscall_pre_impl_name_to_handle_at(                              \
       (long)(dfd), (long)(name), (long)(handle), (long)(mnt_id), (long)(flag))
-#define __sanitizer_syscall_post_name_to_handle_at(res, dfd, name, handle, \
-                                                   mnt_id, flag)           \
-  __sanitizer_syscall_post_impl_name_to_handle_at(                         \
-      res, (long)(dfd), (long)(name), (long)(handle), (long)(mnt_id),      \
+#define __sanitizer_syscall_post_name_to_handle_at(res, dfd, name, handle,     \
+                                                   mnt_id, flag)               \
+  __sanitizer_syscall_post_impl_name_to_handle_at(                             \
+      res, (long)(dfd), (long)(name), (long)(handle), (long)(mnt_id),          \
       (long)(flag))
-#define __sanitizer_syscall_pre_open_by_handle_at(mountdirfd, handle, flags) \
-  __sanitizer_syscall_pre_impl_open_by_handle_at(                            \
+#define __sanitizer_syscall_pre_open_by_handle_at(mountdirfd, handle, flags)   \
+  __sanitizer_syscall_pre_impl_open_by_handle_at(                              \
       (long)(mountdirfd), (long)(handle), (long)(flags))
-#define __sanitizer_syscall_post_open_by_handle_at(res, mountdirfd, handle, \
-                                                   flags)                   \
-  __sanitizer_syscall_post_impl_open_by_handle_at(                          \
+#define __sanitizer_syscall_post_open_by_handle_at(res, mountdirfd, handle,    \
+                                                   flags)                      \
+  __sanitizer_syscall_post_impl_open_by_handle_at(                             \
       res, (long)(mountdirfd), (long)(handle), (long)(flags))
-#define __sanitizer_syscall_pre_setns(fd, nstype) \
+#define __sanitizer_syscall_pre_setns(fd, nstype)                              \
   __sanitizer_syscall_pre_impl_setns((long)(fd), (long)(nstype))
-#define __sanitizer_syscall_post_setns(res, fd, nstype) \
+#define __sanitizer_syscall_post_setns(res, fd, nstype)                        \
   __sanitizer_syscall_post_impl_setns(res, (long)(fd), (long)(nstype))
-#define __sanitizer_syscall_pre_process_vm_readv(pid, lvec, liovcnt, rvec, \
-                                                 riovcnt, flags)           \
-  __sanitizer_syscall_pre_impl_process_vm_readv(                           \
-      (long)(pid), (long)(lvec), (long)(liovcnt), (long)(rvec),            \
+#define __sanitizer_syscall_pre_process_vm_readv(pid, lvec, liovcnt, rvec,     \
+                                                 riovcnt, flags)               \
+  __sanitizer_syscall_pre_impl_process_vm_readv(                               \
+      (long)(pid), (long)(lvec), (long)(liovcnt), (long)(rvec),                \
       (long)(riovcnt), (long)(flags))
-#define __sanitizer_syscall_post_process_vm_readv(res, pid, lvec, liovcnt, \
-                                                  rvec, riovcnt, flags)    \
-  __sanitizer_syscall_post_impl_process_vm_readv(                          \
-      res, (long)(pid), (long)(lvec), (long)(liovcnt), (long)(rvec),       \
+#define __sanitizer_syscall_post_process_vm_readv(res, pid, lvec, liovcnt,     \
+                                                  rvec, riovcnt, flags)        \
+  __sanitizer_syscall_post_impl_process_vm_readv(                              \
+      res, (long)(pid), (long)(lvec), (long)(liovcnt), (long)(rvec),           \
       (long)(riovcnt), (long)(flags))
-#define __sanitizer_syscall_pre_process_vm_writev(pid, lvec, liovcnt, rvec, \
-                                                  riovcnt, flags)           \
-  __sanitizer_syscall_pre_impl_process_vm_writev(                           \
-      (long)(pid), (long)(lvec), (long)(liovcnt), (long)(rvec),             \
+#define __sanitizer_syscall_pre_process_vm_writev(pid, lvec, liovcnt, rvec,    \
+                                                  riovcnt, flags)              \
+  __sanitizer_syscall_pre_impl_process_vm_writev(                              \
+      (long)(pid), (long)(lvec), (long)(liovcnt), (long)(rvec),                \
       (long)(riovcnt), (long)(flags))
-#define __sanitizer_syscall_post_process_vm_writev(res, pid, lvec, liovcnt, \
-                                                   rvec, riovcnt, flags)    \
-  __sanitizer_syscall_post_impl_process_vm_writev(                          \
-      res, (long)(pid), (long)(lvec), (long)(liovcnt), (long)(rvec),        \
+#define __sanitizer_syscall_post_process_vm_writev(res, pid, lvec, liovcnt,    \
+                                                   rvec, riovcnt, flags)       \
+  __sanitizer_syscall_post_impl_process_vm_writev(                             \
+      res, (long)(pid), (long)(lvec), (long)(liovcnt), (long)(rvec),           \
       (long)(riovcnt), (long)(flags))
-#define __sanitizer_syscall_pre_fork() \
-  __sanitizer_syscall_pre_impl_fork()
-#define __sanitizer_syscall_post_fork(res) \
+#define __sanitizer_syscall_pre_fork() __sanitizer_syscall_pre_impl_fork()
+#define __sanitizer_syscall_post_fork(res)                                     \
   __sanitizer_syscall_post_impl_fork(res)
-#define __sanitizer_syscall_pre_vfork() \
-  __sanitizer_syscall_pre_impl_vfork()
-#define __sanitizer_syscall_post_vfork(res) \
+#define __sanitizer_syscall_pre_vfork() __sanitizer_syscall_pre_impl_vfork()
+#define __sanitizer_syscall_post_vfork(res)                                    \
   __sanitizer_syscall_post_impl_vfork(res)
 #define __sanitizer_syscall_pre_sigaction(signum, act, oldact)                 \
   __sanitizer_syscall_pre_impl_sigaction((long)signum, (long)act, (long)oldact)
@@ -2699,6 +2706,13 @@ void __sanitizer_syscall_pre_impl_epoll_pwait(long epfd, long events,
 void __sanitizer_syscall_post_impl_epoll_pwait(long res, long epfd, long events,
                                                long maxevents, long timeout,
                                                long sigmask, long sigsetsize);
+void __sanitizer_syscall_pre_impl_epoll_pwait2(long epfd, long events,
+                                               long maxevents, long timeout,
+                                               long sigmask, long sigsetsize);
+void __sanitizer_syscall_post_impl_epoll_pwait2(long res, long epfd,
+                                                long events, long maxevents,
+                                                long timeout, long sigmask,
+                                                long sigsetsize);
 void __sanitizer_syscall_pre_impl_gethostname(long name, long len);
 void __sanitizer_syscall_post_impl_gethostname(long res, long name, long len);
 void __sanitizer_syscall_pre_impl_sethostname(long name, long len);
@@ -3080,7 +3094,7 @@ void __sanitizer_syscall_post_impl_rt_sigaction(long res, long signum, long act,
 void __sanitizer_syscall_pre_impl_sigaltstack(long ss, long oss);
 void __sanitizer_syscall_post_impl_sigaltstack(long res, long ss, long oss);
 #ifdef __cplusplus
-}  // extern "C"
+} // extern "C"
 #endif
 
-#endif  // SANITIZER_LINUX_SYSCALL_HOOKS_H
+#endif // SANITIZER_LINUX_SYSCALL_HOOKS_H
index 565aa391a9fac0a97fc06576efe603121cf6ee71..2782e61fb8c79d7b1dfd318c4b98ecdbd152ef79 100644 (file)
@@ -169,6 +169,9 @@ void __tsan_on_initialize();
 // if TSan should exit as if issues were detected.
 int __tsan_on_finalize(int failed);
 
+// Release TSan internal memory in a best-effort manner.
+void __tsan_flush_memory();
+
 #ifdef __cplusplus
 }  // extern "C"
 #endif
index 98bc756ae53aa574ba98e04649978640d597b2d3..38b8c058246a251c0a9d5c8e7f962002599bbdfa 100644 (file)
@@ -56,7 +56,7 @@
 //                                      tramp:  jmp QWORD [addr]
 //                                       addr:  .bytes <hook>
 //
-//    Note: <real> is equilavent to <label>.
+//    Note: <real> is equivalent to <label>.
 //
 // 3) HotPatch
 //
@@ -398,8 +398,42 @@ static uptr AllocateMemoryForTrampoline(uptr image_address, size_t size) {
   return allocated_space;
 }
 
+// The following prologues cannot be patched because of the short jump
+// jumping to the patching region.
+
+// ntdll!wcslen in Win11
+//   488bc1          mov     rax,rcx
+//   0fb710          movzx   edx,word ptr [rax]
+//   4883c002        add     rax,2
+//   6685d2          test    dx,dx
+//   75f4            jne     -12
+static const u8 kPrologueWithShortJump1[] = {
+    0x48, 0x8b, 0xc1, 0x0f, 0xb7, 0x10, 0x48, 0x83,
+    0xc0, 0x02, 0x66, 0x85, 0xd2, 0x75, 0xf4,
+};
+
+// ntdll!strrchr in Win11
+//   4c8bc1          mov     r8,rcx
+//   8a01            mov     al,byte ptr [rcx]
+//   48ffc1          inc     rcx
+//   84c0            test    al,al
+//   75f7            jne     -9
+static const u8 kPrologueWithShortJump2[] = {
+    0x4c, 0x8b, 0xc1, 0x8a, 0x01, 0x48, 0xff, 0xc1,
+    0x84, 0xc0, 0x75, 0xf7,
+};
+
 // Returns 0 on error.
 static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
+#if SANITIZER_WINDOWS64
+  if (memcmp((u8*)address, kPrologueWithShortJump1,
+             sizeof(kPrologueWithShortJump1)) == 0 ||
+      memcmp((u8*)address, kPrologueWithShortJump2,
+             sizeof(kPrologueWithShortJump2)) == 0) {
+    return 0;
+  }
+#endif
+
   switch (*(u64*)address) {
     case 0x90909090909006EB:  // stub: jmp over 6 x nop.
       return 8;
@@ -477,6 +511,14 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
     case 0xA1:  // A1 XX XX XX XX XX XX XX XX :
                 //   movabs eax, dword ptr ds:[XXXXXXXX]
       return 9;
+
+    case 0x83:
+      const u8 next_byte = *(u8*)(address + 1);
+      const u8 mod = next_byte >> 6;
+      const u8 rm = next_byte & 7;
+      if (mod == 1 && rm == 4)
+        return 5;  // 83 ModR/M SIB Disp8 Imm8
+                   //   add|or|adc|sbb|and|sub|xor|cmp [r+disp8], imm8
   }
 
   switch (*(u16*)address) {
@@ -493,6 +535,8 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
     case 0x5641:  // push r14
     case 0x5741:  // push r15
     case 0x9066:  // Two-byte NOP
+    case 0xc084:  // test al, al
+    case 0x018a:  // mov al, byte ptr [rcx]
       return 2;
 
     case 0x058B:  // 8B 05 XX XX XX XX : mov eax, dword ptr [XX XX XX XX]
@@ -509,6 +553,7 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
     case 0xd12b48:    // 48 2b d1 : sub rdx, rcx
     case 0x07c1f6:    // f6 c1 07 : test cl, 0x7
     case 0xc98548:    // 48 85 C9 : test rcx, rcx
+    case 0xd28548:    // 48 85 d2 : test rdx, rdx
     case 0xc0854d:    // 4d 85 c0 : test r8, r8
     case 0xc2b60f:    // 0f b6 c2 : movzx eax, dl
     case 0xc03345:    // 45 33 c0 : xor r8d, r8d
@@ -522,6 +567,7 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
     case 0xca2b48:    // 48 2b ca : sub rcx, rdx
     case 0x10b70f:    // 0f b7 10 : movzx edx, WORD PTR [rax]
     case 0xc00b4d:    // 3d 0b c0 : or r8, r8
+    case 0xc08b41:    // 41 8b c0 : mov eax, r8d
     case 0xd18b48:    // 48 8b d1 : mov rdx, rcx
     case 0xdc8b4c:    // 4c 8b dc : mov r11, rsp
     case 0xd18b4c:    // 4c 8b d1 : mov r10, rcx
index 9d763789154fb57a45e7fc5faf2a6d7578cc3d0d..45c6ac406f8a195506517bbc13490aa2104e8d95 100644 (file)
@@ -50,7 +50,7 @@ struct ChunkMetadata {
 };
 
 #if defined(__mips64) || defined(__aarch64__) || defined(__i386__) || \
-    defined(__arm__) || SANITIZER_RISCV64
+    defined(__arm__) || SANITIZER_RISCV64 || defined(__hexagon__)
 template <typename AddressSpaceViewTy>
 struct AP32 {
   static const uptr kSpaceBeg = 0;
index 74400d2e8426d01e99a7832e7bd0c2bc1aec49e3..96a487e037c552c06192a22d792ad735675127b9 100644 (file)
@@ -30,7 +30,7 @@ namespace __lsan {
 
 // This mutex is used to prevent races between DoLeakCheck and IgnoreObject, and
 // also to protect the global list of root regions.
-BlockingMutex global_mutex(LINKER_INITIALIZED);
+Mutex global_mutex;
 
 Flags lsan_flags;
 
@@ -742,7 +742,7 @@ static bool has_reported_leaks = false;
 bool HasReportedLeaks() { return has_reported_leaks; }
 
 void DoLeakCheck() {
-  BlockingMutexLock l(&global_mutex);
+  Lock l(&global_mutex);
   static bool already_done;
   if (already_done) return;
   already_done = true;
@@ -751,7 +751,7 @@ void DoLeakCheck() {
 }
 
 static int DoRecoverableLeakCheck() {
-  BlockingMutexLock l(&global_mutex);
+  Lock l(&global_mutex);
   bool have_leaks = CheckForLeaks();
   return have_leaks ? 1 : 0;
 }
@@ -954,7 +954,7 @@ void __lsan_ignore_object(const void *p) {
     return;
   // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is not
   // locked.
-  BlockingMutexLock l(&global_mutex);
+  Lock l(&global_mutex);
   IgnoreObjectResult res = IgnoreObjectLocked(p);
   if (res == kIgnoreObjectInvalid)
     VReport(1, "__lsan_ignore_object(): no heap object found at %p", p);
@@ -969,7 +969,7 @@ void __lsan_ignore_object(const void *p) {
 SANITIZER_INTERFACE_ATTRIBUTE
 void __lsan_register_root_region(const void *begin, uptr size) {
 #if CAN_SANITIZE_LEAKS
-  BlockingMutexLock l(&global_mutex);
+  Lock l(&global_mutex);
   CHECK(root_regions);
   RootRegion region = {reinterpret_cast<uptr>(begin), size};
   root_regions->push_back(region);
@@ -980,7 +980,7 @@ void __lsan_register_root_region(const void *begin, uptr size) {
 SANITIZER_INTERFACE_ATTRIBUTE
 void __lsan_unregister_root_region(const void *begin, uptr size) {
 #if CAN_SANITIZE_LEAKS
-  BlockingMutexLock l(&global_mutex);
+  Lock l(&global_mutex);
   CHECK(root_regions);
   bool removed = false;
   for (uptr i = 0; i < root_regions->size(); i++) {
index cca33fc359f4ff59b8e9b2db6379afb1fc9b949d..de9ede217fc34b2f39d6ee3291940a0d03f1a7e4 100644 (file)
@@ -14,7 +14,7 @@
 #endif
 
 SANCOV_FLAG(bool, symbolize, true,
-            "If set, converage information will be symbolized by sancov tool "
+            "If set, coverage information will be symbolized by sancov tool "
             "after dumping.")
 
 SANCOV_FLAG(bool, help, false, "Print flags help.")
index 15f81a04350f2270398dd69286dc76e56b4bd8af..73b48cb27ddca6e1b5b2d8299d9301058910fc88 100644 (file)
@@ -56,7 +56,7 @@ class AddrHashMap {
   static const uptr kBucketSize = 3;
 
   struct Bucket {
-    RWMutex          mtx;
+    Mutex mtx;
     atomic_uintptr_t add;
     Cell             cells[kBucketSize];
   };
index b142ee0131b2bfb6d05b24aaeaeafa032d5aa1e2..3710947e78cdc9d3bfb9e0fd55a20b7f6d302cec 100644 (file)
@@ -161,7 +161,7 @@ class SizeClassAllocator64 {
   void ForceReleaseToOS() {
     MemoryMapperT memory_mapper(*this);
     for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
-      BlockingMutexLock l(&GetRegionInfo(class_id)->mutex);
+      Lock l(&GetRegionInfo(class_id)->mutex);
       MaybeReleaseToOS(&memory_mapper, class_id, true /*force*/);
     }
   }
@@ -178,7 +178,7 @@ class SizeClassAllocator64 {
     uptr region_beg = GetRegionBeginBySizeClass(class_id);
     CompactPtrT *free_array = GetFreeArray(region_beg);
 
-    BlockingMutexLock l(&region->mutex);
+    Lock l(&region->mutex);
     uptr old_num_chunks = region->num_freed_chunks;
     uptr new_num_freed_chunks = old_num_chunks + n_chunks;
     // Failure to allocate free array space while releasing memory is non
@@ -204,7 +204,7 @@ class SizeClassAllocator64 {
     uptr region_beg = GetRegionBeginBySizeClass(class_id);
     CompactPtrT *free_array = GetFreeArray(region_beg);
 
-    BlockingMutexLock l(&region->mutex);
+    Lock l(&region->mutex);
 #if SANITIZER_WINDOWS
     /* On Windows unmapping of memory during __sanitizer_purge_allocator is
     explicit and immediate, so unmapped regions must be explicitly mapped back
@@ -282,6 +282,8 @@ class SizeClassAllocator64 {
     CHECK(kMetadataSize);
     uptr class_id = GetSizeClass(p);
     uptr size = ClassIdToSize(class_id);
+    if (!size)
+      return nullptr;
     uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), size);
     uptr region_beg = GetRegionBeginBySizeClass(class_id);
     return reinterpret_cast<void *>(GetMetadataEnd(region_beg) -
@@ -315,7 +317,7 @@ class SizeClassAllocator64 {
     Printf(
         "%s %02zd (%6zd): mapped: %6zdK allocs: %7zd frees: %7zd inuse: %6zd "
         "num_freed_chunks %7zd avail: %6zd rss: %6zdK releases: %6zd "
-        "last released: %6zdK region: 0x%zx\n",
+        "last released: %6lldK region: 0x%zx\n",
         region->exhausted ? "F" : " ", class_id, ClassIdToSize(class_id),
         region->mapped_user >> 10, region->stats.n_allocated,
         region->stats.n_freed, in_use, region->num_freed_chunks, avail_chunks,
@@ -623,7 +625,7 @@ class SizeClassAllocator64 {
 
   static const uptr kRegionSize = kSpaceSize / kNumClassesRounded;
   // FreeArray is the array of free-d chunks (stored as 4-byte offsets).
-  // In the worst case it may reguire kRegionSize/SizeClassMap::kMinSize
+  // In the worst case it may require kRegionSize/SizeClassMap::kMinSize
   // elements, but in reality this will not happen. For simplicity we
   // dedicate 1/8 of the region's virtual space to FreeArray.
   static const uptr kFreeArraySize = kRegionSize / 8;
@@ -665,7 +667,7 @@ class SizeClassAllocator64 {
   };
 
   struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) RegionInfo {
-    BlockingMutex mutex;
+    Mutex mutex;
     uptr num_freed_chunks;  // Number of elements in the freearray.
     uptr mapped_free_array;  // Bytes mapped for freearray.
     uptr allocated_user;  // Bytes allocated for user memory.
index c50d13303edecd77bac4676502892cf6104f20a6..361793f2490ace739145345e8cad8f1dea549632 100644 (file)
@@ -193,13 +193,13 @@ class SizeClassMap {
       uptr cached = MaxCachedHint(s) * s;
       if (i == kBatchClassID)
         d = p = l = 0;
-      Printf("c%02zd => s: %zd diff: +%zd %02zd%% l %zd "
-             "cached: %zd %zd; id %zd\n",
-             i, Size(i), d, p, l, MaxCachedHint(s), cached, ClassID(s));
+      Printf(
+          "c%02zu => s: %zu diff: +%zu %02zu%% l %zu cached: %zu %zu; id %zu\n",
+          i, Size(i), d, p, l, MaxCachedHint(s), cached, ClassID(s));
       total_cached += cached;
       prev_s = s;
     }
-    Printf("Total cached: %zd\n", total_cached);
+    Printf("Total cached: %zu\n", total_cached);
   }
 
   static void Validate() {
index 803af3285e189b581be1dd28766d3ffb0f21b7df..b544542c26a74d4ea4e78aae5d2a86a18f8750d5 100644 (file)
@@ -6,7 +6,7 @@
 //
 //===----------------------------------------------------------------------===//
 //
-// Various support for assemebler.
+// Various support for assembler.
 //
 //===----------------------------------------------------------------------===//
 
@@ -61,7 +61,7 @@
 #if defined(__ELF__) && (defined(__GNU__) || defined(__FreeBSD__) || \
                          defined(__Fuchsia__) || defined(__linux__))
 // clang-format off
-#define NO_EXEC_STACK_DIRECTIVE .section .note.GNU-stack,"",%progbits  // NOLINT
+#define NO_EXEC_STACK_DIRECTIVE .section .note.GNU-stack,"",%progbits
 // clang-format on
 #else
 #define NO_EXEC_STACK_DIRECTIVE
index 2b39097112d4ac83a36fbd92cd477a8edd955121..f3d3052e5b7c5c22b5171d3c14c5d325618ab1a4 100644 (file)
@@ -18,7 +18,7 @@ namespace __sanitizer {
 
 // MIPS32 does not support atomics > 4 bytes. To address this lack of
 // functionality, the sanitizer library provides helper methods which use an
-// internal spin lock mechanism to emulate atomic oprations when the size is
+// internal spin lock mechanism to emulate atomic operations when the size is
 // 8 bytes.
 static void __spin_lock(volatile int *lock) {
   while (__sync_lock_test_and_set(lock, 1))
index cbdbb0c4c4bd4b3e67d40cdb7f40d1cbc52d4c57..17c29c75046422aef3f20d89750b368edbd6cf0f 100644 (file)
@@ -222,8 +222,8 @@ void CatastrophicErrorWrite(const char *buffer, uptr length);
 void RawWrite(const char *buffer);
 bool ColorizeReports();
 void RemoveANSIEscapeSequencesFromString(char *buffer);
-void Printf(const char *format, ...);
-void Report(const char *format, ...);
+void Printf(const char *format, ...) FORMAT(1, 2);
+void Report(const char *format, ...) FORMAT(1, 2);
 void SetPrintfAndReportCallback(void (*callback)(const char *));
 #define VReport(level, ...)                                              \
   do {                                                                   \
@@ -618,7 +618,7 @@ class InternalScopedString {
     buffer_.resize(1);
     buffer_[0] = '\0';
   }
-  void append(const char *format, ...);
+  void append(const char *format, ...) FORMAT(2, 3);
   const char *data() const { return buffer_.data(); }
   char *data() { return buffer_.data(); }
 
@@ -697,7 +697,8 @@ enum ModuleArch {
   kModuleArchARMV7S,
   kModuleArchARMV7K,
   kModuleArchARM64,
-  kModuleArchRISCV64
+  kModuleArchRISCV64,
+  kModuleArchHexagon
 };
 
 // Sorts and removes duplicates from the container.
@@ -764,6 +765,8 @@ inline const char *ModuleArchToString(ModuleArch arch) {
       return "arm64";
     case kModuleArchRISCV64:
       return "riscv64";
+    case kModuleArchHexagon:
+      return "hexagon";
   }
   CHECK(0 && "Invalid module arch");
   return "";
@@ -1063,17 +1066,10 @@ class ArrayRef {
   T *end_ = nullptr;
 };
 
-#define PRINTF_128(v)                                                         \
-  (*((u8 *)&v + 0)), (*((u8 *)&v + 1)), (*((u8 *)&v + 2)), (*((u8 *)&v + 3)), \
-      (*((u8 *)&v + 4)), (*((u8 *)&v + 5)), (*((u8 *)&v + 6)),                \
-      (*((u8 *)&v + 7)), (*((u8 *)&v + 8)), (*((u8 *)&v + 9)),                \
-      (*((u8 *)&v + 10)), (*((u8 *)&v + 11)), (*((u8 *)&v + 12)),             \
-      (*((u8 *)&v + 13)), (*((u8 *)&v + 14)), (*((u8 *)&v + 15))
-
 }  // namespace __sanitizer
 
 inline void *operator new(__sanitizer::operator_new_size_type size,
-                          __sanitizer::LowLevelAllocator &alloc) {  // NOLINT
+                          __sanitizer::LowLevelAllocator &alloc) {
   return alloc.Allocate(size);
 }
 
index 5ac6cf45fd2ab5c8161d9f4b8a3cabef7aad1cec..9511a3b19a0f790fe0af3fa3be2a7b0ef4483efd 100644 (file)
@@ -204,7 +204,7 @@ extern const short *_tolower_tab_;
 
 #define COMMON_INTERCEPTOR_READ_STRING(ctx, s, n)                   \
     COMMON_INTERCEPTOR_READ_RANGE((ctx), (s),                       \
-      common_flags()->strict_string_checks ? (REAL(strlen)(s)) + 1 : (n) )
+      common_flags()->strict_string_checks ? (internal_strlen(s)) + 1 : (n) )
 
 #ifndef COMMON_INTERCEPTOR_ON_DLOPEN
 #define COMMON_INTERCEPTOR_ON_DLOPEN(filename, flag) \
@@ -435,7 +435,7 @@ INTERCEPTOR(char*, textdomain, const char *domainname) {
   if (domainname) COMMON_INTERCEPTOR_READ_STRING(ctx, domainname, 0);
   char *domain = REAL(textdomain)(domainname);
   if (domain) {
-    COMMON_INTERCEPTOR_INITIALIZE_RANGE(domain, REAL(strlen)(domain) + 1);
+    COMMON_INTERCEPTOR_INITIALIZE_RANGE(domain, internal_strlen(domain) + 1);
   }
   return domain;
 }
@@ -575,8 +575,8 @@ INTERCEPTOR(int, strncasecmp, const char *s1, const char *s2, SIZE_T size) {
 #if SANITIZER_INTERCEPT_STRSTR || SANITIZER_INTERCEPT_STRCASESTR
 static inline void StrstrCheck(void *ctx, char *r, const char *s1,
                                const char *s2) {
-    uptr len1 = REAL(strlen)(s1);
-    uptr len2 = REAL(strlen)(s2);
+    uptr len1 = internal_strlen(s1);
+    uptr len2 = internal_strlen(s2);
     COMMON_INTERCEPTOR_READ_STRING(ctx, s1, r ? r - s1 + len2 : len1 + 1);
     COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, len2 + 1);
 }
@@ -640,10 +640,10 @@ INTERCEPTOR(char*, strtok, char *str, const char *delimiters) {
     // for subsequent calls). We do not need to check strtok's result.
     // As the delimiters can change, we check them every call.
     if (str != nullptr) {
-      COMMON_INTERCEPTOR_READ_RANGE(ctx, str, REAL(strlen)(str) + 1);
+      COMMON_INTERCEPTOR_READ_RANGE(ctx, str, internal_strlen(str) + 1);
     }
     COMMON_INTERCEPTOR_READ_RANGE(ctx, delimiters,
-                                  REAL(strlen)(delimiters) + 1);
+                                  internal_strlen(delimiters) + 1);
     return REAL(strtok)(str, delimiters);
   } else {
     // However, when strict_string_checks is disabled we cannot check the
@@ -657,11 +657,11 @@ INTERCEPTOR(char*, strtok, char *str, const char *delimiters) {
     COMMON_INTERCEPTOR_READ_RANGE(ctx, delimiters, 1);
     char *result = REAL(strtok)(str, delimiters);
     if (result != nullptr) {
-      COMMON_INTERCEPTOR_READ_RANGE(ctx, result, REAL(strlen)(result) + 1);
+      COMMON_INTERCEPTOR_READ_RANGE(ctx, result, internal_strlen(result) + 1);
     } else if (str != nullptr) {
       // No delimiter were found, it's safe to assume that the entire str was
       // scanned.
-      COMMON_INTERCEPTOR_READ_RANGE(ctx, str, REAL(strlen)(str) + 1);
+      COMMON_INTERCEPTOR_READ_RANGE(ctx, str, internal_strlen(str) + 1);
     }
     return result;
   }
@@ -706,7 +706,7 @@ INTERCEPTOR(char*, strchr, const char *s, int c) {
   if (common_flags()->intercept_strchr) {
     // Keep strlen as macro argument, as macro may ignore it.
     COMMON_INTERCEPTOR_READ_STRING(ctx, s,
-      (result ? result - s : REAL(strlen)(s)) + 1);
+      (result ? result - s : internal_strlen(s)) + 1);
   }
   return result;
 }
@@ -737,7 +737,7 @@ INTERCEPTOR(char*, strrchr, const char *s, int c) {
     return internal_strrchr(s, c);
   COMMON_INTERCEPTOR_ENTER(ctx, strrchr, s, c);
   if (common_flags()->intercept_strchr)
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, s, REAL(strlen)(s) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, s, internal_strlen(s) + 1);
   return REAL(strrchr)(s, c);
 }
 #define INIT_STRRCHR COMMON_INTERCEPT_FUNCTION(strrchr)
@@ -751,7 +751,7 @@ INTERCEPTOR(SIZE_T, strspn, const char *s1, const char *s2) {
   COMMON_INTERCEPTOR_ENTER(ctx, strspn, s1, s2);
   SIZE_T r = REAL(strspn)(s1, s2);
   if (common_flags()->intercept_strspn) {
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, REAL(strlen)(s2) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, internal_strlen(s2) + 1);
     COMMON_INTERCEPTOR_READ_STRING(ctx, s1, r + 1);
   }
   return r;
@@ -762,7 +762,7 @@ INTERCEPTOR(SIZE_T, strcspn, const char *s1, const char *s2) {
   COMMON_INTERCEPTOR_ENTER(ctx, strcspn, s1, s2);
   SIZE_T r = REAL(strcspn)(s1, s2);
   if (common_flags()->intercept_strspn) {
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, REAL(strlen)(s2) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, internal_strlen(s2) + 1);
     COMMON_INTERCEPTOR_READ_STRING(ctx, s1, r + 1);
   }
   return r;
@@ -781,9 +781,9 @@ INTERCEPTOR(char *, strpbrk, const char *s1, const char *s2) {
   COMMON_INTERCEPTOR_ENTER(ctx, strpbrk, s1, s2);
   char *r = REAL(strpbrk)(s1, s2);
   if (common_flags()->intercept_strpbrk) {
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, REAL(strlen)(s2) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, internal_strlen(s2) + 1);
     COMMON_INTERCEPTOR_READ_STRING(ctx, s1,
-        r ? r - s1 + 1 : REAL(strlen)(s1) + 1);
+        r ? r - s1 + 1 : internal_strlen(s1) + 1);
   }
   return r;
 }
@@ -1251,7 +1251,7 @@ INTERCEPTOR(char *, fgets, char *s, SIZE_T size, void *file) {
   // https://github.com/google/sanitizers/issues/321.
   char *res = REAL(fgets)(s, size, file);
   if (res)
-    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, s, REAL(strlen)(s) + 1);
+    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, s, internal_strlen(s) + 1);
   return res;
 }
 #define INIT_FGETS COMMON_INTERCEPT_FUNCTION(fgets)
@@ -1265,7 +1265,7 @@ INTERCEPTOR_WITH_SUFFIX(int, fputs, char *s, void *file) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, fputs, s, file);
   if (!SANITIZER_MAC || s) {  // `fputs(NULL, file)` is supported on Darwin.
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, s, REAL(strlen)(s) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, s, internal_strlen(s) + 1);
   }
   return REAL(fputs)(s, file);
 }
@@ -1280,7 +1280,7 @@ INTERCEPTOR(int, puts, char *s) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, puts, s);
   if (!SANITIZER_MAC || s) {  // `puts(NULL)` is supported on Darwin.
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, s, REAL(strlen)(s) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, s, internal_strlen(s) + 1);
   }
   return REAL(puts)(s);
 }
@@ -1334,7 +1334,7 @@ static void unpoison_tm(void *ctx, __sanitizer_tm *tm) {
     // Can not use COMMON_INTERCEPTOR_WRITE_RANGE here, because tm->tm_zone
     // can point to shared memory and tsan would report a data race.
     COMMON_INTERCEPTOR_INITIALIZE_RANGE(tm->tm_zone,
-                                        REAL(strlen(tm->tm_zone)) + 1);
+                                        internal_strlen(tm->tm_zone) + 1);
   }
 #endif
 }
@@ -1387,7 +1387,7 @@ INTERCEPTOR(char *, ctime, unsigned long *timep) {
   char *res = REAL(ctime)(timep);
   if (res) {
     COMMON_INTERCEPTOR_READ_RANGE(ctx, timep, sizeof(*timep));
-    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, internal_strlen(res) + 1);
   }
   return res;
 }
@@ -1400,7 +1400,7 @@ INTERCEPTOR(char *, ctime_r, unsigned long *timep, char *result) {
   char *res = REAL(ctime_r)(timep, result);
   if (res) {
     COMMON_INTERCEPTOR_READ_RANGE(ctx, timep, sizeof(*timep));
-    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, internal_strlen(res) + 1);
   }
   return res;
 }
@@ -1413,7 +1413,7 @@ INTERCEPTOR(char *, asctime, __sanitizer_tm *tm) {
   char *res = REAL(asctime)(tm);
   if (res) {
     COMMON_INTERCEPTOR_READ_RANGE(ctx, tm, sizeof(*tm));
-    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, internal_strlen(res) + 1);
   }
   return res;
 }
@@ -1426,7 +1426,7 @@ INTERCEPTOR(char *, asctime_r, __sanitizer_tm *tm, char *result) {
   char *res = REAL(asctime_r)(tm, result);
   if (res) {
     COMMON_INTERCEPTOR_READ_RANGE(ctx, tm, sizeof(*tm));
-    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, internal_strlen(res) + 1);
   }
   return res;
 }
@@ -1463,7 +1463,7 @@ INTERCEPTOR(char *, strptime, char *s, char *format, __sanitizer_tm *tm) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, strptime, s, format, tm);
   if (format)
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, format, REAL(strlen)(format) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, format, internal_strlen(format) + 1);
   // FIXME: under ASan the call below may write to freed memory and corrupt
   // its metadata. See
   // https://github.com/google/sanitizers/issues/321.
@@ -1843,9 +1843,9 @@ INTERCEPTOR(int, ioctl, int d, unsigned long request, ...) {
   const ioctl_desc *desc = ioctl_lookup(request);
   ioctl_desc decoded_desc;
   if (!desc) {
-    VPrintf(2, "Decoding unknown ioctl 0x%x\n", request);
+    VPrintf(2, "Decoding unknown ioctl 0x%lx\n", request);
     if (!ioctl_decode(request, &decoded_desc))
-      Printf("WARNING: failed decoding unknown ioctl 0x%x\n", request);
+      Printf("WARNING: failed decoding unknown ioctl 0x%lx\n", request);
     else
       desc = &decoded_desc;
   }
@@ -1869,26 +1869,26 @@ UNUSED static void unpoison_passwd(void *ctx, __sanitizer_passwd *pwd) {
     COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwd, sizeof(*pwd));
     if (pwd->pw_name)
       COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwd->pw_name,
-                                     REAL(strlen)(pwd->pw_name) + 1);
+                                     internal_strlen(pwd->pw_name) + 1);
     if (pwd->pw_passwd)
       COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwd->pw_passwd,
-                                     REAL(strlen)(pwd->pw_passwd) + 1);
+                                     internal_strlen(pwd->pw_passwd) + 1);
 #if !SANITIZER_ANDROID
     if (pwd->pw_gecos)
       COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwd->pw_gecos,
-                                     REAL(strlen)(pwd->pw_gecos) + 1);
+                                     internal_strlen(pwd->pw_gecos) + 1);
 #endif
 #if SANITIZER_MAC || SANITIZER_FREEBSD || SANITIZER_NETBSD
     if (pwd->pw_class)
       COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwd->pw_class,
-                                     REAL(strlen)(pwd->pw_class) + 1);
+                                     internal_strlen(pwd->pw_class) + 1);
 #endif
     if (pwd->pw_dir)
       COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwd->pw_dir,
-                                     REAL(strlen)(pwd->pw_dir) + 1);
+                                     internal_strlen(pwd->pw_dir) + 1);
     if (pwd->pw_shell)
       COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwd->pw_shell,
-                                     REAL(strlen)(pwd->pw_shell) + 1);
+                                     internal_strlen(pwd->pw_shell) + 1);
   }
 }
 
@@ -1897,13 +1897,13 @@ UNUSED static void unpoison_group(void *ctx, __sanitizer_group *grp) {
     COMMON_INTERCEPTOR_WRITE_RANGE(ctx, grp, sizeof(*grp));
     if (grp->gr_name)
       COMMON_INTERCEPTOR_WRITE_RANGE(ctx, grp->gr_name,
-                                     REAL(strlen)(grp->gr_name) + 1);
+                                     internal_strlen(grp->gr_name) + 1);
     if (grp->gr_passwd)
       COMMON_INTERCEPTOR_WRITE_RANGE(ctx, grp->gr_passwd,
-                                     REAL(strlen)(grp->gr_passwd) + 1);
+                                     internal_strlen(grp->gr_passwd) + 1);
     char **p = grp->gr_mem;
     for (; *p; ++p) {
-      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *p, REAL(strlen)(*p) + 1);
+      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *p, internal_strlen(*p) + 1);
     }
     COMMON_INTERCEPTOR_WRITE_RANGE(ctx, grp->gr_mem,
                                    (p - grp->gr_mem + 1) * sizeof(*p));
@@ -1916,7 +1916,7 @@ INTERCEPTOR(__sanitizer_passwd *, getpwnam, const char *name) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, getpwnam, name);
   if (name)
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);
   __sanitizer_passwd *res = REAL(getpwnam)(name);
   unpoison_passwd(ctx, res);
   return res;
@@ -1931,7 +1931,7 @@ INTERCEPTOR(__sanitizer_passwd *, getpwuid, u32 uid) {
 INTERCEPTOR(__sanitizer_group *, getgrnam, const char *name) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, getgrnam, name);
-  COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+  COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);
   __sanitizer_group *res = REAL(getgrnam)(name);
   unpoison_group(ctx, res);
   return res;
@@ -1957,7 +1957,7 @@ INTERCEPTOR(int, getpwnam_r, const char *name, __sanitizer_passwd *pwd,
             char *buf, SIZE_T buflen, __sanitizer_passwd **result) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, getpwnam_r, name, pwd, buf, buflen, result);
-  COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+  COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);
   // FIXME: under ASan the call below may write to freed memory and corrupt
   // its metadata. See
   // https://github.com/google/sanitizers/issues/321.
@@ -1984,7 +1984,7 @@ INTERCEPTOR(int, getgrnam_r, const char *name, __sanitizer_group *grp,
             char *buf, SIZE_T buflen, __sanitizer_group **result) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, getgrnam_r, name, grp, buf, buflen, result);
-  COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+  COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);
   // FIXME: under ASan the call below may write to freed memory and corrupt
   // its metadata. See
   // https://github.com/google/sanitizers/issues/321.
@@ -2229,8 +2229,20 @@ INTERCEPTOR(int, clock_getcpuclockid, pid_t pid,
   return res;
 }
 
-#define INIT_CLOCK_GETCPUCLOCKID                  \
-  COMMON_INTERCEPT_FUNCTION(clock_getcpuclockid);
+INTERCEPTOR(int, pthread_getcpuclockid, uptr thread,
+            __sanitizer_clockid_t *clockid) {
+  void *ctx;
+  COMMON_INTERCEPTOR_ENTER(ctx, pthread_getcpuclockid, thread, clockid);
+  int res = REAL(pthread_getcpuclockid)(thread, clockid);
+  if (!res && clockid) {
+    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, clockid, sizeof *clockid);
+  }
+  return res;
+}
+
+#define INIT_CLOCK_GETCPUCLOCKID                   \
+  COMMON_INTERCEPT_FUNCTION(clock_getcpuclockid);  \
+  COMMON_INTERCEPT_FUNCTION(pthread_getcpuclockid);
 #else
 #define INIT_CLOCK_GETCPUCLOCKID
 #endif
@@ -2289,7 +2301,7 @@ static void unpoison_glob_t(void *ctx, __sanitizer_glob_t *pglob) {
         ctx, pglob->gl_pathv, (pglob->gl_pathc + 1) * sizeof(*pglob->gl_pathv));
   for (SIZE_T i = 0; i < pglob->gl_pathc; ++i) {
     char *p = pglob->gl_pathv[i];
-    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, REAL(strlen)(p) + 1);
+    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, internal_strlen(p) + 1);
   }
 }
 
@@ -2319,19 +2331,19 @@ static void *wrapped_gl_readdir(void *dir) {
 
 static void *wrapped_gl_opendir(const char *s) {
   COMMON_INTERCEPTOR_UNPOISON_PARAM(1);
-  COMMON_INTERCEPTOR_INITIALIZE_RANGE(s, REAL(strlen)(s) + 1);
+  COMMON_INTERCEPTOR_INITIALIZE_RANGE(s, internal_strlen(s) + 1);
   return pglob_copy->gl_opendir(s);
 }
 
 static int wrapped_gl_lstat(const char *s, void *st) {
   COMMON_INTERCEPTOR_UNPOISON_PARAM(2);
-  COMMON_INTERCEPTOR_INITIALIZE_RANGE(s, REAL(strlen)(s) + 1);
+  COMMON_INTERCEPTOR_INITIALIZE_RANGE(s, internal_strlen(s) + 1);
   return pglob_copy->gl_lstat(s, st);
 }
 
 static int wrapped_gl_stat(const char *s, void *st) {
   COMMON_INTERCEPTOR_UNPOISON_PARAM(2);
-  COMMON_INTERCEPTOR_INITIALIZE_RANGE(s, REAL(strlen)(s) + 1);
+  COMMON_INTERCEPTOR_INITIALIZE_RANGE(s, internal_strlen(s) + 1);
   return pglob_copy->gl_stat(s, st);
 }
 
@@ -2519,7 +2531,7 @@ INTERCEPTOR(char *, inet_ntop, int af, const void *src, char *dst, u32 size) {
   // its metadata. See
   // https://github.com/google/sanitizers/issues/321.
   char *res = REAL(inet_ntop)(af, src, dst, size);
-  if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+  if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, internal_strlen(res) + 1);
   return res;
 }
 INTERCEPTOR(int, inet_pton, int af, const char *src, void *dst) {
@@ -2548,7 +2560,7 @@ INTERCEPTOR(int, inet_pton, int af, const char *src, void *dst) {
 INTERCEPTOR(int, inet_aton, const char *cp, void *dst) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, inet_aton, cp, dst);
-  if (cp) COMMON_INTERCEPTOR_READ_RANGE(ctx, cp, REAL(strlen)(cp) + 1);
+  if (cp) COMMON_INTERCEPTOR_READ_RANGE(ctx, cp, internal_strlen(cp) + 1);
   // FIXME: under ASan the call below may write to freed memory and corrupt
   // its metadata. See
   // https://github.com/google/sanitizers/issues/321.
@@ -2590,9 +2602,9 @@ INTERCEPTOR(int, getaddrinfo, char *node, char *service,
             struct __sanitizer_addrinfo **out) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, getaddrinfo, node, service, hints, out);
-  if (node) COMMON_INTERCEPTOR_READ_RANGE(ctx, node, REAL(strlen)(node) + 1);
+  if (node) COMMON_INTERCEPTOR_READ_RANGE(ctx, node, internal_strlen(node) + 1);
   if (service)
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, service, REAL(strlen)(service) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, service, internal_strlen(service) + 1);
   if (hints)
     COMMON_INTERCEPTOR_READ_RANGE(ctx, hints, sizeof(__sanitizer_addrinfo));
   // FIXME: under ASan the call below may write to freed memory and corrupt
@@ -2608,7 +2620,7 @@ INTERCEPTOR(int, getaddrinfo, char *node, char *service,
         COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->ai_addr, p->ai_addrlen);
       if (p->ai_canonname)
         COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->ai_canonname,
-                                       REAL(strlen)(p->ai_canonname) + 1);
+                                       internal_strlen(p->ai_canonname) + 1);
       p = p->ai_next;
     }
   }
@@ -2634,9 +2646,9 @@ INTERCEPTOR(int, getnameinfo, void *sockaddr, unsigned salen, char *host,
       REAL(getnameinfo)(sockaddr, salen, host, hostlen, serv, servlen, flags);
   if (res == 0) {
     if (host && hostlen)
-      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, host, REAL(strlen)(host) + 1);
+      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, host, internal_strlen(host) + 1);
     if (serv && servlen)
-      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, serv, REAL(strlen)(serv) + 1);
+      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, serv, internal_strlen(serv) + 1);
   }
   return res;
 }
@@ -2669,10 +2681,10 @@ INTERCEPTOR(int, getsockname, int sock_fd, void *addr, int *addrlen) {
 static void write_hostent(void *ctx, struct __sanitizer_hostent *h) {
   COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h, sizeof(__sanitizer_hostent));
   if (h->h_name)
-    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h->h_name, REAL(strlen)(h->h_name) + 1);
+    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h->h_name, internal_strlen(h->h_name) + 1);
   char **p = h->h_aliases;
   while (*p) {
-    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *p, REAL(strlen)(*p) + 1);
+    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *p, internal_strlen(*p) + 1);
     ++p;
   }
   COMMON_INTERCEPTOR_WRITE_RANGE(
@@ -3196,7 +3208,7 @@ INTERCEPTOR(int, sysinfo, void *info) {
 INTERCEPTOR(__sanitizer_dirent *, opendir, const char *path) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, opendir, path);
-  COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+  COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
   __sanitizer_dirent *res = REAL(opendir)(path);
   if (res)
     COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path);
@@ -3351,10 +3363,10 @@ INTERCEPTOR(char *, setlocale, int category, char *locale) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, setlocale, category, locale);
   if (locale)
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, locale, REAL(strlen)(locale) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, locale, internal_strlen(locale) + 1);
   char *res = REAL(setlocale)(category, locale);
   if (res) {
-    COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
+    COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, internal_strlen(res) + 1);
     unpoison_ctype_arrays(ctx);
   }
   return res;
@@ -3373,7 +3385,7 @@ INTERCEPTOR(char *, getcwd, char *buf, SIZE_T size) {
   // its metadata. See
   // https://github.com/google/sanitizers/issues/321.
   char *res = REAL(getcwd)(buf, size);
-  if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+  if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, internal_strlen(res) + 1);
   return res;
 }
 #define INIT_GETCWD COMMON_INTERCEPT_FUNCTION(getcwd);
@@ -3389,7 +3401,7 @@ INTERCEPTOR(char *, get_current_dir_name, int fake) {
   // its metadata. See
   // https://github.com/google/sanitizers/issues/321.
   char *res = REAL(get_current_dir_name)(fake);
-  if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+  if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, internal_strlen(res) + 1);
   return res;
 }
 
@@ -3663,12 +3675,23 @@ INTERCEPTOR(int, tcgetattr, int fd, void *termios_p) {
 INTERCEPTOR(char *, realpath, const char *path, char *resolved_path) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, realpath, path, resolved_path);
-  if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+  if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
+
+  // Workaround a bug in glibc where dlsym(RTLD_NEXT, ...) returns the oldest
+  // version of a versioned symbol. For realpath(), this gives us something
+  // (called __old_realpath) that does not handle NULL in the second argument.
+  // Handle it as part of the interceptor.
+  char *allocated_path = nullptr;
+  if (!resolved_path)
+    allocated_path = resolved_path = (char *)WRAP(malloc)(path_max + 1);
+
   char *res = REAL(realpath)(path, resolved_path);
-  if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+  if (allocated_path && !res)
+    WRAP(free)(allocated_path);
+  if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, internal_strlen(res) + 1);
   return res;
 }
-#define INIT_REALPATH COMMON_INTERCEPT_FUNCTION_GLIBC_VER_MIN(realpath, "GLIBC_2.3");
+#  define INIT_REALPATH COMMON_INTERCEPT_FUNCTION(realpath);
 #else
 #define INIT_REALPATH
 #endif
@@ -3677,9 +3700,9 @@ INTERCEPTOR(char *, realpath, const char *path, char *resolved_path) {
 INTERCEPTOR(char *, canonicalize_file_name, const char *path) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, canonicalize_file_name, path);
-  if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+  if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
   char *res = REAL(canonicalize_file_name)(path);
-  if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+  if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, internal_strlen(res) + 1);
   return res;
 }
 #define INIT_CANONICALIZE_FILE_NAME \
@@ -3740,7 +3763,7 @@ INTERCEPTOR(char *, strerror, int errnum) {
   COMMON_INTERCEPTOR_ENTER(ctx, strerror, errnum);
   COMMON_INTERCEPTOR_STRERROR();
   char *res = REAL(strerror)(errnum);
-  if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
+  if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, internal_strlen(res) + 1);
   return res;
 }
 #define INIT_STRERROR COMMON_INTERCEPT_FUNCTION(strerror);
@@ -3782,9 +3805,9 @@ INTERCEPTOR(char *, strerror_r, int errnum, char *buf, SIZE_T buflen) {
   // https://github.com/google/sanitizers/issues/321.
   char *res = REAL(strerror_r)(errnum, buf, buflen);
   if (res == buf)
-    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, internal_strlen(res) + 1);
   else
-    COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
+    COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, internal_strlen(res) + 1);
   return res;
 }
 #endif //(_POSIX_C_SOURCE >= 200112L || _XOPEN_SOURCE >= 600) && !_GNU_SOURCE ||
@@ -3804,7 +3827,7 @@ INTERCEPTOR(int, __xpg_strerror_r, int errnum, char *buf, SIZE_T buflen) {
   int res = REAL(__xpg_strerror_r)(errnum, buf, buflen);
   // This version always returns a null-terminated string.
   if (buf && buflen)
-    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, REAL(strlen)(buf) + 1);
+    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, internal_strlen(buf) + 1);
   return res;
 }
 #define INIT_XPG_STRERROR_R COMMON_INTERCEPT_FUNCTION(__xpg_strerror_r);
@@ -3840,7 +3863,7 @@ INTERCEPTOR(int, scandir, char *dirp, __sanitizer_dirent ***namelist,
             scandir_filter_f filter, scandir_compar_f compar) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, scandir, dirp, namelist, filter, compar);
-  if (dirp) COMMON_INTERCEPTOR_READ_RANGE(ctx, dirp, REAL(strlen)(dirp) + 1);
+  if (dirp) COMMON_INTERCEPTOR_READ_RANGE(ctx, dirp, internal_strlen(dirp) + 1);
   scandir_filter = filter;
   scandir_compar = compar;
   // FIXME: under ASan the call below may write to freed memory and corrupt
@@ -3893,7 +3916,7 @@ INTERCEPTOR(int, scandir64, char *dirp, __sanitizer_dirent64 ***namelist,
             scandir64_filter_f filter, scandir64_compar_f compar) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, scandir64, dirp, namelist, filter, compar);
-  if (dirp) COMMON_INTERCEPTOR_READ_RANGE(ctx, dirp, REAL(strlen)(dirp) + 1);
+  if (dirp) COMMON_INTERCEPTOR_READ_RANGE(ctx, dirp, internal_strlen(dirp) + 1);
   scandir64_filter = filter;
   scandir64_compar = compar;
   // FIXME: under ASan the call below may write to freed memory and corrupt
@@ -3989,19 +4012,20 @@ INTERCEPTOR(int, ppoll, __sanitizer_pollfd *fds, __sanitizer_nfds_t nfds,
 INTERCEPTOR(int, wordexp, char *s, __sanitizer_wordexp_t *p, int flags) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, wordexp, s, p, flags);
-  if (s) COMMON_INTERCEPTOR_READ_RANGE(ctx, s, REAL(strlen)(s) + 1);
+  if (s) COMMON_INTERCEPTOR_READ_RANGE(ctx, s, internal_strlen(s) + 1);
   // FIXME: under ASan the call below may write to freed memory and corrupt
   // its metadata. See
   // https://github.com/google/sanitizers/issues/321.
   int res = REAL(wordexp)(s, p, flags);
   if (!res && p) {
     COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(*p));
-    if (p->we_wordc)
-      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->we_wordv,
-                                     sizeof(*p->we_wordv) * p->we_wordc);
-    for (uptr i = 0; i < p->we_wordc; ++i) {
+    uptr we_wordc =
+        ((flags & wordexp_wrde_dooffs) ? p->we_offs : 0) + p->we_wordc;
+    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->we_wordv,
+                                   sizeof(*p->we_wordv) * (we_wordc + 1));
+    for (uptr i = 0; i < we_wordc; ++i) {
       char *w = p->we_wordv[i];
-      if (w) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, w, REAL(strlen)(w) + 1);
+      if (w) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, w, internal_strlen(w) + 1);
     }
   }
   return res;
@@ -4207,7 +4231,7 @@ INTERCEPTOR(char **, backtrace_symbols, void **buffer, int size) {
   if (res && size) {
     COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, size * sizeof(*res));
     for (int i = 0; i < size; ++i)
-      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res[i], REAL(strlen(res[i])) + 1);
+      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res[i], internal_strlen(res[i]) + 1);
   }
   return res;
 }
@@ -4325,16 +4349,16 @@ static void write_mntent(void *ctx, __sanitizer_mntent *mnt) {
   COMMON_INTERCEPTOR_WRITE_RANGE(ctx, mnt, sizeof(*mnt));
   if (mnt->mnt_fsname)
     COMMON_INTERCEPTOR_WRITE_RANGE(ctx, mnt->mnt_fsname,
-                                   REAL(strlen)(mnt->mnt_fsname) + 1);
+                                   internal_strlen(mnt->mnt_fsname) + 1);
   if (mnt->mnt_dir)
     COMMON_INTERCEPTOR_WRITE_RANGE(ctx, mnt->mnt_dir,
-                                   REAL(strlen)(mnt->mnt_dir) + 1);
+                                   internal_strlen(mnt->mnt_dir) + 1);
   if (mnt->mnt_type)
     COMMON_INTERCEPTOR_WRITE_RANGE(ctx, mnt->mnt_type,
-                                   REAL(strlen)(mnt->mnt_type) + 1);
+                                   internal_strlen(mnt->mnt_type) + 1);
   if (mnt->mnt_opts)
     COMMON_INTERCEPTOR_WRITE_RANGE(ctx, mnt->mnt_opts,
-                                   REAL(strlen)(mnt->mnt_opts) + 1);
+                                   internal_strlen(mnt->mnt_opts) + 1);
 }
 #endif
 
@@ -4369,7 +4393,7 @@ INTERCEPTOR(__sanitizer_mntent *, getmntent_r, void *fp,
 INTERCEPTOR(int, statfs, char *path, void *buf) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, statfs, path, buf);
-  if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+  if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
   // FIXME: under ASan the call below may write to freed memory and corrupt
   // its metadata. See
   // https://github.com/google/sanitizers/issues/321.
@@ -4398,7 +4422,7 @@ INTERCEPTOR(int, fstatfs, int fd, void *buf) {
 INTERCEPTOR(int, statfs64, char *path, void *buf) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, statfs64, path, buf);
-  if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+  if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
   // FIXME: under ASan the call below may write to freed memory and corrupt
   // its metadata. See
   // https://github.com/google/sanitizers/issues/321.
@@ -4427,7 +4451,7 @@ INTERCEPTOR(int, fstatfs64, int fd, void *buf) {
 INTERCEPTOR(int, statvfs, char *path, void *buf) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, statvfs, path, buf);
-  if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+  if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
   // FIXME: under ASan the call below may write to freed memory and corrupt
   // its metadata. See
   // https://github.com/google/sanitizers/issues/321.
@@ -4461,7 +4485,7 @@ INTERCEPTOR(int, fstatvfs, int fd, void *buf) {
 INTERCEPTOR(int, statvfs64, char *path, void *buf) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, statvfs64, path, buf);
-  if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+  if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
   // FIXME: under ASan the call below may write to freed memory and corrupt
   // its metadata. See
   // https://github.com/google/sanitizers/issues/321.
@@ -4490,7 +4514,7 @@ INTERCEPTOR(int, fstatvfs64, int fd, void *buf) {
 INTERCEPTOR(int, initgroups, char *user, u32 group) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, initgroups, user, group);
-  if (user) COMMON_INTERCEPTOR_READ_RANGE(ctx, user, REAL(strlen)(user) + 1);
+  if (user) COMMON_INTERCEPTOR_READ_RANGE(ctx, user, internal_strlen(user) + 1);
   int res = REAL(initgroups)(user, group);
   return res;
 }
@@ -4505,13 +4529,13 @@ INTERCEPTOR(char *, ether_ntoa, __sanitizer_ether_addr *addr) {
   COMMON_INTERCEPTOR_ENTER(ctx, ether_ntoa, addr);
   if (addr) COMMON_INTERCEPTOR_READ_RANGE(ctx, addr, sizeof(*addr));
   char *res = REAL(ether_ntoa)(addr);
-  if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
+  if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, internal_strlen(res) + 1);
   return res;
 }
 INTERCEPTOR(__sanitizer_ether_addr *, ether_aton, char *buf) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, ether_aton, buf);
-  if (buf) COMMON_INTERCEPTOR_READ_RANGE(ctx, buf, REAL(strlen)(buf) + 1);
+  if (buf) COMMON_INTERCEPTOR_READ_RANGE(ctx, buf, internal_strlen(buf) + 1);
   __sanitizer_ether_addr *res = REAL(ether_aton)(buf);
   if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, sizeof(*res));
   return res;
@@ -4533,14 +4557,14 @@ INTERCEPTOR(int, ether_ntohost, char *hostname, __sanitizer_ether_addr *addr) {
   // https://github.com/google/sanitizers/issues/321.
   int res = REAL(ether_ntohost)(hostname, addr);
   if (!res && hostname)
-    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, hostname, REAL(strlen)(hostname) + 1);
+    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, hostname, internal_strlen(hostname) + 1);
   return res;
 }
 INTERCEPTOR(int, ether_hostton, char *hostname, __sanitizer_ether_addr *addr) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, ether_hostton, hostname, addr);
   if (hostname)
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, hostname, REAL(strlen)(hostname) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, hostname, internal_strlen(hostname) + 1);
   // FIXME: under ASan the call below may write to freed memory and corrupt
   // its metadata. See
   // https://github.com/google/sanitizers/issues/321.
@@ -4552,7 +4576,7 @@ INTERCEPTOR(int, ether_line, char *line, __sanitizer_ether_addr *addr,
             char *hostname) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, ether_line, line, addr, hostname);
-  if (line) COMMON_INTERCEPTOR_READ_RANGE(ctx, line, REAL(strlen)(line) + 1);
+  if (line) COMMON_INTERCEPTOR_READ_RANGE(ctx, line, internal_strlen(line) + 1);
   // FIXME: under ASan the call below may write to freed memory and corrupt
   // its metadata. See
   // https://github.com/google/sanitizers/issues/321.
@@ -4560,7 +4584,7 @@ INTERCEPTOR(int, ether_line, char *line, __sanitizer_ether_addr *addr,
   if (!res) {
     if (addr) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, addr, sizeof(*addr));
     if (hostname)
-      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, hostname, REAL(strlen)(hostname) + 1);
+      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, hostname, internal_strlen(hostname) + 1);
   }
   return res;
 }
@@ -4581,14 +4605,14 @@ INTERCEPTOR(char *, ether_ntoa_r, __sanitizer_ether_addr *addr, char *buf) {
   // its metadata. See
   // https://github.com/google/sanitizers/issues/321.
   char *res = REAL(ether_ntoa_r)(addr, buf);
-  if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+  if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, internal_strlen(res) + 1);
   return res;
 }
 INTERCEPTOR(__sanitizer_ether_addr *, ether_aton_r, char *buf,
             __sanitizer_ether_addr *addr) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, ether_aton_r, buf, addr);
-  if (buf) COMMON_INTERCEPTOR_READ_RANGE(ctx, buf, REAL(strlen)(buf) + 1);
+  if (buf) COMMON_INTERCEPTOR_READ_RANGE(ctx, buf, internal_strlen(buf) + 1);
   // FIXME: under ASan the call below may write to freed memory and corrupt
   // its metadata. See
   // https://github.com/google/sanitizers/issues/321.
@@ -4854,9 +4878,9 @@ INTERCEPTOR(char *, tmpnam, char *s) {
       // FIXME: under ASan the call below may write to freed memory and corrupt
       // its metadata. See
       // https://github.com/google/sanitizers/issues/321.
-      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, s, REAL(strlen)(s) + 1);
+      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, s, internal_strlen(s) + 1);
     else
-      COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
+      COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, internal_strlen(res) + 1);
   }
   return res;
 }
@@ -4873,7 +4897,7 @@ INTERCEPTOR(char *, tmpnam_r, char *s) {
   // its metadata. See
   // https://github.com/google/sanitizers/issues/321.
   char *res = REAL(tmpnam_r)(s);
-  if (res && s) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, s, REAL(strlen)(s) + 1);
+  if (res && s) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, s, internal_strlen(s) + 1);
   return res;
 }
 #define INIT_TMPNAM_R COMMON_INTERCEPT_FUNCTION(tmpnam_r);
@@ -4887,7 +4911,7 @@ INTERCEPTOR(char *, ptsname, int fd) {
   COMMON_INTERCEPTOR_ENTER(ctx, ptsname, fd);
   char *res = REAL(ptsname)(fd);
   if (res != nullptr)
-    COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
+    COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, internal_strlen(res) + 1);
   return res;
 }
 #define INIT_PTSNAME COMMON_INTERCEPT_FUNCTION(ptsname);
@@ -4901,7 +4925,7 @@ INTERCEPTOR(int, ptsname_r, int fd, char *name, SIZE_T namesize) {
   COMMON_INTERCEPTOR_ENTER(ctx, ptsname_r, fd, name, namesize);
   int res = REAL(ptsname_r)(fd, name, namesize);
   if (res == 0)
-    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, name, REAL(strlen)(name) + 1);
+    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, name, internal_strlen(name) + 1);
   return res;
 }
 #define INIT_PTSNAME_R COMMON_INTERCEPT_FUNCTION(ptsname_r);
@@ -4915,7 +4939,7 @@ INTERCEPTOR(char *, ttyname, int fd) {
   COMMON_INTERCEPTOR_ENTER(ctx, ttyname, fd);
   char *res = REAL(ttyname)(fd);
   if (res != nullptr)
-    COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
+    COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, internal_strlen(res) + 1);
   return res;
 }
 #define INIT_TTYNAME COMMON_INTERCEPT_FUNCTION(ttyname);
@@ -4929,7 +4953,7 @@ INTERCEPTOR(int, ttyname_r, int fd, char *name, SIZE_T namesize) {
   COMMON_INTERCEPTOR_ENTER(ctx, ttyname_r, fd, name, namesize);
   int res = REAL(ttyname_r)(fd, name, namesize);
   if (res == 0)
-    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, name, REAL(strlen)(name) + 1);
+    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, name, internal_strlen(name) + 1);
   return res;
 }
 #define INIT_TTYNAME_R COMMON_INTERCEPT_FUNCTION(ttyname_r);
@@ -4941,10 +4965,10 @@ INTERCEPTOR(int, ttyname_r, int fd, char *name, SIZE_T namesize) {
 INTERCEPTOR(char *, tempnam, char *dir, char *pfx) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, tempnam, dir, pfx);
-  if (dir) COMMON_INTERCEPTOR_READ_RANGE(ctx, dir, REAL(strlen)(dir) + 1);
-  if (pfx) COMMON_INTERCEPTOR_READ_RANGE(ctx, pfx, REAL(strlen)(pfx) + 1);
+  if (dir) COMMON_INTERCEPTOR_READ_RANGE(ctx, dir, internal_strlen(dir) + 1);
+  if (pfx) COMMON_INTERCEPTOR_READ_RANGE(ctx, pfx, internal_strlen(pfx) + 1);
   char *res = REAL(tempnam)(dir, pfx);
-  if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
+  if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, internal_strlen(res) + 1);
   return res;
 }
 #define INIT_TEMPNAM COMMON_INTERCEPT_FUNCTION(tempnam);
@@ -5404,7 +5428,7 @@ asm(
 INTERCEPTOR(SSIZE_T, listxattr, const char *path, char *list, SIZE_T size) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, listxattr, path, list, size);
-  if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+  if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
   // FIXME: under ASan the call below may write to freed memory and corrupt
   // its metadata. See
   // https://github.com/google/sanitizers/issues/321.
@@ -5417,7 +5441,7 @@ INTERCEPTOR(SSIZE_T, listxattr, const char *path, char *list, SIZE_T size) {
 INTERCEPTOR(SSIZE_T, llistxattr, const char *path, char *list, SIZE_T size) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, llistxattr, path, list, size);
-  if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+  if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
   // FIXME: under ASan the call below may write to freed memory and corrupt
   // its metadata. See
   // https://github.com/google/sanitizers/issues/321.
@@ -5448,8 +5472,8 @@ INTERCEPTOR(SSIZE_T, getxattr, const char *path, const char *name, char *value,
             SIZE_T size) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, getxattr, path, name, value, size);
-  if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
-  if (name) COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+  if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
+  if (name) COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);
   // FIXME: under ASan the call below may write to freed memory and corrupt
   // its metadata. See
   // https://github.com/google/sanitizers/issues/321.
@@ -5461,8 +5485,8 @@ INTERCEPTOR(SSIZE_T, lgetxattr, const char *path, const char *name, char *value,
             SIZE_T size) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, lgetxattr, path, name, value, size);
-  if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
-  if (name) COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+  if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
+  if (name) COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);
   // FIXME: under ASan the call below may write to freed memory and corrupt
   // its metadata. See
   // https://github.com/google/sanitizers/issues/321.
@@ -5474,7 +5498,7 @@ INTERCEPTOR(SSIZE_T, fgetxattr, int fd, const char *name, char *value,
             SIZE_T size) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, fgetxattr, fd, name, value, size);
-  if (name) COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+  if (name) COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);
   // FIXME: under ASan the call below may write to freed memory and corrupt
   // its metadata. See
   // https://github.com/google/sanitizers/issues/321.
@@ -5544,7 +5568,7 @@ INTERCEPTOR(int, getifaddrs, __sanitizer_ifaddrs **ifap) {
       COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(__sanitizer_ifaddrs));
       if (p->ifa_name)
         COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->ifa_name,
-                                       REAL(strlen)(p->ifa_name) + 1);
+                                       internal_strlen(p->ifa_name) + 1);
       if (p->ifa_addr)
         COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->ifa_addr, struct_sockaddr_sz);
       if (p->ifa_netmask)
@@ -5574,14 +5598,14 @@ INTERCEPTOR(char *, if_indextoname, unsigned int ifindex, char* ifname) {
   // https://github.com/google/sanitizers/issues/321.
   char *res = REAL(if_indextoname)(ifindex, ifname);
   if (res && ifname)
-    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ifname, REAL(strlen)(ifname) + 1);
+    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ifname, internal_strlen(ifname) + 1);
   return res;
 }
 INTERCEPTOR(unsigned int, if_nametoindex, const char* ifname) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, if_nametoindex, ifname);
   if (ifname)
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, ifname, REAL(strlen)(ifname) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, ifname, internal_strlen(ifname) + 1);
   return REAL(if_nametoindex)(ifname);
 }
 #define INIT_IF_INDEXTONAME                  \
@@ -5839,7 +5863,7 @@ INTERCEPTOR(int, xdr_string, __sanitizer_XDR *xdrs, char **p,
   COMMON_INTERCEPTOR_ENTER(ctx, xdr_string, xdrs, p, maxsize);
   if (p && xdrs->x_op == __sanitizer_XDR_ENCODE) {
     COMMON_INTERCEPTOR_READ_RANGE(ctx, p, sizeof(*p));
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, *p, REAL(strlen)(*p) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, *p, internal_strlen(*p) + 1);
   }
   // FIXME: under ASan the call below may write to freed memory and corrupt
   // its metadata. See
@@ -5848,7 +5872,7 @@ INTERCEPTOR(int, xdr_string, __sanitizer_XDR *xdrs, char **p,
   if (p && xdrs->x_op == __sanitizer_XDR_DECODE) {
     COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(*p));
     if (res && *p)
-      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *p, REAL(strlen)(*p) + 1);
+      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *p, internal_strlen(*p) + 1);
   }
   return res;
 }
@@ -6059,8 +6083,8 @@ INTERCEPTOR(int, __woverflow, __sanitizer_FILE *fp, int ch) {
 INTERCEPTOR(__sanitizer_FILE *, fopen, const char *path, const char *mode) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, fopen, path, mode);
-  if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
-  COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, REAL(strlen)(mode) + 1);
+  if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
+  COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, internal_strlen(mode) + 1);
   __sanitizer_FILE *res = REAL(fopen)(path, mode);
   COMMON_INTERCEPTOR_FILE_OPEN(ctx, res, path);
   if (res) unpoison_file(res);
@@ -6069,7 +6093,7 @@ INTERCEPTOR(__sanitizer_FILE *, fopen, const char *path, const char *mode) {
 INTERCEPTOR(__sanitizer_FILE *, fdopen, int fd, const char *mode) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, fdopen, fd, mode);
-  COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, REAL(strlen)(mode) + 1);
+  COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, internal_strlen(mode) + 1);
   __sanitizer_FILE *res = REAL(fdopen)(fd, mode);
   if (res) unpoison_file(res);
   return res;
@@ -6078,8 +6102,8 @@ INTERCEPTOR(__sanitizer_FILE *, freopen, const char *path, const char *mode,
             __sanitizer_FILE *fp) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, freopen, path, mode, fp);
-  if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
-  COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, REAL(strlen)(mode) + 1);
+  if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
+  COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, internal_strlen(mode) + 1);
   COMMON_INTERCEPTOR_FILE_CLOSE(ctx, fp);
   __sanitizer_FILE *res = REAL(freopen)(path, mode, fp);
   COMMON_INTERCEPTOR_FILE_OPEN(ctx, res, path);
@@ -6103,7 +6127,7 @@ INTERCEPTOR(int, flopen, const char *path, int flags, ...) {
   va_end(ap);
   COMMON_INTERCEPTOR_ENTER(ctx, flopen, path, flags, mode);
   if (path) {
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
   }
   return REAL(flopen)(path, flags, mode);
 }
@@ -6116,7 +6140,7 @@ INTERCEPTOR(int, flopenat, int dirfd, const char *path, int flags, ...) {
   va_end(ap);
   COMMON_INTERCEPTOR_ENTER(ctx, flopen, path, flags, mode);
   if (path) {
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
   }
   return REAL(flopenat)(dirfd, path, flags, mode);
 }
@@ -6132,8 +6156,8 @@ INTERCEPTOR(int, flopenat, int dirfd, const char *path, int flags, ...) {
 INTERCEPTOR(__sanitizer_FILE *, fopen64, const char *path, const char *mode) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, fopen64, path, mode);
-  COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
-  COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, REAL(strlen)(mode) + 1);
+  COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
+  COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, internal_strlen(mode) + 1);
   __sanitizer_FILE *res = REAL(fopen64)(path, mode);
   COMMON_INTERCEPTOR_FILE_OPEN(ctx, res, path);
   if (res) unpoison_file(res);
@@ -6143,8 +6167,8 @@ INTERCEPTOR(__sanitizer_FILE *, freopen64, const char *path, const char *mode,
             __sanitizer_FILE *fp) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, freopen64, path, mode, fp);
-  if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
-  COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, REAL(strlen)(mode) + 1);
+  if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
+  COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, internal_strlen(mode) + 1);
   COMMON_INTERCEPTOR_FILE_CLOSE(ctx, fp);
   __sanitizer_FILE *res = REAL(freopen64)(path, mode, fp);
   COMMON_INTERCEPTOR_FILE_OPEN(ctx, res, path);
@@ -6322,9 +6346,9 @@ INTERCEPTOR(char *, getpass, const char *prompt) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, getpass, prompt);
   if (prompt)
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, prompt, REAL(strlen)(prompt)+1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, prompt, internal_strlen(prompt)+1);
   char *res = REAL(getpass)(prompt);
-  if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res)+1);
+  if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, internal_strlen(res)+1);
   return res;
 }
 
@@ -6528,17 +6552,42 @@ INTERCEPTOR(int, sem_getvalue, __sanitizer_sem_t *s, int *sval) {
   }
   return res;
 }
-#define INIT_SEM                                                               \
-  COMMON_INTERCEPT_FUNCTION(sem_init);                                         \
-  COMMON_INTERCEPT_FUNCTION(sem_destroy);                                      \
-  COMMON_INTERCEPT_FUNCTION(sem_wait);                                         \
-  COMMON_INTERCEPT_FUNCTION(sem_trywait);                                      \
-  COMMON_INTERCEPT_FUNCTION(sem_timedwait);                                    \
-  COMMON_INTERCEPT_FUNCTION(sem_post);                                         \
-  COMMON_INTERCEPT_FUNCTION(sem_getvalue);
+
+INTERCEPTOR(__sanitizer_sem_t *, sem_open, const char *name, int oflag, ...) {
+  void *ctx;
+  va_list ap;
+  va_start(ap, oflag);
+  u32 mode = va_arg(ap, u32);
+  u32 value = va_arg(ap, u32);
+  COMMON_INTERCEPTOR_ENTER(ctx, sem_open, name, oflag, mode, value);
+  COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);
+  __sanitizer_sem_t *s = REAL(sem_open)(name, oflag, mode, value);
+  if (s)
+    COMMON_INTERCEPTOR_INITIALIZE_RANGE(s, sizeof(*s));
+  va_end(ap);
+  return s;
+}
+
+INTERCEPTOR(int, sem_unlink, const char *name) {
+  void *ctx;
+  COMMON_INTERCEPTOR_ENTER(ctx, sem_unlink, name);
+  COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);
+  return REAL(sem_unlink)(name);
+}
+
+#  define INIT_SEM                            \
+    COMMON_INTERCEPT_FUNCTION(sem_init);      \
+    COMMON_INTERCEPT_FUNCTION(sem_destroy);   \
+    COMMON_INTERCEPT_FUNCTION(sem_wait);      \
+    COMMON_INTERCEPT_FUNCTION(sem_trywait);   \
+    COMMON_INTERCEPT_FUNCTION(sem_timedwait); \
+    COMMON_INTERCEPT_FUNCTION(sem_post);      \
+    COMMON_INTERCEPT_FUNCTION(sem_getvalue);  \
+    COMMON_INTERCEPT_FUNCTION(sem_open);      \
+    COMMON_INTERCEPT_FUNCTION(sem_unlink);
 #else
-#define INIT_SEM
-#endif // SANITIZER_INTERCEPT_SEM
+#  define INIT_SEM
+#endif  // SANITIZER_INTERCEPT_SEM
 
 #if SANITIZER_INTERCEPT_PTHREAD_SETCANCEL
 INTERCEPTOR(int, pthread_setcancelstate, int state, int *oldstate) {
@@ -6621,7 +6670,7 @@ INTERCEPTOR(char *, ctermid, char *s) {
   COMMON_INTERCEPTOR_ENTER(ctx, ctermid, s);
   char *res = REAL(ctermid)(s);
   if (res) {
-    COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
+    COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, internal_strlen(res) + 1);
   }
   return res;
 }
@@ -6636,7 +6685,7 @@ INTERCEPTOR(char *, ctermid_r, char *s) {
   COMMON_INTERCEPTOR_ENTER(ctx, ctermid_r, s);
   char *res = REAL(ctermid_r)(s);
   if (res) {
-    COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
+    COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, internal_strlen(res) + 1);
   }
   return res;
 }
@@ -6973,8 +7022,8 @@ INTERCEPTOR(SIZE_T, wcsnlen, const wchar_t *s, SIZE_T n) {
 INTERCEPTOR(wchar_t *, wcscat, wchar_t *dst, const wchar_t *src) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, wcscat, dst, src);
-  SIZE_T src_size = REAL(wcslen)(src);
-  SIZE_T dst_size = REAL(wcslen)(dst);
+  SIZE_T src_size = internal_wcslen(src);
+  SIZE_T dst_size = internal_wcslen(dst);
   COMMON_INTERCEPTOR_READ_RANGE(ctx, src, (src_size + 1) * sizeof(wchar_t));
   COMMON_INTERCEPTOR_READ_RANGE(ctx, dst, (dst_size + 1) * sizeof(wchar_t));
   COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst + dst_size,
@@ -6985,8 +7034,8 @@ INTERCEPTOR(wchar_t *, wcscat, wchar_t *dst, const wchar_t *src) {
 INTERCEPTOR(wchar_t *, wcsncat, wchar_t *dst, const wchar_t *src, SIZE_T n) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, wcsncat, dst, src, n);
-  SIZE_T src_size = REAL(wcsnlen)(src, n);
-  SIZE_T dst_size = REAL(wcslen)(dst);
+  SIZE_T src_size = internal_wcsnlen(src, n);
+  SIZE_T dst_size = internal_wcslen(dst);
   COMMON_INTERCEPTOR_READ_RANGE(ctx, src,
                                 Min(src_size + 1, n) * sizeof(wchar_t));
   COMMON_INTERCEPTOR_READ_RANGE(ctx, dst, (dst_size + 1) * sizeof(wchar_t));
@@ -7005,7 +7054,7 @@ INTERCEPTOR(wchar_t *, wcsncat, wchar_t *dst, const wchar_t *src, SIZE_T n) {
 INTERCEPTOR(wchar_t *, wcsdup, wchar_t *s) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, wcsdup, s);
-  SIZE_T len = REAL(wcslen)(s);
+  SIZE_T len = internal_wcslen(s);
   COMMON_INTERCEPTOR_READ_RANGE(ctx, s, sizeof(wchar_t) * (len + 1));
   wchar_t *result = REAL(wcsdup)(s);
   if (result)
@@ -7019,9 +7068,9 @@ INTERCEPTOR(wchar_t *, wcsdup, wchar_t *s) {
 #endif
 
 #if SANITIZER_INTERCEPT_STRXFRM
-static SIZE_T RealStrLen(const char *str) { return REAL(strlen)(str); }
+static SIZE_T RealStrLen(const char *str) { return internal_strlen(str); }
 
-static SIZE_T RealStrLen(const wchar_t *str) { return REAL(wcslen)(str); }
+static SIZE_T RealStrLen(const wchar_t *str) { return internal_wcslen(str); }
 
 #define STRXFRM_INTERCEPTOR_IMPL(strxfrm, dest, src, len, ...)             \
   {                                                                        \
@@ -7095,7 +7144,7 @@ INTERCEPTOR(int, acct, const char *file) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, acct, file);
   if (file)
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, file, REAL(strlen)(file) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, file, internal_strlen(file) + 1);
   return REAL(acct)(file);
 }
 #define INIT_ACCT COMMON_INTERCEPT_FUNCTION(acct)
@@ -7110,7 +7159,7 @@ INTERCEPTOR(const char *, user_from_uid, u32 uid, int nouser) {
   COMMON_INTERCEPTOR_ENTER(ctx, user_from_uid, uid, nouser);
   user = REAL(user_from_uid)(uid, nouser);
   if (user)
-    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, user, REAL(strlen)(user) + 1);
+    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, user, internal_strlen(user) + 1);
   return user;
 }
 #define INIT_USER_FROM_UID COMMON_INTERCEPT_FUNCTION(user_from_uid)
@@ -7124,7 +7173,7 @@ INTERCEPTOR(int, uid_from_user, const char *name, u32 *uid) {
   int res;
   COMMON_INTERCEPTOR_ENTER(ctx, uid_from_user, name, uid);
   if (name)
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);
   res = REAL(uid_from_user)(name, uid);
   if (uid)
     COMMON_INTERCEPTOR_WRITE_RANGE(ctx, uid, sizeof(*uid));
@@ -7142,7 +7191,7 @@ INTERCEPTOR(const char *, group_from_gid, u32 gid, int nogroup) {
   COMMON_INTERCEPTOR_ENTER(ctx, group_from_gid, gid, nogroup);
   group = REAL(group_from_gid)(gid, nogroup);
   if (group)
-    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, group, REAL(strlen)(group) + 1);
+    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, group, internal_strlen(group) + 1);
   return group;
 }
 #define INIT_GROUP_FROM_GID COMMON_INTERCEPT_FUNCTION(group_from_gid)
@@ -7156,7 +7205,7 @@ INTERCEPTOR(int, gid_from_group, const char *group, u32 *gid) {
   int res;
   COMMON_INTERCEPTOR_ENTER(ctx, gid_from_group, group, gid);
   if (group)
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, group, REAL(strlen)(group) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, group, internal_strlen(group) + 1);
   res = REAL(gid_from_group)(group, gid);
   if (gid)
     COMMON_INTERCEPTOR_WRITE_RANGE(ctx, gid, sizeof(*gid));
@@ -7172,7 +7221,7 @@ INTERCEPTOR(int, access, const char *path, int mode) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, access, path, mode);
   if (path)
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
   return REAL(access)(path, mode);
 }
 #define INIT_ACCESS COMMON_INTERCEPT_FUNCTION(access)
@@ -7185,7 +7234,7 @@ INTERCEPTOR(int, faccessat, int fd, const char *path, int mode, int flags) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, faccessat, fd, path, mode, flags);
   if (path)
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
   return REAL(faccessat)(fd, path, mode, flags);
 }
 #define INIT_FACCESSAT COMMON_INTERCEPT_FUNCTION(faccessat)
@@ -7200,7 +7249,7 @@ INTERCEPTOR(int, getgrouplist, const char *name, u32 basegid, u32 *groups,
   int res;
   COMMON_INTERCEPTOR_ENTER(ctx, getgrouplist, name, basegid, groups, ngroups);
   if (name)
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);
   if (ngroups)
     COMMON_INTERCEPTOR_READ_RANGE(ctx, ngroups, sizeof(*ngroups));
   res = REAL(getgrouplist)(name, basegid, groups, ngroups);
@@ -7224,7 +7273,7 @@ INTERCEPTOR(int, getgroupmembership, const char *name, u32 basegid, u32 *groups,
   COMMON_INTERCEPTOR_ENTER(ctx, getgroupmembership, name, basegid, groups,
                            maxgrp, ngroups);
   if (name)
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);
   res = REAL(getgroupmembership)(name, basegid, groups, maxgrp, ngroups);
   if (!res && groups && ngroups) {
     COMMON_INTERCEPTOR_WRITE_RANGE(ctx, groups, sizeof(*groups) * (*ngroups));
@@ -7242,7 +7291,7 @@ INTERCEPTOR(int, getgroupmembership, const char *name, u32 basegid, u32 *groups,
 INTERCEPTOR(SSIZE_T, readlink, const char *path, char *buf, SIZE_T bufsiz) {
   void* ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, readlink, path, buf, bufsiz);
-  COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+  COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
   SSIZE_T res = REAL(readlink)(path, buf, bufsiz);
   if (res > 0)
     COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, res);
@@ -7259,7 +7308,7 @@ INTERCEPTOR(SSIZE_T, readlinkat, int dirfd, const char *path, char *buf,
             SIZE_T bufsiz) {
   void* ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, readlinkat, dirfd, path, buf, bufsiz);
-  COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+  COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
   SSIZE_T res = REAL(readlinkat)(dirfd, path, buf, bufsiz);
   if (res > 0)
     COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, res);
@@ -7277,7 +7326,7 @@ INTERCEPTOR(int, name_to_handle_at, int dirfd, const char *pathname,
   void* ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, name_to_handle_at, dirfd, pathname, handle,
                            mount_id, flags);
-  COMMON_INTERCEPTOR_READ_RANGE(ctx, pathname, REAL(strlen)(pathname) + 1);
+  COMMON_INTERCEPTOR_READ_RANGE(ctx, pathname, internal_strlen(pathname) + 1);
 
   __sanitizer_file_handle *sanitizer_handle =
       reinterpret_cast<__sanitizer_file_handle*>(handle);
@@ -7341,7 +7390,7 @@ INTERCEPTOR(SIZE_T, strlcpy, char *dst, char *src, SIZE_T size) {
         ctx, src, Min(internal_strnlen(src, size), size - 1) + 1);
   }
   res = REAL(strlcpy)(dst, src, size);
-  COMMON_INTERCEPTOR_COPY_STRING(ctx, dst, src, REAL(strlen)(dst) + 1);
+  COMMON_INTERCEPTOR_COPY_STRING(ctx, dst, src, internal_strlen(dst) + 1);
   return res;
 }
 
@@ -7416,7 +7465,7 @@ INTERCEPTOR(char *, devname, u64 dev, u32 type) {
   COMMON_INTERCEPTOR_ENTER(ctx, devname, dev, type);
   name = REAL(devname)(dev, type);
   if (name)
-    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, name, REAL(strlen)(name) + 1);
+    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, name, internal_strlen(name) + 1);
   return name;
 }
 #define INIT_DEVNAME COMMON_INTERCEPT_FUNCTION(devname);
@@ -7438,7 +7487,7 @@ INTERCEPTOR(DEVNAME_R_RETTYPE, devname_r, u64 dev, u32 type, char *path,
   COMMON_INTERCEPTOR_ENTER(ctx, devname_r, dev, type, path, len);
   DEVNAME_R_RETTYPE res = REAL(devname_r)(dev, type, path, len);
   if (DEVNAME_R_SUCCESS(res))
-    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, path, REAL(strlen)(path) + 1);
+    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, path, internal_strlen(path) + 1);
   return res;
 }
 #define INIT_DEVNAME_R COMMON_INTERCEPT_FUNCTION(devname_r);
@@ -7468,7 +7517,7 @@ INTERCEPTOR(void, strmode, u32 mode, char *bp) {
   COMMON_INTERCEPTOR_ENTER(ctx, strmode, mode, bp);
   REAL(strmode)(mode, bp);
   if (bp)
-    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, bp, REAL(strlen)(bp) + 1);
+    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, bp, internal_strlen(bp) + 1);
 }
 #define INIT_STRMODE COMMON_INTERCEPT_FUNCTION(strmode)
 #else
@@ -7488,37 +7537,42 @@ INTERCEPTOR(struct __sanitizer_ttyent *, getttynam, char *name) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, getttynam, name);
   if (name)
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);
   struct __sanitizer_ttyent *ttyent = REAL(getttynam)(name);
   if (ttyent)
     COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ttyent, struct_ttyent_sz);
   return ttyent;
 }
+#define INIT_TTYENT \
+  COMMON_INTERCEPT_FUNCTION(getttyent); \
+  COMMON_INTERCEPT_FUNCTION(getttynam);
+#else
+#define INIT_TTYENT
+#endif
+
+#if SANITIZER_INTERCEPT_TTYENTPATH
 INTERCEPTOR(int, setttyentpath, char *path) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, setttyentpath, path);
   if (path)
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
   return REAL(setttyentpath)(path);
 }
-#define INIT_TTYENT \
-  COMMON_INTERCEPT_FUNCTION(getttyent); \
-  COMMON_INTERCEPT_FUNCTION(getttynam); \
-  COMMON_INTERCEPT_FUNCTION(setttyentpath)
+#define INIT_TTYENTPATH COMMON_INTERCEPT_FUNCTION(setttyentpath);
 #else
-#define INIT_TTYENT
+#define INIT_TTYENTPATH
 #endif
 
 #if SANITIZER_INTERCEPT_PROTOENT
 static void write_protoent(void *ctx, struct __sanitizer_protoent *p) {
   COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(*p));
 
-  COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->p_name, REAL(strlen)(p->p_name) + 1);
+  COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->p_name, internal_strlen(p->p_name) + 1);
 
   SIZE_T pp_size = 1; // One handles the trailing \0
 
   for (char **pp = p->p_aliases; *pp; ++pp, ++pp_size)
-      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *pp, REAL(strlen)(*pp) + 1);
+      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *pp, internal_strlen(*pp) + 1);
 
   COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->p_aliases,
                                   pp_size * sizeof(char **));
@@ -7537,7 +7591,7 @@ INTERCEPTOR(struct __sanitizer_protoent *, getprotobyname, const char *name) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, getprotobyname, name);
   if (name)
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);
   struct __sanitizer_protoent *p = REAL(getprotobyname)(name);
   if (p)
     write_protoent(ctx, p);
@@ -7581,7 +7635,7 @@ INTERCEPTOR(int, getprotobyname_r, const char *name,
   COMMON_INTERCEPTOR_ENTER(ctx, getprotobyname_r, name, result_buf, buf,
                            buflen, result);
   if (name)
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);
   int res = REAL(getprotobyname_r)(name, result_buf, buf, buflen, result);
 
   COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof *result);
@@ -7620,12 +7674,12 @@ INTERCEPTOR(struct __sanitizer_netent *, getnetent) {
   if (n) {
     COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n, sizeof(*n));
 
-    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n->n_name, REAL(strlen)(n->n_name) + 1);
+    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n->n_name, internal_strlen(n->n_name) + 1);
 
     SIZE_T nn_size = 1; // One handles the trailing \0
 
     for (char **nn = n->n_aliases; *nn; ++nn, ++nn_size)
-      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *nn, REAL(strlen)(*nn) + 1);
+      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *nn, internal_strlen(*nn) + 1);
 
     COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n->n_aliases,
                                    nn_size * sizeof(char **));
@@ -7637,17 +7691,17 @@ INTERCEPTOR(struct __sanitizer_netent *, getnetbyname, const char *name) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, getnetbyname, name);
   if (name)
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);
   struct __sanitizer_netent *n = REAL(getnetbyname)(name);
   if (n) {
     COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n, sizeof(*n));
 
-    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n->n_name, REAL(strlen)(n->n_name) + 1);
+    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n->n_name, internal_strlen(n->n_name) + 1);
 
     SIZE_T nn_size = 1; // One handles the trailing \0
 
     for (char **nn = n->n_aliases; *nn; ++nn, ++nn_size)
-      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *nn, REAL(strlen)(*nn) + 1);
+      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *nn, internal_strlen(*nn) + 1);
 
     COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n->n_aliases,
                                    nn_size * sizeof(char **));
@@ -7662,12 +7716,12 @@ INTERCEPTOR(struct __sanitizer_netent *, getnetbyaddr, u32 net, int type) {
   if (n) {
     COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n, sizeof(*n));
 
-    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n->n_name, REAL(strlen)(n->n_name) + 1);
+    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n->n_name, internal_strlen(n->n_name) + 1);
 
     SIZE_T nn_size = 1; // One handles the trailing \0
 
     for (char **nn = n->n_aliases; *nn; ++nn, ++nn_size)
-      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *nn, REAL(strlen)(*nn) + 1);
+      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *nn, internal_strlen(*nn) + 1);
 
     COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n->n_aliases,
                                    nn_size * sizeof(char **));
@@ -7788,7 +7842,7 @@ INTERCEPTOR(int, regcomp, void *preg, const char *pattern, int cflags) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, regcomp, preg, pattern, cflags);
   if (pattern)
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, pattern, REAL(strlen)(pattern) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, pattern, internal_strlen(pattern) + 1);
   int res = REAL(regcomp)(preg, pattern, cflags);
   if (!res)
     COMMON_INTERCEPTOR_WRITE_RANGE(ctx, preg, struct_regex_sz);
@@ -7801,7 +7855,7 @@ INTERCEPTOR(int, regexec, const void *preg, const char *string, SIZE_T nmatch,
   if (preg)
     COMMON_INTERCEPTOR_READ_RANGE(ctx, preg, struct_regex_sz);
   if (string)
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, string, REAL(strlen)(string) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, string, internal_strlen(string) + 1);
   int res = REAL(regexec)(preg, string, nmatch, pmatch, eflags);
   if (!res && pmatch)
     COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pmatch, nmatch * struct_regmatch_sz);
@@ -7815,7 +7869,7 @@ INTERCEPTOR(SIZE_T, regerror, int errcode, const void *preg, char *errbuf,
     COMMON_INTERCEPTOR_READ_RANGE(ctx, preg, struct_regex_sz);
   SIZE_T res = REAL(regerror)(errcode, preg, errbuf, errbuf_size);
   if (errbuf)
-    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, errbuf, REAL(strlen)(errbuf) + 1);
+    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, errbuf, internal_strlen(errbuf) + 1);
   return res;
 }
 INTERCEPTOR(void, regfree, const void *preg) {
@@ -7840,15 +7894,15 @@ INTERCEPTOR(SSIZE_T, regnsub, char *buf, SIZE_T bufsiz, const char *sub,
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, regnsub, buf, bufsiz, sub, rm, str);
   if (sub)
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, sub, REAL(strlen)(sub) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, sub, internal_strlen(sub) + 1);
   // The implementation demands and hardcodes 10 elements
   if (rm)
     COMMON_INTERCEPTOR_READ_RANGE(ctx, rm, 10 * struct_regmatch_sz);
   if (str)
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, str, REAL(strlen)(str) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, str, internal_strlen(str) + 1);
   SSIZE_T res = REAL(regnsub)(buf, bufsiz, sub, rm, str);
   if (res > 0 && buf)
-    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, REAL(strlen)(buf) + 1);
+    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, internal_strlen(buf) + 1);
   return res;
 }
 INTERCEPTOR(SSIZE_T, regasub, char **buf, const char *sub,
@@ -7856,16 +7910,16 @@ INTERCEPTOR(SSIZE_T, regasub, char **buf, const char *sub,
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, regasub, buf, sub, rm, sstr);
   if (sub)
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, sub, REAL(strlen)(sub) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, sub, internal_strlen(sub) + 1);
   // Hardcode 10 elements as this is hardcoded size
   if (rm)
     COMMON_INTERCEPTOR_READ_RANGE(ctx, rm, 10 * struct_regmatch_sz);
   if (sstr)
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, sstr, REAL(strlen)(sstr) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, sstr, internal_strlen(sstr) + 1);
   SSIZE_T res = REAL(regasub)(buf, sub, rm, sstr);
   if (res > 0 && buf) {
     COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, sizeof(char *));
-    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *buf, REAL(strlen)(*buf) + 1);
+    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *buf, internal_strlen(*buf) + 1);
   }
   return res;
 }
@@ -7887,7 +7941,7 @@ INTERCEPTOR(void *, fts_open, char *const *path_argv, int options,
       COMMON_INTERCEPTOR_READ_RANGE(ctx, pa, sizeof(char **));
       if (!*pa)
         break;
-      COMMON_INTERCEPTOR_READ_RANGE(ctx, *pa, REAL(strlen)(*pa) + 1);
+      COMMON_INTERCEPTOR_READ_RANGE(ctx, *pa, internal_strlen(*pa) + 1);
     }
   }
   // TODO(kamil): handle compar callback
@@ -7979,7 +8033,7 @@ INTERCEPTOR(int, sysctlbyname, char *sname, void *oldp, SIZE_T *oldlenp,
   COMMON_INTERCEPTOR_ENTER(ctx, sysctlbyname, sname, oldp, oldlenp, newp,
                            newlen);
   if (sname)
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, sname, REAL(strlen)(sname) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, sname, internal_strlen(sname) + 1);
   if (oldlenp)
     COMMON_INTERCEPTOR_READ_RANGE(ctx, oldlenp, sizeof(*oldlenp));
   if (newp && newlen)
@@ -8000,7 +8054,7 @@ INTERCEPTOR(int, sysctlnametomib, const char *sname, int *name,
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, sysctlnametomib, sname, name, namelenp);
   if (sname)
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, sname, REAL(strlen)(sname) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, sname, internal_strlen(sname) + 1);
   if (namelenp)
     COMMON_INTERCEPTOR_READ_RANGE(ctx, namelenp, sizeof(*namelenp));
   int res = REAL(sysctlnametomib)(sname, name, namelenp);
@@ -8040,7 +8094,7 @@ INTERCEPTOR(void *, asysctlbyname, const char *sname, SIZE_T *len) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, asysctlbyname, sname, len);
   if (sname)
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, sname, REAL(strlen)(sname) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, sname, internal_strlen(sname) + 1);
   void *res = REAL(asysctlbyname)(sname, len);
   if (res && len) {
     COMMON_INTERCEPTOR_WRITE_RANGE(ctx, len, sizeof(*len));
@@ -8063,7 +8117,7 @@ INTERCEPTOR(int, sysctlgetmibinfo, char *sname, int *name,
   COMMON_INTERCEPTOR_ENTER(ctx, sysctlgetmibinfo, sname, name, namelenp, cname,
                            csz, rnode, v);
   if (sname)
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, sname, REAL(strlen)(sname) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, sname, internal_strlen(sname) + 1);
   if (namelenp)
     COMMON_INTERCEPTOR_READ_RANGE(ctx, namelenp, sizeof(*namelenp));
   if (csz)
@@ -8097,7 +8151,7 @@ INTERCEPTOR(char *, nl_langinfo, long item) {
   COMMON_INTERCEPTOR_ENTER(ctx, nl_langinfo, item);
   char *ret = REAL(nl_langinfo)(item);
   if (ret)
-    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, REAL(strlen)(ret) + 1);
+    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, internal_strlen(ret) + 1);
   return ret;
 }
 #define INIT_NL_LANGINFO COMMON_INTERCEPT_FUNCTION(nl_langinfo)
@@ -8117,7 +8171,7 @@ INTERCEPTOR(int, modctl, int operation, void *argp) {
       COMMON_INTERCEPTOR_READ_RANGE(ctx, ml, sizeof(*ml));
       if (ml->ml_filename)
         COMMON_INTERCEPTOR_READ_RANGE(ctx, ml->ml_filename,
-                                      REAL(strlen)(ml->ml_filename) + 1);
+                                      internal_strlen(ml->ml_filename) + 1);
       if (ml->ml_props)
         COMMON_INTERCEPTOR_READ_RANGE(ctx, ml->ml_props, ml->ml_propslen);
     }
@@ -8125,7 +8179,7 @@ INTERCEPTOR(int, modctl, int operation, void *argp) {
   } else if (operation == modctl_unload) {
     if (argp) {
       const char *name = (const char *)argp;
-      COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+      COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);
     }
     ret = REAL(modctl)(operation, argp);
   } else if (operation == modctl_stat) {
@@ -8167,7 +8221,7 @@ INTERCEPTOR(long long, strtonum, const char *nptr, long long minval,
   if (errstr) {
     COMMON_INTERCEPTOR_WRITE_RANGE(ctx, errstr, sizeof(const char *));
      if (*errstr)
-      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *errstr, REAL(strlen)(*errstr) + 1);
+      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *errstr, internal_strlen(*errstr) + 1);
   }
   return ret;
 }
@@ -8187,7 +8241,7 @@ INTERCEPTOR(char *, fparseln, __sanitizer_FILE *stream, SIZE_T *len,
     COMMON_INTERCEPTOR_READ_RANGE(ctx, delim, sizeof(delim[0]) * 3);
   char *ret = REAL(fparseln)(stream, len, lineno, delim, flags);
   if (ret) {
-    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, REAL(strlen)(ret) + 1);
+    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, internal_strlen(ret) + 1);
     if (len)
       COMMON_INTERCEPTOR_WRITE_RANGE(ctx, len, sizeof(*len));
     if (lineno)
@@ -8204,7 +8258,7 @@ INTERCEPTOR(char *, fparseln, __sanitizer_FILE *stream, SIZE_T *len,
 INTERCEPTOR(int, statvfs1, const char *path, void *buf, int flags) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, statvfs1, path, buf, flags);
-  if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+  if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
   int res = REAL(statvfs1)(path, buf, flags);
   if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statvfs_sz);
   return res;
@@ -8485,7 +8539,7 @@ INTERCEPTOR(char *, SHA1File, char *filename, char *buf) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, SHA1File, filename, buf);
   if (filename)
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, REAL(strlen)(filename) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, internal_strlen(filename) + 1);
   char *ret = REAL(SHA1File)(filename, buf);
   if (ret)
     COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, SHA1_return_length);
@@ -8496,7 +8550,7 @@ INTERCEPTOR(char *, SHA1FileChunk, char *filename, char *buf, OFF_T offset,
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, SHA1FileChunk, filename, buf, offset, length);
   if (filename)
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, REAL(strlen)(filename) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, internal_strlen(filename) + 1);
   char *ret = REAL(SHA1FileChunk)(filename, buf, offset, length);
   if (ret)
     COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, SHA1_return_length);
@@ -8572,7 +8626,7 @@ INTERCEPTOR(char *, MD4File, const char *filename, char *buf) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, MD4File, filename, buf);
   if (filename)
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, REAL(strlen)(filename) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, internal_strlen(filename) + 1);
   char *ret = REAL(MD4File)(filename, buf);
   if (ret)
     COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, MD4_return_length);
@@ -8655,7 +8709,7 @@ INTERCEPTOR(char *, RMD160File, char *filename, char *buf) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, RMD160File, filename, buf);
   if (filename)
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, REAL(strlen)(filename) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, internal_strlen(filename) + 1);
   char *ret = REAL(RMD160File)(filename, buf);
   if (ret)
     COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, RMD160_return_length);
@@ -8666,7 +8720,7 @@ INTERCEPTOR(char *, RMD160FileChunk, char *filename, char *buf, OFF_T offset,
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, RMD160FileChunk, filename, buf, offset, length);
   if (filename)
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, REAL(strlen)(filename) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, internal_strlen(filename) + 1);
   char *ret = REAL(RMD160FileChunk)(filename, buf, offset, length);
   if (ret)
     COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, RMD160_return_length);
@@ -8742,7 +8796,7 @@ INTERCEPTOR(char *, MD5File, const char *filename, char *buf) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, MD5File, filename, buf);
   if (filename)
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, REAL(strlen)(filename) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, internal_strlen(filename) + 1);
   char *ret = REAL(MD5File)(filename, buf);
   if (ret)
     COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, MD5_return_length);
@@ -8872,7 +8926,7 @@ INTERCEPTOR(char *, MD2File, const char *filename, char *buf) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, MD2File, filename, buf);
   if (filename)
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, REAL(strlen)(filename) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, internal_strlen(filename) + 1);
   char *ret = REAL(MD2File)(filename, buf);
   if (ret)
     COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, MD2_return_length);
@@ -8950,7 +9004,7 @@ INTERCEPTOR(char *, MD2Data, const unsigned char *data, unsigned int len,
     void *ctx; \
     COMMON_INTERCEPTOR_ENTER(ctx, SHA##LEN##_File, filename, buf); \
     if (filename) \
-      COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, REAL(strlen)(filename) + 1);\
+      COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, internal_strlen(filename) + 1);\
     char *ret = REAL(SHA##LEN##_File)(filename, buf); \
     if (ret) \
       COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, SHA##LEN##_return_length); \
@@ -8962,7 +9016,7 @@ INTERCEPTOR(char *, MD2Data, const unsigned char *data, unsigned int len,
     COMMON_INTERCEPTOR_ENTER(ctx, SHA##LEN##_FileChunk, filename, buf, offset, \
   length); \
     if (filename) \
-      COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, REAL(strlen)(filename) + 1);\
+      COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, internal_strlen(filename) + 1);\
     char *ret = REAL(SHA##LEN##_FileChunk)(filename, buf, offset, length); \
     if (ret) \
       COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, SHA##LEN##_return_length); \
@@ -9026,7 +9080,7 @@ INTERCEPTOR(int, strvis, char *dst, const char *src, int flag) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, strvis, dst, src, flag);
   if (src)
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, src, REAL(strlen)(src) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, src, internal_strlen(src) + 1);
   int len = REAL(strvis)(dst, src, flag);
   if (dst)
     COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, len + 1);
@@ -9036,7 +9090,7 @@ INTERCEPTOR(int, stravis, char **dst, const char *src, int flag) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, stravis, dst, src, flag);
   if (src)
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, src, REAL(strlen)(src) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, src, internal_strlen(src) + 1);
   int len = REAL(stravis)(dst, src, flag);
   if (dst) {
     COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, sizeof(char *));
@@ -9049,7 +9103,7 @@ INTERCEPTOR(int, strnvis, char *dst, SIZE_T dlen, const char *src, int flag) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, strnvis, dst, dlen, src, flag);
   if (src)
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, src, REAL(strlen)(src) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, src, internal_strlen(src) + 1);
   int len = REAL(strnvis)(dst, dlen, src, flag);
   // The interface will be valid even if there is no space for NULL char
   if (dst && len > 0)
@@ -9099,7 +9153,7 @@ INTERCEPTOR(char *, svis, char *dst, int c, int flag, int nextc,
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, svis, dst, c, flag, nextc, extra);
   if (extra)
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, extra, REAL(strlen)(extra) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, extra, internal_strlen(extra) + 1);
   char *end = REAL(svis)(dst, c, flag, nextc, extra);
   if (dst && end)
     COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, end - dst + 1);
@@ -9110,7 +9164,7 @@ INTERCEPTOR(char *, snvis, char *dst, SIZE_T dlen, int c, int flag, int nextc,
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, snvis, dst, dlen, c, flag, nextc, extra);
   if (extra)
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, extra, REAL(strlen)(extra) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, extra, internal_strlen(extra) + 1);
   char *end = REAL(snvis)(dst, dlen, c, flag, nextc, extra);
   if (dst && end)
     COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst,
@@ -9122,9 +9176,9 @@ INTERCEPTOR(int, strsvis, char *dst, const char *src, int flag,
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, strsvis, dst, src, flag, extra);
   if (src)
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, src, REAL(strlen)(src) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, src, internal_strlen(src) + 1);
   if (extra)
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, extra, REAL(strlen)(extra) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, extra, internal_strlen(extra) + 1);
   int len = REAL(strsvis)(dst, src, flag, extra);
   if (dst)
     COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, len + 1);
@@ -9135,9 +9189,9 @@ INTERCEPTOR(int, strsnvis, char *dst, SIZE_T dlen, const char *src, int flag,
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, strsnvis, dst, dlen, src, flag, extra);
   if (src)
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, src, REAL(strlen)(src) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, src, internal_strlen(src) + 1);
   if (extra)
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, extra, REAL(strlen)(extra) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, extra, internal_strlen(extra) + 1);
   int len = REAL(strsnvis)(dst, dlen, src, flag, extra);
   // The interface will be valid even if there is no space for NULL char
   if (dst && len >= 0)
@@ -9151,7 +9205,7 @@ INTERCEPTOR(int, strsvisx, char *dst, const char *src, SIZE_T len, int flag,
   if (src)
     COMMON_INTERCEPTOR_READ_RANGE(ctx, src, len);
   if (extra)
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, extra, REAL(strlen)(extra) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, extra, internal_strlen(extra) + 1);
   int ret = REAL(strsvisx)(dst, src, len, flag, extra);
   if (dst)
     COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, ret + 1);
@@ -9164,7 +9218,7 @@ INTERCEPTOR(int, strsnvisx, char *dst, SIZE_T dlen, const char *src, SIZE_T len,
   if (src)
     COMMON_INTERCEPTOR_READ_RANGE(ctx, src, len);
   if (extra)
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, extra, REAL(strlen)(extra) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, extra, internal_strlen(extra) + 1);
   int ret = REAL(strsnvisx)(dst, dlen, src, len, flag, extra);
   if (dst && ret >= 0)
     COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, ret + 1);
@@ -9178,7 +9232,7 @@ INTERCEPTOR(int, strsenvisx, char *dst, SIZE_T dlen, const char *src,
   if (src)
     COMMON_INTERCEPTOR_READ_RANGE(ctx, src, len);
   if (extra)
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, extra, REAL(strlen)(extra) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, extra, internal_strlen(extra) + 1);
   // FIXME: only need to be checked when "flag | VIS_NOLOCALE" doesn't hold
   // according to the implementation
   if (cerr_ptr)
@@ -9205,7 +9259,7 @@ INTERCEPTOR(int, strunvis, char *dst, const char *src) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, strunvis, dst, src);
   if (src)
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, src, REAL(strlen)(src) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, src, internal_strlen(src) + 1);
   int ret = REAL(strunvis)(dst, src);
   if (ret != -1)
     COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, ret + 1);
@@ -9215,7 +9269,7 @@ INTERCEPTOR(int, strnunvis, char *dst, SIZE_T dlen, const char *src) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, strnunvis, dst, dlen, src);
   if (src)
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, src, REAL(strlen)(src) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, src, internal_strlen(src) + 1);
   int ret = REAL(strnunvis)(dst, dlen, src);
   if (ret != -1)
     COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, ret + 1);
@@ -9225,7 +9279,7 @@ INTERCEPTOR(int, strunvisx, char *dst, const char *src, int flag) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, strunvisx, dst, src, flag);
   if (src)
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, src, REAL(strlen)(src) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, src, internal_strlen(src) + 1);
   int ret = REAL(strunvisx)(dst, src, flag);
   if (ret != -1)
     COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, ret + 1);
@@ -9236,7 +9290,7 @@ INTERCEPTOR(int, strnunvisx, char *dst, SIZE_T dlen, const char *src,
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, strnunvisx, dst, dlen, src, flag);
   if (src)
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, src, REAL(strlen)(src) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, src, internal_strlen(src) + 1);
   int ret = REAL(strnunvisx)(dst, dlen, src, flag);
   if (ret != -1)
     COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, ret + 1);
@@ -9272,7 +9326,7 @@ INTERCEPTOR(struct __sanitizer_cdbr *, cdbr_open, const char *path, int flags) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, cdbr_open, path, flags);
   if (path)
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
   struct __sanitizer_cdbr *cdbr = REAL(cdbr_open)(path, flags);
   if (cdbr)
     COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cdbr, sizeof(*cdbr));
@@ -9464,7 +9518,7 @@ INTERCEPTOR(void *, getfsspec, const char *spec) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, getfsspec, spec);
   if (spec)
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, spec, REAL(strlen)(spec) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, spec, internal_strlen(spec) + 1);
   void *ret = REAL(getfsspec)(spec);
   if (ret)
     COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, struct_fstab_sz);
@@ -9475,7 +9529,7 @@ INTERCEPTOR(void *, getfsfile, const char *file) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, getfsfile, file);
   if (file)
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, file, REAL(strlen)(file) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, file, internal_strlen(file) + 1);
   void *ret = REAL(getfsfile)(file);
   if (ret)
     COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, struct_fstab_sz);
@@ -9519,9 +9573,9 @@ INTERCEPTOR(__sanitizer_FILE *, popen, const char *command, const char *type) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, popen, command, type);
   if (command)
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, command, REAL(strlen)(command) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, command, internal_strlen(command) + 1);
   if (type)
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, type, REAL(strlen)(type) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, type, internal_strlen(type) + 1);
   __sanitizer_FILE *res = REAL(popen)(command, type);
   COMMON_INTERCEPTOR_FILE_OPEN(ctx, res, nullptr);
   if (res) unpoison_file(res);
@@ -9538,13 +9592,13 @@ INTERCEPTOR(__sanitizer_FILE *, popenve, const char *path,
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, popenve, path, argv, envp, type);
   if (path)
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
   if (argv) {
     for (char *const *pa = argv; ; ++pa) {
       COMMON_INTERCEPTOR_READ_RANGE(ctx, pa, sizeof(char **));
       if (!*pa)
         break;
-      COMMON_INTERCEPTOR_READ_RANGE(ctx, *pa, REAL(strlen)(*pa) + 1);
+      COMMON_INTERCEPTOR_READ_RANGE(ctx, *pa, internal_strlen(*pa) + 1);
     }
   }
   if (envp) {
@@ -9552,11 +9606,11 @@ INTERCEPTOR(__sanitizer_FILE *, popenve, const char *path,
       COMMON_INTERCEPTOR_READ_RANGE(ctx, pa, sizeof(char **));
       if (!*pa)
         break;
-      COMMON_INTERCEPTOR_READ_RANGE(ctx, *pa, REAL(strlen)(*pa) + 1);
+      COMMON_INTERCEPTOR_READ_RANGE(ctx, *pa, internal_strlen(*pa) + 1);
     }
   }
   if (type)
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, type, REAL(strlen)(type) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, type, internal_strlen(type) + 1);
   __sanitizer_FILE *res = REAL(popenve)(path, argv, envp, type);
   COMMON_INTERCEPTOR_FILE_OPEN(ctx, res, nullptr);
   if (res) unpoison_file(res);
@@ -9752,7 +9806,7 @@ INTERCEPTOR(char *, fdevname,  int fd) {
   COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
   char *name = REAL(fdevname)(fd);
   if (name) {
-    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, name, REAL(strlen)(name) + 1);
+    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, name, internal_strlen(name) + 1);
     if (fd > 0)
       COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
   }
@@ -9765,7 +9819,7 @@ INTERCEPTOR(char *, fdevname_r,  int fd, char *buf, SIZE_T len) {
   COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
   char *name = REAL(fdevname_r)(fd, buf, len);
   if (name && buf && len > 0) {
-    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, REAL(strlen)(buf) + 1);
+    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, internal_strlen(buf) + 1);
     if (fd > 0)
       COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
   }
@@ -9785,7 +9839,7 @@ INTERCEPTOR(char *, getusershell) {
   COMMON_INTERCEPTOR_ENTER(ctx, getusershell);
   char *res = REAL(getusershell)();
   if (res)
-    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, internal_strlen(res) + 1);
   return res;
 }
 
@@ -9810,7 +9864,7 @@ INTERCEPTOR(int, sl_add, void *sl, char *item) {
   if (sl)
     COMMON_INTERCEPTOR_READ_RANGE(ctx, sl, __sanitizer::struct_StringList_sz);
   if (item)
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, item, REAL(strlen)(item) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, item, internal_strlen(item) + 1);
   int res = REAL(sl_add)(sl, item);
   if (!res)
     COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sl, __sanitizer::struct_StringList_sz);
@@ -9823,10 +9877,10 @@ INTERCEPTOR(char *, sl_find, void *sl, const char *item) {
   if (sl)
     COMMON_INTERCEPTOR_READ_RANGE(ctx, sl, __sanitizer::struct_StringList_sz);
   if (item)
-    COMMON_INTERCEPTOR_READ_RANGE(ctx, item, REAL(strlen)(item) + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, item, internal_strlen(item) + 1);
   char *res = REAL(sl_find)(sl, item);
   if (res)
-    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, internal_strlen(res) + 1);
   return res;
 }
 
@@ -9912,7 +9966,52 @@ INTERCEPTOR(int, getentropy, void *buf, SIZE_T buflen) {
 #define INIT_GETENTROPY
 #endif
 
-#if SANITIZER_INTERCEPT_QSORT
+#if SANITIZER_INTERCEPT_QSORT_R
+typedef int (*qsort_r_compar_f)(const void *, const void *, void *);
+struct qsort_r_compar_params {
+  SIZE_T size;
+  qsort_r_compar_f compar;
+  void *arg;
+};
+static int wrapped_qsort_r_compar(const void *a, const void *b, void *arg) {
+  qsort_r_compar_params *params = (qsort_r_compar_params *)arg;
+  COMMON_INTERCEPTOR_UNPOISON_PARAM(3);
+  COMMON_INTERCEPTOR_INITIALIZE_RANGE(a, params->size);
+  COMMON_INTERCEPTOR_INITIALIZE_RANGE(b, params->size);
+  return params->compar(a, b, params->arg);
+}
+
+INTERCEPTOR(void, qsort_r, void *base, SIZE_T nmemb, SIZE_T size,
+            qsort_r_compar_f compar, void *arg) {
+  void *ctx;
+  COMMON_INTERCEPTOR_ENTER(ctx, qsort_r, base, nmemb, size, compar, arg);
+  // Run the comparator over all array elements to detect any memory issues.
+  if (nmemb > 1) {
+    for (SIZE_T i = 0; i < nmemb - 1; ++i) {
+      void *p = (void *)((char *)base + i * size);
+      void *q = (void *)((char *)base + (i + 1) * size);
+      COMMON_INTERCEPTOR_UNPOISON_PARAM(3);
+      compar(p, q, arg);
+    }
+  }
+  qsort_r_compar_params params = {size, compar, arg};
+  REAL(qsort_r)(base, nmemb, size, wrapped_qsort_r_compar, &params);
+  COMMON_INTERCEPTOR_WRITE_RANGE(ctx, base, nmemb * size);
+}
+#  define INIT_QSORT_R COMMON_INTERCEPT_FUNCTION(qsort_r)
+#else
+#  define INIT_QSORT_R
+#endif
+
+#if SANITIZER_INTERCEPT_QSORT && SANITIZER_INTERCEPT_QSORT_R
+INTERCEPTOR(void, qsort, void *base, SIZE_T nmemb, SIZE_T size,
+            qsort_r_compar_f compar) {
+  void *ctx;
+  COMMON_INTERCEPTOR_ENTER(ctx, qsort, base, nmemb, size, compar);
+  WRAP(qsort_r)(base, nmemb, size, compar, nullptr);
+}
+#  define INIT_QSORT COMMON_INTERCEPT_FUNCTION(qsort)
+#elif SANITIZER_INTERCEPT_QSORT && !SANITIZER_INTERCEPT_QSORT_R
 // Glibc qsort uses a temporary buffer allocated either on stack or on heap.
 // Poisoned memory from there may get copied into the comparator arguments,
 // where it needs to be dealt with. But even that is not enough - the results of
@@ -9927,7 +10026,7 @@ INTERCEPTOR(int, getentropy, void *buf, SIZE_T buflen) {
 typedef int (*qsort_compar_f)(const void *, const void *);
 static THREADLOCAL qsort_compar_f qsort_compar;
 static THREADLOCAL SIZE_T qsort_size;
-int wrapped_qsort_compar(const void *a, const void *b) {
+static int wrapped_qsort_compar(const void *a, const void *b) {
   COMMON_INTERCEPTOR_UNPOISON_PARAM(2);
   COMMON_INTERCEPTOR_INITIALIZE_RANGE(a, qsort_size);
   COMMON_INTERCEPTOR_INITIALIZE_RANGE(b, qsort_size);
@@ -9969,60 +10068,34 @@ INTERCEPTOR(void, qsort, void *base, SIZE_T nmemb, SIZE_T size,
   }
   COMMON_INTERCEPTOR_WRITE_RANGE(ctx, base, nmemb * size);
 }
-#define INIT_QSORT COMMON_INTERCEPT_FUNCTION(qsort)
+#  define INIT_QSORT COMMON_INTERCEPT_FUNCTION(qsort)
 #else
-#define INIT_QSORT
+#  define INIT_QSORT
 #endif
 
-#if SANITIZER_INTERCEPT_QSORT_R
-typedef int (*qsort_r_compar_f)(const void *, const void *, void *);
-static THREADLOCAL qsort_r_compar_f qsort_r_compar;
-static THREADLOCAL SIZE_T qsort_r_size;
-int wrapped_qsort_r_compar(const void *a, const void *b, void *arg) {
-  COMMON_INTERCEPTOR_UNPOISON_PARAM(3);
-  COMMON_INTERCEPTOR_INITIALIZE_RANGE(a, qsort_r_size);
-  COMMON_INTERCEPTOR_INITIALIZE_RANGE(b, qsort_r_size);
-  return qsort_r_compar(a, b, arg);
+#if SANITIZER_INTERCEPT_BSEARCH
+typedef int (*bsearch_compar_f)(const void *, const void *);
+struct bsearch_compar_params {
+  const void *key;
+  bsearch_compar_f compar;
+};
+
+static int wrapped_bsearch_compar(const void *key, const void *b) {
+  const bsearch_compar_params *params = (const bsearch_compar_params *)key;
+  COMMON_INTERCEPTOR_UNPOISON_PARAM(2);
+  return params->compar(params->key, b);
 }
 
-INTERCEPTOR(void, qsort_r, void *base, SIZE_T nmemb, SIZE_T size,
-            qsort_r_compar_f compar, void *arg) {
+INTERCEPTOR(void *, bsearch, const void *key, const void *base, SIZE_T nmemb,
+            SIZE_T size, bsearch_compar_f compar) {
   void *ctx;
-  COMMON_INTERCEPTOR_ENTER(ctx, qsort_r, base, nmemb, size, compar, arg);
-  // Run the comparator over all array elements to detect any memory issues.
-  if (nmemb > 1) {
-    for (SIZE_T i = 0; i < nmemb - 1; ++i) {
-      void *p = (void *)((char *)base + i * size);
-      void *q = (void *)((char *)base + (i + 1) * size);
-      COMMON_INTERCEPTOR_UNPOISON_PARAM(3);
-      compar(p, q, arg);
-    }
-  }
-  qsort_r_compar_f old_compar = qsort_r_compar;
-  SIZE_T old_size = qsort_r_size;
-  // Handle qsort_r() implementations that recurse using an
-  // interposable function call:
-  bool already_wrapped = compar == wrapped_qsort_r_compar;
-  if (already_wrapped) {
-    // This case should only happen if the qsort() implementation calls itself
-    // using a preemptible function call (e.g. the FreeBSD libc version).
-    // Check that the size and comparator arguments are as expected.
-    CHECK_NE(compar, qsort_r_compar);
-    CHECK_EQ(qsort_r_size, size);
-  } else {
-    qsort_r_compar = compar;
-    qsort_r_size = size;
-  }
-  REAL(qsort_r)(base, nmemb, size, wrapped_qsort_r_compar, arg);
-  if (!already_wrapped) {
-    qsort_r_compar = old_compar;
-    qsort_r_size = old_size;
-  }
-  COMMON_INTERCEPTOR_WRITE_RANGE(ctx, base, nmemb * size);
+  COMMON_INTERCEPTOR_ENTER(ctx, bsearch, key, base, nmemb, size, compar);
+  bsearch_compar_params params = {key, compar};
+  return REAL(bsearch)(&params, base, nmemb, size, wrapped_bsearch_compar);
 }
-#define INIT_QSORT_R COMMON_INTERCEPT_FUNCTION(qsort_r)
+#  define INIT_BSEARCH COMMON_INTERCEPT_FUNCTION(bsearch)
 #else
-#define INIT_QSORT_R
+#  define INIT_BSEARCH
 #endif
 
 #if SANITIZER_INTERCEPT_SIGALTSTACK
@@ -10391,6 +10464,7 @@ static void InitializeCommonInterceptors() {
   INIT_GETENTROPY;
   INIT_QSORT;
   INIT_QSORT_R;
+  INIT_BSEARCH;
   INIT_SIGALTSTACK;
   INIT_UNAME;
   INIT___XUNAME;
index 082398ba960afd090b98531ee5cfaa211f9ebc3e..220abb89c3beba8457c1abfe996a72516e282e73 100644 (file)
@@ -324,8 +324,8 @@ static void scanf_common(void *ctx, int n_inputs, bool allowGnuMalloc,
       continue;
     int size = scanf_get_value_size(&dir);
     if (size == FSS_INVALID) {
-      Report("%s: WARNING: unexpected format specifier in scanf interceptor: ",
-             SanitizerToolName, "%.*s\n", dir.end - dir.begin, dir.begin);
+      Report("%s: WARNING: unexpected format specifier in scanf interceptor: %.*s\n",
+             SanitizerToolName, static_cast<int>(dir.end - dir.begin), dir.begin);
       break;
     }
     void *argp = va_arg(aq, void *);
@@ -469,7 +469,7 @@ static int printf_get_value_size(PrintfDirective *dir) {
         break;                                                     \
       default:                                                     \
         Report("WARNING: unexpected floating-point arg size"       \
-               " in printf interceptor: %d\n", size);              \
+               " in printf interceptor: %zu\n", static_cast<uptr>(size));             \
         return;                                                    \
       }                                                            \
     } else {                                                       \
@@ -484,7 +484,7 @@ static int printf_get_value_size(PrintfDirective *dir) {
         break;                                                     \
       default:                                                     \
         Report("WARNING: unexpected arg size"                      \
-               " in printf interceptor: %d\n", size);              \
+               " in printf interceptor: %zu\n", static_cast<uptr>(size));             \
         return;                                                    \
       }                                                            \
     }                                                              \
@@ -530,7 +530,7 @@ static void printf_common(void *ctx, const char *format, va_list aq) {
         Report(
             "%s: WARNING: unexpected format specifier in printf "
             "interceptor: %.*s (reported once per process)\n",
-            SanitizerToolName, dir.end - dir.begin, dir.begin);
+            SanitizerToolName, static_cast<int>(dir.end - dir.begin), dir.begin);
       break;
     }
     if (dir.convSpecifier == 'n') {
index 6aa73ec8c6a2b940d9d83a86d0ffa04016ae154e..f6ac3fa5af18424517230b5cc2862e9038ddd685 100644 (file)
@@ -33,7 +33,7 @@
 INTERCEPTOR(int, statvfs, char *path, void *buf) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, statvfs, path, buf);
-  if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+  if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
   // FIXME: under ASan the call below may write to freed memory and corrupt
   // its metadata. See
   // https://github.com/google/sanitizers/issues/321.
@@ -99,7 +99,7 @@ INTERCEPTOR(int, getvfsstat, void *buf, SIZE_T bufsize, int flags) {
 INTERCEPTOR(int, statvfs1, const char *path, void *buf, int flags) {
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, statvfs1, path, buf, flags);
-  if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+  if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
   int res = REAL(statvfs1)(path, buf, flags);
   if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statvfs90_sz);
   return res;
index 9a4e5388f24d13de44947f31eb27ee0e06fd2041..a20602d8b95a73ab029908a779e8d84edd6cfd26 100644 (file)
@@ -25,6 +25,7 @@ void LogMessageOnPrintf(const char *str) {}
 #endif
 void WriteToSyslog(const char *buffer) {}
 void Abort() { internal__exit(1); }
+bool CreateDir(const char *pathname) { return false; }
 #endif // !SANITIZER_WINDOWS
 
 #if !SANITIZER_WINDOWS && !SANITIZER_MAC
index 1b89d6e176840570667808f8301c9a1c01b68c3b..a38b134085aabde4e4d589bc98de222f0fc297e5 100644 (file)
 #include "sanitizer_platform.h"
 #if SANITIZER_LINUX
 
-#include "sanitizer_libc.h"
+#  include "sanitizer_libc.h"
 
-#define PRE_SYSCALL(name)                                                      \
-  SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_syscall_pre_impl_##name
-#define PRE_READ(p, s) COMMON_SYSCALL_PRE_READ_RANGE(p, s)
-#define PRE_WRITE(p, s) COMMON_SYSCALL_PRE_WRITE_RANGE(p, s)
+#  define PRE_SYSCALL(name) \
+    SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_syscall_pre_impl_##name
+#  define PRE_READ(p, s) COMMON_SYSCALL_PRE_READ_RANGE(p, s)
+#  define PRE_WRITE(p, s) COMMON_SYSCALL_PRE_WRITE_RANGE(p, s)
 
-#define POST_SYSCALL(name)                                                     \
-  SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_syscall_post_impl_##name
-#define POST_READ(p, s) COMMON_SYSCALL_POST_READ_RANGE(p, s)
-#define POST_WRITE(p, s) COMMON_SYSCALL_POST_WRITE_RANGE(p, s)
+#  define POST_SYSCALL(name) \
+    SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_syscall_post_impl_##name
+#  define POST_READ(p, s) COMMON_SYSCALL_POST_READ_RANGE(p, s)
+#  define POST_WRITE(p, s) COMMON_SYSCALL_POST_WRITE_RANGE(p, s)
 
-#ifndef COMMON_SYSCALL_ACQUIRE
-# define COMMON_SYSCALL_ACQUIRE(addr) ((void)(addr))
-#endif
+#  ifndef COMMON_SYSCALL_ACQUIRE
+#    define COMMON_SYSCALL_ACQUIRE(addr) ((void)(addr))
+#  endif
 
-#ifndef COMMON_SYSCALL_RELEASE
-# define COMMON_SYSCALL_RELEASE(addr) ((void)(addr))
-#endif
+#  ifndef COMMON_SYSCALL_RELEASE
+#    define COMMON_SYSCALL_RELEASE(addr) ((void)(addr))
+#  endif
 
-#ifndef COMMON_SYSCALL_FD_CLOSE
-# define COMMON_SYSCALL_FD_CLOSE(fd) ((void)(fd))
-#endif
+#  ifndef COMMON_SYSCALL_FD_CLOSE
+#    define COMMON_SYSCALL_FD_CLOSE(fd) ((void)(fd))
+#  endif
 
-#ifndef COMMON_SYSCALL_FD_ACQUIRE
-# define COMMON_SYSCALL_FD_ACQUIRE(fd) ((void)(fd))
-#endif
+#  ifndef COMMON_SYSCALL_FD_ACQUIRE
+#    define COMMON_SYSCALL_FD_ACQUIRE(fd) ((void)(fd))
+#  endif
 
-#ifndef COMMON_SYSCALL_FD_RELEASE
-# define COMMON_SYSCALL_FD_RELEASE(fd) ((void)(fd))
-#endif
+#  ifndef COMMON_SYSCALL_FD_RELEASE
+#    define COMMON_SYSCALL_FD_RELEASE(fd) ((void)(fd))
+#  endif
 
-#ifndef COMMON_SYSCALL_PRE_FORK
-# define COMMON_SYSCALL_PRE_FORK() {}
-#endif
+#  ifndef COMMON_SYSCALL_PRE_FORK
+#    define COMMON_SYSCALL_PRE_FORK() \
+      {}
+#  endif
 
-#ifndef COMMON_SYSCALL_POST_FORK
-# define COMMON_SYSCALL_POST_FORK(res) {}
-#endif
+#  ifndef COMMON_SYSCALL_POST_FORK
+#    define COMMON_SYSCALL_POST_FORK(res) \
+      {}
+#  endif
 
 // FIXME: do some kind of PRE_READ for all syscall arguments (int(s) and such).
 
@@ -130,8 +132,8 @@ struct sanitizer_kernel_sockaddr {
 // Declare it "void" to catch sizeof(kernel_sigset_t).
 typedef void kernel_sigset_t;
 
-static void kernel_write_iovec(const __sanitizer_iovec *iovec,
-                        SIZE_T iovlen, SIZE_T maxlen) {
+static void kernel_write_iovec(const __sanitizer_iovec *iovec, SIZE_T iovlen,
+                               SIZE_T maxlen) {
   for (SIZE_T i = 0; i < iovlen && maxlen; ++i) {
     SSIZE_T sz = Min(iovec[i].iov_len, maxlen);
     POST_WRITE(iovec[i].iov_base, sz);
@@ -141,8 +143,8 @@ static void kernel_write_iovec(const __sanitizer_iovec *iovec,
 
 // This functions uses POST_READ, because it needs to run after syscall to know
 // the real read range.
-static void kernel_read_iovec(const __sanitizer_iovec *iovec,
-                       SIZE_T iovlen, SIZE_T maxlen) {
+static void kernel_read_iovec(const __sanitizer_iovec *iovec, SIZE_T iovlen,
+                              SIZE_T maxlen) {
   POST_READ(iovec, sizeof(*iovec) * iovlen);
   for (SIZE_T i = 0; i < iovlen && maxlen; ++i) {
     SSIZE_T sz = Min(iovec[i].iov_len, maxlen);
@@ -155,8 +157,8 @@ PRE_SYSCALL(recvmsg)(long sockfd, sanitizer_kernel_msghdr *msg, long flags) {
   PRE_READ(msg, sizeof(*msg));
 }
 
-POST_SYSCALL(recvmsg)(long res, long sockfd, sanitizer_kernel_msghdr *msg,
-                      long flags) {
+POST_SYSCALL(recvmsg)
+(long res, long sockfd, sanitizer_kernel_msghdr *msg, long flags) {
   if (res >= 0) {
     if (msg) {
       for (unsigned long i = 0; i < msg->msg_iovlen; ++i) {
@@ -167,13 +169,14 @@ POST_SYSCALL(recvmsg)(long res, long sockfd, sanitizer_kernel_msghdr *msg,
   }
 }
 
-PRE_SYSCALL(recvmmsg)(long fd, sanitizer_kernel_mmsghdr *msg, long vlen,
-                      long flags, void *timeout) {
+PRE_SYSCALL(recvmmsg)
+(long fd, sanitizer_kernel_mmsghdr *msg, long vlen, long flags, void *timeout) {
   PRE_READ(msg, vlen * sizeof(*msg));
 }
 
-POST_SYSCALL(recvmmsg)(long res, long fd, sanitizer_kernel_mmsghdr *msg,
-                       long vlen, long flags, void *timeout) {
+POST_SYSCALL(recvmmsg)
+(long res, long fd, sanitizer_kernel_mmsghdr *msg, long vlen, long flags,
+ void *timeout) {
   if (res >= 0) {
     if (msg) {
       for (unsigned long i = 0; i < msg->msg_hdr.msg_iovlen; ++i) {
@@ -183,7 +186,8 @@ POST_SYSCALL(recvmmsg)(long res, long fd, sanitizer_kernel_mmsghdr *msg,
       POST_WRITE(msg->msg_hdr.msg_control, msg->msg_hdr.msg_controllen);
       POST_WRITE(&msg->msg_len, sizeof(msg->msg_len));
     }
-    if (timeout) POST_WRITE(timeout, struct_timespec_sz);
+    if (timeout)
+      POST_WRITE(timeout, struct_timespec_sz);
   }
 }
 
@@ -203,7 +207,8 @@ PRE_SYSCALL(time)(void *tloc) {}
 
 POST_SYSCALL(time)(long res, void *tloc) {
   if (res >= 0) {
-    if (tloc) POST_WRITE(tloc, sizeof(long));
+    if (tloc)
+      POST_WRITE(tloc, sizeof(long));
   }
 }
 
@@ -211,7 +216,8 @@ PRE_SYSCALL(stime)(void *tptr) {}
 
 POST_SYSCALL(stime)(long res, void *tptr) {
   if (res >= 0) {
-    if (tptr) POST_WRITE(tptr, sizeof(long));
+    if (tptr)
+      POST_WRITE(tptr, sizeof(long));
   }
 }
 
@@ -219,8 +225,10 @@ PRE_SYSCALL(gettimeofday)(void *tv, void *tz) {}
 
 POST_SYSCALL(gettimeofday)(long res, void *tv, void *tz) {
   if (res >= 0) {
-    if (tv) POST_WRITE(tv, timeval_sz);
-    if (tz) POST_WRITE(tz, struct_timezone_sz);
+    if (tv)
+      POST_WRITE(tv, timeval_sz);
+    if (tz)
+      POST_WRITE(tz, struct_timezone_sz);
   }
 }
 
@@ -228,26 +236,30 @@ PRE_SYSCALL(settimeofday)(void *tv, void *tz) {}
 
 POST_SYSCALL(settimeofday)(long res, void *tv, void *tz) {
   if (res >= 0) {
-    if (tv) POST_WRITE(tv, timeval_sz);
-    if (tz) POST_WRITE(tz, struct_timezone_sz);
+    if (tv)
+      POST_WRITE(tv, timeval_sz);
+    if (tz)
+      POST_WRITE(tz, struct_timezone_sz);
   }
 }
 
-#if !SANITIZER_ANDROID
+#  if !SANITIZER_ANDROID
 PRE_SYSCALL(adjtimex)(void *txc_p) {}
 
 POST_SYSCALL(adjtimex)(long res, void *txc_p) {
   if (res >= 0) {
-    if (txc_p) POST_WRITE(txc_p, struct_timex_sz);
+    if (txc_p)
+      POST_WRITE(txc_p, struct_timex_sz);
   }
 }
-#endif
+#  endif
 
 PRE_SYSCALL(times)(void *tbuf) {}
 
 POST_SYSCALL(times)(long res, void *tbuf) {
   if (res >= 0) {
-    if (tbuf) POST_WRITE(tbuf, struct_tms_sz);
+    if (tbuf)
+      POST_WRITE(tbuf, struct_tms_sz);
   }
 }
 
@@ -259,8 +271,10 @@ PRE_SYSCALL(nanosleep)(void *rqtp, void *rmtp) {}
 
 POST_SYSCALL(nanosleep)(long res, void *rqtp, void *rmtp) {
   if (res >= 0) {
-    if (rqtp) POST_WRITE(rqtp, struct_timespec_sz);
-    if (rmtp) POST_WRITE(rmtp, struct_timespec_sz);
+    if (rqtp)
+      POST_WRITE(rqtp, struct_timespec_sz);
+    if (rmtp)
+      POST_WRITE(rmtp, struct_timespec_sz);
   }
 }
 
@@ -296,9 +310,12 @@ PRE_SYSCALL(getresuid)(void *ruid, void *euid, void *suid) {}
 
 POST_SYSCALL(getresuid)(long res, void *ruid, void *euid, void *suid) {
   if (res >= 0) {
-    if (ruid) POST_WRITE(ruid, sizeof(unsigned));
-    if (euid) POST_WRITE(euid, sizeof(unsigned));
-    if (suid) POST_WRITE(suid, sizeof(unsigned));
+    if (ruid)
+      POST_WRITE(ruid, sizeof(unsigned));
+    if (euid)
+      POST_WRITE(euid, sizeof(unsigned));
+    if (suid)
+      POST_WRITE(suid, sizeof(unsigned));
   }
 }
 
@@ -306,9 +323,12 @@ PRE_SYSCALL(getresgid)(void *rgid, void *egid, void *sgid) {}
 
 POST_SYSCALL(getresgid)(long res, void *rgid, void *egid, void *sgid) {
   if (res >= 0) {
-    if (rgid) POST_WRITE(rgid, sizeof(unsigned));
-    if (egid) POST_WRITE(egid, sizeof(unsigned));
-    if (sgid) POST_WRITE(sgid, sizeof(unsigned));
+    if (rgid)
+      POST_WRITE(rgid, sizeof(unsigned));
+    if (egid)
+      POST_WRITE(egid, sizeof(unsigned));
+    if (sgid)
+      POST_WRITE(sgid, sizeof(unsigned));
   }
 }
 
@@ -326,10 +346,11 @@ POST_SYSCALL(getsid)(long res, long pid) {}
 
 PRE_SYSCALL(getgroups)(long gidsetsize, void *grouplist) {}
 
-POST_SYSCALL(getgroups)(long res, long gidsetsize,
-                        __sanitizer___kernel_gid_t *grouplist) {
+POST_SYSCALL(getgroups)
+(long res, long gidsetsize, __sanitizer___kernel_gid_t *grouplist) {
   if (res >= 0) {
-    if (grouplist) POST_WRITE(grouplist, res * sizeof(*grouplist));
+    if (grouplist)
+      POST_WRITE(grouplist, res * sizeof(*grouplist));
   }
 }
 
@@ -374,11 +395,12 @@ PRE_SYSCALL(setsid)() {}
 POST_SYSCALL(setsid)(long res) {}
 
 PRE_SYSCALL(setgroups)(long gidsetsize, __sanitizer___kernel_gid_t *grouplist) {
-  if (grouplist) POST_WRITE(grouplist, gidsetsize * sizeof(*grouplist));
+  if (grouplist)
+    POST_WRITE(grouplist, gidsetsize * sizeof(*grouplist));
 }
 
-POST_SYSCALL(setgroups)(long res, long gidsetsize,
-                        __sanitizer___kernel_gid_t *grouplist) {}
+POST_SYSCALL(setgroups)
+(long res, long gidsetsize, __sanitizer___kernel_gid_t *grouplist) {}
 
 PRE_SYSCALL(acct)(const void *name) {
   if (name)
@@ -388,17 +410,21 @@ PRE_SYSCALL(acct)(const void *name) {
 POST_SYSCALL(acct)(long res, const void *name) {}
 
 PRE_SYSCALL(capget)(void *header, void *dataptr) {
-  if (header) PRE_READ(header, __user_cap_header_struct_sz);
+  if (header)
+    PRE_READ(header, __user_cap_header_struct_sz);
 }
 
 POST_SYSCALL(capget)(long res, void *header, void *dataptr) {
   if (res >= 0)
-    if (dataptr) POST_WRITE(dataptr, __user_cap_data_struct_sz);
+    if (dataptr)
+      POST_WRITE(dataptr, __user_cap_data_struct_sz);
 }
 
 PRE_SYSCALL(capset)(void *header, const void *data) {
-  if (header) PRE_READ(header, __user_cap_header_struct_sz);
-  if (data) PRE_READ(data, __user_cap_data_struct_sz);
+  if (header)
+    PRE_READ(header, __user_cap_header_struct_sz);
+  if (data)
+    PRE_READ(data, __user_cap_data_struct_sz);
 }
 
 POST_SYSCALL(capset)(long res, void *header, const void *data) {}
@@ -411,7 +437,8 @@ PRE_SYSCALL(sigpending)(void *set) {}
 
 POST_SYSCALL(sigpending)(long res, void *set) {
   if (res >= 0) {
-    if (set) POST_WRITE(set, old_sigset_t_sz);
+    if (set)
+      POST_WRITE(set, old_sigset_t_sz);
   }
 }
 
@@ -419,8 +446,10 @@ PRE_SYSCALL(sigprocmask)(long how, void *set, void *oset) {}
 
 POST_SYSCALL(sigprocmask)(long res, long how, void *set, void *oset) {
   if (res >= 0) {
-    if (set) POST_WRITE(set, old_sigset_t_sz);
-    if (oset) POST_WRITE(oset, old_sigset_t_sz);
+    if (set)
+      POST_WRITE(set, old_sigset_t_sz);
+    if (oset)
+      POST_WRITE(oset, old_sigset_t_sz);
   }
 }
 
@@ -428,7 +457,8 @@ PRE_SYSCALL(getitimer)(long which, void *value) {}
 
 POST_SYSCALL(getitimer)(long res, long which, void *value) {
   if (res >= 0) {
-    if (value) POST_WRITE(value, struct_itimerval_sz);
+    if (value)
+      POST_WRITE(value, struct_itimerval_sz);
   }
 }
 
@@ -436,19 +466,23 @@ PRE_SYSCALL(setitimer)(long which, void *value, void *ovalue) {}
 
 POST_SYSCALL(setitimer)(long res, long which, void *value, void *ovalue) {
   if (res >= 0) {
-    if (value) POST_WRITE(value, struct_itimerval_sz);
-    if (ovalue) POST_WRITE(ovalue, struct_itimerval_sz);
+    if (value)
+      POST_WRITE(value, struct_itimerval_sz);
+    if (ovalue)
+      POST_WRITE(ovalue, struct_itimerval_sz);
   }
 }
 
-PRE_SYSCALL(timer_create)(long which_clock, void *timer_event_spec,
-                          void *created_timer_id) {}
+PRE_SYSCALL(timer_create)
+(long which_clock, void *timer_event_spec, void *created_timer_id) {}
 
-POST_SYSCALL(timer_create)(long res, long which_clock, void *timer_event_spec,
-                           void *created_timer_id) {
+POST_SYSCALL(timer_create)
+(long res, long which_clock, void *timer_event_spec, void *created_timer_id) {
   if (res >= 0) {
-    if (timer_event_spec) POST_WRITE(timer_event_spec, struct_sigevent_sz);
-    if (created_timer_id) POST_WRITE(created_timer_id, sizeof(long));
+    if (timer_event_spec)
+      POST_WRITE(timer_event_spec, struct_sigevent_sz);
+    if (created_timer_id)
+      POST_WRITE(created_timer_id, sizeof(long));
   }
 }
 
@@ -456,7 +490,8 @@ PRE_SYSCALL(timer_gettime)(long timer_id, void *setting) {}
 
 POST_SYSCALL(timer_gettime)(long res, long timer_id, void *setting) {
   if (res >= 0) {
-    if (setting) POST_WRITE(setting, struct_itimerspec_sz);
+    if (setting)
+      POST_WRITE(setting, struct_itimerspec_sz);
   }
 }
 
@@ -464,15 +499,18 @@ PRE_SYSCALL(timer_getoverrun)(long timer_id) {}
 
 POST_SYSCALL(timer_getoverrun)(long res, long timer_id) {}
 
-PRE_SYSCALL(timer_settime)(long timer_id, long flags, const void *new_setting,
-                           void *old_setting) {
-  if (new_setting) PRE_READ(new_setting, struct_itimerspec_sz);
+PRE_SYSCALL(timer_settime)
+(long timer_id, long flags, const void *new_setting, void *old_setting) {
+  if (new_setting)
+    PRE_READ(new_setting, struct_itimerspec_sz);
 }
 
-POST_SYSCALL(timer_settime)(long res, long timer_id, long flags,
-                            const void *new_setting, void *old_setting) {
+POST_SYSCALL(timer_settime)
+(long res, long timer_id, long flags, const void *new_setting,
+ void *old_setting) {
   if (res >= 0) {
-    if (old_setting) POST_WRITE(old_setting, struct_itimerspec_sz);
+    if (old_setting)
+      POST_WRITE(old_setting, struct_itimerspec_sz);
   }
 }
 
@@ -481,7 +519,8 @@ PRE_SYSCALL(timer_delete)(long timer_id) {}
 POST_SYSCALL(timer_delete)(long res, long timer_id) {}
 
 PRE_SYSCALL(clock_settime)(long which_clock, const void *tp) {
-  if (tp) PRE_READ(tp, struct_timespec_sz);
+  if (tp)
+    PRE_READ(tp, struct_timespec_sz);
 }
 
 POST_SYSCALL(clock_settime)(long res, long which_clock, const void *tp) {}
@@ -490,37 +529,42 @@ PRE_SYSCALL(clock_gettime)(long which_clock, void *tp) {}
 
 POST_SYSCALL(clock_gettime)(long res, long which_clock, void *tp) {
   if (res >= 0) {
-    if (tp) POST_WRITE(tp, struct_timespec_sz);
+    if (tp)
+      POST_WRITE(tp, struct_timespec_sz);
   }
 }
 
-#if !SANITIZER_ANDROID
+#  if !SANITIZER_ANDROID
 PRE_SYSCALL(clock_adjtime)(long which_clock, void *tx) {}
 
 POST_SYSCALL(clock_adjtime)(long res, long which_clock, void *tx) {
   if (res >= 0) {
-    if (tx) POST_WRITE(tx, struct_timex_sz);
+    if (tx)
+      POST_WRITE(tx, struct_timex_sz);
   }
 }
-#endif
+#  endif
 
 PRE_SYSCALL(clock_getres)(long which_clock, void *tp) {}
 
 POST_SYSCALL(clock_getres)(long res, long which_clock, void *tp) {
   if (res >= 0) {
-    if (tp) POST_WRITE(tp, struct_timespec_sz);
+    if (tp)
+      POST_WRITE(tp, struct_timespec_sz);
   }
 }
 
-PRE_SYSCALL(clock_nanosleep)(long which_clock, long flags, const void *rqtp,
-                             void *rmtp) {
-  if (rqtp) PRE_READ(rqtp, struct_timespec_sz);
+PRE_SYSCALL(clock_nanosleep)
+(long which_clock, long flags, const void *rqtp, void *rmtp) {
+  if (rqtp)
+    PRE_READ(rqtp, struct_timespec_sz);
 }
 
-POST_SYSCALL(clock_nanosleep)(long res, long which_clock, long flags,
-                              const void *rqtp, void *rmtp) {
+POST_SYSCALL(clock_nanosleep)
+(long res, long which_clock, long flags, const void *rqtp, void *rmtp) {
   if (res >= 0) {
-    if (rmtp) POST_WRITE(rmtp, struct_timespec_sz);
+    if (rmtp)
+      POST_WRITE(rmtp, struct_timespec_sz);
   }
 }
 
@@ -532,12 +576,14 @@ PRE_SYSCALL(sched_setscheduler)(long pid, long policy, void *param) {}
 
 POST_SYSCALL(sched_setscheduler)(long res, long pid, long policy, void *param) {
   if (res >= 0) {
-    if (param) POST_WRITE(param, struct_sched_param_sz);
+    if (param)
+      POST_WRITE(param, struct_sched_param_sz);
   }
 }
 
 PRE_SYSCALL(sched_setparam)(long pid, void *param) {
-  if (param) PRE_READ(param, struct_sched_param_sz);
+  if (param)
+    PRE_READ(param, struct_sched_param_sz);
 }
 
 POST_SYSCALL(sched_setparam)(long res, long pid, void *param) {}
@@ -550,23 +596,26 @@ PRE_SYSCALL(sched_getparam)(long pid, void *param) {}
 
 POST_SYSCALL(sched_getparam)(long res, long pid, void *param) {
   if (res >= 0) {
-    if (param) POST_WRITE(param, struct_sched_param_sz);
+    if (param)
+      POST_WRITE(param, struct_sched_param_sz);
   }
 }
 
 PRE_SYSCALL(sched_setaffinity)(long pid, long len, void *user_mask_ptr) {
-  if (user_mask_ptr) PRE_READ(user_mask_ptr, len);
+  if (user_mask_ptr)
+    PRE_READ(user_mask_ptr, len);
 }
 
-POST_SYSCALL(sched_setaffinity)(long res, long pid, long len,
-                                void *user_mask_ptr) {}
+POST_SYSCALL(sched_setaffinity)
+(long res, long pid, long len, void *user_mask_ptr) {}
 
 PRE_SYSCALL(sched_getaffinity)(long pid, long len, void *user_mask_ptr) {}
 
-POST_SYSCALL(sched_getaffinity)(long res, long pid, long len,
-                                void *user_mask_ptr) {
+POST_SYSCALL(sched_getaffinity)
+(long res, long pid, long len, void *user_mask_ptr) {
   if (res >= 0) {
-    if (user_mask_ptr) POST_WRITE(user_mask_ptr, len);
+    if (user_mask_ptr)
+      POST_WRITE(user_mask_ptr, len);
   }
 }
 
@@ -586,7 +635,8 @@ PRE_SYSCALL(sched_rr_get_interval)(long pid, void *interval) {}
 
 POST_SYSCALL(sched_rr_get_interval)(long res, long pid, void *interval) {
   if (res >= 0) {
-    if (interval) POST_WRITE(interval, struct_timespec_sz);
+    if (interval)
+      POST_WRITE(interval, struct_timespec_sz);
   }
 }
 
@@ -610,13 +660,14 @@ PRE_SYSCALL(restart_syscall)() {}
 
 POST_SYSCALL(restart_syscall)(long res) {}
 
-PRE_SYSCALL(kexec_load)(long entry, long nr_segments, void *segments,
-                        long flags) {}
+PRE_SYSCALL(kexec_load)
+(long entry, long nr_segments, void *segments, long flags) {}
 
-POST_SYSCALL(kexec_load)(long res, long entry, long nr_segments, void *segments,
-                         long flags) {
+POST_SYSCALL(kexec_load)
+(long res, long entry, long nr_segments, void *segments, long flags) {
   if (res >= 0) {
-    if (segments) POST_WRITE(segments, struct_kexec_segment_sz);
+    if (segments)
+      POST_WRITE(segments, struct_kexec_segment_sz);
   }
 }
 
@@ -630,22 +681,26 @@ POST_SYSCALL(exit_group)(long res, long error_code) {}
 
 PRE_SYSCALL(wait4)(long pid, void *stat_addr, long options, void *ru) {}
 
-POST_SYSCALL(wait4)(long res, long pid, void *stat_addr, long options,
-                    void *ru) {
+POST_SYSCALL(wait4)
+(long res, long pid, void *stat_addr, long options, void *ru) {
   if (res >= 0) {
-    if (stat_addr) POST_WRITE(stat_addr, sizeof(int));
-    if (ru) POST_WRITE(ru, struct_rusage_sz);
+    if (stat_addr)
+      POST_WRITE(stat_addr, sizeof(int));
+    if (ru)
+      POST_WRITE(ru, struct_rusage_sz);
   }
 }
 
-PRE_SYSCALL(waitid)(long which, long pid, void *infop, long options, void *ru) {
-}
+PRE_SYSCALL(waitid)
+(long which, long pid, void *infop, long options, void *ru) {}
 
-POST_SYSCALL(waitid)(long res, long which, long pid, void *infop, long options,
-                     void *ru) {
+POST_SYSCALL(waitid)
+(long res, long which, long pid, void *infop, long options, void *ru) {
   if (res >= 0) {
-    if (infop) POST_WRITE(infop, siginfo_t_sz);
-    if (ru) POST_WRITE(ru, struct_rusage_sz);
+    if (infop)
+      POST_WRITE(infop, siginfo_t_sz);
+    if (ru)
+      POST_WRITE(ru, struct_rusage_sz);
   }
 }
 
@@ -653,7 +708,8 @@ PRE_SYSCALL(waitpid)(long pid, void *stat_addr, long options) {}
 
 POST_SYSCALL(waitpid)(long res, long pid, void *stat_addr, long options) {
   if (res >= 0) {
-    if (stat_addr) POST_WRITE(stat_addr, sizeof(int));
+    if (stat_addr)
+      POST_WRITE(stat_addr, sizeof(int));
   }
 }
 
@@ -661,7 +717,8 @@ PRE_SYSCALL(set_tid_address)(void *tidptr) {}
 
 POST_SYSCALL(set_tid_address)(long res, void *tidptr) {
   if (res >= 0) {
-    if (tidptr) POST_WRITE(tidptr, sizeof(int));
+    if (tidptr)
+      POST_WRITE(tidptr, sizeof(int));
   }
 }
 
@@ -682,11 +739,14 @@ POST_SYSCALL(delete_module)(long res, const void *name_user, long flags) {}
 
 PRE_SYSCALL(rt_sigprocmask)(long how, void *set, void *oset, long sigsetsize) {}
 
-POST_SYSCALL(rt_sigprocmask)(long res, long how, kernel_sigset_t *set,
-                             kernel_sigset_t *oset, long sigsetsize) {
+POST_SYSCALL(rt_sigprocmask)
+(long res, long how, kernel_sigset_t *set, kernel_sigset_t *oset,
+ long sigsetsize) {
   if (res >= 0) {
-    if (set) POST_WRITE(set, sigsetsize);
-    if (oset) POST_WRITE(oset, sigsetsize);
+    if (set)
+      POST_WRITE(set, sigsetsize);
+    if (oset)
+      POST_WRITE(oset, sigsetsize);
   }
 }
 
@@ -694,29 +754,34 @@ PRE_SYSCALL(rt_sigpending)(void *set, long sigsetsize) {}
 
 POST_SYSCALL(rt_sigpending)(long res, kernel_sigset_t *set, long sigsetsize) {
   if (res >= 0) {
-    if (set) POST_WRITE(set, sigsetsize);
+    if (set)
+      POST_WRITE(set, sigsetsize);
   }
 }
 
-PRE_SYSCALL(rt_sigtimedwait)(const kernel_sigset_t *uthese, void *uinfo,
-                             const void *uts, long sigsetsize) {
-  if (uthese) PRE_READ(uthese, sigsetsize);
-  if (uts) PRE_READ(uts, struct_timespec_sz);
+PRE_SYSCALL(rt_sigtimedwait)
+(const kernel_sigset_t *uthese, void *uinfo, const void *uts, long sigsetsize) {
+  if (uthese)
+    PRE_READ(uthese, sigsetsize);
+  if (uts)
+    PRE_READ(uts, struct_timespec_sz);
 }
 
-POST_SYSCALL(rt_sigtimedwait)(long res, const void *uthese, void *uinfo,
-                              const void *uts, long sigsetsize) {
+POST_SYSCALL(rt_sigtimedwait)
+(long res, const void *uthese, void *uinfo, const void *uts, long sigsetsize) {
   if (res >= 0) {
-    if (uinfo) POST_WRITE(uinfo, siginfo_t_sz);
+    if (uinfo)
+      POST_WRITE(uinfo, siginfo_t_sz);
   }
 }
 
 PRE_SYSCALL(rt_tgsigqueueinfo)(long tgid, long pid, long sig, void *uinfo) {}
 
-POST_SYSCALL(rt_tgsigqueueinfo)(long res, long tgid, long pid, long sig,
-                                void *uinfo) {
+POST_SYSCALL(rt_tgsigqueueinfo)
+(long res, long tgid, long pid, long sig, void *uinfo) {
   if (res >= 0) {
-    if (uinfo) POST_WRITE(uinfo, siginfo_t_sz);
+    if (uinfo)
+      POST_WRITE(uinfo, siginfo_t_sz);
   }
 }
 
@@ -736,7 +801,8 @@ PRE_SYSCALL(rt_sigqueueinfo)(long pid, long sig, void *uinfo) {}
 
 POST_SYSCALL(rt_sigqueueinfo)(long res, long pid, long sig, void *uinfo) {
   if (res >= 0) {
-    if (uinfo) POST_WRITE(uinfo, siginfo_t_sz);
+    if (uinfo)
+      POST_WRITE(uinfo, siginfo_t_sz);
   }
 }
 
@@ -772,11 +838,11 @@ PRE_SYSCALL(bdflush)(long func, long data) {}
 
 POST_SYSCALL(bdflush)(long res, long func, long data) {}
 
-PRE_SYSCALL(mount)(void *dev_name, void *dir_name, void *type, long flags,
-                   void *data) {}
+PRE_SYSCALL(mount)
+(void *dev_name, void *dir_name, void *type, long flags, void *data) {}
 
-POST_SYSCALL(mount)(long res, void *dev_name, void *dir_name, void *type,
-                    long flags, void *data) {
+POST_SYSCALL(mount)
+(long res, void *dev_name, void *dir_name, void *type, long flags, void *data) {
   if (res >= 0) {
     if (dev_name)
       POST_WRITE(dev_name,
@@ -826,11 +892,12 @@ PRE_SYSCALL(stat)(const void *filename, void *statbuf) {
 
 POST_SYSCALL(stat)(long res, const void *filename, void *statbuf) {
   if (res >= 0) {
-    if (statbuf) POST_WRITE(statbuf, struct___old_kernel_stat_sz);
+    if (statbuf)
+      POST_WRITE(statbuf, struct___old_kernel_stat_sz);
   }
 }
 
-#if !SANITIZER_ANDROID
+#  if !SANITIZER_ANDROID
 PRE_SYSCALL(statfs)(const void *path, void *buf) {
   if (path)
     PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);
@@ -838,7 +905,8 @@ PRE_SYSCALL(statfs)(const void *path, void *buf) {
 
 POST_SYSCALL(statfs)(long res, const void *path, void *buf) {
   if (res >= 0) {
-    if (buf) POST_WRITE(buf, struct_statfs_sz);
+    if (buf)
+      POST_WRITE(buf, struct_statfs_sz);
   }
 }
 
@@ -849,7 +917,8 @@ PRE_SYSCALL(statfs64)(const void *path, long sz, void *buf) {
 
 POST_SYSCALL(statfs64)(long res, const void *path, long sz, void *buf) {
   if (res >= 0) {
-    if (buf) POST_WRITE(buf, struct_statfs64_sz);
+    if (buf)
+      POST_WRITE(buf, struct_statfs64_sz);
   }
 }
 
@@ -857,7 +926,8 @@ PRE_SYSCALL(fstatfs)(long fd, void *buf) {}
 
 POST_SYSCALL(fstatfs)(long res, long fd, void *buf) {
   if (res >= 0) {
-    if (buf) POST_WRITE(buf, struct_statfs_sz);
+    if (buf)
+      POST_WRITE(buf, struct_statfs_sz);
   }
 }
 
@@ -865,10 +935,11 @@ PRE_SYSCALL(fstatfs64)(long fd, long sz, void *buf) {}
 
 POST_SYSCALL(fstatfs64)(long res, long fd, long sz, void *buf) {
   if (res >= 0) {
-    if (buf) POST_WRITE(buf, struct_statfs64_sz);
+    if (buf)
+      POST_WRITE(buf, struct_statfs64_sz);
   }
 }
-#endif // !SANITIZER_ANDROID
+#  endif  // !SANITIZER_ANDROID
 
 PRE_SYSCALL(lstat)(const void *filename, void *statbuf) {
   if (filename)
@@ -878,7 +949,8 @@ PRE_SYSCALL(lstat)(const void *filename, void *statbuf) {
 
 POST_SYSCALL(lstat)(long res, const void *filename, void *statbuf) {
   if (res >= 0) {
-    if (statbuf) POST_WRITE(statbuf, struct___old_kernel_stat_sz);
+    if (statbuf)
+      POST_WRITE(statbuf, struct___old_kernel_stat_sz);
   }
 }
 
@@ -886,7 +958,8 @@ PRE_SYSCALL(fstat)(long fd, void *statbuf) {}
 
 POST_SYSCALL(fstat)(long res, long fd, void *statbuf) {
   if (res >= 0) {
-    if (statbuf) POST_WRITE(statbuf, struct___old_kernel_stat_sz);
+    if (statbuf)
+      POST_WRITE(statbuf, struct___old_kernel_stat_sz);
   }
 }
 
@@ -898,7 +971,8 @@ PRE_SYSCALL(newstat)(const void *filename, void *statbuf) {
 
 POST_SYSCALL(newstat)(long res, const void *filename, void *statbuf) {
   if (res >= 0) {
-    if (statbuf) POST_WRITE(statbuf, struct_kernel_stat_sz);
+    if (statbuf)
+      POST_WRITE(statbuf, struct_kernel_stat_sz);
   }
 }
 
@@ -910,7 +984,8 @@ PRE_SYSCALL(newlstat)(const void *filename, void *statbuf) {
 
 POST_SYSCALL(newlstat)(long res, const void *filename, void *statbuf) {
   if (res >= 0) {
-    if (statbuf) POST_WRITE(statbuf, struct_kernel_stat_sz);
+    if (statbuf)
+      POST_WRITE(statbuf, struct_kernel_stat_sz);
   }
 }
 
@@ -918,19 +993,21 @@ PRE_SYSCALL(newfstat)(long fd, void *statbuf) {}
 
 POST_SYSCALL(newfstat)(long res, long fd, void *statbuf) {
   if (res >= 0) {
-    if (statbuf) POST_WRITE(statbuf, struct_kernel_stat_sz);
+    if (statbuf)
+      POST_WRITE(statbuf, struct_kernel_stat_sz);
   }
 }
 
-#if !SANITIZER_ANDROID
+#  if !SANITIZER_ANDROID
 PRE_SYSCALL(ustat)(long dev, void *ubuf) {}
 
 POST_SYSCALL(ustat)(long res, long dev, void *ubuf) {
   if (res >= 0) {
-    if (ubuf) POST_WRITE(ubuf, struct_ustat_sz);
+    if (ubuf)
+      POST_WRITE(ubuf, struct_ustat_sz);
   }
 }
-#endif  // !SANITIZER_ANDROID
+#  endif  // !SANITIZER_ANDROID
 
 PRE_SYSCALL(stat64)(const void *filename, void *statbuf) {
   if (filename)
@@ -940,7 +1017,8 @@ PRE_SYSCALL(stat64)(const void *filename, void *statbuf) {
 
 POST_SYSCALL(stat64)(long res, const void *filename, void *statbuf) {
   if (res >= 0) {
-    if (statbuf) POST_WRITE(statbuf, struct_kernel_stat64_sz);
+    if (statbuf)
+      POST_WRITE(statbuf, struct_kernel_stat64_sz);
   }
 }
 
@@ -948,7 +1026,8 @@ PRE_SYSCALL(fstat64)(long fd, void *statbuf) {}
 
 POST_SYSCALL(fstat64)(long res, long fd, void *statbuf) {
   if (res >= 0) {
-    if (statbuf) POST_WRITE(statbuf, struct_kernel_stat64_sz);
+    if (statbuf)
+      POST_WRITE(statbuf, struct_kernel_stat64_sz);
   }
 }
 
@@ -960,71 +1039,80 @@ PRE_SYSCALL(lstat64)(const void *filename, void *statbuf) {
 
 POST_SYSCALL(lstat64)(long res, const void *filename, void *statbuf) {
   if (res >= 0) {
-    if (statbuf) POST_WRITE(statbuf, struct_kernel_stat64_sz);
+    if (statbuf)
+      POST_WRITE(statbuf, struct_kernel_stat64_sz);
   }
 }
 
-PRE_SYSCALL(setxattr)(const void *path, const void *name, const void *value,
-                      long size, long flags) {
+PRE_SYSCALL(setxattr)
+(const void *path, const void *name, const void *value, long size, long flags) {
   if (path)
     PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);
   if (name)
     PRE_READ(name, __sanitizer::internal_strlen((const char *)name) + 1);
-  if (value) PRE_READ(value, size);
+  if (value)
+    PRE_READ(value, size);
 }
 
-POST_SYSCALL(setxattr)(long res, const void *path, const void *name,
-                       const void *value, long size, long flags) {}
+POST_SYSCALL(setxattr)
+(long res, const void *path, const void *name, const void *value, long size,
+ long flags) {}
 
-PRE_SYSCALL(lsetxattr)(const void *path, const void *name, const void *value,
-                       long size, long flags) {
+PRE_SYSCALL(lsetxattr)
+(const void *path, const void *name, const void *value, long size, long flags) {
   if (path)
     PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);
   if (name)
     PRE_READ(name, __sanitizer::internal_strlen((const char *)name) + 1);
-  if (value) PRE_READ(value, size);
+  if (value)
+    PRE_READ(value, size);
 }
 
-POST_SYSCALL(lsetxattr)(long res, const void *path, const void *name,
-                        const void *value, long size, long flags) {}
+POST_SYSCALL(lsetxattr)
+(long res, const void *path, const void *name, const void *value, long size,
+ long flags) {}
 
-PRE_SYSCALL(fsetxattr)(long fd, const void *name, const void *value, long size,
-                       long flags) {
+PRE_SYSCALL(fsetxattr)
+(long fd, const void *name, const void *value, long size, long flags) {
   if (name)
     PRE_READ(name, __sanitizer::internal_strlen((const char *)name) + 1);
-  if (value) PRE_READ(value, size);
+  if (value)
+    PRE_READ(value, size);
 }
 
-POST_SYSCALL(fsetxattr)(long res, long fd, const void *name, const void *value,
-                        long size, long flags) {}
+POST_SYSCALL(fsetxattr)
+(long res, long fd, const void *name, const void *value, long size,
+ long flags) {}
 
-PRE_SYSCALL(getxattr)(const void *path, const void *name, void *value,
-                      long size) {
+PRE_SYSCALL(getxattr)
+(const void *path, const void *name, void *value, long size) {
   if (path)
     PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);
   if (name)
     PRE_READ(name, __sanitizer::internal_strlen((const char *)name) + 1);
 }
 
-POST_SYSCALL(getxattr)(long res, const void *path, const void *name,
-                       void *value, long size) {
+POST_SYSCALL(getxattr)
+(long res, const void *path, const void *name, void *value, long size) {
   if (size && res > 0) {
-    if (value) POST_WRITE(value, res);
+    if (value)
+      POST_WRITE(value, res);
   }
 }
 
-PRE_SYSCALL(lgetxattr)(const void *path, const void *name, void *value,
-                       long size) {
+PRE_SYSCALL(lgetxattr)
+(const void *path, const void *name, void *value, long size) {
   if (path)
     PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);
   if (name)
     PRE_READ(name, __sanitizer::internal_strlen((const char *)name) + 1);
 }
 
-POST_SYSCALL(lgetxattr)(long res, const void *path, const void *name,
-                        void *value, long size) {
+POST_SYSCALL(lgetxattr)
+(long res, const void *path, const void *name, void *value, long size) {
   if (size && res > 0) {
-    if (value) POST_WRITE(value, res);
+    if (value)
+      POST_WRITE(value, res);
   }
 }
 
@@ -1033,10 +1121,11 @@ PRE_SYSCALL(fgetxattr)(long fd, const void *name, void *value, long size) {
     PRE_READ(name, __sanitizer::internal_strlen((const char *)name) + 1);
 }
 
-POST_SYSCALL(fgetxattr)(long res, long fd, const void *name, void *value,
-                        long size) {
+POST_SYSCALL(fgetxattr)
+(long res, long fd, const void *name, void *value, long size) {
   if (size && res > 0) {
-    if (value) POST_WRITE(value, res);
+    if (value)
+      POST_WRITE(value, res);
   }
 }
 
@@ -1047,7 +1136,8 @@ PRE_SYSCALL(listxattr)(const void *path, void *list, long size) {
 
 POST_SYSCALL(listxattr)(long res, const void *path, void *list, long size) {
   if (size && res > 0) {
-    if (list) POST_WRITE(list, res);
+    if (list)
+      POST_WRITE(list, res);
   }
 }
 
@@ -1058,7 +1148,8 @@ PRE_SYSCALL(llistxattr)(const void *path, void *list, long size) {
 
 POST_SYSCALL(llistxattr)(long res, const void *path, void *list, long size) {
   if (size && res > 0) {
-    if (list) POST_WRITE(list, res);
+    if (list)
+      POST_WRITE(list, res);
   }
 }
 
@@ -1066,7 +1157,8 @@ PRE_SYSCALL(flistxattr)(long fd, void *list, long size) {}
 
 POST_SYSCALL(flistxattr)(long res, long fd, void *list, long size) {
   if (size && res > 0) {
-    if (list) POST_WRITE(list, res);
+    if (list)
+      POST_WRITE(list, res);
   }
 }
 
@@ -1103,17 +1195,17 @@ PRE_SYSCALL(mprotect)(long start, long len, long prot) {}
 
 POST_SYSCALL(mprotect)(long res, long start, long len, long prot) {}
 
-PRE_SYSCALL(mremap)(long addr, long old_len, long new_len, long flags,
-                    long new_addr) {}
+PRE_SYSCALL(mremap)
+(long addr, long old_len, long new_len, long flags, long new_addr) {}
 
-POST_SYSCALL(mremap)(long res, long addr, long old_len, long new_len,
-                     long flags, long new_addr) {}
+POST_SYSCALL(mremap)
+(long res, long addr, long old_len, long new_len, long flags, long new_addr) {}
 
-PRE_SYSCALL(remap_file_pages)(long start, long size, long prot, long pgoff,
-                              long flags) {}
+PRE_SYSCALL(remap_file_pages)
+(long start, long size, long prot, long pgoff, long flags) {}
 
-POST_SYSCALL(remap_file_pages)(long res, long start, long size, long prot,
-                               long pgoff, long flags) {}
+POST_SYSCALL(remap_file_pages)
+(long res, long start, long size, long prot, long pgoff, long flags) {}
 
 PRE_SYSCALL(msync)(long start, long len, long flags) {}
 
@@ -1189,7 +1281,8 @@ PRE_SYSCALL(link)(const void *oldname, const void *newname) {
 POST_SYSCALL(link)(long res, const void *oldname, const void *newname) {}
 
 PRE_SYSCALL(symlink)(const void *old, const void *new_) {
-  if (old) PRE_READ(old, __sanitizer::internal_strlen((const char *)old) + 1);
+  if (old)
+    PRE_READ(old, __sanitizer::internal_strlen((const char *)old) + 1);
   if (new_)
     PRE_READ(new_, __sanitizer::internal_strlen((const char *)new_) + 1);
 }
@@ -1237,14 +1330,16 @@ PRE_SYSCALL(pipe)(void *fildes) {}
 
 POST_SYSCALL(pipe)(long res, void *fildes) {
   if (res >= 0)
-    if (fildes) POST_WRITE(fildes, sizeof(int) * 2);
+    if (fildes)
+      POST_WRITE(fildes, sizeof(int) * 2);
 }
 
 PRE_SYSCALL(pipe2)(void *fildes, long flags) {}
 
 POST_SYSCALL(pipe2)(long res, void *fildes, long flags) {
   if (res >= 0)
-    if (fildes) POST_WRITE(fildes, sizeof(int) * 2);
+    if (fildes)
+      POST_WRITE(fildes, sizeof(int) * 2);
 }
 
 PRE_SYSCALL(dup)(long fildes) {}
@@ -1272,16 +1367,19 @@ PRE_SYSCALL(flock)(long fd, long cmd) {}
 POST_SYSCALL(flock)(long res, long fd, long cmd) {}
 
 PRE_SYSCALL(io_setup)(long nr_reqs, void **ctx) {
-  if (ctx) PRE_WRITE(ctx, sizeof(*ctx));
+  if (ctx)
+    PRE_WRITE(ctx, sizeof(*ctx));
 }
 
 POST_SYSCALL(io_setup)(long res, long nr_reqs, void **ctx) {
   if (res >= 0) {
-    if (ctx) POST_WRITE(ctx, sizeof(*ctx));
+    if (ctx)
+      POST_WRITE(ctx, sizeof(*ctx));
     // (*ctx) is actually a pointer to a kernel mapped page, and there are
     // people out there who are crazy enough to peek into that page's 32-byte
     // header.
-    if (*ctx) POST_WRITE(*ctx, 32);
+    if (*ctx)
+      POST_WRITE(*ctx, 32);
   }
 }
 
@@ -1289,16 +1387,21 @@ PRE_SYSCALL(io_destroy)(long ctx) {}
 
 POST_SYSCALL(io_destroy)(long res, long ctx) {}
 
-PRE_SYSCALL(io_getevents)(long ctx_id, long min_nr, long nr,
-                          __sanitizer_io_event *ioevpp, void *timeout) {
-  if (timeout) PRE_READ(timeout, struct_timespec_sz);
+PRE_SYSCALL(io_getevents)
+(long ctx_id, long min_nr, long nr, __sanitizer_io_event *ioevpp,
+ void *timeout) {
+  if (timeout)
+    PRE_READ(timeout, struct_timespec_sz);
 }
 
-POST_SYSCALL(io_getevents)(long res, long ctx_id, long min_nr, long nr,
-                           __sanitizer_io_event *ioevpp, void *timeout) {
+POST_SYSCALL(io_getevents)
+(long res, long ctx_id, long min_nr, long nr, __sanitizer_io_event *ioevpp,
+ void *timeout) {
   if (res >= 0) {
-    if (ioevpp) POST_WRITE(ioevpp, res * sizeof(*ioevpp));
-    if (timeout) POST_WRITE(timeout, struct_timespec_sz);
+    if (ioevpp)
+      POST_WRITE(ioevpp, res * sizeof(*ioevpp));
+    if (timeout)
+      POST_WRITE(timeout, struct_timespec_sz);
   }
   for (long i = 0; i < res; i++) {
     // We synchronize io_submit -> io_getevents/io_cancel using the
@@ -1308,26 +1411,26 @@ POST_SYSCALL(io_getevents)(long res, long ctx_id, long min_nr, long nr,
     // synchronize on 0. But there does not seem to be a better solution
     // (except wrapping all operations in own context, which is unreliable).
     // We can not reliably extract fildes in io_getevents.
-    COMMON_SYSCALL_ACQUIRE((void*)ioevpp[i].data);
+    COMMON_SYSCALL_ACQUIRE((void *)ioevpp[i].data);
   }
 }
 
 PRE_SYSCALL(io_submit)(long ctx_id, long nr, __sanitizer_iocb **iocbpp) {
   for (long i = 0; i < nr; ++i) {
     uptr op = iocbpp[i]->aio_lio_opcode;
-    void *data = (void*)iocbpp[i]->aio_data;
-    void *buf = (void*)iocbpp[i]->aio_buf;
+    void *data = (void *)iocbpp[i]->aio_data;
+    void *buf = (void *)iocbpp[i]->aio_buf;
     uptr len = (uptr)iocbpp[i]->aio_nbytes;
     if (op == iocb_cmd_pwrite && buf && len) {
       PRE_READ(buf, len);
     } else if (op == iocb_cmd_pread && buf && len) {
       POST_WRITE(buf, len);
     } else if (op == iocb_cmd_pwritev) {
-      __sanitizer_iovec *iovec = (__sanitizer_iovec*)buf;
+      __sanitizer_iovec *iovec = (__sanitizer_iovec *)buf;
       for (uptr v = 0; v < len; v++)
         PRE_READ(iovec[v].iov_base, iovec[v].iov_len);
     } else if (op == iocb_cmd_preadv) {
-      __sanitizer_iovec *iovec = (__sanitizer_iovec*)buf;
+      __sanitizer_iovec *iovec = (__sanitizer_iovec *)buf;
       for (uptr v = 0; v < len; v++)
         POST_WRITE(iovec[v].iov_base, iovec[v].iov_len);
     }
@@ -1336,19 +1439,18 @@ PRE_SYSCALL(io_submit)(long ctx_id, long nr, __sanitizer_iocb **iocbpp) {
   }
 }
 
-POST_SYSCALL(io_submit)(long res, long ctx_id, long nr,
-    __sanitizer_iocb **iocbpp) {}
+POST_SYSCALL(io_submit)
+(long res, long ctx_id, long nr, __sanitizer_iocb **iocbpp) {}
 
-PRE_SYSCALL(io_cancel)(long ctx_id, __sanitizer_iocb *iocb,
-    __sanitizer_io_event *result) {
-}
+PRE_SYSCALL(io_cancel)
+(long ctx_id, __sanitizer_iocb *iocb, __sanitizer_io_event *result) {}
 
-POST_SYSCALL(io_cancel)(long res, long ctx_id, __sanitizer_iocb *iocb,
-    __sanitizer_io_event *result) {
+POST_SYSCALL(io_cancel)
+(long res, long ctx_id, __sanitizer_iocb *iocb, __sanitizer_io_event *result) {
   if (res == 0) {
     if (result) {
       // See comment in io_getevents.
-      COMMON_SYSCALL_ACQUIRE((void*)result->data);
+      COMMON_SYSCALL_ACQUIRE((void *)result->data);
       POST_WRITE(result, sizeof(*result));
     }
     if (iocb)
@@ -1358,19 +1460,23 @@ POST_SYSCALL(io_cancel)(long res, long ctx_id, __sanitizer_iocb *iocb,
 
 PRE_SYSCALL(sendfile)(long out_fd, long in_fd, void *offset, long count) {}
 
-POST_SYSCALL(sendfile)(long res, long out_fd, long in_fd,
-                       __sanitizer___kernel_off_t *offset, long count) {
+POST_SYSCALL(sendfile)
+(long res, long out_fd, long in_fd, __sanitizer___kernel_off_t *offset,
+ long count) {
   if (res >= 0) {
-    if (offset) POST_WRITE(offset, sizeof(*offset));
+    if (offset)
+      POST_WRITE(offset, sizeof(*offset));
   }
 }
 
 PRE_SYSCALL(sendfile64)(long out_fd, long in_fd, void *offset, long count) {}
 
-POST_SYSCALL(sendfile64)(long res, long out_fd, long in_fd,
-                         __sanitizer___kernel_loff_t *offset, long count) {
+POST_SYSCALL(sendfile64)
+(long res, long out_fd, long in_fd, __sanitizer___kernel_loff_t *offset,
+ long count) {
   if (res >= 0) {
-    if (offset) POST_WRITE(offset, sizeof(*offset));
+    if (offset)
+      POST_WRITE(offset, sizeof(*offset));
   }
 }
 
@@ -1402,9 +1508,7 @@ PRE_SYSCALL(open)(const void *filename, long flags, long mode) {
 
 POST_SYSCALL(open)(long res, const void *filename, long flags, long mode) {}
 
-PRE_SYSCALL(close)(long fd) {
-  COMMON_SYSCALL_FD_CLOSE((int)fd);
-}
+PRE_SYSCALL(close)(long fd) { COMMON_SYSCALL_FD_CLOSE((int)fd); }
 
 POST_SYSCALL(close)(long res, long fd) {}
 
@@ -1440,7 +1544,7 @@ PRE_SYSCALL(fchown)(long fd, long user, long group) {}
 
 POST_SYSCALL(fchown)(long res, long fd, long user, long group) {}
 
-#if SANITIZER_USES_UID16_SYSCALLS
+#  if SANITIZER_USES_UID16_SYSCALLS
 PRE_SYSCALL(chown16)(const void *filename, long user, long group) {
   if (filename)
     PRE_READ(filename,
@@ -1483,13 +1587,16 @@ POST_SYSCALL(setresuid16)(long res, long ruid, long euid, long suid) {}
 
 PRE_SYSCALL(getresuid16)(void *ruid, void *euid, void *suid) {}
 
-POST_SYSCALL(getresuid16)(long res, __sanitizer___kernel_old_uid_t *ruid,
-                          __sanitizer___kernel_old_uid_t *euid,
                         __sanitizer___kernel_old_uid_t *suid) {
+POST_SYSCALL(getresuid16)
+(long res, __sanitizer___kernel_old_uid_t *ruid,
__sanitizer___kernel_old_uid_t *euid, __sanitizer___kernel_old_uid_t *suid) {
   if (res >= 0) {
-    if (ruid) POST_WRITE(ruid, sizeof(*ruid));
-    if (euid) POST_WRITE(euid, sizeof(*euid));
-    if (suid) POST_WRITE(suid, sizeof(*suid));
+    if (ruid)
+      POST_WRITE(ruid, sizeof(*ruid));
+    if (euid)
+      POST_WRITE(euid, sizeof(*euid));
+    if (suid)
+      POST_WRITE(suid, sizeof(*suid));
   }
 }
 
@@ -1499,13 +1606,16 @@ POST_SYSCALL(setresgid16)(long res, long rgid, long egid, long sgid) {}
 
 PRE_SYSCALL(getresgid16)(void *rgid, void *egid, void *sgid) {}
 
-POST_SYSCALL(getresgid16)(long res, __sanitizer___kernel_old_gid_t *rgid,
-                          __sanitizer___kernel_old_gid_t *egid,
                         __sanitizer___kernel_old_gid_t *sgid) {
+POST_SYSCALL(getresgid16)
+(long res, __sanitizer___kernel_old_gid_t *rgid,
__sanitizer___kernel_old_gid_t *egid, __sanitizer___kernel_old_gid_t *sgid) {
   if (res >= 0) {
-    if (rgid) POST_WRITE(rgid, sizeof(*rgid));
-    if (egid) POST_WRITE(egid, sizeof(*egid));
-    if (sgid) POST_WRITE(sgid, sizeof(*sgid));
+    if (rgid)
+      POST_WRITE(rgid, sizeof(*rgid));
+    if (egid)
+      POST_WRITE(egid, sizeof(*egid));
+    if (sgid)
+      POST_WRITE(sgid, sizeof(*sgid));
   }
 }
 
@@ -1517,23 +1627,25 @@ PRE_SYSCALL(setfsgid16)(long gid) {}
 
 POST_SYSCALL(setfsgid16)(long res, long gid) {}
 
-PRE_SYSCALL(getgroups16)(long gidsetsize,
-                         __sanitizer___kernel_old_gid_t *grouplist) {}
+PRE_SYSCALL(getgroups16)
+(long gidsetsize, __sanitizer___kernel_old_gid_t *grouplist) {}
 
-POST_SYSCALL(getgroups16)(long res, long gidsetsize,
-                          __sanitizer___kernel_old_gid_t *grouplist) {
+POST_SYSCALL(getgroups16)
+(long res, long gidsetsize, __sanitizer___kernel_old_gid_t *grouplist) {
   if (res >= 0) {
-    if (grouplist) POST_WRITE(grouplist, res * sizeof(*grouplist));
+    if (grouplist)
+      POST_WRITE(grouplist, res * sizeof(*grouplist));
   }
 }
 
-PRE_SYSCALL(setgroups16)(long gidsetsize,
-                         __sanitizer___kernel_old_gid_t *grouplist) {
-  if (grouplist) POST_WRITE(grouplist, gidsetsize * sizeof(*grouplist));
+PRE_SYSCALL(setgroups16)
+(long gidsetsize, __sanitizer___kernel_old_gid_t *grouplist) {
+  if (grouplist)
+    POST_WRITE(grouplist, gidsetsize * sizeof(*grouplist));
 }
 
-POST_SYSCALL(setgroups16)(long res, long gidsetsize,
-                          __sanitizer___kernel_old_gid_t *grouplist) {}
+POST_SYSCALL(setgroups16)
+(long res, long gidsetsize, __sanitizer___kernel_old_gid_t *grouplist) {}
 
 PRE_SYSCALL(getuid16)() {}
 
@@ -1550,7 +1662,7 @@ POST_SYSCALL(getgid16)(long res) {}
 PRE_SYSCALL(getegid16)() {}
 
 POST_SYSCALL(getegid16)(long res) {}
-#endif // SANITIZER_USES_UID16_SYSCALLS
+#  endif  // SANITIZER_USES_UID16_SYSCALLS
 
 PRE_SYSCALL(utime)(void *filename, void *times) {}
 
@@ -1559,7 +1671,8 @@ POST_SYSCALL(utime)(long res, void *filename, void *times) {
     if (filename)
       POST_WRITE(filename,
                  __sanitizer::internal_strlen((const char *)filename) + 1);
-    if (times) POST_WRITE(times, struct_utimbuf_sz);
+    if (times)
+      POST_WRITE(times, struct_utimbuf_sz);
   }
 }
 
@@ -1570,7 +1683,8 @@ POST_SYSCALL(utimes)(long res, void *filename, void *utimes) {
     if (filename)
       POST_WRITE(filename,
                  __sanitizer::internal_strlen((const char *)filename) + 1);
-    if (utimes) POST_WRITE(utimes, timeval_sz);
+    if (utimes)
+      POST_WRITE(utimes, timeval_sz);
   }
 }
 
@@ -1578,91 +1692,104 @@ PRE_SYSCALL(lseek)(long fd, long offset, long origin) {}
 
 POST_SYSCALL(lseek)(long res, long fd, long offset, long origin) {}
 
-PRE_SYSCALL(llseek)(long fd, long offset_high, long offset_low, void *result,
-                    long origin) {}
+PRE_SYSCALL(llseek)
+(long fd, long offset_high, long offset_low, void *result, long origin) {}
 
-POST_SYSCALL(llseek)(long res, long fd, long offset_high, long offset_low,
-                     void *result, long origin) {
+POST_SYSCALL(llseek)
+(long res, long fd, long offset_high, long offset_low, void *result,
+ long origin) {
   if (res >= 0) {
-    if (result) POST_WRITE(result, sizeof(long long));
+    if (result)
+      POST_WRITE(result, sizeof(long long));
   }
 }
 
 PRE_SYSCALL(readv)(long fd, const __sanitizer_iovec *vec, long vlen) {}
 
-POST_SYSCALL(readv)(long res, long fd, const __sanitizer_iovec *vec,
-                    long vlen) {
+POST_SYSCALL(readv)
+(long res, long fd, const __sanitizer_iovec *vec, long vlen) {
   if (res >= 0) {
-    if (vec) kernel_write_iovec(vec, vlen, res);
+    if (vec)
+      kernel_write_iovec(vec, vlen, res);
   }
 }
 
 PRE_SYSCALL(write)(long fd, const void *buf, long count) {
-  if (buf) PRE_READ(buf, count);
+  if (buf)
+    PRE_READ(buf, count);
 }
 
 POST_SYSCALL(write)(long res, long fd, const void *buf, long count) {}
 
 PRE_SYSCALL(writev)(long fd, const __sanitizer_iovec *vec, long vlen) {}
 
-POST_SYSCALL(writev)(long res, long fd, const __sanitizer_iovec *vec,
-                     long vlen) {
+POST_SYSCALL(writev)
+(long res, long fd, const __sanitizer_iovec *vec, long vlen) {
   if (res >= 0) {
-    if (vec) kernel_read_iovec(vec, vlen, res);
+    if (vec)
+      kernel_read_iovec(vec, vlen, res);
   }
 }
 
-#ifdef _LP64
+#  ifdef _LP64
 PRE_SYSCALL(pread64)(long fd, void *buf, long count, long pos) {}
 
 POST_SYSCALL(pread64)(long res, long fd, void *buf, long count, long pos) {
   if (res >= 0) {
-    if (buf) POST_WRITE(buf, res);
+    if (buf)
+      POST_WRITE(buf, res);
   }
 }
 
 PRE_SYSCALL(pwrite64)(long fd, const void *buf, long count, long pos) {
-  if (buf) PRE_READ(buf, count);
+  if (buf)
+    PRE_READ(buf, count);
 }
 
-POST_SYSCALL(pwrite64)(long res, long fd, const void *buf, long count,
-                       long pos) {}
-#else
+POST_SYSCALL(pwrite64)
+(long res, long fd, const void *buf, long count, long pos) {}
+#  else
 PRE_SYSCALL(pread64)(long fd, void *buf, long count, long pos0, long pos1) {}
 
-POST_SYSCALL(pread64)(long res, long fd, void *buf, long count, long pos0,
-                      long pos1) {
+POST_SYSCALL(pread64)
+(long res, long fd, void *buf, long count, long pos0, long pos1) {
   if (res >= 0) {
-    if (buf) POST_WRITE(buf, res);
+    if (buf)
+      POST_WRITE(buf, res);
   }
 }
 
-PRE_SYSCALL(pwrite64)(long fd, const void *buf, long count, long pos0,
-                      long pos1) {
-  if (buf) PRE_READ(buf, count);
+PRE_SYSCALL(pwrite64)
+(long fd, const void *buf, long count, long pos0, long pos1) {
+  if (buf)
+    PRE_READ(buf, count);
 }
 
-POST_SYSCALL(pwrite64)(long res, long fd, const void *buf, long count,
-                       long pos0, long pos1) {}
-#endif
+POST_SYSCALL(pwrite64)
+(long res, long fd, const void *buf, long count, long pos0, long pos1) {}
+#  endif
 
-PRE_SYSCALL(preadv)(long fd, const __sanitizer_iovec *vec, long vlen,
-                    long pos_l, long pos_h) {}
+PRE_SYSCALL(preadv)
+(long fd, const __sanitizer_iovec *vec, long vlen, long pos_l, long pos_h) {}
 
-POST_SYSCALL(preadv)(long res, long fd, const __sanitizer_iovec *vec, long vlen,
-                     long pos_l, long pos_h) {
+POST_SYSCALL(preadv)
+(long res, long fd, const __sanitizer_iovec *vec, long vlen, long pos_l,
+ long pos_h) {
   if (res >= 0) {
-    if (vec) kernel_write_iovec(vec, vlen, res);
+    if (vec)
+      kernel_write_iovec(vec, vlen, res);
   }
 }
 
-PRE_SYSCALL(pwritev)(long fd, const __sanitizer_iovec *vec, long vlen,
-                     long pos_l, long pos_h) {}
+PRE_SYSCALL(pwritev)
+(long fd, const __sanitizer_iovec *vec, long vlen, long pos_l, long pos_h) {}
 
-POST_SYSCALL(pwritev)(long res, long fd, const __sanitizer_iovec *vec,
-                      long vlen, long pos_l, long pos_h) {
+POST_SYSCALL(pwritev)
+(long res, long fd, const __sanitizer_iovec *vec, long vlen, long pos_l,
+ long pos_h) {
   if (res >= 0) {
-    if (vec) kernel_read_iovec(vec, vlen, res);
+    if (vec)
+      kernel_read_iovec(vec, vlen, res);
   }
 }
 
@@ -1717,14 +1844,15 @@ PRE_SYSCALL(quotactl)(long cmd, const void *special, long id, void *addr) {
     PRE_READ(special, __sanitizer::internal_strlen((const char *)special) + 1);
 }
 
-POST_SYSCALL(quotactl)(long res, long cmd, const void *special, long id,
-                       void *addr) {}
+POST_SYSCALL(quotactl)
+(long res, long cmd, const void *special, long id, void *addr) {}
 
 PRE_SYSCALL(getdents)(long fd, void *dirent, long count) {}
 
 POST_SYSCALL(getdents)(long res, long fd, void *dirent, long count) {
   if (res >= 0) {
-    if (dirent) POST_WRITE(dirent, res);
+    if (dirent)
+      POST_WRITE(dirent, res);
   }
 }
 
@@ -1732,15 +1860,16 @@ PRE_SYSCALL(getdents64)(long fd, void *dirent, long count) {}
 
 POST_SYSCALL(getdents64)(long res, long fd, void *dirent, long count) {
   if (res >= 0) {
-    if (dirent) POST_WRITE(dirent, res);
+    if (dirent)
+      POST_WRITE(dirent, res);
   }
 }
 
-PRE_SYSCALL(setsockopt)(long fd, long level, long optname, void *optval,
-                        long optlen) {}
+PRE_SYSCALL(setsockopt)
+(long fd, long level, long optname, void *optval, long optlen) {}
 
-POST_SYSCALL(setsockopt)(long res, long fd, long level, long optname,
-                         void *optval, long optlen) {
+POST_SYSCALL(setsockopt)
+(long res, long fd, long level, long optname, void *optval, long optlen) {
   if (res >= 0) {
     if (optval)
       POST_WRITE(optval,
@@ -1748,77 +1877,88 @@ POST_SYSCALL(setsockopt)(long res, long fd, long level, long optname,
   }
 }
 
-PRE_SYSCALL(getsockopt)(long fd, long level, long optname, void *optval,
-                        void *optlen) {}
+PRE_SYSCALL(getsockopt)
+(long fd, long level, long optname, void *optval, void *optlen) {}
 
-POST_SYSCALL(getsockopt)(long res, long fd, long level, long optname,
-                         void *optval, void *optlen) {
+POST_SYSCALL(getsockopt)
+(long res, long fd, long level, long optname, void *optval, void *optlen) {
   if (res >= 0) {
     if (optval)
       POST_WRITE(optval,
                  __sanitizer::internal_strlen((const char *)optval) + 1);
-    if (optlen) POST_WRITE(optlen, sizeof(int));
+    if (optlen)
+      POST_WRITE(optlen, sizeof(int));
   }
 }
 
 PRE_SYSCALL(bind)(long arg0, sanitizer_kernel_sockaddr *arg1, long arg2) {}
 
-POST_SYSCALL(bind)(long res, long arg0, sanitizer_kernel_sockaddr *arg1,
-                   long arg2) {
+POST_SYSCALL(bind)
+(long res, long arg0, sanitizer_kernel_sockaddr *arg1, long arg2) {
   if (res >= 0) {
-    if (arg1) POST_WRITE(arg1, sizeof(*arg1));
+    if (arg1)
+      POST_WRITE(arg1, sizeof(*arg1));
   }
 }
 
 PRE_SYSCALL(connect)(long arg0, sanitizer_kernel_sockaddr *arg1, long arg2) {}
 
-POST_SYSCALL(connect)(long res, long arg0, sanitizer_kernel_sockaddr *arg1,
-                      long arg2) {
+POST_SYSCALL(connect)
+(long res, long arg0, sanitizer_kernel_sockaddr *arg1, long arg2) {
   if (res >= 0) {
-    if (arg1) POST_WRITE(arg1, sizeof(*arg1));
+    if (arg1)
+      POST_WRITE(arg1, sizeof(*arg1));
   }
 }
 
 PRE_SYSCALL(accept)(long arg0, sanitizer_kernel_sockaddr *arg1, void *arg2) {}
 
-POST_SYSCALL(accept)(long res, long arg0, sanitizer_kernel_sockaddr *arg1,
-                     void *arg2) {
+POST_SYSCALL(accept)
+(long res, long arg0, sanitizer_kernel_sockaddr *arg1, void *arg2) {
   if (res >= 0) {
-    if (arg1) POST_WRITE(arg1, sizeof(*arg1));
-    if (arg2) POST_WRITE(arg2, sizeof(unsigned));
+    if (arg1)
+      POST_WRITE(arg1, sizeof(*arg1));
+    if (arg2)
+      POST_WRITE(arg2, sizeof(unsigned));
   }
 }
 
-PRE_SYSCALL(accept4)(long arg0, sanitizer_kernel_sockaddr *arg1, void *arg2,
-                     long arg3) {}
+PRE_SYSCALL(accept4)
+(long arg0, sanitizer_kernel_sockaddr *arg1, void *arg2, long arg3) {}
 
-POST_SYSCALL(accept4)(long res, long arg0, sanitizer_kernel_sockaddr *arg1,
-                      void *arg2, long arg3) {
+POST_SYSCALL(accept4)
+(long res, long arg0, sanitizer_kernel_sockaddr *arg1, void *arg2, long arg3) {
   if (res >= 0) {
-    if (arg1) POST_WRITE(arg1, sizeof(*arg1));
-    if (arg2) POST_WRITE(arg2, sizeof(unsigned));
+    if (arg1)
+      POST_WRITE(arg1, sizeof(*arg1));
+    if (arg2)
+      POST_WRITE(arg2, sizeof(unsigned));
   }
 }
 
-PRE_SYSCALL(getsockname)(long arg0, sanitizer_kernel_sockaddr *arg1,
-                         void *arg2) {}
+PRE_SYSCALL(getsockname)
+(long arg0, sanitizer_kernel_sockaddr *arg1, void *arg2) {}
 
-POST_SYSCALL(getsockname)(long res, long arg0, sanitizer_kernel_sockaddr *arg1,
-                          void *arg2) {
+POST_SYSCALL(getsockname)
+(long res, long arg0, sanitizer_kernel_sockaddr *arg1, void *arg2) {
   if (res >= 0) {
-    if (arg1) POST_WRITE(arg1, sizeof(*arg1));
-    if (arg2) POST_WRITE(arg2, sizeof(unsigned));
+    if (arg1)
+      POST_WRITE(arg1, sizeof(*arg1));
+    if (arg2)
+      POST_WRITE(arg2, sizeof(unsigned));
   }
 }
 
-PRE_SYSCALL(getpeername)(long arg0, sanitizer_kernel_sockaddr *arg1,
-                         void *arg2) {}
+PRE_SYSCALL(getpeername)
+(long arg0, sanitizer_kernel_sockaddr *arg1, void *arg2) {}
 
-POST_SYSCALL(getpeername)(long res, long arg0, sanitizer_kernel_sockaddr *arg1,
-                          void *arg2) {
+POST_SYSCALL(getpeername)
+(long res, long arg0, sanitizer_kernel_sockaddr *arg1, void *arg2) {
   if (res >= 0) {
-    if (arg1) POST_WRITE(arg1, sizeof(*arg1));
-    if (arg2) POST_WRITE(arg2, sizeof(unsigned));
+    if (arg1)
+      POST_WRITE(arg1, sizeof(*arg1));
+    if (arg2)
+      POST_WRITE(arg2, sizeof(unsigned));
   }
 }
 
@@ -1826,18 +1966,23 @@ PRE_SYSCALL(send)(long arg0, void *arg1, long arg2, long arg3) {}
 
 POST_SYSCALL(send)(long res, long arg0, void *arg1, long arg2, long arg3) {
   if (res) {
-    if (arg1) POST_READ(arg1, res);
+    if (arg1)
+      POST_READ(arg1, res);
   }
 }
 
-PRE_SYSCALL(sendto)(long arg0, void *arg1, long arg2, long arg3,
-                    sanitizer_kernel_sockaddr *arg4, long arg5) {}
+PRE_SYSCALL(sendto)
+(long arg0, void *arg1, long arg2, long arg3, sanitizer_kernel_sockaddr *arg4,
+ long arg5) {}
 
-POST_SYSCALL(sendto)(long res, long arg0, void *arg1, long arg2, long arg3,
-                     sanitizer_kernel_sockaddr *arg4, long arg5) {
+POST_SYSCALL(sendto)
+(long res, long arg0, void *arg1, long arg2, long arg3,
+ sanitizer_kernel_sockaddr *arg4, long arg5) {
   if (res >= 0) {
-    if (arg1) POST_READ(arg1, res);
-    if (arg4) POST_WRITE(arg4, sizeof(*arg4));
+    if (arg1)
+      POST_READ(arg1, res);
+    if (arg4)
+      POST_WRITE(arg4, sizeof(*arg4));
   }
 }
 
@@ -1857,19 +2002,25 @@ PRE_SYSCALL(recv)(long arg0, void *buf, long len, long flags) {}
 
 POST_SYSCALL(recv)(long res, void *buf, long len, long flags) {
   if (res >= 0) {
-    if (buf) POST_WRITE(buf, res);
+    if (buf)
+      POST_WRITE(buf, res);
   }
 }
 
-PRE_SYSCALL(recvfrom)(long arg0, void *buf, long len, long flags,
-                      sanitizer_kernel_sockaddr *arg4, void *arg5) {}
+PRE_SYSCALL(recvfrom)
+(long arg0, void *buf, long len, long flags, sanitizer_kernel_sockaddr *arg4,
+ void *arg5) {}
 
-POST_SYSCALL(recvfrom)(long res, long arg0, void *buf, long len, long flags,
-                       sanitizer_kernel_sockaddr *arg4, void *arg5) {
+POST_SYSCALL(recvfrom)
+(long res, long arg0, void *buf, long len, long flags,
+ sanitizer_kernel_sockaddr *arg4, void *arg5) {
   if (res >= 0) {
-    if (buf) POST_WRITE(buf, res);
-    if (arg4) POST_WRITE(arg4, sizeof(*arg4));
-    if (arg5) POST_WRITE(arg5, sizeof(int));
+    if (buf)
+      POST_WRITE(buf, res);
+    if (arg4)
+      POST_WRITE(arg4, sizeof(*arg4));
+    if (arg5)
+      POST_WRITE(arg5, sizeof(int));
   }
 }
 
@@ -1881,14 +2032,16 @@ PRE_SYSCALL(socketpair)(long arg0, long arg1, long arg2, int *sv) {}
 
 POST_SYSCALL(socketpair)(long res, long arg0, long arg1, long arg2, int *sv) {
   if (res >= 0)
-    if (sv) POST_WRITE(sv, sizeof(int) * 2);
+    if (sv)
+      POST_WRITE(sv, sizeof(int) * 2);
 }
 
 PRE_SYSCALL(socketcall)(long call, void *args) {}
 
 POST_SYSCALL(socketcall)(long res, long call, void *args) {
   if (res >= 0) {
-    if (args) POST_WRITE(args, sizeof(long));
+    if (args)
+      POST_WRITE(args, sizeof(long));
   }
 }
 
@@ -1898,25 +2051,31 @@ POST_SYSCALL(listen)(long res, long arg0, long arg1) {}
 
 PRE_SYSCALL(poll)(void *ufds, long nfds, long timeout) {}
 
-POST_SYSCALL(poll)(long res, __sanitizer_pollfd *ufds, long nfds,
-                   long timeout) {
+POST_SYSCALL(poll)
+(long res, __sanitizer_pollfd *ufds, long nfds, long timeout) {
   if (res >= 0) {
-    if (ufds) POST_WRITE(ufds, nfds * sizeof(*ufds));
+    if (ufds)
+      POST_WRITE(ufds, nfds * sizeof(*ufds));
   }
 }
 
-PRE_SYSCALL(select)(long n, __sanitizer___kernel_fd_set *inp,
-                    __sanitizer___kernel_fd_set *outp,
                   __sanitizer___kernel_fd_set *exp, void *tvp) {}
+PRE_SYSCALL(select)
+(long n, __sanitizer___kernel_fd_set *inp, __sanitizer___kernel_fd_set *outp,
+ __sanitizer___kernel_fd_set *exp, void *tvp) {}
 
-POST_SYSCALL(select)(long res, long n, __sanitizer___kernel_fd_set *inp,
-                     __sanitizer___kernel_fd_set *outp,
-                     __sanitizer___kernel_fd_set *exp, void *tvp) {
+POST_SYSCALL(select)
+(long res, long n, __sanitizer___kernel_fd_set *inp,
+ __sanitizer___kernel_fd_set *outp, __sanitizer___kernel_fd_set *exp,
+ void *tvp) {
   if (res >= 0) {
-    if (inp) POST_WRITE(inp, sizeof(*inp));
-    if (outp) POST_WRITE(outp, sizeof(*outp));
-    if (exp) POST_WRITE(exp, sizeof(*exp));
-    if (tvp) POST_WRITE(tvp, timeval_sz);
+    if (inp)
+      POST_WRITE(inp, sizeof(*inp));
+    if (outp)
+      POST_WRITE(outp, sizeof(*outp));
+    if (exp)
+      POST_WRITE(exp, sizeof(*exp));
+    if (tvp)
+      POST_WRITE(tvp, timeval_sz);
   }
 }
 
@@ -1936,29 +2095,55 @@ PRE_SYSCALL(epoll_ctl)(long epfd, long op, long fd, void *event) {}
 
 POST_SYSCALL(epoll_ctl)(long res, long epfd, long op, long fd, void *event) {
   if (res >= 0) {
-    if (event) POST_WRITE(event, struct_epoll_event_sz);
+    if (event)
+      POST_WRITE(event, struct_epoll_event_sz);
   }
 }
 
-PRE_SYSCALL(epoll_wait)(long epfd, void *events, long maxevents, long timeout) {
+PRE_SYSCALL(epoll_wait)
+(long epfd, void *events, long maxevents, long timeout) {}
+
+POST_SYSCALL(epoll_wait)
+(long res, long epfd, void *events, long maxevents, long timeout) {
+  if (res >= 0) {
+    if (events)
+      POST_WRITE(events, res * struct_epoll_event_sz);
+  }
+}
+
+PRE_SYSCALL(epoll_pwait)
+(long epfd, void *events, long maxevents, long timeout,
+ const kernel_sigset_t *sigmask, long sigsetsize) {
+  if (sigmask)
+    PRE_READ(sigmask, sigsetsize);
 }
 
-POST_SYSCALL(epoll_wait)(long res, long epfd, void *events, long maxevents,
-                         long timeout) {
+POST_SYSCALL(epoll_pwait)
+(long res, long epfd, void *events, long maxevents, long timeout,
+ const void *sigmask, long sigsetsize) {
   if (res >= 0) {
-    if (events) POST_WRITE(events, struct_epoll_event_sz);
+    if (events)
+      POST_WRITE(events, res * struct_epoll_event_sz);
   }
 }
 
-PRE_SYSCALL(epoll_pwait)(long epfd, void *events, long maxevents, long timeout,
-                         const kernel_sigset_t *sigmask, long sigsetsize) {
-  if (sigmask) PRE_READ(sigmask, sigsetsize);
+PRE_SYSCALL(epoll_pwait2)
+(long epfd, void *events, long maxevents,
+ const sanitizer_kernel_timespec *timeout, const kernel_sigset_t *sigmask,
+ long sigsetsize) {
+  if (timeout)
+    PRE_READ(timeout, sizeof(timeout));
+  if (sigmask)
+    PRE_READ(sigmask, sigsetsize);
 }
 
-POST_SYSCALL(epoll_pwait)(long res, long epfd, void *events, long maxevents,
-                          long timeout, const void *sigmask, long sigsetsize) {
+POST_SYSCALL(epoll_pwait2)
+(long res, long epfd, void *events, long maxevents,
+ const sanitizer_kernel_timespec *timeout, const void *sigmask,
+ long sigsetsize) {
   if (res >= 0) {
-    if (events) POST_WRITE(events, struct_epoll_event_sz);
+    if (events)
+      POST_WRITE(events, res * struct_epoll_event_sz);
   }
 }
 
@@ -1993,7 +2178,8 @@ PRE_SYSCALL(newuname)(void *name) {}
 
 POST_SYSCALL(newuname)(long res, void *name) {
   if (res >= 0) {
-    if (name) POST_WRITE(name, struct_new_utsname_sz);
+    if (name)
+      POST_WRITE(name, struct_new_utsname_sz);
   }
 }
 
@@ -2001,7 +2187,8 @@ PRE_SYSCALL(uname)(void *arg0) {}
 
 POST_SYSCALL(uname)(long res, void *arg0) {
   if (res >= 0) {
-    if (arg0) POST_WRITE(arg0, struct_old_utsname_sz);
+    if (arg0)
+      POST_WRITE(arg0, struct_old_utsname_sz);
   }
 }
 
@@ -2009,7 +2196,8 @@ PRE_SYSCALL(olduname)(void *arg0) {}
 
 POST_SYSCALL(olduname)(long res, void *arg0) {
   if (res >= 0) {
-    if (arg0) POST_WRITE(arg0, struct_oldold_utsname_sz);
+    if (arg0)
+      POST_WRITE(arg0, struct_oldold_utsname_sz);
   }
 }
 
@@ -2017,7 +2205,8 @@ PRE_SYSCALL(getrlimit)(long resource, void *rlim) {}
 
 POST_SYSCALL(getrlimit)(long res, long resource, void *rlim) {
   if (res >= 0) {
-    if (rlim) POST_WRITE(rlim, struct_rlimit_sz);
+    if (rlim)
+      POST_WRITE(rlim, struct_rlimit_sz);
   }
 }
 
@@ -2025,7 +2214,8 @@ PRE_SYSCALL(old_getrlimit)(long resource, void *rlim) {}
 
 POST_SYSCALL(old_getrlimit)(long res, long resource, void *rlim) {
   if (res >= 0) {
-    if (rlim) POST_WRITE(rlim, struct_rlimit_sz);
+    if (rlim)
+      POST_WRITE(rlim, struct_rlimit_sz);
   }
 }
 
@@ -2033,29 +2223,33 @@ PRE_SYSCALL(setrlimit)(long resource, void *rlim) {}
 
 POST_SYSCALL(setrlimit)(long res, long resource, void *rlim) {
   if (res >= 0) {
-    if (rlim) POST_WRITE(rlim, struct_rlimit_sz);
+    if (rlim)
+      POST_WRITE(rlim, struct_rlimit_sz);
   }
 }
 
-#if !SANITIZER_ANDROID
-PRE_SYSCALL(prlimit64)(long pid, long resource, const void *new_rlim,
-                       void *old_rlim) {
-  if (new_rlim) PRE_READ(new_rlim, struct_rlimit64_sz);
+#  if !SANITIZER_ANDROID
+PRE_SYSCALL(prlimit64)
+(long pid, long resource, const void *new_rlim, void *old_rlim) {
+  if (new_rlim)
+    PRE_READ(new_rlim, struct_rlimit64_sz);
 }
 
-POST_SYSCALL(prlimit64)(long res, long pid, long resource, const void *new_rlim,
-                        void *old_rlim) {
+POST_SYSCALL(prlimit64)
+(long res, long pid, long resource, const void *new_rlim, void *old_rlim) {
   if (res >= 0) {
-    if (old_rlim) POST_WRITE(old_rlim, struct_rlimit64_sz);
+    if (old_rlim)
+      POST_WRITE(old_rlim, struct_rlimit64_sz);
   }
 }
-#endif
+#  endif
 
 PRE_SYSCALL(getrusage)(long who, void *ru) {}
 
 POST_SYSCALL(getrusage)(long res, long who, void *ru) {
   if (res >= 0) {
-    if (ru) POST_WRITE(ru, struct_rusage_sz);
+    if (ru)
+      POST_WRITE(ru, struct_rusage_sz);
   }
 }
 
@@ -2068,31 +2262,34 @@ PRE_SYSCALL(msgget)(long key, long msgflg) {}
 POST_SYSCALL(msgget)(long res, long key, long msgflg) {}
 
 PRE_SYSCALL(msgsnd)(long msqid, void *msgp, long msgsz, long msgflg) {
-  if (msgp) PRE_READ(msgp, msgsz);
+  if (msgp)
+    PRE_READ(msgp, msgsz);
 }
 
-POST_SYSCALL(msgsnd)(long res, long msqid, void *msgp, long msgsz,
-                     long msgflg) {}
+POST_SYSCALL(msgsnd)
+(long res, long msqid, void *msgp, long msgsz, long msgflg) {}
 
-PRE_SYSCALL(msgrcv)(long msqid, void *msgp, long msgsz, long msgtyp,
-                    long msgflg) {}
+PRE_SYSCALL(msgrcv)
+(long msqid, void *msgp, long msgsz, long msgtyp, long msgflg) {}
 
-POST_SYSCALL(msgrcv)(long res, long msqid, void *msgp, long msgsz, long msgtyp,
-                     long msgflg) {
+POST_SYSCALL(msgrcv)
+(long res, long msqid, void *msgp, long msgsz, long msgtyp, long msgflg) {
   if (res >= 0) {
-    if (msgp) POST_WRITE(msgp, res);
+    if (msgp)
+      POST_WRITE(msgp, res);
   }
 }
 
-#if !SANITIZER_ANDROID
+#  if !SANITIZER_ANDROID
 PRE_SYSCALL(msgctl)(long msqid, long cmd, void *buf) {}
 
 POST_SYSCALL(msgctl)(long res, long msqid, long cmd, void *buf) {
   if (res >= 0) {
-    if (buf) POST_WRITE(buf, struct_msqid_ds_sz);
+    if (buf)
+      POST_WRITE(buf, struct_msqid_ds_sz);
   }
 }
-#endif
+#  endif
 
 PRE_SYSCALL(semget)(long key, long nsems, long semflg) {}
 
@@ -2106,13 +2303,14 @@ PRE_SYSCALL(semctl)(long semid, long semnum, long cmd, void *arg) {}
 
 POST_SYSCALL(semctl)(long res, long semid, long semnum, long cmd, void *arg) {}
 
-PRE_SYSCALL(semtimedop)(long semid, void *sops, long nsops,
-                        const void *timeout) {
-  if (timeout) PRE_READ(timeout, struct_timespec_sz);
+PRE_SYSCALL(semtimedop)
+(long semid, void *sops, long nsops, const void *timeout) {
+  if (timeout)
+    PRE_READ(timeout, struct_timespec_sz);
 }
 
-POST_SYSCALL(semtimedop)(long res, long semid, void *sops, long nsops,
-                         const void *timeout) {}
+POST_SYSCALL(semtimedop)
+(long res, long semid, void *sops, long nsops, const void *timeout) {}
 
 PRE_SYSCALL(shmat)(long shmid, void *shmaddr, long shmflg) {}
 
@@ -2138,18 +2336,20 @@ POST_SYSCALL(shmdt)(long res, void *shmaddr) {
   }
 }
 
-PRE_SYSCALL(ipc)(long call, long first, long second, long third, void *ptr,
-                 long fifth) {}
+PRE_SYSCALL(ipc)
+(long call, long first, long second, long third, void *ptr, long fifth) {}
 
-POST_SYSCALL(ipc)(long res, long call, long first, long second, long third,
-                  void *ptr, long fifth) {}
+POST_SYSCALL(ipc)
+(long res, long call, long first, long second, long third, void *ptr,
+ long fifth) {}
 
-#if !SANITIZER_ANDROID
+#  if !SANITIZER_ANDROID
 PRE_SYSCALL(shmctl)(long shmid, long cmd, void *buf) {}
 
 POST_SYSCALL(shmctl)(long res, long shmid, long cmd, void *buf) {
   if (res >= 0) {
-    if (buf) POST_WRITE(buf, sizeof(__sanitizer_shmid_ds));
+    if (buf)
+      POST_WRITE(buf, sizeof(__sanitizer_shmid_ds));
   }
 }
 
@@ -2158,10 +2358,11 @@ PRE_SYSCALL(mq_open)(const void *name, long oflag, long mode, void *attr) {
     PRE_READ(name, __sanitizer::internal_strlen((const char *)name) + 1);
 }
 
-POST_SYSCALL(mq_open)(long res, const void *name, long oflag, long mode,
-                      void *attr) {
+POST_SYSCALL(mq_open)
+(long res, const void *name, long oflag, long mode, void *attr) {
   if (res >= 0) {
-    if (attr) POST_WRITE(attr, struct_mq_attr_sz);
+    if (attr)
+      POST_WRITE(attr, struct_mq_attr_sz);
   }
 }
 
@@ -2172,62 +2373,73 @@ PRE_SYSCALL(mq_unlink)(const void *name) {
 
 POST_SYSCALL(mq_unlink)(long res, const void *name) {}
 
-PRE_SYSCALL(mq_timedsend)(long mqdes, const void *msg_ptr, long msg_len,
-                          long msg_prio, const void *abs_timeout) {
-  if (msg_ptr) PRE_READ(msg_ptr, msg_len);
-  if (abs_timeout) PRE_READ(abs_timeout, struct_timespec_sz);
+PRE_SYSCALL(mq_timedsend)
+(long mqdes, const void *msg_ptr, long msg_len, long msg_prio,
+ const void *abs_timeout) {
+  if (msg_ptr)
+    PRE_READ(msg_ptr, msg_len);
+  if (abs_timeout)
+    PRE_READ(abs_timeout, struct_timespec_sz);
 }
 
-POST_SYSCALL(mq_timedsend)(long res, long mqdes, const void *msg_ptr,
-                           long msg_len, long msg_prio,
                          const void *abs_timeout) {}
+POST_SYSCALL(mq_timedsend)
+(long res, long mqdes, const void *msg_ptr, long msg_len, long msg_prio,
+ const void *abs_timeout) {}
 
-PRE_SYSCALL(mq_timedreceive)(long mqdes, void *msg_ptr, long msg_len,
-                             void *msg_prio, const void *abs_timeout) {
-  if (abs_timeout) PRE_READ(abs_timeout, struct_timespec_sz);
+PRE_SYSCALL(mq_timedreceive)
+(long mqdes, void *msg_ptr, long msg_len, void *msg_prio,
+ const void *abs_timeout) {
+  if (abs_timeout)
+    PRE_READ(abs_timeout, struct_timespec_sz);
 }
 
-POST_SYSCALL(mq_timedreceive)(long res, long mqdes, void *msg_ptr, long msg_len,
-                              int *msg_prio, const void *abs_timeout) {
+POST_SYSCALL(mq_timedreceive)
+(long res, long mqdes, void *msg_ptr, long msg_len, int *msg_prio,
+ const void *abs_timeout) {
   if (res >= 0) {
-    if (msg_ptr) POST_WRITE(msg_ptr, res);
-    if (msg_prio) POST_WRITE(msg_prio, sizeof(*msg_prio));
+    if (msg_ptr)
+      POST_WRITE(msg_ptr, res);
+    if (msg_prio)
+      POST_WRITE(msg_prio, sizeof(*msg_prio));
   }
 }
 
 PRE_SYSCALL(mq_notify)(long mqdes, const void *notification) {
-  if (notification) PRE_READ(notification, struct_sigevent_sz);
+  if (notification)
+    PRE_READ(notification, struct_sigevent_sz);
 }
 
 POST_SYSCALL(mq_notify)(long res, long mqdes, const void *notification) {}
 
 PRE_SYSCALL(mq_getsetattr)(long mqdes, const void *mqstat, void *omqstat) {
-  if (mqstat) PRE_READ(mqstat, struct_mq_attr_sz);
+  if (mqstat)
+    PRE_READ(mqstat, struct_mq_attr_sz);
 }
 
-POST_SYSCALL(mq_getsetattr)(long res, long mqdes, const void *mqstat,
-                            void *omqstat) {
+POST_SYSCALL(mq_getsetattr)
+(long res, long mqdes, const void *mqstat, void *omqstat) {
   if (res >= 0) {
-    if (omqstat) POST_WRITE(omqstat, struct_mq_attr_sz);
+    if (omqstat)
+      POST_WRITE(omqstat, struct_mq_attr_sz);
   }
 }
-#endif  // SANITIZER_ANDROID
+#  endif  // SANITIZER_ANDROID
 
 PRE_SYSCALL(pciconfig_iobase)(long which, long bus, long devfn) {}
 
 POST_SYSCALL(pciconfig_iobase)(long res, long which, long bus, long devfn) {}
 
-PRE_SYSCALL(pciconfig_read)(long bus, long dfn, long off, long len, void *buf) {
-}
+PRE_SYSCALL(pciconfig_read)
+(long bus, long dfn, long off, long len, void *buf) {}
 
-POST_SYSCALL(pciconfig_read)(long res, long bus, long dfn, long off, long len,
-                             void *buf) {}
+POST_SYSCALL(pciconfig_read)
+(long res, long bus, long dfn, long off, long len, void *buf) {}
 
-PRE_SYSCALL(pciconfig_write)(long bus, long dfn, long off, long len,
-                             void *buf) {}
+PRE_SYSCALL(pciconfig_write)
+(long bus, long dfn, long off, long len, void *buf) {}
 
-POST_SYSCALL(pciconfig_write)(long res, long bus, long dfn, long off, long len,
-                              void *buf) {}
+POST_SYSCALL(pciconfig_write)
+(long res, long bus, long dfn, long off, long len, void *buf) {}
 
 PRE_SYSCALL(swapon)(const void *specialfile, long swap_flags) {
   if (specialfile)
@@ -2247,8 +2459,10 @@ POST_SYSCALL(swapoff)(long res, const void *specialfile) {}
 
 PRE_SYSCALL(sysctl)(__sanitizer___sysctl_args *args) {
   if (args) {
-    if (args->name) PRE_READ(args->name, args->nlen * sizeof(*args->name));
-    if (args->newval) PRE_READ(args->name, args->newlen);
+    if (args->name)
+      PRE_READ(args->name, args->nlen * sizeof(*args->name));
+    if (args->newval)
+      PRE_READ(args->name, args->newlen);
   }
 }
 
@@ -2265,7 +2479,8 @@ PRE_SYSCALL(sysinfo)(void *info) {}
 
 POST_SYSCALL(sysinfo)(long res, void *info) {
   if (res >= 0) {
-    if (info) POST_WRITE(info, struct_sysinfo_sz);
+    if (info)
+      POST_WRITE(info, struct_sysinfo_sz);
   }
 }
 
@@ -2294,10 +2509,10 @@ PRE_SYSCALL(ni_syscall)() {}
 POST_SYSCALL(ni_syscall)(long res) {}
 
 PRE_SYSCALL(ptrace)(long request, long pid, long addr, long data) {
-#if !SANITIZER_ANDROID &&                                                   \
-    (defined(__i386) || defined(__x86_64) || defined(__mips64) ||           \
-     defined(__powerpc64__) || defined(__aarch64__) || defined(__s390__) || \
-     SANITIZER_RISCV64)
+#  if !SANITIZER_ANDROID &&                                                   \
+      (defined(__i386) || defined(__x86_64) || defined(__mips64) ||           \
+       defined(__powerpc64__) || defined(__aarch64__) || defined(__s390__) || \
+       SANITIZER_RISCV64)
   if (data) {
     if (request == ptrace_setregs) {
       PRE_READ((void *)data, struct_user_regs_struct_sz);
@@ -2312,14 +2527,14 @@ PRE_SYSCALL(ptrace)(long request, long pid, long addr, long data) {
       PRE_READ(iov->iov_base, iov->iov_len);
     }
   }
-#endif
+#  endif
 }
 
 POST_SYSCALL(ptrace)(long res, long request, long pid, long addr, long data) {
-#if !SANITIZER_ANDROID &&                                                   \
-    (defined(__i386) || defined(__x86_64) || defined(__mips64) ||           \
-     defined(__powerpc64__) || defined(__aarch64__) || defined(__s390__) || \
-     SANITIZER_RISCV64)
+#  if !SANITIZER_ANDROID &&                                                   \
+      (defined(__i386) || defined(__x86_64) || defined(__mips64) ||           \
+       defined(__powerpc64__) || defined(__aarch64__) || defined(__s390__) || \
+       SANITIZER_RISCV64)
   if (res >= 0 && data) {
     // Note that this is different from the interceptor in
     // sanitizer_common_interceptors.inc.
@@ -2340,11 +2555,12 @@ POST_SYSCALL(ptrace)(long res, long request, long pid, long addr, long data) {
       POST_WRITE((void *)data, sizeof(void *));
     }
   }
-#endif
+#  endif
 }
 
-PRE_SYSCALL(add_key)(const void *_type, const void *_description,
-                     const void *_payload, long plen, long destringid) {
+PRE_SYSCALL(add_key)
+(const void *_type, const void *_description, const void *_payload, long plen,
+ long destringid) {
   if (_type)
     PRE_READ(_type, __sanitizer::internal_strlen((const char *)_type) + 1);
   if (_description)
@@ -2352,11 +2568,13 @@ PRE_SYSCALL(add_key)(const void *_type, const void *_description,
              __sanitizer::internal_strlen((const char *)_description) + 1);
 }
 
-POST_SYSCALL(add_key)(long res, const void *_type, const void *_description,
-                      const void *_payload, long plen, long destringid) {}
+POST_SYSCALL(add_key)
+(long res, const void *_type, const void *_description, const void *_payload,
+ long plen, long destringid) {}
 
-PRE_SYSCALL(request_key)(const void *_type, const void *_description,
-                         const void *_callout_info, long destringid) {
+PRE_SYSCALL(request_key)
+(const void *_type, const void *_description, const void *_callout_info,
+ long destringid) {
   if (_type)
     PRE_READ(_type, __sanitizer::internal_strlen((const char *)_type) + 1);
   if (_description)
@@ -2367,13 +2585,14 @@ PRE_SYSCALL(request_key)(const void *_type, const void *_description,
              __sanitizer::internal_strlen((const char *)_callout_info) + 1);
 }
 
-POST_SYSCALL(request_key)(long res, const void *_type, const void *_description,
-                          const void *_callout_info, long destringid) {}
+POST_SYSCALL(request_key)
+(long res, const void *_type, const void *_description,
+ const void *_callout_info, long destringid) {}
 
 PRE_SYSCALL(keyctl)(long cmd, long arg2, long arg3, long arg4, long arg5) {}
 
-POST_SYSCALL(keyctl)(long res, long cmd, long arg2, long arg3, long arg4,
-                     long arg5) {}
+POST_SYSCALL(keyctl)
+(long res, long cmd, long arg2, long arg3, long arg4, long arg5) {}
 
 PRE_SYSCALL(ioprio_set)(long which, long who, long ioprio) {}
 
@@ -2387,50 +2606,62 @@ PRE_SYSCALL(set_mempolicy)(long mode, void *nmask, long maxnode) {}
 
 POST_SYSCALL(set_mempolicy)(long res, long mode, void *nmask, long maxnode) {
   if (res >= 0) {
-    if (nmask) POST_WRITE(nmask, sizeof(long));
+    if (nmask)
+      POST_WRITE(nmask, sizeof(long));
   }
 }
 
-PRE_SYSCALL(migrate_pages)(long pid, long maxnode, const void *from,
-                           const void *to) {
-  if (from) PRE_READ(from, sizeof(long));
-  if (to) PRE_READ(to, sizeof(long));
+PRE_SYSCALL(migrate_pages)
+(long pid, long maxnode, const void *from, const void *to) {
+  if (from)
+    PRE_READ(from, sizeof(long));
+  if (to)
+    PRE_READ(to, sizeof(long));
 }
 
-POST_SYSCALL(migrate_pages)(long res, long pid, long maxnode, const void *from,
-                            const void *to) {}
+POST_SYSCALL(migrate_pages)
+(long res, long pid, long maxnode, const void *from, const void *to) {}
 
-PRE_SYSCALL(move_pages)(long pid, long nr_pages, const void **pages,
-                        const int *nodes, int *status, long flags) {
-  if (pages) PRE_READ(pages, nr_pages * sizeof(*pages));
-  if (nodes) PRE_READ(nodes, nr_pages * sizeof(*nodes));
+PRE_SYSCALL(move_pages)
+(long pid, long nr_pages, const void **pages, const int *nodes, int *status,
+ long flags) {
+  if (pages)
+    PRE_READ(pages, nr_pages * sizeof(*pages));
+  if (nodes)
+    PRE_READ(nodes, nr_pages * sizeof(*nodes));
 }
 
-POST_SYSCALL(move_pages)(long res, long pid, long nr_pages, const void **pages,
-                         const int *nodes, int *status, long flags) {
+POST_SYSCALL(move_pages)
+(long res, long pid, long nr_pages, const void **pages, const int *nodes,
+ int *status, long flags) {
   if (res >= 0) {
-    if (status) POST_WRITE(status, nr_pages * sizeof(*status));
+    if (status)
+      POST_WRITE(status, nr_pages * sizeof(*status));
   }
 }
 
-PRE_SYSCALL(mbind)(long start, long len, long mode, void *nmask, long maxnode,
-                   long flags) {}
+PRE_SYSCALL(mbind)
+(long start, long len, long mode, void *nmask, long maxnode, long flags) {}
 
-POST_SYSCALL(mbind)(long res, long start, long len, long mode, void *nmask,
-                    long maxnode, long flags) {
+POST_SYSCALL(mbind)
+(long res, long start, long len, long mode, void *nmask, long maxnode,
+ long flags) {
   if (res >= 0) {
-    if (nmask) POST_WRITE(nmask, sizeof(long));
+    if (nmask)
+      POST_WRITE(nmask, sizeof(long));
   }
 }
 
-PRE_SYSCALL(get_mempolicy)(void *policy, void *nmask, long maxnode, long addr,
-                           long flags) {}
+PRE_SYSCALL(get_mempolicy)
+(void *policy, void *nmask, long maxnode, long addr, long flags) {}
 
-POST_SYSCALL(get_mempolicy)(long res, void *policy, void *nmask, long maxnode,
-                            long addr, long flags) {
+POST_SYSCALL(get_mempolicy)
+(long res, void *policy, void *nmask, long maxnode, long addr, long flags) {
   if (res >= 0) {
-    if (policy) POST_WRITE(policy, sizeof(int));
-    if (nmask) POST_WRITE(nmask, sizeof(long));
+    if (policy)
+      POST_WRITE(policy, sizeof(int));
+    if (nmask)
+      POST_WRITE(nmask, sizeof(long));
   }
 }
 
@@ -2447,8 +2678,8 @@ PRE_SYSCALL(inotify_add_watch)(long fd, const void *path, long mask) {
     PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);
 }
 
-POST_SYSCALL(inotify_add_watch)(long res, long fd, const void *path,
-                                long mask) {}
+POST_SYSCALL(inotify_add_watch)
+(long res, long fd, const void *path, long mask) {}
 
 PRE_SYSCALL(inotify_rm_watch)(long fd, long wd) {}
 
@@ -2458,8 +2689,10 @@ PRE_SYSCALL(spu_run)(long fd, void *unpc, void *ustatus) {}
 
 POST_SYSCALL(spu_run)(long res, long fd, unsigned *unpc, unsigned *ustatus) {
   if (res >= 0) {
-    if (unpc) POST_WRITE(unpc, sizeof(*unpc));
-    if (ustatus) POST_WRITE(ustatus, sizeof(*ustatus));
+    if (unpc)
+      POST_WRITE(unpc, sizeof(*unpc));
+    if (ustatus)
+      POST_WRITE(ustatus, sizeof(*ustatus));
   }
 }
 
@@ -2468,8 +2701,8 @@ PRE_SYSCALL(spu_create)(const void *name, long flags, long mode, long fd) {
     PRE_READ(name, __sanitizer::internal_strlen((const char *)name) + 1);
 }
 
-POST_SYSCALL(spu_create)(long res, const void *name, long flags, long mode,
-                         long fd) {}
+POST_SYSCALL(spu_create)
+(long res, const void *name, long flags, long mode, long fd) {}
 
 PRE_SYSCALL(mknodat)(long dfd, const void *filename, long mode, long dev) {
   if (filename)
@@ -2477,8 +2710,8 @@ PRE_SYSCALL(mknodat)(long dfd, const void *filename, long mode, long dev) {
              __sanitizer::internal_strlen((const char *)filename) + 1);
 }
 
-POST_SYSCALL(mknodat)(long res, long dfd, const void *filename, long mode,
-                      long dev) {}
+POST_SYSCALL(mknodat)
+(long res, long dfd, const void *filename, long mode, long dev) {}
 
 PRE_SYSCALL(mkdirat)(long dfd, const void *pathname, long mode) {
   if (pathname)
@@ -2503,30 +2736,33 @@ PRE_SYSCALL(symlinkat)(const void *oldname, long newdfd, const void *newname) {
     PRE_READ(newname, __sanitizer::internal_strlen((const char *)newname) + 1);
 }
 
-POST_SYSCALL(symlinkat)(long res, const void *oldname, long newdfd,
-                        const void *newname) {}
+POST_SYSCALL(symlinkat)
+(long res, const void *oldname, long newdfd, const void *newname) {}
 
-PRE_SYSCALL(linkat)(long olddfd, const void *oldname, long newdfd,
-                    const void *newname, long flags) {
+PRE_SYSCALL(linkat)
+(long olddfd, const void *oldname, long newdfd, const void *newname,
+ long flags) {
   if (oldname)
     PRE_READ(oldname, __sanitizer::internal_strlen((const char *)oldname) + 1);
   if (newname)
     PRE_READ(newname, __sanitizer::internal_strlen((const char *)newname) + 1);
 }
 
-POST_SYSCALL(linkat)(long res, long olddfd, const void *oldname, long newdfd,
-                     const void *newname, long flags) {}
+POST_SYSCALL(linkat)
+(long res, long olddfd, const void *oldname, long newdfd, const void *newname,
+ long flags) {}
 
-PRE_SYSCALL(renameat)(long olddfd, const void *oldname, long newdfd,
-                      const void *newname) {
+PRE_SYSCALL(renameat)
+(long olddfd, const void *oldname, long newdfd, const void *newname) {
   if (oldname)
     PRE_READ(oldname, __sanitizer::internal_strlen((const char *)oldname) + 1);
   if (newname)
     PRE_READ(newname, __sanitizer::internal_strlen((const char *)newname) + 1);
 }
 
-POST_SYSCALL(renameat)(long res, long olddfd, const void *oldname, long newdfd,
-                       const void *newname) {}
+POST_SYSCALL(renameat)
+(long res, long olddfd, const void *oldname, long newdfd, const void *newname) {
+}
 
 PRE_SYSCALL(futimesat)(long dfd, const void *filename, void *utimes) {
   if (filename)
@@ -2534,10 +2770,11 @@ PRE_SYSCALL(futimesat)(long dfd, const void *filename, void *utimes) {
              __sanitizer::internal_strlen((const char *)filename) + 1);
 }
 
-POST_SYSCALL(futimesat)(long res, long dfd, const void *filename,
-                        void *utimes) {
+POST_SYSCALL(futimesat)
+(long res, long dfd, const void *filename, void *utimes) {
   if (res >= 0) {
-    if (utimes) POST_WRITE(utimes, timeval_sz);
+    if (utimes)
+      POST_WRITE(utimes, timeval_sz);
   }
 }
 
@@ -2557,15 +2794,15 @@ PRE_SYSCALL(fchmodat)(long dfd, const void *filename, long mode) {
 
 POST_SYSCALL(fchmodat)(long res, long dfd, const void *filename, long mode) {}
 
-PRE_SYSCALL(fchownat)(long dfd, const void *filename, long user, long group,
-                      long flag) {
+PRE_SYSCALL(fchownat)
+(long dfd, const void *filename, long user, long group, long flag) {
   if (filename)
     PRE_READ(filename,
              __sanitizer::internal_strlen((const char *)filename) + 1);
 }
 
-POST_SYSCALL(fchownat)(long res, long dfd, const void *filename, long user,
-                       long group, long flag) {}
+POST_SYSCALL(fchownat)
+(long res, long dfd, const void *filename, long user, long group, long flag) {}
 
 PRE_SYSCALL(openat)(long dfd, const void *filename, long flags, long mode) {
   if (filename)
@@ -2573,34 +2810,36 @@ PRE_SYSCALL(openat)(long dfd, const void *filename, long flags, long mode) {
              __sanitizer::internal_strlen((const char *)filename) + 1);
 }
 
-POST_SYSCALL(openat)(long res, long dfd, const void *filename, long flags,
-                     long mode) {}
+POST_SYSCALL(openat)
+(long res, long dfd, const void *filename, long flags, long mode) {}
 
-PRE_SYSCALL(newfstatat)(long dfd, const void *filename, void *statbuf,
-                        long flag) {
+PRE_SYSCALL(newfstatat)
+(long dfd, const void *filename, void *statbuf, long flag) {
   if (filename)
     PRE_READ(filename,
              __sanitizer::internal_strlen((const char *)filename) + 1);
 }
 
-POST_SYSCALL(newfstatat)(long res, long dfd, const void *filename,
-                         void *statbuf, long flag) {
+POST_SYSCALL(newfstatat)
+(long res, long dfd, const void *filename, void *statbuf, long flag) {
   if (res >= 0) {
-    if (statbuf) POST_WRITE(statbuf, struct_kernel_stat_sz);
+    if (statbuf)
+      POST_WRITE(statbuf, struct_kernel_stat_sz);
   }
 }
 
-PRE_SYSCALL(fstatat64)(long dfd, const void *filename, void *statbuf,
-                       long flag) {
+PRE_SYSCALL(fstatat64)
+(long dfd, const void *filename, void *statbuf, long flag) {
   if (filename)
     PRE_READ(filename,
              __sanitizer::internal_strlen((const char *)filename) + 1);
 }
 
-POST_SYSCALL(fstatat64)(long res, long dfd, const void *filename, void *statbuf,
-                        long flag) {
+POST_SYSCALL(fstatat64)
+(long res, long dfd, const void *filename, void *statbuf, long flag) {
   if (res >= 0) {
-    if (statbuf) POST_WRITE(statbuf, struct_kernel_stat64_sz);
+    if (statbuf)
+      POST_WRITE(statbuf, struct_kernel_stat64_sz);
   }
 }
 
@@ -2609,25 +2848,26 @@ PRE_SYSCALL(readlinkat)(long dfd, const void *path, void *buf, long bufsiz) {
     PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);
 }
 
-POST_SYSCALL(readlinkat)(long res, long dfd, const void *path, void *buf,
-                         long bufsiz) {
+POST_SYSCALL(readlinkat)
+(long res, long dfd, const void *path, void *buf, long bufsiz) {
   if (res >= 0) {
     if (buf)
       POST_WRITE(buf, __sanitizer::internal_strlen((const char *)buf) + 1);
   }
 }
 
-PRE_SYSCALL(utimensat)(long dfd, const void *filename, void *utimes,
-                       long flags) {
+PRE_SYSCALL(utimensat)
+(long dfd, const void *filename, void *utimes, long flags) {
   if (filename)
     PRE_READ(filename,
              __sanitizer::internal_strlen((const char *)filename) + 1);
 }
 
-POST_SYSCALL(utimensat)(long res, long dfd, const void *filename, void *utimes,
-                        long flags) {
+POST_SYSCALL(utimensat)
+(long res, long dfd, const void *filename, void *utimes, long flags) {
   if (res >= 0) {
-    if (utimes) POST_WRITE(utimes, struct_timespec_sz);
+    if (utimes)
+      POST_WRITE(utimes, struct_timespec_sz);
   }
 }
 
@@ -2635,24 +2875,28 @@ PRE_SYSCALL(unshare)(long unshare_flags) {}
 
 POST_SYSCALL(unshare)(long res, long unshare_flags) {}
 
-PRE_SYSCALL(splice)(long fd_in, void *off_in, long fd_out, void *off_out,
-                    long len, long flags) {}
+PRE_SYSCALL(splice)
+(long fd_in, void *off_in, long fd_out, void *off_out, long len, long flags) {}
 
-POST_SYSCALL(splice)(long res, long fd_in, void *off_in, long fd_out,
-                     void *off_out, long len, long flags) {
+POST_SYSCALL(splice)
+(long res, long fd_in, void *off_in, long fd_out, void *off_out, long len,
+ long flags) {
   if (res >= 0) {
-    if (off_in) POST_WRITE(off_in, sizeof(long long));
-    if (off_out) POST_WRITE(off_out, sizeof(long long));
+    if (off_in)
+      POST_WRITE(off_in, sizeof(long long));
+    if (off_out)
+      POST_WRITE(off_out, sizeof(long long));
   }
 }
 
-PRE_SYSCALL(vmsplice)(long fd, const __sanitizer_iovec *iov, long nr_segs,
-                      long flags) {}
+PRE_SYSCALL(vmsplice)
+(long fd, const __sanitizer_iovec *iov, long nr_segs, long flags) {}
 
-POST_SYSCALL(vmsplice)(long res, long fd, const __sanitizer_iovec *iov,
-                       long nr_segs, long flags) {
+POST_SYSCALL(vmsplice)
+(long res, long fd, const __sanitizer_iovec *iov, long nr_segs, long flags) {
   if (res >= 0) {
-    if (iov) kernel_read_iovec(iov, nr_segs, res);
+    if (iov)
+      kernel_read_iovec(iov, nr_segs, res);
   }
 }
 
@@ -2662,8 +2906,8 @@ POST_SYSCALL(tee)(long res, long fdin, long fdout, long len, long flags) {}
 
 PRE_SYSCALL(get_robust_list)(long pid, void *head_ptr, void *len_ptr) {}
 
-POST_SYSCALL(get_robust_list)(long res, long pid, void *head_ptr,
-                              void *len_ptr) {}
+POST_SYSCALL(get_robust_list)
+(long res, long pid, void *head_ptr, void *len_ptr) {}
 
 PRE_SYSCALL(set_robust_list)(void *head, long len) {}
 
@@ -2673,27 +2917,31 @@ PRE_SYSCALL(getcpu)(void *cpu, void *node, void *cache) {}
 
 POST_SYSCALL(getcpu)(long res, void *cpu, void *node, void *cache) {
   if (res >= 0) {
-    if (cpu) POST_WRITE(cpu, sizeof(unsigned));
-    if (node) POST_WRITE(node, sizeof(unsigned));
+    if (cpu)
+      POST_WRITE(cpu, sizeof(unsigned));
+    if (node)
+      POST_WRITE(node, sizeof(unsigned));
     // The third argument to this system call is nowadays unused.
   }
 }
 
 PRE_SYSCALL(signalfd)(long ufd, void *user_mask, long sizemask) {}
 
-POST_SYSCALL(signalfd)(long res, long ufd, kernel_sigset_t *user_mask,
-                       long sizemask) {
+POST_SYSCALL(signalfd)
+(long res, long ufd, kernel_sigset_t *user_mask, long sizemask) {
   if (res >= 0) {
-    if (user_mask) POST_WRITE(user_mask, sizemask);
+    if (user_mask)
+      POST_WRITE(user_mask, sizemask);
   }
 }
 
 PRE_SYSCALL(signalfd4)(long ufd, void *user_mask, long sizemask, long flags) {}
 
-POST_SYSCALL(signalfd4)(long res, long ufd, kernel_sigset_t *user_mask,
-                        long sizemask, long flags) {
+POST_SYSCALL(signalfd4)
+(long res, long ufd, kernel_sigset_t *user_mask, long sizemask, long flags) {
   if (res >= 0) {
-    if (user_mask) POST_WRITE(user_mask, sizemask);
+    if (user_mask)
+      POST_WRITE(user_mask, sizemask);
   }
 }
 
@@ -2701,15 +2949,17 @@ PRE_SYSCALL(timerfd_create)(long clockid, long flags) {}
 
 POST_SYSCALL(timerfd_create)(long res, long clockid, long flags) {}
 
-PRE_SYSCALL(timerfd_settime)(long ufd, long flags, const void *utmr,
-                             void *otmr) {
-  if (utmr) PRE_READ(utmr, struct_itimerspec_sz);
+PRE_SYSCALL(timerfd_settime)
+(long ufd, long flags, const void *utmr, void *otmr) {
+  if (utmr)
+    PRE_READ(utmr, struct_itimerspec_sz);
 }
 
-POST_SYSCALL(timerfd_settime)(long res, long ufd, long flags, const void *utmr,
-                              void *otmr) {
+POST_SYSCALL(timerfd_settime)
+(long res, long ufd, long flags, const void *utmr, void *otmr) {
   if (res >= 0) {
-    if (otmr) POST_WRITE(otmr, struct_itimerspec_sz);
+    if (otmr)
+      POST_WRITE(otmr, struct_itimerspec_sz);
   }
 }
 
@@ -2717,7 +2967,8 @@ PRE_SYSCALL(timerfd_gettime)(long ufd, void *otmr) {}
 
 POST_SYSCALL(timerfd_gettime)(long res, long ufd, void *otmr) {
   if (res >= 0) {
-    if (otmr) POST_WRITE(otmr, struct_itimerspec_sz);
+    if (otmr)
+      POST_WRITE(otmr, struct_itimerspec_sz);
   }
 }
 
@@ -2735,33 +2986,42 @@ POST_SYSCALL(old_readdir)(long res, long arg0, void *arg1, long arg2) {
   // Missing definition of 'struct old_linux_dirent'.
 }
 
-PRE_SYSCALL(pselect6)(long arg0, __sanitizer___kernel_fd_set *arg1,
-                      __sanitizer___kernel_fd_set *arg2,
                     __sanitizer___kernel_fd_set *arg3, void *arg4,
                     void *arg5) {}
+PRE_SYSCALL(pselect6)
+(long arg0, __sanitizer___kernel_fd_set *arg1,
__sanitizer___kernel_fd_set *arg2, __sanitizer___kernel_fd_set *arg3,
void *arg4, void *arg5) {}
 
-POST_SYSCALL(pselect6)(long res, long arg0, __sanitizer___kernel_fd_set *arg1,
-                       __sanitizer___kernel_fd_set *arg2,
                      __sanitizer___kernel_fd_set *arg3, void *arg4,
                      void *arg5) {
+POST_SYSCALL(pselect6)
+(long res, long arg0, __sanitizer___kernel_fd_set *arg1,
__sanitizer___kernel_fd_set *arg2, __sanitizer___kernel_fd_set *arg3,
void *arg4, void *arg5) {
   if (res >= 0) {
-    if (arg1) POST_WRITE(arg1, sizeof(*arg1));
-    if (arg2) POST_WRITE(arg2, sizeof(*arg2));
-    if (arg3) POST_WRITE(arg3, sizeof(*arg3));
-    if (arg4) POST_WRITE(arg4, struct_timespec_sz);
+    if (arg1)
+      POST_WRITE(arg1, sizeof(*arg1));
+    if (arg2)
+      POST_WRITE(arg2, sizeof(*arg2));
+    if (arg3)
+      POST_WRITE(arg3, sizeof(*arg3));
+    if (arg4)
+      POST_WRITE(arg4, struct_timespec_sz);
   }
 }
 
-PRE_SYSCALL(ppoll)(__sanitizer_pollfd *arg0, long arg1, void *arg2,
-                   const kernel_sigset_t *arg3, long arg4) {
-  if (arg3) PRE_READ(arg3, arg4);
+PRE_SYSCALL(ppoll)
+(__sanitizer_pollfd *arg0, long arg1, void *arg2, const kernel_sigset_t *arg3,
+ long arg4) {
+  if (arg3)
+    PRE_READ(arg3, arg4);
 }
 
-POST_SYSCALL(ppoll)(long res, __sanitizer_pollfd *arg0, long arg1, void *arg2,
-                    const void *arg3, long arg4) {
+POST_SYSCALL(ppoll)
+(long res, __sanitizer_pollfd *arg0, long arg1, void *arg2, const void *arg3,
+ long arg4) {
   if (res >= 0) {
-    if (arg0) POST_WRITE(arg0, sizeof(*arg0));
-    if (arg2) POST_WRITE(arg2, struct_timespec_sz);
+    if (arg0)
+      POST_WRITE(arg0, sizeof(*arg0));
+    if (arg2)
+      POST_WRITE(arg2, struct_timespec_sz);
   }
 }
 
@@ -2769,81 +3029,79 @@ PRE_SYSCALL(syncfs)(long fd) {}
 
 POST_SYSCALL(syncfs)(long res, long fd) {}
 
-PRE_SYSCALL(perf_event_open)(__sanitizer_perf_event_attr *attr_uptr, long pid,
-                             long cpu, long group_fd, long flags) {
-  if (attr_uptr) PRE_READ(attr_uptr, attr_uptr->size);
+PRE_SYSCALL(perf_event_open)
+(__sanitizer_perf_event_attr *attr_uptr, long pid, long cpu, long group_fd,
+ long flags) {
+  if (attr_uptr)
+    PRE_READ(attr_uptr, attr_uptr->size);
 }
 
-POST_SYSCALL(perf_event_open)(long res, __sanitizer_perf_event_attr *attr_uptr,
-                              long pid, long cpu, long group_fd, long flags) {}
+POST_SYSCALL(perf_event_open)
+(long res, __sanitizer_perf_event_attr *attr_uptr, long pid, long cpu,
+ long group_fd, long flags) {}
 
-PRE_SYSCALL(mmap_pgoff)(long addr, long len, long prot, long flags, long fd,
-                        long pgoff) {}
+PRE_SYSCALL(mmap_pgoff)
+(long addr, long len, long prot, long flags, long fd, long pgoff) {}
 
-POST_SYSCALL(mmap_pgoff)(long res, long addr, long len, long prot, long flags,
-                         long fd, long pgoff) {}
+POST_SYSCALL(mmap_pgoff)
+(long res, long addr, long len, long prot, long flags, long fd, long pgoff) {}
 
 PRE_SYSCALL(old_mmap)(void *arg) {}
 
 POST_SYSCALL(old_mmap)(long res, void *arg) {}
 
-PRE_SYSCALL(name_to_handle_at)(long dfd, const void *name, void *handle,
-                               void *mnt_id, long flag) {}
+PRE_SYSCALL(name_to_handle_at)
+(long dfd, const void *name, void *handle, void *mnt_id, long flag) {}
 
-POST_SYSCALL(name_to_handle_at)(long res, long dfd, const void *name,
-                                void *handle, void *mnt_id, long flag) {}
+POST_SYSCALL(name_to_handle_at)
+(long res, long dfd, const void *name, void *handle, void *mnt_id, long flag) {}
 
 PRE_SYSCALL(open_by_handle_at)(long mountdirfd, void *handle, long flags) {}
 
-POST_SYSCALL(open_by_handle_at)(long res, long mountdirfd, void *handle,
-                                long flags) {}
+POST_SYSCALL(open_by_handle_at)
+(long res, long mountdirfd, void *handle, long flags) {}
 
 PRE_SYSCALL(setns)(long fd, long nstype) {}
 
 POST_SYSCALL(setns)(long res, long fd, long nstype) {}
 
-PRE_SYSCALL(process_vm_readv)(long pid, const __sanitizer_iovec *lvec,
-                              long liovcnt, const void *rvec, long riovcnt,
                             long flags) {}
+PRE_SYSCALL(process_vm_readv)
+(long pid, const __sanitizer_iovec *lvec, long liovcnt, const void *rvec,
long riovcnt, long flags) {}
 
-POST_SYSCALL(process_vm_readv)(long res, long pid,
-                               const __sanitizer_iovec *lvec, long liovcnt,
                              const void *rvec, long riovcnt, long flags) {
+POST_SYSCALL(process_vm_readv)
+(long res, long pid, const __sanitizer_iovec *lvec, long liovcnt,
+ const void *rvec, long riovcnt, long flags) {
   if (res >= 0) {
-    if (lvec) kernel_write_iovec(lvec, liovcnt, res);
+    if (lvec)
+      kernel_write_iovec(lvec, liovcnt, res);
   }
 }
 
-PRE_SYSCALL(process_vm_writev)(long pid, const __sanitizer_iovec *lvec,
-                               long liovcnt, const void *rvec, long riovcnt,
                              long flags) {}
+PRE_SYSCALL(process_vm_writev)
+(long pid, const __sanitizer_iovec *lvec, long liovcnt, const void *rvec,
long riovcnt, long flags) {}
 
-POST_SYSCALL(process_vm_writev)(long res, long pid,
-                                const __sanitizer_iovec *lvec, long liovcnt,
                               const void *rvec, long riovcnt, long flags) {
+POST_SYSCALL(process_vm_writev)
+(long res, long pid, const __sanitizer_iovec *lvec, long liovcnt,
+ const void *rvec, long riovcnt, long flags) {
   if (res >= 0) {
-    if (lvec) kernel_read_iovec(lvec, liovcnt, res);
+    if (lvec)
+      kernel_read_iovec(lvec, liovcnt, res);
   }
 }
 
-PRE_SYSCALL(fork)() {
-  COMMON_SYSCALL_PRE_FORK();
-}
+PRE_SYSCALL(fork)() { COMMON_SYSCALL_PRE_FORK(); }
 
-POST_SYSCALL(fork)(long res) {
-  COMMON_SYSCALL_POST_FORK(res);
-}
+POST_SYSCALL(fork)(long res) { COMMON_SYSCALL_POST_FORK(res); }
 
-PRE_SYSCALL(vfork)() {
-  COMMON_SYSCALL_PRE_FORK();
-}
+PRE_SYSCALL(vfork)() { COMMON_SYSCALL_PRE_FORK(); }
 
-POST_SYSCALL(vfork)(long res) {
-  COMMON_SYSCALL_POST_FORK(res);
-}
+POST_SYSCALL(vfork)(long res) { COMMON_SYSCALL_POST_FORK(res); }
 
-PRE_SYSCALL(sigaction)(long signum, const __sanitizer_kernel_sigaction_t *act,
-                       __sanitizer_kernel_sigaction_t *oldact) {
+PRE_SYSCALL(sigaction)
+(long signum, const __sanitizer_kernel_sigaction_t *act,
+ __sanitizer_kernel_sigaction_t *oldact) {
   if (act) {
     PRE_READ(&act->sigaction, sizeof(act->sigaction));
     PRE_READ(&act->sa_flags, sizeof(act->sa_flags));
@@ -2851,15 +3109,16 @@ PRE_SYSCALL(sigaction)(long signum, const __sanitizer_kernel_sigaction_t *act,
   }
 }
 
-POST_SYSCALL(sigaction)(long res, long signum,
-                        const __sanitizer_kernel_sigaction_t *act,
-                        __sanitizer_kernel_sigaction_t *oldact) {
-  if (res >= 0 && oldact) POST_WRITE(oldact, sizeof(*oldact));
+POST_SYSCALL(sigaction)
+(long res, long signum, const __sanitizer_kernel_sigaction_t *act,
+ __sanitizer_kernel_sigaction_t *oldact) {
+  if (res >= 0 && oldact)
+    POST_WRITE(oldact, sizeof(*oldact));
 }
 
-PRE_SYSCALL(rt_sigaction)(long signum,
-                          const __sanitizer_kernel_sigaction_t *act,
                         __sanitizer_kernel_sigaction_t *oldact, SIZE_T sz) {
+PRE_SYSCALL(rt_sigaction)
+(long signum, const __sanitizer_kernel_sigaction_t *act,
+ __sanitizer_kernel_sigaction_t *oldact, SIZE_T sz) {
   if (act) {
     PRE_READ(&act->sigaction, sizeof(act->sigaction));
     PRE_READ(&act->sa_flags, sizeof(act->sa_flags));
@@ -2867,9 +3126,9 @@ PRE_SYSCALL(rt_sigaction)(long signum,
   }
 }
 
-POST_SYSCALL(rt_sigaction)(long res, long signum,
-                           const __sanitizer_kernel_sigaction_t *act,
                          __sanitizer_kernel_sigaction_t *oldact, SIZE_T sz) {
+POST_SYSCALL(rt_sigaction)
+(long res, long signum, const __sanitizer_kernel_sigaction_t *act,
+ __sanitizer_kernel_sigaction_t *oldact, SIZE_T sz) {
   if (res >= 0 && oldact) {
     SIZE_T oldact_sz = ((char *)&oldact->sa_mask) - ((char *)oldact) + sz;
     POST_WRITE(oldact, oldact_sz);
@@ -2906,11 +3165,11 @@ POST_SYSCALL(sigaltstack)(long res, void *ss, void *oss) {
 }
 }  // extern "C"
 
-#undef PRE_SYSCALL
-#undef PRE_READ
-#undef PRE_WRITE
-#undef POST_SYSCALL
-#undef POST_READ
-#undef POST_WRITE
+#  undef PRE_SYSCALL
+#  undef PRE_READ
+#  undef PRE_WRITE
+#  undef POST_SYSCALL
+#  undef POST_READ
+#  undef POST_WRITE
 
 #endif  // SANITIZER_LINUX
index a52db08433e3b4de99c29062ef86770c45e1bc02..1d0dbe592b9372e043c2c2f4b86b041d3af480f1 100644 (file)
@@ -51,6 +51,8 @@ constexpr const char kSancovSinkName[] = "sancov";
 // This class relies on zero-initialization.
 class TracePcGuardController final {
  public:
+  constexpr TracePcGuardController() {}
+
   // For each PC location being tracked, there is a u32 reserved in global
   // data called the "guard".  At startup, we assign each guard slot a
   // unique index into the big results array.  Later during runtime, the
@@ -87,7 +89,7 @@ class TracePcGuardController final {
   }
 
   void Dump() {
-    BlockingMutexLock locked(&setup_lock_);
+    Lock locked(&setup_lock_);
     if (array_) {
       CHECK_NE(vmo_, ZX_HANDLE_INVALID);
 
@@ -114,7 +116,7 @@ class TracePcGuardController final {
   // We can always spare the 32G of address space.
   static constexpr size_t MappingSize = sizeof(uptr) << 32;
 
-  BlockingMutex setup_lock_ = BlockingMutex(LINKER_INITIALIZED);
+  Mutex setup_lock_;
   uptr *array_ = nullptr;
   u32 next_index_ = 0;
   zx_handle_t vmo_ = {};
@@ -123,7 +125,7 @@ class TracePcGuardController final {
   size_t DataSize() const { return next_index_ * sizeof(uintptr_t); }
 
   u32 Setup(u32 num_guards) {
-    BlockingMutexLock locked(&setup_lock_);
+    Lock locked(&setup_lock_);
     DCHECK(common_flags()->coverage);
 
     if (next_index_ == 0) {
index 73ebeb5fa14add217a92fd83af3f25e54a7385bb..56220df2ac18b71f4b5fd363a737197280c056d7 100644 (file)
@@ -73,7 +73,7 @@ static void SanitizerDumpCoverage(const uptr* unsorted_pcs, uptr len) {
     if (!pc) continue;
 
     if (!__sanitizer_get_module_and_offset_for_pc(pc, nullptr, 0, &pcs[i])) {
-      Printf("ERROR: unknown pc 0x%x (may happen if dlclose is used)\n", pc);
+      Printf("ERROR: unknown pc 0x%zx (may happen if dlclose is used)\n", pc);
       continue;
     }
     uptr module_base = pc - pcs[i];
@@ -151,6 +151,55 @@ class TracePcGuardController {
 
 static TracePcGuardController pc_guard_controller;
 
+// A basic default implementation of callbacks for
+// -fsanitize-coverage=inline-8bit-counters,pc-table.
+// Use TOOL_OPTIONS (UBSAN_OPTIONS, etc) to dump the coverage data:
+// * cov_8bit_counters_out=PATH to dump the 8bit counters.
+// * cov_pcs_out=PATH to dump the pc table.
+//
+// Most users will still need to define their own callbacks for greater
+// flexibility.
+namespace SingletonCounterCoverage {
+
+static char *counters_beg, *counters_end;
+static const uptr *pcs_beg, *pcs_end;
+
+static void DumpCoverage() {
+  const char* file_path = common_flags()->cov_8bit_counters_out;
+  if (file_path && internal_strlen(file_path)) {
+    fd_t fd = OpenFile(file_path);
+    FileCloser file_closer(fd);
+    uptr size = counters_end - counters_beg;
+    WriteToFile(fd, counters_beg, size);
+    if (common_flags()->verbosity)
+      __sanitizer::Printf("cov_8bit_counters_out: written %zd bytes to %s\n",
+                          size, file_path);
+  }
+  file_path = common_flags()->cov_pcs_out;
+  if (file_path && internal_strlen(file_path)) {
+    fd_t fd = OpenFile(file_path);
+    FileCloser file_closer(fd);
+    uptr size = (pcs_end - pcs_beg) * sizeof(uptr);
+    WriteToFile(fd, pcs_beg, size);
+    if (common_flags()->verbosity)
+      __sanitizer::Printf("cov_pcs_out: written %zd bytes to %s\n", size,
+                          file_path);
+  }
+}
+
+static void Cov8bitCountersInit(char* beg, char* end) {
+  counters_beg = beg;
+  counters_end = end;
+  Atexit(DumpCoverage);
+}
+
+static void CovPcsInit(const uptr* beg, const uptr* end) {
+  pcs_beg = beg;
+  pcs_end = end;
+}
+
+}  // namespace SingletonCounterCoverage
+
 }  // namespace
 }  // namespace __sancov
 
@@ -191,7 +240,9 @@ SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump() {
 SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_reset() {
   __sancov::pc_guard_controller.Reset();
 }
-// Default empty implementations (weak). Users should redefine them.
+// Default implementations (weak).
+// Either empty or very simple.
+// Most users should redefine them.
 SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp, void) {}
 SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp1, void) {}
 SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp2, void) {}
@@ -206,9 +257,15 @@ SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_div4, void) {}
 SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_div8, void) {}
 SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_gep, void) {}
 SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_indir, void) {}
-SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_8bit_counters_init, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_8bit_counters_init,
+                             char* start, char* end) {
+  __sancov::SingletonCounterCoverage::Cov8bitCountersInit(start, end);
+}
 SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_bool_flag_init, void) {}
-SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_pcs_init, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_pcs_init, const uptr* beg,
+                             const uptr* end) {
+  __sancov::SingletonCounterCoverage::CovPcsInit(beg, end);
+}
 }  // extern "C"
 // Weak definition for code instrumented with -fsanitize-coverage=stack-depth
 // and later linked with code containing a strong definition.
index 0b92dccde4a144c988efed2cbb647477bfb2fb10..5492560df914b774da9da7fc13bd718d6ff9a4b0 100644 (file)
@@ -75,6 +75,20 @@ void ReportFile::ReopenIfNecessary() {
   fd_pid = pid;
 }
 
+static void RecursiveCreateParentDirs(char *path) {
+  if (path[0] == '\0')
+    return;
+  for (int i = 1; path[i] != '\0'; ++i) {
+    char save = path[i];
+    if (!IsPathSeparator(path[i]))
+      continue;
+    path[i] = '\0';
+    /* Some of these will fail, because the directory exists, ignore it. */
+    CreateDir(path);
+    path[i] = save;
+  }
+}
+
 void ReportFile::SetReportPath(const char *path) {
   if (path) {
     uptr len = internal_strlen(path);
@@ -95,6 +109,7 @@ void ReportFile::SetReportPath(const char *path) {
     fd = kStdoutFd;
   } else {
     internal_snprintf(path_prefix, kMaxPathLength, "%s", path);
+    RecursiveCreateParentDirs(path_prefix);
   }
 }
 
index 08671ab67d0f54dd5640961243d57b41386d8de5..3d7916171c1efeb01cf4b42dd757108f064df60c 100644 (file)
@@ -81,6 +81,8 @@ bool FileExists(const char *filename);
 char *FindPathToBinary(const char *name);
 bool IsPathSeparator(const char c);
 bool IsAbsolutePath(const char *path);
+// Returns true on success, false on failure.
+bool CreateDir(const char *pathname);
 // Starts a subprocess and returs its pid.
 // If *_fd parameters are not kInvalidFd their corresponding input/output
 // streams will be redirect to the file. The files will always be closed
index acc71ccd89eea725e030dafa68fa76bb0f0f8126..3ccc6a6fa5377e3cc8d7f58676268ad1a8a913eb 100644 (file)
@@ -138,7 +138,7 @@ inline bool FlagHandler<uptr>::Parse(const char *value) {
 
 template <>
 inline bool FlagHandler<uptr>::Format(char *buffer, uptr size) {
-  uptr num_symbols_should_write = internal_snprintf(buffer, size, "%p", *t_);
+  uptr num_symbols_should_write = internal_snprintf(buffer, size, "0x%zx", *t_);
   return num_symbols_should_write < size;
 }
 
index 3bc44c6b1eb1a12820c0a8ebffec32c57b3bbe67..95da82b1a1dadd77420a20be3328ccf838c4dd74 100644 (file)
@@ -160,6 +160,10 @@ COMMON_FLAG(
 COMMON_FLAG(const char *, coverage_dir, ".",
             "Target directory for coverage dumps. Defaults to the current "
             "directory.")
+COMMON_FLAG(const char *, cov_8bit_counters_out, "",
+    "If non-empty, write 8bit counters to this file. ")
+COMMON_FLAG(const char *, cov_pcs_out, "",
+    "If non-empty, write the coverage pc table to this file. ")
 COMMON_FLAG(bool, full_address_space, false,
             "Sanitize complete address space; "
             "by default kernel area on 32-bit platforms will not be sanitized")
index 65bc398656c936850f16fe11a6ec33c80e59bdfd..de4c985e4e4ee5ed51fa8e5084dbde89482563fe 100644 (file)
@@ -112,47 +112,6 @@ void FutexWake(atomic_uint32_t *p, u32 count) {
   CHECK_EQ(status, ZX_OK);
 }
 
-enum MutexState : int { MtxUnlocked = 0, MtxLocked = 1, MtxSleeping = 2 };
-
-BlockingMutex::BlockingMutex() {
-  // NOTE!  It's important that this use internal_memset, because plain
-  // memset might be intercepted (e.g., actually be __asan_memset).
-  // Defining this so the compiler initializes each field, e.g.:
-  //   BlockingMutex::BlockingMutex() : BlockingMutex(LINKER_INITIALIZED) {}
-  // might result in the compiler generating a call to memset, which would
-  // have the same problem.
-  internal_memset(this, 0, sizeof(*this));
-}
-
-void BlockingMutex::Lock() {
-  CHECK_EQ(owner_, 0);
-  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
-  if (atomic_exchange(m, MtxLocked, memory_order_acquire) == MtxUnlocked)
-    return;
-  while (atomic_exchange(m, MtxSleeping, memory_order_acquire) != MtxUnlocked) {
-    zx_status_t status =
-        _zx_futex_wait(reinterpret_cast<zx_futex_t *>(m), MtxSleeping,
-                       ZX_HANDLE_INVALID, ZX_TIME_INFINITE);
-    if (status != ZX_ERR_BAD_STATE)  // Normal race.
-      CHECK_EQ(status, ZX_OK);
-  }
-}
-
-void BlockingMutex::Unlock() {
-  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
-  u32 v = atomic_exchange(m, MtxUnlocked, memory_order_release);
-  CHECK_NE(v, MtxUnlocked);
-  if (v == MtxSleeping) {
-    zx_status_t status = _zx_futex_wake(reinterpret_cast<zx_futex_t *>(m), 1);
-    CHECK_EQ(status, ZX_OK);
-  }
-}
-
-void BlockingMutex::CheckLocked() const {
-  auto m = reinterpret_cast<atomic_uint32_t const *>(&opaque_storage_);
-  CHECK_NE(MtxUnlocked, atomic_load(m, memory_order_relaxed));
-}
-
 uptr GetPageSize() { return _zx_system_get_page_size(); }
 
 uptr GetMmapGranularity() { return _zx_system_get_page_size(); }
index 576807ea3a6a1f4f0b409eaecfdb6fe3d4fd8c37..9683b97ab91d6b0aab5ca7b34441ef9ce7e65e71 100644 (file)
@@ -1406,7 +1406,7 @@ static void ioctl_table_fill() {
   _(URIO_SEND_COMMAND, READWRITE, struct_urio_command_sz);
   _(URIO_RECV_COMMAND, READWRITE, struct_urio_command_sz);
 #undef _
-} // NOLINT
+}
 
 static bool ioctl_initialized = false;
 
index 0b001c1c483007e3349e576ffa0a40d09d372fbd..1600d31c30c0c428174ba1bede354eea4e716e48 100644 (file)
@@ -111,12 +111,13 @@ extern "C" {
   SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
   void __sanitizer_cov_trace_pc_guard_init(__sanitizer::u32*,
                                            __sanitizer::u32*);
-  SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-  void __sanitizer_cov_8bit_counters_init();
+  SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+  __sanitizer_cov_8bit_counters_init(char *, char *);
   SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
   __sanitizer_cov_bool_flag_init();
   SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
-  __sanitizer_cov_pcs_init();
+  __sanitizer_cov_pcs_init(const __sanitizer::uptr *,
+                           const __sanitizer::uptr *);
 } // extern "C"
 
 #endif  // SANITIZER_INTERFACE_INTERNAL_H
index 84053fec2649a950e36c3d012983eeb6ce86cb65..e97cc9ac0df17ac0abaac959733856ca3d5a793a 100644 (file)
 # define __has_attribute(x) 0
 #endif
 
+#if !defined(__has_cpp_attribute)
+#  define __has_cpp_attribute(x) 0
+#endif
+
 // For portability reasons we do not include stddef.h, stdint.h or any other
 // system header, but we do need some basic types that are not defined
 // in a portable way by the language itself.
@@ -135,8 +139,13 @@ namespace __sanitizer {
 typedef unsigned long long uptr;
 typedef signed long long sptr;
 #else
+#  if (SANITIZER_WORDSIZE == 64) || SANITIZER_MAC || SANITIZER_WINDOWS
 typedef unsigned long uptr;
 typedef signed long sptr;
+#  else
+typedef unsigned int uptr;
+typedef signed int sptr;
+#  endif
 #endif  // defined(_WIN64)
 #if defined(__x86_64__)
 // Since x32 uses ILP32 data model in 64-bit hardware mode, we must use
@@ -168,10 +177,9 @@ typedef long pid_t;
 typedef int pid_t;
 #endif
 
-#if SANITIZER_FREEBSD || SANITIZER_NETBSD || \
-    SANITIZER_MAC || \
+#if SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_MAC ||             \
     (SANITIZER_SOLARIS && (defined(_LP64) || _FILE_OFFSET_BITS == 64)) || \
-    (SANITIZER_LINUX && defined(__x86_64__))
+    (SANITIZER_LINUX && (defined(__x86_64__) || defined(__hexagon__)))
 typedef u64 OFF_T;
 #else
 typedef uptr OFF_T;
@@ -250,6 +258,12 @@ typedef u64 tid_t;
 # define NOEXCEPT throw()
 #endif
 
+#if __has_cpp_attribute(clang::fallthrough)
+#  define FALLTHROUGH [[clang::fallthrough]]
+#else
+#  define FALLTHROUGH
+#endif
+
 // Unaligned versions of basic types.
 typedef ALIGNED(1) u16 uu16;
 typedef ALIGNED(1) u32 uu32;
@@ -277,14 +291,16 @@ void NORETURN CheckFailed(const char *file, int line, const char *cond,
                           u64 v1, u64 v2);
 
 // Check macro
-#define RAW_CHECK_MSG(expr, msg) do { \
-  if (UNLIKELY(!(expr))) { \
-    RawWrite(msg); \
-    Die(); \
-  } \
-} while (0)
+#define RAW_CHECK_MSG(expr, msg, ...)          \
+  do {                                         \
+    if (UNLIKELY(!(expr))) {                   \
+      const char* msgs[] = {msg, __VA_ARGS__}; \
+      for (const char* m : msgs) RawWrite(m);  \
+      Die();                                   \
+    }                                          \
+  } while (0)
 
-#define RAW_CHECK(expr) RAW_CHECK_MSG(expr, #expr)
+#define RAW_CHECK(expr, ...) RAW_CHECK_MSG(expr, #expr "\n", __VA_ARGS__)
 
 #define CHECK_IMPL(c1, op, c2) \
   do { \
@@ -409,8 +425,14 @@ inline void Trap() {
     (void)enable_fp;                      \
   } while (0)
 
-constexpr u32 kInvalidTid = -1;
-constexpr u32 kMainTid = 0;
+// Internal thread identifier allocated by ThreadRegistry.
+typedef u32 Tid;
+constexpr Tid kInvalidTid = -1;
+constexpr Tid kMainTid = 0;
+
+// Stack depot stack identifier.
+typedef u32 StackID;
+const StackID kInvalidStackID = 0;
 
 }  // namespace __sanitizer
 
index 4bc04b486870c6e459ef969e67bcec498c991b02..d3076f0da48914f2bb41cb44929fd4af513b9dac 100644 (file)
@@ -258,6 +258,18 @@ s64 internal_simple_strtoll(const char *nptr, const char **endptr, int base) {
   }
 }
 
+uptr internal_wcslen(const wchar_t *s) {
+  uptr i = 0;
+  while (s[i]) i++;
+  return i;
+}
+
+uptr internal_wcsnlen(const wchar_t *s, uptr maxlen) {
+  uptr i = 0;
+  while (i < maxlen && s[i]) i++;
+  return i;
+}
+
 bool mem_is_zero(const char *beg, uptr size) {
   CHECK_LE(size, 1ULL << FIRST_32_SECOND_64(30, 40));  // Sanity check.
   const char *end = beg + size;
index bcb81ebbc803edfb1dec1e022e1e114266f58b5e..39a212665d0aef73bb9d5b67339887f4b2aa0318 100644 (file)
@@ -49,7 +49,10 @@ char *internal_strrchr(const char *s, int c);
 char *internal_strstr(const char *haystack, const char *needle);
 // Works only for base=10 and doesn't set errno.
 s64 internal_simple_strtoll(const char *nptr, const char **endptr, int base);
-int internal_snprintf(char *buffer, uptr length, const char *format, ...);
+int internal_snprintf(char *buffer, uptr length, const char *format, ...)
+    FORMAT(3, 4);
+uptr internal_wcslen(const wchar_t *s);
+uptr internal_wcsnlen(const wchar_t *s, uptr maxlen);
 
 // Return true if all bytes in [mem, mem+size) are zero.
 // Optimized for the case when the result is true.
index 431efc574fa6595ed30a0b1be0d5a27bf32ddbb7..caaba3155a7becfb97bd9fe669bd0138d37d4c88 100644 (file)
@@ -22,9 +22,9 @@ LibIgnore::LibIgnore(LinkerInitialized) {
 }
 
 void LibIgnore::AddIgnoredLibrary(const char *name_templ) {
-  BlockingMutexLock lock(&mutex_);
+  Lock lock(&mutex_);
   if (count_ >= kMaxLibs) {
-    Report("%s: too many ignored libraries (max: %d)\n", SanitizerToolName,
+    Report("%s: too many ignored libraries (max: %zu)\n", SanitizerToolName,
            kMaxLibs);
     Die();
   }
@@ -36,7 +36,7 @@ void LibIgnore::AddIgnoredLibrary(const char *name_templ) {
 }
 
 void LibIgnore::OnLibraryLoaded(const char *name) {
-  BlockingMutexLock lock(&mutex_);
+  Lock lock(&mutex_);
   // Try to match suppressions with symlink target.
   InternalMmapVector<char> buf(kMaxPathLength);
   if (name && internal_readlink(name, buf.data(), buf.size() - 1) > 0 &&
@@ -84,7 +84,6 @@ void LibIgnore::OnLibraryLoaded(const char *name) {
         ignored_code_ranges_[idx].begin = range.beg;
         ignored_code_ranges_[idx].end = range.end;
         atomic_store(&ignored_ranges_count_, idx + 1, memory_order_release);
-        atomic_store(&enabled_, 1, memory_order_release);
         break;
       }
     }
@@ -106,7 +105,7 @@ void LibIgnore::OnLibraryLoaded(const char *name) {
           continue;
         if (IsPcInstrumented(range.beg) && IsPcInstrumented(range.end - 1))
           continue;
-        VReport(1, "Adding instrumented range %p-%p from library '%s'\n",
+        VReport(1, "Adding instrumented range 0x%zx-0x%zx from library '%s'\n",
                 range.beg, range.end, mod.full_name());
         const uptr idx =
             atomic_load(&instrumented_ranges_count_, memory_order_relaxed);
@@ -115,7 +114,6 @@ void LibIgnore::OnLibraryLoaded(const char *name) {
         instrumented_code_ranges_[idx].end = range.end;
         atomic_store(&instrumented_ranges_count_, idx + 1,
                      memory_order_release);
-        atomic_store(&enabled_, 1, memory_order_release);
       }
     }
   }
@@ -125,29 +123,6 @@ void LibIgnore::OnLibraryUnloaded() {
   OnLibraryLoaded(nullptr);
 }
 
-bool LibIgnore::IsIgnoredSlow(uptr pc, bool *pc_in_ignored_lib) const {
-  const uptr n = atomic_load(&ignored_ranges_count_, memory_order_acquire);
-  for (uptr i = 0; i < n; i++) {
-    if (IsInRange(pc, ignored_code_ranges_[i])) {
-      *pc_in_ignored_lib = true;
-      return true;
-    }
-  }
-  *pc_in_ignored_lib = false;
-  if (track_instrumented_libs_ && !IsPcInstrumented(pc))
-    return true;
-  return false;
-}
-
-bool LibIgnore::IsPcInstrumented(uptr pc) const {
-  const uptr n = atomic_load(&instrumented_ranges_count_, memory_order_acquire);
-  for (uptr i = 0; i < n; i++) {
-    if (IsInRange(pc, instrumented_code_ranges_[i]))
-      return true;
-  }
-  return false;
-}
-
 } // namespace __sanitizer
 
 #endif  // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_MAC ||
index 85452e57ba3c2d32041d373f4b5a363f21ff6f9c..18e4d83ed77fb8d3a9db0f12e974b308fc3ac948 100644 (file)
@@ -45,6 +45,9 @@ class LibIgnore {
   // "pc_in_ignored_lib" if the PC is in an ignored library, false otherwise.
   bool IsIgnored(uptr pc, bool *pc_in_ignored_lib) const;
 
+  // Checks whether the provided PC belongs to an instrumented module.
+  bool IsPcInstrumented(uptr pc) const;
+
  private:
   struct Lib {
     char *templ;
@@ -58,10 +61,6 @@ class LibIgnore {
     uptr end;
   };
 
-  // Checks whether the provided PC belongs to an instrumented module.
-  bool IsPcInstrumented(uptr pc) const;
-  bool IsIgnoredSlow(uptr pc, bool *pc_in_ignored_lib) const;
-
   inline bool IsInRange(uptr pc, const LibCodeRange &range) const {
     return (pc >= range.begin && pc < range.end);
   }
@@ -71,8 +70,6 @@ class LibIgnore {
   static const uptr kMaxLibs = 1024;
 
   // Hot part:
-  atomic_uintptr_t enabled_;
-
   atomic_uintptr_t ignored_ranges_count_;
   LibCodeRange ignored_code_ranges_[kMaxIgnoredRanges];
 
@@ -80,7 +77,7 @@ class LibIgnore {
   LibCodeRange instrumented_code_ranges_[kMaxInstrumentedRanges];
 
   // Cold part:
-  BlockingMutex mutex_;
+  Mutex mutex_;
   uptr count_;
   Lib libs_[kMaxLibs];
   bool track_instrumented_libs_;
@@ -90,11 +87,27 @@ class LibIgnore {
   void operator = (const LibIgnore&);  // not implemented
 };
 
-ALWAYS_INLINE
-bool LibIgnore::IsIgnored(uptr pc, bool *pc_in_ignored_lib) const {
-  if (LIKELY(atomic_load(&enabled_, memory_order_acquire) == 0))
-    return false;
-  return IsIgnoredSlow(pc, pc_in_ignored_lib);
+inline bool LibIgnore::IsIgnored(uptr pc, bool *pc_in_ignored_lib) const {
+  const uptr n = atomic_load(&ignored_ranges_count_, memory_order_acquire);
+  for (uptr i = 0; i < n; i++) {
+    if (IsInRange(pc, ignored_code_ranges_[i])) {
+      *pc_in_ignored_lib = true;
+      return true;
+    }
+  }
+  *pc_in_ignored_lib = false;
+  if (track_instrumented_libs_ && !IsPcInstrumented(pc))
+    return true;
+  return false;
+}
+
+inline bool LibIgnore::IsPcInstrumented(uptr pc) const {
+  const uptr n = atomic_load(&instrumented_ranges_count_, memory_order_acquire);
+  for (uptr i = 0; i < n; i++) {
+    if (IsInRange(pc, instrumented_code_ranges_[i]))
+      return true;
+  }
+  return false;
 }
 
 }  // namespace __sanitizer
index 9b7d87eb85e1ac7fe945c3373951172bc3037351..ea3e5bdbc754a15a10446d6d142ec372715f9ea9 100644 (file)
@@ -158,9 +158,11 @@ namespace __sanitizer {
 #include "sanitizer_syscall_linux_aarch64.inc"
 #elif SANITIZER_LINUX && defined(__arm__)
 #include "sanitizer_syscall_linux_arm.inc"
-#else
-#include "sanitizer_syscall_generic.inc"
-#endif
+#  elif SANITIZER_LINUX && defined(__hexagon__)
+#    include "sanitizer_syscall_linux_hexagon.inc"
+#  else
+#    include "sanitizer_syscall_generic.inc"
+#  endif
 
 // --------------- sanitizer_libc.h
 #if !SANITIZER_SOLARIS && !SANITIZER_NETBSD
@@ -415,7 +417,7 @@ uptr internal_unlink(const char *path) {
 }
 
 uptr internal_rename(const char *oldpath, const char *newpath) {
-#if defined(__riscv)
+#if defined(__riscv) && defined(__linux__)
   return internal_syscall(SYSCALL(renameat2), AT_FDCWD, (uptr)oldpath, AT_FDCWD,
                           (uptr)newpath, 0);
 #elif SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
@@ -659,48 +661,6 @@ void FutexWake(atomic_uint32_t *p, u32 count) {
 #    endif
 }
 
-enum { MtxUnlocked = 0, MtxLocked = 1, MtxSleeping = 2 };
-
-BlockingMutex::BlockingMutex() {
-  internal_memset(this, 0, sizeof(*this));
-}
-
-void BlockingMutex::Lock() {
-  CHECK_EQ(owner_, 0);
-  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
-  if (atomic_exchange(m, MtxLocked, memory_order_acquire) == MtxUnlocked)
-    return;
-  while (atomic_exchange(m, MtxSleeping, memory_order_acquire) != MtxUnlocked) {
-#if SANITIZER_FREEBSD
-    _umtx_op(m, UMTX_OP_WAIT_UINT, MtxSleeping, 0, 0);
-#elif SANITIZER_NETBSD
-    sched_yield(); /* No userspace futex-like synchronization */
-#else
-    internal_syscall(SYSCALL(futex), (uptr)m, FUTEX_WAIT_PRIVATE, MtxSleeping,
-                     0, 0, 0);
-#endif
-  }
-}
-
-void BlockingMutex::Unlock() {
-  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
-  u32 v = atomic_exchange(m, MtxUnlocked, memory_order_release);
-  CHECK_NE(v, MtxUnlocked);
-  if (v == MtxSleeping) {
-#if SANITIZER_FREEBSD
-    _umtx_op(m, UMTX_OP_WAKE, 1, 0, 0);
-#elif SANITIZER_NETBSD
-                   /* No userspace futex-like synchronization */
-#else
-    internal_syscall(SYSCALL(futex), (uptr)m, FUTEX_WAKE_PRIVATE, 1, 0, 0, 0);
-#endif
-  }
-}
-
-void BlockingMutex::CheckLocked() const {
-  auto m = reinterpret_cast<atomic_uint32_t const *>(&opaque_storage_);
-  CHECK_NE(MtxUnlocked, atomic_load(m, memory_order_relaxed));
-}
 #  endif  // !SANITIZER_SOLARIS
 
 // ----------------- sanitizer_linux.h
@@ -1217,7 +1177,8 @@ void ForEachMappedRegion(link_map *map, void (*cb)(const void *, uptr)) {
 }
 #endif
 
-#if defined(__x86_64__) && SANITIZER_LINUX
+#if SANITIZER_LINUX
+#if defined(__x86_64__)
 // We cannot use glibc's clone wrapper, because it messes with the child
 // task's TLS. It writes the PID and TID of the child task to its thread
 // descriptor, but in our case the child task shares the thread descriptor with
@@ -1556,7 +1517,7 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
              : "cr0", "cr1", "memory", "ctr", "r0", "r27", "r28", "r29");
   return res;
 }
-#elif defined(__i386__) && SANITIZER_LINUX
+#elif defined(__i386__)
 uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
                     int *parent_tidptr, void *newtls, int *child_tidptr) {
   int res;
@@ -1621,7 +1582,7 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
                        : "memory");
   return res;
 }
-#elif defined(__arm__) && SANITIZER_LINUX
+#elif defined(__arm__)
 uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
                     int *parent_tidptr, void *newtls, int *child_tidptr) {
   unsigned int res;
@@ -1687,7 +1648,8 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
                        : "memory");
   return res;
 }
-#endif  // defined(__x86_64__) && SANITIZER_LINUX
+#endif
+#endif  // SANITIZER_LINUX
 
 #if SANITIZER_LINUX
 int internal_uname(struct utsname *buf) {
@@ -1917,7 +1879,11 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
   u32 instr = *(u32 *)pc;
   return (instr >> 21) & 1 ? WRITE: READ;
 #elif defined(__riscv)
+#if SANITIZER_FREEBSD
+  unsigned long pc = ucontext->uc_mcontext.mc_gpregs.gp_sepc;
+#else
   unsigned long pc = ucontext->uc_mcontext.__gregs[REG_PC];
+#endif
   unsigned faulty_instruction = *(uint16_t *)pc;
 
 #if defined(__riscv_compressed)
@@ -2136,12 +2102,23 @@ static void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
   *sp = ucontext->uc_mcontext.gregs[15];
 #elif defined(__riscv)
   ucontext_t *ucontext = (ucontext_t*)context;
+#    if SANITIZER_FREEBSD
+  *pc = ucontext->uc_mcontext.mc_gpregs.gp_sepc;
+  *bp = ucontext->uc_mcontext.mc_gpregs.gp_s[0];
+  *sp = ucontext->uc_mcontext.mc_gpregs.gp_sp;
+#    else
   *pc = ucontext->uc_mcontext.__gregs[REG_PC];
   *bp = ucontext->uc_mcontext.__gregs[REG_S0];
   *sp = ucontext->uc_mcontext.__gregs[REG_SP];
-#else
-# error "Unsupported arch"
-#endif
+#    endif
+#  elif defined(__hexagon__)
+  ucontext_t *ucontext = (ucontext_t *)context;
+  *pc = ucontext->uc_mcontext.pc;
+  *bp = ucontext->uc_mcontext.r30;
+  *sp = ucontext->uc_mcontext.r29;
+#  else
+#    error "Unsupported arch"
+#  endif
 }
 
 void SignalContext::InitPcSpBp() { GetPcSpBp(context, &pc, &sp, &bp); }
index fc5619e4b37cb3a3db0bb5236169e25b59f436b3..7ce9e25da342d8d70312d956a68b137f29326eb8 100644 (file)
@@ -759,13 +759,9 @@ u32 GetNumberOfCPUs() {
 #elif SANITIZER_SOLARIS
   return sysconf(_SC_NPROCESSORS_ONLN);
 #else
-#if defined(CPU_COUNT)
   cpu_set_t CPUs;
   CHECK_EQ(sched_getaffinity(0, sizeof(cpu_set_t), &CPUs), 0);
   return CPU_COUNT(&CPUs);
-#else
-  return 1;
-#endif
 #endif
 }
 
index 0e19c4d4a80110d999724c215da52394c4c8a513..a47cfc945cd855b30d6f10b2aacb03705ffee15e 100644 (file)
@@ -17,7 +17,7 @@
 // instantiated with the `LocalAddressSpaceView` type. This type is used to
 // load any pointers in instance methods. This implementation is effectively
 // a no-op. When an object is to be used in an out-of-process manner it is
-// instansiated with the `RemoteAddressSpaceView` type.
+// instantiated with the `RemoteAddressSpaceView` type.
 //
 // By making `AddressSpaceView` a template parameter of an object, it can
 // change its implementation at compile time which has no run time overhead.
index 0aafbdbc50cdc8eb6da7fa5e816b5df1e17a6689..b8839f197d2c1fb89f032258692ea399a6421d5f 100644 (file)
@@ -37,7 +37,7 @@
 extern char **environ;
 #endif
 
-#if defined(__has_include) && __has_include(<os/trace.h>) && defined(__BLOCKS__)
+#if defined(__has_include) && __has_include(<os/trace.h>)
 #define SANITIZER_OS_TRACE 1
 #include <os/trace.h>
 #else
@@ -70,15 +70,7 @@ extern "C" {
 #include <mach/mach_time.h>
 #include <mach/vm_statistics.h>
 #include <malloc/malloc.h>
-#if defined(__has_builtin) && __has_builtin(__builtin_os_log_format)
-# include <os/log.h>
-#else
-   /* Without support for __builtin_os_log_format, fall back to the older
-      method.  */
-# define OS_LOG_DEFAULT 0
-# define os_log_error(A,B,C) \
-  asl_log(nullptr, nullptr, ASL_LEVEL_ERR, "%s", (C));
-#endif
+#include <os/log.h>
 #include <pthread.h>
 #include <sched.h>
 #include <signal.h>
@@ -524,25 +516,6 @@ void FutexWait(atomic_uint32_t *p, u32 cmp) {
 
 void FutexWake(atomic_uint32_t *p, u32 count) {}
 
-BlockingMutex::BlockingMutex() {
-  internal_memset(this, 0, sizeof(*this));
-}
-
-void BlockingMutex::Lock() {
-  CHECK(sizeof(OSSpinLock) <= sizeof(opaque_storage_));
-  CHECK_EQ(OS_SPINLOCK_INIT, 0);
-  CHECK_EQ(owner_, 0);
-  OSSpinLockLock((OSSpinLock*)&opaque_storage_);
-}
-
-void BlockingMutex::Unlock() {
-  OSSpinLockUnlock((OSSpinLock*)&opaque_storage_);
-}
-
-void BlockingMutex::CheckLocked() const {
-  CHECK_NE(*(OSSpinLock*)&opaque_storage_, 0);
-}
-
 u64 NanoTime() {
   timeval tv;
   internal_memset(&tv, 0, sizeof(tv));
@@ -792,8 +765,8 @@ void *internal_start_thread(void *(*func)(void *arg), void *arg) {
 void internal_join_thread(void *th) { pthread_join((pthread_t)th, 0); }
 
 #if !SANITIZER_GO
-static BlockingMutex syslog_lock(LINKER_INITIALIZED);
-#endif
+static Mutex syslog_lock;
+#  endif
 
 void WriteOneLineToSyslog(const char *s) {
 #if !SANITIZER_GO
@@ -808,7 +781,7 @@ void WriteOneLineToSyslog(const char *s) {
 
 // buffer to store crash report application information
 static char crashreporter_info_buff[__sanitizer::kErrorMessageBufferSize] = {};
-static BlockingMutex crashreporter_info_mutex(LINKER_INITIALIZED);
+static Mutex crashreporter_info_mutex;
 
 extern "C" {
 // Integrate with crash reporter libraries.
@@ -838,7 +811,7 @@ asm(".desc ___crashreporter_info__, 0x10");
 }  // extern "C"
 
 static void CRAppendCrashLogMessage(const char *msg) {
-  BlockingMutexLock l(&crashreporter_info_mutex);
+  Lock l(&crashreporter_info_mutex);
   internal_strlcat(crashreporter_info_buff, msg,
                    sizeof(crashreporter_info_buff));
 #if HAVE_CRASHREPORTERCLIENT_H
@@ -882,7 +855,7 @@ void LogFullErrorReport(const char *buffer) {
   // the reporting thread holds the thread registry mutex, and asl_log waits
   // for GCD to dispatch a new thread, the process will deadlock, because the
   // pthread_create wrapper needs to acquire the lock as well.
-  BlockingMutexLock l(&syslog_lock);
+  Lock l(&syslog_lock);
   if (common_flags()->log_to_syslog)
     WriteToSyslog(buffer);
 
index 96a5986a47a34e7ca2ee59d6553c0bf9bb60ba16..0b6af5a3c0edc649c4d65dfd1a6ca1bce13aa9b9 100644 (file)
 
 #include "sanitizer_common.h"
 #include "sanitizer_platform.h"
-
-/* TARGET_OS_OSX is not present in SDKs before Darwin16 (macOS 10.12) use
-   TARGET_OS_MAC (we have no support for iOS in any form for these versions,
-   so there's no ambiguity).  */
-#if !defined(TARGET_OS_OSX) && TARGET_OS_MAC
-# define TARGET_OS_OSX 1
-#endif
-
-/* Other TARGET_OS_xxx are not present on earlier versions, define them to
-   0 (we have no support for them; they are not valid targets anyway).  */
-#ifndef TARGET_OS_IOS
-#define TARGET_OS_IOS 0
-#endif
-#ifndef TARGET_OS_TV
-#define TARGET_OS_TV 0
-#endif
-#ifndef TARGET_OS_WATCH
-#define TARGET_OS_WATCH 0
-#endif
-
 #if SANITIZER_MAC
 #include "sanitizer_posix.h"
 
index bc2d83c42c1a371a460adc3b3744eb66c7313f08..1c177d8e7cca3aa3948cd984a22035fbc544f098 100644 (file)
 
 namespace __sanitizer {
 
+void StaticSpinMutex::LockSlow() {
+  for (int i = 0;; i++) {
+    if (i < 100)
+      proc_yield(1);
+    else
+      internal_sched_yield();
+    if (atomic_load(&state_, memory_order_relaxed) == 0 &&
+        atomic_exchange(&state_, 1, memory_order_acquire) == 0)
+      return;
+  }
+}
+
 void Semaphore::Wait() {
   u32 count = atomic_load(&state_, memory_order_relaxed);
   for (;;) {
@@ -36,4 +48,178 @@ void Semaphore::Post(u32 count) {
   FutexWake(&state_, count);
 }
 
+#if SANITIZER_CHECK_DEADLOCKS
+// An empty mutex meta table, it effectively disables deadlock detection.
+// Each tool can override the table to define own mutex hierarchy and
+// enable deadlock detection.
+// The table defines a static mutex type hierarchy (what mutex types can be locked
+// under what mutex types). This table is checked to be acyclic and then
+// actual mutex lock/unlock operations are checked to adhere to this hierarchy.
+// The checking happens on mutex types rather than on individual mutex instances
+// because doing it on mutex instances will both significantly complicate
+// the implementation, worsen performance and memory overhead and is mostly
+// unnecessary (we almost never lock multiple mutexes of the same type recursively).
+static constexpr int kMutexTypeMax = 20;
+SANITIZER_WEAK_ATTRIBUTE MutexMeta mutex_meta[kMutexTypeMax] = {};
+SANITIZER_WEAK_ATTRIBUTE void PrintMutexPC(uptr pc) {}
+static StaticSpinMutex mutex_meta_mtx;
+static int mutex_type_count = -1;
+// Adjacency matrix of what mutexes can be locked under what mutexes.
+static bool mutex_can_lock[kMutexTypeMax][kMutexTypeMax];
+// Mutex types with MutexMulti mark.
+static bool mutex_multi[kMutexTypeMax];
+
+void DebugMutexInit() {
+  // Build adjacency matrix.
+  bool leaf[kMutexTypeMax];
+  internal_memset(&leaf, 0, sizeof(leaf));
+  int cnt[kMutexTypeMax];
+  internal_memset(&cnt, 0, sizeof(cnt));
+  for (int t = 0; t < kMutexTypeMax; t++) {
+    mutex_type_count = t;
+    if (!mutex_meta[t].name)
+      break;
+    CHECK_EQ(t, mutex_meta[t].type);
+    for (uptr j = 0; j < ARRAY_SIZE(mutex_meta[t].can_lock); j++) {
+      MutexType z = mutex_meta[t].can_lock[j];
+      if (z == MutexInvalid)
+        break;
+      if (z == MutexLeaf) {
+        CHECK(!leaf[t]);
+        leaf[t] = true;
+        continue;
+      }
+      if (z == MutexMulti) {
+        mutex_multi[t] = true;
+        continue;
+      }
+      CHECK_LT(z, kMutexTypeMax);
+      CHECK(!mutex_can_lock[t][z]);
+      mutex_can_lock[t][z] = true;
+      cnt[t]++;
+    }
+  }
+  // Indicates the array is not properly terminated.
+  CHECK_LT(mutex_type_count, kMutexTypeMax);
+  // Add leaf mutexes.
+  for (int t = 0; t < mutex_type_count; t++) {
+    if (!leaf[t])
+      continue;
+    CHECK_EQ(cnt[t], 0);
+    for (int z = 0; z < mutex_type_count; z++) {
+      if (z == MutexInvalid || t == z || leaf[z])
+        continue;
+      CHECK(!mutex_can_lock[z][t]);
+      mutex_can_lock[z][t] = true;
+    }
+  }
+  // Build the transitive closure and check that the graph is acyclic.
+  u32 trans[kMutexTypeMax];
+  static_assert(sizeof(trans[0]) * 8 >= kMutexTypeMax,
+                "kMutexTypeMax does not fit into u32, switch to u64");
+  internal_memset(&trans, 0, sizeof(trans));
+  for (int i = 0; i < mutex_type_count; i++) {
+    for (int j = 0; j < mutex_type_count; j++)
+      if (mutex_can_lock[i][j])
+        trans[i] |= 1 << j;
+  }
+  for (int k = 0; k < mutex_type_count; k++) {
+    for (int i = 0; i < mutex_type_count; i++) {
+      if (trans[i] & (1 << k))
+        trans[i] |= trans[k];
+    }
+  }
+  for (int i = 0; i < mutex_type_count; i++) {
+    if (trans[i] & (1 << i)) {
+      Printf("Mutex %s participates in a cycle\n", mutex_meta[i].name);
+      Die();
+    }
+  }
+}
+
+struct InternalDeadlockDetector {
+  struct LockDesc {
+    u64 seq;
+    uptr pc;
+    int recursion;
+  };
+  int initialized;
+  u64 sequence;
+  LockDesc locked[kMutexTypeMax];
+
+  void Lock(MutexType type, uptr pc) {
+    if (!Initialize(type))
+      return;
+    CHECK_LT(type, mutex_type_count);
+    // Find the last locked mutex type.
+    // This is the type we will use for hierarchy checks.
+    u64 max_seq = 0;
+    MutexType max_idx = MutexInvalid;
+    for (int i = 0; i != mutex_type_count; i++) {
+      if (locked[i].seq == 0)
+        continue;
+      CHECK_NE(locked[i].seq, max_seq);
+      if (max_seq < locked[i].seq) {
+        max_seq = locked[i].seq;
+        max_idx = (MutexType)i;
+      }
+    }
+    if (max_idx == type && mutex_multi[type]) {
+      // Recursive lock of the same type.
+      CHECK_EQ(locked[type].seq, max_seq);
+      CHECK(locked[type].pc);
+      locked[type].recursion++;
+      return;
+    }
+    if (max_idx != MutexInvalid && !mutex_can_lock[max_idx][type]) {
+      Printf("%s: internal deadlock: can't lock %s under %s mutex\n", SanitizerToolName,
+             mutex_meta[type].name, mutex_meta[max_idx].name);
+      PrintMutexPC(pc);
+      CHECK(0);
+    }
+    locked[type].seq = ++sequence;
+    locked[type].pc = pc;
+    locked[type].recursion = 1;
+  }
+
+  void Unlock(MutexType type) {
+    if (!Initialize(type))
+      return;
+    CHECK_LT(type, mutex_type_count);
+    CHECK(locked[type].seq);
+    CHECK_GT(locked[type].recursion, 0);
+    if (--locked[type].recursion)
+      return;
+    locked[type].seq = 0;
+    locked[type].pc = 0;
+  }
+
+  void CheckNoLocks() {
+    for (int i = 0; i < mutex_type_count; i++) CHECK_EQ(locked[i].recursion, 0);
+  }
+
+  bool Initialize(MutexType type) {
+    if (type == MutexUnchecked || type == MutexInvalid)
+      return false;
+    CHECK_GT(type, MutexInvalid);
+    if (initialized != 0)
+      return initialized > 0;
+    initialized = -1;
+    SpinMutexLock lock(&mutex_meta_mtx);
+    if (mutex_type_count < 0)
+      DebugMutexInit();
+    initialized = mutex_type_count ? 1 : -1;
+    return initialized > 0;
+  }
+};
+
+static THREADLOCAL InternalDeadlockDetector deadlock_detector;
+
+void CheckedMutex::LockImpl(uptr pc) { deadlock_detector.Lock(type_, pc); }
+
+void CheckedMutex::UnlockImpl() { deadlock_detector.Unlock(type_); }
+
+void CheckedMutex::CheckNoLocksImpl() { deadlock_detector.CheckNoLocks(); }
+#endif
+
 }  // namespace __sanitizer
index e3ff650b2c5924f0b39479a930f0c46ca834cd41..7479d35f2a594416c776e296c8309449aa44901e 100644 (file)
@@ -27,7 +27,7 @@ class MUTEX StaticSpinMutex {
   }
 
   void Lock() ACQUIRE() {
-    if (TryLock())
+    if (LIKELY(TryLock()))
       return;
     LockSlow();
   }
@@ -45,17 +45,7 @@ class MUTEX StaticSpinMutex {
  private:
   atomic_uint8_t state_;
 
-  void NOINLINE LockSlow() {
-    for (int i = 0;; i++) {
-      if (i < 10)
-        proc_yield(10);
-      else
-        internal_sched_yield();
-      if (atomic_load(&state_, memory_order_relaxed) == 0
-          && atomic_exchange(&state_, 1, memory_order_acquire) == 0)
-        return;
-    }
-  }
+  void LockSlow();
 };
 
 class MUTEX SpinMutex : public StaticSpinMutex {
@@ -64,7 +54,6 @@ class MUTEX SpinMutex : public StaticSpinMutex {
     Init();
   }
 
- private:
   SpinMutex(const SpinMutex &) = delete;
   void operator=(const SpinMutex &) = delete;
 };
@@ -85,15 +74,94 @@ class Semaphore {
   atomic_uint32_t state_ = {0};
 };
 
+typedef int MutexType;
+
+enum {
+  // Used as sentinel and to catch unassigned types
+  // (should not be used as real Mutex type).
+  MutexInvalid = 0,
+  MutexThreadRegistry,
+  // Each tool's own mutexes must start at this number.
+  MutexLastCommon,
+  // Type for legacy mutexes that are not checked for deadlocks.
+  MutexUnchecked = -1,
+  // Special marks that can be used in MutexMeta::can_lock table.
+  // The leaf mutexes can be locked under any other non-leaf mutex,
+  // but no other mutex can be locked while under a leaf mutex.
+  MutexLeaf = -1,
+  // Multiple mutexes of this type can be locked at the same time.
+  MutexMulti = -3,
+};
+
+// Go linker does not support THREADLOCAL variables,
+// so we can't use per-thread state.
+#define SANITIZER_CHECK_DEADLOCKS \
+  (SANITIZER_DEBUG && !SANITIZER_GO && SANITIZER_SUPPORTS_THREADLOCAL)
+
+#if SANITIZER_CHECK_DEADLOCKS
+struct MutexMeta {
+  MutexType type;
+  const char *name;
+  // The table fixes what mutexes can be locked under what mutexes.
+  // If the entry for MutexTypeFoo contains MutexTypeBar,
+  // then Bar mutex can be locked while under Foo mutex.
+  // Can also contain the special MutexLeaf/MutexMulti marks.
+  MutexType can_lock[10];
+};
+#endif
+
+class CheckedMutex {
+ public:
+  explicit constexpr CheckedMutex(MutexType type)
+#if SANITIZER_CHECK_DEADLOCKS
+      : type_(type)
+#endif
+  {
+  }
+
+  ALWAYS_INLINE void Lock() {
+#if SANITIZER_CHECK_DEADLOCKS
+    LockImpl(GET_CALLER_PC());
+#endif
+  }
+
+  ALWAYS_INLINE void Unlock() {
+#if SANITIZER_CHECK_DEADLOCKS
+    UnlockImpl();
+#endif
+  }
+
+  // Checks that the current thread does not hold any mutexes
+  // (e.g. when returning from a runtime function to user code).
+  static void CheckNoLocks() {
+#if SANITIZER_CHECK_DEADLOCKS
+    CheckNoLocksImpl();
+#endif
+  }
+
+ private:
+#if SANITIZER_CHECK_DEADLOCKS
+  const MutexType type_;
+
+  void LockImpl(uptr pc);
+  void UnlockImpl();
+  static void CheckNoLocksImpl();
+#endif
+};
+
 // Reader-writer mutex.
-class MUTEX Mutex2 {
+// Derive from CheckedMutex for the purposes of EBO.
+// We could make it a field marked with [[no_unique_address]],
+// but this attribute is not supported by some older compilers.
+class MUTEX Mutex : CheckedMutex {
  public:
-  constexpr Mutex2() {}
+  explicit constexpr Mutex(MutexType type = MutexUnchecked)
+      : CheckedMutex(type) {}
 
   void Lock() ACQUIRE() {
+    CheckedMutex::Lock();
     u64 reset_mask = ~0ull;
     u64 state = atomic_load_relaxed(&state_);
-    const uptr kMaxSpinIters = 1500;
     for (uptr spin_iters = 0;; spin_iters++) {
       u64 new_state;
       bool locked = (state & (kWriterLock | kReaderLockMask)) != 0;
@@ -122,8 +190,6 @@ class MUTEX Mutex2 {
         // We've incremented waiting writers, so now block.
         writers_.Wait();
         spin_iters = 0;
-        state = atomic_load(&state_, memory_order_relaxed);
-        DCHECK_NE(state & kWriterSpinWait, 0);
       } else {
         // We've set kWriterSpinWait, but we are still in active spinning.
       }
@@ -132,10 +198,13 @@ class MUTEX Mutex2 {
       // Either way we need to reset kWriterSpinWait
       // next time we take the lock or block again.
       reset_mask = ~kWriterSpinWait;
+      state = atomic_load(&state_, memory_order_relaxed);
+      DCHECK_NE(state & kWriterSpinWait, 0);
     }
   }
 
   void Unlock() RELEASE() {
+    CheckedMutex::Unlock();
     bool wake_writer;
     u64 wake_readers;
     u64 new_state;
@@ -144,17 +213,16 @@ class MUTEX Mutex2 {
       DCHECK_NE(state & kWriterLock, 0);
       DCHECK_EQ(state & kReaderLockMask, 0);
       new_state = state & ~kWriterLock;
-      wake_writer =
-          (state & kWriterSpinWait) == 0 && (state & kWaitingWriterMask) != 0;
+      wake_writer = (state & (kWriterSpinWait | kReaderSpinWait)) == 0 &&
+                    (state & kWaitingWriterMask) != 0;
       if (wake_writer)
         new_state = (new_state - kWaitingWriterInc) | kWriterSpinWait;
       wake_readers =
-          (state & (kWriterSpinWait | kWaitingWriterMask)) != 0
+          wake_writer || (state & kWriterSpinWait) != 0
               ? 0
               : ((state & kWaitingReaderMask) >> kWaitingReaderShift);
       if (wake_readers)
-        new_state = (new_state & ~kWaitingReaderMask) +
-                    (wake_readers << kReaderLockShift);
+        new_state = (new_state & ~kWaitingReaderMask) | kReaderSpinWait;
     } while (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
                                                     memory_order_release)));
     if (UNLIKELY(wake_writer))
@@ -164,34 +232,53 @@ class MUTEX Mutex2 {
   }
 
   void ReadLock() ACQUIRE_SHARED() {
-    bool locked;
-    u64 new_state;
+    CheckedMutex::Lock();
+    u64 reset_mask = ~0ull;
     u64 state = atomic_load_relaxed(&state_);
-    do {
-      locked =
-          (state & kReaderLockMask) == 0 &&
-          (state & (kWriterLock | kWriterSpinWait | kWaitingWriterMask)) != 0;
+    for (uptr spin_iters = 0;; spin_iters++) {
+      bool locked = (state & kWriterLock) != 0;
+      u64 new_state;
+      if (LIKELY(!locked)) {
+        new_state = (state + kReaderLockInc) & reset_mask;
+      } else if (spin_iters > kMaxSpinIters) {
+        new_state = (state + kWaitingReaderInc) & reset_mask;
+      } else if ((state & kReaderSpinWait) == 0) {
+        // Active spinning, but denote our presence so that unlocking
+        // thread does not wake up other threads.
+        new_state = state | kReaderSpinWait;
+      } else {
+        // Active spinning.
+        state = atomic_load(&state_, memory_order_relaxed);
+        continue;
+      }
+      if (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
+                                                 memory_order_acquire)))
+        continue;
       if (LIKELY(!locked))
-        new_state = state + kReaderLockInc;
-      else
-        new_state = state + kWaitingReaderInc;
-    } while (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
-                                                    memory_order_acquire)));
-    if (UNLIKELY(locked))
-      readers_.Wait();
-    DCHECK_EQ(atomic_load_relaxed(&state_) & kWriterLock, 0);
-    DCHECK_NE(atomic_load_relaxed(&state_) & kReaderLockMask, 0);
+        return;  // We've locked the mutex.
+      if (spin_iters > kMaxSpinIters) {
+        // We've incremented waiting readers, so now block.
+        readers_.Wait();
+        spin_iters = 0;
+      } else {
+        // We've set kReaderSpinWait, but we are still in active spinning.
+      }
+      reset_mask = ~kReaderSpinWait;
+      state = atomic_load(&state_, memory_order_relaxed);
+    }
   }
 
   void ReadUnlock() RELEASE_SHARED() {
+    CheckedMutex::Unlock();
     bool wake;
     u64 new_state;
     u64 state = atomic_load_relaxed(&state_);
     do {
       DCHECK_NE(state & kReaderLockMask, 0);
-      DCHECK_EQ(state & (kWaitingReaderMask | kWriterLock), 0);
+      DCHECK_EQ(state & kWriterLock, 0);
       new_state = state - kReaderLockInc;
-      wake = (new_state & (kReaderLockMask | kWriterSpinWait)) == 0 &&
+      wake = (new_state &
+              (kReaderLockMask | kWriterSpinWait | kReaderSpinWait)) == 0 &&
              (new_state & kWaitingWriterMask) != 0;
       if (wake)
         new_state = (new_state - kWaitingWriterInc) | kWriterSpinWait;
@@ -235,16 +322,14 @@ class MUTEX Mutex2 {
   //  - a writer is awake and spin-waiting
   //    the flag is used to prevent thundering herd problem
   //    (new writers are not woken if this flag is set)
+  //  - a reader is awake and spin-waiting
   //
-  // Writer support active spinning, readers does not.
+  // Both writers and readers use active spinning before blocking.
   // But readers are more aggressive and always take the mutex
   // if there are any other readers.
-  // Writers hand off the mutex to readers: after wake up readers
-  // already assume ownership of the mutex (don't need to do any
-  // state updates). But the mutex is not handed off to writers,
-  // after wake up writers compete to lock the mutex again.
-  // This is needed to allow repeated write locks even in presence
-  // of other blocked writers.
+  // After wake up both writers and readers compete to lock the
+  // mutex again. This is needed to allow repeated locks even in presence
+  // of other blocked threads.
   static constexpr u64 kCounterWidth = 20;
   static constexpr u64 kReaderLockShift = 0;
   static constexpr u64 kReaderLockInc = 1ull << kReaderLockShift;
@@ -260,119 +345,18 @@ class MUTEX Mutex2 {
                                             << kWaitingWriterShift;
   static constexpr u64 kWriterLock = 1ull << (3 * kCounterWidth);
   static constexpr u64 kWriterSpinWait = 1ull << (3 * kCounterWidth + 1);
+  static constexpr u64 kReaderSpinWait = 1ull << (3 * kCounterWidth + 2);
+
+  static constexpr uptr kMaxSpinIters = 1500;
 
-  Mutex2(const Mutex2 &) = delete;
-  void operator=(const Mutex2 &) = delete;
+  Mutex(LinkerInitialized) = delete;
+  Mutex(const Mutex &) = delete;
+  void operator=(const Mutex &) = delete;
 };
 
 void FutexWait(atomic_uint32_t *p, u32 cmp);
 void FutexWake(atomic_uint32_t *p, u32 count);
 
-class MUTEX BlockingMutex {
- public:
-  explicit constexpr BlockingMutex(LinkerInitialized)
-      : opaque_storage_ {0, }, owner_ {0} {}
-  BlockingMutex();
-  void Lock() ACQUIRE();
-  void Unlock() RELEASE();
-
-  // This function does not guarantee an explicit check that the calling thread
-  // is the thread which owns the mutex. This behavior, while more strictly
-  // correct, causes problems in cases like StopTheWorld, where a parent thread
-  // owns the mutex but a child checks that it is locked. Rather than
-  // maintaining complex state to work around those situations, the check only
-  // checks that the mutex is owned, and assumes callers to be generally
-  // well-behaved.
-  void CheckLocked() const CHECK_LOCKED();
-
- private:
-  // Solaris mutex_t has a member that requires 64-bit alignment.
-  ALIGNED(8) uptr opaque_storage_[10];
-  uptr owner_;  // for debugging
-};
-
-// Reader-writer spin mutex.
-class MUTEX RWMutex {
- public:
-  RWMutex() {
-    atomic_store(&state_, kUnlocked, memory_order_relaxed);
-  }
-
-  ~RWMutex() {
-    CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked);
-  }
-
-  void Lock() ACQUIRE() {
-    u32 cmp = kUnlocked;
-    if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
-                                       memory_order_acquire))
-      return;
-    LockSlow();
-  }
-
-  void Unlock() RELEASE() {
-    u32 prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
-    DCHECK_NE(prev & kWriteLock, 0);
-    (void)prev;
-  }
-
-  void ReadLock() ACQUIRE_SHARED() {
-    u32 prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
-    if ((prev & kWriteLock) == 0)
-      return;
-    ReadLockSlow();
-  }
-
-  void ReadUnlock() RELEASE_SHARED() {
-    u32 prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release);
-    DCHECK_EQ(prev & kWriteLock, 0);
-    DCHECK_GT(prev & ~kWriteLock, 0);
-    (void)prev;
-  }
-
-  void CheckLocked() const CHECK_LOCKED() {
-    CHECK_NE(atomic_load(&state_, memory_order_relaxed), kUnlocked);
-  }
-
- private:
-  atomic_uint32_t state_;
-
-  enum {
-    kUnlocked = 0,
-    kWriteLock = 1,
-    kReadLock = 2
-  };
-
-  void NOINLINE LockSlow() {
-    for (int i = 0;; i++) {
-      if (i < 10)
-        proc_yield(10);
-      else
-        internal_sched_yield();
-      u32 cmp = atomic_load(&state_, memory_order_relaxed);
-      if (cmp == kUnlocked &&
-          atomic_compare_exchange_weak(&state_, &cmp, kWriteLock,
-                                       memory_order_acquire))
-          return;
-    }
-  }
-
-  void NOINLINE ReadLockSlow() {
-    for (int i = 0;; i++) {
-      if (i < 10)
-        proc_yield(10);
-      else
-        internal_sched_yield();
-      u32 prev = atomic_load(&state_, memory_order_acquire);
-      if ((prev & kWriteLock) == 0)
-        return;
-    }
-  }
-
-  RWMutex(const RWMutex &) = delete;
-  void operator=(const RWMutex &) = delete;
-};
-
 template <typename MutexType>
 class SCOPED_LOCK GenericScopedLock {
  public:
@@ -405,10 +389,37 @@ class SCOPED_LOCK GenericScopedReadLock {
   void operator=(const GenericScopedReadLock &) = delete;
 };
 
+template <typename MutexType>
+class SCOPED_LOCK GenericScopedRWLock {
+ public:
+  ALWAYS_INLINE explicit GenericScopedRWLock(MutexType *mu, bool write)
+      ACQUIRE(mu)
+      : mu_(mu), write_(write) {
+    if (write_)
+      mu_->Lock();
+    else
+      mu_->ReadLock();
+  }
+
+  ALWAYS_INLINE ~GenericScopedRWLock() RELEASE() {
+    if (write_)
+      mu_->Unlock();
+    else
+      mu_->ReadUnlock();
+  }
+
+ private:
+  MutexType *mu_;
+  bool write_;
+
+  GenericScopedRWLock(const GenericScopedRWLock &) = delete;
+  void operator=(const GenericScopedRWLock &) = delete;
+};
+
 typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;
-typedef GenericScopedLock<BlockingMutex> BlockingMutexLock;
-typedef GenericScopedLock<RWMutex> RWMutexLock;
-typedef GenericScopedReadLock<RWMutex> RWMutexReadLock;
+typedef GenericScopedLock<Mutex> Lock;
+typedef GenericScopedReadLock<Mutex> ReadLock;
+typedef GenericScopedRWLock<Mutex> RWLock;
 
 }  // namespace __sanitizer
 
index 4d3c08893c11c11f59a6461fafe16d5fa4a4a78b..3153de34e5a3f6c793915e6790dfba399221915d 100644 (file)
 // mandated by the upstream linux community for all new ports. Other ports
 // may still use legacy syscalls.
 #ifndef SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
-# if (defined(__aarch64__) || defined(__riscv)) && SANITIZER_LINUX
-# define SANITIZER_USES_CANONICAL_LINUX_SYSCALLS 1
-# else
-# define SANITIZER_USES_CANONICAL_LINUX_SYSCALLS 0
-# endif
+#  if (defined(__aarch64__) || defined(__riscv) || defined(__hexagon__)) && \
+      SANITIZER_LINUX
+#    define SANITIZER_USES_CANONICAL_LINUX_SYSCALLS 1
+#  else
+#    define SANITIZER_USES_CANONICAL_LINUX_SYSCALLS 0
+#  endif
 #endif
 
 // udi16 syscalls can only be used when the following conditions are
 #define SANITIZER_SUPPORTS_INIT_FOR_DLOPEN 0
 #endif
 
+// SANITIZER_SUPPORTS_THREADLOCAL
+// 1 - THREADLOCAL macro is supported by target
+// 0 - THREADLOCAL macro is not supported by target
+#ifndef __has_feature
+// TODO: Support other compilers here
+#  define SANITIZER_SUPPORTS_THREADLOCAL 1
+#else
+#  if __has_feature(tls)
+#    define SANITIZER_SUPPORTS_THREADLOCAL 1
+#  else
+#    define SANITIZER_SUPPORTS_THREADLOCAL 0
+#  endif
+#endif
+
 #endif // SANITIZER_PLATFORM_H
index 5b710c23fd0048fe1a10d8bbd83d45be66d22f18..02c51d9fb0d24d7d91fd8935536a6f3c908ecde3 100644 (file)
   (SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
 #define SANITIZER_INTERCEPT_CLOCK_GETTIME \
   (SI_FREEBSD || SI_NETBSD || SI_LINUX || SI_SOLARIS)
-#define SANITIZER_INTERCEPT_CLOCK_GETCPUCLOCKID SI_LINUX
+#define SANITIZER_INTERCEPT_CLOCK_GETCPUCLOCKID \
+  (SI_LINUX || SI_FREEBSD || SI_NETBSD)
 #define SANITIZER_INTERCEPT_GETITIMER SI_POSIX
 #define SANITIZER_INTERCEPT_TIME SI_POSIX
 #define SANITIZER_INTERCEPT_GLOB (SI_GLIBC || SI_SOLARIS)
 #define SANITIZER_INTERCEPT_GETHOSTENT_R (SI_FREEBSD || SI_GLIBC || SI_SOLARIS)
 #define SANITIZER_INTERCEPT_GETSOCKOPT SI_POSIX
 #define SANITIZER_INTERCEPT_ACCEPT SI_POSIX
-#define SANITIZER_INTERCEPT_ACCEPT4 (SI_LINUX_NOT_ANDROID || SI_NETBSD)
+#define SANITIZER_INTERCEPT_ACCEPT4 \
+  (SI_LINUX_NOT_ANDROID || SI_NETBSD || SI_FREEBSD)
 #define SANITIZER_INTERCEPT_PACCEPT SI_NETBSD
 #define SANITIZER_INTERCEPT_MODF SI_POSIX
 #define SANITIZER_INTERCEPT_RECVMSG SI_POSIX
 #define SANITIZER_INTERCEPT_PPOLL SI_LINUX_NOT_ANDROID || SI_SOLARIS
 #define SANITIZER_INTERCEPT_WORDEXP                                          \
   (SI_FREEBSD || SI_NETBSD || (SI_MAC && !SI_IOS) || SI_LINUX_NOT_ANDROID || \
-   SI_SOLARIS)  // NOLINT
+   SI_SOLARIS)
 #define SANITIZER_INTERCEPT_SIGWAIT SI_POSIX
 #define SANITIZER_INTERCEPT_SIGWAITINFO SI_LINUX_NOT_ANDROID || SI_SOLARIS
 #define SANITIZER_INTERCEPT_SIGTIMEDWAIT SI_LINUX_NOT_ANDROID || SI_SOLARIS
 #define SANITIZER_INTERCEPT_ETHER_R (SI_FREEBSD || SI_LINUX_NOT_ANDROID)
 #define SANITIZER_INTERCEPT_SHMCTL                                       \
   (((SI_FREEBSD || SI_LINUX_NOT_ANDROID) && SANITIZER_WORDSIZE == 64) || \
-   SI_NETBSD || SI_SOLARIS)  // NOLINT
+   SI_NETBSD || SI_SOLARIS)
 #define SANITIZER_INTERCEPT_RANDOM_R SI_GLIBC
 #define SANITIZER_INTERCEPT_PTHREAD_ATTR_GET SI_POSIX
 #define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETINHERITSCHED \
 #define SANITIZER_INTERCEPT_SEM \
   (SI_LINUX || SI_FREEBSD || SI_NETBSD || SI_SOLARIS)
 #define SANITIZER_INTERCEPT_PTHREAD_SETCANCEL SI_POSIX
-#define SANITIZER_INTERCEPT_MINCORE (SI_LINUX || SI_NETBSD || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_MINCORE \
+  (SI_LINUX || SI_NETBSD || SI_FREEBSD || SI_SOLARIS)
 #define SANITIZER_INTERCEPT_PROCESS_VM_READV SI_LINUX
 #define SANITIZER_INTERCEPT_CTERMID \
   (SI_LINUX || SI_MAC || SI_FREEBSD || SI_NETBSD || SI_SOLARIS)
 #define SANITIZER_INTERCEPT_GID_FROM_GROUP SI_NETBSD
 #define SANITIZER_INTERCEPT_ACCESS (SI_NETBSD || SI_FREEBSD)
 #define SANITIZER_INTERCEPT_FACCESSAT (SI_NETBSD || SI_FREEBSD)
-#define SANITIZER_INTERCEPT_GETGROUPLIST SI_NETBSD
+#define SANITIZER_INTERCEPT_GETGROUPLIST \
+  (SI_NETBSD || SI_FREEBSD || SI_LINUX)
 #define SANITIZER_INTERCEPT_STRLCPY \
   (SI_NETBSD || SI_FREEBSD || SI_MAC || SI_ANDROID)
 
 #define SANITIZER_INTERCEPT_DEVNAME_R (SI_NETBSD || SI_FREEBSD)
 #define SANITIZER_INTERCEPT_FGETLN (SI_NETBSD || SI_FREEBSD)
 #define SANITIZER_INTERCEPT_STRMODE (SI_NETBSD || SI_FREEBSD)
-#define SANITIZER_INTERCEPT_TTYENT SI_NETBSD
-#define SANITIZER_INTERCEPT_PROTOENT (SI_NETBSD || SI_LINUX)
+#define SANITIZER_INTERCEPT_TTYENT (SI_NETBSD || SI_FREEBSD)
+#define SANITIZER_INTERCEPT_TTYENTPATH SI_NETBSD
+#define SANITIZER_INTERCEPT_PROTOENT (SI_LINUX || SI_NETBSD || SI_FREEBSD)
 #define SANITIZER_INTERCEPT_PROTOENT_R SI_GLIBC
-#define SANITIZER_INTERCEPT_NETENT SI_NETBSD
+#define SANITIZER_INTERCEPT_NETENT (SI_LINUX || SI_NETBSD || SI_FREEBSD)
 #define SANITIZER_INTERCEPT_SETVBUF \
   (SI_NETBSD || SI_FREEBSD || SI_LINUX || SI_MAC)
 #define SANITIZER_INTERCEPT_GETMNTINFO (SI_NETBSD || SI_FREEBSD || SI_MAC)
 #define SANITIZER_INTERCEPT_MODCTL SI_NETBSD
 #define SANITIZER_INTERCEPT_CAPSICUM SI_FREEBSD
 #define SANITIZER_INTERCEPT_STRTONUM (SI_NETBSD || SI_FREEBSD)
-#define SANITIZER_INTERCEPT_FPARSELN SI_NETBSD
+#define SANITIZER_INTERCEPT_FPARSELN (SI_NETBSD || SI_FREEBSD)
 #define SANITIZER_INTERCEPT_STATVFS1 SI_NETBSD
 #define SANITIZER_INTERCEPT_STRTOI SI_NETBSD
 #define SANITIZER_INTERCEPT_CAPSICUM SI_FREEBSD
 #define SANITIZER_INTERCEPT_QSORT \
   (SI_POSIX && !SI_IOSSIM && !SI_WATCHOS && !SI_TVOS && !SI_ANDROID)
 #define SANITIZER_INTERCEPT_QSORT_R SI_GLIBC
+#define SANITIZER_INTERCEPT_BSEARCH \
+  (SI_POSIX && !SI_IOSSIM && !SI_WATCHOS && !SI_TVOS && !SI_ANDROID)
 // sigaltstack on i386 macOS cannot be intercepted due to setjmp()
 // calling it and assuming that it does not clobber registers.
 #define SANITIZER_INTERCEPT_SIGALTSTACK \
index b5a45ae72cd9393d702ac15b19ac76201af887dd..bfe3eea464d64b6988e209f03a64851e67e86522 100644 (file)
@@ -74,6 +74,7 @@
 #include <term.h>
 #include <termios.h>
 #include <time.h>
+#include <ttyent.h>
 #include <utime.h>
 #include <utmpx.h>
 #include <vis.h>
@@ -170,9 +171,12 @@ uptr __sanitizer_in_addr_sz(int af) {
 unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);
 int glob_nomatch = GLOB_NOMATCH;
 int glob_altdirfunc = GLOB_ALTDIRFUNC;
+const int wordexp_wrde_dooffs = WRDE_DOOFFS;
 
 unsigned path_max = PATH_MAX;
 
+int struct_ttyent_sz = sizeof(struct ttyent);
+
 // ioctl arguments
 unsigned struct_ifreq_sz = sizeof(struct ifreq);
 unsigned struct_termios_sz = sizeof(struct termios);
index 5e0ca9c7d78236821e41226edf33a4b551f3ee5d..89022ca6422c94804a75b792fdd0907aa11be6ac 100644 (file)
 
 #if SANITIZER_FREEBSD
 
-#include "sanitizer_internal_defs.h"
-#include "sanitizer_platform.h"
-#include "sanitizer_platform_limits_posix.h"
+#  include "sanitizer_internal_defs.h"
+#  include "sanitizer_platform.h"
+#  include "sanitizer_platform_limits_posix.h"
 
 // Get sys/_types.h, because that tells us whether 64-bit inodes are
 // used in struct dirent below.
-#include <sys/_types.h>
+#  include <sys/_types.h>
 
 namespace __sanitizer {
 void *__sanitizer_get_link_map_by_dlopen_handle(void *handle);
-#define GET_LINK_MAP_BY_DLOPEN_HANDLE(handle) \
-  (link_map *)__sanitizer_get_link_map_by_dlopen_handle(handle)
+#  define GET_LINK_MAP_BY_DLOPEN_HANDLE(handle) \
+    (link_map *)__sanitizer_get_link_map_by_dlopen_handle(handle)
 
 extern unsigned struct_utsname_sz;
 extern unsigned struct_stat_sz;
-#if defined(__powerpc64__)
+#  if defined(__powerpc64__)
 const unsigned struct___old_kernel_stat_sz = 0;
-#else
+#  else
 const unsigned struct___old_kernel_stat_sz = 32;
-#endif
+#  endif
 extern unsigned struct_rusage_sz;
 extern unsigned siginfo_t_sz;
 extern unsigned struct_itimerval_sz;
@@ -114,11 +114,24 @@ struct __sanitizer_ipc_perm {
   long key;
 };
 
-#if !defined(__i386__)
+struct __sanitizer_protoent {
+  char *p_name;
+  char **p_aliases;
+  int p_proto;
+};
+
+struct __sanitizer_netent {
+  char *n_name;
+  char **n_aliases;
+  int n_addrtype;
+  u32 n_net;
+};
+
+#  if !defined(__i386__)
 typedef long long __sanitizer_time_t;
-#else
+#  else
 typedef long __sanitizer_time_t;
-#endif
+#  endif
 
 struct __sanitizer_shmid_ds {
   __sanitizer_ipc_perm shm_perm;
@@ -147,7 +160,7 @@ struct __sanitizer_ifaddrs {
   unsigned int ifa_flags;
   void *ifa_addr;     // (struct sockaddr *)
   void *ifa_netmask;  // (struct sockaddr *)
-#undef ifa_dstaddr
+#  undef ifa_dstaddr
   void *ifa_dstaddr;  // (struct sockaddr *)
   void *ifa_data;
 };
@@ -229,12 +242,12 @@ struct __sanitizer_cmsghdr {
 };
 
 struct __sanitizer_dirent {
-#if defined(__INO64)
+#  if defined(__INO64)
   unsigned long long d_fileno;
   unsigned long long d_off;
-#else
+#  else
   unsigned int d_fileno;
-#endif
+#  endif
   unsigned short d_reclen;
   // more fields that we don't care about
 };
@@ -243,23 +256,23 @@ struct __sanitizer_dirent {
 typedef int __sanitizer_clock_t;
 typedef int __sanitizer_clockid_t;
 
-#if defined(_LP64) || defined(__x86_64__) || defined(__powerpc__) || \
-    defined(__mips__)
+#  if defined(_LP64) || defined(__x86_64__) || defined(__powerpc__) || \
+      defined(__mips__)
 typedef unsigned __sanitizer___kernel_uid_t;
 typedef unsigned __sanitizer___kernel_gid_t;
-#else
+#  else
 typedef unsigned short __sanitizer___kernel_uid_t;
 typedef unsigned short __sanitizer___kernel_gid_t;
-#endif
+#  endif
 typedef long long __sanitizer___kernel_off_t;
 
-#if defined(__powerpc__) || defined(__mips__)
+#  if defined(__powerpc__) || defined(__mips__)
 typedef unsigned int __sanitizer___kernel_old_uid_t;
 typedef unsigned int __sanitizer___kernel_old_gid_t;
-#else
+#  else
 typedef unsigned short __sanitizer___kernel_old_uid_t;
 typedef unsigned short __sanitizer___kernel_old_gid_t;
-#endif
+#  endif
 
 typedef long long __sanitizer___kernel_loff_t;
 typedef struct {
@@ -366,9 +379,12 @@ struct __sanitizer_glob_t {
 
 extern int glob_nomatch;
 extern int glob_altdirfunc;
+extern const int wordexp_wrde_dooffs;
 
 extern unsigned path_max;
 
+extern int struct_ttyent_sz;
+
 struct __sanitizer_wordexp_t {
   uptr we_wordc;
   char **we_wordv;
@@ -398,39 +414,49 @@ struct __sanitizer_ifconf {
   } ifc_ifcu;
 };
 
-#define IOC_NRBITS 8
-#define IOC_TYPEBITS 8
-#if defined(__powerpc__) || defined(__powerpc64__) || defined(__mips__)
-#define IOC_SIZEBITS 13
-#define IOC_DIRBITS 3
-#define IOC_NONE 1U
-#define IOC_WRITE 4U
-#define IOC_READ 2U
-#else
-#define IOC_SIZEBITS 14
-#define IOC_DIRBITS 2
-#define IOC_NONE 0U
-#define IOC_WRITE 1U
-#define IOC_READ 2U
-#endif
-#define IOC_NRMASK ((1 << IOC_NRBITS) - 1)
-#define IOC_TYPEMASK ((1 << IOC_TYPEBITS) - 1)
-#define IOC_SIZEMASK ((1 << IOC_SIZEBITS) - 1)
-#if defined(IOC_DIRMASK)
-#undef IOC_DIRMASK
-#endif
-#define IOC_DIRMASK ((1 << IOC_DIRBITS) - 1)
-#define IOC_NRSHIFT 0
-#define IOC_TYPESHIFT (IOC_NRSHIFT + IOC_NRBITS)
-#define IOC_SIZESHIFT (IOC_TYPESHIFT + IOC_TYPEBITS)
-#define IOC_DIRSHIFT (IOC_SIZESHIFT + IOC_SIZEBITS)
-#define EVIOC_EV_MAX 0x1f
-#define EVIOC_ABS_MAX 0x3f
-
-#define IOC_DIR(nr) (((nr) >> IOC_DIRSHIFT) & IOC_DIRMASK)
-#define IOC_TYPE(nr) (((nr) >> IOC_TYPESHIFT) & IOC_TYPEMASK)
-#define IOC_NR(nr) (((nr) >> IOC_NRSHIFT) & IOC_NRMASK)
-#define IOC_SIZE(nr) (((nr) >> IOC_SIZESHIFT) & IOC_SIZEMASK)
+struct __sanitizer__ttyent {
+  char *ty_name;
+  char *ty_getty;
+  char *ty_type;
+  int ty_status;
+  char *ty_window;
+  char *ty_comment;
+  char *ty_group;
+};
+
+#  define IOC_NRBITS 8
+#  define IOC_TYPEBITS 8
+#  if defined(__powerpc__) || defined(__powerpc64__) || defined(__mips__)
+#    define IOC_SIZEBITS 13
+#    define IOC_DIRBITS 3
+#    define IOC_NONE 1U
+#    define IOC_WRITE 4U
+#    define IOC_READ 2U
+#  else
+#    define IOC_SIZEBITS 14
+#    define IOC_DIRBITS 2
+#    define IOC_NONE 0U
+#    define IOC_WRITE 1U
+#    define IOC_READ 2U
+#  endif
+#  define IOC_NRMASK ((1 << IOC_NRBITS) - 1)
+#  define IOC_TYPEMASK ((1 << IOC_TYPEBITS) - 1)
+#  define IOC_SIZEMASK ((1 << IOC_SIZEBITS) - 1)
+#  if defined(IOC_DIRMASK)
+#    undef IOC_DIRMASK
+#  endif
+#  define IOC_DIRMASK ((1 << IOC_DIRBITS) - 1)
+#  define IOC_NRSHIFT 0
+#  define IOC_TYPESHIFT (IOC_NRSHIFT + IOC_NRBITS)
+#  define IOC_SIZESHIFT (IOC_TYPESHIFT + IOC_TYPEBITS)
+#  define IOC_DIRSHIFT (IOC_SIZESHIFT + IOC_SIZEBITS)
+#  define EVIOC_EV_MAX 0x1f
+#  define EVIOC_ABS_MAX 0x3f
+
+#  define IOC_DIR(nr) (((nr) >> IOC_DIRSHIFT) & IOC_DIRMASK)
+#  define IOC_TYPE(nr) (((nr) >> IOC_TYPESHIFT) & IOC_TYPEMASK)
+#  define IOC_NR(nr) (((nr) >> IOC_NRSHIFT) & IOC_NRMASK)
+#  define IOC_SIZE(nr) (((nr) >> IOC_SIZESHIFT) & IOC_SIZEMASK)
 
 extern unsigned struct_ifreq_sz;
 extern unsigned struct_termios_sz;
@@ -632,24 +658,24 @@ extern unsigned struct_fstab_sz;
 extern unsigned struct_StringList_sz;
 }  // namespace __sanitizer
 
-#define CHECK_TYPE_SIZE(TYPE) \
-  COMPILER_CHECK(sizeof(__sanitizer_##TYPE) == sizeof(TYPE))
+#  define CHECK_TYPE_SIZE(TYPE) \
+    COMPILER_CHECK(sizeof(__sanitizer_##TYPE) == sizeof(TYPE))
 
-#define CHECK_SIZE_AND_OFFSET(CLASS, MEMBER)                      \
-  COMPILER_CHECK(sizeof(((__sanitizer_##CLASS *)NULL)->MEMBER) == \
-                 sizeof(((CLASS *)NULL)->MEMBER));                \
-  COMPILER_CHECK(offsetof(__sanitizer_##CLASS, MEMBER) ==         \
-                 offsetof(CLASS, MEMBER))
+#  define CHECK_SIZE_AND_OFFSET(CLASS, MEMBER)                      \
+    COMPILER_CHECK(sizeof(((__sanitizer_##CLASS *)NULL)->MEMBER) == \
+                   sizeof(((CLASS *)NULL)->MEMBER));                \
+    COMPILER_CHECK(offsetof(__sanitizer_##CLASS, MEMBER) ==         \
+                   offsetof(CLASS, MEMBER))
 
 // For sigaction, which is a function and struct at the same time,
 // and thus requires explicit "struct" in sizeof() expression.
-#define CHECK_STRUCT_SIZE_AND_OFFSET(CLASS, MEMBER)                      \
-  COMPILER_CHECK(sizeof(((struct __sanitizer_##CLASS *)NULL)->MEMBER) == \
-                 sizeof(((struct CLASS *)NULL)->MEMBER));                \
-  COMPILER_CHECK(offsetof(struct __sanitizer_##CLASS, MEMBER) ==         \
-                 offsetof(struct CLASS, MEMBER))
+#  define CHECK_STRUCT_SIZE_AND_OFFSET(CLASS, MEMBER)                      \
+    COMPILER_CHECK(sizeof(((struct __sanitizer_##CLASS *)NULL)->MEMBER) == \
+                   sizeof(((struct CLASS *)NULL)->MEMBER));                \
+    COMPILER_CHECK(offsetof(struct __sanitizer_##CLASS, MEMBER) ==         \
+                   offsetof(struct CLASS, MEMBER))
 
-#define SIGACTION_SYMNAME sigaction
+#  define SIGACTION_SYMNAME sigaction
 
 #endif
 
index f22f50391286b484a17c19b2b7efe17995fd858b..9d577570ea1e2e219b4a08c64f450794d6a145d7 100644 (file)
 
 // With old kernels (and even new kernels on powerpc) asm/stat.h uses types that
 // are not defined anywhere in userspace headers. Fake them. This seems to work
-// fine with newer headers, too.  Beware that with <sys/stat.h>, struct stat
-// takes the form of struct stat64 on 32-bit platforms if _FILE_OFFSET_BITS=64.
-// Also, for some platforms (e.g. mips) there are additional members in the
-// <sys/stat.h> struct stat:s.
+// fine with newer headers, too.
 #include <linux/posix_types.h>
-#if defined(__x86_64__)
-#include <sys/stat.h>
-#else
-#define ino_t __kernel_ino_t
-#define mode_t __kernel_mode_t
-#define nlink_t __kernel_nlink_t
-#define uid_t __kernel_uid_t
-#define gid_t __kernel_gid_t
-#define off_t __kernel_off_t
-#define time_t __kernel_time_t
+#  if defined(__x86_64__) || defined(__mips__) || defined(__hexagon__)
+#    include <sys/stat.h>
+#  else
+#    define ino_t __kernel_ino_t
+#    define mode_t __kernel_mode_t
+#    define nlink_t __kernel_nlink_t
+#    define uid_t __kernel_uid_t
+#    define gid_t __kernel_gid_t
+#    define off_t __kernel_off_t
+#    define time_t __kernel_time_t
 // This header seems to contain the definitions of _kernel_ stat* structs.
-#include <asm/stat.h>
-#undef ino_t
-#undef mode_t
-#undef nlink_t
-#undef uid_t
-#undef gid_t
-#undef off_t
-#endif
-
-#include <linux/aio_abi.h>
-
-#if !SANITIZER_ANDROID
-#include <sys/statfs.h>
-#include <linux/perf_event.h>
-#endif
+#    include <asm/stat.h>
+#    undef ino_t
+#    undef mode_t
+#    undef nlink_t
+#    undef uid_t
+#    undef gid_t
+#    undef off_t
+#  endif
+
+#  include <linux/aio_abi.h>
+
+#  if !SANITIZER_ANDROID
+#    include <sys/statfs.h>
+#    include <linux/perf_event.h>
+#  endif
 
 using namespace __sanitizer;
 
@@ -66,9 +63,9 @@ namespace __sanitizer {
 #endif
 }  // namespace __sanitizer
 
-#if !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__aarch64__)\
-                            && !defined(__mips__) && !defined(__s390__)\
-                            && !defined(__sparc__) && !defined(__riscv)
+#  if !defined(__powerpc64__) && !defined(__x86_64__) &&                   \
+      !defined(__aarch64__) && !defined(__mips__) && !defined(__s390__) && \
+      !defined(__sparc__) && !defined(__riscv) && !defined(__hexagon__)
 COMPILER_CHECK(struct___old_kernel_stat_sz == sizeof(struct __old_kernel_stat));
 #endif
 
index c8f2aa5dba4af0fa6a1754c378dd0e6595fe50d9..531e07f2d4c57f1dc8e61d2f8116b6a1b8e5c806 100644 (file)
@@ -666,6 +666,7 @@ unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);
 
 int glob_nomatch = GLOB_NOMATCH;
 int glob_altdirfunc = GLOB_ALTDIRFUNC;
+const int wordexp_wrde_dooffs = WRDE_DOOFFS;
 
 unsigned path_max = PATH_MAX;
 
index 9e28dcfef0415895280c6d59276005c33c515336..9407803fc9c399d9bf42637796ce85aa2abb2a43 100644 (file)
@@ -394,6 +394,7 @@ struct __sanitizer_glob_t {
 
 extern int glob_nomatch;
 extern int glob_altdirfunc;
+extern const int wordexp_wrde_dooffs;
 
 extern unsigned path_max;
 
index 6e5c330b98eff32e3fd49b4ff95f72a50230c538..a1c452855ae77b6bed99fb35d55b56e991606a13 100644 (file)
 #if SANITIZER_LINUX
 # include <utime.h>
 # include <sys/ptrace.h>
-#if defined(__mips64) || defined(__aarch64__) || defined(__arm__) || \
-    SANITIZER_RISCV64
-#  include <asm/ptrace.h>
-#  ifdef __arm__
+#    if defined(__mips64) || defined(__aarch64__) || defined(__arm__) || \
+        defined(__hexagon__) || SANITIZER_RISCV64
+#      include <asm/ptrace.h>
+#      ifdef __arm__
 typedef struct user_fpregs elf_fpregset_t;
 #   define ARM_VFPREGS_SIZE_ASAN (32 * 8 /*fpregs*/ + 4 /*fpscr*/)
 #   if !defined(ARM_VFPREGS_SIZE)
@@ -242,12 +242,13 @@ namespace __sanitizer {
     defined(__powerpc64__) || defined(__arch64__) || defined(__sparcv9) || \
     defined(__x86_64__) || SANITIZER_RISCV64
 #define SIZEOF_STRUCT_USTAT 32
-#elif defined(__arm__) || defined(__i386__) || defined(__mips__) \
-  || defined(__powerpc__) || defined(__s390__) || defined(__sparc__)
-#define SIZEOF_STRUCT_USTAT 20
-#else
-#error Unknown size of struct ustat
-#endif
+#    elif defined(__arm__) || defined(__i386__) || defined(__mips__) ||    \
+        defined(__powerpc__) || defined(__s390__) || defined(__sparc__) || \
+        defined(__hexagon__)
+#      define SIZEOF_STRUCT_USTAT 20
+#    else
+#      error Unknown size of struct ustat
+#    endif
   unsigned struct_ustat_sz = SIZEOF_STRUCT_USTAT;
   unsigned struct_rlimit64_sz = sizeof(struct rlimit64);
   unsigned struct_statvfs64_sz = sizeof(struct statvfs64);
@@ -312,6 +313,10 @@ unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);
   int glob_altdirfunc = GLOB_ALTDIRFUNC;
 #endif
 
+#  if !SANITIZER_ANDROID
+  const int wordexp_wrde_dooffs = WRDE_DOOFFS;
+#  endif  // !SANITIZER_ANDROID
+
 #if SANITIZER_LINUX && !SANITIZER_ANDROID &&                               \
     (defined(__i386) || defined(__x86_64) || defined(__mips64) ||          \
      defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__) || \
index 07c7a93df4084320571b6434057d571271569fe3..d69b344dd613d6e2f25c39d839cbf5a899ddf81d 100644 (file)
@@ -83,7 +83,7 @@ const unsigned struct_kernel_stat64_sz = 104;
 #elif defined(__mips__)
 const unsigned struct_kernel_stat_sz = SANITIZER_ANDROID
                                            ? FIRST_32_SECOND_64(104, 128)
-                                           : FIRST_32_SECOND_64(144, 216);
+                                           : FIRST_32_SECOND_64(160, 216);
 const unsigned struct_kernel_stat64_sz = 104;
 #elif defined(__s390__) && !defined(__s390x__)
 const unsigned struct_kernel_stat_sz = 64;
@@ -102,7 +102,10 @@ const unsigned struct_kernel_stat64_sz = 104;
 #elif SANITIZER_RISCV64
 const unsigned struct_kernel_stat_sz = 128;
 const unsigned struct_kernel_stat64_sz = 0;  // RISCV64 does not use stat64
-#endif
+#    elif defined(__hexagon__)
+const unsigned struct_kernel_stat_sz = 128;
+const unsigned struct_kernel_stat64_sz = 0;
+#    endif
 struct __sanitizer_perf_event_attr {
   unsigned type;
   unsigned size;
@@ -367,7 +370,7 @@ struct __sanitizer_group {
   char **gr_mem;
 };
 
-#if defined(__x86_64__) && !defined(_LP64)
+#  if (defined(__x86_64__) && !defined(_LP64)) || defined(__hexagon__)
 typedef long long __sanitizer_time_t;
 #else
 typedef long __sanitizer_time_t;
@@ -475,23 +478,23 @@ struct __sanitizer_dirent {
   unsigned short d_reclen;
   // more fields that we don't care about
 };
-#elif SANITIZER_ANDROID || defined(__x86_64__)
+#  elif SANITIZER_ANDROID || defined(__x86_64__) || defined(__hexagon__)
 struct __sanitizer_dirent {
   unsigned long long d_ino;
   unsigned long long d_off;
   unsigned short d_reclen;
   // more fields that we don't care about
 };
-#else
+#  else
 struct __sanitizer_dirent {
   uptr d_ino;
   uptr d_off;
   unsigned short d_reclen;
   // more fields that we don't care about
 };
-#endif
+#  endif
 
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
+#  if SANITIZER_LINUX && !SANITIZER_ANDROID
 struct __sanitizer_dirent64 {
   unsigned long long d_ino;
   unsigned long long d_off;
@@ -511,8 +514,8 @@ typedef int __sanitizer_clockid_t;
 #endif
 
 #if SANITIZER_LINUX
-#if defined(_LP64) || defined(__x86_64__) || defined(__powerpc__) || \
-    defined(__mips__)
+#    if defined(_LP64) || defined(__x86_64__) || defined(__powerpc__) || \
+        defined(__mips__) || defined(__hexagon__)
 typedef unsigned __sanitizer___kernel_uid_t;
 typedef unsigned __sanitizer___kernel_gid_t;
 #else
@@ -712,6 +715,13 @@ struct __sanitizer_protoent {
   int p_proto;
 };
 
+struct __sanitizer_netent {
+  char *n_name;
+  char **n_aliases;
+  int n_addrtype;
+  u32 n_net;
+};
+
 struct __sanitizer_addrinfo {
   int ai_flags;
   int ai_family;
@@ -773,6 +783,10 @@ extern int glob_altdirfunc;
 
 extern unsigned path_max;
 
+#  if !SANITIZER_ANDROID
+extern const int wordexp_wrde_dooffs;
+#  endif  // !SANITIZER_ANDROID
+
 struct __sanitizer_wordexp_t {
   uptr we_wordc;
   char **we_wordv;
index 565b31f68aaee0b835f45fa9b6f204bf040b08d4..a113cb0d34903f1e5dd690453a12dc38c0248ba4 100644 (file)
@@ -123,6 +123,7 @@ namespace __sanitizer {
   unsigned struct_ElfW_Phdr_sz = sizeof(ElfW(Phdr));
 
   int glob_nomatch = GLOB_NOMATCH;
+  const int wordexp_wrde_dooffs = WRDE_DOOFFS;
 
   unsigned path_max = PATH_MAX;
 
index 85995e79792d211910676a2cd04337e20125a487..cbab577bcf26c847301c898c9c6523f42c7fd4f0 100644 (file)
@@ -341,6 +341,7 @@ struct __sanitizer_glob_t {
 
 extern int glob_nomatch;
 extern int glob_altdirfunc;
+extern const int wordexp_wrde_dooffs;
 
 extern unsigned path_max;
 
index b65dae644767ade1bd35440405f8fffb669acb0a..f91e26e74b87ccd070cec3d5cd37d8efe8e2389a 100644 (file)
 #include "sanitizer_platform_limits_posix.h"
 #include "sanitizer_platform_limits_solaris.h"
 
-#if !SANITIZER_POSIX
-// Make it hard to accidentally use any of functions declared in this file:
-#error This file should only be included on POSIX
-#endif
+#if SANITIZER_POSIX
 
 namespace __sanitizer {
 
@@ -126,4 +123,6 @@ void DecorateMapping(uptr addr, uptr size, const char *name);
 
 }  // namespace __sanitizer
 
+#endif  // SANITIZER_POSIX
+
 #endif  // SANITIZER_POSIX_H
index ddf6844bed13efeb3f7f87aee664b6b96682a8f6..eed02ce4f6aa668a43ecffa592189a8874700674 100644 (file)
@@ -151,6 +151,8 @@ int Atexit(void (*function)(void)) {
 #endif
 }
 
+bool CreateDir(const char *pathname) { return mkdir(pathname, 0755) == 0; }
+
 bool SupportsColoredOutput(fd_t fd) {
   return isatty(fd) != 0;
 }
index b913c92e16f10dccbe1de7a79c42e3164a54174c..79aee8ba628231c960fc15a3b2e06f9997a2dc56 100644 (file)
 #include <stdio.h>
 #include <stdarg.h>
 
-#if defined(__x86_64__)
-#  include <emmintrin.h>
-#endif
-
 #if SANITIZER_WINDOWS && defined(_MSC_VER) && _MSC_VER < 1800 &&               \
       !defined(va_copy)
 # define va_copy(dst, src) ((dst) = (src))
@@ -132,8 +128,8 @@ static int AppendPointer(char **buff, const char *buff_end, u64 ptr_value) {
 int VSNPrintf(char *buff, int buff_length,
               const char *format, va_list args) {
   static const char *kPrintfFormatsHelp =
-      "Supported Printf formats: %([0-9]*)?(z|ll)?{d,u,x,X,V}; %p; "
-      "%[-]([0-9]*)?(\\.\\*)?s; %c\n";
+      "Supported Printf formats: %([0-9]*)?(z|l|ll)?{d,u,x,X}; %p; "
+      "%[-]([0-9]*)?(\\.\\*)?s; %c\nProvided format: ";
   RAW_CHECK(format);
   RAW_CHECK(buff_length > 0);
   const char *buff_end = &buff[buff_length - 1];
@@ -164,9 +160,11 @@ int VSNPrintf(char *buff, int buff_length,
     }
     bool have_z = (*cur == 'z');
     cur += have_z;
-    bool have_ll = !have_z && (cur[0] == 'l' && cur[1] == 'l');
+    bool have_l = cur[0] == 'l' && cur[1] != 'l';
+    cur += have_l;
+    bool have_ll = cur[0] == 'l' && cur[1] == 'l';
     cur += have_ll * 2;
-    const bool have_length = have_z || have_ll;
+    const bool have_length = have_z || have_l || have_ll;
     const bool have_flags = have_width || have_length;
     // At the moment only %s supports precision and left-justification.
     CHECK(!((precision >= 0 || left_justified) && *cur != 's'));
@@ -174,6 +172,7 @@ int VSNPrintf(char *buff, int buff_length,
       case 'd': {
         s64 dval = have_ll  ? va_arg(args, s64)
                    : have_z ? va_arg(args, sptr)
+                   : have_l ? va_arg(args, long)
                             : va_arg(args, int);
         result += AppendSignedDecimal(&buff, buff_end, dval, width,
                                       pad_with_zero);
@@ -184,26 +183,20 @@ int VSNPrintf(char *buff, int buff_length,
       case 'X': {
         u64 uval = have_ll  ? va_arg(args, u64)
                    : have_z ? va_arg(args, uptr)
+                   : have_l ? va_arg(args, unsigned long)
                             : va_arg(args, unsigned);
         bool uppercase = (*cur == 'X');
         result += AppendUnsigned(&buff, buff_end, uval, (*cur == 'u') ? 10 : 16,
                                  width, pad_with_zero, uppercase);
         break;
       }
-      case 'V': {
-        for (uptr i = 0; i < 16; i++) {
-          unsigned x = va_arg(args, unsigned);
-          result += AppendUnsigned(&buff, buff_end, x, 16, 2, true, false);
-        }
-        break;
-      }
       case 'p': {
-        RAW_CHECK_MSG(!have_flags, kPrintfFormatsHelp);
+        RAW_CHECK(!have_flags, kPrintfFormatsHelp, format);
         result += AppendPointer(&buff, buff_end, va_arg(args, uptr));
         break;
       }
       case 's': {
-        RAW_CHECK_MSG(!have_length, kPrintfFormatsHelp);
+        RAW_CHECK(!have_length, kPrintfFormatsHelp, format);
         // Only left-justified width is supported.
         CHECK(!have_width || left_justified);
         result += AppendString(&buff, buff_end, left_justified ? -width : width,
@@ -211,17 +204,17 @@ int VSNPrintf(char *buff, int buff_length,
         break;
       }
       case 'c': {
-        RAW_CHECK_MSG(!have_flags, kPrintfFormatsHelp);
+        RAW_CHECK(!have_flags, kPrintfFormatsHelp, format);
         result += AppendChar(&buff, buff_end, va_arg(args, int));
         break;
       }
       case '%' : {
-        RAW_CHECK_MSG(!have_flags, kPrintfFormatsHelp);
+        RAW_CHECK(!have_flags, kPrintfFormatsHelp, format);
         result += AppendChar(&buff, buff_end, '%');
         break;
       }
       default: {
-        RAW_CHECK_MSG(false, kPrintfFormatsHelp);
+        RAW_CHECK(false, kPrintfFormatsHelp, format);
       }
     }
   }
@@ -317,7 +310,6 @@ static void NOINLINE SharedPrintfCode(bool append_pid, const char *format,
                            format, args);
 }
 
-FORMAT(1, 2)
 void Printf(const char *format, ...) {
   va_list args;
   va_start(args, format);
@@ -326,7 +318,6 @@ void Printf(const char *format, ...) {
 }
 
 // Like Printf, but prints the current PID before the output string.
-FORMAT(1, 2)
 void Report(const char *format, ...) {
   va_list args;
   va_start(args, format);
@@ -338,7 +329,6 @@ void Report(const char *format, ...) {
 // Returns the number of symbols that should have been written to buffer
 // (not including trailing '\0'). Thus, the string is truncated
 // iff return value is not less than "length".
-FORMAT(3, 4)
 int internal_snprintf(char *buffer, uptr length, const char *format, ...) {
   va_list args;
   va_start(args, format);
@@ -347,7 +337,6 @@ int internal_snprintf(char *buffer, uptr length, const char *format, ...) {
   return needed_length;
 }
 
-FORMAT(2, 3)
 void InternalScopedString::append(const char *format, ...) {
   uptr prev_len = length();
 
index cefb870f7e258a7d9988566a95a7ef1f72753823..475e577d9982e8104addcf241b24a92e1821f29a 100644 (file)
@@ -29,8 +29,16 @@ using namespace __sanitizer;
 #endif
 
 #ifndef SIGNAL_INTERCEPTOR_SIGACTION_IMPL
-#define SIGNAL_INTERCEPTOR_SIGACTION_IMPL(signum, act, oldact) \
-  { return REAL(sigaction_symname)(signum, act, oldact); }
+#  define SIGNAL_INTERCEPTOR_SIGACTION_IMPL(signum, act, oldact)              \
+    {                                                                         \
+      if (!REAL(sigaction_symname)) {                                         \
+        Printf(                                                               \
+            "Warning: REAL(sigaction_symname) == nullptr. This may happen "   \
+            "if you link with ubsan statically. Sigaction will not work.\n"); \
+        return -1;                                                            \
+      }                                                                       \
+      return REAL(sigaction_symname)(signum, act, oldact);                    \
+    }
 #endif
 
 #if SANITIZER_INTERCEPT_BSD_SIGNAL
index cb53eab8da150ea37f28e288dd1ce6369186cefa..62c40affc9ac5b4d04578c0e5e2e4f6fabbb794a 100644 (file)
@@ -225,28 +225,6 @@ void FutexWait(atomic_uint32_t *p, u32 cmp) {
 
 void FutexWake(atomic_uint32_t *p, u32 count) {}
 
-BlockingMutex::BlockingMutex() {
-  CHECK(sizeof(mutex_t) <= sizeof(opaque_storage_));
-  internal_memset(this, 0, sizeof(*this));
-  CHECK_EQ(mutex_init((mutex_t *)&opaque_storage_, USYNC_THREAD, NULL), 0);
-}
-
-void BlockingMutex::Lock() {
-  CHECK(sizeof(mutex_t) <= sizeof(opaque_storage_));
-  CHECK_NE(owner_, (uptr)thr_self());
-  CHECK_EQ(mutex_lock((mutex_t *)&opaque_storage_), 0);
-  CHECK(!owner_);
-  owner_ = (uptr)thr_self();
-}
-
-void BlockingMutex::Unlock() {
-  CHECK(owner_ == (uptr)thr_self());
-  owner_ = 0;
-  CHECK_EQ(mutex_unlock((mutex_t *)&opaque_storage_), 0);
-}
-
-void BlockingMutex::CheckLocked() const { CHECK_EQ((uptr)thr_self(), owner_); }
-
 }  // namespace __sanitizer
 
 #endif  // SANITIZER_SOLARIS
index 515deddf6b29d5c8fd8db982a8fe57ce1066e1f7..4707c6c5d00b3da3198114594d42d383e6e7317b 100644 (file)
@@ -22,7 +22,8 @@ namespace __sanitizer {
 uptr StackTrace::GetNextInstructionPc(uptr pc) {
 #if defined(__sparc__) || defined(__mips__)
   return pc + 8;
-#elif defined(__powerpc__) || defined(__arm__) || defined(__aarch64__)
+#elif defined(__powerpc__) || defined(__arm__) || defined(__aarch64__) || \
+    defined(__hexagon__)
   return pc + 4;
 #elif SANITIZER_RISCV64
   // Current check order is 4 -> 2 -> 6 -> 8
@@ -64,7 +65,7 @@ void BufferedStackTrace::Init(const uptr *pcs, uptr cnt, uptr extra_top_pc) {
   top_frame_bp = 0;
 }
 
-// Sparc implemention is in its own file.
+// Sparc implementation is in its own file.
 #if !defined(__sparc__)
 
 // In GCC on ARM bp points to saved lr, not fp, so we should check the next
@@ -85,8 +86,8 @@ static inline uhwptr *GetCanonicFrame(uptr bp,
   // Nope, this does not look right either. This means the frame after next does
   // not have a valid frame pointer, but we can still extract the caller PC.
   // Unfortunately, there is no way to decide between GCC and LLVM frame
-  // layouts. Assume GCC.
-  return bp_prev - 1;
+  // layouts. Assume LLVM.
+  return bp_prev;
 #else
   return (uhwptr*)bp;
 #endif
@@ -109,21 +110,14 @@ void BufferedStackTrace::UnwindFast(uptr pc, uptr bp, uptr stack_top,
          IsAligned((uptr)frame, sizeof(*frame)) &&
          size < max_depth) {
 #ifdef __powerpc__
-    // PowerPC ABIs specify that the return address is saved on the
-    // *caller's* stack frame.  Thus we must dereference the back chain
-    // to find the caller frame before extracting it.
+    // PowerPC ABIs specify that the return address is saved at offset
+    // 16 of the *caller's* stack frame.  Thus we must dereference the
+    // back chain to find the caller frame before extracting it.
     uhwptr *caller_frame = (uhwptr*)frame[0];
     if (!IsValidFrame((uptr)caller_frame, stack_top, bottom) ||
         !IsAligned((uptr)caller_frame, sizeof(uhwptr)))
       break;
-    // For most ABIs the offset where the return address is saved is two
-    // register sizes.  The exception is the SVR4 ABI, which uses an
-    // offset of only one register size.
-#ifdef _CALL_SYSV
-    uhwptr pc1 = caller_frame[1];
-#else
     uhwptr pc1 = caller_frame[2];
-#endif
 #elif defined(__s390__)
     uhwptr pc1 = frame[14];
 #elif defined(__riscv)
index f60ea7731748be59a0702a57f506b8726314deb4..2d1c03f732217402339851669fc8a76776ee0e7a 100644 (file)
@@ -64,7 +64,7 @@ class StackTraceTextPrinter {
       if (dedup_token_->length())
         dedup_token_->append("--");
       if (stack->info.function != nullptr)
-        dedup_token_->append(stack->info.function);
+        dedup_token_->append("%s", stack->info.function);
     }
   }
 
index c998322d394411a3740212d82d7d5932a0534d65..ad638a84a5933bc0642a6325e07438803f146ecf 100644 (file)
@@ -129,7 +129,7 @@ void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
       break;
     // Frame number and all fields of AddressInfo structure.
     case 'n':
-      buffer->append("%zu", frame_no);
+      buffer->append("%u", frame_no);
       break;
     case 'p':
       buffer->append("0x%zx", address);
@@ -198,8 +198,7 @@ void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
       }
       break;
     default:
-      Report("Unsupported specifier in stack frame format: %c (0x%zx)!\n", *p,
-             *p);
+      Report("Unsupported specifier in stack frame format: %c (%p)!\n", *p, p);
       Die();
     }
   }
@@ -244,14 +243,14 @@ void RenderData(InternalScopedString *buffer, const char *format,
         buffer->append("%s", StripPathPrefix(DI->file, strip_path_prefix));
         break;
       case 'l':
-        buffer->append("%d", DI->line);
+        buffer->append("%zu", DI->line);
         break;
       case 'g':
         buffer->append("%s", DI->name);
         break;
       default:
-        Report("Unsupported specifier in stack frame format: %c (0x%zx)!\n", *p,
-               *p);
+        Report("Unsupported specifier in stack frame format: %c (%p)!\n", *p,
+               p);
         Die();
     }
   }
index 34190fb1bbb201f3d2eab24c84f9d9da234908cb..1e635a66978f36422335e70a65c4c217c24b4352 100644 (file)
@@ -9,7 +9,7 @@
 // This file is shared between AddressSanitizer and ThreadSanitizer
 // run-time libraries.
 //
-// Implemention of fast stack unwinding for Sparc.
+// Implementation of fast stack unwinding for Sparc.
 //===----------------------------------------------------------------------===//
 
 #if defined(__sparc__)
index 53cfddcfbe0bebf00be9b43ebd9d2c5bedf07488..403bda1174cc46b7a9998e26be388c2173e93850 100644 (file)
@@ -108,7 +108,7 @@ struct TracerThreadArgument {
   void *callback_argument;
   // The tracer thread waits on this mutex while the parent finishes its
   // preparations.
-  BlockingMutex mutex;
+  Mutex mutex;
   // Tracer thread signals its completion by setting done.
   atomic_uintptr_t done;
   uptr parent_pid;
index 9c7cd64255e55ece1417894e6f48a56c42ed74b5..701db72619a3d47c4f44b7096c6a8775be460f86 100644 (file)
@@ -68,7 +68,7 @@ class SuspendedThreadsListNetBSD final : public SuspendedThreadsList {
 struct TracerThreadArgument {
   StopTheWorldCallback callback;
   void *callback_argument;
-  BlockingMutex mutex;
+  Mutex mutex;
   atomic_uintptr_t done;
   uptr parent_pid;
 };
index 2476b0ea7bf7d858e99269433fc1625f1973c9e1..42bd157fa62791a89badc05dccb84cf1f59c1e67 100644 (file)
@@ -158,7 +158,7 @@ class Symbolizer final {
   // its method should be protected by |mu_|.
   class ModuleNameOwner {
    public:
-    explicit ModuleNameOwner(BlockingMutex *synchronized_by)
+    explicit ModuleNameOwner(Mutex *synchronized_by)
         : last_match_(nullptr), mu_(synchronized_by) {
       storage_.reserve(kInitialCapacity);
     }
@@ -169,7 +169,7 @@ class Symbolizer final {
     InternalMmapVector<const char*> storage_;
     const char *last_match_;
 
-    BlockingMutex *mu_;
+    Mutex *mu_;
   } module_names_;
 
   /// Platform-specific function for creating a Symbolizer object.
@@ -192,7 +192,7 @@ class Symbolizer final {
   // Mutex locked from public methods of |Symbolizer|, so that the internals
   // (including individual symbolizer tools and platform-specific methods) are
   // always synchronized.
-  BlockingMutex mu_;
+  Mutex mu_;
 
   IntrusiveList<SymbolizerTool> tools_;
 
index 71de1758b3e9c6df3d6952dfebbce88555acc8ae..b8670941a05ec019bc6786e634e073663deba560 100644 (file)
@@ -21,7 +21,7 @@ namespace __sanitizer {
 
 // Parsing helpers, 'str' is searched for delimiter(s) and a string or uptr
 // is extracted. When extracting a string, a newly allocated (using
-// InternalAlloc) and null-terminataed buffer is returned. They return a pointer
+// InternalAlloc) and null-terminated buffer is returned. They return a pointer
 // to the next characted after the found delimiter.
 const char *ExtractToken(const char *str, const char *delims, char **result);
 const char *ExtractInt(const char *str, const char *delims, int *result);
index 98418b426c376d63ebfe55cf117980fb82661e3f..3fc994fd3deb2b387f5a168300d72470cd403baa 100644 (file)
@@ -83,7 +83,7 @@ const char *ExtractTokenUpToDelimiter(const char *str, const char *delimiter,
 }
 
 SymbolizedStack *Symbolizer::SymbolizePC(uptr addr) {
-  BlockingMutexLock l(&mu_);
+  Lock l(&mu_);
   const char *module_name = nullptr;
   uptr module_offset;
   ModuleArch arch;
@@ -103,7 +103,7 @@ SymbolizedStack *Symbolizer::SymbolizePC(uptr addr) {
 }
 
 bool Symbolizer::SymbolizeData(uptr addr, DataInfo *info) {
-  BlockingMutexLock l(&mu_);
+  Lock l(&mu_);
   const char *module_name = nullptr;
   uptr module_offset;
   ModuleArch arch;
@@ -124,7 +124,7 @@ bool Symbolizer::SymbolizeData(uptr addr, DataInfo *info) {
 }
 
 bool Symbolizer::SymbolizeFrame(uptr addr, FrameInfo *info) {
-  BlockingMutexLock l(&mu_);
+  Lock l(&mu_);
   const char *module_name = nullptr;
   if (!FindModuleNameAndOffsetForAddress(
           addr, &module_name, &info->module_offset, &info->module_arch))
@@ -141,7 +141,7 @@ bool Symbolizer::SymbolizeFrame(uptr addr, FrameInfo *info) {
 
 bool Symbolizer::GetModuleNameAndOffsetForPC(uptr pc, const char **module_name,
                                              uptr *module_address) {
-  BlockingMutexLock l(&mu_);
+  Lock l(&mu_);
   const char *internal_module_name = nullptr;
   ModuleArch arch;
   if (!FindModuleNameAndOffsetForAddress(pc, &internal_module_name,
@@ -154,7 +154,7 @@ bool Symbolizer::GetModuleNameAndOffsetForPC(uptr pc, const char **module_name,
 }
 
 void Symbolizer::Flush() {
-  BlockingMutexLock l(&mu_);
+  Lock l(&mu_);
   for (auto &tool : tools_) {
     SymbolizerScope sym_scope(this);
     tool.Flush();
@@ -162,7 +162,7 @@ void Symbolizer::Flush() {
 }
 
 const char *Symbolizer::Demangle(const char *name) {
-  BlockingMutexLock l(&mu_);
+  Lock l(&mu_);
   for (auto &tool : tools_) {
     SymbolizerScope sym_scope(this);
     if (const char *demangled = tool.Demangle(name))
diff --git a/libsanitizer/sanitizer_common/sanitizer_syscall_linux_hexagon.inc b/libsanitizer/sanitizer_common/sanitizer_syscall_linux_hexagon.inc
new file mode 100644 (file)
index 0000000..553bff7
--- /dev/null
@@ -0,0 +1,131 @@
+//===-- sanitizer_syscall_linux_hexagon.inc ---------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementations of internal_syscall and internal_iserror for Linux/hexagon.
+//
+//===----------------------------------------------------------------------===//
+
+#define SYSCALL(name) __NR_##name
+
+#define __internal_syscall_LL_E(x) \
+  ((union {                        \
+    long long ll;                  \
+    long l[2];                     \
+  }){.ll = x})                     \
+      .l[0],                       \
+      ((union {                    \
+        long long ll;              \
+        long l[2];                 \
+      }){.ll = x})                 \
+          .l[1]
+#define __internal_syscall_LL_O(x) 0, __SYSCALL_LL_E((x))
+
+#define __asm_syscall(...)                                                 \
+  do {                                                                     \
+    __asm__ __volatile__("trap0(#1)" : "=r"(r0) : __VA_ARGS__ : "memory"); \
+    return r0;                                                             \
+  } while (0)
+
+#define __internal_syscall0(n) (__internal_syscall)(n)
+
+static uptr __internal_syscall(long n) {
+  register u32 r6 __asm__("r6") = n;
+  register u32 r0 __asm__("r0");
+  __asm_syscall("r"(r6));
+}
+
+#define __internal_syscall1(n, a1) (__internal_syscall)(n, (long)(a1))
+
+static uptr __internal_syscall(long n, long a) {
+  register u32 r6 __asm__("r6") = n;
+  register u32 r0 __asm__("r0") = a;
+  __asm_syscall("r"(r6), "0"(r0));
+}
+
+#define __internal_syscall2(n, a1, a2) \
+  (__internal_syscall)(n, (long)(a1), (long)(a2))
+
+static uptr __internal_syscall(long n, long a, long b) {
+  register u32 r6 __asm__("r6") = n;
+  register u32 r0 __asm__("r0") = a;
+  register u32 r1 __asm__("r1") = b;
+  __asm_syscall("r"(r6), "0"(r0), "r"(r1));
+}
+
+#define __internal_syscall3(n, a1, a2, a3) \
+  (__internal_syscall)(n, (long)(a1), (long)(a2), (long)(a3))
+
+static uptr __internal_syscall(long n, long a, long b, long c) {
+  register u32 r6 __asm__("r6") = n;
+  register u32 r0 __asm__("r0") = a;
+  register u32 r1 __asm__("r1") = b;
+  register u32 r2 __asm__("r2") = c;
+  __asm_syscall("r"(r6), "0"(r0), "r"(r1), "r"(r2));
+}
+
+#define __internal_syscall4(n, a1, a2, a3, a4) \
+  (__internal_syscall)(n, (long)(a1), (long)(a2), (long)(a3), (long)(a4))
+
+static uptr __internal_syscall(long n, long a, long b, long c, long d) {
+  register u32 r6 __asm__("r6") = n;
+  register u32 r0 __asm__("r0") = a;
+  register u32 r1 __asm__("r1") = b;
+  register u32 r2 __asm__("r2") = c;
+  register u32 r3 __asm__("r3") = d;
+  __asm_syscall("r"(r6), "0"(r0), "r"(r1), "r"(r2), "r"(r3));
+}
+
+#define __internal_syscall5(n, a1, a2, a3, a4, a5)                        \
+  (__internal_syscall)(n, (long)(a1), (long)(a2), (long)(a3), (long)(a4), \
+                       (long)(a5))
+
+static uptr __internal_syscall(long n, long a, long b, long c, long d, long e) {
+  register u32 r6 __asm__("r6") = n;
+  register u32 r0 __asm__("r0") = a;
+  register u32 r1 __asm__("r1") = b;
+  register u32 r2 __asm__("r2") = c;
+  register u32 r3 __asm__("r3") = d;
+  register u32 r4 __asm__("r4") = e;
+  __asm_syscall("r"(r6), "0"(r0), "r"(r1), "r"(r2), "r"(r3), "r"(r4));
+}
+
+#define __internal_syscall6(n, a1, a2, a3, a4, a5, a6)                    \
+  (__internal_syscall)(n, (long)(a1), (long)(a2), (long)(a3), (long)(a4), \
+                       (long)(a5), (long)(a6))
+
+static uptr __internal_syscall(long n, long a, long b, long c, long d, long e,
+                               long f) {
+  register u32 r6 __asm__("r6") = n;
+  register u32 r0 __asm__("r0") = a;
+  register u32 r1 __asm__("r1") = b;
+  register u32 r2 __asm__("r2") = c;
+  register u32 r3 __asm__("r3") = d;
+  register u32 r4 __asm__("r4") = e;
+  register u32 r5 __asm__("r5") = f;
+  __asm_syscall("r"(r6), "0"(r0), "r"(r1), "r"(r2), "r"(r3), "r"(r4), "r"(r5));
+}
+
+#define __SYSCALL_NARGS_X(a1, a2, a3, a4, a5, a6, a7, a8, n, ...) n
+#define __SYSCALL_NARGS(...) \
+  __SYSCALL_NARGS_X(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0, )
+#define __SYSCALL_CONCAT_X(a, b) a##b
+#define __SYSCALL_CONCAT(a, b) __SYSCALL_CONCAT_X(a, b)
+#define __SYSCALL_DISP(b, ...) \
+  __SYSCALL_CONCAT(b, __SYSCALL_NARGS(__VA_ARGS__))(__VA_ARGS__)
+
+#define internal_syscall(...) __SYSCALL_DISP(__internal_syscall, __VA_ARGS__)
+
+// Helper function used to avoid clobbering of errno.
+bool internal_iserror(uptr retval, int *rverrno) {
+  if (retval >= (uptr)-4095) {
+    if (rverrno)
+      *rverrno = -retval;
+    return true;
+  }
+  return false;
+}
index 745fbf76b01f63e4cbbedfc0ad17b441cd806769..a34b8c15aa5b01c0b377f9309dbbca7be001e9ca 100644 (file)
@@ -119,7 +119,7 @@ ThreadRegistry::ThreadRegistry(ThreadContextFactory factory, u32 max_threads,
 
 void ThreadRegistry::GetNumberOfThreads(uptr *total, uptr *running,
                                         uptr *alive) {
-  BlockingMutexLock l(&mtx_);
+  ThreadRegistryLock l(this);
   if (total)
     *total = threads_.size();
   if (running) *running = running_threads_;
@@ -127,13 +127,13 @@ void ThreadRegistry::GetNumberOfThreads(uptr *total, uptr *running,
 }
 
 uptr ThreadRegistry::GetMaxAliveThreads() {
-  BlockingMutexLock l(&mtx_);
+  ThreadRegistryLock l(this);
   return max_alive_threads_;
 }
 
 u32 ThreadRegistry::CreateThread(uptr user_id, bool detached, u32 parent_tid,
                                  void *arg) {
-  BlockingMutexLock l(&mtx_);
+  ThreadRegistryLock l(this);
   u32 tid = kInvalidTid;
   ThreadContextBase *tctx = QuarantinePop();
   if (tctx) {
@@ -179,7 +179,7 @@ void ThreadRegistry::RunCallbackForEachThreadLocked(ThreadCallback cb,
 }
 
 u32 ThreadRegistry::FindThread(FindThreadCallback cb, void *arg) {
-  BlockingMutexLock l(&mtx_);
+  ThreadRegistryLock l(this);
   for (u32 tid = 0; tid < threads_.size(); tid++) {
     ThreadContextBase *tctx = threads_[tid];
     if (tctx != 0 && cb(tctx, arg))
@@ -211,7 +211,7 @@ ThreadContextBase *ThreadRegistry::FindThreadContextByOsIDLocked(tid_t os_id) {
 }
 
 void ThreadRegistry::SetThreadName(u32 tid, const char *name) {
-  BlockingMutexLock l(&mtx_);
+  ThreadRegistryLock l(this);
   ThreadContextBase *tctx = threads_[tid];
   CHECK_NE(tctx, 0);
   CHECK_EQ(SANITIZER_FUCHSIA ? ThreadStatusCreated : ThreadStatusRunning,
@@ -220,7 +220,7 @@ void ThreadRegistry::SetThreadName(u32 tid, const char *name) {
 }
 
 void ThreadRegistry::SetThreadNameByUserId(uptr user_id, const char *name) {
-  BlockingMutexLock l(&mtx_);
+  ThreadRegistryLock l(this);
   for (u32 tid = 0; tid < threads_.size(); tid++) {
     ThreadContextBase *tctx = threads_[tid];
     if (tctx != 0 && tctx->user_id == user_id &&
@@ -232,7 +232,7 @@ void ThreadRegistry::SetThreadNameByUserId(uptr user_id, const char *name) {
 }
 
 void ThreadRegistry::DetachThread(u32 tid, void *arg) {
-  BlockingMutexLock l(&mtx_);
+  ThreadRegistryLock l(this);
   ThreadContextBase *tctx = threads_[tid];
   CHECK_NE(tctx, 0);
   if (tctx->status == ThreadStatusInvalid) {
@@ -252,7 +252,7 @@ void ThreadRegistry::JoinThread(u32 tid, void *arg) {
   bool destroyed = false;
   do {
     {
-      BlockingMutexLock l(&mtx_);
+      ThreadRegistryLock l(this);
       ThreadContextBase *tctx = threads_[tid];
       CHECK_NE(tctx, 0);
       if (tctx->status == ThreadStatusInvalid) {
@@ -275,7 +275,7 @@ void ThreadRegistry::JoinThread(u32 tid, void *arg) {
 // thread before trying to create it, and then failed to actually
 // create it, and so never called StartThread.
 ThreadStatus ThreadRegistry::FinishThread(u32 tid) {
-  BlockingMutexLock l(&mtx_);
+  ThreadRegistryLock l(this);
   CHECK_GT(alive_threads_, 0);
   alive_threads_--;
   ThreadContextBase *tctx = threads_[tid];
@@ -301,7 +301,7 @@ ThreadStatus ThreadRegistry::FinishThread(u32 tid) {
 
 void ThreadRegistry::StartThread(u32 tid, tid_t os_id, ThreadType thread_type,
                                  void *arg) {
-  BlockingMutexLock l(&mtx_);
+  ThreadRegistryLock l(this);
   running_threads_++;
   ThreadContextBase *tctx = threads_[tid];
   CHECK_NE(tctx, 0);
@@ -334,7 +334,7 @@ ThreadContextBase *ThreadRegistry::QuarantinePop() {
 }
 
 void ThreadRegistry::SetThreadUserId(u32 tid, uptr user_id) {
-  BlockingMutexLock l(&mtx_);
+  ThreadRegistryLock l(this);
   ThreadContextBase *tctx = threads_[tid];
   CHECK_NE(tctx, 0);
   CHECK_NE(tctx->status, ThreadStatusInvalid);
index 0b28bbe6ddf6e06fd0636299fd796de49b8ce769..a8a4d4d86a03eb33722ce87cfa36e85279f8ecf3 100644 (file)
@@ -135,7 +135,7 @@ class MUTEX ThreadRegistry {
   const u32 thread_quarantine_size_;
   const u32 max_reuse_;
 
-  BlockingMutex mtx_;
+  Mutex mtx_;
 
   u64 total_threads_;   // Total number of created threads. May be greater than
                         // max_threads_ if contexts were reused.
index 1f664b6cf5b8fbc772677566ace589d61a2878fb..ce5e85df1553eddf73bdce5c3f05731253ef41d5 100644 (file)
@@ -44,7 +44,7 @@ static atomic_uintptr_t number_of_live_dtls;
 static const uptr kDestroyedThread = -1;
 
 static void DTLS_Deallocate(DTLS::DTVBlock *block) {
-  VReport(2, "__tls_get_addr: DTLS_Deallocate %p %zd\n", block);
+  VReport(2, "__tls_get_addr: DTLS_Deallocate %p\n", block);
   UnmapOrDie(block, sizeof(DTLS::DTVBlock));
   atomic_fetch_sub(&number_of_live_dtls, 1, memory_order_relaxed);
 }
@@ -117,26 +117,27 @@ DTLS::DTV *DTLS_on_tls_get_addr(void *arg_void, void *res,
     return 0;
   uptr tls_size = 0;
   uptr tls_beg = reinterpret_cast<uptr>(res) - arg->offset - kDtvOffset;
-  VReport(2, "__tls_get_addr: %p {%p,%p} => %p; tls_beg: %p; sp: %p "
-             "num_live_dtls %zd\n",
+  VReport(2,
+          "__tls_get_addr: %p {0x%zx,0x%zx} => %p; tls_beg: 0x%zx; sp: %p "
+          "num_live_dtls %zd\n",
           arg, arg->dso_id, arg->offset, res, tls_beg, &tls_beg,
           atomic_load(&number_of_live_dtls, memory_order_relaxed));
   if (dtls.last_memalign_ptr == tls_beg) {
     tls_size = dtls.last_memalign_size;
-    VReport(2, "__tls_get_addr: glibc <=2.18 suspected; tls={%p,%p}\n",
-        tls_beg, tls_size);
+    VReport(2, "__tls_get_addr: glibc <=2.18 suspected; tls={0x%zx,0x%zx}\n",
+            tls_beg, tls_size);
   } else if (tls_beg >= static_tls_begin && tls_beg < static_tls_end) {
     // This is the static TLS block which was initialized / unpoisoned at thread
     // creation.
-    VReport(2, "__tls_get_addr: static tls: %p\n", tls_beg);
+    VReport(2, "__tls_get_addr: static tls: 0x%zx\n", tls_beg);
     tls_size = 0;
   } else if ((tls_beg % 4096) == sizeof(Glibc_2_19_tls_header)) {
     // We may want to check gnu_get_libc_version().
     Glibc_2_19_tls_header *header = (Glibc_2_19_tls_header *)tls_beg - 1;
     tls_size = header->size;
     tls_beg = header->start;
-    VReport(2, "__tls_get_addr: glibc >=2.19 suspected; tls={%p %p}\n",
-        tls_beg, tls_size);
+    VReport(2, "__tls_get_addr: glibc >=2.19 suspected; tls={0x%zx 0x%zx}\n",
+            tls_beg, tls_size);
   } else {
     VReport(2, "__tls_get_addr: Can't guess glibc version\n");
     // This may happen inside the DTOR of main thread, so just ignore it.
@@ -149,7 +150,7 @@ DTLS::DTV *DTLS_on_tls_get_addr(void *arg_void, void *res,
 
 void DTLS_on_libc_memalign(void *ptr, uptr size) {
   if (!common_flags()->intercept_tls_get_addr) return;
-  VReport(2, "DTLS_on_libc_memalign: %p %p\n", ptr, size);
+  VReport(2, "DTLS_on_libc_memalign: %p 0x%zx\n", ptr, size);
   dtls.last_memalign_ptr = reinterpret_cast<uptr>(ptr);
   dtls.last_memalign_size = size;
 }
index dddd885a45dd24a87fe6b6892b554e6fc63058f2..811aa497d97dc801055262c93ff496f39aadaf95 100644 (file)
@@ -16,6 +16,7 @@
 
 #define WIN32_LEAN_AND_MEAN
 #define NOGDI
+#include <direct.h>
 #include <windows.h>
 #include <io.h>
 #include <psapi.h>
@@ -565,6 +566,8 @@ void Abort() {
   internal__exit(3);
 }
 
+bool CreateDir(const char *pathname) { return _mkdir(pathname) == 0; }
+
 #if !SANITIZER_GO
 // Read the file to extract the ImageBase field from the PE header. If ASLR is
 // disabled and this virtual address is available, the loader will typically
@@ -827,27 +830,6 @@ void FutexWake(atomic_uint32_t *p, u32 count) {
     WakeByAddressAll(p);
 }
 
-// ---------------------- BlockingMutex ---------------- {{{1
-
-BlockingMutex::BlockingMutex() {
-  CHECK(sizeof(SRWLOCK) <= sizeof(opaque_storage_));
-  internal_memset(this, 0, sizeof(*this));
-}
-
-void BlockingMutex::Lock() {
-  AcquireSRWLockExclusive((PSRWLOCK)opaque_storage_);
-  CHECK_EQ(owner_, 0);
-  owner_ = GetThreadSelf();
-}
-
-void BlockingMutex::Unlock() {
-  CheckLocked();
-  owner_ = 0;
-  ReleaseSRWLockExclusive((PSRWLOCK)opaque_storage_);
-}
-
-void BlockingMutex::CheckLocked() const { CHECK_EQ(owner_, GetThreadSelf()); }
-
 uptr GetTlsSize() {
   return 0;
 }
index dcb247ec032c17c13e9750fbebfbeb0ea72d4f2d..9dc11f79072b90a0a70789920be457c63c8e357b 100644 (file)
@@ -29,7 +29,6 @@ tsan_files = \
        tsan_malloc_mac.cpp \
        tsan_md5.cpp \
        tsan_mman.cpp \
-       tsan_mutex.cpp \
        tsan_mutexset.cpp \
        tsan_new_delete.cpp \
        tsan_platform_linux.cpp \
@@ -45,7 +44,8 @@ tsan_files = \
        tsan_stack_trace.cpp \
        tsan_suppressions.cpp \
        tsan_symbolize.cpp \
-       tsan_sync.cpp 
+       tsan_sync.cpp \
+       tsan_vector_clock.cpp
 
 libtsan_la_SOURCES = $(tsan_files)
 EXTRA_libtsan_la_SOURCES = tsan_rtl_amd64.S tsan_rtl_aarch64.S tsan_rtl_mips64.S tsan_rtl_ppc64.S tsan_rtl_s390x.S
index 83617cf6ab7f714ce73b5eec926de1c05024b37e..921a78c7484703f6c9aa9a3cd9af4aacdcb873eb 100644 (file)
@@ -150,12 +150,13 @@ am__objects_1 = tsan_clock.lo tsan_debugging.lo tsan_external.lo \
        tsan_interceptors_posix.lo tsan_interceptors_mac.lo \
        tsan_interface_ann.lo tsan_interface_atomic.lo \
        tsan_interface.lo tsan_interface_java.lo tsan_malloc_mac.lo \
-       tsan_md5.lo tsan_mman.lo tsan_mutex.lo tsan_mutexset.lo \
-       tsan_new_delete.lo tsan_platform_linux.lo tsan_platform_mac.lo \
+       tsan_md5.lo tsan_mman.lo tsan_mutexset.lo tsan_new_delete.lo \
+       tsan_platform_linux.lo tsan_platform_mac.lo \
        tsan_platform_posix.lo tsan_platform_windows.lo tsan_report.lo \
        tsan_rtl.lo tsan_rtl_mutex.lo tsan_rtl_proc.lo \
        tsan_rtl_report.lo tsan_rtl_thread.lo tsan_stack_trace.lo \
-       tsan_suppressions.lo tsan_symbolize.lo tsan_sync.lo
+       tsan_suppressions.lo tsan_symbolize.lo tsan_sync.lo \
+       tsan_vector_clock.lo
 am_libtsan_la_OBJECTS = $(am__objects_1)
 libtsan_la_OBJECTS = $(am_libtsan_la_OBJECTS)
 AM_V_lt = $(am__v_lt_@AM_V@)
@@ -431,7 +432,6 @@ tsan_files = \
        tsan_malloc_mac.cpp \
        tsan_md5.cpp \
        tsan_mman.cpp \
-       tsan_mutex.cpp \
        tsan_mutexset.cpp \
        tsan_new_delete.cpp \
        tsan_platform_linux.cpp \
@@ -447,7 +447,8 @@ tsan_files = \
        tsan_stack_trace.cpp \
        tsan_suppressions.cpp \
        tsan_symbolize.cpp \
-       tsan_sync.cpp 
+       tsan_sync.cpp \
+       tsan_vector_clock.cpp
 
 libtsan_la_SOURCES = $(tsan_files)
 EXTRA_libtsan_la_SOURCES = tsan_rtl_amd64.S tsan_rtl_aarch64.S tsan_rtl_mips64.S tsan_rtl_ppc64.S tsan_rtl_s390x.S
@@ -594,7 +595,6 @@ distclean-compile:
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_malloc_mac.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_md5.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_mman.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_mutex.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_mutexset.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_new_delete.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_platform_linux.Plo@am__quote@
@@ -616,6 +616,7 @@ distclean-compile:
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_suppressions.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_symbolize.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_sync.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_vector_clock.Plo@am__quote@
 
 .S.o:
 @am__fastdepCCAS_TRUE@ $(AM_V_CPPAS)$(CPPASCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
index 61848c21d162b4c11b0ee6e36a4300c3704eeb6c..d122b67c0aaa5fc9016d5d208120d38e75a543dc 100644 (file)
@@ -72,9 +72,9 @@
 // clk_ - variable size vector clock, low kClkBits hold timestamp,
 //   the remaining bits hold "acquired" flag (the actual value is thread's
 //   reused counter);
-//   if acquried == thr->reused_, then the respective thread has already
+//   if acquired == thr->reused_, then the respective thread has already
 //   acquired this clock (except possibly for dirty elements).
-// dirty_ - holds up to two indeces in the vector clock that other threads
+// dirty_ - holds up to two indices in the vector clock that other threads
 //   need to acquire regardless of "acquired" flag value;
 // release_store_tid_ - denotes that the clock state is a result of
 //   release-store operation by the thread with release_store_tid_ index.
@@ -272,7 +272,7 @@ void ThreadClock::ReleaseStore(ClockCache *c, SyncClock *dst) {
     // we could update the existing clock and cache it, or replace it with the
     // currently cached clock and release the old one. And for a shared
     // existing clock, we could replace it with the currently cached;
-    // or unshare, update and cache. But, for simplicity, we currnetly reuse
+    // or unshare, update and cache. But, for simplicity, we currently reuse
     // cached clock only when the target clock is empty.
     dst->tab_ = ctx->clock_alloc.Map(cached_idx_);
     dst->tab_idx_ = cached_idx_;
@@ -285,7 +285,7 @@ void ThreadClock::ReleaseStore(ClockCache *c, SyncClock *dst) {
     dst->dirty_[0].epoch = clk_[tid_];
     dst->release_store_tid_ = tid_;
     dst->release_store_reused_ = reused_;
-    // Rememeber that we don't need to acquire it in future.
+    // Remember that we don't need to acquire it in future.
     dst->elem(tid_).reused = reused_;
     // Grab a reference.
     atomic_fetch_add(ref_ptr(dst->tab_), 1, memory_order_relaxed);
@@ -316,7 +316,7 @@ void ThreadClock::ReleaseStore(ClockCache *c, SyncClock *dst) {
   for (uptr i = 0; i < kDirtyTids; i++) dst->dirty_[i].set_tid(kInvalidTid);
   dst->release_store_tid_ = tid_;
   dst->release_store_reused_ = reused_;
-  // Rememeber that we don't need to acquire it in future.
+  // Remember that we don't need to acquire it in future.
   dst->elem(tid_).reused = reused_;
 
   // If the resulting clock is cachable, cache it for future release operations.
index 31376a1bc9e2f4953aa99ac38cdb9b26cb1a4c2e..11cbc0c0b86b64c6a30486d1d59f1e1ff283fecb 100644 (file)
@@ -213,7 +213,7 @@ class ThreadClock {
   // We reuse it for subsequent store-release operations without intervening
   // acquire operations. Since it is shared (and thus constant), clock value
   // for the current thread is then stored in dirty entries in the SyncClock.
-  // We host a refernece to the table while it is cached here.
+  // We host a reference to the table while it is cached here.
   u32 cached_idx_;
   u16 cached_size_;
   u16 cached_blocks_;
index d3d6255090b758b46c0189d9ede0c55d9d0af1df..1d3c3849a446303759ff9bd9a480a66730c9a524 100644 (file)
@@ -195,9 +195,9 @@ const char *__tsan_locate_address(uptr addr, char *name, uptr name_size,
   const char *region_kind = nullptr;
   if (name && name_size > 0) name[0] = 0;
 
-  if (IsMetaMem(addr)) {
+  if (IsMetaMem(reinterpret_cast<u32 *>(addr))) {
     region_kind = "meta shadow";
-  } else if (IsShadowMem(addr)) {
+  } else if (IsShadowMem(reinterpret_cast<RawShadow *>(addr))) {
     region_kind = "shadow";
   } else {
     bool is_stack = false;
@@ -215,9 +215,9 @@ const char *__tsan_locate_address(uptr addr, char *name, uptr name_size,
     } else {
       // TODO(kuba.brecka): We should not lock. This is supposed to be called
       // from within the debugger when other threads are stopped.
-      ctx->thread_registry->Lock();
+      ctx->thread_registry.Lock();
       ThreadContext *tctx = IsThreadStackOrTls(addr, &is_stack);
-      ctx->thread_registry->Unlock();
+      ctx->thread_registry.Unlock();
       if (tctx) {
         region_kind = is_stack ? "stack" : "tls";
       } else {
@@ -252,7 +252,7 @@ int __tsan_get_alloc_stack(uptr addr, uptr *trace, uptr size, int *thread_id,
   *thread_id = b->tid;
   // No locking.  This is supposed to be called from within the debugger when
   // other threads are stopped.
-  ThreadContextBase *tctx = ctx->thread_registry->GetThreadLocked(b->tid);
+  ThreadContextBase *tctx = ctx->thread_registry.GetThreadLocked(b->tid);
   *os_id = tctx->os_id;
 
   StackTrace stack = StackDepotGet(b->stk);
index 5c8f2801b0c2b00b8aa1e22fecdced75d86db3cb..fe0c1da31599b18bbf885d4f1a1ad8a4a4a671b4 100644 (file)
 
 #include "sanitizer_common/sanitizer_internal_defs.h"
 #include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_mutex.h"
 #include "ubsan/ubsan_platform.h"
 
+#ifndef TSAN_VECTORIZE
+#  define TSAN_VECTORIZE __SSE4_2__
+#endif
+
+#if TSAN_VECTORIZE
+// <emmintrin.h> transitively includes <stdlib.h>,
+// and it's prohibited to include std headers into tsan runtime.
+// So we do this dirty trick.
+#  define _MM_MALLOC_H_INCLUDED
+#  define __MM_MALLOC_H
+#  include <emmintrin.h>
+#  include <smmintrin.h>
+#  define VECTOR_ALIGNED ALIGNED(16)
+typedef __m128i m128;
+#else
+#  define VECTOR_ALIGNED
+#endif
+
 // Setup defaults for compile definitions.
 #ifndef TSAN_NO_HISTORY
 # define TSAN_NO_HISTORY 0
 
 namespace __tsan {
 
+constexpr uptr kByteBits = 8;
+
+// Thread slot ID.
+enum class Sid : u8 {};
+constexpr uptr kThreadSlotCount = 256;
+constexpr Sid kFreeSid = static_cast<Sid>(255);
+
+// Abstract time unit, vector clock element.
+enum class Epoch : u16 {};
+constexpr uptr kEpochBits = 14;
+constexpr Epoch kEpochZero = static_cast<Epoch>(0);
+constexpr Epoch kEpochOver = static_cast<Epoch>(1 << kEpochBits);
+
 const int kClkBits = 42;
 const unsigned kMaxTidReuse = (1 << (64 - kClkBits)) - 1;
 
@@ -74,8 +106,9 @@ const uptr kShadowCnt = 4;
 // That many user bytes are mapped onto a single shadow cell.
 const uptr kShadowCell = 8;
 
-// Size of a single shadow value (u64).
-const uptr kShadowSize = 8;
+// Single shadow value.
+typedef u64 RawShadow;
+const uptr kShadowSize = sizeof(RawShadow);
 
 // Shadow memory is kShadowMultiplier times larger than user memory.
 const uptr kShadowMultiplier = kShadowSize * kShadowCnt / kShadowCell;
@@ -87,6 +120,9 @@ const uptr kMetaShadowCell = 8;
 // Size of a single meta shadow value (u32).
 const uptr kMetaShadowSize = 4;
 
+// All addresses and PCs are assumed to be compressable to that many bits.
+const uptr kCompressedAddrBits = 44;
+
 #if TSAN_NO_HISTORY
 const bool kCollectHistory = false;
 #else
@@ -153,12 +189,23 @@ struct ReportStack;
 class ReportDesc;
 class RegionAlloc;
 
+typedef uptr AccessType;
+
+enum : AccessType {
+  kAccessWrite = 0,
+  kAccessRead = 1 << 0,
+  kAccessAtomic = 1 << 1,
+  kAccessVptr = 1 << 2,  // read or write of an object virtual table pointer
+  kAccessFree = 1 << 3,  // synthetic memory access during memory freeing
+  kAccessExternalPC = 1 << 4,  // access PC can have kExternalPCBit set
+};
+
 // Descriptor of user's memory block.
 struct MBlock {
   u64  siz : 48;
   u64  tag : 16;
-  u32  stk;
-  u16  tid;
+  StackID stk;
+  Tid tid;
 };
 
 COMPILER_CHECK(sizeof(MBlock) == 16);
@@ -172,6 +219,17 @@ enum ExternalTag : uptr {
   // as 16-bit values, see tsan_defs.h.
 };
 
+enum MutexType {
+  MutexTypeTrace = MutexLastCommon,
+  MutexTypeReport,
+  MutexTypeSyncVar,
+  MutexTypeAnnotations,
+  MutexTypeAtExit,
+  MutexTypeFired,
+  MutexTypeRacy,
+  MutexTypeGlobalProc,
+};
+
 }  // namespace __tsan
 
 #endif  // TSAN_DEFS_H
index 6c89e40598042e730ba089dbf98e36bcc7e08c7a..9e15f74a0615219fbb0d27a3388d391c1fb2a31e 100644 (file)
@@ -20,7 +20,6 @@
 
 #include "sanitizer_common/sanitizer_common.h"
 #include "tsan_defs.h"
-#include "tsan_mutex.h"
 
 namespace __tsan {
 
@@ -50,11 +49,7 @@ class DenseSlabAlloc {
   static_assert(sizeof(T) > sizeof(IndexT),
                 "it doesn't make sense to use dense alloc");
 
-  explicit DenseSlabAlloc(LinkerInitialized, const char *name) {
-    freelist_ = 0;
-    fillpos_ = 0;
-    name_ = name;
-  }
+  DenseSlabAlloc(LinkerInitialized, const char *name) : name_(name) {}
 
   explicit DenseSlabAlloc(const char *name)
       : DenseSlabAlloc(LINKER_INITIALIZED, name) {
@@ -90,6 +85,8 @@ class DenseSlabAlloc {
   }
 
   void FlushCache(Cache *c) {
+    if (!c->pos)
+      return;
     SpinMutexLock lock(&mtx_);
     while (c->pos) {
       IndexT idx = c->cache[--c->pos];
@@ -103,33 +100,39 @@ class DenseSlabAlloc {
     internal_memset(c->cache, 0, sizeof(c->cache));
   }
 
+  uptr AllocatedMemory() const {
+    return atomic_load_relaxed(&fillpos_) * kL2Size * sizeof(T);
+  }
+
  private:
   T *map_[kL1Size];
   SpinMutex mtx_;
-  IndexT freelist_;
-  uptr fillpos_;
-  const char *name_;
+  IndexT freelist_ = {0};
+  atomic_uintptr_t fillpos_ = {0};
+  const char *const name_;
 
   void Refill(Cache *c) {
     SpinMutexLock lock(&mtx_);
     if (freelist_ == 0) {
-      if (fillpos_ == kL1Size) {
+      uptr fillpos = atomic_load_relaxed(&fillpos_);
+      if (fillpos == kL1Size) {
         Printf("ThreadSanitizer: %s overflow (%zu*%zu). Dying.\n",
             name_, kL1Size, kL2Size);
         Die();
       }
-      VPrintf(2, "ThreadSanitizer: growing %s: %zu out of %zu*%zu\n",
-          name_, fillpos_, kL1Size, kL2Size);
+      VPrintf(2, "ThreadSanitizer: growing %s: %zu out of %zu*%zu\n", name_,
+              fillpos, kL1Size, kL2Size);
       T *batch = (T*)MmapOrDie(kL2Size * sizeof(T), name_);
       // Reserve 0 as invalid index.
-      IndexT start = fillpos_ == 0 ? 1 : 0;
+      IndexT start = fillpos == 0 ? 1 : 0;
       for (IndexT i = start; i < kL2Size; i++) {
         new(batch + i) T;
-        *(IndexT*)(batch + i) = i + 1 + fillpos_ * kL2Size;
+        *(IndexT *)(batch + i) = i + 1 + fillpos * kL2Size;
       }
       *(IndexT*)(batch + kL2Size - 1) = 0;
-      freelist_ = fillpos_ * kL2Size + start;
-      map_[fillpos_++] = batch;
+      freelist_ = fillpos * kL2Size + start;
+      map_[fillpos] = batch;
+      atomic_store_relaxed(&fillpos_, fillpos + 1);
     }
     for (uptr i = 0; i < Cache::kSize / 2 && freelist_ != 0; i++) {
       IndexT idx = freelist_;
index a87e12f2936f3c58b35eba9151b38a0d34306a0a..19ae174f20a595db3acf3d493569c0577d055561 100644 (file)
 //
 //===----------------------------------------------------------------------===//
 #include "tsan_rtl.h"
-#include "tsan_interceptors.h"
 #include "sanitizer_common/sanitizer_ptrauth.h"
 
+#if !SANITIZER_GO
+#  include "tsan_interceptors.h"
+#endif
+
 namespace __tsan {
 
 #define CALLERPC ((uptr)__builtin_return_address(0))
@@ -57,16 +60,14 @@ uptr TagFromShadowStackFrame(uptr pc) {
 
 #if !SANITIZER_GO
 
-typedef void(*AccessFunc)(ThreadState *, uptr, uptr, int);
-void ExternalAccess(void *addr, uptr caller_pc, void *tag, AccessFunc access) {
+void ExternalAccess(void *addr, uptr caller_pc, void *tag, AccessType typ) {
   CHECK_LT(tag, atomic_load(&used_tags, memory_order_relaxed));
   ThreadState *thr = cur_thread();
   if (caller_pc) FuncEntry(thr, caller_pc);
   InsertShadowStackFrameForTag(thr, (uptr)tag);
   bool in_ignored_lib;
-  if (!caller_pc || !libignore()->IsIgnored(caller_pc, &in_ignored_lib)) {
-    access(thr, CALLERPC, (uptr)addr, kSizeLog1);
-  }
+  if (!caller_pc || !libignore()->IsIgnored(caller_pc, &in_ignored_lib))
+    MemoryAccess(thr, CALLERPC, (uptr)addr, 1, typ);
   FuncExit(thr);
   if (caller_pc) FuncExit(thr);
 }
@@ -92,7 +93,7 @@ void __tsan_external_register_header(void *tag, const char *header) {
   header = internal_strdup(header);
   char *old_header =
       (char *)atomic_exchange(header_ptr, (uptr)header, memory_order_seq_cst);
-  if (old_header) internal_free(old_header);
+  Free(old_header);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
@@ -111,12 +112,12 @@ void __tsan_external_assign_tag(void *addr, void *tag) {
 
 SANITIZER_INTERFACE_ATTRIBUTE
 void __tsan_external_read(void *addr, void *caller_pc, void *tag) {
-  ExternalAccess(addr, STRIP_PAC_PC(caller_pc), tag, MemoryRead);
+  ExternalAccess(addr, STRIP_PAC_PC(caller_pc), tag, kAccessRead);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 void __tsan_external_write(void *addr, void *caller_pc, void *tag) {
-  ExternalAccess(addr, STRIP_PAC_PC(caller_pc), tag, MemoryWrite);
+  ExternalAccess(addr, STRIP_PAC_PC(caller_pc), tag, kAccessWrite);
 }
 }  // extern "C"
 
index 50a6b56916aa7d4667c68907b5e8305871bdfef3..255ffa8daf7604a6225d387471b393ac403d3074 100644 (file)
@@ -26,8 +26,8 @@ struct FdSync {
 
 struct FdDesc {
   FdSync *sync;
-  int creation_tid;
-  u32 creation_stack;
+  Tid creation_tid;
+  StackID creation_stack;
 };
 
 struct FdContext {
@@ -115,7 +115,7 @@ static void init(ThreadState *thr, uptr pc, int fd, FdSync *s,
     MemoryRangeImitateWrite(thr, pc, (uptr)d, 8);
   } else {
     // See the dup-related comment in FdClose.
-    MemoryRead(thr, pc, (uptr)d, kSizeLog8);
+    MemoryAccess(thr, pc, (uptr)d, 8, kAccessRead);
   }
 }
 
@@ -140,7 +140,7 @@ void FdOnFork(ThreadState *thr, uptr pc) {
   }
 }
 
-bool FdLocation(uptr addr, int *fd, int *tid, u32 *stack) {
+bool FdLocation(uptr addr, int *fd, Tid *tid, StackID *stack) {
   for (int l1 = 0; l1 < kTableSizeL1; l1++) {
     FdDesc *tab = (FdDesc*)atomic_load(&fdctx.tab[l1], memory_order_relaxed);
     if (tab == 0)
@@ -163,7 +163,7 @@ void FdAcquire(ThreadState *thr, uptr pc, int fd) {
   FdDesc *d = fddesc(thr, pc, fd);
   FdSync *s = d->sync;
   DPrintf("#%d: FdAcquire(%d) -> %p\n", thr->tid, fd, s);
-  MemoryRead(thr, pc, (uptr)d, kSizeLog8);
+  MemoryAccess(thr, pc, (uptr)d, 8, kAccessRead);
   if (s)
     Acquire(thr, pc, (uptr)s);
 }
@@ -174,7 +174,7 @@ void FdRelease(ThreadState *thr, uptr pc, int fd) {
   FdDesc *d = fddesc(thr, pc, fd);
   FdSync *s = d->sync;
   DPrintf("#%d: FdRelease(%d) -> %p\n", thr->tid, fd, s);
-  MemoryRead(thr, pc, (uptr)d, kSizeLog8);
+  MemoryAccess(thr, pc, (uptr)d, 8, kAccessRead);
   if (s)
     Release(thr, pc, (uptr)s);
 }
@@ -184,7 +184,7 @@ void FdAccess(ThreadState *thr, uptr pc, int fd) {
   if (bogusfd(fd))
     return;
   FdDesc *d = fddesc(thr, pc, fd);
-  MemoryRead(thr, pc, (uptr)d, kSizeLog8);
+  MemoryAccess(thr, pc, (uptr)d, 8, kAccessRead);
 }
 
 void FdClose(ThreadState *thr, uptr pc, int fd, bool write) {
@@ -194,7 +194,7 @@ void FdClose(ThreadState *thr, uptr pc, int fd, bool write) {
   FdDesc *d = fddesc(thr, pc, fd);
   if (write) {
     // To catch races between fd usage and close.
-    MemoryWrite(thr, pc, (uptr)d, kSizeLog8);
+    MemoryAccess(thr, pc, (uptr)d, 8, kAccessWrite);
   } else {
     // This path is used only by dup2/dup3 calls.
     // We do read instead of write because there is a number of legitimate
@@ -204,15 +204,15 @@ void FdClose(ThreadState *thr, uptr pc, int fd, bool write) {
     // 2. Some daemons dup /dev/null in place of stdin/stdout.
     // On the other hand we have not seen cases when write here catches real
     // bugs.
-    MemoryRead(thr, pc, (uptr)d, kSizeLog8);
+    MemoryAccess(thr, pc, (uptr)d, 8, kAccessRead);
   }
   // We need to clear it, because if we do not intercept any call out there
   // that creates fd, we will hit false postives.
   MemoryResetRange(thr, pc, (uptr)d, 8);
   unref(thr, pc, d->sync);
   d->sync = 0;
-  d->creation_tid = 0;
-  d->creation_stack = 0;
+  d->creation_tid = kInvalidTid;
+  d->creation_stack = kInvalidStackID;
 }
 
 void FdFileCreate(ThreadState *thr, uptr pc, int fd) {
@@ -228,7 +228,7 @@ void FdDup(ThreadState *thr, uptr pc, int oldfd, int newfd, bool write) {
     return;
   // Ignore the case when user dups not yet connected socket.
   FdDesc *od = fddesc(thr, pc, oldfd);
-  MemoryRead(thr, pc, (uptr)od, kSizeLog8);
+  MemoryAccess(thr, pc, (uptr)od, 8, kAccessRead);
   FdClose(thr, pc, newfd, write);
   init(thr, pc, newfd, ref(od->sync), write);
 }
index ce4f2f73bac675bac8fa21a3bf5c0535d8f8c497..d9648178481c6704c62c400cf9a5b047a4d9e7fa 100644 (file)
@@ -53,7 +53,7 @@ void FdSocketCreate(ThreadState *thr, uptr pc, int fd);
 void FdSocketAccept(ThreadState *thr, uptr pc, int fd, int newfd);
 void FdSocketConnecting(ThreadState *thr, uptr pc, int fd);
 void FdSocketConnect(ThreadState *thr, uptr pc, int fd);
-bool FdLocation(uptr addr, int *fd, int *tid, u32 *stack);
+bool FdLocation(uptr addr, int *fd, Tid *tid, StackID *stack);
 void FdOnFork(ThreadState *thr, uptr pc);
 
 uptr File2addr(const char *path);
index 49e4a9c21da9c7a7ea5e25ac151ab71e681e126a..ee89862d17bd89440ed8a1b09ee11406b7846849 100644 (file)
@@ -55,6 +55,7 @@ void InitializeFlags(Flags *f, const char *env, const char *env_option_name) {
     // Override some common flags defaults.
     CommonFlags cf;
     cf.CopyFrom(*common_flags());
+    cf.external_symbolizer_path = GetEnv("TSAN_SYMBOLIZER_PATH");
     cf.allow_addr2line = true;
     if (SANITIZER_GO) {
       // Does not work as expected for Go: runtime handles SIGABRT and crashes.
index 2105c754486f08cb666ac77b179ff8410f5ae6ff..7954a4307fa1e04e381f144b2339c9db7f2aff23 100644 (file)
@@ -43,7 +43,6 @@ TSAN_FLAG(
     bool, force_seq_cst_atomics, false,
     "If set, all atomics are effectively sequentially consistent (seq_cst), "
     "regardless of what user actually specified.")
-TSAN_FLAG(bool, print_benign, false, "Print matched \"benign\" races at exit.")
 TSAN_FLAG(bool, halt_on_error, false, "Exit after first reported error.")
 TSAN_FLAG(int, atexit_sleep_ms, 1000,
           "Sleep in main thread before exiting for that many ms "
index f6e41f668618887b39712c321a28ed66371abb53..1fca1cf4f9fcf254ccf838eb1e58751b77058f1e 100644 (file)
@@ -19,7 +19,7 @@ IgnoreSet::IgnoreSet()
     : size_() {
 }
 
-void IgnoreSet::Add(u32 stack_id) {
+void IgnoreSet::Add(StackID stack_id) {
   if (size_ == kMaxSize)
     return;
   for (uptr i = 0; i < size_; i++) {
@@ -29,15 +29,7 @@ void IgnoreSet::Add(u32 stack_id) {
   stacks_[size_++] = stack_id;
 }
 
-void IgnoreSet::Reset() {
-  size_ = 0;
-}
-
-uptr IgnoreSet::Size() const {
-  return size_;
-}
-
-u32 IgnoreSet::At(uptr i) const {
+StackID IgnoreSet::At(uptr i) const {
   CHECK_LT(i, size_);
   CHECK_LE(size_, kMaxSize);
   return stacks_[i];
index 3e318bd674d975be21a7fe5c26deaf7d0027ac4a..4e2511291ce4d608c83dac318156748bd9682d23 100644 (file)
@@ -19,17 +19,16 @@ namespace __tsan {
 
 class IgnoreSet {
  public:
-  static const uptr kMaxSize = 16;
-
   IgnoreSet();
-  void Add(u32 stack_id);
-  void Reset();
-  uptr Size() const;
-  u32 At(uptr i) const;
+  void Add(StackID stack_id);
+  void Reset() { size_ = 0; }
+  uptr Size() const { return size_; }
+  StackID At(uptr i) const;
 
  private:
+  static constexpr uptr kMaxSize = 16;
   uptr size_;
-  u32 stacks_[kMaxSize];
+  StackID stacks_[kMaxSize];
 };
 
 }  // namespace __tsan
diff --git a/libsanitizer/tsan/tsan_ilist.h b/libsanitizer/tsan/tsan_ilist.h
new file mode 100644 (file)
index 0000000..d7d8be2
--- /dev/null
@@ -0,0 +1,189 @@
+//===-- tsan_ilist.h --------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_ILIST_H
+#define TSAN_ILIST_H
+
+#include "sanitizer_common/sanitizer_internal_defs.h"
+
+namespace __tsan {
+
+class INode {
+ public:
+  INode() = default;
+
+ private:
+  INode* next_ = nullptr;
+  INode* prev_ = nullptr;
+
+  template <typename Base, INode Base::*Node, typename Elem>
+  friend class IList;
+  INode(const INode&) = delete;
+  void operator=(const INode&) = delete;
+};
+
+// Intrusive doubly-linked list.
+//
+// The node class (MyNode) needs to include "INode foo" field,
+// then the list can be declared as IList<MyNode, &MyNode::foo>.
+// This design allows to link MyNode into multiple lists using
+// different INode fields.
+// The optional Elem template argument allows to specify node MDT
+// (most derived type) if it's different from MyNode.
+template <typename Base, INode Base::*Node, typename Elem = Base>
+class IList {
+ public:
+  IList();
+
+  void PushFront(Elem* e);
+  void PushBack(Elem* e);
+  void Remove(Elem* e);
+
+  Elem* PopFront();
+  Elem* PopBack();
+  Elem* Front();
+  Elem* Back();
+
+  // Prev links point towards front of the queue.
+  Elem* Prev(Elem* e);
+  // Next links point towards back of the queue.
+  Elem* Next(Elem* e);
+
+  uptr Size() const;
+  bool Empty() const;
+  bool Queued(Elem* e) const;
+
+ private:
+  INode node_;
+  uptr size_ = 0;
+
+  void Push(Elem* e, INode* after);
+  static INode* ToNode(Elem* e);
+  static Elem* ToElem(INode* n);
+
+  IList(const IList&) = delete;
+  void operator=(const IList&) = delete;
+};
+
+template <typename Base, INode Base::*Node, typename Elem>
+IList<Base, Node, Elem>::IList() {
+  node_.next_ = node_.prev_ = &node_;
+}
+
+template <typename Base, INode Base::*Node, typename Elem>
+void IList<Base, Node, Elem>::PushFront(Elem* e) {
+  Push(e, &node_);
+}
+
+template <typename Base, INode Base::*Node, typename Elem>
+void IList<Base, Node, Elem>::PushBack(Elem* e) {
+  Push(e, node_.prev_);
+}
+
+template <typename Base, INode Base::*Node, typename Elem>
+void IList<Base, Node, Elem>::Push(Elem* e, INode* after) {
+  INode* n = ToNode(e);
+  DCHECK_EQ(n->next_, nullptr);
+  DCHECK_EQ(n->prev_, nullptr);
+  INode* next = after->next_;
+  n->next_ = next;
+  n->prev_ = after;
+  next->prev_ = n;
+  after->next_ = n;
+  size_++;
+}
+
+template <typename Base, INode Base::*Node, typename Elem>
+void IList<Base, Node, Elem>::Remove(Elem* e) {
+  INode* n = ToNode(e);
+  INode* next = n->next_;
+  INode* prev = n->prev_;
+  DCHECK(next);
+  DCHECK(prev);
+  DCHECK(size_);
+  next->prev_ = prev;
+  prev->next_ = next;
+  n->prev_ = n->next_ = nullptr;
+  size_--;
+}
+
+template <typename Base, INode Base::*Node, typename Elem>
+Elem* IList<Base, Node, Elem>::PopFront() {
+  Elem* e = Front();
+  if (e)
+    Remove(e);
+  return e;
+}
+
+template <typename Base, INode Base::*Node, typename Elem>
+Elem* IList<Base, Node, Elem>::PopBack() {
+  Elem* e = Back();
+  if (e)
+    Remove(e);
+  return e;
+}
+
+template <typename Base, INode Base::*Node, typename Elem>
+Elem* IList<Base, Node, Elem>::Front() {
+  return size_ ? ToElem(node_.next_) : nullptr;
+}
+
+template <typename Base, INode Base::*Node, typename Elem>
+Elem* IList<Base, Node, Elem>::Back() {
+  return size_ ? ToElem(node_.prev_) : nullptr;
+}
+
+template <typename Base, INode Base::*Node, typename Elem>
+Elem* IList<Base, Node, Elem>::Prev(Elem* e) {
+  INode* n = ToNode(e);
+  DCHECK(n->prev_);
+  return n->prev_ != &node_ ? ToElem(n->prev_) : nullptr;
+}
+
+template <typename Base, INode Base::*Node, typename Elem>
+Elem* IList<Base, Node, Elem>::Next(Elem* e) {
+  INode* n = ToNode(e);
+  DCHECK(n->next_);
+  return n->next_ != &node_ ? ToElem(n->next_) : nullptr;
+}
+
+template <typename Base, INode Base::*Node, typename Elem>
+uptr IList<Base, Node, Elem>::Size() const {
+  return size_;
+}
+
+template <typename Base, INode Base::*Node, typename Elem>
+bool IList<Base, Node, Elem>::Empty() const {
+  return size_ == 0;
+}
+
+template <typename Base, INode Base::*Node, typename Elem>
+bool IList<Base, Node, Elem>::Queued(Elem* e) const {
+  INode* n = ToNode(e);
+  DCHECK_EQ(!n->next_, !n->prev_);
+  return n->next_;
+}
+
+template <typename Base, INode Base::*Node, typename Elem>
+INode* IList<Base, Node, Elem>::ToNode(Elem* e) {
+  return &(e->*Node);
+}
+
+template <typename Base, INode Base::*Node, typename Elem>
+Elem* IList<Base, Node, Elem>::ToElem(INode* n) {
+  return static_cast<Elem*>(reinterpret_cast<Base*>(
+      reinterpret_cast<uptr>(n) -
+      reinterpret_cast<uptr>(&(reinterpret_cast<Elem*>(0)->*Node))));
+}
+
+}  // namespace __tsan
+
+#endif
index c5716f53a323a025298d445df3c7ad773666b6fd..a855d1d8deab8c9f8e4c9c31faddb775121d4f97 100644 (file)
@@ -10,13 +10,22 @@ class ScopedInterceptor {
  public:
   ScopedInterceptor(ThreadState *thr, const char *fname, uptr pc);
   ~ScopedInterceptor();
-  void DisableIgnores();
-  void EnableIgnores();
+  void DisableIgnores() {
+    if (UNLIKELY(ignoring_))
+      DisableIgnoresImpl();
+  }
+  void EnableIgnores() {
+    if (UNLIKELY(ignoring_))
+      EnableIgnoresImpl();
+  }
+
  private:
   ThreadState *const thr_;
-  const uptr pc_;
   bool in_ignored_lib_;
   bool ignoring_;
+
+  void DisableIgnoresImpl();
+  void EnableIgnoresImpl();
 };
 
 LibIgnore *libignore();
@@ -36,18 +45,16 @@ inline bool in_symbolizer() {
   const uptr caller_pc = GET_CALLER_PC();      \
   ScopedInterceptor si(thr, #func, caller_pc); \
   const uptr pc = GET_CURRENT_PC();            \
-  (void)pc;                                    \
-  /**/
+  (void)pc;
 
-#define SCOPED_TSAN_INTERCEPTOR(func, ...) \
-    SCOPED_INTERCEPTOR_RAW(func, __VA_ARGS__); \
-    if (REAL(func) == 0) { \
-      Report("FATAL: ThreadSanitizer: failed to intercept %s\n", #func); \
-      Die(); \
-    }                                                    \
-    if (!thr->is_inited || thr->ignore_interceptors || thr->in_ignored_lib) \
-      return REAL(func)(__VA_ARGS__); \
-/**/
+#define SCOPED_TSAN_INTERCEPTOR(func, ...)                                \
+  SCOPED_INTERCEPTOR_RAW(func, __VA_ARGS__);                              \
+  if (REAL(func) == 0) {                                                  \
+    Report("FATAL: ThreadSanitizer: failed to intercept %s\n", #func);    \
+    Die();                                                                \
+  }                                                                       \
+  if (!thr->is_inited || thr->ignore_interceptors || thr->in_ignored_lib) \
+    return REAL(func)(__VA_ARGS__);
 
 #define SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START() \
     si.DisableIgnores();
index 2d400c7e7098dcd9e81abdcb8ed78c2df92de3ad..ed064150d005cd6e0228b21d8841ab0f889ca292 100644 (file)
@@ -365,7 +365,7 @@ static uptr GetOrCreateSyncAddress(uptr addr, ThreadState *thr, uptr pc) {
   if (h.created()) {
     ThreadIgnoreBegin(thr, pc);
     *h = (uptr) user_alloc(thr, pc, /*size=*/1);
-    ThreadIgnoreEnd(thr, pc);
+    ThreadIgnoreEnd(thr);
   }
   return *h;
 }
@@ -405,8 +405,8 @@ TSAN_INTERCEPTOR(int, swapcontext, ucontext_t *oucp, const ucontext_t *ucp) {
   {
     SCOPED_INTERCEPTOR_RAW(swapcontext, oucp, ucp);
   }
-  // Bacause of swapcontext() semantics we have no option but to copy its
-  // impementation here
+  // Because of swapcontext() semantics we have no option but to copy its
+  // implementation here
   if (!oucp || !ucp) {
     errno = EINVAL;
     return -1;
index 6808f2e0e2d3da72d91ce5077e602aed49cc3031..d3e4c8f03714cf41169cded4f548017a0d907f1d 100644 (file)
@@ -96,9 +96,6 @@ extern "C" void _exit(int status);
 extern "C" int fileno_unlocked(void *stream);
 extern "C" int dirfd(void *dirp);
 #endif
-#if SANITIZER_GLIBC
-extern "C" int mallopt(int param, int value);
-#endif
 #if SANITIZER_NETBSD
 extern __sanitizer_FILE __sF[];
 #else
@@ -161,7 +158,6 @@ const int SIG_SETMASK = 2;
 namespace __tsan {
 struct SignalDesc {
   bool armed;
-  bool sigaction;
   __sanitizer_siginfo siginfo;
   ucontext_t ctx;
 };
@@ -169,7 +165,6 @@ struct SignalDesc {
 struct ThreadSignalContext {
   int int_signal_send;
   atomic_uintptr_t in_blocking_func;
-  atomic_uintptr_t have_pending_signals;
   SignalDesc pending_signals[kSigCount];
   // emptyset and oldset are too big for stack.
   __sanitizer_sigset_t emptyset;
@@ -196,12 +191,10 @@ struct InterceptorContext {
   unsigned finalize_key;
 #endif
 
-  BlockingMutex atexit_mu;
+  Mutex atexit_mu;
   Vector<struct AtExitCtx *> AtExitStack;
 
-  InterceptorContext()
-      : libignore(LINKER_INITIALIZED), AtExitStack() {
-  }
+  InterceptorContext() : libignore(LINKER_INITIALIZED), atexit_mu(MutexTypeAtExit), AtExitStack() {}
 };
 
 static ALIGNED(64) char interceptor_placeholder[sizeof(InterceptorContext)];
@@ -250,8 +243,8 @@ static ThreadSignalContext *SigCtx(ThreadState *thr) {
 
 ScopedInterceptor::ScopedInterceptor(ThreadState *thr, const char *fname,
                                      uptr pc)
-    : thr_(thr), pc_(pc), in_ignored_lib_(false), ignoring_(false) {
-  Initialize(thr);
+    : thr_(thr), in_ignored_lib_(false), ignoring_(false) {
+  LazyInitialize(thr);
   if (!thr_->is_inited) return;
   if (!thr_->ignore_interceptors) FuncEntry(thr, pc);
   DPrintf("#%d: intercept %s()\n", thr_->tid, fname);
@@ -267,29 +260,29 @@ ScopedInterceptor::~ScopedInterceptor() {
   if (!thr_->ignore_interceptors) {
     ProcessPendingSignals(thr_);
     FuncExit(thr_);
-    CheckNoLocks(thr_);
+    CheckedMutex::CheckNoLocks();
   }
 }
 
-void ScopedInterceptor::EnableIgnores() {
-  if (ignoring_) {
-    ThreadIgnoreBegin(thr_, pc_, /*save_stack=*/false);
-    if (flags()->ignore_noninstrumented_modules) thr_->suppress_reports++;
-    if (in_ignored_lib_) {
-      DCHECK(!thr_->in_ignored_lib);
-      thr_->in_ignored_lib = true;
-    }
+NOINLINE
+void ScopedInterceptor::EnableIgnoresImpl() {
+  ThreadIgnoreBegin(thr_, 0);
+  if (flags()->ignore_noninstrumented_modules)
+    thr_->suppress_reports++;
+  if (in_ignored_lib_) {
+    DCHECK(!thr_->in_ignored_lib);
+    thr_->in_ignored_lib = true;
   }
 }
 
-void ScopedInterceptor::DisableIgnores() {
-  if (ignoring_) {
-    ThreadIgnoreEnd(thr_, pc_);
-    if (flags()->ignore_noninstrumented_modules) thr_->suppress_reports--;
-    if (in_ignored_lib_) {
-      DCHECK(thr_->in_ignored_lib);
-      thr_->in_ignored_lib = false;
-    }
+NOINLINE
+void ScopedInterceptor::DisableIgnoresImpl() {
+  ThreadIgnoreEnd(thr_);
+  if (flags()->ignore_noninstrumented_modules)
+    thr_->suppress_reports--;
+  if (in_ignored_lib_) {
+    DCHECK(thr_->in_ignored_lib);
+    thr_->in_ignored_lib = false;
   }
 }
 
@@ -325,7 +318,7 @@ struct BlockingCall {
       , ctx(SigCtx(thr)) {
     for (;;) {
       atomic_store(&ctx->in_blocking_func, 1, memory_order_relaxed);
-      if (atomic_load(&ctx->have_pending_signals, memory_order_relaxed) == 0)
+      if (atomic_load(&thr->pending_signals, memory_order_relaxed) == 0)
         break;
       atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
       ProcessPendingSignals(thr);
@@ -377,7 +370,7 @@ static void at_exit_wrapper() {
   AtExitCtx *ctx;
   {
     // Ensure thread-safety.
-    BlockingMutexLock l(&interceptor_ctx()->atexit_mu);
+    Lock l(&interceptor_ctx()->atexit_mu);
 
     // Pop AtExitCtx from the top of the stack of callback functions
     uptr element = interceptor_ctx()->AtExitStack.Size() - 1;
@@ -387,14 +380,14 @@ static void at_exit_wrapper() {
 
   Acquire(cur_thread(), (uptr)0, (uptr)ctx);
   ((void(*)())ctx->f)();
-  InternalFree(ctx);
+  Free(ctx);
 }
 
 static void cxa_at_exit_wrapper(void *arg) {
   Acquire(cur_thread(), 0, (uptr)arg);
   AtExitCtx *ctx = (AtExitCtx*)arg;
   ((void(*)(void *arg))ctx->f)(ctx->arg);
-  InternalFree(ctx);
+  Free(ctx);
 }
 
 static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
@@ -420,7 +413,7 @@ TSAN_INTERCEPTOR(int, __cxa_atexit, void (*f)(void *a), void *arg, void *dso) {
 
 static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
       void *arg, void *dso) {
-  AtExitCtx *ctx = (AtExitCtx*)InternalAlloc(sizeof(AtExitCtx));
+  auto *ctx = New<AtExitCtx>();
   ctx->f = f;
   ctx->arg = arg;
   Release(thr, pc, (uptr)ctx);
@@ -433,7 +426,10 @@ static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
     // Store ctx in a local stack-like structure
 
     // Ensure thread-safety.
-    BlockingMutexLock l(&interceptor_ctx()->atexit_mu);
+    Lock l(&interceptor_ctx()->atexit_mu);
+    // __cxa_atexit calls calloc. If we don't ignore interceptors, we will fail
+    // due to atexit_mu held on exit from the calloc interceptor.
+    ScopedIgnoreInterceptors ignore;
 
     res = REAL(__cxa_atexit)((void (*)(void *a))at_exit_wrapper, 0, 0);
     // Push AtExitCtx on the top of the stack of callback functions
@@ -443,7 +439,7 @@ static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
   } else {
     res = REAL(__cxa_atexit)(cxa_at_exit_wrapper, ctx, dso);
   }
-  ThreadIgnoreEnd(thr, pc);
+  ThreadIgnoreEnd(thr);
   return res;
 }
 
@@ -454,14 +450,14 @@ static void on_exit_wrapper(int status, void *arg) {
   Acquire(thr, pc, (uptr)arg);
   AtExitCtx *ctx = (AtExitCtx*)arg;
   ((void(*)(int status, void *arg))ctx->f)(status, ctx->arg);
-  InternalFree(ctx);
+  Free(ctx);
 }
 
 TSAN_INTERCEPTOR(int, on_exit, void(*f)(int, void*), void *arg) {
   if (in_symbolizer())
     return 0;
   SCOPED_TSAN_INTERCEPTOR(on_exit, f, arg);
-  AtExitCtx *ctx = (AtExitCtx*)InternalAlloc(sizeof(AtExitCtx));
+  auto *ctx = New<AtExitCtx>();
   ctx->f = (void(*)())f;
   ctx->arg = arg;
   Release(thr, pc, (uptr)ctx);
@@ -469,7 +465,7 @@ TSAN_INTERCEPTOR(int, on_exit, void(*f)(int, void*), void *arg) {
   // because we do not see synchronization around atexit callback list.
   ThreadIgnoreBegin(thr, pc);
   int res = REAL(on_exit)(on_exit_wrapper, ctx);
-  ThreadIgnoreEnd(thr, pc);
+  ThreadIgnoreEnd(thr);
   return res;
 }
 #define TSAN_MAYBE_INTERCEPT_ON_EXIT TSAN_INTERCEPT(on_exit)
@@ -848,6 +844,53 @@ TSAN_INTERCEPTOR(int, posix_memalign, void **memptr, uptr align, uptr sz) {
 }
 #endif
 
+// Both __cxa_guard_acquire and pthread_once 0-initialize
+// the object initially. pthread_once does not have any
+// other ABI requirements. __cxa_guard_acquire assumes
+// that any non-0 value in the first byte means that
+// initialization is completed. Contents of the remaining
+// bytes are up to us.
+constexpr u32 kGuardInit = 0;
+constexpr u32 kGuardDone = 1;
+constexpr u32 kGuardRunning = 1 << 16;
+constexpr u32 kGuardWaiter = 1 << 17;
+
+static int guard_acquire(ThreadState *thr, uptr pc, atomic_uint32_t *g,
+                         bool blocking_hooks = true) {
+  if (blocking_hooks)
+    OnPotentiallyBlockingRegionBegin();
+  auto on_exit = at_scope_exit([blocking_hooks] {
+    if (blocking_hooks)
+      OnPotentiallyBlockingRegionEnd();
+  });
+
+  for (;;) {
+    u32 cmp = atomic_load(g, memory_order_acquire);
+    if (cmp == kGuardInit) {
+      if (atomic_compare_exchange_strong(g, &cmp, kGuardRunning,
+                                         memory_order_relaxed))
+        return 1;
+    } else if (cmp == kGuardDone) {
+      if (!thr->in_ignored_lib)
+        Acquire(thr, pc, (uptr)g);
+      return 0;
+    } else {
+      if ((cmp & kGuardWaiter) ||
+          atomic_compare_exchange_strong(g, &cmp, cmp | kGuardWaiter,
+                                         memory_order_relaxed))
+        FutexWait(g, cmp | kGuardWaiter);
+    }
+  }
+}
+
+static void guard_release(ThreadState *thr, uptr pc, atomic_uint32_t *g) {
+  if (!thr->in_ignored_lib)
+    Release(thr, pc, (uptr)g);
+  u32 old = atomic_exchange(g, kGuardDone, memory_order_release);
+  if (old & kGuardWaiter)
+    FutexWake(g, 1 << 30);
+}
+
 // __cxa_guard_acquire and friends need to be intercepted in a special way -
 // regular interceptors will break statically-linked libstdc++. Linux
 // interceptors are especially defined as weak functions (so that they don't
@@ -868,31 +911,17 @@ TSAN_INTERCEPTOR(int, posix_memalign, void **memptr, uptr align, uptr sz) {
 // Used in thread-safe function static initialization.
 STDCXX_INTERCEPTOR(int, __cxa_guard_acquire, atomic_uint32_t *g) {
   SCOPED_INTERCEPTOR_RAW(__cxa_guard_acquire, g);
-  OnPotentiallyBlockingRegionBegin();
-  auto on_exit = at_scope_exit(&OnPotentiallyBlockingRegionEnd);
-  for (;;) {
-    u32 cmp = atomic_load(g, memory_order_acquire);
-    if (cmp == 0) {
-      if (atomic_compare_exchange_strong(g, &cmp, 1<<16, memory_order_relaxed))
-        return 1;
-    } else if (cmp == 1) {
-      Acquire(thr, pc, (uptr)g);
-      return 0;
-    } else {
-      internal_sched_yield();
-    }
-  }
+  return guard_acquire(thr, pc, g);
 }
 
 STDCXX_INTERCEPTOR(void, __cxa_guard_release, atomic_uint32_t *g) {
   SCOPED_INTERCEPTOR_RAW(__cxa_guard_release, g);
-  Release(thr, pc, (uptr)g);
-  atomic_store(g, 1, memory_order_release);
+  guard_release(thr, pc, g);
 }
 
 STDCXX_INTERCEPTOR(void, __cxa_guard_abort, atomic_uint32_t *g) {
   SCOPED_INTERCEPTOR_RAW(__cxa_guard_abort, g);
-  atomic_store(g, 0, memory_order_relaxed);
+  atomic_store(g, kGuardInit, memory_order_relaxed);
 }
 
 namespace __tsan {
@@ -934,14 +963,15 @@ static void thread_finalize(void *v) {
 struct ThreadParam {
   void* (*callback)(void *arg);
   void *param;
-  atomic_uintptr_t tid;
+  Tid tid;
+  Semaphore created;
+  Semaphore started;
 };
 
 extern "C" void *__tsan_thread_start_func(void *arg) {
   ThreadParam *p = (ThreadParam*)arg;
   void* (*callback)(void *arg) = p->callback;
   void *param = p->param;
-  int tid = 0;
   {
     cur_thread_init();
     ThreadState *thr = cur_thread();
@@ -954,14 +984,13 @@ extern "C" void *__tsan_thread_start_func(void *arg) {
       Printf("ThreadSanitizer: failed to set thread key\n");
       Die();
     }
-    ThreadIgnoreEnd(thr, 0);
+    ThreadIgnoreEnd(thr);
 #endif
-    while ((tid = atomic_load(&p->tid, memory_order_acquire)) == 0)
-      internal_sched_yield();
+    p->created.Wait();
     Processor *proc = ProcCreate();
     ProcWire(proc, thr);
-    ThreadStart(thr, tid, GetTid(), ThreadType::Regular);
-    atomic_store(&p->tid, 0, memory_order_release);
+    ThreadStart(thr, p->tid, GetTid(), ThreadType::Regular);
+    p->started.Post();
   }
   void *res = callback(param);
   // Prevent the callback from being tail called,
@@ -983,9 +1012,11 @@ TSAN_INTERCEPTOR(int, pthread_create,
           "fork is not supported. Dying (set die_after_fork=0 to override)\n");
       Die();
     } else {
-      VPrintf(1, "ThreadSanitizer: starting new threads after multi-threaded "
-          "fork is not supported (pid %d). Continuing because of "
-          "die_after_fork=0, but you are on your own\n", internal_getpid());
+      VPrintf(1,
+              "ThreadSanitizer: starting new threads after multi-threaded "
+              "fork is not supported (pid %lu). Continuing because of "
+              "die_after_fork=0, but you are on your own\n",
+              internal_getpid());
     }
   }
   __sanitizer_pthread_attr_t myattr;
@@ -1000,18 +1031,18 @@ TSAN_INTERCEPTOR(int, pthread_create,
   ThreadParam p;
   p.callback = callback;
   p.param = param;
-  atomic_store(&p.tid, 0, memory_order_relaxed);
+  p.tid = kMainTid;
   int res = -1;
   {
     // Otherwise we see false positives in pthread stack manipulation.
     ScopedIgnoreInterceptors ignore;
     ThreadIgnoreBegin(thr, pc);
     res = REAL(pthread_create)(th, attr, __tsan_thread_start_func, &p);
-    ThreadIgnoreEnd(thr, pc);
+    ThreadIgnoreEnd(thr);
   }
   if (res == 0) {
-    int tid = ThreadCreate(thr, pc, *(uptr*)th, IsStateDetached(detached));
-    CHECK_NE(tid, 0);
+    p.tid = ThreadCreate(thr, pc, *(uptr *)th, IsStateDetached(detached));
+    CHECK_NE(p.tid, kMainTid);
     // Synchronization on p.tid serves two purposes:
     // 1. ThreadCreate must finish before the new thread starts.
     //    Otherwise the new thread can call pthread_detach, but the pthread_t
@@ -1019,9 +1050,8 @@ TSAN_INTERCEPTOR(int, pthread_create,
     // 2. ThreadStart must finish before this thread continues.
     //    Otherwise, this thread can call pthread_detach and reset thr->sync
     //    before the new thread got a chance to acquire from it in ThreadStart.
-    atomic_store(&p.tid, tid, memory_order_release);
-    while (atomic_load(&p.tid, memory_order_acquire) != 0)
-      internal_sched_yield();
+    p.created.Post();
+    p.started.Wait();
   }
   if (attr == &myattr)
     pthread_attr_destroy(&myattr);
@@ -1030,10 +1060,10 @@ TSAN_INTERCEPTOR(int, pthread_create,
 
 TSAN_INTERCEPTOR(int, pthread_join, void *th, void **ret) {
   SCOPED_INTERCEPTOR_RAW(pthread_join, th, ret);
-  int tid = ThreadConsumeTid(thr, pc, (uptr)th);
+  Tid tid = ThreadConsumeTid(thr, pc, (uptr)th);
   ThreadIgnoreBegin(thr, pc);
   int res = BLOCK_REAL(pthread_join)(th, ret);
-  ThreadIgnoreEnd(thr, pc);
+  ThreadIgnoreEnd(thr);
   if (res == 0) {
     ThreadJoin(thr, pc, tid);
   }
@@ -1044,7 +1074,7 @@ DEFINE_REAL_PTHREAD_FUNCTIONS
 
 TSAN_INTERCEPTOR(int, pthread_detach, void *th) {
   SCOPED_INTERCEPTOR_RAW(pthread_detach, th);
-  int tid = ThreadConsumeTid(thr, pc, (uptr)th);
+  Tid tid = ThreadConsumeTid(thr, pc, (uptr)th);
   int res = REAL(pthread_detach)(th);
   if (res == 0) {
     ThreadDetach(thr, pc, tid);
@@ -1065,10 +1095,10 @@ TSAN_INTERCEPTOR(void, pthread_exit, void *retval) {
 #if SANITIZER_LINUX
 TSAN_INTERCEPTOR(int, pthread_tryjoin_np, void *th, void **ret) {
   SCOPED_INTERCEPTOR_RAW(pthread_tryjoin_np, th, ret);
-  int tid = ThreadConsumeTid(thr, pc, (uptr)th);
+  Tid tid = ThreadConsumeTid(thr, pc, (uptr)th);
   ThreadIgnoreBegin(thr, pc);
   int res = REAL(pthread_tryjoin_np)(th, ret);
-  ThreadIgnoreEnd(thr, pc);
+  ThreadIgnoreEnd(thr);
   if (res == 0)
     ThreadJoin(thr, pc, tid);
   else
@@ -1079,10 +1109,10 @@ TSAN_INTERCEPTOR(int, pthread_tryjoin_np, void *th, void **ret) {
 TSAN_INTERCEPTOR(int, pthread_timedjoin_np, void *th, void **ret,
                  const struct timespec *abstime) {
   SCOPED_INTERCEPTOR_RAW(pthread_timedjoin_np, th, ret, abstime);
-  int tid = ThreadConsumeTid(thr, pc, (uptr)th);
+  Tid tid = ThreadConsumeTid(thr, pc, (uptr)th);
   ThreadIgnoreBegin(thr, pc);
   int res = BLOCK_REAL(pthread_timedjoin_np)(th, ret, abstime);
-  ThreadIgnoreEnd(thr, pc);
+  ThreadIgnoreEnd(thr);
   if (res == 0)
     ThreadJoin(thr, pc, tid);
   else
@@ -1446,14 +1476,14 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_unlock, void *m) {
 #if !SANITIZER_MAC
 TSAN_INTERCEPTOR(int, pthread_barrier_init, void *b, void *a, unsigned count) {
   SCOPED_TSAN_INTERCEPTOR(pthread_barrier_init, b, a, count);
-  MemoryWrite(thr, pc, (uptr)b, kSizeLog1);
+  MemoryAccess(thr, pc, (uptr)b, 1, kAccessWrite);
   int res = REAL(pthread_barrier_init)(b, a, count);
   return res;
 }
 
 TSAN_INTERCEPTOR(int, pthread_barrier_destroy, void *b) {
   SCOPED_TSAN_INTERCEPTOR(pthread_barrier_destroy, b);
-  MemoryWrite(thr, pc, (uptr)b, kSizeLog1);
+  MemoryAccess(thr, pc, (uptr)b, 1, kAccessWrite);
   int res = REAL(pthread_barrier_destroy)(b);
   return res;
 }
@@ -1461,9 +1491,9 @@ TSAN_INTERCEPTOR(int, pthread_barrier_destroy, void *b) {
 TSAN_INTERCEPTOR(int, pthread_barrier_wait, void *b) {
   SCOPED_TSAN_INTERCEPTOR(pthread_barrier_wait, b);
   Release(thr, pc, (uptr)b);
-  MemoryRead(thr, pc, (uptr)b, kSizeLog1);
+  MemoryAccess(thr, pc, (uptr)b, 1, kAccessRead);
   int res = REAL(pthread_barrier_wait)(b);
-  MemoryRead(thr, pc, (uptr)b, kSizeLog1);
+  MemoryAccess(thr, pc, (uptr)b, 1, kAccessRead);
   if (res == 0 || res == PTHREAD_BARRIER_SERIAL_THREAD) {
     Acquire(thr, pc, (uptr)b);
   }
@@ -1485,20 +1515,11 @@ TSAN_INTERCEPTOR(int, pthread_once, void *o, void (*f)()) {
   else
     a = static_cast<atomic_uint32_t*>(o);
 
-  u32 v = atomic_load(a, memory_order_acquire);
-  if (v == 0 && atomic_compare_exchange_strong(a, &v, 1,
-                                               memory_order_relaxed)) {
+  // Mac OS X appears to use pthread_once() where calling BlockingRegion hooks
+  // results in crashes due to too little stack space.
+  if (guard_acquire(thr, pc, a, !SANITIZER_MAC)) {
     (*f)();
-    if (!thr->in_ignored_lib)
-      Release(thr, pc, (uptr)o);
-    atomic_store(a, 2, memory_order_release);
-  } else {
-    while (v != 2) {
-      internal_sched_yield();
-      v = atomic_load(a, memory_order_acquire);
-    }
-    if (!thr->in_ignored_lib)
-      Acquire(thr, pc, (uptr)o);
+    guard_release(thr, pc, a);
   }
   return 0;
 }
@@ -1932,24 +1953,45 @@ TSAN_INTERCEPTOR(int, pthread_sigmask, int how, const __sanitizer_sigset_t *set,
 
 namespace __tsan {
 
+static void ReportErrnoSpoiling(ThreadState *thr, uptr pc) {
+  VarSizeStackTrace stack;
+  // StackTrace::GetNextInstructionPc(pc) is used because return address is
+  // expected, OutputReport() will undo this.
+  ObtainCurrentStack(thr, StackTrace::GetNextInstructionPc(pc), &stack);
+  ThreadRegistryLock l(&ctx->thread_registry);
+  ScopedReport rep(ReportTypeErrnoInSignal);
+  if (!IsFiredSuppression(ctx, ReportTypeErrnoInSignal, stack)) {
+    rep.AddStack(stack, true);
+    OutputReport(thr, rep);
+  }
+}
+
 static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire,
-                                  bool sigact, int sig,
-                                  __sanitizer_siginfo *info, void *uctx) {
+                                  int sig, __sanitizer_siginfo *info,
+                                  void *uctx) {
   __sanitizer_sigaction *sigactions = interceptor_ctx()->sigactions;
   if (acquire)
     Acquire(thr, 0, (uptr)&sigactions[sig]);
   // Signals are generally asynchronous, so if we receive a signals when
   // ignores are enabled we should disable ignores. This is critical for sync
-  // and interceptors, because otherwise we can miss syncronization and report
+  // and interceptors, because otherwise we can miss synchronization and report
   // false races.
   int ignore_reads_and_writes = thr->ignore_reads_and_writes;
   int ignore_interceptors = thr->ignore_interceptors;
   int ignore_sync = thr->ignore_sync;
+  // For symbolizer we only process SIGSEGVs synchronously
+  // (bug in symbolizer or in tsan). But we want to reset
+  // in_symbolizer to fail gracefully. Symbolizer and user code
+  // use different memory allocators, so if we don't reset
+  // in_symbolizer we can get memory allocated with one being
+  // freed with another, which can cause more crashes.
+  int in_symbolizer = thr->in_symbolizer;
   if (!ctx->after_multithreaded_fork) {
     thr->ignore_reads_and_writes = 0;
     thr->fast_state.ClearIgnoreBit();
     thr->ignore_interceptors = 0;
     thr->ignore_sync = 0;
+    thr->in_symbolizer = 0;
   }
   // Ensure that the handler does not spoil errno.
   const int saved_errno = errno;
@@ -1957,13 +1999,14 @@ static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire,
   // This code races with sigaction. Be careful to not read sa_sigaction twice.
   // Also need to remember pc for reporting before the call,
   // because the handler can reset it.
-  volatile uptr pc =
-      sigact ? (uptr)sigactions[sig].sigaction : (uptr)sigactions[sig].handler;
+  volatile uptr pc = (sigactions[sig].sa_flags & SA_SIGINFO)
+                         ? (uptr)sigactions[sig].sigaction
+                         : (uptr)sigactions[sig].handler;
   if (pc != sig_dfl && pc != sig_ign) {
-    if (sigact)
-      ((__sanitizer_sigactionhandler_ptr)pc)(sig, info, uctx);
-    else
-      ((__sanitizer_sighandler_ptr)pc)(sig);
+    // The callback can be either sa_handler or sa_sigaction.
+    // They have different signatures, but we assume that passing
+    // additional arguments to sa_handler works and is harmless.
+    ((__sanitizer_sigactionhandler_ptr)pc)(sig, info, uctx);
   }
   if (!ctx->after_multithreaded_fork) {
     thr->ignore_reads_and_writes = ignore_reads_and_writes;
@@ -1971,6 +2014,7 @@ static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire,
       thr->fast_state.SetIgnoreBit();
     thr->ignore_interceptors = ignore_interceptors;
     thr->ignore_sync = ignore_sync;
+    thr->in_symbolizer = in_symbolizer;
   }
   // We do not detect errno spoiling for SIGTERM,
   // because some SIGTERM handlers do spoil errno but reraise SIGTERM,
@@ -1980,27 +2024,16 @@ static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire,
   // from rtl_generic_sighandler) we have not yet received the reraised
   // signal; and it looks too fragile to intercept all ways to reraise a signal.
   if (ShouldReport(thr, ReportTypeErrnoInSignal) && !sync && sig != SIGTERM &&
-      errno != 99) {
-    VarSizeStackTrace stack;
-    // StackTrace::GetNestInstructionPc(pc) is used because return address is
-    // expected, OutputReport() will undo this.
-    ObtainCurrentStack(thr, StackTrace::GetNextInstructionPc(pc), &stack);
-    ThreadRegistryLock l(ctx->thread_registry);
-    ScopedReport rep(ReportTypeErrnoInSignal);
-    if (!IsFiredSuppression(ctx, ReportTypeErrnoInSignal, stack)) {
-      rep.AddStack(stack, true);
-      OutputReport(thr, rep);
-    }
-  }
+      errno != 99)
+    ReportErrnoSpoiling(thr, pc);
   errno = saved_errno;
 }
 
-void ProcessPendingSignals(ThreadState *thr) {
+void ProcessPendingSignalsImpl(ThreadState *thr) {
+  atomic_store(&thr->pending_signals, 0, memory_order_relaxed);
   ThreadSignalContext *sctx = SigCtx(thr);
-  if (sctx == 0 ||
-      atomic_load(&sctx->have_pending_signals, memory_order_relaxed) == 0)
+  if (sctx == 0)
     return;
-  atomic_store(&sctx->have_pending_signals, 0, memory_order_relaxed);
   atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed);
   internal_sigfillset(&sctx->emptyset);
   int res = REAL(pthread_sigmask)(SIG_SETMASK, &sctx->emptyset, &sctx->oldset);
@@ -2009,8 +2042,8 @@ void ProcessPendingSignals(ThreadState *thr) {
     SignalDesc *signal = &sctx->pending_signals[sig];
     if (signal->armed) {
       signal->armed = false;
-      CallUserSignalHandler(thr, false, true, signal->sigaction, sig,
-          &signal->siginfo, &signal->ctx);
+      CallUserSignalHandler(thr, false, true, sig, &signal->siginfo,
+                            &signal->ctx);
     }
   }
   res = REAL(pthread_sigmask)(SIG_SETMASK, &sctx->oldset, 0);
@@ -2027,9 +2060,7 @@ static bool is_sync_signal(ThreadSignalContext *sctx, int sig) {
          (sctx && sig == sctx->int_signal_send);
 }
 
-void ALWAYS_INLINE rtl_generic_sighandler(bool sigact, int sig,
-                                          __sanitizer_siginfo *info,
-                                          void *ctx) {
+void sighandler(int sig, __sanitizer_siginfo *info, void *ctx) {
   cur_thread_init();
   ThreadState *thr = cur_thread();
   ThreadSignalContext *sctx = SigCtx(thr);
@@ -2047,7 +2078,7 @@ void ALWAYS_INLINE rtl_generic_sighandler(bool sigact, int sig,
     atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed);
     if (sctx && atomic_load(&sctx->in_blocking_func, memory_order_relaxed)) {
       atomic_store(&sctx->in_blocking_func, 0, memory_order_relaxed);
-      CallUserSignalHandler(thr, sync, true, sigact, sig, info, ctx);
+      CallUserSignalHandler(thr, sync, true, sig, info, ctx);
       atomic_store(&sctx->in_blocking_func, 1, memory_order_relaxed);
     } else {
       // Be very conservative with when we do acquire in this case.
@@ -2056,7 +2087,7 @@ void ALWAYS_INLINE rtl_generic_sighandler(bool sigact, int sig,
       // SIGSYS looks relatively safe -- it's synchronous and can actually
       // need some global state.
       bool acq = (sig == SIGSYS);
-      CallUserSignalHandler(thr, sync, acq, sigact, sig, info, ctx);
+      CallUserSignalHandler(thr, sync, acq, sig, info, ctx);
     }
     atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed);
     return;
@@ -2067,23 +2098,12 @@ void ALWAYS_INLINE rtl_generic_sighandler(bool sigact, int sig,
   SignalDesc *signal = &sctx->pending_signals[sig];
   if (signal->armed == false) {
     signal->armed = true;
-    signal->sigaction = sigact;
-    if (info)
-      internal_memcpy(&signal->siginfo, info, sizeof(*info));
-    if (ctx)
-      internal_memcpy(&signal->ctx, ctx, sizeof(signal->ctx));
-    atomic_store(&sctx->have_pending_signals, 1, memory_order_relaxed);
+    internal_memcpy(&signal->siginfo, info, sizeof(*info));
+    internal_memcpy(&signal->ctx, ctx, sizeof(signal->ctx));
+    atomic_store(&thr->pending_signals, 1, memory_order_relaxed);
   }
 }
 
-static void rtl_sighandler(int sig) {
-  rtl_generic_sighandler(false, sig, 0, 0);
-}
-
-static void rtl_sigaction(int sig, __sanitizer_siginfo *info, void *ctx) {
-  rtl_generic_sighandler(true, sig, info, ctx);
-}
-
 TSAN_INTERCEPTOR(int, raise, int sig) {
   SCOPED_TSAN_INTERCEPTOR(raise, sig);
   ThreadSignalContext *sctx = SigCtx(thr);
@@ -2142,7 +2162,7 @@ TSAN_INTERCEPTOR(int, getaddrinfo, void *node, void *service,
   // inside of getaddrinfo. So ignore memory accesses.
   ThreadIgnoreBegin(thr, pc);
   int res = REAL(getaddrinfo)(node, service, hints, rv);
-  ThreadIgnoreEnd(thr, pc);
+  ThreadIgnoreEnd(thr);
   return res;
 }
 
@@ -2206,7 +2226,7 @@ struct dl_iterate_phdr_data {
 };
 
 static bool IsAppNotRodata(uptr addr) {
-  return IsAppMem(addr) && *(u64*)MemToShadow(addr) != kShadowRodata;
+  return IsAppMem(addr) && *MemToShadow(addr) != kShadowRodata;
 }
 
 static int dl_iterate_phdr_cb(__sanitizer_dl_phdr_info *info, SIZE_T size,
@@ -2249,7 +2269,6 @@ static int OnExit(ThreadState *thr) {
 
 struct TsanInterceptorContext {
   ThreadState *thr;
-  const uptr caller_pc;
   const uptr pc;
 };
 
@@ -2290,17 +2309,17 @@ static void HandleRecvmsg(ThreadState *thr, uptr pc,
                     ((TsanInterceptorContext *) ctx)->pc, (uptr) ptr, size, \
                     false)
 
-#define COMMON_INTERCEPTOR_ENTER(ctx, func, ...)      \
-  SCOPED_TSAN_INTERCEPTOR(func, __VA_ARGS__);         \
-  TsanInterceptorContext _ctx = {thr, caller_pc, pc}; \
-  ctx = (void *)&_ctx;                                \
-  (void) ctx;
+#define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \
+  SCOPED_TSAN_INTERCEPTOR(func, __VA_ARGS__);    \
+  TsanInterceptorContext _ctx = {thr, pc};       \
+  ctx = (void *)&_ctx;                           \
+  (void)ctx;
 
 #define COMMON_INTERCEPTOR_ENTER_NOIGNORE(ctx, func, ...) \
   SCOPED_INTERCEPTOR_RAW(func, __VA_ARGS__);              \
-  TsanInterceptorContext _ctx = {thr, caller_pc, pc};     \
+  TsanInterceptorContext _ctx = {thr, pc};                \
   ctx = (void *)&_ctx;                                    \
-  (void) ctx;
+  (void)ctx;
 
 #define COMMON_INTERCEPTOR_FILE_OPEN(ctx, file, path) \
   if (path)                                           \
@@ -2347,7 +2366,7 @@ static void HandleRecvmsg(ThreadState *thr, uptr pc,
   ThreadSetName(((TsanInterceptorContext *) ctx)->thr, name)
 
 #define COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, name) \
-  __tsan::ctx->thread_registry->SetThreadNameByUserId(thread, name)
+  __tsan::ctx->thread_registry.SetThreadNameByUserId(thread, name)
 
 #define COMMON_INTERCEPTOR_BLOCK_REAL(name) BLOCK_REAL(name)
 
@@ -2419,9 +2438,13 @@ static __sanitizer_sighandler_ptr signal_impl(int sig,
 int sigaction_impl(int sig, const __sanitizer_sigaction *act,
                    __sanitizer_sigaction *old) {
   // Note: if we call REAL(sigaction) directly for any reason without proxying
-  // the signal handler through rtl_sigaction, very bad things will happen.
+  // the signal handler through sighandler, very bad things will happen.
   // The handler will run synchronously and corrupt tsan per-thread state.
   SCOPED_INTERCEPTOR_RAW(sigaction, sig, act, old);
+  if (sig <= 0 || sig >= kSigCount) {
+    errno = errno_EINVAL;
+    return -1;
+  }
   __sanitizer_sigaction *sigactions = interceptor_ctx()->sigactions;
   __sanitizer_sigaction old_stored;
   if (old) internal_memcpy(&old_stored, &sigactions[sig], sizeof(old_stored));
@@ -2443,22 +2466,17 @@ int sigaction_impl(int sig, const __sanitizer_sigaction *act,
 #endif
     internal_memcpy(&newact, act, sizeof(newact));
     internal_sigfillset(&newact.sa_mask);
-    if ((uptr)act->handler != sig_ign && (uptr)act->handler != sig_dfl) {
-      if (newact.sa_flags & SA_SIGINFO)
-        newact.sigaction = rtl_sigaction;
-      else
-        newact.handler = rtl_sighandler;
+    if ((act->sa_flags & SA_SIGINFO) ||
+        ((uptr)act->handler != sig_ign && (uptr)act->handler != sig_dfl)) {
+      newact.sa_flags |= SA_SIGINFO;
+      newact.sigaction = sighandler;
     }
     ReleaseStore(thr, pc, (uptr)&sigactions[sig]);
     act = &newact;
   }
   int res = REAL(sigaction)(sig, act, old);
-  if (res == 0 && old) {
-    uptr cb = (uptr)old->sigaction;
-    if (cb == (uptr)rtl_sigaction || cb == (uptr)rtl_sighandler) {
-      internal_memcpy(old, &old_stored, sizeof(*old));
-    }
-  }
+  if (res == 0 && old && old->sigaction == sighandler)
+    internal_memcpy(old, &old_stored, sizeof(*old));
   return res;
 }
 
@@ -2474,20 +2492,16 @@ static __sanitizer_sighandler_ptr signal_impl(int sig,
   return old.handler;
 }
 
-#define TSAN_SYSCALL() \
+#define TSAN_SYSCALL()             \
   ThreadState *thr = cur_thread(); \
-  if (thr->ignore_interceptors) \
-    return; \
-  ScopedSyscall scoped_syscall(thr) \
-/**/
+  if (thr->ignore_interceptors)    \
+    return;                        \
+  ScopedSyscall scoped_syscall(thr)
 
 struct ScopedSyscall {
   ThreadState *thr;
 
-  explicit ScopedSyscall(ThreadState *thr)
-      : thr(thr) {
-    Initialize(thr);
-  }
+  explicit ScopedSyscall(ThreadState *thr) : thr(thr) { LazyInitialize(thr); }
 
   ~ScopedSyscall() {
     ProcessPendingSignals(thr);
@@ -2503,12 +2517,12 @@ static void syscall_access_range(uptr pc, uptr p, uptr s, bool write) {
 static USED void syscall_acquire(uptr pc, uptr addr) {
   TSAN_SYSCALL();
   Acquire(thr, pc, addr);
-  DPrintf("syscall_acquire(%p)\n", addr);
+  DPrintf("syscall_acquire(0x%zx))\n", addr);
 }
 
 static USED void syscall_release(uptr pc, uptr addr) {
   TSAN_SYSCALL();
-  DPrintf("syscall_release(%p)\n", addr);
+  DPrintf("syscall_release(0x%zx)\n", addr);
   Release(thr, pc, addr);
 }
 
@@ -2520,12 +2534,12 @@ static void syscall_fd_close(uptr pc, int fd) {
 static USED void syscall_fd_acquire(uptr pc, int fd) {
   TSAN_SYSCALL();
   FdAcquire(thr, pc, fd);
-  DPrintf("syscall_fd_acquire(%p)\n", fd);
+  DPrintf("syscall_fd_acquire(%d)\n", fd);
 }
 
 static USED void syscall_fd_release(uptr pc, int fd) {
   TSAN_SYSCALL();
-  DPrintf("syscall_fd_release(%p)\n", fd);
+  DPrintf("syscall_fd_release(%d)\n", fd);
   FdRelease(thr, pc, fd);
 }
 
@@ -2695,12 +2709,6 @@ void InitializeInterceptors() {
   REAL(memcpy) = internal_memcpy;
 #endif
 
-  // Instruct libc malloc to consume less memory.
-#if SANITIZER_GLIBC
-  mallopt(1, 0);  // M_MXFAST
-  mallopt(-3, 32*1024);  // M_MMAP_THRESHOLD
-#endif
-
   new(interceptor_ctx()) InterceptorContext();
 
   InitializeCommonInterceptors();
@@ -2915,25 +2923,36 @@ void InitializeInterceptors() {
 // Note that no_sanitize_thread attribute does not turn off atomic interception
 // so attaching it to the function defined in user code does not help.
 // That's why we now have what we have.
-extern "C" SANITIZER_INTERFACE_ATTRIBUTE
-void __tsan_testonly_barrier_init(u64 *barrier, u32 count) {
-  if (count >= (1 << 8)) {
-      Printf("barrier_init: count is too large (%d)\n", count);
-      Die();
+constexpr u32 kBarrierThreadBits = 10;
+constexpr u32 kBarrierThreads = 1 << kBarrierThreadBits;
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __tsan_testonly_barrier_init(
+    atomic_uint32_t *barrier, u32 num_threads) {
+  if (num_threads >= kBarrierThreads) {
+    Printf("barrier_init: count is too large (%d)\n", num_threads);
+    Die();
   }
-  // 8 lsb is thread count, the remaining are count of entered threads.
-  *barrier = count;
+  // kBarrierThreadBits lsb is thread count,
+  // the remaining are count of entered threads.
+  atomic_store(barrier, num_threads, memory_order_relaxed);
 }
 
-extern "C" SANITIZER_INTERFACE_ATTRIBUTE
-void __tsan_testonly_barrier_wait(u64 *barrier) {
-  unsigned old = __atomic_fetch_add(barrier, 1 << 8, __ATOMIC_RELAXED);
-  unsigned old_epoch = (old >> 8) / (old & 0xff);
+static u32 barrier_epoch(u32 value) {
+  return (value >> kBarrierThreadBits) / (value & (kBarrierThreads - 1));
+}
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __tsan_testonly_barrier_wait(
+    atomic_uint32_t *barrier) {
+  u32 old = atomic_fetch_add(barrier, kBarrierThreads, memory_order_relaxed);
+  u32 old_epoch = barrier_epoch(old);
+  if (barrier_epoch(old + kBarrierThreads) != old_epoch) {
+    FutexWake(barrier, (1 << 30));
+    return;
+  }
   for (;;) {
-    unsigned cur = __atomic_load_n(barrier, __ATOMIC_RELAXED);
-    unsigned cur_epoch = (cur >> 8) / (cur & 0xff);
-    if (cur_epoch != old_epoch)
+    u32 cur = atomic_load(barrier, memory_order_relaxed);
+    if (barrier_epoch(cur) != old_epoch)
       return;
-    internal_sched_yield();
+    FutexWait(barrier, cur);
   }
 }
index 9bd0e8580b175b47d316bfb27adf1bffdd8a3ab8..704c06a1c78e7200bee33c19aa45e5614238ffb4 100644 (file)
@@ -30,98 +30,50 @@ void __tsan_flush_memory() {
 }
 
 void __tsan_read16(void *addr) {
-  MemoryRead(cur_thread(), CALLERPC, (uptr)addr, kSizeLog8);
-  MemoryRead(cur_thread(), CALLERPC, (uptr)addr + 8, kSizeLog8);
+  uptr pc = CALLERPC;
+  ThreadState *thr = cur_thread();
+  MemoryAccess(thr, pc, (uptr)addr, 8, kAccessRead);
+  MemoryAccess(thr, pc, (uptr)addr + 8, 8, kAccessRead);
 }
 
 void __tsan_write16(void *addr) {
-  MemoryWrite(cur_thread(), CALLERPC, (uptr)addr, kSizeLog8);
-  MemoryWrite(cur_thread(), CALLERPC, (uptr)addr + 8, kSizeLog8);
+  uptr pc = CALLERPC;
+  ThreadState *thr = cur_thread();
+  MemoryAccess(thr, pc, (uptr)addr, 8, kAccessWrite);
+  MemoryAccess(thr, pc, (uptr)addr + 8, 8, kAccessWrite);
 }
 
 void __tsan_read16_pc(void *addr, void *pc) {
-  MemoryRead(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog8);
-  MemoryRead(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr + 8, kSizeLog8);
+  uptr pc_no_pac = STRIP_PAC_PC(pc);
+  ThreadState *thr = cur_thread();
+  MemoryAccess(thr, pc_no_pac, (uptr)addr, 8, kAccessRead);
+  MemoryAccess(thr, pc_no_pac, (uptr)addr + 8, 8, kAccessRead);
 }
 
 void __tsan_write16_pc(void *addr, void *pc) {
-  MemoryWrite(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog8);
-  MemoryWrite(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr + 8, kSizeLog8);
+  uptr pc_no_pac = STRIP_PAC_PC(pc);
+  ThreadState *thr = cur_thread();
+  MemoryAccess(thr, pc_no_pac, (uptr)addr, 8, kAccessWrite);
+  MemoryAccess(thr, pc_no_pac, (uptr)addr + 8, 8, kAccessWrite);
 }
 
 // __tsan_unaligned_read/write calls are emitted by compiler.
 
-void __tsan_unaligned_read2(const void *addr) {
-  UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, false, false);
-}
-
-void __tsan_unaligned_read4(const void *addr) {
-  UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 4, false, false);
-}
-
-void __tsan_unaligned_read8(const void *addr) {
-  UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 8, false, false);
-}
-
 void __tsan_unaligned_read16(const void *addr) {
-  UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 16, false, false);
-}
-
-void __tsan_unaligned_write2(void *addr) {
-  UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, true, false);
-}
-
-void __tsan_unaligned_write4(void *addr) {
-  UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 4, true, false);
-}
-
-void __tsan_unaligned_write8(void *addr) {
-  UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 8, true, false);
+  uptr pc = CALLERPC;
+  ThreadState *thr = cur_thread();
+  UnalignedMemoryAccess(thr, pc, (uptr)addr, 8, kAccessRead);
+  UnalignedMemoryAccess(thr, pc, (uptr)addr + 8, 8, kAccessRead);
 }
 
 void __tsan_unaligned_write16(void *addr) {
-  UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 16, true, false);
+  uptr pc = CALLERPC;
+  ThreadState *thr = cur_thread();
+  UnalignedMemoryAccess(thr, pc, (uptr)addr, 8, kAccessWrite);
+  UnalignedMemoryAccess(thr, pc, (uptr)addr + 8, 8, kAccessWrite);
 }
 
-// __sanitizer_unaligned_load/store are for user instrumentation.
-
 extern "C" {
-SANITIZER_INTERFACE_ATTRIBUTE
-u16 __sanitizer_unaligned_load16(const uu16 *addr) {
-  __tsan_unaligned_read2(addr);
-  return *addr;
-}
-
-SANITIZER_INTERFACE_ATTRIBUTE
-u32 __sanitizer_unaligned_load32(const uu32 *addr) {
-  __tsan_unaligned_read4(addr);
-  return *addr;
-}
-
-SANITIZER_INTERFACE_ATTRIBUTE
-u64 __sanitizer_unaligned_load64(const uu64 *addr) {
-  __tsan_unaligned_read8(addr);
-  return *addr;
-}
-
-SANITIZER_INTERFACE_ATTRIBUTE
-void __sanitizer_unaligned_store16(uu16 *addr, u16 v) {
-  __tsan_unaligned_write2(addr);
-  *addr = v;
-}
-
-SANITIZER_INTERFACE_ATTRIBUTE
-void __sanitizer_unaligned_store32(uu32 *addr, u32 v) {
-  __tsan_unaligned_write4(addr);
-  *addr = v;
-}
-
-SANITIZER_INTERFACE_ATTRIBUTE
-void __sanitizer_unaligned_store64(uu64 *addr, u64 v) {
-  __tsan_unaligned_write8(addr);
-  *addr = v;
-}
-
 SANITIZER_INTERFACE_ATTRIBUTE
 void *__tsan_get_current_fiber() {
   return cur_thread();
index 124aa2fd21431e02bc9fa19c4a881368e75dbd76..711f064174c2c3ec83eb31b91464ba577907a547 100644 (file)
@@ -95,9 +95,9 @@ SANITIZER_INTERFACE_ATTRIBUTE
 void __tsan_write_range(void *addr, unsigned long size);
 
 SANITIZER_INTERFACE_ATTRIBUTE
-void __tsan_read_range_pc(void *addr, unsigned long size, void *pc);  // NOLINT
+void __tsan_read_range_pc(void *addr, unsigned long size, void *pc);
 SANITIZER_INTERFACE_ATTRIBUTE
-void __tsan_write_range_pc(void *addr, unsigned long size, void *pc);  // NOLINT
+void __tsan_write_range_pc(void *addr, unsigned long size, void *pc);
 
 // User may provide function that would be called right when TSan detects
 // an error. The argument 'report' is an opaque pointer that can be used to
@@ -417,12 +417,6 @@ SANITIZER_INTERFACE_ATTRIBUTE
 void __tsan_go_atomic64_compare_exchange(ThreadState *thr, uptr cpc, uptr pc,
                                          u8 *a);
 
-SANITIZER_INTERFACE_ATTRIBUTE
-void __tsan_on_initialize();
-
-SANITIZER_INTERFACE_ATTRIBUTE
-int __tsan_on_finalize(int failed);
-
 }  // extern "C"
 
 }  // namespace __tsan
diff --git a/libsanitizer/tsan/tsan_interface.inc b/libsanitizer/tsan/tsan_interface.inc
new file mode 100644 (file)
index 0000000..0031800
--- /dev/null
@@ -0,0 +1,182 @@
+//===-- tsan_interface.inc --------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_ptrauth.h"
+#include "tsan_interface.h"
+#include "tsan_rtl.h"
+
+#define CALLERPC ((uptr)__builtin_return_address(0))
+
+using namespace __tsan;
+
+void __tsan_read1(void *addr) {
+  MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 1, kAccessRead);
+}
+
+void __tsan_read2(void *addr) {
+  MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, kAccessRead);
+}
+
+void __tsan_read4(void *addr) {
+  MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 4, kAccessRead);
+}
+
+void __tsan_read8(void *addr) {
+  MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 8, kAccessRead);
+}
+
+void __tsan_write1(void *addr) {
+  MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 1, kAccessWrite);
+}
+
+void __tsan_write2(void *addr) {
+  MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, kAccessWrite);
+}
+
+void __tsan_write4(void *addr) {
+  MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 4, kAccessWrite);
+}
+
+void __tsan_write8(void *addr) {
+  MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 8, kAccessWrite);
+}
+
+void __tsan_read1_pc(void *addr, void *pc) {
+  MemoryAccess(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, 1, kAccessRead | kAccessExternalPC);
+}
+
+void __tsan_read2_pc(void *addr, void *pc) {
+  MemoryAccess(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, 2, kAccessRead | kAccessExternalPC);
+}
+
+void __tsan_read4_pc(void *addr, void *pc) {
+  MemoryAccess(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, 4, kAccessRead | kAccessExternalPC);
+}
+
+void __tsan_read8_pc(void *addr, void *pc) {
+  MemoryAccess(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, 8, kAccessRead | kAccessExternalPC);
+}
+
+void __tsan_write1_pc(void *addr, void *pc) {
+  MemoryAccess(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, 1, kAccessWrite | kAccessExternalPC);
+}
+
+void __tsan_write2_pc(void *addr, void *pc) {
+  MemoryAccess(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, 2, kAccessWrite | kAccessExternalPC);
+}
+
+void __tsan_write4_pc(void *addr, void *pc) {
+  MemoryAccess(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, 4, kAccessWrite | kAccessExternalPC);
+}
+
+void __tsan_write8_pc(void *addr, void *pc) {
+  MemoryAccess(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, 8, kAccessWrite | kAccessExternalPC);
+}
+
+ALWAYS_INLINE USED void __tsan_unaligned_read2(const void *addr) {
+  UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, kAccessRead);
+}
+
+ALWAYS_INLINE USED void __tsan_unaligned_read4(const void *addr) {
+  UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 4, kAccessRead);
+}
+
+ALWAYS_INLINE USED void __tsan_unaligned_read8(const void *addr) {
+  UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 8, kAccessRead);
+}
+
+ALWAYS_INLINE USED void __tsan_unaligned_write2(void *addr) {
+  UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, kAccessWrite);
+}
+
+ALWAYS_INLINE USED void __tsan_unaligned_write4(void *addr) {
+  UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 4, kAccessWrite);
+}
+
+ALWAYS_INLINE USED void __tsan_unaligned_write8(void *addr) {
+  UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 8, kAccessWrite);
+}
+
+extern "C" {
+// __sanitizer_unaligned_load/store are for user instrumentation.
+SANITIZER_INTERFACE_ATTRIBUTE
+u16 __sanitizer_unaligned_load16(const uu16 *addr) {
+  __tsan_unaligned_read2(addr);
+  return *addr;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+u32 __sanitizer_unaligned_load32(const uu32 *addr) {
+  __tsan_unaligned_read4(addr);
+  return *addr;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+u64 __sanitizer_unaligned_load64(const uu64 *addr) {
+  __tsan_unaligned_read8(addr);
+  return *addr;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_unaligned_store16(uu16 *addr, u16 v) {
+  *addr = v;
+  __tsan_unaligned_write2(addr);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_unaligned_store32(uu32 *addr, u32 v) {
+  *addr = v;
+  __tsan_unaligned_write4(addr);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_unaligned_store64(uu64 *addr, u64 v) {
+  *addr = v;
+  __tsan_unaligned_write8(addr);
+}
+}
+
+void __tsan_vptr_update(void **vptr_p, void *new_val) {
+  if (*vptr_p == new_val)
+    return;
+  MemoryAccess(cur_thread(), CALLERPC, (uptr)vptr_p, sizeof(*vptr_p),
+               kAccessWrite | kAccessVptr);
+}
+
+void __tsan_vptr_read(void **vptr_p) {
+  MemoryAccess(cur_thread(), CALLERPC, (uptr)vptr_p, sizeof(*vptr_p),
+               kAccessRead | kAccessVptr);
+}
+
+void __tsan_func_entry(void *pc) { FuncEntry(cur_thread(), STRIP_PAC_PC(pc)); }
+
+void __tsan_func_exit() { FuncExit(cur_thread()); }
+
+void __tsan_ignore_thread_begin() { ThreadIgnoreBegin(cur_thread(), CALLERPC); }
+
+void __tsan_ignore_thread_end() { ThreadIgnoreEnd(cur_thread()); }
+
+void __tsan_read_range(void *addr, uptr size) {
+  MemoryAccessRange(cur_thread(), CALLERPC, (uptr)addr, size, false);
+}
+
+void __tsan_write_range(void *addr, uptr size) {
+  MemoryAccessRange(cur_thread(), CALLERPC, (uptr)addr, size, true);
+}
+
+void __tsan_read_range_pc(void *addr, uptr size, void *pc) {
+  MemoryAccessRange(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, size, false);
+}
+
+void __tsan_write_range_pc(void *addr, uptr size, void *pc) {
+  MemoryAccessRange(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, size, true);
+}
index 175855f66c8cbfab3649c9ec883cd98399906815..6bd72e18d942580691b99530fac2f2c46be33cf2 100644 (file)
@@ -15,7 +15,6 @@
 #include "sanitizer_common/sanitizer_stacktrace.h"
 #include "sanitizer_common/sanitizer_vector.h"
 #include "tsan_interface_ann.h"
-#include "tsan_mutex.h"
 #include "tsan_report.h"
 #include "tsan_rtl.h"
 #include "tsan_mman.h"
@@ -38,21 +37,20 @@ class ScopedAnnotation {
 
   ~ScopedAnnotation() {
     FuncExit(thr_);
-    CheckNoLocks(thr_);
+    CheckedMutex::CheckNoLocks();
   }
  private:
   ThreadState *const thr_;
 };
 
-#define SCOPED_ANNOTATION_RET(typ, ret) \
-    if (!flags()->enable_annotations) \
-      return ret; \
-    ThreadState *thr = cur_thread(); \
-    const uptr caller_pc = (uptr)__builtin_return_address(0); \
-    ScopedAnnotation sa(thr, __func__, caller_pc); \
-    const uptr pc = StackTrace::GetCurrentPc(); \
-    (void)pc; \
-/**/
+#define SCOPED_ANNOTATION_RET(typ, ret)                     \
+  if (!flags()->enable_annotations)                         \
+    return ret;                                             \
+  ThreadState *thr = cur_thread();                          \
+  const uptr caller_pc = (uptr)__builtin_return_address(0); \
+  ScopedAnnotation sa(thr, __func__, caller_pc);            \
+  const uptr pc = StackTrace::GetCurrentPc();               \
+  (void)pc;
 
 #define SCOPED_ANNOTATION(typ) SCOPED_ANNOTATION_RET(typ, )
 
@@ -72,7 +70,6 @@ struct ExpectRace {
 
 struct DynamicAnnContext {
   Mutex mtx;
-  ExpectRace expect;
   ExpectRace benign;
 
   DynamicAnnContext() : mtx(MutexTypeAnnotations) {}
@@ -91,7 +88,7 @@ static void AddExpectRace(ExpectRace *list,
       return;
     }
   }
-  race = (ExpectRace*)internal_alloc(MBlockExpectRace, sizeof(ExpectRace));
+  race = static_cast<ExpectRace *>(Alloc(sizeof(ExpectRace)));
   race->addr = addr;
   race->size = size;
   race->file = f;
@@ -138,81 +135,12 @@ static void InitList(ExpectRace *list) {
 
 void InitializeDynamicAnnotations() {
   dyn_ann_ctx = new(dyn_ann_ctx_placeholder) DynamicAnnContext;
-  InitList(&dyn_ann_ctx->expect);
   InitList(&dyn_ann_ctx->benign);
 }
 
 bool IsExpectedReport(uptr addr, uptr size) {
   ReadLock lock(&dyn_ann_ctx->mtx);
-  if (CheckContains(&dyn_ann_ctx->expect, addr, size))
-    return true;
-  if (CheckContains(&dyn_ann_ctx->benign, addr, size))
-    return true;
-  return false;
-}
-
-static void CollectMatchedBenignRaces(Vector<ExpectRace> *matched,
-    int *unique_count, int *hit_count, atomic_uintptr_t ExpectRace::*counter) {
-  ExpectRace *list = &dyn_ann_ctx->benign;
-  for (ExpectRace *race = list->next; race != list; race = race->next) {
-    (*unique_count)++;
-    const uptr cnt = atomic_load_relaxed(&(race->*counter));
-    if (cnt == 0)
-      continue;
-    *hit_count += cnt;
-    uptr i = 0;
-    for (; i < matched->Size(); i++) {
-      ExpectRace *race0 = &(*matched)[i];
-      if (race->line == race0->line
-          && internal_strcmp(race->file, race0->file) == 0
-          && internal_strcmp(race->desc, race0->desc) == 0) {
-        atomic_fetch_add(&(race0->*counter), cnt, memory_order_relaxed);
-        break;
-      }
-    }
-    if (i == matched->Size())
-      matched->PushBack(*race);
-  }
-}
-
-void PrintMatchedBenignRaces() {
-  Lock lock(&dyn_ann_ctx->mtx);
-  int unique_count = 0;
-  int hit_count = 0;
-  int add_count = 0;
-  Vector<ExpectRace> hit_matched;
-  CollectMatchedBenignRaces(&hit_matched, &unique_count, &hit_count,
-      &ExpectRace::hitcount);
-  Vector<ExpectRace> add_matched;
-  CollectMatchedBenignRaces(&add_matched, &unique_count, &add_count,
-      &ExpectRace::addcount);
-  if (hit_matched.Size()) {
-    Printf("ThreadSanitizer: Matched %d \"benign\" races (pid=%d):\n",
-        hit_count, (int)internal_getpid());
-    for (uptr i = 0; i < hit_matched.Size(); i++) {
-      Printf("%d %s:%d %s\n",
-          atomic_load_relaxed(&hit_matched[i].hitcount),
-          hit_matched[i].file, hit_matched[i].line, hit_matched[i].desc);
-    }
-  }
-  if (hit_matched.Size()) {
-    Printf("ThreadSanitizer: Annotated %d \"benign\" races, %d unique"
-           " (pid=%d):\n",
-        add_count, unique_count, (int)internal_getpid());
-    for (uptr i = 0; i < add_matched.Size(); i++) {
-      Printf("%d %s:%d %s\n",
-          atomic_load_relaxed(&add_matched[i].addcount),
-          add_matched[i].file, add_matched[i].line, add_matched[i].desc);
-    }
-  }
-}
-
-static void ReportMissedExpectedRace(ExpectRace *race) {
-  Printf("==================\n");
-  Printf("WARNING: ThreadSanitizer: missed expected data race\n");
-  Printf("  %s addr=%zx %s:%d\n",
-      race->desc, race->addr, race->file, race->line);
-  Printf("==================\n");
+  return CheckContains(&dyn_ann_ctx->benign, addr, size);
 }
 }  // namespace __tsan
 
@@ -230,20 +158,16 @@ void INTERFACE_ATTRIBUTE AnnotateHappensAfter(char *f, int l, uptr addr) {
 }
 
 void INTERFACE_ATTRIBUTE AnnotateCondVarSignal(char *f, int l, uptr cv) {
-  SCOPED_ANNOTATION(AnnotateCondVarSignal);
 }
 
 void INTERFACE_ATTRIBUTE AnnotateCondVarSignalAll(char *f, int l, uptr cv) {
-  SCOPED_ANNOTATION(AnnotateCondVarSignalAll);
 }
 
 void INTERFACE_ATTRIBUTE AnnotateMutexIsNotPHB(char *f, int l, uptr mu) {
-  SCOPED_ANNOTATION(AnnotateMutexIsNotPHB);
 }
 
 void INTERFACE_ATTRIBUTE AnnotateCondVarWait(char *f, int l, uptr cv,
                                              uptr lock) {
-  SCOPED_ANNOTATION(AnnotateCondVarWait);
 }
 
 void INTERFACE_ATTRIBUTE AnnotateRWLockCreate(char *f, int l, uptr m) {
@@ -280,86 +204,56 @@ void INTERFACE_ATTRIBUTE AnnotateRWLockReleased(char *f, int l, uptr m,
 }
 
 void INTERFACE_ATTRIBUTE AnnotateTraceMemory(char *f, int l, uptr mem) {
-  SCOPED_ANNOTATION(AnnotateTraceMemory);
 }
 
 void INTERFACE_ATTRIBUTE AnnotateFlushState(char *f, int l) {
-  SCOPED_ANNOTATION(AnnotateFlushState);
 }
 
 void INTERFACE_ATTRIBUTE AnnotateNewMemory(char *f, int l, uptr mem,
                                            uptr size) {
-  SCOPED_ANNOTATION(AnnotateNewMemory);
 }
 
 void INTERFACE_ATTRIBUTE AnnotateNoOp(char *f, int l, uptr mem) {
-  SCOPED_ANNOTATION(AnnotateNoOp);
 }
 
 void INTERFACE_ATTRIBUTE AnnotateFlushExpectedRaces(char *f, int l) {
-  SCOPED_ANNOTATION(AnnotateFlushExpectedRaces);
-  Lock lock(&dyn_ann_ctx->mtx);
-  while (dyn_ann_ctx->expect.next != &dyn_ann_ctx->expect) {
-    ExpectRace *race = dyn_ann_ctx->expect.next;
-    if (atomic_load_relaxed(&race->hitcount) == 0) {
-      ctx->nmissed_expected++;
-      ReportMissedExpectedRace(race);
-    }
-    race->prev->next = race->next;
-    race->next->prev = race->prev;
-    internal_free(race);
-  }
 }
 
 void INTERFACE_ATTRIBUTE AnnotateEnableRaceDetection(
     char *f, int l, int enable) {
-  SCOPED_ANNOTATION(AnnotateEnableRaceDetection);
-  // FIXME: Reconsider this functionality later. It may be irrelevant.
 }
 
 void INTERFACE_ATTRIBUTE AnnotateMutexIsUsedAsCondVar(
     char *f, int l, uptr mu) {
-  SCOPED_ANNOTATION(AnnotateMutexIsUsedAsCondVar);
 }
 
 void INTERFACE_ATTRIBUTE AnnotatePCQGet(
     char *f, int l, uptr pcq) {
-  SCOPED_ANNOTATION(AnnotatePCQGet);
 }
 
 void INTERFACE_ATTRIBUTE AnnotatePCQPut(
     char *f, int l, uptr pcq) {
-  SCOPED_ANNOTATION(AnnotatePCQPut);
 }
 
 void INTERFACE_ATTRIBUTE AnnotatePCQDestroy(
     char *f, int l, uptr pcq) {
-  SCOPED_ANNOTATION(AnnotatePCQDestroy);
 }
 
 void INTERFACE_ATTRIBUTE AnnotatePCQCreate(
     char *f, int l, uptr pcq) {
-  SCOPED_ANNOTATION(AnnotatePCQCreate);
 }
 
 void INTERFACE_ATTRIBUTE AnnotateExpectRace(
     char *f, int l, uptr mem, char *desc) {
-  SCOPED_ANNOTATION(AnnotateExpectRace);
-  Lock lock(&dyn_ann_ctx->mtx);
-  AddExpectRace(&dyn_ann_ctx->expect,
-                f, l, mem, 1, desc);
-  DPrintf("Add expected race: %s addr=%zx %s:%d\n", desc, mem, f, l);
 }
 
-static void BenignRaceImpl(
-    char *f, int l, uptr mem, uptr size, char *desc) {
+static void BenignRaceImpl(char *f, int l, uptr mem, uptr size, char *desc) {
   Lock lock(&dyn_ann_ctx->mtx);
   AddExpectRace(&dyn_ann_ctx->benign,
                 f, l, mem, size, desc);
   DPrintf("Add benign race: %s addr=%zx %s:%d\n", desc, mem, f, l);
 }
 
-// FIXME: Turn it off later. WTF is benign race?1?? Go talk to Hans Boehm.
 void INTERFACE_ATTRIBUTE AnnotateBenignRaceSized(
     char *f, int l, uptr mem, uptr size, char *desc) {
   SCOPED_ANNOTATION(AnnotateBenignRaceSized);
@@ -379,7 +273,7 @@ void INTERFACE_ATTRIBUTE AnnotateIgnoreReadsBegin(char *f, int l) {
 
 void INTERFACE_ATTRIBUTE AnnotateIgnoreReadsEnd(char *f, int l) {
   SCOPED_ANNOTATION(AnnotateIgnoreReadsEnd);
-  ThreadIgnoreEnd(thr, pc);
+  ThreadIgnoreEnd(thr);
 }
 
 void INTERFACE_ATTRIBUTE AnnotateIgnoreWritesBegin(char *f, int l) {
@@ -389,7 +283,7 @@ void INTERFACE_ATTRIBUTE AnnotateIgnoreWritesBegin(char *f, int l) {
 
 void INTERFACE_ATTRIBUTE AnnotateIgnoreWritesEnd(char *f, int l) {
   SCOPED_ANNOTATION(AnnotateIgnoreWritesEnd);
-  ThreadIgnoreEnd(thr, pc);
+  ThreadIgnoreEnd(thr);
 }
 
 void INTERFACE_ATTRIBUTE AnnotateIgnoreSyncBegin(char *f, int l) {
@@ -399,17 +293,15 @@ void INTERFACE_ATTRIBUTE AnnotateIgnoreSyncBegin(char *f, int l) {
 
 void INTERFACE_ATTRIBUTE AnnotateIgnoreSyncEnd(char *f, int l) {
   SCOPED_ANNOTATION(AnnotateIgnoreSyncEnd);
-  ThreadIgnoreSyncEnd(thr, pc);
+  ThreadIgnoreSyncEnd(thr);
 }
 
 void INTERFACE_ATTRIBUTE AnnotatePublishMemoryRange(
     char *f, int l, uptr addr, uptr size) {
-  SCOPED_ANNOTATION(AnnotatePublishMemoryRange);
 }
 
 void INTERFACE_ATTRIBUTE AnnotateUnpublishMemoryRange(
     char *f, int l, uptr addr, uptr size) {
-  SCOPED_ANNOTATION(AnnotateUnpublishMemoryRange);
 }
 
 void INTERFACE_ATTRIBUTE AnnotateThreadName(
@@ -422,11 +314,9 @@ void INTERFACE_ATTRIBUTE AnnotateThreadName(
 // WTFAnnotateHappensAfter(). Those are being used by Webkit to annotate
 // atomic operations, which should be handled by ThreadSanitizer correctly.
 void INTERFACE_ATTRIBUTE WTFAnnotateHappensBefore(char *f, int l, uptr addr) {
-  SCOPED_ANNOTATION(AnnotateHappensBefore);
 }
 
 void INTERFACE_ATTRIBUTE WTFAnnotateHappensAfter(char *f, int l, uptr addr) {
-  SCOPED_ANNOTATION(AnnotateHappensAfter);
 }
 
 void INTERFACE_ATTRIBUTE WTFAnnotateBenignRaceSized(
@@ -478,15 +368,15 @@ void __tsan_mutex_pre_lock(void *m, unsigned flagz) {
     else
       MutexPreLock(thr, pc, (uptr)m);
   }
-  ThreadIgnoreBegin(thr, pc, /*save_stack=*/false);
-  ThreadIgnoreSyncBegin(thr, pc, /*save_stack=*/false);
+  ThreadIgnoreBegin(thr, 0);
+  ThreadIgnoreSyncBegin(thr, 0);
 }
 
 INTERFACE_ATTRIBUTE
 void __tsan_mutex_post_lock(void *m, unsigned flagz, int rec) {
   SCOPED_ANNOTATION(__tsan_mutex_post_lock);
-  ThreadIgnoreSyncEnd(thr, pc);
-  ThreadIgnoreEnd(thr, pc);
+  ThreadIgnoreSyncEnd(thr);
+  ThreadIgnoreEnd(thr);
   if (!(flagz & MutexFlagTryLockFailed)) {
     if (flagz & MutexFlagReadLock)
       MutexPostReadLock(thr, pc, (uptr)m, flagz);
@@ -505,44 +395,44 @@ int __tsan_mutex_pre_unlock(void *m, unsigned flagz) {
   } else {
     ret = MutexUnlock(thr, pc, (uptr)m, flagz);
   }
-  ThreadIgnoreBegin(thr, pc, /*save_stack=*/false);
-  ThreadIgnoreSyncBegin(thr, pc, /*save_stack=*/false);
+  ThreadIgnoreBegin(thr, 0);
+  ThreadIgnoreSyncBegin(thr, 0);
   return ret;
 }
 
 INTERFACE_ATTRIBUTE
 void __tsan_mutex_post_unlock(void *m, unsigned flagz) {
   SCOPED_ANNOTATION(__tsan_mutex_post_unlock);
-  ThreadIgnoreSyncEnd(thr, pc);
-  ThreadIgnoreEnd(thr, pc);
+  ThreadIgnoreSyncEnd(thr);
+  ThreadIgnoreEnd(thr);
 }
 
 INTERFACE_ATTRIBUTE
 void __tsan_mutex_pre_signal(void *addr, unsigned flagz) {
   SCOPED_ANNOTATION(__tsan_mutex_pre_signal);
-  ThreadIgnoreBegin(thr, pc, /*save_stack=*/false);
-  ThreadIgnoreSyncBegin(thr, pc, /*save_stack=*/false);
+  ThreadIgnoreBegin(thr, 0);
+  ThreadIgnoreSyncBegin(thr, 0);
 }
 
 INTERFACE_ATTRIBUTE
 void __tsan_mutex_post_signal(void *addr, unsigned flagz) {
   SCOPED_ANNOTATION(__tsan_mutex_post_signal);
-  ThreadIgnoreSyncEnd(thr, pc);
-  ThreadIgnoreEnd(thr, pc);
+  ThreadIgnoreSyncEnd(thr);
+  ThreadIgnoreEnd(thr);
 }
 
 INTERFACE_ATTRIBUTE
 void __tsan_mutex_pre_divert(void *addr, unsigned flagz) {
   SCOPED_ANNOTATION(__tsan_mutex_pre_divert);
   // Exit from ignore region started in __tsan_mutex_pre_lock/unlock/signal.
-  ThreadIgnoreSyncEnd(thr, pc);
-  ThreadIgnoreEnd(thr, pc);
+  ThreadIgnoreSyncEnd(thr);
+  ThreadIgnoreEnd(thr);
 }
 
 INTERFACE_ATTRIBUTE
 void __tsan_mutex_post_divert(void *addr, unsigned flagz) {
   SCOPED_ANNOTATION(__tsan_mutex_post_divert);
-  ThreadIgnoreBegin(thr, pc, /*save_stack=*/false);
-  ThreadIgnoreSyncBegin(thr, pc, /*save_stack=*/false);
+  ThreadIgnoreBegin(thr, 0);
+  ThreadIgnoreSyncBegin(thr, 0);
 }
 }  // extern "C"
index 21fe4a19619ad779acf64bb1504130f8f40813f1..24ba3bb1f65df4e3148c3a4098922fdf75b6b1d3 100644 (file)
@@ -32,6 +32,7 @@ using namespace __tsan;
 static StaticSpinMutex mutex128;
 #endif
 
+#if SANITIZER_DEBUG
 static bool IsLoadOrder(morder mo) {
   return mo == mo_relaxed || mo == mo_consume
       || mo == mo_acquire || mo == mo_seq_cst;
@@ -40,6 +41,7 @@ static bool IsLoadOrder(morder mo) {
 static bool IsStoreOrder(morder mo) {
   return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
 }
+#endif
 
 static bool IsReleaseOrder(morder mo) {
   return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst;
@@ -161,16 +163,16 @@ a128 func_cas(volatile a128 *v, a128 cmp, a128 xch) {
 }
 #endif
 
-template<typename T>
-static int SizeLog() {
+template <typename T>
+static int AccessSize() {
   if (sizeof(T) <= 1)
-    return kSizeLog1;
+    return 1;
   else if (sizeof(T) <= 2)
-    return kSizeLog2;
+    return 2;
   else if (sizeof(T) <= 4)
-    return kSizeLog4;
+    return 4;
   else
-    return kSizeLog8;
+    return 8;
   // For 16-byte atomics we also use 8-byte memory access,
   // this leads to false negatives only in very obscure cases.
 }
@@ -202,7 +204,7 @@ static memory_order to_mo(morder mo) {
   case mo_acq_rel: return memory_order_acq_rel;
   case mo_seq_cst: return memory_order_seq_cst;
   }
-  CHECK(0);
+  DCHECK(0);
   return memory_order_seq_cst;
 }
 
@@ -218,27 +220,28 @@ static a128 NoTsanAtomicLoad(const volatile a128 *a, morder mo) {
 }
 #endif
 
-template<typename T>
+template <typename T>
 static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a, morder mo) {
-  CHECK(IsLoadOrder(mo));
+  DCHECK(IsLoadOrder(mo));
   // This fast-path is critical for performance.
   // Assume the access is atomic.
   if (!IsAcquireOrder(mo)) {
-    MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
+    MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(),
+                 kAccessRead | kAccessAtomic);
     return NoTsanAtomicLoad(a, mo);
   }
   // Don't create sync object if it does not exist yet. For example, an atomic
   // pointer is initialized to nullptr and then periodically acquire-loaded.
   T v = NoTsanAtomicLoad(a, mo);
-  SyncVar *s = ctx->metamap.GetIfExistsAndLock((uptr)a, false);
+  SyncVar *s = ctx->metamap.GetSyncIfExists((uptr)a);
   if (s) {
+    ReadLock l(&s->mtx);
     AcquireImpl(thr, pc, &s->clock);
     // Re-read under sync mutex because we need a consistent snapshot
     // of the value and the clock we acquire.
     v = NoTsanAtomicLoad(a, mo);
-    s->mtx.ReadUnlock();
   }
-  MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
+  MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessRead | kAccessAtomic);
   return v;
 }
 
@@ -254,11 +257,11 @@ static void NoTsanAtomicStore(volatile a128 *a, a128 v, morder mo) {
 }
 #endif
 
-template<typename T>
+template <typename T>
 static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
-    morder mo) {
-  CHECK(IsStoreOrder(mo));
-  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
+                        morder mo) {
+  DCHECK(IsStoreOrder(mo));
+  MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessWrite | kAccessAtomic);
   // This fast-path is critical for performance.
   // Assume the access is atomic.
   // Strictly saying even relaxed store cuts off release sequence,
@@ -268,35 +271,32 @@ static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
     return;
   }
   __sync_synchronize();
-  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true);
+  SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false);
+  Lock l(&s->mtx);
   thr->fast_state.IncrementEpoch();
   // Can't increment epoch w/o writing to the trace as well.
   TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
   ReleaseStoreImpl(thr, pc, &s->clock);
   NoTsanAtomicStore(a, v, mo);
-  s->mtx.Unlock();
 }
 
-template<typename T, T (*F)(volatile T *v, T op)>
+template <typename T, T (*F)(volatile T *v, T op)>
 static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
-  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
-  SyncVar *s = 0;
-  if (mo != mo_relaxed) {
-    s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true);
-    thr->fast_state.IncrementEpoch();
-    // Can't increment epoch w/o writing to the trace as well.
-    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
-    if (IsAcqRelOrder(mo))
-      AcquireReleaseImpl(thr, pc, &s->clock);
-    else if (IsReleaseOrder(mo))
-      ReleaseImpl(thr, pc, &s->clock);
-    else if (IsAcquireOrder(mo))
-      AcquireImpl(thr, pc, &s->clock);
-  }
-  v = F(a, v);
-  if (s)
-    s->mtx.Unlock();
-  return v;
+  MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessWrite | kAccessAtomic);
+  if (LIKELY(mo == mo_relaxed))
+    return F(a, v);
+  SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false);
+  Lock l(&s->mtx);
+  thr->fast_state.IncrementEpoch();
+  // Can't increment epoch w/o writing to the trace as well.
+  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
+  if (IsAcqRelOrder(mo))
+    AcquireReleaseImpl(thr, pc, &s->clock);
+  else if (IsReleaseOrder(mo))
+    ReleaseImpl(thr, pc, &s->clock);
+  else if (IsAcquireOrder(mo))
+    AcquireImpl(thr, pc, &s->clock);
+  return F(a, v);
 }
 
 template<typename T>
@@ -399,21 +399,27 @@ static T NoTsanAtomicCAS(volatile T *a, T c, T v, morder mo, morder fmo) {
   return c;
 }
 
-template<typename T>
-static bool AtomicCAS(ThreadState *thr, uptr pc,
-    volatile T *a, T *c, T v, morder mo, morder fmo) {
+template <typename T>
+static bool AtomicCAS(ThreadState *thr, uptr pc, volatile T *a, T *c, T v,
+                      morder mo, morder fmo) {
   // 31.7.2.18: "The failure argument shall not be memory_order_release
   // nor memory_order_acq_rel". LLVM (2021-05) fallbacks to Monotonic
   // (mo_relaxed) when those are used.
-  CHECK(IsLoadOrder(fmo));
-
-  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
-  SyncVar *s = 0;
-  bool write_lock = IsReleaseOrder(mo);
-
-  if (mo != mo_relaxed || fmo != mo_relaxed)
-    s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, write_lock);
+  DCHECK(IsLoadOrder(fmo));
+
+  MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessWrite | kAccessAtomic);
+  if (LIKELY(mo == mo_relaxed && fmo == mo_relaxed)) {
+    T cc = *c;
+    T pr = func_cas(a, cc, v);
+    if (pr == cc)
+      return true;
+    *c = pr;
+    return false;
+  }
 
+  bool release = IsReleaseOrder(mo);
+  SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false);
+  RWLock l(&s->mtx, release);
   T cc = *c;
   T pr = func_cas(a, cc, v);
   bool success = pr == cc;
@@ -421,25 +427,16 @@ static bool AtomicCAS(ThreadState *thr, uptr pc,
     *c = pr;
     mo = fmo;
   }
+  thr->fast_state.IncrementEpoch();
+  // Can't increment epoch w/o writing to the trace as well.
+  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
 
-  if (s) {
-    thr->fast_state.IncrementEpoch();
-    // Can't increment epoch w/o writing to the trace as well.
-    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
-
-    if (success && IsAcqRelOrder(mo))
-      AcquireReleaseImpl(thr, pc, &s->clock);
-    else if (success && IsReleaseOrder(mo))
-      ReleaseImpl(thr, pc, &s->clock);
-    else if (IsAcquireOrder(mo))
-      AcquireImpl(thr, pc, &s->clock);
-
-    if (write_lock)
-      s->mtx.Unlock();
-    else
-      s->mtx.ReadUnlock();
-  }
-
+  if (success && IsAcqRelOrder(mo))
+    AcquireReleaseImpl(thr, pc, &s->clock);
+  else if (success && IsReleaseOrder(mo))
+    ReleaseImpl(thr, pc, &s->clock);
+  else if (IsAcquireOrder(mo))
+    AcquireImpl(thr, pc, &s->clock);
   return success;
 }
 
@@ -483,380 +480,356 @@ static morder convert_morder(morder mo) {
   return (morder)(mo & 0x7fff);
 }
 
-#define SCOPED_ATOMIC(func, ...) \
-    ThreadState *const thr = cur_thread(); \
-    if (UNLIKELY(thr->ignore_sync || thr->ignore_interceptors)) { \
-      ProcessPendingSignals(thr); \
-      return NoTsanAtomic##func(__VA_ARGS__); \
-    } \
-    const uptr callpc = (uptr)__builtin_return_address(0); \
-    uptr pc = StackTrace::GetCurrentPc(); \
-    mo = convert_morder(mo); \
-    ScopedAtomic sa(thr, callpc, a, mo, __func__); \
-    return Atomic##func(thr, pc, __VA_ARGS__); \
-/**/
-
-class ScopedAtomic {
- public:
-  ScopedAtomic(ThreadState *thr, uptr pc, const volatile void *a,
-               morder mo, const char *func)
-      : thr_(thr) {
-    FuncEntry(thr_, pc);
-    DPrintf("#%d: %s(%p, %d)\n", thr_->tid, func, a, mo);
-  }
-  ~ScopedAtomic() {
-    ProcessPendingSignals(thr_);
-    FuncExit(thr_);
-  }
- private:
-  ThreadState *thr_;
-};
+#  define ATOMIC_IMPL(func, ...)                                \
+    ThreadState *const thr = cur_thread();                      \
+    ProcessPendingSignals(thr);                                 \
+    if (UNLIKELY(thr->ignore_sync || thr->ignore_interceptors)) \
+      return NoTsanAtomic##func(__VA_ARGS__);                   \
+    mo = convert_morder(mo);                                    \
+    return Atomic##func(thr, GET_CALLER_PC(), __VA_ARGS__);
 
 extern "C" {
 SANITIZER_INTERFACE_ATTRIBUTE
 a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
-  SCOPED_ATOMIC(Load, a, mo);
+  ATOMIC_IMPL(Load, a, mo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
-  SCOPED_ATOMIC(Load, a, mo);
+  ATOMIC_IMPL(Load, a, mo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
-  SCOPED_ATOMIC(Load, a, mo);
+  ATOMIC_IMPL(Load, a, mo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
-  SCOPED_ATOMIC(Load, a, mo);
+  ATOMIC_IMPL(Load, a, mo);
 }
 
 #if __TSAN_HAS_INT128
 SANITIZER_INTERFACE_ATTRIBUTE
 a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
-  SCOPED_ATOMIC(Load, a, mo);
+  ATOMIC_IMPL(Load, a, mo);
 }
 #endif
 
 SANITIZER_INTERFACE_ATTRIBUTE
 void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
-  SCOPED_ATOMIC(Store, a, v, mo);
+  ATOMIC_IMPL(Store, a, v, mo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
-  SCOPED_ATOMIC(Store, a, v, mo);
+  ATOMIC_IMPL(Store, a, v, mo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
-  SCOPED_ATOMIC(Store, a, v, mo);
+  ATOMIC_IMPL(Store, a, v, mo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
-  SCOPED_ATOMIC(Store, a, v, mo);
+  ATOMIC_IMPL(Store, a, v, mo);
 }
 
 #if __TSAN_HAS_INT128
 SANITIZER_INTERFACE_ATTRIBUTE
 void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
-  SCOPED_ATOMIC(Store, a, v, mo);
+  ATOMIC_IMPL(Store, a, v, mo);
 }
 #endif
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
-  SCOPED_ATOMIC(Exchange, a, v, mo);
+  ATOMIC_IMPL(Exchange, a, v, mo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
-  SCOPED_ATOMIC(Exchange, a, v, mo);
+  ATOMIC_IMPL(Exchange, a, v, mo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
-  SCOPED_ATOMIC(Exchange, a, v, mo);
+  ATOMIC_IMPL(Exchange, a, v, mo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
-  SCOPED_ATOMIC(Exchange, a, v, mo);
+  ATOMIC_IMPL(Exchange, a, v, mo);
 }
 
 #if __TSAN_HAS_INT128
 SANITIZER_INTERFACE_ATTRIBUTE
 a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
-  SCOPED_ATOMIC(Exchange, a, v, mo);
+  ATOMIC_IMPL(Exchange, a, v, mo);
 }
 #endif
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
-  SCOPED_ATOMIC(FetchAdd, a, v, mo);
+  ATOMIC_IMPL(FetchAdd, a, v, mo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
-  SCOPED_ATOMIC(FetchAdd, a, v, mo);
+  ATOMIC_IMPL(FetchAdd, a, v, mo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
-  SCOPED_ATOMIC(FetchAdd, a, v, mo);
+  ATOMIC_IMPL(FetchAdd, a, v, mo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
-  SCOPED_ATOMIC(FetchAdd, a, v, mo);
+  ATOMIC_IMPL(FetchAdd, a, v, mo);
 }
 
 #if __TSAN_HAS_INT128
 SANITIZER_INTERFACE_ATTRIBUTE
 a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
-  SCOPED_ATOMIC(FetchAdd, a, v, mo);
+  ATOMIC_IMPL(FetchAdd, a, v, mo);
 }
 #endif
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
-  SCOPED_ATOMIC(FetchSub, a, v, mo);
+  ATOMIC_IMPL(FetchSub, a, v, mo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
-  SCOPED_ATOMIC(FetchSub, a, v, mo);
+  ATOMIC_IMPL(FetchSub, a, v, mo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
-  SCOPED_ATOMIC(FetchSub, a, v, mo);
+  ATOMIC_IMPL(FetchSub, a, v, mo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
-  SCOPED_ATOMIC(FetchSub, a, v, mo);
+  ATOMIC_IMPL(FetchSub, a, v, mo);
 }
 
 #if __TSAN_HAS_INT128
 SANITIZER_INTERFACE_ATTRIBUTE
 a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
-  SCOPED_ATOMIC(FetchSub, a, v, mo);
+  ATOMIC_IMPL(FetchSub, a, v, mo);
 }
 #endif
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
-  SCOPED_ATOMIC(FetchAnd, a, v, mo);
+  ATOMIC_IMPL(FetchAnd, a, v, mo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
-  SCOPED_ATOMIC(FetchAnd, a, v, mo);
+  ATOMIC_IMPL(FetchAnd, a, v, mo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
-  SCOPED_ATOMIC(FetchAnd, a, v, mo);
+  ATOMIC_IMPL(FetchAnd, a, v, mo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
-  SCOPED_ATOMIC(FetchAnd, a, v, mo);
+  ATOMIC_IMPL(FetchAnd, a, v, mo);
 }
 
 #if __TSAN_HAS_INT128
 SANITIZER_INTERFACE_ATTRIBUTE
 a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
-  SCOPED_ATOMIC(FetchAnd, a, v, mo);
+  ATOMIC_IMPL(FetchAnd, a, v, mo);
 }
 #endif
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
-  SCOPED_ATOMIC(FetchOr, a, v, mo);
+  ATOMIC_IMPL(FetchOr, a, v, mo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
-  SCOPED_ATOMIC(FetchOr, a, v, mo);
+  ATOMIC_IMPL(FetchOr, a, v, mo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
-  SCOPED_ATOMIC(FetchOr, a, v, mo);
+  ATOMIC_IMPL(FetchOr, a, v, mo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
-  SCOPED_ATOMIC(FetchOr, a, v, mo);
+  ATOMIC_IMPL(FetchOr, a, v, mo);
 }
 
 #if __TSAN_HAS_INT128
 SANITIZER_INTERFACE_ATTRIBUTE
 a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
-  SCOPED_ATOMIC(FetchOr, a, v, mo);
+  ATOMIC_IMPL(FetchOr, a, v, mo);
 }
 #endif
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
-  SCOPED_ATOMIC(FetchXor, a, v, mo);
+  ATOMIC_IMPL(FetchXor, a, v, mo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
-  SCOPED_ATOMIC(FetchXor, a, v, mo);
+  ATOMIC_IMPL(FetchXor, a, v, mo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
-  SCOPED_ATOMIC(FetchXor, a, v, mo);
+  ATOMIC_IMPL(FetchXor, a, v, mo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
-  SCOPED_ATOMIC(FetchXor, a, v, mo);
+  ATOMIC_IMPL(FetchXor, a, v, mo);
 }
 
 #if __TSAN_HAS_INT128
 SANITIZER_INTERFACE_ATTRIBUTE
 a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
-  SCOPED_ATOMIC(FetchXor, a, v, mo);
+  ATOMIC_IMPL(FetchXor, a, v, mo);
 }
 #endif
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
-  SCOPED_ATOMIC(FetchNand, a, v, mo);
+  ATOMIC_IMPL(FetchNand, a, v, mo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
-  SCOPED_ATOMIC(FetchNand, a, v, mo);
+  ATOMIC_IMPL(FetchNand, a, v, mo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
-  SCOPED_ATOMIC(FetchNand, a, v, mo);
+  ATOMIC_IMPL(FetchNand, a, v, mo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
-  SCOPED_ATOMIC(FetchNand, a, v, mo);
+  ATOMIC_IMPL(FetchNand, a, v, mo);
 }
 
 #if __TSAN_HAS_INT128
 SANITIZER_INTERFACE_ATTRIBUTE
 a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
-  SCOPED_ATOMIC(FetchNand, a, v, mo);
+  ATOMIC_IMPL(FetchNand, a, v, mo);
 }
 #endif
 
 SANITIZER_INTERFACE_ATTRIBUTE
 int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
     morder mo, morder fmo) {
-  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
     morder mo, morder fmo) {
-  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
     morder mo, morder fmo) {
-  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
     morder mo, morder fmo) {
-  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
 }
 
 #if __TSAN_HAS_INT128
 SANITIZER_INTERFACE_ATTRIBUTE
 int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
     morder mo, morder fmo) {
-  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
 }
 #endif
 
 SANITIZER_INTERFACE_ATTRIBUTE
 int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
     morder mo, morder fmo) {
-  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
     morder mo, morder fmo) {
-  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
     morder mo, morder fmo) {
-  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
     morder mo, morder fmo) {
-  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
 }
 
 #if __TSAN_HAS_INT128
 SANITIZER_INTERFACE_ATTRIBUTE
 int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
     morder mo, morder fmo) {
-  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
 }
 #endif
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
     morder mo, morder fmo) {
-  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
     morder mo, morder fmo) {
-  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
     morder mo, morder fmo) {
-  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
     morder mo, morder fmo) {
-  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
 }
 
 #if __TSAN_HAS_INT128
 SANITIZER_INTERFACE_ATTRIBUTE
 a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
     morder mo, morder fmo) {
-  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
 }
 #endif
 
 SANITIZER_INTERFACE_ATTRIBUTE
-void __tsan_atomic_thread_fence(morder mo) {
-  char* a = 0;
-  SCOPED_ATOMIC(Fence, mo);
-}
+void __tsan_atomic_thread_fence(morder mo) { ATOMIC_IMPL(Fence, mo); }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 void __tsan_atomic_signal_fence(morder mo) {
@@ -867,25 +840,23 @@ void __tsan_atomic_signal_fence(morder mo) {
 
 // Go
 
-#define ATOMIC(func, ...) \
-    if (thr->ignore_sync) { \
-      NoTsanAtomic##func(__VA_ARGS__); \
-    } else { \
-      FuncEntry(thr, cpc); \
+#  define ATOMIC(func, ...)               \
+    if (thr->ignore_sync) {               \
+      NoTsanAtomic##func(__VA_ARGS__);    \
+    } else {                              \
+      FuncEntry(thr, cpc);                \
       Atomic##func(thr, pc, __VA_ARGS__); \
-      FuncExit(thr); \
-    } \
-/**/
-
-#define ATOMIC_RET(func, ret, ...) \
-    if (thr->ignore_sync) { \
-      (ret) = NoTsanAtomic##func(__VA_ARGS__); \
-    } else { \
-      FuncEntry(thr, cpc); \
+      FuncExit(thr);                      \
+    }
+
+#  define ATOMIC_RET(func, ret, ...)              \
+    if (thr->ignore_sync) {                       \
+      (ret) = NoTsanAtomic##func(__VA_ARGS__);    \
+    } else {                                      \
+      FuncEntry(thr, cpc);                        \
       (ret) = Atomic##func(thr, pc, __VA_ARGS__); \
-      FuncExit(thr); \
-    } \
-/**/
+      FuncExit(thr);                              \
+    }
 
 extern "C" {
 SANITIZER_INTERFACE_ATTRIBUTE
diff --git a/libsanitizer/tsan/tsan_interface_inl.h b/libsanitizer/tsan/tsan_interface_inl.h
deleted file mode 100644 (file)
index 5e77d4d..0000000
+++ /dev/null
@@ -1,133 +0,0 @@
-//===-- tsan_interface_inl.h ------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of ThreadSanitizer (TSan), a race detector.
-//
-//===----------------------------------------------------------------------===//
-
-#include "tsan_interface.h"
-#include "tsan_rtl.h"
-#include "sanitizer_common/sanitizer_ptrauth.h"
-
-#define CALLERPC ((uptr)__builtin_return_address(0))
-
-using namespace __tsan;
-
-void __tsan_read1(void *addr) {
-  MemoryRead(cur_thread(), CALLERPC, (uptr)addr, kSizeLog1);
-}
-
-void __tsan_read2(void *addr) {
-  MemoryRead(cur_thread(), CALLERPC, (uptr)addr, kSizeLog2);
-}
-
-void __tsan_read4(void *addr) {
-  MemoryRead(cur_thread(), CALLERPC, (uptr)addr, kSizeLog4);
-}
-
-void __tsan_read8(void *addr) {
-  MemoryRead(cur_thread(), CALLERPC, (uptr)addr, kSizeLog8);
-}
-
-void __tsan_write1(void *addr) {
-  MemoryWrite(cur_thread(), CALLERPC, (uptr)addr, kSizeLog1);
-}
-
-void __tsan_write2(void *addr) {
-  MemoryWrite(cur_thread(), CALLERPC, (uptr)addr, kSizeLog2);
-}
-
-void __tsan_write4(void *addr) {
-  MemoryWrite(cur_thread(), CALLERPC, (uptr)addr, kSizeLog4);
-}
-
-void __tsan_write8(void *addr) {
-  MemoryWrite(cur_thread(), CALLERPC, (uptr)addr, kSizeLog8);
-}
-
-void __tsan_read1_pc(void *addr, void *pc) {
-  MemoryRead(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog1);
-}
-
-void __tsan_read2_pc(void *addr, void *pc) {
-  MemoryRead(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog2);
-}
-
-void __tsan_read4_pc(void *addr, void *pc) {
-  MemoryRead(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog4);
-}
-
-void __tsan_read8_pc(void *addr, void *pc) {
-  MemoryRead(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog8);
-}
-
-void __tsan_write1_pc(void *addr, void *pc) {
-  MemoryWrite(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog1);
-}
-
-void __tsan_write2_pc(void *addr, void *pc) {
-  MemoryWrite(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog2);
-}
-
-void __tsan_write4_pc(void *addr, void *pc) {
-  MemoryWrite(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog4);
-}
-
-void __tsan_write8_pc(void *addr, void *pc) {
-  MemoryWrite(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog8);
-}
-
-void __tsan_vptr_update(void **vptr_p, void *new_val) {
-  CHECK_EQ(sizeof(vptr_p), 8);
-  if (*vptr_p != new_val) {
-    ThreadState *thr = cur_thread();
-    thr->is_vptr_access = true;
-    MemoryWrite(thr, CALLERPC, (uptr)vptr_p, kSizeLog8);
-    thr->is_vptr_access = false;
-  }
-}
-
-void __tsan_vptr_read(void **vptr_p) {
-  CHECK_EQ(sizeof(vptr_p), 8);
-  ThreadState *thr = cur_thread();
-  thr->is_vptr_access = true;
-  MemoryRead(thr, CALLERPC, (uptr)vptr_p, kSizeLog8);
-  thr->is_vptr_access = false;
-}
-
-void __tsan_func_entry(void *pc) {
-  FuncEntry(cur_thread(), STRIP_PAC_PC(pc));
-}
-
-void __tsan_func_exit() {
-  FuncExit(cur_thread());
-}
-
-void __tsan_ignore_thread_begin() {
-  ThreadIgnoreBegin(cur_thread(), CALLERPC);
-}
-
-void __tsan_ignore_thread_end() {
-  ThreadIgnoreEnd(cur_thread(), CALLERPC);
-}
-
-void __tsan_read_range(void *addr, uptr size) {
-  MemoryAccessRange(cur_thread(), CALLERPC, (uptr)addr, size, false);
-}
-
-void __tsan_write_range(void *addr, uptr size) {
-  MemoryAccessRange(cur_thread(), CALLERPC, (uptr)addr, size, true);
-}
-
-void __tsan_read_range_pc(void *addr, uptr size, void *pc) {
-  MemoryAccessRange(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, size, false);
-}
-
-void __tsan_write_range_pc(void *addr, uptr size, void *pc) {
-  MemoryAccessRange(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, size, true);
-}
index 081c6ff1022e876e115ba9032e893acdfe74033d..c090c1f08cbeb8df3f350079c99418d2aa3fff6e 100644 (file)
@@ -12,7 +12,6 @@
 
 #include "tsan_interface_java.h"
 #include "tsan_rtl.h"
-#include "tsan_mutex.h"
 #include "sanitizer_common/sanitizer_internal_defs.h"
 #include "sanitizer_common/sanitizer_common.h"
 #include "sanitizer_common/sanitizer_placement_new.h"
@@ -35,52 +34,49 @@ struct JavaContext {
   }
 };
 
-class ScopedJavaFunc {
- public:
-  ScopedJavaFunc(ThreadState *thr, uptr pc)
-      : thr_(thr) {
-    Initialize(thr_);
-    FuncEntry(thr, pc);
-  }
-
-  ~ScopedJavaFunc() {
-    FuncExit(thr_);
-    // FIXME(dvyukov): process pending signals.
-  }
-
- private:
-  ThreadState *thr_;
-};
-
 static u64 jctx_buf[sizeof(JavaContext) / sizeof(u64) + 1];
 static JavaContext *jctx;
 
+MBlock *JavaHeapBlock(uptr addr, uptr *start) {
+  if (!jctx || addr < jctx->heap_begin ||
+      addr >= jctx->heap_begin + jctx->heap_size)
+    return nullptr;
+  for (uptr p = RoundDown(addr, kMetaShadowCell); p >= jctx->heap_begin;
+       p -= kMetaShadowCell) {
+    MBlock *b = ctx->metamap.GetBlock(p);
+    if (!b)
+      continue;
+    if (p + b->siz <= addr)
+      return nullptr;
+    *start = p;
+    return b;
+  }
+  return nullptr;
+}
+
 }  // namespace __tsan
 
-#define SCOPED_JAVA_FUNC(func) \
+#define JAVA_FUNC_ENTER(func)      \
   ThreadState *thr = cur_thread(); \
-  const uptr caller_pc = GET_CALLER_PC(); \
-  const uptr pc = StackTrace::GetCurrentPc(); \
-  (void)pc; \
-  ScopedJavaFunc scoped(thr, caller_pc); \
-/**/
+  (void)thr;
 
 void __tsan_java_init(jptr heap_begin, jptr heap_size) {
-  SCOPED_JAVA_FUNC(__tsan_java_init);
-  DPrintf("#%d: java_init(%p, %p)\n", thr->tid, heap_begin, heap_size);
-  CHECK_EQ(jctx, 0);
-  CHECK_GT(heap_begin, 0);
-  CHECK_GT(heap_size, 0);
-  CHECK_EQ(heap_begin % kHeapAlignment, 0);
-  CHECK_EQ(heap_size % kHeapAlignment, 0);
-  CHECK_LT(heap_begin, heap_begin + heap_size);
+  JAVA_FUNC_ENTER(__tsan_java_init);
+  Initialize(thr);
+  DPrintf("#%d: java_init(0x%zx, 0x%zx)\n", thr->tid, heap_begin, heap_size);
+  DCHECK_EQ(jctx, 0);
+  DCHECK_GT(heap_begin, 0);
+  DCHECK_GT(heap_size, 0);
+  DCHECK_EQ(heap_begin % kHeapAlignment, 0);
+  DCHECK_EQ(heap_size % kHeapAlignment, 0);
+  DCHECK_LT(heap_begin, heap_begin + heap_size);
   jctx = new(jctx_buf) JavaContext(heap_begin, heap_size);
 }
 
 int  __tsan_java_fini() {
-  SCOPED_JAVA_FUNC(__tsan_java_fini);
+  JAVA_FUNC_ENTER(__tsan_java_fini);
   DPrintf("#%d: java_fini()\n", thr->tid);
-  CHECK_NE(jctx, 0);
+  DCHECK_NE(jctx, 0);
   // FIXME(dvyukov): this does not call atexit() callbacks.
   int status = Finalize(thr);
   DPrintf("#%d: java_fini() = %d\n", thr->tid, status);
@@ -88,74 +84,65 @@ int  __tsan_java_fini() {
 }
 
 void __tsan_java_alloc(jptr ptr, jptr size) {
-  SCOPED_JAVA_FUNC(__tsan_java_alloc);
-  DPrintf("#%d: java_alloc(%p, %p)\n", thr->tid, ptr, size);
-  CHECK_NE(jctx, 0);
-  CHECK_NE(size, 0);
-  CHECK_EQ(ptr % kHeapAlignment, 0);
-  CHECK_EQ(size % kHeapAlignment, 0);
-  CHECK_GE(ptr, jctx->heap_begin);
-  CHECK_LE(ptr + size, jctx->heap_begin + jctx->heap_size);
-
-  OnUserAlloc(thr, pc, ptr, size, false);
+  JAVA_FUNC_ENTER(__tsan_java_alloc);
+  DPrintf("#%d: java_alloc(0x%zx, 0x%zx)\n", thr->tid, ptr, size);
+  DCHECK_NE(jctx, 0);
+  DCHECK_NE(size, 0);
+  DCHECK_EQ(ptr % kHeapAlignment, 0);
+  DCHECK_EQ(size % kHeapAlignment, 0);
+  DCHECK_GE(ptr, jctx->heap_begin);
+  DCHECK_LE(ptr + size, jctx->heap_begin + jctx->heap_size);
+
+  OnUserAlloc(thr, 0, ptr, size, false);
 }
 
 void __tsan_java_free(jptr ptr, jptr size) {
-  SCOPED_JAVA_FUNC(__tsan_java_free);
-  DPrintf("#%d: java_free(%p, %p)\n", thr->tid, ptr, size);
-  CHECK_NE(jctx, 0);
-  CHECK_NE(size, 0);
-  CHECK_EQ(ptr % kHeapAlignment, 0);
-  CHECK_EQ(size % kHeapAlignment, 0);
-  CHECK_GE(ptr, jctx->heap_begin);
-  CHECK_LE(ptr + size, jctx->heap_begin + jctx->heap_size);
+  JAVA_FUNC_ENTER(__tsan_java_free);
+  DPrintf("#%d: java_free(0x%zx, 0x%zx)\n", thr->tid, ptr, size);
+  DCHECK_NE(jctx, 0);
+  DCHECK_NE(size, 0);
+  DCHECK_EQ(ptr % kHeapAlignment, 0);
+  DCHECK_EQ(size % kHeapAlignment, 0);
+  DCHECK_GE(ptr, jctx->heap_begin);
+  DCHECK_LE(ptr + size, jctx->heap_begin + jctx->heap_size);
 
   ctx->metamap.FreeRange(thr->proc(), ptr, size);
 }
 
 void __tsan_java_move(jptr src, jptr dst, jptr size) {
-  SCOPED_JAVA_FUNC(__tsan_java_move);
-  DPrintf("#%d: java_move(%p, %p, %p)\n", thr->tid, src, dst, size);
-  CHECK_NE(jctx, 0);
-  CHECK_NE(size, 0);
-  CHECK_EQ(src % kHeapAlignment, 0);
-  CHECK_EQ(dst % kHeapAlignment, 0);
-  CHECK_EQ(size % kHeapAlignment, 0);
-  CHECK_GE(src, jctx->heap_begin);
-  CHECK_LE(src + size, jctx->heap_begin + jctx->heap_size);
-  CHECK_GE(dst, jctx->heap_begin);
-  CHECK_LE(dst + size, jctx->heap_begin + jctx->heap_size);
-  CHECK_NE(dst, src);
-  CHECK_NE(size, 0);
+  JAVA_FUNC_ENTER(__tsan_java_move);
+  DPrintf("#%d: java_move(0x%zx, 0x%zx, 0x%zx)\n", thr->tid, src, dst, size);
+  DCHECK_NE(jctx, 0);
+  DCHECK_NE(size, 0);
+  DCHECK_EQ(src % kHeapAlignment, 0);
+  DCHECK_EQ(dst % kHeapAlignment, 0);
+  DCHECK_EQ(size % kHeapAlignment, 0);
+  DCHECK_GE(src, jctx->heap_begin);
+  DCHECK_LE(src + size, jctx->heap_begin + jctx->heap_size);
+  DCHECK_GE(dst, jctx->heap_begin);
+  DCHECK_LE(dst + size, jctx->heap_begin + jctx->heap_size);
+  DCHECK_NE(dst, src);
+  DCHECK_NE(size, 0);
 
   // Assuming it's not running concurrently with threads that do
   // memory accesses and mutex operations (stop-the-world phase).
   ctx->metamap.MoveMemory(src, dst, size);
 
-  // Move shadow.
-  u64 *s = (u64*)MemToShadow(src);
-  u64 *d = (u64*)MemToShadow(dst);
-  u64 *send = (u64*)MemToShadow(src + size);
-  uptr inc = 1;
-  if (dst > src) {
-    s = (u64*)MemToShadow(src + size) - 1;
-    d = (u64*)MemToShadow(dst + size) - 1;
-    send = (u64*)MemToShadow(src) - 1;
-    inc = -1;
-  }
-  for (; s != send; s += inc, d += inc) {
-    *d = *s;
-    *s = 0;
-  }
+  // Clear the destination shadow range.
+  // We used to move shadow from src to dst, but the trace format does not
+  // support that anymore as it contains addresses of accesses.
+  RawShadow *d = MemToShadow(dst);
+  RawShadow *dend = MemToShadow(dst + size);
+  internal_memset(d, 0, (dend - d) * sizeof(*d));
 }
 
 jptr __tsan_java_find(jptr *from_ptr, jptr to) {
-  SCOPED_JAVA_FUNC(__tsan_java_find);
-  DPrintf("#%d: java_find(&%p, %p)\n", *from_ptr, to);
-  CHECK_EQ((*from_ptr) % kHeapAlignment, 0);
-  CHECK_EQ(to % kHeapAlignment, 0);
-  CHECK_GE(*from_ptr, jctx->heap_begin);
-  CHECK_LE(to, jctx->heap_begin + jctx->heap_size);
+  JAVA_FUNC_ENTER(__tsan_java_find);
+  DPrintf("#%d: java_find(&0x%zx, 0x%zx)\n", thr->tid, *from_ptr, to);
+  DCHECK_EQ((*from_ptr) % kHeapAlignment, 0);
+  DCHECK_EQ(to % kHeapAlignment, 0);
+  DCHECK_GE(*from_ptr, jctx->heap_begin);
+  DCHECK_LE(to, jctx->heap_begin + jctx->heap_size);
   for (uptr from = *from_ptr; from < to; from += kHeapAlignment) {
     MBlock *b = ctx->metamap.GetBlock(from);
     if (b) {
@@ -167,101 +154,105 @@ jptr __tsan_java_find(jptr *from_ptr, jptr to) {
 }
 
 void __tsan_java_finalize() {
-  SCOPED_JAVA_FUNC(__tsan_java_finalize);
-  DPrintf("#%d: java_mutex_finalize()\n", thr->tid);
-  AcquireGlobal(thr, 0);
+  JAVA_FUNC_ENTER(__tsan_java_finalize);
+  DPrintf("#%d: java_finalize()\n", thr->tid);
+  AcquireGlobal(thr);
 }
 
 void __tsan_java_mutex_lock(jptr addr) {
-  SCOPED_JAVA_FUNC(__tsan_java_mutex_lock);
-  DPrintf("#%d: java_mutex_lock(%p)\n", thr->tid, addr);
-  CHECK_NE(jctx, 0);
-  CHECK_GE(addr, jctx->heap_begin);
-  CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
-
-  MutexPostLock(thr, pc, addr, MutexFlagLinkerInit | MutexFlagWriteReentrant |
-      MutexFlagDoPreLockOnPostLock);
+  JAVA_FUNC_ENTER(__tsan_java_mutex_lock);
+  DPrintf("#%d: java_mutex_lock(0x%zx)\n", thr->tid, addr);
+  DCHECK_NE(jctx, 0);
+  DCHECK_GE(addr, jctx->heap_begin);
+  DCHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+
+  MutexPostLock(thr, 0, addr,
+                MutexFlagLinkerInit | MutexFlagWriteReentrant |
+                    MutexFlagDoPreLockOnPostLock);
 }
 
 void __tsan_java_mutex_unlock(jptr addr) {
-  SCOPED_JAVA_FUNC(__tsan_java_mutex_unlock);
-  DPrintf("#%d: java_mutex_unlock(%p)\n", thr->tid, addr);
-  CHECK_NE(jctx, 0);
-  CHECK_GE(addr, jctx->heap_begin);
-  CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+  JAVA_FUNC_ENTER(__tsan_java_mutex_unlock);
+  DPrintf("#%d: java_mutex_unlock(0x%zx)\n", thr->tid, addr);
+  DCHECK_NE(jctx, 0);
+  DCHECK_GE(addr, jctx->heap_begin);
+  DCHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
 
-  MutexUnlock(thr, pc, addr);
+  MutexUnlock(thr, 0, addr);
 }
 
 void __tsan_java_mutex_read_lock(jptr addr) {
-  SCOPED_JAVA_FUNC(__tsan_java_mutex_read_lock);
-  DPrintf("#%d: java_mutex_read_lock(%p)\n", thr->tid, addr);
-  CHECK_NE(jctx, 0);
-  CHECK_GE(addr, jctx->heap_begin);
-  CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
-
-  MutexPostReadLock(thr, pc, addr, MutexFlagLinkerInit |
-      MutexFlagWriteReentrant | MutexFlagDoPreLockOnPostLock);
+  JAVA_FUNC_ENTER(__tsan_java_mutex_read_lock);
+  DPrintf("#%d: java_mutex_read_lock(0x%zx)\n", thr->tid, addr);
+  DCHECK_NE(jctx, 0);
+  DCHECK_GE(addr, jctx->heap_begin);
+  DCHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+
+  MutexPostReadLock(thr, 0, addr,
+                    MutexFlagLinkerInit | MutexFlagWriteReentrant |
+                        MutexFlagDoPreLockOnPostLock);
 }
 
 void __tsan_java_mutex_read_unlock(jptr addr) {
-  SCOPED_JAVA_FUNC(__tsan_java_mutex_read_unlock);
-  DPrintf("#%d: java_mutex_read_unlock(%p)\n", thr->tid, addr);
-  CHECK_NE(jctx, 0);
-  CHECK_GE(addr, jctx->heap_begin);
-  CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+  JAVA_FUNC_ENTER(__tsan_java_mutex_read_unlock);
+  DPrintf("#%d: java_mutex_read_unlock(0x%zx)\n", thr->tid, addr);
+  DCHECK_NE(jctx, 0);
+  DCHECK_GE(addr, jctx->heap_begin);
+  DCHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
 
-  MutexReadUnlock(thr, pc, addr);
+  MutexReadUnlock(thr, 0, addr);
 }
 
 void __tsan_java_mutex_lock_rec(jptr addr, int rec) {
-  SCOPED_JAVA_FUNC(__tsan_java_mutex_lock_rec);
-  DPrintf("#%d: java_mutex_lock_rec(%p, %d)\n", thr->tid, addr, rec);
-  CHECK_NE(jctx, 0);
-  CHECK_GE(addr, jctx->heap_begin);
-  CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
-  CHECK_GT(rec, 0);
-
-  MutexPostLock(thr, pc, addr, MutexFlagLinkerInit | MutexFlagWriteReentrant |
-      MutexFlagDoPreLockOnPostLock | MutexFlagRecursiveLock, rec);
+  JAVA_FUNC_ENTER(__tsan_java_mutex_lock_rec);
+  DPrintf("#%d: java_mutex_lock_rec(0x%zx, %d)\n", thr->tid, addr, rec);
+  DCHECK_NE(jctx, 0);
+  DCHECK_GE(addr, jctx->heap_begin);
+  DCHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+  DCHECK_GT(rec, 0);
+
+  MutexPostLock(thr, 0, addr,
+                MutexFlagLinkerInit | MutexFlagWriteReentrant |
+                    MutexFlagDoPreLockOnPostLock | MutexFlagRecursiveLock,
+                rec);
 }
 
 int __tsan_java_mutex_unlock_rec(jptr addr) {
-  SCOPED_JAVA_FUNC(__tsan_java_mutex_unlock_rec);
-  DPrintf("#%d: java_mutex_unlock_rec(%p)\n", thr->tid, addr);
-  CHECK_NE(jctx, 0);
-  CHECK_GE(addr, jctx->heap_begin);
-  CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+  JAVA_FUNC_ENTER(__tsan_java_mutex_unlock_rec);
+  DPrintf("#%d: java_mutex_unlock_rec(0x%zx)\n", thr->tid, addr);
+  DCHECK_NE(jctx, 0);
+  DCHECK_GE(addr, jctx->heap_begin);
+  DCHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
 
-  return MutexUnlock(thr, pc, addr, MutexFlagRecursiveUnlock);
+  return MutexUnlock(thr, 0, addr, MutexFlagRecursiveUnlock);
 }
 
 void __tsan_java_acquire(jptr addr) {
-  SCOPED_JAVA_FUNC(__tsan_java_acquire);
-  DPrintf("#%d: java_acquire(%p)\n", thr->tid, addr);
-  CHECK_NE(jctx, 0);
-  CHECK_GE(addr, jctx->heap_begin);
-  CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+  JAVA_FUNC_ENTER(__tsan_java_acquire);
+  DPrintf("#%d: java_acquire(0x%zx)\n", thr->tid, addr);
+  DCHECK_NE(jctx, 0);
+  DCHECK_GE(addr, jctx->heap_begin);
+  DCHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
 
-  Acquire(thr, caller_pc, addr);
+  Acquire(thr, 0, addr);
 }
 
 void __tsan_java_release(jptr addr) {
-  SCOPED_JAVA_FUNC(__tsan_java_release);
-  DPrintf("#%d: java_release(%p)\n", thr->tid, addr);
-  CHECK_NE(jctx, 0);
-  CHECK_GE(addr, jctx->heap_begin);
-  CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+  JAVA_FUNC_ENTER(__tsan_java_release);
+  DPrintf("#%d: java_release(0x%zx)\n", thr->tid, addr);
+  DCHECK_NE(jctx, 0);
+  DCHECK_GE(addr, jctx->heap_begin);
+  DCHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
 
-  Release(thr, caller_pc, addr);
+  Release(thr, 0, addr);
 }
 
 void __tsan_java_release_store(jptr addr) {
-  SCOPED_JAVA_FUNC(__tsan_java_release);
-  DPrintf("#%d: java_release_store(%p)\n", thr->tid, addr);
-  CHECK_NE(jctx, 0);
-  CHECK_GE(addr, jctx->heap_begin);
-  CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+  JAVA_FUNC_ENTER(__tsan_java_release);
+  DPrintf("#%d: java_release_store(0x%zx)\n", thr->tid, addr);
+  DCHECK_NE(jctx, 0);
+  DCHECK_GE(addr, jctx->heap_begin);
+  DCHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
 
-  ReleaseStore(thr, caller_pc, addr);
+  ReleaseStore(thr, 0, addr);
 }
index 7765bc070522acefce58a03ef8d140072f408c60..f1b6768c5921be7e9b352877870ff945e19e6c0c 100644 (file)
@@ -148,7 +148,7 @@ static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
   ObtainCurrentStack(thr, pc, &stack);
   if (IsFiredSuppression(ctx, ReportTypeSignalUnsafe, stack))
     return;
-  ThreadRegistryLock l(ctx->thread_registry);
+  ThreadRegistryLock l(&ctx->thread_registry);
   ScopedReport rep(ReportTypeSignalUnsafe);
   rep.AddStack(stack, true);
   OutputReport(thr, rep);
@@ -218,7 +218,7 @@ void *user_reallocarray(ThreadState *thr, uptr pc, void *p, uptr size, uptr n) {
 }
 
 void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
-  DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
+  DPrintf("#%d: alloc(%zu) = 0x%zx\n", thr->tid, sz, p);
   ctx->metamap.AllocBlock(thr, pc, p, sz);
   if (write && thr->ignore_reads_and_writes == 0)
     MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
@@ -229,7 +229,7 @@ void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
 void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write) {
   CHECK_NE(p, (void*)0);
   uptr sz = ctx->metamap.FreeBlock(thr->proc(), p);
-  DPrintf("#%d: free(%p, %zu)\n", thr->tid, p, sz);
+  DPrintf("#%d: free(0x%zx, %zu)\n", thr->tid, p, sz);
   if (write && thr->ignore_reads_and_writes == 0)
     MemoryRangeFreed(thr, pc, (uptr)p, sz);
 }
@@ -336,7 +336,7 @@ void invoke_free_hook(void *ptr) {
   RunFreeHooks(ptr);
 }
 
-void *internal_alloc(MBlockType typ, uptr sz) {
+void *Alloc(uptr sz) {
   ThreadState *thr = cur_thread();
   if (thr->nomalloc) {
     thr->nomalloc = 0;  // CHECK calls internal_malloc().
@@ -345,7 +345,7 @@ void *internal_alloc(MBlockType typ, uptr sz) {
   return InternalAlloc(sz, &thr->proc()->internal_alloc_cache);
 }
 
-void internal_free(void *p) {
+void FreeImpl(void *p) {
   ThreadState *thr = cur_thread();
   if (thr->nomalloc) {
     thr->nomalloc = 0;  // CHECK calls internal_malloc().
index a5280d4472c9a8c742f6ee2d0d5ad4f9370e47f0..efea5e5abdec7a97d7ca0b619900ef26aaf97cca 100644 (file)
@@ -47,42 +47,29 @@ uptr user_alloc_usable_size(const void *p);
 void invoke_malloc_hook(void *ptr, uptr size);
 void invoke_free_hook(void *ptr);
 
-enum MBlockType {
-  MBlockScopedBuf,
-  MBlockString,
-  MBlockStackTrace,
-  MBlockShadowStack,
-  MBlockSync,
-  MBlockClock,
-  MBlockThreadContex,
-  MBlockDeadInfo,
-  MBlockRacyStacks,
-  MBlockRacyAddresses,
-  MBlockAtExit,
-  MBlockFlag,
-  MBlockReport,
-  MBlockReportMop,
-  MBlockReportThread,
-  MBlockReportMutex,
-  MBlockReportLoc,
-  MBlockReportStack,
-  MBlockSuppression,
-  MBlockExpectRace,
-  MBlockSignal,
-  MBlockJmpBuf,
+// For internal data structures.
+void *Alloc(uptr sz);
+void FreeImpl(void *p);
 
-  // This must be the last.
-  MBlockTypeCount
-};
+template <typename T, typename... Args>
+T *New(Args &&...args) {
+  return new (Alloc(sizeof(T))) T(static_cast<Args &&>(args)...);
+}
 
-// For internal data structures.
-void *internal_alloc(MBlockType typ, uptr sz);
-void internal_free(void *p);
+template <typename T>
+void Free(T *&p) {
+  if (p == nullptr)
+    return;
+  FreeImpl(p);
+  p = nullptr;
+}
 
 template <typename T>
-void DestroyAndFree(T *p) {
+void DestroyAndFree(T *&p) {
+  if (p == nullptr)
+    return;
   p->~T();
-  internal_free(p);
+  Free(p);
 }
 
 }  // namespace __tsan
diff --git a/libsanitizer/tsan/tsan_mutex.cpp b/libsanitizer/tsan/tsan_mutex.cpp
deleted file mode 100644 (file)
index d8b1826..0000000
+++ /dev/null
@@ -1,280 +0,0 @@
-//===-- tsan_mutex.cpp ----------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of ThreadSanitizer (TSan), a race detector.
-//
-//===----------------------------------------------------------------------===//
-#include "sanitizer_common/sanitizer_libc.h"
-#include "tsan_mutex.h"
-#include "tsan_platform.h"
-#include "tsan_rtl.h"
-
-namespace __tsan {
-
-// Simple reader-writer spin-mutex. Optimized for not-so-contended case.
-// Readers have preference, can possibly starvate writers.
-
-// The table fixes what mutexes can be locked under what mutexes.
-// E.g. if the row for MutexTypeThreads contains MutexTypeReport,
-// then Report mutex can be locked while under Threads mutex.
-// The leaf mutexes can be locked under any other mutexes.
-// Recursive locking is not supported.
-#if SANITIZER_DEBUG && !SANITIZER_GO
-const MutexType MutexTypeLeaf = (MutexType)-1;
-static MutexType CanLockTab[MutexTypeCount][MutexTypeCount] = {
-  /*0  MutexTypeInvalid*/     {},
-  /*1  MutexTypeTrace*/       {MutexTypeLeaf},
-  /*2  MutexTypeThreads*/     {MutexTypeReport},
-  /*3  MutexTypeReport*/      {MutexTypeSyncVar,
-                               MutexTypeMBlock, MutexTypeJavaMBlock},
-  /*4  MutexTypeSyncVar*/     {MutexTypeDDetector},
-  /*5  MutexTypeSyncTab*/     {},  // unused
-  /*6  MutexTypeSlab*/        {MutexTypeLeaf},
-  /*7  MutexTypeAnnotations*/ {},
-  /*8  MutexTypeAtExit*/      {MutexTypeSyncVar},
-  /*9  MutexTypeMBlock*/      {MutexTypeSyncVar},
-  /*10 MutexTypeJavaMBlock*/  {MutexTypeSyncVar},
-  /*11 MutexTypeDDetector*/   {},
-  /*12 MutexTypeFired*/       {MutexTypeLeaf},
-  /*13 MutexTypeRacy*/        {MutexTypeLeaf},
-  /*14 MutexTypeGlobalProc*/  {},
-};
-
-static bool CanLockAdj[MutexTypeCount][MutexTypeCount];
-#endif
-
-void InitializeMutex() {
-#if SANITIZER_DEBUG && !SANITIZER_GO
-  // Build the "can lock" adjacency matrix.
-  // If [i][j]==true, then one can lock mutex j while under mutex i.
-  const int N = MutexTypeCount;
-  int cnt[N] = {};
-  bool leaf[N] = {};
-  for (int i = 1; i < N; i++) {
-    for (int j = 0; j < N; j++) {
-      MutexType z = CanLockTab[i][j];
-      if (z == MutexTypeInvalid)
-        continue;
-      if (z == MutexTypeLeaf) {
-        CHECK(!leaf[i]);
-        leaf[i] = true;
-        continue;
-      }
-      CHECK(!CanLockAdj[i][(int)z]);
-      CanLockAdj[i][(int)z] = true;
-      cnt[i]++;
-    }
-  }
-  for (int i = 0; i < N; i++) {
-    CHECK(!leaf[i] || cnt[i] == 0);
-  }
-  // Add leaf mutexes.
-  for (int i = 0; i < N; i++) {
-    if (!leaf[i])
-      continue;
-    for (int j = 0; j < N; j++) {
-      if (i == j || leaf[j] || j == MutexTypeInvalid)
-        continue;
-      CHECK(!CanLockAdj[j][i]);
-      CanLockAdj[j][i] = true;
-    }
-  }
-  // Build the transitive closure.
-  bool CanLockAdj2[MutexTypeCount][MutexTypeCount];
-  for (int i = 0; i < N; i++) {
-    for (int j = 0; j < N; j++) {
-      CanLockAdj2[i][j] = CanLockAdj[i][j];
-    }
-  }
-  for (int k = 0; k < N; k++) {
-    for (int i = 0; i < N; i++) {
-      for (int j = 0; j < N; j++) {
-        if (CanLockAdj2[i][k] && CanLockAdj2[k][j]) {
-          CanLockAdj2[i][j] = true;
-        }
-      }
-    }
-  }
-#if 0
-  Printf("Can lock graph:\n");
-  for (int i = 0; i < N; i++) {
-    for (int j = 0; j < N; j++) {
-      Printf("%d ", CanLockAdj[i][j]);
-    }
-    Printf("\n");
-  }
-  Printf("Can lock graph closure:\n");
-  for (int i = 0; i < N; i++) {
-    for (int j = 0; j < N; j++) {
-      Printf("%d ", CanLockAdj2[i][j]);
-    }
-    Printf("\n");
-  }
-#endif
-  // Verify that the graph is acyclic.
-  for (int i = 0; i < N; i++) {
-    if (CanLockAdj2[i][i]) {
-      Printf("Mutex %d participates in a cycle\n", i);
-      Die();
-    }
-  }
-#endif
-}
-
-InternalDeadlockDetector::InternalDeadlockDetector() {
-  // Rely on zero initialization because some mutexes can be locked before ctor.
-}
-
-#if SANITIZER_DEBUG && !SANITIZER_GO
-void InternalDeadlockDetector::Lock(MutexType t) {
-  // Printf("LOCK %d @%zu\n", t, seq_ + 1);
-  CHECK_GT(t, MutexTypeInvalid);
-  CHECK_LT(t, MutexTypeCount);
-  u64 max_seq = 0;
-  u64 max_idx = MutexTypeInvalid;
-  for (int i = 0; i != MutexTypeCount; i++) {
-    if (locked_[i] == 0)
-      continue;
-    CHECK_NE(locked_[i], max_seq);
-    if (max_seq < locked_[i]) {
-      max_seq = locked_[i];
-      max_idx = i;
-    }
-  }
-  locked_[t] = ++seq_;
-  if (max_idx == MutexTypeInvalid)
-    return;
-  // Printf("  last %d @%zu\n", max_idx, max_seq);
-  if (!CanLockAdj[max_idx][t]) {
-    Printf("ThreadSanitizer: internal deadlock detected\n");
-    Printf("ThreadSanitizer: can't lock %d while under %zu\n",
-               t, (uptr)max_idx);
-    CHECK(0);
-  }
-}
-
-void InternalDeadlockDetector::Unlock(MutexType t) {
-  // Printf("UNLO %d @%zu #%zu\n", t, seq_, locked_[t]);
-  CHECK(locked_[t]);
-  locked_[t] = 0;
-}
-
-void InternalDeadlockDetector::CheckNoLocks() {
-  for (int i = 0; i != MutexTypeCount; i++) {
-    CHECK_EQ(locked_[i], 0);
-  }
-}
-#endif
-
-void CheckNoLocks(ThreadState *thr) {
-#if SANITIZER_DEBUG && !SANITIZER_GO
-  thr->internal_deadlock_detector.CheckNoLocks();
-#endif
-}
-
-const uptr kUnlocked = 0;
-const uptr kWriteLock = 1;
-const uptr kReadLock = 2;
-
-class Backoff {
- public:
-  Backoff()
-    : iter_() {
-  }
-
-  bool Do() {
-    if (iter_++ < kActiveSpinIters)
-      proc_yield(kActiveSpinCnt);
-    else
-      internal_sched_yield();
-    return true;
-  }
-
-  u64 Contention() const {
-    u64 active = iter_ % kActiveSpinIters;
-    u64 passive = iter_ - active;
-    return active + 10 * passive;
-  }
-
- private:
-  int iter_;
-  static const int kActiveSpinIters = 10;
-  static const int kActiveSpinCnt = 20;
-};
-
-Mutex::Mutex(MutexType type) {
-  CHECK_GT(type, MutexTypeInvalid);
-  CHECK_LT(type, MutexTypeCount);
-#if SANITIZER_DEBUG
-  type_ = type;
-#endif
-  atomic_store(&state_, kUnlocked, memory_order_relaxed);
-}
-
-Mutex::~Mutex() {
-  CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked);
-}
-
-void Mutex::Lock() {
-#if SANITIZER_DEBUG && !SANITIZER_GO
-  cur_thread()->internal_deadlock_detector.Lock(type_);
-#endif
-  uptr cmp = kUnlocked;
-  if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
-                                     memory_order_acquire))
-    return;
-  for (Backoff backoff; backoff.Do();) {
-    if (atomic_load(&state_, memory_order_relaxed) == kUnlocked) {
-      cmp = kUnlocked;
-      if (atomic_compare_exchange_weak(&state_, &cmp, kWriteLock,
-                                       memory_order_acquire)) {
-        return;
-      }
-    }
-  }
-}
-
-void Mutex::Unlock() {
-  uptr prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
-  (void)prev;
-  DCHECK_NE(prev & kWriteLock, 0);
-#if SANITIZER_DEBUG && !SANITIZER_GO
-  cur_thread()->internal_deadlock_detector.Unlock(type_);
-#endif
-}
-
-void Mutex::ReadLock() {
-#if SANITIZER_DEBUG && !SANITIZER_GO
-  cur_thread()->internal_deadlock_detector.Lock(type_);
-#endif
-  uptr prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
-  if ((prev & kWriteLock) == 0)
-    return;
-  for (Backoff backoff; backoff.Do();) {
-    prev = atomic_load(&state_, memory_order_acquire);
-    if ((prev & kWriteLock) == 0) {
-      return;
-    }
-  }
-}
-
-void Mutex::ReadUnlock() {
-  uptr prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release);
-  (void)prev;
-  DCHECK_EQ(prev & kWriteLock, 0);
-  DCHECK_GT(prev & ~kWriteLock, 0);
-#if SANITIZER_DEBUG && !SANITIZER_GO
-  cur_thread()->internal_deadlock_detector.Unlock(type_);
-#endif
-}
-
-void Mutex::CheckLocked() {
-  CHECK_NE(atomic_load(&state_, memory_order_relaxed), 0);
-}
-
-}  // namespace __tsan
diff --git a/libsanitizer/tsan/tsan_mutex.h b/libsanitizer/tsan/tsan_mutex.h
deleted file mode 100644 (file)
index 9a579ea..0000000
+++ /dev/null
@@ -1,87 +0,0 @@
-//===-- tsan_mutex.h --------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of ThreadSanitizer (TSan), a race detector.
-//
-//===----------------------------------------------------------------------===//
-#ifndef TSAN_MUTEX_H
-#define TSAN_MUTEX_H
-
-#include "sanitizer_common/sanitizer_atomic.h"
-#include "sanitizer_common/sanitizer_mutex.h"
-#include "tsan_defs.h"
-
-namespace __tsan {
-
-enum MutexType {
-  MutexTypeInvalid,
-  MutexTypeTrace,
-  MutexTypeThreads,
-  MutexTypeReport,
-  MutexTypeSyncVar,
-  MutexTypeSyncTab,
-  MutexTypeSlab,
-  MutexTypeAnnotations,
-  MutexTypeAtExit,
-  MutexTypeMBlock,
-  MutexTypeJavaMBlock,
-  MutexTypeDDetector,
-  MutexTypeFired,
-  MutexTypeRacy,
-  MutexTypeGlobalProc,
-
-  // This must be the last.
-  MutexTypeCount
-};
-
-class Mutex {
- public:
-  explicit Mutex(MutexType type);
-  ~Mutex();
-
-  void Lock();
-  void Unlock();
-
-  void ReadLock();
-  void ReadUnlock();
-
-  void CheckLocked();
-
- private:
-  atomic_uintptr_t state_;
-#if SANITIZER_DEBUG
-  MutexType type_;
-#endif
-
-  Mutex(const Mutex&);
-  void operator = (const Mutex&);
-};
-
-typedef GenericScopedLock<Mutex> Lock;
-typedef GenericScopedReadLock<Mutex> ReadLock;
-
-class InternalDeadlockDetector {
- public:
-  InternalDeadlockDetector();
-  void Lock(MutexType t);
-  void Unlock(MutexType t);
-  void CheckNoLocks();
- private:
-  u64 seq_;
-  u64 locked_[MutexTypeCount];
-};
-
-void InitializeMutex();
-
-// Checks that the current thread does not hold any runtime locks
-// (e.g. when returning from an interceptor).
-void CheckNoLocks(ThreadState *thr);
-
-}  // namespace __tsan
-
-#endif  // TSAN_MUTEX_H
index 813fa3bca9366ceeb8883ee170864ce2a6dc515c..efc0e4195a12d8d8bf2c011c3a16347b4ede61a6 100644 (file)
 
 namespace __tsan {
 
-const uptr MutexSet::kMaxSize;
-
 MutexSet::MutexSet() {
-  size_ = 0;
-  internal_memset(&descs_, 0, sizeof(descs_));
 }
 
 void MutexSet::Add(u64 id, bool write, u64 epoch) {
@@ -44,9 +40,12 @@ void MutexSet::Add(u64 id, bool write, u64 epoch) {
     CHECK_EQ(size_, kMaxSize - 1);
   }
   // Add new mutex descriptor.
+  descs_[size_].addr = 0;
+  descs_[size_].stack_id = kInvalidStackID;
   descs_[size_].id = id;
   descs_[size_].write = write;
   descs_[size_].epoch = epoch;
+  descs_[size_].seq = seq_++;
   descs_[size_].count = 1;
   size_++;
 }
@@ -70,6 +69,46 @@ void MutexSet::Remove(u64 id) {
   }
 }
 
+void MutexSet::AddAddr(uptr addr, StackID stack_id, bool write) {
+  // Look up existing mutex with the same id.
+  for (uptr i = 0; i < size_; i++) {
+    if (descs_[i].addr == addr) {
+      descs_[i].count++;
+      descs_[i].seq = seq_++;
+      return;
+    }
+  }
+  // On overflow, find the oldest mutex and drop it.
+  if (size_ == kMaxSize) {
+    uptr min = 0;
+    for (uptr i = 0; i < size_; i++) {
+      if (descs_[i].seq < descs_[min].seq)
+        min = i;
+    }
+    RemovePos(min);
+    CHECK_EQ(size_, kMaxSize - 1);
+  }
+  // Add new mutex descriptor.
+  descs_[size_].addr = addr;
+  descs_[size_].stack_id = stack_id;
+  descs_[size_].id = 0;
+  descs_[size_].write = write;
+  descs_[size_].epoch = 0;
+  descs_[size_].seq = seq_++;
+  descs_[size_].count = 1;
+  size_++;
+}
+
+void MutexSet::DelAddr(uptr addr, bool destroy) {
+  for (uptr i = 0; i < size_; i++) {
+    if (descs_[i].addr == addr) {
+      if (destroy || --descs_[i].count == 0)
+        RemovePos(i);
+      return;
+    }
+  }
+}
+
 void MutexSet::RemovePos(uptr i) {
   CHECK_LT(i, size_);
   descs_[i] = descs_[size_ - 1];
index d63881f40290909590814e91b854ea0455fe717c..a448cee5a87731a94f3eb38761cc00e7b58eb21c 100644 (file)
@@ -21,12 +21,22 @@ class MutexSet {
  public:
   // Holds limited number of mutexes.
   // The oldest mutexes are discarded on overflow.
-  static const uptr kMaxSize = 16;
+  static constexpr uptr kMaxSize = 16;
   struct Desc {
+    uptr addr;
+    StackID stack_id;
     u64 id;
     u64 epoch;
-    int count;
+    u32 seq;
+    u32 count;
     bool write;
+
+    Desc() { internal_memset(this, 0, sizeof(*this)); }
+    Desc(const Desc& other) { *this = other; }
+    Desc& operator=(const MutexSet::Desc& other) {
+      internal_memcpy(this, &other, sizeof(*this));
+      return *this;
+    }
   };
 
   MutexSet();
@@ -34,21 +44,19 @@ class MutexSet {
   void Add(u64 id, bool write, u64 epoch);
   void Del(u64 id, bool write);
   void Remove(u64 id);  // Removes the mutex completely (if it's destroyed).
+  void AddAddr(uptr addr, StackID stack_id, bool write);
+  void DelAddr(uptr addr, bool destroy = false);
   uptr Size() const;
   Desc Get(uptr i) const;
 
-  void operator=(const MutexSet &other) {
-    internal_memcpy(this, &other, sizeof(*this));
-  }
-
  private:
 #if !SANITIZER_GO
-  uptr size_;
+  u32 seq_ = 0;
+  uptr size_ = 0;
   Desc descs_[kMaxSize];
-#endif
 
   void RemovePos(uptr i);
-  MutexSet(const MutexSet&);
+#endif
 };
 
 // Go does not have mutexes, so do not spend memory and time.
@@ -59,7 +67,8 @@ MutexSet::MutexSet() {}
 void MutexSet::Add(u64 id, bool write, u64 epoch) {}
 void MutexSet::Del(u64 id, bool write) {}
 void MutexSet::Remove(u64 id) {}
-void MutexSet::RemovePos(uptr i) {}
+void MutexSet::AddAddr(uptr addr, StackID stack_id, bool write) {}
+void MutexSet::DelAddr(uptr addr, bool destroy) {}
 uptr MutexSet::Size() const { return 0; }
 MutexSet::Desc MutexSet::Get(uptr i) const { return Desc(); }
 #endif
index 8bd218e25fd6f5a322514c1f255aa70dd3900b37..fc27a5656aadf1e02d5083f23474f233c9df66eb 100644 (file)
 
 namespace __tsan {
 
-#if defined(__x86_64__)
-#define HAS_48_BIT_ADDRESS_SPACE 1
-#elif SANITIZER_IOSSIM // arm64 iOS simulators (order of #if matters)
-#define HAS_48_BIT_ADDRESS_SPACE 1
-#elif SANITIZER_IOS // arm64 iOS devices (order of #if matters)
-#define HAS_48_BIT_ADDRESS_SPACE 0
-#elif SANITIZER_MAC // arm64 macOS (order of #if matters)
-#define HAS_48_BIT_ADDRESS_SPACE 1
-#else
-#define HAS_48_BIT_ADDRESS_SPACE 0
-#endif
-
-#if !SANITIZER_GO
+enum {
+  // App memory is not mapped onto shadow memory range.
+  kBrokenMapping = 1 << 0,
+  // Mapping app memory and back does not produce the same address,
+  // this can lead to wrong addresses in reports and potentially
+  // other bad consequences.
+  kBrokenReverseMapping = 1 << 1,
+  // Mapping is non-linear for linear user range.
+  // This is bad and can lead to unpredictable memory corruptions, etc
+  // because range access functions assume linearity.
+  kBrokenLinearity = 1 << 2,
+};
 
-#if HAS_48_BIT_ADDRESS_SPACE
 /*
 C/C++ on linux/x86_64 and freebsd/x86_64
 0000 0000 1000 - 0080 0000 0000: main binary and/or MAP_32BIT mappings (512GB)
@@ -65,9 +63,8 @@ C/C++ on netbsd/amd64 can reuse the same mapping:
  * Stack on NetBSD/amd64 has prereserved 128MB.
  * Heap grows downwards (top-down).
  * ASLR must be disabled per-process or globally.
-
 */
-struct Mapping {
+struct Mapping48AddressSpace {
   static const uptr kMetaShadowBeg = 0x300000000000ull;
   static const uptr kMetaShadowEnd = 0x340000000000ull;
   static const uptr kTraceMemBeg   = 0x600000000000ull;
@@ -82,13 +79,12 @@ struct Mapping {
   static const uptr kMidAppMemEnd  = 0x568000000000ull;
   static const uptr kHiAppMemBeg   = 0x7e8000000000ull;
   static const uptr kHiAppMemEnd   = 0x800000000000ull;
-  static const uptr kAppMemMsk     = 0x780000000000ull;
-  static const uptr kAppMemXor     = 0x040000000000ull;
+  static const uptr kShadowMsk = 0x780000000000ull;
+  static const uptr kShadowXor = 0x040000000000ull;
+  static const uptr kShadowAdd = 0x000000000000ull;
   static const uptr kVdsoBeg       = 0xf000000000000000ull;
 };
 
-#define TSAN_MID_APP_RANGE 1
-#elif defined(__mips64)
 /*
 C/C++ on linux/mips64 (40-bit VMA)
 0000 0000 00 - 0100 0000 00: -                                           (4 GB)
@@ -105,7 +101,7 @@ fe00 0000 00 - ff00 0000 00: heap                                        (4 GB)
 ff00 0000 00 - ff80 0000 00: -                                           (2 GB)
 ff80 0000 00 - ffff ffff ff: modules and main thread stack              (<2 GB)
 */
-struct Mapping40 {
+struct MappingMips64_40 {
   static const uptr kMetaShadowBeg = 0x4000000000ull;
   static const uptr kMetaShadowEnd = 0x5000000000ull;
   static const uptr kTraceMemBeg   = 0xb000000000ull;
@@ -120,14 +116,12 @@ struct Mapping40 {
   static const uptr kMidAppMemEnd  = 0xab00000000ull;
   static const uptr kHiAppMemBeg   = 0xff80000000ull;
   static const uptr kHiAppMemEnd   = 0xffffffffffull;
-  static const uptr kAppMemMsk     = 0xf800000000ull;
-  static const uptr kAppMemXor     = 0x0800000000ull;
+  static const uptr kShadowMsk = 0xf800000000ull;
+  static const uptr kShadowXor = 0x0800000000ull;
+  static const uptr kShadowAdd = 0x0000000000ull;
   static const uptr kVdsoBeg       = 0xfffff00000ull;
 };
 
-#define TSAN_MID_APP_RANGE 1
-#define TSAN_RUNTIME_VMA 1
-#elif defined(__aarch64__) && defined(__APPLE__)
 /*
 C/C++ on Darwin/iOS/ARM64 (36-bit VMA, 64 GB VM)
 0000 0000 00 - 0100 0000 00: -                                    (4 GB)
@@ -141,7 +135,7 @@ C/C++ on Darwin/iOS/ARM64 (36-bit VMA, 64 GB VM)
 0f00 0000 00 - 0fc0 0000 00: traces                               (3 GB)
 0fc0 0000 00 - 1000 0000 00: -
 */
-struct Mapping {
+struct MappingAppleAarch64 {
   static const uptr kLoAppMemBeg   = 0x0100000000ull;
   static const uptr kLoAppMemEnd   = 0x0200000000ull;
   static const uptr kHeapMemBeg    = 0x0200000000ull;
@@ -154,18 +148,14 @@ struct Mapping {
   static const uptr kTraceMemEnd   = 0x0fc0000000ull;
   static const uptr kHiAppMemBeg   = 0x0fc0000000ull;
   static const uptr kHiAppMemEnd   = 0x0fc0000000ull;
-  static const uptr kAppMemMsk     =          0x0ull;
-  static const uptr kAppMemXor     =          0x0ull;
+  static const uptr kShadowMsk = 0x0ull;
+  static const uptr kShadowXor = 0x0ull;
+  static const uptr kShadowAdd = 0x0ull;
   static const uptr kVdsoBeg       = 0x7000000000000000ull;
+  static const uptr kMidAppMemBeg = 0;
+  static const uptr kMidAppMemEnd = 0;
 };
 
-#elif defined(__aarch64__) && !defined(__APPLE__)
-// AArch64 supports multiple VMA which leads to multiple address transformation
-// functions.  To support these multiple VMAS transformations and mappings TSAN
-// runtime for AArch64 uses an external memory read (vmaSize) to select which
-// mapping to use.  Although slower, it make a same instrumented binary run on
-// multiple kernels.
-
 /*
 C/C++ on linux/aarch64 (39-bit VMA)
 0000 0010 00 - 0100 0000 00: main binary
@@ -181,7 +171,7 @@ C/C++ on linux/aarch64 (39-bit VMA)
 7c00 0000 00 - 7d00 0000 00: heap
 7d00 0000 00 - 7fff ffff ff: modules and main thread stack
 */
-struct Mapping39 {
+struct MappingAarch64_39 {
   static const uptr kLoAppMemBeg   = 0x0000001000ull;
   static const uptr kLoAppMemEnd   = 0x0100000000ull;
   static const uptr kShadowBeg     = 0x0800000000ull;
@@ -196,8 +186,9 @@ struct Mapping39 {
   static const uptr kHeapMemEnd    = 0x7d00000000ull;
   static const uptr kHiAppMemBeg   = 0x7e00000000ull;
   static const uptr kHiAppMemEnd   = 0x7fffffffffull;
-  static const uptr kAppMemMsk     = 0x7800000000ull;
-  static const uptr kAppMemXor     = 0x0200000000ull;
+  static const uptr kShadowMsk = 0x7800000000ull;
+  static const uptr kShadowXor = 0x0200000000ull;
+  static const uptr kShadowAdd = 0x0000000000ull;
   static const uptr kVdsoBeg       = 0x7f00000000ull;
 };
 
@@ -216,7 +207,8 @@ C/C++ on linux/aarch64 (42-bit VMA)
 3e000 0000 00 - 3f000 0000 00: heap
 3f000 0000 00 - 3ffff ffff ff: modules and main thread stack
 */
-struct Mapping42 {
+struct MappingAarch64_42 {
+  static const uptr kBroken = kBrokenReverseMapping;
   static const uptr kLoAppMemBeg   = 0x00000001000ull;
   static const uptr kLoAppMemEnd   = 0x01000000000ull;
   static const uptr kShadowBeg     = 0x10000000000ull;
@@ -231,12 +223,13 @@ struct Mapping42 {
   static const uptr kHeapMemEnd    = 0x3f000000000ull;
   static const uptr kHiAppMemBeg   = 0x3f000000000ull;
   static const uptr kHiAppMemEnd   = 0x3ffffffffffull;
-  static const uptr kAppMemMsk     = 0x3c000000000ull;
-  static const uptr kAppMemXor     = 0x04000000000ull;
+  static const uptr kShadowMsk = 0x3c000000000ull;
+  static const uptr kShadowXor = 0x04000000000ull;
+  static const uptr kShadowAdd = 0x00000000000ull;
   static const uptr kVdsoBeg       = 0x37f00000000ull;
 };
 
-struct Mapping48 {
+struct MappingAarch64_48 {
   static const uptr kLoAppMemBeg   = 0x0000000001000ull;
   static const uptr kLoAppMemEnd   = 0x0000200000000ull;
   static const uptr kShadowBeg     = 0x0002000000000ull;
@@ -251,22 +244,12 @@ struct Mapping48 {
   static const uptr kHeapMemEnd    = 0x0ffff00000000ull;
   static const uptr kHiAppMemBeg   = 0x0ffff00000000ull;
   static const uptr kHiAppMemEnd   = 0x1000000000000ull;
-  static const uptr kAppMemMsk     = 0x0fff800000000ull;
-  static const uptr kAppMemXor     = 0x0000800000000ull;
+  static const uptr kShadowMsk = 0x0fff800000000ull;
+  static const uptr kShadowXor = 0x0000800000000ull;
+  static const uptr kShadowAdd = 0x0000000000000ull;
   static const uptr kVdsoBeg       = 0xffff000000000ull;
 };
 
-// Indicates the runtime will define the memory regions at runtime.
-#define TSAN_RUNTIME_VMA 1
-// Indicates that mapping defines a mid range memory segment.
-#define TSAN_MID_APP_RANGE 1
-#elif defined(__powerpc64__)
-// PPC64 supports multiple VMA which leads to multiple address transformation
-// functions.  To support these multiple VMAS transformations and mappings TSAN
-// runtime for PPC64 uses an external memory read (vmaSize) to select which
-// mapping to use.  Although slower, it make a same instrumented binary run on
-// multiple kernels.
-
 /*
 C/C++ on linux/powerpc64 (44-bit VMA)
 0000 0000 0100 - 0001 0000 0000: main binary
@@ -281,7 +264,9 @@ C/C++ on linux/powerpc64 (44-bit VMA)
 0f50 0000 0000 - 0f60 0000 0000: -
 0f60 0000 0000 - 1000 0000 0000: modules and main thread stack
 */
-struct Mapping44 {
+struct MappingPPC64_44 {
+  static const uptr kBroken =
+      kBrokenMapping | kBrokenReverseMapping | kBrokenLinearity;
   static const uptr kMetaShadowBeg = 0x0b0000000000ull;
   static const uptr kMetaShadowEnd = 0x0d0000000000ull;
   static const uptr kTraceMemBeg   = 0x0d0000000000ull;
@@ -294,9 +279,12 @@ struct Mapping44 {
   static const uptr kHeapMemEnd    = 0x0f5000000000ull;
   static const uptr kHiAppMemBeg   = 0x0f6000000000ull;
   static const uptr kHiAppMemEnd   = 0x100000000000ull; // 44 bits
-  static const uptr kAppMemMsk     = 0x0f0000000000ull;
-  static const uptr kAppMemXor     = 0x002100000000ull;
+  static const uptr kShadowMsk = 0x0f0000000000ull;
+  static const uptr kShadowXor = 0x002100000000ull;
+  static const uptr kShadowAdd = 0x000000000000ull;
   static const uptr kVdsoBeg       = 0x3c0000000000000ull;
+  static const uptr kMidAppMemBeg = 0;
+  static const uptr kMidAppMemEnd = 0;
 };
 
 /*
@@ -313,7 +301,7 @@ C/C++ on linux/powerpc64 (46-bit VMA)
 3e00 0000 0000 - 3e80 0000 0000: -
 3e80 0000 0000 - 4000 0000 0000: modules and main thread stack
 */
-struct Mapping46 {
+struct MappingPPC64_46 {
   static const uptr kMetaShadowBeg = 0x100000000000ull;
   static const uptr kMetaShadowEnd = 0x200000000000ull;
   static const uptr kTraceMemBeg   = 0x200000000000ull;
@@ -326,9 +314,12 @@ struct Mapping46 {
   static const uptr kLoAppMemEnd   = 0x010000000000ull;
   static const uptr kHiAppMemBeg   = 0x3e8000000000ull;
   static const uptr kHiAppMemEnd   = 0x400000000000ull; // 46 bits
-  static const uptr kAppMemMsk     = 0x3c0000000000ull;
-  static const uptr kAppMemXor     = 0x020000000000ull;
+  static const uptr kShadowMsk = 0x3c0000000000ull;
+  static const uptr kShadowXor = 0x020000000000ull;
+  static const uptr kShadowAdd = 0x000000000000ull;
   static const uptr kVdsoBeg       = 0x7800000000000000ull;
+  static const uptr kMidAppMemBeg = 0;
+  static const uptr kMidAppMemEnd = 0;
 };
 
 /*
@@ -345,7 +336,7 @@ C/C++ on linux/powerpc64 (47-bit VMA)
 7e00 0000 0000 - 7e80 0000 0000: -
 7e80 0000 0000 - 8000 0000 0000: modules and main thread stack
 */
-struct Mapping47 {
+struct MappingPPC64_47 {
   static const uptr kMetaShadowBeg = 0x100000000000ull;
   static const uptr kMetaShadowEnd = 0x200000000000ull;
   static const uptr kTraceMemBeg   = 0x200000000000ull;
@@ -358,14 +349,14 @@ struct Mapping47 {
   static const uptr kLoAppMemEnd   = 0x010000000000ull;
   static const uptr kHiAppMemBeg   = 0x7e8000000000ull;
   static const uptr kHiAppMemEnd   = 0x800000000000ull; // 47 bits
-  static const uptr kAppMemMsk     = 0x7c0000000000ull;
-  static const uptr kAppMemXor     = 0x020000000000ull;
+  static const uptr kShadowMsk = 0x7c0000000000ull;
+  static const uptr kShadowXor = 0x020000000000ull;
+  static const uptr kShadowAdd = 0x000000000000ull;
   static const uptr kVdsoBeg       = 0x7800000000000000ull;
+  static const uptr kMidAppMemBeg = 0;
+  static const uptr kMidAppMemEnd = 0;
 };
 
-// Indicates the runtime will define the memory regions at runtime.
-#define TSAN_RUNTIME_VMA 1
-#elif defined(__s390x__)
 /*
 C/C++ on linux/s390x
 While the kernel provides a 64-bit address space, we have to restrict ourselves
@@ -380,7 +371,7 @@ a000 0000 0000 - b000 0000 0000: traces - 16TiB (max history * 128k threads)
 b000 0000 0000 - be00 0000 0000: -
 be00 0000 0000 - c000 0000 0000: heap - 2TiB (max supported by the allocator)
 */
-struct Mapping {
+struct MappingS390x {
   static const uptr kMetaShadowBeg = 0x900000000000ull;
   static const uptr kMetaShadowEnd = 0x980000000000ull;
   static const uptr kTraceMemBeg   = 0xa00000000000ull;
@@ -393,13 +384,13 @@ struct Mapping {
   static const uptr kLoAppMemEnd   = 0x0e0000000000ull;
   static const uptr kHiAppMemBeg   = 0xc00000004000ull;
   static const uptr kHiAppMemEnd   = 0xc00000004000ull;
-  static const uptr kAppMemMsk     = 0xb00000000000ull;
-  static const uptr kAppMemXor     = 0x100000000000ull;
+  static const uptr kShadowMsk = 0xb00000000000ull;
+  static const uptr kShadowXor = 0x100000000000ull;
+  static const uptr kShadowAdd = 0x000000000000ull;
   static const uptr kVdsoBeg       = 0xfffffffff000ull;
+  static const uptr kMidAppMemBeg = 0;
+  static const uptr kMidAppMemEnd = 0;
 };
-#endif
-
-#elif SANITIZER_GO && !SANITIZER_WINDOWS && HAS_48_BIT_ADDRESS_SPACE
 
 /* Go on linux, darwin and freebsd on x86_64
 0000 0000 1000 - 0000 1000 0000: executable
@@ -414,46 +405,59 @@ struct Mapping {
 6200 0000 0000 - 8000 0000 0000: -
 */
 
-struct Mapping {
+struct MappingGo48 {
   static const uptr kMetaShadowBeg = 0x300000000000ull;
   static const uptr kMetaShadowEnd = 0x400000000000ull;
   static const uptr kTraceMemBeg   = 0x600000000000ull;
   static const uptr kTraceMemEnd   = 0x620000000000ull;
   static const uptr kShadowBeg     = 0x200000000000ull;
   static const uptr kShadowEnd     = 0x238000000000ull;
-  static const uptr kAppMemBeg     = 0x000000001000ull;
-  static const uptr kAppMemEnd     = 0x00e000000000ull;
+  static const uptr kLoAppMemBeg = 0x000000001000ull;
+  static const uptr kLoAppMemEnd = 0x00e000000000ull;
+  static const uptr kMidAppMemBeg = 0;
+  static const uptr kMidAppMemEnd = 0;
+  static const uptr kHiAppMemBeg = 0;
+  static const uptr kHiAppMemEnd = 0;
+  static const uptr kHeapMemBeg = 0;
+  static const uptr kHeapMemEnd = 0;
+  static const uptr kVdsoBeg = 0;
+  static const uptr kShadowMsk = 0;
+  static const uptr kShadowXor = 0;
+  static const uptr kShadowAdd = 0x200000000000ull;
 };
 
-#elif SANITIZER_GO && SANITIZER_WINDOWS
-
 /* Go on windows
 0000 0000 1000 - 0000 1000 0000: executable
 0000 1000 0000 - 00f8 0000 0000: -
 00c0 0000 0000 - 00e0 0000 0000: heap
 00e0 0000 0000 - 0100 0000 0000: -
 0100 0000 0000 - 0500 0000 0000: shadow
-0500 0000 0000 - 0560 0000 0000: -
-0560 0000 0000 - 0760 0000 0000: traces
-0760 0000 0000 - 07d0 0000 0000: metainfo (memory blocks and sync objects)
+0500 0000 0000 - 0700 0000 0000: traces
+0700 0000 0000 - 0770 0000 0000: metainfo (memory blocks and sync objects)
 07d0 0000 0000 - 8000 0000 0000: -
 */
 
-struct Mapping {
-  static const uptr kMetaShadowBeg = 0x076000000000ull;
-  static const uptr kMetaShadowEnd = 0x07d000000000ull;
-  static const uptr kTraceMemBeg   = 0x056000000000ull;
-  static const uptr kTraceMemEnd   = 0x076000000000ull;
+struct MappingGoWindows {
+  static const uptr kMetaShadowBeg = 0x070000000000ull;
+  static const uptr kMetaShadowEnd = 0x077000000000ull;
+  static const uptr kTraceMemBeg = 0x050000000000ull;
+  static const uptr kTraceMemEnd = 0x070000000000ull;
   static const uptr kShadowBeg     = 0x010000000000ull;
   static const uptr kShadowEnd     = 0x050000000000ull;
-  static const uptr kAppMemBeg     = 0x000000001000ull;
-  static const uptr kAppMemEnd     = 0x00e000000000ull;
+  static const uptr kLoAppMemBeg = 0x000000001000ull;
+  static const uptr kLoAppMemEnd = 0x00e000000000ull;
+  static const uptr kMidAppMemBeg = 0;
+  static const uptr kMidAppMemEnd = 0;
+  static const uptr kHiAppMemBeg = 0;
+  static const uptr kHiAppMemEnd = 0;
+  static const uptr kHeapMemBeg = 0;
+  static const uptr kHeapMemEnd = 0;
+  static const uptr kVdsoBeg = 0;
+  static const uptr kShadowMsk = 0;
+  static const uptr kShadowXor = 0;
+  static const uptr kShadowAdd = 0x010000000000ull;
 };
 
-#elif SANITIZER_GO && defined(__powerpc64__)
-
-/* Only Mapping46 and Mapping47 are currently supported for powercp64 on Go. */
-
 /* Go on linux/powerpc64 (46-bit VMA)
 0000 0000 1000 - 0000 1000 0000: executable
 0000 1000 0000 - 00c0 0000 0000: -
@@ -467,15 +471,25 @@ struct Mapping {
 3800 0000 0000 - 4000 0000 0000: -
 */
 
-struct Mapping46 {
+struct MappingGoPPC64_46 {
   static const uptr kMetaShadowBeg = 0x240000000000ull;
   static const uptr kMetaShadowEnd = 0x340000000000ull;
   static const uptr kTraceMemBeg   = 0x360000000000ull;
   static const uptr kTraceMemEnd   = 0x380000000000ull;
   static const uptr kShadowBeg     = 0x200000000000ull;
   static const uptr kShadowEnd     = 0x238000000000ull;
-  static const uptr kAppMemBeg     = 0x000000001000ull;
-  static const uptr kAppMemEnd     = 0x00e000000000ull;
+  static const uptr kLoAppMemBeg = 0x000000001000ull;
+  static const uptr kLoAppMemEnd = 0x00e000000000ull;
+  static const uptr kMidAppMemBeg = 0;
+  static const uptr kMidAppMemEnd = 0;
+  static const uptr kHiAppMemBeg = 0;
+  static const uptr kHiAppMemEnd = 0;
+  static const uptr kHeapMemBeg = 0;
+  static const uptr kHeapMemEnd = 0;
+  static const uptr kVdsoBeg = 0;
+  static const uptr kShadowMsk = 0;
+  static const uptr kShadowXor = 0;
+  static const uptr kShadowAdd = 0x200000000000ull;
 };
 
 /* Go on linux/powerpc64 (47-bit VMA)
@@ -491,21 +505,27 @@ struct Mapping46 {
 6200 0000 0000 - 8000 0000 0000: -
 */
 
-struct Mapping47 {
+struct MappingGoPPC64_47 {
   static const uptr kMetaShadowBeg = 0x300000000000ull;
   static const uptr kMetaShadowEnd = 0x400000000000ull;
   static const uptr kTraceMemBeg   = 0x600000000000ull;
   static const uptr kTraceMemEnd   = 0x620000000000ull;
   static const uptr kShadowBeg     = 0x200000000000ull;
   static const uptr kShadowEnd     = 0x300000000000ull;
-  static const uptr kAppMemBeg     = 0x000000001000ull;
-  static const uptr kAppMemEnd     = 0x00e000000000ull;
+  static const uptr kLoAppMemBeg = 0x000000001000ull;
+  static const uptr kLoAppMemEnd = 0x00e000000000ull;
+  static const uptr kMidAppMemBeg = 0;
+  static const uptr kMidAppMemEnd = 0;
+  static const uptr kHiAppMemBeg = 0;
+  static const uptr kHiAppMemEnd = 0;
+  static const uptr kHeapMemBeg = 0;
+  static const uptr kHeapMemEnd = 0;
+  static const uptr kVdsoBeg = 0;
+  static const uptr kShadowMsk = 0;
+  static const uptr kShadowXor = 0;
+  static const uptr kShadowAdd = 0x200000000000ull;
 };
 
-#define TSAN_RUNTIME_VMA 1
-
-#elif SANITIZER_GO && defined(__aarch64__)
-
 /* Go on linux/aarch64 (48-bit VMA) and darwin/aarch64 (47-bit VMA)
 0000 0000 1000 - 0000 1000 0000: executable
 0000 1000 0000 - 00c0 0000 0000: -
@@ -518,22 +538,27 @@ struct Mapping47 {
 6000 0000 0000 - 6200 0000 0000: traces
 6200 0000 0000 - 8000 0000 0000: -
 */
-
-struct Mapping {
+struct MappingGoAarch64 {
   static const uptr kMetaShadowBeg = 0x300000000000ull;
   static const uptr kMetaShadowEnd = 0x400000000000ull;
   static const uptr kTraceMemBeg   = 0x600000000000ull;
   static const uptr kTraceMemEnd   = 0x620000000000ull;
   static const uptr kShadowBeg     = 0x200000000000ull;
   static const uptr kShadowEnd     = 0x300000000000ull;
-  static const uptr kAppMemBeg     = 0x000000001000ull;
-  static const uptr kAppMemEnd     = 0x00e000000000ull;
+  static const uptr kLoAppMemBeg = 0x000000001000ull;
+  static const uptr kLoAppMemEnd = 0x00e000000000ull;
+  static const uptr kMidAppMemBeg = 0;
+  static const uptr kMidAppMemEnd = 0;
+  static const uptr kHiAppMemBeg = 0;
+  static const uptr kHiAppMemEnd = 0;
+  static const uptr kHeapMemBeg = 0;
+  static const uptr kHeapMemEnd = 0;
+  static const uptr kVdsoBeg = 0;
+  static const uptr kShadowMsk = 0;
+  static const uptr kShadowXor = 0;
+  static const uptr kShadowAdd = 0x200000000000ull;
 };
 
-// Indicates the runtime will define the memory regions at runtime.
-#define TSAN_RUNTIME_VMA 1
-
-#elif SANITIZER_GO && defined(__mips64)
 /*
 Go on linux/mips64 (47-bit VMA)
 0000 0000 1000 - 0000 1000 0000: executable
@@ -547,20 +572,27 @@ Go on linux/mips64 (47-bit VMA)
 6000 0000 0000 - 6200 0000 0000: traces
 6200 0000 0000 - 8000 0000 0000: -
 */
-struct Mapping47 {
+struct MappingGoMips64_47 {
   static const uptr kMetaShadowBeg = 0x300000000000ull;
   static const uptr kMetaShadowEnd = 0x400000000000ull;
   static const uptr kTraceMemBeg = 0x600000000000ull;
   static const uptr kTraceMemEnd = 0x620000000000ull;
   static const uptr kShadowBeg = 0x200000000000ull;
   static const uptr kShadowEnd = 0x300000000000ull;
-  static const uptr kAppMemBeg = 0x000000001000ull;
-  static const uptr kAppMemEnd = 0x00e000000000ull;
+  static const uptr kLoAppMemBeg = 0x000000001000ull;
+  static const uptr kLoAppMemEnd = 0x00e000000000ull;
+  static const uptr kMidAppMemBeg = 0;
+  static const uptr kMidAppMemEnd = 0;
+  static const uptr kHiAppMemBeg = 0;
+  static const uptr kHiAppMemEnd = 0;
+  static const uptr kHeapMemBeg = 0;
+  static const uptr kHeapMemEnd = 0;
+  static const uptr kVdsoBeg = 0;
+  static const uptr kShadowMsk = 0;
+  static const uptr kShadowXor = 0;
+  static const uptr kShadowAdd = 0x200000000000ull;
 };
 
-#define TSAN_RUNTIME_VMA 1
-
-#elif SANITIZER_GO && defined(__s390x__)
 /*
 Go on linux/s390x
 0000 0000 1000 - 1000 0000 0000: executable and heap - 16 TiB
@@ -571,622 +603,367 @@ Go on linux/s390x
 9800 0000 0000 - a000 0000 0000: -
 a000 0000 0000 - b000 0000 0000: traces - 16TiB (max history * 128k threads)
 */
-struct Mapping {
+struct MappingGoS390x {
   static const uptr kMetaShadowBeg = 0x900000000000ull;
   static const uptr kMetaShadowEnd = 0x980000000000ull;
   static const uptr kTraceMemBeg   = 0xa00000000000ull;
   static const uptr kTraceMemEnd   = 0xb00000000000ull;
   static const uptr kShadowBeg     = 0x400000000000ull;
   static const uptr kShadowEnd     = 0x800000000000ull;
-  static const uptr kAppMemBeg     = 0x000000001000ull;
-  static const uptr kAppMemEnd     = 0x100000000000ull;
+  static const uptr kLoAppMemBeg = 0x000000001000ull;
+  static const uptr kLoAppMemEnd = 0x100000000000ull;
+  static const uptr kMidAppMemBeg = 0;
+  static const uptr kMidAppMemEnd = 0;
+  static const uptr kHiAppMemBeg = 0;
+  static const uptr kHiAppMemEnd = 0;
+  static const uptr kHeapMemBeg = 0;
+  static const uptr kHeapMemEnd = 0;
+  static const uptr kVdsoBeg = 0;
+  static const uptr kShadowMsk = 0;
+  static const uptr kShadowXor = 0;
+  static const uptr kShadowAdd = 0x400000000000ull;
 };
 
-#else
-# error "Unknown platform"
-#endif
-
-
-#ifdef TSAN_RUNTIME_VMA
 extern uptr vmaSize;
-#endif
-
-
-enum MappingType {
-  MAPPING_LO_APP_BEG,
-  MAPPING_LO_APP_END,
-  MAPPING_HI_APP_BEG,
-  MAPPING_HI_APP_END,
-#ifdef TSAN_MID_APP_RANGE
-  MAPPING_MID_APP_BEG,
-  MAPPING_MID_APP_END,
-#endif
-  MAPPING_HEAP_BEG,
-  MAPPING_HEAP_END,
-  MAPPING_APP_BEG,
-  MAPPING_APP_END,
-  MAPPING_SHADOW_BEG,
-  MAPPING_SHADOW_END,
-  MAPPING_META_SHADOW_BEG,
-  MAPPING_META_SHADOW_END,
-  MAPPING_TRACE_BEG,
-  MAPPING_TRACE_END,
-  MAPPING_VDSO_BEG,
-};
-
-template<typename Mapping, int Type>
-uptr MappingImpl(void) {
-  switch (Type) {
-#if !SANITIZER_GO
-    case MAPPING_LO_APP_BEG: return Mapping::kLoAppMemBeg;
-    case MAPPING_LO_APP_END: return Mapping::kLoAppMemEnd;
-# ifdef TSAN_MID_APP_RANGE
-    case MAPPING_MID_APP_BEG: return Mapping::kMidAppMemBeg;
-    case MAPPING_MID_APP_END: return Mapping::kMidAppMemEnd;
-# endif
-    case MAPPING_HI_APP_BEG: return Mapping::kHiAppMemBeg;
-    case MAPPING_HI_APP_END: return Mapping::kHiAppMemEnd;
-    case MAPPING_HEAP_BEG: return Mapping::kHeapMemBeg;
-    case MAPPING_HEAP_END: return Mapping::kHeapMemEnd;
-    case MAPPING_VDSO_BEG: return Mapping::kVdsoBeg;
-#else
-    case MAPPING_APP_BEG: return Mapping::kAppMemBeg;
-    case MAPPING_APP_END: return Mapping::kAppMemEnd;
-#endif
-    case MAPPING_SHADOW_BEG: return Mapping::kShadowBeg;
-    case MAPPING_SHADOW_END: return Mapping::kShadowEnd;
-    case MAPPING_META_SHADOW_BEG: return Mapping::kMetaShadowBeg;
-    case MAPPING_META_SHADOW_END: return Mapping::kMetaShadowEnd;
-    case MAPPING_TRACE_BEG: return Mapping::kTraceMemBeg;
-    case MAPPING_TRACE_END: return Mapping::kTraceMemEnd;
-  }
-}
 
-template<int Type>
-uptr MappingArchImpl(void) {
-#if defined(__aarch64__) && !defined(__APPLE__) && !SANITIZER_GO
+template <typename Func, typename Arg>
+ALWAYS_INLINE auto SelectMapping(Arg arg) {
+#if SANITIZER_GO
+#  if defined(__powerpc64__)
   switch (vmaSize) {
-    case 39: return MappingImpl<Mapping39, Type>();
-    case 42: return MappingImpl<Mapping42, Type>();
-    case 48: return MappingImpl<Mapping48, Type>();
+    case 46:
+      return Func::template Apply<MappingGoPPC64_46>(arg);
+    case 47:
+      return Func::template Apply<MappingGoPPC64_47>(arg);
   }
-  DCHECK(0);
-  return 0;
-#elif defined(__powerpc64__)
+#  elif defined(__mips64)
+  return Func::template Apply<MappingGoMips64_47>(arg);
+#  elif defined(__s390x__)
+  return Func::template Apply<MappingGoS390x>(arg);
+#  elif defined(__aarch64__)
+  return Func::template Apply<MappingGoAarch64>(arg);
+#  elif SANITIZER_WINDOWS
+  return Func::template Apply<MappingGoWindows>(arg);
+#  else
+  return Func::template Apply<MappingGo48>(arg);
+#  endif
+#else  // SANITIZER_GO
+#  if defined(__x86_64__) || SANITIZER_IOSSIM || SANITIZER_MAC && !SANITIZER_IOS
+  return Func::template Apply<Mapping48AddressSpace>(arg);
+#  elif defined(__aarch64__) && defined(__APPLE__)
+  return Func::template Apply<MappingAppleAarch64>(arg);
+#  elif defined(__aarch64__) && !defined(__APPLE__)
   switch (vmaSize) {
-#if !SANITIZER_GO
-    case 44: return MappingImpl<Mapping44, Type>();
-#endif
-    case 46: return MappingImpl<Mapping46, Type>();
-    case 47: return MappingImpl<Mapping47, Type>();
+    case 39:
+      return Func::template Apply<MappingAarch64_39>(arg);
+    case 42:
+      return Func::template Apply<MappingAarch64_42>(arg);
+    case 48:
+      return Func::template Apply<MappingAarch64_48>(arg);
   }
-  DCHECK(0);
-  return 0;
-#elif defined(__mips64)
+#  elif defined(__powerpc64__)
   switch (vmaSize) {
-#if !SANITIZER_GO
-    case 40: return MappingImpl<Mapping40, Type>();
-#else
-    case 47: return MappingImpl<Mapping47, Type>();
-#endif
+    case 44:
+      return Func::template Apply<MappingPPC64_44>(arg);
+    case 46:
+      return Func::template Apply<MappingPPC64_46>(arg);
+    case 47:
+      return Func::template Apply<MappingPPC64_47>(arg);
   }
-  DCHECK(0);
-  return 0;
-#else
-  return MappingImpl<Mapping, Type>();
-#endif
+#  elif defined(__mips64)
+  return Func::template Apply<MappingMips64_40>(arg);
+#  elif defined(__s390x__)
+  return Func::template Apply<MappingS390x>(arg);
+#  else
+#    error "unsupported platform"
+#  endif
+#endif
+  Die();
+}
+
+template <typename Func>
+void ForEachMapping() {
+  Func::template Apply<Mapping48AddressSpace>();
+  Func::template Apply<MappingMips64_40>();
+  Func::template Apply<MappingAppleAarch64>();
+  Func::template Apply<MappingAarch64_39>();
+  Func::template Apply<MappingAarch64_42>();
+  Func::template Apply<MappingAarch64_48>();
+  Func::template Apply<MappingPPC64_44>();
+  Func::template Apply<MappingPPC64_46>();
+  Func::template Apply<MappingPPC64_47>();
+  Func::template Apply<MappingS390x>();
+  Func::template Apply<MappingGo48>();
+  Func::template Apply<MappingGoWindows>();
+  Func::template Apply<MappingGoPPC64_46>();
+  Func::template Apply<MappingGoPPC64_47>();
+  Func::template Apply<MappingGoAarch64>();
+  Func::template Apply<MappingGoMips64_47>();
+  Func::template Apply<MappingGoS390x>();
 }
 
-#if !SANITIZER_GO
-ALWAYS_INLINE
-uptr LoAppMemBeg(void) {
-  return MappingArchImpl<MAPPING_LO_APP_BEG>();
-}
-ALWAYS_INLINE
-uptr LoAppMemEnd(void) {
-  return MappingArchImpl<MAPPING_LO_APP_END>();
-}
+enum MappingType {
+  kLoAppMemBeg,
+  kLoAppMemEnd,
+  kHiAppMemBeg,
+  kHiAppMemEnd,
+  kMidAppMemBeg,
+  kMidAppMemEnd,
+  kHeapMemBeg,
+  kHeapMemEnd,
+  kShadowBeg,
+  kShadowEnd,
+  kMetaShadowBeg,
+  kMetaShadowEnd,
+  kTraceMemBeg,
+  kTraceMemEnd,
+  kVdsoBeg,
+};
 
-#ifdef TSAN_MID_APP_RANGE
-ALWAYS_INLINE
-uptr MidAppMemBeg(void) {
-  return MappingArchImpl<MAPPING_MID_APP_BEG>();
-}
-ALWAYS_INLINE
-uptr MidAppMemEnd(void) {
-  return MappingArchImpl<MAPPING_MID_APP_END>();
-}
-#endif
+struct MappingField {
+  template <typename Mapping>
+  static uptr Apply(MappingType type) {
+    switch (type) {
+      case kLoAppMemBeg:
+        return Mapping::kLoAppMemBeg;
+      case kLoAppMemEnd:
+        return Mapping::kLoAppMemEnd;
+      case kMidAppMemBeg:
+        return Mapping::kMidAppMemBeg;
+      case kMidAppMemEnd:
+        return Mapping::kMidAppMemEnd;
+      case kHiAppMemBeg:
+        return Mapping::kHiAppMemBeg;
+      case kHiAppMemEnd:
+        return Mapping::kHiAppMemEnd;
+      case kHeapMemBeg:
+        return Mapping::kHeapMemBeg;
+      case kHeapMemEnd:
+        return Mapping::kHeapMemEnd;
+      case kVdsoBeg:
+        return Mapping::kVdsoBeg;
+      case kShadowBeg:
+        return Mapping::kShadowBeg;
+      case kShadowEnd:
+        return Mapping::kShadowEnd;
+      case kMetaShadowBeg:
+        return Mapping::kMetaShadowBeg;
+      case kMetaShadowEnd:
+        return Mapping::kMetaShadowEnd;
+      case kTraceMemBeg:
+        return Mapping::kTraceMemBeg;
+      case kTraceMemEnd:
+        return Mapping::kTraceMemEnd;
+    }
+    Die();
+  }
+};
 
 ALWAYS_INLINE
-uptr HeapMemBeg(void) {
-  return MappingArchImpl<MAPPING_HEAP_BEG>();
-}
+uptr LoAppMemBeg(void) { return SelectMapping<MappingField>(kLoAppMemBeg); }
 ALWAYS_INLINE
-uptr HeapMemEnd(void) {
-  return MappingArchImpl<MAPPING_HEAP_END>();
-}
+uptr LoAppMemEnd(void) { return SelectMapping<MappingField>(kLoAppMemEnd); }
 
 ALWAYS_INLINE
-uptr HiAppMemBeg(void) {
-  return MappingArchImpl<MAPPING_HI_APP_BEG>();
-}
+uptr MidAppMemBeg(void) { return SelectMapping<MappingField>(kMidAppMemBeg); }
 ALWAYS_INLINE
-uptr HiAppMemEnd(void) {
-  return MappingArchImpl<MAPPING_HI_APP_END>();
-}
+uptr MidAppMemEnd(void) { return SelectMapping<MappingField>(kMidAppMemEnd); }
 
 ALWAYS_INLINE
-uptr VdsoBeg(void) {
-  return MappingArchImpl<MAPPING_VDSO_BEG>();
-}
-
-#else
+uptr HeapMemBeg(void) { return SelectMapping<MappingField>(kHeapMemBeg); }
+ALWAYS_INLINE
+uptr HeapMemEnd(void) { return SelectMapping<MappingField>(kHeapMemEnd); }
 
 ALWAYS_INLINE
-uptr AppMemBeg(void) {
-  return MappingArchImpl<MAPPING_APP_BEG>();
-}
+uptr HiAppMemBeg(void) { return SelectMapping<MappingField>(kHiAppMemBeg); }
 ALWAYS_INLINE
-uptr AppMemEnd(void) {
-  return MappingArchImpl<MAPPING_APP_END>();
-}
-
-#endif
+uptr HiAppMemEnd(void) { return SelectMapping<MappingField>(kHiAppMemEnd); }
 
-static inline
-bool GetUserRegion(int i, uptr *start, uptr *end) {
-  switch (i) {
-  default:
-    return false;
-#if !SANITIZER_GO
-  case 0:
-    *start = LoAppMemBeg();
-    *end = LoAppMemEnd();
-    return true;
-  case 1:
-    *start = HiAppMemBeg();
-    *end = HiAppMemEnd();
-    return true;
-  case 2:
-    *start = HeapMemBeg();
-    *end = HeapMemEnd();
-    return true;
-# ifdef TSAN_MID_APP_RANGE
-  case 3:
-    *start = MidAppMemBeg();
-    *end = MidAppMemEnd();
-    return true;
-# endif
-#else
-  case 0:
-    *start = AppMemBeg();
-    *end = AppMemEnd();
-    return true;
-#endif
-  }
-}
+ALWAYS_INLINE
+uptr VdsoBeg(void) { return SelectMapping<MappingField>(kVdsoBeg); }
 
 ALWAYS_INLINE
-uptr ShadowBeg(void) {
-  return MappingArchImpl<MAPPING_SHADOW_BEG>();
-}
+uptr ShadowBeg(void) { return SelectMapping<MappingField>(kShadowBeg); }
 ALWAYS_INLINE
-uptr ShadowEnd(void) {
-  return MappingArchImpl<MAPPING_SHADOW_END>();
-}
+uptr ShadowEnd(void) { return SelectMapping<MappingField>(kShadowEnd); }
 
 ALWAYS_INLINE
-uptr MetaShadowBeg(void) {
-  return MappingArchImpl<MAPPING_META_SHADOW_BEG>();
-}
+uptr MetaShadowBeg(void) { return SelectMapping<MappingField>(kMetaShadowBeg); }
 ALWAYS_INLINE
-uptr MetaShadowEnd(void) {
-  return MappingArchImpl<MAPPING_META_SHADOW_END>();
-}
+uptr MetaShadowEnd(void) { return SelectMapping<MappingField>(kMetaShadowEnd); }
 
 ALWAYS_INLINE
-uptr TraceMemBeg(void) {
-  return MappingArchImpl<MAPPING_TRACE_BEG>();
-}
+uptr TraceMemBeg(void) { return SelectMapping<MappingField>(kTraceMemBeg); }
 ALWAYS_INLINE
-uptr TraceMemEnd(void) {
-  return MappingArchImpl<MAPPING_TRACE_END>();
-}
-
+uptr TraceMemEnd(void) { return SelectMapping<MappingField>(kTraceMemEnd); }
 
-template<typename Mapping>
-bool IsAppMemImpl(uptr mem) {
-#if !SANITIZER_GO
+struct IsAppMemImpl {
+  template <typename Mapping>
+  static bool Apply(uptr mem) {
   return (mem >= Mapping::kHeapMemBeg && mem < Mapping::kHeapMemEnd) ||
-# ifdef TSAN_MID_APP_RANGE
          (mem >= Mapping::kMidAppMemBeg && mem < Mapping::kMidAppMemEnd) ||
-# endif
          (mem >= Mapping::kLoAppMemBeg && mem < Mapping::kLoAppMemEnd) ||
          (mem >= Mapping::kHiAppMemBeg && mem < Mapping::kHiAppMemEnd);
-#else
-  return mem >= Mapping::kAppMemBeg && mem < Mapping::kAppMemEnd;
-#endif
-}
-
-ALWAYS_INLINE
-bool IsAppMem(uptr mem) {
-#if defined(__aarch64__) && !defined(__APPLE__) && !SANITIZER_GO
-  switch (vmaSize) {
-    case 39: return IsAppMemImpl<Mapping39>(mem);
-    case 42: return IsAppMemImpl<Mapping42>(mem);
-    case 48: return IsAppMemImpl<Mapping48>(mem);
-  }
-  DCHECK(0);
-  return false;
-#elif defined(__powerpc64__)
-  switch (vmaSize) {
-#if !SANITIZER_GO
-    case 44: return IsAppMemImpl<Mapping44>(mem);
-#endif
-    case 46: return IsAppMemImpl<Mapping46>(mem);
-    case 47: return IsAppMemImpl<Mapping47>(mem);
   }
-  DCHECK(0);
-  return false;
-#elif defined(__mips64)
-  switch (vmaSize) {
-#if !SANITIZER_GO
-    case 40: return IsAppMemImpl<Mapping40>(mem);
-#else
-    case 47: return IsAppMemImpl<Mapping47>(mem);
-#endif
-  }
-  DCHECK(0);
-  return false;
-#else
-  return IsAppMemImpl<Mapping>(mem);
-#endif
-}
+};
 
+ALWAYS_INLINE
+bool IsAppMem(uptr mem) { return SelectMapping<IsAppMemImpl>(mem); }
 
-template<typename Mapping>
-bool IsShadowMemImpl(uptr mem) {
-  return mem >= Mapping::kShadowBeg && mem <= Mapping::kShadowEnd;
-}
+struct IsShadowMemImpl {
+  template <typename Mapping>
+  static bool Apply(uptr mem) {
+    return mem >= Mapping::kShadowBeg && mem <= Mapping::kShadowEnd;
+  }
+};
 
 ALWAYS_INLINE
-bool IsShadowMem(uptr mem) {
-#if defined(__aarch64__) && !defined(__APPLE__) && !SANITIZER_GO
-  switch (vmaSize) {
-    case 39: return IsShadowMemImpl<Mapping39>(mem);
-    case 42: return IsShadowMemImpl<Mapping42>(mem);
-    case 48: return IsShadowMemImpl<Mapping48>(mem);
-  }
-  DCHECK(0);
-  return false;
-#elif defined(__powerpc64__)
-  switch (vmaSize) {
-#if !SANITIZER_GO
-    case 44: return IsShadowMemImpl<Mapping44>(mem);
-#endif
-    case 46: return IsShadowMemImpl<Mapping46>(mem);
-    case 47: return IsShadowMemImpl<Mapping47>(mem);
-  }
-  DCHECK(0);
-  return false;
-#elif defined(__mips64)
-  switch (vmaSize) {
-#if !SANITIZER_GO
-    case 40: return IsShadowMemImpl<Mapping40>(mem);
-#else
-    case 47: return IsShadowMemImpl<Mapping47>(mem);
-#endif
-  }
-  DCHECK(0);
-  return false;
-#else
-  return IsShadowMemImpl<Mapping>(mem);
-#endif
+bool IsShadowMem(RawShadow *p) {
+  return SelectMapping<IsShadowMemImpl>(reinterpret_cast<uptr>(p));
 }
 
-
-template<typename Mapping>
-bool IsMetaMemImpl(uptr mem) {
-  return mem >= Mapping::kMetaShadowBeg && mem <= Mapping::kMetaShadowEnd;
-}
+struct IsMetaMemImpl {
+  template <typename Mapping>
+  static bool Apply(uptr mem) {
+    return mem >= Mapping::kMetaShadowBeg && mem <= Mapping::kMetaShadowEnd;
+  }
+};
 
 ALWAYS_INLINE
-bool IsMetaMem(uptr mem) {
-#if defined(__aarch64__) && !defined(__APPLE__) && !SANITIZER_GO
-  switch (vmaSize) {
-    case 39: return IsMetaMemImpl<Mapping39>(mem);
-    case 42: return IsMetaMemImpl<Mapping42>(mem);
-    case 48: return IsMetaMemImpl<Mapping48>(mem);
-  }
-  DCHECK(0);
-  return false;
-#elif defined(__powerpc64__)
-  switch (vmaSize) {
-#if !SANITIZER_GO
-    case 44: return IsMetaMemImpl<Mapping44>(mem);
-#endif
-    case 46: return IsMetaMemImpl<Mapping46>(mem);
-    case 47: return IsMetaMemImpl<Mapping47>(mem);
+bool IsMetaMem(const u32 *p) {
+  return SelectMapping<IsMetaMemImpl>(reinterpret_cast<uptr>(p));
+}
+
+struct MemToShadowImpl {
+  template <typename Mapping>
+  static uptr Apply(uptr x) {
+    DCHECK(IsAppMemImpl::Apply<Mapping>(x));
+    return (((x) & ~(Mapping::kShadowMsk | (kShadowCell - 1))) ^
+            Mapping::kShadowXor) *
+               kShadowMultiplier +
+           Mapping::kShadowAdd;
   }
-  DCHECK(0);
-  return false;
-#elif defined(__mips64)
-  switch (vmaSize) {
-#if !SANITIZER_GO
-    case 40: return IsMetaMemImpl<Mapping40>(mem);
-#else
-    case 47: return IsMetaMemImpl<Mapping47>(mem);
-#endif
-  }
-  DCHECK(0);
-  return false;
-#else
-  return IsMetaMemImpl<Mapping>(mem);
-#endif
-}
-
-
-template<typename Mapping>
-uptr MemToShadowImpl(uptr x) {
-  DCHECK(IsAppMem(x));
-#if !SANITIZER_GO
-  return (((x) & ~(Mapping::kAppMemMsk | (kShadowCell - 1)))
-      ^ Mapping::kAppMemXor) * kShadowCnt;
-#else
-# ifndef SANITIZER_WINDOWS
-  return ((x & ~(kShadowCell - 1)) * kShadowCnt) | Mapping::kShadowBeg;
-# else
-  return ((x & ~(kShadowCell - 1)) * kShadowCnt) + Mapping::kShadowBeg;
-# endif
-#endif
-}
+};
 
 ALWAYS_INLINE
-uptr MemToShadow(uptr x) {
-#if defined(__aarch64__) && !defined(__APPLE__) && !SANITIZER_GO
-  switch (vmaSize) {
-    case 39: return MemToShadowImpl<Mapping39>(x);
-    case 42: return MemToShadowImpl<Mapping42>(x);
-    case 48: return MemToShadowImpl<Mapping48>(x);
-  }
-  DCHECK(0);
-  return 0;
-#elif defined(__powerpc64__)
-  switch (vmaSize) {
-#if !SANITIZER_GO
-    case 44: return MemToShadowImpl<Mapping44>(x);
-#endif
-    case 46: return MemToShadowImpl<Mapping46>(x);
-    case 47: return MemToShadowImpl<Mapping47>(x);
-  }
-  DCHECK(0);
-  return 0;
-#elif defined(__mips64)
-  switch (vmaSize) {
-#if !SANITIZER_GO
-    case 40: return MemToShadowImpl<Mapping40>(x);
-#else
-    case 47: return MemToShadowImpl<Mapping47>(x);
-#endif
-  }
-  DCHECK(0);
-  return 0;
-#else
-  return MemToShadowImpl<Mapping>(x);
-#endif
+RawShadow *MemToShadow(uptr x) {
+  return reinterpret_cast<RawShadow *>(SelectMapping<MemToShadowImpl>(x));
 }
 
-
-template<typename Mapping>
-u32 *MemToMetaImpl(uptr x) {
-  DCHECK(IsAppMem(x));
-#if !SANITIZER_GO
-  return (u32*)(((((x) & ~(Mapping::kAppMemMsk | (kMetaShadowCell - 1)))) /
-      kMetaShadowCell * kMetaShadowSize) | Mapping::kMetaShadowBeg);
-#else
-# ifndef SANITIZER_WINDOWS
-  return (u32*)(((x & ~(kMetaShadowCell - 1)) / \
-      kMetaShadowCell * kMetaShadowSize) | Mapping::kMetaShadowBeg);
-# else
-  return (u32*)(((x & ~(kMetaShadowCell - 1)) / \
-      kMetaShadowCell * kMetaShadowSize) + Mapping::kMetaShadowBeg);
-# endif
-#endif
-}
+struct MemToMetaImpl {
+  template <typename Mapping>
+  static u32 *Apply(uptr x) {
+    DCHECK(IsAppMemImpl::Apply<Mapping>(x));
+    return (u32 *)(((((x) & ~(Mapping::kShadowMsk | (kMetaShadowCell - 1)))) /
+                    kMetaShadowCell * kMetaShadowSize) |
+                   Mapping::kMetaShadowBeg);
+  }
+};
 
 ALWAYS_INLINE
-u32 *MemToMeta(uptr x) {
-#if defined(__aarch64__) && !defined(__APPLE__) && !SANITIZER_GO
-  switch (vmaSize) {
-    case 39: return MemToMetaImpl<Mapping39>(x);
-    case 42: return MemToMetaImpl<Mapping42>(x);
-    case 48: return MemToMetaImpl<Mapping48>(x);
-  }
-  DCHECK(0);
-  return 0;
-#elif defined(__powerpc64__)
-  switch (vmaSize) {
-#if !SANITIZER_GO
-    case 44: return MemToMetaImpl<Mapping44>(x);
-#endif
-    case 46: return MemToMetaImpl<Mapping46>(x);
-    case 47: return MemToMetaImpl<Mapping47>(x);
+u32 *MemToMeta(uptr x) { return SelectMapping<MemToMetaImpl>(x); }
+
+struct ShadowToMemImpl {
+  template <typename Mapping>
+  static uptr Apply(uptr sp) {
+    if (!IsShadowMemImpl::Apply<Mapping>(sp))
+      return 0;
+    // The shadow mapping is non-linear and we've lost some bits, so we don't
+    // have an easy way to restore the original app address. But the mapping is
+    // a bijection, so we try to restore the address as belonging to
+    // low/mid/high range consecutively and see if shadow->app->shadow mapping
+    // gives us the same address.
+    uptr p =
+        ((sp - Mapping::kShadowAdd) / kShadowMultiplier) ^ Mapping::kShadowXor;
+    if (p >= Mapping::kLoAppMemBeg && p < Mapping::kLoAppMemEnd &&
+        MemToShadowImpl::Apply<Mapping>(p) == sp)
+      return p;
+    if (Mapping::kMidAppMemBeg) {
+      uptr p_mid = p + (Mapping::kMidAppMemBeg & Mapping::kShadowMsk);
+      if (p_mid >= Mapping::kMidAppMemBeg && p_mid < Mapping::kMidAppMemEnd &&
+          MemToShadowImpl::Apply<Mapping>(p_mid) == sp)
+        return p_mid;
+    }
+    return p | Mapping::kShadowMsk;
   }
-  DCHECK(0);
-  return 0;
-#elif defined(__mips64)
-  switch (vmaSize) {
-#if !SANITIZER_GO
-    case 40: return MemToMetaImpl<Mapping40>(x);
-#else
-    case 47: return MemToMetaImpl<Mapping47>(x);
-#endif
-  }
-  DCHECK(0);
-  return 0;
-#else
-  return MemToMetaImpl<Mapping>(x);
-#endif
-}
-
-
-template<typename Mapping>
-uptr ShadowToMemImpl(uptr s) {
-  DCHECK(IsShadowMem(s));
-#if !SANITIZER_GO
-  // The shadow mapping is non-linear and we've lost some bits, so we don't have
-  // an easy way to restore the original app address. But the mapping is a
-  // bijection, so we try to restore the address as belonging to low/mid/high
-  // range consecutively and see if shadow->app->shadow mapping gives us the
-  // same address.
-  uptr p = (s / kShadowCnt) ^ Mapping::kAppMemXor;
-  if (p >= Mapping::kLoAppMemBeg && p < Mapping::kLoAppMemEnd &&
-      MemToShadow(p) == s)
-    return p;
-# ifdef TSAN_MID_APP_RANGE
-  p = ((s / kShadowCnt) ^ Mapping::kAppMemXor) +
-      (Mapping::kMidAppMemBeg & Mapping::kAppMemMsk);
-  if (p >= Mapping::kMidAppMemBeg && p < Mapping::kMidAppMemEnd &&
-      MemToShadow(p) == s)
-    return p;
-# endif
-  return ((s / kShadowCnt) ^ Mapping::kAppMemXor) | Mapping::kAppMemMsk;
-#else  // #if !SANITIZER_GO
-# ifndef SANITIZER_WINDOWS
-  return (s & ~Mapping::kShadowBeg) / kShadowCnt;
-# else
-  return (s - Mapping::kShadowBeg) / kShadowCnt;
-# endif // SANITIZER_WINDOWS
-#endif
-}
+};
 
 ALWAYS_INLINE
-uptr ShadowToMem(uptr s) {
-#if defined(__aarch64__) && !defined(__APPLE__) && !SANITIZER_GO
-  switch (vmaSize) {
-    case 39: return ShadowToMemImpl<Mapping39>(s);
-    case 42: return ShadowToMemImpl<Mapping42>(s);
-    case 48: return ShadowToMemImpl<Mapping48>(s);
-  }
-  DCHECK(0);
-  return 0;
-#elif defined(__powerpc64__)
-  switch (vmaSize) {
-#if !SANITIZER_GO
-    case 44: return ShadowToMemImpl<Mapping44>(s);
-#endif
-    case 46: return ShadowToMemImpl<Mapping46>(s);
-    case 47: return ShadowToMemImpl<Mapping47>(s);
+uptr ShadowToMem(RawShadow *s) {
+  return SelectMapping<ShadowToMemImpl>(reinterpret_cast<uptr>(s));
+}
+
+// Compresses addr to kCompressedAddrBits stored in least significant bits.
+ALWAYS_INLINE uptr CompressAddr(uptr addr) {
+  return addr & ((1ull << kCompressedAddrBits) - 1);
+}
+
+struct RestoreAddrImpl {
+  typedef uptr Result;
+  template <typename Mapping>
+  static Result Apply(uptr addr) {
+    // To restore the address we go over all app memory ranges and check if top
+    // 3 bits of the compressed addr match that of the app range. If yes, we
+    // assume that the compressed address come from that range and restore the
+    // missing top bits to match the app range address.
+    static constexpr uptr ranges[] = {
+        Mapping::kLoAppMemBeg,  Mapping::kLoAppMemEnd, Mapping::kMidAppMemBeg,
+        Mapping::kMidAppMemEnd, Mapping::kHiAppMemBeg, Mapping::kHiAppMemEnd,
+        Mapping::kHeapMemBeg,   Mapping::kHeapMemEnd,
+    };
+    const uptr indicator = 0x0e0000000000ull;
+    const uptr ind_lsb = 1ull << LeastSignificantSetBitIndex(indicator);
+    for (uptr i = 0; i < ARRAY_SIZE(ranges); i += 2) {
+      uptr beg = ranges[i];
+      uptr end = ranges[i + 1];
+      if (beg == end)
+        continue;
+      for (uptr p = beg; p < end; p = RoundDown(p + ind_lsb, ind_lsb)) {
+        if ((addr & indicator) == (p & indicator))
+          return addr | (p & ~(ind_lsb - 1));
+      }
+    }
+    Printf("ThreadSanitizer: failed to restore address 0x%zx\n", addr);
+    Die();
   }
-  DCHECK(0);
-  return 0;
-#elif defined(__mips64)
-  switch (vmaSize) {
-#if !SANITIZER_GO
-    case 40: return ShadowToMemImpl<Mapping40>(s);
-#else
-    case 47: return ShadowToMemImpl<Mapping47>(s);
-#endif
-  }
-  DCHECK(0);
-  return 0;
-#else
-  return ShadowToMemImpl<Mapping>(s);
-#endif
-}
-
+};
 
+// Restores compressed addr from kCompressedAddrBits to full representation.
+// This is called only during reporting and is not performance-critical.
+inline uptr RestoreAddr(uptr addr) {
+  return SelectMapping<RestoreAddrImpl>(addr);
+}
 
 // The additional page is to catch shadow stack overflow as paging fault.
 // Windows wants 64K alignment for mmaps.
 const uptr kTotalTraceSize = (kTraceSize * sizeof(Event) + sizeof(Trace)
     + (64 << 10) + (64 << 10) - 1) & ~((64 << 10) - 1);
 
-template<typename Mapping>
-uptr GetThreadTraceImpl(int tid) {
-  uptr p = Mapping::kTraceMemBeg + (uptr)tid * kTotalTraceSize;
-  DCHECK_LT(p, Mapping::kTraceMemEnd);
-  return p;
-}
+struct GetThreadTraceImpl {
+  template <typename Mapping>
+  static uptr Apply(uptr tid) {
+    uptr p = Mapping::kTraceMemBeg + tid * kTotalTraceSize;
+    DCHECK_LT(p, Mapping::kTraceMemEnd);
+    return p;
+  }
+};
 
 ALWAYS_INLINE
-uptr GetThreadTrace(int tid) {
-#if defined(__aarch64__) && !defined(__APPLE__) && !SANITIZER_GO
-  switch (vmaSize) {
-    case 39: return GetThreadTraceImpl<Mapping39>(tid);
-    case 42: return GetThreadTraceImpl<Mapping42>(tid);
-    case 48: return GetThreadTraceImpl<Mapping48>(tid);
-  }
-  DCHECK(0);
-  return 0;
-#elif defined(__powerpc64__)
-  switch (vmaSize) {
-#if !SANITIZER_GO
-    case 44: return GetThreadTraceImpl<Mapping44>(tid);
-#endif
-    case 46: return GetThreadTraceImpl<Mapping46>(tid);
-    case 47: return GetThreadTraceImpl<Mapping47>(tid);
-  }
-  DCHECK(0);
-  return 0;
-#elif defined(__mips64)
-  switch (vmaSize) {
-#if !SANITIZER_GO
-    case 40: return GetThreadTraceImpl<Mapping40>(tid);
-#else
-    case 47: return GetThreadTraceImpl<Mapping47>(tid);
-#endif
+uptr GetThreadTrace(int tid) { return SelectMapping<GetThreadTraceImpl>(tid); }
+
+struct GetThreadTraceHeaderImpl {
+  template <typename Mapping>
+  static uptr Apply(uptr tid) {
+    uptr p = Mapping::kTraceMemBeg + tid * kTotalTraceSize +
+             kTraceSize * sizeof(Event);
+    DCHECK_LT(p, Mapping::kTraceMemEnd);
+    return p;
   }
-  DCHECK(0);
-  return 0;
-#else
-  return GetThreadTraceImpl<Mapping>(tid);
-#endif
-}
-
-
-template<typename Mapping>
-uptr GetThreadTraceHeaderImpl(int tid) {
-  uptr p = Mapping::kTraceMemBeg + (uptr)tid * kTotalTraceSize
-      + kTraceSize * sizeof(Event);
-  DCHECK_LT(p, Mapping::kTraceMemEnd);
-  return p;
-}
+};
 
 ALWAYS_INLINE
 uptr GetThreadTraceHeader(int tid) {
-#if defined(__aarch64__) && !defined(__APPLE__) && !SANITIZER_GO
-  switch (vmaSize) {
-    case 39: return GetThreadTraceHeaderImpl<Mapping39>(tid);
-    case 42: return GetThreadTraceHeaderImpl<Mapping42>(tid);
-    case 48: return GetThreadTraceHeaderImpl<Mapping48>(tid);
-  }
-  DCHECK(0);
-  return 0;
-#elif defined(__powerpc64__)
-  switch (vmaSize) {
-#if !SANITIZER_GO
-    case 44: return GetThreadTraceHeaderImpl<Mapping44>(tid);
-#endif
-    case 46: return GetThreadTraceHeaderImpl<Mapping46>(tid);
-    case 47: return GetThreadTraceHeaderImpl<Mapping47>(tid);
-  }
-  DCHECK(0);
-  return 0;
-#elif defined(__mips64)
-  switch (vmaSize) {
-#if !SANITIZER_GO
-    case 40: return GetThreadTraceHeaderImpl<Mapping40>(tid);
-#else
-    case 47: return GetThreadTraceHeaderImpl<Mapping47>(tid);
-#endif
-  }
-  DCHECK(0);
-  return 0;
-#else
-  return GetThreadTraceHeaderImpl<Mapping>(tid);
-#endif
+  return SelectMapping<GetThreadTraceHeaderImpl>(tid);
 }
 
 void InitializePlatform();
@@ -1194,7 +971,7 @@ void InitializePlatformEarly();
 void CheckAndProtect();
 void InitializeShadowMemoryPlatform();
 void FlushShadowMemory();
-void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive);
+void WriteMemoryProfile(char *buf, uptr buf_size, u64 uptime_ns);
 int ExtractResolvFDs(void *state, int *fds, int nfd);
 int ExtractRecvmsgFDs(void *msg, int *fds, int nfd);
 uptr ExtractLongJmpSp(uptr *env);
index cfe597e5380e5e0320597fa3fd221b26752ba9f0..6134a1be2bf5c69d3a0b8c5620097005c8da09e0 100644 (file)
@@ -85,21 +85,19 @@ static void InitializeLongjmpXorKey();
 static uptr longjmp_xor_key;
 #endif
 
-#ifdef TSAN_RUNTIME_VMA
 // Runtime detected VMA size.
 uptr vmaSize;
-#endif
 
 enum {
-  MemTotal  = 0,
-  MemShadow = 1,
-  MemMeta   = 2,
-  MemFile   = 3,
-  MemMmap   = 4,
-  MemTrace  = 5,
-  MemHeap   = 6,
-  MemOther  = 7,
-  MemCount  = 8,
+  MemTotal,
+  MemShadow,
+  MemMeta,
+  MemFile,
+  MemMmap,
+  MemTrace,
+  MemHeap,
+  MemOther,
+  MemCount,
 };
 
 void FillProfileCallback(uptr p, uptr rss, bool file,
@@ -109,39 +107,47 @@ void FillProfileCallback(uptr p, uptr rss, bool file,
     mem[MemShadow] += rss;
   else if (p >= MetaShadowBeg() && p < MetaShadowEnd())
     mem[MemMeta] += rss;
-#if !SANITIZER_GO
+  else if ((p >= LoAppMemBeg() && p < LoAppMemEnd()) ||
+           (p >= MidAppMemBeg() && p < MidAppMemEnd()) ||
+           (p >= HiAppMemBeg() && p < HiAppMemEnd()))
+    mem[file ? MemFile : MemMmap] += rss;
   else if (p >= HeapMemBeg() && p < HeapMemEnd())
     mem[MemHeap] += rss;
-  else if (p >= LoAppMemBeg() && p < LoAppMemEnd())
-    mem[file ? MemFile : MemMmap] += rss;
-  else if (p >= HiAppMemBeg() && p < HiAppMemEnd())
-    mem[file ? MemFile : MemMmap] += rss;
-#else
-  else if (p >= AppMemBeg() && p < AppMemEnd())
-    mem[file ? MemFile : MemMmap] += rss;
-#endif
   else if (p >= TraceMemBeg() && p < TraceMemEnd())
     mem[MemTrace] += rss;
   else
     mem[MemOther] += rss;
 }
 
-void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) {
+void WriteMemoryProfile(char *buf, uptr buf_size, u64 uptime_ns) {
   uptr mem[MemCount];
-  internal_memset(mem, 0, sizeof(mem[0]) * MemCount);
-  __sanitizer::GetMemoryProfile(FillProfileCallback, mem, 7);
+  internal_memset(mem, 0, sizeof(mem));
+  GetMemoryProfile(FillProfileCallback, mem, MemCount);
+  auto meta = ctx->metamap.GetMemoryStats();
   StackDepotStats *stacks = StackDepotGetStats();
-  internal_snprintf(buf, buf_size,
-      "RSS %zd MB: shadow:%zd meta:%zd file:%zd mmap:%zd"
-      " trace:%zd heap:%zd other:%zd stacks=%zd[%zd] nthr=%zd/%zd\n",
-      mem[MemTotal] >> 20, mem[MemShadow] >> 20, mem[MemMeta] >> 20,
-      mem[MemFile] >> 20, mem[MemMmap] >> 20, mem[MemTrace] >> 20,
-      mem[MemHeap] >> 20, mem[MemOther] >> 20,
-      stacks->allocated >> 20, stacks->n_uniq_ids,
-      nlive, nthread);
+  uptr nthread, nlive;
+  ctx->thread_registry.GetNumberOfThreads(&nthread, &nlive);
+  uptr internal_stats[AllocatorStatCount];
+  internal_allocator()->GetStats(internal_stats);
+  // All these are allocated from the common mmap region.
+  mem[MemMmap] -= meta.mem_block + meta.sync_obj + stacks->allocated +
+                  internal_stats[AllocatorStatMapped];
+  if (s64(mem[MemMmap]) < 0)
+    mem[MemMmap] = 0;
+  internal_snprintf(
+      buf, buf_size,
+      "%llus: RSS %zd MB: shadow:%zd meta:%zd file:%zd mmap:%zd"
+      " trace:%zd heap:%zd other:%zd intalloc:%zd memblocks:%zd syncobj:%zu"
+      " stacks=%zd[%zd] nthr=%zd/%zd\n",
+      uptime_ns / (1000 * 1000 * 1000), mem[MemTotal] >> 20,
+      mem[MemShadow] >> 20, mem[MemMeta] >> 20, mem[MemFile] >> 20,
+      mem[MemMmap] >> 20, mem[MemTrace] >> 20, mem[MemHeap] >> 20,
+      mem[MemOther] >> 20, internal_stats[AllocatorStatMapped] >> 20,
+      meta.mem_block >> 20, meta.sync_obj >> 20, stacks->allocated >> 20,
+      stacks->n_uniq_ids, nlive, nthread);
 }
 
-#if SANITIZER_LINUX
+#  if SANITIZER_LINUX
 void FlushShadowMemoryCallback(
     const SuspendedThreadsList &suspended_threads_list,
     void *argument) {
@@ -178,12 +184,13 @@ static void MapRodata() {
   internal_unlink(name);  // Unlink it now, so that we can reuse the buffer.
   fd_t fd = openrv;
   // Fill the file with kShadowRodata.
-  const uptr kMarkerSize = 512 * 1024 / sizeof(u64);
-  InternalMmapVector<u64> marker(kMarkerSize);
+  const uptr kMarkerSize = 512 * 1024 / sizeof(RawShadow);
+  InternalMmapVector<RawShadow> marker(kMarkerSize);
   // volatile to prevent insertion of memset
-  for (volatile u64 *p = marker.data(); p < marker.data() + kMarkerSize; p++)
+  for (volatile RawShadow *p = marker.data(); p < marker.data() + kMarkerSize;
+       p++)
     *p = kShadowRodata;
-  internal_write(fd, marker.data(), marker.size() * sizeof(u64));
+  internal_write(fd, marker.data(), marker.size() * sizeof(RawShadow));
   // Map the file into memory.
   uptr page = internal_mmap(0, GetPageSizeCached(), PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, fd, 0);
@@ -203,9 +210,10 @@ static void MapRodata() {
       char *shadow_start = (char *)MemToShadow(segment.start);
       char *shadow_end = (char *)MemToShadow(segment.end);
       for (char *p = shadow_start; p < shadow_end;
-           p += marker.size() * sizeof(u64)) {
-        internal_mmap(p, Min<uptr>(marker.size() * sizeof(u64), shadow_end - p),
-                      PROT_READ, MAP_PRIVATE | MAP_FIXED, fd, 0);
+           p += marker.size() * sizeof(RawShadow)) {
+        internal_mmap(
+            p, Min<uptr>(marker.size() * sizeof(RawShadow), shadow_end - p),
+            PROT_READ, MAP_PRIVATE | MAP_FIXED, fd, 0);
       }
     }
   }
@@ -219,7 +227,6 @@ void InitializeShadowMemoryPlatform() {
 #endif  // #if !SANITIZER_GO
 
 void InitializePlatformEarly() {
-#ifdef TSAN_RUNTIME_VMA
   vmaSize =
     (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
 #if defined(__aarch64__)
@@ -265,7 +272,6 @@ void InitializePlatformEarly() {
   }
 # endif
 #endif
-#endif
 }
 
 void InitializePlatform() {
@@ -341,7 +347,7 @@ int ExtractResolvFDs(void *state, int *fds, int nfd) {
 }
 
 // Extract file descriptors passed via UNIX domain sockets.
-// This is requried to properly handle "open" of these fds.
+// This is required to properly handle "open" of these fds.
 // see 'man recvmsg' and 'man 3 cmsg'.
 int ExtractRecvmsgFDs(void *msgp, int *fds, int nfd) {
   int res = 0;
@@ -447,18 +453,19 @@ static void InitializeLongjmpXorKey() {
 }
 #endif
 
+extern "C" void __tsan_tls_initialization() {}
+
 void ImitateTlsWrite(ThreadState *thr, uptr tls_addr, uptr tls_size) {
-  // Check that the thr object is in tls;
   const uptr thr_beg = (uptr)thr;
   const uptr thr_end = (uptr)thr + sizeof(*thr);
-  CHECK_GE(thr_beg, tls_addr);
-  CHECK_LE(thr_beg, tls_addr + tls_size);
-  CHECK_GE(thr_end, tls_addr);
-  CHECK_LE(thr_end, tls_addr + tls_size);
-  // Since the thr object is huge, skip it.
-  MemoryRangeImitateWrite(thr, /*pc=*/2, tls_addr, thr_beg - tls_addr);
-  MemoryRangeImitateWrite(thr, /*pc=*/2, thr_end,
-                          tls_addr + tls_size - thr_end);
+  // ThreadState is normally allocated in TLS and is large,
+  // so we skip it. But unit tests allocate ThreadState outside of TLS.
+  if (thr_beg < tls_addr || thr_end >= tls_addr + tls_size)
+    return;
+  const uptr pc = StackTrace::GetNextInstructionPc(
+      reinterpret_cast<uptr>(__tsan_tls_initialization));
+  MemoryRangeImitateWrite(thr, pc, tls_addr, thr_beg - tls_addr);
+  MemoryRangeImitateWrite(thr, pc, thr_end, tls_addr + tls_size - thr_end);
 }
 
 // Note: this function runs with async signals enabled,
index d9719a136b2163f9062754ceed0729850f6c67c9..f2aff7786e0e47d62153c870d100c08efd1aad9b 100644 (file)
@@ -139,7 +139,7 @@ static void RegionMemUsage(uptr start, uptr end, uptr *res, uptr *dirty) {
   *dirty = dirty_pages * GetPageSizeCached();
 }
 
-void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) {
+void WriteMemoryProfile(char *buf, uptr buf_size, u64 uptime_ns) {
   uptr shadow_res, shadow_dirty;
   uptr meta_res, meta_dirty;
   uptr trace_res, trace_dirty;
@@ -156,10 +156,12 @@ void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) {
   RegionMemUsage(HeapMemBeg(), HeapMemEnd(), &heap_res, &heap_dirty);
 #else  // !SANITIZER_GO
   uptr app_res, app_dirty;
-  RegionMemUsage(AppMemBeg(), AppMemEnd(), &app_res, &app_dirty);
+  RegionMemUsage(LoAppMemBeg(), LoAppMemEnd(), &app_res, &app_dirty);
 #endif
 
   StackDepotStats *stacks = StackDepotGetStats();
+  uptr nthread, nlive;
+  ctx->thread_registry.GetNumberOfThreads(&nthread, &nlive);
   internal_snprintf(buf, buf_size,
     "shadow   (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
     "meta     (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
@@ -169,7 +171,7 @@ void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) {
     "high app (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
     "heap     (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
 #else  // !SANITIZER_GO
-    "app      (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
+      "app      (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
 #endif
     "stacks: %zd unique IDs, %zd kB allocated\n"
     "threads: %zd total, %zd live\n"
@@ -182,13 +184,13 @@ void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) {
     HiAppMemBeg(), HiAppMemEnd(), high_res / 1024, high_dirty / 1024,
     HeapMemBeg(), HeapMemEnd(), heap_res / 1024, heap_dirty / 1024,
 #else  // !SANITIZER_GO
-    AppMemBeg(), AppMemEnd(), app_res / 1024, app_dirty / 1024,
+      LoAppMemBeg(), LoAppMemEnd(), app_res / 1024, app_dirty / 1024,
 #endif
     stacks->n_uniq_ids, stacks->allocated / 1024,
     nthread, nlive);
 }
 
-#if !SANITIZER_GO
+#  if !SANITIZER_GO
 void InitializeShadowMemoryPlatform() { }
 
 // On OS X, GCD worker threads are created without a call to pthread_create. We
@@ -215,8 +217,8 @@ static void my_pthread_introspection_hook(unsigned int event, pthread_t thread,
       Processor *proc = ProcCreate();
       ProcWire(proc, thr);
       ThreadState *parent_thread_state = nullptr;  // No parent.
-      int tid = ThreadCreate(parent_thread_state, 0, (uptr)thread, true);
-      CHECK_NE(tid, 0);
+      Tid tid = ThreadCreate(parent_thread_state, 0, (uptr)thread, true);
+      CHECK_NE(tid, kMainTid);
       ThreadStart(thr, tid, GetTid(), ThreadType::Worker);
     }
   } else if (event == PTHREAD_INTROSPECTION_THREAD_TERMINATE) {
@@ -234,11 +236,11 @@ static void my_pthread_introspection_hook(unsigned int event, pthread_t thread,
 #endif
 
 void InitializePlatformEarly() {
-#if !SANITIZER_GO && !HAS_48_BIT_ADDRESS_SPACE
+#  if !SANITIZER_GO && SANITIZER_IOS
   uptr max_vm = GetMaxUserVirtualAddress() + 1;
-  if (max_vm != Mapping::kHiAppMemEnd) {
+  if (max_vm != HiAppMemEnd()) {
     Printf("ThreadSanitizer: unsupported vm address limit %p, expected %p.\n",
-           max_vm, Mapping::kHiAppMemEnd);
+           max_vm, HiAppMemEnd());
     Die();
   }
 #endif
index 1c6198cefcd78e54abf229be5e96fe1e9607576c..763ac444377e0d47a3672c9131caaa78b9988a6e 100644 (file)
 #include "sanitizer_common/sanitizer_platform.h"
 #if SANITIZER_POSIX
 
-#include "sanitizer_common/sanitizer_common.h"
-#include "sanitizer_common/sanitizer_errno.h"
-#include "sanitizer_common/sanitizer_libc.h"
-#include "sanitizer_common/sanitizer_procmaps.h"
-#include "tsan_platform.h"
-#include "tsan_rtl.h"
+#  include <dlfcn.h>
+
+#  include "sanitizer_common/sanitizer_common.h"
+#  include "sanitizer_common/sanitizer_errno.h"
+#  include "sanitizer_common/sanitizer_libc.h"
+#  include "sanitizer_common/sanitizer_procmaps.h"
+#  include "tsan_platform.h"
+#  include "tsan_rtl.h"
 
 namespace __tsan {
 
@@ -29,6 +31,7 @@ static const char kShadowMemoryMappingHint[] =
     "HINT: if %s is not supported in your environment, you may set "
     "TSAN_OPTIONS=%s=0\n";
 
+#  if !SANITIZER_GO
 static void DontDumpShadow(uptr addr, uptr size) {
   if (common_flags()->use_madv_dontdump)
     if (!DontDumpShadowMemory(addr, size)) {
@@ -39,7 +42,6 @@ static void DontDumpShadow(uptr addr, uptr size) {
     }
 }
 
-#if !SANITIZER_GO
 void InitializeShadowMemory() {
   // Map memory shadow.
   if (!MmapFixedSuperNoReserve(ShadowBeg(), ShadowEnd() - ShadowBeg(),
@@ -70,6 +72,11 @@ void InitializeShadowMemory() {
       meta, meta + meta_size, meta_size >> 30);
 
   InitializeShadowMemoryPlatform();
+
+  on_initialize = reinterpret_cast<void (*)(void)>(
+      dlsym(RTLD_DEFAULT, "__tsan_on_initialize"));
+  on_finalize =
+      reinterpret_cast<int (*)(int)>(dlsym(RTLD_DEFAULT, "__tsan_on_finalize"));
 }
 
 static bool TryProtectRange(uptr beg, uptr end) {
@@ -98,24 +105,24 @@ void CheckAndProtect() {
       continue;
     if (segment.start >= VdsoBeg())  // vdso
       break;
-    Printf("FATAL: ThreadSanitizer: unexpected memory mapping %p-%p\n",
+    Printf("FATAL: ThreadSanitizer: unexpected memory mapping 0x%zx-0x%zx\n",
            segment.start, segment.end);
     Die();
   }
 
-#if defined(__aarch64__) && defined(__APPLE__) && !HAS_48_BIT_ADDRESS_SPACE
+#    if defined(__aarch64__) && defined(__APPLE__) && SANITIZER_IOS
   ProtectRange(HeapMemEnd(), ShadowBeg());
   ProtectRange(ShadowEnd(), MetaShadowBeg());
   ProtectRange(MetaShadowEnd(), TraceMemBeg());
 #else
   ProtectRange(LoAppMemEnd(), ShadowBeg());
   ProtectRange(ShadowEnd(), MetaShadowBeg());
-#ifdef TSAN_MID_APP_RANGE
-  ProtectRange(MetaShadowEnd(), MidAppMemBeg());
-  ProtectRange(MidAppMemEnd(), TraceMemBeg());
-#else
-  ProtectRange(MetaShadowEnd(), TraceMemBeg());
-#endif
+  if (MidAppMemBeg()) {
+    ProtectRange(MetaShadowEnd(), MidAppMemBeg());
+    ProtectRange(MidAppMemEnd(), TraceMemBeg());
+  } else {
+    ProtectRange(MetaShadowEnd(), TraceMemBeg());
+  }
   // Memory for traces is mapped lazily in MapThreadTrace.
   // Protect the whole range for now, so that user does not map something here.
   ProtectRange(TraceMemBeg(), TraceMemEnd());
index 19437879a41cb245fabdb243eb58b330f42253ed..fea893768c79f15d1e8f489a4d22d2663686f017 100644 (file)
@@ -23,8 +23,7 @@ namespace __tsan {
 void FlushShadowMemory() {
 }
 
-void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) {
-}
+void WriteMemoryProfile(char *buf, uptr buf_size, u64 uptime_ns) {}
 
 void InitializePlatformEarly() {
 }
index 8ef9f0cd4fe801d8f090fdb4fffd208529ef124c..a926c3761ccf917ce2226cbaa732144afb80bf9f 100644 (file)
 
 namespace __tsan {
 
-ReportStack::ReportStack() : frames(nullptr), suppressable(false) {}
-
-ReportStack *ReportStack::New() {
-  void *mem = internal_alloc(MBlockReportStack, sizeof(ReportStack));
-  return new(mem) ReportStack();
-}
-
-ReportLocation::ReportLocation(ReportLocationType type)
-    : type(type), global(), heap_chunk_start(0), heap_chunk_size(0), tid(0),
-      fd(0), suppressable(false), stack(nullptr) {}
-
-ReportLocation *ReportLocation::New(ReportLocationType type) {
-  void *mem = internal_alloc(MBlockReportStack, sizeof(ReportLocation));
-  return new(mem) ReportLocation(type);
-}
-
 class Decorator: public __sanitizer::SanitizerCommonDecorator {
  public:
   Decorator() : SanitizerCommonDecorator() { }
@@ -68,7 +52,7 @@ ReportDesc::~ReportDesc() {
 #if !SANITIZER_GO
 
 const int kThreadBufSize = 32;
-const char *thread_name(char *buf, int tid) {
+const char *thread_name(char *buf, Tid tid) {
   if (tid == kMainTid)
     return "main thread";
   internal_snprintf(buf, kThreadBufSize, "thread T%d", tid);
@@ -189,23 +173,25 @@ static void PrintLocation(const ReportLocation *loc) {
   if (loc->type == ReportLocationGlobal) {
     const DataInfo &global = loc->global;
     if (global.size != 0)
-      Printf("  Location is global '%s' of size %zu at %p (%s+%p)\n\n",
-             global.name, global.size, global.start,
+      Printf("  Location is global '%s' of size %zu at %p (%s+0x%zx)\n\n",
+             global.name, global.size, reinterpret_cast<void *>(global.start),
              StripModuleName(global.module), global.module_offset);
     else
-      Printf("  Location is global '%s' at %p (%s+%p)\n\n", global.name,
-             global.start, StripModuleName(global.module),
-             global.module_offset);
+      Printf("  Location is global '%s' at %p (%s+0x%zx)\n\n", global.name,
+             reinterpret_cast<void *>(global.start),
+             StripModuleName(global.module), global.module_offset);
   } else if (loc->type == ReportLocationHeap) {
     char thrbuf[kThreadBufSize];
     const char *object_type = GetObjectTypeFromTag(loc->external_tag);
     if (!object_type) {
       Printf("  Location is heap block of size %zu at %p allocated by %s:\n",
-             loc->heap_chunk_size, loc->heap_chunk_start,
+             loc->heap_chunk_size,
+             reinterpret_cast<void *>(loc->heap_chunk_start),
              thread_name(thrbuf, loc->tid));
     } else {
       Printf("  Location is %s of size %zu at %p allocated by %s:\n",
-             object_type, loc->heap_chunk_size, loc->heap_chunk_start,
+             object_type, loc->heap_chunk_size,
+             reinterpret_cast<void *>(loc->heap_chunk_start),
              thread_name(thrbuf, loc->tid));
     }
     print_stack = true;
@@ -225,13 +211,14 @@ static void PrintLocation(const ReportLocation *loc) {
 
 static void PrintMutexShort(const ReportMutex *rm, const char *after) {
   Decorator d;
-  Printf("%sM%zd%s%s", d.Mutex(), rm->id, d.Default(), after);
+  Printf("%sM%lld%s%s", d.Mutex(), rm->id, d.Default(), after);
 }
 
 static void PrintMutexShortWithAddress(const ReportMutex *rm,
                                        const char *after) {
   Decorator d;
-  Printf("%sM%zd (%p)%s%s", d.Mutex(), rm->id, rm->addr, d.Default(), after);
+  Printf("%sM%lld (%p)%s%s", d.Mutex(), rm->id,
+         reinterpret_cast<void *>(rm->addr), d.Default(), after);
 }
 
 static void PrintMutex(const ReportMutex *rm) {
@@ -242,7 +229,8 @@ static void PrintMutex(const ReportMutex *rm) {
     Printf("%s", d.Default());
   } else {
     Printf("%s", d.Mutex());
-    Printf("  Mutex M%llu (%p) created at:\n", rm->id, rm->addr);
+    Printf("  Mutex M%llu (%p) created at:\n", rm->id,
+           reinterpret_cast<void *>(rm->addr));
     Printf("%s", d.Default());
     PrintStack(rm->stack);
   }
@@ -259,12 +247,13 @@ static void PrintThread(const ReportThread *rt) {
   char thrbuf[kThreadBufSize];
   const char *thread_status = rt->running ? "running" : "finished";
   if (rt->thread_type == ThreadType::Worker) {
-    Printf(" (tid=%zu, %s) is a GCD worker thread\n", rt->os_id, thread_status);
+    Printf(" (tid=%llu, %s) is a GCD worker thread\n", rt->os_id,
+           thread_status);
     Printf("\n");
     Printf("%s", d.Default());
     return;
   }
-  Printf(" (tid=%zu, %s) created by %s", rt->os_id, thread_status,
+  Printf(" (tid=%llu, %s) created by %s", rt->os_id, thread_status,
          thread_name(thrbuf, rt->parent_tid));
   if (rt->stack)
     Printf(" at:");
@@ -394,7 +383,7 @@ void PrintReport(const ReportDesc *rep) {
 
 #else  // #if !SANITIZER_GO
 
-const u32 kMainGoroutineId = 1;
+const Tid kMainGoroutineId = 1;
 
 void PrintStack(const ReportStack *ent) {
   if (ent == 0 || ent->frames == 0) {
@@ -405,16 +394,17 @@ void PrintStack(const ReportStack *ent) {
   for (int i = 0; frame; frame = frame->next, i++) {
     const AddressInfo &info = frame->info;
     Printf("  %s()\n      %s:%d +0x%zx\n", info.function,
-        StripPathPrefix(info.file, common_flags()->strip_path_prefix),
-        info.line, (void *)info.module_offset);
+           StripPathPrefix(info.file, common_flags()->strip_path_prefix),
+           info.line, info.module_offset);
   }
 }
 
 static void PrintMop(const ReportMop *mop, bool first) {
   Printf("\n");
   Printf("%s at %p by ",
-      (first ? (mop->write ? "Write" : "Read")
-             : (mop->write ? "Previous write" : "Previous read")), mop->addr);
+         (first ? (mop->write ? "Write" : "Read")
+                : (mop->write ? "Previous write" : "Previous read")),
+         reinterpret_cast<void *>(mop->addr));
   if (mop->tid == kMainGoroutineId)
     Printf("main goroutine:\n");
   else
@@ -426,8 +416,8 @@ static void PrintLocation(const ReportLocation *loc) {
   switch (loc->type) {
   case ReportLocationHeap: {
     Printf("\n");
-    Printf("Heap block of size %zu at %p allocated by ",
-        loc->heap_chunk_size, loc->heap_chunk_start);
+    Printf("Heap block of size %zu at %p allocated by ", loc->heap_chunk_size,
+           reinterpret_cast<void *>(loc->heap_chunk_start));
     if (loc->tid == kMainGoroutineId)
       Printf("main goroutine:\n");
     else
@@ -438,8 +428,9 @@ static void PrintLocation(const ReportLocation *loc) {
   case ReportLocationGlobal: {
     Printf("\n");
     Printf("Global var %s of size %zu at %p declared at %s:%zu\n",
-        loc->global.name, loc->global.size, loc->global.start,
-        loc->global.file, loc->global.line);
+           loc->global.name, loc->global.size,
+           reinterpret_cast<void *>(loc->global.start), loc->global.file,
+           loc->global.line);
     break;
   }
   default:
@@ -469,13 +460,13 @@ void PrintReport(const ReportDesc *rep) {
   } else if (rep->typ == ReportTypeDeadlock) {
     Printf("WARNING: DEADLOCK\n");
     for (uptr i = 0; i < rep->mutexes.Size(); i++) {
-      Printf("Goroutine %d lock mutex %d while holding mutex %d:\n",
-          999, rep->mutexes[i]->id,
-          rep->mutexes[(i+1) % rep->mutexes.Size()]->id);
+      Printf("Goroutine %d lock mutex %llu while holding mutex %llu:\n", 999,
+             rep->mutexes[i]->id,
+             rep->mutexes[(i + 1) % rep->mutexes.Size()]->id);
       PrintStack(rep->stacks[2*i]);
       Printf("\n");
-      Printf("Mutex %d was previously locked here:\n",
-          rep->mutexes[(i+1) % rep->mutexes.Size()]->id);
+      Printf("Mutex %llu was previously locked here:\n",
+             rep->mutexes[(i + 1) % rep->mutexes.Size()]->id);
       PrintStack(rep->stacks[2*i + 1]);
       Printf("\n");
     }
index b4e4d89893792afda3b4576ff32840d0563e9fc2..d68c2db88828f1565f287131770d4a2b3235e512 100644 (file)
@@ -38,12 +38,8 @@ enum ReportType {
 };
 
 struct ReportStack {
-  SymbolizedStack *frames;
-  bool suppressable;
-  static ReportStack *New();
-
- private:
-  ReportStack();
+  SymbolizedStack *frames = nullptr;
+  bool suppressable = false;
 };
 
 struct ReportMopMutex {
@@ -73,28 +69,24 @@ enum ReportLocationType {
 };
 
 struct ReportLocation {
-  ReportLocationType type;
-  DataInfo global;
-  uptr heap_chunk_start;
-  uptr heap_chunk_size;
-  uptr external_tag;
-  int tid;
-  int fd;
-  bool suppressable;
-  ReportStack *stack;
-
-  static ReportLocation *New(ReportLocationType type);
- private:
-  explicit ReportLocation(ReportLocationType type);
+  ReportLocationType type = ReportLocationGlobal;
+  DataInfo global = {};
+  uptr heap_chunk_start = 0;
+  uptr heap_chunk_size = 0;
+  uptr external_tag = 0;
+  Tid tid = kInvalidTid;
+  int fd = 0;
+  bool suppressable = false;
+  ReportStack *stack = nullptr;
 };
 
 struct ReportThread {
-  int id;
+  Tid id;
   tid_t os_id;
   bool running;
   ThreadType thread_type;
   char *name;
-  u32 parent_tid;
+  Tid parent_tid;
   ReportStack *stack;
 };
 
@@ -114,7 +106,7 @@ class ReportDesc {
   Vector<ReportLocation*> locs;
   Vector<ReportMutex*> mutexes;
   Vector<ReportThread*> threads;
-  Vector<int> unique_tids;
+  Vector<Tid> unique_tids;
   ReportStack *sleep;
   int count;
 
index bcf489a71d55bed3a81a4ccfd03d3e8074f46490..d67928224545cf7916d8428637261123a2c41e3e 100644 (file)
 #include "tsan_symbolize.h"
 #include "ubsan/ubsan_init.h"
 
-#ifdef __SSE3__
-// <emmintrin.h> transitively includes <stdlib.h>,
-// and it's prohibited to include std headers into tsan runtime.
-// So we do this dirty trick.
-#define _MM_MALLOC_H_INCLUDED
-#define __MM_MALLOC_H
-#include <emmintrin.h>
-typedef __m128i m128;
-#endif
-
 volatile int __tsan_resumed = 0;
 
 extern "C" void __tsan_resume() {
@@ -46,6 +36,11 @@ extern "C" void __tsan_resume() {
 
 namespace __tsan {
 
+#if !SANITIZER_GO
+void (*on_initialize)(void);
+int (*on_finalize)(int);
+#endif
+
 #if !SANITIZER_GO && !SANITIZER_MAC
 __attribute__((tls_model("initial-exec")))
 THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(64);
@@ -62,24 +57,21 @@ void OnInitialize();
 SANITIZER_WEAK_CXX_DEFAULT_IMPL
 bool OnFinalize(bool failed) {
 #if !SANITIZER_GO
-  if (auto *ptr = dlsym(RTLD_DEFAULT, "__tsan_on_finalize"))
-    return reinterpret_cast<decltype(&__tsan_on_finalize)>(ptr)(failed);
+  if (on_finalize)
+    return on_finalize(failed);
 #endif
   return failed;
 }
 SANITIZER_WEAK_CXX_DEFAULT_IMPL
 void OnInitialize() {
 #if !SANITIZER_GO
-  if (auto *ptr = dlsym(RTLD_DEFAULT, "__tsan_on_initialize")) {
-    return reinterpret_cast<decltype(&__tsan_on_initialize)>(ptr)();
-  }
+  if (on_initialize)
+    on_initialize();
 #endif
 }
 #endif
 
-static ALIGNED(64) char thread_registry_placeholder[sizeof(ThreadRegistry)];
-
-static ThreadContextBase *CreateThreadContext(u32 tid) {
+static ThreadContextBase *CreateThreadContext(Tid tid) {
   // Map thread trace when context is created.
   char name[50];
   internal_snprintf(name, sizeof(name), "trace %u", tid);
@@ -98,13 +90,12 @@ static ThreadContextBase *CreateThreadContext(u32 tid) {
     ReleaseMemoryPagesToOS(hdr_end, hdr + sizeof(Trace));
     uptr unused = hdr + sizeof(Trace) - hdr_end;
     if (hdr_end != (uptr)MmapFixedNoAccess(hdr_end, unused)) {
-      Report("ThreadSanitizer: failed to mprotect(%p, %p)\n",
-          hdr_end, unused);
+      Report("ThreadSanitizer: failed to mprotect [0x%zx-0x%zx) \n", hdr_end,
+             unused);
       CHECK("unable to mprotect" && 0);
     }
   }
-  void *mem = internal_alloc(MBlockThreadContex, sizeof(ThreadContext));
-  return new(mem) ThreadContext(tid);
+  return New<ThreadContext>(tid);
 }
 
 #if !SANITIZER_GO
@@ -117,9 +108,8 @@ Context::Context()
     : initialized(),
       report_mtx(MutexTypeReport),
       nreported(),
-      nmissed_expected(),
-      thread_registry(new (thread_registry_placeholder) ThreadRegistry(
-          CreateThreadContext, kMaxTid, kThreadQuarantineSize, kMaxTidReuse)),
+      thread_registry(CreateThreadContext, kMaxTid, kThreadQuarantineSize,
+                      kMaxTidReuse),
       racy_mtx(MutexTypeRacy),
       racy_stacks(),
       racy_addresses(),
@@ -129,7 +119,7 @@ Context::Context()
 }
 
 // The objects are allocated in TLS, so one may rely on zero-initialization.
-ThreadState::ThreadState(Context *ctx, u32 tid, int unique_id, u64 epoch,
+ThreadState::ThreadState(Context *ctx, Tid tid, int unique_id, u64 epoch,
                          unsigned reuse_count, uptr stk_addr, uptr stk_size,
                          uptr tls_addr, uptr tls_size)
     : fast_state(tid, epoch)
@@ -155,16 +145,49 @@ ThreadState::ThreadState(Context *ctx, u32 tid, int unique_id, u64 epoch,
       last_sleep_clock(tid)
 #endif
 {
+  CHECK_EQ(reinterpret_cast<uptr>(this) % SANITIZER_CACHE_LINE_SIZE, 0);
+#if !SANITIZER_GO
+  shadow_stack_pos = shadow_stack;
+  shadow_stack_end = shadow_stack + kShadowStackSize;
+#else
+  // Setup dynamic shadow stack.
+  const int kInitStackSize = 8;
+  shadow_stack = (uptr *)Alloc(kInitStackSize * sizeof(uptr));
+  shadow_stack_pos = shadow_stack;
+  shadow_stack_end = shadow_stack + kInitStackSize;
+#endif
 }
 
 #if !SANITIZER_GO
-static void MemoryProfiler(Context *ctx, fd_t fd, int i) {
-  uptr n_threads;
-  uptr n_running_threads;
-  ctx->thread_registry->GetNumberOfThreads(&n_threads, &n_running_threads);
+void MemoryProfiler(u64 uptime) {
+  if (ctx->memprof_fd == kInvalidFd)
+    return;
   InternalMmapVector<char> buf(4096);
-  WriteMemoryProfile(buf.data(), buf.size(), n_threads, n_running_threads);
-  WriteToFile(fd, buf.data(), internal_strlen(buf.data()));
+  WriteMemoryProfile(buf.data(), buf.size(), uptime);
+  WriteToFile(ctx->memprof_fd, buf.data(), internal_strlen(buf.data()));
+}
+
+void InitializeMemoryProfiler() {
+  ctx->memprof_fd = kInvalidFd;
+  const char *fname = flags()->profile_memory;
+  if (!fname || !fname[0])
+    return;
+  if (internal_strcmp(fname, "stdout") == 0) {
+    ctx->memprof_fd = 1;
+  } else if (internal_strcmp(fname, "stderr") == 0) {
+    ctx->memprof_fd = 2;
+  } else {
+    InternalScopedString filename;
+    filename.append("%s.%d", fname, (int)internal_getpid());
+    ctx->memprof_fd = OpenFile(filename.data(), WrOnly);
+    if (ctx->memprof_fd == kInvalidFd) {
+      Printf("ThreadSanitizer: failed to open memory profile file '%s'\n",
+             filename.data());
+      return;
+    }
+  }
+  MemoryProfiler(0);
+  MaybeSpawnBackgroundThread();
 }
 
 static void *BackgroundThread(void *arg) {
@@ -175,25 +198,7 @@ static void *BackgroundThread(void *arg) {
   cur_thread_init();
   cur_thread()->ignore_interceptors++;
   const u64 kMs2Ns = 1000 * 1000;
-
-  fd_t mprof_fd = kInvalidFd;
-  if (flags()->profile_memory && flags()->profile_memory[0]) {
-    if (internal_strcmp(flags()->profile_memory, "stdout") == 0) {
-      mprof_fd = 1;
-    } else if (internal_strcmp(flags()->profile_memory, "stderr") == 0) {
-      mprof_fd = 2;
-    } else {
-      InternalScopedString filename;
-      filename.append("%s.%d", flags()->profile_memory, (int)internal_getpid());
-      fd_t fd = OpenFile(filename.data(), WrOnly);
-      if (fd == kInvalidFd) {
-        Printf("ThreadSanitizer: failed to open memory profile file '%s'\n",
-               filename.data());
-      } else {
-        mprof_fd = fd;
-      }
-    }
-  }
+  const u64 start = NanoTime();
 
   u64 last_flush = NanoTime();
   uptr last_rss = 0;
@@ -211,7 +216,6 @@ static void *BackgroundThread(void *arg) {
         last_flush = NanoTime();
       }
     }
-    // GetRSS can be expensive on huge programs, so don't do it every 100ms.
     if (flags()->memory_limit_mb > 0) {
       uptr rss = GetRSS();
       uptr limit = uptr(flags()->memory_limit_mb) << 20;
@@ -227,9 +231,7 @@ static void *BackgroundThread(void *arg) {
       last_rss = rss;
     }
 
-    // Write memory profile if requested.
-    if (mprof_fd != kInvalidFd)
-      MemoryProfiler(ctx, mprof_fd, i);
+    MemoryProfiler(now - start);
 
     // Flush symbolizer cache if requested.
     if (flags()->flush_symbolizer_ms > 0) {
@@ -260,7 +262,8 @@ static void StopBackgroundThread() {
 #endif
 
 void DontNeedShadowFor(uptr addr, uptr size) {
-  ReleaseMemoryPagesToOS(MemToShadow(addr), MemToShadow(addr + size));
+  ReleaseMemoryPagesToOS(reinterpret_cast<uptr>(MemToShadow(addr)),
+                         reinterpret_cast<uptr>(MemToShadow(addr + size)));
 }
 
 #if !SANITIZER_GO
@@ -297,7 +300,7 @@ void MapShadow(uptr addr, uptr size) {
                                  "meta shadow"))
       Die();
   } else {
-    // Mapping continous heap.
+    // Mapping continuous heap.
     // Windows wants 64K alignment.
     meta_begin = RoundDownTo(meta_begin, 64 << 10);
     meta_end = RoundUpTo(meta_end, 64 << 10);
@@ -310,58 +313,22 @@ void MapShadow(uptr addr, uptr size) {
       Die();
     mapped_meta_end = meta_end;
   }
-  VPrintf(2, "mapped meta shadow for (%p-%p) at (%p-%p)\n",
-      addr, addr+size, meta_begin, meta_end);
+  VPrintf(2, "mapped meta shadow for (0x%zx-0x%zx) at (0x%zx-0x%zx)\n", addr,
+          addr + size, meta_begin, meta_end);
 }
 
 void MapThreadTrace(uptr addr, uptr size, const char *name) {
-  DPrintf("#0: Mapping trace at %p-%p(0x%zx)\n", addr, addr + size, size);
+  DPrintf("#0: Mapping trace at 0x%zx-0x%zx(0x%zx)\n", addr, addr + size, size);
   CHECK_GE(addr, TraceMemBeg());
   CHECK_LE(addr + size, TraceMemEnd());
   CHECK_EQ(addr, addr & ~((64 << 10) - 1));  // windows wants 64K alignment
   if (!MmapFixedSuperNoReserve(addr, size, name)) {
-    Printf("FATAL: ThreadSanitizer can not mmap thread trace (%p/%p)\n",
-        addr, size);
+    Printf("FATAL: ThreadSanitizer can not mmap thread trace (0x%zx/0x%zx)\n",
+           addr, size);
     Die();
   }
 }
 
-static void CheckShadowMapping() {
-  uptr beg, end;
-  for (int i = 0; GetUserRegion(i, &beg, &end); i++) {
-    // Skip cases for empty regions (heap definition for architectures that
-    // do not use 64-bit allocator).
-    if (beg == end)
-      continue;
-    VPrintf(3, "checking shadow region %p-%p\n", beg, end);
-    uptr prev = 0;
-    for (uptr p0 = beg; p0 <= end; p0 += (end - beg) / 4) {
-      for (int x = -(int)kShadowCell; x <= (int)kShadowCell; x += kShadowCell) {
-        const uptr p = RoundDown(p0 + x, kShadowCell);
-        if (p < beg || p >= end)
-          continue;
-        const uptr s = MemToShadow(p);
-        const uptr m = (uptr)MemToMeta(p);
-        VPrintf(3, "  checking pointer %p: shadow=%p meta=%p\n", p, s, m);
-        CHECK(IsAppMem(p));
-        CHECK(IsShadowMem(s));
-        CHECK_EQ(p, ShadowToMem(s));
-        CHECK(IsMetaMem(m));
-        if (prev) {
-          // Ensure that shadow and meta mappings are linear within a single
-          // user range. Lots of code that processes memory ranges assumes it.
-          const uptr prev_s = MemToShadow(prev);
-          const uptr prev_m = (uptr)MemToMeta(prev);
-          CHECK_EQ(s - prev_s, (p - prev) * kShadowMultiplier);
-          CHECK_EQ((m - prev_m) / kMetaShadowSize,
-                   (p - prev) / kMetaShadowCell);
-        }
-        prev = p;
-      }
-    }
-  }
-}
-
 #if !SANITIZER_GO
 static void OnStackUnwind(const SignalContext &sig, const void *,
                           BufferedStackTrace *stack) {
@@ -386,9 +353,10 @@ void CheckUnwind() {
   PrintCurrentStackSlow(StackTrace::GetCurrentPc());
 }
 
+bool is_initialized;
+
 void Initialize(ThreadState *thr) {
   // Thread safe because done before all threads exist.
-  static bool is_initialized = false;
   if (is_initialized)
     return;
   is_initialized = true;
@@ -420,9 +388,7 @@ void Initialize(ThreadState *thr) {
   Processor *proc = ProcCreate();
   ProcWire(proc, thr);
   InitializeInterceptors();
-  CheckShadowMapping();
   InitializePlatform();
-  InitializeMutex();
   InitializeDynamicAnnotations();
 #if !SANITIZER_GO
   InitializeShadowMemory();
@@ -441,8 +407,8 @@ void Initialize(ThreadState *thr) {
           (int)internal_getpid());
 
   // Initialize thread 0.
-  int tid = ThreadCreate(thr, 0, 0, true);
-  CHECK_EQ(tid, 0);
+  Tid tid = ThreadCreate(thr, 0, 0, true);
+  CHECK_EQ(tid, kMainTid);
   ThreadStart(thr, tid, GetTid(), ThreadType::Regular);
 #if TSAN_CONTAINS_UBSAN
   __ubsan::InitAsPlugin();
@@ -451,6 +417,7 @@ void Initialize(ThreadState *thr) {
 
 #if !SANITIZER_GO
   Symbolizer::LateInitialize();
+  InitializeMemoryProfiler();
 #endif
 
   if (flags()->stop_on_start) {
@@ -507,18 +474,8 @@ int Finalize(ThreadState *thr) {
 #endif
   }
 
-  if (ctx->nmissed_expected) {
-    failed = true;
-    Printf("ThreadSanitizer: missed %d expected races\n",
-        ctx->nmissed_expected);
-  }
-
   if (common_flags()->print_suppressions)
     PrintMatchedSuppressions();
-#if !SANITIZER_GO
-  if (flags()->print_benign)
-    PrintMatchedBenignRaces();
-#endif
 
   failed = OnFinalize(failed);
 
@@ -527,7 +484,7 @@ int Finalize(ThreadState *thr) {
 
 #if !SANITIZER_GO
 void ForkBefore(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
-  ctx->thread_registry->Lock();
+  ctx->thread_registry.Lock();
   ctx->report_mtx.Lock();
   ScopedErrorReportLock::Lock();
   // Suppress all reports in the pthread_atfork callbacks.
@@ -546,7 +503,7 @@ void ForkParentAfter(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
   thr->ignore_interceptors--;
   ScopedErrorReportLock::Unlock();
   ctx->report_mtx.Unlock();
-  ctx->thread_registry->Unlock();
+  ctx->thread_registry.Unlock();
 }
 
 void ForkChildAfter(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
@@ -554,10 +511,10 @@ void ForkChildAfter(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
   thr->ignore_interceptors--;
   ScopedErrorReportLock::Unlock();
   ctx->report_mtx.Unlock();
-  ctx->thread_registry->Unlock();
+  ctx->thread_registry.Unlock();
 
   uptr nthread = 0;
-  ctx->thread_registry->GetNumberOfThreads(0, 0, &nthread /* alive threads */);
+  ctx->thread_registry.GetNumberOfThreads(0, 0, &nthread /* alive threads */);
   VPrintf(1, "ThreadSanitizer: forked new process with pid %d,"
       " parent had %d threads\n", (int)internal_getpid(), (int)nthread);
   if (nthread == 1) {
@@ -579,19 +536,18 @@ NOINLINE
 void GrowShadowStack(ThreadState *thr) {
   const int sz = thr->shadow_stack_end - thr->shadow_stack;
   const int newsz = 2 * sz;
-  uptr *newstack = (uptr*)internal_alloc(MBlockShadowStack,
-      newsz * sizeof(uptr));
+  auto *newstack = (uptr *)Alloc(newsz * sizeof(uptr));
   internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
-  internal_free(thr->shadow_stack);
+  Free(thr->shadow_stack);
   thr->shadow_stack = newstack;
   thr->shadow_stack_pos = newstack + sz;
   thr->shadow_stack_end = newstack + newsz;
 }
 #endif
 
-u32 CurrentStackId(ThreadState *thr, uptr pc) {
+StackID CurrentStackId(ThreadState *thr, uptr pc) {
   if (!thr->is_inited)  // May happen during bootstrap.
-    return 0;
+    return kInvalidStackID;
   if (pc != 0) {
 #if !SANITIZER_GO
     DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
@@ -602,13 +558,195 @@ u32 CurrentStackId(ThreadState *thr, uptr pc) {
     thr->shadow_stack_pos[0] = pc;
     thr->shadow_stack_pos++;
   }
-  u32 id = StackDepotPut(
+  StackID id = StackDepotPut(
       StackTrace(thr->shadow_stack, thr->shadow_stack_pos - thr->shadow_stack));
   if (pc != 0)
     thr->shadow_stack_pos--;
   return id;
 }
 
+namespace v3 {
+
+ALWAYS_INLINE USED bool TryTraceMemoryAccess(ThreadState *thr, uptr pc,
+                                             uptr addr, uptr size,
+                                             AccessType typ) {
+  DCHECK(size == 1 || size == 2 || size == 4 || size == 8);
+  if (!kCollectHistory)
+    return true;
+  EventAccess *ev;
+  if (UNLIKELY(!TraceAcquire(thr, &ev)))
+    return false;
+  u64 size_log = size == 1 ? 0 : size == 2 ? 1 : size == 4 ? 2 : 3;
+  uptr pc_delta = pc - thr->trace_prev_pc + (1 << (EventAccess::kPCBits - 1));
+  thr->trace_prev_pc = pc;
+  if (LIKELY(pc_delta < (1 << EventAccess::kPCBits))) {
+    ev->is_access = 1;
+    ev->is_read = !!(typ & kAccessRead);
+    ev->is_atomic = !!(typ & kAccessAtomic);
+    ev->size_log = size_log;
+    ev->pc_delta = pc_delta;
+    DCHECK_EQ(ev->pc_delta, pc_delta);
+    ev->addr = CompressAddr(addr);
+    TraceRelease(thr, ev);
+    return true;
+  }
+  auto *evex = reinterpret_cast<EventAccessExt *>(ev);
+  evex->is_access = 0;
+  evex->is_func = 0;
+  evex->type = EventType::kAccessExt;
+  evex->is_read = !!(typ & kAccessRead);
+  evex->is_atomic = !!(typ & kAccessAtomic);
+  evex->size_log = size_log;
+  evex->addr = CompressAddr(addr);
+  evex->pc = pc;
+  TraceRelease(thr, evex);
+  return true;
+}
+
+ALWAYS_INLINE USED bool TryTraceMemoryAccessRange(ThreadState *thr, uptr pc,
+                                                  uptr addr, uptr size,
+                                                  AccessType typ) {
+  if (!kCollectHistory)
+    return true;
+  EventAccessRange *ev;
+  if (UNLIKELY(!TraceAcquire(thr, &ev)))
+    return false;
+  thr->trace_prev_pc = pc;
+  ev->is_access = 0;
+  ev->is_func = 0;
+  ev->type = EventType::kAccessRange;
+  ev->is_read = !!(typ & kAccessRead);
+  ev->is_free = !!(typ & kAccessFree);
+  ev->size_lo = size;
+  ev->pc = CompressAddr(pc);
+  ev->addr = CompressAddr(addr);
+  ev->size_hi = size >> EventAccessRange::kSizeLoBits;
+  TraceRelease(thr, ev);
+  return true;
+}
+
+void TraceMemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, uptr size,
+                            AccessType typ) {
+  if (LIKELY(TryTraceMemoryAccessRange(thr, pc, addr, size, typ)))
+    return;
+  TraceSwitchPart(thr);
+  UNUSED bool res = TryTraceMemoryAccessRange(thr, pc, addr, size, typ);
+  DCHECK(res);
+}
+
+void TraceFunc(ThreadState *thr, uptr pc) {
+  if (LIKELY(TryTraceFunc(thr, pc)))
+    return;
+  TraceSwitchPart(thr);
+  UNUSED bool res = TryTraceFunc(thr, pc);
+  DCHECK(res);
+}
+
+void TraceMutexLock(ThreadState *thr, EventType type, uptr pc, uptr addr,
+                    StackID stk) {
+  DCHECK(type == EventType::kLock || type == EventType::kRLock);
+  if (!kCollectHistory)
+    return;
+  EventLock ev;
+  ev.is_access = 0;
+  ev.is_func = 0;
+  ev.type = type;
+  ev.pc = CompressAddr(pc);
+  ev.stack_lo = stk;
+  ev.stack_hi = stk >> EventLock::kStackIDLoBits;
+  ev._ = 0;
+  ev.addr = CompressAddr(addr);
+  TraceEvent(thr, ev);
+}
+
+void TraceMutexUnlock(ThreadState *thr, uptr addr) {
+  if (!kCollectHistory)
+    return;
+  EventUnlock ev;
+  ev.is_access = 0;
+  ev.is_func = 0;
+  ev.type = EventType::kUnlock;
+  ev._ = 0;
+  ev.addr = CompressAddr(addr);
+  TraceEvent(thr, ev);
+}
+
+void TraceTime(ThreadState *thr) {
+  if (!kCollectHistory)
+    return;
+  EventTime ev;
+  ev.is_access = 0;
+  ev.is_func = 0;
+  ev.type = EventType::kTime;
+  ev.sid = static_cast<u64>(thr->sid);
+  ev.epoch = static_cast<u64>(thr->epoch);
+  ev._ = 0;
+  TraceEvent(thr, ev);
+}
+
+NOINLINE
+void TraceSwitchPart(ThreadState *thr) {
+  Trace *trace = &thr->tctx->trace;
+  Event *pos = reinterpret_cast<Event *>(atomic_load_relaxed(&thr->trace_pos));
+  DCHECK_EQ(reinterpret_cast<uptr>(pos + 1) & TracePart::kAlignment, 0);
+  auto *part = trace->parts.Back();
+  DPrintf("TraceSwitchPart part=%p pos=%p\n", part, pos);
+  if (part) {
+    // We can get here when we still have space in the current trace part.
+    // The fast-path check in TraceAcquire has false positives in the middle of
+    // the part. Check if we are indeed at the end of the current part or not,
+    // and fill any gaps with NopEvent's.
+    Event *end = &part->events[TracePart::kSize];
+    DCHECK_GE(pos, &part->events[0]);
+    DCHECK_LE(pos, end);
+    if (pos + 1 < end) {
+      if ((reinterpret_cast<uptr>(pos) & TracePart::kAlignment) ==
+          TracePart::kAlignment)
+        *pos++ = NopEvent;
+      *pos++ = NopEvent;
+      DCHECK_LE(pos + 2, end);
+      atomic_store_relaxed(&thr->trace_pos, reinterpret_cast<uptr>(pos));
+      // Ensure we setup trace so that the next TraceAcquire
+      // won't detect trace part end.
+      Event *ev;
+      CHECK(TraceAcquire(thr, &ev));
+      return;
+    }
+    // We are indeed at the end.
+    for (; pos < end; pos++) *pos = NopEvent;
+  }
+#if !SANITIZER_GO
+  if (ctx->after_multithreaded_fork) {
+    // We just need to survive till exec.
+    CHECK(part);
+    atomic_store_relaxed(&thr->trace_pos,
+                         reinterpret_cast<uptr>(&part->events[0]));
+    return;
+  }
+#endif
+  part = new (MmapOrDie(sizeof(TracePart), "TracePart")) TracePart();
+  part->trace = trace;
+  thr->trace_prev_pc = 0;
+  {
+    Lock lock(&trace->mtx);
+    trace->parts.PushBack(part);
+    atomic_store_relaxed(&thr->trace_pos,
+                         reinterpret_cast<uptr>(&part->events[0]));
+  }
+  // Make this part self-sufficient by restoring the current stack
+  // and mutex set in the beginning of the trace.
+  TraceTime(thr);
+  for (uptr *pos = &thr->shadow_stack[0]; pos < thr->shadow_stack_pos; pos++)
+    CHECK(TryTraceFunc(thr, *pos));
+  for (uptr i = 0; i < thr->mset.Size(); i++) {
+    MutexSet::Desc d = thr->mset.Get(i);
+    TraceMutexLock(thr, d.write ? EventType::kLock : EventType::kRLock, 0,
+                   d.addr, d.stack_id);
+  }
+}
+
+}  // namespace v3
+
 void TraceSwitch(ThreadState *thr) {
 #if !SANITIZER_GO
   if (ctx->after_multithreaded_fork)
@@ -625,9 +763,7 @@ void TraceSwitch(ThreadState *thr) {
   thr->nomalloc--;
 }
 
-Trace *ThreadTrace(int tid) {
-  return (Trace*)GetThreadTraceHeader(tid);
-}
+Trace *ThreadTrace(Tid tid) { return (Trace *)GetThreadTraceHeader(tid); }
 
 uptr TraceTopPC(ThreadState *thr) {
   Event *events = (Event*)GetThreadTrace(thr->tid);
@@ -716,28 +852,28 @@ void MemoryAccessImpl1(ThreadState *thr, uptr addr,
   // threads, which is not enough for the unrolled loop.
 #if SANITIZER_DEBUG
   for (int idx = 0; idx < 4; idx++) {
-#include "tsan_update_shadow_word_inl.h"
+#  include "tsan_update_shadow_word.inc"
   }
 #else
   int idx = 0;
-#include "tsan_update_shadow_word_inl.h"
+#  include "tsan_update_shadow_word.inc"
   idx = 1;
   if (stored) {
-#include "tsan_update_shadow_word_inl.h"
+#  include "tsan_update_shadow_word.inc"
   } else {
-#include "tsan_update_shadow_word_inl.h"
+#  include "tsan_update_shadow_word.inc"
   }
   idx = 2;
   if (stored) {
-#include "tsan_update_shadow_word_inl.h"
+#  include "tsan_update_shadow_word.inc"
   } else {
-#include "tsan_update_shadow_word_inl.h"
+#  include "tsan_update_shadow_word.inc"
   }
   idx = 3;
   if (stored) {
-#include "tsan_update_shadow_word_inl.h"
+#  include "tsan_update_shadow_word.inc"
   } else {
-#include "tsan_update_shadow_word_inl.h"
+#  include "tsan_update_shadow_word.inc"
   }
 #endif
 
@@ -753,8 +889,11 @@ void MemoryAccessImpl1(ThreadState *thr, uptr addr,
   return;
 }
 
-void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
-    int size, bool kAccessIsWrite, bool kIsAtomic) {
+void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr, uptr size,
+                           AccessType typ) {
+  DCHECK(!(typ & kAccessAtomic));
+  const bool kAccessIsWrite = !(typ & kAccessRead);
+  const bool kIsAtomic = false;
   while (size) {
     int size1 = 1;
     int kAccessSizeLog = kSizeLog1;
@@ -789,10 +928,11 @@ bool ContainsSameAccessSlow(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
   return false;
 }
 
-#if defined(__SSE3__)
-#define SHUF(v0, v1, i0, i1, i2, i3) _mm_castps_si128(_mm_shuffle_ps( \
-    _mm_castsi128_ps(v0), _mm_castsi128_ps(v1), \
-    (i0)*1 + (i1)*4 + (i2)*16 + (i3)*64))
+#if TSAN_VECTORIZE
+#  define SHUF(v0, v1, i0, i1, i2, i3)                    \
+    _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(v0), \
+                                    _mm_castsi128_ps(v1), \
+                                    (i0)*1 + (i1)*4 + (i2)*16 + (i3)*64))
 ALWAYS_INLINE
 bool ContainsSameAccessFast(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
   // This is an optimized version of ContainsSameAccessSlow.
@@ -849,7 +989,7 @@ bool ContainsSameAccessFast(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
 
 ALWAYS_INLINE
 bool ContainsSameAccess(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
-#if defined(__SSE3__)
+#if TSAN_VECTORIZE
   bool res = ContainsSameAccessFast(s, a, sync_epoch, is_write);
   // NOTE: this check can fail if the shadow is concurrently mutated
   // by other threads. But it still can be useful if you modify
@@ -864,7 +1004,7 @@ bool ContainsSameAccess(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
 ALWAYS_INLINE USED
 void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
     int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic) {
-  u64 *shadow_mem = (u64*)MemToShadow(addr);
+  RawShadow *shadow_mem = MemToShadow(addr);
   DPrintf2("#%d: MemoryAccess: @%p %p size=%d"
       " is_write=%d shadow_mem=%p {%zx, %zx, %zx, %zx}\n",
       (int)thr->fast_state.tid(), (void*)pc, (void*)addr,
@@ -876,9 +1016,9 @@ void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
     Printf("Access to non app mem %zx\n", addr);
     DCHECK(IsAppMem(addr));
   }
-  if (!IsShadowMem((uptr)shadow_mem)) {
+  if (!IsShadowMem(shadow_mem)) {
     Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
-    DCHECK(IsShadowMem((uptr)shadow_mem));
+    DCHECK(IsShadowMem(shadow_mem));
   }
 #endif
 
@@ -953,9 +1093,9 @@ static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
   size = (size + (kShadowCell - 1)) & ~(kShadowCell - 1);
   // UnmapOrDie/MmapFixedNoReserve does not work on Windows.
   if (SANITIZER_WINDOWS || size < common_flags()->clear_shadow_mmap_threshold) {
-    u64 *p = (u64*)MemToShadow(addr);
-    CHECK(IsShadowMem((uptr)p));
-    CHECK(IsShadowMem((uptr)(p + size * kShadowCnt / kShadowCell - 1)));
+    RawShadow *p = MemToShadow(addr);
+    CHECK(IsShadowMem(p));
+    CHECK(IsShadowMem(p + size * kShadowCnt / kShadowCell - 1));
     // FIXME: may overwrite a part outside the region
     for (uptr i = 0; i < size / kShadowCell * kShadowCnt;) {
       p[i++] = val;
@@ -965,9 +1105,9 @@ static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
   } else {
     // The region is big, reset only beginning and end.
     const uptr kPageSize = GetPageSizeCached();
-    u64 *begin = (u64*)MemToShadow(addr);
-    u64 *end = begin + size / kShadowCell * kShadowCnt;
-    u64 *p = begin;
+    RawShadow *begin = MemToShadow(addr);
+    RawShadow *end = begin + size / kShadowCell * kShadowCnt;
+    RawShadow *p = begin;
     // Set at least first kPageSize/2 to page boundary.
     while ((p < begin + kPageSize / kShadowSize / 2) || ((uptr)p % kPageSize)) {
       *p++ = val;
@@ -975,7 +1115,7 @@ static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
         *p++ = 0;
     }
     // Reset middle part.
-    u64 *p1 = p;
+    RawShadow *p1 = p;
     p = RoundDown(end, kPageSize);
     if (!MmapFixedSuperNoReserve((uptr)p1, (uptr)p - (uptr)p1))
       Die();
@@ -1070,18 +1210,18 @@ void FuncExit(ThreadState *thr) {
   thr->shadow_stack_pos--;
 }
 
-void ThreadIgnoreBegin(ThreadState *thr, uptr pc, bool save_stack) {
+void ThreadIgnoreBegin(ThreadState *thr, uptr pc) {
   DPrintf("#%d: ThreadIgnoreBegin\n", thr->tid);
   thr->ignore_reads_and_writes++;
   CHECK_GT(thr->ignore_reads_and_writes, 0);
   thr->fast_state.SetIgnoreBit();
 #if !SANITIZER_GO
-  if (save_stack && !ctx->after_multithreaded_fork)
+  if (pc && !ctx->after_multithreaded_fork)
     thr->mop_ignore_set.Add(CurrentStackId(thr, pc));
 #endif
 }
 
-void ThreadIgnoreEnd(ThreadState *thr, uptr pc) {
+void ThreadIgnoreEnd(ThreadState *thr) {
   DPrintf("#%d: ThreadIgnoreEnd\n", thr->tid);
   CHECK_GT(thr->ignore_reads_and_writes, 0);
   thr->ignore_reads_and_writes--;
@@ -1101,17 +1241,17 @@ uptr __tsan_testonly_shadow_stack_current_size() {
 }
 #endif
 
-void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc, bool save_stack) {
+void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc) {
   DPrintf("#%d: ThreadIgnoreSyncBegin\n", thr->tid);
   thr->ignore_sync++;
   CHECK_GT(thr->ignore_sync, 0);
 #if !SANITIZER_GO
-  if (save_stack && !ctx->after_multithreaded_fork)
+  if (pc && !ctx->after_multithreaded_fork)
     thr->sync_ignore_set.Add(CurrentStackId(thr, pc));
 #endif
 }
 
-void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc) {
+void ThreadIgnoreSyncEnd(ThreadState *thr) {
   DPrintf("#%d: ThreadIgnoreSyncEnd\n", thr->tid);
   CHECK_GT(thr->ignore_sync, 0);
   thr->ignore_sync--;
@@ -1133,7 +1273,28 @@ void build_consistency_release() {}
 
 }  // namespace __tsan
 
+#if SANITIZER_CHECK_DEADLOCKS
+namespace __sanitizer {
+using namespace __tsan;
+MutexMeta mutex_meta[] = {
+    {MutexInvalid, "Invalid", {}},
+    {MutexThreadRegistry, "ThreadRegistry", {}},
+    {MutexTypeTrace, "Trace", {MutexLeaf}},
+    {MutexTypeReport, "Report", {MutexTypeSyncVar}},
+    {MutexTypeSyncVar, "SyncVar", {}},
+    {MutexTypeAnnotations, "Annotations", {}},
+    {MutexTypeAtExit, "AtExit", {MutexTypeSyncVar}},
+    {MutexTypeFired, "Fired", {MutexLeaf}},
+    {MutexTypeRacy, "Racy", {MutexLeaf}},
+    {MutexTypeGlobalProc, "GlobalProc", {}},
+    {},
+};
+
+void PrintMutexPC(uptr pc) { StackTrace(&pc, 1).Print(); }
+}  // namespace __sanitizer
+#endif
+
 #if !SANITIZER_GO
 // Must be included in this file to make sure everything is inlined.
-#include "tsan_interface_inl.h"
+#  include "tsan_interface.inc"
 #endif
index 6576d40aa488c285c14f8beef6bd4f46ad63147f..4f50656a1ea89e100e8a1db47824fff739d40e2a 100644 (file)
 #include "tsan_clock.h"
 #include "tsan_defs.h"
 #include "tsan_flags.h"
+#include "tsan_ignoreset.h"
 #include "tsan_mman.h"
-#include "tsan_sync.h"
-#include "tsan_trace.h"
-#include "tsan_report.h"
-#include "tsan_platform.h"
 #include "tsan_mutexset.h"
-#include "tsan_ignoreset.h"
+#include "tsan_platform.h"
+#include "tsan_report.h"
+#include "tsan_shadow.h"
 #include "tsan_stack_trace.h"
+#include "tsan_sync.h"
+#include "tsan_trace.h"
 
 #if SANITIZER_WORDSIZE != 64
 # error "ThreadSanitizer is supported only on 64-bit platforms"
@@ -69,6 +70,11 @@ struct AP32 {
 typedef SizeClassAllocator32<AP32> PrimaryAllocator;
 #else
 struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
+#    if defined(__s390x__)
+  typedef MappingS390x Mapping;
+#    else
+  typedef Mapping48AddressSpace Mapping;
+#    endif
   static const uptr kSpaceBeg = Mapping::kHeapMemBeg;
   static const uptr kSpaceSize = Mapping::kHeapMemEnd - Mapping::kHeapMemBeg;
   static const uptr kMetadataSize = 0;
@@ -84,240 +90,6 @@ typedef Allocator::AllocatorCache AllocatorCache;
 Allocator *allocator();
 #endif
 
-const u64 kShadowRodata = (u64)-1;  // .rodata shadow marker
-
-// FastState (from most significant bit):
-//   ignore          : 1
-//   tid             : kTidBits
-//   unused          : -
-//   history_size    : 3
-//   epoch           : kClkBits
-class FastState {
- public:
-  FastState(u64 tid, u64 epoch) {
-    x_ = tid << kTidShift;
-    x_ |= epoch;
-    DCHECK_EQ(tid, this->tid());
-    DCHECK_EQ(epoch, this->epoch());
-    DCHECK_EQ(GetIgnoreBit(), false);
-  }
-
-  explicit FastState(u64 x)
-      : x_(x) {
-  }
-
-  u64 raw() const {
-    return x_;
-  }
-
-  u64 tid() const {
-    u64 res = (x_ & ~kIgnoreBit) >> kTidShift;
-    return res;
-  }
-
-  u64 TidWithIgnore() const {
-    u64 res = x_ >> kTidShift;
-    return res;
-  }
-
-  u64 epoch() const {
-    u64 res = x_ & ((1ull << kClkBits) - 1);
-    return res;
-  }
-
-  void IncrementEpoch() {
-    u64 old_epoch = epoch();
-    x_ += 1;
-    DCHECK_EQ(old_epoch + 1, epoch());
-    (void)old_epoch;
-  }
-
-  void SetIgnoreBit() { x_ |= kIgnoreBit; }
-  void ClearIgnoreBit() { x_ &= ~kIgnoreBit; }
-  bool GetIgnoreBit() const { return (s64)x_ < 0; }
-
-  void SetHistorySize(int hs) {
-    CHECK_GE(hs, 0);
-    CHECK_LE(hs, 7);
-    x_ = (x_ & ~(kHistoryMask << kHistoryShift)) | (u64(hs) << kHistoryShift);
-  }
-
-  ALWAYS_INLINE
-  int GetHistorySize() const {
-    return (int)((x_ >> kHistoryShift) & kHistoryMask);
-  }
-
-  void ClearHistorySize() {
-    SetHistorySize(0);
-  }
-
-  ALWAYS_INLINE
-  u64 GetTracePos() const {
-    const int hs = GetHistorySize();
-    // When hs == 0, the trace consists of 2 parts.
-    const u64 mask = (1ull << (kTracePartSizeBits + hs + 1)) - 1;
-    return epoch() & mask;
-  }
-
- private:
-  friend class Shadow;
-  static const int kTidShift = 64 - kTidBits - 1;
-  static const u64 kIgnoreBit = 1ull << 63;
-  static const u64 kFreedBit = 1ull << 63;
-  static const u64 kHistoryShift = kClkBits;
-  static const u64 kHistoryMask = 7;
-  u64 x_;
-};
-
-// Shadow (from most significant bit):
-//   freed           : 1
-//   tid             : kTidBits
-//   is_atomic       : 1
-//   is_read         : 1
-//   size_log        : 2
-//   addr0           : 3
-//   epoch           : kClkBits
-class Shadow : public FastState {
- public:
-  explicit Shadow(u64 x)
-      : FastState(x) {
-  }
-
-  explicit Shadow(const FastState &s)
-      : FastState(s.x_) {
-    ClearHistorySize();
-  }
-
-  void SetAddr0AndSizeLog(u64 addr0, unsigned kAccessSizeLog) {
-    DCHECK_EQ((x_ >> kClkBits) & 31, 0);
-    DCHECK_LE(addr0, 7);
-    DCHECK_LE(kAccessSizeLog, 3);
-    x_ |= ((kAccessSizeLog << 3) | addr0) << kClkBits;
-    DCHECK_EQ(kAccessSizeLog, size_log());
-    DCHECK_EQ(addr0, this->addr0());
-  }
-
-  void SetWrite(unsigned kAccessIsWrite) {
-    DCHECK_EQ(x_ & kReadBit, 0);
-    if (!kAccessIsWrite)
-      x_ |= kReadBit;
-    DCHECK_EQ(kAccessIsWrite, IsWrite());
-  }
-
-  void SetAtomic(bool kIsAtomic) {
-    DCHECK(!IsAtomic());
-    if (kIsAtomic)
-      x_ |= kAtomicBit;
-    DCHECK_EQ(IsAtomic(), kIsAtomic);
-  }
-
-  bool IsAtomic() const {
-    return x_ & kAtomicBit;
-  }
-
-  bool IsZero() const {
-    return x_ == 0;
-  }
-
-  static inline bool TidsAreEqual(const Shadow s1, const Shadow s2) {
-    u64 shifted_xor = (s1.x_ ^ s2.x_) >> kTidShift;
-    DCHECK_EQ(shifted_xor == 0, s1.TidWithIgnore() == s2.TidWithIgnore());
-    return shifted_xor == 0;
-  }
-
-  static ALWAYS_INLINE
-  bool Addr0AndSizeAreEqual(const Shadow s1, const Shadow s2) {
-    u64 masked_xor = ((s1.x_ ^ s2.x_) >> kClkBits) & 31;
-    return masked_xor == 0;
-  }
-
-  static ALWAYS_INLINE bool TwoRangesIntersect(Shadow s1, Shadow s2,
-      unsigned kS2AccessSize) {
-    bool res = false;
-    u64 diff = s1.addr0() - s2.addr0();
-    if ((s64)diff < 0) {  // s1.addr0 < s2.addr0
-      // if (s1.addr0() + size1) > s2.addr0()) return true;
-      if (s1.size() > -diff)
-        res = true;
-    } else {
-      // if (s2.addr0() + kS2AccessSize > s1.addr0()) return true;
-      if (kS2AccessSize > diff)
-        res = true;
-    }
-    DCHECK_EQ(res, TwoRangesIntersectSlow(s1, s2));
-    DCHECK_EQ(res, TwoRangesIntersectSlow(s2, s1));
-    return res;
-  }
-
-  u64 ALWAYS_INLINE addr0() const { return (x_ >> kClkBits) & 7; }
-  u64 ALWAYS_INLINE size() const { return 1ull << size_log(); }
-  bool ALWAYS_INLINE IsWrite() const { return !IsRead(); }
-  bool ALWAYS_INLINE IsRead() const { return x_ & kReadBit; }
-
-  // The idea behind the freed bit is as follows.
-  // When the memory is freed (or otherwise inaccessible) we write to the shadow
-  // values with tid/epoch related to the free and the freed bit set.
-  // During memory accesses processing the freed bit is considered
-  // as msb of tid. So any access races with shadow with freed bit set
-  // (it is as if write from a thread with which we never synchronized before).
-  // This allows us to detect accesses to freed memory w/o additional
-  // overheads in memory access processing and at the same time restore
-  // tid/epoch of free.
-  void MarkAsFreed() {
-     x_ |= kFreedBit;
-  }
-
-  bool IsFreed() const {
-    return x_ & kFreedBit;
-  }
-
-  bool GetFreedAndReset() {
-    bool res = x_ & kFreedBit;
-    x_ &= ~kFreedBit;
-    return res;
-  }
-
-  bool ALWAYS_INLINE IsBothReadsOrAtomic(bool kIsWrite, bool kIsAtomic) const {
-    bool v = x_ & ((u64(kIsWrite ^ 1) << kReadShift)
-        | (u64(kIsAtomic) << kAtomicShift));
-    DCHECK_EQ(v, (!IsWrite() && !kIsWrite) || (IsAtomic() && kIsAtomic));
-    return v;
-  }
-
-  bool ALWAYS_INLINE IsRWNotWeaker(bool kIsWrite, bool kIsAtomic) const {
-    bool v = ((x_ >> kReadShift) & 3)
-        <= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
-    DCHECK_EQ(v, (IsAtomic() < kIsAtomic) ||
-        (IsAtomic() == kIsAtomic && !IsWrite() <= !kIsWrite));
-    return v;
-  }
-
-  bool ALWAYS_INLINE IsRWWeakerOrEqual(bool kIsWrite, bool kIsAtomic) const {
-    bool v = ((x_ >> kReadShift) & 3)
-        >= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
-    DCHECK_EQ(v, (IsAtomic() > kIsAtomic) ||
-        (IsAtomic() == kIsAtomic && !IsWrite() >= !kIsWrite));
-    return v;
-  }
-
- private:
-  static const u64 kReadShift   = 5 + kClkBits;
-  static const u64 kReadBit     = 1ull << kReadShift;
-  static const u64 kAtomicShift = 6 + kClkBits;
-  static const u64 kAtomicBit   = 1ull << kAtomicShift;
-
-  u64 size_log() const { return (x_ >> (3 + kClkBits)) & 3; }
-
-  static bool TwoRangesIntersectSlow(const Shadow s1, const Shadow s2) {
-    if (s1.addr0() == s2.addr0()) return true;
-    if (s1.addr0() < s2.addr0() && s1.addr0() + s1.size() > s2.addr0())
-      return true;
-    if (s2.addr0() < s1.addr0() && s2.addr0() + s2.size() > s1.addr0())
-      return true;
-    return false;
-  }
-};
-
 struct ThreadSignalContext;
 
 struct JmpBuf {
@@ -380,27 +152,30 @@ struct ThreadState {
   // We do not distinguish between ignoring reads and writes
   // for better performance.
   int ignore_reads_and_writes;
+  atomic_sint32_t pending_signals;
   int ignore_sync;
   int suppress_reports;
   // Go does not support ignores.
 #if !SANITIZER_GO
   IgnoreSet mop_ignore_set;
   IgnoreSet sync_ignore_set;
-#endif
-  // C/C++ uses fixed size shadow stack embed into Trace.
+  // C/C++ uses fixed size shadow stack.
+  uptr shadow_stack[kShadowStackSize];
+#else
   // Go uses malloc-allocated shadow stack with dynamic size.
   uptr *shadow_stack;
+#endif
   uptr *shadow_stack_end;
   uptr *shadow_stack_pos;
-  u64 *racy_shadow_addr;
-  u64 racy_state[2];
+  RawShadow *racy_shadow_addr;
+  RawShadow racy_state[2];
   MutexSet mset;
   ThreadClock clock;
 #if !SANITIZER_GO
   Vector<JmpBuf> jmp_bufs;
   int ignore_interceptors;
 #endif
-  const u32 tid;
+  const Tid tid;
   const int unique_id;
   bool in_symbolizer;
   bool in_ignored_lib;
@@ -414,9 +189,6 @@ struct ThreadState {
   const uptr tls_size;
   ThreadContext *tctx;
 
-#if SANITIZER_DEBUG && !SANITIZER_GO
-  InternalDeadlockDetector internal_deadlock_detector;
-#endif
   DDLogicalThread *dd_lt;
 
   // Current wired Processor, or nullptr. Required to handle any events.
@@ -431,7 +203,7 @@ struct ThreadState {
   ThreadSignalContext *signal_ctx;
 
 #if !SANITIZER_GO
-  u32 last_sleep_stack_id;
+  StackID last_sleep_stack_id;
   ThreadClock last_sleep_clock;
 #endif
 
@@ -441,10 +213,17 @@ struct ThreadState {
 
   const ReportDesc *current_report;
 
-  explicit ThreadState(Context *ctx, u32 tid, int unique_id, u64 epoch,
+  // Current position in tctx->trace.Back()->events (Event*).
+  atomic_uintptr_t trace_pos;
+  // PC of the last memory access, used to compute PC deltas in the trace.
+  uptr trace_prev_pc;
+  Sid sid;
+  Epoch epoch;
+
+  explicit ThreadState(Context *ctx, Tid tid, int unique_id, u64 epoch,
                        unsigned reuse_count, uptr stk_addr, uptr stk_size,
                        uptr tls_addr, uptr tls_size);
-};
+} ALIGNED(SANITIZER_CACHE_LINE_SIZE);
 
 #if !SANITIZER_GO
 #if SANITIZER_MAC || SANITIZER_ANDROID
@@ -472,10 +251,10 @@ inline void cur_thread_finalize() { }
 
 class ThreadContext final : public ThreadContextBase {
  public:
-  explicit ThreadContext(int tid);
+  explicit ThreadContext(Tid tid);
   ~ThreadContext();
   ThreadState *thr;
-  u32 creation_stack_id;
+  StackID creation_stack_id;
   SyncClock sync;
   // Epoch at which the thread had started.
   // If we see an event from the thread stamped by an older epoch,
@@ -483,6 +262,8 @@ class ThreadContext final : public ThreadContextBase {
   u64 epoch0;
   u64 epoch1;
 
+  v3::Trace trace;
+
   // Override superclass callbacks.
   void OnDead() override;
   void OnJoined(void *arg) override;
@@ -495,13 +276,7 @@ class ThreadContext final : public ThreadContextBase {
 
 struct RacyStacks {
   MD5Hash hash[2];
-  bool operator==(const RacyStacks &other) const {
-    if (hash[0] == other.hash[0] && hash[1] == other.hash[1])
-      return true;
-    if (hash[0] == other.hash[1] && hash[1] == other.hash[0])
-      return true;
-    return false;
-  }
+  bool operator==(const RacyStacks &other) const;
 };
 
 struct RacyAddress {
@@ -527,13 +302,12 @@ struct Context {
 
   Mutex report_mtx;
   int nreported;
-  int nmissed_expected;
   atomic_uint64_t last_symbolize_time_ns;
 
   void *background_thread;
   atomic_uint32_t stop_background_thread;
 
-  ThreadRegistry *thread_registry;
+  ThreadRegistry thread_registry;
 
   Mutex racy_mtx;
   Vector<RacyStacks> racy_stacks;
@@ -546,9 +320,9 @@ struct Context {
   ClockAlloc clock_alloc;
 
   Flags flags;
+  fd_t memprof_fd;
 
-  u64 int_alloc_cnt[MBlockTypeCount];
-  u64 int_alloc_siz[MBlockTypeCount];
+  Mutex slot_mtx;
 };
 
 extern Context *ctx;  // The one and the only global runtime context.
@@ -581,12 +355,12 @@ class ScopedReportBase {
                        const MutexSet *mset);
   void AddStack(StackTrace stack, bool suppressable = false);
   void AddThread(const ThreadContext *tctx, bool suppressable = false);
-  void AddThread(int unique_tid, bool suppressable = false);
-  void AddUniqueTid(int unique_tid);
+  void AddThread(Tid unique_tid, bool suppressable = false);
+  void AddUniqueTid(Tid unique_tid);
   void AddMutex(const SyncVar *s);
   u64 AddMutex(u64 id);
   void AddLocation(uptr addr, uptr size);
-  void AddSleep(u32 stack_id);
+  void AddSleep(StackID stack_id);
   void SetCount(int count);
 
   const ReportDesc *GetReport() const;
@@ -618,7 +392,7 @@ class ScopedReport : public ScopedReportBase {
 
 bool ShouldReport(ThreadState *thr, ReportType typ);
 ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack);
-void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
+void RestoreStack(Tid tid, const u64 epoch, VarSizeStackTrace *stk,
                   MutexSet *mset, uptr *tag = nullptr);
 
 // The stack could look like:
@@ -671,7 +445,6 @@ void ReportRace(ThreadState *thr);
 bool OutputReport(ThreadState *thr, const ScopedReport &srep);
 bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace);
 bool IsExpectedReport(uptr addr, uptr size);
-void PrintMatchedBenignRaces();
 
 #if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 1
 # define DPrintf Printf
@@ -685,10 +458,11 @@ void PrintMatchedBenignRaces();
 # define DPrintf2(...)
 #endif
 
-u32 CurrentStackId(ThreadState *thr, uptr pc);
-ReportStack *SymbolizeStackId(u32 stack_id);
+StackID CurrentStackId(ThreadState *thr, uptr pc);
+ReportStack *SymbolizeStackId(StackID stack_id);
 void PrintCurrentStack(ThreadState *thr, uptr pc);
 void PrintCurrentStackSlow(uptr pc);  // uses libunwind
+MBlock *JavaHeapBlock(uptr addr, uptr *start);
 
 void Initialize(ThreadState *thr);
 void MaybeSpawnBackgroundThread();
@@ -704,34 +478,44 @@ void MemoryAccessImpl(ThreadState *thr, uptr addr,
     u64 *shadow_mem, Shadow cur);
 void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
     uptr size, bool is_write);
-void MemoryAccessRangeStep(ThreadState *thr, uptr pc, uptr addr,
-    uptr size, uptr step, bool is_write);
-void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
-    int size, bool kAccessIsWrite, bool kIsAtomic);
+void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr, uptr size,
+                           AccessType typ);
 
 const int kSizeLog1 = 0;
 const int kSizeLog2 = 1;
 const int kSizeLog4 = 2;
 const int kSizeLog8 = 3;
 
-void ALWAYS_INLINE MemoryRead(ThreadState *thr, uptr pc,
-                                     uptr addr, int kAccessSizeLog) {
-  MemoryAccess(thr, pc, addr, kAccessSizeLog, false, false);
-}
-
-void ALWAYS_INLINE MemoryWrite(ThreadState *thr, uptr pc,
-                                      uptr addr, int kAccessSizeLog) {
-  MemoryAccess(thr, pc, addr, kAccessSizeLog, true, false);
-}
-
-void ALWAYS_INLINE MemoryReadAtomic(ThreadState *thr, uptr pc,
-                                           uptr addr, int kAccessSizeLog) {
-  MemoryAccess(thr, pc, addr, kAccessSizeLog, false, true);
-}
-
-void ALWAYS_INLINE MemoryWriteAtomic(ThreadState *thr, uptr pc,
-                                            uptr addr, int kAccessSizeLog) {
-  MemoryAccess(thr, pc, addr, kAccessSizeLog, true, true);
+ALWAYS_INLINE
+void MemoryAccess(ThreadState *thr, uptr pc, uptr addr, uptr size,
+                  AccessType typ) {
+  int size_log;
+  switch (size) {
+    case 1:
+      size_log = kSizeLog1;
+      break;
+    case 2:
+      size_log = kSizeLog2;
+      break;
+    case 4:
+      size_log = kSizeLog4;
+      break;
+    default:
+      DCHECK_EQ(size, 8);
+      size_log = kSizeLog8;
+      break;
+  }
+  bool is_write = !(typ & kAccessRead);
+  bool is_atomic = typ & kAccessAtomic;
+  if (typ & kAccessVptr)
+    thr->is_vptr_access = true;
+  if (typ & kAccessFree)
+    thr->is_freeing = true;
+  MemoryAccess(thr, pc, addr, size_log, is_write, is_atomic);
+  if (typ & kAccessVptr)
+    thr->is_vptr_access = false;
+  if (typ & kAccessFree)
+    thr->is_freeing = false;
 }
 
 void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size);
@@ -740,26 +524,26 @@ void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size);
 void MemoryRangeImitateWriteOrResetRange(ThreadState *thr, uptr pc, uptr addr,
                                          uptr size);
 
-void ThreadIgnoreBegin(ThreadState *thr, uptr pc, bool save_stack = true);
-void ThreadIgnoreEnd(ThreadState *thr, uptr pc);
-void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc, bool save_stack = true);
-void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc);
+void ThreadIgnoreBegin(ThreadState *thr, uptr pc);
+void ThreadIgnoreEnd(ThreadState *thr);
+void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc);
+void ThreadIgnoreSyncEnd(ThreadState *thr);
 
 void FuncEntry(ThreadState *thr, uptr pc);
 void FuncExit(ThreadState *thr);
 
-int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached);
-void ThreadStart(ThreadState *thr, int tid, tid_t os_id,
+Tid ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached);
+void ThreadStart(ThreadState *thr, Tid tid, tid_t os_id,
                  ThreadType thread_type);
 void ThreadFinish(ThreadState *thr);
-int ThreadConsumeTid(ThreadState *thr, uptr pc, uptr uid);
-void ThreadJoin(ThreadState *thr, uptr pc, int tid);
-void ThreadDetach(ThreadState *thr, uptr pc, int tid);
+Tid ThreadConsumeTid(ThreadState *thr, uptr pc, uptr uid);
+void ThreadJoin(ThreadState *thr, uptr pc, Tid tid);
+void ThreadDetach(ThreadState *thr, uptr pc, Tid tid);
 void ThreadFinalize(ThreadState *thr);
 void ThreadSetName(ThreadState *thr, const char *name);
 int ThreadCount(ThreadState *thr);
-void ProcessPendingSignals(ThreadState *thr);
-void ThreadNotJoined(ThreadState *thr, uptr pc, int tid, uptr uid);
+void ProcessPendingSignalsImpl(ThreadState *thr);
+void ThreadNotJoined(ThreadState *thr, uptr pc, Tid tid, uptr uid);
 
 Processor *ProcCreate();
 void ProcDestroy(Processor *proc);
@@ -788,7 +572,7 @@ void Acquire(ThreadState *thr, uptr pc, uptr addr);
 // handle Go finalizers. Namely, finalizer goroutine executes AcquireGlobal
 // right before executing finalizers. This provides a coarse, but simple
 // approximation of the actual required synchronization.
-void AcquireGlobal(ThreadState *thr, uptr pc);
+void AcquireGlobal(ThreadState *thr);
 void Release(ThreadState *thr, uptr pc, uptr addr);
 void ReleaseStoreAcquire(ThreadState *thr, uptr pc, uptr addr);
 void ReleaseStore(ThreadState *thr, uptr pc, uptr addr);
@@ -824,7 +608,7 @@ void TraceSwitch(ThreadState *thr);
 uptr TraceTopPC(ThreadState *thr);
 uptr TraceSize();
 uptr TraceParts();
-Trace *ThreadTrace(int tid);
+Trace *ThreadTrace(Tid tid);
 
 extern "C" void __tsan_trace_switch();
 void ALWAYS_INLINE TraceAddEvent(ThreadState *thr, FastState fs,
@@ -864,6 +648,111 @@ enum FiberSwitchFlags {
   FiberSwitchFlagNoSync = 1 << 0, // __tsan_switch_to_fiber_no_sync
 };
 
+ALWAYS_INLINE void ProcessPendingSignals(ThreadState *thr) {
+  if (UNLIKELY(atomic_load_relaxed(&thr->pending_signals)))
+    ProcessPendingSignalsImpl(thr);
+}
+
+extern bool is_initialized;
+
+ALWAYS_INLINE
+void LazyInitialize(ThreadState *thr) {
+  // If we can use .preinit_array, assume that __tsan_init
+  // called from .preinit_array initializes runtime before
+  // any instrumented code.
+#if !SANITIZER_CAN_USE_PREINIT_ARRAY
+  if (UNLIKELY(!is_initialized))
+    Initialize(thr);
+#endif
+}
+
+namespace v3 {
+
+void TraceSwitchPart(ThreadState *thr);
+bool RestoreStack(Tid tid, EventType type, Sid sid, Epoch epoch, uptr addr,
+                  uptr size, AccessType typ, VarSizeStackTrace *pstk,
+                  MutexSet *pmset, uptr *ptag);
+
+template <typename EventT>
+ALWAYS_INLINE WARN_UNUSED_RESULT bool TraceAcquire(ThreadState *thr,
+                                                   EventT **ev) {
+  Event *pos = reinterpret_cast<Event *>(atomic_load_relaxed(&thr->trace_pos));
+#if SANITIZER_DEBUG
+  // TraceSwitch acquires these mutexes,
+  // so we lock them here to detect deadlocks more reliably.
+  { Lock lock(&ctx->slot_mtx); }
+  { Lock lock(&thr->tctx->trace.mtx); }
+  TracePart *current = thr->tctx->trace.parts.Back();
+  if (current) {
+    DCHECK_GE(pos, &current->events[0]);
+    DCHECK_LE(pos, &current->events[TracePart::kSize]);
+  } else {
+    DCHECK_EQ(pos, nullptr);
+  }
+#endif
+  // TracePart is allocated with mmap and is at least 4K aligned.
+  // So the following check is a faster way to check for part end.
+  // It may have false positives in the middle of the trace,
+  // they are filtered out in TraceSwitch.
+  if (UNLIKELY(((uptr)(pos + 1) & TracePart::kAlignment) == 0))
+    return false;
+  *ev = reinterpret_cast<EventT *>(pos);
+  return true;
+}
+
+template <typename EventT>
+ALWAYS_INLINE void TraceRelease(ThreadState *thr, EventT *evp) {
+  DCHECK_LE(evp + 1, &thr->tctx->trace.parts.Back()->events[TracePart::kSize]);
+  atomic_store_relaxed(&thr->trace_pos, (uptr)(evp + 1));
+}
+
+template <typename EventT>
+void TraceEvent(ThreadState *thr, EventT ev) {
+  EventT *evp;
+  if (!TraceAcquire(thr, &evp)) {
+    TraceSwitchPart(thr);
+    UNUSED bool res = TraceAcquire(thr, &evp);
+    DCHECK(res);
+  }
+  *evp = ev;
+  TraceRelease(thr, evp);
+}
+
+ALWAYS_INLINE WARN_UNUSED_RESULT bool TryTraceFunc(ThreadState *thr,
+                                                   uptr pc = 0) {
+  if (!kCollectHistory)
+    return true;
+  EventFunc *ev;
+  if (UNLIKELY(!TraceAcquire(thr, &ev)))
+    return false;
+  ev->is_access = 0;
+  ev->is_func = 1;
+  ev->pc = pc;
+  TraceRelease(thr, ev);
+  return true;
+}
+
+WARN_UNUSED_RESULT
+bool TryTraceMemoryAccess(ThreadState *thr, uptr pc, uptr addr, uptr size,
+                          AccessType typ);
+WARN_UNUSED_RESULT
+bool TryTraceMemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, uptr size,
+                               AccessType typ);
+void TraceMemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, uptr size,
+                            AccessType typ);
+void TraceFunc(ThreadState *thr, uptr pc = 0);
+void TraceMutexLock(ThreadState *thr, EventType type, uptr pc, uptr addr,
+                    StackID stk);
+void TraceMutexUnlock(ThreadState *thr, uptr addr);
+void TraceTime(ThreadState *thr);
+
+}  // namespace v3
+
+#if !SANITIZER_GO
+extern void (*on_initialize)(void);
+extern int (*on_finalize)(int);
+#endif
+
 }  // namespace __tsan
 
 #endif  // TSAN_RTL_H
index a214a336d69f58ac147f4d3d4d77c33aded851d6..7d6b41116aa6f4a24977e344874dbb7bfe9688b9 100644 (file)
@@ -35,7 +35,7 @@ struct Callback final : public DDCallback {
     DDCallback::lt = thr->dd_lt;
   }
 
-  u32 Unwind() override { return CurrentStackId(thr, pc); }
+  StackID Unwind() override { return CurrentStackId(thr, pc); }
   int UniqueTid() override { return thr->unique_id; }
 };
 
@@ -53,7 +53,7 @@ static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ,
     return;
   if (!ShouldReport(thr, typ))
     return;
-  ThreadRegistryLock l(ctx->thread_registry);
+  ThreadRegistryLock l(&ctx->thread_registry);
   ScopedReport rep(typ);
   rep.AddMutex(mid);
   VarSizeStackTrace trace;
@@ -68,46 +68,49 @@ void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
   if (!(flagz & MutexFlagLinkerInit) && IsAppMem(addr)) {
     CHECK(!thr->is_freeing);
     thr->is_freeing = true;
-    MemoryWrite(thr, pc, addr, kSizeLog1);
+    MemoryAccess(thr, pc, addr, 1, kAccessWrite);
     thr->is_freeing = false;
   }
-  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
+  SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
+  Lock l(&s->mtx);
   s->SetFlags(flagz & MutexCreationFlagMask);
+  // Save stack in the case the sync object was created before as atomic.
   if (!SANITIZER_GO && s->creation_stack_id == 0)
     s->creation_stack_id = CurrentStackId(thr, pc);
-  s->mtx.Unlock();
 }
 
 void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
   DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr);
-  SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
-  if (s == 0)
-    return;
-  if ((flagz & MutexFlagLinkerInit)
-      || s->IsFlagSet(MutexFlagLinkerInit)
-      || ((flagz & MutexFlagNotStatic) && !s->IsFlagSet(MutexFlagNotStatic))) {
-    // Destroy is no-op for linker-initialized mutexes.
-    s->mtx.Unlock();
-    return;
-  }
-  if (common_flags()->detect_deadlocks) {
-    Callback cb(thr, pc);
-    ctx->dd->MutexDestroy(&cb, &s->dd);
-    ctx->dd->MutexInit(&cb, &s->dd);
-  }
   bool unlock_locked = false;
-  if (flags()->report_destroy_locked && s->owner_tid != kInvalidTid &&
-      !s->IsFlagSet(MutexFlagBroken)) {
-    s->SetFlags(MutexFlagBroken);
-    unlock_locked = true;
+  u64 mid = 0;
+  u64 last_lock = 0;
+  {
+    SyncVar *s = ctx->metamap.GetSyncIfExists(addr);
+    if (s == 0)
+      return;
+    Lock l(&s->mtx);
+    if ((flagz & MutexFlagLinkerInit) || s->IsFlagSet(MutexFlagLinkerInit) ||
+        ((flagz & MutexFlagNotStatic) && !s->IsFlagSet(MutexFlagNotStatic))) {
+      // Destroy is no-op for linker-initialized mutexes.
+      return;
+    }
+    if (common_flags()->detect_deadlocks) {
+      Callback cb(thr, pc);
+      ctx->dd->MutexDestroy(&cb, &s->dd);
+      ctx->dd->MutexInit(&cb, &s->dd);
+    }
+    if (flags()->report_destroy_locked && s->owner_tid != kInvalidTid &&
+        !s->IsFlagSet(MutexFlagBroken)) {
+      s->SetFlags(MutexFlagBroken);
+      unlock_locked = true;
+    }
+    mid = s->GetId();
+    last_lock = s->last_lock;
+    if (!unlock_locked)
+      s->Reset(thr->proc());  // must not reset it before the report is printed
   }
-  u64 mid = s->GetId();
-  u64 last_lock = s->last_lock;
-  if (!unlock_locked)
-    s->Reset(thr->proc());  // must not reset it before the report is printed
-  s->mtx.Unlock();
   if (unlock_locked && ShouldReport(thr, ReportTypeMutexDestroyLocked)) {
-    ThreadRegistryLock l(ctx->thread_registry);
+    ThreadRegistryLock l(&ctx->thread_registry);
     ScopedReport rep(ReportTypeMutexDestroyLocked);
     rep.AddMutex(mid);
     VarSizeStackTrace trace;
@@ -119,38 +122,35 @@ void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
     rep.AddLocation(addr, 1);
     OutputReport(thr, rep);
 
-    SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
+    SyncVar *s = ctx->metamap.GetSyncIfExists(addr);
     if (s != 0) {
+      Lock l(&s->mtx);
       s->Reset(thr->proc());
-      s->mtx.Unlock();
     }
   }
   thr->mset.Remove(mid);
   // Imitate a memory write to catch unlock-destroy races.
   // Do this outside of sync mutex, because it can report a race which locks
   // sync mutexes.
-  if (IsAppMem(addr)) {
-    CHECK(!thr->is_freeing);
-    thr->is_freeing = true;
-    MemoryWrite(thr, pc, addr, kSizeLog1);
-    thr->is_freeing = false;
-  }
+  if (IsAppMem(addr))
+    MemoryAccess(thr, pc, addr, 1, kAccessWrite | kAccessFree);
   // s will be destroyed and freed in MetaMap::FreeBlock.
 }
 
 void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
   DPrintf("#%d: MutexPreLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
   if (!(flagz & MutexFlagTryLock) && common_flags()->detect_deadlocks) {
-    SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
-    s->UpdateFlags(flagz);
-    if (s->owner_tid != thr->tid) {
-      Callback cb(thr, pc);
-      ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
-      s->mtx.ReadUnlock();
-      ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
-    } else {
-      s->mtx.ReadUnlock();
+    SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
+    {
+      ReadLock l(&s->mtx);
+      s->UpdateFlags(flagz);
+      if (s->owner_tid != thr->tid) {
+        Callback cb(thr, pc);
+        ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
+      }
     }
+    Callback cb(thr, pc);
+    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
   }
 }
 
@@ -162,43 +162,45 @@ void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz, int rec) {
   else
     rec = 1;
   if (IsAppMem(addr))
-    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
-  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
-  s->UpdateFlags(flagz);
-  thr->fast_state.IncrementEpoch();
-  TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId());
-  bool report_double_lock = false;
-  if (s->owner_tid == kInvalidTid) {
-    CHECK_EQ(s->recursion, 0);
-    s->owner_tid = thr->tid;
-    s->last_lock = thr->fast_state.raw();
-  } else if (s->owner_tid == thr->tid) {
-    CHECK_GT(s->recursion, 0);
-  } else if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
-    s->SetFlags(MutexFlagBroken);
-    report_double_lock = true;
-  }
-  const bool first = s->recursion == 0;
-  s->recursion += rec;
-  if (first) {
-    AcquireImpl(thr, pc, &s->clock);
-    AcquireImpl(thr, pc, &s->read_clock);
-  } else if (!s->IsFlagSet(MutexFlagWriteReentrant)) {
-  }
-  thr->mset.Add(s->GetId(), true, thr->fast_state.epoch());
+    MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
+  u64 mid = 0;
   bool pre_lock = false;
-  if (first && common_flags()->detect_deadlocks) {
-    pre_lock = (flagz & MutexFlagDoPreLockOnPostLock) &&
-        !(flagz & MutexFlagTryLock);
-    Callback cb(thr, pc);
-    if (pre_lock)
-      ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
-    ctx->dd->MutexAfterLock(&cb, &s->dd, true, flagz & MutexFlagTryLock);
+  bool first = false;
+  bool report_double_lock = false;
+  {
+    SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
+    Lock l(&s->mtx);
+    s->UpdateFlags(flagz);
+    thr->fast_state.IncrementEpoch();
+    TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId());
+    if (s->owner_tid == kInvalidTid) {
+      CHECK_EQ(s->recursion, 0);
+      s->owner_tid = thr->tid;
+      s->last_lock = thr->fast_state.raw();
+    } else if (s->owner_tid == thr->tid) {
+      CHECK_GT(s->recursion, 0);
+    } else if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
+      s->SetFlags(MutexFlagBroken);
+      report_double_lock = true;
+    }
+    first = s->recursion == 0;
+    s->recursion += rec;
+    if (first) {
+      AcquireImpl(thr, pc, &s->clock);
+      AcquireImpl(thr, pc, &s->read_clock);
+    } else if (!s->IsFlagSet(MutexFlagWriteReentrant)) {
+    }
+    thr->mset.Add(s->GetId(), true, thr->fast_state.epoch());
+    if (first && common_flags()->detect_deadlocks) {
+      pre_lock =
+          (flagz & MutexFlagDoPreLockOnPostLock) && !(flagz & MutexFlagTryLock);
+      Callback cb(thr, pc);
+      if (pre_lock)
+        ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
+      ctx->dd->MutexAfterLock(&cb, &s->dd, true, flagz & MutexFlagTryLock);
+    }
+    mid = s->GetId();
   }
-  u64 mid = s->GetId();
-  s->mtx.Unlock();
-  // Can't touch s after this point.
-  s = 0;
   if (report_double_lock)
     ReportMutexMisuse(thr, pc, ReportTypeMutexDoubleLock, addr, mid);
   if (first && pre_lock && common_flags()->detect_deadlocks) {
@@ -210,35 +212,37 @@ void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz, int rec) {
 int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
   DPrintf("#%d: MutexUnlock %zx flagz=0x%x\n", thr->tid, addr, flagz);
   if (IsAppMem(addr))
-    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
-  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
-  thr->fast_state.IncrementEpoch();
-  TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
-  int rec = 0;
+    MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
+  u64 mid = 0;
   bool report_bad_unlock = false;
-  if (!SANITIZER_GO && (s->recursion == 0 || s->owner_tid != thr->tid)) {
-    if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
-      s->SetFlags(MutexFlagBroken);
-      report_bad_unlock = true;
-    }
-  } else {
-    rec = (flagz & MutexFlagRecursiveUnlock) ? s->recursion : 1;
-    s->recursion -= rec;
-    if (s->recursion == 0) {
-      s->owner_tid = kInvalidTid;
-      ReleaseStoreImpl(thr, pc, &s->clock);
+  int rec = 0;
+  {
+    SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
+    Lock l(&s->mtx);
+    thr->fast_state.IncrementEpoch();
+    TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
+    if (!SANITIZER_GO && (s->recursion == 0 || s->owner_tid != thr->tid)) {
+      if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
+        s->SetFlags(MutexFlagBroken);
+        report_bad_unlock = true;
+      }
     } else {
+      rec = (flagz & MutexFlagRecursiveUnlock) ? s->recursion : 1;
+      s->recursion -= rec;
+      if (s->recursion == 0) {
+        s->owner_tid = kInvalidTid;
+        ReleaseStoreImpl(thr, pc, &s->clock);
+      } else {
+      }
     }
+    thr->mset.Del(s->GetId(), true);
+    if (common_flags()->detect_deadlocks && s->recursion == 0 &&
+        !report_bad_unlock) {
+      Callback cb(thr, pc);
+      ctx->dd->MutexBeforeUnlock(&cb, &s->dd, true);
+    }
+    mid = s->GetId();
   }
-  thr->mset.Del(s->GetId(), true);
-  if (common_flags()->detect_deadlocks && s->recursion == 0 &&
-      !report_bad_unlock) {
-    Callback cb(thr, pc);
-    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, true);
-  }
-  u64 mid = s->GetId();
-  s->mtx.Unlock();
-  // Can't touch s after this point.
   if (report_bad_unlock)
     ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
   if (common_flags()->detect_deadlocks && !report_bad_unlock) {
@@ -251,11 +255,14 @@ int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
 void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
   DPrintf("#%d: MutexPreReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
   if (!(flagz & MutexFlagTryLock) && common_flags()->detect_deadlocks) {
-    SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
-    s->UpdateFlags(flagz);
+    {
+      SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
+      ReadLock l(&s->mtx);
+      s->UpdateFlags(flagz);
+      Callback cb(thr, pc);
+      ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
+    }
     Callback cb(thr, pc);
-    ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
-    s->mtx.ReadUnlock();
     ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
   }
 }
@@ -263,34 +270,35 @@ void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
 void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
   DPrintf("#%d: MutexPostReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
   if (IsAppMem(addr))
-    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
-  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
-  s->UpdateFlags(flagz);
-  thr->fast_state.IncrementEpoch();
-  TraceAddEvent(thr, thr->fast_state, EventTypeRLock, s->GetId());
+    MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
+  u64 mid = 0;
   bool report_bad_lock = false;
-  if (s->owner_tid != kInvalidTid) {
-    if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
-      s->SetFlags(MutexFlagBroken);
-      report_bad_lock = true;
-    }
-  }
-  AcquireImpl(thr, pc, &s->clock);
-  s->last_lock = thr->fast_state.raw();
-  thr->mset.Add(s->GetId(), false, thr->fast_state.epoch());
   bool pre_lock = false;
-  if (common_flags()->detect_deadlocks) {
-    pre_lock = (flagz & MutexFlagDoPreLockOnPostLock) &&
-        !(flagz & MutexFlagTryLock);
-    Callback cb(thr, pc);
-    if (pre_lock)
-      ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
-    ctx->dd->MutexAfterLock(&cb, &s->dd, false, flagz & MutexFlagTryLock);
+  {
+    SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
+    ReadLock l(&s->mtx);
+    s->UpdateFlags(flagz);
+    thr->fast_state.IncrementEpoch();
+    TraceAddEvent(thr, thr->fast_state, EventTypeRLock, s->GetId());
+    if (s->owner_tid != kInvalidTid) {
+      if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
+        s->SetFlags(MutexFlagBroken);
+        report_bad_lock = true;
+      }
+    }
+    AcquireImpl(thr, pc, &s->clock);
+    s->last_lock = thr->fast_state.raw();
+    thr->mset.Add(s->GetId(), false, thr->fast_state.epoch());
+    if (common_flags()->detect_deadlocks) {
+      pre_lock =
+          (flagz & MutexFlagDoPreLockOnPostLock) && !(flagz & MutexFlagTryLock);
+      Callback cb(thr, pc);
+      if (pre_lock)
+        ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
+      ctx->dd->MutexAfterLock(&cb, &s->dd, false, flagz & MutexFlagTryLock);
+    }
+    mid = s->GetId();
   }
-  u64 mid = s->GetId();
-  s->mtx.ReadUnlock();
-  // Can't touch s after this point.
-  s = 0;
   if (report_bad_lock)
     ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadLock, addr, mid);
   if (pre_lock  && common_flags()->detect_deadlocks) {
@@ -302,25 +310,27 @@ void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
 void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
   DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr);
   if (IsAppMem(addr))
-    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
-  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
-  thr->fast_state.IncrementEpoch();
-  TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
+    MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
+  u64 mid = 0;
   bool report_bad_unlock = false;
-  if (s->owner_tid != kInvalidTid) {
-    if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
-      s->SetFlags(MutexFlagBroken);
-      report_bad_unlock = true;
+  {
+    SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
+    Lock l(&s->mtx);
+    thr->fast_state.IncrementEpoch();
+    TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
+    if (s->owner_tid != kInvalidTid) {
+      if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
+        s->SetFlags(MutexFlagBroken);
+        report_bad_unlock = true;
+      }
     }
+    ReleaseImpl(thr, pc, &s->read_clock);
+    if (common_flags()->detect_deadlocks && s->recursion == 0) {
+      Callback cb(thr, pc);
+      ctx->dd->MutexBeforeUnlock(&cb, &s->dd, false);
+    }
+    mid = s->GetId();
   }
-  ReleaseImpl(thr, pc, &s->read_clock);
-  if (common_flags()->detect_deadlocks && s->recursion == 0) {
-    Callback cb(thr, pc);
-    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, false);
-  }
-  u64 mid = s->GetId();
-  s->mtx.Unlock();
-  // Can't touch s after this point.
   thr->mset.Del(mid, false);
   if (report_bad_unlock)
     ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadUnlock, addr, mid);
@@ -333,39 +343,41 @@ void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
 void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
   DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr);
   if (IsAppMem(addr))
-    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
-  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
-  bool write = true;
+    MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
+  u64 mid = 0;
   bool report_bad_unlock = false;
-  if (s->owner_tid == kInvalidTid) {
-    // Seems to be read unlock.
-    write = false;
-    thr->fast_state.IncrementEpoch();
-    TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
-    ReleaseImpl(thr, pc, &s->read_clock);
-  } else if (s->owner_tid == thr->tid) {
-    // Seems to be write unlock.
-    thr->fast_state.IncrementEpoch();
-    TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
-    CHECK_GT(s->recursion, 0);
-    s->recursion--;
-    if (s->recursion == 0) {
-      s->owner_tid = kInvalidTid;
-      ReleaseStoreImpl(thr, pc, &s->clock);
-    } else {
+  {
+    SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
+    Lock l(&s->mtx);
+    bool write = true;
+    if (s->owner_tid == kInvalidTid) {
+      // Seems to be read unlock.
+      write = false;
+      thr->fast_state.IncrementEpoch();
+      TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
+      ReleaseImpl(thr, pc, &s->read_clock);
+    } else if (s->owner_tid == thr->tid) {
+      // Seems to be write unlock.
+      thr->fast_state.IncrementEpoch();
+      TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
+      CHECK_GT(s->recursion, 0);
+      s->recursion--;
+      if (s->recursion == 0) {
+        s->owner_tid = kInvalidTid;
+        ReleaseStoreImpl(thr, pc, &s->clock);
+      } else {
+      }
+    } else if (!s->IsFlagSet(MutexFlagBroken)) {
+      s->SetFlags(MutexFlagBroken);
+      report_bad_unlock = true;
     }
-  } else if (!s->IsFlagSet(MutexFlagBroken)) {
-    s->SetFlags(MutexFlagBroken);
-    report_bad_unlock = true;
-  }
-  thr->mset.Del(s->GetId(), write);
-  if (common_flags()->detect_deadlocks && s->recursion == 0) {
-    Callback cb(thr, pc);
-    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, write);
+    thr->mset.Del(s->GetId(), write);
+    if (common_flags()->detect_deadlocks && s->recursion == 0) {
+      Callback cb(thr, pc);
+      ctx->dd->MutexBeforeUnlock(&cb, &s->dd, write);
+    }
+    mid = s->GetId();
   }
-  u64 mid = s->GetId();
-  s->mtx.Unlock();
-  // Can't touch s after this point.
   if (report_bad_unlock)
     ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
   if (common_flags()->detect_deadlocks) {
@@ -376,29 +388,27 @@ void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
 
 void MutexRepair(ThreadState *thr, uptr pc, uptr addr) {
   DPrintf("#%d: MutexRepair %zx\n", thr->tid, addr);
-  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
+  SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
+  Lock l(&s->mtx);
   s->owner_tid = kInvalidTid;
   s->recursion = 0;
-  s->mtx.Unlock();
 }
 
 void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr) {
   DPrintf("#%d: MutexInvalidAccess %zx\n", thr->tid, addr);
-  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
-  u64 mid = s->GetId();
-  s->mtx.Unlock();
-  ReportMutexMisuse(thr, pc, ReportTypeMutexInvalidAccess, addr, mid);
+  SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
+  ReportMutexMisuse(thr, pc, ReportTypeMutexInvalidAccess, addr, s->GetId());
 }
 
 void Acquire(ThreadState *thr, uptr pc, uptr addr) {
   DPrintf("#%d: Acquire %zx\n", thr->tid, addr);
   if (thr->ignore_sync)
     return;
-  SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, false);
+  SyncVar *s = ctx->metamap.GetSyncIfExists(addr);
   if (!s)
     return;
+  ReadLock l(&s->mtx);
   AcquireImpl(thr, pc, &s->clock);
-  s->mtx.ReadUnlock();
 }
 
 static void UpdateClockCallback(ThreadContextBase *tctx_base, void *arg) {
@@ -412,49 +422,48 @@ static void UpdateClockCallback(ThreadContextBase *tctx_base, void *arg) {
   thr->clock.set(&thr->proc()->clock_cache, tctx->tid, epoch);
 }
 
-void AcquireGlobal(ThreadState *thr, uptr pc) {
+void AcquireGlobal(ThreadState *thr) {
   DPrintf("#%d: AcquireGlobal\n", thr->tid);
   if (thr->ignore_sync)
     return;
-  ThreadRegistryLock l(ctx->thread_registry);
-  ctx->thread_registry->RunCallbackForEachThreadLocked(
-      UpdateClockCallback, thr);
+  ThreadRegistryLock l(&ctx->thread_registry);
+  ctx->thread_registry.RunCallbackForEachThreadLocked(UpdateClockCallback, thr);
 }
 
 void ReleaseStoreAcquire(ThreadState *thr, uptr pc, uptr addr) {
   DPrintf("#%d: ReleaseStoreAcquire %zx\n", thr->tid, addr);
   if (thr->ignore_sync)
     return;
-  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
+  SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, false);
+  Lock l(&s->mtx);
   thr->fast_state.IncrementEpoch();
   // Can't increment epoch w/o writing to the trace as well.
   TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
   ReleaseStoreAcquireImpl(thr, pc, &s->clock);
-  s->mtx.Unlock();
 }
 
 void Release(ThreadState *thr, uptr pc, uptr addr) {
   DPrintf("#%d: Release %zx\n", thr->tid, addr);
   if (thr->ignore_sync)
     return;
-  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
+  SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, false);
+  Lock l(&s->mtx);
   thr->fast_state.IncrementEpoch();
   // Can't increment epoch w/o writing to the trace as well.
   TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
   ReleaseImpl(thr, pc, &s->clock);
-  s->mtx.Unlock();
 }
 
 void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
   DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr);
   if (thr->ignore_sync)
     return;
-  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
+  SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, false);
+  Lock l(&s->mtx);
   thr->fast_state.IncrementEpoch();
   // Can't increment epoch w/o writing to the trace as well.
   TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
   ReleaseStoreImpl(thr, pc, &s->clock);
-  s->mtx.Unlock();
 }
 
 #if !SANITIZER_GO
@@ -468,13 +477,13 @@ static void UpdateSleepClockCallback(ThreadContextBase *tctx_base, void *arg) {
 }
 
 void AfterSleep(ThreadState *thr, uptr pc) {
-  DPrintf("#%d: AfterSleep %zx\n", thr->tid);
+  DPrintf("#%d: AfterSleep\n", thr->tid);
   if (thr->ignore_sync)
     return;
   thr->last_sleep_stack_id = CurrentStackId(thr, pc);
-  ThreadRegistryLock l(ctx->thread_registry);
-  ctx->thread_registry->RunCallbackForEachThreadLocked(
-      UpdateSleepClockCallback, thr);
+  ThreadRegistryLock l(&ctx->thread_registry);
+  ctx->thread_registry.RunCallbackForEachThreadLocked(UpdateSleepClockCallback,
+                                                      thr);
 }
 #endif
 
@@ -520,7 +529,7 @@ void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
 void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
   if (r == 0 || !ShouldReport(thr, ReportTypeDeadlock))
     return;
-  ThreadRegistryLock l(ctx->thread_registry);
+  ThreadRegistryLock l(&ctx->thread_registry);
   ScopedReport rep(ReportTypeDeadlock);
   for (int i = 0; i < r->n; i++) {
     rep.AddMutex(r->loop[i].mtx_ctx0);
index 9e533a71a9c477ecae59f09ab431b2664554d998..8285e21aa1ec7a797dfcf4840ee5a7851106b497 100644 (file)
@@ -1,6 +1,5 @@
 #include "tsan_ppc_regs.h"
 
-        .machine altivec
         .section .text
         .hidden __tsan_setjmp
         .globl _setjmp
index 706794fdad10dfda88b665a5662cc0a775c76ff7..1f0bcb35ae9f634d7be9a55efe459198bc1e40ea 100644 (file)
@@ -68,8 +68,10 @@ static void StackStripMain(SymbolizedStack *frames) {
   } else if (last && 0 == internal_strcmp(last, "__tsan_thread_start_func")) {
     last_frame->ClearAll();
     last_frame2->next = nullptr;
-  // Strip global ctors init.
-  } else if (last && 0 == internal_strcmp(last, "__do_global_ctors_aux")) {
+    // Strip global ctors init, .preinit_array and main caller.
+  } else if (last && (0 == internal_strcmp(last, "__do_global_ctors_aux") ||
+                      0 == internal_strcmp(last, "__libc_csu_init") ||
+                      0 == internal_strcmp(last, "__libc_start_main"))) {
     last_frame->ClearAll();
     last_frame2->next = nullptr;
   // If both are 0, then we probably just failed to symbolize.
@@ -120,7 +122,7 @@ static ReportStack *SymbolizeStack(StackTrace trace) {
   }
   StackStripMain(top);
 
-  ReportStack *stack = ReportStack::New();
+  auto *stack = New<ReportStack>();
   stack->frames = top;
   return stack;
 }
@@ -129,10 +131,10 @@ bool ShouldReport(ThreadState *thr, ReportType typ) {
   // We set thr->suppress_reports in the fork context.
   // Taking any locking in the fork context can lead to deadlocks.
   // If any locks are already taken, it's too late to do this check.
-  CheckNoLocks(thr);
+  CheckedMutex::CheckNoLocks();
   // For the same reason check we didn't lock thread_registry yet.
   if (SANITIZER_DEBUG)
-    ThreadRegistryLock l(ctx->thread_registry);
+    ThreadRegistryLock l(&ctx->thread_registry);
   if (!flags()->report_bugs || thr->suppress_reports)
     return false;
   switch (typ) {
@@ -154,9 +156,8 @@ bool ShouldReport(ThreadState *thr, ReportType typ) {
 }
 
 ScopedReportBase::ScopedReportBase(ReportType typ, uptr tag) {
-  ctx->thread_registry->CheckLocked();
-  void *mem = internal_alloc(MBlockReport, sizeof(ReportDesc));
-  rep_ = new(mem) ReportDesc;
+  ctx->thread_registry.CheckLocked();
+  rep_ = New<ReportDesc>();
   rep_->typ = typ;
   rep_->tag = tag;
   ctx->report_mtx.Lock();
@@ -165,7 +166,6 @@ ScopedReportBase::ScopedReportBase(ReportType typ, uptr tag) {
 ScopedReportBase::~ScopedReportBase() {
   ctx->report_mtx.Unlock();
   DestroyAndFree(rep_);
-  rep_ = nullptr;
 }
 
 void ScopedReportBase::AddStack(StackTrace stack, bool suppressable) {
@@ -176,8 +176,7 @@ void ScopedReportBase::AddStack(StackTrace stack, bool suppressable) {
 
 void ScopedReportBase::AddMemoryAccess(uptr addr, uptr external_tag, Shadow s,
                                        StackTrace stack, const MutexSet *mset) {
-  void *mem = internal_alloc(MBlockReportMop, sizeof(ReportMop));
-  ReportMop *mop = new(mem) ReportMop;
+  auto *mop = New<ReportMop>();
   rep_->mops.PushBack(mop);
   mop->tid = s.tid();
   mop->addr = addr + s.addr0();
@@ -196,7 +195,7 @@ void ScopedReportBase::AddMemoryAccess(uptr addr, uptr external_tag, Shadow s,
   }
 }
 
-void ScopedReportBase::AddUniqueTid(int unique_tid) {
+void ScopedReportBase::AddUniqueTid(Tid unique_tid) {
   rep_->unique_tids.PushBack(unique_tid);
 }
 
@@ -205,8 +204,7 @@ void ScopedReportBase::AddThread(const ThreadContext *tctx, bool suppressable) {
     if ((u32)rep_->threads[i]->id == tctx->tid)
       return;
   }
-  void *mem = internal_alloc(MBlockReportThread, sizeof(ReportThread));
-  ReportThread *rt = new(mem) ReportThread;
+  auto *rt = New<ReportThread>();
   rep_->threads.PushBack(rt);
   rt->id = tctx->tid;
   rt->os_id = tctx->os_id;
@@ -226,17 +224,17 @@ static bool FindThreadByUidLockedCallback(ThreadContextBase *tctx, void *arg) {
   return tctx->unique_id == (u32)unique_id;
 }
 
-static ThreadContext *FindThreadByUidLocked(int unique_id) {
-  ctx->thread_registry->CheckLocked();
+static ThreadContext *FindThreadByUidLocked(Tid unique_id) {
+  ctx->thread_registry.CheckLocked();
   return static_cast<ThreadContext *>(
-      ctx->thread_registry->FindThreadContextLocked(
+      ctx->thread_registry.FindThreadContextLocked(
           FindThreadByUidLockedCallback, &unique_id));
 }
 
-static ThreadContext *FindThreadByTidLocked(int tid) {
-  ctx->thread_registry->CheckLocked();
-  return static_cast<ThreadContext*>(
-      ctx->thread_registry->GetThreadLocked(tid));
+static ThreadContext *FindThreadByTidLocked(Tid tid) {
+  ctx->thread_registry.CheckLocked();
+  return static_cast<ThreadContext *>(
+      ctx->thread_registry.GetThreadLocked(tid));
 }
 
 static bool IsInStackOrTls(ThreadContextBase *tctx_base, void *arg) {
@@ -251,10 +249,10 @@ static bool IsInStackOrTls(ThreadContextBase *tctx_base, void *arg) {
 }
 
 ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
-  ctx->thread_registry->CheckLocked();
-  ThreadContext *tctx = static_cast<ThreadContext*>(
-      ctx->thread_registry->FindThreadContextLocked(IsInStackOrTls,
-                                                    (void*)addr));
+  ctx->thread_registry.CheckLocked();
+  ThreadContext *tctx =
+      static_cast<ThreadContext *>(ctx->thread_registry.FindThreadContextLocked(
+          IsInStackOrTls, (void *)addr));
   if (!tctx)
     return 0;
   ThreadState *thr = tctx->thr;
@@ -264,7 +262,7 @@ ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
 }
 #endif
 
-void ScopedReportBase::AddThread(int unique_tid, bool suppressable) {
+void ScopedReportBase::AddThread(Tid unique_tid, bool suppressable) {
 #if !SANITIZER_GO
   if (const ThreadContext *tctx = FindThreadByUidLocked(unique_tid))
     AddThread(tctx, suppressable);
@@ -276,8 +274,7 @@ void ScopedReportBase::AddMutex(const SyncVar *s) {
     if (rep_->mutexes[i]->id == s->uid)
       return;
   }
-  void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
-  ReportMutex *rm = new(mem) ReportMutex;
+  auto *rm = New<ReportMutex>();
   rep_->mutexes.PushBack(rm);
   rm->id = s->uid;
   rm->addr = s->addr;
@@ -289,18 +286,17 @@ u64 ScopedReportBase::AddMutex(u64 id) {
   u64 uid = 0;
   u64 mid = id;
   uptr addr = SyncVar::SplitId(id, &uid);
-  SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
+  SyncVar *s = ctx->metamap.GetSyncIfExists(addr);
   // Check that the mutex is still alive.
   // Another mutex can be created at the same address,
   // so check uid as well.
   if (s && s->CheckId(uid)) {
+    Lock l(&s->mtx);
     mid = s->uid;
     AddMutex(s);
   } else {
     AddDeadMutex(id);
   }
-  if (s)
-    s->mtx.Unlock();
   return mid;
 }
 
@@ -309,8 +305,7 @@ void ScopedReportBase::AddDeadMutex(u64 id) {
     if (rep_->mutexes[i]->id == id)
       return;
   }
-  void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
-  ReportMutex *rm = new(mem) ReportMutex;
+  auto *rm = New<ReportMutex>();
   rep_->mutexes.PushBack(rm);
   rm->id = id;
   rm->addr = 0;
@@ -323,10 +318,11 @@ void ScopedReportBase::AddLocation(uptr addr, uptr size) {
     return;
 #if !SANITIZER_GO
   int fd = -1;
-  int creat_tid = kInvalidTid;
-  u32 creat_stack = 0;
+  Tid creat_tid = kInvalidTid;
+  StackID creat_stack = 0;
   if (FdLocation(addr, &fd, &creat_tid, &creat_stack)) {
-    ReportLocation *loc = ReportLocation::New(ReportLocationFD);
+    auto *loc = New<ReportLocation>();
+    loc->type = ReportLocationFD;
     loc->fd = fd;
     loc->tid = creat_tid;
     loc->stack = SymbolizeStackId(creat_stack);
@@ -337,15 +333,19 @@ void ScopedReportBase::AddLocation(uptr addr, uptr size) {
     return;
   }
   MBlock *b = 0;
+  uptr block_begin = 0;
   Allocator *a = allocator();
   if (a->PointerIsMine((void*)addr)) {
-    void *block_begin = a->GetBlockBegin((void*)addr);
+    block_begin = (uptr)a->GetBlockBegin((void *)addr);
     if (block_begin)
-      b = ctx->metamap.GetBlock((uptr)block_begin);
+      b = ctx->metamap.GetBlock(block_begin);
   }
+  if (!b)
+    b = JavaHeapBlock(addr, &block_begin);
   if (b != 0) {
     ThreadContext *tctx = FindThreadByTidLocked(b->tid);
-    ReportLocation *loc = ReportLocation::New(ReportLocationHeap);
+    auto *loc = New<ReportLocation>();
+    loc->type = ReportLocationHeap;
     loc->heap_chunk_start = (uptr)allocator()->GetBlockBegin((void *)addr);
     loc->heap_chunk_size = b->siz;
     loc->external_tag = b->tag;
@@ -358,8 +358,8 @@ void ScopedReportBase::AddLocation(uptr addr, uptr size) {
   }
   bool is_stack = false;
   if (ThreadContext *tctx = IsThreadStackOrTls(addr, &is_stack)) {
-    ReportLocation *loc =
-        ReportLocation::New(is_stack ? ReportLocationStack : ReportLocationTLS);
+    auto *loc = New<ReportLocation>();
+    loc->type = is_stack ? ReportLocationStack : ReportLocationTLS;
     loc->tid = tctx->tid;
     rep_->locs.PushBack(loc);
     AddThread(tctx);
@@ -373,7 +373,7 @@ void ScopedReportBase::AddLocation(uptr addr, uptr size) {
 }
 
 #if !SANITIZER_GO
-void ScopedReportBase::AddSleep(u32 stack_id) {
+void ScopedReportBase::AddSleep(StackID stack_id) {
   rep_->sleep = SymbolizeStackId(stack_id);
 }
 #endif
@@ -387,7 +387,7 @@ ScopedReport::ScopedReport(ReportType typ, uptr tag)
 
 ScopedReport::~ScopedReport() {}
 
-void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
+void RestoreStack(Tid tid, const u64 epoch, VarSizeStackTrace *stk,
                   MutexSet *mset, uptr *tag) {
   // This function restores stack trace and mutex set for the thread/epoch.
   // It does so by getting stack trace and mutex set at the beginning of
@@ -450,6 +450,234 @@ void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
   ExtractTagFromStack(stk, tag);
 }
 
+namespace v3 {
+
+// Replays the trace up to last_pos position in the last part
+// or up to the provided epoch/sid (whichever is earlier)
+// and calls the provided function f for each event.
+template <typename Func>
+void TraceReplay(Trace *trace, TracePart *last, Event *last_pos, Sid sid,
+                 Epoch epoch, Func f) {
+  TracePart *part = trace->parts.Front();
+  Sid ev_sid = kFreeSid;
+  Epoch ev_epoch = kEpochOver;
+  for (;;) {
+    DCHECK_EQ(part->trace, trace);
+    // Note: an event can't start in the last element.
+    // Since an event can take up to 2 elements,
+    // we ensure we have at least 2 before adding an event.
+    Event *end = &part->events[TracePart::kSize - 1];
+    if (part == last)
+      end = last_pos;
+    for (Event *evp = &part->events[0]; evp < end; evp++) {
+      Event *evp0 = evp;
+      if (!evp->is_access && !evp->is_func) {
+        switch (evp->type) {
+          case EventType::kTime: {
+            auto *ev = reinterpret_cast<EventTime *>(evp);
+            ev_sid = static_cast<Sid>(ev->sid);
+            ev_epoch = static_cast<Epoch>(ev->epoch);
+            if (ev_sid == sid && ev_epoch > epoch)
+              return;
+            break;
+          }
+          case EventType::kAccessExt:
+            FALLTHROUGH;
+          case EventType::kAccessRange:
+            FALLTHROUGH;
+          case EventType::kLock:
+            FALLTHROUGH;
+          case EventType::kRLock:
+            // These take 2 Event elements.
+            evp++;
+            break;
+          case EventType::kUnlock:
+            // This takes 1 Event element.
+            break;
+        }
+      }
+      CHECK_NE(ev_sid, kFreeSid);
+      CHECK_NE(ev_epoch, kEpochOver);
+      f(ev_sid, ev_epoch, evp0);
+    }
+    if (part == last)
+      return;
+    part = trace->parts.Next(part);
+    CHECK(part);
+  }
+  CHECK(0);
+}
+
+static void RestoreStackMatch(VarSizeStackTrace *pstk, MutexSet *pmset,
+                              Vector<uptr> *stack, MutexSet *mset, uptr pc,
+                              bool *found) {
+  DPrintf2("    MATCHED\n");
+  *pmset = *mset;
+  stack->PushBack(pc);
+  pstk->Init(&(*stack)[0], stack->Size());
+  stack->PopBack();
+  *found = true;
+}
+
+// Checks if addr1|size1 is fully contained in addr2|size2.
+// We check for fully contained instead of just overlapping
+// because a memory access is always traced once, but can be
+// split into multiple accesses in the shadow.
+static constexpr bool IsWithinAccess(uptr addr1, uptr size1, uptr addr2,
+                                     uptr size2) {
+  return addr1 >= addr2 && addr1 + size1 <= addr2 + size2;
+}
+
+// Replays the trace of thread tid up to the target event identified
+// by sid/epoch/addr/size/typ and restores and returns stack, mutex set
+// and tag for that event. If there are multiple such events, it returns
+// the last one. Returns false if the event is not present in the trace.
+bool RestoreStack(Tid tid, EventType type, Sid sid, Epoch epoch, uptr addr,
+                  uptr size, AccessType typ, VarSizeStackTrace *pstk,
+                  MutexSet *pmset, uptr *ptag) {
+  // This function restores stack trace and mutex set for the thread/epoch.
+  // It does so by getting stack trace and mutex set at the beginning of
+  // trace part, and then replaying the trace till the given epoch.
+  DPrintf2("RestoreStack: tid=%u sid=%u@%u addr=0x%zx/%zu typ=%x\n", tid,
+           static_cast<int>(sid), static_cast<int>(epoch), addr, size,
+           static_cast<int>(typ));
+  ctx->slot_mtx.CheckLocked();  // needed to prevent trace part recycling
+  ctx->thread_registry.CheckLocked();
+  ThreadContext *tctx =
+      static_cast<ThreadContext *>(ctx->thread_registry.GetThreadLocked(tid));
+  Trace *trace = &tctx->trace;
+  // Snapshot first/last parts and the current position in the last part.
+  TracePart *first_part;
+  TracePart *last_part;
+  Event *last_pos;
+  {
+    Lock lock(&trace->mtx);
+    first_part = trace->parts.Front();
+    if (!first_part)
+      return false;
+    last_part = trace->parts.Back();
+    last_pos = trace->final_pos;
+    if (tctx->thr)
+      last_pos = (Event *)atomic_load_relaxed(&tctx->thr->trace_pos);
+  }
+  // Too large for stack.
+  alignas(MutexSet) static char mset_storage[sizeof(MutexSet)];
+  MutexSet &mset = *new (mset_storage) MutexSet();
+  Vector<uptr> stack;
+  uptr prev_pc = 0;
+  bool found = false;
+  bool is_read = typ & kAccessRead;
+  bool is_atomic = typ & kAccessAtomic;
+  bool is_free = typ & kAccessFree;
+  TraceReplay(
+      trace, last_part, last_pos, sid, epoch,
+      [&](Sid ev_sid, Epoch ev_epoch, Event *evp) {
+        bool match = ev_sid == sid && ev_epoch == epoch;
+        if (evp->is_access) {
+          if (evp->is_func == 0 && evp->type == EventType::kAccessExt &&
+              evp->_ == 0)  // NopEvent
+            return;
+          auto *ev = reinterpret_cast<EventAccess *>(evp);
+          uptr ev_addr = RestoreAddr(ev->addr);
+          uptr ev_size = 1 << ev->size_log;
+          uptr ev_pc =
+              prev_pc + ev->pc_delta - (1 << (EventAccess::kPCBits - 1));
+          prev_pc = ev_pc;
+          DPrintf2("  Access: pc=0x%zx addr=0x%zx/%zu type=%u/%u\n", ev_pc,
+                   ev_addr, ev_size, ev->is_read, ev->is_atomic);
+          if (match && type == EventType::kAccessExt &&
+              IsWithinAccess(addr, size, ev_addr, ev_size) &&
+              is_read == ev->is_read && is_atomic == ev->is_atomic && !is_free)
+            RestoreStackMatch(pstk, pmset, &stack, &mset, ev_pc, &found);
+          return;
+        }
+        if (evp->is_func) {
+          auto *ev = reinterpret_cast<EventFunc *>(evp);
+          if (ev->pc) {
+            DPrintf2("  FuncEnter: pc=0x%llx\n", ev->pc);
+            stack.PushBack(ev->pc);
+          } else {
+            DPrintf2("  FuncExit\n");
+            CHECK(stack.Size());
+            stack.PopBack();
+          }
+          return;
+        }
+        switch (evp->type) {
+          case EventType::kAccessExt: {
+            auto *ev = reinterpret_cast<EventAccessExt *>(evp);
+            uptr ev_addr = RestoreAddr(ev->addr);
+            uptr ev_size = 1 << ev->size_log;
+            prev_pc = ev->pc;
+            DPrintf2("  AccessExt: pc=0x%llx addr=0x%zx/%zu type=%u/%u\n",
+                     ev->pc, ev_addr, ev_size, ev->is_read, ev->is_atomic);
+            if (match && type == EventType::kAccessExt &&
+                IsWithinAccess(addr, size, ev_addr, ev_size) &&
+                is_read == ev->is_read && is_atomic == ev->is_atomic &&
+                !is_free)
+              RestoreStackMatch(pstk, pmset, &stack, &mset, ev->pc, &found);
+            break;
+          }
+          case EventType::kAccessRange: {
+            auto *ev = reinterpret_cast<EventAccessRange *>(evp);
+            uptr ev_addr = RestoreAddr(ev->addr);
+            uptr ev_size =
+                (ev->size_hi << EventAccessRange::kSizeLoBits) + ev->size_lo;
+            uptr ev_pc = RestoreAddr(ev->pc);
+            prev_pc = ev_pc;
+            DPrintf2("  Range: pc=0x%zx addr=0x%zx/%zu type=%u/%u\n", ev_pc,
+                     ev_addr, ev_size, ev->is_read, ev->is_free);
+            if (match && type == EventType::kAccessExt &&
+                IsWithinAccess(addr, size, ev_addr, ev_size) &&
+                is_read == ev->is_read && !is_atomic && is_free == ev->is_free)
+              RestoreStackMatch(pstk, pmset, &stack, &mset, ev_pc, &found);
+            break;
+          }
+          case EventType::kLock:
+            FALLTHROUGH;
+          case EventType::kRLock: {
+            auto *ev = reinterpret_cast<EventLock *>(evp);
+            bool is_write = ev->type == EventType::kLock;
+            uptr ev_addr = RestoreAddr(ev->addr);
+            uptr ev_pc = RestoreAddr(ev->pc);
+            StackID stack_id =
+                (ev->stack_hi << EventLock::kStackIDLoBits) + ev->stack_lo;
+            DPrintf2("  Lock: pc=0x%zx addr=0x%zx stack=%u write=%d\n", ev_pc,
+                     ev_addr, stack_id, is_write);
+            mset.AddAddr(ev_addr, stack_id, is_write);
+            // Events with ev_pc == 0 are written to the beginning of trace
+            // part as initial mutex set (are not real).
+            if (match && type == EventType::kLock && addr == ev_addr && ev_pc)
+              RestoreStackMatch(pstk, pmset, &stack, &mset, ev_pc, &found);
+            break;
+          }
+          case EventType::kUnlock: {
+            auto *ev = reinterpret_cast<EventUnlock *>(evp);
+            uptr ev_addr = RestoreAddr(ev->addr);
+            DPrintf2("  Unlock: addr=0x%zx\n", ev_addr);
+            mset.DelAddr(ev_addr);
+            break;
+          }
+          case EventType::kTime:
+            // TraceReplay already extracted sid/epoch from it,
+            // nothing else to do here.
+            break;
+        }
+      });
+  ExtractTagFromStack(pstk, ptag);
+  return found;
+}
+
+}  // namespace v3
+
+bool RacyStacks::operator==(const RacyStacks &other) const {
+  if (hash[0] == other.hash[0] && hash[1] == other.hash[1])
+    return true;
+  if (hash[0] == other.hash[1] && hash[1] == other.hash[0])
+    return true;
+  return false;
+}
+
 static bool FindRacyStacks(const RacyStacks &hash) {
   for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) {
     if (hash == ctx->racy_stacks[i]) {
@@ -596,7 +824,7 @@ static bool RaceBetweenAtomicAndFree(ThreadState *thr) {
 }
 
 void ReportRace(ThreadState *thr) {
-  CheckNoLocks(thr);
+  CheckedMutex::CheckNoLocks();
 
   // Symbolizer makes lots of intercepted calls. If we try to process them,
   // at best it will cause deadlocks on internal mutexes.
@@ -614,7 +842,7 @@ void ReportRace(ThreadState *thr) {
     thr->racy_state[1] = s.raw();
   }
 
-  uptr addr = ShadowToMem((uptr)thr->racy_shadow_addr);
+  uptr addr = ShadowToMem(thr->racy_shadow_addr);
   uptr addr_min = 0;
   uptr addr_max = 0;
   {
@@ -692,7 +920,7 @@ void ReportRace(ThreadState *thr) {
     }
   }
 
-  ThreadRegistryLock l0(ctx->thread_registry);
+  ThreadRegistryLock l0(&ctx->thread_registry);
   ScopedReport rep(typ, tag);
   for (uptr i = 0; i < kMop; i++) {
     Shadow s(thr->racy_state[i]);
@@ -702,8 +930,8 @@ void ReportRace(ThreadState *thr) {
 
   for (uptr i = 0; i < kMop; i++) {
     FastState s(thr->racy_state[i]);
-    ThreadContext *tctx = static_cast<ThreadContext*>(
-        ctx->thread_registry->GetThreadLocked(s.tid()));
+    ThreadContext *tctx = static_cast<ThreadContext *>(
+        ctx->thread_registry.GetThreadLocked(s.tid()));
     if (s.epoch() < tctx->epoch0 || s.epoch() > tctx->epoch1)
       continue;
     rep.AddThread(tctx);
@@ -738,9 +966,7 @@ void PrintCurrentStack(ThreadState *thr, uptr pc) {
 ALWAYS_INLINE USED void PrintCurrentStackSlow(uptr pc) {
 #if !SANITIZER_GO
   uptr bp = GET_CURRENT_FRAME();
-  BufferedStackTrace *ptrace =
-      new(internal_alloc(MBlockStackTrace, sizeof(BufferedStackTrace)))
-          BufferedStackTrace();
+  auto *ptrace = New<BufferedStackTrace>();
   ptrace->Unwind(pc, bp, nullptr, false);
 
   for (uptr i = 0; i < ptrace->size / 2; i++) {
index cdb6e60ebbd0314b758007272cb0b72ddb50443d..61133a4a3e7eacfb28dc58d9de6b90c7662f7504 100644 (file)
@@ -21,48 +21,14 @@ namespace __tsan {
 
 // ThreadContext implementation.
 
-ThreadContext::ThreadContext(int tid)
-  : ThreadContextBase(tid)
-  , thr()
-  , sync()
-  , epoch0()
-  , epoch1() {
-}
+ThreadContext::ThreadContext(Tid tid)
+    : ThreadContextBase(tid), thr(), sync(), epoch0(), epoch1() {}
 
 #if !SANITIZER_GO
 ThreadContext::~ThreadContext() {
 }
 #endif
 
-void ThreadContext::OnDead() {
-  CHECK_EQ(sync.size(), 0);
-}
-
-void ThreadContext::OnJoined(void *arg) {
-  ThreadState *caller_thr = static_cast<ThreadState *>(arg);
-  AcquireImpl(caller_thr, 0, &sync);
-  sync.Reset(&caller_thr->proc()->clock_cache);
-}
-
-struct OnCreatedArgs {
-  ThreadState *thr;
-  uptr pc;
-};
-
-void ThreadContext::OnCreated(void *arg) {
-  thr = 0;
-  if (tid == kMainTid)
-    return;
-  OnCreatedArgs *args = static_cast<OnCreatedArgs *>(arg);
-  if (!args->thr)  // GCD workers don't have a parent thread.
-    return;
-  args->thr->fast_state.IncrementEpoch();
-  // Can't increment epoch w/o writing to the trace as well.
-  TraceAddEvent(args->thr, args->thr->fast_state, EventTypeMop, 0);
-  ReleaseImpl(args->thr, 0, &sync);
-  creation_stack_id = CurrentStackId(args->thr, args->pc);
-}
-
 void ThreadContext::OnReset() {
   CHECK_EQ(sync.size(), 0);
   uptr trace_p = GetThreadTrace(tid);
@@ -70,94 +36,15 @@ void ThreadContext::OnReset() {
   //!!! ReleaseMemoryToOS(GetThreadTraceHeader(tid), sizeof(Trace));
 }
 
-void ThreadContext::OnDetached(void *arg) {
-  ThreadState *thr1 = static_cast<ThreadState*>(arg);
-  sync.Reset(&thr1->proc()->clock_cache);
-}
-
-struct OnStartedArgs {
-  ThreadState *thr;
-  uptr stk_addr;
-  uptr stk_size;
-  uptr tls_addr;
-  uptr tls_size;
-};
-
-void ThreadContext::OnStarted(void *arg) {
-  OnStartedArgs *args = static_cast<OnStartedArgs*>(arg);
-  thr = args->thr;
-  // RoundUp so that one trace part does not contain events
-  // from different threads.
-  epoch0 = RoundUp(epoch1 + 1, kTracePartSize);
-  epoch1 = (u64)-1;
-  new(thr) ThreadState(ctx, tid, unique_id, epoch0, reuse_count,
-      args->stk_addr, args->stk_size, args->tls_addr, args->tls_size);
-#if !SANITIZER_GO
-  thr->shadow_stack = &ThreadTrace(thr->tid)->shadow_stack[0];
-  thr->shadow_stack_pos = thr->shadow_stack;
-  thr->shadow_stack_end = thr->shadow_stack + kShadowStackSize;
-#else
-  // Setup dynamic shadow stack.
-  const int kInitStackSize = 8;
-  thr->shadow_stack = (uptr*)internal_alloc(MBlockShadowStack,
-      kInitStackSize * sizeof(uptr));
-  thr->shadow_stack_pos = thr->shadow_stack;
-  thr->shadow_stack_end = thr->shadow_stack + kInitStackSize;
-#endif
-  if (common_flags()->detect_deadlocks)
-    thr->dd_lt = ctx->dd->CreateLogicalThread(unique_id);
-  thr->fast_state.SetHistorySize(flags()->history_size);
-  // Commit switch to the new part of the trace.
-  // TraceAddEvent will reset stack0/mset0 in the new part for us.
-  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
-
-  thr->fast_synch_epoch = epoch0;
-  AcquireImpl(thr, 0, &sync);
-  sync.Reset(&thr->proc()->clock_cache);
-  thr->is_inited = true;
-  DPrintf("#%d: ThreadStart epoch=%zu stk_addr=%zx stk_size=%zx "
-          "tls_addr=%zx tls_size=%zx\n",
-          tid, (uptr)epoch0, args->stk_addr, args->stk_size,
-          args->tls_addr, args->tls_size);
-}
-
-void ThreadContext::OnFinished() {
-#if SANITIZER_GO
-  internal_free(thr->shadow_stack);
-  thr->shadow_stack = nullptr;
-  thr->shadow_stack_pos = nullptr;
-  thr->shadow_stack_end = nullptr;
-#endif
-  if (!detached) {
-    thr->fast_state.IncrementEpoch();
-    // Can't increment epoch w/o writing to the trace as well.
-    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
-    ReleaseImpl(thr, 0, &sync);
-  }
-  epoch1 = thr->fast_state.epoch();
-
-  if (common_flags()->detect_deadlocks)
-    ctx->dd->DestroyLogicalThread(thr->dd_lt);
-  thr->clock.ResetCached(&thr->proc()->clock_cache);
-#if !SANITIZER_GO
-  thr->last_sleep_clock.ResetCached(&thr->proc()->clock_cache);
-#endif
-#if !SANITIZER_GO
-  PlatformCleanUpThreadState(thr);
-#endif
-  thr->~ThreadState();
-  thr = 0;
-}
-
 #if !SANITIZER_GO
 struct ThreadLeak {
   ThreadContext *tctx;
   int count;
 };
 
-static void MaybeReportThreadLeak(ThreadContextBase *tctx_base, void *arg) {
-  Vector<ThreadLeak> &leaks = *(Vector<ThreadLeak>*)arg;
-  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
+static void CollectThreadLeaks(ThreadContextBase *tctx_base, void *arg) {
+  auto &leaks = *static_cast<Vector<ThreadLeak> *>(arg);
+  auto *tctx = static_cast<ThreadContext *>(tctx_base);
   if (tctx->detached || tctx->status != ThreadStatusFinished)
     return;
   for (uptr i = 0; i < leaks.Size(); i++) {
@@ -166,8 +53,7 @@ static void MaybeReportThreadLeak(ThreadContextBase *tctx_base, void *arg) {
       return;
     }
   }
-  ThreadLeak leak = {tctx, 1};
-  leaks.PushBack(leak);
+  leaks.PushBack({tctx, 1});
 }
 #endif
 
@@ -206,10 +92,10 @@ void ThreadFinalize(ThreadState *thr) {
 #if !SANITIZER_GO
   if (!ShouldReport(thr, ReportTypeThreadLeak))
     return;
-  ThreadRegistryLock l(ctx->thread_registry);
+  ThreadRegistryLock l(&ctx->thread_registry);
   Vector<ThreadLeak> leaks;
-  ctx->thread_registry->RunCallbackForEachThreadLocked(
-      MaybeReportThreadLeak, &leaks);
+  ctx->thread_registry.RunCallbackForEachThreadLocked(CollectThreadLeaks,
+                                                      &leaks);
   for (uptr i = 0; i < leaks.Size(); i++) {
     ScopedReport rep(ReportTypeThreadLeak);
     rep.AddThread(leaks[i].tctx, true);
@@ -221,20 +107,48 @@ void ThreadFinalize(ThreadState *thr) {
 
 int ThreadCount(ThreadState *thr) {
   uptr result;
-  ctx->thread_registry->GetNumberOfThreads(0, 0, &result);
+  ctx->thread_registry.GetNumberOfThreads(0, 0, &result);
   return (int)result;
 }
 
-int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
+struct OnCreatedArgs {
+  ThreadState *thr;
+  uptr pc;
+};
+
+Tid ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
   OnCreatedArgs args = { thr, pc };
   u32 parent_tid = thr ? thr->tid : kInvalidTid;  // No parent for GCD workers.
-  int tid =
-      ctx->thread_registry->CreateThread(uid, detached, parent_tid, &args);
+  Tid tid = ctx->thread_registry.CreateThread(uid, detached, parent_tid, &args);
   DPrintf("#%d: ThreadCreate tid=%d uid=%zu\n", parent_tid, tid, uid);
   return tid;
 }
 
-void ThreadStart(ThreadState *thr, int tid, tid_t os_id,
+void ThreadContext::OnCreated(void *arg) {
+  thr = 0;
+  if (tid == kMainTid)
+    return;
+  OnCreatedArgs *args = static_cast<OnCreatedArgs *>(arg);
+  if (!args->thr)  // GCD workers don't have a parent thread.
+    return;
+  args->thr->fast_state.IncrementEpoch();
+  // Can't increment epoch w/o writing to the trace as well.
+  TraceAddEvent(args->thr, args->thr->fast_state, EventTypeMop, 0);
+  ReleaseImpl(args->thr, 0, &sync);
+  creation_stack_id = CurrentStackId(args->thr, args->pc);
+}
+
+extern "C" void __tsan_stack_initialization() {}
+
+struct OnStartedArgs {
+  ThreadState *thr;
+  uptr stk_addr;
+  uptr stk_size;
+  uptr tls_addr;
+  uptr tls_size;
+};
+
+void ThreadStart(ThreadState *thr, Tid tid, tid_t os_id,
                  ThreadType thread_type) {
   uptr stk_addr = 0;
   uptr stk_size = 0;
@@ -244,22 +158,13 @@ void ThreadStart(ThreadState *thr, int tid, tid_t os_id,
   if (thread_type != ThreadType::Fiber)
     GetThreadStackAndTls(tid == kMainTid, &stk_addr, &stk_size, &tls_addr,
                          &tls_size);
-
-  if (tid != kMainTid) {
-    if (stk_addr && stk_size)
-      MemoryRangeImitateWrite(thr, /*pc=*/ 1, stk_addr, stk_size);
-
-    if (tls_addr && tls_size) ImitateTlsWrite(thr, tls_addr, tls_size);
-  }
 #endif
 
-  ThreadRegistry *tr = ctx->thread_registry;
+  ThreadRegistry *tr = &ctx->thread_registry;
   OnStartedArgs args = { thr, stk_addr, stk_size, tls_addr, tls_size };
   tr->StartThread(tid, os_id, thread_type, &args);
 
-  tr->Lock();
-  thr->tctx = (ThreadContext*)tr->GetThreadLocked(tid);
-  tr->Unlock();
+  while (!thr->tctx->trace.parts.Empty()) thr->tctx->trace.parts.PopBack();
 
 #if !SANITIZER_GO
   if (ctx->after_multithreaded_fork) {
@@ -268,6 +173,51 @@ void ThreadStart(ThreadState *thr, int tid, tid_t os_id,
     ThreadIgnoreSyncBegin(thr, 0);
   }
 #endif
+
+#if !SANITIZER_GO
+  // Don't imitate stack/TLS writes for the main thread,
+  // because its initialization is synchronized with all
+  // subsequent threads anyway.
+  if (tid != kMainTid) {
+    if (stk_addr && stk_size) {
+      const uptr pc = StackTrace::GetNextInstructionPc(
+          reinterpret_cast<uptr>(__tsan_stack_initialization));
+      MemoryRangeImitateWrite(thr, pc, stk_addr, stk_size);
+    }
+
+    if (tls_addr && tls_size)
+      ImitateTlsWrite(thr, tls_addr, tls_size);
+  }
+#endif
+}
+
+void ThreadContext::OnStarted(void *arg) {
+  OnStartedArgs *args = static_cast<OnStartedArgs *>(arg);
+  thr = args->thr;
+  // RoundUp so that one trace part does not contain events
+  // from different threads.
+  epoch0 = RoundUp(epoch1 + 1, kTracePartSize);
+  epoch1 = (u64)-1;
+  new (thr)
+      ThreadState(ctx, tid, unique_id, epoch0, reuse_count, args->stk_addr,
+                  args->stk_size, args->tls_addr, args->tls_size);
+  if (common_flags()->detect_deadlocks)
+    thr->dd_lt = ctx->dd->CreateLogicalThread(unique_id);
+  thr->fast_state.SetHistorySize(flags()->history_size);
+  // Commit switch to the new part of the trace.
+  // TraceAddEvent will reset stack0/mset0 in the new part for us.
+  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
+
+  thr->fast_synch_epoch = epoch0;
+  AcquireImpl(thr, 0, &sync);
+  sync.Reset(&thr->proc()->clock_cache);
+  thr->tctx = this;
+  thr->is_inited = true;
+  DPrintf(
+      "#%d: ThreadStart epoch=%zu stk_addr=%zx stk_size=%zx "
+      "tls_addr=%zx tls_size=%zx\n",
+      tid, (uptr)epoch0, args->stk_addr, args->stk_size, args->tls_addr,
+      args->tls_size);
 }
 
 void ThreadFinish(ThreadState *thr) {
@@ -277,7 +227,34 @@ void ThreadFinish(ThreadState *thr) {
   if (thr->tls_addr && thr->tls_size)
     DontNeedShadowFor(thr->tls_addr, thr->tls_size);
   thr->is_dead = true;
-  ctx->thread_registry->FinishThread(thr->tid);
+  ctx->thread_registry.FinishThread(thr->tid);
+}
+
+void ThreadContext::OnFinished() {
+#if SANITIZER_GO
+  Free(thr->shadow_stack);
+  thr->shadow_stack_pos = nullptr;
+  thr->shadow_stack_end = nullptr;
+#endif
+  if (!detached) {
+    thr->fast_state.IncrementEpoch();
+    // Can't increment epoch w/o writing to the trace as well.
+    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
+    ReleaseImpl(thr, 0, &sync);
+  }
+  epoch1 = thr->fast_state.epoch();
+
+  if (common_flags()->detect_deadlocks)
+    ctx->dd->DestroyLogicalThread(thr->dd_lt);
+  thr->clock.ResetCached(&thr->proc()->clock_cache);
+#if !SANITIZER_GO
+  thr->last_sleep_clock.ResetCached(&thr->proc()->clock_cache);
+#endif
+#if !SANITIZER_GO
+  PlatformCleanUpThreadState(thr);
+#endif
+  thr->~ThreadState();
+  thr = 0;
 }
 
 struct ConsumeThreadContext {
@@ -302,35 +279,48 @@ static bool ConsumeThreadByUid(ThreadContextBase *tctx, void *arg) {
   return false;
 }
 
-int ThreadConsumeTid(ThreadState *thr, uptr pc, uptr uid) {
+Tid ThreadConsumeTid(ThreadState *thr, uptr pc, uptr uid) {
   ConsumeThreadContext findCtx = {uid, nullptr};
-  ctx->thread_registry->FindThread(ConsumeThreadByUid, &findCtx);
-  int tid = findCtx.tctx ? findCtx.tctx->tid : kInvalidTid;
+  ctx->thread_registry.FindThread(ConsumeThreadByUid, &findCtx);
+  Tid tid = findCtx.tctx ? findCtx.tctx->tid : kInvalidTid;
   DPrintf("#%d: ThreadTid uid=%zu tid=%d\n", thr->tid, uid, tid);
   return tid;
 }
 
-void ThreadJoin(ThreadState *thr, uptr pc, int tid) {
+void ThreadJoin(ThreadState *thr, uptr pc, Tid tid) {
   CHECK_GT(tid, 0);
   CHECK_LT(tid, kMaxTid);
   DPrintf("#%d: ThreadJoin tid=%d\n", thr->tid, tid);
-  ctx->thread_registry->JoinThread(tid, thr);
+  ctx->thread_registry.JoinThread(tid, thr);
 }
 
-void ThreadDetach(ThreadState *thr, uptr pc, int tid) {
+void ThreadContext::OnJoined(void *arg) {
+  ThreadState *caller_thr = static_cast<ThreadState *>(arg);
+  AcquireImpl(caller_thr, 0, &sync);
+  sync.Reset(&caller_thr->proc()->clock_cache);
+}
+
+void ThreadContext::OnDead() { CHECK_EQ(sync.size(), 0); }
+
+void ThreadDetach(ThreadState *thr, uptr pc, Tid tid) {
   CHECK_GT(tid, 0);
   CHECK_LT(tid, kMaxTid);
-  ctx->thread_registry->DetachThread(tid, thr);
+  ctx->thread_registry.DetachThread(tid, thr);
+}
+
+void ThreadContext::OnDetached(void *arg) {
+  ThreadState *thr1 = static_cast<ThreadState *>(arg);
+  sync.Reset(&thr1->proc()->clock_cache);
 }
 
-void ThreadNotJoined(ThreadState *thr, uptr pc, int tid, uptr uid) {
+void ThreadNotJoined(ThreadState *thr, uptr pc, Tid tid, uptr uid) {
   CHECK_GT(tid, 0);
   CHECK_LT(tid, kMaxTid);
-  ctx->thread_registry->SetThreadUserId(tid, uid);
+  ctx->thread_registry.SetThreadUserId(tid, uid);
 }
 
 void ThreadSetName(ThreadState *thr, const char *name) {
-  ctx->thread_registry->SetThreadName(thr->tid, name);
+  ctx->thread_registry.SetThreadName(thr->tid, name);
 }
 
 void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
@@ -338,7 +328,7 @@ void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
   if (size == 0)
     return;
 
-  u64 *shadow_mem = (u64*)MemToShadow(addr);
+  RawShadow *shadow_mem = MemToShadow(addr);
   DPrintf2("#%d: MemoryAccessRange: @%p %p size=%d is_write=%d\n",
       thr->tid, (void*)pc, (void*)addr,
       (int)size, is_write);
@@ -352,14 +342,14 @@ void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
     Printf("Access to non app mem %zx\n", addr + size - 1);
     DCHECK(IsAppMem(addr + size - 1));
   }
-  if (!IsShadowMem((uptr)shadow_mem)) {
+  if (!IsShadowMem(shadow_mem)) {
     Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
-    DCHECK(IsShadowMem((uptr)shadow_mem));
+    DCHECK(IsShadowMem(shadow_mem));
   }
-  if (!IsShadowMem((uptr)(shadow_mem + size * kShadowCnt / 8 - 1))) {
+  if (!IsShadowMem(shadow_mem + size * kShadowCnt / 8 - 1)) {
     Printf("Bad shadow addr %p (%zx)\n",
                shadow_mem + size * kShadowCnt / 8 - 1, addr + size - 1);
-    DCHECK(IsShadowMem((uptr)(shadow_mem + size * kShadowCnt / 8 - 1)));
+    DCHECK(IsShadowMem(shadow_mem + size * kShadowCnt / 8 - 1));
   }
 #endif
 
@@ -421,10 +411,10 @@ void FiberSwitchImpl(ThreadState *from, ThreadState *to) {
 }
 
 ThreadState *FiberCreate(ThreadState *thr, uptr pc, unsigned flags) {
-  void *mem = internal_alloc(MBlockThreadContex, sizeof(ThreadState));
+  void *mem = Alloc(sizeof(ThreadState));
   ThreadState *fiber = static_cast<ThreadState *>(mem);
   internal_memset(fiber, 0, sizeof(*fiber));
-  int tid = ThreadCreate(thr, pc, 0, true);
+  Tid tid = ThreadCreate(thr, pc, 0, true);
   FiberSwitchImpl(thr, fiber);
   ThreadStart(fiber, tid, 0, ThreadType::Fiber);
   FiberSwitchImpl(fiber, thr);
@@ -435,7 +425,7 @@ void FiberDestroy(ThreadState *thr, uptr pc, ThreadState *fiber) {
   FiberSwitchImpl(thr, fiber);
   ThreadFinish(fiber);
   FiberSwitchImpl(fiber, thr);
-  internal_free(fiber);
+  Free(fiber);
 }
 
 void FiberSwitch(ThreadState *thr, uptr pc,
diff --git a/libsanitizer/tsan/tsan_shadow.h b/libsanitizer/tsan/tsan_shadow.h
new file mode 100644 (file)
index 0000000..8b7bc34
--- /dev/null
@@ -0,0 +1,233 @@
+//===-- tsan_shadow.h -------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef TSAN_SHADOW_H
+#define TSAN_SHADOW_H
+
+#include "tsan_defs.h"
+#include "tsan_trace.h"
+
+namespace __tsan {
+
+// FastState (from most significant bit):
+//   ignore          : 1
+//   tid             : kTidBits
+//   unused          : -
+//   history_size    : 3
+//   epoch           : kClkBits
+class FastState {
+ public:
+  FastState(u64 tid, u64 epoch) {
+    x_ = tid << kTidShift;
+    x_ |= epoch;
+    DCHECK_EQ(tid, this->tid());
+    DCHECK_EQ(epoch, this->epoch());
+    DCHECK_EQ(GetIgnoreBit(), false);
+  }
+
+  explicit FastState(u64 x) : x_(x) {}
+
+  u64 raw() const { return x_; }
+
+  u64 tid() const {
+    u64 res = (x_ & ~kIgnoreBit) >> kTidShift;
+    return res;
+  }
+
+  u64 TidWithIgnore() const {
+    u64 res = x_ >> kTidShift;
+    return res;
+  }
+
+  u64 epoch() const {
+    u64 res = x_ & ((1ull << kClkBits) - 1);
+    return res;
+  }
+
+  void IncrementEpoch() {
+    u64 old_epoch = epoch();
+    x_ += 1;
+    DCHECK_EQ(old_epoch + 1, epoch());
+    (void)old_epoch;
+  }
+
+  void SetIgnoreBit() { x_ |= kIgnoreBit; }
+  void ClearIgnoreBit() { x_ &= ~kIgnoreBit; }
+  bool GetIgnoreBit() const { return (s64)x_ < 0; }
+
+  void SetHistorySize(int hs) {
+    CHECK_GE(hs, 0);
+    CHECK_LE(hs, 7);
+    x_ = (x_ & ~(kHistoryMask << kHistoryShift)) | (u64(hs) << kHistoryShift);
+  }
+
+  ALWAYS_INLINE
+  int GetHistorySize() const {
+    return (int)((x_ >> kHistoryShift) & kHistoryMask);
+  }
+
+  void ClearHistorySize() { SetHistorySize(0); }
+
+  ALWAYS_INLINE
+  u64 GetTracePos() const {
+    const int hs = GetHistorySize();
+    // When hs == 0, the trace consists of 2 parts.
+    const u64 mask = (1ull << (kTracePartSizeBits + hs + 1)) - 1;
+    return epoch() & mask;
+  }
+
+ private:
+  friend class Shadow;
+  static const int kTidShift = 64 - kTidBits - 1;
+  static const u64 kIgnoreBit = 1ull << 63;
+  static const u64 kFreedBit = 1ull << 63;
+  static const u64 kHistoryShift = kClkBits;
+  static const u64 kHistoryMask = 7;
+  u64 x_;
+};
+
+// Shadow (from most significant bit):
+//   freed           : 1
+//   tid             : kTidBits
+//   is_atomic       : 1
+//   is_read         : 1
+//   size_log        : 2
+//   addr0           : 3
+//   epoch           : kClkBits
+class Shadow : public FastState {
+ public:
+  explicit Shadow(u64 x) : FastState(x) {}
+
+  explicit Shadow(const FastState &s) : FastState(s.x_) { ClearHistorySize(); }
+
+  void SetAddr0AndSizeLog(u64 addr0, unsigned kAccessSizeLog) {
+    DCHECK_EQ((x_ >> kClkBits) & 31, 0);
+    DCHECK_LE(addr0, 7);
+    DCHECK_LE(kAccessSizeLog, 3);
+    x_ |= ((kAccessSizeLog << 3) | addr0) << kClkBits;
+    DCHECK_EQ(kAccessSizeLog, size_log());
+    DCHECK_EQ(addr0, this->addr0());
+  }
+
+  void SetWrite(unsigned kAccessIsWrite) {
+    DCHECK_EQ(x_ & kReadBit, 0);
+    if (!kAccessIsWrite)
+      x_ |= kReadBit;
+    DCHECK_EQ(kAccessIsWrite, IsWrite());
+  }
+
+  void SetAtomic(bool kIsAtomic) {
+    DCHECK(!IsAtomic());
+    if (kIsAtomic)
+      x_ |= kAtomicBit;
+    DCHECK_EQ(IsAtomic(), kIsAtomic);
+  }
+
+  bool IsAtomic() const { return x_ & kAtomicBit; }
+
+  bool IsZero() const { return x_ == 0; }
+
+  static inline bool TidsAreEqual(const Shadow s1, const Shadow s2) {
+    u64 shifted_xor = (s1.x_ ^ s2.x_) >> kTidShift;
+    DCHECK_EQ(shifted_xor == 0, s1.TidWithIgnore() == s2.TidWithIgnore());
+    return shifted_xor == 0;
+  }
+
+  static ALWAYS_INLINE bool Addr0AndSizeAreEqual(const Shadow s1,
+                                                 const Shadow s2) {
+    u64 masked_xor = ((s1.x_ ^ s2.x_) >> kClkBits) & 31;
+    return masked_xor == 0;
+  }
+
+  static ALWAYS_INLINE bool TwoRangesIntersect(Shadow s1, Shadow s2,
+                                               unsigned kS2AccessSize) {
+    bool res = false;
+    u64 diff = s1.addr0() - s2.addr0();
+    if ((s64)diff < 0) {  // s1.addr0 < s2.addr0
+      // if (s1.addr0() + size1) > s2.addr0()) return true;
+      if (s1.size() > -diff)
+        res = true;
+    } else {
+      // if (s2.addr0() + kS2AccessSize > s1.addr0()) return true;
+      if (kS2AccessSize > diff)
+        res = true;
+    }
+    DCHECK_EQ(res, TwoRangesIntersectSlow(s1, s2));
+    DCHECK_EQ(res, TwoRangesIntersectSlow(s2, s1));
+    return res;
+  }
+
+  u64 ALWAYS_INLINE addr0() const { return (x_ >> kClkBits) & 7; }
+  u64 ALWAYS_INLINE size() const { return 1ull << size_log(); }
+  bool ALWAYS_INLINE IsWrite() const { return !IsRead(); }
+  bool ALWAYS_INLINE IsRead() const { return x_ & kReadBit; }
+
+  // The idea behind the freed bit is as follows.
+  // When the memory is freed (or otherwise unaccessible) we write to the shadow
+  // values with tid/epoch related to the free and the freed bit set.
+  // During memory accesses processing the freed bit is considered
+  // as msb of tid. So any access races with shadow with freed bit set
+  // (it is as if write from a thread with which we never synchronized before).
+  // This allows us to detect accesses to freed memory w/o additional
+  // overheads in memory access processing and at the same time restore
+  // tid/epoch of free.
+  void MarkAsFreed() { x_ |= kFreedBit; }
+
+  bool IsFreed() const { return x_ & kFreedBit; }
+
+  bool GetFreedAndReset() {
+    bool res = x_ & kFreedBit;
+    x_ &= ~kFreedBit;
+    return res;
+  }
+
+  bool ALWAYS_INLINE IsBothReadsOrAtomic(bool kIsWrite, bool kIsAtomic) const {
+    bool v = x_ & ((u64(kIsWrite ^ 1) << kReadShift) |
+                   (u64(kIsAtomic) << kAtomicShift));
+    DCHECK_EQ(v, (!IsWrite() && !kIsWrite) || (IsAtomic() && kIsAtomic));
+    return v;
+  }
+
+  bool ALWAYS_INLINE IsRWNotWeaker(bool kIsWrite, bool kIsAtomic) const {
+    bool v = ((x_ >> kReadShift) & 3) <= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
+    DCHECK_EQ(v, (IsAtomic() < kIsAtomic) ||
+                     (IsAtomic() == kIsAtomic && !IsWrite() <= !kIsWrite));
+    return v;
+  }
+
+  bool ALWAYS_INLINE IsRWWeakerOrEqual(bool kIsWrite, bool kIsAtomic) const {
+    bool v = ((x_ >> kReadShift) & 3) >= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
+    DCHECK_EQ(v, (IsAtomic() > kIsAtomic) ||
+                     (IsAtomic() == kIsAtomic && !IsWrite() >= !kIsWrite));
+    return v;
+  }
+
+ private:
+  static const u64 kReadShift = 5 + kClkBits;
+  static const u64 kReadBit = 1ull << kReadShift;
+  static const u64 kAtomicShift = 6 + kClkBits;
+  static const u64 kAtomicBit = 1ull << kAtomicShift;
+
+  u64 size_log() const { return (x_ >> (3 + kClkBits)) & 3; }
+
+  static bool TwoRangesIntersectSlow(const Shadow s1, const Shadow s2) {
+    if (s1.addr0() == s2.addr0())
+      return true;
+    if (s1.addr0() < s2.addr0() && s1.addr0() + s1.size() > s2.addr0())
+      return true;
+    if (s2.addr0() < s1.addr0() && s2.addr0() + s2.size() > s1.addr0())
+      return true;
+    return false;
+  }
+};
+
+const RawShadow kShadowRodata = (RawShadow)-1;  // .rodata shadow marker
+
+}  // namespace __tsan
+
+#endif
index 6c703d7f2b1038f4e9717c88d3d177ed2c67eb33..9bbaafb3a85f595a9b37439f0a4dd45e6e54a774 100644 (file)
@@ -23,14 +23,10 @@ VarSizeStackTrace::~VarSizeStackTrace() {
 }
 
 void VarSizeStackTrace::ResizeBuffer(uptr new_size) {
-  if (trace_buffer) {
-    internal_free(trace_buffer);
-  }
-  trace_buffer =
-      (new_size > 0)
-          ? (uptr *)internal_alloc(MBlockStackTrace,
-                                   new_size * sizeof(trace_buffer[0]))
-          : nullptr;
+  Free(trace_buffer);
+  trace_buffer = (new_size > 0)
+                     ? (uptr *)Alloc(new_size * sizeof(trace_buffer[0]))
+                     : nullptr;
   trace = trace_buffer;
   size = new_size;
 }
index 6478f3a754ac7675c496bfa5213696c6d6b64594..2e2744d2eae782943fd68ec932c9795224a2ebc9 100644 (file)
@@ -110,7 +110,8 @@ ReportLocation *SymbolizeData(uptr addr) {
   DataInfo info;
   if (!Symbolizer::GetOrInit()->SymbolizeData(addr, &info))
     return 0;
-  ReportLocation *ent = ReportLocation::New(ReportLocationGlobal);
+  auto *ent = New<ReportLocation>();
+  ent->type = ReportLocationGlobal;
   internal_memcpy(&ent->global, &info, sizeof(info));
   return ent;
 }
index d25434af52a1fd4426588dd92ccc3720088df99a..f042abab74e5ebdaa46d38e38dcd25f52caabd76 100644 (file)
@@ -20,13 +20,14 @@ void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s);
 
 SyncVar::SyncVar() : mtx(MutexTypeSyncVar) { Reset(0); }
 
-void SyncVar::Init(ThreadState *thr, uptr pc, uptr addr, u64 uid) {
+void SyncVar::Init(ThreadState *thr, uptr pc, uptr addr, u64 uid,
+                   bool save_stack) {
   this->addr = addr;
   this->uid = uid;
   this->next = 0;
 
-  creation_stack_id = 0;
-  if (!SANITIZER_GO)  // Go does not use them
+  creation_stack_id = kInvalidStackID;
+  if (save_stack && !SANITIZER_GO)  // Go does not use them
     creation_stack_id = CurrentStackId(thr, pc);
   if (common_flags()->detect_deadlocks)
     DDMutexInit(thr, pc, this);
@@ -34,7 +35,7 @@ void SyncVar::Init(ThreadState *thr, uptr pc, uptr addr, u64 uid) {
 
 void SyncVar::Reset(Processor *proc) {
   uid = 0;
-  creation_stack_id = 0;
+  creation_stack_id = kInvalidStackID;
   owner_tid = kInvalidTid;
   last_lock = 0;
   recursion = 0;
@@ -190,63 +191,41 @@ MBlock* MetaMap::GetBlock(uptr p) {
   }
 }
 
-SyncVar* MetaMap::GetOrCreateAndLock(ThreadState *thr, uptr pc,
-                              uptr addr, bool write_lock) {
-  return GetAndLock(thr, pc, addr, write_lock, true);
-}
-
-SyncVar* MetaMap::GetIfExistsAndLock(uptr addr, bool write_lock) {
-  return GetAndLock(0, 0, addr, write_lock, false);
-}
-
-SyncVar* MetaMap::GetAndLock(ThreadState *thr, uptr pc,
-                             uptr addr, bool write_lock, bool create) {
+SyncVar *MetaMap::GetSync(ThreadState *thr, uptr pc, uptr addr, bool create,
+                          bool save_stack) {
   u32 *meta = MemToMeta(addr);
   u32 idx0 = *meta;
   u32 myidx = 0;
-  SyncVar *mys = 0;
+  SyncVar *mys = nullptr;
   for (;;) {
-    u32 idx = idx0;
-    for (;;) {
-      if (idx == 0)
-        break;
-      if (idx & kFlagBlock)
-        break;
+    for (u32 idx = idx0; idx && !(idx & kFlagBlock);) {
       DCHECK(idx & kFlagSync);
       SyncVar * s = sync_alloc_.Map(idx & ~kFlagMask);
-      if (s->addr == addr) {
-        if (myidx != 0) {
+      if (LIKELY(s->addr == addr)) {
+        if (UNLIKELY(myidx != 0)) {
           mys->Reset(thr->proc());
           sync_alloc_.Free(&thr->proc()->sync_cache, myidx);
         }
-        if (write_lock)
-          s->mtx.Lock();
-        else
-          s->mtx.ReadLock();
         return s;
       }
       idx = s->next;
     }
     if (!create)
-      return 0;
-    if (*meta != idx0) {
+      return nullptr;
+    if (UNLIKELY(*meta != idx0)) {
       idx0 = *meta;
       continue;
     }
 
-    if (myidx == 0) {
+    if (LIKELY(myidx == 0)) {
       const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed);
       myidx = sync_alloc_.Alloc(&thr->proc()->sync_cache);
       mys = sync_alloc_.Map(myidx);
-      mys->Init(thr, pc, addr, uid);
+      mys->Init(thr, pc, addr, uid, save_stack);
     }
     mys->next = idx0;
     if (atomic_compare_exchange_strong((atomic_uint32_t*)meta, &idx0,
         myidx | kFlagSync, memory_order_release)) {
-      if (write_lock)
-        mys->mtx.Lock();
-      else
-        mys->mtx.ReadLock();
       return mys;
     }
   }
@@ -290,4 +269,11 @@ void MetaMap::OnProcIdle(Processor *proc) {
   sync_alloc_.FlushCache(&proc->sync_cache);
 }
 
+MetaMap::MemoryStats MetaMap::GetMemoryStats() const {
+  MemoryStats stats;
+  stats.mem_block = block_alloc_.AllocatedMemory();
+  stats.sync_obj = sync_alloc_.AllocatedMemory();
+  return stats;
+}
+
 }  // namespace __tsan
index c4056f684d7e868358190bda40921fe18bb4a290..fc8fa288a841808cb52f78c7d0f6062a6c2b770e 100644 (file)
@@ -17,7 +17,6 @@
 #include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
 #include "tsan_defs.h"
 #include "tsan_clock.h"
-#include "tsan_mutex.h"
 #include "tsan_dense_alloc.h"
 
 namespace __tsan {
@@ -47,14 +46,16 @@ enum MutexFlags {
                                  MutexFlagNotStatic,
 };
 
+// SyncVar is a descriptor of a user synchronization object
+// (mutex or an atomic variable).
 struct SyncVar {
   SyncVar();
 
   uptr addr;  // overwritten by DenseSlabAlloc freelist
   Mutex mtx;
   u64 uid;  // Globally unique id.
-  u32 creation_stack_id;
-  u32 owner_tid;  // Set only by exclusive owners.
+  StackID creation_stack_id;
+  Tid owner_tid;  // Set only by exclusive owners.
   u64 last_lock;
   int recursion;
   atomic_uint32_t flags;
@@ -65,7 +66,7 @@ struct SyncVar {
   // with the mtx. This reduces contention for hot sync objects.
   SyncClock clock;
 
-  void Init(ThreadState *thr, uptr pc, uptr addr, u64 uid);
+  void Init(ThreadState *thr, uptr pc, uptr addr, u64 uid, bool save_stack);
   void Reset(Processor *proc);
 
   u64 GetId() const {
@@ -102,10 +103,8 @@ struct SyncVar {
   }
 };
 
-/* MetaMap allows to map arbitrary user pointers onto various descriptors.
-   Currently it maps pointers to heap block descriptors and sync var descs.
-   It uses 1/2 direct shadow, see tsan_platform.h.
-*/
+// MetaMap maps app addresses to heap block (MBlock) and sync var (SyncVar)
+// descriptors. It uses 1/2 direct shadow, see tsan_platform.h for the mapping.
 class MetaMap {
  public:
   MetaMap();
@@ -116,14 +115,25 @@ class MetaMap {
   void ResetRange(Processor *proc, uptr p, uptr sz);
   MBlock* GetBlock(uptr p);
 
-  SyncVar* GetOrCreateAndLock(ThreadState *thr, uptr pc,
-                              uptr addr, bool write_lock);
-  SyncVar* GetIfExistsAndLock(uptr addr, bool write_lock);
+  SyncVar *GetSyncOrCreate(ThreadState *thr, uptr pc, uptr addr,
+                           bool save_stack) {
+    return GetSync(thr, pc, addr, true, save_stack);
+  }
+  SyncVar *GetSyncIfExists(uptr addr) {
+    return GetSync(nullptr, 0, addr, false, false);
+  }
 
   void MoveMemory(uptr src, uptr dst, uptr sz);
 
   void OnProcIdle(Processor *proc);
 
+  struct MemoryStats {
+    uptr mem_block;
+    uptr sync_obj;
+  };
+
+  MemoryStats GetMemoryStats() const;
+
  private:
   static const u32 kFlagMask  = 3u << 30;
   static const u32 kFlagBlock = 1u << 30;
@@ -134,8 +144,8 @@ class MetaMap {
   SyncAlloc sync_alloc_;
   atomic_uint64_t uid_gen_;
 
-  SyncVar* GetAndLock(ThreadState *thr, uptr pc, uptr addr, bool write_lock,
-                      bool create);
+  SyncVar *GetSync(ThreadState *thr, uptr pc, uptr addr, bool create,
+                   bool save_stack);
 };
 
 }  // namespace __tsan
index 9f2677b778de524b3c70d9a5ac7273d76f7de6fc..a771ad9f52fd3c2da55d9ee444caf467b89aa54e 100644 (file)
@@ -13,9 +13,9 @@
 #define TSAN_TRACE_H
 
 #include "tsan_defs.h"
-#include "tsan_mutex.h"
-#include "tsan_stack_trace.h"
+#include "tsan_ilist.h"
 #include "tsan_mutexset.h"
+#include "tsan_stack_trace.h"
 
 namespace __tsan {
 
@@ -68,6 +68,155 @@ struct Trace {
   Trace() : mtx(MutexTypeTrace) {}
 };
 
+namespace v3 {
+
+enum class EventType : u64 {
+  kAccessExt,
+  kAccessRange,
+  kLock,
+  kRLock,
+  kUnlock,
+  kTime,
+};
+
+// "Base" type for all events for type dispatch.
+struct Event {
+  // We use variable-length type encoding to give more bits to some event
+  // types that need them. If is_access is set, this is EventAccess.
+  // Otherwise, if is_func is set, this is EventFunc.
+  // Otherwise type denotes the type.
+  u64 is_access : 1;
+  u64 is_func : 1;
+  EventType type : 3;
+  u64 _ : 59;
+};
+static_assert(sizeof(Event) == 8, "bad Event size");
+
+// Nop event used as padding and does not affect state during replay.
+static constexpr Event NopEvent = {1, 0, EventType::kAccessExt, 0};
+
+// Compressed memory access can represent only some events with PCs
+// close enough to each other. Otherwise we fall back to EventAccessExt.
+struct EventAccess {
+  static constexpr uptr kPCBits = 15;
+
+  u64 is_access : 1;  // = 1
+  u64 is_read : 1;
+  u64 is_atomic : 1;
+  u64 size_log : 2;
+  u64 pc_delta : kPCBits;  // signed delta from the previous memory access PC
+  u64 addr : kCompressedAddrBits;
+};
+static_assert(sizeof(EventAccess) == 8, "bad EventAccess size");
+
+// Function entry (pc != 0) or exit (pc == 0).
+struct EventFunc {
+  u64 is_access : 1;  // = 0
+  u64 is_func : 1;    // = 1
+  u64 pc : 62;
+};
+static_assert(sizeof(EventFunc) == 8, "bad EventFunc size");
+
+// Extended memory access with full PC.
+struct EventAccessExt {
+  u64 is_access : 1;   // = 0
+  u64 is_func : 1;     // = 0
+  EventType type : 3;  // = EventType::kAccessExt
+  u64 is_read : 1;
+  u64 is_atomic : 1;
+  u64 size_log : 2;
+  u64 _ : 11;
+  u64 addr : kCompressedAddrBits;
+  u64 pc;
+};
+static_assert(sizeof(EventAccessExt) == 16, "bad EventAccessExt size");
+
+// Access to a memory range.
+struct EventAccessRange {
+  static constexpr uptr kSizeLoBits = 13;
+
+  u64 is_access : 1;   // = 0
+  u64 is_func : 1;     // = 0
+  EventType type : 3;  // = EventType::kAccessRange
+  u64 is_read : 1;
+  u64 is_free : 1;
+  u64 size_lo : kSizeLoBits;
+  u64 pc : kCompressedAddrBits;
+  u64 addr : kCompressedAddrBits;
+  u64 size_hi : 64 - kCompressedAddrBits;
+};
+static_assert(sizeof(EventAccessRange) == 16, "bad EventAccessRange size");
+
+// Mutex lock.
+struct EventLock {
+  static constexpr uptr kStackIDLoBits = 15;
+
+  u64 is_access : 1;   // = 0
+  u64 is_func : 1;     // = 0
+  EventType type : 3;  // = EventType::kLock or EventType::kRLock
+  u64 pc : kCompressedAddrBits;
+  u64 stack_lo : kStackIDLoBits;
+  u64 stack_hi : sizeof(StackID) * kByteBits - kStackIDLoBits;
+  u64 _ : 3;
+  u64 addr : kCompressedAddrBits;
+};
+static_assert(sizeof(EventLock) == 16, "bad EventLock size");
+
+// Mutex unlock.
+struct EventUnlock {
+  u64 is_access : 1;   // = 0
+  u64 is_func : 1;     // = 0
+  EventType type : 3;  // = EventType::kUnlock
+  u64 _ : 15;
+  u64 addr : kCompressedAddrBits;
+};
+static_assert(sizeof(EventUnlock) == 8, "bad EventUnlock size");
+
+// Time change event.
+struct EventTime {
+  u64 is_access : 1;   // = 0
+  u64 is_func : 1;     // = 0
+  EventType type : 3;  // = EventType::kTime
+  u64 sid : sizeof(Sid) * kByteBits;
+  u64 epoch : kEpochBits;
+  u64 _ : 64 - 5 - sizeof(Sid) * kByteBits - kEpochBits;
+};
+static_assert(sizeof(EventTime) == 8, "bad EventTime size");
+
+struct Trace;
+
+struct TraceHeader {
+  Trace* trace = nullptr;  // back-pointer to Trace containing this part
+  INode trace_parts;       // in Trace::parts
+};
+
+struct TracePart : TraceHeader {
+  static constexpr uptr kByteSize = 256 << 10;
+  static constexpr uptr kSize =
+      (kByteSize - sizeof(TraceHeader)) / sizeof(Event);
+  // TraceAcquire does a fast event pointer overflow check by comparing
+  // pointer into TracePart::events with kAlignment mask. Since TracePart's
+  // are allocated page-aligned, this check detects end of the array
+  // (it also have false positives in the middle that are filtered separately).
+  // This also requires events to be the last field.
+  static constexpr uptr kAlignment = 0xff0;
+  Event events[kSize];
+
+  TracePart() {}
+};
+static_assert(sizeof(TracePart) == TracePart::kByteSize, "bad TracePart size");
+
+struct Trace {
+  Mutex mtx;
+  IList<TraceHeader, &TraceHeader::trace_parts, TracePart> parts;
+  Event* final_pos =
+      nullptr;  // final position in the last part for finished threads
+
+  Trace() : mtx(MutexTypeTrace) {}
+};
+
+}  // namespace v3
+
 }  // namespace __tsan
 
 #endif  // TSAN_TRACE_H
similarity index 96%
rename from libsanitizer/tsan/tsan_update_shadow_word_inl.h
rename to libsanitizer/tsan/tsan_update_shadow_word.inc
index d23dfb0ba061c7c693d076fa0fcbf24621b72b8d..a58ef0f17efa197bc2e6d9f253c7d0b8d3a509ca 100644 (file)
@@ -1,4 +1,4 @@
-//===-- tsan_update_shadow_word_inl.h ---------------------------*- C++ -*-===//
+//===-- tsan_update_shadow_word.inc -----------------------------*- C++ -*-===//
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
diff --git a/libsanitizer/tsan/tsan_vector_clock.cpp b/libsanitizer/tsan/tsan_vector_clock.cpp
new file mode 100644 (file)
index 0000000..2782985
--- /dev/null
@@ -0,0 +1,126 @@
+//===-- tsan_vector_clock.cpp ---------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_vector_clock.h"
+
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "tsan_mman.h"
+
+namespace __tsan {
+
+#if TSAN_VECTORIZE
+const uptr kVectorClockSize = kThreadSlotCount * sizeof(Epoch) / sizeof(m128);
+#endif
+
+VectorClock::VectorClock() { Reset(); }
+
+void VectorClock::Reset() {
+#if !TSAN_VECTORIZE
+  for (uptr i = 0; i < kThreadSlotCount; i++)
+    clk_[i] = kEpochZero;
+#else
+  m128 z = _mm_setzero_si128();
+  m128* vclk = reinterpret_cast<m128*>(clk_);
+  for (uptr i = 0; i < kVectorClockSize; i++) _mm_store_si128(&vclk[i], z);
+#endif
+}
+
+void VectorClock::Acquire(const VectorClock* src) {
+  if (!src)
+    return;
+#if !TSAN_VECTORIZE
+  for (uptr i = 0; i < kThreadSlotCount; i++)
+    clk_[i] = max(clk_[i], src->clk_[i]);
+#else
+  m128* __restrict vdst = reinterpret_cast<m128*>(clk_);
+  m128 const* __restrict vsrc = reinterpret_cast<m128 const*>(src->clk_);
+  for (uptr i = 0; i < kVectorClockSize; i++) {
+    m128 s = _mm_load_si128(&vsrc[i]);
+    m128 d = _mm_load_si128(&vdst[i]);
+    m128 m = _mm_max_epu16(s, d);
+    _mm_store_si128(&vdst[i], m);
+  }
+#endif
+}
+
+static VectorClock* AllocClock(VectorClock** dstp) {
+  if (UNLIKELY(!*dstp))
+    *dstp = New<VectorClock>();
+  return *dstp;
+}
+
+void VectorClock::Release(VectorClock** dstp) const {
+  VectorClock* dst = AllocClock(dstp);
+  dst->Acquire(this);
+}
+
+void VectorClock::ReleaseStore(VectorClock** dstp) const {
+  VectorClock* dst = AllocClock(dstp);
+  *dst = *this;
+}
+
+VectorClock& VectorClock::operator=(const VectorClock& other) {
+#if !TSAN_VECTORIZE
+  for (uptr i = 0; i < kThreadSlotCount; i++)
+    clk_[i] = other.clk_[i];
+#else
+  m128* __restrict vdst = reinterpret_cast<m128*>(clk_);
+  m128 const* __restrict vsrc = reinterpret_cast<m128 const*>(other.clk_);
+  for (uptr i = 0; i < kVectorClockSize; i++) {
+    m128 s = _mm_load_si128(&vsrc[i]);
+    _mm_store_si128(&vdst[i], s);
+  }
+#endif
+  return *this;
+}
+
+void VectorClock::ReleaseStoreAcquire(VectorClock** dstp) {
+  VectorClock* dst = AllocClock(dstp);
+#if !TSAN_VECTORIZE
+  for (uptr i = 0; i < kThreadSlotCount; i++) {
+    Epoch tmp = dst->clk_[i];
+    dst->clk_[i] = clk_[i];
+    clk_[i] = max(clk_[i], tmp);
+  }
+#else
+  m128* __restrict vdst = reinterpret_cast<m128*>(dst->clk_);
+  m128* __restrict vclk = reinterpret_cast<m128*>(clk_);
+  for (uptr i = 0; i < kVectorClockSize; i++) {
+    m128 t = _mm_load_si128(&vdst[i]);
+    m128 c = _mm_load_si128(&vclk[i]);
+    m128 m = _mm_max_epu16(c, t);
+    _mm_store_si128(&vdst[i], c);
+    _mm_store_si128(&vclk[i], m);
+  }
+#endif
+}
+
+void VectorClock::ReleaseAcquire(VectorClock** dstp) {
+  VectorClock* dst = AllocClock(dstp);
+#if !TSAN_VECTORIZE
+  for (uptr i = 0; i < kThreadSlotCount; i++) {
+    dst->clk_[i] = max(dst->clk_[i], clk_[i]);
+    clk_[i] = dst->clk_[i];
+  }
+#else
+  m128* __restrict vdst = reinterpret_cast<m128*>(dst->clk_);
+  m128* __restrict vclk = reinterpret_cast<m128*>(clk_);
+  for (uptr i = 0; i < kVectorClockSize; i++) {
+    m128 c = _mm_load_si128(&vclk[i]);
+    m128 d = _mm_load_si128(&vdst[i]);
+    m128 m = _mm_max_epu16(c, d);
+    _mm_store_si128(&vdst[i], m);
+    _mm_store_si128(&vclk[i], m);
+  }
+#endif
+}
+
+}  // namespace __tsan
diff --git a/libsanitizer/tsan/tsan_vector_clock.h b/libsanitizer/tsan/tsan_vector_clock.h
new file mode 100644 (file)
index 0000000..63b2063
--- /dev/null
@@ -0,0 +1,51 @@
+//===-- tsan_vector_clock.h -------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_VECTOR_CLOCK_H
+#define TSAN_VECTOR_CLOCK_H
+
+#include "tsan_defs.h"
+
+namespace __tsan {
+
+// Fixed-size vector clock, used both for threads and sync objects.
+// Maps each slot id (Sid) to the last-known Epoch for that slot in a flat
+// array of kThreadSlotCount entries; the array is VECTOR_ALIGNED so the
+// SSE paths in tsan_vector_clock.cpp can use aligned 128-bit loads/stores.
+class VectorClock {
+ public:
+  VectorClock();
+
+  // Read / write a single slot's epoch (inline definitions below).
+  Epoch Get(Sid sid) const;
+  void Set(Sid sid, Epoch v);
+
+  void Reset();
+  // Acquire/Release: merge operations between this clock and *src / *dstp;
+  // presumably element-wise max merges — see tsan_vector_clock.cpp for the
+  // definitions (not visible in this header).
+  void Acquire(const VectorClock* src);
+  void Release(VectorClock** dstp) const;
+  // *dstp := this (full overwrite; allocates *dstp on first use).
+  void ReleaseStore(VectorClock** dstp) const;
+  // *dstp := this, while this := max(this, old *dstp), element-wise.
+  void ReleaseStoreAcquire(VectorClock** dstp);
+  // Both this and *dstp become their element-wise maximum.
+  void ReleaseAcquire(VectorClock** dstp);
+
+  VectorClock& operator=(const VectorClock& other);
+
+ private:
+  // One epoch per slot; aligned for the vectorized implementations.
+  Epoch clk_[kThreadSlotCount] VECTOR_ALIGNED;
+};
+
+// Returns the epoch recorded for slot `sid`; the Sid is converted to its
+// underlying u8 value to index the flat clk_ array.
+ALWAYS_INLINE Epoch VectorClock::Get(Sid sid) const {
+  return clk_[static_cast<u8>(sid)];
+}
+
+// Sets slot `sid` to epoch `v`. The DCHECK enforces the invariant that a
+// slot's epoch is monotonically non-decreasing (debug builds only).
+ALWAYS_INLINE void VectorClock::Set(Sid sid, Epoch v) {
+  DCHECK_GE(v, clk_[static_cast<u8>(sid)]);
+  clk_[static_cast<u8>(sid)] = v;
+}
+
+}  // namespace __tsan
+
+#endif  // TSAN_VECTOR_CLOCK_H
index ef2e495cac8edc7d3e956e28768ac9f5c125ad17..8de51bc1877010382821d3d194982416c0325a38 100644 (file)
@@ -157,7 +157,7 @@ static void RenderLocation(InternalScopedString *Buffer, Location Loc) {
     return;
   }
   case Location::LK_Memory:
-    Buffer->append("%p", Loc.getMemoryLocation());
+    Buffer->append("%p", reinterpret_cast<void *>(Loc.getMemoryLocation()));
     return;
   case Location::LK_Symbolized: {
     const AddressInfo &Info = Loc.getSymbolizedStack()->info;
@@ -169,7 +169,7 @@ static void RenderLocation(InternalScopedString *Buffer, Location Loc) {
       RenderModuleLocation(Buffer, Info.module, Info.module_offset,
                            Info.module_arch, common_flags()->strip_path_prefix);
     else
-      Buffer->append("%p", Info.address);
+      Buffer->append("%p", reinterpret_cast<void *>(Info.address));
     return;
   }
   case Location::LK_Null:
@@ -286,7 +286,7 @@ static void PrintMemorySnippet(const Decorator &Decor, MemoryLocation Loc,
   Buffer.append("\n");
 
   // Emit highlights.
-  Buffer.append(Decor.Highlight());
+  Buffer.append("%s", Decor.Highlight());
   Range *InRange = upperBound(Min, Ranges, NumRanges);
   for (uptr P = Min; P != Max; ++P) {
     char Pad = ' ', Byte = ' ';
@@ -355,7 +355,7 @@ Diag::~Diag() {
     Buffer.clear();
   }
 
-  Buffer.append(Decor.Bold());
+  Buffer.append("%s", Decor.Bold());
   RenderLocation(&Buffer, Loc);
   Buffer.append(":");
 
index 9a66bd37518b3a0606049b761ffdd7ddf3c3c714..25cefd46ce27ced7fb6092d8d04b5074c56ebe95 100644 (file)
@@ -50,7 +50,6 @@ void InitializeFlags() {
   {
     CommonFlags cf;
     cf.CopyFrom(*common_flags());
-    cf.print_summary = false;
     cf.external_symbolizer_path = GetFlag("UBSAN_SYMBOLIZER_PATH");
     OverrideCommonFlags(cf);
   }
index 2184625aa6e9496eb5c933823a24a081a0599312..e201e6bba22078e3d873aeb5792b98ecd860cdaa 100644 (file)
@@ -894,21 +894,6 @@ void __ubsan_handle_cfi_bad_type(CFICheckFailData *Data, ValueHandle Vtable,
 
 }  // namespace __ubsan
 
-void __ubsan::__ubsan_handle_cfi_bad_icall(CFIBadIcallData *CallData,
-                                           ValueHandle Function) {
-  GET_REPORT_OPTIONS(false);
-  CFICheckFailData Data = {CFITCK_ICall, CallData->Loc, CallData->Type};
-  handleCFIBadIcall(&Data, Function, Opts);
-}
-
-void __ubsan::__ubsan_handle_cfi_bad_icall_abort(CFIBadIcallData *CallData,
-                                                 ValueHandle Function) {
-  GET_REPORT_OPTIONS(true);
-  CFICheckFailData Data = {CFITCK_ICall, CallData->Loc, CallData->Type};
-  handleCFIBadIcall(&Data, Function, Opts);
-  Die();
-}
-
 void __ubsan::__ubsan_handle_cfi_check_fail(CFICheckFailData *Data,
                                             ValueHandle Value,
                                             uptr ValidVtable) {
index 9f412353fc0ee9719d824dd4b2db75fb97587354..219fb15de55fe02a4544422095baa0d8532baaa0 100644 (file)
@@ -215,20 +215,12 @@ enum CFITypeCheckKind : unsigned char {
   CFITCK_VMFCall,
 };
 
-struct CFIBadIcallData {
-  SourceLocation Loc;
-  const TypeDescriptor &Type;
-};
-
 struct CFICheckFailData {
   CFITypeCheckKind CheckKind;
   SourceLocation Loc;
   const TypeDescriptor &Type;
 };
 
-/// \brief Handle control flow integrity failure for indirect function calls.
-RECOVERABLE(cfi_bad_icall, CFIBadIcallData *Data, ValueHandle Function)
-
 /// \brief Handle control flow integrity failures.
 RECOVERABLE(cfi_check_fail, CFICheckFailData *Data, ValueHandle Function,
             uptr VtableIsValid)
index ad3e883f0f35e9af401cf79fac1b48410ee3fb77..d2cc2e10bd2f023b8d9aa1685a79a192a6d1e1e8 100644 (file)
@@ -12,7 +12,6 @@
 #ifndef UBSAN_PLATFORM_H
 #define UBSAN_PLATFORM_H
 
-#ifndef CAN_SANITIZE_UB
 // Other platforms should be easy to add, and probably work as-is.
 #if defined(__linux__) || defined(__FreeBSD__) || defined(__APPLE__) ||        \
     defined(__NetBSD__) || defined(__DragonFly__) ||                           \
@@ -22,6 +21,5 @@
 #else
 # define CAN_SANITIZE_UB 0
 #endif
-#endif //CAN_SANITIZE_UB
 
 #endif