#ifndef _LINUX_INSTRUMENTED_H
#define _LINUX_INSTRUMENTED_H
+#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/kasan-checks.h>
#include <linux/kcsan-checks.h>
kcsan_check_read_write(v, size);
}
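+/**
+ * instrument_atomic_check_alignment - check alignment of an atomic access
+ * @v: address of access
+ * @size: size of access
+ *
+ * Warns once if CONFIG_DEBUG_ATOMIC is enabled and the access is not
+ * naturally aligned for its size.
+ */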
+static __always_inline void instrument_atomic_check_alignment(const volatile void *v, size_t size)
+{
+#ifndef __DISABLE_EXPORTS
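+ /* Atomic sizes are powers of two, so size - 1 is the alignment mask. */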
+ WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ATOMIC) && ((unsigned long)v & (size - 1)));
+#endif
+}
+
/**
* instrument_atomic_read - instrument atomic read access
* @v: address of access
{
kasan_check_read(v, size);
kcsan_check_atomic_read(v, size);
+ instrument_atomic_check_alignment(v, size);
}
/**
{
kasan_check_write(v, size);
kcsan_check_atomic_write(v, size);
+ instrument_atomic_check_alignment(v, size);
}
/**
{
kasan_check_write(v, size);
kcsan_check_atomic_read_write(v, size);
+ instrument_atomic_check_alignment(v, size);
}
/**
depending on workload as it triggers debugging routines for each
this_cpu operation. It should only be used for debugging purposes.
+config DEBUG_ATOMIC
+ bool "Debug atomic variables"
+ depends on DEBUG_KERNEL
+ help
+ If you say Y here, the kernel will add a runtime alignment check
+ to atomic accesses. This is useful for architectures that do not
+ trap on misaligned accesses.
+
+ This option has potentially significant overhead.
+
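
For illustration only (this snippet is not part of the patch; the buffer and
function names are made up): with CONFIG_DEBUG_ATOMIC=y, a deliberately
misaligned atomic_t trips the new warning on its first instrumented access,
roughly like this:

#include <linux/atomic.h>
#include <linux/init.h>
#include <linux/types.h>

/* buf is 4-byte aligned, so &buf[1] can never be naturally aligned for
 * the 4-byte atomic_t placed there.
 */
static u8 buf[sizeof(atomic_t) + 4] __aligned(4);

static int __init debug_atomic_example_init(void)
{
	atomic_t *a = (atomic_t *)&buf[1];

	atomic_inc(a);	/* instrumented access: fires the WARN_ON_ONCE() */
	return 0;
}
late_initcall(debug_atomic_example_init);
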
menu "Lock Debugging (spinlocks, mutexes, etc...)"
config LOCK_DEBUGGING_SUPPORT