git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
kcsan: Save instruction pointer for scoped accesses
author: Marco Elver <elver@google.com>
Mon, 9 Aug 2021 11:25:13 +0000 (13:25 +0200)
committer: Paul E. McKenney <paulmck@kernel.org>
Mon, 13 Sep 2021 23:41:19 +0000 (16:41 -0700)
Save the instruction pointer for scoped accesses, so that it becomes
possible for the reporting code to construct more accurate stack traces
that will show the start of the scope.

Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
include/linux/kcsan-checks.h
kernel/kcsan/core.c

index 9fd0ad80fef6cf553de667eef75ce47a909ff823..5f5965246877a35c9f3ab5934a00929f68db9d7a 100644 (file)
@@ -100,9 +100,12 @@ void kcsan_set_access_mask(unsigned long mask);
 /* Scoped access information. */
 struct kcsan_scoped_access {
        struct list_head list;
+       /* Access information. */
        const volatile void *ptr;
        size_t size;
        int type;
+       /* Location where scoped access was set up. */
+       unsigned long ip;
 };
 /*
  * Automatically call kcsan_end_scoped_access() when kcsan_scoped_access goes
index bffd1d95addbb66652f2f9e684cb62c9472d42c9..8b20af541776f02f86b74dded1789b298fd0d321 100644 (file)
@@ -202,6 +202,9 @@ static __always_inline struct kcsan_ctx *get_ctx(void)
        return in_task() ? &current->kcsan_ctx : raw_cpu_ptr(&kcsan_cpu_ctx);
 }
 
+static __always_inline void
+check_access(const volatile void *ptr, size_t size, int type, unsigned long ip);
+
 /* Check scoped accesses; never inline because this is a slow-path! */
 static noinline void kcsan_check_scoped_accesses(void)
 {
@@ -210,8 +213,10 @@ static noinline void kcsan_check_scoped_accesses(void)
        struct kcsan_scoped_access *scoped_access;
 
        ctx->scoped_accesses.prev = NULL;  /* Avoid recursion. */
-       list_for_each_entry(scoped_access, &ctx->scoped_accesses, list)
-               __kcsan_check_access(scoped_access->ptr, scoped_access->size, scoped_access->type);
+       list_for_each_entry(scoped_access, &ctx->scoped_accesses, list) {
+               check_access(scoped_access->ptr, scoped_access->size,
+                            scoped_access->type, scoped_access->ip);
+       }
        ctx->scoped_accesses.prev = prev_save;
 }
 
@@ -767,6 +772,7 @@ kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type,
        sa->ptr = ptr;
        sa->size = size;
        sa->type = type;
+       sa->ip = _RET_IP_;
 
        if (!ctx->scoped_accesses.prev) /* Lazy initialize list head. */
                INIT_LIST_HEAD(&ctx->scoped_accesses);
@@ -798,7 +804,7 @@ void kcsan_end_scoped_access(struct kcsan_scoped_access *sa)
 
        ctx->disable_count--;
 
-       __kcsan_check_access(sa->ptr, sa->size, sa->type);
+       check_access(sa->ptr, sa->size, sa->type, sa->ip);
 }
 EXPORT_SYMBOL(kcsan_end_scoped_access);