kcsan, seqlock: Fix incorrect assumption in read_seqbegin()
author Marco Elver <elver@google.com>
Mon, 4 Nov 2024 15:43:09 +0000 (16:43 +0100)
committer Peter Zijlstra <peterz@infradead.org>
Tue, 5 Nov 2024 11:55:35 +0000 (12:55 +0100)
During testing of the preceding changes, I noticed that in some cases,
current->kcsan_ctx.in_flat_atomic remained true until task exit. This is
obviously wrong, because _all_ accesses for the given task will be
treated as atomic, resulting in false negatives, i.e. missed data races.
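
For background, kcsan_flat_atomic_begin()/kcsan_flat_atomic_end() bracket a
"flat" atomic region in which KCSAN treats every access by the current task
as atomic; an unmatched begin therefore suppresses data-race reports for the
rest of the task's lifetime. A minimal sketch of the intended pairing
(illustrative only; reader() and the elided accesses are made up):

  #include <linux/kcsan-checks.h>

  static void reader(void)
  {
          kcsan_flat_atomic_begin();      /* all accesses now treated as atomic */
          /* ... lockless reads of shared state ... */
          kcsan_flat_atomic_end();        /* must be reached, or reports stay suppressed */
  }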

Debugging led to fs/dcache.c, where we can see this usage of seqlock:

  struct dentry *d_lookup(const struct dentry *parent, const struct qstr *name)
  {
          struct dentry *dentry;
          unsigned seq;

          do {
                  seq = read_seqbegin(&rename_lock);
                  dentry = __d_lookup(parent, name);
                  if (dentry)
                          break;
          } while (read_seqretry(&rename_lock, seq));
  [...]

As can be seen, read_seqretry() is never called if dentry != NULL;
consequently, current->kcsan_ctx.in_flat_atomic will never be reset to
false by read_seqretry().
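
For contrast, the removed annotation was only sound for readers that always
reach read_seqretry(), as in the canonical retry loop (a sketch; the lock and
the shared variable are hypothetical):

  #include <linux/seqlock.h>

  static DEFINE_SEQLOCK(example_lock);            /* hypothetical lock */
  static int shared_val;                          /* hypothetical shared data */

  static int read_shared(void)
  {
          unsigned seq;
          int val;

          do {
                  seq = read_seqbegin(&example_lock);
                  val = shared_val;               /* lockless read */
          } while (read_seqretry(&example_lock, seq));    /* always reached */

          return val;
  }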

Give up on the incorrect assumption encoded in the removed comment ("assume
closing read_seqretry()"), and rely on the already-present annotations in
read_seqcount_begin/retry().
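
For reference, the seqcount-level annotations are bounded rather than
open-ended, which is why they do not depend on the read-side critical section
being explicitly closed. Roughly (a sketch of the idea only; the exact
mainline code differs, e.g. it also waits for an even sequence count):

  #include <linux/kcsan-checks.h>
  #include <linux/seqlock.h>

  static unsigned sketch_read_seqcount_begin(const seqcount_t *s)
  {
          unsigned seq = READ_ONCE(s->sequence);

          smp_rmb();
          /* Treat only the next KCSAN_SEQLOCK_REGION_MAX accesses as atomic;
           * the bound expires by itself if the retry is never reached. */
          kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
          return seq;
  }

  static int sketch_read_seqcount_retry(const seqcount_t *s, unsigned start)
  {
          smp_rmb();
          kcsan_atomic_next(0);   /* close the bounded region early */
          return READ_ONCE(s->sequence) != start;
  }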

Fixes: 88ecd153be95 ("seqlock, kcsan: Add annotations for KCSAN")
Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20241104161910.780003-6-elver@google.com
include/linux/seqlock.h

diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 45eee0e5dca010c1a9265f693bb69f12e64eb8c4..5298765d6ca4827eb7bf8a9dca020f1383d3b901 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -810,11 +810,7 @@ static __always_inline void write_seqcount_latch_end(seqcount_latch_t *s)
  */
 static inline unsigned read_seqbegin(const seqlock_t *sl)
 {
-       unsigned ret = read_seqcount_begin(&sl->seqcount);
-
-       kcsan_atomic_next(0);  /* non-raw usage, assume closing read_seqretry() */
-       kcsan_flat_atomic_begin();
-       return ret;
+       return read_seqcount_begin(&sl->seqcount);
 }
 
 /**
@@ -830,12 +826,6 @@ static inline unsigned read_seqbegin(const seqlock_t *sl)
  */
 static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
 {
-       /*
-        * Assume not nested: read_seqretry() may be called multiple times when
-        * completing read critical section.
-        */
-       kcsan_flat_atomic_end();
-
        return read_seqcount_retry(&sl->seqcount, start);
 }