seqlock/latch: Provide raw_read_seqcount_latch_retry()
author     Peter Zijlstra <peterz@infradead.org>
           Fri, 19 May 2023 10:20:59 +0000 (12:20 +0200)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Sat, 14 Dec 2024 18:50:46 +0000 (19:50 +0100)
[ Upstream commit d16317de9b412aa7bd3598c607112298e36b4352 ]

The read side of seqcount_latch consists of:

  do {
    seq = raw_read_seqcount_latch(&latch->seq);
    ...
  } while (read_seqcount_latch_retry(&latch->seq, seq));

which is asymmetric in its use of the raw_ prefix, and sure enough,
read_seqcount_latch_retry() includes (explicit) instrumentation where
raw_read_seqcount_latch() does not.

This inconsistency becomes a problem when the pattern is used from
noinstr code. Fix it by renaming read_seqcount_latch_retry() to
raw_read_seqcount_latch_retry() and re-implementing it without the
instrumentation.
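
For illustration, a latch reader living in noinstr code would then look
like this (a minimal sketch; my_latch and my_latch_read() are made-up
names, only the two raw_*_latch helpers are from this patch):

  struct my_latch {
  	seqcount_latch_t	seq;
  	u64			val[2];	/* two copies of the data */
  };

  /* noinstr: may only call instrumentation-free, __always_inline helpers. */
  noinstr u64 my_latch_read(struct my_latch *l)
  {
  	unsigned int seq;
  	u64 val;

  	do {
  		seq = raw_read_seqcount_latch(&l->seq);
  		val = l->val[seq & 1];	/* low bit picks the stable copy */
  	} while (raw_read_seqcount_latch_retry(&l->seq, seq));

  	return val;
  }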

Specifically, the instrumentation in question is the kcsan_atomic_next(0)
call in do___read_seqcount_retry(). Losing this annotation is not a
problem because raw_read_seqcount_latch() does not pass through
kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX): there is no KCSAN atomic
region to close, since the raw read never opened one.
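
For reference, the call being dropped lives in do___read_seqcount_retry(),
which reads roughly as follows (a sketch of the existing helper, lightly
abridged):

  static inline int do___read_seqcount_retry(const seqcount_t *s, unsigned start)
  {
  	kcsan_atomic_next(0);	/* end the KCSAN atomic region a begin() opened */
  	return unlikely(READ_ONCE(s->sequence) != start);
  }

The new raw_read_seqcount_latch_retry() keeps the smp_rmb() ordering and
the sequence re-check, but omits that call.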

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Petr Mladek <pmladek@suse.com>
Tested-by: Michael Kelley <mikelley@microsoft.com> # Hyper-V
Link: https://lore.kernel.org/r/20230519102715.233598176@infradead.org
Stable-dep-of: 5c1806c41ce0 ("kcsan, seqlock: Support seqcount_latch_t")
Signed-off-by: Sasha Levin <sashal@kernel.org>
include/linux/rbtree_latch.h
include/linux/seqlock.h
kernel/printk/printk.c
kernel/time/sched_clock.c
kernel/time/timekeeping.c

diff --git a/include/linux/rbtree_latch.h b/include/linux/rbtree_latch.h
index 3d1a9e716b803d41e6d2c3137d734cfa8ab3eba5..6a0999c26c7cf8865cdc4e53412531afeed74e34 100644
--- a/include/linux/rbtree_latch.h
+++ b/include/linux/rbtree_latch.h
@@ -206,7 +206,7 @@ latch_tree_find(void *key, struct latch_tree_root *root,
        do {
                seq = raw_read_seqcount_latch(&root->seq);
                node = __lt_find(key, root, seq & 1, ops->comp);
-       } while (read_seqcount_latch_retry(&root->seq, seq));
+       } while (raw_read_seqcount_latch_retry(&root->seq, seq));
 
        return node;
 }
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 2c5d0102315d2d7c9574750b8339c4493bc32497..97831499d5005462d0b12b79b31c7e73b6380c10 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -675,9 +675,9 @@ typedef struct {
  *
  * Return: sequence counter raw value. Use the lowest bit as an index for
  * picking which data copy to read. The full counter must then be checked
- * with read_seqcount_latch_retry().
+ * with raw_read_seqcount_latch_retry().
  */
-static inline unsigned raw_read_seqcount_latch(const seqcount_latch_t *s)
+static __always_inline unsigned raw_read_seqcount_latch(const seqcount_latch_t *s)
 {
        /*
         * Pairs with the first smp_wmb() in raw_write_seqcount_latch().
@@ -687,16 +687,17 @@ static inline unsigned raw_read_seqcount_latch(const seqcount_latch_t *s)
 }
 
 /**
- * read_seqcount_latch_retry() - end a seqcount_latch_t read section
+ * raw_read_seqcount_latch_retry() - end a seqcount_latch_t read section
  * @s:         Pointer to seqcount_latch_t
  * @start:     count, from raw_read_seqcount_latch()
  *
  * Return: true if a read section retry is required, else false
  */
-static inline int
-read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
+static __always_inline int
+raw_read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
 {
-       return read_seqcount_retry(&s->seqcount, start);
+       smp_rmb();
+       return unlikely(READ_ONCE(s->seqcount.sequence) != start);
 }
 
 /**
@@ -756,7 +757,7 @@ read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
  *                     entry = data_query(latch->data[idx], ...);
  *
  *             // This includes needed smp_rmb()
- *             } while (read_seqcount_latch_retry(&latch->seq, seq));
+ *             } while (raw_read_seqcount_latch_retry(&latch->seq, seq));
  *
  *             return entry;
  *     }
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 323931ff61191cd26117e79394615b8e52541d2f..5e81d2a79d5cc9b6ae189f687e5bfda8c8bddbcc 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -457,7 +457,7 @@ static u64 latched_seq_read_nolock(struct latched_seq *ls)
                seq = raw_read_seqcount_latch(&ls->latch);
                idx = seq & 0x1;
                val = ls->val[idx];
-       } while (read_seqcount_latch_retry(&ls->latch, seq));
+       } while (raw_read_seqcount_latch_retry(&ls->latch, seq));
 
        return val;
 }
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
index b1b9b12899f5e43571e9cc39740f545cd6fad795..f3657e964616a3600e4b26cd7768381faf7b30c4 100644
--- a/kernel/time/sched_clock.c
+++ b/kernel/time/sched_clock.c
@@ -76,7 +76,7 @@ notrace struct clock_read_data *sched_clock_read_begin(unsigned int *seq)
 
 notrace int sched_clock_read_retry(unsigned int seq)
 {
-       return read_seqcount_latch_retry(&cd.seq, seq);
+       return raw_read_seqcount_latch_retry(&cd.seq, seq);
 }
 
 unsigned long long notrace sched_clock(void)
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 7f755127bee414909da543a41d092a555c94d317..07c949c10de28cfb8e76569530a71b415372fb08 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -450,7 +450,7 @@ static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
                tkr = tkf->base + (seq & 0x01);
                now = ktime_to_ns(tkr->base);
                now += fast_tk_get_delta_ns(tkr);
-       } while (read_seqcount_latch_retry(&tkf->seq, seq));
+       } while (raw_read_seqcount_latch_retry(&tkf->seq, seq));
 
        return now;
 }
@@ -549,7 +549,7 @@ static __always_inline u64 __ktime_get_real_fast(struct tk_fast *tkf, u64 *mono)
                basem = ktime_to_ns(tkr->base);
                baser = ktime_to_ns(tkr->base_real);
                delta = fast_tk_get_delta_ns(tkr);
-       } while (read_seqcount_latch_retry(&tkf->seq, seq));
+       } while (raw_read_seqcount_latch_retry(&tkf->seq, seq));
 
        if (mono)
                *mono = basem + delta;