git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
sched_ext: Use READ_ONCE() for plain reads of scx_watchdog_timeout
author: zhidao su <suzhidao@xiaomi.com>
Tue, 3 Mar 2026 06:09:58 +0000 (14:09 +0800)
committer: Tejun Heo <tj@kernel.org>
Tue, 3 Mar 2026 08:00:02 +0000 (22:00 -1000)
scx_watchdog_timeout is written with WRITE_ONCE() in scx_enable():

    WRITE_ONCE(scx_watchdog_timeout, timeout);

However, three read-side accesses use plain reads without the matching
READ_ONCE():

    /* check_rq_for_timeouts() - L2824 */
    last_runnable + scx_watchdog_timeout

    /* scx_watchdog_workfn() - L2852 */
    scx_watchdog_timeout / 2

    /* scx_enable() - L5179 */
    scx_watchdog_timeout / 2

The KCSAN documentation requires that if one accessor uses WRITE_ONCE()
to annotate lock-free access, all other accesses must also use the
appropriate accessor. Plain reads alongside WRITE_ONCE() leave the pair
incomplete and can trigger KCSAN warnings.

Note that scx_tick() already uses the correct READ_ONCE() annotation:

    last_check + READ_ONCE(scx_watchdog_timeout)

Fix the three remaining plain reads to match, making all accesses to
scx_watchdog_timeout consistently annotated and KCSAN-clean.

Signed-off-by: zhidao su <suzhidao@xiaomi.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
kernel/sched/ext.c

index a566d2cc8a43fded68ed7f56c7466cdf3c6d9728..2ba69b3023688877afd45036e893d5a47cdbde63 100644 (file)
@@ -2739,7 +2739,7 @@ static bool check_rq_for_timeouts(struct rq *rq)
                unsigned long last_runnable = p->scx.runnable_at;
 
                if (unlikely(time_after(jiffies,
-                                       last_runnable + scx_watchdog_timeout))) {
+                                       last_runnable + READ_ONCE(scx_watchdog_timeout)))) {
                        u32 dur_ms = jiffies_to_msecs(jiffies - last_runnable);
 
                        scx_exit(sch, SCX_EXIT_ERROR_STALL, 0,
@@ -2767,7 +2767,7 @@ static void scx_watchdog_workfn(struct work_struct *work)
                cond_resched();
        }
        queue_delayed_work(system_unbound_wq, to_delayed_work(work),
-                          scx_watchdog_timeout / 2);
+                          READ_ONCE(scx_watchdog_timeout) / 2);
 }
 
 void scx_tick(struct rq *rq)
@@ -5081,7 +5081,7 @@ static int scx_enable(struct sched_ext_ops *ops, struct bpf_link *link)
        WRITE_ONCE(scx_watchdog_timeout, timeout);
        WRITE_ONCE(scx_watchdog_timestamp, jiffies);
        queue_delayed_work(system_unbound_wq, &scx_watchdog_work,
-                          scx_watchdog_timeout / 2);
+                          READ_ONCE(scx_watchdog_timeout) / 2);
 
        /*
         * Once __scx_enabled is set, %current can be switched to SCX anytime.