git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
6.19-stable patches
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 19 Mar 2026 09:31:19 +0000 (10:31 +0100)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 19 Mar 2026 09:31:19 +0000 (10:31 +0100)
added patches:
sched_ext-use-write_once-for-the-write-side-of-scx_enable-helper-pointer.patch

queue-6.19/sched_ext-use-write_once-for-the-write-side-of-scx_enable-helper-pointer.patch [new file with mode: 0644]
queue-6.19/series

diff --git a/queue-6.19/sched_ext-use-write_once-for-the-write-side-of-scx_enable-helper-pointer.patch b/queue-6.19/sched_ext-use-write_once-for-the-write-side-of-scx_enable-helper-pointer.patch
new file mode 100644 (file)
index 0000000..8ed57f1
--- /dev/null
@@ -0,0 +1,62 @@
+From 2fcfe5951eb2e8440fc5e1dd6ea977336ff83a1d Mon Sep 17 00:00:00 2001
+From: zhidao su <suzhidao@xiaomi.com>
+Date: Mon, 9 Mar 2026 10:46:12 +0800
+Subject: sched_ext: Use WRITE_ONCE() for the write side of scx_enable helper pointer
+
+From: zhidao su <suzhidao@xiaomi.com>
+
+commit 2fcfe5951eb2e8440fc5e1dd6ea977336ff83a1d upstream.
+
+scx_enable() uses double-checked locking to lazily initialize a static
+kthread_worker pointer. The fast path reads helper locklessly:
+
+    if (!READ_ONCE(helper)) {          // lockless read -- no helper_mutex
+
+The write side initializes helper under helper_mutex, but previously
+used a plain assignment:
+
+        helper = kthread_run_worker(0, "scx_enable_helper");
+                 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+                 plain write -- KCSAN data race with READ_ONCE() above
+
+Since READ_ONCE() on the fast path and the plain write on the
+initialization path access the same variable without a common lock,
+they constitute a data race. KCSAN requires that all sides of a
+lock-free access use READ_ONCE()/WRITE_ONCE() consistently.
+
+Use a temporary variable to stage the result of kthread_run_worker(),
+and only WRITE_ONCE() into helper after confirming the pointer is
+valid. This avoids a window where a concurrent caller on the fast path
+could observe an ERR pointer via READ_ONCE(helper) before the error
+check completes.
+
+Fixes: b06ccbabe250 ("sched_ext: Fix starvation of scx_enable() under fair-class saturation")
+Signed-off-by: zhidao su <suzhidao@xiaomi.com>
+Acked-by: Andrea Righi <arighi@nvidia.com>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/sched/ext.c |    9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+--- a/kernel/sched/ext.c
++++ b/kernel/sched/ext.c
+@@ -5219,13 +5219,14 @@ static int scx_enable(struct sched_ext_o
+       if (!READ_ONCE(helper)) {
+               mutex_lock(&helper_mutex);
+               if (!helper) {
+-                      helper = kthread_run_worker(0, "scx_enable_helper");
+-                      if (IS_ERR_OR_NULL(helper)) {
+-                              helper = NULL;
++                      struct kthread_worker *w =
++                              kthread_run_worker(0, "scx_enable_helper");
++                      if (IS_ERR_OR_NULL(w)) {
+                               mutex_unlock(&helper_mutex);
+                               return -ENOMEM;
+                       }
+-                      sched_set_fifo(helper->task);
++                      sched_set_fifo(w->task);
++                      WRITE_ONCE(helper, w);
+               }
+               mutex_unlock(&helper_mutex);
+       }
index 5f11af78ca46c5574a5fe47f33a4120a5f596b3a..b3ff7f41a3dddf4fd664217ddc6102d4a1e397e1 100644 (file)
@@ -377,3 +377,4 @@ io_uring-ensure-ctx-rings-is-stable-for-task-work-flags-manipulation.patch
 io_uring-eventfd-use-ctx-rings_rcu-for-flags-checking.patch
 cxl-acpi-fix-cxl_acpi-and-cxl_pmem-kconfig-tristate-mismatch.patch
 bpf-drop-kthread_exit-from-noreturn_deny.patch
+sched_ext-use-write_once-for-the-write-side-of-scx_enable-helper-pointer.patch