git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
rseq: Protect event mask against membarrier IPI
author Thomas Gleixner <tglx@linutronix.de>
Wed, 13 Aug 2025 15:02:30 +0000 (17:02 +0200)
committer Thomas Gleixner <tglx@linutronix.de>
Sat, 13 Sep 2025 17:51:59 +0000 (19:51 +0200)
rseq_need_restart() reads and clears task::rseq_event_mask with preemption
disabled to guard against the scheduler.

But membarrier() uses an IPI and sets the PREEMPT bit in the event mask
from the IPI, which leaves that RMW operation unprotected.

Use a scoped guard with interrupts disabled (irq) when CONFIG_MEMBARRIER is
enabled to fix that; without CONFIG_MEMBARRIER, disabling preemption remains
sufficient.

Fixes: 2a36ab717e8f ("rseq/membarrier: Add MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ")
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Boqun Feng <boqun.feng@gmail.com>
Reviewed-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: stable@vger.kernel.org
include/linux/rseq.h
kernel/rseq.c

index bc8af3eb5598768ac46d250bb0e5583f0a971a06..1fbeb61babeb8ba24cfc9fb76ffbc5aa0addb617 100644 (file)
@@ -7,6 +7,12 @@
 #include <linux/preempt.h>
 #include <linux/sched.h>
 
+#ifdef CONFIG_MEMBARRIER
+# define RSEQ_EVENT_GUARD      irq
+#else
+# define RSEQ_EVENT_GUARD      preempt
+#endif
+
 /*
  * Map the event mask on the user-space ABI enum rseq_cs_flags
  * for direct mask checks.
@@ -41,9 +47,8 @@ static inline void rseq_handle_notify_resume(struct ksignal *ksig,
 static inline void rseq_signal_deliver(struct ksignal *ksig,
                                       struct pt_regs *regs)
 {
-       preempt_disable();
-       __set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
-       preempt_enable();
+       scoped_guard(RSEQ_EVENT_GUARD)
+               __set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
        rseq_handle_notify_resume(ksig, regs);
 }
 
index b7a1ec327e8117b47e353cab92d62111dd261520..2452b7366b00e9f5fde1cbac01a56582adf352b3 100644 (file)
@@ -342,12 +342,12 @@ static int rseq_need_restart(struct task_struct *t, u32 cs_flags)
 
        /*
         * Load and clear event mask atomically with respect to
-        * scheduler preemption.
+        * scheduler preemption and membarrier IPIs.
         */
-       preempt_disable();
-       event_mask = t->rseq_event_mask;
-       t->rseq_event_mask = 0;
-       preempt_enable();
+       scoped_guard(RSEQ_EVENT_GUARD) {
+               event_mask = t->rseq_event_mask;
+               t->rseq_event_mask = 0;
+       }
 
        return !!event_mask;
 }