x86/irq: Make run_on_irqstack_cond() typesafe
author     Thomas Gleixner <tglx@linutronix.de>
           Tue, 22 Sep 2020 07:58:52 +0000 (09:58 +0200)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Thu, 1 Oct 2020 15:36:32 +0000 (17:36 +0200)
commit a7b3474cbb2864d5500d5e4f48dd57c903975cab upstream.

Sami reported that run_on_irqstack_cond() requires the caller to cast
functions to mismatching types, which trips indirect call Control-Flow
Integrity (CFI) in Clang.

Instead of disabling CFI on that function, provide proper helpers for
the three call variants. The actual ASM code stays the same as that is
out of reach.
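
As an illustration of the mismatch and of the typed-helper shape the patch
introduces, here is a minimal user-space sketch. The names (fake_regs,
run_untyped, run_sysvec, sysvec_handler) and the clang invocation are
inventions of the sketch, not part of the kernel change; the real interface
is the pt_regs/irq_desc variants in the diff below.

/*
 * Minimal user-space sketch, not kernel code: shows the cast that trips
 * Clang's indirect-call CFI and the typed-helper shape used as the fix.
 * Build assumption (not from this patch):
 *     clang -flto -fvisibility=hidden -fsanitize=cfi-icall sketch.c
 */
#include <stdio.h>

struct fake_regs { int dummy; };           /* stand-in for struct pt_regs */

static void sysvec_handler(struct fake_regs *regs)
{
        printf("handler, regs=%p\n", (void *)regs);
}

/* Old shape: one untyped helper; the caller casts and CFI sees a mismatch. */
static void run_untyped(void *func, void *arg)
{
        void (*__func)(void *arg) = func;  /* call type:  void (*)(void *) */
        __func(arg);                       /* target type: void (*)(struct fake_regs *) -> CFI trap */
}

/* New shape: one helper per call variant; call and target types agree. */
static void run_sysvec(void (*func)(struct fake_regs *regs),
                       struct fake_regs *regs)
{
        func(regs);
}

int main(void)
{
        struct fake_regs regs = { 0 };

        run_untyped((void *)sysvec_handler, &regs); /* aborts under cfi-icall */
        run_sysvec(sysvec_handler, &regs);          /* type-safe, CFI-clean   */
        return 0;
}

The point of one helper per variant is that every indirect call site then
carries the exact type of its target, which is what Clang's indirect-call
CFI compares.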

 [ bp: Fix __run_on_irqstack() prototype to match. ]

Fixes: 931b94145981 ("x86/entry: Provide helpers for executing on the irqstack")
Reported-by: Nathan Chancellor <natechancellor@gmail.com>
Reported-by: Sami Tolvanen <samitolvanen@google.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Borislav Petkov <bp@suse.de>
Tested-by: Sami Tolvanen <samitolvanen@google.com>
Cc: <stable@vger.kernel.org>
Link: https://github.com/ClangBuiltLinux/linux/issues/1052
Link: https://lkml.kernel.org/r/87pn6eb5tv.fsf@nanos.tec.linutronix.de
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/x86/entry/common.c
arch/x86/entry/entry_64.S
arch/x86/include/asm/idtentry.h
arch/x86/include/asm/irq_stack.h
arch/x86/kernel/irq.c
arch/x86/kernel/irq_64.c

diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index 606c4e25ee934e95737750e048a42b34843daa29..e290164df5ada431be7507b4d5a19ee622f1080b 100644
@@ -814,7 +814,7 @@ __visible noinstr void xen_pv_evtchn_do_upcall(struct pt_regs *regs)
        old_regs = set_irq_regs(regs);
 
        instrumentation_begin();
-       run_on_irqstack_cond(__xen_pv_evtchn_do_upcall, NULL, regs);
+       run_on_irqstack_cond(__xen_pv_evtchn_do_upcall, regs);
        instrumentation_begin();
 
        set_irq_regs(old_regs);
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index d2a00c97e53f608ad67a4e24db1f5c64f1b9e880..20f62398477e55b11a0e6c11a0042f8e22075c93 100644
@@ -687,6 +687,8 @@ SYM_CODE_END(.Lbad_gs)
  * rdx: Function argument (can be NULL if none)
  */
 SYM_FUNC_START(asm_call_on_stack)
+SYM_INNER_LABEL(asm_call_sysvec_on_stack, SYM_L_GLOBAL)
+SYM_INNER_LABEL(asm_call_irq_on_stack, SYM_L_GLOBAL)
        /*
         * Save the frame pointer unconditionally. This allows the ORC
         * unwinder to handle the stack switch.
diff --git a/arch/x86/include/asm/idtentry.h b/arch/x86/include/asm/idtentry.h
index 80d3b30d3ee3e0237eb99ae567e92f4642b25deb..4abe2e5b3fa76655eb7479408884a81bc7fdd4c9 100644
@@ -246,7 +246,7 @@ __visible noinstr void func(struct pt_regs *regs)                   \
        instrumentation_begin();                                        \
        irq_enter_rcu();                                                \
        kvm_set_cpu_l1tf_flush_l1d();                                   \
-       run_on_irqstack_cond(__##func, regs, regs);                     \
+       run_sysvec_on_irqstack_cond(__##func, regs);                    \
        irq_exit_rcu();                                                 \
        instrumentation_end();                                          \
        idtentry_exit_cond_rcu(regs, rcu_exit);                         \
diff --git a/arch/x86/include/asm/irq_stack.h b/arch/x86/include/asm/irq_stack.h
index 4ae66f097101d1cae8c2b23f2f4835fd944e4b56..d95616c7e7d40bf97431bf69e386e22dc0a7cee8 100644
@@ -3,6 +3,7 @@
 #define _ASM_X86_IRQ_STACK_H
 
 #include <linux/ptrace.h>
+#include <linux/irq.h>
 
 #include <asm/processor.h>
 
@@ -12,20 +13,50 @@ static __always_inline bool irqstack_active(void)
        return __this_cpu_read(irq_count) != -1;
 }
 
-void asm_call_on_stack(void *sp, void *func, void *arg);
+void asm_call_on_stack(void *sp, void (*func)(void), void *arg);
+void asm_call_sysvec_on_stack(void *sp, void (*func)(struct pt_regs *regs),
+                             struct pt_regs *regs);
+void asm_call_irq_on_stack(void *sp, void (*func)(struct irq_desc *desc),
+                          struct irq_desc *desc);
 
-static __always_inline void __run_on_irqstack(void *func, void *arg)
+static __always_inline void __run_on_irqstack(void (*func)(void))
 {
        void *tos = __this_cpu_read(hardirq_stack_ptr);
 
        __this_cpu_add(irq_count, 1);
-       asm_call_on_stack(tos - 8, func, arg);
+       asm_call_on_stack(tos - 8, func, NULL);
+       __this_cpu_sub(irq_count, 1);
+}
+
+static __always_inline void
+__run_sysvec_on_irqstack(void (*func)(struct pt_regs *regs),
+                        struct pt_regs *regs)
+{
+       void *tos = __this_cpu_read(hardirq_stack_ptr);
+
+       __this_cpu_add(irq_count, 1);
+       asm_call_sysvec_on_stack(tos - 8, func, regs);
+       __this_cpu_sub(irq_count, 1);
+}
+
+static __always_inline void
+__run_irq_on_irqstack(void (*func)(struct irq_desc *desc),
+                     struct irq_desc *desc)
+{
+       void *tos = __this_cpu_read(hardirq_stack_ptr);
+
+       __this_cpu_add(irq_count, 1);
+       asm_call_irq_on_stack(tos - 8, func, desc);
        __this_cpu_sub(irq_count, 1);
 }
 
 #else /* CONFIG_X86_64 */
 static inline bool irqstack_active(void) { return false; }
-static inline void __run_on_irqstack(void *func, void *arg) { }
+static inline void __run_on_irqstack(void (*func)(void)) { }
+static inline void __run_sysvec_on_irqstack(void (*func)(struct pt_regs *regs),
+                                           struct pt_regs *regs) { }
+static inline void __run_irq_on_irqstack(void (*func)(struct irq_desc *desc),
+                                        struct irq_desc *desc) { }
 #endif /* !CONFIG_X86_64 */
 
 static __always_inline bool irq_needs_irq_stack(struct pt_regs *regs)
@@ -37,17 +68,40 @@ static __always_inline bool irq_needs_irq_stack(struct pt_regs *regs)
        return !user_mode(regs) && !irqstack_active();
 }
 
-static __always_inline void run_on_irqstack_cond(void *func, void *arg,
+
+static __always_inline void run_on_irqstack_cond(void (*func)(void),
                                                 struct pt_regs *regs)
 {
-       void (*__func)(void *arg) = func;
+       lockdep_assert_irqs_disabled();
+
+       if (irq_needs_irq_stack(regs))
+               __run_on_irqstack(func);
+       else
+               func();
+}
+
+static __always_inline void
+run_sysvec_on_irqstack_cond(void (*func)(struct pt_regs *regs),
+                           struct pt_regs *regs)
+{
+       lockdep_assert_irqs_disabled();
 
+       if (irq_needs_irq_stack(regs))
+               __run_sysvec_on_irqstack(func, regs);
+       else
+               func(regs);
+}
+
+static __always_inline void
+run_irq_on_irqstack_cond(void (*func)(struct irq_desc *desc), struct irq_desc *desc,
+                        struct pt_regs *regs)
+{
        lockdep_assert_irqs_disabled();
 
        if (irq_needs_irq_stack(regs))
-               __run_on_irqstack(__func, arg);
+               __run_irq_on_irqstack(func, desc);
        else
-               __func(arg);
+               func(desc);
 }
 
 #endif
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 181060247e3cb75789d7522b0b0be0d6483c3596..c5dd50369e2f3394bc483b8744ace94eaaea552b 100644
@@ -227,7 +227,7 @@ static __always_inline void handle_irq(struct irq_desc *desc,
                                       struct pt_regs *regs)
 {
        if (IS_ENABLED(CONFIG_X86_64))
-               run_on_irqstack_cond(desc->handle_irq, desc, regs);
+               run_irq_on_irqstack_cond(desc->handle_irq, desc, regs);
        else
                __handle_irq(desc, regs);
 }
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index 1b4fe93a86c5ce90cdc2d636dba732e80649ddde..440eed558558d9891547ee1bd09a4570bc398156 100644
@@ -74,5 +74,5 @@ int irq_init_percpu_irqstack(unsigned int cpu)
 
 void do_softirq_own_stack(void)
 {
-       run_on_irqstack_cond(__do_softirq, NULL, NULL);
+       run_on_irqstack_cond(__do_softirq, NULL);
 }