git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
x86/static-call: provide a way to do very early static-call updates
author: Juergen Gross <jgross@suse.com>
Fri, 29 Nov 2024 15:15:54 +0000 (16:15 +0100)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 19 Dec 2024 17:06:13 +0000 (18:06 +0100)
commit 0ef8047b737d7480a5d4c46d956e97c190f13050 upstream.

Add static_call_update_early() for updating static-call targets in
very early boot.

This will be needed for support of Xen guest type specific hypercall
functions.

This is part of XSA-466 / CVE-2024-53241.

Reported-by: Andrew Cooper <andrew.cooper3@citrix.com>
Signed-off-by: Juergen Gross <jgross@suse.com>
Co-developed-by: Peter Zijlstra <peterz@infradead.org>
Co-developed-by: Josh Poimboeuf <jpoimboe@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/x86/include/asm/static_call.h
arch/x86/include/asm/sync_core.h
arch/x86/kernel/static_call.c
include/linux/compiler.h
include/linux/static_call.h
kernel/static_call.c

index 491aadfac611782af25a0d199c571394c3540e17..df01a3afcf846ef4ad76d95aca289d8abd590616 100644 (file)
 
 extern bool __static_call_fixup(void *tramp, u8 op, void *dest);
 
+extern void __static_call_update_early(void *tramp, void *func);
+
+#define static_call_update_early(name, _func)                          \
+({                                                                     \
+       typeof(&STATIC_CALL_TRAMP(name)) __F = (_func);                 \
+       if (static_call_initialized) {                                  \
+               __static_call_update(&STATIC_CALL_KEY(name),            \
+                                    STATIC_CALL_TRAMP_ADDR(name), __F);\
+       } else {                                                        \
+               WRITE_ONCE(STATIC_CALL_KEY(name).func, _func);          \
+               __static_call_update_early(STATIC_CALL_TRAMP_ADDR(name),\
+                                          __F);                        \
+       }                                                               \
+})
+
 #endif /* _ASM_STATIC_CALL_H */
index ab7382f92aff27405127748074a11341cbda474d..96bda43538ee70a302a37bd110860964ac4cf8ec 100644 (file)
@@ -8,7 +8,7 @@
 #include <asm/special_insns.h>
 
 #ifdef CONFIG_X86_32
-static inline void iret_to_self(void)
+static __always_inline void iret_to_self(void)
 {
        asm volatile (
                "pushfl\n\t"
@@ -19,7 +19,7 @@ static inline void iret_to_self(void)
                : ASM_CALL_CONSTRAINT : : "memory");
 }
 #else
-static inline void iret_to_self(void)
+static __always_inline void iret_to_self(void)
 {
        unsigned int tmp;
 
@@ -55,7 +55,7 @@ static inline void iret_to_self(void)
  * Like all of Linux's memory ordering operations, this is a
  * compiler barrier as well.
  */
-static inline void sync_core(void)
+static __always_inline void sync_core(void)
 {
        /*
         * The SERIALIZE instruction is the most straightforward way to
index 273e9b77b7302615b27c86fde33e2ede0e7e2695..7903e82f60857a53a86b41e7fca9c761533d2a63 100644 (file)
@@ -2,6 +2,7 @@
 #include <linux/static_call.h>
 #include <linux/memory.h>
 #include <linux/bug.h>
+#include <asm/sync_core.h>
 #include <asm/text-patching.h>
 
 enum insn_type {
@@ -109,6 +110,15 @@ void arch_static_call_transform(void *site, void *tramp, void *func, bool tail)
 }
 EXPORT_SYMBOL_GPL(arch_static_call_transform);
 
+noinstr void __static_call_update_early(void *tramp, void *func)
+{
+       BUG_ON(system_state != SYSTEM_BOOTING);
+       BUG_ON(!early_boot_irqs_disabled);
+       BUG_ON(static_call_initialized);
+       __text_gen_insn(tramp, JMP32_INSN_OPCODE, tramp, func, JMP32_INSN_SIZE);
+       sync_core();
+}
+
 #ifdef CONFIG_RETHUNK
 /*
  * This is called by apply_returns() to fix up static call trampolines,
index 475d0a3ce059e9c8cde8042c841e341d0e0774cb..13a43651984fbdcb5fae0a67238690b48b5dbe06 100644 (file)
@@ -215,6 +215,23 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
 
 #endif /* __KERNEL__ */
 
+/**
+ * offset_to_ptr - convert a relative memory offset to an absolute pointer
+ * @off:       the address of the 32-bit offset value
+ */
+static inline void *offset_to_ptr(const int *off)
+{
+       return (void *)((unsigned long)off + *off);
+}
+
+#endif /* __ASSEMBLY__ */
+
+#ifdef CONFIG_64BIT
+#define ARCH_SEL(a,b) a
+#else
+#define ARCH_SEL(a,b) b
+#endif
+
 /*
  * Force the compiler to emit 'sym' as a symbol, so that we can reference
  * it from inline assembler. Necessary in case 'sym' could be inlined
@@ -225,16 +242,13 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
        static void * __section(".discard.addressable") __used \
                __UNIQUE_ID(__PASTE(__addressable_,sym)) = (void *)&sym;
 
-/**
- * offset_to_ptr - convert a relative memory offset to an absolute pointer
- * @off:       the address of the 32-bit offset value
- */
-static inline void *offset_to_ptr(const int *off)
-{
-       return (void *)((unsigned long)off + *off);
-}
+#define __ADDRESSABLE_ASM(sym)                                         \
+       .pushsection .discard.addressable,"aw";                         \
+       .align ARCH_SEL(8,4);                                           \
+       ARCH_SEL(.quad, .long) __stringify(sym);                        \
+       .popsection;
 
-#endif /* __ASSEMBLY__ */
+#define __ADDRESSABLE_ASM_STR(sym) __stringify(__ADDRESSABLE_ASM(sym))
 
 /* &a[0] degrades to a pointer: a different type from an array */
 #define __must_be_array(a)     BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
index 04e6042d252d3fa93fdf21f422df3108144faa85..02284243688056550a26ece28bfd8663e96e0fb5 100644 (file)
 #ifdef CONFIG_HAVE_STATIC_CALL
 #include <asm/static_call.h>
 
+extern bool static_call_initialized;
 /*
  * Either @site or @tramp can be NULL.
  */
index dc5665b628140ee9b0a841011a4ae495d2c9eee8..e9408409eb467fd747b4feb0cf0eca187d02d99c 100644 (file)
@@ -15,7 +15,7 @@ extern struct static_call_site __start_static_call_sites[],
 extern struct static_call_tramp_key __start_static_call_tramp_key[],
                                    __stop_static_call_tramp_key[];
 
-static bool static_call_initialized;
+bool static_call_initialized;
 
 /* mutex to protect key modules/sites */
 static DEFINE_MUTEX(static_call_mutex);