x86/alternatives: Simplify alternative_call() interface
Author:     Josh Poimboeuf <jpoimboe@kernel.org>
AuthorDate: Mon, 3 Mar 2025 01:21:01 +0000 (17:21 -0800)
Committer:  Ingo Molnar <mingo@kernel.org>
CommitDate: Tue, 4 Mar 2025 10:21:40 +0000 (11:21 +0100)
Separate the input operands from the clobbers so that additional inputs can
be appended later.

This is in preparation for changing the ASM_CALL_CONSTRAINT primitive.
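
For illustration, a call-site conversion roughly looks as follows (the
function names, feature flag and operands are placeholders made up for this
sketch, not taken from the patch; the real conversions are in the hunks
below). Previously the clobbers were smuggled into the variadic input
argument after a bare ':'; now output, input and clobbers are three distinct
arguments, multiple constraints are grouped with ASM_OUTPUT()/ASM_INPUT(),
and the new COMMA() helper lets an empty input argument expand cleanly:

        /* Before (sketch, placeholder names): clobbers ride inside the input */
        alternative_call(old_func, new_func, X86_FEATURE_FOO,
                         "=a" (ret),
                         "D" (arg) : "memory");

        /* After (sketch, placeholder names): clobbers are their own argument */
        alternative_call(old_func, new_func, X86_FEATURE_FOO,
                         "=a" (ret),
                         "D" (arg),
                         "memory");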

Signed-off-by: Josh Poimboeuf <jpoimboe@kernel.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: linux-kernel@vger.kernel.org
arch/x86/include/asm/alternative.h
arch/x86/include/asm/apic.h
arch/x86/include/asm/asm.h
arch/x86/include/asm/atomic64_32.h
arch/x86/include/asm/page_64.h

diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index a2141665239b5ad1283a2ccd356142dcec5dc1e4..52626a7251e62fbcbe76dd986dd1ec2604ee0741 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -237,10 +237,12 @@ static inline int alternatives_text_reserved(void *start, void *end)
  * references: i.e., if used for a function, it would add the PLT
  * suffix.
  */
-#define alternative_call(oldfunc, newfunc, ft_flags, output, input...)                 \
+#define alternative_call(oldfunc, newfunc, ft_flags, output, input, clobbers...)       \
        asm_inline volatile(ALTERNATIVE("call %c[old]", "call %c[new]", ft_flags)       \
                : ALT_OUTPUT_SP(output)                                                 \
-               : [old] "i" (oldfunc), [new] "i" (newfunc), ## input)
+               : [old] "i" (oldfunc), [new] "i" (newfunc)                              \
+                 COMMA(input)                                                          \
+               : clobbers)
 
 /*
  * Like alternative_call, but there are two features and respective functions.
@@ -249,24 +251,14 @@ static inline int alternatives_text_reserved(void *start, void *end)
  * Otherwise, old function is used.
  */
 #define alternative_call_2(oldfunc, newfunc1, ft_flags1, newfunc2, ft_flags2,          \
-                          output, input...)                                            \
+                          output, input, clobbers...)                                  \
        asm_inline volatile(ALTERNATIVE_2("call %c[old]", "call %c[new1]", ft_flags1,   \
                "call %c[new2]", ft_flags2)                                             \
                : ALT_OUTPUT_SP(output)                                                 \
                : [old] "i" (oldfunc), [new1] "i" (newfunc1),                           \
-                 [new2] "i" (newfunc2), ## input)
-
-/*
- * use this macro(s) if you need more than one output parameter
- * in alternative_io
- */
-#define ASM_OUTPUT2(a...) a
-
-/*
- * use this macro if you need clobbers but no inputs in
- * alternative_{input,io,call}()
- */
-#define ASM_NO_INPUT_CLOBBER(clbr...) "i" (0) : clbr
+                 [new2] "i" (newfunc2)                                                 \
+                 COMMA(input)                                                          \
+               : clobbers)
 
 #define ALT_OUTPUT_SP(...) ASM_CALL_CONSTRAINT, ## __VA_ARGS__
 
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index f21ff19326994e6097633c6dcececb00a71efcad..c903d358405d389b808c6075cf6bfa6032710c2a 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -99,8 +99,8 @@ static inline void native_apic_mem_write(u32 reg, u32 v)
        volatile u32 *addr = (volatile u32 *)(APIC_BASE + reg);
 
        alternative_io("movl %0, %1", "xchgl %0, %1", X86_BUG_11AP,
-                      ASM_OUTPUT2("=r" (v), "=m" (*addr)),
-                      ASM_OUTPUT2("0" (v), "m" (*addr)));
+                      ASM_OUTPUT("=r" (v), "=m" (*addr)),
+                      ASM_INPUT("0" (v), "m" (*addr)));
 }
 
 static inline u32 native_apic_mem_read(u32 reg)
diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
index 2bec0c89a95c2794cfe15133a9b098e62fe6ef76..975ae7a9397ebc644a05afcac2fcf79bfd6fe7e5 100644
--- a/arch/x86/include/asm/asm.h
+++ b/arch/x86/include/asm/asm.h
@@ -213,6 +213,17 @@ static __always_inline __pure void *rip_rel_ptr(void *p)
 
 /* For C file, we already have NOKPROBE_SYMBOL macro */
 
+/* Insert a comma if args are non-empty */
+#define COMMA(x...)            __COMMA(x)
+#define __COMMA(...)           , ##__VA_ARGS__
+
+/*
+ * Combine multiple asm inline constraint args into a single arg for passing to
+ * another macro.
+ */
+#define ASM_OUTPUT(x...)       x
+#define ASM_INPUT(x...)                x
+
 /*
  * This output constraint should be used for any inline asm which has a "call"
  * instruction.  Otherwise the asm may be inserted before the frame pointer
diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
index 797085ecaaa45eae93cede33531d2ccd6077107a..ab838205c1c66233f42de2825b7622afca5aa03b 100644
--- a/arch/x86/include/asm/atomic64_32.h
+++ b/arch/x86/include/asm/atomic64_32.h
@@ -49,16 +49,19 @@ static __always_inline s64 arch_atomic64_read_nonatomic(const atomic64_t *v)
 #endif
 
 #ifdef CONFIG_X86_CX8
-#define __alternative_atomic64(f, g, out, in...) \
-       asm volatile("call %c[func]" \
+#define __alternative_atomic64(f, g, out, in, clobbers...)             \
+       asm volatile("call %c[func]"                                    \
                     : ALT_OUTPUT_SP(out) \
-                    : [func] "i" (atomic64_##g##_cx8), ## in)
+                    : [func] "i" (atomic64_##g##_cx8)                  \
+                      COMMA(in)                                        \
+                    : clobbers)
 
 #define ATOMIC64_DECL(sym) ATOMIC64_DECL_ONE(sym##_cx8)
 #else
-#define __alternative_atomic64(f, g, out, in...) \
-       alternative_call(atomic64_##f##_386, atomic64_##g##_cx8, \
-                        X86_FEATURE_CX8, ASM_OUTPUT2(out), ## in)
+#define __alternative_atomic64(f, g, out, in, clobbers...)             \
+       alternative_call(atomic64_##f##_386, atomic64_##g##_cx8,        \
+                        X86_FEATURE_CX8, ASM_OUTPUT(out),              \
+                        ASM_INPUT(in), clobbers)
 
 #define ATOMIC64_DECL(sym) ATOMIC64_DECL_ONE(sym##_cx8); \
        ATOMIC64_DECL_ONE(sym##_386)
@@ -69,8 +72,8 @@ ATOMIC64_DECL_ONE(inc_386);
 ATOMIC64_DECL_ONE(dec_386);
 #endif
 
-#define alternative_atomic64(f, out, in...) \
-       __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
+#define alternative_atomic64(f, out, in, clobbers...) \
+       __alternative_atomic64(f, f, ASM_OUTPUT(out), ASM_INPUT(in), clobbers)
 
 ATOMIC64_DECL(read);
 ATOMIC64_DECL(set);
@@ -105,9 +108,10 @@ static __always_inline s64 arch_atomic64_xchg(atomic64_t *v, s64 n)
        s64 o;
        unsigned high = (unsigned)(n >> 32);
        unsigned low = (unsigned)n;
-       alternative_atomic64(xchg, "=&A" (o),
-                            "S" (v), "b" (low), "c" (high)
-                            : "memory");
+       alternative_atomic64(xchg,
+                            "=&A" (o),
+                            ASM_INPUT("S" (v), "b" (low), "c" (high)),
+                            "memory");
        return o;
 }
 #define arch_atomic64_xchg arch_atomic64_xchg
@@ -116,23 +120,25 @@ static __always_inline void arch_atomic64_set(atomic64_t *v, s64 i)
 {
        unsigned high = (unsigned)(i >> 32);
        unsigned low = (unsigned)i;
-       alternative_atomic64(set, /* no output */,
-                            "S" (v), "b" (low), "c" (high)
-                            : "eax", "edx", "memory");
+       alternative_atomic64(set,
+                            /* no output */,
+                            ASM_INPUT("S" (v), "b" (low), "c" (high)),
+                            "eax", "edx", "memory");
 }
 
 static __always_inline s64 arch_atomic64_read(const atomic64_t *v)
 {
        s64 r;
-       alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
+       alternative_atomic64(read, "=&A" (r), "c" (v), "memory");
        return r;
 }
 
 static __always_inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
 {
        alternative_atomic64(add_return,
-                            ASM_OUTPUT2("+A" (i), "+c" (v)),
-                            ASM_NO_INPUT_CLOBBER("memory"));
+                            ASM_OUTPUT("+A" (i), "+c" (v)),
+                            /* no input */,
+                            "memory");
        return i;
 }
 #define arch_atomic64_add_return arch_atomic64_add_return
@@ -140,8 +146,9 @@ static __always_inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
 static __always_inline s64 arch_atomic64_sub_return(s64 i, atomic64_t *v)
 {
        alternative_atomic64(sub_return,
-                            ASM_OUTPUT2("+A" (i), "+c" (v)),
-                            ASM_NO_INPUT_CLOBBER("memory"));
+                            ASM_OUTPUT("+A" (i), "+c" (v)),
+                            /* no input */,
+                            "memory");
        return i;
 }
 #define arch_atomic64_sub_return arch_atomic64_sub_return
@@ -149,8 +156,10 @@ static __always_inline s64 arch_atomic64_sub_return(s64 i, atomic64_t *v)
 static __always_inline s64 arch_atomic64_inc_return(atomic64_t *v)
 {
        s64 a;
-       alternative_atomic64(inc_return, "=&A" (a),
-                            "S" (v) : "memory", "ecx");
+       alternative_atomic64(inc_return,
+                            "=&A" (a),
+                            "S" (v),
+                            "memory", "ecx");
        return a;
 }
 #define arch_atomic64_inc_return arch_atomic64_inc_return
@@ -158,8 +167,10 @@ static __always_inline s64 arch_atomic64_inc_return(atomic64_t *v)
 static __always_inline s64 arch_atomic64_dec_return(atomic64_t *v)
 {
        s64 a;
-       alternative_atomic64(dec_return, "=&A" (a),
-                            "S" (v) : "memory", "ecx");
+       alternative_atomic64(dec_return,
+                            "=&A" (a),
+                            "S" (v),
+                            "memory", "ecx");
        return a;
 }
 #define arch_atomic64_dec_return arch_atomic64_dec_return
@@ -167,28 +178,34 @@ static __always_inline s64 arch_atomic64_dec_return(atomic64_t *v)
 static __always_inline void arch_atomic64_add(s64 i, atomic64_t *v)
 {
        __alternative_atomic64(add, add_return,
-                              ASM_OUTPUT2("+A" (i), "+c" (v)),
-                              ASM_NO_INPUT_CLOBBER("memory"));
+                              ASM_OUTPUT("+A" (i), "+c" (v)),
+                              /* no input */,
+                              "memory");
 }
 
 static __always_inline void arch_atomic64_sub(s64 i, atomic64_t *v)
 {
        __alternative_atomic64(sub, sub_return,
-                              ASM_OUTPUT2("+A" (i), "+c" (v)),
-                              ASM_NO_INPUT_CLOBBER("memory"));
+                              ASM_OUTPUT("+A" (i), "+c" (v)),
+                              /* no input */,
+                              "memory");
 }
 
 static __always_inline void arch_atomic64_inc(atomic64_t *v)
 {
-       __alternative_atomic64(inc, inc_return, /* no output */,
-                              "S" (v) : "memory", "eax", "ecx", "edx");
+       __alternative_atomic64(inc, inc_return,
+                              /* no output */,
+                              "S" (v),
+                              "memory", "eax", "ecx", "edx");
 }
 #define arch_atomic64_inc arch_atomic64_inc
 
 static __always_inline void arch_atomic64_dec(atomic64_t *v)
 {
-       __alternative_atomic64(dec, dec_return, /* no output */,
-                              "S" (v) : "memory", "eax", "ecx", "edx");
+       __alternative_atomic64(dec, dec_return,
+                              /* no output */,
+                              "S" (v),
+                              "memory", "eax", "ecx", "edx");
 }
 #define arch_atomic64_dec arch_atomic64_dec
 
@@ -197,8 +214,9 @@ static __always_inline int arch_atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
        unsigned low = (unsigned)u;
        unsigned high = (unsigned)(u >> 32);
        alternative_atomic64(add_unless,
-                            ASM_OUTPUT2("+A" (a), "+c" (low), "+D" (high)),
-                            "S" (v) : "memory");
+                            ASM_OUTPUT("+A" (a), "+c" (low), "+D" (high)),
+                            "S" (v),
+                            "memory");
        return (int)a;
 }
 #define arch_atomic64_add_unless arch_atomic64_add_unless
@@ -206,8 +224,10 @@ static __always_inline int arch_atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
 static __always_inline int arch_atomic64_inc_not_zero(atomic64_t *v)
 {
        int r;
-       alternative_atomic64(inc_not_zero, "=&a" (r),
-                            "S" (v) : "ecx", "edx", "memory");
+       alternative_atomic64(inc_not_zero,
+                            "=&a" (r),
+                            "S" (v),
+                            "ecx", "edx", "memory");
        return r;
 }
 #define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero
@@ -215,8 +235,10 @@ static __always_inline int arch_atomic64_inc_not_zero(atomic64_t *v)
 static __always_inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
 {
        s64 r;
-       alternative_atomic64(dec_if_positive, "=&A" (r),
-                            "S" (v) : "ecx", "memory");
+       alternative_atomic64(dec_if_positive,
+                            "=&A" (r),
+                            "S" (v),
+                            "ecx", "memory");
        return r;
 }
 #define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
index d63576608ce765127bd3b3c654dc4d2d9ca4c46a..d081e8000f349e42fc2f06dd34f4db1131f32c14 100644
--- a/arch/x86/include/asm/page_64.h
+++ b/arch/x86/include/asm/page_64.h
@@ -55,8 +55,8 @@ static inline void clear_page(void *page)
                           clear_page_rep, X86_FEATURE_REP_GOOD,
                           clear_page_erms, X86_FEATURE_ERMS,
                           "=D" (page),
-                          "D" (page)
-                          : "cc", "memory", "rax", "rcx");
+                          "D" (page),
+                          "cc", "memory", "rax", "rcx");
 }
 
 void copy_page(void *to, void *from);