git.ipfire.org Git - thirdparty/linux.git/commitdiff
x86/retbleed: Move call depth to percpu hot section
authorBrian Gerst <brgerst@gmail.com>
Mon, 3 Mar 2025 16:52:40 +0000 (11:52 -0500)
committerIngo Molnar <mingo@kernel.org>
Tue, 4 Mar 2025 19:30:33 +0000 (20:30 +0100)
No functional change.

Signed-off-by: Brian Gerst <brgerst@gmail.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Uros Bizjak <ubizjak@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: https://lore.kernel.org/r/20250303165246.2175811-6-brgerst@gmail.com
arch/x86/include/asm/current.h
arch/x86/include/asm/nospec-branch.h
arch/x86/kernel/asm-offsets.c
arch/x86/kernel/cpu/common.c
arch/x86/lib/retpoline.S

index f988462d8b69694aaaebc4bae1aa578410ab30f4..8ba2c0f8bcaf92de0ece35637ef47b3edfddd421 100644 (file)
@@ -14,9 +14,6 @@ struct task_struct;
 
 struct pcpu_hot {
        struct task_struct      *current_task;
-#ifdef CONFIG_MITIGATION_CALL_DEPTH_TRACKING
-       u64                     call_depth;
-#endif
        unsigned long           top_of_stack;
        void                    *hardirq_stack_ptr;
        u16                     softirq_pending;
index aee26bb8230f861dda7a4a568159d4cf77df45f6..44c6076fd22bad85a40135034bd85ada34958e09 100644 (file)
@@ -12,7 +12,6 @@
 #include <asm/msr-index.h>
 #include <asm/unwind_hints.h>
 #include <asm/percpu.h>
-#include <asm/current.h>
 
 /*
  * Call depth tracking for Intel SKL CPUs to address the RSB underflow
 #include <asm/asm-offsets.h>
 
 #define CREDIT_CALL_DEPTH                                      \
-       movq    $-1, PER_CPU_VAR(pcpu_hot + X86_call_depth);
+       movq    $-1, PER_CPU_VAR(__x86_call_depth);
 
 #define RESET_CALL_DEPTH                                       \
        xor     %eax, %eax;                                     \
        bts     $63, %rax;                                      \
-       movq    %rax, PER_CPU_VAR(pcpu_hot + X86_call_depth);
+       movq    %rax, PER_CPU_VAR(__x86_call_depth);
 
 #define RESET_CALL_DEPTH_FROM_CALL                             \
        movb    $0xfc, %al;                                     \
        shl     $56, %rax;                                      \
-       movq    %rax, PER_CPU_VAR(pcpu_hot + X86_call_depth);   \
+       movq    %rax, PER_CPU_VAR(__x86_call_depth);            \
        CALL_THUNKS_DEBUG_INC_CALLS
 
 #define INCREMENT_CALL_DEPTH                                   \
-       sarq    $5, PER_CPU_VAR(pcpu_hot + X86_call_depth);     \
+       sarq    $5, PER_CPU_VAR(__x86_call_depth);              \
        CALL_THUNKS_DEBUG_INC_CALLS
 
 #else
@@ -387,6 +386,8 @@ extern void call_depth_return_thunk(void);
                    __stringify(INCREMENT_CALL_DEPTH),          \
                    X86_FEATURE_CALL_DEPTH)
 
+DECLARE_PER_CPU_CACHE_HOT(u64, __x86_call_depth);
+
 #ifdef CONFIG_CALL_THUNKS_DEBUG
 DECLARE_PER_CPU(u64, __x86_call_count);
 DECLARE_PER_CPU(u64, __x86_ret_count);
index a98020bf31bb465c1444b49cd8a5c7ebe21f973b..6fae88f8ae1e7c027afdc9400c1e78c2848207f0 100644 (file)
@@ -109,9 +109,6 @@ static void __used common(void)
        OFFSET(TSS_sp2, tss_struct, x86_tss.sp2);
        OFFSET(X86_top_of_stack, pcpu_hot, top_of_stack);
        OFFSET(X86_current_task, pcpu_hot, current_task);
-#ifdef CONFIG_MITIGATION_CALL_DEPTH_TRACKING
-       OFFSET(X86_call_depth, pcpu_hot, call_depth);
-#endif
 #if IS_ENABLED(CONFIG_CRYPTO_ARIA_AESNI_AVX_X86_64)
        /* Offset for fields in aria_ctx */
        BLANK();
index a9d61537d4a640d1f3c3eb8f4b01e9f021009c94..fd224ae57d62e5cb163a0cb2050f25b7b79ef0f8 100644 (file)
@@ -2075,6 +2075,14 @@ DEFINE_PER_CPU_CACHE_HOT(int, __preempt_count) = INIT_PREEMPT_COUNT;
 EXPORT_PER_CPU_SYMBOL(__preempt_count);
 
 #ifdef CONFIG_X86_64
+/*
 + * Note: Do not make this dependent on CONFIG_MITIGATION_CALL_DEPTH_TRACKING
+ * so that this space is reserved in the hot cache section even when the
+ * mitigation is disabled.
+ */
+DEFINE_PER_CPU_CACHE_HOT(u64, __x86_call_depth);
+EXPORT_PER_CPU_SYMBOL(__x86_call_depth);
+
 static void wrmsrl_cstar(unsigned long val)
 {
        /*
index 038f49a43ab4335cf03df6482530b83760996627..a26c43abd47d8dfc8ff1c4ac94da6e76e466beea 100644 (file)
@@ -343,7 +343,7 @@ SYM_FUNC_START(call_depth_return_thunk)
         * case.
         */
        CALL_THUNKS_DEBUG_INC_RETS
-       shlq    $5, PER_CPU_VAR(pcpu_hot + X86_call_depth)
+       shlq    $5, PER_CPU_VAR(__x86_call_depth)
        jz      1f
        ANNOTATE_UNRET_SAFE
        ret