x86/irq: Move irq stacks to percpu hot section
author     Brian Gerst <brgerst@gmail.com>
           Mon, 3 Mar 2025 16:52:42 +0000 (11:52 -0500)
committer  Ingo Molnar <mingo@kernel.org>
           Tue, 4 Mar 2025 19:30:33 +0000 (20:30 +0100)
No functional change.

Signed-off-by: Brian Gerst <brgerst@gmail.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Uros Bizjak <ubizjak@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: https://lore.kernel.org/r/20250303165246.2175811-8-brgerst@gmail.com
arch/x86/include/asm/current.h
arch/x86/include/asm/irq_stack.h
arch/x86/include/asm/processor.h
arch/x86/kernel/dumpstack_32.c
arch/x86/kernel/dumpstack_64.c
arch/x86/kernel/irq.c
arch/x86/kernel/irq_32.c
arch/x86/kernel/irq_64.c
arch/x86/kernel/process_64.c
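
For context: this commit moves the irq-stack bookkeeping (hardirq_stack_ptr,
plus hardirq_stack_inuse on 64-bit and softirq_stack_ptr on 32-bit) out of
struct pcpu_hot and into standalone per-CPU variables in the cache-hot percpu
section, so each access drops the struct-member indirection. A minimal
before/after sketch of the access pattern (illustrative only; the actual
declarations appear in the hunks below):

	/* Before: field access through the shared pcpu_hot struct. */
	void *ptr = __this_cpu_read(pcpu_hot.hardirq_stack_ptr);

	/* After: a standalone per-CPU variable in the cache-hot section. */
	DECLARE_PER_CPU_CACHE_HOT(struct irq_stack *, hardirq_stack_ptr);
	struct irq_stack *stk = __this_cpu_read(hardirq_stack_ptr);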

diff --git a/arch/x86/include/asm/current.h b/arch/x86/include/asm/current.h
index f153c77853de1867d060a95242357b390a294fb1..6fad5a4c21d7c0181ce6961cfefd044d9cf28f82 100644
--- a/arch/x86/include/asm/current.h
+++ b/arch/x86/include/asm/current.h
@@ -15,12 +15,6 @@ struct task_struct;
 struct pcpu_hot {
        struct task_struct      *current_task;
        unsigned long           top_of_stack;
-       void                    *hardirq_stack_ptr;
-#ifdef CONFIG_X86_64
-       bool                    hardirq_stack_inuse;
-#else
-       void                    *softirq_stack_ptr;
-#endif
 };
 
 DECLARE_PER_CPU_CACHE_HOT(struct pcpu_hot, pcpu_hot);
diff --git a/arch/x86/include/asm/irq_stack.h b/arch/x86/include/asm/irq_stack.h
index 562a547c29a5bb048472f9f39ac1f9d2f0658156..735c3a491f6047cae05f50e88359aeed8e93dc9d 100644
--- a/arch/x86/include/asm/irq_stack.h
+++ b/arch/x86/include/asm/irq_stack.h
        ASM_CALL_ARG2
 
 #define call_on_irqstack(func, asm_call, argconstr...)                 \
-       call_on_stack(__this_cpu_read(pcpu_hot.hardirq_stack_ptr),      \
+       call_on_stack(__this_cpu_read(hardirq_stack_ptr),               \
                      func, asm_call, argconstr)
 
 /* Macros to assert type correctness for run_*_on_irqstack macros */
         * User mode entry and interrupt on the irq stack do not        \
         * switch stacks. If from user mode the task stack is empty.    \
         */                                                             \
-       if (user_mode(regs) || __this_cpu_read(pcpu_hot.hardirq_stack_inuse)) { \
+       if (user_mode(regs) || __this_cpu_read(hardirq_stack_inuse)) {  \
                irq_enter_rcu();                                        \
                func(c_args);                                           \
                irq_exit_rcu();                                         \
                 * places. Invoke the stack switch macro with the call  \
                 * sequence which matches the above direct invocation.  \
                 */                                                     \
-               __this_cpu_write(pcpu_hot.hardirq_stack_inuse, true);   \
+               __this_cpu_write(hardirq_stack_inuse, true);            \
                call_on_irqstack(func, asm_call, constr);               \
-               __this_cpu_write(pcpu_hot.hardirq_stack_inuse, false);  \
+               __this_cpu_write(hardirq_stack_inuse, false);           \
        }                                                               \
 }
 
  */
 #define do_softirq_own_stack()                                         \
 {                                                                      \
-       __this_cpu_write(pcpu_hot.hardirq_stack_inuse, true);           \
+       __this_cpu_write(hardirq_stack_inuse, true);                    \
        call_on_irqstack(__do_softirq, ASM_CALL_ARG0);                  \
-       __this_cpu_write(pcpu_hot.hardirq_stack_inuse, false);          \
+       __this_cpu_write(hardirq_stack_inuse, false);                   \
 }
 
 #endif
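
A note on the 64-bit flag used above: x86-64 runs softirqs on the hardirq
stack, so hardirq_stack_inuse doubles as a reentry guard; it is set around
the stack switch so that a nested interrupt arriving while already on the
irq stack does not switch again. A simplified sketch of that guard
(condensed from the macros above, not a literal copy):

	if (user_mode(regs) || __this_cpu_read(hardirq_stack_inuse)) {
		/* Already on the task or irq stack: call the handler directly. */
		func();
	} else {
		__this_cpu_write(hardirq_stack_inuse, true);
		call_on_irqstack(func, ASM_CALL_ARG0);
		__this_cpu_write(hardirq_stack_inuse, false);
	}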
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index c241dbc1562c23b6d3343b2d2ec7e108f5a967b0..6bb6af0b543040bcc74243a80519065497365d72 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -415,6 +415,13 @@ struct irq_stack {
        char            stack[IRQ_STACK_SIZE];
 } __aligned(IRQ_STACK_SIZE);
 
+DECLARE_PER_CPU_CACHE_HOT(struct irq_stack *, hardirq_stack_ptr);
+#ifdef CONFIG_X86_64
+DECLARE_PER_CPU_CACHE_HOT(bool, hardirq_stack_inuse);
+#else
+DECLARE_PER_CPU_CACHE_HOT(struct irq_stack *, softirq_stack_ptr);
+#endif
+
 #ifdef CONFIG_X86_64
 static inline unsigned long cpu_kernelmode_gs_base(int cpu)
 {
diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
index b4905d5173fd36d1df190a54725eced03f9170dd..722fd712e1cf029adff9a73c73aad44992dbcef6 100644
--- a/arch/x86/kernel/dumpstack_32.c
+++ b/arch/x86/kernel/dumpstack_32.c
@@ -37,7 +37,7 @@ const char *stack_type_name(enum stack_type type)
 
 static bool in_hardirq_stack(unsigned long *stack, struct stack_info *info)
 {
-       unsigned long *begin = (unsigned long *)this_cpu_read(pcpu_hot.hardirq_stack_ptr);
+       unsigned long *begin = (unsigned long *)this_cpu_read(hardirq_stack_ptr);
        unsigned long *end   = begin + (THREAD_SIZE / sizeof(long));
 
        /*
@@ -62,7 +62,7 @@ static bool in_hardirq_stack(unsigned long *stack, struct stack_info *info)
 
 static bool in_softirq_stack(unsigned long *stack, struct stack_info *info)
 {
-       unsigned long *begin = (unsigned long *)this_cpu_read(pcpu_hot.softirq_stack_ptr);
+       unsigned long *begin = (unsigned long *)this_cpu_read(softirq_stack_ptr);
        unsigned long *end   = begin + (THREAD_SIZE / sizeof(long));
 
        /*
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index f05339fee7785b1da2cc2cec9713e06a1242dbe7..6c5defd6569a3eb867b58c37fccb93dd33decaee 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -134,7 +134,7 @@ static __always_inline bool in_exception_stack(unsigned long *stack, struct stac
 
 static __always_inline bool in_irq_stack(unsigned long *stack, struct stack_info *info)
 {
-       unsigned long *end = (unsigned long *)this_cpu_read(pcpu_hot.hardirq_stack_ptr);
+       unsigned long *end = (unsigned long *)this_cpu_read(hardirq_stack_ptr);
        unsigned long *begin;
 
        /*
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 83a5252a473c3d7d42b45e4cd3b996318c1ce55f..81f9b78e0f7baaa283516f0e86f425cb88757153 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -36,6 +36,8 @@ EXPORT_PER_CPU_SYMBOL(irq_stat);
 DEFINE_PER_CPU_CACHE_HOT(u16, __softirq_pending);
 EXPORT_PER_CPU_SYMBOL(__softirq_pending);
 
+DEFINE_PER_CPU_CACHE_HOT(struct irq_stack *, hardirq_stack_ptr);
+
 atomic_t irq_err_count;
 
 /*
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index d301208d35d02c7d146829c26a26a3933f32d45e..c7a5d2960d5759a2292b0dd05fc7c180be6b9bc4 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -49,6 +49,8 @@ static inline bool check_stack_overflow(void) { return false; }
 static inline void print_stack_overflow(void) { }
 #endif
 
+DEFINE_PER_CPU_CACHE_HOT(struct irq_stack *, softirq_stack_ptr);
+
 static void call_on_stack(void *func, void *stack)
 {
        asm volatile("xchgl %[sp], %%esp\n"
@@ -70,7 +72,7 @@ static inline bool execute_on_irq_stack(bool overflow, struct irq_desc *desc)
        u32 *isp, *prev_esp;
 
        curstk = (struct irq_stack *) current_stack();
-       irqstk = __this_cpu_read(pcpu_hot.hardirq_stack_ptr);
+       irqstk = __this_cpu_read(hardirq_stack_ptr);
 
        /*
         * this is where we switch to the IRQ stack. However, if we are
@@ -107,7 +109,7 @@ int irq_init_percpu_irqstack(unsigned int cpu)
        int node = cpu_to_node(cpu);
        struct page *ph, *ps;
 
-       if (per_cpu(pcpu_hot.hardirq_stack_ptr, cpu))
+       if (per_cpu(hardirq_stack_ptr, cpu))
                return 0;
 
        ph = alloc_pages_node(node, THREADINFO_GFP, THREAD_SIZE_ORDER);
@@ -119,8 +121,8 @@ int irq_init_percpu_irqstack(unsigned int cpu)
                return -ENOMEM;
        }
 
-       per_cpu(pcpu_hot.hardirq_stack_ptr, cpu) = page_address(ph);
-       per_cpu(pcpu_hot.softirq_stack_ptr, cpu) = page_address(ps);
+       per_cpu(hardirq_stack_ptr, cpu) = page_address(ph);
+       per_cpu(softirq_stack_ptr, cpu) = page_address(ps);
        return 0;
 }
 
@@ -130,7 +132,7 @@ void do_softirq_own_stack(void)
        struct irq_stack *irqstk;
        u32 *isp, *prev_esp;
 
-       irqstk = __this_cpu_read(pcpu_hot.softirq_stack_ptr);
+       irqstk = __this_cpu_read(softirq_stack_ptr);
 
        /* build the stack frame on the softirq stack */
        isp = (u32 *) ((char *)irqstk + sizeof(*irqstk));
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index 56bdeecd8ee02a7b8efc23cbecc33734824cd32f..ca78dce39361f806d1810ed439bce07d1f96c9e0 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -26,6 +26,7 @@
 #include <asm/io_apic.h>
 #include <asm/apic.h>
 
+DEFINE_PER_CPU_CACHE_HOT(bool, hardirq_stack_inuse);
 DEFINE_PER_CPU_PAGE_ALIGNED(struct irq_stack, irq_stack_backing_store) __visible;
 
 #ifdef CONFIG_VMAP_STACK
@@ -50,7 +51,7 @@ static int map_irq_stack(unsigned int cpu)
                return -ENOMEM;
 
        /* Store actual TOS to avoid adjustment in the hotpath */
-       per_cpu(pcpu_hot.hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE - 8;
+       per_cpu(hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE - 8;
        return 0;
 }
 #else
@@ -63,14 +64,14 @@ static int map_irq_stack(unsigned int cpu)
        void *va = per_cpu_ptr(&irq_stack_backing_store, cpu);
 
        /* Store actual TOS to avoid adjustment in the hotpath */
-       per_cpu(pcpu_hot.hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE - 8;
+       per_cpu(hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE - 8;
        return 0;
 }
 #endif
 
 int irq_init_percpu_irqstack(unsigned int cpu)
 {
-       if (per_cpu(pcpu_hot.hardirq_stack_ptr, cpu))
+       if (per_cpu(hardirq_stack_ptr, cpu))
                return 0;
        return map_irq_stack(cpu);
 }
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index f983d2a57ac3c1bd380c0c19a6c255ff2367d435..2f38416deb746ff7217518a1653cb9ff9a77fc06 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -614,7 +614,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
        int cpu = smp_processor_id();
 
        WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) &&
-                    this_cpu_read(pcpu_hot.hardirq_stack_inuse));
+                    this_cpu_read(hardirq_stack_inuse));
 
        if (!test_tsk_thread_flag(prev_p, TIF_NEED_FPU_LOAD))
                switch_fpu_prepare(prev_p, cpu);