x86/smp: Allow calling mwait_play_dead with an arbitrary hint
Author:     Patryk Wlazlyn <patryk.wlazlyn@linux.intel.com>
AuthorDate: Wed, 5 Feb 2025 15:52:08 +0000 (17:52 +0200)
Committer:  Dave Hansen <dave.hansen@linux.intel.com>
CommitDate: Wed, 5 Feb 2025 18:44:52 +0000 (10:44 -0800)
Introduce a helper function to allow offlined CPUs to enter idle states
with a specific MWAIT hint. The new helper will be used in subsequent
patches by the acpi_idle and intel_idle drivers.

No functional change intended.

Signed-off-by: Patryk Wlazlyn <patryk.wlazlyn@linux.intel.com>
Signed-off-by: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Reviewed-by: Gautham R. Shenoy <gautham.shenoy@amd.com>
Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Link: https://lore.kernel.org/all/20250205155211.329780-2-artem.bityutskiy%40linux.intel.com
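
The helper is not wired up to any driver in this patch. As a rough sketch of
the intended call pattern (the real acpi_idle and intel_idle hooks arrive in
the follow-up patches of this series), a cpuidle driver's :enter_dead callback
could pass a state-specific hint straight through. my_driver_enter_dead() and
state_to_mwait_hint() below are hypothetical names, not part of this series:

	#include <linux/cpuidle.h>
	#include <asm/smp.h>

	/*
	 * Illustrative only: translate the driver's C-state entry into an
	 * MWAIT EAX hint and park the offlined CPU with it.
	 */
	static int my_driver_enter_dead(struct cpuidle_device *dev, int index)
	{
		unsigned int eax_hint = state_to_mwait_hint(dev, index);

		/* Does not return: the CPU idles in MWAIT until kexec/online. */
		mwait_play_dead(eax_hint);

		return 0;
	}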
arch/x86/include/asm/smp.h
arch/x86/kernel/smpboot.c

diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index ca073f40698fad98b0ded3ea854dc35c203fa5f4..80f8bfd83fc7b4b267bedcaa14e3e46e6c0e7b83 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -114,6 +114,7 @@ void wbinvd_on_cpu(int cpu);
 int wbinvd_on_all_cpus(void);
 
 void smp_kick_mwait_play_dead(void);
+void mwait_play_dead(unsigned int eax_hint);
 
 void native_smp_send_reschedule(int cpu);
 void native_send_call_func_ipi(const struct cpumask *mask);
@@ -164,6 +165,8 @@ static inline struct cpumask *cpu_llc_shared_mask(int cpu)
 {
        return (struct cpumask *)cpumask_of(0);
 }
+
+static inline void mwait_play_dead(unsigned int eax_hint) { }
 #endif /* CONFIG_SMP */
 
 #ifdef CONFIG_DEBUG_NMI_SELFTEST
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index c10850ae6f094db892ba5c741ce7986f3712bee6..8aad14e43f549520e0125e0cbe3a5cbddd194b1f 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1258,47 +1258,9 @@ void play_dead_common(void)
        local_irq_disable();
 }
 
-/*
- * We need to flush the caches before going to sleep, lest we have
- * dirty data in our caches when we come back up.
- */
-static inline void mwait_play_dead(void)
+void __noreturn mwait_play_dead(unsigned int eax_hint)
 {
        struct mwait_cpu_dead *md = this_cpu_ptr(&mwait_cpu_dead);
-       unsigned int eax, ebx, ecx, edx;
-       unsigned int highest_cstate = 0;
-       unsigned int highest_subcstate = 0;
-       int i;
-
-       if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
-           boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
-               return;
-       if (!this_cpu_has(X86_FEATURE_MWAIT))
-               return;
-       if (!this_cpu_has(X86_FEATURE_CLFLUSH))
-               return;
-
-       eax = CPUID_LEAF_MWAIT;
-       ecx = 0;
-       native_cpuid(&eax, &ebx, &ecx, &edx);
-
-       /*
-        * eax will be 0 if EDX enumeration is not valid.
-        * Initialized below to cstate, sub_cstate value when EDX is valid.
-        */
-       if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED)) {
-               eax = 0;
-       } else {
-               edx >>= MWAIT_SUBSTATE_SIZE;
-               for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
-                       if (edx & MWAIT_SUBSTATE_MASK) {
-                               highest_cstate = i;
-                               highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
-                       }
-               }
-               eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
-                       (highest_subcstate - 1);
-       }
 
        /* Set up state for the kexec() hack below */
        md->status = CPUDEAD_MWAIT_WAIT;
@@ -1319,7 +1281,7 @@ static inline void mwait_play_dead(void)
                mb();
                __monitor(md, 0, 0);
                mb();
-               __mwait(eax, 0);
+               __mwait(eax_hint, 0);
 
                if (READ_ONCE(md->control) == CPUDEAD_MWAIT_KEXEC_HLT) {
                        /*
@@ -1341,6 +1303,50 @@ static inline void mwait_play_dead(void)
        }
 }
 
+/*
+ * We need to flush the caches before going to sleep, lest we have
+ * dirty data in our caches when we come back up.
+ */
+static inline void mwait_play_dead_cpuid_hint(void)
+{
+       unsigned int eax, ebx, ecx, edx;
+       unsigned int highest_cstate = 0;
+       unsigned int highest_subcstate = 0;
+       int i;
+
+       if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+           boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+               return;
+       if (!this_cpu_has(X86_FEATURE_MWAIT))
+               return;
+       if (!this_cpu_has(X86_FEATURE_CLFLUSH))
+               return;
+
+       eax = CPUID_LEAF_MWAIT;
+       ecx = 0;
+       native_cpuid(&eax, &ebx, &ecx, &edx);
+
+       /*
+        * eax will be 0 if EDX enumeration is not valid.
+        * Initialized below to cstate, sub_cstate value when EDX is valid.
+        */
+       if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED)) {
+               eax = 0;
+       } else {
+               edx >>= MWAIT_SUBSTATE_SIZE;
+               for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
+                       if (edx & MWAIT_SUBSTATE_MASK) {
+                               highest_cstate = i;
+                               highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
+                       }
+               }
+               eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
+                       (highest_subcstate - 1);
+       }
+
+       mwait_play_dead(eax);
+}
+
 /*
  * Kick all "offline" CPUs out of mwait on kexec(). See comment in
  * mwait_play_dead().
@@ -1391,7 +1397,7 @@ void native_play_dead(void)
        play_dead_common();
        tboot_shutdown(TB_SHUTDOWN_WFS);
 
-       mwait_play_dead();
+       mwait_play_dead_cpuid_hint();
        if (cpuidle_play_dead())
                hlt_play_dead();
 }
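
For reference, the EAX hint that mwait_play_dead_cpuid_hint() derives from
CPUID leaf 0x5 packs the target state as EAX[7:4] = C-state (0 selects C1)
and EAX[3:0] = sub-state. A standalone sketch of that arithmetic, reusing the
kernel's constants from <asm/mwait.h> and a made-up EDX value:

	#include <stdio.h>

	/* Values from arch/x86/include/asm/mwait.h */
	#define MWAIT_SUBSTATE_SIZE	4
	#define MWAIT_SUBSTATE_MASK	0xf

	int main(void)
	{
		/*
		 * Hypothetical CPUID.05H:EDX: 4-bit fields counting MWAIT
		 * sub-states per C-state. Here C0 = 0, C1 = 2, C2 = 3.
		 */
		unsigned int edx = 0x00000320;
		unsigned int highest_cstate = 0, highest_subcstate = 0;
		unsigned int eax_hint;
		int i;

		/* Skip the C0 field, then scan C1..C7 as the kernel loop does. */
		edx >>= MWAIT_SUBSTATE_SIZE;
		for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
			if (edx & MWAIT_SUBSTATE_MASK) {
				highest_cstate = i;
				highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
			}
		}

		/* (C2 << 4) | (sub-state 3 - 1)  ==>  hint 0x12 */
		eax_hint = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
			   (highest_subcstate - 1);

		printf("MWAIT hint: 0x%02x\n", eax_hint);
		return 0;
	}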