From: Uwe Kleine-König
Date: Thu, 19 Sep 2024 08:11:21 +0000 (+0200)
Subject: cpufreq: intel_pstate: Make hwp_notify_lock a raw spinlock
X-Git-Tag: v6.12-rc2~19^2~1
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=8b4865cd904650cbed7f2407e653934c621b8127;p=thirdparty%2Fkernel%2Flinux.git

cpufreq: intel_pstate: Make hwp_notify_lock a raw spinlock

notify_hwp_interrupt() is called via sysvec_thermal() ->
smp_thermal_vector() -> intel_thermal_interrupt() in hard irq context.
For this reason it must not take a plain spinlock, which becomes a
sleeping lock when PREEMPT_RT is enabled. So convert it to a raw
spinlock.

Reported-by: xiao sheng wen
Link: https://bugs.debian.org/1076483
Signed-off-by: Uwe Kleine-König
Acked-by: Srinivas Pandruvada
Acked-by: Sebastian Andrzej Siewior
Tested-by: xiao sheng wen
Link: https://patch.msgid.link/20240919081121.10784-2-ukleinek@debian.org
Cc: All applicable
Signed-off-by: Rafael J. Wysocki
---

diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index aaea9a39ecedb..b0018f371ea3a 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -1845,7 +1845,7 @@ static void intel_pstate_notify_work(struct work_struct *work)
 	wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
 }
 
-static DEFINE_SPINLOCK(hwp_notify_lock);
+static DEFINE_RAW_SPINLOCK(hwp_notify_lock);
 static cpumask_t hwp_intr_enable_mask;
 
 #define HWP_GUARANTEED_PERF_CHANGE_STATUS	BIT(0)
@@ -1868,7 +1868,7 @@ void notify_hwp_interrupt(void)
 	if (!(value & status_mask))
 		return;
 
-	spin_lock_irqsave(&hwp_notify_lock, flags);
+	raw_spin_lock_irqsave(&hwp_notify_lock, flags);
 
 	if (!cpumask_test_cpu(this_cpu, &hwp_intr_enable_mask))
 		goto ack_intr;
@@ -1876,13 +1876,13 @@ void notify_hwp_interrupt(void)
 	schedule_delayed_work(&all_cpu_data[this_cpu]->hwp_notify_work,
 			      msecs_to_jiffies(10));
 
-	spin_unlock_irqrestore(&hwp_notify_lock, flags);
+	raw_spin_unlock_irqrestore(&hwp_notify_lock, flags);
 
 	return;
 
 ack_intr:
 	wrmsrl_safe(MSR_HWP_STATUS, 0);
-	spin_unlock_irqrestore(&hwp_notify_lock, flags);
+	raw_spin_unlock_irqrestore(&hwp_notify_lock, flags);
 }
 
 static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata)
@@ -1895,9 +1895,9 @@ static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata)
 	/* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */
 	wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
 
-	spin_lock_irq(&hwp_notify_lock);
+	raw_spin_lock_irq(&hwp_notify_lock);
 	cancel_work = cpumask_test_and_clear_cpu(cpudata->cpu, &hwp_intr_enable_mask);
-	spin_unlock_irq(&hwp_notify_lock);
+	raw_spin_unlock_irq(&hwp_notify_lock);
 
 	if (cancel_work)
 		cancel_delayed_work_sync(&cpudata->hwp_notify_work);
@@ -1912,10 +1912,10 @@ static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata)
 	if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) {
 		u64 interrupt_mask = HWP_GUARANTEED_PERF_CHANGE_REQ;
 
-		spin_lock_irq(&hwp_notify_lock);
+		raw_spin_lock_irq(&hwp_notify_lock);
 		INIT_DELAYED_WORK(&cpudata->hwp_notify_work, intel_pstate_notify_work);
 		cpumask_set_cpu(cpudata->cpu, &hwp_intr_enable_mask);
-		spin_unlock_irq(&hwp_notify_lock);
+		raw_spin_unlock_irq(&hwp_notify_lock);
 
 		if (cpu_feature_enabled(X86_FEATURE_HWP_HIGHEST_PERF_CHANGE))
 			interrupt_mask |= HWP_HIGHEST_PERF_CHANGE_REQ;
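
Editor's note on the pattern the commit relies on: under PREEMPT_RT a
plain spinlock_t is substituted with a sleeping rtmutex-based lock,
while raw_spinlock_t keeps the classic busy-spinning behaviour and so
remains legal in hard irq context. The following is a minimal sketch of
that pattern, not part of the patch; the handler and work item names
(example_*) are hypothetical, invented purely for illustration.

#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

/* raw_spinlock_t never sleeps, even with PREEMPT_RT enabled */
static DEFINE_RAW_SPINLOCK(example_lock);
static struct delayed_work example_work;

static irqreturn_t example_hardirq_handler(int irq, void *dev_id)
{
	unsigned long flags;

	/*
	 * spin_lock_irqsave() on a plain spinlock_t would be invalid
	 * here on PREEMPT_RT, since the lock could sleep while we are
	 * in hard irq context. The raw variant always busy-spins.
	 */
	raw_spin_lock_irqsave(&example_lock, flags);

	/* Keep the critical section short; defer the real work to
	 * process context, as notify_hwp_interrupt() does. */
	schedule_delayed_work(&example_work, msecs_to_jiffies(10));

	raw_spin_unlock_irqrestore(&example_lock, flags);

	return IRQ_HANDLED;
}

Deferring the actual MSR handling to a delayed work item, as the patch
does, keeps the raw-spinlock critical section short, which matters
because raw spinlocks disable preemption even on RT kernels.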