cpufreq: intel_pstate: Make hwp_notify_lock a raw spinlock
Author:     Uwe Kleine-König <ukleinek@debian.org>
AuthorDate: Thu, 19 Sep 2024 08:11:21 +0000 (10:11 +0200)
Commit:     Rafael J. Wysocki <rafael.j.wysocki@intel.com>
CommitDate: Tue, 1 Oct 2024 18:43:39 +0000 (20:43 +0200)
notify_hwp_interrupt() is called via sysvec_thermal() ->
smp_thermal_vector() -> intel_thermal_interrupt() in hard irq context.
For this reason it must not use a plain spinlock, which becomes a
sleeping lock when PREEMPT_RT is enabled. Convert hwp_notify_lock to a
raw spinlock instead.
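
For context, a minimal sketch of the pattern (not the driver's actual
code; everything except the locking API itself is hypothetical): on
PREEMPT_RT, spinlock_t is backed by an rt_mutex and may sleep, while
raw_spinlock_t always busy-waits and so stays safe in hard irq context:

    #include <linux/spinlock.h>

    /* Hypothetical lock: a raw_spinlock_t busy-waits even on
     * PREEMPT_RT, whereas a spinlock_t would block on an rt_mutex. */
    static DEFINE_RAW_SPINLOCK(example_lock);

    /* Sketch of a handler running in hard irq context, where
     * sleeping is forbidden. */
    static void example_hardirq_handler(void)
    {
            unsigned long flags;

            raw_spin_lock_irqsave(&example_lock, flags);
            /* ... update state shared with process context ... */
            raw_spin_unlock_irqrestore(&example_lock, flags);
    }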

Reported-by: xiao sheng wen <atzlinux@sina.com>
Link: https://bugs.debian.org/1076483
Signed-off-by: Uwe Kleine-König <ukleinek@debian.org>
Acked-by: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
Acked-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Tested-by: xiao sheng wen <atzlinux@sina.com>
Link: https://patch.msgid.link/20240919081121.10784-2-ukleinek@debian.org
Cc: All applicable <stable@vger.kernel.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index aaea9a39ecedb4a3f4de5779fd1a26f501e18255..b0018f371ea3a5e996cc9e94dd98e4bc7ae5af02 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -1845,7 +1845,7 @@ static void intel_pstate_notify_work(struct work_struct *work)
        wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
 }
 
-static DEFINE_SPINLOCK(hwp_notify_lock);
+static DEFINE_RAW_SPINLOCK(hwp_notify_lock);
 static cpumask_t hwp_intr_enable_mask;
 
 #define HWP_GUARANTEED_PERF_CHANGE_STATUS      BIT(0)
@@ -1868,7 +1868,7 @@ void notify_hwp_interrupt(void)
        if (!(value & status_mask))
                return;
 
-       spin_lock_irqsave(&hwp_notify_lock, flags);
+       raw_spin_lock_irqsave(&hwp_notify_lock, flags);
 
        if (!cpumask_test_cpu(this_cpu, &hwp_intr_enable_mask))
                goto ack_intr;
@@ -1876,13 +1876,13 @@ void notify_hwp_interrupt(void)
        schedule_delayed_work(&all_cpu_data[this_cpu]->hwp_notify_work,
                              msecs_to_jiffies(10));
 
-       spin_unlock_irqrestore(&hwp_notify_lock, flags);
+       raw_spin_unlock_irqrestore(&hwp_notify_lock, flags);
 
        return;
 
 ack_intr:
        wrmsrl_safe(MSR_HWP_STATUS, 0);
-       spin_unlock_irqrestore(&hwp_notify_lock, flags);
+       raw_spin_unlock_irqrestore(&hwp_notify_lock, flags);
 }
 
 static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata)
@@ -1895,9 +1895,9 @@ static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata)
        /* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */
        wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
 
-       spin_lock_irq(&hwp_notify_lock);
+       raw_spin_lock_irq(&hwp_notify_lock);
        cancel_work = cpumask_test_and_clear_cpu(cpudata->cpu, &hwp_intr_enable_mask);
-       spin_unlock_irq(&hwp_notify_lock);
+       raw_spin_unlock_irq(&hwp_notify_lock);
 
        if (cancel_work)
                cancel_delayed_work_sync(&cpudata->hwp_notify_work);
@@ -1912,10 +1912,10 @@ static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata)
        if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) {
                u64 interrupt_mask = HWP_GUARANTEED_PERF_CHANGE_REQ;
 
-               spin_lock_irq(&hwp_notify_lock);
+               raw_spin_lock_irq(&hwp_notify_lock);
                INIT_DELAYED_WORK(&cpudata->hwp_notify_work, intel_pstate_notify_work);
                cpumask_set_cpu(cpudata->cpu, &hwp_intr_enable_mask);
-               spin_unlock_irq(&hwp_notify_lock);
+               raw_spin_unlock_irq(&hwp_notify_lock);
 
                if (cpu_feature_enabled(X86_FEATURE_HWP_HIGHEST_PERF_CHANGE))
                        interrupt_mask |= HWP_HIGHEST_PERF_CHANGE_REQ;