author	Uwe Kleine-König <ukleinek@debian.org>	2024-10-06 23:51:06 +0300
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2024-10-10 13:01:08 +0300
commit	c0bdc6c6a97cdda5627ab9f7e0596720e70d30d5 (patch)
tree	1158048a7d7a94c92efaa745065d3dfdcf9cb325
parent	73e441be033d3ed0bdff09b575da3e7d4606ffc9 (diff)
download	linux-c0bdc6c6a97cdda5627ab9f7e0596720e70d30d5.tar.xz
cpufreq: intel_pstate: Make hwp_notify_lock a raw spinlock
commit 8b4865cd904650cbed7f2407e653934c621b8127 upstream.

notify_hwp_interrupt() is called via sysvec_thermal() ->
smp_thermal_vector() -> intel_thermal_interrupt() in hard irq context.
For this reason it must not use a simple spin_lock that sleeps with
PREEMPT_RT enabled. So convert it to a raw spinlock.

Reported-by: xiao sheng wen <atzlinux@sina.com>
Link: https://bugs.debian.org/1076483
Signed-off-by: Uwe Kleine-König <ukleinek@debian.org>
Acked-by: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
Acked-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Tested-by: xiao sheng wen <atzlinux@sina.com>
Link: https://patch.msgid.link/20240919081121.10784-2-ukleinek@debian.org
Cc: All applicable <stable@vger.kernel.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
[ukleinek: Backport to v6.10.y]
Signed-off-by: Uwe Kleine-König <ukleinek@debian.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
-rw-r--r--	drivers/cpufreq/intel_pstate.c	16
1 file changed, 8 insertions(+), 8 deletions(-)
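For context on the locking rule this patch relies on: with PREEMPT_RT enabled, an ordinary spinlock_t is backed by a sleeping lock, so it must not be taken in hard irq context, whereas a raw_spinlock_t always spins and remains safe there. Below is a minimal, illustrative sketch of that pattern; it is not code from intel_pstate, and the lock, counter, and handler names are made up for illustration.

/*
 * Illustrative sketch only: a handler running in hard irq context must
 * use a raw spinlock, because on PREEMPT_RT a regular spinlock_t can
 * sleep and must not be taken there. Names below are hypothetical.
 */
#include <linux/interrupt.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(example_lock);	/* stays a spinning lock on RT */
static unsigned long example_events;

static irqreturn_t example_hardirq_handler(int irq, void *dev_id)
{
	unsigned long flags;

	/* Safe in hard irq context on both RT and non-RT kernels. */
	raw_spin_lock_irqsave(&example_lock, flags);
	example_events++;
	raw_spin_unlock_irqrestore(&example_lock, flags);

	return IRQ_HANDLED;
}

The same reasoning applies to hwp_notify_lock in the diff below: notify_hwp_interrupt() is reached from intel_thermal_interrupt() in hard irq context, so the lock it takes is converted to a raw spinlock.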
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index c31914a9876f..b694e474acec 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -1622,7 +1622,7 @@ static void intel_pstate_notify_work(struct work_struct *work)
wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
}
-static DEFINE_SPINLOCK(hwp_notify_lock);
+static DEFINE_RAW_SPINLOCK(hwp_notify_lock);
static cpumask_t hwp_intr_enable_mask;
void notify_hwp_interrupt(void)
@@ -1638,7 +1638,7 @@ void notify_hwp_interrupt(void)
if (!(value & 0x01))
return;
- spin_lock_irqsave(&hwp_notify_lock, flags);
+ raw_spin_lock_irqsave(&hwp_notify_lock, flags);
if (!cpumask_test_cpu(this_cpu, &hwp_intr_enable_mask))
goto ack_intr;
@@ -1646,13 +1646,13 @@ void notify_hwp_interrupt(void)
schedule_delayed_work(&all_cpu_data[this_cpu]->hwp_notify_work,
msecs_to_jiffies(10));
- spin_unlock_irqrestore(&hwp_notify_lock, flags);
+ raw_spin_unlock_irqrestore(&hwp_notify_lock, flags);
return;
ack_intr:
wrmsrl_safe(MSR_HWP_STATUS, 0);
- spin_unlock_irqrestore(&hwp_notify_lock, flags);
+ raw_spin_unlock_irqrestore(&hwp_notify_lock, flags);
}
static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata)
@@ -1665,9 +1665,9 @@ static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata)
/* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */
wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
- spin_lock_irq(&hwp_notify_lock);
+ raw_spin_lock_irq(&hwp_notify_lock);
cancel_work = cpumask_test_and_clear_cpu(cpudata->cpu, &hwp_intr_enable_mask);
- spin_unlock_irq(&hwp_notify_lock);
+ raw_spin_unlock_irq(&hwp_notify_lock);
if (cancel_work)
cancel_delayed_work_sync(&cpudata->hwp_notify_work);
@@ -1677,10 +1677,10 @@ static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata)
{
/* Enable HWP notification interrupt for guaranteed performance change */
if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) {
- spin_lock_irq(&hwp_notify_lock);
+ raw_spin_lock_irq(&hwp_notify_lock);
INIT_DELAYED_WORK(&cpudata->hwp_notify_work, intel_pstate_notify_work);
cpumask_set_cpu(cpudata->cpu, &hwp_intr_enable_mask);
- spin_unlock_irq(&hwp_notify_lock);
+ raw_spin_unlock_irq(&hwp_notify_lock);
/* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */
wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x01);