author     Thomas Gleixner <tglx@linutronix.de>      2024-02-29 01:20:32 +0300
committer  Borislav Petkov (AMD) <bp@alien8.de>      2024-03-04 19:39:24 +0300
commit     35ce64922c8263448e58a2b9e8d15a64e11e9b2d (patch)
tree       48f9c9e832c298fa03e74a9d56ee6c944065c89f /arch/x86
parent     5f75916ec6ecdc6314b637746f3ad809f2fc7379 (diff)
download   linux-35ce64922c8263448e58a2b9e8d15a64e11e9b2d.tar.xz
x86/idle: Select idle routine only once
The idle routine selection is done on every CPU bringup operation, but a
guard makes every invocation after the first one a no-op, so the repeated
calls are a pointless exercise.
Invoke it once on the boot CPU and mark the related functions __init.
The guard check has to stay as xen_set_default_idle() runs early.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/87edcu6vaq.ffs@tglx
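For illustration only, here is a minimal user-space C sketch of the pattern the
patch moves to. The names (pick_idle_routine(), early_override_idle(),
idle_routine) are hypothetical stand-ins, not the kernel's static-call
machinery: the routine is chosen exactly once during boot finalization rather
than per CPU, and the guard keeps an early override (in the spirit of
xen_set_default_idle()) intact.

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical stand-ins for the kernel's idle callbacks. */
static void halt_idle(void)  { puts("idle: hlt"); }
static void mwait_idle(void) { puts("idle: mwait"); }

/* Analogous to the x86_idle target; NULL until a routine is selected. */
static void (*idle_routine)(void);

/* Early override path, similar in spirit to xen_set_default_idle(). */
static void early_override_idle(void (*fn)(void))
{
	idle_routine = fn;
}

/* Runs once at boot finalization; the guard honors an early override. */
static void pick_idle_routine(bool mwait_usable)
{
	if (idle_routine)	/* already set early, e.g. by Xen */
		return;

	idle_routine = mwait_usable ? mwait_idle : halt_idle;
}

int main(void)
{
	/* Boot finalization: selection happens exactly once, not per CPU. */
	pick_idle_routine(true);

	/* Every CPU bringup afterwards just uses the selected routine. */
	idle_routine();
	return 0;
}

In the actual patch the same idea is expressed by calling select_idle_routine()
from arch_cpu_finalize_init() on the boot CPU only, which is what allows the
selection helpers to be marked __init.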
Diffstat (limited to 'arch/x86')
-rw-r--r--   arch/x86/include/asm/processor.h   2
-rw-r--r--   arch/x86/kernel/cpu/common.c       4
-rw-r--r--   arch/x86/kernel/process.c          8
3 files changed, 8 insertions, 6 deletions
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 1188e8bf76a2..523c466c2fc9 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -558,7 +558,7 @@ static inline void load_sp0(unsigned long sp0)
 
 unsigned long __get_wchan(struct task_struct *p);
 
-extern void select_idle_routine(const struct cpuinfo_x86 *c);
+extern void select_idle_routine(void);
 extern void amd_e400_c1e_apic_setup(void);
 
 extern unsigned long boot_option_idle_override;
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 8f367d376520..5c72af16dd06 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1938,8 +1938,6 @@ static void identify_cpu(struct cpuinfo_x86 *c)
 	/* Init Machine Check Exception if available. */
 	mcheck_cpu_init(c);
 
-	select_idle_routine(c);
-
 #ifdef CONFIG_NUMA
 	numa_add_cpu(smp_processor_id());
 #endif
@@ -2344,6 +2342,8 @@ void __init arch_cpu_finalize_init(void)
 {
 	identify_boot_cpu();
 
+	select_idle_routine();
+
 	/*
 	 * identify_boot_cpu() initialized SMT support information, let the
 	 * core code know.
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index ccaacc7f9681..f0166b31a803 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -853,8 +853,9 @@ void __noreturn stop_this_cpu(void *dummy)
  * Do not prefer MWAIT if MONITOR instruction has a bug or idle=nomwait
  * is passed to kernel commandline parameter.
  */
-static bool prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
+static __init bool prefer_mwait_c1_over_halt(void)
 {
+	const struct cpuinfo_x86 *c = &boot_cpu_data;
 	u32 eax, ebx, ecx, edx;
 
 	/* If override is enforced on the command line, fall back to HALT. */
@@ -908,7 +909,7 @@ static __cpuidle void mwait_idle(void)
 	__current_clr_polling();
 }
 
-void select_idle_routine(const struct cpuinfo_x86 *c)
+void __init select_idle_routine(void)
 {
 	if (boot_option_idle_override == IDLE_POLL) {
 		if (IS_ENABLED(CONFIG_SMP) && smp_num_siblings > 1)
@@ -916,10 +917,11 @@ void select_idle_routine(const struct cpuinfo_x86 *c)
 		return;
 	}
 
+	/* Required to guard against xen_set_default_idle() */
 	if (x86_idle_set())
 		return;
 
-	if (prefer_mwait_c1_over_halt(c)) {
+	if (prefer_mwait_c1_over_halt()) {
 		pr_info("using mwait in idle threads\n");
 		static_call_update(x86_idle, mwait_idle);
 	} else if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST)) {