x86/idle: Select idle routine only once
The idle routine selection is done on every CPU bringup operation, but a
guard makes it take effect only on the first invocation, which turns every
subsequent invocation into a pointless exercise.

Invoke it once on the boot CPU and mark the related functions __init.
The guard check has to stay as xen_set_default_idle() runs early.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/87edcu6vaq.ffs@tglx
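
As an editorial illustration only, here is a minimal userspace sketch of the pattern the message describes: the idle routine is selected once on the boot CPU, but a selection installed earlier (as xen_set_default_idle() does in the kernel) must not be overridden. The plain function pointer, the stand-in idle routines and main() are assumptions made for the sketch; the real kernel routes idle through the x86_idle static call and changes it with static_call_update().

/*
 * Hedged sketch, not kernel source: models "select once, respect an
 * early override" with a plain function pointer instead of the
 * kernel's x86_idle static call.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static void halt_idle(void)  { puts("idle via HLT");   }
static void mwait_idle(void) { puts("idle via MWAIT"); }

/* Stand-in for the x86_idle static call target. */
static void (*x86_idle)(void);

static bool x86_idle_set(void)
{
        return x86_idle != NULL;
}

/* Models xen_set_default_idle(), which runs before boot CPU finalization. */
static void xen_set_default_idle(void)
{
        x86_idle = halt_idle;
}

/* After the change: invoked exactly once, from the boot CPU path. */
static void select_idle_routine(void)
{
        /* Required to guard against the early Xen override above. */
        if (x86_idle_set())
                return;

        x86_idle = mwait_idle;  /* assume MWAIT C1 is preferred */
}

int main(void)
{
        xen_set_default_idle();  /* remove this call to see the MWAIT path */
        select_idle_routine();   /* boot CPU, once */
        x86_idle();              /* prints "idle via HLT" here */
        return 0;
}

Because the guard keeps the earlier choice, running the sketch as-is prints "idle via HLT"; dropping the xen_set_default_idle() call selects mwait_idle instead.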
KAGA-KOKO authored and bp3tk0v committed Mar 4, 2024
1 parent 5f75916 commit 35ce649
Showing 3 changed files with 8 additions and 6 deletions.
arch/x86/include/asm/processor.h (2 changes: 1 addition & 1 deletion)
@@ -558,7 +558,7 @@ static inline void load_sp0(unsigned long sp0)

 unsigned long __get_wchan(struct task_struct *p);

-extern void select_idle_routine(const struct cpuinfo_x86 *c);
+extern void select_idle_routine(void);
 extern void amd_e400_c1e_apic_setup(void);

 extern unsigned long boot_option_idle_override;
arch/x86/kernel/cpu/common.c (4 changes: 2 additions & 2 deletions)
@@ -1938,8 +1938,6 @@ static void identify_cpu(struct cpuinfo_x86 *c)
         /* Init Machine Check Exception if available. */
         mcheck_cpu_init(c);

-        select_idle_routine(c);
-
 #ifdef CONFIG_NUMA
         numa_add_cpu(smp_processor_id());
 #endif
@@ -2344,6 +2342,8 @@ void __init arch_cpu_finalize_init(void)
 {
         identify_boot_cpu();

+        select_idle_routine();
+
         /*
          * identify_boot_cpu() initialized SMT support information, let the
          * core code know.
arch/x86/kernel/process.c (8 changes: 5 additions & 3 deletions)
@@ -853,8 +853,9 @@ void __noreturn stop_this_cpu(void *dummy)
  * Do not prefer MWAIT if MONITOR instruction has a bug or idle=nomwait
  * is passed to kernel commandline parameter.
  */
-static bool prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
+static __init bool prefer_mwait_c1_over_halt(void)
 {
+        const struct cpuinfo_x86 *c = &boot_cpu_data;
         u32 eax, ebx, ecx, edx;

         /* If override is enforced on the command line, fall back to HALT. */
@@ -908,18 +909,19 @@ static __cpuidle void mwait_idle(void)
         __current_clr_polling();
 }

-void select_idle_routine(const struct cpuinfo_x86 *c)
+void __init select_idle_routine(void)
 {
         if (boot_option_idle_override == IDLE_POLL) {
                 if (IS_ENABLED(CONFIG_SMP) && smp_num_siblings > 1)
                         pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
                 return;
         }

+        /* Required to guard against xen_set_default_idle() */
         if (x86_idle_set())
                 return;

-        if (prefer_mwait_c1_over_halt(c)) {
+        if (prefer_mwait_c1_over_halt()) {
                 pr_info("using mwait in idle threads\n");
                 static_call_update(x86_idle, mwait_idle);
         } else if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST)) {
