author		Thomas Gleixner <tglx@linutronix.de>	2008-06-09 16:59:53 +0200
committer	Ingo Molnar <mingo@elte.hu>		2008-06-10 15:52:01 +0200
commit		6ddd2a27948f0bd02a2ad001e8a6816898eba0dc (patch)
tree		4a9a8b4d1186bb5a03df6c26f23d4a4a8ba6f7bd /arch/x86
parent		9e26d84273541a8c6c2efb705457ca8e6245fb73 (diff)
x86: simplify idle selection
default_idle is selected in cpu_idle() when no other idle routine has been selected. Select it in select_idle_routine() when mwait is not selected.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
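As a rough illustration of the behaviour after this patch, here is a minimal user-space sketch (not the kernel code itself): the has_mwait flag stands in for cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c), and main() stands in for the cpu_idle() loop.

/*
 * Illustrative sketch only: models the patched selection logic in user space.
 * select_idle_routine() now always sets pm_idle, so callers can invoke it
 * without the old "if (!idle) idle = default_idle" fallback.
 */
#include <stdio.h>

static void (*pm_idle)(void);

static void default_idle(void) { puts("default_idle"); }
static void mwait_idle(void)   { puts("mwait_idle"); }

static void select_idle_routine(int has_mwait)	/* has_mwait: stand-in flag */
{
	if (pm_idle)	/* an "idle=" boot override already chose a routine */
		return;

	if (has_mwait) {
		printf("using mwait in idle threads.\n");
		pm_idle = mwait_idle;
	} else
		pm_idle = default_idle;
}

int main(void)
{
	select_idle_routine(0);
	pm_idle();	/* the idle loop calls pm_idle() without a NULL check */
	return 0;
}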
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/kernel/process.c	18
-rw-r--r--	arch/x86/kernel/process_32.c	7
-rw-r--r--	arch/x86/kernel/process_64.c	7
3 files changed, 10 insertions(+), 22 deletions(-)
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index ba370dc8685..b3078f4ce25 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -139,27 +139,23 @@ static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
 void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
 {
-	static int selected;
-
-	if (selected)
-		return;
 #ifdef CONFIG_X86_SMP
 	if (pm_idle == poll_idle && smp_num_siblings > 1) {
 		printk(KERN_WARNING "WARNING: polling idle and HT enabled,"
 			" performance may degrade.\n");
 	}
 #endif
+	if (pm_idle)
+		return;
+
 	if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
 		/*
-		 * Skip, if setup has overridden idle.
 		 * One CPU supports mwait => All CPUs supports mwait
 		 */
-		if (!pm_idle) {
-			printk(KERN_INFO "using mwait in idle threads.\n");
-			pm_idle = mwait_idle;
-		}
-	}
-	selected = 1;
+		printk(KERN_INFO "using mwait in idle threads.\n");
+		pm_idle = mwait_idle;
+	} else
+		pm_idle = default_idle;
 }

 static int __init idle_setup(char *str)
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index f8476dfbb60..ee4ab461c50 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -168,24 +168,19 @@ void cpu_idle(void)
 	while (1) {
 		tick_nohz_stop_sched_tick();
 		while (!need_resched()) {
-			void (*idle)(void);
 			check_pgt_cache();
 			rmb();
-			idle = pm_idle;
 			if (rcu_pending(cpu))
 				rcu_check_callbacks(cpu, 0);
-			if (!idle)
-				idle = default_idle;
-
 			if (cpu_is_offline(cpu))
 				play_dead();
 			local_irq_disable();
 			__get_cpu_var(irq_stat).idle_timestamp = jiffies;
-			idle();
+			pm_idle();
 		}
 		tick_nohz_restart_sched_tick();
 		preempt_enable_no_resched();
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index e2319f39988..db3d89a0439 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -150,12 +150,9 @@ void cpu_idle(void)
 	while (1) {
 		tick_nohz_stop_sched_tick();
 		while (!need_resched()) {
-			void (*idle)(void);
 			rmb();
-			idle = pm_idle;
-			if (!idle)
-				idle = default_idle;
+
 			if (cpu_is_offline(smp_processor_id()))
 				play_dead();
 			/*
@@ -165,7 +162,7 @@ void cpu_idle(void)
 			 */
 			local_irq_disable();
 			enter_idle();
-			idle();
+			pm_idle();
 			/* In many cases the interrupt that ended idle
 			   has already called exit_idle. But some idle
 			   loops can be woken up without interrupt. */