author     Len Brown <len.brown@intel.com>    2007-03-20 11:06:37 -0400
committer  Len Brown <len.brown@intel.com>    2007-03-20 11:06:37 -0400
commit     cddece4beccaa72dcb57d64a7f1e496b2e61a16b (patch)
tree       9e2f0ba308bf9122c80c283fb95c93b5b19d8675
parent     b25e84425ee21c5560fcaec15afcf58fe4a0a414 (diff)
parent     25496caec111481161e7f06bbfa12a533c43cc6f (diff)
Pull c2 into release branch
-rw-r--r--   drivers/acpi/processor_idle.c   38
1 file changed, 29 insertions(+), 9 deletions(-)
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 60773005b8a..562124ed785 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -89,6 +89,12 @@ module_param(nocst, uint, 0000);
static unsigned int bm_history __read_mostly =
(HZ >= 800 ? 0xFFFFFFFF : ((1U << (HZ / 25)) - 1));
module_param(bm_history, uint, 0644);
+
+static unsigned use_ipi = 2;
+module_param(use_ipi, uint, 0644);
+MODULE_PARM_DESC(use_ipi, "IPI (vs. LAPIC) irqs for not waking up from C2/C3"
+ " machines. 0=apic, 1=ipi, 2=auto\n");
+
/* --------------------------------------------------------------------------
Power Management
-------------------------------------------------------------------------- */
@@ -260,9 +266,8 @@ static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
/*
* Some BIOS implementations switch to C3 in the published C2 state.
- * This seems to be a common problem on AMD boxen, but other vendors
- * are affected too. We pick the most conservative approach: we assume
- * that the local APIC stops in both C2 and C3.
+ * This seems to be a common problem on AMD boxen and Intel Dothan/Banias
+ * Pentium M machines.
*/
static void acpi_timer_check_state(int state, struct acpi_processor *pr,
struct acpi_processor_cx *cx)
@@ -276,8 +281,17 @@ static void acpi_timer_check_state(int state, struct acpi_processor *pr,
if (pwr->timer_broadcast_on_state < state)
return;
- if (cx->type >= ACPI_STATE_C2)
- pr->power.timer_broadcast_on_state = state;
+ if (cx->type >= ACPI_STATE_C2) {
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+ pr->power.timer_broadcast_on_state = state;
+ else if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+ boot_cpu_data.x86 == 6) &&
+ (boot_cpu_data.x86_model == 13 ||
+ boot_cpu_data.x86_model == 9))
+ {
+ pr->power.timer_broadcast_on_state = state;
+ }
+ }
}
static void acpi_propagate_timer_broadcast(struct acpi_processor *pr)
@@ -292,10 +306,16 @@ static void acpi_propagate_timer_broadcast(struct acpi_processor *pr)
#else
cpumask_t mask = cpumask_of_cpu(pr->id);
- if (pr->power.timer_broadcast_on_state < INT_MAX)
+ if (use_ipi == 0)
on_each_cpu(switch_APIC_timer_to_ipi, &mask, 1, 1);
- else
+ else if (use_ipi == 1)
on_each_cpu(switch_ipi_to_APIC_timer, &mask, 1, 1);
+ else {
+ if (pr->power.timer_broadcast_on_state < INT_MAX)
+ on_each_cpu(switch_APIC_timer_to_ipi, &mask, 1, 1);
+ else
+ on_each_cpu(switch_ipi_to_APIC_timer, &mask, 1, 1);
+ }
#endif
}
@@ -1013,13 +1033,13 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
case ACPI_STATE_C2:
acpi_processor_power_verify_c2(cx);
- if (cx->valid)
+ if (cx->valid && use_ipi != 0 && use_ipi != 1)
acpi_timer_check_state(i, pr, cx);
break;
case ACPI_STATE_C3:
acpi_processor_power_verify_c3(pr, cx);
- if (cx->valid)
+ if (cx->valid && use_ipi != 0 && use_ipi != 1)
acpi_timer_check_state(i, pr, cx);
break;
}
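
Taken together, the hunks above implement a three-way policy: use_ipi=0 and use_ipi=1 force the broadcast decision one way or the other (and skip acpi_timer_check_state() entirely), while the default of 2 only enables timer broadcast for C2/C3 on CPUs known to stop the local APIC timer in those states (AMD parts, plus Intel family 6 models 9 and 13, i.e. Banias/Dothan Pentium M). Below is a minimal user-space sketch of that decision table; the cpu_id struct, pick_timer_mode() and the other names are illustrative stand-ins rather than kernel symbols, and only the conditions mirror the patch.

	/* Standalone sketch (not kernel code) of the decision this patch makes. */
	#include <stdio.h>

	enum vendor { VENDOR_AMD, VENDOR_INTEL, VENDOR_OTHER };	/* stand-in for X86_VENDOR_* */
	enum timer_mode { LAPIC_TIMER, BROADCAST_IPI };

	struct cpu_id {
		enum vendor vendor;	/* stand-in for boot_cpu_data.x86_vendor */
		int family;		/* stand-in for boot_cpu_data.x86 */
		int model;		/* stand-in for boot_cpu_data.x86_model */
	};

	/* Mirrors acpi_timer_check_state(): after this patch the local APIC timer
	 * is assumed to stop in C2/C3 only on AMD CPUs and on Intel family 6,
	 * models 9 and 13 (Banias/Dothan Pentium M), rather than on every CPU.
	 * In the kernel this check is only consulted for valid C2/C3 states. */
	static int lapic_stops_in_c2_c3(const struct cpu_id *c)
	{
		if (c->vendor == VENDOR_AMD)
			return 1;
		if (c->vendor == VENDOR_INTEL && c->family == 6 &&
		    (c->model == 9 || c->model == 13))
			return 1;
		return 0;
	}

	/* Mirrors acpi_propagate_timer_broadcast(): use_ipi 0 and 1 force a mode
	 * (0 ends up in switch_APIC_timer_to_ipi(), 1 in switch_ipi_to_APIC_timer()),
	 * while the default of 2 falls back to the per-CPU check above. */
	static enum timer_mode pick_timer_mode(unsigned int use_ipi,
					       const struct cpu_id *c)
	{
		if (use_ipi == 0)
			return BROADCAST_IPI;
		if (use_ipi == 1)
			return LAPIC_TIMER;
		return lapic_stops_in_c2_c3(c) ? BROADCAST_IPI : LAPIC_TIMER;
	}

	int main(void)
	{
		struct cpu_id dothan = { VENDOR_INTEL, 6, 13 };	/* Pentium M (Dothan) */
		struct cpu_id other  = { VENDOR_INTEL, 6, 15 };	/* Intel model outside the list */

		printf("dothan, auto: %s\n",
		       pick_timer_mode(2, &dothan) == BROADCAST_IPI ? "ipi" : "lapic");
		printf("other,  auto: %s\n",
		       pick_timer_mode(2, &other) == BROADCAST_IPI ? "ipi" : "lapic");
		return 0;
	}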
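
One usage note on the design: because use_ipi is registered with module_param(use_ipi, uint, 0644), it is exposed as a root-writable file under /sys/module/.../parameters/ in addition to being settable at module load or on the kernel command line; assuming this code is built into the usual processor module (an assumption, the module name is not stated in the diff), the path would be /sys/module/processor/parameters/use_ipi. The default of 2 keeps the automatic vendor/model check unless an administrator overrides it.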