author		Ingo Molnar <mingo@elte.hu>	2008-12-08 14:20:16 +0100
committer	Ingo Molnar <mingo@elte.hu>	2008-12-08 15:56:42 +0100
commit		87b9cf4623ad4e5fc009e48c020593dffd5d3793
tree		bd49f47b3fee4f4dc956e7db303fb1c20d53eee3
parent		241771ef016b5c0c83cd7a4372a74321c973c1e6
x86, perfcounters: read out MSR_CORE_PERF_GLOBAL_STATUS with counters disabled
Impact: make perfcounter NMI and IRQ sequence more robust

Make __smp_perf_counter_interrupt() a bit more conservative: first
disable all counters, then read out the status.

Most invocations are because there are real events, so there's no
performance impact.

Code flow gets a bit simpler as well this way.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
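For context, a minimal user-space C sketch of the reordered sequence the
patch arrives at is below. This is not the kernel code: the MSR accessors
(rdmsrl/wrmsr), ack_APIC_irq() and the per-counter overflow handling are
replaced by hypothetical stubs here, so the control flow can be compiled
and traced in isolation.

#include <stdio.h>
#include <stdint.h>

/* Stand-ins for MSR_CORE_PERF_GLOBAL_CTRL / MSR_CORE_PERF_GLOBAL_STATUS. */
static uint64_t global_ctrl   = ~0ULL;
static uint64_t global_status = 0x5;	/* pretend counters 0 and 2 overflowed */

static void     disable_counters(void)	 { global_ctrl = 0; }
static void     ack_apic_irq(void)	 { puts("APIC irq acked"); }
static uint64_t read_status(void)	 { return global_status; }
static void     ack_status(uint64_t a)	 { global_status &= ~a; }
static void     handle_overflow(int bit) { printf("counter %d overflowed\n", bit); }

static void perf_counter_interrupt(void)
{
	uint64_t ack, status;

	/* New ordering: quiesce the PMU first, then sample the status. */
	disable_counters();
	ack_apic_irq();

	status = read_status();
	if (!status)
		goto out;
again:
	ack = status;
	for (int bit = 0; bit < 64; bit++)
		if (status & (1ULL << bit))
			handle_overflow(bit);

	ack_status(ack);

	/* More events may have arrived while we handled the first batch. */
	status = read_status();
	if (status)
		goto again;
out:
	/* Re-enabling (only if globally enabled) would happen here. */
	;
}

int main(void)
{
	perf_counter_interrupt();
	return 0;
}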
 arch/x86/kernel/cpu/perf_counter.c | 12 +++++-------
 1 file changed, 5 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 82440cbed0e..615e953208e 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -383,18 +383,16 @@ static void __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi)
 	struct cpu_hw_counters *cpuc;
 	u64 ack, status;
 
-	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
-	if (!status) {
-		ack_APIC_irq();
-		return;
-	}
-
 	/* Disable counters globally */
 	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0, 0);
 	ack_APIC_irq();
 
 	cpuc = &per_cpu(cpu_hw_counters, cpu);
 
+	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
+	if (!status)
+		goto out;
+
 again:
 	ack = status;
 	for_each_bit(bit, (unsigned long *) &status, nr_hw_counters) {
@@ -440,7 +438,7 @@ again:
 	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
 	if (status)
 		goto again;
-
+out:
 	/*
 	 * Do not reenable when global enable is off:
 	 */