author     Ingo Molnar <mingo@elte.hu>    2008-12-09 11:40:46 +0100
committer  Ingo Molnar <mingo@elte.hu>    2008-12-09 19:28:48 +0100
commit     7e2ae34749edf19e76e594b9c4b2cdde1066afc5 (patch)
tree       165879c7b508cac9b3238750bb8a3786e018b352 /arch
parent     4c59e4676dc95f6f58a2cff5390b2699fa5b5549 (diff)
perfcounters, x86: simplify disable/enable of counters
Impact: fix spurious missed counter wakeups

In the case of NMI events, close a race window that can occur if an
NMI hits counter code that temporarily disables+enables a counter,
and the NMI leaks into the disabled section.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
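To make the race concrete, here is a minimal user-space model of the old
and new NMI paths. This is a sketch, not kernel code: the MSR and the
software config copy are plain variables, ENABLE_BIT stands in for
ARCH_PERFMON_EVENTSEL0_ENABLE, and nmi_old()/nmi_new() are illustrative
names for the pre- and post-patch perf_save_and_restart() logic.

/*
 * Standalone model of the race this patch closes.  The "MSR" and the
 * software config copy are plain variables.  Illustrative only.
 */
#include <stdio.h>
#include <stdint.h>

#define ENABLE_BIT (1ULL << 22)

static uint64_t msr_eventsel;                 /* models the EVENTSEL MSR  */
static uint64_t soft_config = ENABLE_BIT;     /* old code kept ENABLE here */

/* Old NMI path: trusts the (stale) software copy of the enable bit. */
static void nmi_old(void)
{
	if (soft_config & ENABLE_BIT)
		msr_eventsel = soft_config;   /* re-enables, undoing the
					       * temporary disable below  */
}

/* New NMI path: reads the live control register (rdmsrl_safe() in the
 * patch) and only re-enables if the hardware actually had ENABLE set. */
static void nmi_new(void)
{
	uint64_t pmc_ctrl = msr_eventsel;
	/* ...save the counter and set the next period unconditionally... */
	if (pmc_ctrl & ENABLE_BIT)
		msr_eventsel |= ENABLE_BIT;
}

int main(void)
{
	/* Counter code temporarily disables the counter in hardware: */
	msr_eventsel = soft_config & ~ENABLE_BIT;
	nmi_old();                            /* NMI leaks in right here */
	printf("old: enable bit after NMI = %d (disable undone)\n",
	       !!(msr_eventsel & ENABLE_BIT));

	msr_eventsel = soft_config & ~ENABLE_BIT;
	nmi_new();
	printf("new: enable bit after NMI = %d (disable preserved)\n",
	       !!(msr_eventsel & ENABLE_BIT));
	return 0;
}

Running it prints 1 for the old path (the NMI undoes the temporary
disable) and 0 for the new one, which decides from the live register
rather than from software state, as the patch does with rdmsrl_safe().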
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kernel/cpu/perf_counter.c  |  40
1 file changed, 28 insertions(+), 12 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 615e953208e..7d528ffc2d2 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -136,14 +136,25 @@ void hw_perf_disable_all(void)
wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0, 0);
}
+static inline void
+__hw_perf_counter_disable(struct hw_perf_counter *hwc, unsigned int idx)
+{
+ wrmsr(hwc->config_base + idx, hwc->config, 0);
+}
+
static DEFINE_PER_CPU(u64, prev_next_count[MAX_HW_COUNTERS]);
-static void __hw_perf_counter_enable(struct hw_perf_counter *hwc, int idx)
+static void __hw_perf_counter_set_period(struct hw_perf_counter *hwc, int idx)
{
per_cpu(prev_next_count[idx], smp_processor_id()) = hwc->next_count;
wrmsr(hwc->counter_base + idx, hwc->next_count, 0);
- wrmsr(hwc->config_base + idx, hwc->config, 0);
+}
+
+static void __hw_perf_counter_enable(struct hw_perf_counter *hwc, int idx)
+{
+ wrmsr(hwc->config_base + idx,
+ hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE, 0);
}
void hw_perf_counter_enable(struct perf_counter *counter)
@@ -161,11 +172,11 @@ void hw_perf_counter_enable(struct perf_counter *counter)
perf_counters_lapic_init(hwc->nmi);
- wrmsr(hwc->config_base + idx,
- hwc->config & ~ARCH_PERFMON_EVENTSEL0_ENABLE, 0);
+ __hw_perf_counter_disable(hwc, idx);
cpuc->counters[idx] = counter;
- counter->hw.config |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+
+ __hw_perf_counter_set_period(hwc, idx);
__hw_perf_counter_enable(hwc, idx);
}
@@ -286,8 +297,7 @@ void hw_perf_counter_disable(struct perf_counter *counter)
struct hw_perf_counter *hwc = &counter->hw;
unsigned int idx = hwc->idx;
- counter->hw.config &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
- wrmsr(hwc->config_base + idx, hwc->config, 0);
+ __hw_perf_counter_disable(hwc, idx);
clear_bit(idx, cpuc->used);
cpuc->counters[idx] = NULL;
@@ -328,18 +338,24 @@ static void perf_store_irq_data(struct perf_counter *counter, u64 data)
}
}
+/*
+ * NMI-safe enable method:
+ */
static void perf_save_and_restart(struct perf_counter *counter)
{
struct hw_perf_counter *hwc = &counter->hw;
int idx = hwc->idx;
+ u64 pmc_ctrl;
+ int err;
- wrmsr(hwc->config_base + idx,
- hwc->config & ~ARCH_PERFMON_EVENTSEL0_ENABLE, 0);
+ err = rdmsrl_safe(MSR_ARCH_PERFMON_EVENTSEL0 + idx, &pmc_ctrl);
+ WARN_ON_ONCE(err);
- if (hwc->config & ARCH_PERFMON_EVENTSEL0_ENABLE) {
- __hw_perf_save_counter(counter, hwc, idx);
+ __hw_perf_save_counter(counter, hwc, idx);
+ __hw_perf_counter_set_period(hwc, idx);
+
+ if (pmc_ctrl & ARCH_PERFMON_EVENTSEL0_ENABLE)
__hw_perf_counter_enable(hwc, idx);
- }
}
static void