author     Mike Galbraith <efault@gmx.de>    2009-01-23 10:13:01 +0100
committer  Ingo Molnar <mingo@elte.hu>       2009-01-23 11:33:18 +0100
commit     1b023a96d9b44f50f4d8ff28c15f5b80e354760f (patch)
tree       8b6a5956c4461c13c2e2b3769096afac5b767524 /arch
parent     05e3423c8577126800841bc55de8a509f2433dca (diff)
perfcounters: throttle on too high IRQ rates
Starting kerneltop with only -c 100 seems to be a bad idea; it can easily lock the system due to perfcounter IRQ overload.

So add throttling: if a new IRQ arrives in a shorter time than PERFMON_MIN_PERIOD_NS, turn off perfcounters and unthrottle them from the next timer tick.

Signed-off-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
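As context for the diff below, here is a minimal user-space sketch of the throttle/unthrottle pattern the commit message describes. It is not the kernel code: the PERFMON_MIN_PERIOD_NS value, the explicit timestamps, and the pmu_* names are illustrative stand-ins for the real per-CPU state and MSR writes in the patch.

#include <stdio.h>
#include <stdint.h>

/* Illustrative threshold only; the real constant is defined in the kernel. */
#define PERFMON_MIN_PERIOD_NS 10000ULL

struct pmu_state {
	uint64_t last_interrupt;	/* timestamp of the previous counter IRQ */
	int throttled;			/* set when IRQs arrive too quickly */
};

/* Models the check added to __smp_perf_counter_interrupt(): mark the PMU
 * throttled if the interval since the last IRQ is below the minimum period. */
static void pmu_interrupt(struct pmu_state *s, uint64_t now)
{
	if (now - s->last_interrupt < PERFMON_MIN_PERIOD_NS)
		s->throttled = 1;	/* counters stay disabled on return */
	s->last_interrupt = now;
}

/* Models perf_counter_unthrottle(), called from the timer tick:
 * re-enable the counters and clear the flag. */
static void pmu_unthrottle(struct pmu_state *s)
{
	if (s->throttled) {
		printf("unthrottling counters\n");
		s->throttled = 0;
	}
}

int main(void)
{
	struct pmu_state s = { 0, 0 };

	pmu_interrupt(&s, 100000);	/* first IRQ: interval is long enough */
	pmu_interrupt(&s, 101000);	/* 1000 ns later: too fast, throttle */
	pmu_unthrottle(&s);		/* next timer tick re-enables counters */
	return 0;
}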
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kernel/apic.c              2
-rw-r--r--  arch/x86/kernel/cpu/perf_counter.c  38
2 files changed, 34 insertions, 6 deletions
diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c
index 7b434e5b14c..849c23009bf 100644
--- a/arch/x86/kernel/apic.c
+++ b/arch/x86/kernel/apic.c
@@ -781,6 +781,8 @@ static void local_apic_timer_interrupt(void)
inc_irq_stat(apic_timer_irqs);
evt->event_handler(evt);
+
+ perf_counter_unthrottle();
}
/*
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 9376771f757..1a040b179b5 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -33,6 +33,9 @@ static int nr_counters_fixed __read_mostly;
struct cpu_hw_counters {
struct perf_counter *counters[X86_PMC_IDX_MAX];
unsigned long used[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
+ u64 last_interrupt;
+ u64 global_enable;
+ int throttled;
};
/*
@@ -474,16 +477,19 @@ perf_handle_group(struct perf_counter *sibling, u64 *status, u64 *overflown)
static void __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi)
{
int bit, cpu = smp_processor_id();
- u64 ack, status, saved_global;
- struct cpu_hw_counters *cpuc;
+ u64 ack, status, now;
+ struct cpu_hw_counters *cpuc = &per_cpu(cpu_hw_counters, cpu);
- rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, saved_global);
+ rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, cpuc->global_enable);
/* Disable counters globally */
wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
ack_APIC_irq();
- cpuc = &per_cpu(cpu_hw_counters, cpu);
+ now = sched_clock();
+ if (now - cpuc->last_interrupt < PERFMON_MIN_PERIOD_NS)
+ cpuc->throttled = 1;
+ cpuc->last_interrupt = now;
rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
if (!status)
@@ -533,9 +539,29 @@ again:
goto again;
out:
/*
- * Restore - do not reenable when global enable is off:
+ * Restore - do not reenable when global enable is off or throttled:
*/
- wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, saved_global);
+ if (!cpuc->throttled)
+ wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, cpuc->global_enable);
+}
+
+void perf_counter_unthrottle(void)
+{
+ struct cpu_hw_counters *cpuc;
+
+ if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
+ return;
+
+ if (unlikely(!perf_counters_initialized))
+ return;
+
+ cpuc = &per_cpu(cpu_hw_counters, smp_processor_id());
+ if (cpuc->throttled) {
+ if (printk_ratelimit())
+ printk(KERN_WARNING "PERFMON: max event frequency exceeded!\n");
+ wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, cpuc->global_enable);
+ cpuc->throttled = 0;
+ }
}
void smp_perf_counter_interrupt(struct pt_regs *regs)