From 3f4dee227348daac32f36daad9a91059efd0723e Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Fri, 29 May 2009 11:25:09 +0200
Subject: perf_counter: Fix cpuctx->task_ctx races

Peter noticed that we are sometimes reading cpuctx->task_ctx with
interrupts enabled.

Noticed-by: Peter Zijlstra
Acked-by: Peter Zijlstra
Cc: Mike Galbraith
Cc: Paul Mackerras
Cc: Corey Ashford
Cc: Marcelo Tosatti
Cc: Arnaldo Carvalho de Melo
Cc: John Kacur
LKML-Reference:
Signed-off-by: Ingo Molnar
---
 kernel/perf_counter.c | 28 ++++++++++++++++++++--------
 1 file changed, 20 insertions(+), 8 deletions(-)

diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index db843f812a6..eb346048f00 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -234,15 +234,18 @@ static void __perf_counter_remove_from_context(void *info)
 	struct perf_counter_context *ctx = counter->ctx;
 	unsigned long flags;
 
+	local_irq_save(flags);
 	/*
 	 * If this is a task context, we need to check whether it is
 	 * the current task context of this cpu. If not it has been
 	 * scheduled out before the smp call arrived.
 	 */
-	if (ctx->task && cpuctx->task_ctx != ctx)
+	if (ctx->task && cpuctx->task_ctx != ctx) {
+		local_irq_restore(flags);
 		return;
+	}
 
-	spin_lock_irqsave(&ctx->lock, flags);
+	spin_lock(&ctx->lock);
 	/*
 	 * Protect the list operation against NMI by disabling the
 	 * counters on a global level.
@@ -382,14 +385,17 @@ static void __perf_counter_disable(void *info)
 	struct perf_counter_context *ctx = counter->ctx;
 	unsigned long flags;
 
+	local_irq_save(flags);
 	/*
 	 * If this is a per-task counter, need to check whether this
 	 * counter's task is the current task on this cpu.
 	 */
-	if (ctx->task && cpuctx->task_ctx != ctx)
+	if (ctx->task && cpuctx->task_ctx != ctx) {
+		local_irq_restore(flags);
 		return;
+	}
 
-	spin_lock_irqsave(&ctx->lock, flags);
+	spin_lock(&ctx->lock);
 
 	/*
 	 * If the counter is on, turn it off.
@@ -615,6 +621,7 @@ static void __perf_install_in_context(void *info)
 	unsigned long flags;
 	int err;
 
+	local_irq_save(flags);
 	/*
 	 * If this is a task context, we need to check whether it is
 	 * the current task context of this cpu. If not it has been
@@ -623,12 +630,14 @@ static void __perf_install_in_context(void *info)
 	 * on this cpu because it had no counters.
 	 */
 	if (ctx->task && cpuctx->task_ctx != ctx) {
-		if (cpuctx->task_ctx || ctx->task != current)
+		if (cpuctx->task_ctx || ctx->task != current) {
+			local_irq_restore(flags);
 			return;
+		}
 		cpuctx->task_ctx = ctx;
 	}
 
-	spin_lock_irqsave(&ctx->lock, flags);
+	spin_lock(&ctx->lock);
 	ctx->is_active = 1;
 	update_context_time(ctx);
 
@@ -745,17 +754,20 @@ static void __perf_counter_enable(void *info)
 	unsigned long flags;
 	int err;
 
+	local_irq_save(flags);
 	/*
 	 * If this is a per-task counter, need to check whether this
 	 * counter's task is the current task on this cpu.
 	 */
 	if (ctx->task && cpuctx->task_ctx != ctx) {
-		if (cpuctx->task_ctx || ctx->task != current)
+		if (cpuctx->task_ctx || ctx->task != current) {
+			local_irq_restore(flags);
 			return;
+		}
 		cpuctx->task_ctx = ctx;
 	}
 
-	spin_lock_irqsave(&ctx->lock, flags);
+	spin_lock(&ctx->lock);
 	ctx->is_active = 1;
 	update_context_time(ctx);
 
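
Each of the four functions changed above previously read cpuctx->task_ctx
before disabling interrupts, so the value being checked could still change
until spin_lock_irqsave() actually masked interrupts. All four hunks apply
the same pattern; a condensed before/after sketch of it, taken directly
from the diff with the surrounding function bodies elided:

	/* Before: cpuctx->task_ctx is read with interrupts still enabled,
	 * so it can change between this check and the lock below.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	spin_lock_irqsave(&ctx->lock, flags);

	/* After: interrupts go off first, so the check is reliable; the
	 * early-return path must restore the saved flags, and the lock
	 * no longer needs the _irqsave variant since interrupts are
	 * already disabled.
	 */
	local_irq_save(flags);
	if (ctx->task && cpuctx->task_ctx != ctx) {
		local_irq_restore(flags);
		return;
	}

	spin_lock(&ctx->lock);

In __perf_install_in_context() and __perf_counter_enable() the early return
sits inside a nested if, but the flags handling is the same.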