author    Peter Zijlstra <a.p.zijlstra@chello.nl>  2009-03-30 19:07:03 +0200
committer Ingo Molnar <mingo@elte.hu>  2009-04-06 09:30:37 +0200
commit    38ff667b321b00f5e6830e93fb4ab11a653a2920 (patch)
tree      37c24148228d978824a014899f4984072da4e077 /kernel
parent    925d519ab82b6dd7aca9420d809ee83819c08db2 (diff)
perf_counter: fix update_userpage()
It just occurred to me it is possible to have multiple contending updates of the userpage (mmap information vs overflow vs counter). This would break the seqlock logic.

It appears the arch code uses this from NMI context, so we cannot possibly serialize its use; therefore separate the data_head update from it and let it return to its original use. The arch code needs to make sure there are no contending callers by disabling the counter before using it -- powerpc appears to do this nicely.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Paul Mackerras <paulus@samba.org>
Orig-LKML-Reference: <20090330171023.241410660@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
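For context, the "seqlock logic" above is the lock word in the mmap'ed counter page: the kernel increments userpg->lock before and after updating the page (with a write barrier before the final increment), and user space must re-read if the value changed or is odd. The following is only a minimal sketch of that user-space read loop, not code from this patch; the struct and field names follow the kernel side shown below, while read_userpg_offset() and rmb() are illustrative placeholders (rmb() standing for whatever read memory barrier the architecture provides).

/*
 * Sketch of the user-space side of the seqlock-style protocol:
 * retry while ->lock is odd (update in progress) or changed
 * between the two reads.
 */
static unsigned int read_userpg_offset(volatile struct perf_counter_mmap_page *pg,
                                       long long *offset)
{
        unsigned int seq;

        do {
                seq = pg->lock;
                rmb();                  /* read ->lock before the payload */
                *offset = pg->offset;
                rmb();                  /* read the payload before re-checking ->lock */
        } while ((seq & 1) || pg->lock != seq);

        return seq;
}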
Diffstat (limited to 'kernel')
-rw-r--r-- kernel/perf_counter.c | 38
1 file changed, 23 insertions(+), 15 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index f70ff80e79d..c95e92329b9 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -1316,10 +1316,22 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return err;
}
-static void __perf_counter_update_userpage(struct perf_counter *counter,
- struct perf_mmap_data *data)
+/*
+ * Callers need to ensure there can be no nesting of this function, otherwise
+ * the seqlock logic goes bad. We can not serialize this because the arch
+ * code calls this from NMI context.
+ */
+void perf_counter_update_userpage(struct perf_counter *counter)
{
- struct perf_counter_mmap_page *userpg = data->user_page;
+ struct perf_mmap_data *data;
+ struct perf_counter_mmap_page *userpg;
+
+ rcu_read_lock();
+ data = rcu_dereference(counter->data);
+ if (!data)
+ goto unlock;
+
+ userpg = data->user_page;
/*
* Disable preemption so as to not let the corresponding user-space
@@ -1333,20 +1345,10 @@ static void __perf_counter_update_userpage(struct perf_counter *counter,
if (counter->state == PERF_COUNTER_STATE_ACTIVE)
userpg->offset -= atomic64_read(&counter->hw.prev_count);
- userpg->data_head = atomic_read(&data->head);
smp_wmb();
++userpg->lock;
preempt_enable();
-}
-
-void perf_counter_update_userpage(struct perf_counter *counter)
-{
- struct perf_mmap_data *data;
-
- rcu_read_lock();
- data = rcu_dereference(counter->data);
- if (data)
- __perf_counter_update_userpage(counter, data);
+unlock:
rcu_read_unlock();
}
@@ -1547,7 +1549,13 @@ void perf_counter_wakeup(struct perf_counter *counter)
data = rcu_dereference(counter->data);
if (data) {
(void)atomic_xchg(&data->wakeup, POLL_IN);
- __perf_counter_update_userpage(counter, data);
+ /*
+ * Ensure all data writes are issued before updating the
+ * user-space data head information. The matching rmb()
+ * will be in userspace after reading this value.
+ */
+ smp_wmb();
+ data->user_page->data_head = atomic_read(&data->head);
}
rcu_read_unlock();
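The smp_wmb() added to perf_counter_wakeup() orders the ring-buffer writes against publishing data_head; as the new comment says, the matching read barrier belongs in user space after data_head is read. A hedged sketch of that consumer side follows; consume_events(), handle() and rmb() are illustrative names assumed for this example, not part of the kernel or tooling API.

/*
 * Consumer-side sketch: read data_head, issue the matching read
 * barrier, then process the buffer bytes up to that head.
 */
static void consume_events(volatile struct perf_counter_mmap_page *pg,
                           const char *data,
                           void (*handle)(const char *buf, unsigned int len))
{
        unsigned int head = pg->data_head;

        rmb();                  /* pairs with smp_wmb() in perf_counter_wakeup() */
        handle(data, head);     /* all bytes up to 'head' are now visible */
}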