author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-03-19 20:26:19 +0100
committer	Ingo Molnar <mingo@elte.hu>	2009-04-06 09:30:17 +0200
commit		0322cd6ec504b0bf08ca7b2c3d7f43bda37d79c9 (patch)
tree		8e5487e1a6700eb865ec1f268d51e9d6d3d38a71 /arch
parent		b8e83514b64577b48bfb794fe85fcde40a9343ca (diff)
perf_counter: unify irq output code
Impact: cleanup

Having 3 slightly different copies of the same code around does nobody
any good. First step in revamping the output format.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Orig-LKML-Reference: <20090319194233.929962222@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch')
-rw-r--r--	arch/powerpc/kernel/perf_counter.c	51
-rw-r--r--	arch/x86/kernel/cpu/perf_counter.c	53
2 files changed, 3 insertions(+), 101 deletions(-)
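
The perf_counter_output() call sites added below replace the per-arch
record_type switches with a single core helper; this commit shows only the
callers. A hedged sketch of what the unified helper plausibly looks like,
reassembled from the logic deleted below (the core-side helper names
perf_store_irq_data and perf_handle_group are assumptions here, carried
over from the removed arch copies, not shown in this commit):

	/*
	 * Sketch only: a unified output path in the core, reconstructed
	 * from the switch logic this commit deletes from the powerpc and
	 * x86 overflow handlers.
	 */
	void perf_counter_output(struct perf_counter *counter,
				 int nmi, struct pt_regs *regs)
	{
		switch (counter->hw_event.record_type) {
		case PERF_RECORD_SIMPLE:
			return;		/* count only, emit no data */
		case PERF_RECORD_IRQ:
			/* log the interrupted instruction pointer */
			perf_store_irq_data(counter, instruction_pointer(regs));
			break;
		case PERF_RECORD_GROUP:
			/* log event id + count for each group sibling */
			perf_handle_group(counter);
			break;
		}

		/*
		 * NMI context may not take scheduler locks, so defer the
		 * reader wakeup; normal irq context can wake up directly.
		 */
		if (nmi) {
			counter->wakeup_pending = 1;
			set_tsk_thread_flag(current, TIF_PERF_COUNTERS);
		} else
			wake_up(&counter->waitq);
	}

With the switch hoisted into the core, each arch overflow handler collapses
to the single perf_counter_output() call visible in the hunks below.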
diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c
index 26f69dc7130..88b72eb4af1 100644
--- a/arch/powerpc/kernel/perf_counter.c
+++ b/arch/powerpc/kernel/perf_counter.c
@@ -663,41 +663,6 @@ void perf_counter_do_pending(void)
}
/*
- * Record data for an irq counter.
- * This function was lifted from the x86 code; maybe it should
- * go in the core?
- */
-static void perf_store_irq_data(struct perf_counter *counter, u64 data)
-{
- struct perf_data *irqdata = counter->irqdata;
-
- if (irqdata->len > PERF_DATA_BUFLEN - sizeof(u64)) {
- irqdata->overrun++;
- } else {
- u64 *p = (u64 *) &irqdata->data[irqdata->len];
-
- *p = data;
- irqdata->len += sizeof(u64);
- }
-}
-
-/*
- * Record all the values of the counters in a group
- */
-static void perf_handle_group(struct perf_counter *counter)
-{
- struct perf_counter *leader, *sub;
-
- leader = counter->group_leader;
- list_for_each_entry(sub, &leader->sibling_list, list_entry) {
- if (sub != counter)
- sub->hw_ops->read(sub);
- perf_store_irq_data(counter, sub->hw_event.event_config);
- perf_store_irq_data(counter, atomic64_read(&sub->count));
- }
-}
-
-/*
* A counter has overflowed; update its count and record
* things if requested. Note that interrupts are hard-disabled
* here so there is no possibility of being interrupted.
@@ -736,20 +701,8 @@ static void record_and_restart(struct perf_counter *counter, long val,
/*
* Finally record data if requested.
*/
- if (record) {
- switch (counter->hw_event.record_type) {
- case PERF_RECORD_SIMPLE:
- break;
- case PERF_RECORD_IRQ:
- perf_store_irq_data(counter, instruction_pointer(regs));
- counter->wakeup_pending = 1;
- break;
- case PERF_RECORD_GROUP:
- perf_handle_group(counter);
- counter->wakeup_pending = 1;
- break;
- }
- }
+ if (record)
+ perf_counter_output(counter, 1, regs);
}
/*
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index d844ae41d5a..902282d68b0 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -674,20 +674,6 @@ static void pmc_generic_disable(struct perf_counter *counter)
x86_perf_counter_update(counter, hwc, idx);
}
-static void perf_store_irq_data(struct perf_counter *counter, u64 data)
-{
- struct perf_data *irqdata = counter->irqdata;
-
- if (irqdata->len > PERF_DATA_BUFLEN - sizeof(u64)) {
- irqdata->overrun++;
- } else {
- u64 *p = (u64 *) &irqdata->data[irqdata->len];
-
- *p = data;
- irqdata->len += sizeof(u64);
- }
-}
-
/*
* Save and restart an expired counter. Called by NMI contexts,
* so it has to be careful about preempting normal counter ops:
@@ -704,22 +690,6 @@ static void perf_save_and_restart(struct perf_counter *counter)
__pmc_generic_enable(counter, hwc, idx);
}
-static void
-perf_handle_group(struct perf_counter *sibling, u64 *status, u64 *overflown)
-{
- struct perf_counter *counter, *group_leader = sibling->group_leader;
-
- /*
- * Store sibling timestamps (if any):
- */
- list_for_each_entry(counter, &group_leader->sibling_list, list_entry) {
-
- x86_perf_counter_update(counter, &counter->hw, counter->hw.idx);
- perf_store_irq_data(sibling, counter->hw_event.event_config);
- perf_store_irq_data(sibling, atomic64_read(&counter->count));
- }
-}
-
/*
* Maximum interrupt frequency of 100KHz per CPU
*/
@@ -754,28 +724,7 @@ again:
continue;
perf_save_and_restart(counter);
-
- switch (counter->hw_event.record_type) {
- case PERF_RECORD_SIMPLE:
- continue;
- case PERF_RECORD_IRQ:
- perf_store_irq_data(counter, instruction_pointer(regs));
- break;
- case PERF_RECORD_GROUP:
- perf_handle_group(counter, &status, &ack);
- break;
- }
- /*
- * From NMI context we cannot call into the scheduler to
- * do a task wakeup - but we mark these generic as
- * wakeup_pending and initiate a wakeup callback:
- */
- if (nmi) {
- counter->wakeup_pending = 1;
- set_tsk_thread_flag(current, TIF_PERF_COUNTERS);
- } else {
- wake_up(&counter->waitq);
- }
+ perf_counter_output(counter, nmi, regs);
}
hw_perf_ack_status(ack);
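
One detail worth noting from the deleted x86 block above: wake_up() takes
runqueue locks and is therefore unsafe from NMI context, which is why the
nmi flag is threaded through to the unified helper. A minimal sketch of the
deferred-wakeup pattern, under the assumption that the pending flag is
processed later from a safe context (the function names here are
illustrative; the processing site is not part of this diff):

	/* NMI side: record intent only, using NMI-safe operations. */
	static void perf_wakeup_deferred(struct perf_counter *counter)
	{
		counter->wakeup_pending = 1;
		set_tsk_thread_flag(current, TIF_PERF_COUNTERS);
	}

	/*
	 * Safe-context side (e.g. on the return path from the interrupt):
	 * notice the flag and perform the actual reader wakeup.
	 */
	static void perf_wakeup_process(struct perf_counter *counter)
	{
		if (counter->wakeup_pending) {
			counter->wakeup_pending = 0;
			wake_up(&counter->waitq);
		}
	}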