about summary refs log tree commit diff
diff options
context:
space:
mode:
authorPeter Zijlstra <a.p.zijlstra@chello.nl>2009-04-08 15:01:32 +0200
committerIngo Molnar <mingo@elte.hu>2009-04-08 19:05:55 +0200
commit4d855457d84b819fefcd1cd1b0a2a0a0ec475c07 (patch)
tree7d0875fd698119befe218e7d6f584fe59064982f
parentde9ac07bbf8f51e0ce40e5428c3a8f627bd237c2 (diff)
perf_counter: move PERF_RECORD_TIME
Move PERF_RECORD_TIME so that all the fixed length items come before the variable length ones.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
LKML-Reference: <20090408130409.307926436@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--include/linux/perf_counter.h9
-rw-r--r--kernel/perf_counter.c26
2 files changed, 17 insertions, 18 deletions
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index a70a55f2759..8bd1be58c93 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -100,9 +100,9 @@ enum sw_event_ids {
enum perf_counter_record_format {
PERF_RECORD_IP = 1U << 0,
PERF_RECORD_TID = 1U << 1,
- PERF_RECORD_GROUP = 1U << 2,
- PERF_RECORD_CALLCHAIN = 1U << 3,
- PERF_RECORD_TIME = 1U << 4,
+ PERF_RECORD_TIME = 1U << 2,
+ PERF_RECORD_GROUP = 1U << 3,
+ PERF_RECORD_CALLCHAIN = 1U << 4,
};
/*
@@ -250,6 +250,7 @@ enum perf_event_type {
*
* { u64 ip; } && PERF_RECORD_IP
* { u32 pid, tid; } && PERF_RECORD_TID
+ * { u64 time; } && PERF_RECORD_TIME
*
* { u64 nr;
* { u64 event, val; } cnt[nr]; } && PERF_RECORD_GROUP
@@ -259,8 +260,6 @@ enum perf_event_type {
* kernel,
* user;
* u64 ips[nr]; } && PERF_RECORD_CALLCHAIN
- *
- * { u64 time; } && PERF_RECORD_TIME
* };
*/
};
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 2d4aebb2982..4dc8600d282 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -1850,6 +1850,16 @@ static void perf_counter_output(struct perf_counter *counter,
header.size += sizeof(tid_entry);
}
+ if (record_type & PERF_RECORD_TIME) {
+ /*
+ * Maybe do better on x86 and provide cpu_clock_nmi()
+ */
+ time = sched_clock();
+
+ header.type |= PERF_RECORD_TIME;
+ header.size += sizeof(u64);
+ }
+
if (record_type & PERF_RECORD_GROUP) {
header.type |= PERF_RECORD_GROUP;
header.size += sizeof(u64) +
@@ -1867,16 +1877,6 @@ static void perf_counter_output(struct perf_counter *counter,
}
}
- if (record_type & PERF_RECORD_TIME) {
- /*
- * Maybe do better on x86 and provide cpu_clock_nmi()
- */
- time = sched_clock();
-
- header.type |= PERF_RECORD_TIME;
- header.size += sizeof(u64);
- }
-
ret = perf_output_begin(&handle, counter, header.size, nmi, 1);
if (ret)
return;
@@ -1889,6 +1889,9 @@ static void perf_counter_output(struct perf_counter *counter,
if (record_type & PERF_RECORD_TID)
perf_output_put(&handle, tid_entry);
+ if (record_type & PERF_RECORD_TIME)
+ perf_output_put(&handle, time);
+
if (record_type & PERF_RECORD_GROUP) {
struct perf_counter *leader, *sub;
u64 nr = counter->nr_siblings;
@@ -1910,9 +1913,6 @@ static void perf_counter_output(struct perf_counter *counter,
if (callchain)
perf_output_copy(&handle, callchain, callchain_size);
- if (record_type & PERF_RECORD_TIME)
- perf_output_put(&handle, time);
-
perf_output_end(&handle);
}