author    Peter Zijlstra <a.p.zijlstra@chello.nl>  2009-05-20 12:21:22 +0200
committer Ingo Molnar <mingo@elte.hu>              2009-05-20 12:43:34 +0200
commit    afedadf23a2c90f3ba0d963282cbe6a6be129494 (patch)
tree      3fa284b19482158c0a8dab8fa79bf41180ebd256 /kernel
parent    b986d7ec0f8b7ea3cc7366d80a137fbe839df227 (diff)
perf_counter: Optimize sched in/out of counters
Avoid a function call for !group counters by directly calling the
counter function.

[ Impact: micro-optimize the code ]

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: John Kacur <jkacur@redhat.com>
LKML-Reference: <20090520102553.511933670@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
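For illustration, here is a minimal standalone C sketch of the dispatch
pattern the patch introduces: a counter that is not its own group leader
takes a direct per-counter call instead of the group path. The struct
and both helpers are simplified stand-ins, not the kernel's types; only
the group_leader test mirrors the patch.

	#include <stdio.h>

	struct counter {
		const char *name;
		struct counter *group_leader;	/* a group leader points to itself */
	};

	/* Stand-in for the cheap single-counter path. */
	static void counter_sched_out(struct counter *c)
	{
		printf("counter_sched_out(%s)\n", c->name);
	}

	/* Stand-in for the group path, which also walks the sibling list. */
	static void group_sched_out(struct counter *leader)
	{
		printf("group_sched_out(%s)\n", leader->name);
	}

	static void sched_out_all(struct counter **counters, int n)
	{
		for (int i = 0; i < n; i++) {
			struct counter *c = counters[i];

			/* The patch's test: non-leaders skip the group dispatch. */
			if (c != c->group_leader)
				counter_sched_out(c);
			else
				group_sched_out(c);
		}
	}

	int main(void)
	{
		struct counter leader = { "leader", &leader };
		struct counter member = { "member", &leader };
		struct counter solo   = { "solo",   &solo };

		struct counter *list[] = { &leader, &member, &solo };

		sched_out_all(list, 3);
		return 0;
	}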
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/perf_counter.c | 25 +++++++++++++++++++------
1 file changed, 19 insertions(+), 6 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 473ed2cafbf..69d4de81596 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -826,8 +826,12 @@ void __perf_counter_sched_out(struct perf_counter_context *ctx,
         perf_disable();
         if (ctx->nr_active) {
-                list_for_each_entry(counter, &ctx->counter_list, list_entry)
-                        group_sched_out(counter, cpuctx, ctx);
+                list_for_each_entry(counter, &ctx->counter_list, list_entry) {
+                        if (counter != counter->group_leader)
+                                counter_sched_out(counter, cpuctx, ctx);
+                        else
+                                group_sched_out(counter, cpuctx, ctx);
+                }
         }
         perf_enable();
  out:
@@ -903,8 +907,12 @@ __perf_counter_sched_in(struct perf_counter_context *ctx,
                 if (counter->cpu != -1 && counter->cpu != cpu)
                         continue;
-                if (group_can_go_on(counter, cpuctx, 1))
-                        group_sched_in(counter, cpuctx, ctx, cpu);
+                if (counter != counter->group_leader)
+                        counter_sched_in(counter, cpuctx, ctx, cpu);
+                else {
+                        if (group_can_go_on(counter, cpuctx, 1))
+                                group_sched_in(counter, cpuctx, ctx, cpu);
+                }
                 /*
                  * If this pinned group hasn't been scheduled,
@@ -932,9 +940,14 @@ __perf_counter_sched_in(struct perf_counter_context *ctx,
                 if (counter->cpu != -1 && counter->cpu != cpu)
                         continue;
-                if (group_can_go_on(counter, cpuctx, can_add_hw)) {
-                        if (group_sched_in(counter, cpuctx, ctx, cpu))
+                if (counter != counter->group_leader) {
+                        if (counter_sched_in(counter, cpuctx, ctx, cpu))
                                 can_add_hw = 0;
+                } else {
+                        if (group_can_go_on(counter, cpuctx, can_add_hw)) {
+                                if (group_sched_in(counter, cpuctx, ctx, cpu))
+                                        can_add_hw = 0;
+                        }
                 }
         }
         perf_enable();
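The sched-in hunks follow the same shape, with one extra wrinkle in the
third hunk: a non-leader that fails to schedule still clears can_add_hw,
so later hardware groups are not attempted. A sketch of that control
flow, reusing the stand-in struct from the sketch above; the three
helpers here are again hypothetical placeholders with an assumed return
convention (non-zero on failure), not the kernel implementations.

	/* Placeholder: 0 on success, non-zero if the counter could not go on. */
	static int counter_sched_in(struct counter *c, int cpu)
	{
		printf("counter_sched_in(%s) on cpu %d\n", c->name, cpu);
		return 0;
	}

	static int group_sched_in(struct counter *leader, int cpu)
	{
		printf("group_sched_in(%s) on cpu %d\n", leader->name, cpu);
		return 0;
	}

	/* Placeholder for the hardware-capacity check. */
	static int group_can_go_on(struct counter *c, int can_add_hw)
	{
		(void)c;
		return can_add_hw;
	}

	static void sched_in_flexible(struct counter **counters, int n, int cpu)
	{
		int can_add_hw = 1;

		for (int i = 0; i < n; i++) {
			struct counter *c = counters[i];

			if (c != c->group_leader) {
				/* Direct path; a failure still throttles later
				 * hardware scheduling, as in the third hunk. */
				if (counter_sched_in(c, cpu))
					can_add_hw = 0;
			} else {
				if (group_can_go_on(c, can_add_hw)) {
					if (group_sched_in(c, cpu))
						can_add_hw = 0;
				}
			}
		}
	}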