| author | Ingo Molnar <mingo@elte.hu> | 2008-12-17 08:54:56 +0100 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2008-12-23 12:45:09 +0100 |
| commit | 7995888fcb0246543ee8027bf2835a250ba8c925 (patch) | |
| tree | ef7a2f699d99c3613ba7559d3b9772fa00014718 /kernel | |
| parent | 8fb9331391af95ca1f4e5c0a0da8120b13cbae01 (diff) | |
perfcounters: tweak group scheduling
Impact: schedule in groups atomically
If there are multiple groups in a task, make sure they are scheduled
in and out atomically.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/perf_counter.c | 16 |

1 file changed, 13 insertions, 3 deletions
```diff
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index aab6c123b02..f8a4d9a5d5d 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -367,21 +367,26 @@ counter_sched_in(struct perf_counter *counter,
 	ctx->nr_active++;
 }
 
-static void
+static int
 group_sched_in(struct perf_counter *group_counter,
 	       struct perf_cpu_context *cpuctx,
 	       struct perf_counter_context *ctx,
 	       int cpu)
 {
 	struct perf_counter *counter;
+	int was_group = 0;
 
 	counter_sched_in(group_counter, cpuctx, ctx, cpu);
 
 	/*
 	 * Schedule in siblings as one group (if any):
 	 */
-	list_for_each_entry(counter, &group_counter->sibling_list, list_entry)
+	list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
 		counter_sched_in(counter, cpuctx, ctx, cpu);
+		was_group = 1;
+	}
+
+	return was_group;
 }
 
 /*
@@ -416,7 +421,12 @@ void perf_counter_task_sched_in(struct task_struct *task, int cpu)
 		if (counter->cpu != -1 && counter->cpu != cpu)
 			continue;
 
-		group_sched_in(counter, cpuctx, ctx, cpu);
+		/*
+		 * If we scheduled in a group atomically and
+		 * exclusively, break out:
+		 */
+		if (group_sched_in(counter, cpuctx, ctx, cpu))
+			break;
 	}
 	spin_unlock(&ctx->lock);
```
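For context, the sketch below models the control flow this patch introduces, as plain user-space C rather than kernel code: `group_sched_in()` now reports whether it scheduled a real group (a leader with siblings), and the caller's loop breaks as soon as one full group has been scheduled in, rather than mixing members of several groups onto the PMU. The `struct group` type, the sample counter names, and the `printf()` logging are invented for illustration only.

```c
#include <stdio.h>

/* Toy stand-in for a counter group: a leader plus zero or more siblings. */
struct group {
	const char *leader;
	const char *siblings[4];	/* NULL-terminated */
};

/* Stand-in for counter_sched_in(): just log the counter being enabled. */
static void counter_sched_in(const char *name)
{
	printf("  sched in: %s\n", name);
}

/*
 * Mirrors the patched group_sched_in(): schedule the leader, then all
 * of its siblings, and report whether any siblings existed -- i.e.
 * whether a whole group (not a lone counter) was scheduled in.
 */
static int group_sched_in(const struct group *g)
{
	int was_group = 0;
	int i;

	counter_sched_in(g->leader);
	for (i = 0; g->siblings[i]; i++) {
		counter_sched_in(g->siblings[i]);
		was_group = 1;
	}
	return was_group;
}

int main(void)
{
	static const struct group groups[] = {
		{ "cycles",   { "instructions", "cache-misses", NULL } },
		{ "branches", { NULL } },	/* not reached below */
	};
	size_t i;

	/*
	 * Mirrors the patched perf_counter_task_sched_in() loop: stop
	 * after the first full group instead of scheduling counters
	 * from other groups in behind it.
	 */
	for (i = 0; i < sizeof(groups) / sizeof(groups[0]); i++) {
		if (group_sched_in(&groups[i]))
			break;
	}
	return 0;
}
```

The design point, per the commit message, is that a group's members should go on and off the PMU together; returning `was_group` gives the scheduling loop a whole-group boundary at which to stop, so one group's counters are not interleaved with another's.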