author     Paul Mackerras <paulus@samba.org>   2009-06-01 17:53:16 +1000
committer  Ingo Molnar <mingo@elte.hu>         2009-06-02 13:10:55 +0200
commit     bf4e0ed3d027ce581be18496036862131b5f32aa (patch)
tree       f55e5c85b286b3ace8b81f3ffc7e48590f4a7020
parent     3f731ca60afc29f5bcdb5fd2a04391466313a9ac (diff)
perf_counter: Remove unused prev_state field
This removes the prev_state field of struct perf_counter since it is
now unused. It was only used by the cpu migration counter, which
doesn't use it any more.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: John Kacur <jkacur@redhat.com>
LKML-Reference: <18979.35052.915728.626374@cargo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
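For illustration only, a small self-contained C sketch of the idea: after this patch the counter keeps a single state field, and the separate prev_state snapshot is gone because nothing reads it. The enum values and the counter_enable() helper below are simplified stand-ins, not the kernel's actual definitions; only the identifier names are taken from the hunks that follow.

/*
 * Toy model (not kernel code): a counter with only `state`,
 * no `prev_state` snapshot.
 */
#include <stdio.h>

enum perf_counter_active_state {        /* illustrative values */
        PERF_COUNTER_STATE_OFF      = -1,
        PERF_COUNTER_STATE_INACTIVE =  0,
        PERF_COUNTER_STATE_ACTIVE   =  1,
};

struct perf_counter {
        enum perf_counter_active_state state;   /* prev_state removed */
        long long                      count;
};

/*
 * Simplified echo of __perf_counter_enable() after the patch:
 * no prev_state snapshot, just flip an OFF counter to INACTIVE.
 */
static void counter_enable(struct perf_counter *counter)
{
        if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
                return;
        counter->state = PERF_COUNTER_STATE_INACTIVE;
}

int main(void)
{
        struct perf_counter c = { .state = PERF_COUNTER_STATE_OFF, .count = 0 };

        counter_enable(&c);
        printf("state after enable: %d\n", c.state);
        return 0;
}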
-rw-r--r--   include/linux/perf_counter.h   1
-rw-r--r--   kernel/perf_counter.c          4
2 files changed, 0 insertions, 5 deletions
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index deb9acf9ad2..d970fbc16af 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -427,7 +427,6 @@ struct perf_counter {
        const struct pmu                *pmu;

        enum perf_counter_active_state  state;
-       enum perf_counter_active_state  prev_state;
        atomic64_t                      count;

        /*
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index cd94cf3bf9e..fbed4d28ad7 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -572,7 +572,6 @@ group_sched_in(struct perf_counter *group_counter,
        if (ret)
                return ret < 0 ? ret : 0;

-       group_counter->prev_state = group_counter->state;
        if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
                return -EAGAIN;
@@ -580,7 +579,6 @@ group_sched_in(struct perf_counter *group_counter,
         * Schedule in siblings as one group (if any):
         */
        list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
-               counter->prev_state = counter->state;
                if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
                        partial_group = counter;
                        goto group_error;
@@ -657,7 +655,6 @@ static void add_counter_to_ctx(struct perf_counter *counter,
                               struct perf_counter_context *ctx)
{
        list_add_counter(counter, ctx);
-       counter->prev_state = PERF_COUNTER_STATE_OFF;
        counter->tstamp_enabled = ctx->time;
        counter->tstamp_running = ctx->time;
        counter->tstamp_stopped = ctx->time;
@@ -820,7 +817,6 @@ static void __perf_counter_enable(void *info)
        ctx->is_active = 1;
        update_context_time(ctx);

-       counter->prev_state = counter->state;
        if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
                goto unlock;
        counter->state = PERF_COUNTER_STATE_INACTIVE;