about summary refs log tree commit diff
diff options
context:
space:
mode:
-rw-r--r--  arch/x86/kernel/cpu/perf_counter.c |  2
-rw-r--r--  include/linux/perf_counter.h       | 11
-rw-r--r--  kernel/perf_counter.c              | 29
3 files changed, 25 insertions(+), 17 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 3e1dbebe22b..4854cca7fff 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -332,7 +332,7 @@ perf_handle_group(struct perf_counter *sibling, u64 *status, u64 *overflown)
* Then store sibling timestamps (if any):
*/
list_for_each_entry(counter, &group_leader->sibling_list, list_entry) {
- if (!counter->active) {
+ if (counter->state != PERF_COUNTER_STATE_ACTIVE) {
/*
* When counter was not in the overflow mask, we have to
* read it from hardware. We read it as well, when it
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index 97d86c293ee..8cb095fa442 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -128,6 +128,15 @@ struct hw_perf_counter_ops {
};
/**
+ * enum perf_counter_active_state - the states of a counter
+ */
+enum perf_counter_active_state {
+ PERF_COUNTER_STATE_OFF = -1,
+ PERF_COUNTER_STATE_INACTIVE = 0,
+ PERF_COUNTER_STATE_ACTIVE = 1,
+};
+
+/**
* struct perf_counter - performance counter kernel representation:
*/
struct perf_counter {
@@ -136,7 +145,7 @@ struct perf_counter {
struct perf_counter *group_leader;
const struct hw_perf_counter_ops *hw_ops;
- int active;
+ enum perf_counter_active_state state;
#if BITS_PER_LONG == 64
atomic64_t count;
#else
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 4e679b91d8b..559130b8774 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -167,9 +167,9 @@ static void __perf_counter_remove_from_context(void *info)
spin_lock(&ctx->lock);
- if (counter->active) {
+ if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
counter->hw_ops->hw_perf_counter_disable(counter);
- counter->active = 0;
+ counter->state = PERF_COUNTER_STATE_INACTIVE;
ctx->nr_active--;
cpuctx->active_oncpu--;
counter->task = NULL;
@@ -281,7 +281,7 @@ static void __perf_install_in_context(void *info)
if (cpuctx->active_oncpu < perf_max_counters) {
counter->hw_ops->hw_perf_counter_enable(counter);
- counter->active = 1;
+ counter->state = PERF_COUNTER_STATE_ACTIVE;
counter->oncpu = cpu;
ctx->nr_active++;
cpuctx->active_oncpu++;
@@ -328,7 +328,6 @@ retry:
spin_lock_irq(&ctx->lock);
/*
- * If the context is active and the counter has not been added
* we need to retry the smp call.
*/
if (ctx->nr_active && list_empty(&counter->list_entry)) {
@@ -353,12 +352,12 @@ counter_sched_out(struct perf_counter *counter,
struct perf_cpu_context *cpuctx,
struct perf_counter_context *ctx)
{
- if (!counter->active)
+ if (counter->state != PERF_COUNTER_STATE_ACTIVE)
return;
counter->hw_ops->hw_perf_counter_disable(counter);
- counter->active = 0;
- counter->oncpu = -1;
+ counter->state = PERF_COUNTER_STATE_INACTIVE;
+ counter->oncpu = -1;
cpuctx->active_oncpu--;
ctx->nr_active--;
@@ -415,11 +414,11 @@ counter_sched_in(struct perf_counter *counter,
struct perf_counter_context *ctx,
int cpu)
{
- if (counter->active == -1)
+ if (counter->state == PERF_COUNTER_STATE_OFF)
return;
counter->hw_ops->hw_perf_counter_enable(counter);
- counter->active = 1;
+ counter->state = PERF_COUNTER_STATE_ACTIVE;
counter->oncpu = cpu; /* TODO: put 'cpu' into cpuctx->cpu */
cpuctx->active_oncpu++;
@@ -506,8 +505,8 @@ int perf_counter_task_disable(void)
perf_flags = hw_perf_save_disable();
list_for_each_entry(counter, &ctx->counter_list, list_entry) {
- WARN_ON_ONCE(counter->active == 1);
- counter->active = -1;
+ WARN_ON_ONCE(counter->state == PERF_COUNTER_STATE_ACTIVE);
+ counter->state = PERF_COUNTER_STATE_OFF;
}
hw_perf_restore(perf_flags);
@@ -540,9 +539,9 @@ int perf_counter_task_enable(void)
perf_flags = hw_perf_save_disable();
list_for_each_entry(counter, &ctx->counter_list, list_entry) {
- if (counter->active != -1)
+ if (counter->state != PERF_COUNTER_STATE_OFF)
continue;
- counter->active = 0;
+ counter->state = PERF_COUNTER_STATE_INACTIVE;
}
hw_perf_restore(perf_flags);
@@ -620,7 +619,7 @@ static u64 perf_counter_read(struct perf_counter *counter)
* If counter is enabled and currently active on a CPU, update the
* value in the counter structure:
*/
- if (counter->active) {
+ if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
smp_call_function_single(counter->oncpu,
__hw_perf_counter_read, counter, 1);
}
@@ -673,7 +672,7 @@ static struct perf_data *perf_switch_irq_data(struct perf_counter *counter)
retry:
spin_lock_irq(&ctx->lock);
- if (!counter->active) {
+ if (counter->state != PERF_COUNTER_STATE_ACTIVE) {
counter->irqdata = counter->usrdata;
counter->usrdata = oldirqdata;
spin_unlock_irq(&ctx->lock);