about summary refs log tree commit diff
diff options
context:
space:
mode:
authorThomas Gleixner <tglx@linutronix.de>2008-12-08 19:35:37 +0100
committerIngo Molnar <mingo@elte.hu>2008-12-11 15:45:47 +0100
commitdfa7c899b401d7dc5d85aca416aee64ac82812f2 (patch)
tree496b0fee69989fd4127905a888de7135a7969e9e
parenteab656ae04b9d3b83265e3db01c0d2c46b748ef7 (diff)
perf counters: expand use of counter->event
Impact: change syscall, cleanup. Make use of the new perf_counters event type. Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--arch/x86/kernel/cpu/perf_counter.c22
-rw-r--r--include/linux/perf_counter.h4
-rw-r--r--kernel/perf_counter.c10
3 files changed, 17 insertions, 19 deletions
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 0a7f3bea2dc..30e7ebf7827 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -56,9 +56,10 @@ const int max_intel_perfmon_events = ARRAY_SIZE(intel_perfmon_event_map);
/*
* Setup the hardware configuration for a given hw_event_type
*/
-int hw_perf_counter_init(struct perf_counter *counter, s32 hw_event_type)
+int hw_perf_counter_init(struct perf_counter *counter)
{
struct hw_perf_counter *hwc = &counter->hw;
+ u32 hw_event_type = counter->event.hw_event_type;
if (unlikely(!perf_counters_initialized))
return -EINVAL;
@@ -83,7 +84,7 @@ int hw_perf_counter_init(struct perf_counter *counter, s32 hw_event_type)
hwc->config_base = MSR_ARCH_PERFMON_EVENTSEL0;
hwc->counter_base = MSR_ARCH_PERFMON_PERFCTR0;
- hwc->irq_period = counter->__irq_period;
+ hwc->irq_period = counter->event.hw_event_period;
/*
* Intel PMCs cannot be accessed sanely above 32 bit width,
* so we install an artificial 1<<31 period regardless of
@@ -95,21 +96,19 @@ int hw_perf_counter_init(struct perf_counter *counter, s32 hw_event_type)
hwc->next_count = -((s32) hwc->irq_period);
/*
- * Negative event types mean raw encoded event+umask values:
+ * Raw event type provide the config in the event structure
*/
- if (hw_event_type < 0) {
- counter->hw_event_type = -hw_event_type;
- counter->hw_event_type &= ~PERF_COUNT_NMI;
+ hw_event_type &= ~PERF_COUNT_NMI;
+ if (hw_event_type == PERF_COUNT_RAW) {
+ hwc->config |= counter->event.hw_raw_ctrl;
} else {
- hw_event_type &= ~PERF_COUNT_NMI;
if (hw_event_type >= max_intel_perfmon_events)
return -EINVAL;
/*
* The generic map:
*/
- counter->hw_event_type = intel_perfmon_event_map[hw_event_type];
+ hwc->config |= intel_perfmon_event_map[hw_event_type];
}
- hwc->config |= counter->hw_event_type;
counter->wakeup_pending = 0;
return 0;
@@ -373,7 +372,7 @@ perf_handle_group(struct perf_counter *leader, u64 *status, u64 *overflown)
perf_save_and_restart(counter);
}
}
- perf_store_irq_data(leader, counter->hw_event_type);
+ perf_store_irq_data(leader, counter->event.hw_event_type);
perf_store_irq_data(leader, atomic64_counter_read(counter));
}
}
@@ -418,7 +417,8 @@ again:
perf_store_irq_data(counter, instruction_pointer(regs));
break;
case PERF_RECORD_GROUP:
- perf_store_irq_data(counter, counter->hw_event_type);
+ perf_store_irq_data(counter,
+ counter->event.hw_event_type);
perf_store_irq_data(counter,
atomic64_counter_read(counter));
perf_handle_group(counter, &status, &ack);
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index daedd7d87c2..1f0017673e7 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -96,8 +96,7 @@ struct perf_counter {
#else
atomic_t count32[2];
#endif
- u64 __irq_period;
-
+ struct perf_counter_event event;
struct hw_perf_counter hw;
struct perf_counter_context *ctx;
@@ -111,7 +110,6 @@ struct perf_counter {
int oncpu;
int cpu;
- s32 hw_event_type;
enum perf_record_type record_type;
/* read() / irq related data */
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 96c333a5b0f..2557c670a3b 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -37,7 +37,7 @@ static DEFINE_MUTEX(perf_resource_mutex);
* Architecture provided APIs - weak aliases:
*/
-int __weak hw_perf_counter_init(struct perf_counter *counter, u32 hw_event_type)
+int __weak hw_perf_counter_init(struct perf_counter *counter)
{
return -EINVAL;
}
@@ -707,7 +707,7 @@ static const struct file_operations perf_fops = {
* Allocate and initialize a counter structure
*/
static struct perf_counter *
-perf_counter_alloc(u32 hw_event_period, int cpu, u32 record_type)
+perf_counter_alloc(struct perf_counter_event *event, int cpu, u32 record_type)
{
struct perf_counter *counter = kzalloc(sizeof(*counter), GFP_KERNEL);
@@ -722,7 +722,7 @@ perf_counter_alloc(u32 hw_event_period, int cpu, u32 record_type)
counter->usrdata = &counter->data[1];
counter->cpu = cpu;
counter->record_type = record_type;
- counter->__irq_period = hw_event_period;
+ counter->event = *event;
counter->wakeup_pending = 0;
return counter;
@@ -750,11 +750,11 @@ sys_perf_counter_open(struct perf_counter_event __user *uevent, u32 record_type,
return PTR_ERR(ctx);
ret = -ENOMEM;
- counter = perf_counter_alloc(event.hw_event_period, cpu, record_type);
+ counter = perf_counter_alloc(&event, cpu, record_type);
if (!counter)
goto err_put_context;
- ret = hw_perf_counter_init(counter, event.hw_event_type);
+ ret = hw_perf_counter_init(counter);
if (ret)
goto err_free_put_context;