author		Ingo Molnar <mingo@elte.hu>	2008-12-11 14:59:31 +0100
committer	Ingo Molnar <mingo@elte.hu>	2008-12-11 15:45:55 +0100
commit		1d1c7ddbfab358445a542715551301b7fc363e28 (patch)
tree		d4421834d109b206f39c2019ea039fd42ed22e1d /kernel
parent		bae43c9945ebeef15e7952e317efb02393d3bfc7 (diff)
perf counters: add prctl interface to disable/enable counters
Add a way for self-monitoring tasks to disable/enable counters
summarily, via a prctl:

	PR_TASK_PERF_COUNTERS_DISABLE		31
	PR_TASK_PERF_COUNTERS_ENABLE		32

Signed-off-by: Ingo Molnar <mingo@elte.hu>
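As a usage illustration (not part of this patch): a self-monitoring task would bracket code it does not want measured with the two new prctls. The PR_* values below are the ones quoted in the commit message; everything else in the snippet, including the fallback #defines and the helper functions, is a hypothetical sketch.

#include <stdio.h>
#include <sys/prctl.h>

/* Values from the commit message; an older sys/prctl.h will not define them. */
#ifndef PR_TASK_PERF_COUNTERS_DISABLE
#define PR_TASK_PERF_COUNTERS_DISABLE	31
#endif
#ifndef PR_TASK_PERF_COUNTERS_ENABLE
#define PR_TASK_PERF_COUNTERS_ENABLE	32
#endif

/* Hypothetical helpers standing in for real application code. */
static void do_unmeasured_setup(void) { }
static void do_measured_work(void) { }

int main(void)
{
	/*
	 * Assumes this task has already opened counters on itself via
	 * sys_perf_counter_open() (see the diff below).
	 */

	/* Summarily stop all of this task's counters. */
	if (prctl(PR_TASK_PERF_COUNTERS_DISABLE, 0, 0, 0, 0))
		perror("prctl(PR_TASK_PERF_COUNTERS_DISABLE)");

	do_unmeasured_setup();		/* not counted */

	/* Re-enable the counters that the prctl above disabled. */
	if (prctl(PR_TASK_PERF_COUNTERS_ENABLE, 0, 0, 0, 0))
		perror("prctl(PR_TASK_PERF_COUNTERS_ENABLE)");

	do_measured_work();		/* counted again */

	return 0;
}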
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/perf_counter.c	86
-rw-r--r--	kernel/sys.c		7
2 files changed, 86 insertions, 7 deletions
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index a0fe8474ee2..4e679b91d8b 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -415,6 +415,9 @@ counter_sched_in(struct perf_counter *counter,
 		 struct perf_counter_context *ctx,
 		 int cpu)
 {
+	if (counter->active == -1)
+		return;
+
 	counter->hw_ops->hw_perf_counter_enable(counter);
 	counter->active = 1;
 	counter->oncpu = cpu;	/* TODO: put 'cpu' into cpuctx->cpu */
@@ -479,6 +482,79 @@ void perf_counter_task_sched_in(struct task_struct *task, int cpu)
 	cpuctx->task_ctx = ctx;
 }
 
+int perf_counter_task_disable(void)
+{
+	struct task_struct *curr = current;
+	struct perf_counter_context *ctx = &curr->perf_counter_ctx;
+	struct perf_counter *counter;
+	u64 perf_flags;
+	int cpu;
+
+	if (likely(!ctx->nr_counters))
+		return 0;
+
+	local_irq_disable();
+	cpu = smp_processor_id();
+
+	perf_counter_task_sched_out(curr, cpu);
+
+	spin_lock(&ctx->lock);
+
+	/*
+	 * Disable all the counters:
+	 */
+	perf_flags = hw_perf_save_disable();
+
+	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
+		WARN_ON_ONCE(counter->active == 1);
+		counter->active = -1;
+	}
+	hw_perf_restore(perf_flags);
+
+	spin_unlock(&ctx->lock);
+
+	local_irq_enable();
+
+	return 0;
+}
+
+int perf_counter_task_enable(void)
+{
+	struct task_struct *curr = current;
+	struct perf_counter_context *ctx = &curr->perf_counter_ctx;
+	struct perf_counter *counter;
+	u64 perf_flags;
+	int cpu;
+
+	if (likely(!ctx->nr_counters))
+		return 0;
+
+	local_irq_disable();
+	cpu = smp_processor_id();
+
+	spin_lock(&ctx->lock);
+
+	/*
+	 * Disable all the counters:
+	 */
+	perf_flags = hw_perf_save_disable();
+
+	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
+		if (counter->active != -1)
+			continue;
+		counter->active = 0;
+	}
+	hw_perf_restore(perf_flags);
+
+	spin_unlock(&ctx->lock);
+
+	perf_counter_task_sched_in(curr, cpu);
+
+	local_irq_enable();
+
+	return 0;
+}
+
 void perf_counter_task_tick(struct task_struct *curr, int cpu)
 {
 	struct perf_counter_context *ctx = &curr->perf_counter_ctx;
@@ -951,13 +1027,9 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event,
  * @cpu: target cpu
  * @group_fd: group leader counter fd
  */
-asmlinkage int sys_perf_counter_open(
-
-	struct perf_counter_hw_event *hw_event_uptr __user,
-	pid_t pid,
-	int cpu,
-	int group_fd)
-
+asmlinkage int
+sys_perf_counter_open(struct perf_counter_hw_event *hw_event_uptr __user,
+		      pid_t pid, int cpu, int group_fd)
 {
 	struct perf_counter *counter, *group_leader;
 	struct perf_counter_hw_event hw_event;
diff --git a/kernel/sys.c b/kernel/sys.c
index 31deba8f7d1..0f66633be31 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -14,6 +14,7 @@
 #include <linux/prctl.h>
 #include <linux/highuid.h>
 #include <linux/fs.h>
+#include <linux/perf_counter.h>
 #include <linux/resource.h>
 #include <linux/kernel.h>
 #include <linux/kexec.h>
@@ -1716,6 +1717,12 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
 	case PR_SET_TSC:
 		error = SET_TSC_CTL(arg2);
 		break;
+	case PR_TASK_PERF_COUNTERS_DISABLE:
+		error = perf_counter_task_disable();
+		break;
+	case PR_TASK_PERF_COUNTERS_ENABLE:
+		error = perf_counter_task_enable();
+		break;
 	case PR_GET_TIMERSLACK:
 		error = current->timer_slack_ns;
 		break;
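The diffstat above is limited to kernel/, so the header side of this commit is not shown. As a hedged sketch only, based on the values quoted in the commit message and the calls made from sys_prctl(), the userspace-visible constants and the prototypes used by kernel/sys.c would look roughly like this (exact header layout and placement are assumptions):

/* Sketch only -- reconstructed from the commit message, not from the patch. */

/* include/linux/prctl.h: prctl option values quoted in the commit message */
#define PR_TASK_PERF_COUNTERS_DISABLE	31
#define PR_TASK_PERF_COUNTERS_ENABLE	32

/* include/linux/perf_counter.h: prototypes matching the definitions above */
extern int perf_counter_task_disable(void);
extern int perf_counter_task_enable(void);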