author     Steven Rostedt <rostedt@goodmis.org>    2008-05-22 00:22:19 -0400
committer  Thomas Gleixner <tglx@linutronix.de>    2008-05-26 22:51:22 +0200
commit     7e18d8e701b6798a5df11e0a16881a60ab1018b6 (patch)
tree       788be0413af1f39fb7b493089dd5a497251279f6 /kernel
parent     4902f8849da6d2805bd291551a6dfd48f1b4f604 (diff)
ftrace: add function tracing to wake up tracing
This patch adds function tracing to the functions that are called on the CPU of the task being traced.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Cc: pq@iki.fi
Cc: proski@gnu.org
Cc: sandmann@redhat.com
Cc: a.p.zijlstra@chello.nl
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
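The mechanism the patch relies on is the generic ftrace function-trace hook: define a callback with the two-argument signature used by this tree, wrap it in a struct ftrace_ops, and hand it to register_ftrace_function() when the tracer starts, unregistering it on stop. Below is a minimal, hedged sketch of that registration pattern as a standalone module; the names (my_trace_call, my_trace_ops, my_tracer_init) are illustrative only, and later kernels use CONFIG_FUNCTION_TRACER with a longer callback signature.

#include <linux/ftrace.h>
#include <linux/module.h>

/* Invoked for every traced function while the ops is registered. */
static void my_trace_call(unsigned long ip, unsigned long parent_ip)
{
	/* Filter and record here; keep it cheap, lockless where possible,
	 * and never recurse into code that is itself being traced. */
}

static struct ftrace_ops my_trace_ops __read_mostly = {
	.func = my_trace_call,
};

static int __init my_tracer_init(void)
{
	register_ftrace_function(&my_trace_ops);
	return 0;
}

static void __exit my_tracer_exit(void)
{
	unregister_ftrace_function(&my_trace_ops);
}

module_init(my_tracer_init);
module_exit(my_tracer_exit);
MODULE_LICENSE("GPL");

wakeup_tracer_call() in the diff below follows this shape, adding a per-CPU "disabled" counter and a check under wakeup_lock so that only the CPU running the woken task records function entries.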
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/trace/trace_sched_wakeup.c | 67
1 file changed, 66 insertions(+), 1 deletion(-)
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 5d2fb48e47f..bf7e91caef5 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -30,6 +30,69 @@ static DEFINE_SPINLOCK(wakeup_lock);
static void __wakeup_reset(struct trace_array *tr);
+#ifdef CONFIG_FTRACE
+/*
+ * wakeup uses its own tracer function to keep the overhead down:
+ */
+static void
+wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
+{
+ struct trace_array *tr = wakeup_trace;
+ struct trace_array_cpu *data;
+ unsigned long flags;
+ long disabled;
+ int resched;
+ int cpu;
+
+ if (likely(!wakeup_task))
+ return;
+
+ resched = need_resched();
+ preempt_disable_notrace();
+
+ cpu = raw_smp_processor_id();
+ data = tr->data[cpu];
+ disabled = atomic_inc_return(&data->disabled);
+ if (unlikely(disabled != 1))
+ goto out;
+
+ spin_lock_irqsave(&wakeup_lock, flags);
+
+ if (unlikely(!wakeup_task))
+ goto unlock;
+
+ /*
+ * The task can't disappear because it needs to
+ * wake up first, and we have the wakeup_lock.
+ */
+ if (task_cpu(wakeup_task) != cpu)
+ goto unlock;
+
+ trace_function(tr, data, ip, parent_ip, flags);
+
+ unlock:
+ spin_unlock_irqrestore(&wakeup_lock, flags);
+
+ out:
+ atomic_dec(&data->disabled);
+
+ /*
+ * To prevent recursion into the scheduler: if a
+ * reschedule was already pending when we entered,
+ * don't trigger one when re-enabling preemption.
+ */
+ if (resched)
+ preempt_enable_no_resched_notrace();
+ else
+ preempt_enable_notrace();
+}
+
+static struct ftrace_ops trace_ops __read_mostly =
+{
+ .func = wakeup_tracer_call,
+};
+#endif /* CONFIG_FTRACE */
+
/*
* Should this new latency be reported/recorded?
*/
@@ -73,7 +136,7 @@ wakeup_sched_switch(void *private, void *rq, struct task_struct *prev,
if (next != wakeup_task)
return;
- /* The task we are waitng for is waking up */
+ /* The task we are waiting for is waking up */
data = tr->data[wakeup_cpu];
/* disable local data, not wakeup_cpu data */
@@ -290,6 +353,7 @@ static void start_wakeup_tracer(struct trace_array *tr)
smp_wmb();
tracer_enabled = 1;
+ register_ftrace_function(&trace_ops);
return;
fail_deprobe_wake_new:
@@ -305,6 +369,7 @@ fail_deprobe:
static void stop_wakeup_tracer(struct trace_array *tr)
{
tracer_enabled = 0;
+ unregister_ftrace_function(&trace_ops);
marker_probe_unregister("kernel_sched_schedule",
sched_switch_callback,
&wakeup_trace);
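One detail worth noting from wakeup_tracer_call() above: it samples need_resched() before preempt_disable_notrace() and, on exit, re-enables preemption with the no-resched variant when a reschedule was already pending, so the trace hook never calls back into the scheduler it may be tracing. A hedged, standalone sketch of that guard follows; the helper names trace_guard_enter/trace_guard_exit are illustrative and not part of this patch.

#include <linux/preempt.h>
#include <linux/sched.h>

/* Returns the saved resched state; pass it back to trace_guard_exit(). */
static inline int trace_guard_enter(void)
{
	int resched = need_resched();

	preempt_disable_notrace();
	return resched;
}

static inline void trace_guard_exit(int resched)
{
	/*
	 * If a reschedule was already pending when we entered, let the
	 * outer context handle it: re-enable with the no-resched variant
	 * so we do not recurse into the scheduler from the trace hook.
	 */
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}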