Diffstat (limited to 'kernel/trace')
-rw-r--r--   kernel/trace/Kconfig              19
-rw-r--r--   kernel/trace/ftrace.c            101
-rw-r--r--   kernel/trace/ring_buffer.c        79
-rw-r--r--   kernel/trace/trace.c              15
-rw-r--r--   kernel/trace/trace_branch.c       74
-rw-r--r--   kernel/trace/trace_mmiotrace.c    16
6 files changed, 252 insertions, 52 deletions
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 87fc34a1bb9..9cbf7761f49 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -169,14 +169,29 @@ config TRACE_BRANCH_PROFILING
This tracer profiles all the likely and unlikely macros
in the kernel. It will display the results in:
- /debugfs/tracing/profile_likely
- /debugfs/tracing/profile_unlikely
+ /debugfs/tracing/profile_annotated_branch
Note: this will add a significant overhead; only turn this
on if you need to profile the system's use of these macros.
Say N if unsure.
+config PROFILE_ALL_BRANCHES
+ bool "Profile all if conditionals"
+ depends on TRACE_BRANCH_PROFILING
+ help
+ This tracer profiles all branch conditions. Every if ()
+ evaluated in the kernel is recorded, whether it hit or missed.
+ The results will be displayed in:
+
+ /debugfs/tracing/profile_branch
+
+ This configuration, when enabled, will impose a significant overhead
+ on the system. It should only be enabled when the system
+ is being analyzed.
+
+ Say N if unsure.
+
config TRACING_BRANCHES
bool
help
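
For readers unfamiliar with the branch profiler: the annotated profile counts, per likely()/unlikely() site, how often the hint matched the branch actually taken, and the new PROFILE_ALL_BRANCHES option extends the same bookkeeping to every plain if (). The following user-space sketch models only that per-site bookkeeping; the names branch_site and profile_branch_site are invented for illustration and are not the kernel's macros, which live in compiler.h and keep their counters in dedicated sections.

/*
 * Illustrative sketch only: a user-space model of the per-branch
 * bookkeeping the profiler performs.  All identifiers are made up
 * for this example.
 */
#include <stdio.h>

struct branch_site {
	const char *func;
	const char *file;
	int line;
	unsigned long correct;
	unsigned long incorrect;
};

/* Record one evaluation of an annotated condition. */
static int profile_branch_site(struct branch_site *site, int cond, int expect)
{
	if (cond == expect)
		site->correct++;
	else
		site->incorrect++;
	return cond;
}

int main(void)
{
	static struct branch_site site = { "main", __FILE__, __LINE__, 0, 0 };
	int i;

	for (i = 0; i < 100; i++) {
		/* model of: if (likely(i != 0)) ... */
		if (profile_branch_site(&site, i != 0, 1))
			;
	}
	printf("%8lu %8lu  %s %s %d\n", site.correct, site.incorrect,
	       site.func, site.file, site.line);
	return 0;
}
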
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index f212da48668..53042f118f2 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1498,10 +1498,77 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
#ifdef CONFIG_FUNCTION_RET_TRACER
+static atomic_t ftrace_retfunc_active;
+
/* The callback that hooks the return of a function */
trace_function_return_t ftrace_function_return =
(trace_function_return_t)ftrace_stub;
+
+/* Try to assign a return stack to each of FTRACE_RETSTACK_ALLOC_SIZE tasks. */
+static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
+{
+ int i;
+ int ret = 0;
+ unsigned long flags;
+ int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
+ struct task_struct *g, *t;
+
+ for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
+ ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
+ * sizeof(struct ftrace_ret_stack),
+ GFP_KERNEL);
+ if (!ret_stack_list[i]) {
+ start = 0;
+ end = i;
+ ret = -ENOMEM;
+ goto free;
+ }
+ }
+
+ read_lock_irqsave(&tasklist_lock, flags);
+ do_each_thread(g, t) {
+ if (start == end) {
+ ret = -EAGAIN;
+ goto unlock;
+ }
+
+ if (t->ret_stack == NULL) {
+ t->ret_stack = ret_stack_list[start++];
+ t->curr_ret_stack = -1;
+ atomic_set(&t->trace_overrun, 0);
+ }
+ } while_each_thread(g, t);
+
+unlock:
+ read_unlock_irqrestore(&tasklist_lock, flags);
+free:
+ for (i = start; i < end; i++)
+ kfree(ret_stack_list[i]);
+ return ret;
+}
+
+/* Allocate a return stack for each task */
+static int start_return_tracing(void)
+{
+ struct ftrace_ret_stack **ret_stack_list;
+ int ret;
+
+ ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
+ sizeof(struct ftrace_ret_stack *),
+ GFP_KERNEL);
+
+ if (!ret_stack_list)
+ return -ENOMEM;
+
+ do {
+ ret = alloc_retstack_tasklist(ret_stack_list);
+ } while (ret == -EAGAIN);
+
+ kfree(ret_stack_list);
+ return ret;
+}
+
int register_ftrace_return(trace_function_return_t func)
{
int ret = 0;
@@ -1516,7 +1583,12 @@ int register_ftrace_return(trace_function_return_t func)
ret = -EBUSY;
goto out;
}
-
+ atomic_inc(&ftrace_retfunc_active);
+ ret = start_return_tracing();
+ if (ret) {
+ atomic_dec(&ftrace_retfunc_active);
+ goto out;
+ }
ftrace_tracing_type = FTRACE_TYPE_RETURN;
ftrace_function_return = func;
ftrace_startup();
@@ -1530,6 +1602,7 @@ void unregister_ftrace_return(void)
{
mutex_lock(&ftrace_sysctl_lock);
+ atomic_dec(&ftrace_retfunc_active);
ftrace_function_return = (trace_function_return_t)ftrace_stub;
ftrace_shutdown();
/* Restore normal tracing type */
@@ -1537,6 +1610,32 @@ void unregister_ftrace_return(void)
mutex_unlock(&ftrace_sysctl_lock);
}
+
+/* Allocate a return stack for newly created task */
+void ftrace_retfunc_init_task(struct task_struct *t)
+{
+ if (atomic_read(&ftrace_retfunc_active)) {
+ t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
+ * sizeof(struct ftrace_ret_stack),
+ GFP_KERNEL);
+ if (!t->ret_stack)
+ return;
+ t->curr_ret_stack = -1;
+ atomic_set(&t->trace_overrun, 0);
+ } else
+ t->ret_stack = NULL;
+}
+
+void ftrace_retfunc_exit_task(struct task_struct *t)
+{
+ struct ftrace_ret_stack *ret_stack = t->ret_stack;
+
+ t->ret_stack = NULL;
+ /* NULL must become visible to IRQs before we free it: */
+ barrier();
+
+ kfree(ret_stack);
+}
#endif
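
The allocation scheme above is worth spelling out: alloc_retstack_tasklist() pre-allocates a batch of FTRACE_RETSTACK_ALLOC_SIZE return stacks outside the tasklist lock, hands them out under the lock to tasks that lack one, and returns -EAGAIN when the batch runs dry so that start_return_tracing() can retry with a fresh batch. Below is a self-contained user-space model of that batch-and-retry pattern; BATCH, NR_TASKS, task_stack and assign_batch are invented names for illustration, not kernel APIs.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define BATCH     4
#define NR_TASKS 10			/* stands in for the task list */

static void *task_stack[NR_TASKS];	/* NULL == not yet assigned */

/* Hand a pre-allocated batch out to tasks; -EAGAIN if the batch runs dry. */
static int assign_batch(void)
{
	void *batch[BATCH];
	int i, used = 0, ret = 0;

	for (i = 0; i < BATCH; i++) {
		batch[i] = malloc(64);
		if (!batch[i]) {
			while (i--)
				free(batch[i]);
			return -ENOMEM;
		}
	}

	/* In the kernel this walk happens under read_lock(&tasklist_lock). */
	for (i = 0; i < NR_TASKS; i++) {
		if (task_stack[i])
			continue;
		if (used == BATCH) {
			ret = -EAGAIN;	/* more tasks than the batch covers */
			break;
		}
		task_stack[i] = batch[used++];
	}

	/* Free whatever was not handed out. */
	while (used < BATCH)
		free(batch[used++]);
	return ret;
}

int main(void)
{
	int i, ret;

	do {
		ret = assign_batch();
	} while (ret == -EAGAIN);
	printf("assignment finished: %d\n", ret);

	for (i = 0; i < NR_TASKS; i++)
		free(task_stack[i]);
	return 0;
}
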
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 85ced143c2c..e206951603c 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -18,8 +18,46 @@
#include "trace.h"
-/* Global flag to disable all recording to ring buffers */
-static int ring_buffers_off __read_mostly;
+/*
+ * A fast way to enable or disable all ring buffers is to
+ * call tracing_on or tracing_off. Turning off the ring buffers
+ * prevents all ring buffers from being recorded to.
+ * Turning this switch on makes it OK to write to the
+ * ring buffer, if the ring buffer is enabled itself.
+ *
+ * There are three layers that must be on in order to write
+ * to the ring buffer.
+ *
+ * 1) This global flag must be set.
+ * 2) The ring buffer must be enabled for recording.
+ * 3) The per cpu buffer must be enabled for recording.
+ *
+ * In case of an anomaly, this global flag has a bit set that
+ * will permanently disable all ring buffers.
+ */
+
+/*
+ * Global flag to disable all recording to ring buffers
+ * This has two bits: ON, DISABLED
+ *
+ * ON DISABLED
+ * ---- ----------
+ * 0 0 : ring buffers are off
+ * 1 0 : ring buffers are on
+ * X 1 : ring buffers are permanently disabled
+ */
+
+enum {
+ RB_BUFFERS_ON_BIT = 0,
+ RB_BUFFERS_DISABLED_BIT = 1,
+};
+
+enum {
+ RB_BUFFERS_ON = 1 << RB_BUFFERS_ON_BIT,
+ RB_BUFFERS_DISABLED = 1 << RB_BUFFERS_DISABLED_BIT,
+};
+
+static long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
/**
* tracing_on - enable all tracing buffers
@@ -29,7 +67,7 @@ static int ring_buffers_off __read_mostly;
*/
void tracing_on(void)
{
- ring_buffers_off = 0;
+ set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
}
/**
@@ -42,7 +80,18 @@ void tracing_on(void)
*/
void tracing_off(void)
{
- ring_buffers_off = 1;
+ clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
+}
+
+/**
+ * tracing_off_permanent - permanently disable ring buffers
+ *
+ * This function, once called, will disable all ring buffers
+ * permanently.
+ */
+void tracing_off_permanent(void)
+{
+ set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
}
#include "trace.h"
@@ -1185,7 +1234,7 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer,
struct ring_buffer_event *event;
int cpu, resched;
- if (ring_buffers_off)
+ if (ring_buffer_flags != RB_BUFFERS_ON)
return NULL;
if (atomic_read(&buffer->record_disabled))
@@ -1297,7 +1346,7 @@ int ring_buffer_write(struct ring_buffer *buffer,
int ret = -EBUSY;
int cpu, resched;
- if (ring_buffers_off)
+ if (ring_buffer_flags != RB_BUFFERS_ON)
return -EBUSY;
if (atomic_read(&buffer->record_disabled))
@@ -2178,12 +2227,14 @@ static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
{
- int *p = filp->private_data;
+ long *p = filp->private_data;
char buf[64];
int r;
- /* !ring_buffers_off == tracing_on */
- r = sprintf(buf, "%d\n", !*p);
+ if (test_bit(RB_BUFFERS_DISABLED_BIT, p))
+ r = sprintf(buf, "permanently disabled\n");
+ else
+ r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p));
return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
@@ -2192,7 +2243,7 @@ static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
- int *p = filp->private_data;
+ long *p = filp->private_data;
char buf[64];
long val;
int ret;
@@ -2209,8 +2260,10 @@ rb_simple_write(struct file *filp, const char __user *ubuf,
if (ret < 0)
return ret;
- /* !ring_buffers_off == tracing_on */
- *p = !val;
+ if (val)
+ set_bit(RB_BUFFERS_ON_BIT, p);
+ else
+ clear_bit(RB_BUFFERS_ON_BIT, p);
(*ppos)++;
@@ -2232,7 +2285,7 @@ static __init int rb_init_debugfs(void)
d_tracer = tracing_init_dentry();
entry = debugfs_create_file("tracing_on", 0644, d_tracer,
- &ring_buffers_off, &rb_simple_fops);
+ &ring_buffer_flags, &rb_simple_fops);
if (!entry)
pr_warning("Could not create debugfs 'tracing_on' entry\n");
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 48d1536f1ca..a45b59e53fb 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -686,6 +686,21 @@ static int trace_stop_count;
static DEFINE_SPINLOCK(tracing_start_lock);
/**
+ * ftrace_off_permanent - disable all ftrace code permanently
+ *
+ * This should only be called when a serious anomaly has
+ * been detected. It will turn off function tracing,
+ * ring buffers, and other tracing utilities. It takes no
+ * locks and can be called from any context.
+ */
+void ftrace_off_permanent(void)
+{
+ tracing_disabled = 1;
+ ftrace_stop();
+ tracing_off_permanent();
+}
+
+/**
* tracing_start - quick start of the tracer
*
* If tracing is enabled but was stopped by tracing_stop,
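
A hypothetical caller of the new hook, for illustration only: my_state_corrupted is an invented flag, and the example assumes the declaration of ftrace_off_permanent() is exported via <linux/ftrace.h>.

#include <linux/ftrace.h>
#include <linux/kernel.h>

static bool my_state_corrupted;		/* invented for this example */

static void my_check_state(void)
{
	if (my_state_corrupted) {
		WARN_ONCE(1, "trace state corrupted, disabling tracing\n");
		ftrace_off_permanent();	/* takes no locks, safe in any context */
	}
}
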
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index 23f9b02ce96..877ee88e6a7 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -185,12 +185,13 @@ EXPORT_SYMBOL(ftrace_likely_update);
struct ftrace_pointer {
void *start;
void *stop;
+ int hit;
};
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
- struct ftrace_pointer *f = m->private;
+ const struct ftrace_pointer *f = m->private;
struct ftrace_branch_data *p = v;
(*pos)++;
@@ -223,13 +224,17 @@ static void t_stop(struct seq_file *m, void *p)
static int t_show(struct seq_file *m, void *v)
{
+ const struct ftrace_pointer *fp = m->private;
struct ftrace_branch_data *p = v;
const char *f;
- unsigned long percent;
+ long percent;
if (v == (void *)1) {
- seq_printf(m, " correct incorrect %% "
- " Function "
+ if (fp->hit)
+ seq_printf(m, " miss hit %% ");
+ else
+ seq_printf(m, " correct incorrect %% ");
+ seq_printf(m, " Function "
" File Line\n"
" ------- --------- - "
" -------- "
@@ -243,13 +248,20 @@ static int t_show(struct seq_file *m, void *v)
f--;
f++;
+ /*
+ * The miss is overlaid on correct, and the hit on incorrect.
+ */
if (p->correct) {
percent = p->incorrect * 100;
percent /= p->correct + p->incorrect;
} else
- percent = p->incorrect ? 100 : 0;
+ percent = p->incorrect ? 100 : -1;
- seq_printf(m, "%8lu %8lu %3lu ", p->correct, p->incorrect, percent);
+ seq_printf(m, "%8lu %8lu ", p->correct, p->incorrect);
+ if (percent < 0)
+ seq_printf(m, " X ");
+ else
+ seq_printf(m, "%3ld ", percent);
seq_printf(m, "%-30.30s %-20.20s %d\n", p->func, f, p->line);
return 0;
}
@@ -261,7 +273,7 @@ static struct seq_operations tracing_likely_seq_ops = {
.show = t_show,
};
-static int tracing_likely_open(struct inode *inode, struct file *file)
+static int tracing_branch_open(struct inode *inode, struct file *file)
{
int ret;
@@ -274,25 +286,30 @@ static int tracing_likely_open(struct inode *inode, struct file *file)
return ret;
}
-static struct file_operations tracing_likely_fops = {
- .open = tracing_likely_open,
+static const struct file_operations tracing_branch_fops = {
+ .open = tracing_branch_open,
.read = seq_read,
.llseek = seq_lseek,
};
-extern unsigned long __start_likely_profile[];
-extern unsigned long __stop_likely_profile[];
-extern unsigned long __start_unlikely_profile[];
-extern unsigned long __stop_unlikely_profile[];
+#ifdef CONFIG_PROFILE_ALL_BRANCHES
+extern unsigned long __start_branch_profile[];
+extern unsigned long __stop_branch_profile[];
-static struct ftrace_pointer ftrace_likely_pos = {
- .start = __start_likely_profile,
- .stop = __stop_likely_profile,
+static const struct ftrace_pointer ftrace_branch_pos = {
+ .start = __start_branch_profile,
+ .stop = __stop_branch_profile,
+ .hit = 1,
};
-static struct ftrace_pointer ftrace_unlikely_pos = {
- .start = __start_unlikely_profile,
- .stop = __stop_unlikely_profile,
+#endif /* CONFIG_PROFILE_ALL_BRANCHES */
+
+extern unsigned long __start_annotated_branch_profile[];
+extern unsigned long __stop_annotated_branch_profile[];
+
+static const struct ftrace_pointer ftrace_annotated_branch_pos = {
+ .start = __start_annotated_branch_profile,
+ .stop = __stop_annotated_branch_profile,
};
static __init int ftrace_branch_init(void)
@@ -302,18 +319,21 @@ static __init int ftrace_branch_init(void)
d_tracer = tracing_init_dentry();
- entry = debugfs_create_file("profile_likely", 0444, d_tracer,
- &ftrace_likely_pos,
- &tracing_likely_fops);
+ entry = debugfs_create_file("profile_annotated_branch", 0444, d_tracer,
+ (void *)&ftrace_annotated_branch_pos,
+ &tracing_branch_fops);
if (!entry)
- pr_warning("Could not create debugfs 'profile_likely' entry\n");
+ pr_warning("Could not create debugfs "
+ "'profile_annotated_branch' entry\n");
- entry = debugfs_create_file("profile_unlikely", 0444, d_tracer,
- &ftrace_unlikely_pos,
- &tracing_likely_fops);
+#ifdef CONFIG_PROFILE_ALL_BRANCHES
+ entry = debugfs_create_file("profile_branch", 0444, d_tracer,
+ (void *)&ftrace_branch_pos,
+ &tracing_branch_fops);
if (!entry)
pr_warning("Could not create debugfs"
- " 'profile_unlikely' entry\n");
+ " 'profile_branch' entry\n");
+#endif
return 0;
}
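
To make the output format concrete: for the annotated profile the two counters record how often the likely()/unlikely() hint was right or wrong, while the all-branch profile reuses the same fields as miss/hit counts (hence the alternate header). A site whose counters are both zero was never reached and prints "X" instead of a percentage. A stand-alone model of the computation in t_show():

#include <stdio.h>

static void show_branch(unsigned long correct, unsigned long incorrect)
{
	long percent;

	if (correct) {
		percent = incorrect * 100;
		percent /= correct + incorrect;
	} else {
		percent = incorrect ? 100 : -1;	/* -1: branch never reached */
	}

	printf("%8lu %8lu ", correct, incorrect);
	if (percent < 0)
		printf("    X\n");
	else
		printf("%3ld\n", percent);
}

int main(void)
{
	show_branch(90, 10);	/* hint wrong 10% of the time -> prints  10 */
	show_branch(0, 42);	/* hint always wrong          -> prints 100 */
	show_branch(0, 0);	/* branch never reached       -> prints   X */
	return 0;
}
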
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
index 433d650eda9..2a98a206acc 100644
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -18,12 +18,14 @@ struct header_iter {
static struct trace_array *mmio_trace_array;
static bool overrun_detected;
+static unsigned long prev_overruns;
static void mmio_reset_data(struct trace_array *tr)
{
int cpu;
overrun_detected = false;
+ prev_overruns = 0;
tr->time_start = ftrace_now(tr->cpu);
for_each_online_cpu(cpu)
@@ -123,16 +125,12 @@ static void mmio_close(struct trace_iterator *iter)
static unsigned long count_overruns(struct trace_iterator *iter)
{
- int cpu;
unsigned long cnt = 0;
-/* FIXME: */
-#if 0
- for_each_online_cpu(cpu) {
- cnt += iter->overrun[cpu];
- iter->overrun[cpu] = 0;
- }
-#endif
- (void)cpu;
+ unsigned long over = ring_buffer_overruns(iter->tr->buffer);
+
+ if (over > prev_overruns)
+ cnt = over - prev_overruns;
+ prev_overruns = over;
return cnt;
}
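
The rewritten count_overruns() relies on ring_buffer_overruns() returning a monotonically increasing total, and reports only the delta accumulated since the previous call. A minimal user-space model of that delta tracking (count_new_overruns is an invented name):

#include <stdio.h>

static unsigned long prev_overruns;

static unsigned long count_new_overruns(unsigned long total_now)
{
	unsigned long cnt = 0;

	if (total_now > prev_overruns)
		cnt = total_now - prev_overruns;
	prev_overruns = total_now;
	return cnt;
}

int main(void)
{
	printf("%lu\n", count_new_overruns(5));		/* 5 */
	printf("%lu\n", count_new_overruns(5));		/* 0 */
	printf("%lu\n", count_new_overruns(12));	/* 7 */
	return 0;
}
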