Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/Kconfig                 |   1
-rw-r--r--  kernel/trace/ftrace.c                |  15
-rw-r--r--  kernel/trace/ring_buffer.c           | 311
-rw-r--r--  kernel/trace/trace.c                 |  16
-rw-r--r--  kernel/trace/trace.h                 |   2
-rw-r--r--  kernel/trace/trace_branch.c          |   4
-rw-r--r--  kernel/trace/trace_functions_graph.c | 168
-rw-r--r--  kernel/trace/trace_stack.c           |  13

8 files changed, 418 insertions(+), 112 deletions(-)
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 8b6b673b4d6..bde6f03512d 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -67,6 +67,7 @@ config FUNCTION_GRAPH_TRACER
bool "Kernel Function Graph Tracer"
depends on HAVE_FUNCTION_GRAPH_TRACER
depends on FUNCTION_TRACER
+ default y
help
Enable the kernel to trace a function at both its return
and its entry.
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 2e78628443e..65b9e863056 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1636,11 +1636,15 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
static atomic_t ftrace_graph_active;
+int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
+{
+ return 0;
+}
+
/* The callbacks that hook a function */
trace_func_graph_ret_t ftrace_graph_return =
(trace_func_graph_ret_t)ftrace_stub;
-trace_func_graph_ent_t ftrace_graph_entry =
- (trace_func_graph_ent_t)ftrace_stub;
+trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
@@ -1738,7 +1742,7 @@ void unregister_ftrace_graph(void)
atomic_dec(&ftrace_graph_active);
ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
- ftrace_graph_entry = (trace_func_graph_ent_t)ftrace_stub;
+ ftrace_graph_entry = ftrace_graph_entry_stub;
ftrace_shutdown(FTRACE_STOP_FUNC_RET);
mutex_unlock(&ftrace_sysctl_lock);
@@ -1769,5 +1773,10 @@ void ftrace_graph_exit_task(struct task_struct *t)
kfree(ret_stack);
}
+
+void ftrace_graph_stop(void)
+{
+ ftrace_stop();
+}
#endif
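
The entry hook now gets its own properly typed stub instead of a cast of
ftrace_stub, because trace_func_graph_ent_t returns int: an entry handler can
return 0 to decline tracing a given function. A minimal sketch of a caller,
assuming the register_ftrace_graph(retfunc, entryfunc) signature of this
series and that struct ftrace_graph_ent exposes a depth field;
my_graph_entry and my_graph_return are hypothetical names:

#include <linux/ftrace.h>

/* Return 0 to skip recording this function, nonzero to trace it. */
static int my_graph_entry(struct ftrace_graph_ent *trace)
{
	return trace->depth < 3;	/* e.g. only trace shallow calls */
}

static void my_graph_return(struct ftrace_graph_ret *trace)
{
	/* trace->rettime - trace->calltime is the duration in ns */
}

static int __init my_tracer_init(void)
{
	return register_ftrace_graph(my_graph_return, my_graph_entry);
}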
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index e206951603c..7f69cfeaadf 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -195,20 +195,24 @@ void *ring_buffer_event_data(struct ring_buffer_event *event)
#define TS_MASK ((1ULL << TS_SHIFT) - 1)
#define TS_DELTA_TEST (~TS_MASK)
-/*
- * This hack stolen from mm/slob.c.
- * We can store per page timing information in the page frame of the page.
- * Thanks to Peter Zijlstra for suggesting this idea.
- */
-struct buffer_page {
+struct buffer_data_page {
u64 time_stamp; /* page time stamp */
- local_t write; /* index for next write */
local_t commit; /* write committed index */
+ unsigned char data[]; /* data of buffer page */
+};
+
+struct buffer_page {
+ local_t write; /* index for next write */
unsigned read; /* index for next read */
struct list_head list; /* list of free pages */
- void *page; /* Actual data page */
+ struct buffer_data_page *page; /* Actual data page */
};
+static void rb_init_page(struct buffer_data_page *bpage)
+{
+ local_set(&bpage->commit, 0);
+}
+
/*
* Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
* this issue out.
@@ -230,7 +234,7 @@ static inline int test_time_stamp(u64 delta)
return 0;
}
-#define BUF_PAGE_SIZE PAGE_SIZE
+#define BUF_PAGE_SIZE (PAGE_SIZE - sizeof(struct buffer_data_page))
/*
* head_page == tail_page && head == tail then buffer is empty.
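
The split above moves the per-page header (time_stamp, commit) into the page
frame itself as struct buffer_data_page, while the kmalloc'ed struct
buffer_page keeps only bookkeeping, so BUF_PAGE_SIZE shrinks by the header
size. An illustrative userspace sketch, assuming 4096-byte pages and 8-byte
u64/local_t as on a typical 64-bit build (the struct here is a stand-in, not
the kernel's):

#include <stdio.h>

struct buffer_data_page_sketch {
	unsigned long long time_stamp;	/* page time stamp */
	long commit;			/* stand-in for local_t */
	unsigned char data[];		/* event payload starts here */
};

int main(void)
{
	unsigned long page_size = 4096;

	/* mirrors BUF_PAGE_SIZE = PAGE_SIZE - sizeof(struct buffer_data_page) */
	printf("payload per page: %lu bytes\n",
	       page_size - sizeof(struct buffer_data_page_sketch));
	return 0;	/* prints 4080 under these assumptions */
}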
@@ -294,19 +298,19 @@ struct ring_buffer_iter {
static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
struct list_head *head = &cpu_buffer->pages;
- struct buffer_page *page, *tmp;
+ struct buffer_page *bpage, *tmp;
if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
return -1;
if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
return -1;
- list_for_each_entry_safe(page, tmp, head, list) {
+ list_for_each_entry_safe(bpage, tmp, head, list) {
if (RB_WARN_ON(cpu_buffer,
- page->list.next->prev != &page->list))
+ bpage->list.next->prev != &bpage->list))
return -1;
if (RB_WARN_ON(cpu_buffer,
- page->list.prev->next != &page->list))
+ bpage->list.prev->next != &bpage->list))
return -1;
}
@@ -317,22 +321,23 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
unsigned nr_pages)
{
struct list_head *head = &cpu_buffer->pages;
- struct buffer_page *page, *tmp;
+ struct buffer_page *bpage, *tmp;
unsigned long addr;
LIST_HEAD(pages);
unsigned i;
for (i = 0; i < nr_pages; i++) {
- page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
+ bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
- if (!page)
+ if (!bpage)
goto free_pages;
- list_add(&page->list, &pages);
+ list_add(&bpage->list, &pages);
addr = __get_free_page(GFP_KERNEL);
if (!addr)
goto free_pages;
- page->page = (void *)addr;
+ bpage->page = (void *)addr;
+ rb_init_page(bpage->page);
}
list_splice(&pages, head);
@@ -342,9 +347,9 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
return 0;
free_pages:
- list_for_each_entry_safe(page, tmp, &pages, list) {
- list_del_init(&page->list);
- free_buffer_page(page);
+ list_for_each_entry_safe(bpage, tmp, &pages, list) {
+ list_del_init(&bpage->list);
+ free_buffer_page(bpage);
}
return -ENOMEM;
}
@@ -353,7 +358,7 @@ static struct ring_buffer_per_cpu *
rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer;
- struct buffer_page *page;
+ struct buffer_page *bpage;
unsigned long addr;
int ret;
@@ -368,16 +373,17 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
INIT_LIST_HEAD(&cpu_buffer->pages);
- page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
+ bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
GFP_KERNEL, cpu_to_node(cpu));
- if (!page)
+ if (!bpage)
goto fail_free_buffer;
- cpu_buffer->reader_page = page;
+ cpu_buffer->reader_page = bpage;
addr = __get_free_page(GFP_KERNEL);
if (!addr)
goto fail_free_reader;
- page->page = (void *)addr;
+ bpage->page = (void *)addr;
+ rb_init_page(bpage->page);
INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
@@ -402,14 +408,14 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
{
struct list_head *head = &cpu_buffer->pages;
- struct buffer_page *page, *tmp;
+ struct buffer_page *bpage, *tmp;
list_del_init(&cpu_buffer->reader_page->list);
free_buffer_page(cpu_buffer->reader_page);
- list_for_each_entry_safe(page, tmp, head, list) {
- list_del_init(&page->list);
- free_buffer_page(page);
+ list_for_each_entry_safe(bpage, tmp, head, list) {
+ list_del_init(&bpage->list);
+ free_buffer_page(bpage);
}
kfree(cpu_buffer);
}
@@ -506,7 +512,7 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
static void
rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
{
- struct buffer_page *page;
+ struct buffer_page *bpage;
struct list_head *p;
unsigned i;
@@ -517,9 +523,9 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
return;
p = cpu_buffer->pages.next;
- page = list_entry(p, struct buffer_page, list);
- list_del_init(&page->list);
- free_buffer_page(page);
+ bpage = list_entry(p, struct buffer_page, list);
+ list_del_init(&bpage->list);
+ free_buffer_page(bpage);
}
if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
return;
@@ -536,7 +542,7 @@ static void
rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
struct list_head *pages, unsigned nr_pages)
{
- struct buffer_page *page;
+ struct buffer_page *bpage;
struct list_head *p;
unsigned i;
@@ -547,9 +553,9 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
return;
p = pages->next;
- page = list_entry(p, struct buffer_page, list);
- list_del_init(&page->list);
- list_add_tail(&page->list, &cpu_buffer->pages);
+ bpage = list_entry(p, struct buffer_page, list);
+ list_del_init(&bpage->list);
+ list_add_tail(&bpage->list, &cpu_buffer->pages);
}
rb_reset_cpu(cpu_buffer);
@@ -576,7 +582,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
{
struct ring_buffer_per_cpu *cpu_buffer;
unsigned nr_pages, rm_pages, new_pages;
- struct buffer_page *page, *tmp;
+ struct buffer_page *bpage, *tmp;
unsigned long buffer_size;
unsigned long addr;
LIST_HEAD(pages);
@@ -637,16 +643,17 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
for_each_buffer_cpu(buffer, cpu) {
for (i = 0; i < new_pages; i++) {
- page = kzalloc_node(ALIGN(sizeof(*page),
+ bpage = kzalloc_node(ALIGN(sizeof(*bpage),
cache_line_size()),
GFP_KERNEL, cpu_to_node(cpu));
- if (!page)
+ if (!bpage)
goto free_pages;
- list_add(&page->list, &pages);
+ list_add(&bpage->list, &pages);
addr = __get_free_page(GFP_KERNEL);
if (!addr)
goto free_pages;
- page->page = (void *)addr;
+ bpage->page = (void *)addr;
+ rb_init_page(bpage->page);
}
}
@@ -667,9 +674,9 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
return size;
free_pages:
- list_for_each_entry_safe(page, tmp, &pages, list) {
- list_del_init(&page->list);
- free_buffer_page(page);
+ list_for_each_entry_safe(bpage, tmp, &pages, list) {
+ list_del_init(&bpage->list);
+ free_buffer_page(bpage);
}
mutex_unlock(&buffer->mutex);
return -ENOMEM;
@@ -680,9 +687,15 @@ static inline int rb_null_event(struct ring_buffer_event *event)
return event->type == RINGBUF_TYPE_PADDING;
}
-static inline void *__rb_page_index(struct buffer_page *page, unsigned index)
+static inline void *
+__rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
{
- return page->page + index;
+ return bpage->data + index;
+}
+
+static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
+{
+ return bpage->page->data + index;
}
static inline struct ring_buffer_event *
@@ -712,7 +725,7 @@ static inline unsigned rb_page_write(struct buffer_page *bpage)
static inline unsigned rb_page_commit(struct buffer_page *bpage)
{
- return local_read(&bpage->commit);
+ return local_read(&bpage->page->commit);
}
/* Size is determined by what has been committed */
@@ -758,14 +771,14 @@ static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer)
}
static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
- struct buffer_page **page)
+ struct buffer_page **bpage)
{
- struct list_head *p = (*page)->list.next;
+ struct list_head *p = (*bpage)->list.next;
if (p == &cpu_buffer->pages)
p = p->next;
- *page = list_entry(p, struct buffer_page, list);
+ *bpage = list_entry(p, struct buffer_page, list);
}
static inline unsigned
@@ -804,14 +817,15 @@ rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer,
if (RB_WARN_ON(cpu_buffer,
cpu_buffer->commit_page == cpu_buffer->tail_page))
return;
- cpu_buffer->commit_page->commit =
+ cpu_buffer->commit_page->page->commit =
cpu_buffer->commit_page->write;
rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
- cpu_buffer->write_stamp = cpu_buffer->commit_page->time_stamp;
+ cpu_buffer->write_stamp =
+ cpu_buffer->commit_page->page->time_stamp;
}
/* Now set the commit to the event's index */
- local_set(&cpu_buffer->commit_page->commit, index);
+ local_set(&cpu_buffer->commit_page->page->commit, index);
}
static inline void
@@ -826,16 +840,17 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
* assign the commit to the tail.
*/
while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
- cpu_buffer->commit_page->commit =
+ cpu_buffer->commit_page->page->commit =
cpu_buffer->commit_page->write;
rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
- cpu_buffer->write_stamp = cpu_buffer->commit_page->time_stamp;
+ cpu_buffer->write_stamp =
+ cpu_buffer->commit_page->page->time_stamp;
/* add barrier to keep gcc from optimizing too much */
barrier();
}
while (rb_commit_index(cpu_buffer) !=
rb_page_write(cpu_buffer->commit_page)) {
- cpu_buffer->commit_page->commit =
+ cpu_buffer->commit_page->page->commit =
cpu_buffer->commit_page->write;
barrier();
}
@@ -843,7 +858,7 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
{
- cpu_buffer->read_stamp = cpu_buffer->reader_page->time_stamp;
+ cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
cpu_buffer->reader_page->read = 0;
}
@@ -862,7 +877,7 @@ static inline void rb_inc_iter(struct ring_buffer_iter *iter)
else
rb_inc_page(cpu_buffer, &iter->head_page);
- iter->read_stamp = iter->head_page->time_stamp;
+ iter->read_stamp = iter->head_page->page->time_stamp;
iter->head = 0;
}
@@ -998,12 +1013,12 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
*/
if (tail_page == cpu_buffer->tail_page) {
local_set(&next_page->write, 0);
- local_set(&next_page->commit, 0);
+ local_set(&next_page->page->commit, 0);
cpu_buffer->tail_page = next_page;
/* reread the time stamp */
*ts = ring_buffer_time_stamp(cpu_buffer->cpu);
- cpu_buffer->tail_page->time_stamp = *ts;
+ cpu_buffer->tail_page->page->time_stamp = *ts;
}
/*
@@ -1048,7 +1063,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
* this page's time stamp.
*/
if (!tail && rb_is_commit(cpu_buffer, event))
- cpu_buffer->commit_page->time_stamp = *ts;
+ cpu_buffer->commit_page->page->time_stamp = *ts;
return event;
@@ -1099,7 +1114,7 @@ rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
event->time_delta = *delta & TS_MASK;
event->array[0] = *delta >> TS_SHIFT;
} else {
- cpu_buffer->commit_page->time_stamp = *ts;
+ cpu_buffer->commit_page->page->time_stamp = *ts;
event->time_delta = 0;
event->array[0] = 0;
}
@@ -1552,7 +1567,7 @@ static void rb_iter_reset(struct ring_buffer_iter *iter)
if (iter->head)
iter->read_stamp = cpu_buffer->read_stamp;
else
- iter->read_stamp = iter->head_page->time_stamp;
+ iter->read_stamp = iter->head_page->page->time_stamp;
}
/**
@@ -1696,7 +1711,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
cpu_buffer->reader_page->list.prev = reader->list.prev;
local_set(&cpu_buffer->reader_page->write, 0);
- local_set(&cpu_buffer->reader_page->commit, 0);
+ local_set(&cpu_buffer->reader_page->page->commit, 0);
/* Make the reader page now replace the head */
reader->list.prev->next = &cpu_buffer->reader_page->list;
@@ -2088,7 +2103,7 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
cpu_buffer->head_page
= list_entry(cpu_buffer->pages.next, struct buffer_page, list);
local_set(&cpu_buffer->head_page->write, 0);
- local_set(&cpu_buffer->head_page->commit, 0);
+ local_set(&cpu_buffer->head_page->page->commit, 0);
cpu_buffer->head_page->read = 0;
@@ -2097,7 +2112,7 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
local_set(&cpu_buffer->reader_page->write, 0);
- local_set(&cpu_buffer->reader_page->commit, 0);
+ local_set(&cpu_buffer->reader_page->page->commit, 0);
cpu_buffer->reader_page->read = 0;
cpu_buffer->overrun = 0;
@@ -2223,6 +2238,166 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
return 0;
}
+static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer,
+ struct buffer_data_page *bpage)
+{
+ struct ring_buffer_event *event;
+ unsigned long head;
+
+ __raw_spin_lock(&cpu_buffer->lock);
+ for (head = 0; head < local_read(&bpage->commit);
+ head += rb_event_length(event)) {
+
+ event = __rb_data_page_index(bpage, head);
+		if (RB_WARN_ON(cpu_buffer, rb_null_event(event)))
+			break;	/* don't return with cpu_buffer->lock held */
+ /* Only count data entries */
+ if (event->type != RINGBUF_TYPE_DATA)
+ continue;
+ cpu_buffer->entries--;
+ }
+ __raw_spin_unlock(&cpu_buffer->lock);
+}
+
+/**
+ * ring_buffer_alloc_read_page - allocate a page to read from buffer
+ * @buffer: the buffer to allocate for.
+ *
+ * This function is used in conjunction with ring_buffer_read_page.
+ * When reading a full page from the ring buffer, these functions
+ * can be used to speed up the process. The calling function should
+ * allocate a few pages first with this function. Then when it
+ * needs to get pages from the ring buffer, it passes the result
+ * of this function into ring_buffer_read_page, which will swap
+ * the page that was allocated, with the read page of the buffer.
+ *
+ * Returns:
+ * The page allocated, or NULL on error.
+ */
+void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
+{
+ unsigned long addr;
+ struct buffer_data_page *bpage;
+
+ addr = __get_free_page(GFP_KERNEL);
+ if (!addr)
+ return NULL;
+
+	bpage = (void *)addr;
+	rb_init_page(bpage);
+
+ return bpage;
+}
+
+/**
+ * ring_buffer_free_read_page - free an allocated read page
+ * @buffer: the buffer the page was allocated for
+ * @data: the page to free
+ *
+ * Free a page allocated from ring_buffer_alloc_read_page.
+ */
+void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
+{
+ free_page((unsigned long)data);
+}
+
+/**
+ * ring_buffer_read_page - extract a page from the ring buffer
+ * @buffer: buffer to extract from
+ * @data_page: the page to use, allocated by ring_buffer_alloc_read_page
+ * @cpu: the cpu of the buffer to extract
+ * @full: should the extraction only happen when the page is full.
+ *
+ * This function will pull out a page from the ring buffer and consume it.
+ * @data_page must be the address of the variable that was returned
+ * from ring_buffer_alloc_read_page. This is because the page might be used
+ * to swap with a page in the ring buffer.
+ *
+ * for example:
+ * rpage = ring_buffer_alloc_read_page(buffer);
+ * if (!rpage)
+ * return error;
+ * ret = ring_buffer_read_page(buffer, &rpage, cpu, 0);
+ * if (ret)
+ * process_page(rpage);
+ *
+ * When @full is set, the function will not return true unless
+ * the writer is off the reader page.
+ *
+ * Note: it is up to the calling functions to handle sleeps and wakeups.
+ * The ring buffer can be used anywhere in the kernel and can not
+ * blindly call wake_up. The layer that uses the ring buffer must be
+ * responsible for that.
+ *
+ * Returns:
+ * 1 if data has been transferred
+ * 0 if no data has been transferred.
+ */
+int ring_buffer_read_page(struct ring_buffer *buffer,
+ void **data_page, int cpu, int full)
+{
+ struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
+ struct ring_buffer_event *event;
+ struct buffer_data_page *bpage;
+ unsigned long flags;
+ int ret = 0;
+
+ if (!data_page)
+ return 0;
+
+ bpage = *data_page;
+ if (!bpage)
+ return 0;
+
+ spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+
+ /*
+ * rb_buffer_peek will get the next ring buffer if
+ * the current reader page is empty.
+ */
+ event = rb_buffer_peek(buffer, cpu, NULL);
+ if (!event)
+ goto out;
+
+ /* check for data */
+ if (!local_read(&cpu_buffer->reader_page->page->commit))
+ goto out;
+ /*
+ * If the writer is already off of the read page, then simply
+ * switch the read page with the given page. Otherwise
+ * we need to copy the data from the reader to the writer.
+ */
+ if (cpu_buffer->reader_page == cpu_buffer->commit_page) {
+		struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
+		unsigned int read = cpu_buffer->reader_page->read;
+		unsigned int commit = local_read(&rpage->commit);
+
+		if (full)
+			goto out;
+		/* The writer is still on the reader page, we must copy
+		 * the unread data into the caller's page */
+		memcpy(bpage->data, rpage->data + read, commit - read);
+		local_set(&bpage->commit, commit - read);
+
+		/* consume what was read */
+		cpu_buffer->reader_page->read = commit;
+
+ } else {
+ /* swap the pages */
+ rb_init_page(bpage);
+ bpage = cpu_buffer->reader_page->page;
+ cpu_buffer->reader_page->page = *data_page;
+ cpu_buffer->reader_page->read = 0;
+ *data_page = bpage;
+ }
+ ret = 1;
+
+ /* update the entry counter */
+ rb_remove_entries(cpu_buffer, bpage);
+ out:
+ spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+
+ return ret;
+}
+
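A hedged usage sketch (not part of the patch) tying the three new calls
together: a consumer drains one CPU's buffer a page at a time.
process_page() is a hypothetical consumer hook; the rest is defined above.

static void drain_cpu_buffer(struct ring_buffer *buffer, int cpu)
{
	void *rpage = ring_buffer_alloc_read_page(buffer);

	if (!rpage)
		return;

	/* full == 0: accept a partially filled reader page as well */
	while (ring_buffer_read_page(buffer, &rpage, cpu, 0))
		process_page(rpage);	/* hypothetical consumer hook */

	ring_buffer_free_read_page(buffer, rpage);
}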
static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 91887a280ab..8b6409a62b5 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1200,7 +1200,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-void trace_graph_entry(struct ftrace_graph_ent *trace)
+int trace_graph_entry(struct ftrace_graph_ent *trace)
{
struct trace_array *tr = &global_trace;
struct trace_array_cpu *data;
@@ -1209,7 +1209,7 @@ void trace_graph_entry(struct ftrace_graph_ent *trace)
int cpu;
int pc;
- raw_local_irq_save(flags);
+ local_irq_save(flags);
cpu = raw_smp_processor_id();
data = tr->data[cpu];
disabled = atomic_inc_return(&data->disabled);
@@ -1218,7 +1218,9 @@ void trace_graph_entry(struct ftrace_graph_ent *trace)
__trace_graph_entry(tr, data, trace, flags, pc);
}
atomic_dec(&data->disabled);
- raw_local_irq_restore(flags);
+ local_irq_restore(flags);
+
+ return 1;
}
void trace_graph_return(struct ftrace_graph_ret *trace)
@@ -1230,7 +1232,7 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
int cpu;
int pc;
- raw_local_irq_save(flags);
+ local_irq_save(flags);
cpu = raw_smp_processor_id();
data = tr->data[cpu];
disabled = atomic_inc_return(&data->disabled);
@@ -1239,7 +1241,7 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
__trace_graph_return(tr, data, trace, flags, pc);
}
atomic_dec(&data->disabled);
- raw_local_irq_restore(flags);
+ local_irq_restore(flags);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
@@ -2645,7 +2647,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
if (err)
goto err_unlock;
- raw_local_irq_disable();
+ local_irq_disable();
__raw_spin_lock(&ftrace_max_lock);
for_each_tracing_cpu(cpu) {
/*
@@ -2662,7 +2664,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
}
}
__raw_spin_unlock(&ftrace_max_lock);
- raw_local_irq_enable();
+ local_irq_enable();
tracing_cpumask = tracing_cpumask_new;
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index f96f4e787ff..0565ae9a221 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -412,7 +412,7 @@ void trace_function(struct trace_array *tr,
unsigned long flags, int pc);
void trace_graph_return(struct ftrace_graph_ret *trace);
-void trace_graph_entry(struct ftrace_graph_ent *trace);
+int trace_graph_entry(struct ftrace_graph_ent *trace);
void trace_bts(struct trace_array *tr,
unsigned long from,
unsigned long to);
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index bc972753568..6c00feb3bac 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -42,7 +42,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
if (unlikely(!tr))
return;
- raw_local_irq_save(flags);
+ local_irq_save(flags);
cpu = raw_smp_processor_id();
if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
goto out;
@@ -74,7 +74,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
out:
atomic_dec(&tr->data[cpu]->disabled);
- raw_local_irq_restore(flags);
+ local_irq_restore(flags);
}
static inline
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 894b50bca31..c66578f2fdc 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -19,6 +19,7 @@
#define TRACE_GRAPH_PRINT_OVERRUN 0x1
#define TRACE_GRAPH_PRINT_CPU 0x2
#define TRACE_GRAPH_PRINT_OVERHEAD 0x4
+#define TRACE_GRAPH_PRINT_PROC 0x8
static struct tracer_opt trace_opts[] = {
/* Display overruns ? */
@@ -27,11 +28,13 @@ static struct tracer_opt trace_opts[] = {
{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
/* Display Overhead ? */
{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
+ /* Display proc name/pid */
+ { TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
{ } /* Empty entry */
};
static struct tracer_flags tracer_flags = {
- /* Don't display overruns by default */
+	/* Don't display overruns or proc by default */
.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD,
.opts = trace_opts
};
@@ -104,23 +107,63 @@ print_graph_cpu(struct trace_seq *s, int cpu)
return TRACE_TYPE_HANDLED;
}
+#define TRACE_GRAPH_PROCINFO_LENGTH 14
+
+static enum print_line_t
+print_graph_proc(struct trace_seq *s, pid_t pid)
+{
+ int i;
+ int ret;
+ int len;
+ char comm[8];
+ int spaces = 0;
+	/* sign + log10(MAX_INT) + '\0' */
+	char pid_str[12];
+
+ strncpy(comm, trace_find_cmdline(pid), 7);
+ comm[7] = '\0';
+ sprintf(pid_str, "%d", pid);
+
+ /* 1 stands for the "-" character */
+ len = strlen(comm) + strlen(pid_str) + 1;
+
+ if (len < TRACE_GRAPH_PROCINFO_LENGTH)
+ spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;
+
+ /* First spaces to align center */
+ for (i = 0; i < spaces / 2; i++) {
+ ret = trace_seq_printf(s, " ");
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
+ }
+
+ ret = trace_seq_printf(s, "%s-%s", comm, pid_str);
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
+
+ /* Last spaces to align center */
+ for (i = 0; i < spaces - (spaces / 2); i++) {
+ ret = trace_seq_printf(s, " ");
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
+ }
+ return TRACE_TYPE_HANDLED;
+}
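A quick userspace check of the centering math above, assuming the same
14-column TRACE_GRAPH_PROCINFO_LENGTH field. For comm "bash" and pid 123:
len = 4 + 3 + 1 = 8, spaces = 6, so three spaces lead and three trail:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *comm = "bash";
	const char *pid_str = "123";
	int len = strlen(comm) + strlen(pid_str) + 1;	/* 1 for the '-' */
	int spaces = len < 14 ? 14 - len : 0;

	printf("[%*s%s-%s%*s]\n",
	       spaces / 2, "", comm, pid_str,
	       spaces - spaces / 2, "");	/* -> [   bash-123   ] */
	return 0;
}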
+
/* If the pid changed since the last trace, output this event */
-static int verif_pid(struct trace_seq *s, pid_t pid, int cpu)
+static enum print_line_t
+verif_pid(struct trace_seq *s, pid_t pid, int cpu)
{
- char *comm, *prev_comm;
pid_t prev_pid;
int ret;
if (last_pid[cpu] != -1 && last_pid[cpu] == pid)
- return 1;
+ return TRACE_TYPE_HANDLED;
prev_pid = last_pid[cpu];
last_pid[cpu] = pid;
- comm = trace_find_cmdline(pid);
- prev_comm = trace_find_cmdline(prev_pid);
-
/*
* Context-switch trace line:
@@ -130,11 +173,31 @@ static int verif_pid(struct trace_seq *s, pid_t pid, int cpu)
*/
ret = trace_seq_printf(s,
- " ------------------------------------------\n");
- ret += trace_seq_printf(s, " | %d) %s-%d => %s-%d\n",
- cpu, prev_comm, prev_pid, comm, pid);
- ret += trace_seq_printf(s,
- " ------------------------------------------\n\n");
+ "\n ------------------------------------------\n |");
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	ret = print_graph_cpu(s, cpu);
+	if (ret == TRACE_TYPE_PARTIAL_LINE)
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	ret = print_graph_proc(s, prev_pid);
+	if (ret == TRACE_TYPE_PARTIAL_LINE)
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	ret = trace_seq_printf(s, " => ");
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	ret = print_graph_proc(s, pid);
+	if (ret == TRACE_TYPE_PARTIAL_LINE)
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	ret = trace_seq_printf(s,
+		"\n ------------------------------------------\n\n");
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+
-	return ret;
+	return TRACE_TYPE_HANDLED;
}
@@ -169,11 +232,50 @@ trace_branch_is_leaf(struct trace_iterator *iter,
}
-static inline int
+static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
unsigned long nsecs_rem = do_div(duration, 1000);
- return trace_seq_printf(s, "%4llu.%3lu us | ", duration, nsecs_rem);
+ /* log10(ULONG_MAX) + '\0' */
+ char msecs_str[21];
+ char nsecs_str[5];
+ int ret, len;
+ int i;
+
+ sprintf(msecs_str, "%lu", (unsigned long) duration);
+
+ /* Print msecs */
+	ret = trace_seq_printf(s, "%s", msecs_str);
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
+
+ len = strlen(msecs_str);
+
+	/* Print nsecs (we don't want to exceed 7 digits) */
+ if (len < 7) {
+ snprintf(nsecs_str, 8 - len, "%03lu", nsecs_rem);
+ ret = trace_seq_printf(s, ".%s", nsecs_str);
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
+ len += strlen(nsecs_str);
+ }
+
+ ret = trace_seq_printf(s, " us ");
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
+
+ /* Print remaining spaces to fit the row's width */
+ for (i = len; i < 7; i++) {
+ ret = trace_seq_printf(s, " ");
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
+ }
+
+ ret = trace_seq_printf(s, "| ");
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
+
+	return TRACE_TYPE_HANDLED;
}
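
A userspace mirror of the duration formatting above, assuming the same
7-significant-digit budget. duration is in nanoseconds; note how the
nanosecond part is truncated as the microsecond part grows, keeping the
column width fixed:

#include <stdio.h>
#include <string.h>

static void fmt_duration(unsigned long long duration)
{
	unsigned long nsecs_rem = duration % 1000;
	char msecs_str[21];
	int len;

	duration /= 1000;			/* now in microseconds */
	sprintf(msecs_str, "%llu", duration);
	len = (int)strlen(msecs_str);

	if (len < 7) {
		char nsecs_str[5];

		/* size 8 - len truncates so total digits stay <= 7 */
		snprintf(nsecs_str, 8 - len, "%03lu", nsecs_rem);
		printf("%s.%s us\n", msecs_str, nsecs_str);
	} else {
		printf("%s us\n", msecs_str);
	}
}

int main(void)
{
	fmt_duration(1512ULL);		/* -> 1.512 us                */
	fmt_duration(12345678ULL);	/* -> 12345.67 us (truncated) */
	return 0;
}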
/* Signal an overhead of time execution to the output */
@@ -210,10 +312,6 @@ print_graph_entry_leaf(struct trace_iterator *iter,
call = &entry->graph_ent;
duration = graph_ret->rettime - graph_ret->calltime;
- /* Must not exceed 8 characters: 9999.999 us */
- if (duration > 10000000ULL)
- duration = 9999999ULL;
-
/* Overhead */
if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
ret = print_graph_overhead(duration, s);
@@ -223,7 +321,7 @@ print_graph_entry_leaf(struct trace_iterator *iter,
/* Duration */
ret = print_graph_duration(duration, s);
- if (!ret)
+ if (ret == TRACE_TYPE_PARTIAL_LINE)
return TRACE_TYPE_PARTIAL_LINE;
/* Function */
@@ -288,12 +386,23 @@ print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
struct trace_entry *ent = iter->ent;
/* Pid */
- if (!verif_pid(s, ent->pid, cpu))
+ if (verif_pid(s, ent->pid, cpu) == TRACE_TYPE_PARTIAL_LINE)
return TRACE_TYPE_PARTIAL_LINE;
/* Cpu */
if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
ret = print_graph_cpu(s, cpu);
+ if (ret == TRACE_TYPE_PARTIAL_LINE)
+ return TRACE_TYPE_PARTIAL_LINE;
+ }
+
+ /* Proc */
+ if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
+ ret = print_graph_proc(s, ent->pid);
+ if (ret == TRACE_TYPE_PARTIAL_LINE)
+ return TRACE_TYPE_PARTIAL_LINE;
+
+ ret = trace_seq_printf(s, " | ");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
}
@@ -313,17 +422,24 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
int ret;
unsigned long long duration = trace->rettime - trace->calltime;
- /* Must not exceed 8 characters: xxxx.yyy us */
- if (duration > 10000000ULL)
- duration = 9999999ULL;
-
/* Pid */
- if (!verif_pid(s, ent->pid, cpu))
+ if (verif_pid(s, ent->pid, cpu) == TRACE_TYPE_PARTIAL_LINE)
return TRACE_TYPE_PARTIAL_LINE;
/* Cpu */
if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
ret = print_graph_cpu(s, cpu);
+ if (ret == TRACE_TYPE_PARTIAL_LINE)
+ return TRACE_TYPE_PARTIAL_LINE;
+ }
+
+ /* Proc */
+ if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
+ ret = print_graph_proc(s, ent->pid);
+ if (ret == TRACE_TYPE_PARTIAL_LINE)
+ return TRACE_TYPE_PARTIAL_LINE;
+
+ ret = trace_seq_printf(s, " | ");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
}
@@ -337,7 +453,7 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
/* Duration */
ret = print_graph_duration(duration, s);
- if (!ret)
+ if (ret == TRACE_TYPE_PARTIAL_LINE)
return TRACE_TYPE_PARTIAL_LINE;
/* Closing brace */
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index fde3be15c64..0b863f2cbc8 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -48,7 +48,7 @@ static inline void check_stack(void)
if (!object_is_on_stack(&this_size))
return;
- raw_local_irq_save(flags);
+ local_irq_save(flags);
__raw_spin_lock(&max_stack_lock);
/* a race could have already updated it */
@@ -78,6 +78,7 @@ static inline void check_stack(void)
* on a new max, so it is far from a fast path.
*/
while (i < max_stack_trace.nr_entries) {
+ int found = 0;
stack_dump_index[i] = this_size;
p = start;
@@ -86,17 +87,19 @@ static inline void check_stack(void)
if (*p == stack_dump_trace[i]) {
this_size = stack_dump_index[i++] =
(top - p) * sizeof(unsigned long);
+ found = 1;
/* Start the search from here */
start = p + 1;
}
}
- i++;
+ if (!found)
+ i++;
}
out:
__raw_spin_unlock(&max_stack_lock);
- raw_local_irq_restore(flags);
+ local_irq_restore(flags);
}
static void
@@ -162,11 +165,11 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
if (ret < 0)
return ret;
- raw_local_irq_save(flags);
+ local_irq_save(flags);
__raw_spin_lock(&max_stack_lock);
*ptr = val;
__raw_spin_unlock(&max_stack_lock);
- raw_local_irq_restore(flags);
+ local_irq_restore(flags);
return count;
}