Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/blktrace.c                 2
-rw-r--r--  kernel/trace/trace.c                   20
-rw-r--r--  kernel/trace/trace_clock.c              9
-rw-r--r--  kernel/trace/trace_functions_graph.c    2
-rw-r--r--  kernel/trace/trace_workqueue.c         19
5 files changed, 29 insertions, 23 deletions
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index bec69d3678c..1f32e4edf49 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -423,7 +423,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
if (!bt->sequence)
goto err;
- bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG);
+ bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
if (!bt->msg_data)
goto err;
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 14c98f6a47b..c3946a6df34 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1478,11 +1478,11 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
total = entries +
ring_buffer_overruns(iter->tr->buffer);
- seq_printf(m, "%s latency trace v1.1.5 on %s\n",
+ seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
name, UTS_RELEASE);
- seq_puts(m, "-----------------------------------"
+ seq_puts(m, "# -----------------------------------"
"---------------------------------\n");
- seq_printf(m, " latency: %lu us, #%lu/%lu, CPU#%d |"
+ seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
" (M:%s VP:%d, KP:%d, SP:%d HP:%d",
nsecs_to_usecs(data->saved_latency),
entries,
@@ -1504,24 +1504,24 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
#else
seq_puts(m, ")\n");
#endif
- seq_puts(m, " -----------------\n");
- seq_printf(m, " | task: %.16s-%d "
+ seq_puts(m, "# -----------------\n");
+ seq_printf(m, "# | task: %.16s-%d "
"(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
data->comm, data->pid, data->uid, data->nice,
data->policy, data->rt_priority);
- seq_puts(m, " -----------------\n");
+ seq_puts(m, "# -----------------\n");
if (data->critical_start) {
- seq_puts(m, " => started at: ");
+ seq_puts(m, "# => started at: ");
seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
trace_print_seq(m, &iter->seq);
- seq_puts(m, "\n => ended at: ");
+ seq_puts(m, "\n# => ended at: ");
seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
trace_print_seq(m, &iter->seq);
- seq_puts(m, "\n");
+ seq_puts(m, "#\n");
}
- seq_puts(m, "\n");
+ seq_puts(m, "#\n");
}
static void test_cpu_buff_start(struct trace_iterator *iter)
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index 2d4953f9356..05b176abfd3 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -27,12 +27,19 @@
*/
u64 notrace trace_clock_local(void)
{
+ unsigned long flags;
+ u64 clock;
+
/*
* sched_clock() is an architecture implemented, fast, scalable,
* lockless clock. It is not guaranteed to be coherent across
* CPUs, nor across CPU idle events.
*/
- return sched_clock();
+ raw_local_irq_save(flags);
+ clock = sched_clock();
+ raw_local_irq_restore(flags);
+
+ return clock;
}
/*
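The trace_clock_local() change above samples sched_clock() with local interrupts disabled. As the comment notes, the result is fast but only meaningful on the local CPU; a minimal usage sketch (the do_work() call and message are illustrative, not part of this patch):

	u64 t0, t1;

	t0 = trace_clock_local();
	do_work();		/* hypothetical work being timed */
	t1 = trace_clock_local();
	/* deltas are only meaningful when both samples come from the same CPU */
	pr_info("work took %llu ns\n", (unsigned long long)(t1 - t0));
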
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index d1493b853e4..8566c14b3e9 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -837,7 +837,7 @@ static void graph_trace_open(struct trace_iterator *iter)
static void graph_trace_close(struct trace_iterator *iter)
{
- percpu_free(iter->private);
+ free_percpu(iter->private);
}
static struct tracer graph_trace __read_mostly = {
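Both per-CPU hunks above follow the consolidated percpu allocator API: __alloc_percpu() now takes an explicit alignment alongside the size, and free_percpu() is the matching release (the older percpu_free() wrapper is gone). A minimal allocate/free sketch under that API (the msg_buf variable and helper names are hypothetical, not from this patch):

	#include <linux/percpu.h>

	static char *msg_buf;

	static int msg_buf_init(void)
	{
		/* size plus explicit alignment, as in the blktrace hunk */
		msg_buf = __alloc_percpu(128, __alignof__(char));
		if (!msg_buf)
			return -ENOMEM;
		return 0;
	}

	static void msg_buf_exit(void)
	{
		/* pair with free_percpu(), as in the graph tracer hunk */
		free_percpu(msg_buf);
	}
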
diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
index e542483df62..fb5ccac8bbc 100644
--- a/kernel/trace/trace_workqueue.c
+++ b/kernel/trace/trace_workqueue.c
@@ -91,7 +91,7 @@ static void probe_workqueue_creation(struct task_struct *wq_thread, int cpu)
struct cpu_workqueue_stats *cws;
unsigned long flags;
- WARN_ON(cpu < 0 || cpu >= num_possible_cpus());
+ WARN_ON(cpu < 0);
/* Workqueues are sometimes created in atomic context */
cws = kzalloc(sizeof(struct cpu_workqueue_stats), GFP_ATOMIC);
@@ -99,8 +99,6 @@ static void probe_workqueue_creation(struct task_struct *wq_thread, int cpu)
pr_warning("trace_workqueue: not enough memory\n");
return;
}
- tracing_record_cmdline(wq_thread);
-
INIT_LIST_HEAD(&cws->list);
cws->cpu = cpu;
@@ -177,12 +175,12 @@ static void *workqueue_stat_next(void *prev, int idx)
spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
if (list_is_last(&prev_cws->list, &workqueue_cpu_stat(cpu)->list)) {
spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
- for (++cpu ; cpu < num_possible_cpus(); cpu++) {
- ret = workqueue_stat_start_cpu(cpu);
- if (ret)
- return ret;
- }
- return NULL;
+ do {
+ cpu = cpumask_next(cpu, cpu_possible_mask);
+ if (cpu >= nr_cpu_ids)
+ return NULL;
+ } while (!(ret = workqueue_stat_start_cpu(cpu)));
+ return ret;
}
spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
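
The rewritten loop above walks cpu_possible_mask with cpumask_next() instead of assuming possible CPU ids are contiguous from 0 to num_possible_cpus() - 1 (num_possible_cpus() only counts set bits, so it undercounts on sparse maps), and bounds the iteration with nr_cpu_ids. A stand-alone sketch of that pattern (the helper name is hypothetical):

	#include <linux/cpumask.h>

	/* return the next possible CPU after @cpu, or -1 when the mask is exhausted */
	static int next_possible_cpu(int cpu)
	{
		cpu = cpumask_next(cpu, cpu_possible_mask);
		return cpu < nr_cpu_ids ? cpu : -1;
	}
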
@@ -195,11 +193,12 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
struct cpu_workqueue_stats *cws = p;
unsigned long flags;
int cpu = cws->cpu;
+ struct task_struct *tsk = find_task_by_vpid(cws->pid);
seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
atomic_read(&cws->inserted),
cws->executed,
- trace_find_cmdline(cws->pid));
+ tsk ? tsk->comm : "<...>");
spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
if (&cws->list == workqueue_cpu_stat(cpu)->list.next)