path: root/kernel/trace/trace_functions_graph.c
/*
 *
 * Function graph tracer.
 * Copyright (c) 2008 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

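/* Number of spaces of indentation added per level of call nesting */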
#define TRACE_GRAPH_INDENT	2

#define TRACE_GRAPH_PRINT_OVERRUN	0x1
static struct tracer_opt trace_opts[] = {
	/* Display overruns or not */
	{ TRACER_OPT(overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	.val = 0, /* Don't display overruns by default */
	.opts = trace_opts
};

/* pid on the last trace processed */
static pid_t last_pid[NR_CPUS] = { [0 ... NR_CPUS-1] = -1 };

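/*
 * Start graph tracing: reset the per-cpu buffers, hook the graph
 * entry/return callbacks into ftrace and start recording cmdlines
 * so that pids can be resolved to task names in the output.
 */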
static int graph_trace_init(struct trace_array *tr)
{
	int cpu, ret;

	for_each_online_cpu(cpu)
		tracing_reset(tr, cpu);

	ret = register_ftrace_graph(&trace_graph_return,
					&trace_graph_entry);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}

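/* Stop graph tracing: stop cmdline recording and unhook the callbacks */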
static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}

/* If the pid changed since the last trace on this cpu, print a task switch header */
static int verif_pid(struct trace_seq *s, pid_t pid, int cpu)
{
	char *comm;

	if (last_pid[cpu] != -1 && last_pid[cpu] == pid)
		return 1;

	last_pid[cpu] = pid;
	comm = trace_find_cmdline(pid);

	return trace_seq_printf(s, "\nCPU[%03d]"
				    " ------------8<---------- thread %s-%d"
				    " ------------8<----------\n\n",
				    cpu, comm, pid);
}

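/*
 * Print a function entry event: the CPU number, indentation matching
 * the call depth, then "func() {" opening the block that the matching
 * return event will close.
 */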
static enum print_line_t
print_graph_entry(struct ftrace_graph_ent *call, struct trace_seq *s,
		  struct trace_entry *ent, int cpu)
{
	int i;
	int ret;

	if (!verif_pid(s, ent->pid, cpu))
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_printf(s, "CPU[%03d] ", cpu);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = seq_print_ip_sym(s, call->func, 0);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_printf(s, "() {\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;
	return TRACE_TYPE_HANDLED;
}

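/*
 * Print a function return event: the CPU number, indentation matching
 * the call depth, a closing brace and the time spent in the function
 * (rettime - calltime), plus the overrun count when the "overrun"
 * option is set.
 */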
static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, int cpu)
{
	int i;
	int ret;

	if (!verif_pid(s, ent->pid, cpu))
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_printf(s, "CPU[%03d] ", cpu);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "} ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_printf(s, "%llu\n", trace->rettime - trace->calltime);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) {
		ret = trace_seq_printf(s, " (Overruns: %lu)\n",
					trace->overrun);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	return TRACE_TYPE_HANDLED;
}

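/* Dispatch a graph entry or return event to its output routine */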
enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		struct ftrace_graph_ent_entry *field;
		trace_assign_type(field, entry);
		return print_graph_entry(&field->graph_ent, s, entry,
					 iter->cpu);
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(&field->ret, s, entry, iter->cpu);
	}
	default:
		return TRACE_TYPE_UNHANDLED;
	}
}

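/* The tracer descriptor registered with the tracing core */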
static struct tracer graph_trace __read_mostly = {
	.name		= "function-graph",
	.init		= graph_trace_init,
	.reset		= graph_trace_reset,
	.print_line	= print_graph_function,
	.flags		= &tracer_flags,
};

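/* Register the tracer at initcall time */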
static __init int init_graph_trace(void)
{
	return register_tracer(&graph_trace);
}

device_initcall(init_graph_trace);