author    Ingo Molnar <mingo@elte.hu>    2009-03-17 08:37:20 +0100
committer Ingo Molnar <mingo@elte.hu>    2009-03-17 08:37:20 +0100
commit    e4106133341121aeaad732d4613de06d9033b5ac (patch)
tree      8d7ecb57fdefdfac5bdff6ee3a82485e2ea4340a
parent    7243f2145a9b06e5cf9a49fc9b8b9a4fff6fb42e (diff)
parent    2fc1dfbe17e7705c55b7a99da995fa565e26f151 (diff)
Merge branch 'tracing/syscalls' into tracing/core
-rw-r--r--  kernel/trace/Kconfig          |  1
-rw-r--r--  kernel/trace/trace.c          |  5
-rw-r--r--  kernel/trace/trace_selftest.c | 16
-rw-r--r--  kernel/trace/trace_syscalls.c | 41
4 files changed, 34 insertions(+), 29 deletions(-)
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 95a0ad191f1..b0a46f88965 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -182,6 +182,7 @@ config FTRACE_SYSCALLS
bool "Trace syscalls"
depends on HAVE_FTRACE_SYSCALLS
select TRACING
+ select KALLSYMS
help
Basic tracer to catch the syscall entry and exit events.
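The new "select KALLSYMS" line is here presumably because the syscall tracer resolves syscall table entries back to symbol names, which only works when the kernel symbol table is built in. A minimal sketch of that kind of lookup, using the in-kernel kallsyms_lookup() helper; the function below and its use are illustrative, not taken from this patch:

#include <linux/kallsyms.h>
#include <linux/kernel.h>

/* Resolve an address to a symbol name; this is the kind of lookup
 * that needs CONFIG_KALLSYMS, hence the "select" above. */
static void report_symbol(unsigned long addr)
{
	char namebuf[KSYM_NAME_LEN];
	unsigned long size, offset;
	char *modname;
	const char *name;

	name = kallsyms_lookup(addr, &size, &offset, &modname, namebuf);
	if (name)
		printk(KERN_INFO "%lx is %s\n", addr, name);
}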
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index efe3202c020..ae32d3b99b4 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2494,7 +2494,7 @@ static int tracing_set_tracer(const char *buf)
if (!ring_buffer_expanded) {
ret = tracing_resize_ring_buffer(trace_buf_size);
if (ret < 0)
- return ret;
+ goto out;
ret = 0;
}
@@ -4125,7 +4125,8 @@ __init static int tracer_alloc_buffers(void)
&trace_panic_notifier);
register_die_notifier(&trace_die_notifier);
- ret = 0;
+
+ return 0;
out_free_cpumask:
free_cpumask_var(tracing_reader_cpumask);
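Both trace.c hunks tighten error handling. In tracing_set_tracer(), an early "return ret" skipped the function's cleanup tail, so it becomes "goto out"; in tracer_alloc_buffers(), the success path now returns 0 explicitly instead of setting ret and falling toward the error labels. A self-contained sketch of the single-exit idiom both hunks rely on, with every name below made up for illustration:

#include <stdlib.h>

static int resize_buffer(char *buf) { return 0; }	/* stand-in step */

static int set_tracer_like(void)
{
	int ret = 0;
	char *buf = malloc(64);	/* resource that must be released */

	if (!buf)
		return -1;	/* nothing acquired yet, plain return is safe */

	ret = resize_buffer(buf);
	if (ret < 0)
		goto out;	/* single exit: the cleanup below still runs */

	/* ... normal work ... */
out:
	free(buf);
	return ret;
}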
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index f907a2b2902..a2ca6f0fef9 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -414,7 +414,7 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
ret = tracer_init(trace, tr);
if (ret) {
warn_failed_init_tracer(trace, ret);
- goto out;
+ goto out_no_start;
}
/* reset the max latency */
@@ -432,21 +432,16 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
tracing_stop();
/* check both trace buffers */
ret = trace_test_buffer(tr, NULL);
- if (ret) {
- tracing_start();
+ if (ret)
goto out;
- }
ret = trace_test_buffer(&max_tr, &count);
- if (ret) {
- tracing_start();
+ if (ret)
goto out;
- }
if (!ret && !count) {
printk(KERN_CONT ".. no entries found ..");
ret = -1;
- tracing_start();
goto out;
}
@@ -475,9 +470,10 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
goto out;
}
- out:
- trace->reset(tr);
+out:
tracing_start();
+out_no_start:
+ trace->reset(tr);
tracing_max_latency = save_max;
return ret;
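The selftest change takes the same idiom one step further. Three copies of tracing_start() before each "goto out" collapse into a single out: label, and a new out_no_start: label serves the one failure (tracer_init()) that occurs before tracing was ever stopped, so the unwind only undoes what actually happened. A sketch of the two-label unwind, with hypothetical names:

static int init_thing(void) { return 0; }	/* stand-ins, not kernel APIs */
static void engine_stop(void) { }
static void engine_start(void) { }
static void reset_thing(void) { }
static int check(void) { return -1; }

static int selftest_like(void)
{
	int ret;

	ret = init_thing();
	if (ret)
		goto out_no_start;	/* engine never stopped: don't restart it */

	engine_stop();			/* pause while we inspect */
	ret = check();
	if (ret)
		goto out;		/* every later failure funnels here */
	/* more checks would go here */
out:
	engine_start();			/* undo in reverse order... */
out_no_start:
	reset_thing();			/* ...then the teardown common to both paths */
	return ret;
}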
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index c72e599230f..a2a3af29c94 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -5,9 +5,13 @@
#include "trace_output.h"
#include "trace.h"
-static atomic_t refcount;
+/* Keep a count of the syscall tracing users */
+static int refcount;
-/* Our two options */
+/* Prevent races when toggling the thread flags */
+static DEFINE_MUTEX(syscall_trace_lock);
+
+/* Option to display the parameter types */
enum {
TRACE_SYSCALLS_OPT_TYPES = 0x1,
};
@@ -18,7 +22,7 @@ static struct tracer_opt syscalls_opts[] = {
};
static struct tracer_flags syscalls_flags = {
- .val = 0, /* By default: no args types */
+ .val = 0, /* By default: no parameter types */
.opts = syscalls_opts
};
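For context on the hunk above: TRACE_SYSCALLS_OPT_TYPES is a bit in the tracer's option mask, the syscalls_opts table maps a user-visible option name to that bit, and syscalls_flags.val holds the mask's default (0, so types stay hidden until the option is switched on). A hedged sketch of the same name-to-bit table pattern in plain C; the option name below is hypothetical:

#include <string.h>

struct opt { const char *name; unsigned int bit; };

enum { OPT_TYPES = 0x1 };

static const struct opt opts[] = {
	{ "types", OPT_TYPES },		/* hypothetical option name */
};

static unsigned int optmask;		/* 0 by default: everything off */

static void set_opt(const char *name, int on)
{
	size_t i;

	for (i = 0; i < sizeof(opts) / sizeof(opts[0]); i++) {
		if (strcmp(opts[i].name, name))
			continue;
		if (on)
			optmask |= opts[i].bit;
		else
			optmask &= ~opts[i].bit;
	}
}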
@@ -96,8 +100,11 @@ void start_ftrace_syscalls(void)
unsigned long flags;
struct task_struct *g, *t;
- if (atomic_inc_return(&refcount) != 1)
- goto out;
+ mutex_lock(&syscall_trace_lock);
+
+ /* Don't enable the flag on the tasks twice */
+ if (++refcount != 1)
+ goto unlock;
arch_init_ftrace_syscalls();
read_lock_irqsave(&tasklist_lock, flags);
@@ -107,8 +114,9 @@ void start_ftrace_syscalls(void)
} while_each_thread(g, t);
read_unlock_irqrestore(&tasklist_lock, flags);
-out:
- atomic_dec(&refcount);
+
+unlock:
+ mutex_unlock(&syscall_trace_lock);
}
void stop_ftrace_syscalls(void)
@@ -116,8 +124,11 @@ void stop_ftrace_syscalls(void)
unsigned long flags;
struct task_struct *g, *t;
- if (atomic_dec_return(&refcount))
- goto out;
+ mutex_lock(&syscall_trace_lock);
+
+ /* There may still be other users */
+ if (--refcount)
+ goto unlock;
read_lock_irqsave(&tasklist_lock, flags);
@@ -126,8 +137,9 @@ void stop_ftrace_syscalls(void)
} while_each_thread(g, t);
read_unlock_irqrestore(&tasklist_lock, flags);
-out:
- atomic_inc(&refcount);
+
+unlock:
+ mutex_unlock(&syscall_trace_lock);
}
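This is the core fix of the patch: atomic_t made each counter operation atomic, but the check of the counter and the subsequent walk over every task's flag were still two separate steps, so concurrent start/stop callers could race on the flag toggling. A plain int guarded by a mutex covers the whole critical section instead. A userspace sketch of the pattern, with pthreads standing in for the kernel mutex and hypothetical helper names:

#include <pthread.h>

static pthread_mutex_t trace_lock = PTHREAD_MUTEX_INITIALIZER;
static int users;				/* protected by trace_lock */

static void set_flag_on_all_tasks(void) { }	/* stand-ins for the */
static void clear_flag_on_all_tasks(void) { }	/* tasklist walks */

void start_tracing(void)
{
	pthread_mutex_lock(&trace_lock);
	/* Only the first user flips the per-task flags; the counter
	 * check and the walk happen under the same lock. */
	if (++users == 1)
		set_flag_on_all_tasks();
	pthread_mutex_unlock(&trace_lock);
}

void stop_tracing(void)
{
	pthread_mutex_lock(&trace_lock);
	/* Only the last user clears them. */
	if (--users == 0)
		clear_flag_on_all_tasks();
	pthread_mutex_unlock(&trace_lock);
}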
void ftrace_syscall_enter(struct pt_regs *regs)
@@ -137,12 +149,9 @@ void ftrace_syscall_enter(struct pt_regs *regs)
struct ring_buffer_event *event;
int size;
int syscall_nr;
- int cpu;
syscall_nr = syscall_get_nr(current, regs);
- cpu = raw_smp_processor_id();
-
sys_data = syscall_nr_to_meta(syscall_nr);
if (!sys_data)
return;
@@ -168,12 +177,9 @@ void ftrace_syscall_exit(struct pt_regs *regs)
struct syscall_metadata *sys_data;
struct ring_buffer_event *event;
int syscall_nr;
- int cpu;
syscall_nr = syscall_get_nr(current, regs);
- cpu = raw_smp_processor_id();
-
sys_data = syscall_nr_to_meta(syscall_nr);
if (!sys_data)
return;
@@ -201,6 +207,7 @@ static int init_syscall_tracer(struct trace_array *tr)
static void reset_syscall_tracer(struct trace_array *tr)
{
stop_ftrace_syscalls();
+ tracing_reset_online_cpus(tr);
}
static struct trace_event syscall_enter_event = {