author    Steven Rostedt <srostedt@redhat.com>    2008-12-02 15:34:05 -0500
committer Ingo Molnar <mingo@elte.hu>             2008-12-03 08:56:19 +0100
commit    a5e25883a445dce94a087ca479b21a5959cd5c18
tree      9453b1116b9784b91d63fd6a1110af0d62fd02b7 /kernel/trace
parent    dfdc5437bd62dd6a26961e27f26b671374749875
ftrace: replace raw_local_irq_save with local_irq_save
Impact: fix for lockdep and ftrace

The raw_local_irq_save/restore confuses lockdep. This patch
converts them to the local_irq_save/restore variants.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/trace.c         12
-rw-r--r--  kernel/trace/trace_branch.c   4
-rw-r--r--  kernel/trace/trace_stack.c    8
3 files changed, 12 insertions, 12 deletions
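
For reference, the pattern this patch touches in each file looks roughly like the sketch below. It mirrors the trace_graph_entry()/trace_graph_return() hunks in the diff; example_trace_event() is a hypothetical name, and the struct trace_array / trace_array_cpu types are assumed to come from kernel/trace/trace.h of this era. Only the boundary changes: local_irq_save()/local_irq_restore() go through lockdep's irq-state hooks, while the raw_ variants only flip the hardware flag and leave lockdep's view of the IRQ state stale.

/*
 * Paraphrased sketch of the tracer fast path touched by this patch,
 * assuming the struct trace_array / trace_array_cpu layout from
 * kernel/trace/trace.h at this point in history; example_trace_event()
 * is a made-up name, not a function in the tree.
 */
#include <linux/irqflags.h>
#include <linux/smp.h>
#include "trace.h"

static void example_trace_event(struct trace_array *tr)
{
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	local_irq_save(flags);		/* was: raw_local_irq_save(flags) */
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		/* record the event while per-cpu recursion is blocked */
	}
	atomic_dec(&data->disabled);
	local_irq_restore(flags);	/* was: raw_local_irq_restore(flags) */
}
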
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 91887a280ab..380de630ebc 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1209,7 +1209,7 @@ void trace_graph_entry(struct ftrace_graph_ent *trace)
int cpu;
int pc;
- raw_local_irq_save(flags);
+ local_irq_save(flags);
cpu = raw_smp_processor_id();
data = tr->data[cpu];
disabled = atomic_inc_return(&data->disabled);
@@ -1218,7 +1218,7 @@ void trace_graph_entry(struct ftrace_graph_ent *trace)
__trace_graph_entry(tr, data, trace, flags, pc);
}
atomic_dec(&data->disabled);
- raw_local_irq_restore(flags);
+ local_irq_restore(flags);
}
void trace_graph_return(struct ftrace_graph_ret *trace)
@@ -1230,7 +1230,7 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
int cpu;
int pc;
- raw_local_irq_save(flags);
+ local_irq_save(flags);
cpu = raw_smp_processor_id();
data = tr->data[cpu];
disabled = atomic_inc_return(&data->disabled);
@@ -1239,7 +1239,7 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
__trace_graph_return(tr, data, trace, flags, pc);
}
atomic_dec(&data->disabled);
- raw_local_irq_restore(flags);
+ local_irq_restore(flags);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
@@ -2645,7 +2645,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
if (err)
goto err_unlock;
- raw_local_irq_disable();
+ local_irq_disable();
__raw_spin_lock(&ftrace_max_lock);
for_each_tracing_cpu(cpu) {
/*
@@ -2662,7 +2662,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
}
}
__raw_spin_unlock(&ftrace_max_lock);
- raw_local_irq_enable();
+ local_irq_enable();
tracing_cpumask = tracing_cpumask_new;
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index bc972753568..6c00feb3bac 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -42,7 +42,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
if (unlikely(!tr))
return;
- raw_local_irq_save(flags);
+ local_irq_save(flags);
cpu = raw_smp_processor_id();
if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
goto out;
@@ -74,7 +74,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
out:
atomic_dec(&tr->data[cpu]->disabled);
- raw_local_irq_restore(flags);
+ local_irq_restore(flags);
}
static inline
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index fde3be15c64..06a16115be0 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -48,7 +48,7 @@ static inline void check_stack(void)
if (!object_is_on_stack(&this_size))
return;
- raw_local_irq_save(flags);
+ local_irq_save(flags);
__raw_spin_lock(&max_stack_lock);
/* a race could have already updated it */
@@ -96,7 +96,7 @@ static inline void check_stack(void)
out:
__raw_spin_unlock(&max_stack_lock);
- raw_local_irq_restore(flags);
+ local_irq_restore(flags);
}
static void
@@ -162,11 +162,11 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
if (ret < 0)
return ret;
- raw_local_irq_save(flags);
+ local_irq_save(flags);
__raw_spin_lock(&max_stack_lock);
*ptr = val;
__raw_spin_unlock(&max_stack_lock);
- raw_local_irq_restore(flags);
+ local_irq_restore(flags);
return count;
}
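
Why this matters for lockdep: with CONFIG_TRACE_IRQFLAGS enabled, the non-raw helpers are thin wrappers that notify lockdep's irq-state tracking around the raw operation, which is exactly what the raw_ variants skip. The sketch below is a rough paraphrase of the include/linux/irqflags.h wrappers of this era, not the verbatim macros.

/*
 * Rough paraphrase of the CONFIG_TRACE_IRQFLAGS wrappers in
 * include/linux/irqflags.h around this time (not verbatim).
 */
#define local_irq_save(flags)				\
	do {						\
		raw_local_irq_save(flags);		\
		trace_hardirqs_off();			\
	} while (0)

#define local_irq_restore(flags)			\
	do {						\
		if (raw_irqs_disabled_flags(flags)) {	\
			raw_local_irq_restore(flags);	\
			trace_hardirqs_off();		\
		} else {				\
			trace_hardirqs_on();		\
			raw_local_irq_restore(flags);	\
		}					\
	} while (0)

Note that __raw_spin_lock(&ftrace_max_lock) and __raw_spin_lock(&max_stack_lock) stay on the raw spinlock API, deliberately outside lockdep; only the irq-flag wrappers change, and the critical sections themselves are untouched.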