author		Andrew Morton <akpm@linux-foundation.org>	2009-09-19 11:55:44 -0700
committer	Ingo Molnar <mingo@elte.hu>	2009-09-20 20:00:32 +0200
commit		89f19f04dc72363d912fd007a399cb10310eff6e (patch)
tree		daa64e5de98a668eed4a2ceeb6ca95c478be4053
parent		3f04e8cd5b24727a2500f8ab8f3de730ba47b02c (diff)
sched: Fix raciness in runqueue_is_locked()
runqueue_is_locked() is unavoidably racy due to a poor interface design.
It does:

	cpu = get_cpu();
	ret = some_percpu_thing(cpu);
	put_cpu();
	return ret;

Its return value is unreliable: the moment put_cpu() re-enables
preemption, the task can migrate and the lock state can change, so the
result may be stale before the caller ever sees it.

Fix it by making callers pass in the CPU they care about, so the caller
controls how long that CPU stays pinned.
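
For illustration, the same race spelled out in kernel-style C. This is
a sketch, not patch code; some_percpu_thing() is the placeholder used
above, not a real kernel symbol:

	#include <linux/smp.h>	/* get_cpu()/put_cpu() */

	int racy_shape(void)			/* the old interface shape */
	{
		int cpu = get_cpu();		/* preemption off: cpu is stable */
		int ret = some_percpu_thing(cpu); /* per-cpu query (placeholder) */

		put_cpu();			/* preemption back on: we may now
						 * migrate, and the per-cpu state
						 * may change under us */
		return ret;			/* may describe a CPU we have
						 * already left */
	}

	int fixed_shape(int cpu)		/* the new interface shape */
	{
		return some_percpu_thing(cpu);	/* caller owns the pinning */
	}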
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Steven Rostedt <rostedt@goodmis.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <200909191855.n8JItiko022148@imap1.linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
 include/linux/sched.h |  2 +-
 kernel/sched.c        | 10 ++--------
 kernel/trace/trace.c  |  8 +++++++-
 3 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 8af3d249170..cc37a3fa506 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -257,7 +257,7 @@ extern asmlinkage void schedule_tail(struct task_struct *prev);
 extern void init_idle(struct task_struct *idle, int cpu);
 extern void init_idle_bootup_task(struct task_struct *idle);
 
-extern int runqueue_is_locked(void);
+extern int runqueue_is_locked(int cpu);
 extern void task_rq_unlock_wait(struct task_struct *p);
 
 extern cpumask_var_t nohz_cpu_mask;
diff --git a/kernel/sched.c b/kernel/sched.c
index faf4d463bbf..575fb013903 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -681,15 +681,9 @@ inline void update_rq_clock(struct rq *rq)
  * This interface allows printk to be called with the runqueue lock
  * held and know whether or not it is OK to wake up the klogd.
  */
-int runqueue_is_locked(void)
+int runqueue_is_locked(int cpu)
 {
-	int cpu = get_cpu();
-	struct rq *rq = cpu_rq(cpu);
-	int ret;
-
-	ret = spin_is_locked(&rq->lock);
-	put_cpu();
-	return ret;
+	return spin_is_locked(&cpu_rq(cpu)->lock);
 }
 
 /*
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index fd52a19dd17..420232a1fbb 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -275,12 +275,18 @@ static DEFINE_SPINLOCK(tracing_start_lock);
  */
 void trace_wake_up(void)
 {
+	int cpu;
+
+	if (trace_flags & TRACE_ITER_BLOCK)
+		return;
 	/*
 	 * The runqueue_is_locked() can fail, but this is the best we
 	 * have for now:
 	 */
-	if (!(trace_flags & TRACE_ITER_BLOCK) && !runqueue_is_locked())
+	cpu = get_cpu();
+	if (!runqueue_is_locked(cpu))
 		wake_up(&trace_wait);
+	put_cpu();
 }
 
 static int __init set_buf_size(char *str)
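
The caller-side contract the new signature implies, mirroring the
trace_wake_up() change above (a sketch under the same assumptions as
the patch, not additional patch code):

	int cpu = get_cpu();		/* pin: no migration, cpu stays valid */

	if (!runqueue_is_locked(cpu))	/* query this CPU's runqueue lock */
		wake_up(&trace_wait);	/* act while still pinned */
	put_cpu();			/* release the pin */

Note the residual raciness that the in-code comment concedes: another
CPU can take or drop rq->lock at any moment, so the check is only a
best-effort hint; pinning merely guarantees the answer refers to the
CPU the caller is actually running on.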