From 8db559b83009bed92e1b5dd13a651ff273d9ff62 Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney"
Date: Mon, 12 May 2008 21:21:05 +0200
Subject: rcu: add memory barriers and comments to rcu_check_callbacks()

Add comments to the logic that infers quiescent states when interrupting
from either user mode or the idle loop.  Also add a memory barrier: it
appears that James Huang was in fact onto something, as the scheduler is
much less synchronization happy than it once was, so we can no longer
rely on its memory barriers in all cases.

Signed-off-by: Paul E. McKenney
Reported-by: James Huang
Signed-off-by: Ingo Molnar
Signed-off-by: Thomas Gleixner
---
 kernel/rcuclassic.c | 30 +++++++++++++++++++++++++++++-
 1 file changed, 29 insertions(+), 1 deletion(-)

diff --git a/kernel/rcuclassic.c b/kernel/rcuclassic.c
index f4ffbd0f306..d8348792f9f 100644
--- a/kernel/rcuclassic.c
+++ b/kernel/rcuclassic.c
@@ -502,10 +502,38 @@ void rcu_check_callbacks(int cpu, int user)
 	if (user ||
 	    (idle_cpu(cpu) && !in_softirq() &&
 				hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
+
+		/*
+		 * Get here if this CPU took its interrupt from user
+		 * mode or from the idle loop, and if this is not a
+		 * nested interrupt.  In this case, the CPU is in
+		 * a quiescent state, so count it.
+		 *
+		 * Also do a memory barrier.  This is needed to handle
+		 * the case where writes from a preempt-disable section
+		 * of code get reordered into schedule() by this CPU's
+		 * write buffer.  The memory barrier makes sure that
+		 * the rcu_qsctr_inc() and rcu_bh_qsctr_inc() are seen
+		 * by other CPUs to happen after any such write.
+		 */
+
+		smp_mb();  /* See above block comment. */
 		rcu_qsctr_inc(cpu);
 		rcu_bh_qsctr_inc(cpu);
-	} else if (!in_softirq())
+
+	} else if (!in_softirq()) {
+
+		/*
+		 * Get here if this CPU did not take its interrupt from
+		 * softirq, in other words, if it is not interrupting
+		 * an rcu_bh read-side critical section.  This is an _bh
+		 * critical section, so count it.  The memory barrier
+		 * is needed for the same reason as is the above one.
+		 */
+
+		smp_mb();  /* See above block comment. */
 		rcu_bh_qsctr_inc(cpu);
+	}
 	raise_rcu_softirq();
 }
--
cgit v1.2.3
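The reordering hazard described in the new block comment has a compact
userspace analogue.  The sketch below is illustrative only, not kernel
code, and every name in it (qsctr, shared_data, the thread functions) is
invented for the example: a C11 seq_cst fence plays the role of smp_mb(),
and a relaxed atomic counter plays the role of the per-CPU counter bumped
by rcu_qsctr_inc().  Once the waiter observes the counter increment, the
paired fences guarantee that the earlier data write is visible as well,
which is precisely the ordering the patch enforces.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static _Atomic int qsctr;   /* stand-in for the per-CPU quiescent-state counter */
static int shared_data;     /* written in the "preempt-disabled" section */

/* Models the interrupted CPU: write shared data, then report a quiescent state. */
static void *cpu_thread(void *unused)
{
	(void)unused;
	shared_data = 42;                          /* write inside critical section */
	/* The analogue of smp_mb(): order the write before the counter bump. */
	atomic_thread_fence(memory_order_seq_cst);
	atomic_store_explicit(&qsctr, 1, memory_order_relaxed);
	return NULL;
}

/* Models the grace-period machinery: wait for the quiescent state, then read. */
static void *updater_thread(void *unused)
{
	(void)unused;
	while (!atomic_load_explicit(&qsctr, memory_order_relaxed))
		;                                  /* spin until the state is reported */
	atomic_thread_fence(memory_order_seq_cst);
	/* The paired fences make the earlier write visible here. */
	printf("shared_data = %d\n", shared_data);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, cpu_thread, NULL);
	pthread_create(&b, NULL, updater_thread, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}

Without the two atomic_thread_fence() calls, nothing would order the data
write before the counter store, and the waiting thread could legally see
qsctr == 1 while still reading a stale shared_data -- the analogue of a
grace period ending before a preempt-disabled write has propagated.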
From 199a952876adbfc2b6c13b8b07adabebf4ff54b2 Mon Sep 17 00:00:00 2001
From: Lai Jiangshan
Date: Thu, 26 Jun 2008 10:06:43 +0800
Subject: rcu classic: update qlen when cpu offline

When callbacks are moved from an offline CPU to this CPU, the qlen field
of this CPU's rcu_data structure (rdp) should be updated as well.

[ Paul E. McKenney: ]

The effect of this bug would be for force_quiescent_state() to be
invoked when it should not and vice versa -- wasting cycles in the
first case and letting RCU callbacks remain piled up in the second
case.  The bug is thus "benign" in that it does not result in premature
grace-period termination, but should of course be fixed nonetheless.

Preemption is disabled by the caller's get_cpu_var(), so we are
guaranteed to remain on the same CPU, as required.  The
local_irq_disable() is indeed needed; otherwise, an interrupt might
invoke call_rcu() or call_rcu_bh(), and that interrupt's increment of
->qlen could then be lost.

Signed-off-by: Lai Jiangshan
Cc: Andrew Morton
Reviewed-by: Paul E. McKenney
Signed-off-by: Ingo Molnar
---
 kernel/rcuclassic.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/kernel/rcuclassic.c b/kernel/rcuclassic.c
index 214e1cde981..529190c485f 100644
--- a/kernel/rcuclassic.c
+++ b/kernel/rcuclassic.c
@@ -387,6 +387,10 @@ static void __rcu_offline_cpu(struct rcu_data *this_rdp,
 	rcu_move_batch(this_rdp, rdp->donelist, rdp->donetail);
 	rcu_move_batch(this_rdp, rdp->curlist, rdp->curtail);
 	rcu_move_batch(this_rdp, rdp->nxtlist, rdp->nxttail);
+
+	local_irq_disable();
+	this_rdp->qlen += rdp->qlen;
+	local_irq_enable();
 }

 static void rcu_offline_cpu(int cpu)
--
cgit v1.2.3
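The lost-update race closed by the local_irq_disable()/local_irq_enable()
pair can also be shown with a userspace analogue.  In the sketch below
(illustrative only; all names are invented), two threads stand in for
__rcu_offline_cpu() and an interrupt handler invoking call_rcu(): the
plain counter uses the racy load/add/store sequence and loses increments,
while the atomic counter stands in for the interrupts-off critical
section and does not.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NBUMPS 1000000

static long qlen;              /* plain counter: the racy read-modify-write */
static atomic_long qlen_excl;  /* stand-in for the irq-disabled update */

/* Each thread models a stream of ->qlen increments (e.g. from call_rcu()). */
static void *bump(void *unused)
{
	(void)unused;
	for (int i = 0; i < NBUMPS; i++) {
		qlen++;                           /* can lose updates */
		atomic_fetch_add(&qlen_excl, 1);  /* never loses updates */
	}
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, bump, NULL);
	pthread_create(&t2, NULL, bump, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	printf("racy qlen:      %ld (expected %d)\n", qlen, 2 * NBUMPS);
	printf("protected qlen: %ld\n", (long)atomic_load(&qlen_excl));
	return 0;
}

On a multicore machine the first line typically prints less than the
expected total.  In the kernel patch itself no atomic is needed: as the
review comment notes, preemption is already disabled, so the only racing
increment can come from an interrupt on the same CPU, and disabling local
interrupts around the merge is sufficient and cheaper.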