author    Ingo Molnar <mingo@elte.hu>  2008-07-15 21:10:12 +0200
committer Ingo Molnar <mingo@elte.hu>  2008-07-15 21:10:12 +0200
commit    6c9fcaf2eec1b9f85226a694230dd957dd7926b3 (patch)
tree      f8c824c6c64dc411752c844f116e693760768bcc /kernel/rcuclassic.c
parent    b9d2252c1e44fa83a4e65fdc9eb93db6297c55af (diff)
parent    199a952876adbfc2b6c13b8b07adabebf4ff54b2 (diff)
Merge branch 'core/rcu' into core/rcu-for-linus
Diffstat (limited to 'kernel/rcuclassic.c')
-rw-r--r--  kernel/rcuclassic.c  34
1 file changed, 33 insertions(+), 1 deletion(-)
diff --git a/kernel/rcuclassic.c b/kernel/rcuclassic.c
index 65c0906080ef..16eeeaa9d618 100644
--- a/kernel/rcuclassic.c
+++ b/kernel/rcuclassic.c
@@ -387,6 +387,10 @@ static void __rcu_offline_cpu(struct rcu_data *this_rdp,
 	rcu_move_batch(this_rdp, rdp->donelist, rdp->donetail);
 	rcu_move_batch(this_rdp, rdp->curlist, rdp->curtail);
 	rcu_move_batch(this_rdp, rdp->nxtlist, rdp->nxttail);
+
+	local_irq_disable();
+	this_rdp->qlen += rdp->qlen;
+	local_irq_enable();
 }
 
 static void rcu_offline_cpu(int cpu)
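
A note on the hunk above: this_rdp->qlen is also updated from the RCU softirq path on the local CPU, so the read-modify-write "this_rdp->qlen += rdp->qlen" must not be interleaved with an interrupt; disabling interrupts serializes the two. The following is a minimal userspace sketch of that pattern, not kernel code: a pthread mutex stands in for local_irq_disable()/local_irq_enable(), and all names (fake_rdp, consumer, adopted) are invented for the illustration.

#include <pthread.h>
#include <stdio.h>

struct fake_rdp {
	long qlen;              /* number of queued callbacks */
	pthread_mutex_t lock;   /* stand-in for local_irq_disable() */
};

/* Models the softirq handler retiring callbacks on the local CPU. */
static void *consumer(void *arg)
{
	struct fake_rdp *rdp = arg;
	for (int i = 0; i < 100000; i++) {
		pthread_mutex_lock(&rdp->lock);
		rdp->qlen--;                    /* one callback retired */
		pthread_mutex_unlock(&rdp->lock);
	}
	return NULL;
}

int main(void)
{
	struct fake_rdp this_rdp = {
		.qlen = 100000,                 /* callbacks already queued */
		.lock = PTHREAD_MUTEX_INITIALIZER,
	};
	long adopted = 100000;                  /* count taken over from the dead CPU */
	pthread_t t;

	pthread_create(&t, NULL, consumer, &this_rdp);

	/* The new code in the hunk: fold the offlined CPU's count in,
	 * serialized against the local consumer so the += is not torn. */
	pthread_mutex_lock(&this_rdp.lock);
	this_rdp.qlen += adopted;
	pthread_mutex_unlock(&this_rdp.lock);

	pthread_join(t, NULL);
	printf("qlen = %ld (expect 100000)\n", this_rdp.qlen);
	return 0;
}

In the kernel the rcu_data structures are per-CPU, so the update races only with local interrupt/softirq context, which is why disabling interrupts, rather than taking a lock, suffices there.
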
@@ -516,10 +520,38 @@ void rcu_check_callbacks(int cpu, int user)
 	if (user ||
 	    (idle_cpu(cpu) && !in_softirq() &&
 	     hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
+
+		/*
+		 * Get here if this CPU took its interrupt from user
+		 * mode or from the idle loop, and if this is not a
+		 * nested interrupt.  In this case, the CPU is in
+		 * a quiescent state, so count it.
+		 *
+		 * Also do a memory barrier.  This is needed to handle
+		 * the case where writes from a preempt-disable section
+		 * of code get reordered into schedule() by this CPU's
+		 * write buffer.  The memory barrier makes sure that
+		 * the rcu_qsctr_inc() and rcu_bh_qsctr_inc() are seen
+		 * by other CPUs to happen after any such write.
+		 */
+
+		smp_mb();  /* See above block comment. */
 		rcu_qsctr_inc(cpu);
 		rcu_bh_qsctr_inc(cpu);
-	} else if (!in_softirq())
+
+	} else if (!in_softirq()) {
+
+		/*
+		 * Get here if this CPU did not take its interrupt from
+		 * softirq, in other words, if it is not interrupting
+		 * a rcu_bh read-side critical section.  This is an _bh
+		 * critical section, so count it.  The memory barrier
+		 * is needed for the same reason as is the above one.
+		 */
+
+		smp_mb();  /* See above block comment. */
 		rcu_bh_qsctr_inc(cpu);
+	}
 	raise_rcu_softirq();
 }
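
The barrier logic in the second hunk can be sketched in userspace with C11 atomics. This is only an analogue with invented names: atomic_thread_fence(memory_order_seq_cst) plays the role of smp_mb(), shared_data models a write performed under preempt_disable(), and qsctr models the per-CPU quiescent-state counter bumped by rcu_qsctr_inc().

#include <stdatomic.h>
#include <stdio.h>

static int shared_data;         /* write from a preempt-disable section */
static atomic_int qsctr;        /* models the per-CPU quiescent-state counter */

/* Writer side: what the hunk above does in rcu_check_callbacks(). */
static void note_quiescent_state(void)
{
	shared_data = 42;       /* must not become visible after the tick */
	atomic_thread_fence(memory_order_seq_cst);      /* smp_mb() analogue */
	atomic_fetch_add_explicit(&qsctr, 1, memory_order_relaxed);
}

/* Reader side: a CPU that takes the tick to mean "prior writes visible". */
static void check_quiescent_state(void)
{
	if (atomic_load_explicit(&qsctr, memory_order_relaxed) > 0) {
		atomic_thread_fence(memory_order_seq_cst);
		/* With both fences in place, observing the tick implies
		 * observing shared_data == 42. */
		printf("data = %d\n", shared_data);
	}
}

int main(void)
{
	note_quiescent_state();
	check_quiescent_state();
	return 0;
}

Without the writer-side fence, the counter increment could become visible before the data write, which is exactly the reordering the comment warns about: another CPU could see the quiescent state and end the grace period before the preempt-disable section's writes were visible everywhere.
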