Diffstat (limited to 'kernel/rcu/srcutree.c')
-rw-r--r--  kernel/rcu/srcutree.c  17
1 file changed, 9 insertions(+), 8 deletions(-)
diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
index 743d18379256..fb560fca9ef4 100644
--- a/kernel/rcu/srcutree.c
+++ b/kernel/rcu/srcutree.c
@@ -386,7 +386,7 @@ void cleanup_srcu_struct(struct srcu_struct *sp)
 		flush_delayed_work(&per_cpu_ptr(sp->sda, cpu)->work);
 	if (WARN_ON(rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
 	    WARN_ON(srcu_readers_active(sp))) {
-		pr_info("cleanup_srcu_struct: Active srcu_struct %p state: %d\n", sp, rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)));
+		pr_info("%s: Active srcu_struct %p state: %d\n", __func__, sp, rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)));
 		return; /* Caller forgot to stop doing call_srcu()? */
 	}
 	free_percpu(sp->sda);
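
The hunk above swaps the hard-coded "cleanup_srcu_struct:" prefix for the standard C99 __func__ identifier, so the log message can never drift out of sync if the function is renamed. A minimal user-space sketch of the same idiom, with printf() standing in for pr_info() and cleanup_example() a hypothetical stand-in for the kernel function:

#include <stdio.h>

static void cleanup_example(void *sp)
{
	/* __func__ expands to "cleanup_example" here, just as the
	 * kernel line above now gets "cleanup_srcu_struct" for free. */
	printf("%s: Active srcu_struct %p state: %d\n", __func__, sp, 0);
}

int main(void)
{
	cleanup_example(NULL);
	return 0;
}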
@@ -526,11 +526,11 @@ static void srcu_gp_end(struct srcu_struct *sp)
 {
 	unsigned long cbdelay;
 	bool cbs;
+	bool last_lvl;
 	int cpu;
 	unsigned long flags;
 	unsigned long gpseq;
 	int idx;
-	int idxnext;
 	unsigned long mask;
 	struct srcu_data *sdp;
 	struct srcu_node *snp;
@@ -554,11 +554,11 @@ static void srcu_gp_end(struct srcu_struct *sp)
 	/* Initiate callback invocation as needed. */
 	idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
-	idxnext = (idx + 1) % ARRAY_SIZE(snp->srcu_have_cbs);
 	rcu_for_each_node_breadth_first(sp, snp) {
 		spin_lock_irq_rcu_node(snp);
 		cbs = false;
-		if (snp >= sp->level[rcu_num_lvls - 1])
+		last_lvl = snp >= sp->level[rcu_num_lvls - 1];
+		if (last_lvl)
 			cbs = snp->srcu_have_cbs[idx] == gpseq;
 		snp->srcu_have_cbs[idx] = gpseq;
 		rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1);
 		if (ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, gpseq))
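
This hunk hoists the leaf-node test into the new last_lvl flag so the counter-wrap hunk below can reuse it. The test snp >= sp->level[rcu_num_lvls - 1] works because the srcu_node tree is flattened into a single array level by level, with the leaves occupying the tail. A hedged sketch of that layout trick, using hypothetical struct tree / struct node / is_leaf() names:

#include <stdbool.h>

struct node { int dummy; };

struct tree {
	struct node nodes[7];  /* breadth-first: 1 root, 2 inner, 4 leaves */
	struct node *level[3]; /* level[i] = first node of level i */
	int num_lvls;
};

/* Mirrors "snp >= sp->level[rcu_num_lvls - 1]": since the leaves sit
 * at the end of the flattened array, a plain address comparison
 * classifies any node in O(1). */
static bool is_leaf(struct tree *t, struct node *n)
{
	return n >= t->level[t->num_lvls - 1];
}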
@@ -571,13 +571,16 @@ static void srcu_gp_end(struct srcu_struct *sp)
 		if (cbs)
 			srcu_schedule_cbs_snp(sp, snp, mask, cbdelay);
 		/* Occasionally prevent srcu_data counter wrap. */
-		if (!(gpseq & counter_wrap_check))
+		if (!(gpseq & counter_wrap_check) && last_lvl)
 			for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
 				sdp = per_cpu_ptr(sp->sda, cpu);
 				spin_lock_irqsave_rcu_node(sdp, flags);
 				if (ULONG_CMP_GE(gpseq,
 						 sdp->srcu_gp_seq_needed + 100))
 					sdp->srcu_gp_seq_needed = gpseq;
+				if (ULONG_CMP_GE(gpseq,
+						 sdp->srcu_gp_seq_needed_exp + 100))
+					sdp->srcu_gp_seq_needed_exp = gpseq;
 				spin_unlock_irqrestore_rcu_node(sdp, flags);
 			}
 	}
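
Besides gating the scan on last_lvl, so each CPU's counters are visited once per grace period rather than once per tree level, the hunk above now also drags the expedited snapshot sdp->srcu_gp_seq_needed_exp forward. The comparison relies on the kernel's wrap-safe ordering macro; a sketch of its definition as found in include/linux/rcupdate.h:

#include <limits.h>

/* Wrap-safe "a is at or ahead of b": unsigned subtraction wraps, so
 * the test holds exactly when a has not fallen more than half the
 * unsigned range behind b. */
#define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))

Once gpseq has run 100 or more grace periods past a stale snapshot, the snapshot is reset to gpseq, so it can never be lapped by counter wrap.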
@@ -592,9 +595,7 @@ static void srcu_gp_end(struct srcu_struct *sp)
 	    ULONG_CMP_LT(gpseq, sp->srcu_gp_seq_needed)) {
 		srcu_gp_start(sp);
 		spin_unlock_irq_rcu_node(sp);
-		/* Throttle expedited grace periods: Should be rare! */
-		srcu_reschedule(sp, rcu_seq_ctr(gpseq) & 0x3ff
-				? 0 : SRCU_INTERVAL);
+		srcu_reschedule(sp, 0);
 	} else {
 		spin_unlock_irq_rcu_node(sp);
 	}
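
The final hunk removes the throttle on back-to-back grace periods: rcu_seq_ctr(gpseq) & 0x3ff is nonzero for 1023 of every 1024 grace periods (delay 0) and zero once per 1024, at which point the old code asked srcu_reschedule() to wait SRCU_INTERVAL before starting the next one. A sketch of the removed arithmetic, with removed_throttle_delay() a hypothetical name and SRCU_INTERVAL assumed to be 1 as in kernel/rcu/srcutree.h:

/* Delay the old code passed to srcu_reschedule() for each GP. */
#define SRCU_INTERVAL	1

static unsigned long removed_throttle_delay(unsigned long gp_ctr)
{
	/* 0x3ff masks the low 10 bits: zero once per 1024 GPs. */
	return (gp_ctr & 0x3ff) ? 0 : SRCU_INTERVAL;
}

With the throttle gone, srcu_gp_end() always kicks off the next needed grace period immediately.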