Diffstat (limited to 'kernel/rcu')
 kernel/rcu/sync.c        | 25
 kernel/rcu/tree.c        | 50
 kernel/rcu/tree.h        | 15
 kernel/rcu/tree_exp.h    | 10
 kernel/rcu/tree_plugin.h | 46
 kernel/rcu/update.c      |  9
 6 files changed, 83 insertions(+), 72 deletions(-)
diff --git a/kernel/rcu/sync.c b/kernel/rcu/sync.c
index 3f943efcf61c..be10036fa621 100644
--- a/kernel/rcu/sync.c
+++ b/kernel/rcu/sync.c
@@ -44,15 +44,15 @@ static const struct {
__INIT_HELD(rcu_read_lock_held)
},
[RCU_SCHED_SYNC] = {
- .sync = synchronize_sched,
- .call = call_rcu_sched,
- .wait = rcu_barrier_sched,
+ .sync = synchronize_rcu,
+ .call = call_rcu,
+ .wait = rcu_barrier,
__INIT_HELD(rcu_read_lock_sched_held)
},
[RCU_BH_SYNC] = {
- .sync = synchronize_rcu_bh,
- .call = call_rcu_bh,
- .wait = rcu_barrier_bh,
+ .sync = synchronize_rcu,
+ .call = call_rcu,
+ .wait = rcu_barrier,
__INIT_HELD(rcu_read_lock_bh_held)
},
};
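
With the RCU flavors consolidated, all three gp_ops[] entries now name the same grace-period primitives; only the lockdep held-lock check still distinguishes them. A minimal sketch of the table's shape, assuming the field types that the initializers above imply (the struct layout itself is not shown in this hunk):

    /*
     * Sketch: one set of primitives for every flavor; only the
     * debug-only "held" check (wrapped by __INIT_HELD) still varies.
     */
    static const struct {
            void (*sync)(void);                              /* synchronize_rcu() */
            void (*call)(struct rcu_head *, rcu_callback_t); /* call_rcu() */
            void (*wait)(void);                              /* rcu_barrier() */
    #ifdef CONFIG_PROVE_RCU
            int  (*held)(void);             /* e.g. rcu_read_lock_bh_held() */
    #endif
    } gp_ops[];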
@@ -125,8 +125,7 @@ void rcu_sync_enter(struct rcu_sync *rsp)
rsp->gp_state = GP_PENDING;
spin_unlock_irq(&rsp->rss_lock);
- BUG_ON(need_wait && need_sync);
-
+ WARN_ON_ONCE(need_wait && need_sync);
if (need_sync) {
gp_ops[rsp->gp_type].sync();
rsp->gp_state = GP_PASSED;
@@ -139,7 +138,7 @@ void rcu_sync_enter(struct rcu_sync *rsp)
* Nobody has yet been allowed the 'fast' path and thus we can
* avoid doing any sync(). The callback will get 'dropped'.
*/
- BUG_ON(rsp->gp_state != GP_PASSED);
+ WARN_ON_ONCE(rsp->gp_state != GP_PASSED);
}
}
@@ -166,8 +165,8 @@ static void rcu_sync_func(struct rcu_head *rhp)
struct rcu_sync *rsp = container_of(rhp, struct rcu_sync, cb_head);
unsigned long flags;
- BUG_ON(rsp->gp_state != GP_PASSED);
- BUG_ON(rsp->cb_state == CB_IDLE);
+ WARN_ON_ONCE(rsp->gp_state != GP_PASSED);
+ WARN_ON_ONCE(rsp->cb_state == CB_IDLE);
spin_lock_irqsave(&rsp->rss_lock, flags);
if (rsp->gp_count) {
@@ -225,7 +224,7 @@ void rcu_sync_dtor(struct rcu_sync *rsp)
{
int cb_state;
- BUG_ON(rsp->gp_count);
+ WARN_ON_ONCE(rsp->gp_count);
spin_lock_irq(&rsp->rss_lock);
if (rsp->cb_state == CB_REPLAY)
@@ -235,6 +234,6 @@ void rcu_sync_dtor(struct rcu_sync *rsp)
if (cb_state != CB_IDLE) {
gp_ops[rsp->gp_type].wait();
- BUG_ON(rsp->cb_state != CB_IDLE);
+ WARN_ON_ONCE(rsp->cb_state != CB_IDLE);
}
}
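
Every conversion in this file follows the same pattern: an internal consistency check that used to panic the machine now logs a single stack trace and lets execution continue. A hedged sketch of the behavioral difference, reusing the gp_count check from the hunk above:

    BUG_ON(rsp->gp_count);          /* old: halt the kernel on failure */
    WARN_ON_ONCE(rsp->gp_count);    /* new: warn once, then carry on */

Since these assertions guard state the code can limp along with, a warning preserves debuggability without turning a bookkeeping bug into an outage.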
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 121f833acd04..6ec3abbe90e2 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -500,16 +500,29 @@ void rcu_force_quiescent_state(void)
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
/*
+ * Convert a ->gp_state value to a character string.
+ */
+static const char *gp_state_getname(short gs)
+{
+ if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names))
+ return "???";
+ return gp_state_names[gs];
+}
+
+/*
* Show the state of the grace-period kthreads.
*/
void show_rcu_gp_kthreads(void)
{
int cpu;
+ unsigned long j;
struct rcu_data *rdp;
struct rcu_node *rnp;
- pr_info("%s: wait state: %d ->state: %#lx\n", rcu_state.name,
- rcu_state.gp_state, rcu_state.gp_kthread->state);
+ j = jiffies - READ_ONCE(rcu_state.gp_activity);
+ pr_info("%s: wait state: %s(%d) ->state: %#lx delta ->gp_activity %ld\n",
+ rcu_state.name, gp_state_getname(rcu_state.gp_state),
+ rcu_state.gp_state, rcu_state.gp_kthread->state, j);
rcu_for_each_node_breadth_first(rnp) {
if (ULONG_CMP_GE(rcu_state.gp_seq, rnp->gp_seq_needed))
continue;
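
The skip test above relies on wrap-safe sequence-number comparison: ->gp_seq counters increment forever and eventually wrap, so a plain >= would misfire. For reference, ULONG_CMP_GE() is conventionally defined as below (definition assumed from rcupdate.h, not part of this diff):

    /*
     * Wrap-safe "a >= b" for unsigned long counters: (a) - (b) is
     * evaluated modulo 2^BITS_PER_LONG, so values that straddle the
     * wrap point still compare correctly as long as they lie within
     * ULONG_MAX/2 of each other.
     */
    #define ULONG_CMP_GE(a, b)      (ULONG_MAX / 2 >= (a) - (b))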
@@ -891,12 +904,12 @@ void rcu_irq_enter_irqson(void)
}
/**
- * rcu_is_watching - see if RCU thinks that the current CPU is idle
+ * rcu_is_watching - see if RCU thinks that the current CPU is not idle
*
* Return true if RCU is watching the running CPU, which means that this
* CPU can safely enter RCU read-side critical sections. In other words,
- * if the current CPU is in its idle loop and is neither in an interrupt
- * or NMI handler, return true.
+ * if the current CPU is not in its idle loop or is in an interrupt or
+ * NMI handler, return true.
*/
bool notrace rcu_is_watching(void)
{
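
The reworded kernel-doc now matches the function's actual sense: it returns true when read-side critical sections are legal, i.e. whenever the CPU is not in the idle loop, or is in an interrupt or NMI handler. A hedged usage sketch for code that can be called from the idle path (the protected data is a placeholder):

    /* Tracing-style code that may run from idle must check first: */
    if (rcu_is_watching()) {
            rcu_read_lock();
            /* ... rcu_dereference() something here ... */
            rcu_read_unlock();
    }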
@@ -1143,16 +1156,6 @@ static void record_gp_stall_check_time(void)
}
/*
- * Convert a ->gp_state value to a character string.
- */
-static const char *gp_state_getname(short gs)
-{
- if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names))
- return "???";
- return gp_state_names[gs];
-}
-
-/*
* Complain about starvation of grace-period kthread.
*/
static void rcu_check_gp_kthread_starvation(void)
@@ -2032,9 +2035,9 @@ static void rcu_gp_cleanup(void)
rnp = rcu_get_root();
raw_spin_lock_irq_rcu_node(rnp); /* GP before ->gp_seq update. */
- /* Declare grace period done. */
- rcu_seq_end(&rcu_state.gp_seq);
+ /* Declare grace period done, trace first to use old GP number. */
trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("end"));
+ rcu_seq_end(&rcu_state.gp_seq);
rcu_state.gp_state = RCU_GP_IDLE;
/* Check for GP requests since above loop. */
rdp = this_cpu_ptr(&rcu_data);
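
Reordering the tracepoint before rcu_seq_end() makes the "end" event carry the number of the grace period that just finished instead of the already-advanced value, so "start" and "end" records pair up under one GP number in the trace:

    /* Trace with the old ->gp_seq, then advance it: */
    trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("end"));
    rcu_seq_end(&rcu_state.gp_seq); /* ->gp_seq now refers to the next GP */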
@@ -2600,10 +2603,10 @@ static void force_quiescent_state(void)
* This function checks for grace-period requests that fail to motivate
* RCU to come out of its idle mode.
*/
-static void
-rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp)
+void
+rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
+ const unsigned long gpssdelay)
{
- const unsigned long gpssdelay = rcu_jiffies_till_stall_check() * HZ;
unsigned long flags;
unsigned long j;
struct rcu_node *rnp_root = rcu_get_root();
@@ -2690,7 +2693,7 @@ static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused
local_irq_restore(flags);
}
- rcu_check_gp_start_stall(rnp, rdp);
+ rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check());
/* If there are callbacks ready, invoke them. */
if (rcu_segcblist_ready_cbs(&rdp->cblist))
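
rcu_check_gp_start_stall() now takes its delay from the caller instead of recomputing it on every RCU_SOFTIRQ pass. Note the units as well: the old local was rcu_jiffies_till_stall_check() * HZ, but if, as its name suggests, that helper already returns jiffies, the extra HZ factor inflated the delay enormously; the new call site passes the helper's value directly. Parametrizing also lets a hypothetical diagnostic caller tighten the deadline (illustrative only):

    /* Hypothetical: check for a stalled GP start with a shorter fuse. */
    rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check() / 4);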
@@ -2826,7 +2829,7 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func, int cpu, bool lazy)
* Very early boot, before rcu_init(). Initialize if needed
* and then drop through to queue the callback.
*/
- BUG_ON(cpu != -1);
+ WARN_ON_ONCE(cpu != -1);
WARN_ON_ONCE(!rcu_is_watching());
if (rcu_segcblist_empty(&rdp->cblist))
rcu_segcblist_init(&rdp->cblist);
@@ -3485,7 +3488,8 @@ static int __init rcu_spawn_gp_kthread(void)
rcu_scheduler_fully_active = 1;
t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name);
- BUG_ON(IS_ERR(t));
+ if (WARN_ONCE(IS_ERR(t), "%s: Could not start grace-period kthread, OOM is now expected behavior\n", __func__))
+ return 0;
rnp = rcu_get_root();
raw_spin_lock_irqsave_rcu_node(rnp, flags);
rcu_state.gp_kthread = t;
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 703e19ff532d..c3e2807a834a 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -57,7 +57,7 @@ struct rcu_node {
/* some rcu_state fields as well as */
/* following. */
unsigned long gp_seq; /* Track rsp->rcu_gp_seq. */
- unsigned long gp_seq_needed; /* Track rsp->rcu_gp_seq_needed. */
+ unsigned long gp_seq_needed; /* Track furthest future GP request. */
unsigned long completedqs; /* All QSes done for this node. */
unsigned long qsmask; /* CPUs or groups that need to switch in */
/* order for current grace period to proceed.*/
@@ -163,7 +163,7 @@ union rcu_noqs {
struct rcu_data {
/* 1) quiescent-state and grace-period handling : */
unsigned long gp_seq; /* Track rsp->rcu_gp_seq counter. */
- unsigned long gp_seq_needed; /* Track rsp->rcu_gp_seq_needed ctr. */
+ unsigned long gp_seq_needed; /* Track furthest future GP request. */
union rcu_noqs cpu_no_qs; /* No QSes yet for this CPU. */
bool core_needs_qs; /* Core waits for quiesc state. */
bool beenonline; /* CPU online at least once. */
@@ -398,17 +398,6 @@ static const char *tp_rcu_varname __used __tracepoint_string = rcu_name;
#define RCU_NAME rcu_name
#endif /* #else #ifdef CONFIG_TRACING */
-/*
- * RCU implementation internal declarations:
- */
-extern struct rcu_state rcu_sched_state;
-
-extern struct rcu_state rcu_bh_state;
-
-#ifdef CONFIG_PREEMPT_RCU
-extern struct rcu_state rcu_preempt_state;
-#endif /* #ifdef CONFIG_PREEMPT_RCU */
-
int rcu_dynticks_snap(struct rcu_data *rdp);
#ifdef CONFIG_RCU_BOOST
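
The dropped externs are fallout from flavor consolidation: with rcu_sched and rcu_bh folded into plain RCU, there is exactly one rcu_state and one per-CPU rcu_data, both defined in tree.c, so no per-flavor state structures remain to declare. Roughly the shape this leaves (a sketch, not the full initializers):

    /* One flavor, one state: */
    static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data);
    struct rcu_state rcu_state = { /* ... */ };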
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index 8d18c1014e2b..928fe5893a57 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -450,10 +450,12 @@ static void sync_rcu_exp_select_cpus(smp_call_func_t func)
}
INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
preempt_disable();
- cpu = cpumask_next(rnp->grplo - 1, cpu_online_mask);
+ cpu = find_next_bit(&rnp->ffmask, BITS_PER_LONG, -1);
/* If all offline, queue the work on an unbound CPU. */
- if (unlikely(cpu > rnp->grphi))
+ if (unlikely(cpu > rnp->grphi - rnp->grplo))
cpu = WORK_CPU_UNBOUND;
+ else
+ cpu += rnp->grplo;
queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work);
preempt_enable();
rnp->exp_need_flush = true;
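
The workqueue target is now chosen from ->ffmask (CPUs with full kernel functionality, e.g. not nohz_full) rather than from cpu_online_mask. Bits in ->ffmask are relative to the leaf node, so a hit must be rebased by grplo, and a miss shows up as an index beyond grphi - grplo. A sketch of the arithmetic, using a zero starting offset for clarity and assuming ->ffmask is a single word whose bit 0 corresponds to CPU rnp->grplo:

    cpu = find_next_bit(&rnp->ffmask, BITS_PER_LONG, 0);
    if (cpu > rnp->grphi - rnp->grplo)  /* no usable CPU in this node */
            cpu = WORK_CPU_UNBOUND;     /* let the workqueue pick one */
    else
            cpu += rnp->grplo;          /* rebase node-relative bit to CPU id */
    queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work);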
@@ -690,8 +692,10 @@ static void sync_rcu_exp_handler(void *unused)
*/
if (t->rcu_read_lock_nesting > 0) {
raw_spin_lock_irqsave_rcu_node(rnp, flags);
- if (rnp->expmask & rdp->grpmask)
+ if (rnp->expmask & rdp->grpmask) {
rdp->deferred_qs = true;
+ WRITE_ONCE(t->rcu_read_unlock_special.b.exp_hint, true);
+ }
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}
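
Setting ->exp_hint tells this task's next rcu_read_unlock() that an expedited grace period is waiting on it, so the unlock can report the quiescent state promptly rather than leaving it to the scheduling clock. The hint is consumed in rcu_read_unlock_special(), as the tree_plugin.h hunks below show; the reader side amounts to:

    /* Outermost rcu_read_unlock(): any set special bit, including
     * exp_hint, diverts to the slow path, which clears the hint and
     * reports the deferred quiescent state. */
    if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s)))
            rcu_read_unlock_special(t);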
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 05915e536336..605ff3b06098 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -397,6 +397,11 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
return rnp->gp_tasks != NULL;
}
+/* Bias and limit values for ->rcu_read_lock_nesting. */
+#define RCU_NEST_BIAS INT_MAX
+#define RCU_NEST_NMAX (-INT_MAX / 2)
+#define RCU_NEST_PMAX (INT_MAX / 2)
+
/*
* Preemptible RCU implementation for rcu_read_lock().
* Just increment ->rcu_read_lock_nesting, shared state will be updated
@@ -405,6 +410,8 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
void __rcu_read_lock(void)
{
current->rcu_read_lock_nesting++;
+ if (IS_ENABLED(CONFIG_PROVE_LOCKING))
+ WARN_ON_ONCE(current->rcu_read_lock_nesting > RCU_NEST_PMAX);
barrier(); /* critical section after entry code. */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);
@@ -424,20 +431,18 @@ void __rcu_read_unlock(void)
--t->rcu_read_lock_nesting;
} else {
barrier(); /* critical section before exit code. */
- t->rcu_read_lock_nesting = INT_MIN;
+ t->rcu_read_lock_nesting = -RCU_NEST_BIAS;
barrier(); /* assign before ->rcu_read_unlock_special load */
if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s)))
rcu_read_unlock_special(t);
barrier(); /* ->rcu_read_unlock_special load before assign */
t->rcu_read_lock_nesting = 0;
}
-#ifdef CONFIG_PROVE_LOCKING
- {
- int rrln = READ_ONCE(t->rcu_read_lock_nesting);
+ if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
+ int rrln = t->rcu_read_lock_nesting;
- WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
+ WARN_ON_ONCE(rrln < 0 && rrln > RCU_NEST_NMAX);
}
-#endif /* #ifdef CONFIG_PROVE_LOCKING */
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);
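
The bias constants replace bare INT_MIN magic. The outermost __rcu_read_unlock() parks ->rcu_read_lock_nesting at -RCU_NEST_BIAS so that any rcu_read_lock()/unlock() pair executed from an interrupt while rcu_read_unlock_special() runs sees a negative count and stays on the fast path, preventing recursion; biasing by -INT_MAX rather than INT_MIN leaves headroom so nested adjustments cannot underflow. The PROVE_LOCKING checks then enforce the resulting invariant:

    /* Legal ->rcu_read_lock_nesting values:
     *   0 .. RCU_NEST_PMAX       ordinary nesting depth
     *   around -RCU_NEST_BIAS    transiently, during outermost unlock
     * Values strictly between RCU_NEST_NMAX and 0 indicate a bug: */
    WARN_ON_ONCE(rrln < 0 && rrln > RCU_NEST_NMAX);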
@@ -597,7 +602,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
*/
static bool rcu_preempt_need_deferred_qs(struct task_struct *t)
{
- return (this_cpu_ptr(&rcu_data)->deferred_qs ||
+ return (__this_cpu_read(rcu_data.deferred_qs) ||
READ_ONCE(t->rcu_read_unlock_special.s)) &&
t->rcu_read_lock_nesting <= 0;
}
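
Switching to __this_cpu_read() is a micro-optimization with identical semantics here: instead of materializing a per-CPU pointer and dereferencing it, the compiler can emit a single per-CPU load (one %gs-relative instruction on x86):

    bool a = this_cpu_ptr(&rcu_data)->deferred_qs;  /* pointer math + load */
    bool b = __this_cpu_read(rcu_data.deferred_qs); /* single percpu load */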
@@ -617,11 +622,11 @@ static void rcu_preempt_deferred_qs(struct task_struct *t)
if (!rcu_preempt_need_deferred_qs(t))
return;
if (couldrecurse)
- t->rcu_read_lock_nesting -= INT_MIN;
+ t->rcu_read_lock_nesting -= RCU_NEST_BIAS;
local_irq_save(flags);
rcu_preempt_deferred_qs_irqrestore(t, flags);
if (couldrecurse)
- t->rcu_read_lock_nesting += INT_MIN;
+ t->rcu_read_lock_nesting += RCU_NEST_BIAS;
}
/*
@@ -642,13 +647,21 @@ static void rcu_read_unlock_special(struct task_struct *t)
local_irq_save(flags);
irqs_were_disabled = irqs_disabled_flags(flags);
- if ((preempt_bh_were_disabled || irqs_were_disabled) &&
- t->rcu_read_unlock_special.b.blocked) {
+ if (preempt_bh_were_disabled || irqs_were_disabled) {
+ WRITE_ONCE(t->rcu_read_unlock_special.b.exp_hint, false);
/* Need to defer quiescent state until everything is enabled. */
- raise_softirq_irqoff(RCU_SOFTIRQ);
+ if (irqs_were_disabled) {
+ /* Enabling irqs does not reschedule, so... */
+ raise_softirq_irqoff(RCU_SOFTIRQ);
+ } else {
+ /* Enabling BH or preempt does reschedule, so... */
+ set_tsk_need_resched(current);
+ set_preempt_need_resched();
+ }
local_irq_restore(flags);
return;
}
+ WRITE_ONCE(t->rcu_read_unlock_special.b.exp_hint, false);
rcu_preempt_deferred_qs_irqrestore(t, flags);
}
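
The rewritten slow path distinguishes how the deferred quiescent state will eventually be reported. With interrupts disabled, re-enabling them does not pass through the scheduler, so a softirq must be raised to do the reporting; with only BH or preemption disabled, the corresponding enable does recheck need_resched, so flagging a reschedule suffices:

    if (irqs_were_disabled) {
            /* local_irq_enable() will not reschedule: punt to softirq. */
            raise_softirq_irqoff(RCU_SOFTIRQ);
    } else {
            /* preempt_enable()/local_bh_enable() recheck need_resched: */
            set_tsk_need_resched(current);
            set_preempt_need_resched();
    }

Note also that ->exp_hint is cleared on every path through here, matching the producer in tree_exp.h above.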
@@ -1464,7 +1477,8 @@ static void __init rcu_spawn_boost_kthreads(void)
for_each_possible_cpu(cpu)
per_cpu(rcu_cpu_has_work, cpu) = 0;
- BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
+ if (WARN_ONCE(smpboot_register_percpu_thread(&rcu_cpu_thread_spec), "%s: Could not start rcub kthread, OOM is now expected behavior\n", __func__))
+ return;
rcu_for_each_leaf_node(rnp)
(void)rcu_spawn_one_boost_kthread(rnp);
}
@@ -2322,7 +2336,8 @@ static int rcu_nocb_kthread(void *arg)
tail = rdp->nocb_follower_tail;
rdp->nocb_follower_tail = &rdp->nocb_follower_head;
raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
- BUG_ON(!list);
+ if (WARN_ON_ONCE(!list))
+ continue;
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeNonEmpty"));
/* Each pass through the following loop invokes a callback. */
@@ -2495,7 +2510,8 @@ static void rcu_spawn_one_nocb_kthread(int cpu)
/* Spawn the kthread for this CPU. */
t = kthread_run(rcu_nocb_kthread, rdp_spawn,
"rcuo%c/%d", rcu_state.abbr, cpu);
- BUG_ON(IS_ERR(t));
+ if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo kthread, OOM is now expected behavior\n", __func__))
+ return;
WRITE_ONCE(rdp_spawn->nocb_kthread, t);
}
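
All of the kthread-spawning BUG_ON(IS_ERR(t)) sites in this series become warn-and-bail: if a boot-time kthread cannot be created, the system is almost certainly out of memory, and a one-time warning plus degraded RCU service beats a panic. The common shape (function and thread names here are illustrative):

    t = kthread_run(some_kthread_fn, arg, "some_kthread");
    if (WARN_ONCE(IS_ERR(t), "%s: Could not start kthread, OOM is now expected behavior\n",
                  __func__))
            return;     /* run degraded rather than BUG_ON() */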
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index f203b94f6b5b..1971869c4072 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -335,8 +335,7 @@ void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
/* Initialize and register callbacks for each crcu_array element. */
for (i = 0; i < n; i++) {
if (checktiny &&
- (crcu_array[i] == call_rcu ||
- crcu_array[i] == call_rcu_bh)) {
+ (crcu_array[i] == call_rcu)) {
might_sleep();
continue;
}
@@ -352,8 +351,7 @@ void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
/* Wait for all callbacks to be invoked. */
for (i = 0; i < n; i++) {
if (checktiny &&
- (crcu_array[i] == call_rcu ||
- crcu_array[i] == call_rcu_bh))
+ (crcu_array[i] == call_rcu))
continue;
for (j = 0; j < i; j++)
if (crcu_array[j] == crcu_array[i])
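
With call_rcu_bh() gone, call_rcu() is the only primitive left that Tiny RCU makes trivial, so both checktiny special cases in __wait_rcu_gp() shrink to a single comparison. Under Tiny RCU (one CPU, non-preemptible) no reader can run concurrently with a sleeping updater, so the wait degenerates to:

    if (checktiny && crcu_array[i] == call_rcu) {
            might_sleep();  /* a context switch is itself a grace period */
            continue;       /* nothing further to wait for */
    }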
@@ -822,7 +820,8 @@ static int __init rcu_spawn_tasks_kthread(void)
struct task_struct *t;
t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread");
- BUG_ON(IS_ERR(t));
+ if (WARN_ONCE(IS_ERR(t), "%s: Could not start Tasks-RCU grace-period kthread, OOM is now expected behavior\n", __func__))
+ return 0;
smp_mb(); /* Ensure others see full kthread. */
WRITE_ONCE(rcu_tasks_kthread_ptr, t);
return 0;