Diffstat (limited to 'kernel')
-rw-r--r--  kernel/rcutree.c          |  6
-rw-r--r--  kernel/rcutree.h          |  1
-rw-r--r--  kernel/rcutree_plugin.h   | 13
-rw-r--r--  kernel/time/Kconfig       | 10
-rw-r--r--  kernel/time/tick-sched.c  | 77
5 files changed, 58 insertions(+), 49 deletions(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index f5ab50235cba..1d4ceff793a4 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -1695,7 +1695,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
                           struct rcu_node *rnp, struct rcu_data *rdp)
 {
         /* No-CBs CPUs do not have orphanable callbacks. */
-        if (is_nocb_cpu(rdp->cpu))
+        if (rcu_is_nocb_cpu(rdp->cpu))
                 return;
 
         /*
@@ -2757,10 +2757,10 @@ static void _rcu_barrier(struct rcu_state *rsp)
          * corresponding CPU's preceding callbacks have been invoked.
          */
         for_each_possible_cpu(cpu) {
-                if (!cpu_online(cpu) && !is_nocb_cpu(cpu))
+                if (!cpu_online(cpu) && !rcu_is_nocb_cpu(cpu))
                         continue;
                 rdp = per_cpu_ptr(rsp->rda, cpu);
-                if (is_nocb_cpu(cpu)) {
+                if (rcu_is_nocb_cpu(cpu)) {
                         _rcu_barrier_trace(rsp, "OnlineNoCB", cpu,
                                            rsp->n_barrier_done);
                         atomic_inc(&rsp->barrier_cpu_count);
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index f993c0ac47db..38acc49da2c6 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -529,7 +529,6 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu);
 static void print_cpu_stall_info_end(void);
 static void zero_cpu_stall_ticks(struct rcu_data *rdp);
 static void increment_cpu_stall_ticks(void);
-static bool is_nocb_cpu(int cpu);
 static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
                             bool lazy);
 static bool rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index a5745e9b5d5a..0cd91cc18db4 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -2167,7 +2167,7 @@ static int __init parse_rcu_nocb_poll(char *arg)
 early_param("rcu_nocb_poll", parse_rcu_nocb_poll);
 
 /* Is the specified CPU a no-CPUs CPU? */
-static bool is_nocb_cpu(int cpu)
+bool rcu_is_nocb_cpu(int cpu)
 {
         if (have_rcu_nocb_mask)
                 return cpumask_test_cpu(cpu, rcu_nocb_mask);
@@ -2225,7 +2225,7 @@ static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
                             bool lazy)
 {
 
-        if (!is_nocb_cpu(rdp->cpu))
+        if (!rcu_is_nocb_cpu(rdp->cpu))
                 return 0;
         __call_rcu_nocb_enqueue(rdp, rhp, &rhp->next, 1, lazy);
         return 1;
@@ -2242,7 +2242,7 @@ static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
         long qll = rsp->qlen_lazy;
 
         /* If this is not a no-CBs CPU, tell the caller to do it the old way. */
-        if (!is_nocb_cpu(smp_processor_id()))
+        if (!rcu_is_nocb_cpu(smp_processor_id()))
                 return 0;
         rsp->qlen = 0;
         rsp->qlen_lazy = 0;
@@ -2282,7 +2282,7 @@ static bool nocb_cpu_expendable(int cpu)
          * If there are no no-CB CPUs or if this CPU is not a no-CB CPU,
          * then offlining this CPU is harmless.  Let it happen.
          */
-        if (!have_rcu_nocb_mask || is_nocb_cpu(cpu))
+        if (!have_rcu_nocb_mask || rcu_is_nocb_cpu(cpu))
                 return 1;
 
         /* If no memory, play it safe and keep the CPU around. */
@@ -2464,11 +2464,6 @@ static void __init rcu_init_nocb(void)
 
 #else /* #ifdef CONFIG_RCU_NOCB_CPU */
 
-static bool is_nocb_cpu(int cpu)
-{
-        return false;
-}
-
 static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
                             bool lazy)
 {
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
index 358d601a4fec..99c3f13dd478 100644
--- a/kernel/time/Kconfig
+++ b/kernel/time/Kconfig
@@ -128,6 +128,16 @@ config NO_HZ_FULL
 
 endchoice
 
+config NO_HZ_FULL_ALL
+       bool "Full dynticks system on all CPUs by default"
+       depends on NO_HZ_FULL
+       help
+         If the user doesn't pass the nohz_full boot option to
+         define the range of full dynticks CPUs, consider that all
+         CPUs in the system are full dynticks by default.
+         Note the boot CPU will still be kept outside the range to
+         handle the timekeeping duty.
+
 config NO_HZ
         bool "Old Idle dynticks config"
         depends on !ARCH_USES_GETTIMEOFFSET && GENERIC_CLOCKEVENTS
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 369b5769fc97..a76e09044f9f 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -158,11 +158,21 @@ int tick_nohz_full_cpu(int cpu)
 /* Parse the boot-time nohz CPU list from the kernel parameters. */
 static int __init tick_nohz_full_setup(char *str)
 {
+        int cpu;
+
         alloc_bootmem_cpumask_var(&nohz_full_mask);
-        if (cpulist_parse(str, nohz_full_mask) < 0)
+        if (cpulist_parse(str, nohz_full_mask) < 0) {
                 pr_warning("NOHZ: Incorrect nohz_full cpumask\n");
-        else
-                have_nohz_full_mask = true;
+                return 1;
+        }
+
+        cpu = smp_processor_id();
+        if (cpumask_test_cpu(cpu, nohz_full_mask)) {
+                pr_warning("NO_HZ: Clearing %d from nohz_full range for timekeeping\n", cpu);
+                cpumask_clear_cpu(cpu, nohz_full_mask);
+        }
+        have_nohz_full_mask = true;
+
         return 1;
 }
 __setup("nohz_full=", tick_nohz_full_setup);
@@ -193,51 +203,46 @@ static int __cpuinit tick_nohz_cpu_down_callback(struct notifier_block *nfb,
  */
 static char __initdata nohz_full_buf[NR_CPUS + 1];
 
-static int __init init_tick_nohz_full(void)
+static int tick_nohz_init_all(void)
 {
-        cpumask_var_t online_nohz;
-        int cpu;
+        int err = -1;
 
-        if (!have_nohz_full_mask)
-                return 0;
+#ifdef CONFIG_NO_HZ_FULL_ALL
+        if (!alloc_cpumask_var(&nohz_full_mask, GFP_KERNEL)) {
+                pr_err("NO_HZ: Can't allocate full dynticks cpumask\n");
+                return err;
+        }
+        err = 0;
+        cpumask_setall(nohz_full_mask);
+        cpumask_clear_cpu(smp_processor_id(), nohz_full_mask);
+        have_nohz_full_mask = true;
+#endif
+        return err;
+}
 
-        cpu_notifier(tick_nohz_cpu_down_callback, 0);
+void __init tick_nohz_init(void)
+{
+        int cpu;
 
-        if (!zalloc_cpumask_var(&online_nohz, GFP_KERNEL)) {
-                pr_warning("NO_HZ: Not enough memory to check full nohz mask\n");
-                return -ENOMEM;
+        if (!have_nohz_full_mask) {
+                if (tick_nohz_init_all() < 0)
+                        return;
         }
 
-        /*
-         * CPUs can probably not be concurrently offlined on initcall time.
-         * But we are paranoid, aren't we?
-         */
-        get_online_cpus();
+        cpu_notifier(tick_nohz_cpu_down_callback, 0);
 
-        /* Ensure we keep a CPU outside the dynticks range for timekeeping */
-        cpumask_and(online_nohz, cpu_online_mask, nohz_full_mask);
-        if (cpumask_equal(online_nohz, cpu_online_mask)) {
-                pr_warning("NO_HZ: Must keep at least one online CPU "
-                           "out of nohz_full range\n");
-                /*
-                 * We know the current CPU doesn't have its tick stopped.
-                 * Let's use it for the timekeeping duty.
-                 */
-                preempt_disable();
-                cpu = smp_processor_id();
-                pr_warning("NO_HZ: Clearing %d from nohz_full range\n", cpu);
-                cpumask_clear_cpu(cpu, nohz_full_mask);
-                preempt_enable();
+        /* Make sure full dynticks CPU are also RCU nocbs */
+        for_each_cpu(cpu, nohz_full_mask) {
+                if (!rcu_is_nocb_cpu(cpu)) {
+                        pr_warning("NO_HZ: CPU %d is not RCU nocb: "
+                                   "cleared from nohz_full range", cpu);
+                        cpumask_clear_cpu(cpu, nohz_full_mask);
+                }
         }
-        put_online_cpus();
-        free_cpumask_var(online_nohz);
 
         cpulist_scnprintf(nohz_full_buf, sizeof(nohz_full_buf), nohz_full_mask);
         pr_info("NO_HZ: Full dynticks CPUs: %s.\n", nohz_full_buf);
-
-        return 0;
 }
-core_initcall(init_tick_nohz_full);
 
 #else
 #define have_nohz_full_mask (0)
 #endif
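Usage note (not part of the patch): with this change the full dynticks range comes either from the nohz_full= boot parameter or, when CONFIG_NO_HZ_FULL_ALL is set, from all CPUs minus the boot CPU. A minimal boot command line sketch, assuming a hypothetical 8-CPU machine where CPU 0 boots the kernel; rcu_nocbs= is the existing RCU no-CBs boot option that tick_nohz_init() now checks each full dynticks CPU against:

        nohz_full=1-7 rcu_nocbs=1-7

If the boot CPU were included in nohz_full=, tick_nohz_full_setup() would clear it from the range to keep it for timekeeping, and any listed CPU that is not also an RCU no-CBs CPU is dropped from the range at init time.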