Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/cpu.c     | 196
-rw-r--r-- | kernel/padata.c  |  88
-rw-r--r-- | kernel/relay.c   | 124
-rw-r--r-- | kernel/softirq.c |  27
4 files changed, 203 insertions, 232 deletions
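All four conversions below follow the same pattern: a CPU hotplug notifier that multiplexed CPU_* actions through a switch statement is replaced by explicit startup/teardown callbacks registered for a named state in the hotplug state machine. A minimal sketch of such a registration, assuming a hypothetical subsystem "foo" (the names and callbacks are placeholders, not part of this diff):

#include <linux/cpuhotplug.h>

static int foo_online(unsigned int cpu)
{
	/* set up foo's per-cpu resources for @cpu */
	return 0;
}

static int foo_prep_down(unsigned int cpu)
{
	/* undo foo_online() before @cpu goes away */
	return 0;
}

static int __init foo_init(void)
{
	int ret;

	/*
	 * CPUHP_AP_ONLINE_DYN allocates a dynamic state; the startup
	 * callback is also invoked for CPUs that are already online.
	 */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "foo:online",
				foo_online, foo_prep_down);
	return ret < 0 ? ret : 0;
}

For a dynamic state, cpuhp_setup_state() returns the allocated state number on success, which is why the padata code below stores the return value before mapping it to zero.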
diff --git a/kernel/cpu.c b/kernel/cpu.c
index c506485eaa75..7c783876cbcb 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -23,6 +23,8 @@
 #include <linux/tick.h>
 #include <linux/irq.h>
 #include <linux/smpboot.h>
+#include <linux/relay.h>
+#include <linux/slab.h>
 
 #include <trace/events/power.h>
 #define CREATE_TRACE_POINTS
@@ -73,15 +75,15 @@ static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state);
 struct cpuhp_step {
 	const char	*name;
 	union {
-		int	(*startup)(unsigned int cpu);
-		int	(*startup_multi)(unsigned int cpu,
-					 struct hlist_node *node);
-	};
+		int	(*single)(unsigned int cpu);
+		int	(*multi)(unsigned int cpu,
+				 struct hlist_node *node);
+	} startup;
 	union {
-		int	(*teardown)(unsigned int cpu);
-		int	(*teardown_multi)(unsigned int cpu,
-					  struct hlist_node *node);
-	};
+		int	(*single)(unsigned int cpu);
+		int	(*multi)(unsigned int cpu,
+				 struct hlist_node *node);
+	} teardown;
 	struct hlist_head	list;
 	bool			skip_onerr;
 	bool			cant_stop;
@@ -127,7 +129,7 @@ static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
 	int ret, cnt;
 
 	if (!step->multi_instance) {
-		cb = bringup ? step->startup : step->teardown;
+		cb = bringup ? step->startup.single : step->teardown.single;
 		if (!cb)
 			return 0;
 		trace_cpuhp_enter(cpu, st->target, state, cb);
@@ -135,7 +137,7 @@ static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
 		trace_cpuhp_exit(cpu, st->state, state, ret);
 		return ret;
 	}
-	cbm = bringup ? step->startup_multi : step->teardown_multi;
+	cbm = bringup ? step->startup.multi : step->teardown.multi;
 	if (!cbm)
 		return 0;
@@ -160,7 +162,7 @@ static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
 	return 0;
 err:
 	/* Rollback the instances if one failed */
-	cbm = !bringup ? step->startup_multi : step->teardown_multi;
+	cbm = !bringup ? step->startup.multi : step->teardown.multi;
 	if (!cbm)
 		return ret;
@@ -331,10 +333,17 @@ void cpu_hotplug_disable(void)
 }
 EXPORT_SYMBOL_GPL(cpu_hotplug_disable);
 
+static void __cpu_hotplug_enable(void)
+{
+	if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
+		return;
+	cpu_hotplug_disabled--;
+}
+
 void cpu_hotplug_enable(void)
 {
 	cpu_maps_update_begin();
-	WARN_ON(--cpu_hotplug_disabled < 0);
+	__cpu_hotplug_enable();
 	cpu_maps_update_done();
 }
 EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
@@ -401,12 +410,6 @@ static int notify_online(unsigned int cpu)
 	return 0;
 }
 
-static int notify_starting(unsigned int cpu)
-{
-	cpu_notify(CPU_STARTING, cpu);
-	return 0;
-}
-
 static int bringup_wait_for_ap(unsigned int cpu)
 {
 	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
@@ -752,12 +755,6 @@ static int notify_down_prepare(unsigned int cpu)
 	return err;
 }
 
-static int notify_dying(unsigned int cpu)
-{
-	cpu_notify(CPU_DYING, cpu);
-	return 0;
-}
-
 /* Take this CPU down. */
 static int take_cpu_down(void *_param)
 {
@@ -816,7 +813,7 @@ static int takedown_cpu(unsigned int cpu)
 	BUG_ON(cpu_online(cpu));
 
 	/*
-	 * The migration_call() CPU_DYING callback will have removed all
+	 * The CPUHP_AP_SCHED_MIGRATE_DYING callback will have removed all
 	 * runnable tasks from the cpu, there's only the idle task left now
 	 * that the migration thread is done doing the stop_machine thing.
 	 *
@@ -869,7 +866,6 @@ void cpuhp_report_idle_dead(void)
 #define notify_down_prepare	NULL
 #define takedown_cpu		NULL
 #define notify_dead		NULL
-#define notify_dying		NULL
 #endif
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -959,10 +955,9 @@ EXPORT_SYMBOL(cpu_down);
 #endif /*CONFIG_HOTPLUG_CPU*/
 
 /**
- * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
+ * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
  * @cpu: cpu that just started
  *
- * This function calls the cpu_chain notifiers with CPU_STARTING.
  * It must be called by the arch code on the new cpu, before the new cpu
  * enables interrupts and before the "boot" cpu returns from __cpu_up().
  */
@@ -1160,7 +1155,7 @@ void enable_nonboot_cpus(void)
 
 	/* Allow everyone to use the CPU hotplug again */
 	cpu_maps_update_begin();
-	WARN_ON(--cpu_hotplug_disabled < 0);
+	__cpu_hotplug_enable();
 	if (cpumask_empty(frozen_cpus))
 		goto out;
@@ -1249,40 +1244,50 @@ core_initcall(cpu_hotplug_pm_sync_init);
 static struct cpuhp_step cpuhp_bp_states[] = {
 	[CPUHP_OFFLINE] = {
 		.name			= "offline",
-		.startup		= NULL,
-		.teardown		= NULL,
+		.startup.single		= NULL,
+		.teardown.single	= NULL,
 	},
 #ifdef CONFIG_SMP
 	[CPUHP_CREATE_THREADS]= {
-		.name			= "threads:create",
-		.startup		= smpboot_create_threads,
-		.teardown		= NULL,
+		.name			= "threads:prepare",
+		.startup.single		= smpboot_create_threads,
+		.teardown.single	= NULL,
 		.cant_stop		= true,
 	},
 	[CPUHP_PERF_PREPARE] = {
-		.name			= "perf prepare",
-		.startup		= perf_event_init_cpu,
-		.teardown		= perf_event_exit_cpu,
+		.name			= "perf:prepare",
+		.startup.single		= perf_event_init_cpu,
+		.teardown.single	= perf_event_exit_cpu,
 	},
 	[CPUHP_WORKQUEUE_PREP] = {
-		.name			= "workqueue prepare",
-		.startup		= workqueue_prepare_cpu,
-		.teardown		= NULL,
+		.name			= "workqueue:prepare",
+		.startup.single		= workqueue_prepare_cpu,
+		.teardown.single	= NULL,
 	},
 	[CPUHP_HRTIMERS_PREPARE] = {
-		.name			= "hrtimers prepare",
-		.startup		= hrtimers_prepare_cpu,
-		.teardown		= hrtimers_dead_cpu,
+		.name			= "hrtimers:prepare",
+		.startup.single		= hrtimers_prepare_cpu,
+		.teardown.single	= hrtimers_dead_cpu,
 	},
 	[CPUHP_SMPCFD_PREPARE] = {
-		.name			= "SMPCFD prepare",
-		.startup		= smpcfd_prepare_cpu,
-		.teardown		= smpcfd_dead_cpu,
+		.name			= "smpcfd:prepare",
+		.startup.single		= smpcfd_prepare_cpu,
+		.teardown.single	= smpcfd_dead_cpu,
+	},
+	[CPUHP_RELAY_PREPARE] = {
+		.name			= "relay:prepare",
+		.startup.single		= relay_prepare_cpu,
+		.teardown.single	= NULL,
+	},
+	[CPUHP_SLAB_PREPARE] = {
+		.name			= "slab:prepare",
+		.startup.single		= slab_prepare_cpu,
+		.teardown.single	= slab_dead_cpu,
 	},
 	[CPUHP_RCUTREE_PREP] = {
-		.name			= "RCU-tree prepare",
-		.startup		= rcutree_prepare_cpu,
-		.teardown		= rcutree_dead_cpu,
+		.name			= "RCU/tree:prepare",
+		.startup.single		= rcutree_prepare_cpu,
+		.teardown.single	= rcutree_dead_cpu,
 	},
 	/*
 	 * Preparatory and dead notifiers. Will be replaced once the notifiers
@@ -1290,8 +1295,8 @@ static struct cpuhp_step cpuhp_bp_states[] = {
 	 */
 	[CPUHP_NOTIFY_PREPARE] = {
 		.name			= "notify:prepare",
-		.startup		= notify_prepare,
-		.teardown		= notify_dead,
+		.startup.single		= notify_prepare,
+		.teardown.single	= notify_dead,
 		.skip_onerr		= true,
 		.cant_stop		= true,
 	},
@@ -1301,20 +1306,21 @@ static struct cpuhp_step cpuhp_bp_states[] = {
 	 * otherwise a RCU stall occurs.
 	 */
 	[CPUHP_TIMERS_DEAD] = {
-		.name			= "timers dead",
-		.startup		= NULL,
-		.teardown		= timers_dead_cpu,
+		.name			= "timers:dead",
+		.startup.single		= NULL,
+		.teardown.single	= timers_dead_cpu,
 	},
 	/* Kicks the plugged cpu into life */
 	[CPUHP_BRINGUP_CPU] = {
 		.name			= "cpu:bringup",
-		.startup		= bringup_cpu,
-		.teardown		= NULL,
+		.startup.single		= bringup_cpu,
+		.teardown.single	= NULL,
 		.cant_stop		= true,
 	},
 	[CPUHP_AP_SMPCFD_DYING] = {
-		.startup		= NULL,
-		.teardown		= smpcfd_dying_cpu,
+		.name			= "smpcfd:dying",
+		.startup.single		= NULL,
+		.teardown.single	= smpcfd_dying_cpu,
 	},
 	/*
 	 * Handled on controll processor until the plugged processor manages
@@ -1322,8 +1328,8 @@ static struct cpuhp_step cpuhp_bp_states[] = {
 	 */
 	[CPUHP_TEARDOWN_CPU] = {
 		.name			= "cpu:teardown",
-		.startup		= NULL,
-		.teardown		= takedown_cpu,
+		.startup.single		= NULL,
+		.teardown.single	= takedown_cpu,
 		.cant_stop		= true,
 	},
 #else
@@ -1349,24 +1355,13 @@ static struct cpuhp_step cpuhp_ap_states[] = {
 	/* First state is scheduler control. Interrupts are disabled */
 	[CPUHP_AP_SCHED_STARTING] = {
 		.name			= "sched:starting",
-		.startup		= sched_cpu_starting,
-		.teardown		= sched_cpu_dying,
+		.startup.single		= sched_cpu_starting,
+		.teardown.single	= sched_cpu_dying,
 	},
 	[CPUHP_AP_RCUTREE_DYING] = {
-		.startup		= NULL,
-		.teardown		= rcutree_dying_cpu,
-	},
-	/*
-	 * Low level startup/teardown notifiers. Run with interrupts
-	 * disabled. Will be removed once the notifiers are converted to
-	 * states.
-	 */
-	[CPUHP_AP_NOTIFY_STARTING] = {
-		.name			= "notify:starting",
-		.startup		= notify_starting,
-		.teardown		= notify_dying,
-		.skip_onerr		= true,
-		.cant_stop		= true,
+		.name			= "RCU/tree:dying",
+		.startup.single		= NULL,
+		.teardown.single	= rcutree_dying_cpu,
 	},
 	/* Entry state on starting. Interrupts enabled from here on. Transient
 	 * state for synchronsization */
@@ -1375,24 +1370,24 @@ static struct cpuhp_step cpuhp_ap_states[] = {
 	},
 	/* Handle smpboot threads park/unpark */
 	[CPUHP_AP_SMPBOOT_THREADS] = {
-		.name			= "smpboot:threads",
-		.startup		= smpboot_unpark_threads,
-		.teardown		= NULL,
+		.name			= "smpboot/threads:online",
+		.startup.single		= smpboot_unpark_threads,
+		.teardown.single	= NULL,
 	},
 	[CPUHP_AP_PERF_ONLINE] = {
-		.name			= "perf online",
-		.startup		= perf_event_init_cpu,
-		.teardown		= perf_event_exit_cpu,
+		.name			= "perf:online",
+		.startup.single		= perf_event_init_cpu,
+		.teardown.single	= perf_event_exit_cpu,
 	},
 	[CPUHP_AP_WORKQUEUE_ONLINE] = {
-		.name			= "workqueue online",
-		.startup		= workqueue_online_cpu,
-		.teardown		= workqueue_offline_cpu,
+		.name			= "workqueue:online",
+		.startup.single		= workqueue_online_cpu,
+		.teardown.single	= workqueue_offline_cpu,
 	},
 	[CPUHP_AP_RCUTREE_ONLINE] = {
-		.name			= "RCU-tree online",
-		.startup		= rcutree_online_cpu,
-		.teardown		= rcutree_offline_cpu,
+		.name			= "RCU/tree:online",
+		.startup.single		= rcutree_online_cpu,
+		.teardown.single	= rcutree_offline_cpu,
 	},
 
 	/*
@@ -1401,8 +1396,8 @@ static struct cpuhp_step cpuhp_ap_states[] = {
 	 */
 	[CPUHP_AP_NOTIFY_ONLINE] = {
 		.name			= "notify:online",
-		.startup		= notify_online,
-		.teardown		= notify_down_prepare,
+		.startup.single		= notify_online,
+		.teardown.single	= notify_down_prepare,
 		.skip_onerr		= true,
 	},
 #endif
@@ -1414,16 +1409,16 @@ static struct cpuhp_step cpuhp_ap_states[] = {
 	/* Last state is scheduler control setting the cpu active */
 	[CPUHP_AP_ACTIVE] = {
 		.name			= "sched:active",
-		.startup		= sched_cpu_activate,
-		.teardown		= sched_cpu_deactivate,
+		.startup.single		= sched_cpu_activate,
+		.teardown.single	= sched_cpu_deactivate,
 	},
 #endif
 
 	/* CPU is fully up and running. */
 	[CPUHP_ONLINE] = {
 		.name			= "online",
-		.startup		= NULL,
-		.teardown		= NULL,
+		.startup.single		= NULL,
+		.teardown.single	= NULL,
 	},
 };
@@ -1446,8 +1441,8 @@ static void cpuhp_store_callbacks(enum cpuhp_state state,
 
 	mutex_lock(&cpuhp_state_mutex);
 	sp = cpuhp_get_step(state);
-	sp->startup = startup;
-	sp->teardown = teardown;
+	sp->startup.single = startup;
+	sp->teardown.single = teardown;
 	sp->name = name;
 	sp->multi_instance = multi_instance;
 	INIT_HLIST_HEAD(&sp->list);
@@ -1456,7 +1451,7 @@ static void cpuhp_store_callbacks(enum cpuhp_state state,
 
 static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
 {
-	return cpuhp_get_step(state)->teardown;
+	return cpuhp_get_step(state)->teardown.single;
 }
 
 /*
@@ -1469,7 +1464,8 @@ static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
 	struct cpuhp_step *sp = cpuhp_get_step(state);
 	int ret;
 
-	if ((bringup && !sp->startup) || (!bringup && !sp->teardown))
+	if ((bringup && !sp->startup.single) ||
+	    (!bringup && !sp->teardown.single))
 		return 0;
 	/*
 	 * The non AP bound callbacks can fail on bringup. On teardown
@@ -1547,7 +1543,7 @@ int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
 
 	get_online_cpus();
 
-	if (!invoke || !sp->startup_multi)
+	if (!invoke || !sp->startup.multi)
 		goto add_node;
 
 	/*
@@ -1563,7 +1559,7 @@ int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
 
 		ret = cpuhp_issue_call(cpu, state, true, node);
 		if (ret) {
-			if (sp->teardown_multi)
+			if (sp->teardown.multi)
 				cpuhp_rollback_install(cpu, state, node);
 			goto err;
 		}
diff --git a/kernel/padata.c b/kernel/padata.c
index 993278895ccc..7848f0566403 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -30,6 +30,7 @@
 #include <linux/slab.h>
 #include <linux/sysfs.h>
 #include <linux/rcupdate.h>
+#include <linux/module.h>
 
 #define MAX_OBJ_NUM 1000
@@ -769,52 +770,43 @@ static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu)
 		cpumask_test_cpu(cpu, pinst->cpumask.cbcpu);
 }
 
-
-static int padata_cpu_callback(struct notifier_block *nfb,
-			       unsigned long action, void *hcpu)
+static int padata_cpu_online(unsigned int cpu, struct hlist_node *node)
 {
-	int err;
 	struct padata_instance *pinst;
-	int cpu = (unsigned long)hcpu;
+	int ret;
 
-	pinst = container_of(nfb, struct padata_instance, cpu_notifier);
+	pinst = hlist_entry_safe(node, struct padata_instance, node);
+	if (!pinst_has_cpu(pinst, cpu))
+		return 0;
 
-	switch (action) {
-	case CPU_ONLINE:
-	case CPU_ONLINE_FROZEN:
-	case CPU_DOWN_FAILED:
-	case CPU_DOWN_FAILED_FROZEN:
-		if (!pinst_has_cpu(pinst, cpu))
-			break;
-		mutex_lock(&pinst->lock);
-		err = __padata_add_cpu(pinst, cpu);
-		mutex_unlock(&pinst->lock);
-		if (err)
-			return notifier_from_errno(err);
-		break;
+	mutex_lock(&pinst->lock);
+	ret = __padata_add_cpu(pinst, cpu);
+	mutex_unlock(&pinst->lock);
+	return ret;
+}
 
-	case CPU_DOWN_PREPARE:
-	case CPU_DOWN_PREPARE_FROZEN:
-	case CPU_UP_CANCELED:
-	case CPU_UP_CANCELED_FROZEN:
-		if (!pinst_has_cpu(pinst, cpu))
-			break;
-		mutex_lock(&pinst->lock);
-		err = __padata_remove_cpu(pinst, cpu);
-		mutex_unlock(&pinst->lock);
-		if (err)
-			return notifier_from_errno(err);
-		break;
-	}
+static int padata_cpu_prep_down(unsigned int cpu, struct hlist_node *node)
+{
+	struct padata_instance *pinst;
+	int ret;
+
+	pinst = hlist_entry_safe(node, struct padata_instance, node);
+	if (!pinst_has_cpu(pinst, cpu))
+		return 0;
 
-	return NOTIFY_OK;
+	mutex_lock(&pinst->lock);
+	ret = __padata_remove_cpu(pinst, cpu);
+	mutex_unlock(&pinst->lock);
+	return ret;
 }
+
+static enum cpuhp_state hp_online;
 #endif
 
 static void __padata_free(struct padata_instance *pinst)
 {
 #ifdef CONFIG_HOTPLUG_CPU
-	unregister_hotcpu_notifier(&pinst->cpu_notifier);
+	cpuhp_state_remove_instance_nocalls(hp_online, &pinst->node);
 #endif
 
 	padata_stop(pinst);
@@ -1012,11 +1004,8 @@ struct padata_instance *padata_alloc(struct workqueue_struct *wq,
 	mutex_init(&pinst->lock);
 
 #ifdef CONFIG_HOTPLUG_CPU
-	pinst->cpu_notifier.notifier_call = padata_cpu_callback;
-	pinst->cpu_notifier.priority = 0;
-	register_hotcpu_notifier(&pinst->cpu_notifier);
+	cpuhp_state_add_instance_nocalls(hp_online, &pinst->node);
 #endif
-
 	return pinst;
 
 err_free_masks:
@@ -1039,3 +1028,26 @@ void padata_free(struct padata_instance *pinst)
 	kobject_put(&pinst->kobj);
 }
 EXPORT_SYMBOL(padata_free);
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+static __init int padata_driver_init(void)
+{
+	int ret;
+
+	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "padata:online",
+				      padata_cpu_online,
+				      padata_cpu_prep_down);
+	if (ret < 0)
+		return ret;
+	hp_online = ret;
+	return 0;
+}
+module_init(padata_driver_init);
+
+static __exit void padata_driver_exit(void)
+{
+	cpuhp_remove_multi_state(hp_online);
+}
+module_exit(padata_driver_exit);
+#endif
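The padata conversion above uses the multi-instance flavor of the API: the state is set up once with callbacks taking a struct hlist_node *, each padata_instance links itself in, and the shared callbacks recover the instance with hlist_entry_safe(). A sketch of the same pattern for a hypothetical multi-instance driver (all "my" names are placeholders):

#include <linux/cpuhotplug.h>
#include <linux/errno.h>
#include <linux/list.h>

struct myinst {
	struct hlist_node node;	/* links into the state's instance list */
	/* per-instance data ... */
};

static enum cpuhp_state my_online;

static int my_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct myinst *inst = hlist_entry_safe(node, struct myinst, node);

	/* bring @cpu into service for @inst */
	return inst ? 0 : -EINVAL;
}

static int my_cpu_prep_down(unsigned int cpu, struct hlist_node *node)
{
	struct myinst *inst = hlist_entry_safe(node, struct myinst, node);

	/* take @cpu out of service for @inst */
	return inst ? 0 : -EINVAL;
}

static int __init my_init(void)
{
	int ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "my:online",
					  my_cpu_online, my_cpu_prep_down);
	if (ret < 0)
		return ret;
	my_online = ret;
	return 0;
}

static int my_register(struct myinst *inst)
{
	/* _nocalls: link the instance without invoking the callbacks */
	return cpuhp_state_add_instance_nocalls(my_online, &inst->node);
}

This mirrors the shape of padata_driver_init() and __padata_free() above; padata uses the _nocalls variants because it brings its instances up and down itself.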
diff --git a/kernel/relay.c b/kernel/relay.c
index d797502140b9..fc9b4a4af463 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -214,7 +214,7 @@ static void relay_destroy_buf(struct rchan_buf *buf)
 			__free_page(buf->page_array[i]);
 		relay_free_page_array(buf->page_array);
 	}
-	chan->buf[buf->cpu] = NULL;
+	*per_cpu_ptr(chan->buf, buf->cpu) = NULL;
 	kfree(buf->padding);
 	kfree(buf);
 	kref_put(&chan->kref, relay_destroy_channel);
@@ -382,20 +382,21 @@ static void __relay_reset(struct rchan_buf *buf, unsigned int init)
  */
 void relay_reset(struct rchan *chan)
 {
+	struct rchan_buf *buf;
 	unsigned int i;
 
 	if (!chan)
 		return;
 
-	if (chan->is_global && chan->buf[0]) {
-		__relay_reset(chan->buf[0], 0);
+	if (chan->is_global && (buf = *per_cpu_ptr(chan->buf, 0))) {
+		__relay_reset(buf, 0);
 		return;
 	}
 
 	mutex_lock(&relay_channels_mutex);
 	for_each_possible_cpu(i)
-		if (chan->buf[i])
-			__relay_reset(chan->buf[i], 0);
+		if ((buf = *per_cpu_ptr(chan->buf, i)))
+			__relay_reset(buf, 0);
 	mutex_unlock(&relay_channels_mutex);
 }
 EXPORT_SYMBOL_GPL(relay_reset);
@@ -440,7 +441,7 @@ static struct rchan_buf *relay_open_buf(struct rchan *chan, unsigned int cpu)
 	struct dentry *dentry;
 
 	if (chan->is_global)
-		return chan->buf[0];
+		return *per_cpu_ptr(chan->buf, 0);
 
 	buf = relay_create_buf(chan);
 	if (!buf)
@@ -464,7 +465,7 @@ static struct rchan_buf *relay_open_buf(struct rchan *chan, unsigned int cpu)
 	__relay_reset(buf, 1);
 
 	if(chan->is_global) {
-		chan->buf[0] = buf;
+		*per_cpu_ptr(chan->buf, 0) = buf;
 		buf->cpu = 0;
 	}
@@ -512,46 +513,25 @@ static void setup_callbacks(struct rchan *chan,
 	chan->cb = cb;
 }
 
-/**
- * relay_hotcpu_callback - CPU hotplug callback
- * @nb: notifier block
- * @action: hotplug action to take
- * @hcpu: CPU number
- *
- * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
- */
-static int relay_hotcpu_callback(struct notifier_block *nb,
-				unsigned long action,
-				void *hcpu)
+int relay_prepare_cpu(unsigned int cpu)
 {
-	unsigned int hotcpu = (unsigned long)hcpu;
 	struct rchan *chan;
+	struct rchan_buf *buf;
 
-	switch(action) {
-	case CPU_UP_PREPARE:
-	case CPU_UP_PREPARE_FROZEN:
-		mutex_lock(&relay_channels_mutex);
-		list_for_each_entry(chan, &relay_channels, list) {
-			if (chan->buf[hotcpu])
-				continue;
-			chan->buf[hotcpu] = relay_open_buf(chan, hotcpu);
-			if(!chan->buf[hotcpu]) {
-				printk(KERN_ERR
-					"relay_hotcpu_callback: cpu %d buffer "
-					"creation failed\n", hotcpu);
-				mutex_unlock(&relay_channels_mutex);
-				return notifier_from_errno(-ENOMEM);
-			}
+	mutex_lock(&relay_channels_mutex);
+	list_for_each_entry(chan, &relay_channels, list) {
+		if ((buf = *per_cpu_ptr(chan->buf, cpu)))
+			continue;
+		buf = relay_open_buf(chan, cpu);
+		if (!buf) {
+			pr_err("relay: cpu %d buffer creation failed\n", cpu);
+			mutex_unlock(&relay_channels_mutex);
+			return -ENOMEM;
 		}
-		mutex_unlock(&relay_channels_mutex);
-		break;
-	case CPU_DEAD:
-	case CPU_DEAD_FROZEN:
-		/* No need to flush the cpu : will be flushed upon
-		 * final relay_flush() call. */
-		break;
+		*per_cpu_ptr(chan->buf, cpu) = buf;
 	}
-	return NOTIFY_OK;
+	mutex_unlock(&relay_channels_mutex);
+	return 0;
 }
 
 /**
@@ -583,6 +563,7 @@ struct rchan *relay_open(const char *base_filename,
 {
 	unsigned int i;
 	struct rchan *chan;
+	struct rchan_buf *buf;
 
 	if (!(subbuf_size && n_subbufs))
 		return NULL;
@@ -593,6 +574,7 @@ struct rchan *relay_open(const char *base_filename,
 	if (!chan)
 		return NULL;
 
+	chan->buf = alloc_percpu(struct rchan_buf *);
 	chan->version = RELAYFS_CHANNEL_VERSION;
 	chan->n_subbufs = n_subbufs;
 	chan->subbuf_size = subbuf_size;
@@ -608,9 +590,10 @@ struct rchan *relay_open(const char *base_filename,
 
 	mutex_lock(&relay_channels_mutex);
 	for_each_online_cpu(i) {
-		chan->buf[i] = relay_open_buf(chan, i);
-		if (!chan->buf[i])
+		buf = relay_open_buf(chan, i);
+		if (!buf)
 			goto free_bufs;
+		*per_cpu_ptr(chan->buf, i) = buf;
 	}
 	list_add(&chan->list, &relay_channels);
 	mutex_unlock(&relay_channels_mutex);
@@ -619,8 +602,8 @@ struct rchan *relay_open(const char *base_filename,
 
 free_bufs:
 	for_each_possible_cpu(i) {
-		if (chan->buf[i])
-			relay_close_buf(chan->buf[i]);
+		if ((buf = *per_cpu_ptr(chan->buf, i)))
+			relay_close_buf(buf);
 	}
 
 	kref_put(&chan->kref, relay_destroy_channel);
@@ -666,6 +649,7 @@ int relay_late_setup_files(struct rchan *chan,
 	unsigned int i, curr_cpu;
 	unsigned long flags;
 	struct dentry *dentry;
+	struct rchan_buf *buf;
 	struct rchan_percpu_buf_dispatcher disp;
 
 	if (!chan || !base_filename)
@@ -684,10 +668,11 @@ int relay_late_setup_files(struct rchan *chan,
 
 	if (chan->is_global) {
 		err = -EINVAL;
-		if (!WARN_ON_ONCE(!chan->buf[0])) {
-			dentry = relay_create_buf_file(chan, chan->buf[0], 0);
+		buf = *per_cpu_ptr(chan->buf, 0);
+		if (!WARN_ON_ONCE(!buf)) {
+			dentry = relay_create_buf_file(chan, buf, 0);
 			if (dentry && !WARN_ON_ONCE(!chan->is_global)) {
-				relay_set_buf_dentry(chan->buf[0], dentry);
+				relay_set_buf_dentry(buf, dentry);
 				err = 0;
 			}
 		}
@@ -702,13 +687,14 @@ int relay_late_setup_files(struct rchan *chan,
 	 * on all currently online CPUs.
 	 */
 	for_each_online_cpu(i) {
-		if (unlikely(!chan->buf[i])) {
+		buf = *per_cpu_ptr(chan->buf, i);
+		if (unlikely(!buf)) {
 			WARN_ONCE(1, KERN_ERR "CPU has no buffer!\n");
 			err = -EINVAL;
 			break;
 		}
 
-		dentry = relay_create_buf_file(chan, chan->buf[i], i);
+		dentry = relay_create_buf_file(chan, buf, i);
 		if (unlikely(!dentry)) {
 			err = -EINVAL;
 			break;
@@ -716,10 +702,10 @@ int relay_late_setup_files(struct rchan *chan,
 
 		if (curr_cpu == i) {
 			local_irq_save(flags);
-			relay_set_buf_dentry(chan->buf[i], dentry);
+			relay_set_buf_dentry(buf, dentry);
 			local_irq_restore(flags);
 		} else {
-			disp.buf = chan->buf[i];
+			disp.buf = buf;
 			disp.dentry = dentry;
 			smp_mb();
 			/* relay_channels_mutex must be held, so wait.
 			 */
@@ -822,11 +808,10 @@ void relay_subbufs_consumed(struct rchan *chan,
 	if (!chan)
 		return;
 
-	if (cpu >= NR_CPUS || !chan->buf[cpu] ||
-					subbufs_consumed > chan->n_subbufs)
+	buf = *per_cpu_ptr(chan->buf, cpu);
+	if (cpu >= NR_CPUS || !buf || subbufs_consumed > chan->n_subbufs)
 		return;
 
-	buf = chan->buf[cpu];
 	if (subbufs_consumed > buf->subbufs_produced - buf->subbufs_consumed)
 		buf->subbufs_consumed = buf->subbufs_produced;
 	else
@@ -842,18 +827,19 @@ EXPORT_SYMBOL_GPL(relay_subbufs_consumed);
  */
 void relay_close(struct rchan *chan)
 {
+	struct rchan_buf *buf;
 	unsigned int i;
 
 	if (!chan)
 		return;
 
 	mutex_lock(&relay_channels_mutex);
-	if (chan->is_global && chan->buf[0])
-		relay_close_buf(chan->buf[0]);
+	if (chan->is_global && (buf = *per_cpu_ptr(chan->buf, 0)))
+		relay_close_buf(buf);
 	else
 		for_each_possible_cpu(i)
-			if (chan->buf[i])
-				relay_close_buf(chan->buf[i]);
+			if ((buf = *per_cpu_ptr(chan->buf, i)))
+				relay_close_buf(buf);
 
 	if (chan->last_toobig)
 		printk(KERN_WARNING "relay: one or more items not logged "
@@ -874,20 +860,21 @@ EXPORT_SYMBOL_GPL(relay_close);
  */
 void relay_flush(struct rchan *chan)
 {
+	struct rchan_buf *buf;
 	unsigned int i;
 
 	if (!chan)
 		return;
 
-	if (chan->is_global && chan->buf[0]) {
-		relay_switch_subbuf(chan->buf[0], 0);
+	if (chan->is_global && (buf = *per_cpu_ptr(chan->buf, 0))) {
+		relay_switch_subbuf(buf, 0);
 		return;
 	}
 
 	mutex_lock(&relay_channels_mutex);
 	for_each_possible_cpu(i)
-		if (chan->buf[i])
-			relay_switch_subbuf(chan->buf[i], 0);
+		if ((buf = *per_cpu_ptr(chan->buf, i)))
+			relay_switch_subbuf(buf, 0);
 	mutex_unlock(&relay_channels_mutex);
 }
 EXPORT_SYMBOL_GPL(relay_flush);
@@ -1377,12 +1364,3 @@ const struct file_operations relay_file_operations = {
 	.splice_read	= relay_file_splice_read,
 };
 EXPORT_SYMBOL_GPL(relay_file_operations);
-
-static __init int relay_init(void)
-{
-
-	hotcpu_notifier(relay_hotcpu_callback, 0);
-	return 0;
-}
-
-early_initcall(relay_init);
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 17caf4b63342..c372114494f5 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -700,7 +700,7 @@ void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
 	BUG();
 }
 
-static void takeover_tasklets(unsigned int cpu)
+static int takeover_tasklets(unsigned int cpu)
 {
 	/* CPU is dead, so no lock needed. */
 	local_irq_disable();
@@ -723,27 +723,12 @@ static void takeover_tasklets(unsigned int cpu)
 	raise_softirq_irqoff(HI_SOFTIRQ);
 
 	local_irq_enable();
+	return 0;
 }
+#else
+#define takeover_tasklets	NULL
 #endif /* CONFIG_HOTPLUG_CPU */
 
-static int cpu_callback(struct notifier_block *nfb, unsigned long action,
-			void *hcpu)
-{
-	switch (action) {
-#ifdef CONFIG_HOTPLUG_CPU
-	case CPU_DEAD:
-	case CPU_DEAD_FROZEN:
-		takeover_tasklets((unsigned long)hcpu);
-		break;
-#endif /* CONFIG_HOTPLUG_CPU */
-	}
-	return NOTIFY_OK;
-}
-
-static struct notifier_block cpu_nfb = {
-	.notifier_call = cpu_callback
-};
-
 static struct smp_hotplug_thread softirq_threads = {
 	.store			= &ksoftirqd,
 	.thread_should_run	= ksoftirqd_should_run,
@@ -753,8 +738,8 @@ static struct smp_hotplug_thread softirq_threads = {
 
 static __init int spawn_ksoftirqd(void)
 {
-	register_cpu_notifier(&cpu_nfb);
-
+	cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
+				  takeover_tasklets);
 	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));
 
 	return 0;
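Besides dropping the notifier, the relay.c change above converts chan->buf from an NR_CPUS-sized pointer array to a per-cpu pointer slot, so every access becomes *per_cpu_ptr(chan->buf, cpu). The idiom in isolation, with struct thing and the owner helpers as placeholders (per-cpu allocations come back zeroed, so the slots start out NULL):

#include <linux/errno.h>
#include <linux/percpu.h>

struct thing;

struct owner {
	struct thing * __percpu *bufs;	/* one pointer slot per possible cpu */
};

static int owner_init(struct owner *o)
{
	o->bufs = alloc_percpu(struct thing *);	/* all slots NULL */
	return o->bufs ? 0 : -ENOMEM;
}

static struct thing *owner_get(struct owner *o, unsigned int cpu)
{
	return *per_cpu_ptr(o->bufs, cpu);	/* read @cpu's slot */
}

static void owner_set(struct owner *o, unsigned int cpu, struct thing *t)
{
	*per_cpu_ptr(o->bufs, cpu) = t;		/* write @cpu's slot */
}

static void owner_destroy(struct owner *o)
{
	free_percpu(o->bufs);
}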
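The softirq.c conversion registers a teardown-only state: CPUHP_SOFTIRQ_DEAD has no startup callback, takeover_tasklets() runs on a control CPU after the dead CPU is gone, and the #else/#define keeps the callback expression valid when CONFIG_HOTPLUG_CPU is off. The same shape for a hypothetical subsystem, assuming a dynamically allocated prepare-stage state (CPUHP_BP_PREPARE_DYN appears only in later kernels, so its use here is an assumption):

#include <linux/cpuhotplug.h>

#ifdef CONFIG_HOTPLUG_CPU
/* Invoked on a control CPU once @cpu is completely dead. */
static int bar_dead_cpu(unsigned int cpu)
{
	/* migrate or free whatever @cpu left behind */
	return 0;
}
#else
#define bar_dead_cpu	NULL
#endif

static int __init bar_init(void)
{
	/* _nocalls: nothing to do for CPUs that are already online */
	int ret = cpuhp_setup_state_nocalls(CPUHP_BP_PREPARE_DYN, "bar:dead",
					    NULL, bar_dead_cpu);
	return ret < 0 ? ret : 0;
}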