author     Nathan Lynch <ntl@pobox.com>             2006-01-08 01:05:12 -0800
committer  Linus Torvalds <torvalds@g5.osdl.org>    2006-01-08 20:14:08 -0800
commit     f756d5e256059018d753f0ba79980ebeb87a1bc0
tree       e96ae0117aaf6dea19dad88079c3229f679c1948  /kernel
parent     945f390f02ce44a13aefc6d9449c99f33c9286a5
[PATCH] fix workqueue oops during cpu offline
Use first_cpu(cpu_possible_map) for the single-thread workqueue case. We
used to hardcode 0, but that broke on systems where !cpu_possible(0) once
workqueue_struct->cpu_workqueue_struct was converted from a static array to
alloc_percpu.
Commit id bce61dd49d6ba7799be2de17c772e4c701558f14 ("Fix hardcoded cpu=0 in
workqueue for per_cpu_ptr() calls") fixed that for Ben's funky sparc64
system, but it regressed my Power5. Offlining cpu 0 oopses upon the next
call to queue_work for a single-thread workqueue, because now we try to
manipulate per_cpu_ptr(wq->cpu_wq, 1), which is uninitialized.
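To make the oops mechanism concrete, here is a minimal user-space sketch (an illustration only, not kernel code: NR_CPUS, struct cpu_wq, online[] and any_online() are made-up stand-ins for the real per-cpu machinery). A single-thread workqueue only ever initializes one per-cpu slot, so any path that recomputes its index from the online map can land on allocated-but-never-initialized memory once the original cpu goes offline:

#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 4

/* Made-up stand-in for the kernel's per-cpu cpu_workqueue_struct. */
struct cpu_wq { int initialized; };

static int online[NR_CPUS] = { 0, 1, 1, 1 };	/* cpu 0 has just been offlined */

/* Stand-in for any_online_cpu(cpu_online_map): lowest online index. */
static int any_online(void)
{
	for (int i = 0; i < NR_CPUS; i++)
		if (online[i])
			return i;
	return -1;
}

int main(void)
{
	/* alloc_percpu stand-in: storage exists for every possible cpu... */
	struct cpu_wq *wq = calloc(NR_CPUS, sizeof(*wq));

	/* ...but a single-thread workqueue sets up only ONE slot at create time. */
	wq[0].initialized = 1;

	/* Buggy queueing path: recompute the slot from the online map, which
	 * changes under cpu hotplug.  With cpu 0 offline this now picks 1. */
	int cpu = any_online();
	if (!wq[cpu].initialized)
		printf("queueing on slot %d, which was never initialized -> oops\n", cpu);

	free(wq);
	return 0;
}

In the real kernel the equivalent slot holds a spinlock and list heads that were never set up, which is why the next queue_work() call oopses.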
So we need to establish an unchanging "slot" for single-thread workqueues
which will have a valid percpu allocation. Since alloc_percpu keys off of
cpu_possible_map, which must not change after initialization, make this
slot == first_cpu(cpu_possible_map).
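The resulting pattern, again as a hedged user-space sketch (first_possible() and singlethread_slot are illustrative stand-ins for first_cpu(cpu_possible_map) and the singlethread_cpu variable added by the patch below): choose the slot once from the possible map, which is fixed after boot, and reuse that constant in every single-thread path:

#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 4

struct cpu_wq { int initialized; };

static int possible[NR_CPUS] = { 1, 1, 1, 1 };	/* cpu_possible_map stand-in: fixed after boot */
static int singlethread_slot;			/* chosen once, never recomputed */

/* Stand-in for first_cpu(cpu_possible_map): lowest possible index. */
static int first_possible(void)
{
	for (int i = 0; i < NR_CPUS; i++)
		if (possible[i])
			return i;
	return -1;
}

int main(void)
{
	struct cpu_wq *wq = calloc(NR_CPUS, sizeof(*wq));

	/* Init time: pick the slot from the possible map, which never changes. */
	singlethread_slot = first_possible();
	wq[singlethread_slot].initialized = 1;	/* the only slot a single-thread wq sets up */

	/* Every later queue/flush/destroy path reuses the same constant slot,
	 * so offlining cpus can no longer steer work onto uninitialized memory. */
	printf("single-thread work always uses slot %d (initialized=%d)\n",
	       singlethread_slot, wq[singlethread_slot].initialized);

	free(wq);
	return 0;
}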
Signed-off-by: Nathan Lynch <ntl@pobox.com>
Cc: <stable@kernel.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/workqueue.c | 16
1 file changed, 10 insertions(+), 6 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index e72fb6478da6..82c4fa70595c 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -29,7 +29,8 @@
 #include <linux/kthread.h>
 
 /*
- * The per-CPU workqueue (if single thread, we always use cpu 0's).
+ * The per-CPU workqueue (if single thread, we always use the first
+ * possible cpu).
  *
  * The sequence counters are for flush_scheduled_work(). It wants to wait
  * until until all currently-scheduled works are completed, but it doesn't
@@ -69,6 +70,8 @@ struct workqueue_struct {
 static DEFINE_SPINLOCK(workqueue_lock);
 static LIST_HEAD(workqueues);
 
+static int singlethread_cpu;
+
 /* If it's single threaded, it isn't in the list of workqueues. */
 static inline int is_single_threaded(struct workqueue_struct *wq)
 {
@@ -102,7 +105,7 @@ int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
 
 	if (!test_and_set_bit(0, &work->pending)) {
 		if (unlikely(is_single_threaded(wq)))
-			cpu = any_online_cpu(cpu_online_map);
+			cpu = singlethread_cpu;
 		BUG_ON(!list_empty(&work->entry));
 		__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
 		ret = 1;
@@ -118,7 +121,7 @@ static void delayed_work_timer_fn(unsigned long __data)
 	int cpu = smp_processor_id();
 
 	if (unlikely(is_single_threaded(wq)))
-		cpu = any_online_cpu(cpu_online_map);
+		cpu = singlethread_cpu;
 
 	__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
 }
@@ -267,7 +270,7 @@ void fastcall flush_workqueue(struct workqueue_struct *wq)
 
 	if (is_single_threaded(wq)) {
 		/* Always use first cpu's area. */
-		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, any_online_cpu(cpu_online_map)));
+		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, singlethread_cpu));
 	} else {
 		int cpu;
 
@@ -325,7 +328,7 @@ struct workqueue_struct *__create_workqueue(const char *name,
 	lock_cpu_hotplug();
 	if (singlethread) {
 		INIT_LIST_HEAD(&wq->list);
-		p = create_workqueue_thread(wq, any_online_cpu(cpu_online_map));
+		p = create_workqueue_thread(wq, singlethread_cpu);
 		if (!p)
 			destroy = 1;
 		else
@@ -379,7 +382,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
 	/* We don't need the distraction of CPUs appearing and vanishing. */
 	lock_cpu_hotplug();
 	if (is_single_threaded(wq))
-		cleanup_workqueue_thread(wq, any_online_cpu(cpu_online_map));
+		cleanup_workqueue_thread(wq, singlethread_cpu);
 	else {
 		for_each_online_cpu(cpu)
 			cleanup_workqueue_thread(wq, cpu);
@@ -567,6 +570,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 
 void init_workqueues(void)
 {
+	singlethread_cpu = first_cpu(cpu_possible_map);
 	hotcpu_notifier(workqueue_cpu_callback, 0);
 	keventd_wq = create_workqueue("events");
 	BUG_ON(!keventd_wq);