author    | Lai Jiangshan <laijs@cn.fujitsu.com>           | 2009-04-02 16:58:24 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-04-02 19:05:00 -0700
commit    | 2355b70fd59cb5be7de2052a9edeee7afb7ff099 (patch)
tree      | 93f192f9dab4748a313ec8d78041b1937e232f33 /kernel/workqueue.c
parent    | 1ee1184485df9c9a3503d3a684b911fb7c73d259 (diff)
workqueue: avoid recursion in run_workqueue()
1) lockdep will complain when run_workqueue() recurses.
2) The recursive run_workqueue() makes flush_workqueue() behave
   inconsistently with its documentation, which can hide deadlocks and
   other bugs.
3) The recursion in run_workqueue() poisons cwq->current_work, but
   flush_work(), __cancel_work_timer(), etc. need a reliable
   cwq->current_work; see the sketch just below.
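
To make point 3 concrete, here is a minimal, stand-alone C sketch of the
problem (hypothetical and heavily simplified: the struct cwq below stands in
for the kernel's cpu_workqueue_struct, strings stand in for work items, and
the depth argument models the nested call that flush_cpu_workqueue() used to
make when it ran the queue by hand):

#include <stdio.h>

/* Simplified stand-in for cpu_workqueue_struct: only the field that
 * matters here. */
struct cwq {
	const char *current_work;
};

/* Models the old, recursive run_workqueue(): depth == 0 is the worker
 * thread's normal call, depth == 1 is the nested call made when a work
 * handler flushed its own queue. */
static void run_workqueue(struct cwq *cwq, const char *work, int depth)
{
	cwq->current_work = work;
	if (depth == 0) {
		/* The handler flushes its own queue; the old code simply
		 * re-entered run_workqueue(), overwriting current_work... */
		run_workqueue(cwq, "nested-work", 1);
		/* ...and after the nested call returns, current_work is
		 * NULL even though "outer-work" is still running. */
		printf("outer-work still running, current_work = %s\n",
		       cwq->current_work ? cwq->current_work : "NULL");
	}
	cwq->current_work = NULL;
}

int main(void)
{
	struct cwq cwq = { NULL };
	run_workqueue(&cwq, "outer-work", 0);
	return 0;
}

With the recursion gone, current_work always names the work item the worker
thread is actually executing, which is exactly what flush_work() and
__cancel_work_timer() depend on.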
Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Acked-by: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Eric Dumazet <dada1@cosmosbay.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r-- | kernel/workqueue.c | 41 |
1 file changed, 11 insertions(+), 30 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 9aedd9fd825b..32f8e0d2bf5a 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -48,8 +48,6 @@ struct cpu_workqueue_struct {
 
 	struct workqueue_struct *wq;
 	struct task_struct *thread;
-
-	int run_depth;		/* Detect run_workqueue() recursion depth */
 } ____cacheline_aligned;
 
 /*
@@ -262,13 +260,6 @@ EXPORT_SYMBOL_GPL(queue_delayed_work_on);
 static void run_workqueue(struct cpu_workqueue_struct *cwq)
 {
 	spin_lock_irq(&cwq->lock);
-	cwq->run_depth++;
-	if (cwq->run_depth > 3) {
-		/* morton gets to eat his hat */
-		printk("%s: recursion depth exceeded: %d\n",
-			__func__, cwq->run_depth);
-		dump_stack();
-	}
 	while (!list_empty(&cwq->worklist)) {
 		struct work_struct *work = list_entry(cwq->worklist.next,
 						struct work_struct, entry);
@@ -311,7 +302,6 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
 		spin_lock_irq(&cwq->lock);
 		cwq->current_work = NULL;
 	}
-	cwq->run_depth--;
 	spin_unlock_irq(&cwq->lock);
 }
 
@@ -368,29 +358,20 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
 
 static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
 {
-	int active;
+	int active = 0;
+	struct wq_barrier barr;
 
-	if (cwq->thread == current) {
-		/*
-		 * Probably keventd trying to flush its own queue. So simply run
-		 * it by hand rather than deadlocking.
-		 */
-		run_workqueue(cwq);
-		active = 1;
-	} else {
-		struct wq_barrier barr;
+	WARN_ON(cwq->thread == current);
 
-		active = 0;
-		spin_lock_irq(&cwq->lock);
-		if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
-			insert_wq_barrier(cwq, &barr, &cwq->worklist);
-			active = 1;
-		}
-		spin_unlock_irq(&cwq->lock);
-
-		if (active)
-			wait_for_completion(&barr.done);
+	spin_lock_irq(&cwq->lock);
+	if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
+		insert_wq_barrier(cwq, &barr, &cwq->worklist);
+		active = 1;
 	}
+	spin_unlock_irq(&cwq->lock);
+
+	if (active)
+		wait_for_completion(&barr.done);
 
 	return active;
 }
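
One behavioural consequence of the patch, sketched below as a hypothetical
module-style example (the queue name "example" and the identifiers my_wq,
my_work and self_flushing_work are illustrative, not part of this patch): a
work handler that flushes the workqueue it is running on no longer gets the
queue run by hand; it trips the new WARN_ON(cwq->thread == current) and then
blocks forever on the barrier completion, because the barrier cannot run
until the handler itself returns.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;	/* illustrative queue */

/* A handler that flushes its own workqueue: before this patch the flush
 * recursed into run_workqueue(); after it, the flush warns and deadlocks
 * waiting for a barrier that can only run once this handler returns. */
static void self_flushing_work(struct work_struct *work)
{
	flush_workqueue(my_wq);
}

static DECLARE_WORK(my_work, self_flushing_work);

static int __init example_init(void)
{
	my_wq = create_workqueue("example");
	if (!my_wq)
		return -ENOMEM;
	queue_work(my_wq, &my_work);
	return 0;
}

static void __exit example_exit(void)
{
	destroy_workqueue(my_wq);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

Code that really needs to flush from inside a work item must do the flush on
a different workqueue (or be restructured); after this patch the old
self-flush behaviour is rejected rather than silently emulated.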