author | Bjorn Helgaas <bhelgaas@google.com> | 2013-11-18 11:00:29 -0700 |
---|---|---|
committer | Bjorn Helgaas <bhelgaas@google.com> | 2013-11-25 14:37:22 -0700 |
commit | 12997d1a999cd1b22e21a238c96780f2a55e4e13 (patch) | |
tree | 70470549b47f1a0d90861609c6ce8ca2d95e286e /kernel | |
parent | 12c3156f10c5d8c5f1fb3f0bbdb8c1ddb1d1f65c (diff) | |
Revert "workqueue: allow work_on_cpu() to be called recursively"
This reverts commit c2fda509667b0fda4372a237f5a59ea4570b1627.
c2fda509667b removed lockdep annotation from work_on_cpu() to work around
the PCI path that calls work_on_cpu() from within a work_on_cpu() work item
(PF driver .probe() method -> pci_enable_sriov() -> add VFs -> VF driver
.probe() method).
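For reference, that call chain looks roughly like the hypothetical sketch below. It is not actual kernel source: pf_probe() and vf_probe() are made-up driver methods, and the PCI core of this era is assumed to run each .probe() via work_on_cpu() so the driver runs on a CPU local to the device's NUMA node.

```c
/* Hypothetical sketch of the recursion described above; not real kernel code. */
#include <linux/module.h>
#include <linux/pci.h>

static int vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	/* VF .probe(): already executing inside a work_on_cpu() work item. */
	return 0;
}

static int pf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	/*
	 * PF .probe() also runs inside work_on_cpu().  Enabling SR-IOV adds
	 * the VFs, which binds vf_probe() to them on this same call chain,
	 * i.e. work_on_cpu() nested inside a work_on_cpu() work item.  With
	 * the lockdep annotation in flush_work(), lockdep sees a flush of a
	 * work item of the same lock class it is already running inside and
	 * reports a possible deadlock, even though this nesting is safe.
	 */
	return pci_enable_sriov(pdev, 2);
}
```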
961da7fb6b22 ("PCI: Avoid unnecessary CPU switch when calling driver
.probe() method") avoids that recursive work_on_cpu() use in a different
way, so this revert restores the work_on_cpu() lockdep annotation.
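Roughly, that PCI change only bounces the probe through work_on_cpu() when the current CPU is not already on the device's node; the nested VF probe is already on the node the outer work_on_cpu() selected, so it probes locally and never nests work_on_cpu(). A simplified sketch under those assumptions follows (probe_on_device_node() and probe_fn are made-up names, not the actual pci_call_probe() code):

```c
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/topology.h>
#include <linux/workqueue.h>

/* Sketch: run probe_fn on a CPU of the device's NUMA node, but only switch
 * CPUs via work_on_cpu() when we are not already on that node. */
static long probe_on_device_node(struct device *dev,
				 long (*probe_fn)(void *), void *arg)
{
	int node = dev_to_node(dev);

	if (node >= 0 && node != numa_node_id()) {
		unsigned int cpu;
		long ret;

		get_online_cpus();
		cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask);
		if (cpu < nr_cpu_ids)
			ret = work_on_cpu(cpu, probe_fn, arg);
		else
			ret = probe_fn(arg);	/* node has no online CPU */
		put_online_cpus();
		return ret;
	}

	/*
	 * Already on the device's node (the nested VF-probe case, since the
	 * outer work_on_cpu() moved us there): probe directly, so there is
	 * no second work_on_cpu() and no recursive flush_work() for lockdep
	 * to flag.
	 */
	return probe_fn(arg);
}
```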
Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
Acked-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/workqueue.c | 32
1 file changed, 10 insertions, 22 deletions
```diff
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 987293d03ebc..5690b8eabfbc 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2840,19 +2840,6 @@ already_gone:
 	return false;
 }
 
-static bool __flush_work(struct work_struct *work)
-{
-	struct wq_barrier barr;
-
-	if (start_flush_work(work, &barr)) {
-		wait_for_completion(&barr.done);
-		destroy_work_on_stack(&barr.work);
-		return true;
-	} else {
-		return false;
-	}
-}
-
 /**
  * flush_work - wait for a work to finish executing the last queueing instance
  * @work: the work to flush
@@ -2866,10 +2853,18 @@ static bool __flush_work(struct work_struct *work)
  */
 bool flush_work(struct work_struct *work)
 {
+	struct wq_barrier barr;
+
 	lock_map_acquire(&work->lockdep_map);
 	lock_map_release(&work->lockdep_map);
 
-	return __flush_work(work);
+	if (start_flush_work(work, &barr)) {
+		wait_for_completion(&barr.done);
+		destroy_work_on_stack(&barr.work);
+		return true;
+	} else {
+		return false;
+	}
 }
 EXPORT_SYMBOL_GPL(flush_work);
 
@@ -4814,14 +4809,7 @@ long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
 
 	INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
 	schedule_work_on(cpu, &wfc.work);
-
-	/*
-	 * The work item is on-stack and can't lead to deadlock through
-	 * flushing. Use __flush_work() to avoid spurious lockdep warnings
-	 * when work_on_cpu()s are nested.
-	 */
-	__flush_work(&wfc.work);
-
+	flush_work(&wfc.work);
 	return wfc.ret;
 }
 EXPORT_SYMBOL_GPL(work_on_cpu);
```
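After the revert, work_on_cpu() simply flushes its on-stack work item with the annotated flush_work() shown above. A minimal, hypothetical caller for reference (read_node_on() and node_of_cpu() are made-up names):

```c
#include <linux/topology.h>
#include <linux/workqueue.h>

/* Runs in a worker bound to the requested CPU, so numa_node_id()
 * reports that CPU's NUMA node. */
static long read_node_on(void *arg)
{
	return numa_node_id();
}

/* work_on_cpu() queues the callback on @cpu and waits for it via the
 * flush_work() above, lockdep annotation included. */
static long node_of_cpu(int cpu)
{
	return work_on_cpu(cpu, read_node_on, NULL);
}
```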