author      Lai Jiangshan <laijs@cn.fujitsu.com>    2014-05-20 17:46:27 +0800
committer   Tejun Heo <tj@kernel.org>               2014-05-20 10:59:30 -0400
commit      9625ab1727743f6a164df26b7b1eeeced7380b42
tree        d3da8ffc218ca3077ebbc140eb7f345a2bae38f1    /kernel/workqueue.c
parent      cf416171e7e1d966111f53bdae82f51af05e7bf8
workqueue: use manager lock only to protect worker_idr
worker_idr is tightly bound to the manager role and is only ever accessed
while the manager lock is held, so pool->lock is not needed to protect it.
Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--   kernel/workqueue.c   34
1 file changed, 6 insertions(+), 28 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index c8411085466f..910d963f6b76 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -124,8 +124,7 @@ enum {
  *     cpu or grabbing pool->lock is enough for read access.  If
  *     POOL_DISASSOCIATED is set, it's identical to L.
  *
- * MG: pool->manager_mutex and pool->lock protected.  Writes require both
- *     locks.  Reads can happen under either lock.
+ * M: pool->manager_mutex protected.
  *
  * PL: wq_pool_mutex protected.
  *
@@ -164,7 +163,7 @@ struct worker_pool {
 	/* see manage_workers() for details on the two manager mutexes */
 	struct mutex		manager_arb;	/* manager arbitration */
 	struct mutex		manager_mutex;	/* manager exclusion */
-	struct idr		worker_idr;	/* MG: worker IDs and iteration */
+	struct idr		worker_idr;	/* M: worker IDs and iteration */

 	struct workqueue_attrs	*attrs;		/* I: worker attributes */
 	struct hlist_node	hash_node;	/* PL: unbound_pool_hash node */
@@ -340,16 +339,6 @@ static void copy_workqueue_attrs(struct workqueue_attrs *to,
 			   lockdep_is_held(&wq->mutex),			\
 			   "sched RCU or wq->mutex should be held")

-#ifdef CONFIG_LOCKDEP
-#define assert_manager_or_pool_lock(pool)				\
-	WARN_ONCE(debug_locks &&					\
-		  !lockdep_is_held(&(pool)->manager_mutex) &&		\
-		  !lockdep_is_held(&(pool)->lock),			\
-		  "pool->manager_mutex or ->lock should be held")
-#else
-#define assert_manager_or_pool_lock(pool)	do { } while (0)
-#endif
-
 #define for_each_cpu_worker_pool(pool, cpu)				\
 	for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0];		\
 	     (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
@@ -378,14 +367,14 @@ static void copy_workqueue_attrs(struct workqueue_attrs *to,
  * @wi: integer used for iteration
  * @pool: worker_pool to iterate workers of
  *
- * This must be called with either @pool->manager_mutex or ->lock held.
+ * This must be called with @pool->manager_mutex.
  *
  * The if/else clause exists only for the lockdep assertion and can be
  * ignored.
  */
 #define for_each_pool_worker(worker, wi, pool)				\
 	idr_for_each_entry(&(pool)->worker_idr, (worker), (wi))		\
-		if (({ assert_manager_or_pool_lock((pool)); false; })) { } \
+		if (({ lockdep_assert_held(&pool->manager_mutex); false; })) { } \
 		else
@@ -1725,13 +1714,7 @@ static struct worker *create_worker(struct worker_pool *pool)
 	 * ID is needed to determine kthread name.  Allocate ID first
 	 * without installing the pointer.
 	 */
-	idr_preload(GFP_KERNEL);
-	spin_lock_irq(&pool->lock);
-
-	id = idr_alloc(&pool->worker_idr, NULL, 0, 0, GFP_NOWAIT);
-
-	spin_unlock_irq(&pool->lock);
-	idr_preload_end();
+	id = idr_alloc(&pool->worker_idr, NULL, 0, 0, GFP_KERNEL);
 	if (id < 0)
 		goto fail;
@@ -1773,18 +1756,13 @@ static struct worker *create_worker(struct worker_pool *pool)
 		worker->flags |= WORKER_UNBOUND;

 	/* successful, commit the pointer to idr */
-	spin_lock_irq(&pool->lock);
 	idr_replace(&pool->worker_idr, worker, worker->id);
-	spin_unlock_irq(&pool->lock);

 	return worker;

 fail:
-	if (id >= 0) {
-		spin_lock_irq(&pool->lock);
+	if (id >= 0)
 		idr_remove(&pool->worker_idr, id);
-		spin_unlock_irq(&pool->lock);
-	}
 	kfree(worker);
 	return NULL;
 }
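The whole change reduces to one pattern: an idr whose every access happens
under a single sleepable mutex needs neither a spinlock nor the
idr_preload()/GFP_NOWAIT dance. Below is a minimal, self-contained sketch of
that pattern in kernel-style C. It is illustrative only: thing_pool, thing
and the helper functions are hypothetical names, not anything in
workqueue.c.

/*
 * Sketch: an idr protected by exactly one sleepable mutex.  All names
 * here are hypothetical; this is not workqueue.c code.
 */
#include <linux/idr.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/printk.h>
#include <linux/slab.h>

struct thing {
	int id;
};

struct thing_pool {
	struct mutex manager_mutex;	/* protects ids below */
	struct idr ids;			/* M: thing IDs and iteration */
};

static void thing_pool_init(struct thing_pool *pool)
{
	mutex_init(&pool->manager_mutex);
	idr_init(&pool->ids);
}

/* caller must hold pool->manager_mutex */
static struct thing *thing_create(struct thing_pool *pool)
{
	struct thing *t;
	int id;

	lockdep_assert_held(&pool->manager_mutex);

	/*
	 * The mutex lets us sleep, so idr_alloc() can take GFP_KERNEL
	 * directly -- no idr_preload() + GFP_NOWAIT under a spinlock.
	 * Reserve the ID first; install the real pointer only once the
	 * object is fully constructed.
	 */
	id = idr_alloc(&pool->ids, NULL, 0, 0, GFP_KERNEL);
	if (id < 0)
		return NULL;

	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (!t) {
		idr_remove(&pool->ids, id);
		return NULL;
	}
	t->id = id;

	idr_replace(&pool->ids, t, id);	/* commit the pointer */
	return t;
}

/* caller must hold pool->manager_mutex */
static void thing_print_all(struct thing_pool *pool)
{
	struct thing *t;
	int id;

	/* one lock to assert, so plain lockdep_assert_held() suffices */
	lockdep_assert_held(&pool->manager_mutex);

	idr_for_each_entry(&pool->ids, t, id)
		pr_info("thing %d\n", t->id);
}

The two-step idr_alloc(NULL) then idr_replace() sequence mirrors
create_worker(): the ID is reserved before the object is fully constructed
(the real code needs it to name the kthread) and the pointer is committed
only after setup can no longer fail.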