author     Tejun Heo <tj@kernel.org>    2010-06-29 10:07:14 +0200
committer  Tejun Heo <tj@kernel.org>    2010-06-29 10:07:14 +0200
commit     dcd989cb73ab0f7b722d64ab6516f101d9f43f88 (patch)
tree       8c2f14e708367cb67dd9d29f2da0f7e5f454cf31 /kernel
parent     d320c03830b17af64e4547075003b1eeb274bc6c (diff)
workqueue: implement several utility APIs
Implement the following utility APIs.
* workqueue_set_max_active() : adjust max_active of a wq
* workqueue_congested()      : test whether a wq is congested
* work_cpu()                 : determine the last / current cpu of a work
* work_busy()                : query whether a work is busy
* Anton Blanchard fixed missing ret initialization in work_busy().
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Anton Blanchard <anton@samba.org>
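
A minimal caller-side sketch of the two workqueue-level helpers added here. The workqueue name, work function, and module boilerplate below are hypothetical and not part of this patch; the sketch only assumes the interfaces introduced by this commit plus long-standing workqueue APIs (create_workqueue(), queue_work()).

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;	/* hypothetical example wq */
static struct work_struct demo_work;

static void demo_work_fn(struct work_struct *work)
{
	/* real work would go here */
}

static int __init demo_init(void)
{
	demo_wq = create_workqueue("demo_wq");
	if (!demo_wq)
		return -ENOMEM;

	INIT_WORK(&demo_work, demo_work_fn);
	queue_work(demo_wq, &demo_work);

	/*
	 * Raise the concurrency limit at runtime; the value is clamped and,
	 * for freezable wqs that are currently frozen, only the saved value
	 * is updated until the wq is thawed.
	 */
	workqueue_set_max_active(demo_wq, 16);

	/* Purely advisory: the answer may already be stale when read. */
	if (workqueue_congested(raw_smp_processor_id(), demo_wq))
		pr_info("demo_wq has delayed works on this cpu\n");

	return 0;
}

static void __exit demo_exit(void)
{
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");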
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/workqueue.c | 108
1 file changed, 107 insertions(+), 1 deletion(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 16ce617974d2..c1aa65c2ff38 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -203,7 +203,7 @@ struct workqueue_struct {
 	cpumask_var_t		mayday_mask;	/* cpus requesting rescue */
 	struct worker		*rescuer;	/* I: rescue worker */
 
-	int			saved_max_active; /* I: saved cwq max_active */
+	int			saved_max_active; /* W: saved cwq max_active */
 	const char		*name;		/* I: workqueue name */
 #ifdef CONFIG_LOCKDEP
 	struct lockdep_map	lockdep_map;
@@ -2675,6 +2675,112 @@ void destroy_workqueue(struct workqueue_struct *wq)
 }
 EXPORT_SYMBOL_GPL(destroy_workqueue);
 
+/**
+ * workqueue_set_max_active - adjust max_active of a workqueue
+ * @wq: target workqueue
+ * @max_active: new max_active value.
+ *
+ * Set max_active of @wq to @max_active.
+ *
+ * CONTEXT:
+ * Don't call from IRQ context.
+ */
+void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
+{
+	unsigned int cpu;
+
+	max_active = wq_clamp_max_active(max_active, wq->name);
+
+	spin_lock(&workqueue_lock);
+
+	wq->saved_max_active = max_active;
+
+	for_each_possible_cpu(cpu) {
+		struct global_cwq *gcwq = get_gcwq(cpu);
+
+		spin_lock_irq(&gcwq->lock);
+
+		if (!(wq->flags & WQ_FREEZEABLE) ||
+		    !(gcwq->flags & GCWQ_FREEZING))
+			get_cwq(gcwq->cpu, wq)->max_active = max_active;
+
+		spin_unlock_irq(&gcwq->lock);
+	}
+
+	spin_unlock(&workqueue_lock);
+}
+EXPORT_SYMBOL_GPL(workqueue_set_max_active);
+
+/**
+ * workqueue_congested - test whether a workqueue is congested
+ * @cpu: CPU in question
+ * @wq: target workqueue
+ *
+ * Test whether @wq's cpu workqueue for @cpu is congested. There is
+ * no synchronization around this function and the test result is
+ * unreliable and only useful as advisory hints or for debugging.
+ *
+ * RETURNS:
+ * %true if congested, %false otherwise.
+ */
+bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq)
+{
+	struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+
+	return !list_empty(&cwq->delayed_works);
+}
+EXPORT_SYMBOL_GPL(workqueue_congested);
+
+/**
+ * work_cpu - return the last known associated cpu for @work
+ * @work: the work of interest
+ *
+ * RETURNS:
+ * CPU number if @work was ever queued. NR_CPUS otherwise.
+ */
+unsigned int work_cpu(struct work_struct *work)
+{
+	struct global_cwq *gcwq = get_work_gcwq(work);
+
+	return gcwq ? gcwq->cpu : NR_CPUS;
+}
+EXPORT_SYMBOL_GPL(work_cpu);
+
+/**
+ * work_busy - test whether a work is currently pending or running
+ * @work: the work to be tested
+ *
+ * Test whether @work is currently pending or running. There is no
+ * synchronization around this function and the test result is
+ * unreliable and only useful as advisory hints or for debugging.
+ * Especially for reentrant wqs, the pending state might hide the
+ * running state.
+ *
+ * RETURNS:
+ * OR'd bitmask of WORK_BUSY_* bits.
+ */
+unsigned int work_busy(struct work_struct *work)
+{
+	struct global_cwq *gcwq = get_work_gcwq(work);
+	unsigned long flags;
+	unsigned int ret = 0;
+
+	if (!gcwq)
+		return false;
+
+	spin_lock_irqsave(&gcwq->lock, flags);
+
+	if (work_pending(work))
+		ret |= WORK_BUSY_PENDING;
+	if (find_worker_executing_work(gcwq, work))
+		ret |= WORK_BUSY_RUNNING;
+
+	spin_unlock_irqrestore(&gcwq->lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(work_busy);
+
 /*
  * CPU hotplug.
  *
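
For illustration, a hedged sketch (not part of the patch) of how a caller might interpret the advisory results of work_busy() and work_cpu(). dump_work_state() is a hypothetical name; per the kernel-doc above, nothing here synchronizes with the work actually starting or finishing, so the output is only a debugging hint.

#include <linux/kernel.h>
#include <linux/workqueue.h>

static void dump_work_state(struct work_struct *work)
{
	unsigned int busy = work_busy(work);	/* OR'd WORK_BUSY_* bits */
	unsigned int cpu = work_cpu(work);	/* NR_CPUS if never queued */

	pr_debug("work %p:%s%s last/current cpu %u\n", work,
		 busy & WORK_BUSY_PENDING ? " pending" : "",
		 busy & WORK_BUSY_RUNNING ? " running" : "",
		 cpu);
}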