author     Tejun Heo <tj@kernel.org>    2012-08-21 13:18:24 -0700
committer  Tejun Heo <tj@kernel.org>    2012-08-21 13:18:24 -0700
commit     e7c2f967445dd2041f0f8e3179cca22bb8bb7f79 (patch)
tree       cb6c1d3593d2497e740d313f55592f41e8ae2039
parent     e0aecdd874d78b7129a64b056c20e529e2c916df (diff)
workqueue: use mod_delayed_work() instead of __cancel + queue
Now that mod_delayed_work() is safe to call from IRQ handlers,
__cancel_delayed_work() followed by queue_delayed_work() can be
replaced with mod_delayed_work().
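
For illustration only (the foo_dev struct and foo_kick_* helpers below are hypothetical, not part of this patch), the conversion pattern looks roughly like this:

#include <linux/workqueue.h>

/* Hypothetical driver state embedding a delayed_work. */
struct foo_dev {
        struct delayed_work dwork;
};

/* Old pattern: cancel the pending timer (without sleeping), then
 * re-queue the work to get the new expiry. */
static void foo_kick_old(struct foo_dev *foo, unsigned long delay)
{
        __cancel_delayed_work(&foo->dwork);
        queue_delayed_work(system_wq, &foo->dwork, delay);
}

/* New pattern: mod_delayed_work() adjusts the timer of a pending work
 * item (or queues it if idle) in a single call and is IRQ-safe. */
static void foo_kick_new(struct foo_dev *foo, unsigned long delay)
{
        mod_delayed_work(system_wq, &foo->dwork, delay);
}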
Most conversions are straightforward except for the following.
* net/core/link_watch.c: linkwatch_schedule_work() was doing quite an
  elaborate dance around its delayed_work. Collapse it such that
  linkwatch_work is queued for immediate execution if LW_URGENT is set
  and the existing timer is kept otherwise (see the sketch after this
  item).
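
For reference, the collapsed linkwatch_schedule_work() body, matching the net/core/link_watch.c hunk at the bottom of this patch (linkwatch_work, linkwatch_flags and LW_URGENT are the existing symbols in that file), reduces to:

        /* Urgent events run immediately, preempting any pending timer;
         * non-urgent events keep whatever timer is already pending. */
        if (test_bit(LW_URGENT, &linkwatch_flags))
                mod_delayed_work(system_wq, &linkwatch_work, 0);
        else
                schedule_delayed_work(&linkwatch_work, delay);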
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Tomi Valkeinen <tomi.valkeinen@ti.com>
-rw-r--r--   block/blk-core.c                    |  6
-rw-r--r--   block/blk-throttle.c                |  7
-rw-r--r--   drivers/block/floppy.c              |  3
-rw-r--r--   drivers/infiniband/core/mad.c       | 14
-rw-r--r--   drivers/input/keyboard/qt2160.c     |  3
-rw-r--r--   drivers/input/mouse/synaptics_i2c.c |  7
-rw-r--r--   net/core/link_watch.c               | 21
7 files changed, 17 insertions, 44 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 4b4dbdfbca89..4b8b606dbb01 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -319,10 +319,8 @@ EXPORT_SYMBOL(__blk_run_queue);
  */
 void blk_run_queue_async(struct request_queue *q)
 {
-        if (likely(!blk_queue_stopped(q))) {
-                __cancel_delayed_work(&q->delay_work);
-                queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
-        }
+        if (likely(!blk_queue_stopped(q)))
+                mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
 }
 EXPORT_SYMBOL(blk_run_queue_async);
 
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 5a58e779912b..a9664fa0b609 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -929,12 +929,7 @@ throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
 
         /* schedule work if limits changed even if no bio is queued */
         if (total_nr_queued(td) || td->limits_changed) {
-                /*
-                 * We might have a work scheduled to be executed in future.
-                 * Cancel that and schedule a new one.
-                 */
-                __cancel_delayed_work(dwork);
-                queue_delayed_work(kthrotld_workqueue, dwork, delay);
+                mod_delayed_work(kthrotld_workqueue, dwork, delay);
                 throtl_log(td, "schedule work. delay=%lu jiffies=%lu",
                                 delay, jiffies);
         }
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index a7d6347aaa79..55a5bc002c06 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -672,7 +672,6 @@ static void __reschedule_timeout(int drive, const char *message)
 
         if (drive == current_reqD)
                 drive = current_drive;
-        __cancel_delayed_work(&fd_timeout);
 
         if (drive < 0 || drive >= N_DRIVE) {
                 delay = 20UL * HZ;
@@ -680,7 +679,7 @@ static void __reschedule_timeout(int drive, const char *message)
         } else
                 delay = UDP->timeout;
 
-        queue_delayed_work(floppy_wq, &fd_timeout, delay);
+        mod_delayed_work(floppy_wq, &fd_timeout, delay);
         if (UDP->flags & FD_DEBUG)
                 DPRINT("reschedule timeout %s\n", message);
         timeout_message = message;
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index b0d0bc8a6fb6..b5938147fc89 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -2013,13 +2013,11 @@ static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
                 if (time_after(mad_agent_priv->timeout,
                                mad_send_wr->timeout)) {
                         mad_agent_priv->timeout = mad_send_wr->timeout;
-                        __cancel_delayed_work(&mad_agent_priv->timed_work);
                         delay = mad_send_wr->timeout - jiffies;
                         if ((long)delay <= 0)
                                 delay = 1;
-                        queue_delayed_work(mad_agent_priv->qp_info->
-                                           port_priv->wq,
-                                           &mad_agent_priv->timed_work, delay);
+                        mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
+                                         &mad_agent_priv->timed_work, delay);
                 }
         }
 }
@@ -2052,11 +2050,9 @@ static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
         list_add(&mad_send_wr->agent_list, list_item);
 
         /* Reschedule a work item if we have a shorter timeout */
-        if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) {
-                __cancel_delayed_work(&mad_agent_priv->timed_work);
-                queue_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
-                                   &mad_agent_priv->timed_work, delay);
-        }
+        if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list)
+                mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
+                                 &mad_agent_priv->timed_work, delay);
 }
 
diff --git a/drivers/input/keyboard/qt2160.c b/drivers/input/keyboard/qt2160.c
index e7a5e36e1203..76b7d430d03a 100644
--- a/drivers/input/keyboard/qt2160.c
+++ b/drivers/input/keyboard/qt2160.c
@@ -156,8 +156,7 @@ static irqreturn_t qt2160_irq(int irq, void *_qt2160)
 
         spin_lock_irqsave(&qt2160->lock, flags);
 
-        __cancel_delayed_work(&qt2160->dwork);
-        schedule_delayed_work(&qt2160->dwork, 0);
+        mod_delayed_work(system_wq, &qt2160->dwork, 0);
 
         spin_unlock_irqrestore(&qt2160->lock, flags);
 
diff --git a/drivers/input/mouse/synaptics_i2c.c b/drivers/input/mouse/synaptics_i2c.c
index f14675702c0f..063a174d3a88 100644
--- a/drivers/input/mouse/synaptics_i2c.c
+++ b/drivers/input/mouse/synaptics_i2c.c
@@ -376,12 +376,7 @@ static void synaptics_i2c_reschedule_work(struct synaptics_i2c *touch,
 
         spin_lock_irqsave(&touch->lock, flags);
 
-        /*
-         * If work is already scheduled then subsequent schedules will not
-         * change the scheduled time that's why we have to cancel it first.
-         */
-        __cancel_delayed_work(&touch->dwork);
-        schedule_delayed_work(&touch->dwork, delay);
+        mod_delayed_work(system_wq, &touch->dwork, delay);
 
         spin_unlock_irqrestore(&touch->lock, flags);
 }
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index c3519c6d1b16..8e397a69005a 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -120,22 +120,13 @@ static void linkwatch_schedule_work(int urgent)
                 delay = 0;
 
         /*
-         * This is true if we've scheduled it immeditately or if we don't
-         * need an immediate execution and it's already pending.
+         * If urgent, schedule immediate execution; otherwise, don't
+         * override the existing timer.
          */
-        if (schedule_delayed_work(&linkwatch_work, delay) == !delay)
-                return;
-
-        /* Don't bother if there is nothing urgent. */
-        if (!test_bit(LW_URGENT, &linkwatch_flags))
-                return;
-
-        /* It's already running which is good enough. */
-        if (!__cancel_delayed_work(&linkwatch_work))
-                return;
-
-        /* Otherwise we reschedule it again for immediate execution. */
-        schedule_delayed_work(&linkwatch_work, 0);
+        if (test_bit(LW_URGENT, &linkwatch_flags))
+                mod_delayed_work(system_wq, &linkwatch_work, 0);
+        else
+                schedule_delayed_work(&linkwatch_work, delay);
 }