author		Jens Axboe <jaxboe@fusionio.com>	2011-03-02 11:08:00 -0500
committer	Jens Axboe <jaxboe@fusionio.com>	2011-03-10 08:45:54 +0100
commit		3cca6dc1c81e2407928dc4c6105252146fd3924f
tree		b78b0d93e7c02abdc37e1d5a6204ab6b94d56fd4
parent		53f22956effe1c9e7961b8c6e4362ecca5e460b7
block: add API for delaying work/request_fn a little bit
Currently we use plugging for that, but as plugging is going away,
we need an alternative mechanism.
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
-rw-r--r--	block/blk-core.c	29
-rw-r--r--	include/linux/blkdev.h	 6

2 files changed, 35 insertions, 0 deletions
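For context, a minimal, hypothetical sketch of the kind of caller this API is aimed at: a driver ->request_fn that runs out of resources and backs off with blk_delay_queue() instead of plugging. The mydrv_* helpers and the 3 ms delay are illustrative assumptions, not part of this patch.

#include <linux/blkdev.h>

#define MYDRV_DELAY_MS	3	/* arbitrary back-off; purely illustrative */

/* Illustrative driver-private helpers (assumed, not real API). */
static bool mydrv_can_issue(void *priv);
static void mydrv_issue(void *priv, struct request *rq);

/* Hypothetical queue handler, called with q->queue_lock held. */
static void mydrv_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		if (!mydrv_can_issue(q->queuedata)) {
			/*
			 * No resources right now: put the request back and
			 * ask the block layer to re-run the queue shortly,
			 * instead of relying on plugging.
			 */
			blk_requeue_request(q, rq);
			blk_delay_queue(q, MYDRV_DELAY_MS);
			return;
		}
		mydrv_issue(q->queuedata, rq);
	}
}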
diff --git a/block/blk-core.c b/block/blk-core.c
index 3cc17e6064d6..e958c7a1e462 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -197,6 +197,32 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
 }
 EXPORT_SYMBOL(blk_dump_rq_flags);
 
+static void blk_delay_work(struct work_struct *work)
+{
+	struct request_queue *q;
+
+	q = container_of(work, struct request_queue, delay_work.work);
+	spin_lock_irq(q->queue_lock);
+	q->request_fn(q);
+	spin_unlock_irq(q->queue_lock);
+}
+
+/**
+ * blk_delay_queue - restart queueing after defined interval
+ * @q:		The &struct request_queue in question
+ * @msecs:	Delay in msecs
+ *
+ * Description:
+ *   Sometimes queueing needs to be postponed for a little while, to allow
+ *   resources to come back. This function will make sure that queueing is
+ *   restarted around the specified time.
+ */
+void blk_delay_queue(struct request_queue *q, unsigned long msecs)
+{
+	schedule_delayed_work(&q->delay_work, msecs_to_jiffies(msecs));
+}
+EXPORT_SYMBOL(blk_delay_queue);
+
 /*
  * "plug" the device if there are no outstanding requests: this will
  * force the transfer to start only after we have put all the requests
@@ -363,6 +389,7 @@ EXPORT_SYMBOL(blk_start_queue);
 void blk_stop_queue(struct request_queue *q)
 {
 	blk_remove_plug(q);
+	cancel_delayed_work(&q->delay_work);
 	queue_flag_set(QUEUE_FLAG_STOPPED, q);
 }
 EXPORT_SYMBOL(blk_stop_queue);
@@ -387,6 +414,7 @@ void blk_sync_queue(struct request_queue *q)
 	del_timer_sync(&q->timeout);
 	cancel_work_sync(&q->unplug_work);
 	throtl_shutdown_timer_wq(q);
+	cancel_delayed_work_sync(&q->delay_work);
 }
 EXPORT_SYMBOL(blk_sync_queue);
@@ -534,6 +562,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 	INIT_LIST_HEAD(&q->flush_queue[1]);
 	INIT_LIST_HEAD(&q->flush_data_in_flight);
 	INIT_WORK(&q->unplug_work, blk_unplug_work);
+	INIT_DELAYED_WORK(&q->delay_work, blk_delay_work);
 
 	kobject_init(&q->kobj, &blk_queue_ktype);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index e3ee74fc5903..f55b2a8b6610 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -300,6 +300,11 @@ struct request_queue
 	unsigned long		unplug_delay;	/* After this many jiffies */
 	struct work_struct	unplug_work;
 
+	/*
+	 * Delayed queue handling
+	 */
+	struct delayed_work	delay_work;
+
 	struct backing_dev_info	backing_dev_info;
 
 	/*
@@ -677,6 +682,7 @@ extern int blk_insert_cloned_request(struct request_queue *q,
 extern void blk_plug_device(struct request_queue *);
 extern void blk_plug_device_unlocked(struct request_queue *);
 extern int blk_remove_plug(struct request_queue *);
+extern void blk_delay_queue(struct request_queue *, unsigned long);
 extern void blk_recount_segments(struct request_queue *, struct bio *);
 extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
 			  unsigned int, void __user *);
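A note on the two cancellation sites, plus a hedged teardown sketch: blk_stop_queue() uses the non-blocking cancel_delayed_work(), presumably because it is called with the queue lock held, while blk_sync_queue() may sleep and uses cancel_delayed_work_sync() so that no blk_delay_work() instance can still be running when it returns. A driver removal path built on this could look roughly like the following; mydrv_remove() and the overall ordering are assumptions for illustration, not taken from the patch.

#include <linux/blkdev.h>
#include <linux/spinlock.h>

/* Hypothetical removal path for a driver that uses blk_delay_queue(). */
static void mydrv_remove(struct request_queue *q)
{
	unsigned long flags;

	/* Non-blocking cancel; safe with the queue lock held. */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_stop_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	/* May sleep: waits for any pending delay_work to finish. */
	blk_sync_queue(q);

	blk_cleanup_queue(q);
}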