author | Christoph Hellwig <hch@lst.de> | 2014-05-27 20:59:47 +0200
---|---|---
committer | Jens Axboe <axboe@fb.com> | 2014-05-28 09:49:21 -0600
commit | 5dee857720db15e2c8ef0c03f7eeac00c4c63cb2 (patch) |
tree | 7607431cfe22086dadbf8c74f2b25ee8555f5bbd /block |
parent | 4ce01dd1a07d9cf3eaf44fbf4ea9a61b11badccc (diff) |
blk-mq: initialize request in __blk_mq_alloc_request
Both callers of __blk_mq_alloc_request want to initialize the request, so
lift the initialization into the common path.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
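The change follows a common refactoring pattern: when every caller of an allocation helper has to run the same follow-up initialization, the initialization can be hoisted into the helper, shrinking the call sites and making it impossible for a future caller to forget the step. A minimal user-space C sketch of the idea, with illustrative stand-in names (the struct request below is not the kernel's, and request_ctx_init() merely plays the role of blk_mq_rq_ctx_init()):

```c
#include <stdlib.h>

/* Illustrative stand-in, not the kernel's struct request. */
struct request {
	int tag;
	unsigned int cmd_flags;
};

/* Plays the role of blk_mq_rq_ctx_init(): the shared initialization
 * that both call sites previously duplicated after allocating. */
static void request_ctx_init(struct request *rq, int rw)
{
	rq->cmd_flags |= rw;
}

/* After the patch, allocation and initialization live together, so
 * callers get back a fully initialized request or NULL. */
static struct request *alloc_request(int tag, int rw)
{
	struct request *rq = calloc(1, sizeof(*rq));

	if (!rq)
		return NULL;
	rq->tag = tag;
	request_ctx_init(rq, rw);	/* lifted out of the callers */
	return rq;
}

int main(void)
{
	struct request *rq = alloc_request(0, 1);
	int ok = (rq != NULL);

	free(rq);
	return !ok;
}
```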
Diffstat (limited to 'block')
-rw-r--r-- | block/blk-mq.c | 62 |
1 file changed, 30 insertions(+), 32 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 63d581d72a70..04ef7ecb3c7f 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -92,30 +92,6 @@ static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
 	clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
 }
 
-static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx,
-					      struct blk_mq_ctx *ctx,
-					      gfp_t gfp, bool reserved)
-{
-	struct request *rq;
-	unsigned int tag;
-
-	tag = blk_mq_get_tag(hctx, &ctx->last_tag, gfp, reserved);
-	if (tag != BLK_MQ_TAG_FAIL) {
-		rq = hctx->tags->rqs[tag];
-
-		rq->cmd_flags = 0;
-		if (blk_mq_tag_busy(hctx)) {
-			rq->cmd_flags = REQ_MQ_INFLIGHT;
-			atomic_inc(&hctx->nr_active);
-		}
-
-		rq->tag = tag;
-		return rq;
-	}
-
-	return NULL;
-}
-
 static int blk_mq_queue_enter(struct request_queue *q)
 {
 	int ret;
@@ -263,6 +239,32 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
 	ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
 }
 
+static struct request *
+__blk_mq_alloc_request(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
+		struct blk_mq_ctx *ctx, int rw, gfp_t gfp, bool reserved)
+{
+	struct request *rq;
+	unsigned int tag;
+
+	tag = blk_mq_get_tag(hctx, &ctx->last_tag, gfp, reserved);
+	if (tag != BLK_MQ_TAG_FAIL) {
+		rq = hctx->tags->rqs[tag];
+
+		rq->cmd_flags = 0;
+		if (blk_mq_tag_busy(hctx)) {
+			rq->cmd_flags = REQ_MQ_INFLIGHT;
+			atomic_inc(&hctx->nr_active);
+		}
+
+		rq->tag = tag;
+		blk_mq_rq_ctx_init(q, ctx, rq, rw);
+		return rq;
+	}
+
+	return NULL;
+}
+
+
 static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
 						int rw, gfp_t gfp,
 						bool reserved)
@@ -273,12 +275,10 @@ static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
 		struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 		struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
-		rq = __blk_mq_alloc_request(hctx, ctx, gfp & ~__GFP_WAIT,
+		rq = __blk_mq_alloc_request(q, hctx, ctx, rw, gfp & ~__GFP_WAIT,
 						reserved);
-		if (rq) {
-			blk_mq_rq_ctx_init(q, ctx, rq, rw);
+		if (rq)
 			break;
-		}
 
 		if (gfp & __GFP_WAIT) {
 			__blk_mq_run_hw_queue(hctx);
@@ -1178,10 +1178,8 @@ static struct request *blk_mq_map_request(struct request_queue *q,
 		rw |= REQ_SYNC;
 
 	trace_block_getrq(q, bio, rw);
-	rq = __blk_mq_alloc_request(hctx, ctx, GFP_ATOMIC, false);
-	if (likely(rq))
-		blk_mq_rq_ctx_init(q, ctx, rq, rw);
-	else {
+	rq = __blk_mq_alloc_request(q, hctx, ctx, rw, GFP_ATOMIC, false);
+	if (unlikely(!rq)) {
 		blk_mq_put_ctx(ctx);
 		trace_block_sleeprq(q, bio, rw);
 		rq = blk_mq_alloc_request_pinned(q, rw, __GFP_WAIT|GFP_ATOMIC,
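A small detail in the last hunk: once the initialization moves into the helper, the success branch in blk_mq_map_request() vanishes, and the remaining failure check is annotated unlikely() rather than wrapping the success path in likely(). Both macros are thin wrappers around GCC's __builtin_expect() (defined in include/linux/compiler.h); a standalone sketch of how the hint reads, with the macro definitions reproduced so the example compiles outside the kernel tree:

```c
#include <stdio.h>

/* The kernel's branch-prediction hints, reproduced from
 * include/linux/compiler.h so this example is self-contained. */
#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

int main(void)
{
	void *rq = NULL;	/* pretend the fast-path allocation failed */

	/* Mirrors the patch's "if (unlikely(!rq))": tell the compiler
	 * the allocation-failure branch is the cold one. */
	if (unlikely(!rq))
		puts("slow path: retry with a blocking allocation");
	else
		puts("fast path: use the request");
	return 0;
}
```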