Diffstat (limited to 'block/blk-mq-sched.c')
-rw-r--r-- | block/blk-mq-sched.c | 45
1 file changed, 15 insertions(+), 30 deletions(-)
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 9e8d6795a8c1..09af8ff18719 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -110,15 +110,14 @@ struct request *blk_mq_sched_get_request(struct request_queue *q,
 				       struct blk_mq_alloc_data *data)
 {
 	struct elevator_queue *e = q->elevator;
-	struct blk_mq_hw_ctx *hctx;
-	struct blk_mq_ctx *ctx;
 	struct request *rq;
 
 	blk_queue_enter_live(q);
-	ctx = blk_mq_get_ctx(q);
-	hctx = blk_mq_map_queue(q, ctx->cpu);
-
-	blk_mq_set_alloc_data(data, q, data->flags, ctx, hctx);
+	data->q = q;
+	if (likely(!data->ctx))
+		data->ctx = blk_mq_get_ctx(q);
+	if (likely(!data->hctx))
+		data->hctx = blk_mq_map_queue(q, data->ctx->cpu);
 
 	if (e) {
 		data->flags |= BLK_MQ_REQ_INTERNAL;
@@ -135,8 +134,6 @@ struct request *blk_mq_sched_get_request(struct request_queue *q,
 			rq = __blk_mq_alloc_request(data, op);
 	} else {
 		rq = __blk_mq_alloc_request(data, op);
-		if (rq)
-			data->hctx->tags->rqs[rq->tag] = rq;
 	}
 
 	if (rq) {
@@ -205,7 +202,7 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
 	 * needing a restart in that case.
 	 */
 	if (!list_empty(&rq_list)) {
-		blk_mq_sched_mark_restart(hctx);
+		blk_mq_sched_mark_restart_hctx(hctx);
 		did_work = blk_mq_dispatch_rq_list(hctx, &rq_list);
 	} else if (!has_sched_dispatch) {
 		blk_mq_flush_busy_ctxs(hctx, &rq_list);
@@ -331,20 +328,16 @@ static void blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
 
 void blk_mq_sched_restart_queues(struct blk_mq_hw_ctx *hctx)
 {
+	struct request_queue *q = hctx->queue;
 	unsigned int i;
 
-	if (!(hctx->flags & BLK_MQ_F_TAG_SHARED))
+	if (test_bit(QUEUE_FLAG_RESTART, &q->queue_flags)) {
+		if (test_and_clear_bit(QUEUE_FLAG_RESTART, &q->queue_flags)) {
+			queue_for_each_hw_ctx(q, hctx, i)
+				blk_mq_sched_restart_hctx(hctx);
+		}
+	} else {
 		blk_mq_sched_restart_hctx(hctx);
-	else {
-		struct request_queue *q = hctx->queue;
-
-		if (!test_bit(QUEUE_FLAG_RESTART, &q->queue_flags))
-			return;
-
-		clear_bit(QUEUE_FLAG_RESTART, &q->queue_flags);
-
-		queue_for_each_hw_ctx(q, hctx, i)
-			blk_mq_sched_restart_hctx(hctx);
 	}
 }
 
@@ -458,7 +451,8 @@ int blk_mq_sched_setup(struct request_queue *q)
 	 */
 	ret = 0;
 	queue_for_each_hw_ctx(q, hctx, i) {
-		hctx->sched_tags = blk_mq_alloc_rq_map(set, i, q->nr_requests, 0);
+		hctx->sched_tags = blk_mq_alloc_rq_map(set, i,
+						       q->nr_requests, set->reserved_tags);
 		if (!hctx->sched_tags) {
 			ret = -ENOMEM;
 			break;
@@ -498,15 +492,6 @@ int blk_mq_sched_init(struct request_queue *q)
 {
 	int ret;
 
-#if defined(CONFIG_DEFAULT_SQ_NONE)
-	if (q->nr_hw_queues == 1)
-		return 0;
-#endif
-#if defined(CONFIG_DEFAULT_MQ_NONE)
-	if (q->nr_hw_queues > 1)
-		return 0;
-#endif
-
 	mutex_lock(&q->sysfs_lock);
 	ret = elevator_init(q, NULL);
 	mutex_unlock(&q->sysfs_lock);
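
The two behavioural changes in this diff are easier to see in isolation. Below is a minimal, self-contained C sketch; it uses user-space stand-in types and names, not the kernel structures or APIs. It mirrors how blk_mq_sched_get_request() now fills in only the ctx/hctx fields the caller left unset, and how blk_mq_sched_restart_queues() relies on a single atomic test-and-clear of a queue-wide restart flag so that only one of several concurrent callers walks all hardware queues.

/*
 * Minimal user-space sketch (stand-in types, not the kernel API) of the
 * two patterns introduced by this patch:
 *  1. fill in only the allocation-data fields the caller left unset, so a
 *     caller may pin a specific ctx/hctx before the call
 *  2. atomically test-and-clear a queue-wide restart flag so that only one
 *     concurrent caller restarts every hardware queue
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct ctx  { int cpu; };
struct hctx { int idx; };

struct alloc_data {                   /* stand-in for struct blk_mq_alloc_data */
	struct ctx  *ctx;
	struct hctx *hctx;
};

static struct ctx  default_ctx  = { .cpu = 0 };
static struct hctx default_hctx = { .idx = 0 };

/* Pattern 1: only fill in what the caller did not provide. */
static void fill_alloc_data(struct alloc_data *data)
{
	if (!data->ctx)
		data->ctx = &default_ctx;    /* kernel: blk_mq_get_ctx(q) */
	if (!data->hctx)
		data->hctx = &default_hctx;  /* kernel: blk_mq_map_queue(q, cpu) */
}

/* Pattern 2: queue-wide restart flag, cleared atomically by one winner. */
static atomic_bool queue_needs_restart;

static void restart_queues(int nr_hw_queues)
{
	bool expected = true;

	/* Only the caller that wins the test-and-clear walks every queue. */
	if (atomic_compare_exchange_strong(&queue_needs_restart,
					   &expected, false)) {
		for (int i = 0; i < nr_hw_queues; i++)
			printf("restart hctx %d\n", i);
	}
}

int main(void)
{
	struct hctx pinned = { .idx = 3 };
	struct alloc_data data = { .hctx = &pinned };  /* caller pins the hctx */

	fill_alloc_data(&data);
	printf("ctx cpu %d, hctx %d\n", data.ctx->cpu, data.hctx->idx);

	atomic_store(&queue_needs_restart, true);
	restart_queues(4);
	return 0;
}

Note that the sketch only models the flag-driven path: in the patched kernel function, when QUEUE_FLAG_RESTART is not set, only the current hardware queue is restarted.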