author    Jens Axboe <axboe@fb.com>  2014-09-22 11:57:32 -0600
committer Jens Axboe <axboe@fb.com>  2014-09-22 11:57:32 -0600
commit    6d11fb454b161a4565c57be6f1c5527235741003 (patch)
tree      c238ed3df2f654181c2a0746478a33b32214cc60 /block
parent    b207892b061da7608878e273ae22ba9bf9be264b (diff)
parent    8b95741569eabc5eb17da71d1d3668cdb0bef86c (diff)
Merge branch 'for-linus' into for-3.18/core
Patches are moving from for-linus to 3.18 instead, so pull in these
changes that will go to Linus today.
Diffstat (limited to 'block')

 block/blk-exec.c |  1 +
 block/blk-mq.c   | 28 +++++++++++++++++-----------
 2 files changed, 18 insertions(+), 11 deletions(-)
diff --git a/block/blk-exec.c b/block/blk-exec.c
index f4d27b12c90b..9924725fa50d 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -56,6 +56,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 	bool is_pm_resume;
 
 	WARN_ON(irqs_disabled());
+	WARN_ON(rq->cmd_type == REQ_TYPE_FS);
 
 	rq->rq_disk = bd_disk;
 	rq->end_io = done;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 067e600002d3..e743d28620b2 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -203,7 +203,6 @@ __blk_mq_alloc_request(struct blk_mq_alloc_data *data, int rw)
 	if (tag != BLK_MQ_TAG_FAIL) {
 		rq = data->hctx->tags->rqs[tag];
 
-		rq->cmd_flags = 0;
 		if (blk_mq_tag_busy(data->hctx)) {
 			rq->cmd_flags = REQ_MQ_INFLIGHT;
 			atomic_inc(&data->hctx->nr_active);
@@ -262,6 +261,7 @@ static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
 
 	if (rq->cmd_flags & REQ_MQ_INFLIGHT)
 		atomic_dec(&hctx->nr_active);
+	rq->cmd_flags = 0;
 
 	clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
 	blk_mq_put_tag(hctx, tag, &ctx->last_tag);
@@ -397,6 +397,12 @@ static void blk_mq_start_request(struct request *rq, bool last)
 	blk_add_timer(rq);
 
 	/*
+	 * Ensure that ->deadline is visible before set the started
+	 * flag and clear the completed flag.
+	 */
+	smp_mb__before_atomic();
+
+	/*
 	 * Mark us as started and clear complete. Complete might have been
 	 * set if requeue raced with timeout, which then marked it as
 	 * complete. So be sure to clear complete again when we start
@@ -477,7 +483,11 @@ static void blk_mq_requeue_work(struct work_struct *work)
 		blk_mq_insert_request(rq, false, false, false);
 	}
 
-	blk_mq_run_queues(q, false);
+	/*
+	 * Use the start variant of queue running here, so that running
+	 * the requeue work will kick stopped queues.
+	 */
+	blk_mq_start_hw_queues(q);
 }
 
 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
@@ -961,14 +971,9 @@ void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
 
 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
-	if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA) &&
-	    !(rq->cmd_flags & (REQ_FLUSH_SEQ))) {
-		blk_insert_flush(rq);
-	} else {
-		spin_lock(&ctx->lock);
-		__blk_mq_insert_request(hctx, rq, at_head);
-		spin_unlock(&ctx->lock);
-	}
+	spin_lock(&ctx->lock);
+	__blk_mq_insert_request(hctx, rq, at_head);
+	spin_unlock(&ctx->lock);
 
 	if (run_queue)
 		blk_mq_run_hw_queue(hctx, async);
@@ -1408,6 +1413,8 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
 		left -= to_do * rq_size;
 		for (j = 0; j < to_do; j++) {
 			tags->rqs[i] = p;
+			tags->rqs[i]->atomic_flags = 0;
+			tags->rqs[i]->cmd_flags = 0;
 			if (set->ops->init_request) {
 				if (set->ops->init_request(set->driver_data,
 						tags->rqs[i], hctx_idx, i,
@@ -1960,7 +1967,6 @@ out_unwind:
 	while (--i >= 0)
 		blk_mq_free_rq_map(set, set->tags[i], i);
 
-	set->tags = NULL;
 	return -ENOMEM;
 }
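
The smp_mb__before_atomic() hunk is the subtle part of this pull: set_bit()
and clear_bit() are not guaranteed to order earlier plain stores on every
architecture, so a timeout scan that observes REQ_ATOM_STARTED could
otherwise read a stale ->deadline. Below is a minimal sketch of the
publish/observe pattern the hunk relies on, using simplified stand-in types
(mini_rq, mini_start_request and mini_timed_out are illustrative names, not
blk-mq API):

	#include <linux/bitops.h>
	#include <linux/jiffies.h>

	/* Simplified stand-in for struct request; not the real blk-mq layout. */
	struct mini_rq {
		unsigned long deadline;
		unsigned long atomic_flags;
	};

	#define MINI_ATOM_STARTED	0
	#define MINI_ATOM_COMPLETE	1

	/* Writer side: mirrors the ordering added in blk_mq_start_request(). */
	static void mini_start_request(struct mini_rq *rq, unsigned long timeout)
	{
		rq->deadline = jiffies + timeout;	/* plain store */

		/*
		 * set_bit()/clear_bit() need not act as barriers, so make
		 * the ->deadline store visible before the flag updates are.
		 */
		smp_mb__before_atomic();

		set_bit(MINI_ATOM_STARTED, &rq->atomic_flags);
		clear_bit(MINI_ATOM_COMPLETE, &rq->atomic_flags);
	}

	/* Reader side, e.g. a timeout scan; pairs with the writer's barrier. */
	static bool mini_timed_out(struct mini_rq *rq)
	{
		if (!test_bit(MINI_ATOM_STARTED, &rq->atomic_flags))
			return false;
		smp_rmb();	/* read the flag before reading ->deadline */
		return time_after_eq(jiffies, rq->deadline);
	}

Whether the real timeout path uses an explicit smp_rmb() this way is an
assumption here; the hunk itself only adds the writer-side half of the
pairing.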
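
The cmd_flags churn follows one rule: a request's flags are reset when it is
freed (in __blk_mq_free_request() and at map initialization), never when it
is allocated, so the free path can still consume REQ_MQ_INFLIGHT for the
hctx->nr_active accounting. A rough sketch of that lifecycle, with
hypothetical helpers (flag_rq, flag_alloc and flag_free stand in for the real
structures and functions):

	#include <linux/atomic.h>

	#define FLAG_REQ_INFLIGHT	(1U << 0)	/* stand-in for REQ_MQ_INFLIGHT */

	struct flag_rq {
		unsigned int cmd_flags;
	};

	/*
	 * Alloc side: no blanket "cmd_flags = 0" any more; the request is
	 * already clean because the free side (and the initial map setup,
	 * mirroring the blk_mq_init_rq_map() hunk) reset it.
	 */
	static void flag_alloc(struct flag_rq *rq, atomic_t *nr_active,
			       bool shared_tags)
	{
		if (shared_tags) {
			rq->cmd_flags = FLAG_REQ_INFLIGHT;
			atomic_inc(nr_active);
		}
	}

	/*
	 * Free side: consume the flag for the accounting first, then clear
	 * everything so the tag can be handed out again with clean state.
	 */
	static void flag_free(struct flag_rq *rq, atomic_t *nr_active)
	{
		if (rq->cmd_flags & FLAG_REQ_INFLIGHT)
			atomic_dec(nr_active);
		rq->cmd_flags = 0;
	}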