author		Ming Lei <ming.lei@canonical.com>	2014-09-25 23:23:46 +0800
committer	Jens Axboe <axboe@fb.com>	2014-09-25 15:22:44 -0600
commit		e97c293cdf77263abdc021de280516e0017afc84 (patch)
tree		77e1bef4a3835fdbfe5975c78b3d0ba53f266459 /block/blk-flush.c
parent		0bae352da54a95435f721705d3670a6eaefdcf87 (diff)
block: introduce 'blk_mq_ctx' parameter to blk_get_flush_queue
This patch adds a 'blk_mq_ctx' parameter to blk_get_flush_queue(), so that the function can find the blk_flush_queue bound to the current mq context once the flush queue becomes per-hw-queue.

For the legacy queue, the parameter can simply be NULL. For the multiqueue case, it should be set to the context from which the related request originated; with this context info, the hw queue and its flush queue can be found easily.

Signed-off-by: Ming Lei <ming.lei@canonical.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
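For reference, a minimal sketch of what the lookup could look like once flush queues hang off each hw queue. This is not part of this patch: the field names q->fq and hctx->fq are assumptions, and only the q->mq_ops->map_queue(q, ctx->cpu) mapping is confirmed by the diff below.

static inline struct blk_flush_queue *
blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
{
	struct blk_mq_hw_ctx *hctx;

	/* Legacy (non-mq) path: a single flush queue per request_queue
	 * (assumed field q->fq), so the ctx argument may be NULL. */
	if (!q->mq_ops)
		return q->fq;

	/* mq path: map the sw context to its hw queue, then use that
	 * queue's flush queue (assumed field hctx->fq). */
	hctx = q->mq_ops->map_queue(q, ctx->cpu);
	return hctx->fq;
}

With this shape, callers that only have a legacy request pass NULL, while mq callers pass the request's mq_ctx, which is exactly how the call sites below are updated.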
Diffstat (limited to 'block/blk-flush.c')
-rw-r--r--	block/blk-flush.c	11
1 file changed, 5 insertions(+), 6 deletions(-)
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 9bc5b4f35c23..004d95e4098e 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -223,7 +223,7 @@ static void flush_end_io(struct request *flush_rq, int error)
bool queued = false;
struct request *rq, *n;
unsigned long flags = 0;
- struct blk_flush_queue *fq = blk_get_flush_queue(q);
+ struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);
if (q->mq_ops) {
spin_lock_irqsave(&fq->mq_flush_lock, flags);
@@ -319,7 +319,7 @@ static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq)
static void flush_data_end_io(struct request *rq, int error)
{
struct request_queue *q = rq->q;
- struct blk_flush_queue *fq = blk_get_flush_queue(q);
+ struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
/*
* After populating an empty queue, kick it to avoid stall. Read
@@ -333,11 +333,10 @@ static void mq_flush_data_end_io(struct request *rq, int error)
{
struct request_queue *q = rq->q;
struct blk_mq_hw_ctx *hctx;
- struct blk_mq_ctx *ctx;
+ struct blk_mq_ctx *ctx = rq->mq_ctx;
unsigned long flags;
- struct blk_flush_queue *fq = blk_get_flush_queue(q);
+ struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);
- ctx = rq->mq_ctx;
hctx = q->mq_ops->map_queue(q, ctx->cpu);
/*
@@ -367,7 +366,7 @@ void blk_insert_flush(struct request *rq)
struct request_queue *q = rq->q;
unsigned int fflags = q->flush_flags; /* may change, cache */
unsigned int policy = blk_flush_policy(fflags, rq);
- struct blk_flush_queue *fq = blk_get_flush_queue(q);
+ struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);
/*
* @policy now records what operations need to be done. Adjust