| author | Matthew Wilcox <matthew.r.wilcox@intel.com> | 2013-06-24 12:03:57 -0400 |
|---|---|---|
| committer | Matthew Wilcox <matthew.r.wilcox@intel.com> | 2013-06-24 13:57:27 -0400 |
| commit | 7d8224574cbd2326a6be00f319f5f7597abec3f6 (patch) | |
| tree | fcba1402f98bffa6a38f2a00d104e43e865804c8 | |
| parent | bc57a0f7a44cfcf3e9873f6c6b8dcecdca486b1f (diff) | |
NVMe: Call nvme_process_cq from submission path
Since we have the queue locked, it makes sense to check if there are
any completion queue entries on the queue before we release the lock.
If there are, it may save an interrupt and reduce latency for the I/Os
that happened to complete. This happens fairly often for some workloads.
Signed-off-by: Matthew Wilcox <matthew.r.wilcox@intel.com>
-rw-r--r-- | drivers/block/nvme-core.c | 39 |
1 file changed, 20 insertions, 19 deletions
```diff
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index eb4a91f3bf41..07d527c66eb4 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -738,25 +738,6 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
 	return result;
 }
 
-static void nvme_make_request(struct request_queue *q, struct bio *bio)
-{
-	struct nvme_ns *ns = q->queuedata;
-	struct nvme_queue *nvmeq = get_nvmeq(ns->dev);
-	int result = -EBUSY;
-
-	spin_lock_irq(&nvmeq->q_lock);
-	if (bio_list_empty(&nvmeq->sq_cong))
-		result = nvme_submit_bio_queue(nvmeq, ns, bio);
-	if (unlikely(result)) {
-		if (bio_list_empty(&nvmeq->sq_cong))
-			add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
-		bio_list_add(&nvmeq->sq_cong, bio);
-	}
-
-	spin_unlock_irq(&nvmeq->q_lock);
-	put_nvmeq(nvmeq);
-}
-
 static int nvme_process_cq(struct nvme_queue *nvmeq)
 {
 	u16 head, phase;
@@ -797,6 +778,26 @@ static int nvme_process_cq(struct nvme_queue *nvmeq)
 	return 1;
 }
 
+static void nvme_make_request(struct request_queue *q, struct bio *bio)
+{
+	struct nvme_ns *ns = q->queuedata;
+	struct nvme_queue *nvmeq = get_nvmeq(ns->dev);
+	int result = -EBUSY;
+
+	spin_lock_irq(&nvmeq->q_lock);
+	if (bio_list_empty(&nvmeq->sq_cong))
+		result = nvme_submit_bio_queue(nvmeq, ns, bio);
+	if (unlikely(result)) {
+		if (bio_list_empty(&nvmeq->sq_cong))
+			add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
+		bio_list_add(&nvmeq->sq_cong, bio);
+	}
+
+	nvme_process_cq(nvmeq);
+	spin_unlock_irq(&nvmeq->q_lock);
+	put_nvmeq(nvmeq);
+}
+
 static irqreturn_t nvme_irq(int irq, void *data)
 {
 	irqreturn_t result;
```
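
The pattern the patch adopts can be seen in a minimal, self-contained sketch: after queueing a submission, reap any completion entries whose phase bit is already valid while the lock is still held, so a later interrupt can sometimes be avoided. This is not the kernel code; the `toy_queue`, `toy_cqe`, `toy_process_cq`, and `toy_submit` names are invented for illustration, and a pthread mutex stands in for the driver's `q_lock`.

```c
/*
 * Simplified sketch of the idea in this commit: drain completion-queue
 * entries from the submission path, before dropping the queue lock.
 * Names and structures here are invented for illustration only.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define CQ_DEPTH 16

struct toy_cqe {
	uint16_t status;	/* low bit doubles as the phase tag */
	uint16_t command_id;
};

struct toy_queue {
	pthread_mutex_t q_lock;
	struct toy_cqe cqes[CQ_DEPTH];
	uint16_t cq_head;
	uint8_t cq_phase;
};

/* Reap every CQE whose phase bit matches the queue's current phase. */
static int toy_process_cq(struct toy_queue *q)
{
	int found = 0;

	while ((q->cqes[q->cq_head].status & 1) == q->cq_phase) {
		printf("completed command %u\n",
		       (unsigned)q->cqes[q->cq_head].command_id);
		if (++q->cq_head == CQ_DEPTH) {
			q->cq_head = 0;
			q->cq_phase = !q->cq_phase;	/* phase flips on wrap */
		}
		found++;
	}
	return found;
}

/*
 * Submission path: queue the command, then opportunistically reap the CQ
 * while the lock is still held, mirroring the ordering in this patch.
 */
static void toy_submit(struct toy_queue *q, uint16_t command_id)
{
	(void)command_id;
	pthread_mutex_lock(&q->q_lock);
	/* ... write the submission entry and ring the doorbell here ... */
	toy_process_cq(q);		/* may save an interrupt */
	pthread_mutex_unlock(&q->q_lock);
}

int main(void)
{
	struct toy_queue q = {
		.q_lock = PTHREAD_MUTEX_INITIALIZER,
		.cq_phase = 1,
	};

	/* Pretend the device already completed command 7. */
	q.cqes[0].status = 1;
	q.cqes[0].command_id = 7;

	/* Submitting command 8 reaps command 7 without waiting for an IRQ. */
	toy_submit(&q, 8);
	return 0;
}
```

Polling the CQ under the same lock fits the existing locking scheme, since the interrupt path also processes completions with `q_lock` held; the trade-off is a slightly longer lock hold time on submission in exchange for fewer interrupts when completions are already pending, which is the saving the commit message describes.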