author    Daniel Stodden <daniel.stodden@citrix.com>    2011-05-28 13:21:10 -0700
committer Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>    2011-06-30 12:48:06 -0400
commit    b4726a9df270859898e254b6eee67a28f38b34d3
tree      2512fcce88e4ad97ea1068864d716fd105d86c39
parent    2b727c6300b49352f80f63704bb50c256949e95e
xen/blkback: Don't let in-flight requests defer pending ones.
Running RING_FINAL_CHECK_FOR_REQUESTS from make_response is a bad idea. It
means that in-flight I/O is essentially blocking continued batches. This
essentially kills throughput on frontends which unplug (or even just notify)
early and rightfully assume additional requests will be picked up on time, not
synchronously.

Signed-off-by: Daniel Stodden <daniel.stodden@citrix.com>
[v1: Rebased and fixed compile problems]
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
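For context, the fix applies the standard Xen shared-ring "final check" idiom at the dispatch level: drain every request currently visible, and only after announcing that the backend is about to go idle take one last look at the ring, so a request the frontend posted in that window is picked up without waiting for another notification. The sketch below is a minimal, self-contained user-space model of that shape; the demo_* names and the simplified ring structure are illustrative assumptions and stand in for the kernel's RING_* macros and the blkback internals, not a copy of them.

/*
 * Minimal user-space model of the "drain, then final check" idiom.
 * NOTE: demo_ring, demo_consume, demo_final_check and demo_dispatch are
 * hypothetical names for illustration only; they are not the Xen ring.h
 * macros or the blkback code touched by this patch.
 */
#include <stdio.h>

struct demo_ring {
	unsigned int req_prod;    /* next slot the frontend will fill   */
	unsigned int req_cons;    /* next slot the backend will consume */
	int backend_going_idle;   /* backend has announced it may sleep */
};

/* Handle everything currently visible; plays the role of __do_block_io_op(). */
static int demo_consume(struct demo_ring *r)
{
	while (r->req_cons != r->req_prod) {
		printf("handling request %u\n", r->req_cons);
		r->req_cons++;
	}
	return 0;	/* nothing left that we could see */
}

/*
 * Plays the role of RING_FINAL_CHECK_FOR_REQUESTS(): publish the intent to
 * go idle, then look once more so a request that raced in is not stranded
 * until the next notification.
 */
static void demo_final_check(struct demo_ring *r, int *more_to_do)
{
	r->backend_going_idle = 1;
	*more_to_do = (r->req_cons != r->req_prod);
}

/* The shape of the new do_block_io_op(): loop until the final check is clean. */
static void demo_dispatch(struct demo_ring *r)
{
	int more_to_do;

	do {
		more_to_do = demo_consume(r);
		if (more_to_do)
			break;

		demo_final_check(r, &more_to_do);
	} while (more_to_do);
}

int main(void)
{
	struct demo_ring r = { .req_prod = 3, .req_cons = 0, .backend_going_idle = 0 };

	demo_dispatch(&r);	/* handles requests 0..2, then final-checks once */
	return 0;
}

The point of the patch is where this loop lives: by re-checking inside the dispatch path rather than in make_response, completion of in-flight I/O no longer gates the pickup of new request batches.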
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/block/xen-blkback/blkback.c | 36
1 file changed, 19 insertions(+), 17 deletions(-)
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 5cf2993a8338..f0537f3f6366 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -458,7 +458,8 @@ static void end_block_io_op(struct bio *bio, int error)
  * (which has the sectors we want, number of them, grant references, etc),
  * and transmute it to the block API to hand it over to the proper block disk.
  */
-static int do_block_io_op(struct xen_blkif *blkif)
+static int
+__do_block_io_op(struct xen_blkif *blkif)
 {
 	union blkif_back_rings *blk_rings = &blkif->blk_rings;
 	struct blkif_request req;
@@ -515,6 +516,23 @@ static int do_block_io_op(struct xen_blkif *blkif)
 	return more_to_do;
 }
 
+static int
+do_block_io_op(struct xen_blkif *blkif)
+{
+	union blkif_back_rings *blk_rings = &blkif->blk_rings;
+	int more_to_do;
+
+	do {
+		more_to_do = __do_block_io_op(blkif);
+		if (more_to_do)
+			break;
+
+		RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
+	} while (more_to_do);
+
+	return more_to_do;
+}
+
 /*
  * Transmutation of the 'struct blkif_request' to a proper 'struct bio'
  * and call the 'submit_bio' to pass it to the underlying storage.
@@ -700,7 +718,6 @@ static void make_response(struct xen_blkif *blkif, u64 id,
 	struct blkif_response resp;
 	unsigned long flags;
 	union blkif_back_rings *blk_rings = &blkif->blk_rings;
-	int more_to_do = 0;
 	int notify;
 
 	resp.id = id;
@@ -727,22 +744,7 @@ static void make_response(struct xen_blkif *blkif, u64 id,
 	}
 	blk_rings->common.rsp_prod_pvt++;
 	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
-	if (blk_rings->common.rsp_prod_pvt == blk_rings->common.req_cons) {
-		/*
-		 * Tail check for pending requests. Allows frontend to avoid
-		 * notifications if requests are already in flight (lower
-		 * overheads and promotes batching).
-		 */
-		RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
-
-	} else if (RING_HAS_UNCONSUMED_REQUESTS(&blk_rings->common)) {
-		more_to_do = 1;
-	}
-
 	spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
-
-	if (more_to_do)
-		blkif_notify_work(blkif);
 	if (notify)
 		notify_remote_via_irq(blkif->irq);
 }