From c64e38ea17a81721da0393584fd807f8434050fa Mon Sep 17 00:00:00 2001
From: Jeremy Fitzhardinge
Date: Mon, 1 Nov 2010 14:32:27 -0400
Subject: xen/blkfront: map REQ_FLUSH into a full barrier

Implement a flush as a full barrier, since we have nothing weaker.

Signed-off-by: Jeremy Fitzhardinge
Acked-by: Christoph Hellwig
---
 drivers/block/xen-blkfront.c | 19 +++++--------------
 1 file changed, 5 insertions(+), 14 deletions(-)

(limited to 'drivers/block/xen-blkfront.c')

diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 06e2812ba124..3a318d8576c5 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -245,14 +245,11 @@ static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
 }
 
 /*
- * blkif_queue_request
+ * Generate a Xen blkfront IO request from a blk layer request. Reads
+ * and writes are handled as expected. Since we lack a loose flush
+ * request, we map flushes into a full ordered barrier.
  *
- * request block io
- *
- * id: for guest use only.
- * operation: BLKIF_OP_{READ,WRITE,PROBE}
- * buffer: buffer to read/write into. this should be a
- * virtual address in the guest os.
+ * @req: a request struct
  */
 static int blkif_queue_request(struct request *req)
 {
@@ -289,7 +286,7 @@ static int blkif_queue_request(struct request *req)
 
         ring_req->operation = rq_data_dir(req) ?
                 BLKIF_OP_WRITE : BLKIF_OP_READ;
-        if (req->cmd_flags & REQ_HARDBARRIER)
+        if (req->cmd_flags & REQ_FLUSH)
                 ring_req->operation = BLKIF_OP_WRITE_BARRIER;
 
         ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
@@ -1069,14 +1066,8 @@ static void blkfront_connect(struct blkfront_info *info)
          */
         info->feature_flush = 0;
 
-        /*
-         * The driver doesn't properly handled empty flushes, so
-         * lets disable barrier support for now.
-         */
-#if 0
         if (!err && barrier)
                 info->feature_flush = REQ_FLUSH;
-#endif
 
         err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size);
         if (err) {
--
cgit v1.2.1
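The core of the patch above is the opcode selection in blkif_queue_request(): reads and writes keep their natural ring opcodes, and anything carrying REQ_FLUSH is promoted to BLKIF_OP_WRITE_BARRIER because the ring protocol offers no weaker flush. The standalone sketch below models that decision in plain C; the flag and opcode values are illustrative stand-ins, not the kernel's definitions.

/* Standalone sketch of the REQ_FLUSH -> BLKIF_OP_WRITE_BARRIER mapping.
 * Flag and opcode values are illustrative stand-ins, not the kernel's.
 */
#include <assert.h>
#include <stdio.h>

enum blkif_op { BLKIF_OP_READ, BLKIF_OP_WRITE, BLKIF_OP_WRITE_BARRIER };

#define REQ_WRITE (1u << 0)     /* stand-in for rq_data_dir(req) == WRITE */
#define REQ_FLUSH (1u << 1)     /* stand-in for the block layer flush flag */

static enum blkif_op map_request(unsigned int cmd_flags)
{
        enum blkif_op op = (cmd_flags & REQ_WRITE) ? BLKIF_OP_WRITE : BLKIF_OP_READ;

        /* No loose flush in the ring protocol: use a full ordered barrier. */
        if (cmd_flags & REQ_FLUSH)
                op = BLKIF_OP_WRITE_BARRIER;
        return op;
}

int main(void)
{
        assert(map_request(0) == BLKIF_OP_READ);
        assert(map_request(REQ_WRITE) == BLKIF_OP_WRITE);
        assert(map_request(REQ_WRITE | REQ_FLUSH) == BLKIF_OP_WRITE_BARRIER);
        printf("a flush maps to a full write barrier\n");
        return 0;
}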
From a945b9801a9bfd4a98bcfd9f6656b5027b254e3f Mon Sep 17 00:00:00 2001
From: Jeremy Fitzhardinge
Date: Mon, 1 Nov 2010 17:03:14 -0400
Subject: xen/blkfront: change blk_shadow.request to proper pointer

Signed-off-by: Jeremy Fitzhardinge
---
 drivers/block/xen-blkfront.c | 14 ++++++--------
 1 file changed, 6 insertions(+), 8 deletions(-)

(limited to 'drivers/block/xen-blkfront.c')

diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 3a318d8576c5..31c8a643d109 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -65,7 +65,7 @@ enum blkif_state {
 
 struct blk_shadow {
         struct blkif_request req;
-        unsigned long request;
+        struct request *request;
         unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 };
 
@@ -136,7 +136,7 @@ static void add_id_to_freelist(struct blkfront_info *info,
                                unsigned long id)
 {
         info->shadow[id].req.id = info->shadow_free;
-        info->shadow[id].request = 0;
+        info->shadow[id].request = NULL;
         info->shadow_free = id;
 }
 
@@ -278,7 +278,7 @@ static int blkif_queue_request(struct request *req)
         /* Fill out a communications ring structure. */
         ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
         id = get_id_from_freelist(info);
-        info->shadow[id].request = (unsigned long)req;
+        info->shadow[id].request = req;
 
         ring_req->id = id;
         ring_req->sector_number = (blkif_sector_t)blk_rq_pos(req);
@@ -633,7 +633,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 
                 bret = RING_GET_RESPONSE(&info->ring, i);
                 id = bret->id;
-                req = (struct request *)info->shadow[id].request;
+                req = info->shadow[id].request;
 
                 blkif_completion(&info->shadow[id]);
 
@@ -898,7 +898,7 @@ static int blkif_recover(struct blkfront_info *info)
         /* Stage 3: Find pending requests and requeue them. */
         for (i = 0; i < BLK_RING_SIZE; i++) {
                 /* Not in use? */
-                if (copy[i].request == 0)
+                if (!copy[i].request)
                         continue;
 
                 /* Grab a request slot and copy shadow state into it. */
@@ -915,9 +915,7 @@ static int blkif_recover(struct blkfront_info *info)
                                 req->seg[j].gref,
                                 info->xbdev->otherend_id,
                                 pfn_to_mfn(info->shadow[req->id].frame[j]),
-                                rq_data_dir(
-                                        (struct request *)
-                                        info->shadow[req->id].request));
+                                rq_data_dir(info->shadow[req->id].request));
                 info->shadow[req->id].req = *req;
 
                 info->ring.req_prod_pvt++;
--
cgit v1.2.1
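The patch above turns blk_shadow.request into a proper struct request pointer, with NULL now marking a free slot. The shadow array itself is managed as a freelist threaded through req.id: grabbing a slot unlinks it, completing a request relinks it. Below is a self-contained sketch of that bookkeeping; the structures are simplified stand-ins, not the real blkif definitions.

/* Standalone sketch of the blk_shadow freelist: free slots are chained
 * through req.id, and each in-flight slot keeps a struct request pointer.
 * Structures are simplified stand-ins for the real blkif definitions.
 */
#include <assert.h>
#include <stddef.h>

#define BLK_RING_SIZE 32

struct request { int tag; };            /* simplified stand-in */

struct blkif_request_stub { unsigned long id; };

struct blk_shadow {
        struct blkif_request_stub req;
        struct request *request;        /* NULL means "slot free" */
};

static struct blk_shadow shadow[BLK_RING_SIZE];
static unsigned long shadow_free;

static void shadow_init(void)
{
        unsigned long i;

        for (i = 0; i < BLK_RING_SIZE; i++)
                shadow[i].req.id = i + 1;       /* chain each slot to the next */
        shadow_free = 0;
}

static unsigned long get_id_from_freelist(void)
{
        unsigned long id = shadow_free;

        shadow_free = shadow[id].req.id;        /* unlink the slot */
        return id;
}

static void add_id_to_freelist(unsigned long id)
{
        shadow[id].req.id = shadow_free;        /* relink the slot */
        shadow[id].request = NULL;
        shadow_free = id;
}

int main(void)
{
        struct request fake = { .tag = 1 };
        unsigned long id;

        shadow_init();
        id = get_id_from_freelist();
        shadow[id].request = &fake;
        assert(shadow[id].request == &fake);
        add_id_to_freelist(id);
        assert(shadow[id].request == NULL);
        return 0;
}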
From be2f8373c188ed1f5d36003c9928e4d695213080 Mon Sep 17 00:00:00 2001
From: Jeremy Fitzhardinge
Date: Tue, 2 Nov 2010 10:38:33 -0400
Subject: xen/blkfront: Implement FUA with BLKIF_OP_WRITE_BARRIER

The BLKIF_OP_WRITE_BARRIER is a full ordered barrier, so we can use it
to implement FUA as well as a plain FLUSH.

Signed-off-by: Jeremy Fitzhardinge
Acked-by: Christoph Hellwig
---
 drivers/block/xen-blkfront.c | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

(limited to 'drivers/block/xen-blkfront.c')

diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 31c8a643d109..76b874a79175 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -286,8 +286,18 @@ static int blkif_queue_request(struct request *req)
 
         ring_req->operation = rq_data_dir(req) ?
                 BLKIF_OP_WRITE : BLKIF_OP_READ;
-        if (req->cmd_flags & REQ_FLUSH)
+
+        if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
+                /*
+                 * Ideally we could just do an unordered
+                 * flush-to-disk, but all we have is a full write
+                 * barrier at the moment. However, a barrier write is
+                 * a superset of FUA, so we can implement it the same
+                 * way. (It's also a FLUSH+FUA, since it is
+                 * guaranteed ordered WRT previous writes.)
+                 */
                 ring_req->operation = BLKIF_OP_WRITE_BARRIER;
+        }
 
         ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
         BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
@@ -1065,7 +1075,7 @@ static void blkfront_connect(struct blkfront_info *info)
         info->feature_flush = 0;
 
         if (!err && barrier)
-                info->feature_flush = REQ_FLUSH;
+                info->feature_flush = REQ_FLUSH | REQ_FUA;
 
         err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size);
         if (err) {
--
cgit v1.2.1
From dcb8baeceaa1c629bbd06f472cea023ad08a0c33 Mon Sep 17 00:00:00 2001
From: Jeremy Fitzhardinge
Date: Tue, 2 Nov 2010 11:55:58 -0400
Subject: xen/blkfront: cope with backend that fail empty BLKIF_OP_WRITE_BARRIER requests

Some(?) Xen block backends fail BLKIF_OP_WRITE_BARRIER requests, which
Linux uses as a cache flush operation. In that case, disable use of
FLUSH.

Signed-off-by: Jeremy Fitzhardinge
Cc: Daniel Stodden
---
 drivers/block/xen-blkfront.c | 10 ++++++++++
 1 file changed, 10 insertions(+)

(limited to 'drivers/block/xen-blkfront.c')

diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 76b874a79175..4f9e22f29138 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -656,6 +656,16 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
                         printk(KERN_WARNING "blkfront: %s: write barrier op failed\n",
                                info->gd->disk_name);
                         error = -EOPNOTSUPP;
+                }
+                if (unlikely(bret->status == BLKIF_RSP_ERROR &&
+                             info->shadow[id].req.nr_segments == 0)) {
+                        printk(KERN_WARNING "blkfront: %s: empty write barrier op failed\n",
+                               info->gd->disk_name);
+                        error = -EOPNOTSUPP;
+                }
+                if (unlikely(error)) {
+                        if (error == -EOPNOTSUPP)
+                                error = 0;
                         info->feature_flush = 0;
                         xlvbd_flush(info);
                 }
--
cgit v1.2.1
From 667c78afaec0ac500908e191e8f236e9578d7b1f Mon Sep 17 00:00:00 2001
From: Jeremy Fitzhardinge
Date: Wed, 8 Dec 2010 12:39:12 -0800
Subject: xen: Provide a variant of __RING_SIZE() that is an integer constant expression

Without this, gcc 4.5 won't compile xen-netfront and xen-blkfront, where
this is being used to specify array sizes.

Signed-off-by: Jan Beulich
Signed-off-by: Jeremy Fitzhardinge
Cc: Jens Axboe
Cc: David Miller
Cc: Stable Kernel
Signed-off-by: Linus Torvalds
---
 drivers/block/xen-blkfront.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers/block/xen-blkfront.c')

diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 4f9e22f29138..657873e4328d 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -72,7 +72,7 @@ struct blk_shadow {
 static DEFINE_MUTEX(blkfront_mutex);
 static const struct block_device_operations xlvbd_block_fops;
 
-#define BLK_RING_SIZE __RING_SIZE((struct blkif_sring *)0, PAGE_SIZE)
+#define BLK_RING_SIZE __CONST_RING_SIZE(blkif, PAGE_SIZE)
 
 /*
  * We have one of these per vbd, whether ide, scsi or 'other'. They
--
cgit v1.2.1
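The last patch matters because BLK_RING_SIZE is used as a file-scope array size, and C requires an integer constant expression there; a macro that merely folds to a constant through pointer arithmetic is not enough for stricter compilers such as gcc 4.5. The sketch below contrasts the two styles; the struct and macro names are made up for illustration and are not the real Xen __RING_SIZE()/__CONST_RING_SIZE() definitions.

/* Why an array size needs an integer constant expression: a sketch.
 * ring_stub and both macros are simplified stand-ins, not the Xen ones.
 */
#include <stddef.h>
#include <stdio.h>

struct ring_stub {
        unsigned long req_prod, rsp_prod;
        unsigned char ring[1];          /* variable-length in the real sring */
};

/* Pointer-arithmetic form: folds to a constant, but is NOT an integer
 * constant expression, so it cannot size a file-scope array.
 */
#define RING_SIZE_PTR(_p, _sz) \
        (((_sz) - (size_t)((char *)(_p)->ring - (char *)(_p))) / sizeof((_p)->ring[0]))

/* offsetof/sizeof form: a genuine integer constant expression. */
#define RING_SIZE_CONST(_type, _sz) \
        (((_sz) - offsetof(_type, ring)) / sizeof(((_type *)0)->ring[0]))

/* Usable as a file-scope array size precisely because it is constant. */
static unsigned char shadow_slots[RING_SIZE_CONST(struct ring_stub, 4096)];

int main(void)
{
        struct ring_stub r;

        printf("constant form: %zu slots\n", RING_SIZE_CONST(struct ring_stub, 4096));
        printf("runtime form : %zu slots\n", RING_SIZE_PTR(&r, (size_t)4096));
        printf("array size   : %zu\n", sizeof(shadow_slots));
        return 0;
}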