Diffstat (limited to 'drivers/block')
-rw-r--r--  drivers/block/brd.c                 23
-rw-r--r--  drivers/block/drbd/drbd_actlog.c     1
-rw-r--r--  drivers/block/drbd/drbd_debugfs.c    4
-rw-r--r--  drivers/block/drbd/drbd_int.h        1
-rw-r--r--  drivers/block/drbd/drbd_main.c       8
-rw-r--r--  drivers/block/drbd/drbd_receiver.c   2
-rw-r--r--  drivers/block/drbd/drbd_req.c        6
-rw-r--r--  drivers/block/drbd/drbd_worker.c     2
-rw-r--r--  drivers/block/floppy.c              34
-rw-r--r--  drivers/block/loop.c                67
-rw-r--r--  drivers/block/nbd.c                 12
-rw-r--r--  drivers/block/pktcdvd.c              2
-rw-r--r--  drivers/block/rbd.c                 25
-rw-r--r--  drivers/block/umem.c                 2
-rw-r--r--  drivers/block/virtio_blk.c          26
-rw-r--r--  drivers/block/xen-blkfront.c        97
-rw-r--r--  drivers/block/zram/zram_drv.c       29
17 files changed, 160 insertions(+), 181 deletions(-)
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index ba5145d384d8..0c76d4016eeb 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -300,20 +300,20 @@ static void copy_from_brd(void *dst, struct brd_device *brd,
* Process a single bvec of a bio.
*/
static int brd_do_bvec(struct brd_device *brd, struct page *page,
- unsigned int len, unsigned int off, int rw,
+ unsigned int len, unsigned int off, bool is_write,
sector_t sector)
{
void *mem;
int err = 0;
- if (rw != READ) {
+ if (is_write) {
err = copy_to_brd_setup(brd, sector, len);
if (err)
goto out;
}
mem = kmap_atomic(page);
- if (rw == READ) {
+ if (!is_write) {
copy_from_brd(mem + off, brd, sector, len);
flush_dcache_page(page);
} else {
@@ -330,7 +330,6 @@ static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
{
struct block_device *bdev = bio->bi_bdev;
struct brd_device *brd = bdev->bd_disk->private_data;
- int rw;
struct bio_vec bvec;
sector_t sector;
struct bvec_iter iter;
@@ -347,14 +346,12 @@ static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
goto out;
}
- rw = bio_data_dir(bio);
-
bio_for_each_segment(bvec, bio, iter) {
unsigned int len = bvec.bv_len;
int err;
- err = brd_do_bvec(brd, bvec.bv_page, len,
- bvec.bv_offset, rw, sector);
+ err = brd_do_bvec(brd, bvec.bv_page, len, bvec.bv_offset,
+ op_is_write(bio_op(bio)), sector);
if (err)
goto io_error;
sector += len >> SECTOR_SHIFT;
@@ -369,17 +366,17 @@ io_error:
}
static int brd_rw_page(struct block_device *bdev, sector_t sector,
- struct page *page, int rw)
+ struct page *page, bool is_write)
{
struct brd_device *brd = bdev->bd_disk->private_data;
- int err = brd_do_bvec(brd, page, PAGE_SIZE, 0, rw, sector);
- page_endio(page, rw & WRITE, err);
+ int err = brd_do_bvec(brd, page, PAGE_SIZE, 0, is_write, sector);
+ page_endio(page, is_write, err);
return err;
}
#ifdef CONFIG_BLK_DEV_RAM_DAX
static long brd_direct_access(struct block_device *bdev, sector_t sector,
- void __pmem **kaddr, pfn_t *pfn, long size)
+ void **kaddr, pfn_t *pfn, long size)
{
struct brd_device *brd = bdev->bd_disk->private_data;
struct page *page;
@@ -389,7 +386,7 @@ static long brd_direct_access(struct block_device *bdev, sector_t sector,
page = brd_insert_page(brd, sector);
if (!page)
return -ENOSPC;
- *kaddr = (void __pmem *)page_address(page);
+ *kaddr = page_address(page);
*pfn = page_to_pfn_t(page);
return PAGE_SIZE;
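
The brd hunks above swap an int rw direction argument for a bool is_write derived from op_is_write(bio_op(bio)). Below is a minimal user-space sketch of that convention; every type and helper in it is invented for illustration and none of it is a kernel interface.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Toy stand-in for the backing store; purely illustrative, not a kernel API. */
struct toy_dev {
	unsigned char data[4096];
};

/* One entry point keyed by a bool, mirroring the brd_do_bvec() change from
 * an int rw flag to bool is_write. */
static int toy_do_bvec(struct toy_dev *dev, void *buf, size_t len,
		       size_t off, bool is_write)
{
	if (off + len > sizeof(dev->data))
		return -1;
	if (is_write)
		memcpy(dev->data + off, buf, len);	/* buffer -> device */
	else
		memcpy(buf, dev->data + off, len);	/* device -> buffer */
	return 0;
}

int main(void)
{
	struct toy_dev dev = { { 0 } };
	char out[5] = "abcd", in[5] = { 0 };

	toy_do_bvec(&dev, out, 4, 0, true);	/* write path */
	toy_do_bvec(&dev, in, 4, 0, false);	/* read path */
	printf("%s\n", in);			/* prints "abcd" */
	return 0;
}
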
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index 0a1aaf8c24c4..2d3d50ab74bf 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -27,7 +27,6 @@
#include <linux/crc32c.h>
#include <linux/drbd.h>
#include <linux/drbd_limits.h>
-#include <linux/dynamic_debug.h>
#include "drbd_int.h"
diff --git a/drivers/block/drbd/drbd_debugfs.c b/drivers/block/drbd/drbd_debugfs.c
index be91a8d7c22a..de5c3ee8a790 100644
--- a/drivers/block/drbd/drbd_debugfs.c
+++ b/drivers/block/drbd/drbd_debugfs.c
@@ -425,9 +425,6 @@ static int drbd_single_open(struct file *file, int (*show)(struct seq_file *, vo
/* Are we still linked,
* or has debugfs_remove() already been called? */
parent = file->f_path.dentry->d_parent;
- /* not sure if this can happen: */
- if (!parent || d_really_is_negative(parent))
- goto out;
/* serialize with d_delete() */
inode_lock(d_inode(parent));
/* Make sure the object is still alive */
@@ -440,7 +437,6 @@ static int drbd_single_open(struct file *file, int (*show)(struct seq_file *, vo
if (ret)
kref_put(kref, release);
}
-out:
return ret;
}
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 7b54354976a5..4cb8f21ff4ef 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -41,6 +41,7 @@
#include <linux/backing-dev.h>
#include <linux/genhd.h>
#include <linux/idr.h>
+#include <linux/dynamic_debug.h>
#include <net/tcp.h>
#include <linux/lru_cache.h>
#include <linux/prefetch.h>
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 0501ae0c517b..100be556e613 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -1663,13 +1663,13 @@ static u32 bio_flags_to_wire(struct drbd_connection *connection,
struct bio *bio)
{
if (connection->agreed_pro_version >= 95)
- return (bio->bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
- (bio->bi_rw & REQ_FUA ? DP_FUA : 0) |
- (bio->bi_rw & REQ_PREFLUSH ? DP_FLUSH : 0) |
+ return (bio->bi_opf & REQ_SYNC ? DP_RW_SYNC : 0) |
+ (bio->bi_opf & REQ_FUA ? DP_FUA : 0) |
+ (bio->bi_opf & REQ_PREFLUSH ? DP_FLUSH : 0) |
(bio_op(bio) == REQ_OP_WRITE_SAME ? DP_WSAME : 0) |
(bio_op(bio) == REQ_OP_DISCARD ? DP_DISCARD : 0);
else
- return bio->bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
+ return bio->bi_opf & REQ_SYNC ? DP_RW_SYNC : 0;
}
/* Used to send write or TRIM aka REQ_DISCARD requests
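
The hunk above only renames the field being read (bio->bi_rw becomes bio->bi_opf); the flag-to-wire packing itself is unchanged. The following standalone sketch shows that packing pattern with made-up flag and wire-bit values, not the kernel's REQ_* or DP_* constants.

#include <stdio.h>

/* Invented request flags and wire bits, only to illustrate the pattern. */
#define TOY_SYNC	(1u << 0)
#define TOY_FUA		(1u << 1)
#define TOY_PREFLUSH	(1u << 2)

#define WIRE_SYNC	(1u << 0)
#define WIRE_FUA	(1u << 1)
#define WIRE_FLUSH	(1u << 2)

/* Translate per-request flags into protocol bits, in the spirit of
 * bio_flags_to_wire() reading bio->bi_opf. */
static unsigned int toy_flags_to_wire(unsigned int opf)
{
	return (opf & TOY_SYNC ? WIRE_SYNC : 0) |
	       (opf & TOY_FUA ? WIRE_FUA : 0) |
	       (opf & TOY_PREFLUSH ? WIRE_FLUSH : 0);
}

int main(void)
{
	printf("0x%x\n", toy_flags_to_wire(TOY_SYNC | TOY_FUA)); /* prints 0x3 */
	return 0;
}
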
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index df45713dfbe8..942384f34e22 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1564,7 +1564,7 @@ static void drbd_issue_peer_wsame(struct drbd_device *device,
* drbd_submit_peer_request()
* @device: DRBD device.
* @peer_req: peer request
- * @rw: flag field, see bio->bi_rw
+ * @rw: flag field, see bio->bi_opf
*
* May spread the pages to multiple bios,
* depending on bio_add_page restrictions.
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 66b8e4bb74d8..de279fe4e4fd 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -288,7 +288,7 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
*/
if (!ok &&
bio_op(req->master_bio) == REQ_OP_READ &&
- !(req->master_bio->bi_rw & REQ_RAHEAD) &&
+ !(req->master_bio->bi_opf & REQ_RAHEAD) &&
!list_empty(&req->tl_requests))
req->rq_state |= RQ_POSTPONED;
@@ -1137,7 +1137,7 @@ static int drbd_process_write_request(struct drbd_request *req)
* replicating, in which case there is no point. */
if (unlikely(req->i.size == 0)) {
/* The only size==0 bios we expect are empty flushes. */
- D_ASSERT(device, req->master_bio->bi_rw & REQ_PREFLUSH);
+ D_ASSERT(device, req->master_bio->bi_opf & REQ_PREFLUSH);
if (remote)
_req_mod(req, QUEUE_AS_DRBD_BARRIER);
return remote;
@@ -1176,7 +1176,7 @@ drbd_submit_req_private_bio(struct drbd_request *req)
if (bio_op(bio) != REQ_OP_READ)
type = DRBD_FAULT_DT_WR;
- else if (bio->bi_rw & REQ_RAHEAD)
+ else if (bio->bi_opf & REQ_RAHEAD)
type = DRBD_FAULT_DT_RA;
else
type = DRBD_FAULT_DT_RD;
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 35dbb3dca47e..c6755c9a0aea 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -256,7 +256,7 @@ void drbd_request_endio(struct bio *bio)
what = DISCARD_COMPLETED_WITH_ERROR;
break;
case REQ_OP_READ:
- if (bio->bi_rw & REQ_RAHEAD)
+ if (bio->bi_opf & REQ_RAHEAD)
what = READ_AHEAD_COMPLETED_WITH_ERROR;
else
what = READ_COMPLETED_WITH_ERROR;
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index c557057fe8ae..e3d8e4ced4a2 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3663,11 +3663,6 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
opened_bdev[drive] = bdev;
- if (!(mode & (FMODE_READ|FMODE_WRITE))) {
- res = -EINVAL;
- goto out;
- }
-
res = -ENXIO;
if (!floppy_track_buffer) {
@@ -3711,20 +3706,21 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
if (UFDCS->rawcmd == 1)
UFDCS->rawcmd = 2;
- UDRS->last_checked = 0;
- clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
- check_disk_change(bdev);
- if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags))
- goto out;
- if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags))
- goto out;
-
- res = -EROFS;
-
- if ((mode & FMODE_WRITE) &&
- !test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags))
- goto out;
-
+ if (!(mode & FMODE_NDELAY)) {
+ if (mode & (FMODE_READ|FMODE_WRITE)) {
+ UDRS->last_checked = 0;
+ clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
+ check_disk_change(bdev);
+ if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags))
+ goto out;
+ if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags))
+ goto out;
+ }
+ res = -EROFS;
+ if ((mode & FMODE_WRITE) &&
+ !test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags))
+ goto out;
+ }
mutex_unlock(&open_lock);
mutex_unlock(&floppy_mutex);
return 0;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 075377eee0c0..c9f2107f7095 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -510,14 +510,10 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
return 0;
}
-
-static inline int lo_rw_simple(struct loop_device *lo,
- struct request *rq, loff_t pos, bool rw)
+static int do_req_filebacked(struct loop_device *lo, struct request *rq)
{
struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
-
- if (cmd->use_aio)
- return lo_rw_aio(lo, cmd, pos, rw);
+ loff_t pos = ((loff_t) blk_rq_pos(rq) << 9) + lo->lo_offset;
/*
* lo_write_simple and lo_read_simple should have been covered
@@ -528,37 +524,30 @@ static inline int lo_rw_simple(struct loop_device *lo,
* of the req at one time. And direct read IO doesn't need to
* run flush_dcache_page().
*/
- if (rw == WRITE)
- return lo_write_simple(lo, rq, pos);
- else
- return lo_read_simple(lo, rq, pos);
-}
-
-static int do_req_filebacked(struct loop_device *lo, struct request *rq)
-{
- loff_t pos;
- int ret;
-
- pos = ((loff_t) blk_rq_pos(rq) << 9) + lo->lo_offset;
-
- if (op_is_write(req_op(rq))) {
- if (req_op(rq) == REQ_OP_FLUSH)
- ret = lo_req_flush(lo, rq);
- else if (req_op(rq) == REQ_OP_DISCARD)
- ret = lo_discard(lo, rq, pos);
- else if (lo->transfer)
- ret = lo_write_transfer(lo, rq, pos);
+ switch (req_op(rq)) {
+ case REQ_OP_FLUSH:
+ return lo_req_flush(lo, rq);
+ case REQ_OP_DISCARD:
+ return lo_discard(lo, rq, pos);
+ case REQ_OP_WRITE:
+ if (lo->transfer)
+ return lo_write_transfer(lo, rq, pos);
+ else if (cmd->use_aio)
+ return lo_rw_aio(lo, cmd, pos, WRITE);
else
- ret = lo_rw_simple(lo, rq, pos, WRITE);
-
- } else {
+ return lo_write_simple(lo, rq, pos);
+ case REQ_OP_READ:
if (lo->transfer)
- ret = lo_read_transfer(lo, rq, pos);
+ return lo_read_transfer(lo, rq, pos);
+ else if (cmd->use_aio)
+ return lo_rw_aio(lo, cmd, pos, READ);
else
- ret = lo_rw_simple(lo, rq, pos, READ);
+ return lo_read_simple(lo, rq, pos);
+ default:
+ WARN_ON_ONCE(1);
+ return -EIO;
+ break;
}
-
- return ret;
}
struct switch_request {
@@ -1659,11 +1648,15 @@ static int loop_queue_rq(struct blk_mq_hw_ctx *hctx,
if (lo->lo_state != Lo_bound)
return -EIO;
- if (lo->use_dio && (req_op(cmd->rq) != REQ_OP_FLUSH ||
- req_op(cmd->rq) == REQ_OP_DISCARD))
- cmd->use_aio = true;
- else
+ switch (req_op(cmd->rq)) {
+ case REQ_OP_FLUSH:
+ case REQ_OP_DISCARD:
cmd->use_aio = false;
+ break;
+ default:
+ cmd->use_aio = lo->use_dio;
+ break;
+ }
queue_kthread_work(&lo->worker, &cmd->work);
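
The loop changes above replace nested direction tests with a single switch on req_op() and fold the AIO decision into each branch. Here is a small sketch of that dispatch shape, using invented op names rather than the kernel's REQ_OP_* values.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative op codes; not the kernel's REQ_OP_* values. */
enum toy_op { TOY_OP_READ, TOY_OP_WRITE, TOY_OP_FLUSH, TOY_OP_DISCARD };

/* Dispatch per operation, in the spirit of the reworked do_req_filebacked(). */
static int toy_handle_request(enum toy_op op, bool use_aio)
{
	switch (op) {
	case TOY_OP_FLUSH:
		puts("flush");
		return 0;
	case TOY_OP_DISCARD:
		puts("discard");
		return 0;
	case TOY_OP_WRITE:
		puts(use_aio ? "aio write" : "simple write");
		return 0;
	case TOY_OP_READ:
		puts(use_aio ? "aio read" : "simple read");
		return 0;
	default:
		return -1;	/* unknown op: fail instead of guessing */
	}
}

int main(void)
{
	toy_handle_request(TOY_OP_WRITE, true);
	toy_handle_request(TOY_OP_FLUSH, false);
	return 0;
}
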
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 6f55b262b5ce..a9e398019f38 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -451,14 +451,9 @@ static int nbd_thread_recv(struct nbd_device *nbd, struct block_device *bdev)
sk_set_memalloc(nbd->sock->sk);
- nbd->task_recv = current;
-
ret = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
if (ret) {
dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
-
- nbd->task_recv = NULL;
-
return ret;
}
@@ -477,9 +472,6 @@ static int nbd_thread_recv(struct nbd_device *nbd, struct block_device *bdev)
nbd_size_clear(nbd, bdev);
device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
-
- nbd->task_recv = NULL;
-
return ret;
}
@@ -788,6 +780,8 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
if (!nbd->sock)
return -EINVAL;
+ /* We have to claim the device under the lock */
+ nbd->task_recv = current;
mutex_unlock(&nbd->tx_lock);
nbd_parse_flags(nbd, bdev);
@@ -796,6 +790,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
nbd_name(nbd));
if (IS_ERR(thread)) {
mutex_lock(&nbd->tx_lock);
+ nbd->task_recv = NULL;
return PTR_ERR(thread);
}
@@ -805,6 +800,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
kthread_stop(thread);
mutex_lock(&nbd->tx_lock);
+ nbd->task_recv = NULL;
sock_shutdown(nbd);
nbd_clear_que(nbd);
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 9393bc730acf..90fa4ac149db 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -1157,7 +1157,7 @@ static int pkt_start_recovery(struct packet_data *pkt)
bio_reset(pkt->bio);
pkt->bio->bi_bdev = pd->bdev;
- pkt->bio->bi_rw = REQ_WRITE;
+ bio_set_op_attrs(pkt->bio, REQ_OP_WRITE, 0);
pkt->bio->bi_iter.bi_sector = new_sector;
pkt->bio->bi_iter.bi_size = pkt->frames * CD_FRAMESIZE;
pkt->bio->bi_vcnt = pkt->frames;
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 450662055d97..6c6519f6492a 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -1937,7 +1937,7 @@ static struct ceph_osd_request *rbd_osd_req_create(
osd_req->r_callback = rbd_osd_req_callback;
osd_req->r_priv = obj_request;
- osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
+ osd_req->r_base_oloc.pool = rbd_dev->layout.pool_id;
if (ceph_oid_aprintf(&osd_req->r_base_oid, GFP_NOIO, "%s",
obj_request->object_name))
goto fail;
@@ -1991,7 +1991,7 @@ rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
osd_req->r_callback = rbd_osd_req_callback;
osd_req->r_priv = obj_request;
- osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
+ osd_req->r_base_oloc.pool = rbd_dev->layout.pool_id;
if (ceph_oid_aprintf(&osd_req->r_base_oid, GFP_NOIO, "%s",
obj_request->object_name))
goto fail;
@@ -3950,6 +3950,7 @@ static void rbd_dev_release(struct device *dev)
bool need_put = !!rbd_dev->opts;
ceph_oid_destroy(&rbd_dev->header_oid);
+ ceph_oloc_destroy(&rbd_dev->header_oloc);
rbd_put_client(rbd_dev->rbd_client);
rbd_spec_put(rbd_dev->spec);
@@ -3995,10 +3996,11 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
/* Initialize the layout used for all rbd requests */
- rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
- rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
- rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
- rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);
+ rbd_dev->layout.stripe_unit = 1 << RBD_MAX_OBJ_ORDER;
+ rbd_dev->layout.stripe_count = 1;
+ rbd_dev->layout.object_size = 1 << RBD_MAX_OBJ_ORDER;
+ rbd_dev->layout.pool_id = spec->pool_id;
+ RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL);
/*
* If this is a mapping rbd_dev (as opposed to a parent one),
@@ -5187,7 +5189,7 @@ static int rbd_dev_header_name(struct rbd_device *rbd_dev)
rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
- rbd_dev->header_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
+ rbd_dev->header_oloc.pool = rbd_dev->layout.pool_id;
if (rbd_dev->image_format == 1)
ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
spec->image_name, RBD_SUFFIX);
@@ -5335,15 +5337,6 @@ static ssize_t do_rbd_add(struct bus_type *bus,
}
spec->pool_id = (u64)rc;
- /* The ceph file layout needs to fit pool id in 32 bits */
-
- if (spec->pool_id > (u64)U32_MAX) {
- rbd_warn(NULL, "pool id too large (%llu > %u)",
- (unsigned long long)spec->pool_id, U32_MAX);
- rc = -EIO;
- goto err_out_client;
- }
-
rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts);
if (!rbd_dev) {
rc = -ENOMEM;
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index d0a3e6d4515f..be90e15854ed 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -535,7 +535,7 @@ static blk_qc_t mm_make_request(struct request_queue *q, struct bio *bio)
*card->biotail = bio;
bio->bi_next = NULL;
card->biotail = &bio->bi_next;
- if (bio->bi_rw & REQ_SYNC || !mm_check_plugged(card))
+ if (bio->bi_opf & REQ_SYNC || !mm_check_plugged(card))
activate(card);
spin_unlock_irq(&card->lock);
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 1523e05c46fc..93b1aaa5ba3b 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -391,22 +391,16 @@ static int init_vq(struct virtio_blk *vblk)
num_vqs = 1;
vblk->vqs = kmalloc(sizeof(*vblk->vqs) * num_vqs, GFP_KERNEL);
- if (!vblk->vqs) {
- err = -ENOMEM;
- goto out;
- }
+ if (!vblk->vqs)
+ return -ENOMEM;
names = kmalloc(sizeof(*names) * num_vqs, GFP_KERNEL);
- if (!names)
- goto err_names;
-
callbacks = kmalloc(sizeof(*callbacks) * num_vqs, GFP_KERNEL);
- if (!callbacks)
- goto err_callbacks;
-
vqs = kmalloc(sizeof(*vqs) * num_vqs, GFP_KERNEL);
- if (!vqs)
- goto err_vqs;
+ if (!names || !callbacks || !vqs) {
+ err = -ENOMEM;
+ goto out;
+ }
for (i = 0; i < num_vqs; i++) {
callbacks[i] = virtblk_done;
@@ -417,7 +411,7 @@ static int init_vq(struct virtio_blk *vblk)
/* Discover virtqueues and write information to configuration. */
err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names);
if (err)
- goto err_find_vqs;
+ goto out;
for (i = 0; i < num_vqs; i++) {
spin_lock_init(&vblk->vqs[i].lock);
@@ -425,16 +419,12 @@ static int init_vq(struct virtio_blk *vblk)
}
vblk->num_vqs = num_vqs;
- err_find_vqs:
+out:
kfree(vqs);
- err_vqs:
kfree(callbacks);
- err_callbacks:
kfree(names);
- err_names:
if (err)
kfree(vblk->vqs);
- out:
return err;
}
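
The virtio_blk rework above collapses four error labels into one; this works because kfree(NULL) is a no-op, so a single cleanup path can release whichever temporary arrays were actually allocated. The same idea in plain C with free(), which likewise ignores NULL; the function and names here are illustrative only.

#include <stdio.h>
#include <stdlib.h>

/* One exit label releases whatever subset of temporaries got allocated,
 * relying on free(NULL) being a no-op (as kfree(NULL) is in the kernel). */
static int toy_init(size_t n)
{
	int *a = malloc(n * sizeof(*a));
	int *b = malloc(n * sizeof(*b));
	int *c = malloc(n * sizeof(*c));
	int err = 0;

	if (!a || !b || !c) {
		err = -1;
		goto out;
	}

	/* ... use the temporary arrays here ... */

out:
	free(a);
	free(b);
	free(c);
	return err;
}

int main(void)
{
	return toy_init(4) ? 1 : 0;
}
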
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index be4fea6a5dd3..88ef6d4729b4 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -189,6 +189,8 @@ struct blkfront_info
struct mutex mutex;
struct xenbus_device *xbdev;
struct gendisk *gd;
+ u16 sector_size;
+ unsigned int physical_sector_size;
int vdevice;
blkif_vdev_t handle;
enum blkif_state connected;
@@ -910,9 +912,45 @@ static struct blk_mq_ops blkfront_mq_ops = {
.map_queue = blk_mq_map_queue,
};
+static void blkif_set_queue_limits(struct blkfront_info *info)
+{
+ struct request_queue *rq = info->rq;
+ struct gendisk *gd = info->gd;
+ unsigned int segments = info->max_indirect_segments ? :
+ BLKIF_MAX_SEGMENTS_PER_REQUEST;
+
+ queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
+
+ if (info->feature_discard) {
+ queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq);
+ blk_queue_max_discard_sectors(rq, get_capacity(gd));
+ rq->limits.discard_granularity = info->discard_granularity;
+ rq->limits.discard_alignment = info->discard_alignment;
+ if (info->feature_secdiscard)
+ queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, rq);
+ }
+
+ /* Hard sector size and max sectors impersonate the equiv. hardware. */
+ blk_queue_logical_block_size(rq, info->sector_size);
+ blk_queue_physical_block_size(rq, info->physical_sector_size);
+ blk_queue_max_hw_sectors(rq, (segments * XEN_PAGE_SIZE) / 512);
+
+ /* Each segment in a request is up to an aligned page in size. */
+ blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
+ blk_queue_max_segment_size(rq, PAGE_SIZE);
+
+ /* Ensure a merged request will fit in a single I/O ring slot. */
+ blk_queue_max_segments(rq, segments / GRANTS_PER_PSEG);
+
+ /* Make sure buffer addresses are sector-aligned. */
+ blk_queue_dma_alignment(rq, 511);
+
+ /* Make sure we don't use bounce buffers. */
+ blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY);
+}
+
static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
- unsigned int physical_sector_size,
- unsigned int segments)
+ unsigned int physical_sector_size)
{
struct request_queue *rq;
struct blkfront_info *info = gd->private_data;
@@ -944,36 +982,11 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
}
rq->queuedata = info;
- queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
-
- if (info->feature_discard) {
- queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq);
- blk_queue_max_discard_sectors(rq, get_capacity(gd));
- rq->limits.discard_granularity = info->discard_granularity;
- rq->limits.discard_alignment = info->discard_alignment;
- if (info->feature_secdiscard)
- queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, rq);
- }
-
- /* Hard sector size and max sectors impersonate the equiv. hardware. */
- blk_queue_logical_block_size(rq, sector_size);
- blk_queue_physical_block_size(rq, physical_sector_size);
- blk_queue_max_hw_sectors(rq, (segments * XEN_PAGE_SIZE) / 512);
-
- /* Each segment in a request is up to an aligned page in size. */
- blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
- blk_queue_max_segment_size(rq, PAGE_SIZE);
-
- /* Ensure a merged request will fit in a single I/O ring slot. */
- blk_queue_max_segments(rq, segments / GRANTS_PER_PSEG);
-
- /* Make sure buffer addresses are sector-aligned. */
- blk_queue_dma_alignment(rq, 511);
-
- /* Make sure we don't use bounce buffers. */
- blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY);
-
- gd->queue = rq;
+ info->rq = gd->queue = rq;
+ info->gd = gd;
+ info->sector_size = sector_size;
+ info->physical_sector_size = physical_sector_size;
+ blkif_set_queue_limits(info);
return 0;
}
@@ -1136,16 +1149,11 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
gd->private_data = info;
set_capacity(gd, capacity);
- if (xlvbd_init_blk_queue(gd, sector_size, physical_sector_size,
- info->max_indirect_segments ? :
- BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
+ if (xlvbd_init_blk_queue(gd, sector_size, physical_sector_size)) {
del_gendisk(gd);
goto release;
}
- info->rq = gd->queue;
- info->gd = gd;
-
xlvbd_flush(info);
if (vdisk_info & VDISK_READONLY)
@@ -1315,7 +1323,7 @@ free_shadow:
rinfo->ring_ref[i] = GRANT_INVALID_REF;
}
}
- free_pages((unsigned long)rinfo->ring.sring, get_order(info->nr_ring_pages * PAGE_SIZE));
+ free_pages((unsigned long)rinfo->ring.sring, get_order(info->nr_ring_pages * XEN_PAGE_SIZE));
rinfo->ring.sring = NULL;
if (rinfo->irq)
@@ -2007,8 +2015,10 @@ static int blkif_recover(struct blkfront_info *info)
struct split_bio *split_bio;
blkfront_gather_backend_features(info);
+ /* Reset limits changed by blk_mq_update_nr_hw_queues(). */
+ blkif_set_queue_limits(info);
segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST;
- blk_queue_max_segments(info->rq, segs);
+ blk_queue_max_segments(info->rq, segs / GRANTS_PER_PSEG);
for (r_index = 0; r_index < info->nr_rings; r_index++) {
struct blkfront_ring_info *rinfo = &info->rinfo[r_index];
@@ -2432,7 +2442,7 @@ static void blkfront_connect(struct blkfront_info *info)
if (err) {
xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
info->xbdev->otherend);
- return;
+ goto fail;
}
xenbus_switch_state(info->xbdev, XenbusStateConnected);
@@ -2445,6 +2455,11 @@ static void blkfront_connect(struct blkfront_info *info)
device_add_disk(&info->xbdev->dev, info->gd);
info->is_ready = 1;
+ return;
+
+fail:
+ blkif_free(info, 0);
+ return;
}
/**
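
The xen-blkfront changes above gather all queue-limit setup into blkif_set_queue_limits() and cache sector_size/physical_sector_size in blkfront_info, so identical limits can be re-applied after blk_mq_update_nr_hw_queues() during recovery. A sketch of that "one helper, applied at init and again on recovery" shape follows; the struct and values are invented stand-ins, not the driver's real fields.

#include <stdio.h>

/* Invented stand-in for the fields the patch caches in blkfront_info. */
struct toy_info {
	unsigned int sector_size;
	unsigned int max_segments;
};

/* All limits live in one helper so initial setup and post-migration recovery
 * apply identical settings. */
static void toy_set_limits(const struct toy_info *info)
{
	printf("logical block size: %u\n", info->sector_size);
	printf("max segments per request: %u\n", info->max_segments);
}

int main(void)
{
	struct toy_info info = { .sector_size = 512, .max_segments = 32 };

	toy_set_limits(&info);	/* initial queue setup */
	/* ... queues are torn down and reallocated on migration ... */
	toy_set_limits(&info);	/* recovery re-applies the same limits */
	return 0;
}
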
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 7454cf188c8e..04365b17ee67 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -843,15 +843,16 @@ static void zram_bio_discard(struct zram *zram, u32 index,
}
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
- int offset, int rw)
+ int offset, bool is_write)
{
unsigned long start_time = jiffies;
+ int rw_acct = is_write ? REQ_OP_WRITE : REQ_OP_READ;
int ret;
- generic_start_io_acct(rw, bvec->bv_len >> SECTOR_SHIFT,
+ generic_start_io_acct(rw_acct, bvec->bv_len >> SECTOR_SHIFT,
&zram->disk->part0);
- if (rw == READ) {
+ if (!is_write) {
atomic64_inc(&zram->stats.num_reads);
ret = zram_bvec_read(zram, bvec, index, offset);
} else {
@@ -859,10 +860,10 @@ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
ret = zram_bvec_write(zram, bvec, index, offset);
}
- generic_end_io_acct(rw, &zram->disk->part0, start_time);
+ generic_end_io_acct(rw_acct, &zram->disk->part0, start_time);
if (unlikely(ret)) {
- if (rw == READ)
+ if (!is_write)
atomic64_inc(&zram->stats.failed_reads);
else
atomic64_inc(&zram->stats.failed_writes);
@@ -873,7 +874,7 @@ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
static void __zram_make_request(struct zram *zram, struct bio *bio)
{
- int offset, rw;
+ int offset;
u32 index;
struct bio_vec bvec;
struct bvec_iter iter;
@@ -888,7 +889,6 @@ static void __zram_make_request(struct zram *zram, struct bio *bio)
return;
}
- rw = bio_data_dir(bio);
bio_for_each_segment(bvec, bio, iter) {
int max_transfer_size = PAGE_SIZE - offset;
@@ -903,15 +903,18 @@ static void __zram_make_request(struct zram *zram, struct bio *bio)
bv.bv_len = max_transfer_size;
bv.bv_offset = bvec.bv_offset;
- if (zram_bvec_rw(zram, &bv, index, offset, rw) < 0)
+ if (zram_bvec_rw(zram, &bv, index, offset,
+ op_is_write(bio_op(bio))) < 0)
goto out;
bv.bv_len = bvec.bv_len - max_transfer_size;
bv.bv_offset += max_transfer_size;
- if (zram_bvec_rw(zram, &bv, index + 1, 0, rw) < 0)
+ if (zram_bvec_rw(zram, &bv, index + 1, 0,
+ op_is_write(bio_op(bio))) < 0)
goto out;
} else
- if (zram_bvec_rw(zram, &bvec, index, offset, rw) < 0)
+ if (zram_bvec_rw(zram, &bvec, index, offset,
+ op_is_write(bio_op(bio))) < 0)
goto out;
update_position(&index, &offset, &bvec);
@@ -968,7 +971,7 @@ static void zram_slot_free_notify(struct block_device *bdev,
}
static int zram_rw_page(struct block_device *bdev, sector_t sector,
- struct page *page, int rw)
+ struct page *page, bool is_write)
{
int offset, err = -EIO;
u32 index;
@@ -992,7 +995,7 @@ static int zram_rw_page(struct block_device *bdev, sector_t sector,
bv.bv_len = PAGE_SIZE;
bv.bv_offset = 0;
- err = zram_bvec_rw(zram, &bv, index, offset, rw);
+ err = zram_bvec_rw(zram, &bv, index, offset, is_write);
put_zram:
zram_meta_put(zram);
out:
@@ -1005,7 +1008,7 @@ out:
* (e.g., SetPageError, set_page_dirty and extra works).
*/
if (err == 0)
- page_endio(page, rw, 0);
+ page_endio(page, is_write, 0);
return err;
}