author | Kent Overstreet <kmo@daterainc.com> | 2013-10-11 15:44:27 -0700
committer | Kent Overstreet <kmo@daterainc.com> | 2013-11-23 22:33:47 -0800
commit | 4f024f3797c43cb4b73cd2c50cec728842d0e49e (patch)
tree | 3aedcab02d2ad723a189d01934d1e94fec7a54e1 /drivers/md
parent | ed9c47bebeeea4a468b07cfd745c690190f8014c (diff)
block: Abstract out bvec iterator
Immutable biovecs are going to require an explicit iterator. To
implement immutable bvecs, a later patch is going to add a bi_bvec_done
member to the new struct bvec_iter; for now, this patch effectively just
renames things: bi_sector, bi_size and bi_idx move out of struct bio and
into the embedded bio->bi_iter.
Signed-off-by: Kent Overstreet <kmo@daterainc.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: "Ed L. Cashin" <ecashin@coraid.com>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Lars Ellenberg <drbd-dev@lists.linbit.com>
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: Matthew Wilcox <willy@linux.intel.com>
Cc: Geoff Levand <geoff@infradead.org>
Cc: Yehuda Sadeh <yehuda@inktank.com>
Cc: Sage Weil <sage@inktank.com>
Cc: Alex Elder <elder@inktank.com>
Cc: ceph-devel@vger.kernel.org
Cc: Joshua Morris <josh.h.morris@us.ibm.com>
Cc: Philip Kelleher <pjk1939@linux.vnet.ibm.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Cc: Neil Brown <neilb@suse.de>
Cc: Alasdair Kergon <agk@redhat.com>
Cc: Mike Snitzer <snitzer@redhat.com>
Cc: dm-devel@redhat.com
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: linux390@de.ibm.com
Cc: Boaz Harrosh <bharrosh@panasas.com>
Cc: Benny Halevy <bhalevy@tonian.com>
Cc: "James E.J. Bottomley" <JBottomley@parallels.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: "Nicholas A. Bellinger" <nab@linux-iscsi.org>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Chris Mason <chris.mason@fusionio.com>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Andreas Dilger <adilger.kernel@dilger.ca>
Cc: Jaegeuk Kim <jaegeuk.kim@samsung.com>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Dave Kleikamp <shaggy@kernel.org>
Cc: Joern Engel <joern@logfs.org>
Cc: Prasad Joshi <prasadjoshi.linux@gmail.com>
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
Cc: KONISHI Ryusuke <konishi.ryusuke@lab.ntt.co.jp>
Cc: Mark Fasheh <mfasheh@suse.com>
Cc: Joel Becker <jlbec@evilplan.org>
Cc: Ben Myers <bpm@sgi.com>
Cc: xfs@oss.sgi.com
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Len Brown <len.brown@intel.com>
Cc: Pavel Machek <pavel@ucw.cz>
Cc: "Rafael J. Wysocki" <rjw@sisk.pl>
Cc: Herton Ronaldo Krzesinski <herton.krzesinski@canonical.com>
Cc: Ben Hutchings <ben@decadent.org.uk>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Guo Chao <yan@linux.vnet.ibm.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Asai Thambi S P <asamymuthupa@micron.com>
Cc: Selvan Mani <smani@micron.com>
Cc: Sam Bradshaw <sbradshaw@micron.com>
Cc: Wei Yongjun <yongjun_wei@trendmicro.com.cn>
Cc: "Roger Pau Monné" <roger.pau@citrix.com>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Cc: Ian Campbell <Ian.Campbell@citrix.com>
Cc: Sebastian Ott <sebott@linux.vnet.ibm.com>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Jiang Liu <jiang.liu@huawei.com>
Cc: Nitin Gupta <ngupta@vflare.org>
Cc: Jerome Marchand <jmarchand@redhat.com>
Cc: Joe Perches <joe@perches.com>
Cc: Peng Tao <tao.peng@emc.com>
Cc: Andy Adamson <andros@netapp.com>
Cc: fanchaoting <fanchaoting@cn.fujitsu.com>
Cc: Jie Liu <jeff.liu@oracle.com>
Cc: Sunil Mushran <sunil.mushran@gmail.com>
Cc: "Martin K. Petersen" <martin.petersen@oracle.com>
Cc: Namjae Jeon <namjae.jeon@samsung.com>
Cc: Pankaj Kumar <pankaj.km@samsung.com>
Cc: Dan Magenheimer <dan.magenheimer@oracle.com>
Cc: Mel Gorman <mgorman@suse.de>
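Concretely, the rename gathers the bio's position fields into one embedded iterator. The iterator itself is defined in the block-layer headers, outside the drivers/md slice shown below, so what follows is a minimal sketch reconstructed from how the hunks use it; the field comments are my gloss, not text from the patch:

```c
/* Sketch of the iterator this patch introduces, reconstructed from the
 * usage visible in the drivers/md hunks below; the authoritative
 * definition lives in the block-layer headers, not in this diff. */
struct bvec_iter {
	sector_t	bi_sector;	/* device address, in 512-byte sectors */
	unsigned int	bi_size;	/* residual I/O count, in bytes */
	unsigned int	bi_idx;		/* current index into bi_io_vec[] */
	/* bi_bvec_done is added by a later patch, per the message above */
};

struct bio {
	/* ... */
	struct bvec_iter bi_iter;	/* replaces bi_sector, bi_size, bi_idx */
	/* ... */
};
```

Grouping the cursor this way is what later lets the block layer advance a bio through its bio_vec array without modifying the array itself, which is the point of immutable biovecs.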
Diffstat (limited to 'drivers/md')
35 files changed, 333 insertions(+), 300 deletions(-)
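Every hunk in the diff below applies the same mechanical substitution, so one example stands for the rest; this before/after pair is taken from the bch_bio_split() hunk in drivers/md/bcache/io.c:

```c
/* Before this patch: split state is advanced on the bio's own fields. */
bio->bi_sector += sectors;
bio->bi_size   -= sectors << 9;
bio->bi_idx     = idx;

/* After: the same state lives in the embedded struct bvec_iter. */
bio->bi_iter.bi_sector += sectors;
bio->bi_iter.bi_size   -= sectors << 9;
bio->bi_iter.bi_idx     = idx;
```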
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index 5e2765aadce1..038a6d2aced3 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -299,7 +299,7 @@ void bch_btree_node_read(struct btree *b) bio = bch_bbio_alloc(b->c); bio->bi_rw = REQ_META|READ_SYNC; - bio->bi_size = KEY_SIZE(&b->key) << 9; + bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9; bio->bi_end_io = btree_node_read_endio; bio->bi_private = &cl; @@ -395,7 +395,7 @@ static void do_btree_node_write(struct btree *b) b->bio->bi_end_io = btree_node_write_endio; b->bio->bi_private = cl; b->bio->bi_rw = REQ_META|WRITE_SYNC|REQ_FUA; - b->bio->bi_size = set_blocks(i, b->c) * block_bytes(b->c); + b->bio->bi_iter.bi_size = set_blocks(i, b->c) * block_bytes(b->c); bch_bio_map(b->bio, i); /* diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c index 264fcfbd6290..92b3fd468a03 100644 --- a/drivers/md/bcache/debug.c +++ b/drivers/md/bcache/debug.c @@ -195,7 +195,7 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio) dc->disk.c, "verify failed at dev %s sector %llu", bdevname(dc->bdev, name), - (uint64_t) bio->bi_sector); + (uint64_t) bio->bi_iter.bi_sector); kunmap_atomic(p1); } diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c index 9056632995b1..cc4ba2da5fb6 100644 --- a/drivers/md/bcache/io.c +++ b/drivers/md/bcache/io.c @@ -21,18 +21,18 @@ static void bch_bi_idx_hack_endio(struct bio *bio, int error) static void bch_generic_make_request_hack(struct bio *bio) { - if (bio->bi_idx) { + if (bio->bi_iter.bi_idx) { struct bio *clone = bio_alloc(GFP_NOIO, bio_segments(bio)); memcpy(clone->bi_io_vec, bio_iovec(bio), bio_segments(bio) * sizeof(struct bio_vec)); - clone->bi_sector = bio->bi_sector; + clone->bi_iter.bi_sector = bio->bi_iter.bi_sector; clone->bi_bdev = bio->bi_bdev; clone->bi_rw = bio->bi_rw; clone->bi_vcnt = bio_segments(bio); - clone->bi_size = bio->bi_size; + clone->bi_iter.bi_size = bio->bi_iter.bi_size; clone->bi_private = bio; clone->bi_end_io = bch_bi_idx_hack_endio; @@ -72,7 +72,7 @@ static void bch_generic_make_request_hack(struct bio *bio) struct bio *bch_bio_split(struct bio *bio, int sectors, gfp_t gfp, struct bio_set *bs) { - unsigned idx = bio->bi_idx, vcnt = 0, nbytes = sectors << 9; + unsigned idx = bio->bi_iter.bi_idx, vcnt = 0, nbytes = sectors << 9; struct bio_vec *bv; struct bio *ret = NULL; @@ -90,7 +90,7 @@ struct bio *bch_bio_split(struct bio *bio, int sectors, } bio_for_each_segment(bv, bio, idx) { - vcnt = idx - bio->bi_idx; + vcnt = idx - bio->bi_iter.bi_idx; if (!nbytes) { ret = bio_alloc_bioset(gfp, vcnt, bs); @@ -119,15 +119,15 @@ struct bio *bch_bio_split(struct bio *bio, int sectors, } out: ret->bi_bdev = bio->bi_bdev; - ret->bi_sector = bio->bi_sector; - ret->bi_size = sectors << 9; + ret->bi_iter.bi_sector = bio->bi_iter.bi_sector; + ret->bi_iter.bi_size = sectors << 9; ret->bi_rw = bio->bi_rw; ret->bi_vcnt = vcnt; ret->bi_max_vecs = vcnt; - bio->bi_sector += sectors; - bio->bi_size -= sectors << 9; - bio->bi_idx = idx; + bio->bi_iter.bi_sector += sectors; + bio->bi_iter.bi_size -= sectors << 9; + bio->bi_iter.bi_idx = idx; if (bio_integrity(bio)) { if (bio_integrity_clone(ret, bio, gfp)) { @@ -162,7 +162,7 @@ static unsigned bch_bio_max_sectors(struct bio *bio) bio_for_each_segment(bv, bio, i) { struct bvec_merge_data bvm = { .bi_bdev = bio->bi_bdev, - .bi_sector = bio->bi_sector, + .bi_sector = bio->bi_iter.bi_sector, .bi_size = ret << 9, .bi_rw = bio->bi_rw, }; @@ -272,8 +272,8 @@ void __bch_submit_bbio(struct bio *bio, struct 
cache_set *c) { struct bbio *b = container_of(bio, struct bbio, bio); - bio->bi_sector = PTR_OFFSET(&b->key, 0); - bio->bi_bdev = PTR_CACHE(c, &b->key, 0)->bdev; + bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0); + bio->bi_bdev = PTR_CACHE(c, &b->key, 0)->bdev; b->submit_time_us = local_clock_us(); closure_bio_submit(bio, bio->bi_private, PTR_CACHE(c, &b->key, 0)); diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c index ecdaa671bd50..7eafdf09a0ae 100644 --- a/drivers/md/bcache/journal.c +++ b/drivers/md/bcache/journal.c @@ -51,10 +51,10 @@ reread: left = ca->sb.bucket_size - offset; len = min_t(unsigned, left, PAGE_SECTORS * 8); bio_reset(bio); - bio->bi_sector = bucket + offset; + bio->bi_iter.bi_sector = bucket + offset; bio->bi_bdev = ca->bdev; bio->bi_rw = READ; - bio->bi_size = len << 9; + bio->bi_iter.bi_size = len << 9; bio->bi_end_io = journal_read_endio; bio->bi_private = &cl; @@ -437,13 +437,13 @@ static void do_journal_discard(struct cache *ca) atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT); bio_init(bio); - bio->bi_sector = bucket_to_sector(ca->set, + bio->bi_iter.bi_sector = bucket_to_sector(ca->set, ca->sb.d[ja->discard_idx]); bio->bi_bdev = ca->bdev; bio->bi_rw = REQ_WRITE|REQ_DISCARD; bio->bi_max_vecs = 1; bio->bi_io_vec = bio->bi_inline_vecs; - bio->bi_size = bucket_bytes(ca); + bio->bi_iter.bi_size = bucket_bytes(ca); bio->bi_end_io = journal_discard_endio; closure_get(&ca->set->cl); @@ -608,10 +608,10 @@ static void journal_write_unlocked(struct closure *cl) atomic_long_add(sectors, &ca->meta_sectors_written); bio_reset(bio); - bio->bi_sector = PTR_OFFSET(k, i); + bio->bi_iter.bi_sector = PTR_OFFSET(k, i); bio->bi_bdev = ca->bdev; bio->bi_rw = REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH|REQ_FUA; - bio->bi_size = sectors << 9; + bio->bi_iter.bi_size = sectors << 9; bio->bi_end_io = journal_write_endio; bio->bi_private = w; diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c index 7c1275e66025..581f95df8265 100644 --- a/drivers/md/bcache/movinggc.c +++ b/drivers/md/bcache/movinggc.c @@ -82,7 +82,7 @@ static void moving_init(struct moving_io *io) bio_get(bio); bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0)); - bio->bi_size = KEY_SIZE(&io->w->key) << 9; + bio->bi_iter.bi_size = KEY_SIZE(&io->w->key) << 9; bio->bi_max_vecs = DIV_ROUND_UP(KEY_SIZE(&io->w->key), PAGE_SECTORS); bio->bi_private = &io->cl; @@ -98,7 +98,7 @@ static void write_moving(struct closure *cl) if (!op->error) { moving_init(io); - io->bio.bio.bi_sector = KEY_START(&io->w->key); + io->bio.bio.bi_iter.bi_sector = KEY_START(&io->w->key); op->write_prio = 1; op->bio = &io->bio.bio; diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index 78bab4154e97..47a9bbc75124 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -261,7 +261,7 @@ static void bch_data_invalidate(struct closure *cl) struct bio *bio = op->bio; pr_debug("invalidating %i sectors from %llu", - bio_sectors(bio), (uint64_t) bio->bi_sector); + bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector); while (bio_sectors(bio)) { unsigned sectors = min(bio_sectors(bio), @@ -270,11 +270,11 @@ static void bch_data_invalidate(struct closure *cl) if (bch_keylist_realloc(&op->insert_keys, 0, op->c)) goto out; - bio->bi_sector += sectors; - bio->bi_size -= sectors << 9; + bio->bi_iter.bi_sector += sectors; + bio->bi_iter.bi_size -= sectors << 9; bch_keylist_add(&op->insert_keys, - &KEY(op->inode, bio->bi_sector, sectors)); + &KEY(op->inode, bio->bi_iter.bi_sector, 
sectors)); } op->insert_data_done = true; @@ -364,7 +364,7 @@ static void bch_data_insert_start(struct closure *cl) k = op->insert_keys.top; bkey_init(k); SET_KEY_INODE(k, op->inode); - SET_KEY_OFFSET(k, bio->bi_sector); + SET_KEY_OFFSET(k, bio->bi_iter.bi_sector); if (!bch_alloc_sectors(op->c, k, bio_sectors(bio), op->write_point, op->write_prio, @@ -522,7 +522,7 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio) (bio->bi_rw & REQ_WRITE))) goto skip; - if (bio->bi_sector & (c->sb.block_size - 1) || + if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) || bio_sectors(bio) & (c->sb.block_size - 1)) { pr_debug("skipping unaligned io"); goto skip; @@ -546,8 +546,8 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio) spin_lock(&dc->io_lock); - hlist_for_each_entry(i, iohash(dc, bio->bi_sector), hash) - if (i->last == bio->bi_sector && + hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash) + if (i->last == bio->bi_iter.bi_sector && time_before(jiffies, i->jiffies)) goto found; @@ -556,8 +556,8 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio) add_sequential(task); i->sequential = 0; found: - if (i->sequential + bio->bi_size > i->sequential) - i->sequential += bio->bi_size; + if (i->sequential + bio->bi_iter.bi_size > i->sequential) + i->sequential += bio->bi_iter.bi_size; i->last = bio_end_sector(bio); i->jiffies = jiffies + msecs_to_jiffies(5000); @@ -650,15 +650,15 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k) struct bkey *bio_key; unsigned ptr; - if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_sector, 0)) <= 0) + if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0) return MAP_CONTINUE; if (KEY_INODE(k) != s->iop.inode || - KEY_START(k) > bio->bi_sector) { + KEY_START(k) > bio->bi_iter.bi_sector) { unsigned bio_sectors = bio_sectors(bio); unsigned sectors = KEY_INODE(k) == s->iop.inode ? 
min_t(uint64_t, INT_MAX, - KEY_START(k) - bio->bi_sector) + KEY_START(k) - bio->bi_iter.bi_sector) : INT_MAX; int ret = s->d->cache_miss(b, s, bio, sectors); @@ -681,13 +681,13 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k) s->read_dirty_data = true; n = bch_bio_split(bio, min_t(uint64_t, INT_MAX, - KEY_OFFSET(k) - bio->bi_sector), + KEY_OFFSET(k) - bio->bi_iter.bi_sector), GFP_NOIO, s->d->bio_split); bio_key = &container_of(n, struct bbio, bio)->key; bch_bkey_copy_single_ptr(bio_key, k, ptr); - bch_cut_front(&KEY(s->iop.inode, n->bi_sector, 0), bio_key); + bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key); bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key); n->bi_end_io = bch_cache_read_endio; @@ -714,7 +714,7 @@ static void cache_lookup(struct closure *cl) struct bio *bio = &s->bio.bio; int ret = bch_btree_map_keys(&s->op, s->iop.c, - &KEY(s->iop.inode, bio->bi_sector, 0), + &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0), cache_lookup_fn, MAP_END_KEY); if (ret == -EAGAIN) continue_at(cl, cache_lookup, bcache_wq); @@ -872,9 +872,9 @@ static void cached_dev_read_done(struct closure *cl) if (s->iop.bio) { bio_reset(s->iop.bio); - s->iop.bio->bi_sector = s->cache_miss->bi_sector; + s->iop.bio->bi_iter.bi_sector = s->cache_miss->bi_iter.bi_sector; s->iop.bio->bi_bdev = s->cache_miss->bi_bdev; - s->iop.bio->bi_size = s->insert_bio_sectors << 9; + s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9; bch_bio_map(s->iop.bio, NULL); bio_copy_data(s->cache_miss, s->iop.bio); @@ -937,7 +937,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s, s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada); s->iop.replace_key = KEY(s->iop.inode, - bio->bi_sector + s->insert_bio_sectors, + bio->bi_iter.bi_sector + s->insert_bio_sectors, s->insert_bio_sectors); ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key); @@ -957,9 +957,9 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s, if (!cache_bio) goto out_submit; - cache_bio->bi_sector = miss->bi_sector; - cache_bio->bi_bdev = miss->bi_bdev; - cache_bio->bi_size = s->insert_bio_sectors << 9; + cache_bio->bi_iter.bi_sector = miss->bi_iter.bi_sector; + cache_bio->bi_bdev = miss->bi_bdev; + cache_bio->bi_iter.bi_size = s->insert_bio_sectors << 9; cache_bio->bi_end_io = request_endio; cache_bio->bi_private = &s->cl; @@ -1009,7 +1009,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s) { struct closure *cl = &s->cl; struct bio *bio = &s->bio.bio; - struct bkey start = KEY(dc->disk.id, bio->bi_sector, 0); + struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0); struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0); bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end); @@ -1104,13 +1104,13 @@ static void cached_dev_make_request(struct request_queue *q, struct bio *bio) part_stat_unlock(); bio->bi_bdev = dc->bdev; - bio->bi_sector += dc->sb.data_offset; + bio->bi_iter.bi_sector += dc->sb.data_offset; if (cached_dev_get(dc)) { s = search_alloc(bio, d); trace_bcache_request_start(s->d, bio); - if (!bio->bi_size) { + if (!bio->bi_iter.bi_size) { /* * can't call bch_journal_meta from under * generic_make_request @@ -1197,9 +1197,9 @@ static int flash_dev_cache_miss(struct btree *b, struct search *s, sectors -= j; } - bio_advance(bio, min(sectors << 9, bio->bi_size)); + bio_advance(bio, min(sectors << 9, bio->bi_iter.bi_size)); - if (!bio->bi_size) + if (!bio->bi_iter.bi_size) return 
MAP_DONE; return MAP_CONTINUE; @@ -1233,7 +1233,7 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio) trace_bcache_request_start(s->d, bio); - if (!bio->bi_size) { + if (!bio->bi_iter.bi_size) { /* * can't call bch_journal_meta from under * generic_make_request @@ -1243,7 +1243,7 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio) bcache_wq); } else if (rw) { bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, - &KEY(d->id, bio->bi_sector, 0), + &KEY(d->id, bio->bi_iter.bi_sector, 0), &KEY(d->id, bio_end_sector(bio), 0)); s->iop.bypass = (bio->bi_rw & REQ_DISCARD) != 0; diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 1d9ee67d14ec..60fb6044b953 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -233,9 +233,9 @@ static void __write_super(struct cache_sb *sb, struct bio *bio) struct cache_sb *out = page_address(bio->bi_io_vec[0].bv_page); unsigned i; - bio->bi_sector = SB_SECTOR; - bio->bi_rw = REQ_SYNC|REQ_META; - bio->bi_size = SB_SIZE; + bio->bi_iter.bi_sector = SB_SECTOR; + bio->bi_rw = REQ_SYNC|REQ_META; + bio->bi_iter.bi_size = SB_SIZE; bch_bio_map(bio, NULL); out->offset = cpu_to_le64(sb->offset); @@ -347,7 +347,7 @@ static void uuid_io(struct cache_set *c, unsigned long rw, struct bio *bio = bch_bbio_alloc(c); bio->bi_rw = REQ_SYNC|REQ_META|rw; - bio->bi_size = KEY_SIZE(k) << 9; + bio->bi_iter.bi_size = KEY_SIZE(k) << 9; bio->bi_end_io = uuid_endio; bio->bi_private = cl; @@ -503,10 +503,10 @@ static void prio_io(struct cache *ca, uint64_t bucket, unsigned long rw) closure_init_stack(cl); - bio->bi_sector = bucket * ca->sb.bucket_size; - bio->bi_bdev = ca->bdev; - bio->bi_rw = REQ_SYNC|REQ_META|rw; - bio->bi_size = bucket_bytes(ca); + bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size; + bio->bi_bdev = ca->bdev; + bio->bi_rw = REQ_SYNC|REQ_META|rw; + bio->bi_iter.bi_size = bucket_bytes(ca); bio->bi_end_io = prio_endio; bio->bi_private = ca; diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c index 462214eeacbe..c57621e49dc0 100644 --- a/drivers/md/bcache/util.c +++ b/drivers/md/bcache/util.c @@ -218,10 +218,10 @@ uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done) void bch_bio_map(struct bio *bio, void *base) { - size_t size = bio->bi_size; + size_t size = bio->bi_iter.bi_size; struct bio_vec *bv = bio->bi_io_vec; - BUG_ON(!bio->bi_size); + BUG_ON(!bio->bi_iter.bi_size); BUG_ON(bio->bi_vcnt); bv->bv_offset = base ? 
((unsigned long) base) % PAGE_SIZE : 0; diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index 99053b1251be..04657e93f4fd 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c @@ -113,7 +113,7 @@ static void dirty_init(struct keybuf_key *w) if (!io->dc->writeback_percent) bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0)); - bio->bi_size = KEY_SIZE(&w->key) << 9; + bio->bi_iter.bi_size = KEY_SIZE(&w->key) << 9; bio->bi_max_vecs = DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS); bio->bi_private = w; bio->bi_io_vec = bio->bi_inline_vecs; @@ -186,7 +186,7 @@ static void write_dirty(struct closure *cl) dirty_init(w); io->bio.bi_rw = WRITE; - io->bio.bi_sector = KEY_START(&w->key); + io->bio.bi_iter.bi_sector = KEY_START(&w->key); io->bio.bi_bdev = io->dc->bdev; io->bio.bi_end_io = dirty_endio; @@ -255,7 +255,7 @@ static void read_dirty(struct cached_dev *dc) io->dc = dc; dirty_init(w); - io->bio.bi_sector = PTR_OFFSET(&w->key, 0); + io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0); io->bio.bi_bdev = PTR_CACHE(dc->disk.c, &w->key, 0)->bdev; io->bio.bi_rw = READ; diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h index c9ddcf4614b9..e2f8598937ac 100644 --- a/drivers/md/bcache/writeback.h +++ b/drivers/md/bcache/writeback.h @@ -50,7 +50,7 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio, return false; if (dc->partial_stripes_expensive && - bcache_dev_stripe_dirty(dc, bio->bi_sector, + bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector, bio_sectors(bio))) return true; diff --git a/drivers/md/dm-bio-record.h b/drivers/md/dm-bio-record.h index 3a8cfa2645c7..5ace48ee9f58 100644 --- a/drivers/md/dm-bio-record.h +++ b/drivers/md/dm-bio-record.h @@ -40,10 +40,10 @@ static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio) { unsigned i; - bd->bi_sector = bio->bi_sector; + bd->bi_sector = bio->bi_iter.bi_sector; bd->bi_bdev = bio->bi_bdev; - bd->bi_size = bio->bi_size; - bd->bi_idx = bio->bi_idx; + bd->bi_size = bio->bi_iter.bi_size; + bd->bi_idx = bio->bi_iter.bi_idx; bd->bi_flags = bio->bi_flags; for (i = 0; i < bio->bi_vcnt; i++) { @@ -56,10 +56,10 @@ static inline void dm_bio_restore(struct dm_bio_details *bd, struct bio *bio) { unsigned i; - bio->bi_sector = bd->bi_sector; + bio->bi_iter.bi_sector = bd->bi_sector; bio->bi_bdev = bd->bi_bdev; - bio->bi_size = bd->bi_size; - bio->bi_idx = bd->bi_idx; + bio->bi_iter.bi_size = bd->bi_size; + bio->bi_iter.bi_idx = bd->bi_idx; bio->bi_flags = bd->bi_flags; for (i = 0; i < bio->bi_vcnt; i++) { diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index 173cbb20d104..4113b6044b80 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -538,7 +538,7 @@ static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block, bio_init(&b->bio); b->bio.bi_io_vec = b->bio_vec; b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS; - b->bio.bi_sector = block << b->c->sectors_per_block_bits; + b->bio.bi_iter.bi_sector = block << b->c->sectors_per_block_bits; b->bio.bi_bdev = b->c->bdev; b->bio.bi_end_io = end_io; diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c index 416b7b752a6e..bfba97dcde2d 100644 --- a/drivers/md/dm-cache-policy-mq.c +++ b/drivers/md/dm-cache-policy-mq.c @@ -72,7 +72,7 @@ static enum io_pattern iot_pattern(struct io_tracker *t) static void iot_update_stats(struct io_tracker *t, struct bio *bio) { - if (bio->bi_sector == from_oblock(t->last_end_oblock) + 1) + if (bio->bi_iter.bi_sector 
== from_oblock(t->last_end_oblock) + 1) t->nr_seq_samples++; else { /* @@ -87,7 +87,7 @@ static void iot_update_stats(struct io_tracker *t, struct bio *bio) t->nr_rand_samples++; } - t->last_end_oblock = to_oblock(bio->bi_sector + bio_sectors(bio) - 1); + t->last_end_oblock = to_oblock(bio_end_sector(bio) - 1); } static void iot_check_for_pattern_switch(struct io_tracker *t) diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 9efcf1059b99..86f9c83eb30c 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -664,15 +664,17 @@ static void remap_to_origin(struct cache *cache, struct bio *bio) static void remap_to_cache(struct cache *cache, struct bio *bio, dm_cblock_t cblock) { - sector_t bi_sector = bio->bi_sector; + sector_t bi_sector = bio->bi_iter.bi_sector; bio->bi_bdev = cache->cache_dev->bdev; if (!block_size_is_power_of_two(cache)) - bio->bi_sector = (from_cblock(cblock) * cache->sectors_per_block) + - sector_div(bi_sector, cache->sectors_per_block); + bio->bi_iter.bi_sector = + (from_cblock(cblock) * cache->sectors_per_block) + + sector_div(bi_sector, cache->sectors_per_block); else - bio->bi_sector = (from_cblock(cblock) << cache->sectors_per_block_shift) | - (bi_sector & (cache->sectors_per_block - 1)); + bio->bi_iter.bi_sector = + (from_cblock(cblock) << cache->sectors_per_block_shift) | + (bi_sector & (cache->sectors_per_block - 1)); } static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio) @@ -712,7 +714,7 @@ static void remap_to_cache_dirty(struct cache *cache, struct bio *bio, static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio) { - sector_t block_nr = bio->bi_sector; + sector_t block_nr = bio->bi_iter.bi_sector; if (!block_size_is_power_of_two(cache)) (void) sector_div(block_nr, cache->sectors_per_block); @@ -1027,7 +1029,7 @@ static void issue_overwrite(struct dm_cache_migration *mg, struct bio *bio) static bool bio_writes_complete_block(struct cache *cache, struct bio *bio) { return (bio_data_dir(bio) == WRITE) && - (bio->bi_size == (cache->sectors_per_block << SECTOR_SHIFT)); + (bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT)); } static void avoid_copy(struct dm_cache_migration *mg) @@ -1252,7 +1254,7 @@ static void process_flush_bio(struct cache *cache, struct bio *bio) size_t pb_data_size = get_per_bio_data_size(cache); struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); - BUG_ON(bio->bi_size); + BUG_ON(bio->bi_iter.bi_size); if (!pb->req_nr) remap_to_origin(cache, bio); else @@ -1275,9 +1277,9 @@ static void process_flush_bio(struct cache *cache, struct bio *bio) */ static void process_discard_bio(struct cache *cache, struct bio *bio) { - dm_block_t start_block = dm_sector_div_up(bio->bi_sector, + dm_block_t start_block = dm_sector_div_up(bio->bi_iter.bi_sector, cache->discard_block_size); - dm_block_t end_block = bio->bi_sector + bio_sectors(bio); + dm_block_t end_block = bio_end_sector(bio); dm_block_t b; end_block = block_div(end_block, cache->discard_block_size); diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 81b0fa660452..1e2e5465d28e 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -828,8 +828,8 @@ static void crypt_convert_init(struct crypt_config *cc, ctx->bio_out = bio_out; ctx->offset_in = 0; ctx->offset_out = 0; - ctx->idx_in = bio_in ? bio_in->bi_idx : 0; - ctx->idx_out = bio_out ? bio_out->bi_idx : 0; + ctx->idx_in = bio_in ? bio_in->bi_iter.bi_idx : 0; + ctx->idx_out = bio_out ? 
bio_out->bi_iter.bi_idx : 0; ctx->cc_sector = sector + cc->iv_offset; init_completion(&ctx->restart); } @@ -1021,7 +1021,7 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size, size -= len; } - if (!clone->bi_size) { + if (!clone->bi_iter.bi_size) { bio_put(clone); return NULL; } @@ -1161,7 +1161,7 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp) crypt_inc_pending(io); clone_init(io, clone); - clone->bi_sector = cc->start + io->sector; + clone->bi_iter.bi_sector = cc->start + io->sector; generic_make_request(clone); return 0; @@ -1209,7 +1209,7 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async) /* crypt_convert should have filled the clone bio */ BUG_ON(io->ctx.idx_out < clone->bi_vcnt); - clone->bi_sector = cc->start + io->sector; + clone->bi_iter.bi_sector = cc->start + io->sector; if (async) kcryptd_queue_io(io); @@ -1224,7 +1224,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) struct dm_crypt_io *new_io; int crypt_finished; unsigned out_of_pages = 0; - unsigned remaining = io->base_bio->bi_size; + unsigned remaining = io->base_bio->bi_iter.bi_size; sector_t sector = io->sector; int r; @@ -1248,7 +1248,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) io->ctx.bio_out = clone; io->ctx.idx_out = 0; - remaining -= clone->bi_size; + remaining -= clone->bi_iter.bi_size; sector += bio_sectors(clone); crypt_inc_pending(io); @@ -1869,11 +1869,12 @@ static int crypt_map(struct dm_target *ti, struct bio *bio) if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) { bio->bi_bdev = cc->dev->bdev; if (bio_sectors(bio)) - bio->bi_sector = cc->start + dm_target_offset(ti, bio->bi_sector); + bio->bi_iter.bi_sector = cc->start + + dm_target_offset(ti, bio->bi_iter.bi_sector); return DM_MAPIO_REMAPPED; } - io = crypt_io_alloc(cc, bio, dm_target_offset(ti, bio->bi_sector)); + io = crypt_io_alloc(cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector)); if (bio_data_dir(io->base_bio) == READ) { if (kcryptd_io_read(io, GFP_NOWAIT)) diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c index 496d5f3646a5..84c860191a2e 100644 --- a/drivers/md/dm-delay.c +++ b/drivers/md/dm-delay.c @@ -281,14 +281,15 @@ static int delay_map(struct dm_target *ti, struct bio *bio) if ((bio_data_dir(bio) == WRITE) && (dc->dev_write)) { bio->bi_bdev = dc->dev_write->bdev; if (bio_sectors(bio)) - bio->bi_sector = dc->start_write + - dm_target_offset(ti, bio->bi_sector); + bio->bi_iter.bi_sector = dc->start_write + + dm_target_offset(ti, bio->bi_iter.bi_sector); return delay_bio(dc, dc->write_delay, bio); } bio->bi_bdev = dc->dev_read->bdev; - bio->bi_sector = dc->start_read + dm_target_offset(ti, bio->bi_sector); + bio->bi_iter.bi_sector = dc->start_read + + dm_target_offset(ti, bio->bi_iter.bi_sector); return delay_bio(dc, dc->read_delay, bio); } diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c index c80a0ec5f126..b257e46876d3 100644 --- a/drivers/md/dm-flakey.c +++ b/drivers/md/dm-flakey.c @@ -248,7 +248,8 @@ static void flakey_map_bio(struct dm_target *ti, struct bio *bio) bio->bi_bdev = fc->dev->bdev; if (bio_sectors(bio)) - bio->bi_sector = flakey_map_sector(ti, bio->bi_sector); + bio->bi_iter.bi_sector = + flakey_map_sector(ti, bio->bi_iter.bi_sector); } static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc) @@ -265,8 +266,8 @@ static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc) DMDEBUG("Corrupting data bio=%p by writing %u to byte %u " "(rw=%c bi_rw=%lu bi_sector=%llu 
cur_bytes=%u)\n", bio, fc->corrupt_bio_value, fc->corrupt_bio_byte, - (bio_data_dir(bio) == WRITE) ? 'w' : 'r', - bio->bi_rw, (unsigned long long)bio->bi_sector, bio_bytes); + (bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_rw, + (unsigned long long)bio->bi_iter.bi_sector, bio_bytes); } } diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c index 2a20986a2fec..01558b093307 100644 --- a/drivers/md/dm-io.c +++ b/drivers/md/dm-io.c @@ -304,14 +304,14 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where, dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT))); bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios); - bio->bi_sector = where->sector + (where->count - remaining); + bio->bi_iter.bi_sector = where->sector + (where->count - remaining); bio->bi_bdev = where->bdev; bio->bi_end_io = endio; store_io_and_region_in_bio(bio, io, region); if (rw & REQ_DISCARD) { num_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining); - bio->bi_size = num_sectors << SECTOR_SHIFT; + bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT; remaining -= num_sectors; } else if (rw & REQ_WRITE_SAME) { /* @@ -320,7 +320,7 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where, dp->get_page(dp, &page, &len, &offset); bio_add_page(bio, page, logical_block_size, offset); num_sectors = min_t(sector_t, q->limits.max_write_same_sectors, remaining); - bio->bi_size = num_sectors << SECTOR_SHIFT; + bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT; offset = 0; remaining -= num_sectors; diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c index 4f99d267340c..53e848c10939 100644 --- a/drivers/md/dm-linear.c +++ b/drivers/md/dm-linear.c @@ -85,7 +85,8 @@ static void linear_map_bio(struct dm_target *ti, struct bio *bio) bio->bi_bdev = lc->dev->bdev; if (bio_sectors(bio)) - bio->bi_sector = linear_map_sector(ti, bio->bi_sector); + bio->bi_iter.bi_sector = + linear_map_sector(ti, bio->bi_iter.bi_sector); } static int linear_map(struct dm_target *ti, struct bio *bio) diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index 9584443c5614..9f6d8e6baa7d 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c @@ -432,7 +432,7 @@ static int mirror_available(struct mirror_set *ms, struct bio *bio) region_t region = dm_rh_bio_to_region(ms->rh, bio); if (log->type->in_sync(log, region, 0)) - return choose_mirror(ms, bio->bi_sector) ? 1 : 0; + return choose_mirror(ms, bio->bi_iter.bi_sector) ? 
1 : 0; return 0; } @@ -442,15 +442,15 @@ static int mirror_available(struct mirror_set *ms, struct bio *bio) */ static sector_t map_sector(struct mirror *m, struct bio *bio) { - if (unlikely(!bio->bi_size)) + if (unlikely(!bio->bi_iter.bi_size)) return 0; - return m->offset + dm_target_offset(m->ms->ti, bio->bi_sector); + return m->offset + dm_target_offset(m->ms->ti, bio->bi_iter.bi_sector); } static void map_bio(struct mirror *m, struct bio *bio) { bio->bi_bdev = m->dev->bdev; - bio->bi_sector = map_sector(m, bio); + bio->bi_iter.bi_sector = map_sector(m, bio); } static void map_region(struct dm_io_region *io, struct mirror *m, @@ -527,7 +527,7 @@ static void read_async_bio(struct mirror *m, struct bio *bio) struct dm_io_request io_req = { .bi_rw = READ, .mem.type = DM_IO_BVEC, - .mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx, + .mem.ptr.bvec = bio->bi_io_vec + bio->bi_iter.bi_idx, .notify.fn = read_callback, .notify.context = bio, .client = m->ms->io_client, @@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads) * We can only read balance if the region is in sync. */ if (likely(region_in_sync(ms, region, 1))) - m = choose_mirror(ms, bio->bi_sector); + m = choose_mirror(ms, bio->bi_iter.bi_sector); else if (m && atomic_read(&m->error_count)) m = NULL; @@ -630,7 +630,7 @@ static void do_write(struct mirror_set *ms, struct bio *bio) struct dm_io_request io_req = { .bi_rw = WRITE | (bio->bi_rw & WRITE_FLUSH_FUA), .mem.type = DM_IO_BVEC, - .mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx, + .mem.ptr.bvec = bio->bi_io_vec + bio->bi_iter.bi_idx, .notify.fn = write_callback, .notify.context = bio, .client = ms->io_client, @@ -1181,7 +1181,7 @@ static int mirror_map(struct dm_target *ti, struct bio *bio) * The region is in-sync and we can perform reads directly. * Store enough information so we can retry if it fails. */ - m = choose_mirror(ms, bio->bi_sector); + m = choose_mirror(ms, bio->bi_iter.bi_sector); if (unlikely(!m)) return -EIO; diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c index 69732e03eb34..b929fd5f4984 100644 --- a/drivers/md/dm-region-hash.c +++ b/drivers/md/dm-region-hash.c @@ -126,7 +126,8 @@ EXPORT_SYMBOL_GPL(dm_rh_region_to_sector); region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio) { - return dm_rh_sector_to_region(rh, bio->bi_sector - rh->target_begin); + return dm_rh_sector_to_region(rh, bio->bi_iter.bi_sector - + rh->target_begin); } EXPORT_SYMBOL_GPL(dm_rh_bio_to_region); diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index aec57d76db5d..3ded8c729dfb 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -1562,11 +1562,10 @@ static void remap_exception(struct dm_snapshot *s, struct dm_exception *e, struct bio *bio, chunk_t chunk) { bio->bi_bdev = s->cow->bdev; - bio->bi_sector = chunk_to_sector(s->store, - dm_chunk_number(e->new_chunk) + - (chunk - e->old_chunk)) + - (bio->bi_sector & - s->store->chunk_mask); + bio->bi_iter.bi_sector = + chunk_to_sector(s->store, dm_chunk_number(e->new_chunk) + + (chunk - e->old_chunk)) + + (bio->bi_iter.bi_sector & s->store->chunk_mask); } static int snapshot_map(struct dm_target *ti, struct bio *bio) @@ -1584,7 +1583,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio) return DM_MAPIO_REMAPPED; } - chunk = sector_to_chunk(s->store, bio->bi_sector); + chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector); /* Full snapshots are not usable */ /* To get here the table must be live so s->active is always set. 
*/ @@ -1645,7 +1644,8 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio) r = DM_MAPIO_SUBMITTED; if (!pe->started && - bio->bi_size == (s->store->chunk_size << SECTOR_SHIFT)) { + bio->bi_iter.bi_size == + (s->store->chunk_size << SECTOR_SHIFT)) { pe->started = 1; up_write(&s->lock); start_full_bio(pe, bio); @@ -1701,7 +1701,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio) return DM_MAPIO_REMAPPED; } - chunk = sector_to_chunk(s->store, bio->bi_sector); + chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector); down_write(&s->lock); @@ -2038,7 +2038,7 @@ static int do_origin(struct dm_dev *origin, struct bio *bio) down_read(&_origins_lock); o = __lookup_origin(origin->bdev); if (o) - r = __origin_write(&o->snapshots, bio->bi_sector, bio); + r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio); up_read(&_origins_lock); return r; diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c index 73c1712dad96..d1600d2aa2e2 100644 --- a/drivers/md/dm-stripe.c +++ b/drivers/md/dm-stripe.c @@ -259,13 +259,15 @@ static int stripe_map_range(struct stripe_c *sc, struct bio *bio, { sector_t begin, end; - stripe_map_range_sector(sc, bio->bi_sector, target_stripe, &begin); + stripe_map_range_sector(sc, bio->bi_iter.bi_sector, + target_stripe, &begin); stripe_map_range_sector(sc, bio_end_sector(bio), target_stripe, &end); if (begin < end) { bio->bi_bdev = sc->stripe[target_stripe].dev->bdev; - bio->bi_sector = begin + sc->stripe[target_stripe].physical_start; - bio->bi_size = to_bytes(end - begin); + bio->bi_iter.bi_sector = begin + + sc->stripe[target_stripe].physical_start; + bio->bi_iter.bi_size = to_bytes(end - begin); return DM_MAPIO_REMAPPED; } else { /* The range doesn't map to the target stripe */ @@ -293,9 +295,10 @@ static int stripe_map(struct dm_target *ti, struct bio *bio) return stripe_map_range(sc, bio, target_bio_nr); } - stripe_map_sector(sc, bio->bi_sector, &stripe, &bio->bi_sector); + stripe_map_sector(sc, bio->bi_iter.bi_sector, + &stripe, &bio->bi_iter.bi_sector); - bio->bi_sector += sc->stripe[stripe].physical_start; + bio->bi_iter.bi_sector += sc->stripe[stripe].physical_start; bio->bi_bdev = sc->stripe[stripe].dev->bdev; return DM_MAPIO_REMAPPED; diff --git a/drivers/md/dm-switch.c b/drivers/md/dm-switch.c index ff9ac4be4721..09a688b3d48c 100644 --- a/drivers/md/dm-switch.c +++ b/drivers/md/dm-switch.c @@ -311,11 +311,11 @@ error: static int switch_map(struct dm_target *ti, struct bio *bio) { struct switch_ctx *sctx = ti->private; - sector_t offset = dm_target_offset(ti, bio->bi_sector); + sector_t offset = dm_target_offset(ti, bio->bi_iter.bi_sector); unsigned path_nr = switch_get_path_nr(sctx, offset); bio->bi_bdev = sctx->path_list[path_nr].dmdev->bdev; - bio->bi_sector = sctx->path_list[path_nr].start + offset; + bio->bi_iter.bi_sector = sctx->path_list[path_nr].start + offset; return DM_MAPIO_REMAPPED; } diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 2c0cf511ec23..a65402480c8c 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -413,7 +413,7 @@ static bool block_size_is_power_of_two(struct pool *pool) static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio) { struct pool *pool = tc->pool; - sector_t block_nr = bio->bi_sector; + sector_t block_nr = bio->bi_iter.bi_sector; if (block_size_is_power_of_two(pool)) block_nr >>= pool->sectors_per_block_shift; @@ -426,14 +426,15 @@ static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio) static void remap(struct thin_c *tc, struct bio 
*bio, dm_block_t block) { struct pool *pool = tc->pool; - sector_t bi_sector = bio->bi_sector; + sector_t bi_sector = bio->bi_iter.bi_sector; bio->bi_bdev = tc->pool_dev->bdev; if (block_size_is_power_of_two(pool)) - bio->bi_sector = (block << pool->sectors_per_block_shift) | - (bi_sector & (pool->sectors_per_block - 1)); + bio->bi_iter.bi_sector = + (block << pool->sectors_per_block_shift) | + (bi_sector & (pool->sectors_per_block - 1)); else - bio->bi_sector = (block * pool->sectors_per_block) + + bio->bi_iter.bi_sector = (block * pool->sectors_per_block) + sector_div(bi_sector, pool->sectors_per_block); } @@ -721,7 +722,8 @@ static void process_prepared(struct pool *pool, struct list_head *head, */ static int io_overlaps_block(struct pool *pool, struct bio *bio) { - return bio->bi_size == (pool->sectors_per_block << SECTOR_SHIFT); + return bio->bi_iter.bi_size == + (pool->sectors_per_block << SECTOR_SHIFT); } static int io_overwrites_block(struct pool *pool, struct bio *bio) @@ -1130,7 +1132,7 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio, if (bio_detain(pool, &key, bio, &cell)) return; - if (bio_data_dir(bio) == WRITE && bio->bi_size) + if (bio_data_dir(bio) == WRITE && bio->bi_iter.bi_size) break_sharing(tc, bio, block, &key, lookup_result, cell); else { struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); @@ -1153,7 +1155,7 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block /* * Remap empty bios (flushes) immediately, without provisioning. */ - if (!bio->bi_size) { + if (!bio->bi_iter.bi_size) { inc_all_io_entry(pool, bio); cell_defer_no_holder(tc, cell); @@ -1253,7 +1255,7 @@ static void process_bio_read_only(struct thin_c *tc, struct bio *bio) r = dm_thin_find_block(tc->td, block, 1, &lookup_result); switch (r) { case 0: - if (lookup_result.shared && (rw == WRITE) && bio->bi_size) + if (lookup_result.shared && (rw == WRITE) && bio->bi_iter.bi_size) bio_io_error(bio); else { inc_all_io_entry(tc->pool, bio); @@ -2867,7 +2869,7 @@ out_unlock: static int thin_map(struct dm_target *ti, struct bio *bio) { - bio->bi_sector = dm_target_offset(ti, bio->bi_sector); + bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector); return thin_bio_map(ti, bio); } diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c index 4b7941db3aff..132b3154d466 100644 --- a/drivers/md/dm-verity.c +++ b/drivers/md/dm-verity.c @@ -493,9 +493,9 @@ static int verity_map(struct dm_target *ti, struct bio *bio) struct dm_verity_io *io; bio->bi_bdev = v->data_dev->bdev; - bio->bi_sector = verity_map_sector(v, bio->bi_sector); + bio->bi_iter.bi_sector = verity_map_sector(v, bio->bi_iter.bi_sector); - if (((unsigned)bio->bi_sector | bio_sectors(bio)) & + if (((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) & ((1 << (v->data_dev_block_bits - SECTOR_SHIFT)) - 1)) { DMERR_LIMIT("unaligned io"); return -EIO; @@ -514,8 +514,8 @@ static int verity_map(struct dm_target *ti, struct bio *bio) io->v = v; io->orig_bi_end_io = bio->bi_end_io; io->orig_bi_private = bio->bi_private; - io->block = bio->bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT); - io->n_blocks = bio->bi_size >> v->data_dev_block_bits; + io->block = bio->bi_iter.bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT); + io->n_blocks = bio->bi_iter.bi_size >> v->data_dev_block_bits; bio->bi_end_io = verity_end_io; bio->bi_private = io; diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 0704c523a76b..ccd064ea4fe6 100644 --- a/drivers/md/dm.c +++ 
b/drivers/md/dm.c @@ -575,7 +575,7 @@ static void start_io_acct(struct dm_io *io) atomic_inc_return(&md->pending[rw])); if (unlikely(dm_stats_used(&md->stats))) - dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_sector, + dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector, bio_sectors(bio), false, 0, &io->stats_aux); } @@ -593,7 +593,7 @@ static void end_io_acct(struct dm_io *io) part_stat_unlock(); if (unlikely(dm_stats_used(&md->stats))) - dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_sector, + dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector, bio_sectors(bio), true, duration, &io->stats_aux); /* @@ -742,7 +742,7 @@ static void dec_pending(struct dm_io *io, int error) if (io_error == DM_ENDIO_REQUEUE) return; - if ((bio->bi_rw & REQ_FLUSH) && bio->bi_size) { + if ((bio->bi_rw & REQ_FLUSH) && bio->bi_iter.bi_size) { /* * Preflush done for flush with data, reissue * without REQ_FLUSH. @@ -797,7 +797,7 @@ static void end_clone_bio(struct bio *clone, int error) struct dm_rq_clone_bio_info *info = clone->bi_private; struct dm_rq_target_io *tio = info->tio; struct bio *bio = info->orig; - unsigned int nr_bytes = info->orig->bi_size; + unsigned int nr_bytes = info->orig->bi_iter.bi_size; bio_put(clone); @@ -1128,7 +1128,7 @@ static void __map_bio(struct dm_target_io *tio) * this io. */ atomic_inc(&tio->io->io_count); - sector = clone->bi_sector; + sector = clone->bi_iter.bi_sector; r = ti->type->map(ti, clone); if (r == DM_MAPIO_REMAPPED) { /* the bio has been remapped so dispatch it */ @@ -1160,13 +1160,13 @@ struct clone_info { static void bio_setup_sector(struct bio *bio, sector_t sector, sector_t len) { - bio->bi_sector = sector; - bio->bi_size = to_bytes(len); + bio->bi_iter.bi_sector = sector; + bio->bi_iter.bi_size = to_bytes(len); } static void bio_setup_bv(struct bio *bio, unsigned short idx, unsigned short bv_count) { - bio->bi_idx = idx; + bio->bi_iter.bi_idx = idx; bio->bi_vcnt = idx + bv_count; bio->bi_flags &= ~(1 << BIO_SEG_VALID); } @@ -1202,7 +1202,7 @@ static void clone_split_bio(struct dm_target_io *tio, struct bio *bio, clone->bi_rw = bio->bi_rw; clone->bi_vcnt = 1; clone->bi_io_vec->bv_offset = offset; - clone->bi_io_vec->bv_len = clone->bi_size; + clone->bi_io_vec->bv_len = clone->bi_iter.bi_size; clone->bi_flags |= 1 << BIO_CLONED; clone_bio_integrity(bio, clone, idx, len, offset, 1); @@ -1222,7 +1222,8 @@ static void clone_bio(struct dm_target_io *tio, struct bio *bio, bio_setup_sector(clone, sector, len); bio_setup_bv(clone, idx, bv_count); - if (idx != bio->bi_idx || clone->bi_size < bio->bi_size) + if (idx != bio->bi_iter.bi_idx || + clone->bi_iter.bi_size < bio->bi_iter.bi_size) trim = 1; clone_bio_integrity(bio, clone, idx, len, 0, trim); } @@ -1510,8 +1511,8 @@ static void __split_and_process_bio(struct mapped_device *md, ci.io->bio = bio; ci.io->md = md; spin_lock_init(&ci.io->endio_lock); - ci.sector = bio->bi_sector; - ci.idx = bio->bi_idx; + ci.sector = bio->bi_iter.bi_sector; + ci.idx = bio->bi_iter.bi_idx; start_io_acct(ci.io); diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c index 3193aefe982b..e8b4574956c7 100644 --- a/drivers/md/faulty.c +++ b/drivers/md/faulty.c @@ -74,8 +74,8 @@ static void faulty_fail(struct bio *bio, int error) { struct bio *b = bio->bi_private; - b->bi_size = bio->bi_size; - b->bi_sector = bio->bi_sector; + b->bi_iter.bi_size = bio->bi_iter.bi_size; + b->bi_iter.bi_sector = bio->bi_iter.bi_sector; bio_put(bio); @@ -185,26 +185,31 @@ static void make_request(struct mddev *mddev, struct 
bio *bio) return; } - if (check_sector(conf, bio->bi_sector, bio_end_sector(bio), WRITE)) + if (check_sector(conf, bio->bi_iter.bi_sector, + bio_end_sector(bio), WRITE)) failit = 1; if (check_mode(conf, WritePersistent)) { - add_sector(conf, bio->bi_sector, WritePersistent); + add_sector(conf, bio->bi_iter.bi_sector, + WritePersistent); failit = 1; } if (check_mode(conf, WriteTransient)) failit = 1; } else { /* read request */ - if (check_sector(conf, bio->bi_sector, bio_end_sector(bio), READ)) + if (check_sector(conf, bio->bi_iter.bi_sector, + bio_end_sector(bio), READ)) failit = 1; if (check_mode(conf, ReadTransient)) failit = 1; if (check_mode(conf, ReadPersistent)) { - add_sector(conf, bio->bi_sector, ReadPersistent); + add_sector(conf, bio->bi_iter.bi_sector, + ReadPersistent); failit = 1; } if (check_mode(conf, ReadFixable)) { - add_sector(conf, bio->bi_sector, ReadFixable); + add_sector(conf, bio->bi_iter.bi_sector, + ReadFixable); failit = 1; } } diff --git a/drivers/md/linear.c b/drivers/md/linear.c index f03fabd2b37b..fb3b0d04edfb 100644 --- a/drivers/md/linear.c +++ b/drivers/md/linear.c @@ -297,19 +297,19 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio) } rcu_read_lock(); - tmp_dev = which_dev(mddev, bio->bi_sector); + tmp_dev = which_dev(mddev, bio->bi_iter.bi_sector); start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors; - if (unlikely(bio->bi_sector >= (tmp_dev->end_sector) - || (bio->bi_sector < start_sector))) { + if (unlikely(bio->bi_iter.bi_sector >= (tmp_dev->end_sector) + || (bio->bi_iter.bi_sector < start_sector))) { char b[BDEVNAME_SIZE]; printk(KERN_ERR "md/linear:%s: make_request: Sector %llu out of bounds on " "dev %s: %llu sectors, offset %llu\n", mdname(mddev), - (unsigned long long)bio->bi_sector, + (unsigned long long)bio->bi_iter.bi_sector, bdevname(tmp_dev->rdev->bdev, b), (unsigned long long)tmp_dev->rdev->sectors, (unsigned long long)start_sector); @@ -326,7 +326,7 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio) rcu_read_unlock(); - bp = bio_split(bio, end_sector - bio->bi_sector); + bp = bio_split(bio, end_sector - bio->bi_iter.bi_sector); linear_make_request(mddev, &bp->bio1); linear_make_request(mddev, &bp->bio2); @@ -335,7 +335,7 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio) } bio->bi_bdev = tmp_dev->rdev->bdev; - bio->bi_sector = bio->bi_sector - start_sector + bio->bi_iter.bi_sector = bio->bi_iter.bi_sector - start_sector + tmp_dev->rdev->data_offset; rcu_read_unlock(); diff --git a/drivers/md/md.c b/drivers/md/md.c index 739b1ec54e28..b07fed398fd7 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -393,7 +393,7 @@ static void md_submit_flush_data(struct work_struct *ws) struct mddev *mddev = container_of(ws, struct mddev, flush_work); struct bio *bio = mddev->flush_bio; - if (bio->bi_size == 0) + if (bio->bi_iter.bi_size == 0) /* an empty barrier - all done */ bio_endio(bio, 0); else { @@ -754,7 +754,7 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev, struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev); bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev; - bio->bi_sector = sector; + bio->bi_iter.bi_sector = sector; bio_add_page(bio, page, size, 0); bio->bi_private = rdev; bio->bi_end_io = super_written; @@ -785,13 +785,13 @@ int sync_page_io(struct md_rdev *rdev, sector_t sector, int size, bio->bi_bdev = (metadata_op && rdev->meta_bdev) ? 
rdev->meta_bdev : rdev->bdev; if (metadata_op) - bio->bi_sector = sector + rdev->sb_start; + bio->bi_iter.bi_sector = sector + rdev->sb_start; else if (rdev->mddev->reshape_position != MaxSector && (rdev->mddev->reshape_backwards == (sector >= rdev->mddev->reshape_position))) - bio->bi_sector = sector + rdev->new_data_offset; + bio->bi_iter.bi_sector = sector + rdev->new_data_offset; else - bio->bi_sector = sector + rdev->data_offset; + bio->bi_iter.bi_sector = sector + rdev->data_offset; bio_add_page(bio, page, size, 0); submit_bio_wait(rw, bio); diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index 1642eae75a33..849ad39f547b 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c @@ -100,7 +100,7 @@ static void multipath_end_request(struct bio *bio, int error) md_error (mp_bh->mddev, rdev); printk(KERN_ERR "multipath: %s: rescheduling sector %llu\n", bdevname(rdev->bdev,b), - (unsigned long long)bio->bi_sector); + (unsigned long long)bio->bi_iter.bi_sector); multipath_reschedule_retry(mp_bh); } else multipath_end_bh_io(mp_bh, error); @@ -132,7 +132,7 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio) multipath = conf->multipaths + mp_bh->path; mp_bh->bio = *bio; - mp_bh->bio.bi_sector += multipath->rdev->data_offset; + mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset; mp_bh->bio.bi_bdev = multipath->rdev->bdev; mp_bh->bio.bi_rw |= REQ_FAILFAST_TRANSPORT; mp_bh->bio.bi_end_io = multipath_end_request; @@ -355,21 +355,22 @@ static void multipathd(struct md_thread *thread) spin_unlock_irqrestore(&conf->device_lock, flags); bio = &mp_bh->bio; - bio->bi_sector = mp_bh->master_bio->bi_sector; + bio->bi_iter.bi_sector = mp_bh->master_bio->bi_iter.bi_sector; if ((mp_bh->path = multipath_map (conf))<0) { printk(KERN_ALERT "multipath: %s: unrecoverable IO read" " error for block %llu\n", bdevname(bio->bi_bdev,b), - (unsigned long long)bio->bi_sector); + (unsigned long long)bio->bi_iter.bi_sector); multipath_end_bh_io(mp_bh, -EIO); } else { printk(KERN_ERR "multipath: %s: redirecting sector %llu" " to another IO path\n", bdevname(bio->bi_bdev,b), - (unsigned long long)bio->bi_sector); + (unsigned long long)bio->bi_iter.bi_sector); *bio = *(mp_bh->master_bio); - bio->bi_sector += conf->multipaths[mp_bh->path].rdev->data_offset; + bio->bi_iter.bi_sector += + conf->multipaths[mp_bh->path].rdev->data_offset; bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev; bio->bi_rw |= REQ_FAILFAST_TRANSPORT; bio->bi_end_io = multipath_end_request; diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index c4d420b7d2f4..e38d1d3226f3 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -501,10 +501,11 @@ static inline int is_io_in_chunk_boundary(struct mddev *mddev, unsigned int chunk_sects, struct bio *bio) { if (likely(is_power_of_2(chunk_sects))) { - return chunk_sects >= ((bio->bi_sector & (chunk_sects-1)) + return chunk_sects >= + ((bio->bi_iter.bi_sector & (chunk_sects-1)) + bio_sectors(bio)); } else{ - sector_t sector = bio->bi_sector; + sector_t sector = bio->bi_iter.bi_sector; return chunk_sects >= (sector_div(sector, chunk_sects) + bio_sectors(bio)); } @@ -524,7 +525,7 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio) chunk_sects = mddev->chunk_sectors; if (unlikely(!is_io_in_chunk_boundary(mddev, chunk_sects, bio))) { - sector_t sector = bio->bi_sector; + sector_t sector = bio->bi_iter.bi_sector; struct bio_pair *bp; /* Sanity check -- queue functions should prevent this happening */ if (bio_segments(bio) > 1) @@ 
-544,12 +545,12 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio) return; } - sector_offset = bio->bi_sector; + sector_offset = bio->bi_iter.bi_sector; zone = find_zone(mddev->private, &sector_offset); - tmp_dev = map_sector(mddev, zone, bio->bi_sector, + tmp_dev = map_sector(mddev, zone, bio->bi_iter.bi_sector, &sector_offset); bio->bi_bdev = tmp_dev->bdev; - bio->bi_sector = sector_offset + zone->dev_start + + bio->bi_iter.bi_sector = sector_offset + zone->dev_start + tmp_dev->data_offset; if (unlikely((bio->bi_rw & REQ_DISCARD) && @@ -566,7 +567,8 @@ bad_map: printk("md/raid0:%s: make_request bug: can't convert block across chunks" " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects / 2, - (unsigned long long)bio->bi_sector, bio_sectors(bio) / 2); + (unsigned long long)bio->bi_iter.bi_sector, + bio_sectors(bio) / 2); bio_io_error(bio); return; diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 1e5a540995e9..db3b9d7314f1 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -229,7 +229,7 @@ static void call_bio_endio(struct r1bio *r1_bio) int done; struct r1conf *conf = r1_bio->mddev->private; sector_t start_next_window = r1_bio->start_next_window; - sector_t bi_sector = bio->bi_sector; + sector_t bi_sector = bio->bi_iter.bi_sector; if (bio->bi_phys_segments) { unsigned long flags; @@ -265,9 +265,8 @@ static void raid_end_bio_io(struct r1bio *r1_bio) if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) { pr_debug("raid1: sync end %s on sectors %llu-%llu\n", (bio_data_dir(bio) == WRITE) ? "write" : "read", - (unsigned long long) bio->bi_sector, - (unsigned long long) bio->bi_sector + - bio_sectors(bio) - 1); + (unsigned long long) bio->bi_iter.bi_sector, + (unsigned long long) bio_end_sector(bio) - 1); call_bio_endio(r1_bio); } @@ -466,9 +465,8 @@ static void raid1_end_write_request(struct bio *bio, int error) struct bio *mbio = r1_bio->master_bio; pr_debug("raid1: behind end write sectors" " %llu-%llu\n", - (unsigned long long) mbio->bi_sector, - (unsigned long long) mbio->bi_sector + - bio_sectors(mbio) - 1); + (unsigned long long) mbio->bi_iter.bi_sector, + (unsigned long long) bio_end_sector(mbio) - 1); call_bio_endio(r1_bio); } } @@ -875,7 +873,7 @@ static bool need_to_wait_for_sync(struct r1conf *conf, struct bio *bio) else if ((conf->next_resync - RESYNC_WINDOW_SECTORS >= bio_end_sector(bio)) || (conf->next_resync + NEXT_NORMALIO_DISTANCE - <= bio->bi_sector)) + <= bio->bi_iter.bi_sector)) wait = false; else wait = true; @@ -913,19 +911,19 @@ static sector_t wait_barrier(struct r1conf *conf, struct bio *bio) if (bio && bio_data_dir(bio) == WRITE) { if (conf->next_resync + NEXT_NORMALIO_DISTANCE - <= bio->bi_sector) { + <= bio->bi_iter.bi_sector) { if (conf->start_next_window == MaxSector) conf->start_next_window = conf->next_resync + NEXT_NORMALIO_DISTANCE; if ((conf->start_next_window + NEXT_NORMALIO_DISTANCE) - <= bio->bi_sector) + <= bio->bi_iter.bi_sector) conf->next_window_requests++; else conf->current_window_requests++; } - if (bio->bi_sector >= conf->start_next_window) + if (bio->bi_iter.bi_sector >= conf->start_next_window) sector = conf->start_next_window; } @@ -1028,7 +1026,8 @@ do_sync_io: if (bvecs[i].bv_page) put_page(bvecs[i].bv_page); kfree(bvecs); - pr_debug("%dB behind alloc failed, doing sync I/O\n", bio->bi_size); + pr_debug("%dB behind alloc failed, doing sync I/O\n", + bio->bi_iter.bi_size); } struct raid1_plug_cb { @@ -1108,7 +1107,7 @@ static void make_request(struct mddev *mddev, struct bio * bio) if (bio_data_dir(bio)
== WRITE && bio_end_sector(bio) > mddev->suspend_lo && - bio->bi_sector < mddev->suspend_hi) { + bio->bi_iter.bi_sector < mddev->suspend_hi) { /* As the suspend_* range is controlled by * userspace, we want an interruptible * wait. @@ -1119,7 +1118,7 @@ static void make_request(struct mddev *mddev, struct bio * bio) prepare_to_wait(&conf->wait_barrier, &w, TASK_INTERRUPTIBLE); if (bio_end_sector(bio) <= mddev->suspend_lo || - bio->bi_sector >= mddev->suspend_hi) + bio->bi_iter.bi_sector >= mddev->suspend_hi) break; schedule(); } @@ -1141,7 +1140,7 @@ static void make_request(struct mddev *mddev, struct bio * bio) r1_bio->sectors = bio_sectors(bio); r1_bio->state = 0; r1_bio->mddev = mddev; - r1_bio->sector = bio->bi_sector; + r1_bio->sector = bio->bi_iter.bi_sector; /* We might need to issue multiple reads to different * devices if there are bad blocks around, so we keep @@ -1181,12 +1180,13 @@ read_again: r1_bio->read_disk = rdisk; read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev); - bio_trim(read_bio, r1_bio->sector - bio->bi_sector, + bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector, max_sectors); r1_bio->bios[rdisk] = read_bio; - read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset; + read_bio->bi_iter.bi_sector = r1_bio->sector + + mirror->rdev->data_offset; read_bio->bi_bdev = mirror->rdev->bdev; read_bio->bi_end_io = raid1_end_read_request; read_bio->bi_rw = READ | do_sync; @@ -1198,7 +1198,7 @@ read_again: */ sectors_handled = (r1_bio->sector + max_sectors - - bio->bi_sector); + - bio->bi_iter.bi_sector); r1_bio->sectors = max_sectors; spin_lock_irq(&conf->device_lock); if (bio->bi_phys_segments == 0) @@ -1219,7 +1219,8 @@ read_again: r1_bio->sectors = bio_sectors(bio) - sectors_handled; r1_bio->state = 0; r1_bio->mddev = mddev; - r1_bio->sector = bio->bi_sector + sectors_handled; + r1_bio->sector = bio->bi_iter.bi_sector + + sectors_handled; goto read_again; } else generic_make_request(read_bio); @@ -1322,7 +1323,7 @@ read_again: if (r1_bio->bios[j]) rdev_dec_pending(conf->mirrors[j].rdev, mddev); r1_bio->state = 0; - allow_barrier(conf, start_next_window, bio->bi_sector); + allow_barrier(conf, start_next_window, bio->bi_iter.bi_sector); md_wait_for_blocked_rdev(blocked_rdev, mddev); start_next_window = wait_barrier(conf, bio); /* @@ -1349,7 +1350,7 @@ read_again: bio->bi_phys_segments++; spin_unlock_irq(&conf->device_lock); } - sectors_handled = r1_bio->sector + max_sectors - bio->bi_sector; + sectors_handled = r1_bio->sector + max_sectors - bio->bi_iter.bi_sector; atomic_set(&r1_bio->remaining, 1); atomic_set(&r1_bio->behind_remaining, 0); @@ -1361,7 +1362,7 @@ read_again: continue; mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); - bio_trim(mbio, r1_bio->sector - bio->bi_sector, max_sectors); + bio_trim(mbio, r1_bio->sector - bio->bi_iter.bi_sector, max_sectors); if (first_clone) { /* do behind I/O ? 
@@ -1395,7 +1396,7 @@ read_again: r1_bio->bios[i] = mbio; - mbio->bi_sector = (r1_bio->sector + + mbio->bi_iter.bi_sector = (r1_bio->sector + conf->mirrors[i].rdev->data_offset); mbio->bi_bdev = conf->mirrors[i].rdev->bdev; mbio->bi_end_io = raid1_end_write_request; @@ -1435,7 +1436,7 @@ read_again: r1_bio->sectors = bio_sectors(bio) - sectors_handled; r1_bio->state = 0; r1_bio->mddev = mddev; - r1_bio->sector = bio->bi_sector + sectors_handled; + r1_bio->sector = bio->bi_iter.bi_sector + sectors_handled; goto retry_write; } @@ -1959,14 +1960,14 @@ static int process_checks(struct r1bio *r1_bio) /* fixup the bio for reuse */ bio_reset(b); b->bi_vcnt = vcnt; - b->bi_size = r1_bio->sectors << 9; - b->bi_sector = r1_bio->sector + + b->bi_iter.bi_size = r1_bio->sectors << 9; + b->bi_iter.bi_sector = r1_bio->sector + conf->mirrors[i].rdev->data_offset; b->bi_bdev = conf->mirrors[i].rdev->bdev; b->bi_end_io = end_sync_read; b->bi_private = r1_bio; - size = b->bi_size; + size = b->bi_iter.bi_size; for (j = 0; j < vcnt ; j++) { struct bio_vec *bi; bi = &b->bi_io_vec[j]; @@ -2221,11 +2222,11 @@ static int narrow_write_error(struct r1bio *r1_bio, int i) } wbio->bi_rw = WRITE; - wbio->bi_sector = r1_bio->sector; - wbio->bi_size = r1_bio->sectors << 9; + wbio->bi_iter.bi_sector = r1_bio->sector; + wbio->bi_iter.bi_size = r1_bio->sectors << 9; bio_trim(wbio, sector - r1_bio->sector, sectors); - wbio->bi_sector += rdev->data_offset; + wbio->bi_iter.bi_sector += rdev->data_offset; wbio->bi_bdev = rdev->bdev; if (submit_bio_wait(WRITE, wbio) == 0) /* failure! */ @@ -2339,7 +2340,8 @@ read_more: } r1_bio->read_disk = disk; bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev); - bio_trim(bio, r1_bio->sector - bio->bi_sector, max_sectors); + bio_trim(bio, r1_bio->sector - bio->bi_iter.bi_sector, + max_sectors); r1_bio->bios[r1_bio->read_disk] = bio; rdev = conf->mirrors[disk].rdev; printk_ratelimited(KERN_ERR @@ -2348,7 +2350,7 @@ read_more: mdname(mddev), (unsigned long long)r1_bio->sector, bdevname(rdev->bdev, b)); - bio->bi_sector = r1_bio->sector + rdev->data_offset; + bio->bi_iter.bi_sector = r1_bio->sector + rdev->data_offset; bio->bi_bdev = rdev->bdev; bio->bi_end_io = raid1_end_read_request; bio->bi_rw = READ | do_sync; @@ -2357,7 +2359,7 @@ read_more: /* Drat - have to split this up more */ struct bio *mbio = r1_bio->master_bio; int sectors_handled = (r1_bio->sector + max_sectors - - mbio->bi_sector); + - mbio->bi_iter.bi_sector); r1_bio->sectors = max_sectors; spin_lock_irq(&conf->device_lock); if (mbio->bi_phys_segments == 0) @@ -2375,7 +2377,8 @@ read_more: r1_bio->state = 0; set_bit(R1BIO_ReadError, &r1_bio->state); r1_bio->mddev = mddev; - r1_bio->sector = mbio->bi_sector + sectors_handled; + r1_bio->sector = mbio->bi_iter.bi_sector + + sectors_handled; goto read_more; } else @@ -2599,7 +2602,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp } if (bio->bi_end_io) { atomic_inc(&rdev->nr_pending); - bio->bi_sector = sector_nr + rdev->data_offset; + bio->bi_iter.bi_sector = sector_nr + rdev->data_offset; bio->bi_bdev = rdev->bdev; bio->bi_private = r1_bio; } @@ -2699,7 +2702,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp continue; /* remove last page from this bio */ bio->bi_vcnt--; - bio->bi_size -= len; + bio->bi_iter.bi_size -= len; bio->bi_flags &= ~(1<< BIO_SEG_VALID); } goto bio_full; diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index c504e8389e69..dbf3b63c2754 100644 --- a/drivers/md/raid10.c +++ 
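Note: the raid0/raid1 hunks above are the mechanical rename the commit message describes — bio->bi_sector becomes bio->bi_iter.bi_sector and bio->bi_size becomes bio->bi_iter.bi_size, while helpers such as bio_sectors() and bio_end_sector() keep the same call sites. The following is a minimal userspace sketch of that access pattern; the struct layouts and values are simplified mocks, not the kernel's definitions:

/* sketch.c -- mock of the bvec_iter field move (illustrative only) */
#include <stdio.h>

typedef unsigned long long sector_t;

struct bvec_iter {
        sector_t     bi_sector;  /* device address in 512-byte sectors */
        unsigned int bi_size;    /* residual I/O count in bytes */
        unsigned int bi_idx;     /* current index into bi_io_vec */
};

struct bio {
        /* was: sector_t bi_sector; unsigned int bi_size; unsigned int bi_idx; */
        struct bvec_iter bi_iter;
};

/* Helpers read through the iterator, so their callers are unchanged. */
static unsigned int bio_sectors(const struct bio *bio)
{
        return bio->bi_iter.bi_size >> 9;
}

static sector_t bio_end_sector(const struct bio *bio)
{
        return bio->bi_iter.bi_sector + bio_sectors(bio);
}

int main(void)
{
        struct bio bio = { .bi_iter = { .bi_sector = 2048, .bi_size = 4096 } };

        /* raid1's "sync end ... sectors %llu-%llu" debug range, post-patch: */
        printf("sectors %llu-%llu\n",
               bio.bi_iter.bi_sector,
               bio_end_sector(&bio) - 1);
        return 0;
}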
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index c504e8389e69..dbf3b63c2754
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1182,7 +1182,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 	/* If this request crosses a chunk boundary, we need to
 	 * split it. This will only happen for 1 PAGE (or less) requests.
 	 */
-	if (unlikely((bio->bi_sector & chunk_mask) + bio_sectors(bio)
+	if (unlikely((bio->bi_iter.bi_sector & chunk_mask) + bio_sectors(bio)
 		     > chunk_sects
 		     && (conf->geo.near_copies < conf->geo.raid_disks
 			 || conf->prev.near_copies < conf->prev.raid_disks))) {
@@ -1193,8 +1193,8 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 		/* This is a one page bio that upper layers
 		 * refuse to split for us, so we need to split it.
 		 */
-		bp = bio_split(bio,
-			       chunk_sects - (bio->bi_sector & (chunk_sects - 1)) );
+		bp = bio_split(bio, chunk_sects -
+			       (bio->bi_iter.bi_sector & (chunk_sects - 1)));
 
 		/* Each of these 'make_request' calls will call 'wait_barrier'.
 		 * If the first succeeds but the second blocks due to the resync
@@ -1221,7 +1221,8 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 bad_map:
 	printk("md/raid10:%s: make_request bug: can't convert block across chunks"
 	       " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2,
-	       (unsigned long long)bio->bi_sector, bio_sectors(bio) / 2);
+	       (unsigned long long)bio->bi_iter.bi_sector,
+	       bio_sectors(bio) / 2);
 
 	bio_io_error(bio);
 	return;
@@ -1238,24 +1239,25 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 
 	sectors = bio_sectors(bio);
 	while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
-	    bio->bi_sector < conf->reshape_progress &&
-	    bio->bi_sector + sectors > conf->reshape_progress) {
+	    bio->bi_iter.bi_sector < conf->reshape_progress &&
+	    bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
 		/* IO spans the reshape position. Need to wait for
 		 * reshape to pass
 		 */
 		allow_barrier(conf);
 		wait_event(conf->wait_barrier,
-			   conf->reshape_progress <= bio->bi_sector ||
-			   conf->reshape_progress >= bio->bi_sector + sectors);
+			   conf->reshape_progress <= bio->bi_iter.bi_sector ||
+			   conf->reshape_progress >= bio->bi_iter.bi_sector +
+			   sectors);
 		wait_barrier(conf);
 	}
 	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
 	    bio_data_dir(bio) == WRITE &&
 	    (mddev->reshape_backwards
-	     ? (bio->bi_sector < conf->reshape_safe &&
-		bio->bi_sector + sectors > conf->reshape_progress)
-	     : (bio->bi_sector + sectors > conf->reshape_safe &&
-		bio->bi_sector < conf->reshape_progress))) {
+	     ? (bio->bi_iter.bi_sector < conf->reshape_safe &&
+		bio->bi_iter.bi_sector + sectors > conf->reshape_progress)
+	     : (bio->bi_iter.bi_sector + sectors > conf->reshape_safe &&
+		bio->bi_iter.bi_sector < conf->reshape_progress))) {
 		/* Need to update reshape_position in metadata */
 		mddev->reshape_position = conf->reshape_progress;
 		set_bit(MD_CHANGE_DEVS, &mddev->flags);
@@ -1273,7 +1275,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 	r10_bio->sectors = sectors;
 
 	r10_bio->mddev = mddev;
-	r10_bio->sector = bio->bi_sector;
+	r10_bio->sector = bio->bi_iter.bi_sector;
 	r10_bio->state = 0;
 
 	/* We might need to issue multiple reads to different
@@ -1302,13 +1304,13 @@ read_again:
 		slot = r10_bio->read_slot;
 
 		read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
-		bio_trim(read_bio, r10_bio->sector - bio->bi_sector,
+		bio_trim(read_bio, r10_bio->sector - bio->bi_iter.bi_sector,
 			 max_sectors);
 
 		r10_bio->devs[slot].bio = read_bio;
 		r10_bio->devs[slot].rdev = rdev;
 
-		read_bio->bi_sector = r10_bio->devs[slot].addr +
+		read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
 			choose_data_offset(r10_bio, rdev);
 		read_bio->bi_bdev = rdev->bdev;
 		read_bio->bi_end_io = raid10_end_read_request;
@@ -1320,7 +1322,7 @@ read_again:
 			 * need another r10_bio.
 			 */
 			sectors_handled = (r10_bio->sectors + max_sectors
-					   - bio->bi_sector);
+					   - bio->bi_iter.bi_sector);
 			r10_bio->sectors = max_sectors;
 			spin_lock_irq(&conf->device_lock);
 			if (bio->bi_phys_segments == 0)
@@ -1341,7 +1343,8 @@ read_again:
 			r10_bio->sectors = bio_sectors(bio) - sectors_handled;
 			r10_bio->state = 0;
 			r10_bio->mddev = mddev;
-			r10_bio->sector = bio->bi_sector + sectors_handled;
+			r10_bio->sector = bio->bi_iter.bi_sector +
+				sectors_handled;
 			goto read_again;
 		} else
 			generic_make_request(read_bio);
@@ -1499,7 +1502,8 @@ retry_write:
 			bio->bi_phys_segments++;
 		spin_unlock_irq(&conf->device_lock);
 	}
-	sectors_handled = r10_bio->sector + max_sectors - bio->bi_sector;
+	sectors_handled = r10_bio->sector + max_sectors -
+		bio->bi_iter.bi_sector;
 
 	atomic_set(&r10_bio->remaining, 1);
 	bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0);
@@ -1510,11 +1514,11 @@ retry_write:
 		if (r10_bio->devs[i].bio) {
 			struct md_rdev *rdev = conf->mirrors[d].rdev;
 			mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
-			bio_trim(mbio, r10_bio->sector - bio->bi_sector,
+			bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector,
 				 max_sectors);
 			r10_bio->devs[i].bio = mbio;
 
-			mbio->bi_sector	= (r10_bio->devs[i].addr+
+			mbio->bi_iter.bi_sector	= (r10_bio->devs[i].addr+
 					   choose_data_offset(r10_bio, rdev));
 			mbio->bi_bdev = rdev->bdev;
@@ -1553,11 +1557,11 @@ retry_write:
 				rdev = conf->mirrors[d].rdev;
 			}
 			mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
-			bio_trim(mbio, r10_bio->sector - bio->bi_sector,
+			bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector,
 				 max_sectors);
 			r10_bio->devs[i].repl_bio = mbio;
 
-			mbio->bi_sector	= (r10_bio->devs[i].addr +
+			mbio->bi_iter.bi_sector	= (r10_bio->devs[i].addr +
 					   choose_data_offset(
 						r10_bio, rdev));
 			mbio->bi_bdev = rdev->bdev;
@@ -1591,7 +1595,7 @@ retry_write:
 
 		r10_bio->sectors = bio_sectors(bio) - sectors_handled;
 		r10_bio->mddev = mddev;
-		r10_bio->sector = bio->bi_sector + sectors_handled;
+		r10_bio->sector = bio->bi_iter.bi_sector + sectors_handled;
 		r10_bio->state = 0;
 		goto retry_write;
 	}
@@ -2124,10 +2128,10 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
 		bio_reset(tbio);
 
 		tbio->bi_vcnt = vcnt;
-		tbio->bi_size = r10_bio->sectors << 9;
+		tbio->bi_iter.bi_size = r10_bio->sectors << 9;
 		tbio->bi_rw = WRITE;
 		tbio->bi_private = r10_bio;
-		tbio->bi_sector = r10_bio->devs[i].addr;
+		tbio->bi_iter.bi_sector = r10_bio->devs[i].addr;
 
 		for (j=0; j < vcnt ; j++) {
 			tbio->bi_io_vec[j].bv_offset = 0;
@@ -2144,7 +2148,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
 		atomic_inc(&r10_bio->remaining);
 		md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio));
 
-		tbio->bi_sector += conf->mirrors[d].rdev->data_offset;
+		tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset;
 		tbio->bi_bdev = conf->mirrors[d].rdev->bdev;
 		generic_make_request(tbio);
 	}
@@ -2614,8 +2618,8 @@ static int narrow_write_error(struct r10bio *r10_bio, int i)
 			sectors = sect_to_write;
 		/* Write at 'sector' for 'sectors' */
 		wbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
-		bio_trim(wbio, sector - bio->bi_sector, sectors);
-		wbio->bi_sector = (r10_bio->devs[i].addr+
+		bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors);
+		wbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+
 				   choose_data_offset(r10_bio, rdev) +
 				   (sector - r10_bio->sector));
 		wbio->bi_bdev = rdev->bdev;
@@ -2687,10 +2691,10 @@ read_more:
 		(unsigned long long)r10_bio->sector);
 	bio = bio_clone_mddev(r10_bio->master_bio, GFP_NOIO, mddev);
-	bio_trim(bio, r10_bio->sector - bio->bi_sector, max_sectors);
+	bio_trim(bio, r10_bio->sector - bio->bi_iter.bi_sector, max_sectors);
 	r10_bio->devs[slot].bio = bio;
 	r10_bio->devs[slot].rdev = rdev;
-	bio->bi_sector = r10_bio->devs[slot].addr
+	bio->bi_iter.bi_sector = r10_bio->devs[slot].addr
 		+ choose_data_offset(r10_bio, rdev);
 	bio->bi_bdev = rdev->bdev;
 	bio->bi_rw = READ | do_sync;
@@ -2701,7 +2705,7 @@ read_more:
 		struct bio *mbio = r10_bio->master_bio;
 		int sectors_handled =
 			r10_bio->sector + max_sectors
-			- mbio->bi_sector;
+			- mbio->bi_iter.bi_sector;
 		r10_bio->sectors = max_sectors;
 		spin_lock_irq(&conf->device_lock);
 		if (mbio->bi_phys_segments == 0)
@@ -2719,7 +2723,7 @@ read_more:
 		set_bit(R10BIO_ReadError,
 			&r10_bio->state);
 		r10_bio->mddev = mddev;
-		r10_bio->sector = mbio->bi_sector
+		r10_bio->sector = mbio->bi_iter.bi_sector
 			+ sectors_handled;
 
 		goto read_more;
@@ -3157,7 +3161,8 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
 				bio->bi_end_io = end_sync_read;
 				bio->bi_rw = READ;
 				from_addr = r10_bio->devs[j].addr;
-				bio->bi_sector = from_addr + rdev->data_offset;
+				bio->bi_iter.bi_sector = from_addr +
+					rdev->data_offset;
 				bio->bi_bdev = rdev->bdev;
 				atomic_inc(&rdev->nr_pending);
 				/* and we write to 'i' (if not in_sync) */
@@ -3181,7 +3186,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
 					bio->bi_private = r10_bio;
 					bio->bi_end_io = end_sync_write;
 					bio->bi_rw = WRITE;
-					bio->bi_sector = to_addr
+					bio->bi_iter.bi_sector = to_addr
 						+ rdev->data_offset;
 					bio->bi_bdev = rdev->bdev;
 					atomic_inc(&r10_bio->remaining);
@@ -3210,7 +3215,8 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
 					bio->bi_private = r10_bio;
 					bio->bi_end_io = end_sync_write;
 					bio->bi_rw = WRITE;
-					bio->bi_sector = to_addr + rdev->data_offset;
+					bio->bi_iter.bi_sector = to_addr +
+						rdev->data_offset;
 					bio->bi_bdev = rdev->bdev;
 					atomic_inc(&r10_bio->remaining);
 					break;
@@ -3328,7 +3334,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
 			bio->bi_private = r10_bio;
 			bio->bi_end_io = end_sync_read;
 			bio->bi_rw = READ;
-			bio->bi_sector = sector +
+			bio->bi_iter.bi_sector = sector +
 				conf->mirrors[d].rdev->data_offset;
 			bio->bi_bdev = conf->mirrors[d].rdev->bdev;
 			count++;
@@ -3350,7 +3356,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
 			bio->bi_private = r10_bio;
 			bio->bi_end_io = end_sync_write;
 			bio->bi_rw = WRITE;
-			bio->bi_sector = sector +
+			bio->bi_iter.bi_sector = sector +
 				conf->mirrors[d].replacement->data_offset;
 			bio->bi_bdev = conf->mirrors[d].replacement->bdev;
 			count++;
@@ -3397,7 +3403,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
 		     bio2 = bio2->bi_next) {
 			/* remove last page from this bio */
 			bio2->bi_vcnt--;
-			bio2->bi_size -= len;
+			bio2->bi_iter.bi_size -= len;
 			bio2->bi_flags &= ~(1<< BIO_SEG_VALID);
 		}
 		goto bio_full;
@@ -4417,7 +4423,7 @@ read_more:
 	read_bio = bio_alloc_mddev(GFP_KERNEL, RESYNC_PAGES, mddev);
 
 	read_bio->bi_bdev = rdev->bdev;
-	read_bio->bi_sector = (r10_bio->devs[r10_bio->read_slot].addr
+	read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr
 			       + rdev->data_offset);
 	read_bio->bi_private = r10_bio;
 	read_bio->bi_end_io = end_sync_read;
@@ -4425,7 +4431,7 @@ read_more:
 	read_bio->bi_flags &= ~(BIO_POOL_MASK - 1);
 	read_bio->bi_flags |= 1 << BIO_UPTODATE;
 	read_bio->bi_vcnt = 0;
-	read_bio->bi_size = 0;
+	read_bio->bi_iter.bi_size = 0;
 	r10_bio->master_bio = read_bio;
 	r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum;
@@ -4451,7 +4457,8 @@ read_more:
 		bio_reset(b);
 		b->bi_bdev = rdev2->bdev;
-		b->bi_sector = r10_bio->devs[s/2].addr + rdev2->new_data_offset;
+		b->bi_iter.bi_sector = r10_bio->devs[s/2].addr +
+			rdev2->new_data_offset;
 		b->bi_private = r10_bio;
 		b->bi_end_io = end_reshape_write;
 		b->bi_rw = WRITE;
@@ -4478,7 +4485,7 @@ read_more:
 	     bio2 = bio2->bi_next) {
 		/* Remove last page from this bio */
 		bio2->bi_vcnt--;
-		bio2->bi_size -= len;
+		bio2->bi_iter.bi_size -= len;
 		bio2->bi_flags &= ~(1<<BIO_SEG_VALID);
 	}
 	goto bio_full;
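Note: the raid10 hunks above also rewrite the chunk-boundary split test and the bio_split()/bio_trim() offset arithmetic in terms of bio->bi_iter.bi_sector. The following is a plain-C sketch of that arithmetic with illustrative constants (chunk_sects must be a power of two, as in md); the function and variable names are invented for the sketch:

/* chunk_split.c -- sketch of raid10's split arithmetic (illustrative only) */
#include <assert.h>
#include <stdio.h>

typedef unsigned long long sector_t;

/* Sectors remaining before 'sector' crosses the next chunk boundary.
 * This is the count raid10 now passes to bio_split(). */
static sector_t sectors_to_chunk_end(sector_t sector, unsigned int chunk_sects)
{
        return chunk_sects - (sector & (sector_t)(chunk_sects - 1));
}

int main(void)
{
        unsigned int chunk_sects = 128;  /* 64 KiB chunks in 512-byte sectors */
        sector_t bi_sector = 200;        /* stands in for bio->bi_iter.bi_sector */
        unsigned int bio_len = 60;       /* stands in for bio_sectors(bio) */

        /* The post-patch test: does the request cross a chunk boundary? */
        if ((bi_sector & (chunk_sects - 1)) + bio_len > chunk_sects)
                printf("split after %llu sectors\n",
                       sectors_to_chunk_end(bi_sector, chunk_sects));
        else
                printf("no split needed\n");

        /* 200 % 128 = 72, so 128 - 72 = 56 sectors remain in this chunk. */
        assert(sectors_to_chunk_end(200, 128) == 56);
        return 0;
}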
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 47da0af6322b..a5d9c0ee4d60 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -133,7 +133,7 @@ static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf)
 static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
 {
 	int sectors = bio_sectors(bio);
-	if (bio->bi_sector + sectors < sector + STRIPE_SECTORS)
+	if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS)
 		return bio->bi_next;
 	else
 		return NULL;
@@ -225,7 +225,7 @@ static void return_io(struct bio *return_bi)
 		return_bi = bi->bi_next;
 		bi->bi_next = NULL;
-		bi->bi_size = 0;
+		bi->bi_iter.bi_size = 0;
 		trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
 					 bi, 0);
 		bio_endio(bi, 0);
@@ -854,10 +854,10 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 				bi->bi_rw, i);
 			atomic_inc(&sh->count);
 			if (use_new_offset(conf, sh))
-				bi->bi_sector = (sh->sector
+				bi->bi_iter.bi_sector = (sh->sector
 						 + rdev->new_data_offset);
 			else
-				bi->bi_sector = (sh->sector
+				bi->bi_iter.bi_sector = (sh->sector
 						 + rdev->data_offset);
 			if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
 				bi->bi_rw |= REQ_NOMERGE;
@@ -865,7 +865,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 			bi->bi_vcnt = 1;
 			bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
 			bi->bi_io_vec[0].bv_offset = 0;
-			bi->bi_size = STRIPE_SIZE;
+			bi->bi_iter.bi_size = STRIPE_SIZE;
 			/*
 			 * If this is discard request, set bi_vcnt 0. We don't
 			 * want to confuse SCSI because SCSI will replace payload
@@ -901,15 +901,15 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 				rbi->bi_rw, i);
 			atomic_inc(&sh->count);
 			if (use_new_offset(conf, sh))
-				rbi->bi_sector = (sh->sector
+				rbi->bi_iter.bi_sector = (sh->sector
 						  + rrdev->new_data_offset);
 			else
-				rbi->bi_sector = (sh->sector
+				rbi->bi_iter.bi_sector = (sh->sector
 						  + rrdev->data_offset);
 			rbi->bi_vcnt = 1;
 			rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
 			rbi->bi_io_vec[0].bv_offset = 0;
-			rbi->bi_size = STRIPE_SIZE;
+			rbi->bi_iter.bi_size = STRIPE_SIZE;
 			/*
 			 * If this is discard request, set bi_vcnt 0. We don't
 			 * want to confuse SCSI because SCSI will replace payload
@@ -944,10 +944,10 @@ async_copy_data(int frombio, struct bio *bio, struct page *page,
 	struct async_submit_ctl submit;
 	enum async_tx_flags flags = 0;
 
-	if (bio->bi_sector >= sector)
-		page_offset = (signed)(bio->bi_sector - sector) * 512;
+	if (bio->bi_iter.bi_sector >= sector)
+		page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512;
 	else
-		page_offset = (signed)(sector - bio->bi_sector) * -512;
+		page_offset = (signed)(sector - bio->bi_iter.bi_sector) * -512;
 
 	if (frombio)
 		flags |= ASYNC_TX_FENCE;
@@ -1014,7 +1014,7 @@ static void ops_complete_biofill(void *stripe_head_ref)
 			BUG_ON(!dev->read);
 			rbi = dev->read;
 			dev->read = NULL;
-			while (rbi && rbi->bi_sector <
+			while (rbi && rbi->bi_iter.bi_sector <
 				dev->sector + STRIPE_SECTORS) {
 				rbi2 = r5_next_bio(rbi, dev->sector);
 				if (!raid5_dec_bi_active_stripes(rbi)) {
@@ -1050,7 +1050,7 @@ static void ops_run_biofill(struct stripe_head *sh)
 			dev->read = rbi = dev->toread;
 			dev->toread = NULL;
 			spin_unlock_irq(&sh->stripe_lock);
-			while (rbi && rbi->bi_sector <
+			while (rbi && rbi->bi_iter.bi_sector <
 				dev->sector + STRIPE_SECTORS) {
 				tx = async_copy_data(0, rbi, dev->page,
 						     dev->sector, tx);
@@ -1392,7 +1392,7 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
 			wbi = dev->written = chosen;
 			spin_unlock_irq(&sh->stripe_lock);
 
-			while (wbi && wbi->bi_sector <
+			while (wbi && wbi->bi_iter.bi_sector <
 				dev->sector + STRIPE_SECTORS) {
 				if (wbi->bi_rw & REQ_FUA)
 					set_bit(R5_WantFUA, &dev->flags);
@@ -2616,7 +2616,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
 	int firstwrite=0;
 
 	pr_debug("adding bi b#%llu to stripe s#%llu\n",
-		(unsigned long long)bi->bi_sector,
+		(unsigned long long)bi->bi_iter.bi_sector,
 		(unsigned long long)sh->sector);
 
 	/*
@@ -2634,12 +2634,12 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
 		firstwrite = 1;
 	} else
 		bip = &sh->dev[dd_idx].toread;
-	while (*bip && (*bip)->bi_sector < bi->bi_sector) {
-		if (bio_end_sector(*bip) > bi->bi_sector)
+	while (*bip && (*bip)->bi_iter.bi_sector < bi->bi_iter.bi_sector) {
+		if (bio_end_sector(*bip) > bi->bi_iter.bi_sector)
 			goto overlap;
 		bip = & (*bip)->bi_next;
 	}
-	if (*bip && (*bip)->bi_sector < bio_end_sector(bi))
+	if (*bip && (*bip)->bi_iter.bi_sector < bio_end_sector(bi))
 		goto overlap;
 
 	BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
@@ -2653,7 +2653,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
 		sector_t sector = sh->dev[dd_idx].sector;
 		for (bi=sh->dev[dd_idx].towrite;
 		     sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
-			     bi && bi->bi_sector <= sector;
+			     bi && bi->bi_iter.bi_sector <= sector;
 		     bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
 			if (bio_end_sector(bi) >= sector)
 				sector = bio_end_sector(bi);
@@ -2663,7 +2663,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
 	}
 
 	pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
-		(unsigned long long)(*bip)->bi_sector,
+		(unsigned long long)(*bip)->bi_iter.bi_sector,
 		(unsigned long long)sh->sector, dd_idx);
 	spin_unlock_irq(&sh->stripe_lock);
 
@@ -2738,7 +2738,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
 		if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
 			wake_up(&conf->wait_for_overlap);
 
-		while (bi && bi->bi_sector <
+		while (bi && bi->bi_iter.bi_sector <
 			sh->dev[i].sector + STRIPE_SECTORS) {
 			struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
 			clear_bit(BIO_UPTODATE, &bi->bi_flags);
@@ -2757,7 +2757,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
 		bi = sh->dev[i].written;
 		sh->dev[i].written = NULL;
 		if (bi) bitmap_end = 1;
-		while (bi && bi->bi_sector <
+		while (bi && bi->bi_iter.bi_sector <
 		       sh->dev[i].sector + STRIPE_SECTORS) {
 			struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
 			clear_bit(BIO_UPTODATE, &bi->bi_flags);
@@ -2781,7 +2781,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
 			spin_unlock_irq(&sh->stripe_lock);
 			if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
 				wake_up(&conf->wait_for_overlap);
-			while (bi && bi->bi_sector <
+			while (bi && bi->bi_iter.bi_sector <
 			       sh->dev[i].sector + STRIPE_SECTORS) {
 				struct bio *nextbi =
 					r5_next_bio(bi, sh->dev[i].sector);
@@ -3005,7 +3005,7 @@ static void handle_stripe_clean_event(struct r5conf *conf,
 					clear_bit(R5_UPTODATE, &dev->flags);
 					wbi = dev->written;
 					dev->written = NULL;
-					while (wbi && wbi->bi_sector <
+					while (wbi && wbi->bi_iter.bi_sector <
 						dev->sector + STRIPE_SECTORS) {
 						wbi2 = r5_next_bio(wbi, dev->sector);
 						if (!raid5_dec_bi_active_stripes(wbi)) {
@@ -4097,7 +4097,7 @@ static int raid5_mergeable_bvec(struct request_queue *q,
 
 static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
 {
-	sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
+	sector_t sector = bio->bi_iter.bi_sector + get_start_sect(bio->bi_bdev);
 	unsigned int chunk_sectors = mddev->chunk_sectors;
 	unsigned int bio_sectors = bio_sectors(bio);
 
@@ -4234,9 +4234,9 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
 		/*
 		 *   compute position
 		 */
-		align_bi->bi_sector =  raid5_compute_sector(conf, raid_bio->bi_sector,
-						     0,
-						     &dd_idx, NULL);
+		align_bi->bi_iter.bi_sector =
+			raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector,
+					     0, &dd_idx, NULL);
 
 		end_sector = bio_end_sector(align_bi);
 		rcu_read_lock();
@@ -4261,7 +4261,8 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
 		align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
 
 		if (!bio_fits_rdev(align_bi) ||
-		    is_badblock(rdev, align_bi->bi_sector, bio_sectors(align_bi),
+		    is_badblock(rdev, align_bi->bi_iter.bi_sector,
+				bio_sectors(align_bi),
 				&first_bad, &bad_sectors)) {
 			/* too big in some way, or has a known bad block */
 			bio_put(align_bi);
@@ -4270,7 +4271,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
 		}
 
 		/* No reshape active, so we can trust rdev->data_offset */
-		align_bi->bi_sector += rdev->data_offset;
+		align_bi->bi_iter.bi_sector += rdev->data_offset;
 
 		spin_lock_irq(&conf->device_lock);
 		wait_event_lock_irq(conf->wait_for_stripe,
@@ -4282,7 +4283,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
 		if (mddev->gendisk)
 			trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev),
 					      align_bi, disk_devt(mddev->gendisk),
-					      raid_bio->bi_sector);
+					      raid_bio->bi_iter.bi_sector);
 		generic_make_request(align_bi);
 		return 1;
 	} else {
@@ -4465,8 +4466,8 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
 		/* Skip discard while reshape is happening */
 		return;
 
-	logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
-	last_sector = bi->bi_sector + (bi->bi_size>>9);
+	logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1);
+	last_sector = bi->bi_iter.bi_sector + (bi->bi_iter.bi_size>>9);
 
 	bi->bi_next = NULL;
 	bi->bi_phys_segments = 1;	/* over-loaded to count active stripes */
@@ -4570,7 +4571,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
 		return;
 	}
 
-	logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
+	logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1);
 	last_sector = bio_end_sector(bi);
 	bi->bi_next = NULL;
 	bi->bi_phys_segments = 1;	/* over-loaded to count active stripes */
@@ -5054,7 +5055,8 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
 	int remaining;
 	int handled = 0;
 
-	logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
+	logical_sector = raid_bio->bi_iter.bi_sector &
+		~((sector_t)STRIPE_SECTORS-1);
 	sector = raid5_compute_sector(conf, logical_sector,
 				      0, &dd_idx, NULL);
 	last_sector = bio_end_sector(raid_bio);
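Note: raid5 walks per-stripe bio chains with r5_next_bio(), whose boundary test is rewritten above in terms of bio->bi_iter. The following userspace sketch mocks that logic with simplified types and an illustrative STRIPE_SECTORS; the list layout and values are invented for the example:

/* r5_next.c -- sketch of raid5's stripe-chain walk (illustrative only) */
#include <stddef.h>
#include <stdio.h>

typedef unsigned long long sector_t;

enum { STRIPE_SECTORS = 8 };    /* 4 KiB stripes in 512-byte sectors */

struct bvec_iter {
        sector_t     bi_sector;
        unsigned int bi_size;
};

struct bio {
        struct bvec_iter bi_iter;
        struct bio *bi_next;
};

static unsigned int bio_sectors(const struct bio *bio)
{
        return bio->bi_iter.bi_size >> 9;
}

/* Post-patch logic: return the next chained bio only while this bio
 * still ends inside the stripe that starts at 'sector'. */
static struct bio *r5_next_bio(struct bio *bio, sector_t sector)
{
        int sectors = bio_sectors(bio);

        if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS)
                return bio->bi_next;
        else
                return NULL;
}

int main(void)
{
        struct bio b2 = { .bi_iter = { .bi_sector = 16, .bi_size = 4096 } };
        struct bio b1 = { .bi_iter = { .bi_sector = 16, .bi_size = 2048 },
                          .bi_next = &b2 };

        /* b1 covers sectors 16-19 of the stripe at 16, so the walk continues. */
        printf("next bio: %p\n", (void *)r5_next_bio(&b1, 16));
        return 0;
}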