author     Linus Torvalds <torvalds@linux-foundation.org>    2017-02-24 14:42:19 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>    2017-02-24 14:42:19 -0800
commit     a682e0035494c449e53a57d039f86f75b9e2fe67
tree       382d6c2d4729e6ed8f697fd528209a2b4701b618 /block
parent     1802979ab1ee8ec5a72987ad518f5a91bf41cd89
parent     1ec492232ed659acde8cc00b9ecc7529778e03e1
Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/shli/md
Pull md updates from Shaohua Li:
"Mainly fixes bugs and improves performance:
- Improve scalability for raid1 from Coly
- Improve raid5-cache read performance, disk efficiency and IO
pattern from Song and me
- Fix a race condition of disk hotplug for linear from Coly
- A few cleanup patches from Ming and Byungchul
- Fix a memory leak from Neil
- Fix WRITE SAME IO failure from me
- Add doc for raid5-cache from me"
* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/shli/md: (23 commits)
md/raid1: fix write behind issues introduced by bio_clone_bioset_partial
md/raid1: handle flush request correctly
md/linear: shutup lockdep warnning
md/raid1: fix a use-after-free bug
RAID1: avoid unnecessary spin locks in I/O barrier code
RAID1: a new I/O barrier implementation to remove resync window
md/raid5: Don't reinvent the wheel but use existing llist API
md: fast clone bio in bio_clone_mddev()
md: remove unnecessary check on mddev
md/raid1: use bio_clone_bioset_partial() in case of write behind
md: fail if mddev->bio_set can't be created
block: introduce bio_clone_bioset_partial()
md: disable WRITE SAME if it fails in underlayer disks
md/raid5-cache: exclude reclaiming stripes in reclaim check
md/raid5-cache: stripe reclaim only counts valid stripes
MD: add doc for raid5-cache
Documentation: move MD related doc into a separate dir
md: ensure md devices are freed before module is unloaded.
md/r5cache: improve journal device efficiency
md/r5cache: enable chunk_aligned_read with write back cache
...
Diffstat (limited to 'block')
-rw-r--r--  block/bio.c  61
1 file changed, 48 insertions, 13 deletions
diff --git a/block/bio.c b/block/bio.c
index 4b564d0c3e29..5eec5e08417f 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -625,21 +625,20 @@ struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
 }
 EXPORT_SYMBOL(bio_clone_fast);
 
-/**
- * bio_clone_bioset - clone a bio
- * @bio_src: bio to clone
- * @gfp_mask: allocation priority
- * @bs: bio_set to allocate from
- *
- * Clone bio. Caller will own the returned bio, but not the actual data it
- * points to. Reference count of returned bio will be one.
- */
-struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
-			     struct bio_set *bs)
+static struct bio *__bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
+				      struct bio_set *bs, int offset,
+				      int size)
 {
 	struct bvec_iter iter;
 	struct bio_vec bv;
 	struct bio *bio;
+	struct bvec_iter iter_src = bio_src->bi_iter;
+
+	/* for supporting partial clone */
+	if (offset || size != bio_src->bi_iter.bi_size) {
+		bio_advance_iter(bio_src, &iter_src, offset);
+		iter_src.bi_size = size;
+	}
 
 	/*
 	 * Pre immutable biovecs, __bio_clone() used to just do a memcpy from
@@ -663,7 +662,8 @@ struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
 	 *    __bio_clone_fast() anyways.
 	 */
 
-	bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs);
+	bio = bio_alloc_bioset(gfp_mask, __bio_segments(bio_src,
+			       &iter_src), bs);
 	if (!bio)
 		return NULL;
 	bio->bi_bdev = bio_src->bi_bdev;
@@ -680,7 +680,7 @@ struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
 		bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
 		break;
 	default:
-		bio_for_each_segment(bv, bio_src, iter)
+		__bio_for_each_segment(bv, bio_src, iter, iter_src)
 			bio->bi_io_vec[bio->bi_vcnt++] = bv;
 		break;
 	}
@@ -699,9 +699,44 @@ struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
 
 	return bio;
 }
+
+/**
+ * bio_clone_bioset - clone a bio
+ * @bio_src: bio to clone
+ * @gfp_mask: allocation priority
+ * @bs: bio_set to allocate from
+ *
+ * Clone bio. Caller will own the returned bio, but not the actual data it
+ * points to. Reference count of returned bio will be one.
+ */
+struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
+			     struct bio_set *bs)
+{
+	return __bio_clone_bioset(bio_src, gfp_mask, bs, 0,
+				  bio_src->bi_iter.bi_size);
+}
 EXPORT_SYMBOL(bio_clone_bioset);
 
 /**
+ * bio_clone_bioset_partial - clone a partial bio
+ * @bio_src: bio to clone
+ * @gfp_mask: allocation priority
+ * @bs: bio_set to allocate from
+ * @offset: cloned starting from the offset
+ * @size: size for the cloned bio
+ *
+ * Clone bio. Caller will own the returned bio, but not the actual data it
+ * points to. Reference count of returned bio will be one.
+ */
+struct bio *bio_clone_bioset_partial(struct bio *bio_src, gfp_t gfp_mask,
+				     struct bio_set *bs, int offset,
+				     int size)
+{
+	return __bio_clone_bioset(bio_src, gfp_mask, bs, offset, size);
+}
+EXPORT_SYMBOL(bio_clone_bioset_partial);
+
+/**
+ * bio_add_pc_page - attempt to add page to bio
  * @q: the target queue
  * @bio: destination bio
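
The new export here is bio_clone_bioset_partial(), which clones only a byte
range of an existing bio; the md/raid1 write-behind path in this merge is its
first user. As a rough illustration only (the helper below and its name are
hypothetical, not part of the patch), a caller holding a bio_set could clone
just the first "sectors" sectors of a write. Note that the offset and size
arguments are byte counts, so sector values are shifted left by 9:

    #include <linux/bio.h>

    /*
     * Hypothetical sketch, not from this merge: clone only the first
     * "sectors" sectors of @src, allocating from @bs.  The offset and
     * size arguments of bio_clone_bioset_partial() are in bytes, hence
     * the << 9 conversion from sectors.
     */
    static struct bio *clone_prefix(struct bio *src, struct bio_set *bs,
                                    unsigned int sectors)
    {
            return bio_clone_bioset_partial(src, GFP_NOIO, bs, 0,
                                            sectors << 9);
    }

Cloning the remainder instead would pass sectors << 9 as the offset and
(bio_sectors(src) - sectors) << 9 as the size. With offset 0 and a size equal
to bi_iter.bi_size, __bio_clone_bioset() skips the bio_advance_iter() step and
behaves exactly like the old bio_clone_bioset().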