Diffstat (limited to 'block/ll_rw_blk.c')
-rw-r--r--  block/ll_rw_blk.c | 19 ++++++-------------
1 file changed, 6 insertions(+), 13 deletions(-)
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index e35119a72a44..094c0fa5c405 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -1244,8 +1244,7 @@ static void blk_recalc_rq_segments(struct request *rq)
int seg_size;
int hw_seg_size;
int cluster;
- struct bio *bio;
- int i;
+ struct req_iterator iter;
int high, highprv = 1;
struct request_queue *q = rq->q;
@@ -1255,8 +1254,7 @@ static void blk_recalc_rq_segments(struct request *rq)
cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
hw_seg_size = seg_size = 0;
phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0;
- rq_for_each_bio(bio, rq)
- bio_for_each_segment(bv, bio, i) {
+ rq_for_each_segment(bv, rq, iter) {
/*
* the trick here is making sure that a high page is never
* considered part of another segment, since that might
@@ -1353,8 +1351,8 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
struct scatterlist *sg)
{
struct bio_vec *bvec, *bvprv;
- struct bio *bio;
- int nsegs, i, cluster;
+ struct req_iterator iter;
+ int nsegs, cluster;
nsegs = 0;
cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
@@ -1363,11 +1361,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
* for each bio in rq
*/
bvprv = NULL;
- rq_for_each_bio(bio, rq) {
- /*
- * for each segment in bio
- */
- bio_for_each_segment(bvec, bio, i) {
+ rq_for_each_segment(bvec, rq, iter) {
int nbytes = bvec->bv_len;
if (bvprv && cluster) {
@@ -1390,8 +1384,7 @@ new_segment:
nsegs++;
}
bvprv = bvec;
- } /* segments in bio */
- } /* bios in rq */
+ } /* segments in rq */
return nsegs;
}
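
For reference, a minimal sketch of the iteration pattern this patch converts to, assuming the struct req_iterator and rq_for_each_segment() definitions from <linux/blkdev.h> that the code above relies on. The helper name and the byte-counting purpose are illustrative assumptions, not part of this change:

#include <linux/blkdev.h>
#include <linux/bio.h>

/*
 * Illustrative only: walk every bio_vec in a request with the single
 * rq_for_each_segment() iterator instead of the old nested
 * rq_for_each_bio()/bio_for_each_segment() loops, summing bv_len.
 */
static unsigned int count_rq_bytes(struct request *rq)
{
        struct bio_vec *bvec;
        struct req_iterator iter;
        unsigned int bytes = 0;

        rq_for_each_segment(bvec, rq, iter) {
                bytes += bvec->bv_len;
        }

        return bytes;
}

The iterator keeps the per-bio bookkeeping inside struct req_iterator, so callers no longer need their own struct bio pointer or segment index, which is exactly the simplification visible in the two hunks above.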