author     NeilBrown <neilb@suse.com>                    2017-03-15 14:05:12 +1100
committer  Shaohua Li <shli@fb.com>                      2017-03-22 19:16:12 -0700
commit     bd83d0a28c68bacba88a3193a1bd6a083bb8d9f5
tree       f12df651c211e4d0faa4104e41e51c37417a7d93      /drivers/md/raid5.c
parent     16d997b78b157315f5c90fcbc2f9ce575cb3879f
md/raid5: call bio_endio() directly rather than queueing for later.
We currently gather bios that need to be returned into a bio_list
and call bio_endio() on them all together.
The original reason for this was to avoid making the calls while
holding a spinlock.
Locking has changed a lot since then, and that reason is no longer
valid.
So discard return_io() and various return_bi lists, and just call
bio_endio() directly as needed.
Signed-off-by: NeilBrown <neilb@suse.com>
Signed-off-by: Shaohua Li <shli@fb.com>
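
For readers skimming the diff below, the change boils down to swapping a deferred-completion idiom for direct completion. The following userspace sketch models that pattern only; struct fake_bio and the helpers here are stand-ins for the kernel's struct bio, bio_list, raid5_dec_bi_active_stripes() and bio_endio(), not the raid5 code itself.

/*
 * Illustrative sketch only: fake_bio and the helpers below are stand-ins
 * for the kernel's struct bio, bio_list and raid5_dec_bi_active_stripes();
 * they model the pattern, not the raid5 code.
 */
#include <stdio.h>
#include <stdlib.h>

struct fake_bio {
	int refs;              /* models the per-bio active-stripe count */
	struct fake_bio *next; /* models bio_list chaining */
};

static void fake_bio_endio(struct fake_bio *b)
{
	printf("completing bio %p\n", (void *)b);
	free(b);
}

/* Old pattern: park finished bios on a list, complete them all later
 * (originally so the completion never ran under a spinlock). */
static void complete_deferred(struct fake_bio **list, struct fake_bio *b)
{
	if (--b->refs == 0) {
		b->next = *list;        /* like bio_list_add(&return_bi, b) */
		*list = b;
	}
}

static void flush_deferred(struct fake_bio **list)
{
	struct fake_bio *b;

	while ((b = *list) != NULL) {   /* like return_io(&return_bi) */
		*list = b->next;
		fake_bio_endio(b);
	}
}

/* New pattern: complete the bio the moment its last reference is dropped. */
static void complete_direct(struct fake_bio *b)
{
	if (--b->refs == 0)
		fake_bio_endio(b);      /* completion called inline */
}

int main(void)
{
	struct fake_bio *deferred = NULL;
	struct fake_bio *a = calloc(1, sizeof(*a));
	struct fake_bio *b = calloc(1, sizeof(*b));

	a->refs = b->refs = 1;

	complete_deferred(&deferred, a); /* queued, completed on the flush */
	flush_deferred(&deferred);

	complete_direct(b);              /* completed immediately */
	return 0;
}

The practical difference is only where the completion runs: at the point the last reference is dropped, rather than after the whole stripe has been processed.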
Diffstat (limited to 'drivers/md/raid5.c')
-rw-r--r--   drivers/md/raid5.c | 38
1 file changed, 10 insertions, 28 deletions
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index a2c9ddc35335..44c8ceba13fe 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -158,17 +158,6 @@ static int raid6_idx_to_slot(int idx, struct stripe_head *sh,
 	return slot;
 }
 
-static void return_io(struct bio_list *return_bi)
-{
-	struct bio *bi;
-	while ((bi = bio_list_pop(return_bi)) != NULL) {
-		bi->bi_iter.bi_size = 0;
-		trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
-					 bi, 0);
-		bio_endio(bi);
-	}
-}
-
 static void print_raid5_conf (struct r5conf *conf);
 
 static int stripe_operations_active(struct stripe_head *sh)
@@ -1310,7 +1299,6 @@ async_copy_data(int frombio, struct bio *bio, struct page **page,
 static void ops_complete_biofill(void *stripe_head_ref)
 {
 	struct stripe_head *sh = stripe_head_ref;
-	struct bio_list return_bi = BIO_EMPTY_LIST;
 	int i;
 
 	pr_debug("%s: stripe %llu\n", __func__,
@@ -1335,15 +1323,13 @@ static void ops_complete_biofill(void *stripe_head_ref)
 			     dev->sector + STRIPE_SECTORS) {
 				rbi2 = r5_next_bio(rbi, dev->sector);
 				if (!raid5_dec_bi_active_stripes(rbi))
-					bio_list_add(&return_bi, rbi);
+					bio_endio(rbi);
 				rbi = rbi2;
 			}
 		}
 	}
 	clear_bit(STRIPE_BIOFILL_RUN, &sh->state);
 
-	return_io(&return_bi);
-
 	set_bit(STRIPE_HANDLE, &sh->state);
 	raid5_release_stripe(sh);
 }
@@ -3351,8 +3337,7 @@ static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous,
 
 static void
 handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
-				struct stripe_head_state *s, int disks,
-				struct bio_list *return_bi)
+				struct stripe_head_state *s, int disks)
 {
 	int i;
 	BUG_ON(sh->batch_head);
@@ -3400,7 +3385,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
 			bi->bi_error = -EIO;
 			md_write_end(conf->mddev);
 			if (!raid5_dec_bi_active_stripes(bi))
-				bio_list_add(return_bi, bi);
+				bio_endio(bi);
 			bi = nextbi;
 		}
 		if (bitmap_end)
@@ -3423,7 +3408,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
 			bi->bi_error = -EIO;
 			md_write_end(conf->mddev);
 			if (!raid5_dec_bi_active_stripes(bi))
-				bio_list_add(return_bi, bi);
+				bio_endio(bi);
 			bi = bi2;
 		}
 
@@ -3449,7 +3434,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
 
 				bi->bi_error = -EIO;
 				if (!raid5_dec_bi_active_stripes(bi))
-					bio_list_add(return_bi, bi);
+					bio_endio(bi);
 				bi = nextbi;
 			}
 		}
@@ -3748,7 +3733,7 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
  * never LOCKED, so we don't need to test 'failed' directly.
  */
 static void handle_stripe_clean_event(struct r5conf *conf,
-	struct stripe_head *sh, int disks, struct bio_list *return_bi)
+	struct stripe_head *sh, int disks)
 {
 	int i;
 	struct r5dev *dev;
@@ -3782,7 +3767,7 @@ returnbi:
 				wbi2 = r5_next_bio(wbi, dev->sector);
 				md_write_end(conf->mddev);
 				if (!raid5_dec_bi_active_stripes(wbi))
-					bio_list_add(return_bi, wbi);
+					bio_endio(wbi);
 				wbi = wbi2;
 			}
 			bitmap_endwrite(conf->mddev->bitmap, sh->sector,
@@ -4725,7 +4710,7 @@ static void handle_stripe(struct stripe_head *sh)
 		sh->reconstruct_state = 0;
 		break_stripe_batch_list(sh, 0);
 		if (s.to_read+s.to_write+s.written)
-			handle_failed_stripe(conf, sh, &s, disks, &s.return_bi);
+			handle_failed_stripe(conf, sh, &s, disks);
 		if (s.syncing + s.replacing)
 			handle_failed_sync(conf, sh, &s);
 	}
@@ -4791,10 +4776,10 @@ static void handle_stripe(struct stripe_head *sh)
 			     && !test_bit(R5_LOCKED, &qdev->flags)
 			     && (test_bit(R5_UPTODATE, &qdev->flags) ||
 				 test_bit(R5_Discard, &qdev->flags))))))
-		handle_stripe_clean_event(conf, sh, disks, &s.return_bi);
+		handle_stripe_clean_event(conf, sh, disks);
 
 	if (s.just_cached)
-		r5c_handle_cached_data_endio(conf, sh, disks, &s.return_bi);
+		r5c_handle_cached_data_endio(conf, sh, disks);
 	log_stripe_write_finished(sh);
 
 	/* Now we might consider reading some blocks, either to check/generate
@@ -5022,9 +5007,6 @@ finish:
 			md_wakeup_thread(conf->mddev->thread);
 	}
 
-	if (!bio_list_empty(&s.return_bi))
-		return_io(&s.return_bi);
-
 	clear_bit_unlock(STRIPE_ACTIVE, &sh->state);
 }
 