Diffstat (limited to 'fs/btrfs/scrub.c')
-rw-r--r-- | fs/btrfs/scrub.c | 92
1 file changed, 62 insertions(+), 30 deletions(-)
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 7e487be0094e..a38cfa4f251e 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -50,7 +50,7 @@ struct scrub_dev;
 struct scrub_page {
 	struct scrub_block	*sblock;
 	struct page		*page;
-	struct block_device	*bdev;
+	struct btrfs_device	*dev;
 	u64			flags;  /* extent flags */
 	u64			generation;
 	u64			logical;
@@ -86,6 +86,7 @@ struct scrub_block {
 		unsigned int	header_error:1;
 		unsigned int	checksum_error:1;
 		unsigned int	no_io_error_seen:1;
+		unsigned int	generation_error:1; /* also sets header_error */
 	};
 };
 
@@ -675,6 +676,8 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
 		sdev->stat.read_errors++;
 		sdev->stat.uncorrectable_errors++;
 		spin_unlock(&sdev->stat_lock);
+		btrfs_dev_stat_inc_and_print(sdev->dev,
+					     BTRFS_DEV_STAT_READ_ERRS);
 		goto out;
 	}
 
@@ -686,6 +689,8 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
 		sdev->stat.read_errors++;
 		sdev->stat.uncorrectable_errors++;
 		spin_unlock(&sdev->stat_lock);
+		btrfs_dev_stat_inc_and_print(sdev->dev,
+					     BTRFS_DEV_STAT_READ_ERRS);
 		goto out;
 	}
 	BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
@@ -699,6 +704,8 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
 		sdev->stat.read_errors++;
 		sdev->stat.uncorrectable_errors++;
 		spin_unlock(&sdev->stat_lock);
+		btrfs_dev_stat_inc_and_print(sdev->dev,
+					     BTRFS_DEV_STAT_READ_ERRS);
 		goto out;
 	}
 
@@ -725,12 +732,16 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
 		spin_unlock(&sdev->stat_lock);
 		if (__ratelimit(&_rs))
 			scrub_print_warning("i/o error", sblock_to_check);
+		btrfs_dev_stat_inc_and_print(sdev->dev,
+					     BTRFS_DEV_STAT_READ_ERRS);
 	} else if (sblock_bad->checksum_error) {
 		spin_lock(&sdev->stat_lock);
 		sdev->stat.csum_errors++;
 		spin_unlock(&sdev->stat_lock);
 		if (__ratelimit(&_rs))
 			scrub_print_warning("checksum error", sblock_to_check);
+		btrfs_dev_stat_inc_and_print(sdev->dev,
+					     BTRFS_DEV_STAT_CORRUPTION_ERRS);
 	} else if (sblock_bad->header_error) {
 		spin_lock(&sdev->stat_lock);
 		sdev->stat.verify_errors++;
@@ -738,6 +749,12 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
 		if (__ratelimit(&_rs))
 			scrub_print_warning("checksum/header error",
 					    sblock_to_check);
+		if (sblock_bad->generation_error)
+			btrfs_dev_stat_inc_and_print(sdev->dev,
+				BTRFS_DEV_STAT_GENERATION_ERRS);
+		else
+			btrfs_dev_stat_inc_and_print(sdev->dev,
+				BTRFS_DEV_STAT_CORRUPTION_ERRS);
 	}
 
 	if (sdev->readonly)
@@ -998,8 +1015,8 @@ static int scrub_setup_recheck_block(struct scrub_dev *sdev,
 			page = sblock->pagev + page_index;
 			page->logical = logical;
 			page->physical = bbio->stripes[mirror_index].physical;
-			/* for missing devices, bdev is NULL */
-			page->bdev = bbio->stripes[mirror_index].dev->bdev;
+			/* for missing devices, dev->bdev is NULL */
+			page->dev = bbio->stripes[mirror_index].dev;
 			page->mirror_num = mirror_index + 1;
 			page->page = alloc_page(GFP_NOFS);
 			if (!page->page) {
@@ -1043,7 +1060,7 @@ static int scrub_recheck_block(struct btrfs_fs_info *fs_info,
 		struct scrub_page *page = sblock->pagev + page_num;
 		DECLARE_COMPLETION_ONSTACK(complete);
 
-		if (page->bdev == NULL) {
+		if (page->dev->bdev == NULL) {
 			page->io_error = 1;
 			sblock->no_io_error_seen = 0;
 			continue;
 		}
@@ -1053,7 +1070,7 @@ static int scrub_recheck_block(struct btrfs_fs_info *fs_info,
 		bio = bio_alloc(GFP_NOFS, 1);
 		if (!bio)
 			return -EIO;
-		bio->bi_bdev = page->bdev;
+		bio->bi_bdev = page->dev->bdev;
 		bio->bi_sector = page->physical >> 9;
 		bio->bi_end_io = scrub_complete_bio_end_io;
 		bio->bi_private = &complete;
@@ -1098,21 +1115,24 @@ static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
 	if (is_metadata) {
 		struct btrfs_header *h;
 
-		mapped_buffer = kmap_atomic(sblock->pagev[0].page, KM_USER0);
+		mapped_buffer = kmap_atomic(sblock->pagev[0].page);
 		h = (struct btrfs_header *)mapped_buffer;
 
 		if (sblock->pagev[0].logical != le64_to_cpu(h->bytenr) ||
-		    generation != le64_to_cpu(h->generation) ||
 		    memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE) ||
 		    memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
-			   BTRFS_UUID_SIZE))
+			   BTRFS_UUID_SIZE)) {
 			sblock->header_error = 1;
+		} else if (generation != le64_to_cpu(h->generation)) {
+			sblock->header_error = 1;
+			sblock->generation_error = 1;
+		}
 		csum = h->csum;
 	} else {
 		if (!have_csum)
 			return;
 
-		mapped_buffer = kmap_atomic(sblock->pagev[0].page, KM_USER0);
+		mapped_buffer = kmap_atomic(sblock->pagev[0].page);
 	}
 
 	for (page_num = 0;;) {
@@ -1124,14 +1144,13 @@ static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
 			crc = btrfs_csum_data(root, mapped_buffer, crc,
 					      PAGE_SIZE);
 
-		kunmap_atomic(mapped_buffer, KM_USER0);
+		kunmap_atomic(mapped_buffer);
 		page_num++;
 		if (page_num >= sblock->page_count)
 			break;
 		BUG_ON(!sblock->pagev[page_num].page);
-		mapped_buffer = kmap_atomic(sblock->pagev[page_num].page,
-					    KM_USER0);
+		mapped_buffer = kmap_atomic(sblock->pagev[page_num].page);
 	}
 
 	btrfs_csum_final(crc, calculated_csum);
@@ -1183,7 +1202,7 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
 		bio = bio_alloc(GFP_NOFS, 1);
 		if (!bio)
 			return -EIO;
-		bio->bi_bdev = page_bad->bdev;
+		bio->bi_bdev = page_bad->dev->bdev;
 		bio->bi_sector = page_bad->physical >> 9;
 		bio->bi_end_io = scrub_complete_bio_end_io;
 		bio->bi_private = &complete;
@@ -1197,6 +1216,12 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
 
 		/* this will also unplug the queue */
 		wait_for_completion(&complete);
+		if (!bio_flagged(bio, BIO_UPTODATE)) {
+			btrfs_dev_stat_inc_and_print(page_bad->dev,
+				BTRFS_DEV_STAT_WRITE_ERRS);
+			bio_put(bio);
+			return -EIO;
+		}
 		bio_put(bio);
 	}
 
@@ -1242,7 +1267,7 @@ static int scrub_checksum_data(struct scrub_block *sblock)
 
 	on_disk_csum = sblock->pagev[0].csum;
 	page = sblock->pagev[0].page;
-	buffer = kmap_atomic(page, KM_USER0);
+	buffer = kmap_atomic(page);
 
 	len = sdev->sectorsize;
 	index = 0;
@@ -1250,7 +1275,7 @@ static int scrub_checksum_data(struct scrub_block *sblock)
 		u64 l = min_t(u64, len, PAGE_SIZE);
 
 		crc = btrfs_csum_data(root, buffer, crc, l);
-		kunmap_atomic(buffer, KM_USER0);
+		kunmap_atomic(buffer);
 		len -= l;
 		if (len == 0)
 			break;
@@ -1258,7 +1283,7 @@
 		BUG_ON(index >= sblock->page_count);
 		BUG_ON(!sblock->pagev[index].page);
 		page = sblock->pagev[index].page;
-		buffer = kmap_atomic(page, KM_USER0);
+		buffer = kmap_atomic(page);
 	}
 
 	btrfs_csum_final(crc, csum);
@@ -1288,7 +1313,7 @@ static int scrub_checksum_tree_block(struct scrub_block *sblock)
 
 	BUG_ON(sblock->page_count < 1);
 	page = sblock->pagev[0].page;
-	mapped_buffer = kmap_atomic(page, KM_USER0);
+	mapped_buffer = kmap_atomic(page);
 	h = (struct btrfs_header *)mapped_buffer;
 	memcpy(on_disk_csum, h->csum, sdev->csum_size);
 
@@ -1320,7 +1345,7 @@ static int scrub_checksum_tree_block(struct scrub_block *sblock)
 		u64 l = min_t(u64, len, mapped_size);
 
 		crc = btrfs_csum_data(root, p, crc, l);
-		kunmap_atomic(mapped_buffer, KM_USER0);
+		kunmap_atomic(mapped_buffer);
 		len -= l;
 		if (len == 0)
 			break;
@@ -1328,7 +1353,7 @@
 		BUG_ON(index >= sblock->page_count);
 		BUG_ON(!sblock->pagev[index].page);
 		page = sblock->pagev[index].page;
-		mapped_buffer = kmap_atomic(page, KM_USER0);
+		mapped_buffer = kmap_atomic(page);
 		mapped_size = PAGE_SIZE;
 		p = mapped_buffer;
 	}
@@ -1353,24 +1378,25 @@ static int scrub_checksum_super(struct scrub_block *sblock)
 	u64 mapped_size;
 	void *p;
 	u32 crc = ~(u32)0;
-	int fail = 0;
+	int fail_gen = 0;
+	int fail_cor = 0;
 	u64 len;
 	int index;
 
 	BUG_ON(sblock->page_count < 1);
 	page = sblock->pagev[0].page;
-	mapped_buffer = kmap_atomic(page, KM_USER0);
+	mapped_buffer = kmap_atomic(page);
 	s = (struct btrfs_super_block *)mapped_buffer;
 	memcpy(on_disk_csum, s->csum, sdev->csum_size);
 
 	if (sblock->pagev[0].logical != le64_to_cpu(s->bytenr))
-		++fail;
+		++fail_cor;
 
 	if (sblock->pagev[0].generation != le64_to_cpu(s->generation))
-		++fail;
+		++fail_gen;
 
 	if (memcmp(s->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
-		++fail;
+		++fail_cor;
 
 	len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE;
 	mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
@@ -1380,7 +1406,7 @@ static int scrub_checksum_super(struct scrub_block *sblock)
 		u64 l = min_t(u64, len, mapped_size);
 
 		crc = btrfs_csum_data(root, p, crc, l);
-		kunmap_atomic(mapped_buffer, KM_USER0);
+		kunmap_atomic(mapped_buffer);
 		len -= l;
 		if (len == 0)
 			break;
@@ -1388,16 +1414,16 @@
 		BUG_ON(index >= sblock->page_count);
 		BUG_ON(!sblock->pagev[index].page);
 		page = sblock->pagev[index].page;
-		mapped_buffer = kmap_atomic(page, KM_USER0);
+		mapped_buffer = kmap_atomic(page);
 		mapped_size = PAGE_SIZE;
 		p = mapped_buffer;
 	}
 
 	btrfs_csum_final(crc, calculated_csum);
 	if (memcmp(calculated_csum, on_disk_csum, sdev->csum_size))
-		++fail;
+		++fail_cor;
 
-	if (fail) {
+	if (fail_cor + fail_gen) {
 		/*
 		 * if we find an error in a super block, we just report it.
 		 * They will get written with the next transaction commit
@@ -1406,9 +1432,15 @@
 		spin_lock(&sdev->stat_lock);
 		++sdev->stat.super_errors;
 		spin_unlock(&sdev->stat_lock);
+		if (fail_cor)
+			btrfs_dev_stat_inc_and_print(sdev->dev,
+				BTRFS_DEV_STAT_CORRUPTION_ERRS);
+		else
+			btrfs_dev_stat_inc_and_print(sdev->dev,
+				BTRFS_DEV_STAT_GENERATION_ERRS);
 	}
 
-	return fail;
+	return fail_cor + fail_gen;
 }
 
 static void scrub_block_get(struct scrub_block *sblock)
@@ -1552,7 +1584,7 @@ static int scrub_pages(struct scrub_dev *sdev, u64 logical, u64 len,
 			return -ENOMEM;
 		}
 		spage->sblock = sblock;
-		spage->bdev = sdev->dev->bdev;
+		spage->dev = sdev->dev;
 		spage->flags = flags;
 		spage->generation = gen;
 		spage->logical = logical;
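
The most substantive change above is in scrub_checksum_super(): the single fail counter is split into fail_cor and fail_gen, so corruption-type mismatches (bytenr, fsid, checksum) and generation mismatches feed different per-device statistics through btrfs_dev_stat_inc_and_print(). The standalone C sketch below mirrors that classification pattern outside the kernel; record_dev_stat(), the dev_stat_index enum and struct super_sample are hypothetical stand-ins for the btrfs helpers and BTRFS_DEV_STAT_* indexes, not real kernel API.

#include <stdio.h>
#include <string.h>

enum dev_stat_index {			/* stand-in for BTRFS_DEV_STAT_* */
	DEV_STAT_CORRUPTION_ERRS,
	DEV_STAT_GENERATION_ERRS,
};

struct super_sample {			/* only the fields the checks look at */
	unsigned long long bytenr;
	unsigned long long generation;
	char fsid[16];
	int csum_ok;			/* 1 if the computed csum matched */
};

static void record_dev_stat(enum dev_stat_index idx)
{
	/* the kernel bumps a per-device counter and prints a message here */
	printf("dev stat %d incremented\n", idx);
}

/* returns the number of failed checks, like scrub_checksum_super() */
static int check_super(const struct super_sample *s,
		       unsigned long long expect_bytenr,
		       unsigned long long expect_gen, const char *expect_fsid)
{
	int fail_cor = 0;		/* corruption-type mismatches */
	int fail_gen = 0;		/* generation mismatch only */

	if (s->bytenr != expect_bytenr)
		++fail_cor;
	if (s->generation != expect_gen)
		++fail_gen;
	if (memcmp(s->fsid, expect_fsid, sizeof(s->fsid)))
		++fail_cor;
	if (!s->csum_ok)
		++fail_cor;

	if (fail_cor + fail_gen) {
		/* corruption takes precedence over generation, as in the patch */
		if (fail_cor)
			record_dev_stat(DEV_STAT_CORRUPTION_ERRS);
		else
			record_dev_stat(DEV_STAT_GENERATION_ERRS);
	}
	return fail_cor + fail_gen;
}

int main(void)
{
	struct super_sample s = {
		.bytenr = 65536,
		.generation = 41,
		.fsid = "btrfs-test-fsid",
		.csum_ok = 1,
	};

	/* only the generation differs -> counted as a generation error */
	return check_super(&s, 65536, 42, "btrfs-test-fsid") ? 1 : 0;
}

The same precedence, corruption before generation, is what the hunk at @@ -1406,9 +1432,15 @@ applies when deciding which per-device counter to bump for a bad superblock.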