Diffstat (limited to 'drivers/md')
-rw-r--r-- | drivers/md/bitmap.c | 8
-rw-r--r-- | drivers/md/bitmap.h | 3
-rw-r--r-- | drivers/md/dm-log-userspace-transfer.c | 2
-rw-r--r-- | drivers/md/dm-mpath.c | 4
-rw-r--r-- | drivers/md/dm-raid.c | 4
-rw-r--r-- | drivers/md/dm-thin.c | 48
-rw-r--r-- | drivers/md/md.c | 9
-rw-r--r-- | drivers/md/raid1.c | 3
-rw-r--r-- | drivers/md/raid10.c | 60
9 files changed, 79 insertions, 62 deletions
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 3d0dfa7a89a2..17e2b472e16d 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -539,9 +539,6 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap)
 	bitmap->events_cleared = bitmap->mddev->events;
 	sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
 
-	bitmap->flags |= BITMAP_HOSTENDIAN;
-	sb->version = cpu_to_le32(BITMAP_MAJOR_HOSTENDIAN);
-
 	kunmap_atomic(sb);
 
 	return 0;
@@ -1730,8 +1727,7 @@ int bitmap_create(struct mddev *mddev)
 	bitmap->chunkshift = (ffz(~mddev->bitmap_info.chunksize)
 			      - BITMAP_BLOCK_SHIFT);
 
-	/* now that chunksize and chunkshift are set, we can use these macros */
-	chunks = (blocks + bitmap->chunkshift - 1) >>
+	chunks = (blocks + (1 << bitmap->chunkshift) - 1) >>
 			bitmap->chunkshift;
 	pages = (chunks + PAGE_COUNTER_RATIO - 1) / PAGE_COUNTER_RATIO;
@@ -1788,7 +1784,9 @@ int bitmap_load(struct mddev *mddev)
 		 * re-add of a missing device */
 		start = mddev->recovery_cp;
 
+	mutex_lock(&mddev->bitmap_info.mutex);
 	err = bitmap_init_from_disk(bitmap, start);
+	mutex_unlock(&mddev->bitmap_info.mutex);
 	if (err)
 		goto out;
diff --git a/drivers/md/bitmap.h b/drivers/md/bitmap.h
index 55ca5aec84e4..b44b0aba2d47 100644
--- a/drivers/md/bitmap.h
+++ b/drivers/md/bitmap.h
@@ -101,9 +101,6 @@ typedef __u16 bitmap_counter_t;
 
 #define BITMAP_BLOCK_SHIFT 9
 
-/* how many blocks per chunk? (this is variable) */
-#define CHUNK_BLOCK_RATIO(bitmap) ((bitmap)->mddev->bitmap_info.chunksize >> BITMAP_BLOCK_SHIFT)
-
 #endif
 
 /*
diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c
index 1f23e048f077..08d9a207259a 100644
--- a/drivers/md/dm-log-userspace-transfer.c
+++ b/drivers/md/dm-log-userspace-transfer.c
@@ -134,7 +134,7 @@ static void cn_ulog_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
 {
 	struct dm_ulog_request *tfr = (struct dm_ulog_request *)(msg + 1);
 
-	if (!cap_raised(current_cap(), CAP_SYS_ADMIN))
+	if (!capable(CAP_SYS_ADMIN))
 		return;
 
 	spin_lock(&receiving_list_lock);
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 922a3385eead..754f38f8a692 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -718,8 +718,8 @@ static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
 		return 0;
 
 	m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
-	request_module("scsi_dh_%s", m->hw_handler_name);
-	if (scsi_dh_handler_exist(m->hw_handler_name) == 0) {
+	if (!try_then_request_module(scsi_dh_handler_exist(m->hw_handler_name),
+				     "scsi_dh_%s", m->hw_handler_name)) {
 		ti->error = "unknown hardware handler type";
 		ret = -EINVAL;
 		goto fail;
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index b0ba52459ed7..68965e663248 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -859,7 +859,7 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
 	int ret;
 	unsigned redundancy = 0;
 	struct raid_dev *dev;
-	struct md_rdev *rdev, *freshest;
+	struct md_rdev *rdev, *tmp, *freshest;
 	struct mddev *mddev = &rs->md;
 
 	switch (rs->raid_type->level) {
@@ -877,7 +877,7 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
 	}
 
 	freshest = NULL;
-	rdev_for_each(rdev, mddev) {
+	rdev_for_each_safe(rdev, tmp, mddev) {
 		if (!rdev->meta_bdev)
 			continue;
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 213ae32a0fc4..eb3d138ff55a 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -279,8 +279,10 @@ static void __cell_release(struct cell *cell, struct bio_list *inmates)
 
 	hlist_del(&cell->list);
 
-	bio_list_add(inmates, cell->holder);
-	bio_list_merge(inmates, &cell->bios);
+	if (inmates) {
+		bio_list_add(inmates, cell->holder);
+		bio_list_merge(inmates, &cell->bios);
+	}
 
 	mempool_free(cell, prison->cell_pool);
 }
@@ -303,9 +305,10 @@ static void cell_release(struct cell *cell, struct bio_list *bios)
  */
 static void __cell_release_singleton(struct cell *cell, struct bio *bio)
 {
-	hlist_del(&cell->list);
 	BUG_ON(cell->holder != bio);
 	BUG_ON(!bio_list_empty(&cell->bios));
+
+	__cell_release(cell, NULL);
 }
 
 static void cell_release_singleton(struct cell *cell, struct bio *bio)
@@ -1177,6 +1180,7 @@ static void no_space(struct cell *cell)
 static void process_discard(struct thin_c *tc, struct bio *bio)
 {
 	int r;
+	unsigned long flags;
 	struct pool *pool = tc->pool;
 	struct cell *cell, *cell2;
 	struct cell_key key, key2;
@@ -1218,7 +1222,9 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
 			m->bio = bio;
 
 			if (!ds_add_work(&pool->all_io_ds, &m->list)) {
+				spin_lock_irqsave(&pool->lock, flags);
 				list_add(&m->list, &pool->prepared_discards);
+				spin_unlock_irqrestore(&pool->lock, flags);
 				wake_worker(pool);
 			}
 		} else {
@@ -1626,6 +1632,21 @@ static int bind_control_target(struct pool *pool, struct dm_target *ti)
 	pool->low_water_blocks = pt->low_water_blocks;
 	pool->pf = pt->pf;
 
+	/*
+	 * If discard_passdown was enabled verify that the data device
+	 * supports discards.  Disable discard_passdown if not; otherwise
+	 * -EOPNOTSUPP will be returned.
+	 */
+	if (pt->pf.discard_passdown) {
+		struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
+		if (!q || !blk_queue_discard(q)) {
+			char buf[BDEVNAME_SIZE];
+			DMWARN("Discard unsupported by data device (%s): Disabling discard passdown.",
+			       bdevname(pt->data_dev->bdev, buf));
+			pool->pf.discard_passdown = 0;
+		}
+	}
+
 	return 0;
 }
 
@@ -1982,19 +2003,6 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
 		goto out_flags_changed;
 	}
 
-	/*
-	 * If discard_passdown was enabled verify that the data device
-	 * supports discards.  Disable discard_passdown if not; otherwise
-	 * -EOPNOTSUPP will be returned.
-	 */
-	if (pf.discard_passdown) {
-		struct request_queue *q = bdev_get_queue(data_dev->bdev);
-		if (!q || !blk_queue_discard(q)) {
-			DMWARN("Discard unsupported by data device: Disabling discard passdown.");
-			pf.discard_passdown = 0;
-		}
-	}
-
 	pt->pool = pool;
 	pt->ti = ti;
 	pt->metadata_dev = metadata_dev;
@@ -2379,7 +2387,7 @@ static int pool_status(struct dm_target *ti, status_type_t type,
 		       (unsigned long long)pt->low_water_blocks);
 
 		count = !pool->pf.zero_new_blocks + !pool->pf.discard_enabled +
-			!pool->pf.discard_passdown;
+			!pt->pf.discard_passdown;
 		DMEMIT("%u ", count);
 
 		if (!pool->pf.zero_new_blocks)
@@ -2388,7 +2396,7 @@ static int pool_status(struct dm_target *ti, status_type_t type,
 		if (!pool->pf.discard_enabled)
 			DMEMIT("ignore_discard ");
 
-		if (!pool->pf.discard_passdown)
+		if (!pt->pf.discard_passdown)
 			DMEMIT("no_discard_passdown ");
 
 		break;
@@ -2626,8 +2634,10 @@ static int thin_endio(struct dm_target *ti,
 	if (h->all_io_entry) {
 		INIT_LIST_HEAD(&work);
 		ds_dec(h->all_io_entry, &work);
+		spin_lock_irqsave(&pool->lock, flags);
 		list_for_each_entry_safe(m, tmp, &work, list)
 			list_add(&m->list, &pool->prepared_discards);
+		spin_unlock_irqrestore(&pool->lock, flags);
 	}
 
 	mempool_free(h, pool->endio_hook_pool);
@@ -2759,6 +2769,6 @@ static void dm_thin_exit(void)
 module_init(dm_thin_init);
 module_exit(dm_thin_exit);
 
-MODULE_DESCRIPTION(DM_NAME "device-mapper thin provisioning target");
+MODULE_DESCRIPTION(DM_NAME " thin provisioning target");
 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
 MODULE_LICENSE("GPL");
diff --git a/drivers/md/md.c b/drivers/md/md.c
index b572e1e386ce..01233d855eb2 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -391,6 +391,8 @@ void mddev_suspend(struct mddev *mddev)
 	synchronize_rcu();
 	wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
 	mddev->pers->quiesce(mddev, 1);
+
+	del_timer_sync(&mddev->safemode_timer);
 }
 EXPORT_SYMBOL_GPL(mddev_suspend);
 
@@ -7560,14 +7562,14 @@ void md_check_recovery(struct mddev *mddev)
 		 * any transients in the value of "sync_action".
 		 */
 		set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
-		clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 		/* Clear some bits that don't mean anything, but
 		 * might be left set
 		 */
 		clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
 		clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
 
-		if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
+		if (!test_and_clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
+		    test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
 			goto unlock;
 		/* no recovery is running.
 		 * remove any failed drives, then
@@ -8140,7 +8142,8 @@ static int md_notify_reboot(struct notifier_block *this,
 
 	for_each_mddev(mddev, tmp) {
 		if (mddev_trylock(mddev)) {
-			__md_stop_writes(mddev);
+			if (mddev->pers)
+				__md_stop_writes(mddev);
 			mddev->safemode = 2;
 			mddev_unlock(mddev);
 		}
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index d35e4c991e38..15dd59b84e94 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1712,6 +1712,7 @@ static int process_checks(struct r1bio *r1_bio)
 	struct r1conf *conf = mddev->private;
 	int primary;
 	int i;
+	int vcnt;
 
 	for (primary = 0; primary < conf->raid_disks * 2; primary++)
 		if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
@@ -1721,9 +1722,9 @@ static int process_checks(struct r1bio *r1_bio)
 			break;
 		}
 	r1_bio->read_disk = primary;
+	vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
 	for (i = 0; i < conf->raid_disks * 2; i++) {
 		int j;
-		int vcnt = r1_bio->sectors >> (PAGE_SHIFT - 9);
 		struct bio *pbio = r1_bio->bios[primary];
 		struct bio *sbio = r1_bio->bios[i];
 		int size;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index fff782189e48..3f91c2e1dfe7 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1788,6 +1788,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
 	struct r10conf *conf = mddev->private;
 	int i, first;
 	struct bio *tbio, *fbio;
+	int vcnt;
 
 	atomic_set(&r10_bio->remaining, 1);
 
@@ -1802,10 +1803,10 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
 	first = i;
 	fbio = r10_bio->devs[i].bio;
 
+	vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9);
 	/* now find blocks with errors */
 	for (i=0 ; i < conf->copies ; i++) {
 		int j, d;
-		int vcnt = r10_bio->sectors >> (PAGE_SHIFT-9);
 
 		tbio = r10_bio->devs[i].bio;
 
@@ -1871,7 +1872,6 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
 	 */
 	for (i = 0; i < conf->copies; i++) {
 		int j, d;
-		int vcnt = r10_bio->sectors >> (PAGE_SHIFT-9);
 
 		tbio = r10_bio->devs[i].repl_bio;
 		if (!tbio || !tbio->bi_end_io)
@@ -3164,12 +3164,40 @@ raid10_size(struct mddev *mddev, sector_t sectors, int raid_disks)
 	return size << conf->chunk_shift;
 }
 
+static void calc_sectors(struct r10conf *conf, sector_t size)
+{
+	/* Calculate the number of sectors-per-device that will
+	 * actually be used, and set conf->dev_sectors and
+	 * conf->stride
+	 */
+
+	size = size >> conf->chunk_shift;
+	sector_div(size, conf->far_copies);
+	size = size * conf->raid_disks;
+	sector_div(size, conf->near_copies);
+	/* 'size' is now the number of chunks in the array */
+	/* calculate "used chunks per device" */
+	size = size * conf->copies;
+
+	/* We need to round up when dividing by raid_disks to
+	 * get the stride size.
+	 */
+	size = DIV_ROUND_UP_SECTOR_T(size, conf->raid_disks);
+
+	conf->dev_sectors = size << conf->chunk_shift;
+
+	if (conf->far_offset)
+		conf->stride = 1 << conf->chunk_shift;
+	else {
+		sector_div(size, conf->far_copies);
+		conf->stride = size << conf->chunk_shift;
+	}
+}
 static struct r10conf *setup_conf(struct mddev *mddev)
 {
 	struct r10conf *conf = NULL;
 	int nc, fc, fo;
-	sector_t stride, size;
 	int err = -EINVAL;
 
 	if (mddev->new_chunk_sectors < (PAGE_SIZE >> 9) ||
@@ -3219,28 +3247,7 @@ static struct r10conf *setup_conf(struct mddev *mddev)
 	if (!conf->r10bio_pool)
 		goto out;
 
-	size = mddev->dev_sectors >> conf->chunk_shift;
-	sector_div(size, fc);
-	size = size * conf->raid_disks;
-	sector_div(size, nc);
-	/* 'size' is now the number of chunks in the array */
-	/* calculate "used chunks per device" in 'stride' */
-	stride = size * conf->copies;
-
-	/* We need to round up when dividing by raid_disks to
-	 * get the stride size.
-	 */
-	stride += conf->raid_disks - 1;
-	sector_div(stride, conf->raid_disks);
-
-	conf->dev_sectors = stride << conf->chunk_shift;
-
-	if (fo)
-		stride = 1;
-	else
-		sector_div(stride, fc);
-	conf->stride = stride << conf->chunk_shift;
-
+	calc_sectors(conf, mddev->dev_sectors);
 	spin_lock_init(&conf->device_lock);
 	INIT_LIST_HEAD(&conf->retry_list);
 
@@ -3468,7 +3475,8 @@ static int raid10_resize(struct mddev *mddev, sector_t sectors)
 		mddev->recovery_cp = oldsize;
 		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 	}
-	mddev->dev_sectors = sectors;
+	calc_sectors(conf, sectors);
+	mddev->dev_sectors = conf->dev_sectors;
 	mddev->resync_max_sectors = size;
 	return 0;
 }
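Note on the vcnt changes in raid1.c and raid10.c above: a sync bio needs one bio_vec per page of data, so the page count must round up when the sector count is not page-aligned; the old expression truncated instead. The following is a minimal standalone sketch (not kernel code; it assumes 4K pages, i.e. PAGE_SHIFT = 12, and 512-byte sectors) showing the off-by-one the fix addresses.

/* Illustration of the vcnt rounding fix: 9 sectors (4.5K) span two
 * 4K pages, but the old shift-only expression counted only one.
 */
#include <stdio.h>

#define PAGE_SHIFT 12                        /* assumed 4K pages */
#define SECTORS_PER_PAGE (1 << (PAGE_SHIFT - 9))

int main(void)
{
	int sectors = 9;                     /* not page-aligned */

	int old_vcnt = sectors >> (PAGE_SHIFT - 9);   /* truncates to 1 */
	int new_vcnt = (sectors + SECTORS_PER_PAGE - 1) >> (PAGE_SHIFT - 9);

	printf("old vcnt = %d, new vcnt = %d\n", old_vcnt, new_vcnt);
	/* prints: old vcnt = 1, new vcnt = 2 -- the old count
	 * missed the final, partially-filled page */
	return 0;
}

The same round-up idiom, (x + y - 1) / y, is what DIV_ROUND_UP_SECTOR_T performs in the raid10 calc_sectors() hunk when dividing by raid_disks.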