Diffstat (limited to 'drivers/mtd/ubi')
-rw-r--r-- | drivers/mtd/ubi/build.c | 674
-rw-r--r-- | drivers/mtd/ubi/cdev.c | 244
-rw-r--r-- | drivers/mtd/ubi/debug.h | 21
-rw-r--r-- | drivers/mtd/ubi/eba.c | 321
-rw-r--r-- | drivers/mtd/ubi/gluebi.c | 9
-rw-r--r-- | drivers/mtd/ubi/io.c | 10
-rw-r--r-- | drivers/mtd/ubi/kapi.c | 177
-rw-r--r-- | drivers/mtd/ubi/misc.c | 2
-rw-r--r-- | drivers/mtd/ubi/scan.c | 12
-rw-r--r-- | drivers/mtd/ubi/ubi.h | 171
-rw-r--r-- | drivers/mtd/ubi/upd.c | 185
-rw-r--r-- | drivers/mtd/ubi/vmt.c | 208
-rw-r--r-- | drivers/mtd/ubi/vtbl.c | 45
-rw-r--r-- | drivers/mtd/ubi/wl.c | 338
14 files changed, 1578 insertions, 839 deletions
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c index 023653977a1a..6ac81e35355c 100644 --- a/drivers/mtd/ubi/build.c +++ b/drivers/mtd/ubi/build.c @@ -21,11 +21,16 @@ */ /* - * This file includes UBI initialization and building of UBI devices. At the - * moment UBI devices may only be added while UBI is initialized, but dynamic - * device add/remove functionality is planned. Also, at the moment we only - * attach UBI devices by scanning, which will become a bottleneck when flashes - * reach certain large size. Then one may improve UBI and add other methods. + * This file includes UBI initialization and building of UBI devices. + * + * When UBI is initialized, it attaches all the MTD devices specified as the + * module load parameters or the kernel boot parameters. If MTD devices were + * specified, UBI does not attach any MTD device, but it is possible to do + * later using the "UBI control device". + * + * At the moment we only attach UBI devices by scanning, which will become a + * bottleneck when flashes reach certain large size. Then one may improve UBI + * and add other methods, although it does not seem to be easy to do. */ #include <linux/err.h> @@ -33,7 +38,9 @@ #include <linux/moduleparam.h> #include <linux/stringify.h> #include <linux/stat.h> +#include <linux/miscdevice.h> #include <linux/log2.h> +#include <linux/kthread.h> #include "ubi.h" /* Maximum length of the 'mtd=' parameter */ @@ -43,13 +50,11 @@ * struct mtd_dev_param - MTD device parameter description data structure. * @name: MTD device name or number string * @vid_hdr_offs: VID header offset - * @data_offs: data offset */ struct mtd_dev_param { char name[MTD_PARAM_LEN_MAX]; int vid_hdr_offs; - int data_offs; }; /* Numbers of elements set in the @mtd_dev_param array */ @@ -58,14 +63,27 @@ static int mtd_devs = 0; /* MTD devices specification parameters */ static struct mtd_dev_param mtd_dev_param[UBI_MAX_DEVICES]; -/* Number of UBI devices in system */ -int ubi_devices_cnt; +/* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */ +struct class *ubi_class; + +/* Slab cache for wear-leveling entries */ +struct kmem_cache *ubi_wl_entry_slab; + +/* UBI control character device */ +static struct miscdevice ubi_ctrl_cdev = { + .minor = MISC_DYNAMIC_MINOR, + .name = "ubi_ctrl", + .fops = &ubi_ctrl_cdev_operations, +}; /* All UBI devices in system */ -struct ubi_device *ubi_devices[UBI_MAX_DEVICES]; +static struct ubi_device *ubi_devices[UBI_MAX_DEVICES]; -/* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */ -struct class *ubi_class; +/* Serializes UBI devices creations and removals */ +DEFINE_MUTEX(ubi_devices_mutex); + +/* Protects @ubi_devices and @ubi->ref_count */ +static DEFINE_SPINLOCK(ubi_devices_lock); /* "Show" method for files in '/<sysfs>/class/ubi/' */ static ssize_t ubi_version_show(struct class *class, char *buf) @@ -101,38 +119,150 @@ static struct device_attribute dev_min_io_size = __ATTR(min_io_size, S_IRUGO, dev_attribute_show, NULL); static struct device_attribute dev_bgt_enabled = __ATTR(bgt_enabled, S_IRUGO, dev_attribute_show, NULL); +static struct device_attribute dev_mtd_num = + __ATTR(mtd_num, S_IRUGO, dev_attribute_show, NULL); + +/** + * ubi_get_device - get UBI device. + * @ubi_num: UBI device number + * + * This function returns UBI device description object for UBI device number + * @ubi_num, or %NULL if the device does not exist. This function increases the + * device reference count to prevent removal of the device. 
In other words, the + * device cannot be removed if its reference count is not zero. + */ +struct ubi_device *ubi_get_device(int ubi_num) +{ + struct ubi_device *ubi; + + spin_lock(&ubi_devices_lock); + ubi = ubi_devices[ubi_num]; + if (ubi) { + ubi_assert(ubi->ref_count >= 0); + ubi->ref_count += 1; + get_device(&ubi->dev); + } + spin_unlock(&ubi_devices_lock); + + return ubi; +} + +/** + * ubi_put_device - drop an UBI device reference. + * @ubi: UBI device description object + */ +void ubi_put_device(struct ubi_device *ubi) +{ + spin_lock(&ubi_devices_lock); + ubi->ref_count -= 1; + put_device(&ubi->dev); + spin_unlock(&ubi_devices_lock); +} + +/** + * ubi_get_by_major - get UBI device description object by character device + * major number. + * @major: major number + * + * This function is similar to 'ubi_get_device()', but it searches the device + * by its major number. + */ +struct ubi_device *ubi_get_by_major(int major) +{ + int i; + struct ubi_device *ubi; + + spin_lock(&ubi_devices_lock); + for (i = 0; i < UBI_MAX_DEVICES; i++) { + ubi = ubi_devices[i]; + if (ubi && MAJOR(ubi->cdev.dev) == major) { + ubi_assert(ubi->ref_count >= 0); + ubi->ref_count += 1; + get_device(&ubi->dev); + spin_unlock(&ubi_devices_lock); + return ubi; + } + } + spin_unlock(&ubi_devices_lock); + + return NULL; +} + +/** + * ubi_major2num - get UBI device number by character device major number. + * @major: major number + * + * This function searches UBI device number object by its major number. If UBI + * device was not found, this function returns -ENODEV, otherwise the UBI device + * number is returned. + */ +int ubi_major2num(int major) +{ + int i, ubi_num = -ENODEV; + + spin_lock(&ubi_devices_lock); + for (i = 0; i < UBI_MAX_DEVICES; i++) { + struct ubi_device *ubi = ubi_devices[i]; + + if (ubi && MAJOR(ubi->cdev.dev) == major) { + ubi_num = ubi->ubi_num; + break; + } + } + spin_unlock(&ubi_devices_lock); + + return ubi_num; +} /* "Show" method for files in '/<sysfs>/class/ubi/ubiX/' */ static ssize_t dev_attribute_show(struct device *dev, struct device_attribute *attr, char *buf) { - const struct ubi_device *ubi; + ssize_t ret; + struct ubi_device *ubi; + /* + * The below code looks weird, but it actually makes sense. We get the + * UBI device reference from the contained 'struct ubi_device'. But it + * is unclear if the device was removed or not yet. Indeed, if the + * device was removed before we increased its reference count, + * 'ubi_get_device()' will return -ENODEV and we fail. + * + * Remember, 'struct ubi_device' is freed in the release function, so + * we still can use 'ubi->ubi_num'. 
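The get/put pair introduced here is the pattern the rest of the patch builds on: take a reference before touching a device, drop it when done, and treat a NULL return as "the device was detached meanwhile". A minimal kernel-side sketch of a caller, assuming the prototypes are exported through ubi.h (the helper name below is illustrative only):

    #include "ubi.h"   /* assumed location of the ubi_get_device()/ubi_put_device() prototypes */

    static int example_touch_ubi(int ubi_num)
    {
            struct ubi_device *ubi;

            ubi = ubi_get_device(ubi_num);  /* bumps ubi->ref_count and the embedded device ref */
            if (!ubi)
                    return -ENODEV;         /* the device was removed in the meantime */

            /* ... safe to use @ubi here, ubi_detach_mtd_dev() sees ref_count != 0 ... */

            ubi_put_device(ubi);            /* drop the reference again */
            return 0;
    }

The reworked dev_attribute_show() just below follows exactly this shape.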
+ */ ubi = container_of(dev, struct ubi_device, dev); + ubi = ubi_get_device(ubi->ubi_num); + if (!ubi) + return -ENODEV; + if (attr == &dev_eraseblock_size) - return sprintf(buf, "%d\n", ubi->leb_size); + ret = sprintf(buf, "%d\n", ubi->leb_size); else if (attr == &dev_avail_eraseblocks) - return sprintf(buf, "%d\n", ubi->avail_pebs); + ret = sprintf(buf, "%d\n", ubi->avail_pebs); else if (attr == &dev_total_eraseblocks) - return sprintf(buf, "%d\n", ubi->good_peb_count); + ret = sprintf(buf, "%d\n", ubi->good_peb_count); else if (attr == &dev_volumes_count) - return sprintf(buf, "%d\n", ubi->vol_count); + ret = sprintf(buf, "%d\n", ubi->vol_count - UBI_INT_VOL_COUNT); else if (attr == &dev_max_ec) - return sprintf(buf, "%d\n", ubi->max_ec); + ret = sprintf(buf, "%d\n", ubi->max_ec); else if (attr == &dev_reserved_for_bad) - return sprintf(buf, "%d\n", ubi->beb_rsvd_pebs); + ret = sprintf(buf, "%d\n", ubi->beb_rsvd_pebs); else if (attr == &dev_bad_peb_count) - return sprintf(buf, "%d\n", ubi->bad_peb_count); + ret = sprintf(buf, "%d\n", ubi->bad_peb_count); else if (attr == &dev_max_vol_count) - return sprintf(buf, "%d\n", ubi->vtbl_slots); + ret = sprintf(buf, "%d\n", ubi->vtbl_slots); else if (attr == &dev_min_io_size) - return sprintf(buf, "%d\n", ubi->min_io_size); + ret = sprintf(buf, "%d\n", ubi->min_io_size); else if (attr == &dev_bgt_enabled) - return sprintf(buf, "%d\n", ubi->thread_enabled); + ret = sprintf(buf, "%d\n", ubi->thread_enabled); + else if (attr == &dev_mtd_num) + ret = sprintf(buf, "%d\n", ubi->mtd->index); else - BUG(); + ret = -EINVAL; - return 0; + ubi_put_device(ubi); + return ret; } /* Fake "release" method for UBI devices */ @@ -150,68 +280,44 @@ static int ubi_sysfs_init(struct ubi_device *ubi) int err; ubi->dev.release = dev_release; - ubi->dev.devt = MKDEV(ubi->major, 0); + ubi->dev.devt = ubi->cdev.dev; ubi->dev.class = ubi_class; sprintf(&ubi->dev.bus_id[0], UBI_NAME_STR"%d", ubi->ubi_num); err = device_register(&ubi->dev); if (err) - goto out; + return err; err = device_create_file(&ubi->dev, &dev_eraseblock_size); if (err) - goto out_unregister; + return err; err = device_create_file(&ubi->dev, &dev_avail_eraseblocks); if (err) - goto out_eraseblock_size; + return err; err = device_create_file(&ubi->dev, &dev_total_eraseblocks); if (err) - goto out_avail_eraseblocks; + return err; err = device_create_file(&ubi->dev, &dev_volumes_count); if (err) - goto out_total_eraseblocks; + return err; err = device_create_file(&ubi->dev, &dev_max_ec); if (err) - goto out_volumes_count; + return err; err = device_create_file(&ubi->dev, &dev_reserved_for_bad); if (err) - goto out_volumes_max_ec; + return err; err = device_create_file(&ubi->dev, &dev_bad_peb_count); if (err) - goto out_reserved_for_bad; + return err; err = device_create_file(&ubi->dev, &dev_max_vol_count); if (err) - goto out_bad_peb_count; + return err; err = device_create_file(&ubi->dev, &dev_min_io_size); if (err) - goto out_max_vol_count; + return err; err = device_create_file(&ubi->dev, &dev_bgt_enabled); if (err) - goto out_min_io_size; - - return 0; - -out_min_io_size: - device_remove_file(&ubi->dev, &dev_min_io_size); -out_max_vol_count: - device_remove_file(&ubi->dev, &dev_max_vol_count); -out_bad_peb_count: - device_remove_file(&ubi->dev, &dev_bad_peb_count); -out_reserved_for_bad: - device_remove_file(&ubi->dev, &dev_reserved_for_bad); -out_volumes_max_ec: - device_remove_file(&ubi->dev, &dev_max_ec); -out_volumes_count: - device_remove_file(&ubi->dev, &dev_volumes_count); 
-out_total_eraseblocks: - device_remove_file(&ubi->dev, &dev_total_eraseblocks); -out_avail_eraseblocks: - device_remove_file(&ubi->dev, &dev_avail_eraseblocks); -out_eraseblock_size: - device_remove_file(&ubi->dev, &dev_eraseblock_size); -out_unregister: - device_unregister(&ubi->dev); -out: - ubi_err("failed to initialize sysfs for %s", ubi->ubi_name); + return err; + err = device_create_file(&ubi->dev, &dev_mtd_num); return err; } @@ -221,6 +327,7 @@ out: */ static void ubi_sysfs_close(struct ubi_device *ubi) { + device_remove_file(&ubi->dev, &dev_mtd_num); device_remove_file(&ubi->dev, &dev_bgt_enabled); device_remove_file(&ubi->dev, &dev_min_io_size); device_remove_file(&ubi->dev, &dev_max_vol_count); @@ -244,7 +351,7 @@ static void kill_volumes(struct ubi_device *ubi) for (i = 0; i < ubi->vtbl_slots; i++) if (ubi->volumes[i]) - ubi_free_volume(ubi, i); + ubi_free_volume(ubi, ubi->volumes[i]); } /** @@ -259,9 +366,6 @@ static int uif_init(struct ubi_device *ubi) int i, err; dev_t dev; - mutex_init(&ubi->vtbl_mutex); - spin_lock_init(&ubi->volumes_lock); - sprintf(ubi->ubi_name, UBI_NAME_STR "%d", ubi->ubi_num); /* @@ -278,39 +382,40 @@ static int uif_init(struct ubi_device *ubi) return err; } + ubi_assert(MINOR(dev) == 0); cdev_init(&ubi->cdev, &ubi_cdev_operations); - ubi->major = MAJOR(dev); - dbg_msg("%s major is %u", ubi->ubi_name, ubi->major); + dbg_msg("%s major is %u", ubi->ubi_name, MAJOR(dev)); ubi->cdev.owner = THIS_MODULE; - dev = MKDEV(ubi->major, 0); err = cdev_add(&ubi->cdev, dev, 1); if (err) { - ubi_err("cannot add character device %s", ubi->ubi_name); + ubi_err("cannot add character device"); goto out_unreg; } err = ubi_sysfs_init(ubi); if (err) - goto out_cdev; + goto out_sysfs; for (i = 0; i < ubi->vtbl_slots; i++) if (ubi->volumes[i]) { - err = ubi_add_volume(ubi, i); - if (err) + err = ubi_add_volume(ubi, ubi->volumes[i]); + if (err) { + ubi_err("cannot add volume %d", i); goto out_volumes; + } } return 0; out_volumes: kill_volumes(ubi); +out_sysfs: ubi_sysfs_close(ubi); -out_cdev: cdev_del(&ubi->cdev); out_unreg: - unregister_chrdev_region(MKDEV(ubi->major, 0), - ubi->vtbl_slots + 1); + unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1); + ubi_err("cannot initialize UBI %s, error %d", ubi->ubi_name, err); return err; } @@ -323,7 +428,7 @@ static void uif_close(struct ubi_device *ubi) kill_volumes(ubi); ubi_sysfs_close(ubi); cdev_del(&ubi->cdev); - unregister_chrdev_region(MKDEV(ubi->major, 0), ubi->vtbl_slots + 1); + unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1); } /** @@ -384,9 +489,9 @@ out_si: * assumed: * o EC header is always at offset zero - this cannot be changed; * o VID header starts just after the EC header at the closest address - * aligned to @io->@hdrs_min_io_size; + * aligned to @io->hdrs_min_io_size; * o data starts just after the VID header at the closest address aligned to - * @io->@min_io_size + * @io->min_io_size * * This function returns zero in case of success and a negative error code in * case of failure. @@ -407,6 +512,9 @@ static int io_init(struct ubi_device *ubi) return -EINVAL; } + if (ubi->vid_hdr_offset < 0) + return -EINVAL; + /* * Note, in this implementation we support MTD devices with 0x7FFFFFFF * physical eraseblocks maximum. @@ -424,7 +532,8 @@ static int io_init(struct ubi_device *ubi) /* Make sure minimal I/O unit is power of 2 */ if (!is_power_of_2(ubi->min_io_size)) { - ubi_err("bad min. I/O unit"); + ubi_err("min. 
I/O unit (%d) is not power of 2", + ubi->min_io_size); return -EINVAL; } @@ -453,10 +562,8 @@ static int io_init(struct ubi_device *ubi) } /* Similar for the data offset */ - if (ubi->leb_start == 0) { - ubi->leb_start = ubi->vid_hdr_offset + ubi->vid_hdr_alsize; - ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size); - } + ubi->leb_start = ubi->vid_hdr_offset + UBI_EC_HDR_SIZE; + ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size); dbg_msg("vid_hdr_offset %d", ubi->vid_hdr_offset); dbg_msg("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset); @@ -514,76 +621,147 @@ static int io_init(struct ubi_device *ubi) } /** - * attach_mtd_dev - attach an MTD device. - * @mtd_dev: MTD device name or number string - * @vid_hdr_offset: VID header offset - * @data_offset: data offset + * autoresize - re-size the volume which has the "auto-resize" flag set. + * @ubi: UBI device description object + * @vol_id: ID of the volume to re-size * - * This function attaches an MTD device to UBI. It first treats @mtd_dev as the - * MTD device name, and tries to open it by this name. If it is unable to open, - * it tries to convert @mtd_dev to an integer and open the MTD device by its - * number. Returns zero in case of success and a negative error code in case of - * failure. + * This function re-sizes the volume marked by the @UBI_VTBL_AUTORESIZE_FLG in + * the volume table to the largest possible size. See comments in ubi-header.h + * for more description of the flag. Returns zero in case of success and a + * negative error code in case of failure. */ -static int attach_mtd_dev(const char *mtd_dev, int vid_hdr_offset, - int data_offset) +static int autoresize(struct ubi_device *ubi, int vol_id) { - struct ubi_device *ubi; - struct mtd_info *mtd; - int i, err; + struct ubi_volume_desc desc; + struct ubi_volume *vol = ubi->volumes[vol_id]; + int err, old_reserved_pebs = vol->reserved_pebs; - mtd = get_mtd_device_nm(mtd_dev); - if (IS_ERR(mtd)) { - int mtd_num; - char *endp; + /* + * Clear the auto-resize flag in the volume in-memory copy of the + * volume table, and 'ubi_resize_volume()' will propogate this change + * to the flash. + */ + ubi->vtbl[vol_id].flags &= ~UBI_VTBL_AUTORESIZE_FLG; - if (PTR_ERR(mtd) != -ENODEV) - return PTR_ERR(mtd); + if (ubi->avail_pebs == 0) { + struct ubi_vtbl_record vtbl_rec; /* - * Probably this is not MTD device name but MTD device number - - * check this out. + * No avalilable PEBs to re-size the volume, clear the flag on + * flash and exit. */ - mtd_num = simple_strtoul(mtd_dev, &endp, 0); - if (*endp != '\0' || mtd_dev == endp) { - ubi_err("incorrect MTD device: \"%s\"", mtd_dev); - return -ENODEV; - } - - mtd = get_mtd_device(NULL, mtd_num); - if (IS_ERR(mtd)) - return PTR_ERR(mtd); + memcpy(&vtbl_rec, &ubi->vtbl[vol_id], + sizeof(struct ubi_vtbl_record)); + err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec); + if (err) + ubi_err("cannot clean auto-resize flag for volume %d", + vol_id); + } else { + desc.vol = vol; + err = ubi_resize_volume(&desc, + old_reserved_pebs + ubi->avail_pebs); + if (err) + ubi_err("cannot auto-resize volume %d", vol_id); } - /* Check if we already have the same MTD device attached */ - for (i = 0; i < ubi_devices_cnt; i++) - if (ubi_devices[i]->mtd->index == mtd->index) { - ubi_err("mtd%d is already attached to ubi%d", + if (err) + return err; + + ubi_msg("volume %d (\"%s\") re-sized from %d to %d LEBs", vol_id, + vol->name, old_reserved_pebs, vol->reserved_pebs); + return 0; +} + +/** + * ubi_attach_mtd_dev - attach an MTD device. 
+ * @mtd_dev: MTD device description object + * @ubi_num: number to assign to the new UBI device + * @vid_hdr_offset: VID header offset + * + * This function attaches MTD device @mtd_dev to UBI and assign @ubi_num number + * to the newly created UBI device, unless @ubi_num is %UBI_DEV_NUM_AUTO, in + * which case this function finds a vacant device nubert and assings it + * automatically. Returns the new UBI device number in case of success and a + * negative error code in case of failure. + * + * Note, the invocations of this function has to be serialized by the + * @ubi_devices_mutex. + */ +int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset) +{ + struct ubi_device *ubi; + int i, err; + + /* + * Check if we already have the same MTD device attached. + * + * Note, this function assumes that UBI devices creations and deletions + * are serialized, so it does not take the &ubi_devices_lock. + */ + for (i = 0; i < UBI_MAX_DEVICES; i++) { + ubi = ubi_devices[i]; + if (ubi && mtd->index == ubi->mtd->index) { + dbg_err("mtd%d is already attached to ubi%d", mtd->index, i); - err = -EINVAL; - goto out_mtd; + return -EEXIST; } + } - ubi = ubi_devices[ubi_devices_cnt] = kzalloc(sizeof(struct ubi_device), - GFP_KERNEL); - if (!ubi) { - err = -ENOMEM; - goto out_mtd; + /* + * Make sure this MTD device is not emulated on top of an UBI volume + * already. Well, generally this recursion works fine, but there are + * different problems like the UBI module takes a reference to itself + * by attaching (and thus, opening) the emulated MTD device. This + * results in inability to unload the module. And in general it makes + * no sense to attach emulated MTD devices, so we prohibit this. + */ + if (mtd->type == MTD_UBIVOLUME) { + ubi_err("refuse attaching mtd%d - it is already emulated on " + "top of UBI", mtd->index); + return -EINVAL; } - ubi->ubi_num = ubi_devices_cnt; - ubi->mtd = mtd; + if (ubi_num == UBI_DEV_NUM_AUTO) { + /* Search for an empty slot in the @ubi_devices array */ + for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++) + if (!ubi_devices[ubi_num]) + break; + if (ubi_num == UBI_MAX_DEVICES) { + dbg_err("only %d UBI devices may be created", UBI_MAX_DEVICES); + return -ENFILE; + } + } else { + if (ubi_num >= UBI_MAX_DEVICES) + return -EINVAL; + + /* Make sure ubi_num is not busy */ + if (ubi_devices[ubi_num]) { + dbg_err("ubi%d already exists", ubi_num); + return -EEXIST; + } + } - dbg_msg("attaching mtd%d to ubi%d: VID header offset %d data offset %d", - ubi->mtd->index, ubi_devices_cnt, vid_hdr_offset, data_offset); + ubi = kzalloc(sizeof(struct ubi_device), GFP_KERNEL); + if (!ubi) + return -ENOMEM; + ubi->mtd = mtd; + ubi->ubi_num = ubi_num; ubi->vid_hdr_offset = vid_hdr_offset; - ubi->leb_start = data_offset; + ubi->autoresize_vol_id = -1; + + mutex_init(&ubi->buf_mutex); + mutex_init(&ubi->ckvol_mutex); + mutex_init(&ubi->volumes_mutex); + spin_lock_init(&ubi->volumes_lock); + + dbg_msg("attaching mtd%d to ubi%d: VID header offset %d", + mtd->index, ubi_num, vid_hdr_offset); + err = io_init(ubi); if (err) goto out_free; - mutex_init(&ubi->buf_mutex); ubi->peb_buf1 = vmalloc(ubi->peb_size); if (!ubi->peb_buf1) goto out_free; @@ -605,12 +783,26 @@ static int attach_mtd_dev(const char *mtd_dev, int vid_hdr_offset, goto out_free; } + if (ubi->autoresize_vol_id != -1) { + err = autoresize(ubi, ubi->autoresize_vol_id); + if (err) + goto out_detach; + } + err = uif_init(ubi); if (err) goto out_detach; - ubi_msg("attached mtd%d to ubi%d", ubi->mtd->index, 
ubi_devices_cnt); - ubi_msg("MTD device name: \"%s\"", ubi->mtd->name); + ubi->bgt_thread = kthread_create(ubi_thread, ubi, ubi->bgt_name); + if (IS_ERR(ubi->bgt_thread)) { + err = PTR_ERR(ubi->bgt_thread); + ubi_err("cannot spawn \"%s\", error %d", ubi->bgt_name, + err); + goto out_uif; + } + + ubi_msg("attached mtd%d to ubi%d", mtd->index, ubi_num); + ubi_msg("MTD device name: \"%s\"", mtd->name); ubi_msg("MTD device size: %llu MiB", ubi->flash_size >> 20); ubi_msg("physical eraseblock size: %d bytes (%d KiB)", ubi->peb_size, ubi->peb_size >> 10); @@ -638,9 +830,11 @@ static int attach_mtd_dev(const char *mtd_dev, int vid_hdr_offset, wake_up_process(ubi->bgt_thread); } - ubi_devices_cnt += 1; - return 0; + ubi_devices[ubi_num] = ubi; + return ubi_num; +out_uif: + uif_close(ubi); out_detach: ubi_eba_close(ubi); ubi_wl_close(ubi); @@ -652,21 +846,58 @@ out_free: vfree(ubi->dbg_peb_buf); #endif kfree(ubi); -out_mtd: - put_mtd_device(mtd); - ubi_devices[ubi_devices_cnt] = NULL; return err; } /** - * detach_mtd_dev - detach an MTD device. - * @ubi: UBI device description object + * ubi_detach_mtd_dev - detach an MTD device. + * @ubi_num: UBI device number to detach from + * @anyway: detach MTD even if device reference count is not zero + * + * This function destroys an UBI device number @ubi_num and detaches the + * underlying MTD device. Returns zero in case of success and %-EBUSY if the + * UBI device is busy and cannot be destroyed, and %-EINVAL if it does not + * exist. + * + * Note, the invocations of this function has to be serialized by the + * @ubi_devices_mutex. */ -static void detach_mtd_dev(struct ubi_device *ubi) +int ubi_detach_mtd_dev(int ubi_num, int anyway) { - int ubi_num = ubi->ubi_num, mtd_num = ubi->mtd->index; + struct ubi_device *ubi; + + if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES) + return -EINVAL; + + spin_lock(&ubi_devices_lock); + ubi = ubi_devices[ubi_num]; + if (!ubi) { + spin_unlock(&ubi_devices_lock); + return -EINVAL; + } + + if (ubi->ref_count) { + if (!anyway) { + spin_unlock(&ubi_devices_lock); + return -EBUSY; + } + /* This may only happen if there is a bug */ + ubi_err("%s reference count %d, destroy anyway", + ubi->ubi_name, ubi->ref_count); + } + ubi_devices[ubi_num] = NULL; + spin_unlock(&ubi_devices_lock); + ubi_assert(ubi_num == ubi->ubi_num); dbg_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num); + + /* + * Before freeing anything, we have to stop the background thread to + * prevent it from doing anything on this device while we are freeing. + */ + if (ubi->bgt_thread) + kthread_stop(ubi->bgt_thread); + uif_close(ubi); ubi_eba_close(ubi); ubi_wl_close(ubi); @@ -677,11 +908,37 @@ static void detach_mtd_dev(struct ubi_device *ubi) #ifdef CONFIG_MTD_UBI_DEBUG vfree(ubi->dbg_peb_buf); #endif - kfree(ubi_devices[ubi_num]); - ubi_devices[ubi_num] = NULL; - ubi_devices_cnt -= 1; - ubi_assert(ubi_devices_cnt >= 0); - ubi_msg("mtd%d is detached from ubi%d", mtd_num, ubi_num); + ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num); + kfree(ubi); + return 0; +} + +/** + * find_mtd_device - open an MTD device by its name or number. + * @mtd_dev: name or number of the device + * + * This function tries to open and MTD device described by @mtd_dev string, + * which is first treated as an ASCII number, and if it is not true, it is + * treated as MTD device name. Returns MTD device description object in case of + * success and a negative error code in case of failure. 
+ */ +static struct mtd_info * __init open_mtd_device(const char *mtd_dev) +{ + struct mtd_info *mtd; + int mtd_num; + char *endp; + + mtd_num = simple_strtoul(mtd_dev, &endp, 0); + if (*endp != '\0' || mtd_dev == endp) { + /* + * This does not look like an ASCII integer, probably this is + * MTD device name. + */ + mtd = get_mtd_device_nm(mtd_dev); + } else + mtd = get_mtd_device(NULL, mtd_num); + + return mtd; } static int __init ubi_init(void) @@ -693,47 +950,96 @@ static int __init ubi_init(void) BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64); if (mtd_devs > UBI_MAX_DEVICES) { - printk("UBI error: too many MTD devices, maximum is %d\n", - UBI_MAX_DEVICES); + printk(KERN_ERR "UBI error: too many MTD devices, " + "maximum is %d\n", UBI_MAX_DEVICES); return -EINVAL; } + /* Create base sysfs directory and sysfs files */ ubi_class = class_create(THIS_MODULE, UBI_NAME_STR); - if (IS_ERR(ubi_class)) - return PTR_ERR(ubi_class); + if (IS_ERR(ubi_class)) { + err = PTR_ERR(ubi_class); + printk(KERN_ERR "UBI error: cannot create UBI class\n"); + goto out; + } err = class_create_file(ubi_class, &ubi_version); - if (err) + if (err) { + printk(KERN_ERR "UBI error: cannot create sysfs file\n"); goto out_class; + } + + err = misc_register(&ubi_ctrl_cdev); + if (err) { + printk(KERN_ERR "UBI error: cannot register device\n"); + goto out_version; + } + + ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab", + sizeof(struct ubi_wl_entry), + 0, 0, NULL); + if (!ubi_wl_entry_slab) + goto out_dev_unreg; /* Attach MTD devices */ for (i = 0; i < mtd_devs; i++) { struct mtd_dev_param *p = &mtd_dev_param[i]; + struct mtd_info *mtd; cond_resched(); - err = attach_mtd_dev(p->name, p->vid_hdr_offs, p->data_offs); - if (err) + + mtd = open_mtd_device(p->name); + if (IS_ERR(mtd)) { + err = PTR_ERR(mtd); + goto out_detach; + } + + mutex_lock(&ubi_devices_mutex); + err = ubi_attach_mtd_dev(mtd, UBI_DEV_NUM_AUTO, + p->vid_hdr_offs); + mutex_unlock(&ubi_devices_mutex); + if (err < 0) { + put_mtd_device(mtd); + printk(KERN_ERR "UBI error: cannot attach %s\n", + p->name); goto out_detach; + } } return 0; out_detach: for (k = 0; k < i; k++) - detach_mtd_dev(ubi_devices[k]); + if (ubi_devices[k]) { + mutex_lock(&ubi_devices_mutex); + ubi_detach_mtd_dev(ubi_devices[k]->ubi_num, 1); + mutex_unlock(&ubi_devices_mutex); + } + kmem_cache_destroy(ubi_wl_entry_slab); +out_dev_unreg: + misc_deregister(&ubi_ctrl_cdev); +out_version: class_remove_file(ubi_class, &ubi_version); out_class: class_destroy(ubi_class); +out: + printk(KERN_ERR "UBI error: cannot initialize UBI, error %d\n", err); return err; } module_init(ubi_init); static void __exit ubi_exit(void) { - int i, n = ubi_devices_cnt; + int i; - for (i = 0; i < n; i++) - detach_mtd_dev(ubi_devices[i]); + for (i = 0; i < UBI_MAX_DEVICES; i++) + if (ubi_devices[i]) { + mutex_lock(&ubi_devices_mutex); + ubi_detach_mtd_dev(ubi_devices[i]->ubi_num, 1); + mutex_unlock(&ubi_devices_mutex); + } + kmem_cache_destroy(ubi_wl_entry_slab); + misc_deregister(&ubi_ctrl_cdev); class_remove_file(ubi_class, &ubi_version); class_destroy(ubi_class); } @@ -754,7 +1060,8 @@ static int __init bytes_str_to_int(const char *str) result = simple_strtoul(str, &endp, 0); if (str == endp || result < 0) { - printk("UBI error: incorrect bytes count: \"%s\"\n", str); + printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n", + str); return -EINVAL; } @@ -764,15 +1071,14 @@ static int __init bytes_str_to_int(const char *str) case 'M': result *= 1024; case 'K': - case 'k': result *= 1024; - if (endp[1] 
== 'i' && (endp[2] == '\0' || - endp[2] == 'B' || endp[2] == 'b')) + if (endp[1] == 'i' && endp[2] == 'B') endp += 2; case '\0': break; default: - printk("UBI error: incorrect bytes count: \"%s\"\n", str); + printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n", + str); return -EINVAL; } @@ -793,23 +1099,27 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp) struct mtd_dev_param *p; char buf[MTD_PARAM_LEN_MAX]; char *pbuf = &buf[0]; - char *tokens[3] = {NULL, NULL, NULL}; + char *tokens[2] = {NULL, NULL}; + + if (!val) + return -EINVAL; if (mtd_devs == UBI_MAX_DEVICES) { - printk("UBI error: too many parameters, max. is %d\n", + printk(KERN_ERR "UBI error: too many parameters, max. is %d\n", UBI_MAX_DEVICES); return -EINVAL; } len = strnlen(val, MTD_PARAM_LEN_MAX); if (len == MTD_PARAM_LEN_MAX) { - printk("UBI error: parameter \"%s\" is too long, max. is %d\n", - val, MTD_PARAM_LEN_MAX); + printk(KERN_ERR "UBI error: parameter \"%s\" is too long, " + "max. is %d\n", val, MTD_PARAM_LEN_MAX); return -EINVAL; } if (len == 0) { - printk("UBI warning: empty 'mtd=' parameter - ignored\n"); + printk(KERN_WARNING "UBI warning: empty 'mtd=' parameter - " + "ignored\n"); return 0; } @@ -819,11 +1129,12 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp) if (buf[len - 1] == '\n') buf[len - 1] = '\0'; - for (i = 0; i < 3; i++) + for (i = 0; i < 2; i++) tokens[i] = strsep(&pbuf, ","); if (pbuf) { - printk("UBI error: too many arguments at \"%s\"\n", val); + printk(KERN_ERR "UBI error: too many arguments at \"%s\"\n", + val); return -EINVAL; } @@ -832,13 +1143,9 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp) if (tokens[1]) p->vid_hdr_offs = bytes_str_to_int(tokens[1]); - if (tokens[2]) - p->data_offs = bytes_str_to_int(tokens[2]); if (p->vid_hdr_offs < 0) return p->vid_hdr_offs; - if (p->data_offs < 0) - return p->data_offs; mtd_devs += 1; return 0; @@ -846,16 +1153,15 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp) module_param_call(mtd, ubi_mtd_param_parse, NULL, NULL, 000); MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: " - "mtd=<name|num>[,<vid_hdr_offs>,<data_offs>]. " + "mtd=<name|num>[,<vid_hdr_offs>].\n" "Multiple \"mtd\" parameters may be specified.\n" - "MTD devices may be specified by their number or name. " - "Optional \"vid_hdr_offs\" and \"data_offs\" parameters " - "specify UBI VID header position and data starting " - "position to be used by UBI.\n" - "Example: mtd=content,1984,2048 mtd=4 - attach MTD device" - "with name content using VID header offset 1984 and data " - "start 2048, and MTD device number 4 using default " - "offsets"); + "MTD devices may be specified by their number or name.\n" + "Optional \"vid_hdr_offs\" parameter specifies UBI VID " + "header position and data starting position to be used " + "by UBI.\n" + "Example: mtd=content,1984 mtd=4 - attach MTD device" + "with name \"content\" using VID header offset 1984, and " + "MTD device number 4 with default VID header offset."); MODULE_VERSION(__stringify(UBI_VERSION)); MODULE_DESCRIPTION("UBI - Unsorted Block Images"); diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c index fe4da1e96c52..9d6aae5449b6 100644 --- a/drivers/mtd/ubi/cdev.c +++ b/drivers/mtd/ubi/cdev.c @@ -28,6 +28,11 @@ * * Major and minor numbers are assigned dynamically to both UBI and volume * character devices. 
+ * + * Well, there is the third kind of character devices - the UBI control + * character device, which allows to manipulate by UBI devices - create and + * delete them. In other words, it is used for attaching and detaching MTD + * devices. */ #include <linux/module.h> @@ -39,34 +44,6 @@ #include <asm/div64.h> #include "ubi.h" -/* - * Maximum sequence numbers of UBI and volume character device IOCTLs (direct - * logical eraseblock erase is a debug-only feature). - */ -#define UBI_CDEV_IOC_MAX_SEQ 2 -#ifndef CONFIG_MTD_UBI_DEBUG_USERSPACE_IO -#define VOL_CDEV_IOC_MAX_SEQ 1 -#else -#define VOL_CDEV_IOC_MAX_SEQ 2 -#endif - -/** - * major_to_device - get UBI device object by character device major number. - * @major: major number - * - * This function returns a pointer to the UBI device object. - */ -static struct ubi_device *major_to_device(int major) -{ - int i; - - for (i = 0; i < ubi_devices_cnt; i++) - if (ubi_devices[i] && ubi_devices[i]->major == major) - return ubi_devices[i]; - BUG(); - return NULL; -} - /** * get_exclusive - get exclusive access to an UBI volume. * @desc: volume descriptor @@ -124,9 +101,11 @@ static void revoke_exclusive(struct ubi_volume_desc *desc, int mode) static int vol_cdev_open(struct inode *inode, struct file *file) { struct ubi_volume_desc *desc; - const struct ubi_device *ubi = major_to_device(imajor(inode)); - int vol_id = iminor(inode) - 1; - int mode; + int vol_id = iminor(inode) - 1, mode, ubi_num; + + ubi_num = ubi_major2num(imajor(inode)); + if (ubi_num < 0) + return ubi_num; if (file->f_mode & FMODE_WRITE) mode = UBI_READWRITE; @@ -135,7 +114,7 @@ static int vol_cdev_open(struct inode *inode, struct file *file) dbg_msg("open volume %d, mode %d", vol_id, mode); - desc = ubi_open_volume(ubi->ubi_num, vol_id, mode); + desc = ubi_open_volume(ubi_num, vol_id, mode); if (IS_ERR(desc)) return PTR_ERR(desc); @@ -153,8 +132,15 @@ static int vol_cdev_release(struct inode *inode, struct file *file) if (vol->updating) { ubi_warn("update of volume %d not finished, volume is damaged", vol->vol_id); + ubi_assert(!vol->changing_leb); vol->updating = 0; vfree(vol->upd_buf); + } else if (vol->changing_leb) { + dbg_msg("only %lld of %lld bytes received for atomic LEB change" + " for volume %d:%d, cancel", vol->upd_received, + vol->upd_bytes, vol->ubi->ubi_num, vol->vol_id); + vol->changing_leb = 0; + vfree(vol->upd_buf); } ubi_close_volume(desc); @@ -205,13 +191,13 @@ static ssize_t vol_cdev_read(struct file *file, __user char *buf, size_t count, struct ubi_volume_desc *desc = file->private_data; struct ubi_volume *vol = desc->vol; struct ubi_device *ubi = vol->ubi; - int err, lnum, off, len, vol_id = desc->vol->vol_id, tbuf_size; + int err, lnum, off, len, tbuf_size; size_t count_save = count; void *tbuf; uint64_t tmp; dbg_msg("read %zd bytes from offset %lld of volume %d", - count, *offp, vol_id); + count, *offp, vol->vol_id); if (vol->updating) { dbg_err("updating"); @@ -225,7 +211,7 @@ static ssize_t vol_cdev_read(struct file *file, __user char *buf, size_t count, return 0; if (vol->corrupted) - dbg_msg("read from corrupted volume %d", vol_id); + dbg_msg("read from corrupted volume %d", vol->vol_id); if (*offp + count > vol->used_bytes) count_save = count = vol->used_bytes - *offp; @@ -249,7 +235,7 @@ static ssize_t vol_cdev_read(struct file *file, __user char *buf, size_t count, if (off + len >= vol->usable_leb_size) len = vol->usable_leb_size - off; - err = ubi_eba_read_leb(ubi, vol_id, lnum, tbuf, off, len, 0); + err = ubi_eba_read_leb(ubi, vol, lnum, tbuf, 
off, len, 0); if (err) break; @@ -289,13 +275,13 @@ static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf, struct ubi_volume_desc *desc = file->private_data; struct ubi_volume *vol = desc->vol; struct ubi_device *ubi = vol->ubi; - int lnum, off, len, tbuf_size, vol_id = vol->vol_id, err = 0; + int lnum, off, len, tbuf_size, err = 0; size_t count_save = count; char *tbuf; uint64_t tmp; dbg_msg("requested: write %zd bytes to offset %lld of volume %u", - count, *offp, desc->vol->vol_id); + count, *offp, vol->vol_id); if (vol->vol_type == UBI_STATIC_VOLUME) return -EROFS; @@ -339,7 +325,7 @@ static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf, break; } - err = ubi_eba_write_leb(ubi, vol_id, lnum, tbuf, off, len, + err = ubi_eba_write_leb(ubi, vol, lnum, tbuf, off, len, UBI_UNKNOWN); if (err) break; @@ -372,22 +358,32 @@ static ssize_t vol_cdev_write(struct file *file, const char __user *buf, struct ubi_volume *vol = desc->vol; struct ubi_device *ubi = vol->ubi; - if (!vol->updating) + if (!vol->updating && !vol->changing_leb) return vol_cdev_direct_write(file, buf, count, offp); - err = ubi_more_update_data(ubi, vol->vol_id, buf, count); + if (vol->updating) + err = ubi_more_update_data(ubi, vol, buf, count); + else + err = ubi_more_leb_change_data(ubi, vol, buf, count); + if (err < 0) { - ubi_err("cannot write %zd bytes of update data", count); + ubi_err("cannot accept more %zd bytes of data, error %d", + count, err); return err; } if (err) { /* - * Update is finished, @err contains number of actually written - * bytes now. + * The operation is finished, @err contains number of actually + * written bytes. */ count = err; + if (vol->changing_leb) { + revoke_exclusive(desc, UBI_READWRITE); + return count; + } + err = ubi_check_volume(ubi, vol->vol_id); if (err < 0) return err; @@ -402,7 +398,6 @@ static ssize_t vol_cdev_write(struct file *file, const char __user *buf, revoke_exclusive(desc, UBI_READWRITE); } - *offp += count; return count; } @@ -447,11 +442,46 @@ static int vol_cdev_ioctl(struct inode *inode, struct file *file, if (err < 0) break; - err = ubi_start_update(ubi, vol->vol_id, bytes); + err = ubi_start_update(ubi, vol, bytes); if (bytes == 0) revoke_exclusive(desc, UBI_READWRITE); + break; + } + + /* Atomic logical eraseblock change command */ + case UBI_IOCEBCH: + { + struct ubi_leb_change_req req; + + err = copy_from_user(&req, argp, + sizeof(struct ubi_leb_change_req)); + if (err) { + err = -EFAULT; + break; + } + + if (desc->mode == UBI_READONLY || + vol->vol_type == UBI_STATIC_VOLUME) { + err = -EROFS; + break; + } + + /* Validate the request */ + err = -EINVAL; + if (req.lnum < 0 || req.lnum >= vol->reserved_pebs || + req.bytes < 0 || req.lnum >= vol->usable_leb_size) + break; + if (req.dtype != UBI_LONGTERM && req.dtype != UBI_SHORTTERM && + req.dtype != UBI_UNKNOWN) + break; + + err = get_exclusive(desc); + if (err < 0) + break; - file->f_pos = 0; + err = ubi_start_leb_change(ubi, vol, &req); + if (req.bytes == 0) + revoke_exclusive(desc, UBI_READWRITE); break; } @@ -467,7 +497,8 @@ static int vol_cdev_ioctl(struct inode *inode, struct file *file, break; } - if (desc->mode == UBI_READONLY) { + if (desc->mode == UBI_READONLY || + vol->vol_type == UBI_STATIC_VOLUME) { err = -EROFS; break; } @@ -477,13 +508,8 @@ static int vol_cdev_ioctl(struct inode *inode, struct file *file, break; } - if (vol->vol_type != UBI_DYNAMIC_VOLUME) { - err = -EROFS; - break; - } - dbg_msg("erase LEB %d:%d", vol->vol_id, lnum); - err = 
ubi_eba_unmap_leb(ubi, vol->vol_id, lnum); + err = ubi_eba_unmap_leb(ubi, vol, lnum); if (err) break; @@ -580,9 +606,9 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file, if (!capable(CAP_SYS_RESOURCE)) return -EPERM; - ubi = major_to_device(imajor(inode)); - if (IS_ERR(ubi)) - return PTR_ERR(ubi); + ubi = ubi_get_by_major(imajor(inode)); + if (!ubi) + return -ENODEV; switch (cmd) { /* Create volume command */ @@ -591,8 +617,7 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file, struct ubi_mkvol_req req; dbg_msg("create volume"); - err = copy_from_user(&req, argp, - sizeof(struct ubi_mkvol_req)); + err = copy_from_user(&req, argp, sizeof(struct ubi_mkvol_req)); if (err) { err = -EFAULT; break; @@ -604,7 +629,9 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file, req.name[req.name_len] = '\0'; + mutex_lock(&ubi->volumes_mutex); err = ubi_create_volume(ubi, &req); + mutex_unlock(&ubi->volumes_mutex); if (err) break; @@ -633,10 +660,16 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file, break; } + mutex_lock(&ubi->volumes_mutex); err = ubi_remove_volume(desc); - if (err) - ubi_close_volume(desc); + mutex_unlock(&ubi->volumes_mutex); + /* + * The volume is deleted (unless an error occurred), and the + * 'struct ubi_volume' object will be freed when + * 'ubi_close_volume()' will call 'put_device()'. + */ + ubi_close_volume(desc); break; } @@ -648,8 +681,7 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file, struct ubi_rsvol_req req; dbg_msg("re-size volume"); - err = copy_from_user(&req, argp, - sizeof(struct ubi_rsvol_req)); + err = copy_from_user(&req, argp, sizeof(struct ubi_rsvol_req)); if (err) { err = -EFAULT; break; @@ -669,7 +701,9 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file, pebs = !!do_div(tmp, desc->vol->usable_leb_size); pebs += tmp; + mutex_lock(&ubi->volumes_mutex); err = ubi_resize_volume(desc, pebs); + mutex_unlock(&ubi->volumes_mutex); ubi_close_volume(desc); break; } @@ -679,9 +713,93 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file, break; } + ubi_put_device(ubi); return err; } +static int ctrl_cdev_ioctl(struct inode *inode, struct file *file, + unsigned int cmd, unsigned long arg) +{ + int err = 0; + void __user *argp = (void __user *)arg; + + if (!capable(CAP_SYS_RESOURCE)) + return -EPERM; + + switch (cmd) { + /* Attach an MTD device command */ + case UBI_IOCATT: + { + struct ubi_attach_req req; + struct mtd_info *mtd; + + dbg_msg("attach MTD device"); + err = copy_from_user(&req, argp, sizeof(struct ubi_attach_req)); + if (err) { + err = -EFAULT; + break; + } + + if (req.mtd_num < 0 || + (req.ubi_num < 0 && req.ubi_num != UBI_DEV_NUM_AUTO)) { + err = -EINVAL; + break; + } + + mtd = get_mtd_device(NULL, req.mtd_num); + if (IS_ERR(mtd)) { + err = PTR_ERR(mtd); + break; + } + + /* + * Note, further request verification is done by + * 'ubi_attach_mtd_dev()'. 
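Since attaching and detaching is now reachable from user space through the misc device registered in build.c, a short user-space sketch may help. It assumes the control node appears as /dev/ubi_ctrl (matching the misc device name), that UBI_IOCATT, UBI_DEV_NUM_AUTO and struct ubi_attach_req come from <mtd/ubi-user.h>, and that the assigned device number is written back into the request's @ubi_num field, which is what the put_user() above implies:

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <mtd/ubi-user.h>

    int main(void)
    {
            struct ubi_attach_req req = {
                    .ubi_num = UBI_DEV_NUM_AUTO,    /* let UBI pick a free device number */
                    .mtd_num = 3,                   /* attach mtd3 */
                    .vid_hdr_offset = 0,            /* 0 keeps the driver's default offset */
            };
            int fd = open("/dev/ubi_ctrl", O_RDONLY);

            if (fd < 0) {
                    perror("open /dev/ubi_ctrl");
                    return 1;
            }
            if (ioctl(fd, UBI_IOCATT, &req) < 0) {
                    perror("UBI_IOCATT");
                    close(fd);
                    return 1;
            }
            printf("attached as ubi%d\n", req.ubi_num);
            close(fd);
            return 0;
    }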
+ */ + mutex_lock(&ubi_devices_mutex); + err = ubi_attach_mtd_dev(mtd, req.ubi_num, req.vid_hdr_offset); + mutex_unlock(&ubi_devices_mutex); + if (err < 0) + put_mtd_device(mtd); + else + /* @err contains UBI device number */ + err = put_user(err, (__user int32_t *)argp); + + break; + } + + /* Detach an MTD device command */ + case UBI_IOCDET: + { + int ubi_num; + + dbg_msg("dettach MTD device"); + err = get_user(ubi_num, (__user int32_t *)argp); + if (err) { + err = -EFAULT; + break; + } + + mutex_lock(&ubi_devices_mutex); + err = ubi_detach_mtd_dev(ubi_num, 0); + mutex_unlock(&ubi_devices_mutex); + break; + } + + default: + err = -ENOTTY; + break; + } + + return err; +} + +/* UBI control character device operations */ +struct file_operations ubi_ctrl_cdev_operations = { + .ioctl = ctrl_cdev_ioctl, + .owner = THIS_MODULE, +}; + /* UBI character device operations */ struct file_operations ubi_cdev_operations = { .owner = THIS_MODULE, diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h index 467722eb618b..51c40b17f1ec 100644 --- a/drivers/mtd/ubi/debug.h +++ b/drivers/mtd/ubi/debug.h @@ -39,8 +39,9 @@ #ifdef CONFIG_MTD_UBI_DEBUG_MSG /* Generic debugging message */ -#define dbg_msg(fmt, ...) \ - printk(KERN_DEBUG "UBI DBG: %s: " fmt "\n", __FUNCTION__, ##__VA_ARGS__) +#define dbg_msg(fmt, ...) \ + printk(KERN_DEBUG "UBI DBG (pid %d): %s: " fmt "\n", \ + current->pid, __FUNCTION__, ##__VA_ARGS__) #define ubi_dbg_dump_stack() dump_stack() @@ -76,36 +77,28 @@ void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req); #ifdef CONFIG_MTD_UBI_DEBUG_MSG_EBA /* Messages from the eraseblock association unit */ -#define dbg_eba(fmt, ...) \ - printk(KERN_DEBUG "UBI DBG eba: %s: " fmt "\n", __FUNCTION__, \ - ##__VA_ARGS__) +#define dbg_eba(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__) #else #define dbg_eba(fmt, ...) ({}) #endif #ifdef CONFIG_MTD_UBI_DEBUG_MSG_WL /* Messages from the wear-leveling unit */ -#define dbg_wl(fmt, ...) \ - printk(KERN_DEBUG "UBI DBG wl: %s: " fmt "\n", __FUNCTION__, \ - ##__VA_ARGS__) +#define dbg_wl(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__) #else #define dbg_wl(fmt, ...) ({}) #endif #ifdef CONFIG_MTD_UBI_DEBUG_MSG_IO /* Messages from the input/output unit */ -#define dbg_io(fmt, ...) \ - printk(KERN_DEBUG "UBI DBG io: %s: " fmt "\n", __FUNCTION__, \ - ##__VA_ARGS__) +#define dbg_io(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__) #else #define dbg_io(fmt, ...) ({}) #endif #ifdef CONFIG_MTD_UBI_DEBUG_MSG_BLD /* Initialization and build messages */ -#define dbg_bld(fmt, ...) \ - printk(KERN_DEBUG "UBI DBG bld: %s: " fmt "\n", __FUNCTION__, \ - ##__VA_ARGS__) +#define dbg_bld(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__) #else #define dbg_bld(fmt, ...) ({}) #endif diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c index 880fa3690352..7ce91ca742b1 100644 --- a/drivers/mtd/ubi/eba.c +++ b/drivers/mtd/ubi/eba.c @@ -31,7 +31,7 @@ * logical eraseblock it is locked for reading or writing. The per-logical * eraseblock locking is implemented by means of the lock tree. The lock tree * is an RB-tree which refers all the currently locked logical eraseblocks. The - * lock tree elements are &struct ltree_entry objects. They are indexed by + * lock tree elements are &struct ubi_ltree_entry objects. They are indexed by * (@vol_id, @lnum) pairs. * * EBA also maintains the global sequence counter which is incremented each @@ -50,29 +50,6 @@ #define EBA_RESERVED_PEBS 1 /** - * struct ltree_entry - an entry in the lock tree. 
- * @rb: links RB-tree nodes - * @vol_id: volume ID of the locked logical eraseblock - * @lnum: locked logical eraseblock number - * @users: how many tasks are using this logical eraseblock or wait for it - * @mutex: read/write mutex to implement read/write access serialization to - * the (@vol_id, @lnum) logical eraseblock - * - * When a logical eraseblock is being locked - corresponding &struct ltree_entry - * object is inserted to the lock tree (@ubi->ltree). - */ -struct ltree_entry { - struct rb_node rb; - int vol_id; - int lnum; - int users; - struct rw_semaphore mutex; -}; - -/* Slab cache for lock-tree entries */ -static struct kmem_cache *ltree_slab; - -/** * next_sqnum - get next sequence number. * @ubi: UBI device description object * @@ -101,7 +78,7 @@ static unsigned long long next_sqnum(struct ubi_device *ubi) */ static int ubi_get_compat(const struct ubi_device *ubi, int vol_id) { - if (vol_id == UBI_LAYOUT_VOL_ID) + if (vol_id == UBI_LAYOUT_VOLUME_ID) return UBI_LAYOUT_VOLUME_COMPAT; return 0; } @@ -112,20 +89,20 @@ static int ubi_get_compat(const struct ubi_device *ubi, int vol_id) * @vol_id: volume ID * @lnum: logical eraseblock number * - * This function returns a pointer to the corresponding &struct ltree_entry + * This function returns a pointer to the corresponding &struct ubi_ltree_entry * object if the logical eraseblock is locked and %NULL if it is not. * @ubi->ltree_lock has to be locked. */ -static struct ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id, - int lnum) +static struct ubi_ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id, + int lnum) { struct rb_node *p; p = ubi->ltree.rb_node; while (p) { - struct ltree_entry *le; + struct ubi_ltree_entry *le; - le = rb_entry(p, struct ltree_entry, rb); + le = rb_entry(p, struct ubi_ltree_entry, rb); if (vol_id < le->vol_id) p = p->rb_left; @@ -155,15 +132,17 @@ static struct ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id, * Returns pointer to the lock tree entry or %-ENOMEM if memory allocation * failed. 
*/ -static struct ltree_entry *ltree_add_entry(struct ubi_device *ubi, int vol_id, - int lnum) +static struct ubi_ltree_entry *ltree_add_entry(struct ubi_device *ubi, + int vol_id, int lnum) { - struct ltree_entry *le, *le1, *le_free; + struct ubi_ltree_entry *le, *le1, *le_free; - le = kmem_cache_alloc(ltree_slab, GFP_NOFS); + le = kmalloc(sizeof(struct ubi_ltree_entry), GFP_NOFS); if (!le) return ERR_PTR(-ENOMEM); + le->users = 0; + init_rwsem(&le->mutex); le->vol_id = vol_id; le->lnum = lnum; @@ -189,7 +168,7 @@ static struct ltree_entry *ltree_add_entry(struct ubi_device *ubi, int vol_id, p = &ubi->ltree.rb_node; while (*p) { parent = *p; - le1 = rb_entry(parent, struct ltree_entry, rb); + le1 = rb_entry(parent, struct ubi_ltree_entry, rb); if (vol_id < le1->vol_id) p = &(*p)->rb_left; @@ -211,7 +190,7 @@ static struct ltree_entry *ltree_add_entry(struct ubi_device *ubi, int vol_id, spin_unlock(&ubi->ltree_lock); if (le_free) - kmem_cache_free(ltree_slab, le_free); + kfree(le_free); return le; } @@ -227,7 +206,7 @@ static struct ltree_entry *ltree_add_entry(struct ubi_device *ubi, int vol_id, */ static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum) { - struct ltree_entry *le; + struct ubi_ltree_entry *le; le = ltree_add_entry(ubi, vol_id, lnum); if (IS_ERR(le)) @@ -245,7 +224,7 @@ static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum) static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum) { int free = 0; - struct ltree_entry *le; + struct ubi_ltree_entry *le; spin_lock(&ubi->ltree_lock); le = ltree_lookup(ubi, vol_id, lnum); @@ -259,7 +238,7 @@ static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum) up_read(&le->mutex); if (free) - kmem_cache_free(ltree_slab, le); + kfree(le); } /** @@ -273,7 +252,7 @@ static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum) */ static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum) { - struct ltree_entry *le; + struct ubi_ltree_entry *le; le = ltree_add_entry(ubi, vol_id, lnum); if (IS_ERR(le)) @@ -283,6 +262,44 @@ static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum) } /** + * leb_write_lock - lock logical eraseblock for writing. + * @ubi: UBI device description object + * @vol_id: volume ID + * @lnum: logical eraseblock number + * + * This function locks a logical eraseblock for writing if there is no + * contention and does nothing if there is contention. Returns %0 in case of + * success, %1 in case of contention, and and a negative error code in case of + * failure. + */ +static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum) +{ + int free; + struct ubi_ltree_entry *le; + + le = ltree_add_entry(ubi, vol_id, lnum); + if (IS_ERR(le)) + return PTR_ERR(le); + if (down_write_trylock(&le->mutex)) + return 0; + + /* Contention, cancel */ + spin_lock(&ubi->ltree_lock); + le->users -= 1; + ubi_assert(le->users >= 0); + if (le->users == 0) { + rb_erase(&le->rb, &ubi->ltree); + free = 1; + } else + free = 0; + spin_unlock(&ubi->ltree_lock); + if (free) + kfree(le); + + return 1; +} + +/** * leb_write_unlock - unlock logical eraseblock. 
* @ubi: UBI device description object * @vol_id: volume ID @@ -291,7 +308,7 @@ static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum) static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum) { int free; - struct ltree_entry *le; + struct ubi_ltree_entry *le; spin_lock(&ubi->ltree_lock); le = ltree_lookup(ubi, vol_id, lnum); @@ -306,23 +323,23 @@ static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum) up_write(&le->mutex); if (free) - kmem_cache_free(ltree_slab, le); + kfree(le); } /** * ubi_eba_unmap_leb - un-map logical eraseblock. * @ubi: UBI device description object - * @vol_id: volume ID + * @vol: volume description object * @lnum: logical eraseblock number * * This function un-maps logical eraseblock @lnum and schedules corresponding * physical eraseblock for erasure. Returns zero in case of success and a * negative error code in case of failure. */ -int ubi_eba_unmap_leb(struct ubi_device *ubi, int vol_id, int lnum) +int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol, + int lnum) { - int idx = vol_id2idx(ubi, vol_id), err, pnum; - struct ubi_volume *vol = ubi->volumes[idx]; + int err, pnum, vol_id = vol->vol_id; if (ubi->ro_mode) return -EROFS; @@ -349,7 +366,7 @@ out_unlock: /** * ubi_eba_read_leb - read data. * @ubi: UBI device description object - * @vol_id: volume ID + * @vol: volume description object * @lnum: logical eraseblock number * @buf: buffer to store the read data * @offset: offset from where to read @@ -365,12 +382,11 @@ out_unlock: * returned for any volume type if an ECC error was detected by the MTD device * driver. Other negative error cored may be returned in case of other errors. */ -int ubi_eba_read_leb(struct ubi_device *ubi, int vol_id, int lnum, void *buf, - int offset, int len, int check) +int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, + void *buf, int offset, int len, int check) { - int err, pnum, scrub = 0, idx = vol_id2idx(ubi, vol_id); + int err, pnum, scrub = 0, vol_id = vol->vol_id; struct ubi_vid_hdr *vid_hdr; - struct ubi_volume *vol = ubi->volumes[idx]; uint32_t uninitialized_var(crc); err = leb_read_lock(ubi, vol_id, lnum); @@ -578,7 +594,7 @@ write_error: /** * ubi_eba_write_leb - write data to dynamic volume. * @ubi: UBI device description object - * @vol_id: volume ID + * @vol: volume description object * @lnum: logical eraseblock number * @buf: the data to write * @offset: offset within the logical eraseblock where to write @@ -586,15 +602,14 @@ write_error: * @dtype: data type * * This function writes data to logical eraseblock @lnum of a dynamic volume - * @vol_id. Returns zero in case of success and a negative error code in case + * @vol. Returns zero in case of success and a negative error code in case * of failure. In case of error, it is possible that something was still * written to the flash media, but may be some garbage. 
*/ -int ubi_eba_write_leb(struct ubi_device *ubi, int vol_id, int lnum, +int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, const void *buf, int offset, int len, int dtype) { - int idx = vol_id2idx(ubi, vol_id), err, pnum, tries = 0; - struct ubi_volume *vol = ubi->volumes[idx]; + int err, pnum, tries = 0, vol_id = vol->vol_id; struct ubi_vid_hdr *vid_hdr; if (ubi->ro_mode) @@ -613,7 +628,8 @@ int ubi_eba_write_leb(struct ubi_device *ubi, int vol_id, int lnum, if (err) { ubi_warn("failed to write data to PEB %d", pnum); if (err == -EIO && ubi->bad_allowed) - err = recover_peb(ubi, pnum, vol_id, lnum, buf, offset, len); + err = recover_peb(ubi, pnum, vol_id, lnum, buf, + offset, len); if (err) ubi_ro_mode(ubi); } @@ -656,11 +672,14 @@ retry: goto write_error; } - err = ubi_io_write_data(ubi, buf, pnum, offset, len); - if (err) { - ubi_warn("failed to write %d bytes at offset %d of LEB %d:%d, " - "PEB %d", len, offset, vol_id, lnum, pnum); - goto write_error; + if (len) { + err = ubi_io_write_data(ubi, buf, pnum, offset, len); + if (err) { + ubi_warn("failed to write %d bytes at offset %d of " + "LEB %d:%d, PEB %d", len, offset, vol_id, + lnum, pnum); + goto write_error; + } } vol->eba_tbl[lnum] = pnum; @@ -698,7 +717,7 @@ write_error: /** * ubi_eba_write_leb_st - write data to static volume. * @ubi: UBI device description object - * @vol_id: volume ID + * @vol: volume description object * @lnum: logical eraseblock number * @buf: data to write * @len: how many bytes to write @@ -706,7 +725,7 @@ write_error: * @used_ebs: how many logical eraseblocks will this volume contain * * This function writes data to logical eraseblock @lnum of static volume - * @vol_id. The @used_ebs argument should contain total number of logical + * @vol. The @used_ebs argument should contain total number of logical * eraseblock in this static volume. * * When writing to the last logical eraseblock, the @len argument doesn't have @@ -718,12 +737,11 @@ write_error: * volumes. This function returns zero in case of success and a negative error * code in case of failure. */ -int ubi_eba_write_leb_st(struct ubi_device *ubi, int vol_id, int lnum, - const void *buf, int len, int dtype, int used_ebs) +int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol, + int lnum, const void *buf, int len, int dtype, + int used_ebs) { - int err, pnum, tries = 0, data_size = len; - int idx = vol_id2idx(ubi, vol_id); - struct ubi_volume *vol = ubi->volumes[idx]; + int err, pnum, tries = 0, data_size = len, vol_id = vol->vol_id; struct ubi_vid_hdr *vid_hdr; uint32_t crc; @@ -819,7 +837,7 @@ write_error: /* * ubi_eba_atomic_leb_change - change logical eraseblock atomically. * @ubi: UBI device description object - * @vol_id: volume ID + * @vol: volume description object * @lnum: logical eraseblock number * @buf: data to write * @len: how many bytes to write @@ -834,17 +852,27 @@ write_error: * UBI reserves one LEB for the "atomic LEB change" operation, so only one * LEB change may be done at a time. This is ensured by @ubi->alc_mutex. 
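The user-visible counterpart of this function is the new UBI_IOCEBCH ioctl added to the volume character device earlier in this patch: the ioctl arms the change and the payload is then streamed with write(). A user-space sketch, assuming the volume node name (e.g. /dev/ubi0_0) and that UBI_IOCEBCH, UBI_UNKNOWN and struct ubi_leb_change_req (fields @lnum, @bytes, @dtype, as used by the handler) are provided by <mtd/ubi-user.h>:

    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <mtd/ubi-user.h>

    /* Atomically replace LEB @lnum of the volume behind @node with @len bytes of @data. */
    static int change_leb(const char *node, int lnum, const void *data, int len)
    {
            struct ubi_leb_change_req req = {
                    .lnum  = lnum,
                    .bytes = len,
                    .dtype = UBI_UNKNOWN,
            };
            int fd = open(node, O_RDWR);

            if (fd < 0)
                    return -1;
            if (ioctl(fd, UBI_IOCEBCH, &req) < 0 ||   /* start the atomic change */
                write(fd, data, len) != len) {        /* stream the new LEB contents */
                    close(fd);
                    return -1;
            }
            close(fd);   /* the old contents are replaced only once all bytes have arrived */
            return 0;
    }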
*/ -int ubi_eba_atomic_leb_change(struct ubi_device *ubi, int vol_id, int lnum, - const void *buf, int len, int dtype) +int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol, + int lnum, const void *buf, int len, int dtype) { - int err, pnum, tries = 0, idx = vol_id2idx(ubi, vol_id); - struct ubi_volume *vol = ubi->volumes[idx]; + int err, pnum, tries = 0, vol_id = vol->vol_id; struct ubi_vid_hdr *vid_hdr; uint32_t crc; if (ubi->ro_mode) return -EROFS; + if (len == 0) { + /* + * Special case when data length is zero. In this case the LEB + * has to be unmapped and mapped somewhere else. + */ + err = ubi_eba_unmap_leb(ubi, vol, lnum); + if (err) + return err; + return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0, dtype); + } + vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); if (!vid_hdr) return -ENOMEM; @@ -928,20 +956,6 @@ write_error: } /** - * ltree_entry_ctor - lock tree entries slab cache constructor. - * @obj: the lock-tree entry to construct - * @cache: the lock tree entry slab cache - * @flags: constructor flags - */ -static void ltree_entry_ctor(struct kmem_cache *cache, void *obj) -{ - struct ltree_entry *le = obj; - - le->users = 0; - init_rwsem(&le->mutex); -} - -/** * ubi_eba_copy_leb - copy logical eraseblock. * @ubi: UBI device description object * @from: physical eraseblock number from where to copy @@ -950,14 +964,16 @@ static void ltree_entry_ctor(struct kmem_cache *cache, void *obj) * * This function copies logical eraseblock from physical eraseblock @from to * physical eraseblock @to. The @vid_hdr buffer may be changed by this - * function. Returns zero in case of success, %UBI_IO_BITFLIPS if the operation - * was canceled because bit-flips were detected at the target PEB, and a - * negative error code in case of failure. + * function. Returns: + * o %0 in case of success; + * o %1 if the operation was canceled and should be tried later (e.g., + * because a bit-flip was detected at the target PEB); + * o %2 if the volume is being deleted and this LEB should not be moved. */ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, struct ubi_vid_hdr *vid_hdr) { - int err, vol_id, lnum, data_size, aldata_size, pnum, idx; + int err, vol_id, lnum, data_size, aldata_size, idx; struct ubi_volume *vol; uint32_t crc; @@ -973,51 +989,67 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, data_size = aldata_size = ubi->leb_size - be32_to_cpu(vid_hdr->data_pad); - /* - * We do not want anybody to write to this logical eraseblock while we - * are moving it, so we lock it. - */ - err = leb_write_lock(ubi, vol_id, lnum); - if (err) - return err; - - mutex_lock(&ubi->buf_mutex); - - /* - * But the logical eraseblock might have been put by this time. - * Cancel if it is true. - */ idx = vol_id2idx(ubi, vol_id); - + spin_lock(&ubi->volumes_lock); /* - * We may race with volume deletion/re-size, so we have to hold - * @ubi->volumes_lock. + * Note, we may race with volume deletion, which means that the volume + * this logical eraseblock belongs to might be being deleted. Since the + * volume deletion unmaps all the volume's logical eraseblocks, it will + * be locked in 'ubi_wl_put_peb()' and wait for the WL worker to finish. 
*/ - spin_lock(&ubi->volumes_lock); vol = ubi->volumes[idx]; if (!vol) { - dbg_eba("volume %d was removed meanwhile", vol_id); + /* No need to do further work, cancel */ + dbg_eba("volume %d is being removed, cancel", vol_id); spin_unlock(&ubi->volumes_lock); - goto out_unlock; + return 2; } + spin_unlock(&ubi->volumes_lock); - pnum = vol->eba_tbl[lnum]; - if (pnum != from) { - dbg_eba("LEB %d:%d is no longer mapped to PEB %d, mapped to " - "PEB %d, cancel", vol_id, lnum, from, pnum); - spin_unlock(&ubi->volumes_lock); - goto out_unlock; + /* + * We do not want anybody to write to this logical eraseblock while we + * are moving it, so lock it. + * + * Note, we are using non-waiting locking here, because we cannot sleep + * on the LEB, since it may cause deadlocks. Indeed, imagine a task is + * unmapping the LEB which is mapped to the PEB we are going to move + * (@from). This task locks the LEB and goes sleep in the + * 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are + * holding @ubi->move_mutex and go sleep on the LEB lock. So, if the + * LEB is already locked, we just do not move it and return %1. + */ + err = leb_write_trylock(ubi, vol_id, lnum); + if (err) { + dbg_eba("contention on LEB %d:%d, cancel", vol_id, lnum); + return err; } - spin_unlock(&ubi->volumes_lock); - /* OK, now the LEB is locked and we can safely start moving it */ + /* + * The LEB might have been put meanwhile, and the task which put it is + * probably waiting on @ubi->move_mutex. No need to continue the work, + * cancel it. + */ + if (vol->eba_tbl[lnum] != from) { + dbg_eba("LEB %d:%d is no longer mapped to PEB %d, mapped to " + "PEB %d, cancel", vol_id, lnum, from, + vol->eba_tbl[lnum]); + err = 1; + goto out_unlock_leb; + } + /* + * OK, now the LEB is locked and we can safely start moving iy. Since + * this function utilizes thie @ubi->peb1_buf buffer which is shared + * with some other functions, so lock the buffer by taking the + * @ubi->buf_mutex. 
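The deadlock argument in the comment above reduces to a lock-ordering conflict; below is a minimal sketch with generic locks standing in for @ubi->move_mutex and the per-LEB lock (UBI does not take them through plain pointers like this, the sketch only shows why a trylock is used for the inner lock).

#include <linux/mutex.h>
#include <linux/rwsem.h>

static int example_move(struct mutex *outer, struct rw_semaphore *inner)
{
	int ret = 1;		/* 1: canceled, caller should retry later */

	mutex_lock(outer);
	/*
	 * Another task may hold @inner and then block on @outer, so sleeping
	 * on @inner here could deadlock; only try to take it.
	 */
	if (down_write_trylock(inner)) {
		/* ... move the eraseblock here ... */
		up_write(inner);
		ret = 0;
	}
	mutex_unlock(outer);
	return ret;
}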
+ */ + mutex_lock(&ubi->buf_mutex); dbg_eba("read %d bytes of data", aldata_size); err = ubi_io_read_data(ubi, ubi->peb_buf1, from, 0, aldata_size); if (err && err != UBI_IO_BITFLIPS) { ubi_warn("error %d while reading data from PEB %d", err, from); - goto out_unlock; + goto out_unlock_buf; } /* @@ -1053,7 +1085,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, err = ubi_io_write_vid_hdr(ubi, to, vid_hdr); if (err) - goto out_unlock; + goto out_unlock_buf; cond_resched(); @@ -1062,13 +1094,15 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, if (err) { if (err != UBI_IO_BITFLIPS) ubi_warn("cannot read VID header back from PEB %d", to); - goto out_unlock; + else + err = 1; + goto out_unlock_buf; } if (data_size > 0) { err = ubi_io_write_data(ubi, ubi->peb_buf1, to, 0, aldata_size); if (err) - goto out_unlock; + goto out_unlock_buf; cond_resched(); @@ -1082,7 +1116,9 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, if (err != UBI_IO_BITFLIPS) ubi_warn("cannot read data back from PEB %d", to); - goto out_unlock; + else + err = 1; + goto out_unlock_buf; } cond_resched(); @@ -1090,15 +1126,16 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, if (memcmp(ubi->peb_buf1, ubi->peb_buf2, aldata_size)) { ubi_warn("read data back from PEB %d - it is different", to); - goto out_unlock; + goto out_unlock_buf; } } ubi_assert(vol->eba_tbl[lnum] == from); vol->eba_tbl[lnum] = to; -out_unlock: +out_unlock_buf: mutex_unlock(&ubi->buf_mutex); +out_unlock_leb: leb_write_unlock(ubi, vol_id, lnum); return err; } @@ -1125,14 +1162,6 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si) mutex_init(&ubi->alc_mutex); ubi->ltree = RB_ROOT; - if (ubi_devices_cnt == 0) { - ltree_slab = kmem_cache_create("ubi_ltree_slab", - sizeof(struct ltree_entry), 0, - 0, <ree_entry_ctor); - if (!ltree_slab) - return -ENOMEM; - } - ubi->global_sqnum = si->max_sqnum + 1; num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT; @@ -1168,6 +1197,15 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si) } } + if (ubi->avail_pebs < EBA_RESERVED_PEBS) { + ubi_err("no enough physical eraseblocks (%d, need %d)", + ubi->avail_pebs, EBA_RESERVED_PEBS); + err = -ENOSPC; + goto out_free; + } + ubi->avail_pebs -= EBA_RESERVED_PEBS; + ubi->rsvd_pebs += EBA_RESERVED_PEBS; + if (ubi->bad_allowed) { ubi_calculate_reserved(ubi); @@ -1184,15 +1222,6 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si) ubi->rsvd_pebs += ubi->beb_rsvd_pebs; } - if (ubi->avail_pebs < EBA_RESERVED_PEBS) { - ubi_err("no enough physical eraseblocks (%d, need %d)", - ubi->avail_pebs, EBA_RESERVED_PEBS); - err = -ENOSPC; - goto out_free; - } - ubi->avail_pebs -= EBA_RESERVED_PEBS; - ubi->rsvd_pebs += EBA_RESERVED_PEBS; - dbg_eba("EBA unit is initialized"); return 0; @@ -1202,8 +1231,6 @@ out_free: continue; kfree(ubi->volumes[i]->eba_tbl); } - if (ubi_devices_cnt == 0) - kmem_cache_destroy(ltree_slab); return err; } @@ -1222,6 +1249,4 @@ void ubi_eba_close(const struct ubi_device *ubi) continue; kfree(ubi->volumes[i]->eba_tbl); } - if (ubi_devices_cnt == 1) - kmem_cache_destroy(ltree_slab); } diff --git a/drivers/mtd/ubi/gluebi.c b/drivers/mtd/ubi/gluebi.c index 41ff74c60e14..d397219238d3 100644 --- a/drivers/mtd/ubi/gluebi.c +++ b/drivers/mtd/ubi/gluebi.c @@ -129,8 +129,7 @@ static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len, if (to_read > total_read) to_read = total_read; - err = ubi_eba_read_leb(ubi, vol->vol_id, lnum, buf, offs, - 
to_read, 0); + err = ubi_eba_read_leb(ubi, vol, lnum, buf, offs, to_read, 0); if (err) break; @@ -187,8 +186,8 @@ static int gluebi_write(struct mtd_info *mtd, loff_t to, size_t len, if (to_write > total_written) to_write = total_written; - err = ubi_eba_write_leb(ubi, vol->vol_id, lnum, buf, offs, - to_write, UBI_UNKNOWN); + err = ubi_eba_write_leb(ubi, vol, lnum, buf, offs, to_write, + UBI_UNKNOWN); if (err) break; @@ -237,7 +236,7 @@ static int gluebi_erase(struct mtd_info *mtd, struct erase_info *instr) return -EROFS; for (i = 0; i < count; i++) { - err = ubi_eba_unmap_leb(ubi, vol->vol_id, lnum + i); + err = ubi_eba_unmap_leb(ubi, vol, lnum + i); if (err) goto out_err; } diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c index 7c304eec78b5..db3efdef2433 100644 --- a/drivers/mtd/ubi/io.c +++ b/drivers/mtd/ubi/io.c @@ -173,6 +173,16 @@ retry: ubi_err("error %d while reading %d bytes from PEB %d:%d, " "read %zd bytes", err, len, pnum, offset, read); ubi_dbg_dump_stack(); + + /* + * The driver should never return -EBADMSG if it failed to read + * all the requested data. But some buggy drivers might do + * this, so we change it to -EIO. + */ + if (read != len && err == -EBADMSG) { + ubi_assert(0); + err = -EIO; + } } else { ubi_assert(len == read); diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c index 03c774f41549..a70d58823f8d 100644 --- a/drivers/mtd/ubi/kapi.c +++ b/drivers/mtd/ubi/kapi.c @@ -30,23 +30,27 @@ * @ubi_num: UBI device number * @di: the information is stored here * - * This function returns %0 in case of success and a %-ENODEV if there is no - * such UBI device. + * This function returns %0 in case of success, %-EINVAL if the UBI device + * number is invalid, and %-ENODEV if there is no such UBI device. */ int ubi_get_device_info(int ubi_num, struct ubi_device_info *di) { - const struct ubi_device *ubi; + struct ubi_device *ubi; + + if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES) + return -EINVAL; - if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES || - !ubi_devices[ubi_num]) + ubi = ubi_get_device(ubi_num); + if (!ubi) return -ENODEV; - ubi = ubi_devices[ubi_num]; di->ubi_num = ubi->ubi_num; di->leb_size = ubi->leb_size; di->min_io_size = ubi->min_io_size; di->ro_mode = ubi->ro_mode; - di->cdev = MKDEV(ubi->major, 0); + di->cdev = ubi->cdev.dev; + + ubi_put_device(ubi); return 0; } EXPORT_SYMBOL_GPL(ubi_get_device_info); @@ -73,7 +77,7 @@ void ubi_get_volume_info(struct ubi_volume_desc *desc, vi->usable_leb_size = vol->usable_leb_size; vi->name_len = vol->name_len; vi->name = vol->name; - vi->cdev = MKDEV(ubi->major, vi->vol_id + 1); + vi->cdev = vol->cdev.dev; } EXPORT_SYMBOL_GPL(ubi_get_volume_info); @@ -104,37 +108,39 @@ struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode) dbg_msg("open device %d volume %d, mode %d", ubi_num, vol_id, mode); - err = -ENODEV; - if (ubi_num < 0) - return ERR_PTR(err); - - ubi = ubi_devices[ubi_num]; - - if (!try_module_get(THIS_MODULE)) - return ERR_PTR(err); - - if (ubi_num >= UBI_MAX_DEVICES || !ubi) - goto out_put; + if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES) + return ERR_PTR(-EINVAL); - err = -EINVAL; - if (vol_id < 0 || vol_id >= ubi->vtbl_slots) - goto out_put; if (mode != UBI_READONLY && mode != UBI_READWRITE && mode != UBI_EXCLUSIVE) - goto out_put; + return ERR_PTR(-EINVAL); + + /* + * First of all, we have to get the UBI device to prevent its removal. 
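A sketch of the get/put discipline the reworked kapi.c helpers follow: every successful ubi_get_device() is paired with ubi_put_device(), even on the success path, so the device cannot be detached while its fields are being read. The helper itself is hypothetical.

#include <linux/errno.h>
#include "ubi.h"

static int example_query_leb_size(int ubi_num, int *leb_size)
{
	struct ubi_device *ubi;

	if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
		return -EINVAL;

	ubi = ubi_get_device(ubi_num);		/* takes a reference */
	if (!ubi)
		return -ENODEV;

	*leb_size = ubi->leb_size;
	ubi_put_device(ubi);			/* drops the reference */
	return 0;
}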
+ */ + ubi = ubi_get_device(ubi_num); + if (!ubi) + return ERR_PTR(-ENODEV); + + if (vol_id < 0 || vol_id >= ubi->vtbl_slots) { + err = -EINVAL; + goto out_put_ubi; + } desc = kmalloc(sizeof(struct ubi_volume_desc), GFP_KERNEL); if (!desc) { err = -ENOMEM; - goto out_put; + goto out_put_ubi; } + err = -ENODEV; + if (!try_module_get(THIS_MODULE)) + goto out_free; + spin_lock(&ubi->volumes_lock); vol = ubi->volumes[vol_id]; - if (!vol) { - err = -ENODEV; + if (!vol) goto out_unlock; - } err = -EBUSY; switch (mode) { @@ -156,21 +162,19 @@ struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode) vol->exclusive = 1; break; } + get_device(&vol->dev); + vol->ref_count += 1; spin_unlock(&ubi->volumes_lock); desc->vol = vol; desc->mode = mode; - /* - * To prevent simultaneous checks of the same volume we use @vtbl_mutex, - * although it is not the purpose it was introduced for. - */ - mutex_lock(&ubi->vtbl_mutex); + mutex_lock(&ubi->ckvol_mutex); if (!vol->checked) { /* This is the first open - check the volume */ err = ubi_check_volume(ubi, vol_id); if (err < 0) { - mutex_unlock(&ubi->vtbl_mutex); + mutex_unlock(&ubi->ckvol_mutex); ubi_close_volume(desc); return ERR_PTR(err); } @@ -181,14 +185,17 @@ struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode) } vol->checked = 1; } - mutex_unlock(&ubi->vtbl_mutex); + mutex_unlock(&ubi->ckvol_mutex); + return desc; out_unlock: spin_unlock(&ubi->volumes_lock); - kfree(desc); -out_put: module_put(THIS_MODULE); +out_free: + kfree(desc); +out_put_ubi: + ubi_put_device(ubi); return ERR_PTR(err); } EXPORT_SYMBOL_GPL(ubi_open_volume); @@ -205,8 +212,8 @@ struct ubi_volume_desc *ubi_open_volume_nm(int ubi_num, const char *name, int mode) { int i, vol_id = -1, len; - struct ubi_volume_desc *ret; struct ubi_device *ubi; + struct ubi_volume_desc *ret; dbg_msg("open volume %s, mode %d", name, mode); @@ -217,14 +224,12 @@ struct ubi_volume_desc *ubi_open_volume_nm(int ubi_num, const char *name, if (len > UBI_VOL_NAME_MAX) return ERR_PTR(-EINVAL); - ret = ERR_PTR(-ENODEV); - if (!try_module_get(THIS_MODULE)) - return ret; - - if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES || !ubi_devices[ubi_num]) - goto out_put; + if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES) + return ERR_PTR(-EINVAL); - ubi = ubi_devices[ubi_num]; + ubi = ubi_get_device(ubi_num); + if (!ubi) + return ERR_PTR(-ENODEV); spin_lock(&ubi->volumes_lock); /* Walk all volumes of this UBI device */ @@ -238,13 +243,16 @@ struct ubi_volume_desc *ubi_open_volume_nm(int ubi_num, const char *name, } spin_unlock(&ubi->volumes_lock); - if (vol_id < 0) - goto out_put; + if (vol_id >= 0) + ret = ubi_open_volume(ubi_num, vol_id, mode); + else + ret = ERR_PTR(-ENODEV); - ret = ubi_open_volume(ubi_num, vol_id, mode); - -out_put: - module_put(THIS_MODULE); + /* + * We should put the UBI device even in case of success, because + * 'ubi_open_volume()' took a reference as well. 
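The first-open check above follows a common "do it once under a mutex" pattern; a simplified, generic sketch (the callback stands in for ubi_check_volume(), and the corrupted-volume handling is omitted):

#include <linux/mutex.h>

static int example_check_once(struct mutex *lock, int *checked,
			      int (*check)(void *), void *arg)
{
	int err = 0;

	mutex_lock(lock);
	if (!*checked) {
		err = check(arg);
		if (err >= 0) {
			/* a positive ("corrupted") result still counts as checked */
			*checked = 1;
			err = 0;
		}
	}
	mutex_unlock(lock);
	return err;
}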
+ */ + ubi_put_device(ubi); return ret; } EXPORT_SYMBOL_GPL(ubi_open_volume_nm); @@ -256,10 +264,11 @@ EXPORT_SYMBOL_GPL(ubi_open_volume_nm); void ubi_close_volume(struct ubi_volume_desc *desc) { struct ubi_volume *vol = desc->vol; + struct ubi_device *ubi = vol->ubi; dbg_msg("close volume %d, mode %d", vol->vol_id, desc->mode); - spin_lock(&vol->ubi->volumes_lock); + spin_lock(&ubi->volumes_lock); switch (desc->mode) { case UBI_READONLY: vol->readers -= 1; @@ -270,9 +279,12 @@ void ubi_close_volume(struct ubi_volume_desc *desc) case UBI_EXCLUSIVE: vol->exclusive = 0; } - spin_unlock(&vol->ubi->volumes_lock); + vol->ref_count -= 1; + spin_unlock(&ubi->volumes_lock); kfree(desc); + put_device(&vol->dev); + ubi_put_device(ubi); module_put(THIS_MODULE); } EXPORT_SYMBOL_GPL(ubi_close_volume); @@ -332,7 +344,7 @@ int ubi_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset, if (len == 0) return 0; - err = ubi_eba_read_leb(ubi, vol_id, lnum, buf, offset, len, check); + err = ubi_eba_read_leb(ubi, vol, lnum, buf, offset, len, check); if (err && err == -EBADMSG && vol->vol_type == UBI_STATIC_VOLUME) { ubi_warn("mark volume %d as corrupted", vol_id); vol->corrupted = 1; @@ -399,7 +411,7 @@ int ubi_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf, if (len == 0) return 0; - return ubi_eba_write_leb(ubi, vol_id, lnum, buf, offset, len, dtype); + return ubi_eba_write_leb(ubi, vol, lnum, buf, offset, len, dtype); } EXPORT_SYMBOL_GPL(ubi_leb_write); @@ -448,7 +460,7 @@ int ubi_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf, if (len == 0) return 0; - return ubi_eba_atomic_leb_change(ubi, vol_id, lnum, buf, len, dtype); + return ubi_eba_atomic_leb_change(ubi, vol, lnum, buf, len, dtype); } EXPORT_SYMBOL_GPL(ubi_leb_change); @@ -468,9 +480,9 @@ int ubi_leb_erase(struct ubi_volume_desc *desc, int lnum) { struct ubi_volume *vol = desc->vol; struct ubi_device *ubi = vol->ubi; - int err, vol_id = vol->vol_id; + int err; - dbg_msg("erase LEB %d:%d", vol_id, lnum); + dbg_msg("erase LEB %d:%d", vol->vol_id, lnum); if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME) return -EROFS; @@ -481,7 +493,7 @@ int ubi_leb_erase(struct ubi_volume_desc *desc, int lnum) if (vol->upd_marker) return -EBADF; - err = ubi_eba_unmap_leb(ubi, vol_id, lnum); + err = ubi_eba_unmap_leb(ubi, vol, lnum); if (err) return err; @@ -529,9 +541,8 @@ int ubi_leb_unmap(struct ubi_volume_desc *desc, int lnum) { struct ubi_volume *vol = desc->vol; struct ubi_device *ubi = vol->ubi; - int vol_id = vol->vol_id; - dbg_msg("unmap LEB %d:%d", vol_id, lnum); + dbg_msg("unmap LEB %d:%d", vol->vol_id, lnum); if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME) return -EROFS; @@ -542,11 +553,55 @@ int ubi_leb_unmap(struct ubi_volume_desc *desc, int lnum) if (vol->upd_marker) return -EBADF; - return ubi_eba_unmap_leb(ubi, vol_id, lnum); + return ubi_eba_unmap_leb(ubi, vol, lnum); } EXPORT_SYMBOL_GPL(ubi_leb_unmap); /** + * ubi_leb_map - map logical erasblock to a physical eraseblock. + * @desc: volume descriptor + * @lnum: logical eraseblock number + * @dtype: expected data type + * + * This function maps an un-mapped logical eraseblock @lnum to a physical + * eraseblock. This means, that after a successfull invocation of this + * function the logical eraseblock @lnum will be empty (contain only %0xFF + * bytes) and be mapped to a physical eraseblock, even if an unclean reboot + * happens. 
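A usage sketch for the ubi_leb_map() in-kernel call documented here, assuming the matching prototype is also exported through include/linux/mtd/ubi.h; the device and volume numbers are examples only.

#include <linux/err.h>
#include <linux/mtd/ubi.h>

/*
 * Illustration only: map LEB 0 of volume 0 on UBI device 0 so that it is
 * guaranteed to read back as all 0xFF even after an unclean reboot.
 */
static int example_map_leb0(void)
{
	struct ubi_volume_desc *desc;
	int err;

	desc = ubi_open_volume(0, 0, UBI_READWRITE);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	err = ubi_leb_map(desc, 0, UBI_UNKNOWN);
	if (err == -EBADMSG)
		err = 0;	/* the LEB was already mapped, nothing to do */

	ubi_close_volume(desc);
	return err;
}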
+ * + * This function returns zero in case of success, %-EBADF if the volume is + * damaged because of an interrupted update, %-EBADMSG if the logical + * eraseblock is already mapped, and other negative error codes in case of + * other failures. + */ +int ubi_leb_map(struct ubi_volume_desc *desc, int lnum, int dtype) +{ + struct ubi_volume *vol = desc->vol; + struct ubi_device *ubi = vol->ubi; + + dbg_msg("unmap LEB %d:%d", vol->vol_id, lnum); + + if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME) + return -EROFS; + + if (lnum < 0 || lnum >= vol->reserved_pebs) + return -EINVAL; + + if (dtype != UBI_LONGTERM && dtype != UBI_SHORTTERM && + dtype != UBI_UNKNOWN) + return -EINVAL; + + if (vol->upd_marker) + return -EBADF; + + if (vol->eba_tbl[lnum] >= 0) + return -EBADMSG; + + return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0, dtype); +} +EXPORT_SYMBOL_GPL(ubi_leb_map); + +/** * ubi_is_mapped - check if logical eraseblock is mapped. * @desc: volume descriptor * @lnum: logical eraseblock number diff --git a/drivers/mtd/ubi/misc.c b/drivers/mtd/ubi/misc.c index 9e2338c8e2cf..93e052812012 100644 --- a/drivers/mtd/ubi/misc.c +++ b/drivers/mtd/ubi/misc.c @@ -79,7 +79,7 @@ int ubi_check_volume(struct ubi_device *ubi, int vol_id) else size = vol->usable_leb_size; - err = ubi_eba_read_leb(ubi, vol_id, i, buf, 0, size, 1); + err = ubi_eba_read_leb(ubi, vol, i, buf, 0, size, 1); if (err) { if (err == -EBADMSG) err = 1; diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c index c7b0afc9d280..05aa3e7daba1 100644 --- a/drivers/mtd/ubi/scan.c +++ b/drivers/mtd/ubi/scan.c @@ -286,9 +286,14 @@ static int compare_lebs(struct ubi_device *ubi, const struct ubi_scan_leb *seb, * FIXME: but this is anyway obsolete and will be removed at * some point. */ - dbg_bld("using old crappy leb_ver stuff"); + if (v1 == v2) { + ubi_err("PEB %d and PEB %d have the same version %lld", + seb->pnum, pnum, v1); + return -EINVAL; + } + abs = v1 - v2; if (abs < 0) abs = -abs; @@ -390,7 +395,6 @@ out_free_buf: vfree(buf); out_free_vidh: ubi_free_vid_hdr(ubi, vh); - ubi_assert(err < 0); return err; } @@ -769,7 +773,7 @@ struct ubi_scan_leb *ubi_scan_get_free_peb(struct ubi_device *ubi, */ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, int pnum) { - long long ec; + long long uninitialized_var(ec); int err, bitflips = 0, vol_id, ec_corr = 0; dbg_bld("scan PEB %d", pnum); @@ -854,7 +858,7 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, int pnum } vol_id = be32_to_cpu(vidh->vol_id); - if (vol_id > UBI_MAX_VOLUMES && vol_id != UBI_LAYOUT_VOL_ID) { + if (vol_id > UBI_MAX_VOLUMES && vol_id != UBI_LAYOUT_VOLUME_ID) { int lnum = be32_to_cpu(vidh->lnum); /* Unsupported internal volume */ diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h index 5e941a633030..457710615261 100644 --- a/drivers/mtd/ubi/ubi.h +++ b/drivers/mtd/ubi/ubi.h @@ -94,8 +94,43 @@ enum { UBI_IO_BITFLIPS }; -extern int ubi_devices_cnt; -extern struct ubi_device *ubi_devices[]; +/** + * struct ubi_wl_entry - wear-leveling entry. + * @rb: link in the corresponding RB-tree + * @ec: erase counter + * @pnum: physical eraseblock number + * + * This data structure is used in the WL unit. Each physical eraseblock has a + * corresponding &struct wl_entry object which may be kept in different + * RB-trees. See WL unit for details. + */ +struct ubi_wl_entry { + struct rb_node rb; + int ec; + int pnum; +}; + +/** + * struct ubi_ltree_entry - an entry in the lock tree. 
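Since &struct ubi_wl_entry is now declared here in ubi.h, a sketch of how such entries are kept in the WL RB-trees, ordered by erase counter and then by PEB number; this mirrors the wl_tree_add() helper in wl.c.

#include <linux/rbtree.h>
#include "ubi.h"

static void example_wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
{
	struct rb_node **p = &root->rb_node, *parent = NULL;

	while (*p) {
		struct ubi_wl_entry *e1;

		parent = *p;
		e1 = rb_entry(parent, struct ubi_wl_entry, rb);

		/* Lower erase counters sort to the left */
		if (e->ec < e1->ec)
			p = &parent->rb_left;
		else if (e->ec > e1->ec)
			p = &parent->rb_right;
		else if (e->pnum < e1->pnum)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}

	rb_link_node(&e->rb, parent, p);
	rb_insert_color(&e->rb, root);
}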
+ * @rb: links RB-tree nodes + * @vol_id: volume ID of the locked logical eraseblock + * @lnum: locked logical eraseblock number + * @users: how many tasks are using this logical eraseblock or wait for it + * @mutex: read/write mutex to implement read/write access serialization to + * the (@vol_id, @lnum) logical eraseblock + * + * This data structure is used in the EBA unit to implement per-LEB locking. + * When a logical eraseblock is being locked - corresponding + * &struct ubi_ltree_entry object is inserted to the lock tree (@ubi->ltree). + * See EBA unit for details. + */ +struct ubi_ltree_entry { + struct rb_node rb; + int vol_id; + int lnum; + int users; + struct rw_semaphore mutex; +}; struct ubi_volume_desc; @@ -105,11 +140,10 @@ struct ubi_volume_desc; * @cdev: character device object to create character device * @ubi: reference to the UBI device description object * @vol_id: volume ID + * @ref_count: volume reference count * @readers: number of users holding this volume in read-only mode * @writers: number of users holding this volume in read-write mode * @exclusive: whether somebody holds this volume in exclusive mode - * @removed: if the volume was removed - * @checked: if this static volume was checked * * @reserved_pebs: how many physical eraseblocks are reserved for this volume * @vol_type: volume type (%UBI_DYNAMIC_VOLUME or %UBI_STATIC_VOLUME) @@ -117,21 +151,30 @@ struct ubi_volume_desc; * @used_ebs: how many logical eraseblocks in this volume contain data * @last_eb_bytes: how many bytes are stored in the last logical eraseblock * @used_bytes: how many bytes of data this volume contains - * @upd_marker: non-zero if the update marker is set for this volume - * @corrupted: non-zero if the volume is corrupted (static volumes only) * @alignment: volume alignment * @data_pad: how many bytes are not used at the end of physical eraseblocks to - * satisfy the requested alignment + * satisfy the requested alignment * @name_len: volume name length * @name: volume name * - * @updating: whether the volume is being updated * @upd_ebs: how many eraseblocks are expected to be updated - * @upd_bytes: how many bytes are expected to be received - * @upd_received: how many update bytes were already received - * @upd_buf: update buffer which is used to collect update data + * @ch_lnum: LEB number which is being changing by the atomic LEB change + * operation + * @ch_dtype: data persistency type which is being changing by the atomic LEB + * change operation + * @upd_bytes: how many bytes are expected to be received for volume update or + * atomic LEB change + * @upd_received: how many bytes were already received for volume update or + * atomic LEB change + * @upd_buf: update buffer which is used to collect update data or data for + * atomic LEB change * * @eba_tbl: EBA table of this volume (LEB->PEB mapping) + * @checked: %1 if this static volume was checked + * @corrupted: %1 if the volume is corrupted (static volumes only) + * @upd_marker: %1 if the update marker is set for this volume + * @updating: %1 if the volume is being updated + * @changing_leb: %1 if the atomic LEB change ioctl command is in progress * * @gluebi_desc: gluebi UBI volume descriptor * @gluebi_refcount: reference count of the gluebi MTD device @@ -150,11 +193,10 @@ struct ubi_volume { struct cdev cdev; struct ubi_device *ubi; int vol_id; + int ref_count; int readers; int writers; int exclusive; - int removed; - int checked; int reserved_pebs; int vol_type; @@ -162,23 +204,31 @@ struct ubi_volume { int used_ebs; int 
last_eb_bytes; long long used_bytes; - int upd_marker; - int corrupted; int alignment; int data_pad; int name_len; char name[UBI_VOL_NAME_MAX+1]; - int updating; int upd_ebs; + int ch_lnum; + int ch_dtype; long long upd_bytes; long long upd_received; void *upd_buf; int *eba_tbl; + int checked:1; + int corrupted:1; + int upd_marker:1; + int updating:1; + int changing_leb:1; #ifdef CONFIG_MTD_UBI_GLUEBI - /* Gluebi-related stuff may be compiled out */ + /* + * Gluebi-related stuff may be compiled out. + * TODO: this should not be built into UBI but should be a separate + * ubimtd driver which works on top of UBI and emulates MTD devices. + */ struct ubi_volume_desc *gluebi_desc; int gluebi_refcount; struct mtd_info gluebi_mtd; @@ -200,28 +250,31 @@ struct ubi_wl_entry; /** * struct ubi_device - UBI device description structure - * @dev: class device object to use the the Linux device model + * @dev: UBI device object to use the the Linux device model * @cdev: character device object to create character device * @ubi_num: UBI device number * @ubi_name: UBI device name - * @major: character device major number * @vol_count: number of volumes in this UBI device * @volumes: volumes of this UBI device * @volumes_lock: protects @volumes, @rsvd_pebs, @avail_pebs, beb_rsvd_pebs, - * @beb_rsvd_level, @bad_peb_count, @good_peb_count, @vol_count, @vol->readers, - * @vol->writers, @vol->exclusive, @vol->removed, @vol->mapping and - * @vol->eba_tbl. + * @beb_rsvd_level, @bad_peb_count, @good_peb_count, @vol_count, + * @vol->readers, @vol->writers, @vol->exclusive, + * @vol->ref_count, @vol->mapping and @vol->eba_tbl. + * @ref_count: count of references on the UBI device * * @rsvd_pebs: count of reserved physical eraseblocks * @avail_pebs: count of available physical eraseblocks * @beb_rsvd_pebs: how many physical eraseblocks are reserved for bad PEB - * handling + * handling * @beb_rsvd_level: normal level of PEBs reserved for bad PEB handling * + * @autoresize_vol_id: ID of the volume which has to be auto-resized at the end + * of UBI ititializetion * @vtbl_slots: how many slots are available in the volume table * @vtbl_size: size of the volume table in bytes * @vtbl: in-RAM volume table copy - * @vtbl_mutex: protects on-flash volume table + * @volumes_mutex: protects on-flash volume table and serializes volume + * changes, like creation, deletion, update, resize * * @max_ec: current highest erase counter value * @mean_ec: current mean erase counter value @@ -238,15 +291,15 @@ struct ubi_wl_entry; * @prot.pnum: protection tree indexed by physical eraseblock numbers * @prot.aec: protection tree indexed by absolute erase counter value * @wl_lock: protects the @used, @free, @prot, @lookuptbl, @abs_ec, @move_from, - * @move_to, @move_to_put @erase_pending, @wl_scheduled, and @works - * fields + * @move_to, @move_to_put @erase_pending, @wl_scheduled, and @works + * fields + * @move_mutex: serializes eraseblock moves * @wl_scheduled: non-zero if the wear-leveling was scheduled * @lookuptbl: a table to quickly find a &struct ubi_wl_entry object for any - * physical eraseblock + * physical eraseblock * @abs_ec: absolute erase counter * @move_from: physical eraseblock from where the data is being moved * @move_to: physical eraseblock where the data is being moved to - * @move_from_put: if the "from" PEB was put * @move_to_put: if the "to" PEB was put * @works: list of pending works * @works_count: count of pending works @@ -273,13 +326,13 @@ struct ubi_wl_entry; * @hdrs_min_io_size * @vid_hdr_shift: contains 
@vid_hdr_offset - @vid_hdr_aloffset * @bad_allowed: whether the MTD device admits of bad physical eraseblocks or - * not + * not * @mtd: MTD device descriptor * * @peb_buf1: a buffer of PEB size used for different purposes * @peb_buf2: another buffer of PEB size used for different purposes * @buf_mutex: proptects @peb_buf1 and @peb_buf2 - * @dbg_peb_buf: buffer of PEB size used for debugging + * @dbg_peb_buf: buffer of PEB size used for debugging * @dbg_buf_mutex: proptects @dbg_peb_buf */ struct ubi_device { @@ -287,22 +340,24 @@ struct ubi_device { struct device dev; int ubi_num; char ubi_name[sizeof(UBI_NAME_STR)+5]; - int major; int vol_count; struct ubi_volume *volumes[UBI_MAX_VOLUMES+UBI_INT_VOL_COUNT]; spinlock_t volumes_lock; + int ref_count; int rsvd_pebs; int avail_pebs; int beb_rsvd_pebs; int beb_rsvd_level; + int autoresize_vol_id; int vtbl_slots; int vtbl_size; struct ubi_vtbl_record *vtbl; - struct mutex vtbl_mutex; + struct mutex volumes_mutex; int max_ec; + /* TODO: mean_ec is not updated run-time, fix */ int mean_ec; /* EBA unit's stuff */ @@ -320,12 +375,13 @@ struct ubi_device { struct rb_root aec; } prot; spinlock_t wl_lock; + struct mutex move_mutex; + struct rw_semaphore work_sem; int wl_scheduled; struct ubi_wl_entry **lookuptbl; unsigned long long abs_ec; struct ubi_wl_entry *move_from; struct ubi_wl_entry *move_to; - int move_from_put; int move_to_put; struct list_head works; int works_count; @@ -355,15 +411,19 @@ struct ubi_device { void *peb_buf1; void *peb_buf2; struct mutex buf_mutex; + struct mutex ckvol_mutex; #ifdef CONFIG_MTD_UBI_DEBUG void *dbg_peb_buf; struct mutex dbg_buf_mutex; #endif }; +extern struct kmem_cache *ubi_wl_entry_slab; +extern struct file_operations ubi_ctrl_cdev_operations; extern struct file_operations ubi_cdev_operations; extern struct file_operations ubi_vol_cdev_operations; extern struct class *ubi_class; +extern struct mutex ubi_devices_mutex; /* vtbl.c */ int ubi_change_vtbl_record(struct ubi_device *ubi, int idx, @@ -374,13 +434,18 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si); int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req); int ubi_remove_volume(struct ubi_volume_desc *desc); int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs); -int ubi_add_volume(struct ubi_device *ubi, int vol_id); -void ubi_free_volume(struct ubi_device *ubi, int vol_id); +int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol); +void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol); /* upd.c */ -int ubi_start_update(struct ubi_device *ubi, int vol_id, long long bytes); -int ubi_more_update_data(struct ubi_device *ubi, int vol_id, +int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol, + long long bytes); +int ubi_more_update_data(struct ubi_device *ubi, struct ubi_volume *vol, const void __user *buf, int count); +int ubi_start_leb_change(struct ubi_device *ubi, struct ubi_volume *vol, + const struct ubi_leb_change_req *req); +int ubi_more_leb_change_data(struct ubi_device *ubi, struct ubi_volume *vol, + const void __user *buf, int count); /* misc.c */ int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf, int length); @@ -399,16 +464,17 @@ void ubi_gluebi_updated(struct ubi_volume *vol); #endif /* eba.c */ -int ubi_eba_unmap_leb(struct ubi_device *ubi, int vol_id, int lnum); -int ubi_eba_read_leb(struct ubi_device *ubi, int vol_id, int lnum, void *buf, - int offset, int len, int check); -int ubi_eba_write_leb(struct ubi_device 
*ubi, int vol_id, int lnum, +int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol, + int lnum); +int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, + void *buf, int offset, int len, int check); +int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, const void *buf, int offset, int len, int dtype); -int ubi_eba_write_leb_st(struct ubi_device *ubi, int vol_id, int lnum, - const void *buf, int len, int dtype, +int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol, + int lnum, const void *buf, int len, int dtype, int used_ebs); -int ubi_eba_atomic_leb_change(struct ubi_device *ubi, int vol_id, int lnum, - const void *buf, int len, int dtype); +int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol, + int lnum, const void *buf, int len, int dtype); int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, struct ubi_vid_hdr *vid_hdr); int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si); @@ -421,6 +487,7 @@ int ubi_wl_flush(struct ubi_device *ubi); int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum); int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si); void ubi_wl_close(struct ubi_device *ubi); +int ubi_thread(void *u); /* io.c */ int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset, @@ -439,6 +506,14 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum, int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum, struct ubi_vid_hdr *vid_hdr); +/* build.c */ +int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset); +int ubi_detach_mtd_dev(int ubi_num, int anyway); +struct ubi_device *ubi_get_device(int ubi_num); +void ubi_put_device(struct ubi_device *ubi); +struct ubi_device *ubi_get_by_major(int major); +int ubi_major2num(int major); + /* * ubi_rb_for_each_entry - walk an RB-tree. * @rb: a pointer to type 'struct rb_node' to to use as a loop counter @@ -523,8 +598,10 @@ static inline int ubi_io_write_data(struct ubi_device *ubi, const void *buf, */ static inline void ubi_ro_mode(struct ubi_device *ubi) { - ubi->ro_mode = 1; - ubi_warn("switch to read-only mode"); + if (!ubi->ro_mode) { + ubi->ro_mode = 1; + ubi_warn("switch to read-only mode"); + } } /** diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c index 0efc586a8328..ddaa1a56cc69 100644 --- a/drivers/mtd/ubi/upd.c +++ b/drivers/mtd/ubi/upd.c @@ -22,7 +22,8 @@ */ /* - * This file contains implementation of the volume update functionality. + * This file contains implementation of the volume update and atomic LEB change + * functionality. * * The update operation is based on the per-volume update marker which is * stored in the volume table. The update marker is set before the update @@ -45,29 +46,31 @@ /** * set_update_marker - set update marker. * @ubi: UBI device description object - * @vol_id: volume ID + * @vol: volume description object * - * This function sets the update marker flag for volume @vol_id. Returns zero + * This function sets the update marker flag for volume @vol. Returns zero * in case of success and a negative error code in case of failure. 
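A sketch of how the new build.c interface declared above might be driven from kernel code, roughly what the ubi_ctrl ioctl path does; error handling assumes get_mtd_device() returns an ERR_PTR on failure, and UBI_DEV_NUM_AUTO plus the zero (default) VID header offset are taken from ubi-user.h conventions rather than from this hunk.

#include <linux/err.h>
#include <linux/mtd/mtd.h>
#include "ubi.h"

static int example_attach(int mtd_num)
{
	struct mtd_info *mtd;
	int ubi_num;

	mtd = get_mtd_device(NULL, mtd_num);
	if (IS_ERR(mtd))
		return PTR_ERR(mtd);

	mutex_lock(&ubi_devices_mutex);
	/* UBI_DEV_NUM_AUTO: pick a free device number; 0: default VID offset */
	ubi_num = ubi_attach_mtd_dev(mtd, UBI_DEV_NUM_AUTO, 0);
	mutex_unlock(&ubi_devices_mutex);

	if (ubi_num < 0) {
		put_mtd_device(mtd);
		return ubi_num;
	}
	return 0;
}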
*/ -static int set_update_marker(struct ubi_device *ubi, int vol_id) +static int set_update_marker(struct ubi_device *ubi, struct ubi_volume *vol) { int err; struct ubi_vtbl_record vtbl_rec; - struct ubi_volume *vol = ubi->volumes[vol_id]; - dbg_msg("set update marker for volume %d", vol_id); + dbg_msg("set update marker for volume %d", vol->vol_id); if (vol->upd_marker) { - ubi_assert(ubi->vtbl[vol_id].upd_marker); + ubi_assert(ubi->vtbl[vol->vol_id].upd_marker); dbg_msg("already set"); return 0; } - memcpy(&vtbl_rec, &ubi->vtbl[vol_id], sizeof(struct ubi_vtbl_record)); + memcpy(&vtbl_rec, &ubi->vtbl[vol->vol_id], + sizeof(struct ubi_vtbl_record)); vtbl_rec.upd_marker = 1; - err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec); + mutex_lock(&ubi->volumes_mutex); + err = ubi_change_vtbl_record(ubi, vol->vol_id, &vtbl_rec); + mutex_unlock(&ubi->volumes_mutex); vol->upd_marker = 1; return err; } @@ -75,23 +78,24 @@ static int set_update_marker(struct ubi_device *ubi, int vol_id) /** * clear_update_marker - clear update marker. * @ubi: UBI device description object - * @vol_id: volume ID + * @vol: volume description object * @bytes: new data size in bytes * - * This function clears the update marker for volume @vol_id, sets new volume + * This function clears the update marker for volume @vol, sets new volume * data size and clears the "corrupted" flag (static volumes only). Returns * zero in case of success and a negative error code in case of failure. */ -static int clear_update_marker(struct ubi_device *ubi, int vol_id, long long bytes) +static int clear_update_marker(struct ubi_device *ubi, struct ubi_volume *vol, + long long bytes) { int err; uint64_t tmp; struct ubi_vtbl_record vtbl_rec; - struct ubi_volume *vol = ubi->volumes[vol_id]; - dbg_msg("clear update marker for volume %d", vol_id); + dbg_msg("clear update marker for volume %d", vol->vol_id); - memcpy(&vtbl_rec, &ubi->vtbl[vol_id], sizeof(struct ubi_vtbl_record)); + memcpy(&vtbl_rec, &ubi->vtbl[vol->vol_id], + sizeof(struct ubi_vtbl_record)); ubi_assert(vol->upd_marker && vtbl_rec.upd_marker); vtbl_rec.upd_marker = 0; @@ -106,7 +110,9 @@ static int clear_update_marker(struct ubi_device *ubi, int vol_id, long long byt vol->last_eb_bytes = vol->usable_leb_size; } - err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec); + mutex_lock(&ubi->volumes_mutex); + err = ubi_change_vtbl_record(ubi, vol->vol_id, &vtbl_rec); + mutex_unlock(&ubi->volumes_mutex); vol->upd_marker = 0; return err; } @@ -114,35 +120,36 @@ static int clear_update_marker(struct ubi_device *ubi, int vol_id, long long byt /** * ubi_start_update - start volume update. * @ubi: UBI device description object - * @vol_id: volume ID + * @vol: volume description object * @bytes: update bytes * * This function starts volume update operation. If @bytes is zero, the volume * is just wiped out. Returns zero in case of success and a negative error code * in case of failure. 
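A sketch of the pattern used by set_update_marker() and clear_update_marker() above: copy the in-RAM volume table record, modify it, and write it back under @ubi->volumes_mutex, which now serializes on-flash volume table changes. The in-memory vol->upd_marker bookkeeping is omitted here.

#include <linux/mutex.h>
#include <linux/string.h>
#include "ubi.h"

static int example_set_upd_marker(struct ubi_device *ubi,
				  struct ubi_volume *vol, int marker)
{
	struct ubi_vtbl_record vtbl_rec;
	int err;

	memcpy(&vtbl_rec, &ubi->vtbl[vol->vol_id], sizeof(vtbl_rec));
	vtbl_rec.upd_marker = marker;

	mutex_lock(&ubi->volumes_mutex);
	err = ubi_change_vtbl_record(ubi, vol->vol_id, &vtbl_rec);
	mutex_unlock(&ubi->volumes_mutex);
	return err;
}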
*/ -int ubi_start_update(struct ubi_device *ubi, int vol_id, long long bytes) +int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol, + long long bytes) { int i, err; uint64_t tmp; - struct ubi_volume *vol = ubi->volumes[vol_id]; - dbg_msg("start update of volume %d, %llu bytes", vol_id, bytes); + dbg_msg("start update of volume %d, %llu bytes", vol->vol_id, bytes); + ubi_assert(!vol->updating && !vol->changing_leb); vol->updating = 1; - err = set_update_marker(ubi, vol_id); + err = set_update_marker(ubi, vol); if (err) return err; /* Before updating - wipe out the volume */ for (i = 0; i < vol->reserved_pebs; i++) { - err = ubi_eba_unmap_leb(ubi, vol_id, i); + err = ubi_eba_unmap_leb(ubi, vol, i); if (err) return err; } if (bytes == 0) { - err = clear_update_marker(ubi, vol_id, 0); + err = clear_update_marker(ubi, vol, 0); if (err) return err; err = ubi_wl_flush(ubi); @@ -163,9 +170,42 @@ int ubi_start_update(struct ubi_device *ubi, int vol_id, long long bytes) } /** + * ubi_start_leb_change - start atomic LEB change. + * @ubi: UBI device description object + * @vol: volume description object + * @req: operation request + * + * This function starts atomic LEB change operation. Returns zero in case of + * success and a negative error code in case of failure. + */ +int ubi_start_leb_change(struct ubi_device *ubi, struct ubi_volume *vol, + const struct ubi_leb_change_req *req) +{ + ubi_assert(!vol->updating && !vol->changing_leb); + + dbg_msg("start changing LEB %d:%d, %u bytes", + vol->vol_id, req->lnum, req->bytes); + if (req->bytes == 0) + return ubi_eba_atomic_leb_change(ubi, vol, req->lnum, NULL, 0, + req->dtype); + + vol->upd_bytes = req->bytes; + vol->upd_received = 0; + vol->changing_leb = 1; + vol->ch_lnum = req->lnum; + vol->ch_dtype = req->dtype; + + vol->upd_buf = vmalloc(req->bytes); + if (!vol->upd_buf) + return -ENOMEM; + + return 0; +} + +/** * write_leb - write update data. * @ubi: UBI device description object - * @vol_id: volume ID + * @vol: volume description object * @lnum: logical eraseblock number * @buf: data to write * @len: data size @@ -191,26 +231,22 @@ int ubi_start_update(struct ubi_device *ubi, int vol_id, long long bytes) * This function returns zero in case of success and a negative error code in * case of failure. */ -static int write_leb(struct ubi_device *ubi, int vol_id, int lnum, void *buf, - int len, int used_ebs) +static int write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, + void *buf, int len, int used_ebs) { - int err, l; - struct ubi_volume *vol = ubi->volumes[vol_id]; + int err; if (vol->vol_type == UBI_DYNAMIC_VOLUME) { - l = ALIGN(len, ubi->min_io_size); - memset(buf + len, 0xFF, l - len); + len = ALIGN(len, ubi->min_io_size); + memset(buf + len, 0xFF, len - len); - l = ubi_calc_data_len(ubi, buf, l); - if (l == 0) { + len = ubi_calc_data_len(ubi, buf, len); + if (len == 0) { dbg_msg("all %d bytes contain 0xFF - skip", len); return 0; } - if (len != l) - dbg_msg("skip last %d bytes (0xFF)", len - l); - err = ubi_eba_write_leb(ubi, vol_id, lnum, buf, 0, l, - UBI_UNKNOWN); + err = ubi_eba_write_leb(ubi, vol, lnum, buf, 0, len, UBI_UNKNOWN); } else { /* * When writing static volume, and this is the last logical @@ -222,7 +258,7 @@ static int write_leb(struct ubi_device *ubi, int vol_id, int lnum, void *buf, * contain zeros, not random trash. 
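A sketch of the dynamic-volume branch of write_leb() above: the chunk is padded up to the next multiple of @ubi->min_io_size with 0xFF, then ubi_calc_data_len() trims trailing 0xFF bytes so an all-0xFF chunk is not written at all. Keeping the aligned length in a separate variable, as here, makes the pad length ("aligned - len") explicit.

#include <linux/kernel.h>
#include <linux/string.h>
#include "ubi.h"

static int example_write_dynamic(struct ubi_device *ubi,
				 struct ubi_volume *vol, int lnum,
				 void *buf, int len)
{
	int aligned = ALIGN(len, ubi->min_io_size);

	memset(buf + len, 0xFF, aligned - len);
	aligned = ubi_calc_data_len(ubi, buf, aligned);
	if (aligned == 0)
		return 0;	/* nothing but 0xFF, leave the LEB unmapped */

	return ubi_eba_write_leb(ubi, vol, lnum, buf, 0, aligned, UBI_UNKNOWN);
}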
*/ memset(buf + len, 0, vol->usable_leb_size - len); - err = ubi_eba_write_leb_st(ubi, vol_id, lnum, buf, len, + err = ubi_eba_write_leb_st(ubi, vol, lnum, buf, len, UBI_UNKNOWN, used_ebs); } @@ -236,16 +272,15 @@ static int write_leb(struct ubi_device *ubi, int vol_id, int lnum, void *buf, * @count: how much bytes to write * * This function writes more data to the volume which is being updated. It may - * be called arbitrary number of times until all of the update data arrive. - * This function returns %0 in case of success, number of bytes written during - * the last call if the whole volume update was successfully finished, and a + * be called arbitrary number of times until all the update data arriveis. This + * function returns %0 in case of success, number of bytes written during the + * last call if the whole volume update has been successfully finished, and a * negative error code in case of failure. */ -int ubi_more_update_data(struct ubi_device *ubi, int vol_id, +int ubi_more_update_data(struct ubi_device *ubi, struct ubi_volume *vol, const void __user *buf, int count) { uint64_t tmp; - struct ubi_volume *vol = ubi->volumes[vol_id]; int lnum, offs, err = 0, len, to_write = count; dbg_msg("write %d of %lld bytes, %lld already passed", @@ -290,8 +325,8 @@ int ubi_more_update_data(struct ubi_device *ubi, int vol_id, * is the last chunk, it's time to flush the buffer. */ ubi_assert(flush_len <= vol->usable_leb_size); - err = write_leb(ubi, vol_id, lnum, vol->upd_buf, - flush_len, vol->upd_ebs); + err = write_leb(ubi, vol, lnum, vol->upd_buf, flush_len, + vol->upd_ebs); if (err) return err; } @@ -318,8 +353,8 @@ int ubi_more_update_data(struct ubi_device *ubi, int vol_id, if (len == vol->usable_leb_size || vol->upd_received + len == vol->upd_bytes) { - err = write_leb(ubi, vol_id, lnum, vol->upd_buf, len, - vol->upd_ebs); + err = write_leb(ubi, vol, lnum, vol->upd_buf, + len, vol->upd_ebs); if (err) break; } @@ -333,16 +368,70 @@ int ubi_more_update_data(struct ubi_device *ubi, int vol_id, ubi_assert(vol->upd_received <= vol->upd_bytes); if (vol->upd_received == vol->upd_bytes) { /* The update is finished, clear the update marker */ - err = clear_update_marker(ubi, vol_id, vol->upd_bytes); + err = clear_update_marker(ubi, vol, vol->upd_bytes); if (err) return err; err = ubi_wl_flush(ubi); if (err == 0) { + vol->updating = 0; err = to_write; vfree(vol->upd_buf); - vol->updating = 0; } } return err; } + +/** + * ubi_more_leb_change_data - accept more data for atomic LEB change. + * @vol: volume description object + * @buf: write data (user-space memory buffer) + * @count: how much bytes to write + * + * This function accepts more data to the volume which is being under the + * "atomic LEB change" operation. It may be called arbitrary number of times + * until all data arrives. This function returns %0 in case of success, number + * of bytes written during the last call if the whole "atomic LEB change" + * operation has been successfully finished, and a negative error code in case + * of failure. 
+ */ +int ubi_more_leb_change_data(struct ubi_device *ubi, struct ubi_volume *vol, + const void __user *buf, int count) +{ + int err; + + dbg_msg("write %d of %lld bytes, %lld already passed", + count, vol->upd_bytes, vol->upd_received); + + if (ubi->ro_mode) + return -EROFS; + + if (vol->upd_received + count > vol->upd_bytes) + count = vol->upd_bytes - vol->upd_received; + + err = copy_from_user(vol->upd_buf + vol->upd_received, buf, count); + if (err) + return -EFAULT; + + vol->upd_received += count; + + if (vol->upd_received == vol->upd_bytes) { + int len = ALIGN((int)vol->upd_bytes, ubi->min_io_size); + + memset(vol->upd_buf + vol->upd_bytes, 0xFF, len - vol->upd_bytes); + len = ubi_calc_data_len(ubi, vol->upd_buf, len); + err = ubi_eba_atomic_leb_change(ubi, vol, vol->ch_lnum, + vol->upd_buf, len, UBI_UNKNOWN); + if (err) + return err; + } + + ubi_assert(vol->upd_received <= vol->upd_bytes); + if (vol->upd_received == vol->upd_bytes) { + vol->changing_leb = 0; + err = count; + vfree(vol->upd_buf); + } + + return err; +} diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c index 88629a320c2b..a3ca2257e601 100644 --- a/drivers/mtd/ubi/vmt.c +++ b/drivers/mtd/ubi/vmt.c @@ -63,21 +63,30 @@ static struct device_attribute attr_vol_upd_marker = * B. process 2 removes volume Y; * C. process 1 starts reading the /<sysfs>/class/ubi/ubiX_Y/reserved_ebs file; * - * What we want to do in a situation like that is to return error when the file - * is read. This is done by means of the 'removed' flag and the 'vol_lock' of - * the UBI volume description object. + * In this situation, this function will return %-ENODEV because it will find + * out that the volume was removed from the @ubi->volumes array. */ static ssize_t vol_attribute_show(struct device *dev, struct device_attribute *attr, char *buf) { int ret; struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev); + struct ubi_device *ubi; - spin_lock(&vol->ubi->volumes_lock); - if (vol->removed) { - spin_unlock(&vol->ubi->volumes_lock); + ubi = ubi_get_device(vol->ubi->ubi_num); + if (!ubi) + return -ENODEV; + + spin_lock(&ubi->volumes_lock); + if (!ubi->volumes[vol->vol_id]) { + spin_unlock(&ubi->volumes_lock); + ubi_put_device(ubi); return -ENODEV; } + /* Take a reference to prevent volume removal */ + vol->ref_count += 1; + spin_unlock(&ubi->volumes_lock); + if (attr == &attr_vol_reserved_ebs) ret = sprintf(buf, "%d\n", vol->reserved_pebs); else if (attr == &attr_vol_type) { @@ -94,15 +103,22 @@ static ssize_t vol_attribute_show(struct device *dev, ret = sprintf(buf, "%d\n", vol->corrupted); else if (attr == &attr_vol_alignment) ret = sprintf(buf, "%d\n", vol->alignment); - else if (attr == &attr_vol_usable_eb_size) { + else if (attr == &attr_vol_usable_eb_size) ret = sprintf(buf, "%d\n", vol->usable_leb_size); - } else if (attr == &attr_vol_data_bytes) + else if (attr == &attr_vol_data_bytes) ret = sprintf(buf, "%lld\n", vol->used_bytes); else if (attr == &attr_vol_upd_marker) ret = sprintf(buf, "%d\n", vol->upd_marker); else - BUG(); - spin_unlock(&vol->ubi->volumes_lock); + /* This must be a bug */ + ret = -EINVAL; + + /* We've done the operation, drop volume and UBI device references */ + spin_lock(&ubi->volumes_lock); + vol->ref_count -= 1; + ubi_assert(vol->ref_count >= 0); + spin_unlock(&ubi->volumes_lock); + ubi_put_device(ubi); return ret; } @@ -110,7 +126,7 @@ static ssize_t vol_attribute_show(struct device *dev, static void vol_release(struct device *dev) { struct ubi_volume *vol = container_of(dev, struct 
ubi_volume, dev); - ubi_assert(vol->removed); + kfree(vol); } @@ -152,9 +168,7 @@ static int volume_sysfs_init(struct ubi_device *ubi, struct ubi_volume *vol) if (err) return err; err = device_create_file(&vol->dev, &attr_vol_upd_marker); - if (err) - return err; - return 0; + return err; } /** @@ -180,16 +194,18 @@ static void volume_sysfs_close(struct ubi_volume *vol) * @req: volume creation request * * This function creates volume described by @req. If @req->vol_id id - * %UBI_VOL_NUM_AUTO, this function automatically assigne ID to the new volume + * %UBI_VOL_NUM_AUTO, this function automatically assign ID to the new volume * and saves it in @req->vol_id. Returns zero in case of success and a negative - * error code in case of failure. + * error code in case of failure. Note, the caller has to have the + * @ubi->volumes_mutex locked. */ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) { - int i, err, vol_id = req->vol_id; + int i, err, vol_id = req->vol_id, dont_free = 0; struct ubi_volume *vol; struct ubi_vtbl_record vtbl_rec; uint64_t bytes; + dev_t dev; if (ubi->ro_mode) return -EROFS; @@ -199,7 +215,6 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) return -ENOMEM; spin_lock(&ubi->volumes_lock); - if (vol_id == UBI_VOL_NUM_AUTO) { /* Find unused volume ID */ dbg_msg("search for vacant volume ID"); @@ -252,6 +267,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) } ubi->avail_pebs -= vol->reserved_pebs; ubi->rsvd_pebs += vol->reserved_pebs; + spin_unlock(&ubi->volumes_lock); vol->vol_id = vol_id; vol->alignment = req->alignment; @@ -259,10 +275,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) vol->vol_type = req->vol_type; vol->name_len = req->name_len; memcpy(vol->name, req->name, vol->name_len + 1); - vol->exclusive = 1; vol->ubi = ubi; - ubi->volumes[vol_id] = vol; - spin_unlock(&ubi->volumes_lock); /* * Finish all pending erases because there may be some LEBs belonging @@ -299,9 +312,10 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) /* Register character device for the volume */ cdev_init(&vol->cdev, &ubi_vol_cdev_operations); vol->cdev.owner = THIS_MODULE; - err = cdev_add(&vol->cdev, MKDEV(ubi->major, vol_id + 1), 1); + dev = MKDEV(MAJOR(ubi->cdev.dev), vol_id + 1); + err = cdev_add(&vol->cdev, dev, 1); if (err) { - ubi_err("cannot add character device for volume %d", vol_id); + ubi_err("cannot add character device"); goto out_mapping; } @@ -311,12 +325,15 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) vol->dev.release = vol_release; vol->dev.parent = &ubi->dev; - vol->dev.devt = MKDEV(ubi->major, vol->vol_id + 1); + vol->dev.devt = dev; vol->dev.class = ubi_class; + sprintf(&vol->dev.bus_id[0], "%s_%d", ubi->ubi_name, vol->vol_id); err = device_register(&vol->dev); - if (err) + if (err) { + ubi_err("cannot register device"); goto out_gluebi; + } err = volume_sysfs_init(ubi, vol); if (err) @@ -339,15 +356,27 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) goto out_sysfs; spin_lock(&ubi->volumes_lock); + ubi->volumes[vol_id] = vol; ubi->vol_count += 1; - vol->exclusive = 0; spin_unlock(&ubi->volumes_lock); paranoid_check_volumes(ubi); return 0; +out_sysfs: + /* + * We have registered our device, we should not free the volume* + * description object in this function in case of an error - it is + * freed by the release function. 
+ * + * Get device reference to prevent the release function from being + * called just after sysfs has been closed. + */ + dont_free = 1; + get_device(&vol->dev); + volume_sysfs_close(vol); out_gluebi: - err = ubi_destroy_gluebi(vol); + ubi_destroy_gluebi(vol); out_cdev: cdev_del(&vol->cdev); out_mapping: @@ -356,26 +385,13 @@ out_acc: spin_lock(&ubi->volumes_lock); ubi->rsvd_pebs -= vol->reserved_pebs; ubi->avail_pebs += vol->reserved_pebs; - ubi->volumes[vol_id] = NULL; out_unlock: spin_unlock(&ubi->volumes_lock); - kfree(vol); - return err; - - /* - * We are registered, so @vol is destroyed in the release function and - * we have to de-initialize differently. - */ -out_sysfs: - err = ubi_destroy_gluebi(vol); - cdev_del(&vol->cdev); - kfree(vol->eba_tbl); - spin_lock(&ubi->volumes_lock); - ubi->rsvd_pebs -= vol->reserved_pebs; - ubi->avail_pebs += vol->reserved_pebs; - ubi->volumes[vol_id] = NULL; - spin_unlock(&ubi->volumes_lock); - volume_sysfs_close(vol); + if (dont_free) + put_device(&vol->dev); + else + kfree(vol); + ubi_err("cannot create volume %d, error %d", vol_id, err); return err; } @@ -385,7 +401,8 @@ out_sysfs: * * This function removes volume described by @desc. The volume has to be opened * in "exclusive" mode. Returns zero in case of success and a negative error - * code in case of failure. + * code in case of failure. The caller has to have the @ubi->volumes_mutex + * locked. */ int ubi_remove_volume(struct ubi_volume_desc *desc) { @@ -400,30 +417,36 @@ int ubi_remove_volume(struct ubi_volume_desc *desc) if (ubi->ro_mode) return -EROFS; + spin_lock(&ubi->volumes_lock); + if (vol->ref_count > 1) { + /* + * The volume is busy, probably someone is reading one of its + * sysfs files. + */ + err = -EBUSY; + goto out_unlock; + } + ubi->volumes[vol_id] = NULL; + spin_unlock(&ubi->volumes_lock); + err = ubi_destroy_gluebi(vol); if (err) - return err; + goto out_err; err = ubi_change_vtbl_record(ubi, vol_id, NULL); if (err) - return err; + goto out_err; for (i = 0; i < vol->reserved_pebs; i++) { - err = ubi_eba_unmap_leb(ubi, vol_id, i); + err = ubi_eba_unmap_leb(ubi, vol, i); if (err) - return err; + goto out_err; } - spin_lock(&ubi->volumes_lock); - vol->removed = 1; - ubi->volumes[vol_id] = NULL; - spin_unlock(&ubi->volumes_lock); - kfree(vol->eba_tbl); vol->eba_tbl = NULL; cdev_del(&vol->cdev); volume_sysfs_close(vol); - kfree(desc); spin_lock(&ubi->volumes_lock); ubi->rsvd_pebs -= reserved_pebs; @@ -441,8 +464,15 @@ int ubi_remove_volume(struct ubi_volume_desc *desc) spin_unlock(&ubi->volumes_lock); paranoid_check_volumes(ubi); - module_put(THIS_MODULE); return 0; + +out_err: + ubi_err("cannot remove volume %d, error %d", vol_id, err); + spin_lock(&ubi->volumes_lock); + ubi->volumes[vol_id] = vol; +out_unlock: + spin_unlock(&ubi->volumes_lock); + return err; } /** @@ -450,8 +480,9 @@ int ubi_remove_volume(struct ubi_volume_desc *desc) * @desc: volume descriptor * @reserved_pebs: new size in physical eraseblocks * - * This function returns zero in case of success, and a negative error code in - * case of failure. + * This function re-sizes the volume and returns zero in case of success, and a + * negative error code in case of failure. The caller has to have the + * @ubi->volumes_mutex locked. 
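The out_sysfs comment above relies on a general driver-model rule; a generic sketch with placeholder names (not UBI code): once device_register() has succeeded, the embedding object must be freed by the ->release() callback via put_device(), never by a direct kfree().

#include <linux/device.h>
#include <linux/slab.h>

struct example_obj {
	struct device dev;
	/* other fields */
};

static void example_release(struct device *dev)
{
	kfree(container_of(dev, struct example_obj, dev));
}

static int example_register(struct example_obj *obj)
{
	obj->dev.release = example_release;
	return device_register(&obj->dev);
}

static void example_teardown_registered(struct example_obj *obj)
{
	/*
	 * Hold an extra reference while tearing things down so ->release()
	 * cannot run under us; the final put_device() frees @obj.
	 */
	get_device(&obj->dev);
	device_unregister(&obj->dev);
	put_device(&obj->dev);
}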
*/ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs) { @@ -466,8 +497,6 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs) dbg_msg("re-size volume %d to from %d to %d PEBs", vol_id, vol->reserved_pebs, reserved_pebs); - ubi_assert(desc->mode == UBI_EXCLUSIVE); - ubi_assert(vol == ubi->volumes[vol_id]); if (vol->vol_type == UBI_STATIC_VOLUME && reserved_pebs < vol->used_ebs) { @@ -487,6 +516,14 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs) for (i = 0; i < reserved_pebs; i++) new_mapping[i] = UBI_LEB_UNMAPPED; + spin_lock(&ubi->volumes_lock); + if (vol->ref_count > 1) { + spin_unlock(&ubi->volumes_lock); + err = -EBUSY; + goto out_free; + } + spin_unlock(&ubi->volumes_lock); + /* Reserve physical eraseblocks */ pebs = reserved_pebs - vol->reserved_pebs; if (pebs > 0) { @@ -516,7 +553,7 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs) if (pebs < 0) { for (i = 0; i < -pebs; i++) { - err = ubi_eba_unmap_leb(ubi, vol_id, reserved_pebs + i); + err = ubi_eba_unmap_leb(ubi, vol, reserved_pebs + i); if (err) goto out_acc; } @@ -565,27 +602,28 @@ out_free: /** * ubi_add_volume - add volume. * @ubi: UBI device description object - * @vol_id: volume ID + * @vol: volume description object * - * This function adds an existin volume and initializes all its data - * structures. Returnes zero in case of success and a negative error code in + * This function adds an existing volume and initializes all its data + * structures. Returns zero in case of success and a negative error code in * case of failure. */ -int ubi_add_volume(struct ubi_device *ubi, int vol_id) +int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol) { - int err; - struct ubi_volume *vol = ubi->volumes[vol_id]; + int err, vol_id = vol->vol_id; + dev_t dev; dbg_msg("add volume %d", vol_id); ubi_dbg_dump_vol_info(vol); - ubi_assert(vol); /* Register character device for the volume */ cdev_init(&vol->cdev, &ubi_vol_cdev_operations); vol->cdev.owner = THIS_MODULE; - err = cdev_add(&vol->cdev, MKDEV(ubi->major, vol->vol_id + 1), 1); + dev = MKDEV(MAJOR(ubi->cdev.dev), vol->vol_id + 1); + err = cdev_add(&vol->cdev, dev, 1); if (err) { - ubi_err("cannot add character device for volume %d", vol_id); + ubi_err("cannot add character device for volume %d, error %d", + vol_id, err); return err; } @@ -595,7 +633,7 @@ int ubi_add_volume(struct ubi_device *ubi, int vol_id) vol->dev.release = vol_release; vol->dev.parent = &ubi->dev; - vol->dev.devt = MKDEV(ubi->major, vol->vol_id + 1); + vol->dev.devt = dev; vol->dev.class = ubi_class; sprintf(&vol->dev.bus_id[0], "%s_%d", ubi->ubi_name, vol->vol_id); err = device_register(&vol->dev); @@ -623,22 +661,19 @@ out_cdev: /** * ubi_free_volume - free volume. * @ubi: UBI device description object - * @vol_id: volume ID + * @vol: volume description object * - * This function frees all resources for volume @vol_id but does not remove it. + * This function frees all resources for volume @vol but does not remove it. * Used only when the UBI device is detached. 
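A sketch of the "busy" test that the remove and resize paths now perform under @ubi->volumes_lock: a reference count above one means another user (for example a sysfs reader) still holds the volume, so the operation is refused instead of racing with it. The helper name is hypothetical.

#include <linux/errno.h>
#include <linux/spinlock.h>
#include "ubi.h"

static int example_check_not_busy(struct ubi_device *ubi,
				  struct ubi_volume *vol)
{
	int busy;

	spin_lock(&ubi->volumes_lock);
	busy = vol->ref_count > 1;
	spin_unlock(&ubi->volumes_lock);

	return busy ? -EBUSY : 0;
}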
*/ -void ubi_free_volume(struct ubi_device *ubi, int vol_id) +void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol) { int err; - struct ubi_volume *vol = ubi->volumes[vol_id]; - dbg_msg("free volume %d", vol_id); - ubi_assert(vol); + dbg_msg("free volume %d", vol->vol_id); - vol->removed = 1; + ubi->volumes[vol->vol_id] = NULL; err = ubi_destroy_gluebi(vol); - ubi->volumes[vol_id] = NULL; cdev_del(&vol->cdev); volume_sysfs_close(vol); } @@ -708,11 +743,6 @@ static void paranoid_check_volume(struct ubi_device *ubi, int vol_id) goto fail; } - if (vol->upd_marker != 0 && vol->upd_marker != 1) { - ubi_err("bad upd_marker"); - goto fail; - } - if (vol->upd_marker && vol->corrupted) { dbg_err("update marker and corrupted simultaneously"); goto fail; @@ -747,7 +777,7 @@ static void paranoid_check_volume(struct ubi_device *ubi, int vol_id) n = (long long)vol->used_ebs * vol->usable_leb_size; if (vol->vol_type == UBI_DYNAMIC_VOLUME) { - if (vol->corrupted != 0) { + if (vol->corrupted) { ubi_err("corrupted dynamic volume"); goto fail; } @@ -764,10 +794,6 @@ static void paranoid_check_volume(struct ubi_device *ubi, int vol_id) goto fail; } } else { - if (vol->corrupted != 0 && vol->corrupted != 1) { - ubi_err("bad corrupted"); - goto fail; - } if (vol->used_ebs < 0 || vol->used_ebs > vol->reserved_pebs) { ubi_err("bad used_ebs"); goto fail; @@ -820,9 +846,7 @@ static void paranoid_check_volumes(struct ubi_device *ubi) { int i; - mutex_lock(&ubi->vtbl_mutex); for (i = 0; i < ubi->vtbl_slots; i++) paranoid_check_volume(ubi, i); - mutex_unlock(&ubi->vtbl_mutex); } #endif diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c index 25b3bd61c7ec..56fc3fbce838 100644 --- a/drivers/mtd/ubi/vtbl.c +++ b/drivers/mtd/ubi/vtbl.c @@ -86,8 +86,10 @@ int ubi_change_vtbl_record(struct ubi_device *ubi, int idx, { int i, err; uint32_t crc; + struct ubi_volume *layout_vol; ubi_assert(idx >= 0 && idx < ubi->vtbl_slots); + layout_vol = ubi->volumes[vol_id2idx(ubi, UBI_LAYOUT_VOLUME_ID)]; if (!vtbl_rec) vtbl_rec = &empty_vtbl_record; @@ -96,31 +98,25 @@ int ubi_change_vtbl_record(struct ubi_device *ubi, int idx, vtbl_rec->crc = cpu_to_be32(crc); } - mutex_lock(&ubi->vtbl_mutex); memcpy(&ubi->vtbl[idx], vtbl_rec, sizeof(struct ubi_vtbl_record)); for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) { - err = ubi_eba_unmap_leb(ubi, UBI_LAYOUT_VOL_ID, i); - if (err) { - mutex_unlock(&ubi->vtbl_mutex); + err = ubi_eba_unmap_leb(ubi, layout_vol, i); + if (err) return err; - } - err = ubi_eba_write_leb(ubi, UBI_LAYOUT_VOL_ID, i, ubi->vtbl, 0, + + err = ubi_eba_write_leb(ubi, layout_vol, i, ubi->vtbl, 0, ubi->vtbl_size, UBI_LONGTERM); - if (err) { - mutex_unlock(&ubi->vtbl_mutex); + if (err) return err; - } } paranoid_vtbl_check(ubi); - mutex_unlock(&ubi->vtbl_mutex); - return ubi_wl_flush(ubi); + return 0; } /** - * vol_til_check - check if volume table is not corrupted and contains sensible - * data. - * + * vtbl_check - check if volume table is not corrupted and contains sensible + * data. * @ubi: UBI device description object * @vtbl: volume table * @@ -273,7 +269,7 @@ static int create_vtbl(struct ubi_device *ubi, struct ubi_scan_info *si, * this volume table copy was found during scanning. It has to be wiped * out. 
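For clarity, the volume-table commit loop from the reworked ubi_change_vtbl_record() above, pulled out as a stand-alone sketch: every copy of the table is unmapped and rewritten through the normal EBA path of the internal layout volume.

#include "ubi.h"

static int example_commit_vtbl(struct ubi_device *ubi,
			       struct ubi_volume *layout_vol)
{
	int i, err;

	for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) {
		err = ubi_eba_unmap_leb(ubi, layout_vol, i);
		if (err)
			return err;

		err = ubi_eba_write_leb(ubi, layout_vol, i, ubi->vtbl, 0,
					ubi->vtbl_size, UBI_LONGTERM);
		if (err)
			return err;
	}
	return 0;
}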
*/ - sv = ubi_scan_find_sv(si, UBI_LAYOUT_VOL_ID); + sv = ubi_scan_find_sv(si, UBI_LAYOUT_VOLUME_ID); if (sv) old_seb = ubi_scan_find_seb(sv, copy); @@ -285,7 +281,7 @@ retry: } vid_hdr->vol_type = UBI_VID_DYNAMIC; - vid_hdr->vol_id = cpu_to_be32(UBI_LAYOUT_VOL_ID); + vid_hdr->vol_id = cpu_to_be32(UBI_LAYOUT_VOLUME_ID); vid_hdr->compat = UBI_LAYOUT_VOLUME_COMPAT; vid_hdr->data_size = vid_hdr->used_ebs = vid_hdr->data_pad = cpu_to_be32(0); @@ -518,6 +514,17 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si, vol->name[vol->name_len] = '\0'; vol->vol_id = i; + if (vtbl[i].flags & UBI_VTBL_AUTORESIZE_FLG) { + /* Auto re-size flag may be set only for one volume */ + if (ubi->autoresize_vol_id != -1) { + ubi_err("more then one auto-resize volume (%d " + "and %d)", ubi->autoresize_vol_id, i); + return -EINVAL; + } + + ubi->autoresize_vol_id = i; + } + ubi_assert(!ubi->volumes[i]); ubi->volumes[i] = vol; ubi->vol_count += 1; @@ -568,6 +575,7 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si, vol->last_eb_bytes = sv->last_data_size; } + /* And add the layout volume */ vol = kzalloc(sizeof(struct ubi_volume), GFP_KERNEL); if (!vol) return -ENOMEM; @@ -582,7 +590,8 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si, vol->last_eb_bytes = vol->reserved_pebs; vol->used_bytes = (long long)vol->used_ebs * (ubi->leb_size - vol->data_pad); - vol->vol_id = UBI_LAYOUT_VOL_ID; + vol->vol_id = UBI_LAYOUT_VOLUME_ID; + vol->ref_count = 1; ubi_assert(!ubi->volumes[i]); ubi->volumes[vol_id2idx(ubi, vol->vol_id)] = vol; @@ -734,7 +743,7 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si) ubi->vtbl_size = ubi->vtbl_slots * UBI_VTBL_RECORD_SIZE; ubi->vtbl_size = ALIGN(ubi->vtbl_size, ubi->min_io_size); - sv = ubi_scan_find_sv(si, UBI_LAYOUT_VOL_ID); + sv = ubi_scan_find_sv(si, UBI_LAYOUT_VOLUME_ID); if (!sv) { /* * No logical eraseblocks belonging to the layout volume were diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index 6330c8cc72b5..a471a491f0ab 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c @@ -117,21 +117,6 @@ #define WL_MAX_FAILURES 32 /** - * struct ubi_wl_entry - wear-leveling entry. - * @rb: link in the corresponding RB-tree - * @ec: erase counter - * @pnum: physical eraseblock number - * - * Each physical eraseblock has a corresponding &struct wl_entry object which - * may be kept in different RB-trees. - */ -struct ubi_wl_entry { - struct rb_node rb; - int ec; - int pnum; -}; - -/** * struct ubi_wl_prot_entry - PEB protection entry. * @rb_pnum: link in the @wl->prot.pnum RB-tree * @rb_aec: link in the @wl->prot.aec RB-tree @@ -216,9 +201,6 @@ static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e, #define paranoid_check_in_wl_tree(e, root) #endif -/* Slab cache for wear-leveling entries */ -static struct kmem_cache *wl_entries_slab; - /** * wl_tree_add - add a wear-leveling entry to a WL RB-tree. * @e: the wear-leveling entry to add @@ -267,15 +249,26 @@ static int do_work(struct ubi_device *ubi) int err; struct ubi_work *wrk; - spin_lock(&ubi->wl_lock); + cond_resched(); + /* + * @ubi->work_sem is used to synchronize with the workers. Workers take + * it in read mode, so many of them may be doing works at a time. But + * the queue flush code has to be sure the whole queue of works is + * done, and it takes the mutex in write mode. 
+ */ + down_read(&ubi->work_sem); + spin_lock(&ubi->wl_lock); if (list_empty(&ubi->works)) { spin_unlock(&ubi->wl_lock); + up_read(&ubi->work_sem); return 0; } wrk = list_entry(ubi->works.next, struct ubi_work, list); list_del(&wrk->list); + ubi->works_count -= 1; + ubi_assert(ubi->works_count >= 0); spin_unlock(&ubi->wl_lock); /* @@ -286,11 +279,8 @@ static int do_work(struct ubi_device *ubi) err = wrk->func(ubi, wrk, 0); if (err) ubi_err("work failed with error code %d", err); + up_read(&ubi->work_sem); - spin_lock(&ubi->wl_lock); - ubi->works_count -= 1; - ubi_assert(ubi->works_count >= 0); - spin_unlock(&ubi->wl_lock); return err; } @@ -549,8 +539,12 @@ retry: * prot_tree_del - remove a physical eraseblock from the protection trees * @ubi: UBI device description object * @pnum: the physical eraseblock to remove + * + * This function returns PEB @pnum from the protection trees and returns zero + * in case of success and %-ENODEV if the PEB was not found in the protection + * trees. */ -static void prot_tree_del(struct ubi_device *ubi, int pnum) +static int prot_tree_del(struct ubi_device *ubi, int pnum) { struct rb_node *p; struct ubi_wl_prot_entry *pe = NULL; @@ -561,7 +555,7 @@ static void prot_tree_del(struct ubi_device *ubi, int pnum) pe = rb_entry(p, struct ubi_wl_prot_entry, rb_pnum); if (pnum == pe->e->pnum) - break; + goto found; if (pnum < pe->e->pnum) p = p->rb_left; @@ -569,10 +563,14 @@ static void prot_tree_del(struct ubi_device *ubi, int pnum) p = p->rb_right; } + return -ENODEV; + +found: ubi_assert(pe->e->pnum == pnum); rb_erase(&pe->rb_aec, &ubi->prot.aec); rb_erase(&pe->rb_pnum, &ubi->prot.pnum); kfree(pe); + return 0; } /** @@ -744,7 +742,8 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, int cancel) { - int err, put = 0; + int err, put = 0, scrubbing = 0, protect = 0; + struct ubi_wl_prot_entry *uninitialized_var(pe); struct ubi_wl_entry *e1, *e2; struct ubi_vid_hdr *vid_hdr; @@ -757,21 +756,17 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, if (!vid_hdr) return -ENOMEM; + mutex_lock(&ubi->move_mutex); spin_lock(&ubi->wl_lock); + ubi_assert(!ubi->move_from && !ubi->move_to); + ubi_assert(!ubi->move_to_put); - /* - * Only one WL worker at a time is supported at this implementation, so - * make sure a PEB is not being moved already. - */ - if (ubi->move_to || !ubi->free.rb_node || + if (!ubi->free.rb_node || (!ubi->used.rb_node && !ubi->scrub.rb_node)) { /* - * Only one WL worker at a time is supported at this - * implementation, so if a LEB is already being moved, cancel. - * - * No free physical eraseblocks? Well, we cancel wear-leveling - * then. It will be triggered again when a free physical - * eraseblock appears. + * No free physical eraseblocks? Well, they must be waiting in + * the queue to be erased. Cancel movement - it will be + * triggered again when a free physical eraseblock appears. * * No used physical eraseblocks? They must be temporarily * protected from being moved. 
They will be moved to the @@ -780,10 +775,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, */ dbg_wl("cancel WL, a list is empty: free %d, used %d", !ubi->free.rb_node, !ubi->used.rb_node); - ubi->wl_scheduled = 0; - spin_unlock(&ubi->wl_lock); - ubi_free_vid_hdr(ubi, vid_hdr); - return 0; + goto out_cancel; } if (!ubi->scrub.rb_node) { @@ -798,27 +790,24 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) { dbg_wl("no WL needed: min used EC %d, max free EC %d", e1->ec, e2->ec); - ubi->wl_scheduled = 0; - spin_unlock(&ubi->wl_lock); - ubi_free_vid_hdr(ubi, vid_hdr); - return 0; + goto out_cancel; } paranoid_check_in_wl_tree(e1, &ubi->used); rb_erase(&e1->rb, &ubi->used); dbg_wl("move PEB %d EC %d to PEB %d EC %d", e1->pnum, e1->ec, e2->pnum, e2->ec); } else { + /* Perform scrubbing */ + scrubbing = 1; e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, rb); e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF); paranoid_check_in_wl_tree(e1, &ubi->scrub); - rb_erase(&e1->rb, &ubi->scrub); + rb_erase(&e1->rb, &ubi->scrub); dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum); } paranoid_check_in_wl_tree(e2, &ubi->free); rb_erase(&e2->rb, &ubi->free); - ubi_assert(!ubi->move_from && !ubi->move_to); - ubi_assert(!ubi->move_to_put && !ubi->move_from_put); ubi->move_from = e1; ubi->move_to = e2; spin_unlock(&ubi->wl_lock); @@ -828,6 +817,10 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, * We so far do not know which logical eraseblock our physical * eraseblock (@e1) belongs to. We have to read the volume identifier * header first. + * + * Note, we are protected from this PEB being unmapped and erased. The + * 'ubi_wl_put_peb()' would wait for moving to be finished if the PEB + * which is being moved was unmapped. */ err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0); @@ -842,32 +835,51 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, * likely have the VID header in place. */ dbg_wl("PEB %d has no VID header", e1->pnum); - err = 0; - } else { - ubi_err("error %d while reading VID header from PEB %d", - err, e1->pnum); - if (err > 0) - err = -EIO; + goto out_not_moved; } - goto error; + + ubi_err("error %d while reading VID header from PEB %d", + err, e1->pnum); + if (err > 0) + err = -EIO; + goto out_error; } err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr); if (err) { - if (err == UBI_IO_BITFLIPS) - err = 0; - goto error; + + if (err < 0) + goto out_error; + if (err == 1) + goto out_not_moved; + + /* + * For some reason the LEB was not moved - it might be because + * the volume is being deleted. We should prevent this PEB from + * being selected for wear-levelling movement for some "time", + * so put it to the protection tree. 
+ */ + + dbg_wl("cancelled moving PEB %d", e1->pnum); + pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS); + if (!pe) { + err = -ENOMEM; + goto out_error; + } + + protect = 1; } ubi_free_vid_hdr(ubi, vid_hdr); spin_lock(&ubi->wl_lock); + if (protect) + prot_tree_add(ubi, e1, pe, protect); if (!ubi->move_to_put) wl_tree_add(e2, &ubi->used); else put = 1; ubi->move_from = ubi->move_to = NULL; - ubi->move_from_put = ubi->move_to_put = 0; - ubi->wl_scheduled = 0; + ubi->move_to_put = ubi->wl_scheduled = 0; spin_unlock(&ubi->wl_lock); if (put) { @@ -877,62 +889,67 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, */ dbg_wl("PEB %d was put meanwhile, erase", e2->pnum); err = schedule_erase(ubi, e2, 0); - if (err) { - kmem_cache_free(wl_entries_slab, e2); - ubi_ro_mode(ubi); - } + if (err) + goto out_error; } - err = schedule_erase(ubi, e1, 0); - if (err) { - kmem_cache_free(wl_entries_slab, e1); - ubi_ro_mode(ubi); + if (!protect) { + err = schedule_erase(ubi, e1, 0); + if (err) + goto out_error; } + dbg_wl("done"); - return err; + mutex_unlock(&ubi->move_mutex); + return 0; /* - * Some error occurred. @e1 was not changed, so return it back. @e2 - * might be changed, schedule it for erasure. + * For some reasons the LEB was not moved, might be an error, might be + * something else. @e1 was not changed, so return it back. @e2 might + * be changed, schedule it for erasure. */ -error: - if (err) - dbg_wl("error %d occurred, cancel operation", err); - ubi_assert(err <= 0); - +out_not_moved: ubi_free_vid_hdr(ubi, vid_hdr); spin_lock(&ubi->wl_lock); - ubi->wl_scheduled = 0; - if (ubi->move_from_put) - put = 1; + if (scrubbing) + wl_tree_add(e1, &ubi->scrub); else wl_tree_add(e1, &ubi->used); ubi->move_from = ubi->move_to = NULL; - ubi->move_from_put = ubi->move_to_put = 0; + ubi->move_to_put = ubi->wl_scheduled = 0; spin_unlock(&ubi->wl_lock); - if (put) { - /* - * Well, the target PEB was put meanwhile, schedule it for - * erasure. 
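[Editorial aside] The "cancelled move" branch above shows a general trick: when work on a resource is aborted for a transient reason, park the resource in a protected set for a while so the selection logic does not immediately pick it again and spin. Below is a toy version of such a cool-down set, using a fixed array instead of the driver's RB-trees; all names (prot_entry, protect, is_protected) are invented for illustration.

/* Sketch: temporarily protect items from re-selection after a cancelled
 * operation.  Fixed-size table instead of RB-trees; illustrative only. */
#include <stdbool.h>
#include <stdio.h>

#define MAX_PROT 8

struct prot_entry {
        int  id;        /* e.g. a physical eraseblock number */
        long until;     /* abstract "time" when protection expires */
        bool used;
};

static struct prot_entry prot[MAX_PROT];

static bool protect(int id, long now, long duration)
{
        for (int i = 0; i < MAX_PROT; i++) {
                if (!prot[i].used) {
                        prot[i] = (struct prot_entry){ id, now + duration, true };
                        return true;
                }
        }
        return false;   /* table full - caller may simply skip protection */
}

static bool is_protected(int id, long now)
{
        for (int i = 0; i < MAX_PROT; i++) {
                if (prot[i].used && prot[i].id == id) {
                        if (prot[i].until > now)
                                return true;
                        prot[i].used = false;   /* expired - drop it */
                }
        }
        return false;
}

int main(void)
{
        protect(42, /*now*/0, /*duration*/10);
        printf("%d %d\n", is_protected(42, 5), is_protected(42, 20));  /* 1 0 */
        return 0;
}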
- */ - dbg_wl("PEB %d was put meanwhile, erase", e1->pnum); - err = schedule_erase(ubi, e1, 0); - if (err) { - kmem_cache_free(wl_entries_slab, e1); - ubi_ro_mode(ubi); - } - } - err = schedule_erase(ubi, e2, 0); - if (err) { - kmem_cache_free(wl_entries_slab, e2); - ubi_ro_mode(ubi); - } + if (err) + goto out_error; + + mutex_unlock(&ubi->move_mutex); + return 0; + +out_error: + ubi_err("error %d while moving PEB %d to PEB %d", + err, e1->pnum, e2->pnum); - yield(); + ubi_free_vid_hdr(ubi, vid_hdr); + spin_lock(&ubi->wl_lock); + ubi->move_from = ubi->move_to = NULL; + ubi->move_to_put = ubi->wl_scheduled = 0; + spin_unlock(&ubi->wl_lock); + + kmem_cache_free(ubi_wl_entry_slab, e1); + kmem_cache_free(ubi_wl_entry_slab, e2); + ubi_ro_mode(ubi); + + mutex_unlock(&ubi->move_mutex); return err; + +out_cancel: + ubi->wl_scheduled = 0; + spin_unlock(&ubi->wl_lock); + mutex_unlock(&ubi->move_mutex); + ubi_free_vid_hdr(ubi, vid_hdr); + return 0; } /** @@ -1020,7 +1037,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, if (cancel) { dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec); kfree(wl_wrk); - kmem_cache_free(wl_entries_slab, e); + kmem_cache_free(ubi_wl_entry_slab, e); return 0; } @@ -1049,7 +1066,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, ubi_err("failed to erase PEB %d, error %d", pnum, err); kfree(wl_wrk); - kmem_cache_free(wl_entries_slab, e); + kmem_cache_free(ubi_wl_entry_slab, e); if (err == -EINTR || err == -ENOMEM || err == -EAGAIN || err == -EBUSY) { @@ -1119,8 +1136,7 @@ out_ro: } /** - * ubi_wl_put_peb - return a physical eraseblock to the wear-leveling - * unit. + * ubi_wl_put_peb - return a physical eraseblock to the wear-leveling unit. * @ubi: UBI device description object * @pnum: physical eraseblock to return * @torture: if this physical eraseblock has to be tortured @@ -1128,7 +1144,7 @@ out_ro: * This function is called to return physical eraseblock @pnum to the pool of * free physical eraseblocks. The @torture flag has to be set if an I/O error * occurred to this @pnum and it has to be tested. This function returns zero - * in case of success and a negative error code in case of failure. + * in case of success, and a negative error code in case of failure. */ int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture) { @@ -1139,8 +1155,8 @@ int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture) ubi_assert(pnum >= 0); ubi_assert(pnum < ubi->peb_count); +retry: spin_lock(&ubi->wl_lock); - e = ubi->lookuptbl[pnum]; if (e == ubi->move_from) { /* @@ -1148,17 +1164,22 @@ int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture) * be moved. It will be scheduled for erasure in the * wear-leveling worker. */ - dbg_wl("PEB %d is being moved", pnum); - ubi_assert(!ubi->move_from_put); - ubi->move_from_put = 1; + dbg_wl("PEB %d is being moved, wait", pnum); spin_unlock(&ubi->wl_lock); - return 0; + + /* Wait for the WL worker by taking the @ubi->move_mutex */ + mutex_lock(&ubi->move_mutex); + mutex_unlock(&ubi->move_mutex); + goto retry; } else if (e == ubi->move_to) { /* * User is putting the physical eraseblock which was selected * as the target the data is moved to. It may happen if the EBA - * unit already re-mapped the LEB but the WL unit did has not - * put the PEB to the "used" tree. + * unit already re-mapped the LEB in 'ubi_eba_copy_leb()' but + * the WL unit has not put the PEB to the "used" tree yet, but + * it is about to do this. 
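[Editorial aside] The retry loop in ubi_wl_put_peb() above relies on a compact synchronization idiom: rather than adding a wait queue, the caller locks and immediately unlocks the mutex the mover holds for the whole move, which blocks exactly until the move is over, and then re-checks the state. A stand-alone pthread model of "wait for the worker by bouncing off its mutex" is sketched below; all names are hypothetical and ordinary mutexes replace the kernel's spinlock/mutex pair. Build it with -pthread.

/* Sketch: wait for an in-progress operation by acquiring and releasing the
 * mutex its owner holds for the whole operation, then retry the check. */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER; /* ~wl_lock */
static pthread_mutex_t move_mutex = PTHREAD_MUTEX_INITIALIZER; /* ~move_mutex */
static int moving;              /* set while the "mover" works on the item */

static void *mover(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&move_mutex);
        pthread_mutex_lock(&state_lock);
        moving = 1;
        pthread_mutex_unlock(&state_lock);

        usleep(100 * 1000);             /* pretend to copy data around */

        pthread_mutex_lock(&state_lock);
        moving = 0;
        pthread_mutex_unlock(&state_lock);
        pthread_mutex_unlock(&move_mutex);
        return NULL;
}

static void put_item(void)
{
retry:
        pthread_mutex_lock(&state_lock);
        if (moving) {
                pthread_mutex_unlock(&state_lock);
                /* Block until the mover drops its mutex, then re-check. */
                pthread_mutex_lock(&move_mutex);
                pthread_mutex_unlock(&move_mutex);
                goto retry;
        }
        pthread_mutex_unlock(&state_lock);
        printf("item put after move finished\n");
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, mover, NULL);
        usleep(10 * 1000);              /* let the mover start first */
        put_item();
        pthread_join(t, NULL);
        return 0;
}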
So we just set a flag which will + * tell the WL worker that the PEB is not needed anymore and + * should be scheduled for erasure. */ dbg_wl("PEB %d is the target of data moving", pnum); ubi_assert(!ubi->move_to_put); @@ -1172,8 +1193,15 @@ int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture) } else if (in_wl_tree(e, &ubi->scrub)) { paranoid_check_in_wl_tree(e, &ubi->scrub); rb_erase(&e->rb, &ubi->scrub); - } else - prot_tree_del(ubi, e->pnum); + } else { + err = prot_tree_del(ubi, e->pnum); + if (err) { + ubi_err("PEB %d not found", pnum); + ubi_ro_mode(ubi); + spin_unlock(&ubi->wl_lock); + return err; + } + } } spin_unlock(&ubi->wl_lock); @@ -1227,8 +1255,17 @@ retry: if (in_wl_tree(e, &ubi->used)) { paranoid_check_in_wl_tree(e, &ubi->used); rb_erase(&e->rb, &ubi->used); - } else - prot_tree_del(ubi, pnum); + } else { + int err; + + err = prot_tree_del(ubi, e->pnum); + if (err) { + ubi_err("PEB %d not found", pnum); + ubi_ro_mode(ubi); + spin_unlock(&ubi->wl_lock); + return err; + } + } wl_tree_add(e, &ubi->scrub); spin_unlock(&ubi->wl_lock); @@ -1249,17 +1286,32 @@ retry: */ int ubi_wl_flush(struct ubi_device *ubi) { - int err, pending_count; - - pending_count = ubi->works_count; - - dbg_wl("flush (%d pending works)", pending_count); + int err; /* * Erase while the pending works queue is not empty, but not more then * the number of currently pending works. */ - while (pending_count-- > 0) { + dbg_wl("flush (%d pending works)", ubi->works_count); + while (ubi->works_count) { + err = do_work(ubi); + if (err) + return err; + } + + /* + * Make sure all the works which have been done in parallel are + * finished. + */ + down_write(&ubi->work_sem); + up_write(&ubi->work_sem); + + /* + * And in case last was the WL worker and it cancelled the LEB + * movement, flush again. + */ + while (ubi->works_count) { + dbg_wl("flush more (%d pending works)", ubi->works_count); err = do_work(ubi); if (err) return err; @@ -1294,7 +1346,7 @@ static void tree_destroy(struct rb_root *root) rb->rb_right = NULL; } - kmem_cache_free(wl_entries_slab, e); + kmem_cache_free(ubi_wl_entry_slab, e); } } } @@ -1303,7 +1355,7 @@ static void tree_destroy(struct rb_root *root) * ubi_thread - UBI background thread. 
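[Editorial aside] ubi_wl_flush() above combines two steps: it first drains the queue itself, then takes ubi->work_sem in write mode purely as a barrier, because each worker holds that semaphore in read mode for the duration of one work item, so acquiring and releasing the write side waits out every in-flight worker. The small pthread_rwlock model below illustrates the barrier idea only; the names and structure are mine, not the driver's. Build with -pthread.

/* Sketch: use a read-write lock as a "wait for all in-flight workers"
 * barrier.  Workers hold it shared per work item; flush takes it exclusive. */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_rwlock_t work_sem = PTHREAD_RWLOCK_INITIALIZER;

static void *worker(void *arg)
{
        (void)arg;
        pthread_rwlock_rdlock(&work_sem);       /* like down_read() */
        usleep(50 * 1000);                      /* do one unit of work */
        pthread_rwlock_unlock(&work_sem);       /* like up_read() */
        return NULL;
}

static void flush(void)
{
        /* Here the real code first drains its own pending queue. */

        /* Barrier: blocks until no worker holds the lock in read mode. */
        pthread_rwlock_wrlock(&work_sem);       /* like down_write() */
        pthread_rwlock_unlock(&work_sem);       /* like up_write() */

        /* ...and then drains anything that was queued meanwhile again. */
        printf("all in-flight work finished\n");
}

int main(void)
{
        pthread_t t[4];

        for (int i = 0; i < 4; i++)
                pthread_create(&t[i], NULL, worker, NULL);
        flush();
        for (int i = 0; i < 4; i++)
                pthread_join(t[i], NULL);
        return 0;
}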
* @u: the UBI device description object pointer */ -static int ubi_thread(void *u) +int ubi_thread(void *u) { int failures = 0; struct ubi_device *ubi = u; @@ -1394,36 +1446,22 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si) ubi->used = ubi->free = ubi->scrub = RB_ROOT; ubi->prot.pnum = ubi->prot.aec = RB_ROOT; spin_lock_init(&ubi->wl_lock); + mutex_init(&ubi->move_mutex); + init_rwsem(&ubi->work_sem); ubi->max_ec = si->max_ec; INIT_LIST_HEAD(&ubi->works); sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num); - ubi->bgt_thread = kthread_create(ubi_thread, ubi, ubi->bgt_name); - if (IS_ERR(ubi->bgt_thread)) { - err = PTR_ERR(ubi->bgt_thread); - ubi_err("cannot spawn \"%s\", error %d", ubi->bgt_name, - err); - return err; - } - - if (ubi_devices_cnt == 0) { - wl_entries_slab = kmem_cache_create("ubi_wl_entry_slab", - sizeof(struct ubi_wl_entry), - 0, 0, NULL); - if (!wl_entries_slab) - return -ENOMEM; - } - err = -ENOMEM; ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL); if (!ubi->lookuptbl) - goto out_free; + return err; list_for_each_entry_safe(seb, tmp, &si->erase, u.list) { cond_resched(); - e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL); + e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL); if (!e) goto out_free; @@ -1431,7 +1469,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si) e->ec = seb->ec; ubi->lookuptbl[e->pnum] = e; if (schedule_erase(ubi, e, 0)) { - kmem_cache_free(wl_entries_slab, e); + kmem_cache_free(ubi_wl_entry_slab, e); goto out_free; } } @@ -1439,7 +1477,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si) list_for_each_entry(seb, &si->free, u.list) { cond_resched(); - e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL); + e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL); if (!e) goto out_free; @@ -1453,7 +1491,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si) list_for_each_entry(seb, &si->corr, u.list) { cond_resched(); - e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL); + e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL); if (!e) goto out_free; @@ -1461,7 +1499,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si) e->ec = seb->ec; ubi->lookuptbl[e->pnum] = e; if (schedule_erase(ubi, e, 0)) { - kmem_cache_free(wl_entries_slab, e); + kmem_cache_free(ubi_wl_entry_slab, e); goto out_free; } } @@ -1470,7 +1508,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si) ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) { cond_resched(); - e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL); + e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL); if (!e) goto out_free; @@ -1510,8 +1548,6 @@ out_free: tree_destroy(&ubi->free); tree_destroy(&ubi->scrub); kfree(ubi->lookuptbl); - if (ubi_devices_cnt == 0) - kmem_cache_destroy(wl_entries_slab); return err; } @@ -1541,7 +1577,7 @@ static void protection_trees_destroy(struct ubi_device *ubi) rb->rb_right = NULL; } - kmem_cache_free(wl_entries_slab, pe->e); + kmem_cache_free(ubi_wl_entry_slab, pe->e); kfree(pe); } } @@ -1553,10 +1589,6 @@ static void protection_trees_destroy(struct ubi_device *ubi) */ void ubi_wl_close(struct ubi_device *ubi) { - dbg_wl("disable \"%s\"", ubi->bgt_name); - if (ubi->bgt_thread) - kthread_stop(ubi->bgt_thread); - dbg_wl("close the UBI wear-leveling unit"); cancel_pending(ubi); @@ -1565,8 +1597,6 @@ void ubi_wl_close(struct ubi_device *ubi) tree_destroy(&ubi->free); tree_destroy(&ubi->scrub); kfree(ubi->lookuptbl); - if (ubi_devices_cnt 
== 1) - kmem_cache_destroy(wl_entries_slab); } #ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
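[Editorial aside] The final wl.c hunks drop the old per-attach wl_entries_slab, which was created when the first device was attached and destroyed with the last one, in favour of a single ubi_wl_entry_slab shared by all UBI devices, presumably set up once for the whole module; that removes the fragile device counting around cache creation and destruction. The general shape, one shared fixed-size object cache used by every device instance, is sketched below with a trivial free list standing in for a real slab allocator; all names are invented, and unlike the kernel's cache this toy version is not thread-safe.

/* Sketch: one shared fixed-size object cache for all "devices", set up once
 * at start-up.  A trivial free list stands in for kmem_cache_*. */
#include <stdlib.h>

struct wl_entry {               /* the object type the cache hands out */
        int pnum;
        int ec;
        struct wl_entry *next;  /* free-list linkage while unused */
};

static struct wl_entry *free_list;      /* the shared "cache" */

static int cache_init(int nr)           /* like creating the cache at init */
{
        for (int i = 0; i < nr; i++) {
                struct wl_entry *e = malloc(sizeof(*e));

                if (!e)
                        return -1;
                e->next = free_list;
                free_list = e;
        }
        return 0;
}

static struct wl_entry *cache_alloc(void)
{
        struct wl_entry *e = free_list;

        if (e)
                free_list = e->next;
        return e;                       /* NULL when exhausted */
}

static void cache_free(struct wl_entry *e)
{
        e->next = free_list;
        free_list = e;
}

int main(void)
{
        if (cache_init(4))
                return 1;
        struct wl_entry *e = cache_alloc();     /* any "device" may call this */
        if (!e)
                return 1;
        e->pnum = 0;
        e->ec = 1;
        cache_free(e);
        return 0;
}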