Diffstat (limited to 'drivers/nvdimm')
-rw-r--r--   drivers/nvdimm/blk.c             |  18
-rw-r--r--   drivers/nvdimm/btt.c             |  19
-rw-r--r--   drivers/nvdimm/bus.c             | 151
-rw-r--r--   drivers/nvdimm/core.c            | 203
-rw-r--r--   drivers/nvdimm/dimm_devs.c       |   6
-rw-r--r--   drivers/nvdimm/e820.c            |   2
-rw-r--r--   drivers/nvdimm/namespace_devs.c  | 141
-rw-r--r--   drivers/nvdimm/nd-core.h         |   2
-rw-r--r--   drivers/nvdimm/nd.h              |  20
-rw-r--r--   drivers/nvdimm/pfn.h             |  23
-rw-r--r--   drivers/nvdimm/pfn_devs.c        | 161
-rw-r--r--   drivers/nvdimm/pmem.c            | 350
-rw-r--r--   drivers/nvdimm/region.c          |  12
-rw-r--r--   drivers/nvdimm/region_devs.c     |  66
14 files changed, 912 insertions(+), 262 deletions(-)
diff --git a/drivers/nvdimm/blk.c b/drivers/nvdimm/blk.c
index 91a336ea8c4f..e9ff9229d942 100644
--- a/drivers/nvdimm/blk.c
+++ b/drivers/nvdimm/blk.c
@@ -31,8 +31,6 @@ struct nd_blk_device {
 	u32 internal_lbasize;
 };
 
-static int nd_blk_major;
-
 static u32 nd_blk_meta_size(struct nd_blk_device *blk_dev)
 {
 	return blk_dev->nsblk->lbasize - blk_dev->sector_size;
@@ -264,7 +262,6 @@ static int nd_blk_attach_disk(struct nd_namespace_common *ndns,
 	}
 
 	disk->driverfs_dev = &ndns->dev;
-	disk->major = nd_blk_major;
 	disk->first_minor = 0;
 	disk->fops = &nd_blk_fops;
 	disk->private_data = blk_dev;
@@ -358,25 +355,12 @@ static struct nd_device_driver nd_blk_driver = {
 
 static int __init nd_blk_init(void)
 {
-	int rc;
-
-	rc = register_blkdev(0, "nd_blk");
-	if (rc < 0)
-		return rc;
-
-	nd_blk_major = rc;
-	rc = nd_driver_register(&nd_blk_driver);
-
-	if (rc < 0)
-		unregister_blkdev(nd_blk_major, "nd_blk");
-
-	return rc;
+	return nd_driver_register(&nd_blk_driver);
 }
 
 static void __exit nd_blk_exit(void)
 {
 	driver_unregister(&nd_blk_driver.drv);
-	unregister_blkdev(nd_blk_major, "nd_blk");
 }
 
 MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index efb2c1ceef98..c32cbb593600 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -31,8 +31,6 @@ enum log_ent_request {
 	LOG_OLD_ENT
 };
 
-static int btt_major;
-
 static int arena_read_bytes(struct arena_info *arena, resource_size_t offset,
 		void *buf, size_t n)
 {
@@ -1246,7 +1244,6 @@ static int btt_blk_init(struct btt *btt)
 
 	nvdimm_namespace_disk_name(ndns, btt->btt_disk->disk_name);
 	btt->btt_disk->driverfs_dev = &btt->nd_btt->dev;
-	btt->btt_disk->major = btt_major;
 	btt->btt_disk->first_minor = 0;
 	btt->btt_disk->fops = &btt_fops;
 	btt->btt_disk->private_data = btt;
@@ -1423,22 +1420,11 @@ EXPORT_SYMBOL(nvdimm_namespace_detach_btt);
 
 static int __init nd_btt_init(void)
 {
-	int rc;
-
-	btt_major = register_blkdev(0, "btt");
-	if (btt_major < 0)
-		return btt_major;
+	int rc = 0;
 
 	debugfs_root = debugfs_create_dir("btt", NULL);
-	if (IS_ERR_OR_NULL(debugfs_root)) {
+	if (IS_ERR_OR_NULL(debugfs_root))
 		rc = -ENXIO;
-		goto err_debugfs;
-	}
-
-	return 0;
-
- err_debugfs:
-	unregister_blkdev(btt_major, "btt");
 
 	return rc;
 }
@@ -1446,7 +1432,6 @@ static int __init nd_btt_init(void)
 static void __exit nd_btt_exit(void)
 {
 	debugfs_remove_recursive(debugfs_root);
-	unregister_blkdev(btt_major, "btt");
 }
 
 MODULE_ALIAS_ND_DEVICE(ND_DEVICE_BTT);
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 7e2c43f701bc..33557481d452 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -133,6 +133,78 @@ static int nvdimm_bus_remove(struct device *dev)
 	return rc;
 }
 
+void nd_device_notify(struct device *dev, enum nvdimm_event event)
+{
+	device_lock(dev);
+	if (dev->driver) {
+		struct nd_device_driver *nd_drv;
+
+		nd_drv = to_nd_device_driver(dev->driver);
+		if (nd_drv->notify)
+			nd_drv->notify(dev, event);
+	}
+	device_unlock(dev);
+}
+EXPORT_SYMBOL(nd_device_notify);
+
+void nvdimm_region_notify(struct nd_region *nd_region, enum nvdimm_event event)
+{
+	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
+
+	if (!nvdimm_bus)
+		return;
+
+	/* caller is responsible for holding a reference on the device */
+	nd_device_notify(&nd_region->dev, event);
+}
+EXPORT_SYMBOL_GPL(nvdimm_region_notify);
+
+long nvdimm_clear_poison(struct device *dev, phys_addr_t phys,
+		unsigned int len)
+{
+	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
+	struct nvdimm_bus_descriptor *nd_desc;
+	struct nd_cmd_clear_error clear_err;
+	struct nd_cmd_ars_cap ars_cap;
+	u32 clear_err_unit, mask;
+	int cmd_rc, rc;
+
+	if (!nvdimm_bus)
+		return -ENXIO;
+
+	nd_desc = nvdimm_bus->nd_desc;
+	if (!nd_desc->ndctl)
+		return -ENXIO;
+
+	memset(&ars_cap, 0, sizeof(ars_cap));
+	ars_cap.address = phys;
+	ars_cap.length = len;
+	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, &ars_cap,
+			sizeof(ars_cap), &cmd_rc);
+	if (rc < 0)
+		return rc;
+	if (cmd_rc < 0)
+		return cmd_rc;
+	clear_err_unit = ars_cap.clear_err_unit;
+	if (!clear_err_unit || !is_power_of_2(clear_err_unit))
+		return -ENXIO;
+
+	mask = clear_err_unit - 1;
+	if ((phys | len) & mask)
+		return -ENXIO;
+	memset(&clear_err, 0, sizeof(clear_err));
+	clear_err.address = phys;
+	clear_err.length = len;
+	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_CLEAR_ERROR, &clear_err,
+			sizeof(clear_err), &cmd_rc);
+	if (rc < 0)
+		return rc;
+	if (cmd_rc < 0)
+		return cmd_rc;
+	return clear_err.cleared;
+}
+EXPORT_SYMBOL_GPL(nvdimm_clear_poison);
+
 static struct bus_type nvdimm_bus_type = {
 	.name = "nd",
 	.uevent = nvdimm_bus_uevent,
@@ -382,18 +454,24 @@ static const struct nd_cmd_desc __nd_cmd_bus_descs[] = {
 	[ND_CMD_ARS_CAP] = {
 		.in_num = 2,
 		.in_sizes = { 8, 8, },
-		.out_num = 2,
-		.out_sizes = { 4, 4, },
+		.out_num = 4,
+		.out_sizes = { 4, 4, 4, 4, },
 	},
 	[ND_CMD_ARS_START] = {
-		.in_num = 4,
-		.in_sizes = { 8, 8, 2, 6, },
-		.out_num = 1,
-		.out_sizes = { 4, },
+		.in_num = 5,
+		.in_sizes = { 8, 8, 2, 1, 5, },
+		.out_num = 2,
+		.out_sizes = { 4, 4, },
 	},
 	[ND_CMD_ARS_STATUS] = {
-		.out_num = 2,
-		.out_sizes = { 4, UINT_MAX, },
+		.out_num = 3,
+		.out_sizes = { 4, 4, UINT_MAX, },
+	},
+	[ND_CMD_CLEAR_ERROR] = {
+		.in_num = 2,
+		.in_sizes = { 8, 8, },
+		.out_num = 3,
+		.out_sizes = { 4, 4, 8, },
 	},
 };
 
@@ -442,8 +520,8 @@ u32 nd_cmd_out_size(struct nvdimm *nvdimm, int cmd,
 		return in_field[1];
 	else if (nvdimm && cmd == ND_CMD_VENDOR && idx == 2)
 		return out_field[1];
-	else if (!nvdimm && cmd == ND_CMD_ARS_STATUS && idx == 1)
-		return ND_CMD_ARS_STATUS_MAX;
+	else if (!nvdimm && cmd == ND_CMD_ARS_STATUS && idx == 2)
+		return out_field[1] - 8;
 
 	return UINT_MAX;
 }
@@ -463,17 +541,37 @@ void wait_nvdimm_bus_probe_idle(struct device *dev)
 	} while (true);
 }
 
+static int pmem_active(struct device *dev, void *data)
+{
+	if (is_nd_pmem(dev) && dev->driver)
+		return -EBUSY;
+	return 0;
+}
+
 /* set_config requires an idle interleave set */
-static int nd_cmd_clear_to_send(struct nvdimm *nvdimm, unsigned int cmd)
+static int nd_cmd_clear_to_send(struct nvdimm_bus *nvdimm_bus,
+		struct nvdimm *nvdimm, unsigned int cmd)
 {
-	struct nvdimm_bus *nvdimm_bus;
+	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
+
+	/* ask the bus provider if it would like to block this request */
+	if (nd_desc->clear_to_send) {
+		int rc = nd_desc->clear_to_send(nd_desc, nvdimm, cmd);
+
+		if (rc)
+			return rc;
+	}
+
+	/* require clear error to go through the pmem driver */
+	if (!nvdimm && cmd == ND_CMD_CLEAR_ERROR)
+		return device_for_each_child(&nvdimm_bus->dev, NULL,
+				pmem_active);
 
 	if (!nvdimm || cmd != ND_CMD_SET_CONFIG_DATA)
 		return 0;
 
-	nvdimm_bus = walk_to_nvdimm_bus(&nvdimm->dev);
+	/* prevent label manipulation while the kernel owns label updates */
 	wait_nvdimm_bus_probe_idle(&nvdimm_bus->dev);
-
 	if (atomic_read(&nvdimm->busy))
 		return -EBUSY;
 	return 0;
@@ -513,10 +611,11 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
 
 	/* fail write commands (when read-only) */
 	if (read_only)
-		switch (ioctl_cmd) {
-		case ND_IOCTL_VENDOR:
-		case ND_IOCTL_SET_CONFIG_DATA:
-		case ND_IOCTL_ARS_START:
+		switch (cmd) {
+		case ND_CMD_VENDOR:
+		case ND_CMD_SET_CONFIG_DATA:
+		case ND_CMD_ARS_START:
+		case ND_CMD_CLEAR_ERROR:
 			dev_dbg(&nvdimm_bus->dev,
 				"'%s' command while read-only.\n",
 				nvdimm ? nvdimm_cmd_name(cmd)
 				: nvdimm_bus_cmd_name(cmd));
@@ -583,11 +682,11 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
 	}
 
 	nvdimm_bus_lock(&nvdimm_bus->dev);
-	rc = nd_cmd_clear_to_send(nvdimm, cmd);
+	rc = nd_cmd_clear_to_send(nvdimm_bus, nvdimm, cmd);
 	if (rc)
 		goto out_unlock;
 
-	rc = nd_desc->ndctl(nd_desc, nvdimm, cmd, buf, buf_len);
+	rc = nd_desc->ndctl(nd_desc, nvdimm, cmd, buf, buf_len, NULL);
 	if (rc < 0)
 		goto out_unlock;
 	if (copy_to_user(p, buf, buf_len))
@@ -602,14 +701,14 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
 static long nd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
 	long id = (long) file->private_data;
-	int rc = -ENXIO, read_only;
+	int rc = -ENXIO, ro;
 	struct nvdimm_bus *nvdimm_bus;
 
-	read_only = (O_RDWR != (file->f_flags & O_ACCMODE));
+	ro = ((file->f_flags & O_ACCMODE) == O_RDONLY);
 	mutex_lock(&nvdimm_bus_list_mutex);
 	list_for_each_entry(nvdimm_bus, &nvdimm_bus_list, list) {
 		if (nvdimm_bus->id == id) {
-			rc = __nd_ioctl(nvdimm_bus, NULL, read_only, cmd, arg);
+			rc = __nd_ioctl(nvdimm_bus, NULL, ro, cmd, arg);
 			break;
 		}
 	}
@@ -633,10 +732,10 @@ static int match_dimm(struct device *dev, void *data)
 
 static long nvdimm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
-	int rc = -ENXIO, read_only;
+	int rc = -ENXIO, ro;
 	struct nvdimm_bus *nvdimm_bus;
 
-	read_only = (O_RDWR != (file->f_flags & O_ACCMODE));
+	ro = ((file->f_flags & O_ACCMODE) == O_RDONLY);
 	mutex_lock(&nvdimm_bus_list_mutex);
 	list_for_each_entry(nvdimm_bus, &nvdimm_bus_list, list) {
 		struct device *dev = device_find_child(&nvdimm_bus->dev,
@@ -647,7 +746,7 @@ static long nvdimm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 			continue;
 
 		nvdimm = to_nvdimm(dev);
-		rc = __nd_ioctl(nvdimm_bus, nvdimm, read_only, cmd, arg);
+		rc = __nd_ioctl(nvdimm_bus, nvdimm, ro, cmd, arg);
 		put_device(dev);
 		break;
 	}
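For reference, a minimal standalone sketch (not part of the patch, and not kernel code) of the alignment rule that nvdimm_clear_poison() above enforces: ARS_CAP reports a power-of-2 clear_err_unit, and both the physical address and the length of a clear request must be multiples of it. The helper name clear_range_ok is hypothetical.

    #include <stdbool.h>
    #include <stdint.h>

    static bool clear_range_ok(uint64_t phys, uint64_t len, uint32_t clear_err_unit)
    {
            uint32_t mask = clear_err_unit - 1;

            /* unit must be a non-zero power of 2 (unit & (unit - 1) == 0) */
            if (!clear_err_unit || (clear_err_unit & mask))
                    return false;
            /* both start and length must be unit-aligned, as in (phys | len) & mask */
            return ((phys | len) & mask) == 0;
    }

    /* clear_range_ok(0x1000, 0x400, 256) -> true; clear_range_ok(0x1001, 0x400, 256) -> false */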
diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c
index 82c49bb87055..79646d0c3277 100644
--- a/drivers/nvdimm/core.c
+++ b/drivers/nvdimm/core.c
@@ -11,6 +11,7 @@
  * General Public License for more details.
  */
 #include <linux/libnvdimm.h>
+#include <linux/badblocks.h>
 #include <linux/export.h>
 #include <linux/module.h>
 #include <linux/blkdev.h>
@@ -297,6 +298,15 @@ static int flush_regions_dimms(struct device *dev, void *data)
 static ssize_t wait_probe_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
+	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
+	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
+	int rc;
+
+	if (nd_desc->flush_probe) {
+		rc = nd_desc->flush_probe(nd_desc);
+		if (rc)
+			return rc;
+	}
 	nd_synchronize();
 	device_for_each_child(dev, NULL, flush_regions_dimms);
 	return sprintf(buf, "1\n");
@@ -325,6 +335,7 @@ struct nvdimm_bus *__nvdimm_bus_register(struct device *parent,
 	if (!nvdimm_bus)
 		return NULL;
 	INIT_LIST_HEAD(&nvdimm_bus->list);
+	INIT_LIST_HEAD(&nvdimm_bus->poison_list);
 	init_waitqueue_head(&nvdimm_bus->probe_wait);
 	nvdimm_bus->id = ida_simple_get(&nd_ida, 0, 0, GFP_KERNEL);
 	mutex_init(&nvdimm_bus->reconfig_mutex);
@@ -359,6 +370,193 @@ struct nvdimm_bus *__nvdimm_bus_register(struct device *parent,
 }
 EXPORT_SYMBOL_GPL(__nvdimm_bus_register);
 
+static void set_badblock(struct badblocks *bb, sector_t s, int num)
+{
+	dev_dbg(bb->dev, "Found a poison range (0x%llx, 0x%llx)\n",
+			(u64) s * 512, (u64) num * 512);
+	/* this isn't an error as the hardware will still throw an exception */
+	if (badblocks_set(bb, s, num, 1))
+		dev_info_once(bb->dev, "%s: failed for sector %llx\n",
+				__func__, (u64) s);
+}
+
+/**
+ * __add_badblock_range() - Convert a physical address range to bad sectors
+ * @bb:		badblocks instance to populate
+ * @ns_offset:	namespace offset where the error range begins (in bytes)
+ * @len:	number of bytes of poison to be added
+ *
+ * This assumes that the range provided with (ns_offset, len) is within
+ * the bounds of physical addresses for this namespace, i.e. lies in the
+ * interval [ns_start, ns_start + ns_size)
+ */
+static void __add_badblock_range(struct badblocks *bb, u64 ns_offset, u64 len)
+{
+	const unsigned int sector_size = 512;
+	sector_t start_sector;
+	u64 num_sectors;
+	u32 rem;
+
+	start_sector = div_u64(ns_offset, sector_size);
+	num_sectors = div_u64_rem(len, sector_size, &rem);
+	if (rem)
+		num_sectors++;
+
+	if (unlikely(num_sectors > (u64)INT_MAX)) {
+		u64 remaining = num_sectors;
+		sector_t s = start_sector;
+
+		while (remaining) {
+			int done = min_t(u64, remaining, INT_MAX);
+
+			set_badblock(bb, s, done);
+			remaining -= done;
+			s += done;
+		}
+	} else
+		set_badblock(bb, start_sector, num_sectors);
+}
+
+static void namespace_add_poison(struct list_head *poison_list,
+		struct badblocks *bb, struct resource *res)
+{
+	struct nd_poison *pl;
+
+	if (list_empty(poison_list))
+		return;
+
+	list_for_each_entry(pl, poison_list, list) {
+		u64 pl_end = pl->start + pl->length - 1;
+
+		/* Discard intervals with no intersection */
+		if (pl_end < res->start)
+			continue;
+		if (pl->start > res->end)
+			continue;
+		/* Deal with any overlap after start of the namespace */
+		if (pl->start >= res->start) {
+			u64 start = pl->start;
+			u64 len;
+
+			if (pl_end <= res->end)
+				len = pl->length;
+			else
+				len = res->start + resource_size(res)
+					- pl->start;
+			__add_badblock_range(bb, start - res->start, len);
+			continue;
+		}
+		/* Deal with overlap for poison starting before the namespace */
+		if (pl->start < res->start) {
+			u64 len;
+
+			if (pl_end < res->end)
+				len = pl->start + pl->length - res->start;
+			else
+				len = resource_size(res);
+			__add_badblock_range(bb, 0, len);
+		}
+	}
+}
+
+/**
+ * nvdimm_namespace_add_poison() - Convert a list of poison ranges to badblocks
+ * @ndns:	the namespace containing poison ranges
+ * @bb:		badblocks instance to populate
+ * @offset:	offset at the start of the namespace before 'sector 0'
+ *
+ * The poison list generated during NFIT initialization may contain multiple,
+ * possibly overlapping ranges in the SPA (System Physical Address) space.
+ * Compare each of these ranges to the namespace currently being initialized,
+ * and add badblocks to the gendisk for all matching sub-ranges
+ */
+void nvdimm_namespace_add_poison(struct nd_namespace_common *ndns,
+		struct badblocks *bb, resource_size_t offset)
+{
+	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
+	struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
+	struct nvdimm_bus *nvdimm_bus;
+	struct list_head *poison_list;
+	struct resource res = {
+		.start = nsio->res.start + offset,
+		.end = nsio->res.end,
+	};
+
+	nvdimm_bus = to_nvdimm_bus(nd_region->dev.parent);
+	poison_list = &nvdimm_bus->poison_list;
+
+	nvdimm_bus_lock(&nvdimm_bus->dev);
+	namespace_add_poison(poison_list, bb, &res);
+	nvdimm_bus_unlock(&nvdimm_bus->dev);
+}
+EXPORT_SYMBOL_GPL(nvdimm_namespace_add_poison);
+
+static int add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
+{
+	struct nd_poison *pl;
+
+	pl = kzalloc(sizeof(*pl), GFP_KERNEL);
+	if (!pl)
+		return -ENOMEM;
+
+	pl->start = addr;
+	pl->length = length;
+	list_add_tail(&pl->list, &nvdimm_bus->poison_list);
+
+	return 0;
+}
+
+static int bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
+{
+	struct nd_poison *pl;
+
+	if (list_empty(&nvdimm_bus->poison_list))
+		return add_poison(nvdimm_bus, addr, length);
+
+	/*
+	 * There is a chance this is a duplicate, check for those first.
+	 * This will be the common case as ARS_STATUS returns all known
+	 * errors in the SPA space, and we can't query it per region
+	 */
+	list_for_each_entry(pl, &nvdimm_bus->poison_list, list)
+		if (pl->start == addr) {
+			/* If length has changed, update this list entry */
+			if (pl->length != length)
+				pl->length = length;
+			return 0;
+		}
+
+	/*
+	 * If not a duplicate or a simple length update, add the entry as is,
+	 * as any overlapping ranges will get resolved when the list is consumed
+	 * and converted to badblocks
+	 */
+	return add_poison(nvdimm_bus, addr, length);
+}
+
+int nvdimm_bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
+{
+	int rc;
+
+	nvdimm_bus_lock(&nvdimm_bus->dev);
+	rc = bus_add_poison(nvdimm_bus, addr, length);
+	nvdimm_bus_unlock(&nvdimm_bus->dev);
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(nvdimm_bus_add_poison);
+
+static void free_poison_list(struct list_head *poison_list)
+{
+	struct nd_poison *pl, *next;
+
+	list_for_each_entry_safe(pl, next, poison_list, list) {
+		list_del(&pl->list);
+		kfree(pl);
+	}
+	list_del_init(poison_list);
+}
+
 static int child_unregister(struct device *dev, void *data)
 {
 	/*
@@ -385,6 +583,11 @@ void nvdimm_bus_unregister(struct nvdimm_bus *nvdimm_bus)
 
 	nd_synchronize();
 	device_for_each_child(&nvdimm_bus->dev, NULL, child_unregister);
+
+	nvdimm_bus_lock(&nvdimm_bus->dev);
+	free_poison_list(&nvdimm_bus->poison_list);
+	nvdimm_bus_unlock(&nvdimm_bus->dev);
+
 	nvdimm_bus_destroy_ndctl(nvdimm_bus);
 
 	device_unregister(&nvdimm_bus->dev);
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index 651b8d19d324..c56f88217924 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -75,7 +75,7 @@ int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd)
 	memset(cmd, 0, sizeof(*cmd));
 	nd_desc = nvdimm_bus->nd_desc;
 	return nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
-			ND_CMD_GET_CONFIG_SIZE, cmd, sizeof(*cmd));
+			ND_CMD_GET_CONFIG_SIZE, cmd, sizeof(*cmd), NULL);
 }
 
 int nvdimm_init_config_data(struct nvdimm_drvdata *ndd)
@@ -120,7 +120,7 @@ int nvdimm_init_config_data(struct nvdimm_drvdata *ndd)
 		cmd->in_offset = offset;
 		rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
 				ND_CMD_GET_CONFIG_DATA, cmd,
-				cmd->in_length + sizeof(*cmd));
+				cmd->in_length + sizeof(*cmd), NULL);
 		if (rc || cmd->status) {
 			rc = -ENXIO;
 			break;
@@ -171,7 +171,7 @@ int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
 		status = ((void *) cmd) + cmd_size - sizeof(u32);
 
 		rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
-				ND_CMD_SET_CONFIG_DATA, cmd, cmd_size);
+				ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, NULL);
 		if (rc || *status) {
 			rc = rc ? rc : -ENXIO;
 			break;
diff --git a/drivers/nvdimm/e820.c b/drivers/nvdimm/e820.c
index b0045a505dc8..95825b38559a 100644
--- a/drivers/nvdimm/e820.c
+++ b/drivers/nvdimm/e820.c
@@ -55,7 +55,7 @@ static int e820_pmem_probe(struct platform_device *pdev)
 	for (p = iomem_resource.child; p ; p = p->sibling) {
 		struct nd_region_desc ndr_desc;
 
-		if (strncmp(p->name, "Persistent Memory (legacy)", 26) != 0)
+		if (p->desc != IORES_DESC_PERSISTENT_MEMORY_LEGACY)
 			continue;
 
 		memset(&ndr_desc, 0, sizeof(ndr_desc));
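For reference, a standalone sketch (not part of the patch, and not kernel code) of the byte-to-sector conversion that __add_badblock_range() above performs: the start offset is rounded down to a 512-byte sector and the count is the byte length rounded up to whole sectors. The struct and helper names here are hypothetical.

    #include <stdint.h>

    struct sector_range {
            uint64_t start;         /* first sector, offset rounded down */
            uint64_t count;         /* byte length rounded up to whole sectors */
    };

    static struct sector_range poison_to_sectors(uint64_t ns_offset, uint64_t len)
    {
            struct sector_range r;

            r.start = ns_offset / 512;
            r.count = (len + 511) / 512;
            return r;
    }

    /* e.g. 100 bytes of poison at namespace offset 1024 -> sector 2, count 1 */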
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 0955b2cb10fe..f5cb88601359 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -77,9 +77,63 @@ static bool is_namespace_io(struct device *dev)
 	return dev ? dev->type == &namespace_io_device_type : false;
 }
 
+static int is_uuid_busy(struct device *dev, void *data)
+{
+	u8 *uuid1 = data, *uuid2 = NULL;
+
+	if (is_namespace_pmem(dev)) {
+		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
+
+		uuid2 = nspm->uuid;
+	} else if (is_namespace_blk(dev)) {
+		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
+
+		uuid2 = nsblk->uuid;
+	} else if (is_nd_btt(dev)) {
+		struct nd_btt *nd_btt = to_nd_btt(dev);
+
+		uuid2 = nd_btt->uuid;
+	} else if (is_nd_pfn(dev)) {
+		struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+
+		uuid2 = nd_pfn->uuid;
+	}
+
+	if (uuid2 && memcmp(uuid1, uuid2, NSLABEL_UUID_LEN) == 0)
+		return -EBUSY;
+
+	return 0;
+}
+
+static int is_namespace_uuid_busy(struct device *dev, void *data)
+{
+	if (is_nd_pmem(dev) || is_nd_blk(dev))
+		return device_for_each_child(dev, data, is_uuid_busy);
+	return 0;
+}
+
+/**
+ * nd_is_uuid_unique - verify that no other namespace has @uuid
+ * @dev: any device on a nvdimm_bus
+ * @uuid: uuid to check
+ */
+bool nd_is_uuid_unique(struct device *dev, u8 *uuid)
+{
+	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
+
+	if (!nvdimm_bus)
+		return false;
+	WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm_bus->dev));
+	if (device_for_each_child(&nvdimm_bus->dev, uuid,
+				is_namespace_uuid_busy) != 0)
+		return false;
+	return true;
+}
+
 bool pmem_should_map_pages(struct device *dev)
 {
 	struct nd_region *nd_region = to_nd_region(dev->parent);
+	struct nd_namespace_io *nsio;
 
 	if (!IS_ENABLED(CONFIG_ZONE_DEVICE))
 		return false;
@@ -90,6 +144,12 @@ bool pmem_should_map_pages(struct device *dev)
 	if (is_nd_pfn(dev) || is_nd_btt(dev))
 		return false;
 
+	nsio = to_nd_namespace_io(dev);
+	if (region_intersects(nsio->res.start, resource_size(&nsio->res),
+				IORESOURCE_SYSTEM_RAM,
+				IORES_DESC_NONE) == REGION_MIXED)
+		return false;
+
 #ifdef ARCH_MEMREMAP_PMEM
 	return ARCH_MEMREMAP_PMEM == MEMREMAP_WB;
 #else
@@ -104,20 +164,10 @@ const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
 	struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
 	const char *suffix = NULL;
 
-	if (ndns->claim) {
-		if (is_nd_btt(ndns->claim))
-			suffix = "s";
-		else if (is_nd_pfn(ndns->claim))
-			suffix = "m";
-		else
-			dev_WARN_ONCE(&ndns->dev, 1,
-					"unknown claim type by %s\n",
-					dev_name(ndns->claim));
-	}
+	if (ndns->claim && is_nd_btt(ndns->claim))
+		suffix = "s";
 
 	if (is_namespace_pmem(&ndns->dev) || is_namespace_io(&ndns->dev)) {
-		if (!suffix && pmem_should_map_pages(&ndns->dev))
-			suffix = "m";
 		sprintf(name, "pmem%d%s", nd_region->id,
 				suffix ? suffix : "");
 	} else if (is_namespace_blk(&ndns->dev)) {
 		struct nd_namespace_blk *nsblk;
@@ -791,6 +841,15 @@ static void nd_namespace_pmem_set_size(struct nd_region *nd_region,
 	res->end = nd_region->ndr_start + size - 1;
 }
 
+static bool uuid_not_set(const u8 *uuid, struct device *dev, const char *where)
+{
+	if (!uuid) {
+		dev_dbg(dev, "%s: uuid not set\n", where);
+		return true;
+	}
+	return false;
+}
+
 static ssize_t __size_store(struct device *dev, unsigned long long val)
 {
 	resource_size_t allocated = 0, available = 0;
@@ -820,8 +879,12 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
 	 * We need a uuid for the allocation-label and dimm(s) on which
 	 * to store the label.
 	 */
-	if (!uuid || nd_region->ndr_mappings == 0)
+	if (uuid_not_set(uuid, dev, __func__))
 		return -ENXIO;
+	if (nd_region->ndr_mappings == 0) {
+		dev_dbg(dev, "%s: not associated with dimm(s)\n", __func__);
+		return -ENXIO;
+	}
 
 	div_u64_rem(val, SZ_4K * nd_region->ndr_mappings, &remainder);
 	if (remainder) {
@@ -1211,6 +1274,31 @@ static ssize_t holder_show(struct device *dev,
 }
 static DEVICE_ATTR_RO(holder);
 
+static ssize_t mode_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct nd_namespace_common *ndns = to_ndns(dev);
+	struct device *claim;
+	char *mode;
+	ssize_t rc;
+
+	device_lock(dev);
+	claim = ndns->claim;
+	if (claim && is_nd_btt(claim))
+		mode = "safe";
+	else if (claim && is_nd_pfn(claim))
+		mode = "memory";
+	else if (!claim && pmem_should_map_pages(dev))
+		mode = "memory";
+	else
+		mode = "raw";
+	rc = sprintf(buf, "%s\n", mode);
+	device_unlock(dev);
+
+	return rc;
+}
+static DEVICE_ATTR_RO(mode);
+
 static ssize_t force_raw_store(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t len)
 {
@@ -1234,6 +1322,7 @@ static DEVICE_ATTR_RW(force_raw);
 static struct attribute *nd_namespace_attributes[] = {
 	&dev_attr_nstype.attr,
 	&dev_attr_size.attr,
+	&dev_attr_mode.attr,
 	&dev_attr_uuid.attr,
 	&dev_attr_holder.attr,
 	&dev_attr_resource.attr,
@@ -1267,7 +1356,8 @@ static umode_t namespace_visible(struct kobject *kobj,
 	if (a == &dev_attr_nstype.attr || a == &dev_attr_size.attr
 			|| a == &dev_attr_holder.attr
-			|| a == &dev_attr_force_raw.attr)
+			|| a == &dev_attr_force_raw.attr
+			|| a == &dev_attr_mode.attr)
 		return a->mode;
 
 	return 0;
@@ -1343,14 +1433,19 @@ struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev)
 		struct nd_namespace_pmem *nspm;
 
 		nspm = to_nd_namespace_pmem(&ndns->dev);
-		if (!nspm->uuid) {
-			dev_dbg(&ndns->dev, "%s: uuid not set\n", __func__);
+		if (uuid_not_set(nspm->uuid, &ndns->dev, __func__))
 			return ERR_PTR(-ENODEV);
-		}
 	} else if (is_namespace_blk(&ndns->dev)) {
 		struct nd_namespace_blk *nsblk;
 
 		nsblk = to_nd_namespace_blk(&ndns->dev);
+		if (uuid_not_set(nsblk->uuid, &ndns->dev, __func__))
+			return ERR_PTR(-ENODEV);
+		if (!nsblk->lbasize) {
+			dev_dbg(&ndns->dev, "%s: sector size not set\n",
+				__func__);
+			return ERR_PTR(-ENODEV);
+		}
 		if (!nd_namespace_blk_validate(nsblk))
 			return ERR_PTR(-ENODEV);
 	}
@@ -1689,6 +1784,18 @@ void nd_region_create_blk_seed(struct nd_region *nd_region)
 		nd_device_register(nd_region->ns_seed);
 }
 
+void nd_region_create_pfn_seed(struct nd_region *nd_region)
+{
+	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
+	nd_region->pfn_seed = nd_pfn_create(nd_region);
+	/*
+	 * Seed creation failures are not fatal, provisioning is simply
+	 * disabled until memory becomes available
+	 */
+	if (!nd_region->pfn_seed)
+		dev_err(&nd_region->dev, "failed to create pfn namespace\n");
+}
+
 void nd_region_create_btt_seed(struct nd_region *nd_region)
 {
 	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h
index 159aed532042..1d1500f3d8b5 100644
--- a/drivers/nvdimm/nd-core.h
+++ b/drivers/nvdimm/nd-core.h
@@ -30,6 +30,7 @@ struct nvdimm_bus {
 	struct list_head list;
 	struct device dev;
 	int id, probe_active;
+	struct list_head poison_list;
 	struct mutex reconfig_mutex;
 };
 
@@ -52,6 +53,7 @@ void nd_region_probe_success(struct nvdimm_bus *nvdimm_bus, struct device *dev);
 struct nd_region;
 void nd_region_create_blk_seed(struct nd_region *nd_region);
 void nd_region_create_btt_seed(struct nd_region *nd_region);
+void nd_region_create_pfn_seed(struct nd_region *nd_region);
 void nd_region_disable(struct nvdimm_bus *nvdimm_bus, struct device *dev);
 int nvdimm_bus_create_ndctl(struct nvdimm_bus *nvdimm_bus);
 void nvdimm_bus_destroy_ndctl(struct nvdimm_bus *nvdimm_bus);
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index 417e521d299c..1799bd97a9ce 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -18,6 +18,7 @@
 #include <linux/mutex.h>
 #include <linux/ndctl.h>
 #include <linux/types.h>
+#include <linux/nd.h>
 #include "label.h"
 
 enum {
@@ -29,13 +30,12 @@ enum {
 	ND_MAX_LANES = 256,
 	SECTOR_SHIFT = 9,
 	INT_LBASIZE_ALIGNMENT = 64,
-#if IS_ENABLED(CONFIG_NVDIMM_PFN)
-	ND_PFN_ALIGN = PAGES_PER_SECTION * PAGE_SIZE,
-	ND_PFN_MASK = ND_PFN_ALIGN - 1,
-#else
-	ND_PFN_ALIGN = 0,
-	ND_PFN_MASK = 0,
-#endif
+};
+
+struct nd_poison {
+	u64 start;
+	u64 length;
+	struct list_head list;
 };
 
 struct nvdimm_drvdata {
@@ -153,6 +153,7 @@ struct nd_pfn {
 	int id;
 	u8 *uuid;
 	struct device dev;
+	unsigned long align;
 	unsigned long npfns;
 	enum nd_pfn_mode mode;
 	struct nd_pfn_sb *pfn_sb;
@@ -168,6 +169,7 @@ int nd_integrity_init(struct gendisk *disk, unsigned long meta_size);
 void wait_nvdimm_bus_probe_idle(struct device *dev);
 void nd_device_register(struct device *dev);
 void nd_device_unregister(struct device *dev, enum nd_async_mode mode);
+void nd_device_notify(struct device *dev, enum nvdimm_event event);
 int nd_uuid_store(struct device *dev, u8 **uuid_out, const char *buf,
 		size_t len);
 ssize_t nd_sector_size_show(unsigned long current_lbasize,
@@ -184,6 +186,8 @@ int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd);
 int nvdimm_init_config_data(struct nvdimm_drvdata *ndd);
 int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
 		void *buf, size_t len);
+long nvdimm_clear_poison(struct device *dev, phys_addr_t phys,
+		unsigned int len);
 struct nd_btt *to_nd_btt(struct device *dev);
 
 struct nd_gen_sb {
@@ -262,6 +266,8 @@ int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns);
 int nvdimm_namespace_detach_btt(struct nd_namespace_common *ndns);
 const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
 		char *name);
+void nvdimm_namespace_add_poison(struct nd_namespace_common *ndns,
+		struct badblocks *bb, resource_size_t offset);
 int nd_blk_region_init(struct nd_region *nd_region);
 void __nd_iostat_start(struct bio *bio, unsigned long *start);
 static inline bool nd_iostat_start(struct bio *bio, unsigned long *start)
diff --git a/drivers/nvdimm/pfn.h b/drivers/nvdimm/pfn.h
index cc243754acef..8e343a3ca873 100644
--- a/drivers/nvdimm/pfn.h
+++ b/drivers/nvdimm/pfn.h
@@ -15,6 +15,7 @@
 #define __NVDIMM_PFN_H
 
 #include <linux/types.h>
+#include <linux/mmzone.h>
 
 #define PFN_SIG_LEN 16
 #define PFN_SIG "NVDIMM_PFN_INFO\0"
@@ -26,10 +27,28 @@ struct nd_pfn_sb {
 	__le32 flags;
 	__le16 version_major;
 	__le16 version_minor;
-	__le64 dataoff;
+	__le64 dataoff; /* relative to namespace_base + start_pad */
 	__le64 npfns;
 	__le32 mode;
-	u8 padding[4012];
+	/* minor-version-1 additions for section alignment */
+	__le32 start_pad;
+	__le32 end_trunc;
+	u8 padding[4004];
 	__le64 checksum;
 };
+
+#ifdef CONFIG_SPARSEMEM
+#define PFN_SECTION_ALIGN_DOWN(x) SECTION_ALIGN_DOWN(x)
+#define PFN_SECTION_ALIGN_UP(x) SECTION_ALIGN_UP(x)
+#else
+/*
+ * In this case ZONE_DEVICE=n and we will disable 'pfn' device support,
+ * but we still want pmem to compile.
+ */
+#define PFN_SECTION_ALIGN_DOWN(x) (x)
+#define PFN_SECTION_ALIGN_UP(x) (x)
+#endif
+
+#define PHYS_SECTION_ALIGN_DOWN(x) PFN_PHYS(PFN_SECTION_ALIGN_DOWN(PHYS_PFN(x)))
+#define PHYS_SECTION_ALIGN_UP(x) PFN_PHYS(PFN_SECTION_ALIGN_UP(PHYS_PFN(x)))
 #endif /* __NVDIMM_PFN_H */
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 71805a1aa0f3..254d3bc13f70 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -83,8 +83,7 @@ static ssize_t mode_store(struct device *dev,
 
 		if (strncmp(buf, "pmem\n", n) == 0
 				|| strncmp(buf, "pmem", n) == 0) {
-			/* TODO: allocate from PMEM support */
-			rc = -ENOTTY;
+			nd_pfn->mode = PFN_MODE_PMEM;
 		} else if (strncmp(buf, "ram\n", n) == 0
 				|| strncmp(buf, "ram", n) == 0)
 			nd_pfn->mode = PFN_MODE_RAM;
@@ -103,6 +102,52 @@ static ssize_t mode_store(struct device *dev,
 }
 static DEVICE_ATTR_RW(mode);
 
+static ssize_t align_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+
+	return sprintf(buf, "%lx\n", nd_pfn->align);
+}
+
+static ssize_t __align_store(struct nd_pfn *nd_pfn, const char *buf)
+{
+	unsigned long val;
+	int rc;
+
+	rc = kstrtoul(buf, 0, &val);
+	if (rc)
+		return rc;
+
+	if (!is_power_of_2(val) || val < PAGE_SIZE || val > SZ_1G)
+		return -EINVAL;
+
+	if (nd_pfn->dev.driver)
+		return -EBUSY;
+	else
+		nd_pfn->align = val;
+
+	return 0;
+}
+
+static ssize_t align_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t len)
+{
+	struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+	ssize_t rc;
+
+	device_lock(dev);
+	nvdimm_bus_lock(dev);
+	rc = __align_store(nd_pfn, buf);
+	dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__,
+			rc, buf, buf[len - 1] == '\n' ? "" : "\n");
+	nvdimm_bus_unlock(dev);
+	device_unlock(dev);
+
+	return rc ? rc : len;
+}
+static DEVICE_ATTR_RW(align);
+
 static ssize_t uuid_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
@@ -160,10 +205,67 @@ static ssize_t namespace_store(struct device *dev,
 }
 static DEVICE_ATTR_RW(namespace);
 
+static ssize_t resource_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+	ssize_t rc;
+
+	device_lock(dev);
+	if (dev->driver) {
+		struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
+		u64 offset = __le64_to_cpu(pfn_sb->dataoff);
+		struct nd_namespace_common *ndns = nd_pfn->ndns;
+		u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
+		struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
+
+		rc = sprintf(buf, "%#llx\n", (unsigned long long) nsio->res.start
+				+ start_pad + offset);
+	} else {
+		/* no address to convey if the pfn instance is disabled */
+		rc = -ENXIO;
+	}
+	device_unlock(dev);
+
+	return rc;
+}
+static DEVICE_ATTR_RO(resource);
+
+static ssize_t size_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+	ssize_t rc;
+
+	device_lock(dev);
+	if (dev->driver) {
+		struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
+		u64 offset = __le64_to_cpu(pfn_sb->dataoff);
+		struct nd_namespace_common *ndns = nd_pfn->ndns;
+		u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
+		u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
+		struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
+
+		rc = sprintf(buf, "%llu\n", (unsigned long long)
+				resource_size(&nsio->res) - start_pad
+				- end_trunc - offset);
+	} else {
+		/* no size to convey if the pfn instance is disabled */
+		rc = -ENXIO;
+	}
+	device_unlock(dev);
+
+	return rc;
+}
+static DEVICE_ATTR_RO(size);
+
 static struct attribute *nd_pfn_attributes[] = {
 	&dev_attr_mode.attr,
 	&dev_attr_namespace.attr,
 	&dev_attr_uuid.attr,
+	&dev_attr_align.attr,
+	&dev_attr_resource.attr,
+	&dev_attr_size.attr,
 	NULL,
 };
 
@@ -179,7 +281,6 @@ static const struct attribute_group *nd_pfn_attribute_groups[] = {
 };
 
 static struct device *__nd_pfn_create(struct nd_region *nd_region,
-		u8 *uuid, enum nd_pfn_mode mode,
 		struct nd_namespace_common *ndns)
 {
 	struct nd_pfn *nd_pfn;
@@ -199,10 +300,8 @@ static struct device *__nd_pfn_create(struct nd_region *nd_region,
 		return NULL;
 	}
 
-	nd_pfn->mode = mode;
-	if (uuid)
-		uuid = kmemdup(uuid, 16, GFP_KERNEL);
-	nd_pfn->uuid = uuid;
+	nd_pfn->mode = PFN_MODE_NONE;
+	nd_pfn->align = HPAGE_SIZE;
 	dev = &nd_pfn->dev;
 	dev_set_name(dev, "pfn%d.%d", nd_region->id, nd_pfn->id);
 	dev->parent = &nd_region->dev;
@@ -220,8 +319,7 @@ static struct device *__nd_pfn_create(struct nd_region *nd_region,
 
 struct device *nd_pfn_create(struct nd_region *nd_region)
 {
-	struct device *dev = __nd_pfn_create(nd_region, NULL, PFN_MODE_NONE,
-			NULL);
+	struct device *dev = __nd_pfn_create(nd_region, NULL);
 
 	if (dev)
 		__nd_device_register(dev);
@@ -230,10 +328,11 @@ struct device *nd_pfn_create(struct nd_region *nd_region)
 
 int nd_pfn_validate(struct nd_pfn *nd_pfn)
 {
-	struct nd_namespace_common *ndns = nd_pfn->ndns;
-	struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
-	struct nd_namespace_io *nsio;
 	u64 checksum, offset;
+	struct nd_namespace_io *nsio;
+	struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
+	struct nd_namespace_common *ndns = nd_pfn->ndns;
+	const u8 *parent_uuid = nd_dev_to_uuid(&ndns->dev);
 
 	if (!pfn_sb || !ndns)
 		return -ENODEV;
@@ -241,10 +340,6 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn)
 	if (!is_nd_pmem(nd_pfn->dev.parent))
 		return -ENODEV;
 
-	/* section alignment for simple hotplug */
-	if (nvdimm_namespace_capacity(ndns) < ND_PFN_ALIGN)
-		return -ENODEV;
-
 	if (nvdimm_read_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb)))
 		return -ENXIO;
 
@@ -257,12 +352,18 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn)
 		return -ENODEV;
 	pfn_sb->checksum = cpu_to_le64(checksum);
 
+	if (memcmp(pfn_sb->parent_uuid, parent_uuid, 16) != 0)
+		return -ENODEV;
+
+	if (__le16_to_cpu(pfn_sb->version_minor) < 1) {
+		pfn_sb->start_pad = 0;
+		pfn_sb->end_trunc = 0;
+	}
+
 	switch (le32_to_cpu(pfn_sb->mode)) {
 	case PFN_MODE_RAM:
-		break;
 	case PFN_MODE_PMEM:
-		/* TODO: allocate from PMEM support */
-		return -ENOTTY;
+		break;
 	default:
 		return -ENXIO;
 	}
@@ -278,6 +379,12 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn)
 		return -EINVAL;
 	}
 
+	if (nd_pfn->align > nvdimm_namespace_capacity(ndns)) {
+		dev_err(&nd_pfn->dev, "alignment: %lx exceeds capacity %llx\n",
+				nd_pfn->align, nvdimm_namespace_capacity(ndns));
+		return -EINVAL;
+	}
+
 	/*
 	 * These warnings are verbose because they can only trigger in
 	 * the case where the physical address alignment of the
 	 * namespace has changed since the pfn superblock was
 	 * established.
 	 */
 	offset = le64_to_cpu(pfn_sb->dataoff);
 	nsio = to_nd_namespace_io(&ndns->dev);
-	if (nsio->res.start & ND_PFN_MASK) {
-		dev_err(&nd_pfn->dev,
-				"init failed: %s not section aligned\n",
-				dev_name(&ndns->dev));
-		return -EBUSY;
-	} else if (offset >= resource_size(&nsio->res)) {
+	if (offset >= resource_size(&nsio->res)) {
 		dev_err(&nd_pfn->dev, "pfn array size exceeds capacity of %s\n",
 				dev_name(&ndns->dev));
 		return -EBUSY;
 	}
 
+	nd_pfn->align = 1UL << ilog2(offset);
+	if (!is_power_of_2(offset) || offset < PAGE_SIZE) {
+		dev_err(&nd_pfn->dev, "bad offset: %#llx dax disabled\n",
+				offset);
+		return -ENXIO;
+	}
+
 	return 0;
 }
 EXPORT_SYMBOL(nd_pfn_validate);
@@ -313,7 +422,7 @@ int nd_pfn_probe(struct nd_namespace_common *ndns, void *drvdata)
 		return -ENODEV;
 
 	nvdimm_bus_lock(&ndns->dev);
-	dev = __nd_pfn_create(nd_region, NULL, PFN_MODE_NONE, ndns);
+	dev = __nd_pfn_create(nd_region, ndns);
 	nvdimm_bus_unlock(&ndns->dev);
 	if (!dev)
 		return -ENOMEM;
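A quick sanity check on the minor-version-1 superblock layout above: adding two __le32 fields shrinks the padding by 8 bytes (4012 to 4004) so the info block stays exactly 4K. The mock-up below is a hedged, standalone approximation using fixed-width stand-ins for the __le types; the leading signature/uuid/parent_uuid fields are taken from their uses elsewhere in this patch, and field offsets assume the natural alignment the real struct also relies on.

    #include <assert.h>
    #include <stdint.h>

    struct pfn_sb_v1_mock {
            uint8_t  signature[16];
            uint8_t  uuid[16];
            uint8_t  parent_uuid[16];
            uint32_t flags;
            uint16_t version_major;
            uint16_t version_minor;
            uint64_t dataoff;
            uint64_t npfns;
            uint32_t mode;
            uint32_t start_pad;
            uint32_t end_trunc;
            uint8_t  padding[4004];
            uint64_t checksum;
    };

    int main(void)
    {
            /* 16+16+16+4+2+2+8+8+4+4+4+4004+8 == 4096 */
            assert(sizeof(struct pfn_sb_v1_mock) == 4096);
            return 0;
    }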
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 8ee79893d2f5..ca5721c306bb 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -21,9 +21,11 @@
 #include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/module.h>
-#include <linux/memory_hotplug.h>
 #include <linux/moduleparam.h>
+#include <linux/badblocks.h>
+#include <linux/memremap.h>
 #include <linux/vmalloc.h>
+#include <linux/pfn_t.h>
 #include <linux/slab.h>
 #include <linux/pmem.h>
 #include <linux/nd.h>
@@ -39,33 +41,83 @@ struct pmem_device {
 	phys_addr_t		phys_addr;
 	/* when non-zero this device is hosting a 'pfn' instance */
 	phys_addr_t		data_offset;
+	u64			pfn_flags;
 	void __pmem		*virt_addr;
+	/* immutable base size of the namespace */
 	size_t			size;
+	/* trim size when namespace capacity has been section aligned */
+	u32			pfn_pad;
+	struct badblocks	bb;
 };
 
-static int pmem_major;
+static bool is_bad_pmem(struct badblocks *bb, sector_t sector, unsigned int len)
+{
+	if (bb->count) {
+		sector_t first_bad;
+		int num_bad;
+
+		return !!badblocks_check(bb, sector, len / 512, &first_bad,
+				&num_bad);
+	}
+
+	return false;
+}
 
-static void pmem_do_bvec(struct pmem_device *pmem, struct page *page,
+static void pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset,
+		unsigned int len)
+{
+	struct device *dev = disk_to_dev(pmem->pmem_disk);
+	sector_t sector;
+	long cleared;
+
+	sector = (offset - pmem->data_offset) / 512;
+	cleared = nvdimm_clear_poison(dev, pmem->phys_addr + offset, len);
+
+	if (cleared > 0 && cleared / 512) {
+		dev_dbg(dev, "%s: %llx clear %ld sector%s\n",
+				__func__, (unsigned long long) sector,
+				cleared / 512, cleared / 512 > 1 ? "s" : "");
+		badblocks_clear(&pmem->bb, sector, cleared / 512);
+	}
+	invalidate_pmem(pmem->virt_addr + offset, len);
+}
+
+static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
 		unsigned int len, unsigned int off, int rw,
 		sector_t sector)
 {
+	int rc = 0;
+	bool bad_pmem = false;
 	void *mem = kmap_atomic(page);
 	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
 	void __pmem *pmem_addr = pmem->virt_addr + pmem_off;
 
+	if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
+		bad_pmem = true;
+
 	if (rw == READ) {
-		memcpy_from_pmem(mem + off, pmem_addr, len);
-		flush_dcache_page(page);
+		if (unlikely(bad_pmem))
+			rc = -EIO;
+		else {
+			memcpy_from_pmem(mem + off, pmem_addr, len);
+			flush_dcache_page(page);
+		}
 	} else {
 		flush_dcache_page(page);
 		memcpy_to_pmem(pmem_addr, mem + off, len);
+		if (unlikely(bad_pmem)) {
+			pmem_clear_poison(pmem, pmem_off, len);
+			memcpy_to_pmem(pmem_addr, mem + off, len);
+		}
 	}
 
 	kunmap_atomic(mem);
+	return rc;
 }
 
 static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
 {
+	int rc = 0;
 	bool do_acct;
 	unsigned long start;
 	struct bio_vec bvec;
@@ -74,9 +126,15 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
 	struct pmem_device *pmem = bdev->bd_disk->private_data;
 
 	do_acct = nd_iostat_start(bio, &start);
-	bio_for_each_segment(bvec, bio, iter)
-		pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len, bvec.bv_offset,
-				bio_data_dir(bio), iter.bi_sector);
+	bio_for_each_segment(bvec, bio, iter) {
+		rc = pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len,
+				bvec.bv_offset, bio_data_dir(bio),
+				iter.bi_sector);
+		if (rc) {
+			bio->bi_error = rc;
+			break;
+		}
+	}
 	if (do_acct)
 		nd_iostat_end(bio, start);
 
@@ -91,25 +149,34 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
 		       struct page *page, int rw)
 {
 	struct pmem_device *pmem = bdev->bd_disk->private_data;
+	int rc;
 
-	pmem_do_bvec(pmem, page, PAGE_CACHE_SIZE, 0, rw, sector);
+	rc = pmem_do_bvec(pmem, page, PAGE_CACHE_SIZE, 0, rw, sector);
 	if (rw & WRITE)
 		wmb_pmem();
-	page_endio(page, rw & WRITE, 0);
 
-	return 0;
+	/*
+	 * The ->rw_page interface is subtle and tricky.  The core
+	 * retries on any error, so we can only invoke page_endio() in
+	 * the successful completion case.  Otherwise, we'll see crashes
+	 * caused by double completion.
+	 */
+	if (rc == 0)
+		page_endio(page, rw & WRITE, 0);
+
+	return rc;
 }
 
 static long pmem_direct_access(struct block_device *bdev, sector_t sector,
-		      void __pmem **kaddr, unsigned long *pfn)
+		      void __pmem **kaddr, pfn_t *pfn)
 {
 	struct pmem_device *pmem = bdev->bd_disk->private_data;
 	resource_size_t offset = sector * 512 + pmem->data_offset;
 
 	*kaddr = pmem->virt_addr + offset;
-	*pfn = (pmem->phys_addr + offset) >> PAGE_SHIFT;
+	*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);
 
-	return pmem->size - offset;
+	return pmem->size - pmem->pfn_pad - offset;
 }
 
 static const struct block_device_operations pmem_fops = {
@@ -123,6 +190,7 @@ static struct pmem_device *pmem_alloc(struct device *dev,
 		struct resource *res, int id)
 {
 	struct pmem_device *pmem;
+	struct request_queue *q;
 
 	pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
 	if (!pmem)
@@ -140,16 +208,26 @@ static struct pmem_device *pmem_alloc(struct device *dev,
 		return ERR_PTR(-EBUSY);
 	}
 
-	if (pmem_should_map_pages(dev))
-		pmem->virt_addr = (void __pmem *) devm_memremap_pages(dev, res);
-	else
+	q = blk_alloc_queue_node(GFP_KERNEL, dev_to_node(dev));
+	if (!q)
+		return ERR_PTR(-ENOMEM);
+
+	pmem->pfn_flags = PFN_DEV;
+	if (pmem_should_map_pages(dev)) {
+		pmem->virt_addr = (void __pmem *) devm_memremap_pages(dev, res,
+				&q->q_usage_counter, NULL);
+		pmem->pfn_flags |= PFN_MAP;
+	} else
 		pmem->virt_addr = (void __pmem *) devm_memremap(dev,
 				pmem->phys_addr, pmem->size,
 				ARCH_MEMREMAP_PMEM);
 
-	if (IS_ERR(pmem->virt_addr))
+	if (IS_ERR(pmem->virt_addr)) {
+		blk_cleanup_queue(q);
 		return (void __force *) pmem->virt_addr;
+	}
 
+	pmem->pmem_queue = q;
 	return pmem;
 }
 
@@ -169,10 +247,6 @@ static int pmem_attach_disk(struct device *dev,
 	int nid = dev_to_node(dev);
 	struct gendisk *disk;
 
-	pmem->pmem_queue = blk_alloc_queue_node(GFP_KERNEL, nid);
-	if (!pmem->pmem_queue)
-		return -ENOMEM;
-
 	blk_queue_make_request(pmem->pmem_queue, pmem_make_request);
 	blk_queue_physical_block_size(pmem->pmem_queue, PAGE_SIZE);
 	blk_queue_max_hw_sectors(pmem->pmem_queue, UINT_MAX);
@@ -185,17 +259,21 @@ static int pmem_attach_disk(struct device *dev,
 		return -ENOMEM;
 	}
 
-	disk->major		= pmem_major;
-	disk->first_minor	= 0;
 	disk->fops		= &pmem_fops;
 	disk->private_data	= pmem;
 	disk->queue		= pmem->pmem_queue;
 	disk->flags		= GENHD_FL_EXT_DEVT;
 	nvdimm_namespace_disk_name(ndns, disk->disk_name);
 	disk->driverfs_dev = dev;
-	set_capacity(disk, (pmem->size - pmem->data_offset) / 512);
+	set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
+			/ 512);
 	pmem->pmem_disk = disk;
+	devm_exit_badblocks(dev, &pmem->bb);
+	if (devm_init_badblocks(dev, &pmem->bb))
+		return -ENOMEM;
+	nvdimm_namespace_add_poison(ndns, &pmem->bb, pmem->data_offset);
 
+	disk->bb = &pmem->bb;
 	add_disk(disk);
 	revalidate_disk(disk);
 
@@ -212,9 +290,13 @@ static int pmem_rw_bytes(struct nd_namespace_common *ndns,
 		return -EFAULT;
 	}
 
-	if (rw == READ)
+	if (rw == READ) {
+		unsigned int sz_align = ALIGN(size + (offset & (512 - 1)), 512);
+
+		if (unlikely(is_bad_pmem(&pmem->bb, offset / 512, sz_align)))
+			return -EIO;
 		memcpy_from_pmem(buf, pmem->virt_addr + offset, size);
-	else {
+	} else {
 		memcpy_to_pmem(pmem->virt_addr + offset, buf, size);
 		wmb_pmem();
 	}
@@ -227,6 +309,9 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
 	struct nd_pfn_sb *pfn_sb = kzalloc(sizeof(*pfn_sb), GFP_KERNEL);
 	struct pmem_device *pmem = dev_get_drvdata(&nd_pfn->dev);
 	struct nd_namespace_common *ndns = nd_pfn->ndns;
+	u32 start_pad = 0, end_trunc = 0;
+	resource_size_t start, size;
+	struct nd_namespace_io *nsio;
 	struct nd_region *nd_region;
 	unsigned long npfns;
 	phys_addr_t offset;
@@ -238,14 +323,11 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
 
 	nd_pfn->pfn_sb = pfn_sb;
 	rc = nd_pfn_validate(nd_pfn);
-	if (rc == 0 || rc == -EBUSY)
+	if (rc == -ENODEV)
+		/* no info block, do init */;
+	else
 		return rc;
 
-	/* section alignment for simple hotplug */
-	if (nvdimm_namespace_capacity(ndns) < ND_PFN_ALIGN
-			|| pmem->phys_addr & ND_PFN_MASK)
-		return -ENODEV;
-
 	nd_region = to_nd_region(nd_pfn->dev.parent);
 	if (nd_region->ro) {
 		dev_info(&nd_pfn->dev,
@@ -255,27 +337,66 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
 	}
 
 	memset(pfn_sb, 0, sizeof(*pfn_sb));
-	npfns = (pmem->size - SZ_8K) / SZ_4K;
+
+	/*
+	 * Check if pmem collides with 'System RAM' when section aligned and
+	 * trim it accordingly
+	 */
+	nsio = to_nd_namespace_io(&ndns->dev);
+	start = PHYS_SECTION_ALIGN_DOWN(nsio->res.start);
+	size = resource_size(&nsio->res);
+	if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
+				IORES_DESC_NONE) == REGION_MIXED) {
+
+		start = nsio->res.start;
+		start_pad = PHYS_SECTION_ALIGN_UP(start) - start;
+	}
+
+	start = nsio->res.start;
+	size = PHYS_SECTION_ALIGN_UP(start + size) - start;
+	if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
+				IORES_DESC_NONE) == REGION_MIXED) {
+		size = resource_size(&nsio->res);
+		end_trunc = start + size - PHYS_SECTION_ALIGN_DOWN(start + size);
+	}
+
+	if (start_pad + end_trunc)
+		dev_info(&nd_pfn->dev, "%s section collision, truncate %d bytes\n",
+				dev_name(&ndns->dev), start_pad + end_trunc);
+
 	/*
 	 * Note, we use 64 here for the standard size of struct page,
 	 * debugging options may cause it to be larger in which case the
 	 * implementation will limit the pfns advertised through
 	 * ->direct_access() to those that are included in the memmap.
 	 */
+	start += start_pad;
+	npfns = (pmem->size - start_pad - end_trunc - SZ_8K) / SZ_4K;
 	if (nd_pfn->mode == PFN_MODE_PMEM)
-		offset = ALIGN(SZ_8K + 64 * npfns, PMD_SIZE);
+		offset = ALIGN(start + SZ_8K + 64 * npfns, nd_pfn->align)
+			- start;
 	else if (nd_pfn->mode == PFN_MODE_RAM)
-		offset = SZ_8K;
+		offset = ALIGN(start + SZ_8K, nd_pfn->align) - start;
 	else
 		goto err;
 
-	npfns = (pmem->size - offset) / SZ_4K;
+	if (offset + start_pad + end_trunc >= pmem->size) {
+		dev_err(&nd_pfn->dev, "%s unable to satisfy requested alignment\n",
+				dev_name(&ndns->dev));
+		goto err;
+	}
+
+	npfns = (pmem->size - offset - start_pad - end_trunc) / SZ_4K;
 	pfn_sb->mode = cpu_to_le32(nd_pfn->mode);
 	pfn_sb->dataoff = cpu_to_le64(offset);
 	pfn_sb->npfns = cpu_to_le64(npfns);
 	memcpy(pfn_sb->signature, PFN_SIG, PFN_SIG_LEN);
 	memcpy(pfn_sb->uuid, nd_pfn->uuid, 16);
+	memcpy(pfn_sb->parent_uuid, nd_dev_to_uuid(&ndns->dev), 16);
 	pfn_sb->version_major = cpu_to_le16(1);
+	pfn_sb->version_minor = cpu_to_le16(1);
+	pfn_sb->start_pad = cpu_to_le32(start_pad);
+	pfn_sb->end_trunc = cpu_to_le32(end_trunc);
 	checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
 	pfn_sb->checksum = cpu_to_le64(checksum);
 
@@ -306,60 +427,85 @@ static int nvdimm_namespace_detach_pfn(struct nd_namespace_common *ndns)
 	return 0;
 }
 
-static int nvdimm_namespace_attach_pfn(struct nd_namespace_common *ndns)
+/*
+ * We hotplug memory at section granularity, pad the reserved area from
+ * the previous section base to the namespace base address.
+ */
+static unsigned long init_altmap_base(resource_size_t base)
 {
-	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
-	struct nd_pfn *nd_pfn = to_nd_pfn(ndns->claim);
-	struct device *dev = &nd_pfn->dev;
-	struct vmem_altmap *altmap;
-	struct nd_region *nd_region;
-	struct nd_pfn_sb *pfn_sb;
-	struct pmem_device *pmem;
-	phys_addr_t offset;
-	int rc;
+	unsigned long base_pfn = PHYS_PFN(base);
 
-	if (!nd_pfn->uuid || !nd_pfn->ndns)
-		return -ENODEV;
+	return PFN_SECTION_ALIGN_DOWN(base_pfn);
+}
 
-	nd_region = to_nd_region(dev->parent);
-	rc = nd_pfn_init(nd_pfn);
-	if (rc)
-		return rc;
+static unsigned long init_altmap_reserve(resource_size_t base)
+{
+	unsigned long reserve = PHYS_PFN(SZ_8K);
+	unsigned long base_pfn = PHYS_PFN(base);
 
-	if (PAGE_SIZE != SZ_4K) {
-		dev_err(dev, "only supported on systems with 4K PAGE_SIZE\n");
-		return -ENXIO;
-	}
-	if (nsio->res.start & ND_PFN_MASK) {
-		dev_err(dev, "%s not memory hotplug section aligned\n",
-				dev_name(&ndns->dev));
-		return -ENXIO;
-	}
+	reserve += base_pfn - PFN_SECTION_ALIGN_DOWN(base_pfn);
+	return reserve;
+}
+
+static int __nvdimm_namespace_attach_pfn(struct nd_pfn *nd_pfn)
+{
+	int rc;
+	struct resource res;
+	struct request_queue *q;
+	struct pmem_device *pmem;
+	struct vmem_altmap *altmap;
+	struct device *dev = &nd_pfn->dev;
+	struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
+	struct nd_namespace_common *ndns = nd_pfn->ndns;
+	u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
+	u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
+	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
+	resource_size_t base = nsio->res.start + start_pad;
+	struct vmem_altmap __altmap = {
+		.base_pfn = init_altmap_base(base),
+		.reserve = init_altmap_reserve(base),
+	};
 
-	pfn_sb = nd_pfn->pfn_sb;
-	offset = le64_to_cpu(pfn_sb->dataoff);
+	pmem = dev_get_drvdata(dev);
+	pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
+	pmem->pfn_pad = start_pad + end_trunc;
 	nd_pfn->mode = le32_to_cpu(nd_pfn->pfn_sb->mode);
 	if (nd_pfn->mode == PFN_MODE_RAM) {
-		if (offset != SZ_8K)
+		if (pmem->data_offset < SZ_8K)
 			return -EINVAL;
 		nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
 		altmap = NULL;
+	} else if (nd_pfn->mode == PFN_MODE_PMEM) {
+		nd_pfn->npfns = (pmem->size - pmem->pfn_pad - pmem->data_offset)
+			/ PAGE_SIZE;
+		if (le64_to_cpu(nd_pfn->pfn_sb->npfns) > nd_pfn->npfns)
+			dev_info(&nd_pfn->dev,
+					"number of pfns truncated from %lld to %ld\n",
+					le64_to_cpu(nd_pfn->pfn_sb->npfns),
+					nd_pfn->npfns);
+		altmap = & __altmap;
+		altmap->free = PHYS_PFN(pmem->data_offset - SZ_8K);
+		altmap->alloc = 0;
 	} else {
 		rc = -ENXIO;
 		goto err;
 	}
 
 	/* establish pfn range for lookup, and switch to direct map */
-	pmem = dev_get_drvdata(dev);
+	q = pmem->pmem_queue;
+	memcpy(&res, &nsio->res, sizeof(res));
+	res.start += start_pad;
+	res.end -= end_trunc;
 	devm_memunmap(dev, (void __force *) pmem->virt_addr);
-	pmem->virt_addr = (void __pmem *) devm_memremap_pages(dev, &nsio->res);
+	pmem->virt_addr = (void __pmem *) devm_memremap_pages(dev, &res,
+			&q->q_usage_counter, altmap);
+	pmem->pfn_flags |= PFN_MAP;
 	if (IS_ERR(pmem->virt_addr)) {
 		rc = PTR_ERR(pmem->virt_addr);
 		goto err;
 	}
 
 	/* attach pmem disk in "pfn-mode" */
-	pmem->data_offset = offset;
 	rc = pmem_attach_disk(dev, ndns, pmem);
 	if (rc)
 		goto err;
@@ -368,6 +514,22 @@ static int nvdimm_namespace_attach_pfn(struct nd_namespace_common *ndns)
  err:
 	nvdimm_namespace_detach_pfn(ndns);
 	return rc;
+
+}
+
+static int nvdimm_namespace_attach_pfn(struct nd_namespace_common *ndns)
+{
+	struct nd_pfn *nd_pfn = to_nd_pfn(ndns->claim);
+	int rc;
+
+	if (!nd_pfn->uuid || !nd_pfn->ndns)
+		return -ENODEV;
+
+	rc = nd_pfn_init(nd_pfn);
+	if (rc)
+		return rc;
+	/* we need a valid pfn_sb before we can init a vmem_altmap */
+	return __nvdimm_namespace_attach_pfn(nd_pfn);
 }
 
 static int nd_pmem_probe(struct device *dev)
@@ -389,20 +551,26 @@ static int nd_pmem_probe(struct device *dev)
 	pmem->ndns = ndns;
 	dev_set_drvdata(dev, pmem);
 	ndns->rw_bytes = pmem_rw_bytes;
+	if (devm_init_badblocks(dev, &pmem->bb))
+		return -ENOMEM;
+	nvdimm_namespace_add_poison(ndns, &pmem->bb, 0);
 
-	if (is_nd_btt(dev))
+	if (is_nd_btt(dev)) {
+		/* btt allocates its own request_queue */
+		blk_cleanup_queue(pmem->pmem_queue);
+		pmem->pmem_queue = NULL;
 		return nvdimm_namespace_attach_btt(ndns);
+	}
 
 	if (is_nd_pfn(dev))
 		return nvdimm_namespace_attach_pfn(ndns);
 
-	if (nd_btt_probe(ndns, pmem) == 0) {
-		/* we'll come back as btt-pmem */
-		return -ENXIO;
-	}
-
-	if (nd_pfn_probe(ndns, pmem) == 0) {
-		/* we'll come back as pfn-pmem */
+	if (nd_btt_probe(ndns, pmem) == 0 || nd_pfn_probe(ndns, pmem) == 0) {
+		/*
+		 * We'll come back as either btt-pmem, or pfn-pmem, so
+		 * drop the queue allocation for now.
+		 */
+		blk_cleanup_queue(pmem->pmem_queue);
 		return -ENXIO;
 	}
 
@@ -423,12 +591,27 @@ static int nd_pmem_remove(struct device *dev)
 	return 0;
 }
 
+static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
+{
+	struct pmem_device *pmem = dev_get_drvdata(dev);
+	struct nd_namespace_common *ndns = pmem->ndns;
+
+	if (event != NVDIMM_REVALIDATE_POISON)
+		return;
+
+	if (is_nd_btt(dev))
+		nvdimm_namespace_add_poison(ndns, &pmem->bb, 0);
+	else
+		nvdimm_namespace_add_poison(ndns, &pmem->bb, pmem->data_offset);
+}
+
 MODULE_ALIAS("pmem");
 MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_IO);
 MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM);
 static struct nd_device_driver nd_pmem_driver = {
 	.probe = nd_pmem_probe,
 	.remove = nd_pmem_remove,
+	.notify = nd_pmem_notify,
 	.drv = {
 		.name = "nd_pmem",
 	},
@@ -437,26 +620,13 @@ static struct nd_device_driver nd_pmem_driver = {
 
 static int __init pmem_init(void)
 {
-	int error;
-
-	pmem_major = register_blkdev(0, "pmem");
-	if (pmem_major < 0)
-		return pmem_major;
-
-	error = nd_driver_register(&nd_pmem_driver);
-	if (error) {
-		unregister_blkdev(pmem_major, "pmem");
-		return error;
-	}
-
-	return 0;
+	return nd_driver_register(&nd_pmem_driver);
 }
 module_init(pmem_init);
 
 static void pmem_exit(void)
 {
 	driver_unregister(&nd_pmem_driver.drv);
-	unregister_blkdev(pmem_major, "pmem");
 }
 module_exit(pmem_exit);
diff --git a/drivers/nvdimm/region.c b/drivers/nvdimm/region.c
index 7da63eac78ee..4b7715e29cff 100644
--- a/drivers/nvdimm/region.c
+++ b/drivers/nvdimm/region.c
@@ -93,9 +93,21 @@ static int nd_region_remove(struct device *dev)
 	return 0;
 }
 
+static int child_notify(struct device *dev, void *data)
+{
+	nd_device_notify(dev, *(enum nvdimm_event *) data);
+	return 0;
+}
+
+static void nd_region_notify(struct device *dev, enum nvdimm_event event)
+{
+	device_for_each_child(dev, &event, child_notify);
+}
+
 static struct nd_device_driver nd_region_driver = {
 	.probe = nd_region_probe,
 	.remove = nd_region_remove,
+	.notify = nd_region_notify,
 	.drv = {
 		.name = "nd_region",
 	},
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index 529f3f02e7b2..139bf71ca549 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -134,62 +134,6 @@ int nd_region_to_nstype(struct nd_region *nd_region)
 }
 EXPORT_SYMBOL(nd_region_to_nstype);
 
-static int is_uuid_busy(struct device *dev, void *data)
-{
-	struct nd_region *nd_region = to_nd_region(dev->parent);
-	u8 *uuid = data;
-
-	switch (nd_region_to_nstype(nd_region)) {
-	case ND_DEVICE_NAMESPACE_PMEM: {
-		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
-
-		if (!nspm->uuid)
-			break;
-		if (memcmp(uuid, nspm->uuid, NSLABEL_UUID_LEN) == 0)
-			return -EBUSY;
-		break;
-	}
-	case ND_DEVICE_NAMESPACE_BLK: {
-		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
-
-		if (!nsblk->uuid)
-			break;
-		if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) == 0)
-			return -EBUSY;
-		break;
-	}
-	default:
-		break;
-	}
-
-	return 0;
-}
-
-static int is_namespace_uuid_busy(struct device *dev, void *data)
-{
-	if (is_nd_pmem(dev) || is_nd_blk(dev))
-		return device_for_each_child(dev, data, is_uuid_busy);
-	return 0;
-}
-
-/**
- * nd_is_uuid_unique - verify that no other namespace has @uuid
- * @dev: any device on a nvdimm_bus
- * @uuid: uuid to check
- */
-bool nd_is_uuid_unique(struct device *dev, u8 *uuid)
-{
-	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
-
-	if (!nvdimm_bus)
-		return false;
-	WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm_bus->dev));
-	if (device_for_each_child(&nvdimm_bus->dev, uuid,
-				is_namespace_uuid_busy) != 0)
-		return false;
-	return true;
-}
-
 static ssize_t size_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
@@ -406,6 +350,9 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
 	struct nd_interleave_set *nd_set = nd_region->nd_set;
 	int type = nd_region_to_nstype(nd_region);
 
+	if (!is_nd_pmem(dev) && a == &dev_attr_pfn_seed.attr)
+		return 0;
+
 	if (a != &dev_attr_set_cookie.attr
 			&& a != &dev_attr_available_size.attr)
 		return a->mode;
@@ -487,6 +434,13 @@ static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
 			nd_region_create_blk_seed(nd_region);
 		nvdimm_bus_unlock(dev);
 	}
+	if (is_nd_pfn(dev) && probe) {
+		nd_region = to_nd_region(dev->parent);
+		nvdimm_bus_lock(dev);
+		if (nd_region->pfn_seed == dev)
+			nd_region_create_pfn_seed(nd_region);
+		nvdimm_bus_unlock(dev);
+	}
 }
 
 void nd_region_probe_success(struct nvdimm_bus *nvdimm_bus, struct device *dev)
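For reference, a standalone sketch (not part of the patch, and not kernel code) of the start_pad / end_trunc arithmetic that nd_pfn_init() performs with the PHYS_SECTION_ALIGN_* helpers. SECTION_SIZE here is an assumed stand-in for the x86_64 sparsemem section span (128MB); the kernel only applies the pad or trim when region_intersects() reports REGION_MIXED with 'System RAM'.

    #include <stdint.h>

    #define SECTION_SIZE (128ULL << 20)     /* assumed x86_64 section span */

    static uint64_t align_down(uint64_t x) { return x & ~(SECTION_SIZE - 1); }
    static uint64_t align_up(uint64_t x)   { return align_down(x + SECTION_SIZE - 1); }

    /* bytes of pad so the mapped range starts at the next section boundary */
    static uint64_t start_pad(uint64_t ns_start)
    {
            return align_up(ns_start) - ns_start;
    }

    /* bytes of trim so the mapped range ends at the previous section boundary */
    static uint64_t end_trunc(uint64_t ns_start, uint64_t ns_size)
    {
            uint64_t end = ns_start + ns_size;

            return end - align_down(end);
    }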