Diffstat (limited to 'drivers/nvdimm')
 drivers/nvdimm/Kconfig          |  12
 drivers/nvdimm/Makefile         |   4
 drivers/nvdimm/btt_devs.c       |  16
 drivers/nvdimm/bus.c            | 220
 drivers/nvdimm/core.c           |  10
 drivers/nvdimm/dimm_devs.c      | 138
 drivers/nvdimm/label.c          |   5
 drivers/nvdimm/namespace_devs.c |  76
 drivers/nvdimm/nd-core.h        | 125
 drivers/nvdimm/nd.h             |   4
 drivers/nvdimm/of_pmem.c        |   2
 drivers/nvdimm/pfn.h            |   5
 drivers/nvdimm/pfn_devs.c       |  64
 drivers/nvdimm/pmem.c           |  33
 drivers/nvdimm/region.c         |  24
 drivers/nvdimm/region_devs.c    |  95
 drivers/nvdimm/security.c       | 199
 17 files changed, 600 insertions(+), 432 deletions(-)
diff --git a/drivers/nvdimm/Kconfig b/drivers/nvdimm/Kconfig
index a5fde15e91d3..36af7af6b7cf 100644
--- a/drivers/nvdimm/Kconfig
+++ b/drivers/nvdimm/Kconfig
@@ -118,4 +118,16 @@ config NVDIMM_KEYS
depends on ENCRYPTED_KEYS
depends on (LIBNVDIMM=ENCRYPTED_KEYS) || LIBNVDIMM=m
+config NVDIMM_TEST_BUILD
+ tristate "Build the unit test core"
+ depends on m
+ depends on COMPILE_TEST && X86_64
+ default m if COMPILE_TEST
+ help
+ Build the core of the unit test infrastructure. The result of
+ this build is non-functional for unit test execution, but it
+ otherwise helps catch build errors induced by changes to the
+ core devm_memremap_pages() implementation and other
+ infrastructure.
+
endif
diff --git a/drivers/nvdimm/Makefile b/drivers/nvdimm/Makefile
index cefe233e0b52..29203f3d3069 100644
--- a/drivers/nvdimm/Makefile
+++ b/drivers/nvdimm/Makefile
@@ -29,3 +29,7 @@ libnvdimm-$(CONFIG_BTT) += btt_devs.o
libnvdimm-$(CONFIG_NVDIMM_PFN) += pfn_devs.o
libnvdimm-$(CONFIG_NVDIMM_DAX) += dax_devs.o
libnvdimm-$(CONFIG_NVDIMM_KEYS) += security.o
+
+TOOLS := ../../tools
+TEST_SRC := $(TOOLS)/testing/nvdimm/test
+obj-$(CONFIG_NVDIMM_TEST_BUILD) += $(TEST_SRC)/iomap.o
diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c
index 62d00fffa4af..3508a79110c7 100644
--- a/drivers/nvdimm/btt_devs.c
+++ b/drivers/nvdimm/btt_devs.c
@@ -62,14 +62,14 @@ static ssize_t sector_size_store(struct device *dev,
struct nd_btt *nd_btt = to_nd_btt(dev);
ssize_t rc;
- device_lock(dev);
+ nd_device_lock(dev);
nvdimm_bus_lock(dev);
rc = nd_size_select_store(dev, buf, &nd_btt->lbasize,
btt_lbasize_supported);
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
buf[len - 1] == '\n' ? "" : "\n");
nvdimm_bus_unlock(dev);
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc ? rc : len;
}
@@ -91,11 +91,11 @@ static ssize_t uuid_store(struct device *dev,
struct nd_btt *nd_btt = to_nd_btt(dev);
ssize_t rc;
- device_lock(dev);
+ nd_device_lock(dev);
rc = nd_uuid_store(dev, &nd_btt->uuid, buf, len);
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
buf[len - 1] == '\n' ? "" : "\n");
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc ? rc : len;
}
@@ -120,13 +120,13 @@ static ssize_t namespace_store(struct device *dev,
struct nd_btt *nd_btt = to_nd_btt(dev);
ssize_t rc;
- device_lock(dev);
+ nd_device_lock(dev);
nvdimm_bus_lock(dev);
rc = nd_namespace_store(dev, &nd_btt->ndns, buf, len);
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
buf[len - 1] == '\n' ? "" : "\n");
nvdimm_bus_unlock(dev);
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc;
}
@@ -138,14 +138,14 @@ static ssize_t size_show(struct device *dev,
struct nd_btt *nd_btt = to_nd_btt(dev);
ssize_t rc;
- device_lock(dev);
+ nd_device_lock(dev);
if (dev->driver)
rc = sprintf(buf, "%llu\n", nd_btt->size);
else {
/* no size to convey if the btt instance is disabled */
rc = -ENXIO;
}
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc;
}
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 2dca3034fee0..75a58a6e9615 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -26,7 +26,7 @@
int nvdimm_major;
static int nvdimm_bus_major;
-static struct class *nd_class;
+struct class *nd_class;
static DEFINE_IDA(nd_ida);
static int to_nd_device_type(struct device *dev)
@@ -73,7 +73,7 @@ static void nvdimm_bus_probe_end(struct nvdimm_bus *nvdimm_bus)
{
nvdimm_bus_lock(&nvdimm_bus->dev);
if (--nvdimm_bus->probe_active == 0)
- wake_up(&nvdimm_bus->probe_wait);
+ wake_up(&nvdimm_bus->wait);
nvdimm_bus_unlock(&nvdimm_bus->dev);
}
@@ -91,11 +91,13 @@ static int nvdimm_bus_probe(struct device *dev)
dev->driver->name, dev_name(dev));
nvdimm_bus_probe_start(nvdimm_bus);
+ debug_nvdimm_lock(dev);
rc = nd_drv->probe(dev);
- if (rc == 0)
- nd_region_probe_success(nvdimm_bus, dev);
- else
- nd_region_disable(nvdimm_bus, dev);
+ debug_nvdimm_unlock(dev);
+
+ if ((rc == 0 || rc == -EOPNOTSUPP) &&
+ dev->parent && is_nd_region(dev->parent))
+ nd_region_advance_seeds(to_nd_region(dev->parent), dev);
nvdimm_bus_probe_end(nvdimm_bus);
dev_dbg(&nvdimm_bus->dev, "END: %s.probe(%s) = %d\n", dev->driver->name,
@@ -113,9 +115,11 @@ static int nvdimm_bus_remove(struct device *dev)
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
int rc = 0;
- if (nd_drv->remove)
+ if (nd_drv->remove) {
+ debug_nvdimm_lock(dev);
rc = nd_drv->remove(dev);
- nd_region_disable(nvdimm_bus, dev);
+ debug_nvdimm_unlock(dev);
+ }
dev_dbg(&nvdimm_bus->dev, "%s.remove(%s) = %d\n", dev->driver->name,
dev_name(dev), rc);
@@ -140,7 +144,7 @@ static void nvdimm_bus_shutdown(struct device *dev)
void nd_device_notify(struct device *dev, enum nvdimm_event event)
{
- device_lock(dev);
+ nd_device_lock(dev);
if (dev->driver) {
struct nd_device_driver *nd_drv;
@@ -148,7 +152,7 @@ void nd_device_notify(struct device *dev, enum nvdimm_event event)
if (nd_drv->notify)
nd_drv->notify(dev, event);
}
- device_unlock(dev);
+ nd_device_unlock(dev);
}
EXPORT_SYMBOL(nd_device_notify);
@@ -296,7 +300,7 @@ static void nvdimm_bus_release(struct device *dev)
kfree(nvdimm_bus);
}
-static bool is_nvdimm_bus(struct device *dev)
+bool is_nvdimm_bus(struct device *dev)
{
return dev->release == nvdimm_bus_release;
}
@@ -341,7 +345,7 @@ struct nvdimm_bus *nvdimm_bus_register(struct device *parent,
return NULL;
INIT_LIST_HEAD(&nvdimm_bus->list);
INIT_LIST_HEAD(&nvdimm_bus->mapping_list);
- init_waitqueue_head(&nvdimm_bus->probe_wait);
+ init_waitqueue_head(&nvdimm_bus->wait);
nvdimm_bus->id = ida_simple_get(&nd_ida, 0, 0, GFP_KERNEL);
if (nvdimm_bus->id < 0) {
kfree(nvdimm_bus);
@@ -394,7 +398,7 @@ static int child_unregister(struct device *dev, void *data)
/* We are shutting down. Make state frozen artificially. */
nvdimm_bus_lock(dev);
- nvdimm->sec.state = NVDIMM_SECURITY_FROZEN;
+ set_bit(NVDIMM_SECURITY_FROZEN, &nvdimm->sec.flags);
if (test_and_clear_bit(NDD_WORK_PENDING, &nvdimm->flags))
dev_put = true;
nvdimm_bus_unlock(dev);
@@ -426,6 +430,9 @@ static int nd_bus_remove(struct device *dev)
list_del_init(&nvdimm_bus->list);
mutex_unlock(&nvdimm_bus_list_mutex);
+ wait_event(nvdimm_bus->wait,
+ atomic_read(&nvdimm_bus->ioctl_active) == 0);
+
nd_synchronize();
device_for_each_child(&nvdimm_bus->dev, NULL, child_unregister);
@@ -547,13 +554,38 @@ EXPORT_SYMBOL(nd_device_register);
void nd_device_unregister(struct device *dev, enum nd_async_mode mode)
{
+ bool killed;
+
switch (mode) {
case ND_ASYNC:
+ /*
+ * In the async case this is being triggered with the
+ * device lock held and the unregistration work needs to
+ * be moved out of line iff this thread has won the
+ * race to schedule the deletion.
+ */
+ if (!kill_device(dev))
+ return;
+
get_device(dev);
async_schedule_domain(nd_async_device_unregister, dev,
&nd_async_domain);
break;
case ND_SYNC:
+ /*
+ * In the sync case the device is being unregistered due
+ * to a state change of the parent. Claim the kill state
+ * to synchronize against other unregistration requests,
+ * or otherwise let the async path handle it if the
+ * unregistration was already queued.
+ */
+ nd_device_lock(dev);
+ killed = kill_device(dev);
+ nd_device_unlock(dev);
+
+ if (!killed)
+ return;
+
nd_synchronize();
device_unregister(dev);
break;
@@ -859,10 +891,12 @@ void wait_nvdimm_bus_probe_idle(struct device *dev)
do {
if (nvdimm_bus->probe_active == 0)
break;
- nvdimm_bus_unlock(&nvdimm_bus->dev);
- wait_event(nvdimm_bus->probe_wait,
+ nvdimm_bus_unlock(dev);
+ nd_device_unlock(dev);
+ wait_event(nvdimm_bus->wait,
nvdimm_bus->probe_active == 0);
- nvdimm_bus_lock(&nvdimm_bus->dev);
+ nd_device_lock(dev);
+ nvdimm_bus_lock(dev);
} while (true);
}
@@ -945,20 +979,19 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
int read_only, unsigned int ioctl_cmd, unsigned long arg)
{
struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
- static char out_env[ND_CMD_MAX_ENVELOPE];
- static char in_env[ND_CMD_MAX_ENVELOPE];
const struct nd_cmd_desc *desc = NULL;
unsigned int cmd = _IOC_NR(ioctl_cmd);
struct device *dev = &nvdimm_bus->dev;
void __user *p = (void __user *) arg;
+ char *out_env = NULL, *in_env = NULL;
const char *cmd_name, *dimm_name;
u32 in_len = 0, out_len = 0;
unsigned int func = cmd;
unsigned long cmd_mask;
struct nd_cmd_pkg pkg;
int rc, i, cmd_rc;
+ void *buf = NULL;
u64 buf_len = 0;
- void *buf;
if (nvdimm) {
desc = nd_cmd_dimm_desc(cmd);
@@ -989,7 +1022,7 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
case ND_CMD_ARS_START:
case ND_CMD_CLEAR_ERROR:
case ND_CMD_CALL:
- dev_dbg(&nvdimm_bus->dev, "'%s' command while read-only.\n",
+ dev_dbg(dev, "'%s' command while read-only.\n",
nvdimm ? nvdimm_cmd_name(cmd)
: nvdimm_bus_cmd_name(cmd));
return -EPERM;
@@ -998,6 +1031,9 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
}
/* process an input envelope */
+ in_env = kzalloc(ND_CMD_MAX_ENVELOPE, GFP_KERNEL);
+ if (!in_env)
+ return -ENOMEM;
for (i = 0; i < desc->in_num; i++) {
u32 in_size, copy;
@@ -1005,14 +1041,17 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
if (in_size == UINT_MAX) {
dev_err(dev, "%s:%s unknown input size cmd: %s field: %d\n",
__func__, dimm_name, cmd_name, i);
- return -ENXIO;
+ rc = -ENXIO;
+ goto out;
}
- if (in_len < sizeof(in_env))
- copy = min_t(u32, sizeof(in_env) - in_len, in_size);
+ if (in_len < ND_CMD_MAX_ENVELOPE)
+ copy = min_t(u32, ND_CMD_MAX_ENVELOPE - in_len, in_size);
else
copy = 0;
- if (copy && copy_from_user(&in_env[in_len], p + in_len, copy))
- return -EFAULT;
+ if (copy && copy_from_user(&in_env[in_len], p + in_len, copy)) {
+ rc = -EFAULT;
+ goto out;
+ }
in_len += in_size;
}
@@ -1024,6 +1063,12 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
}
/* process an output envelope */
+ out_env = kzalloc(ND_CMD_MAX_ENVELOPE, GFP_KERNEL);
+ if (!out_env) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
for (i = 0; i < desc->out_num; i++) {
u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i,
(u32 *) in_env, (u32 *) out_env, 0);
@@ -1032,15 +1077,18 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
if (out_size == UINT_MAX) {
dev_dbg(dev, "%s unknown output size cmd: %s field: %d\n",
dimm_name, cmd_name, i);
- return -EFAULT;
+ rc = -EFAULT;
+ goto out;
}
- if (out_len < sizeof(out_env))
- copy = min_t(u32, sizeof(out_env) - out_len, out_size);
+ if (out_len < ND_CMD_MAX_ENVELOPE)
+ copy = min_t(u32, ND_CMD_MAX_ENVELOPE - out_len, out_size);
else
copy = 0;
if (copy && copy_from_user(&out_env[out_len],
- p + in_len + out_len, copy))
- return -EFAULT;
+ p + in_len + out_len, copy)) {
+ rc = -EFAULT;
+ goto out;
+ }
out_len += out_size;
}
@@ -1048,19 +1096,23 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
if (buf_len > ND_IOCTL_MAX_BUFLEN) {
dev_dbg(dev, "%s cmd: %s buf_len: %llu > %d\n", dimm_name,
cmd_name, buf_len, ND_IOCTL_MAX_BUFLEN);
- return -EINVAL;
+ rc = -EINVAL;
+ goto out;
}
buf = vmalloc(buf_len);
- if (!buf)
- return -ENOMEM;
+ if (!buf) {
+ rc = -ENOMEM;
+ goto out;
+ }
if (copy_from_user(buf, p, buf_len)) {
rc = -EFAULT;
goto out;
}
- nvdimm_bus_lock(&nvdimm_bus->dev);
+ nd_device_lock(dev);
+ nvdimm_bus_lock(dev);
rc = nd_cmd_clear_to_send(nvdimm_bus, nvdimm, func, buf);
if (rc)
goto out_unlock;
@@ -1075,39 +1127,24 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
nvdimm_account_cleared_poison(nvdimm_bus, clear_err->address,
clear_err->cleared);
}
- nvdimm_bus_unlock(&nvdimm_bus->dev);
if (copy_to_user(p, buf, buf_len))
rc = -EFAULT;
- vfree(buf);
- return rc;
-
- out_unlock:
- nvdimm_bus_unlock(&nvdimm_bus->dev);
- out:
+out_unlock:
+ nvdimm_bus_unlock(dev);
+ nd_device_unlock(dev);
+out:
+ kfree(in_env);
+ kfree(out_env);
vfree(buf);
return rc;
}
-static long nd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- long id = (long) file->private_data;
- int rc = -ENXIO, ro;
- struct nvdimm_bus *nvdimm_bus;
-
- ro = ((file->f_flags & O_ACCMODE) == O_RDONLY);
- mutex_lock(&nvdimm_bus_list_mutex);
- list_for_each_entry(nvdimm_bus, &nvdimm_bus_list, list) {
- if (nvdimm_bus->id == id) {
- rc = __nd_ioctl(nvdimm_bus, NULL, ro, cmd, arg);
- break;
- }
- }
- mutex_unlock(&nvdimm_bus_list_mutex);
-
- return rc;
-}
+enum nd_ioctl_mode {
+ BUS_IOCTL,
+ DIMM_IOCTL,
+};
static int match_dimm(struct device *dev, void *data)
{
@@ -1122,31 +1159,62 @@ static int match_dimm(struct device *dev, void *data)
return 0;
}
-static long nvdimm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+static long nd_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
+ enum nd_ioctl_mode mode)
+
{
- int rc = -ENXIO, ro;
- struct nvdimm_bus *nvdimm_bus;
+ struct nvdimm_bus *nvdimm_bus, *found = NULL;
+ long id = (long) file->private_data;
+ struct nvdimm *nvdimm = NULL;
+ int rc, ro;
ro = ((file->f_flags & O_ACCMODE) == O_RDONLY);
mutex_lock(&nvdimm_bus_list_mutex);
list_for_each_entry(nvdimm_bus, &nvdimm_bus_list, list) {
- struct device *dev = device_find_child(&nvdimm_bus->dev,
- file->private_data, match_dimm);
- struct nvdimm *nvdimm;
-
- if (!dev)
- continue;
+ if (mode == DIMM_IOCTL) {
+ struct device *dev;
+
+ dev = device_find_child(&nvdimm_bus->dev,
+ file->private_data, match_dimm);
+ if (!dev)
+ continue;
+ nvdimm = to_nvdimm(dev);
+ found = nvdimm_bus;
+ } else if (nvdimm_bus->id == id) {
+ found = nvdimm_bus;
+ }
- nvdimm = to_nvdimm(dev);
- rc = __nd_ioctl(nvdimm_bus, nvdimm, ro, cmd, arg);
- put_device(dev);
- break;
+ if (found) {
+ atomic_inc(&nvdimm_bus->ioctl_active);
+ break;
+ }
}
mutex_unlock(&nvdimm_bus_list_mutex);
+ if (!found)
+ return -ENXIO;
+
+ nvdimm_bus = found;
+ rc = __nd_ioctl(nvdimm_bus, nvdimm, ro, cmd, arg);
+
+ if (nvdimm)
+ put_device(&nvdimm->dev);
+ if (atomic_dec_and_test(&nvdimm_bus->ioctl_active))
+ wake_up(&nvdimm_bus->wait);
+
return rc;
}
+static long bus_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ return nd_ioctl(file, cmd, arg, BUS_IOCTL);
+}
+
+static long dimm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ return nd_ioctl(file, cmd, arg, DIMM_IOCTL);
+}
+
static int nd_open(struct inode *inode, struct file *file)
{
long minor = iminor(inode);
@@ -1158,16 +1226,16 @@ static int nd_open(struct inode *inode, struct file *file)
static const struct file_operations nvdimm_bus_fops = {
.owner = THIS_MODULE,
.open = nd_open,
- .unlocked_ioctl = nd_ioctl,
- .compat_ioctl = nd_ioctl,
+ .unlocked_ioctl = bus_ioctl,
+ .compat_ioctl = bus_ioctl,
.llseek = noop_llseek,
};
static const struct file_operations nvdimm_fops = {
.owner = THIS_MODULE,
.open = nd_open,
- .unlocked_ioctl = nvdimm_ioctl,
- .compat_ioctl = nvdimm_ioctl,
+ .unlocked_ioctl = dimm_ioctl,
+ .compat_ioctl = dimm_ioctl,
.llseek = noop_llseek,
};
diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c
index 5e1f060547bf..9204f1e9fd14 100644
--- a/drivers/nvdimm/core.c
+++ b/drivers/nvdimm/core.c
@@ -246,7 +246,7 @@ static int nd_uuid_parse(struct device *dev, u8 *uuid_out, const char *buf,
*
* Enforce that uuids can only be changed while the device is disabled
* (driver detached)
- * LOCKING: expects device_lock() is held on entry
+ * LOCKING: expects nd_device_lock() is held on entry
*/
int nd_uuid_store(struct device *dev, u8 **uuid_out, const char *buf,
size_t len)
@@ -347,15 +347,15 @@ static DEVICE_ATTR_RO(provider);
static int flush_namespaces(struct device *dev, void *data)
{
- device_lock(dev);
- device_unlock(dev);
+ nd_device_lock(dev);
+ nd_device_unlock(dev);
return 0;
}
static int flush_regions_dimms(struct device *dev, void *data)
{
- device_lock(dev);
- device_unlock(dev);
+ nd_device_lock(dev);
+ nd_device_unlock(dev);
device_for_each_child(dev, NULL, flush_namespaces);
return 0;
}
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index dfecd6e17043..196aa44c4936 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -372,106 +372,26 @@ __weak ssize_t security_show(struct device *dev,
{
struct nvdimm *nvdimm = to_nvdimm(dev);
- switch (nvdimm->sec.state) {
- case NVDIMM_SECURITY_DISABLED:
+ if (test_bit(NVDIMM_SECURITY_DISABLED, &nvdimm->sec.flags))
return sprintf(buf, "disabled\n");
- case NVDIMM_SECURITY_UNLOCKED:
+ if (test_bit(NVDIMM_SECURITY_UNLOCKED, &nvdimm->sec.flags))
return sprintf(buf, "unlocked\n");
- case NVDIMM_SECURITY_LOCKED:
+ if (test_bit(NVDIMM_SECURITY_LOCKED, &nvdimm->sec.flags))
return sprintf(buf, "locked\n");
- case NVDIMM_SECURITY_FROZEN:
- return sprintf(buf, "frozen\n");
- case NVDIMM_SECURITY_OVERWRITE:
+ if (test_bit(NVDIMM_SECURITY_OVERWRITE, &nvdimm->sec.flags))
return sprintf(buf, "overwrite\n");
- default:
- return -ENOTTY;
- }
-
return -ENOTTY;
}
-#define OPS \
- C( OP_FREEZE, "freeze", 1), \
- C( OP_DISABLE, "disable", 2), \
- C( OP_UPDATE, "update", 3), \
- C( OP_ERASE, "erase", 2), \
- C( OP_OVERWRITE, "overwrite", 2), \
- C( OP_MASTER_UPDATE, "master_update", 3), \
- C( OP_MASTER_ERASE, "master_erase", 2)
-#undef C
-#define C(a, b, c) a
-enum nvdimmsec_op_ids { OPS };
-#undef C
-#define C(a, b, c) { b, c }
-static struct {
- const char *name;
- int args;
-} ops[] = { OPS };
-#undef C
-
-#define SEC_CMD_SIZE 32
-#define KEY_ID_SIZE 10
-
-static ssize_t __security_store(struct device *dev, const char *buf, size_t len)
+static ssize_t frozen_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct nvdimm *nvdimm = to_nvdimm(dev);
- ssize_t rc;
- char cmd[SEC_CMD_SIZE+1], keystr[KEY_ID_SIZE+1],
- nkeystr[KEY_ID_SIZE+1];
- unsigned int key, newkey;
- int i;
- if (atomic_read(&nvdimm->busy))
- return -EBUSY;
-
- rc = sscanf(buf, "%"__stringify(SEC_CMD_SIZE)"s"
- " %"__stringify(KEY_ID_SIZE)"s"
- " %"__stringify(KEY_ID_SIZE)"s",
- cmd, keystr, nkeystr);
- if (rc < 1)
- return -EINVAL;
- for (i = 0; i < ARRAY_SIZE(ops); i++)
- if (sysfs_streq(cmd, ops[i].name))
- break;
- if (i >= ARRAY_SIZE(ops))
- return -EINVAL;
- if (ops[i].args > 1)
- rc = kstrtouint(keystr, 0, &key);
- if (rc >= 0 && ops[i].args > 2)
- rc = kstrtouint(nkeystr, 0, &newkey);
- if (rc < 0)
- return rc;
-
- if (i == OP_FREEZE) {
- dev_dbg(dev, "freeze\n");
- rc = nvdimm_security_freeze(nvdimm);
- } else if (i == OP_DISABLE) {
- dev_dbg(dev, "disable %u\n", key);
- rc = nvdimm_security_disable(nvdimm, key);
- } else if (i == OP_UPDATE) {
- dev_dbg(dev, "update %u %u\n", key, newkey);
- rc = nvdimm_security_update(nvdimm, key, newkey, NVDIMM_USER);
- } else if (i == OP_ERASE) {
- dev_dbg(dev, "erase %u\n", key);
- rc = nvdimm_security_erase(nvdimm, key, NVDIMM_USER);
- } else if (i == OP_OVERWRITE) {
- dev_dbg(dev, "overwrite %u\n", key);
- rc = nvdimm_security_overwrite(nvdimm, key);
- } else if (i == OP_MASTER_UPDATE) {
- dev_dbg(dev, "master_update %u %u\n", key, newkey);
- rc = nvdimm_security_update(nvdimm, key, newkey,
- NVDIMM_MASTER);
- } else if (i == OP_MASTER_ERASE) {
- dev_dbg(dev, "master_erase %u\n", key);
- rc = nvdimm_security_erase(nvdimm, key,
- NVDIMM_MASTER);
- } else
- return -EINVAL;
-
- if (rc == 0)
- rc = len;
- return rc;
+ return sprintf(buf, "%d\n", test_bit(NVDIMM_SECURITY_FROZEN,
+ &nvdimm->sec.flags));
}
+static DEVICE_ATTR_RO(frozen);
static ssize_t security_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
@@ -484,12 +404,12 @@ static ssize_t security_store(struct device *dev,
* done while probing is idle and the DIMM is not in active use
* in any region.
*/
- device_lock(dev);
+ nd_device_lock(dev);
nvdimm_bus_lock(dev);
wait_nvdimm_bus_probe_idle(dev);
- rc = __security_store(dev, buf, len);
+ rc = nvdimm_security_store(dev, buf, len);
nvdimm_bus_unlock(dev);
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc;
}
@@ -501,6 +421,7 @@ static struct attribute *nvdimm_attributes[] = {
&dev_attr_commands.attr,
&dev_attr_available_slots.attr,
&dev_attr_security.attr,
+ &dev_attr_frozen.attr,
NULL,
};
@@ -509,17 +430,24 @@ static umode_t nvdimm_visible(struct kobject *kobj, struct attribute *a, int n)
struct device *dev = container_of(kobj, typeof(*dev), kobj);
struct nvdimm *nvdimm = to_nvdimm(dev);
- if (a != &dev_attr_security.attr)
+ if (a != &dev_attr_security.attr && a != &dev_attr_frozen.attr)
return a->mode;
- if (nvdimm->sec.state < 0)
+ if (!nvdimm->sec.flags)
return 0;
- /* Are there any state mutation ops? */
- if (nvdimm->sec.ops->freeze || nvdimm->sec.ops->disable
- || nvdimm->sec.ops->change_key
- || nvdimm->sec.ops->erase
- || nvdimm->sec.ops->overwrite)
+
+ if (a == &dev_attr_security.attr) {
+ /* Are there any state mutation ops (make writable)? */
+ if (nvdimm->sec.ops->freeze || nvdimm->sec.ops->disable
+ || nvdimm->sec.ops->change_key
+ || nvdimm->sec.ops->erase
+ || nvdimm->sec.ops->overwrite)
+ return a->mode;
+ return 0444;
+ }
+
+ if (nvdimm->sec.ops->freeze)
return a->mode;
- return 0444;
+ return 0;
}
struct attribute_group nvdimm_attribute_group = {
@@ -569,8 +497,8 @@ struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus,
* attribute visibility.
*/
/* get security state and extended (master) state */
- nvdimm->sec.state = nvdimm_security_state(nvdimm, NVDIMM_USER);
- nvdimm->sec.ext_state = nvdimm_security_state(nvdimm, NVDIMM_MASTER);
+ nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
+ nvdimm->sec.ext_flags = nvdimm_security_flags(nvdimm, NVDIMM_MASTER);
nd_device_register(dev);
return nvdimm;
@@ -588,7 +516,7 @@ int nvdimm_security_setup_events(struct device *dev)
{
struct nvdimm *nvdimm = to_nvdimm(dev);
- if (nvdimm->sec.state < 0 || !nvdimm->sec.ops
+ if (!nvdimm->sec.flags || !nvdimm->sec.ops
|| !nvdimm->sec.ops->overwrite)
return 0;
nvdimm->sec.overwrite_state = sysfs_get_dirent(dev->kobj.sd, "security");
@@ -614,7 +542,7 @@ int nvdimm_security_freeze(struct nvdimm *nvdimm)
if (!nvdimm->sec.ops || !nvdimm->sec.ops->freeze)
return -EOPNOTSUPP;
- if (nvdimm->sec.state < 0)
+ if (!nvdimm->sec.flags)
return -EIO;
if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
@@ -623,7 +551,7 @@ int nvdimm_security_freeze(struct nvdimm *nvdimm)
}
rc = nvdimm->sec.ops->freeze(nvdimm);
- nvdimm->sec.state = nvdimm_security_state(nvdimm, NVDIMM_USER);
+ nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
return rc;
}
diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c
index 73e197babc2f..47a4828b8b31 100644
--- a/drivers/nvdimm/label.c
+++ b/drivers/nvdimm/label.c
@@ -353,11 +353,6 @@ static bool slot_valid(struct nvdimm_drvdata *ndd,
if (slot != __le32_to_cpu(nd_label->slot))
return false;
- /* check that DPA allocations are page aligned */
- if ((__le64_to_cpu(nd_label->dpa)
- | __le64_to_cpu(nd_label->rawsize)) % SZ_4K)
- return false;
-
/* check checksum */
if (namespace_label_has(ndd, checksum)) {
u64 sum, sum_save;
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 2d8d7e554877..43401325c874 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -410,7 +410,7 @@ static ssize_t alt_name_store(struct device *dev,
struct nd_region *nd_region = to_nd_region(dev->parent);
ssize_t rc;
- device_lock(dev);
+ nd_device_lock(dev);
nvdimm_bus_lock(dev);
wait_nvdimm_bus_probe_idle(dev);
rc = __alt_name_store(dev, buf, len);
@@ -418,7 +418,7 @@ static ssize_t alt_name_store(struct device *dev,
rc = nd_namespace_label_update(nd_region, dev);
dev_dbg(dev, "%s(%zd)\n", rc < 0 ? "fail " : "", rc);
nvdimm_bus_unlock(dev);
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc < 0 ? rc : len;
}
@@ -1006,10 +1006,10 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
return -ENXIO;
}
- div_u64_rem(val, SZ_4K * nd_region->ndr_mappings, &remainder);
+ div_u64_rem(val, PAGE_SIZE * nd_region->ndr_mappings, &remainder);
if (remainder) {
- dev_dbg(dev, "%llu is not %dK aligned\n", val,
- (SZ_4K * nd_region->ndr_mappings) / SZ_1K);
+ dev_dbg(dev, "%llu is not %ldK aligned\n", val,
+ (PAGE_SIZE * nd_region->ndr_mappings) / SZ_1K);
return -EINVAL;
}
@@ -1077,7 +1077,7 @@ static ssize_t size_store(struct device *dev,
if (rc)
return rc;
- device_lock(dev);
+ nd_device_lock(dev);
nvdimm_bus_lock(dev);
wait_nvdimm_bus_probe_idle(dev);
rc = __size_store(dev, val);
@@ -1103,7 +1103,7 @@ static ssize_t size_store(struct device *dev,
dev_dbg(dev, "%llx %s (%d)\n", val, rc < 0 ? "fail" : "success", rc);
nvdimm_bus_unlock(dev);
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc < 0 ? rc : len;
}
@@ -1286,7 +1286,7 @@ static ssize_t uuid_store(struct device *dev,
} else
return -ENXIO;
- device_lock(dev);
+ nd_device_lock(dev);
nvdimm_bus_lock(dev);
wait_nvdimm_bus_probe_idle(dev);
if (to_ndns(dev)->claim)
@@ -1302,7 +1302,7 @@ static ssize_t uuid_store(struct device *dev,
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
buf[len - 1] == '\n' ? "" : "\n");
nvdimm_bus_unlock(dev);
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc < 0 ? rc : len;
}
@@ -1376,7 +1376,7 @@ static ssize_t sector_size_store(struct device *dev,
} else
return -ENXIO;
- device_lock(dev);
+ nd_device_lock(dev);
nvdimm_bus_lock(dev);
if (to_ndns(dev)->claim)
rc = -EBUSY;
@@ -1387,7 +1387,7 @@ static ssize_t sector_size_store(struct device *dev,
dev_dbg(dev, "result: %zd %s: %s%s", rc, rc < 0 ? "tried" : "wrote",
buf, buf[len - 1] == '\n' ? "" : "\n");
nvdimm_bus_unlock(dev);
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc ? rc : len;
}
@@ -1502,9 +1502,9 @@ static ssize_t holder_show(struct device *dev,
struct nd_namespace_common *ndns = to_ndns(dev);
ssize_t rc;
- device_lock(dev);
+ nd_device_lock(dev);
rc = sprintf(buf, "%s\n", ndns->claim ? dev_name(ndns->claim) : "");
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc;
}
@@ -1541,7 +1541,7 @@ static ssize_t holder_class_store(struct device *dev,
struct nd_region *nd_region = to_nd_region(dev->parent);
ssize_t rc;
- device_lock(dev);
+ nd_device_lock(dev);
nvdimm_bus_lock(dev);
wait_nvdimm_bus_probe_idle(dev);
rc = __holder_class_store(dev, buf);
@@ -1549,7 +1549,7 @@ static ssize_t holder_class_store(struct device *dev,
rc = nd_namespace_label_update(nd_region, dev);
dev_dbg(dev, "%s(%zd)\n", rc < 0 ? "fail " : "", rc);
nvdimm_bus_unlock(dev);
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc < 0 ? rc : len;
}
@@ -1560,7 +1560,7 @@ static ssize_t holder_class_show(struct device *dev,
struct nd_namespace_common *ndns = to_ndns(dev);
ssize_t rc;
- device_lock(dev);
+ nd_device_lock(dev);
if (ndns->claim_class == NVDIMM_CCLASS_NONE)
rc = sprintf(buf, "\n");
else if ((ndns->claim_class == NVDIMM_CCLASS_BTT) ||
@@ -1572,7 +1572,7 @@ static ssize_t holder_class_show(struct device *dev,
rc = sprintf(buf, "dax\n");
else
rc = sprintf(buf, "<unknown>\n");
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc;
}
@@ -1586,7 +1586,7 @@ static ssize_t mode_show(struct device *dev,
char *mode;
ssize_t rc;
- device_lock(dev);
+ nd_device_lock(dev);
claim = ndns->claim;
if (claim && is_nd_btt(claim))
mode = "safe";
@@ -1599,7 +1599,7 @@ static ssize_t mode_show(struct device *dev,
else
mode = "raw";
rc = sprintf(buf, "%s\n", mode);
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc;
}
@@ -1703,8 +1703,8 @@ struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev)
* Flush any in-progess probes / removals in the driver
* for the raw personality of this namespace.
*/
- device_lock(&ndns->dev);
- device_unlock(&ndns->dev);
+ nd_device_lock(&ndns->dev);
+ nd_device_unlock(&ndns->dev);
if (ndns->dev.driver) {
dev_dbg(&ndns->dev, "is active, can't bind %s\n",
dev_name(dev));
@@ -2462,6 +2462,27 @@ static struct device **create_namespaces(struct nd_region *nd_region)
return devs;
}
+static void deactivate_labels(void *region)
+{
+ struct nd_region *nd_region = region;
+ int i;
+
+ for (i = 0; i < nd_region->ndr_mappings; i++) {
+ struct nd_mapping *nd_mapping = &nd_region->mapping[i];
+ struct nvdimm_drvdata *ndd = nd_mapping->ndd;
+ struct nvdimm *nvdimm = nd_mapping->nvdimm;
+
+ mutex_lock(&nd_mapping->lock);
+ nd_mapping_free_labels(nd_mapping);
+ mutex_unlock(&nd_mapping->lock);
+
+ put_ndd(ndd);
+ nd_mapping->ndd = NULL;
+ if (ndd)
+ atomic_dec(&nvdimm->busy);
+ }
+}
+
static int init_active_labels(struct nd_region *nd_region)
{
int i;
@@ -2519,16 +2540,17 @@ static int init_active_labels(struct nd_region *nd_region)
mutex_unlock(&nd_mapping->lock);
}
- if (j >= count)
- continue;
+ if (j < count)
+ break;
+ }
- mutex_lock(&nd_mapping->lock);
- nd_mapping_free_labels(nd_mapping);
- mutex_unlock(&nd_mapping->lock);
+ if (i < nd_region->ndr_mappings) {
+ deactivate_labels(nd_region);
return -ENOMEM;
}
- return 0;
+ return devm_add_action_or_reset(&nd_region->dev, deactivate_labels,
+ nd_region);
}
int nd_region_register_namespaces(struct nd_region *nd_region, int *err)
diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h
index 391e88de3a29..25fa121104d0 100644
--- a/drivers/nvdimm/nd-core.h
+++ b/drivers/nvdimm/nd-core.h
@@ -9,6 +9,7 @@
#include <linux/sizes.h>
#include <linux/mutex.h>
#include <linux/nd.h>
+#include "nd.h"
extern struct list_head nvdimm_bus_list;
extern struct mutex nvdimm_bus_list_mutex;
@@ -17,10 +18,11 @@ extern struct workqueue_struct *nvdimm_wq;
struct nvdimm_bus {
struct nvdimm_bus_descriptor *nd_desc;
- wait_queue_head_t probe_wait;
+ wait_queue_head_t wait;
struct list_head list;
struct device dev;
int id, probe_active;
+ atomic_t ioctl_active;
struct list_head mapping_list;
struct mutex reconfig_mutex;
struct badrange badrange;
@@ -37,53 +39,40 @@ struct nvdimm {
const char *dimm_id;
struct {
const struct nvdimm_security_ops *ops;
- enum nvdimm_security_state state;
- enum nvdimm_security_state ext_state;
+ unsigned long flags;
+ unsigned long ext_flags;
unsigned int overwrite_tmo;
struct kernfs_node *overwrite_state;
} sec;
struct delayed_work dwork;
};
-static inline enum nvdimm_security_state nvdimm_security_state(
+static inline unsigned long nvdimm_security_flags(
struct nvdimm *nvdimm, enum nvdimm_passphrase_type ptype)
{
+ u64 flags;
+ const u64 state_flags = 1UL << NVDIMM_SECURITY_DISABLED
+ | 1UL << NVDIMM_SECURITY_LOCKED
+ | 1UL << NVDIMM_SECURITY_UNLOCKED
+ | 1UL << NVDIMM_SECURITY_OVERWRITE;
+
if (!nvdimm->sec.ops)
- return -ENXIO;
+ return 0;
- return nvdimm->sec.ops->state(nvdimm, ptype);
+ flags = nvdimm->sec.ops->get_flags(nvdimm, ptype);
+ /* disabled, locked, unlocked, and overwrite are mutually exclusive */
+ dev_WARN_ONCE(&nvdimm->dev, hweight64(flags & state_flags) > 1,
+ "reported invalid security state: %#llx\n",
+ (unsigned long long) flags);
+ return flags;
}
int nvdimm_security_freeze(struct nvdimm *nvdimm);
#if IS_ENABLED(CONFIG_NVDIMM_KEYS)
-int nvdimm_security_disable(struct nvdimm *nvdimm, unsigned int keyid);
-int nvdimm_security_update(struct nvdimm *nvdimm, unsigned int keyid,
- unsigned int new_keyid,
- enum nvdimm_passphrase_type pass_type);
-int nvdimm_security_erase(struct nvdimm *nvdimm, unsigned int keyid,
- enum nvdimm_passphrase_type pass_type);
-int nvdimm_security_overwrite(struct nvdimm *nvdimm, unsigned int keyid);
+ssize_t nvdimm_security_store(struct device *dev, const char *buf, size_t len);
void nvdimm_security_overwrite_query(struct work_struct *work);
#else
-static inline int nvdimm_security_disable(struct nvdimm *nvdimm,
- unsigned int keyid)
-{
- return -EOPNOTSUPP;
-}
-static inline int nvdimm_security_update(struct nvdimm *nvdimm,
- unsigned int keyid,
- unsigned int new_keyid,
- enum nvdimm_passphrase_type pass_type)
-{
- return -EOPNOTSUPP;
-}
-static inline int nvdimm_security_erase(struct nvdimm *nvdimm,
- unsigned int keyid,
- enum nvdimm_passphrase_type pass_type)
-{
- return -EOPNOTSUPP;
-}
-static inline int nvdimm_security_overwrite(struct nvdimm *nvdimm,
- unsigned int keyid)
+static inline ssize_t nvdimm_security_store(struct device *dev,
+ const char *buf, size_t len)
{
return -EOPNOTSUPP;
}
@@ -126,13 +115,12 @@ int __init nvdimm_bus_init(void);
void nvdimm_bus_exit(void);
void nvdimm_devs_exit(void);
void nd_region_devs_exit(void);
-void nd_region_probe_success(struct nvdimm_bus *nvdimm_bus, struct device *dev);
struct nd_region;
+void nd_region_advance_seeds(struct nd_region *nd_region, struct device *dev);
void nd_region_create_ns_seed(struct nd_region *nd_region);
void nd_region_create_btt_seed(struct nd_region *nd_region);
void nd_region_create_pfn_seed(struct nd_region *nd_region);
void nd_region_create_dax_seed(struct nd_region *nd_region);
-void nd_region_disable(struct nvdimm_bus *nvdimm_bus, struct device *dev);
int nvdimm_bus_create_ndctl(struct nvdimm_bus *nvdimm_bus);
void nvdimm_bus_destroy_ndctl(struct nvdimm_bus *nvdimm_bus);
void nd_synchronize(void);
@@ -181,4 +169,71 @@ ssize_t nd_namespace_store(struct device *dev,
struct nd_namespace_common **_ndns, const char *buf,
size_t len);
struct nd_pfn *to_nd_pfn_safe(struct device *dev);
+bool is_nvdimm_bus(struct device *dev);
+
+#ifdef CONFIG_PROVE_LOCKING
+extern struct class *nd_class;
+
+enum {
+ LOCK_BUS,
+ LOCK_NDCTL,
+ LOCK_REGION,
+ LOCK_DIMM = LOCK_REGION,
+ LOCK_NAMESPACE,
+ LOCK_CLAIM,
+};
+
+static inline void debug_nvdimm_lock(struct device *dev)
+{
+ if (is_nd_region(dev))
+ mutex_lock_nested(&dev->lockdep_mutex, LOCK_REGION);
+ else if (is_nvdimm(dev))
+ mutex_lock_nested(&dev->lockdep_mutex, LOCK_DIMM);
+ else if (is_nd_btt(dev) || is_nd_pfn(dev) || is_nd_dax(dev))
+ mutex_lock_nested(&dev->lockdep_mutex, LOCK_CLAIM);
+ else if (dev->parent && (is_nd_region(dev->parent)))
+ mutex_lock_nested(&dev->lockdep_mutex, LOCK_NAMESPACE);
+ else if (is_nvdimm_bus(dev))
+ mutex_lock_nested(&dev->lockdep_mutex, LOCK_BUS);
+ else if (dev->class && dev->class == nd_class)
+ mutex_lock_nested(&dev->lockdep_mutex, LOCK_NDCTL);
+ else
+ dev_WARN(dev, "unknown lock level\n");
+}
+
+static inline void debug_nvdimm_unlock(struct device *dev)
+{
+ mutex_unlock(&dev->lockdep_mutex);
+}
+
+static inline void nd_device_lock(struct device *dev)
+{
+ device_lock(dev);
+ debug_nvdimm_lock(dev);
+}
+
+static inline void nd_device_unlock(struct device *dev)
+{
+ debug_nvdimm_unlock(dev);
+ device_unlock(dev);
+}
+#else
+static inline void nd_device_lock(struct device *dev)
+{
+ device_lock(dev);
+}
+
+static inline void nd_device_unlock(struct device *dev)
+{
+ device_unlock(dev);
+}
+
+static inline void debug_nvdimm_lock(struct device *dev)
+{
+}
+
+static inline void debug_nvdimm_unlock(struct device *dev)
+{
+}
+#endif
#endif /* __ND_CORE_H__ */
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index 1b9955651379..e89af4b2d8e9 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -375,6 +375,10 @@ unsigned int pmem_sector_size(struct nd_namespace_common *ndns);
void nvdimm_badblocks_populate(struct nd_region *nd_region,
struct badblocks *bb, const struct resource *res);
#if IS_ENABLED(CONFIG_ND_CLAIM)
+
+/* max struct page size independent of kernel config */
+#define MAX_STRUCT_PAGE_SIZE 64
+
int nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap);
int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio);
void devm_nsio_disable(struct device *dev, struct nd_namespace_io *nsio);
diff --git a/drivers/nvdimm/of_pmem.c b/drivers/nvdimm/of_pmem.c
index a0c8dcfa0bf9..97187d6c0bdb 100644
--- a/drivers/nvdimm/of_pmem.c
+++ b/drivers/nvdimm/of_pmem.c
@@ -42,7 +42,7 @@ static int of_pmem_region_probe(struct platform_device *pdev)
return -ENOMEM;
priv->bus_desc.attr_groups = bus_attr_groups;
- priv->bus_desc.provider_name = "of_pmem";
+ priv->bus_desc.provider_name = kstrdup(pdev->name, GFP_KERNEL);
priv->bus_desc.module = THIS_MODULE;
priv->bus_desc.of_node = np;
diff --git a/drivers/nvdimm/pfn.h b/drivers/nvdimm/pfn.h
index 7381673b7b70..acb19517f678 100644
--- a/drivers/nvdimm/pfn.h
+++ b/drivers/nvdimm/pfn.h
@@ -29,7 +29,10 @@ struct nd_pfn_sb {
/* minor-version-2 record the base alignment of the mapping */
__le32 align;
/* minor-version-3 guarantee the padding and flags are zero */
- u8 padding[4000];
+ /* minor-version-4 record the page size and struct page size */
+ __le32 page_size;
+ __le16 page_struct_size;
+ u8 padding[3994];
__le64 checksum;
};
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index df2bdbd22450..bb9cc5cf0873 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -67,7 +67,7 @@ static ssize_t mode_store(struct device *dev,
struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
ssize_t rc = 0;
- device_lock(dev);
+ nd_device_lock(dev);
nvdimm_bus_lock(dev);
if (dev->driver)
rc = -EBUSY;
@@ -89,7 +89,7 @@ static ssize_t mode_store(struct device *dev,
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
buf[len - 1] == '\n' ? "" : "\n");
nvdimm_bus_unlock(dev);
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc ? rc : len;
}
@@ -132,14 +132,14 @@ static ssize_t align_store(struct device *dev,
struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
ssize_t rc;
- device_lock(dev);
+ nd_device_lock(dev);
nvdimm_bus_lock(dev);
rc = nd_size_select_store(dev, buf, &nd_pfn->align,
nd_pfn_supported_alignments());
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
buf[len - 1] == '\n' ? "" : "\n");
nvdimm_bus_unlock(dev);
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc ? rc : len;
}
@@ -161,11 +161,11 @@ static ssize_t uuid_store(struct device *dev,
struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
ssize_t rc;
- device_lock(dev);
+ nd_device_lock(dev);
rc = nd_uuid_store(dev, &nd_pfn->uuid, buf, len);
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
buf[len - 1] == '\n' ? "" : "\n");
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc ? rc : len;
}
@@ -190,13 +190,13 @@ static ssize_t namespace_store(struct device *dev,
struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
ssize_t rc;
- device_lock(dev);
+ nd_device_lock(dev);
nvdimm_bus_lock(dev);
rc = nd_namespace_store(dev, &nd_pfn->ndns, buf, len);
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
buf[len - 1] == '\n' ? "" : "\n");
nvdimm_bus_unlock(dev);
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc;
}
@@ -208,7 +208,7 @@ static ssize_t resource_show(struct device *dev,
struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
ssize_t rc;
- device_lock(dev);
+ nd_device_lock(dev);
if (dev->driver) {
struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
u64 offset = __le64_to_cpu(pfn_sb->dataoff);
@@ -222,7 +222,7 @@ static ssize_t resource_show(struct device *dev,
/* no address to convey if the pfn instance is disabled */
rc = -ENXIO;
}
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc;
}
@@ -234,7 +234,7 @@ static ssize_t size_show(struct device *dev,
struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
ssize_t rc;
- device_lock(dev);
+ nd_device_lock(dev);
if (dev->driver) {
struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
u64 offset = __le64_to_cpu(pfn_sb->dataoff);
@@ -250,7 +250,7 @@ static ssize_t size_show(struct device *dev,
/* no size to convey if the pfn instance is disabled */
rc = -ENXIO;
}
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc;
}
@@ -460,6 +460,11 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
if (__le16_to_cpu(pfn_sb->version_minor) < 2)
pfn_sb->align = 0;
+ if (__le16_to_cpu(pfn_sb->version_minor) < 4) {
+ pfn_sb->page_struct_size = cpu_to_le16(64);
+ pfn_sb->page_size = cpu_to_le32(PAGE_SIZE);
+ }
+
switch (le32_to_cpu(pfn_sb->mode)) {
case PFN_MODE_RAM:
case PFN_MODE_PMEM:
@@ -475,6 +480,22 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
align = 1UL << ilog2(offset);
mode = le32_to_cpu(pfn_sb->mode);
+ if ((le32_to_cpu(pfn_sb->page_size) > PAGE_SIZE) &&
+ (mode == PFN_MODE_PMEM)) {
+ dev_err(&nd_pfn->dev,
+ "init failed, page size mismatch %d\n",
+ le32_to_cpu(pfn_sb->page_size));
+ return -EOPNOTSUPP;
+ }
+
+ if ((le16_to_cpu(pfn_sb->page_struct_size) < sizeof(struct page)) &&
+ (mode == PFN_MODE_PMEM)) {
+ dev_err(&nd_pfn->dev,
+ "init failed, struct page size mismatch %d\n",
+ le16_to_cpu(pfn_sb->page_struct_size));
+ return -EOPNOTSUPP;
+ }
+
if (!nd_pfn->uuid) {
/*
* When probing a namepace via nd_pfn_probe() the uuid
@@ -655,6 +676,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
resource_size_t start, size;
struct nd_region *nd_region;
unsigned long npfns, align;
+ u32 end_trunc;
struct nd_pfn_sb *pfn_sb;
phys_addr_t offset;
const char *sig;
@@ -696,13 +718,22 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
size = resource_size(&nsio->res);
npfns = PHYS_PFN(size - SZ_8K);
align = max(nd_pfn->align, (1UL << SUBSECTION_SHIFT));
+ end_trunc = start + size - ALIGN_DOWN(start + size, align);
if (nd_pfn->mode == PFN_MODE_PMEM) {
/*
* The altmap should be padded out to the block size used
* when populating the vmemmap. This *should* be equal to
* PMD_SIZE for most architectures.
+ *
+ * Also make sure the size of struct page is less than 64
+ * (MAX_STRUCT_PAGE_SIZE). We want to use a large enough size
+ * here so that the reserved space does not depend on the
+ * kernel's struct page size, but we also want to notice
+ * when new elements are added to struct page.
*/
- offset = ALIGN(start + SZ_8K + 64 * npfns, align) - start;
+ BUILD_BUG_ON(sizeof(struct page) > MAX_STRUCT_PAGE_SIZE);
+ offset = ALIGN(start + SZ_8K + MAX_STRUCT_PAGE_SIZE * npfns, align)
+ - start;
} else if (nd_pfn->mode == PFN_MODE_RAM)
offset = ALIGN(start + SZ_8K, align) - start;
else
@@ -714,7 +745,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
return -ENXIO;
}
- npfns = PHYS_PFN(size - offset);
+ npfns = PHYS_PFN(size - offset - end_trunc);
pfn_sb->mode = cpu_to_le32(nd_pfn->mode);
pfn_sb->dataoff = cpu_to_le64(offset);
pfn_sb->npfns = cpu_to_le64(npfns);
@@ -722,8 +753,11 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
memcpy(pfn_sb->uuid, nd_pfn->uuid, 16);
memcpy(pfn_sb->parent_uuid, nd_dev_to_uuid(&ndns->dev), 16);
pfn_sb->version_major = cpu_to_le16(1);
- pfn_sb->version_minor = cpu_to_le16(3);
+ pfn_sb->version_minor = cpu_to_le16(4);
+ pfn_sb->end_trunc = cpu_to_le32(end_trunc);
pfn_sb->align = cpu_to_le32(nd_pfn->align);
+ pfn_sb->page_struct_size = cpu_to_le16(MAX_STRUCT_PAGE_SIZE);
+ pfn_sb->page_size = cpu_to_le32(PAGE_SIZE);
checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
pfn_sb->checksum = cpu_to_le64(checksum);
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 2bf3acd69613..f9f76f6ba07b 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -490,6 +490,7 @@ static int pmem_attach_disk(struct device *dev,
static int nd_pmem_probe(struct device *dev)
{
+ int ret;
struct nd_namespace_common *ndns;
ndns = nvdimm_namespace_common_probe(dev);
@@ -505,12 +506,32 @@ static int nd_pmem_probe(struct device *dev)
if (is_nd_pfn(dev))
return pmem_attach_disk(dev, ndns);
- /* if we find a valid info-block we'll come back as that personality */
- if (nd_btt_probe(dev, ndns) == 0 || nd_pfn_probe(dev, ndns) == 0
- || nd_dax_probe(dev, ndns) == 0)
+ ret = nd_btt_probe(dev, ndns);
+ if (ret == 0)
return -ENXIO;
- /* ...otherwise we're just a raw pmem device */
+ /*
+ * We have two failure conditions here: there is either no
+ * info reserve block, or we found a valid info reserve block
+ * but failed to initialize the pfn superblock.
+ *
+ * For the first case, consider the namespace a raw pmem namespace
+ * and attach a disk.
+ *
+ * For the latter, consider this a success and advance the namespace
+ * seed.
+ */
+ ret = nd_pfn_probe(dev, ndns);
+ if (ret == 0)
+ return -ENXIO;
+ else if (ret == -EOPNOTSUPP)
+ return ret;
+
+ ret = nd_dax_probe(dev, ndns);
+ if (ret == 0)
+ return -ENXIO;
+ else if (ret == -EOPNOTSUPP)
+ return ret;
return pmem_attach_disk(dev, ndns);
}
@@ -522,8 +543,8 @@ static int nd_pmem_remove(struct device *dev)
nvdimm_namespace_detach_btt(to_nd_btt(dev));
else {
/*
- * Note, this assumes device_lock() context to not race
- * nd_pmem_notify()
+ * Note, this assumes nd_device_lock() context to not
+ * race nd_pmem_notify()
*/
sysfs_put(pmem->bb_state);
pmem->bb_state = NULL;
diff --git a/drivers/nvdimm/region.c b/drivers/nvdimm/region.c
index ef46cc3a71ae..37bf8719a2a4 100644
--- a/drivers/nvdimm/region.c
+++ b/drivers/nvdimm/region.c
@@ -34,17 +34,6 @@ static int nd_region_probe(struct device *dev)
if (rc)
return rc;
- rc = nd_region_register_namespaces(nd_region, &err);
- if (rc < 0)
- return rc;
-
- ndrd = dev_get_drvdata(dev);
- ndrd->ns_active = rc;
- ndrd->ns_count = rc + err;
-
- if (rc && err && rc == err)
- return -ENODEV;
-
if (is_nd_pmem(&nd_region->dev)) {
struct resource ndr_res;
@@ -60,6 +49,17 @@ static int nd_region_probe(struct device *dev)
nvdimm_badblocks_populate(nd_region, &nd_region->bb, &ndr_res);
}
+ rc = nd_region_register_namespaces(nd_region, &err);
+ if (rc < 0)
+ return rc;
+
+ ndrd = dev_get_drvdata(dev);
+ ndrd->ns_active = rc;
+ ndrd->ns_count = rc + err;
+
+ if (rc && err && rc == err)
+ return -ENODEV;
+
nd_region->btt_seed = nd_btt_create(nd_region);
nd_region->pfn_seed = nd_pfn_create(nd_region);
nd_region->dax_seed = nd_dax_create(nd_region);
@@ -102,7 +102,7 @@ static int nd_region_remove(struct device *dev)
nvdimm_bus_unlock(dev);
/*
- * Note, this assumes device_lock() context to not race
+ * Note, this assumes nd_device_lock() context to not race
* nd_region_notify()
*/
sysfs_put(nd_region->bb_state);
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index 56f2227f192a..3fd6b59abd33 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -331,7 +331,7 @@ static ssize_t set_cookie_show(struct device *dev,
* the v1.1 namespace label cookie definition. To read all this
* data we need to wait for probing to settle.
*/
- device_lock(dev);
+ nd_device_lock(dev);
nvdimm_bus_lock(dev);
wait_nvdimm_bus_probe_idle(dev);
if (nd_region->ndr_mappings) {
@@ -348,7 +348,7 @@ static ssize_t set_cookie_show(struct device *dev,
}
}
nvdimm_bus_unlock(dev);
- device_unlock(dev);
+ nd_device_unlock(dev);
if (rc)
return rc;
@@ -424,10 +424,12 @@ static ssize_t available_size_show(struct device *dev,
* memory nvdimm_bus_lock() is dropped, but that's userspace's
* problem to not race itself.
*/
+ nd_device_lock(dev);
nvdimm_bus_lock(dev);
wait_nvdimm_bus_probe_idle(dev);
available = nd_region_available_dpa(nd_region);
nvdimm_bus_unlock(dev);
+ nd_device_unlock(dev);
return sprintf(buf, "%llu\n", available);
}
@@ -439,10 +441,12 @@ static ssize_t max_available_extent_show(struct device *dev,
struct nd_region *nd_region = to_nd_region(dev);
unsigned long long available = 0;
+ nd_device_lock(dev);
nvdimm_bus_lock(dev);
wait_nvdimm_bus_probe_idle(dev);
available = nd_region_allocatable_dpa(nd_region);
nvdimm_bus_unlock(dev);
+ nd_device_unlock(dev);
return sprintf(buf, "%llu\n", available);
}
@@ -561,12 +565,12 @@ static ssize_t region_badblocks_show(struct device *dev,
struct nd_region *nd_region = to_nd_region(dev);
ssize_t rc;
- device_lock(dev);
+ nd_device_lock(dev);
if (dev->driver)
rc = badblocks_show(&nd_region->bb, buf, 0);
else
rc = -ENXIO;
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc;
}
@@ -711,85 +715,37 @@ void nd_mapping_free_labels(struct nd_mapping *nd_mapping)
}
/*
- * Upon successful probe/remove, take/release a reference on the
- * associated interleave set (if present), and plant new btt + namespace
- * seeds. Also, on the removal of a BLK region, notify the provider to
- * disable the region.
+ * When a namespace is activated create new seeds for the next
+ * namespace, or namespace-personality to be configured.
*/
-static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
- struct device *dev, bool probe)
+void nd_region_advance_seeds(struct nd_region *nd_region, struct device *dev)
{
- struct nd_region *nd_region;
-
- if (!probe && is_nd_region(dev)) {
- int i;
-
- nd_region = to_nd_region(dev);
- for (i = 0; i < nd_region->ndr_mappings; i++) {
- struct nd_mapping *nd_mapping = &nd_region->mapping[i];
- struct nvdimm_drvdata *ndd = nd_mapping->ndd;
- struct nvdimm *nvdimm = nd_mapping->nvdimm;
-
- mutex_lock(&nd_mapping->lock);
- nd_mapping_free_labels(nd_mapping);
- mutex_unlock(&nd_mapping->lock);
-
- put_ndd(ndd);
- nd_mapping->ndd = NULL;
- if (ndd)
- atomic_dec(&nvdimm->busy);
- }
- }
- if (dev->parent && is_nd_region(dev->parent) && probe) {
- nd_region = to_nd_region(dev->parent);
- nvdimm_bus_lock(dev);
- if (nd_region->ns_seed == dev)
- nd_region_create_ns_seed(nd_region);
- nvdimm_bus_unlock(dev);
- }
- if (is_nd_btt(dev) && probe) {
+ nvdimm_bus_lock(dev);
+ if (nd_region->ns_seed == dev) {
+ nd_region_create_ns_seed(nd_region);
+ } else if (is_nd_btt(dev)) {
struct nd_btt *nd_btt = to_nd_btt(dev);
- nd_region = to_nd_region(dev->parent);
- nvdimm_bus_lock(dev);
if (nd_region->btt_seed == dev)
nd_region_create_btt_seed(nd_region);
if (nd_region->ns_seed == &nd_btt->ndns->dev)
nd_region_create_ns_seed(nd_region);
- nvdimm_bus_unlock(dev);
- }
- if (is_nd_pfn(dev) && probe) {
+ } else if (is_nd_pfn(dev)) {
struct nd_pfn *nd_pfn = to_nd_pfn(dev);
- nd_region = to_nd_region(dev->parent);
- nvdimm_bus_lock(dev);
if (nd_region->pfn_seed == dev)
nd_region_create_pfn_seed(nd_region);
if (nd_region->ns_seed == &nd_pfn->ndns->dev)
nd_region_create_ns_seed(nd_region);
- nvdimm_bus_unlock(dev);
- }
- if (is_nd_dax(dev) && probe) {
+ } else if (is_nd_dax(dev)) {
struct nd_dax *nd_dax = to_nd_dax(dev);
- nd_region = to_nd_region(dev->parent);
- nvdimm_bus_lock(dev);
if (nd_region->dax_seed == dev)
nd_region_create_dax_seed(nd_region);
if (nd_region->ns_seed == &nd_dax->nd_pfn.ndns->dev)
nd_region_create_ns_seed(nd_region);
- nvdimm_bus_unlock(dev);
}
-}
-
-void nd_region_probe_success(struct nvdimm_bus *nvdimm_bus, struct device *dev)
-{
- nd_region_notify_driver_action(nvdimm_bus, dev, true);
-}
-
-void nd_region_disable(struct nvdimm_bus *nvdimm_bus, struct device *dev)
-{
- nd_region_notify_driver_action(nvdimm_bus, dev, false);
+ nvdimm_bus_unlock(dev);
}
static ssize_t mappingN(struct device *dev, char *buf, int n)
@@ -988,10 +944,10 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
struct nvdimm *nvdimm = mapping->nvdimm;
- if ((mapping->start | mapping->size) % SZ_4K) {
- dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not 4K aligned\n",
- caller, dev_name(&nvdimm->dev), i);
-
+ if ((mapping->start | mapping->size) % PAGE_SIZE) {
+ dev_err(&nvdimm_bus->dev,
+ "%s: %s mapping%d is not %ld aligned\n",
+ caller, dev_name(&nvdimm->dev), i, PAGE_SIZE);
return NULL;
}
@@ -1021,10 +977,9 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
}
region_buf = ndbr;
} else {
- nd_region = kzalloc(sizeof(struct nd_region)
- + sizeof(struct nd_mapping)
- * ndr_desc->num_mappings,
- GFP_KERNEL);
+ nd_region = kzalloc(struct_size(nd_region, mapping,
+ ndr_desc->num_mappings),
+ GFP_KERNEL);
region_buf = nd_region;
}
diff --git a/drivers/nvdimm/security.c b/drivers/nvdimm/security.c
index a570f2263a42..9e45b207ff01 100644
--- a/drivers/nvdimm/security.c
+++ b/drivers/nvdimm/security.c
@@ -158,7 +158,7 @@ static int nvdimm_key_revalidate(struct nvdimm *nvdimm)
}
nvdimm_put_key(key);
- nvdimm->sec.state = nvdimm_security_state(nvdimm, NVDIMM_USER);
+ nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
return 0;
}
@@ -174,7 +174,7 @@ static int __nvdimm_security_unlock(struct nvdimm *nvdimm)
lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
if (!nvdimm->sec.ops || !nvdimm->sec.ops->unlock
- || nvdimm->sec.state < 0)
+ || !nvdimm->sec.flags)
return -EIO;
if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
@@ -189,7 +189,7 @@ static int __nvdimm_security_unlock(struct nvdimm *nvdimm)
* freeze of the security configuration. I.e. if the OS does not
* have the key, security is being managed pre-OS.
*/
- if (nvdimm->sec.state == NVDIMM_SECURITY_UNLOCKED) {
+ if (test_bit(NVDIMM_SECURITY_UNLOCKED, &nvdimm->sec.flags)) {
if (!key_revalidate)
return 0;
@@ -202,7 +202,7 @@ static int __nvdimm_security_unlock(struct nvdimm *nvdimm)
rc == 0 ? "success" : "fail");
nvdimm_put_key(key);
- nvdimm->sec.state = nvdimm_security_state(nvdimm, NVDIMM_USER);
+ nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
return rc;
}
@@ -217,7 +217,25 @@ int nvdimm_security_unlock(struct device *dev)
return rc;
}
-int nvdimm_security_disable(struct nvdimm *nvdimm, unsigned int keyid)
+static int check_security_state(struct nvdimm *nvdimm)
+{
+ struct device *dev = &nvdimm->dev;
+
+ if (test_bit(NVDIMM_SECURITY_FROZEN, &nvdimm->sec.flags)) {
+ dev_dbg(dev, "Incorrect security state: %#lx\n",
+ nvdimm->sec.flags);
+ return -EIO;
+ }
+
+ if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
+ dev_dbg(dev, "Security operation in progress.\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int security_disable(struct nvdimm *nvdimm, unsigned int keyid)
{
struct device *dev = &nvdimm->dev;
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
@@ -229,19 +247,12 @@ int nvdimm_security_disable(struct nvdimm *nvdimm, unsigned int keyid)
lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
if (!nvdimm->sec.ops || !nvdimm->sec.ops->disable
- || nvdimm->sec.state < 0)
+ || !nvdimm->sec.flags)
return -EOPNOTSUPP;
- if (nvdimm->sec.state >= NVDIMM_SECURITY_FROZEN) {
- dev_dbg(dev, "Incorrect security state: %d\n",
- nvdimm->sec.state);
- return -EIO;
- }
-
- if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
- dev_dbg(dev, "Security operation in progress.\n");
- return -EBUSY;
- }
+ rc = check_security_state(nvdimm);
+ if (rc)
+ return rc;
data = nvdimm_get_user_key_payload(nvdimm, keyid,
NVDIMM_BASE_KEY, &key);
@@ -253,11 +264,11 @@ int nvdimm_security_disable(struct nvdimm *nvdimm, unsigned int keyid)
rc == 0 ? "success" : "fail");
nvdimm_put_key(key);
- nvdimm->sec.state = nvdimm_security_state(nvdimm, NVDIMM_USER);
+ nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
return rc;
}
-int nvdimm_security_update(struct nvdimm *nvdimm, unsigned int keyid,
+static int security_update(struct nvdimm *nvdimm, unsigned int keyid,
unsigned int new_keyid,
enum nvdimm_passphrase_type pass_type)
{
@@ -271,14 +282,12 @@ int nvdimm_security_update(struct nvdimm *nvdimm, unsigned int keyid,
lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
if (!nvdimm->sec.ops || !nvdimm->sec.ops->change_key
- || nvdimm->sec.state < 0)
+ || !nvdimm->sec.flags)
return -EOPNOTSUPP;
- if (nvdimm->sec.state >= NVDIMM_SECURITY_FROZEN) {
- dev_dbg(dev, "Incorrect security state: %d\n",
- nvdimm->sec.state);
- return -EIO;
- }
+ rc = check_security_state(nvdimm);
+ if (rc)
+ return rc;
data = nvdimm_get_user_key_payload(nvdimm, keyid,
NVDIMM_BASE_KEY, &key);
@@ -301,15 +310,15 @@ int nvdimm_security_update(struct nvdimm *nvdimm, unsigned int keyid,
nvdimm_put_key(newkey);
nvdimm_put_key(key);
if (pass_type == NVDIMM_MASTER)
- nvdimm->sec.ext_state = nvdimm_security_state(nvdimm,
+ nvdimm->sec.ext_flags = nvdimm_security_flags(nvdimm,
NVDIMM_MASTER);
else
- nvdimm->sec.state = nvdimm_security_state(nvdimm,
+ nvdimm->sec.flags = nvdimm_security_flags(nvdimm,
NVDIMM_USER);
return rc;
}
-int nvdimm_security_erase(struct nvdimm *nvdimm, unsigned int keyid,
+static int security_erase(struct nvdimm *nvdimm, unsigned int keyid,
enum nvdimm_passphrase_type pass_type)
{
struct device *dev = &nvdimm->dev;
@@ -322,26 +331,14 @@ int nvdimm_security_erase(struct nvdimm *nvdimm, unsigned int keyid,
lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
if (!nvdimm->sec.ops || !nvdimm->sec.ops->erase
- || nvdimm->sec.state < 0)
+ || !nvdimm->sec.flags)
return -EOPNOTSUPP;
- if (atomic_read(&nvdimm->busy)) {
- dev_dbg(dev, "Unable to secure erase while DIMM active.\n");
- return -EBUSY;
- }
-
- if (nvdimm->sec.state >= NVDIMM_SECURITY_FROZEN) {
- dev_dbg(dev, "Incorrect security state: %d\n",
- nvdimm->sec.state);
- return -EIO;
- }
-
- if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
- dev_dbg(dev, "Security operation in progress.\n");
- return -EBUSY;
- }
+ rc = check_security_state(nvdimm);
+ if (rc)
+ return rc;
- if (nvdimm->sec.ext_state != NVDIMM_SECURITY_UNLOCKED
+ if (!test_bit(NVDIMM_SECURITY_UNLOCKED, &nvdimm->sec.ext_flags)
&& pass_type == NVDIMM_MASTER) {
dev_dbg(dev,
"Attempt to secure erase in wrong master state.\n");
@@ -359,11 +356,11 @@ int nvdimm_security_erase(struct nvdimm *nvdimm, unsigned int keyid,
rc == 0 ? "success" : "fail");
nvdimm_put_key(key);
- nvdimm->sec.state = nvdimm_security_state(nvdimm, NVDIMM_USER);
+ nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
return rc;
}
-int nvdimm_security_overwrite(struct nvdimm *nvdimm, unsigned int keyid)
+static int security_overwrite(struct nvdimm *nvdimm, unsigned int keyid)
{
struct device *dev = &nvdimm->dev;
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
@@ -375,29 +372,17 @@ int nvdimm_security_overwrite(struct nvdimm *nvdimm, unsigned int keyid)
lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
if (!nvdimm->sec.ops || !nvdimm->sec.ops->overwrite
- || nvdimm->sec.state < 0)
+ || !nvdimm->sec.flags)
return -EOPNOTSUPP;
- if (atomic_read(&nvdimm->busy)) {
- dev_dbg(dev, "Unable to overwrite while DIMM active.\n");
- return -EBUSY;
- }
-
if (dev->driver == NULL) {
dev_dbg(dev, "Unable to overwrite while DIMM active.\n");
return -EINVAL;
}
- if (nvdimm->sec.state >= NVDIMM_SECURITY_FROZEN) {
- dev_dbg(dev, "Incorrect security state: %d\n",
- nvdimm->sec.state);
- return -EIO;
- }
-
- if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
- dev_dbg(dev, "Security operation in progress.\n");
- return -EBUSY;
- }
+ rc = check_security_state(nvdimm);
+ if (rc)
+ return rc;
data = nvdimm_get_user_key_payload(nvdimm, keyid,
NVDIMM_BASE_KEY, &key);
@@ -412,7 +397,7 @@ int nvdimm_security_overwrite(struct nvdimm *nvdimm, unsigned int keyid)
if (rc == 0) {
set_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags);
set_bit(NDD_WORK_PENDING, &nvdimm->flags);
- nvdimm->sec.state = NVDIMM_SECURITY_OVERWRITE;
+ set_bit(NVDIMM_SECURITY_OVERWRITE, &nvdimm->sec.flags);
/*
* Make sure we don't lose device while doing overwrite
* query.
@@ -443,7 +428,7 @@ void __nvdimm_security_overwrite_query(struct nvdimm *nvdimm)
tmo = nvdimm->sec.overwrite_tmo;
if (!nvdimm->sec.ops || !nvdimm->sec.ops->query_overwrite
- || nvdimm->sec.state < 0)
+ || !nvdimm->sec.flags)
return;
rc = nvdimm->sec.ops->query_overwrite(nvdimm);
@@ -467,8 +452,8 @@ void __nvdimm_security_overwrite_query(struct nvdimm *nvdimm)
clear_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags);
clear_bit(NDD_WORK_PENDING, &nvdimm->flags);
put_device(&nvdimm->dev);
- nvdimm->sec.state = nvdimm_security_state(nvdimm, NVDIMM_USER);
- nvdimm->sec.ext_state = nvdimm_security_state(nvdimm, NVDIMM_MASTER);
+ nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
+ nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_MASTER);
}
void nvdimm_security_overwrite_query(struct work_struct *work)
@@ -480,3 +465,85 @@ void nvdimm_security_overwrite_query(struct work_struct *work)
__nvdimm_security_overwrite_query(nvdimm);
nvdimm_bus_unlock(&nvdimm->dev);
}
+
+#define OPS \
+ C( OP_FREEZE, "freeze", 1), \
+ C( OP_DISABLE, "disable", 2), \
+ C( OP_UPDATE, "update", 3), \
+ C( OP_ERASE, "erase", 2), \
+ C( OP_OVERWRITE, "overwrite", 2), \
+ C( OP_MASTER_UPDATE, "master_update", 3), \
+ C( OP_MASTER_ERASE, "master_erase", 2)
+#undef C
+#define C(a, b, c) a
+enum nvdimmsec_op_ids { OPS };
+#undef C
+#define C(a, b, c) { b, c }
+static struct {
+ const char *name;
+ int args;
+} ops[] = { OPS };
+#undef C
+
+#define SEC_CMD_SIZE 32
+#define KEY_ID_SIZE 10
+
+ssize_t nvdimm_security_store(struct device *dev, const char *buf, size_t len)
+{
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+ ssize_t rc;
+ char cmd[SEC_CMD_SIZE+1], keystr[KEY_ID_SIZE+1],
+ nkeystr[KEY_ID_SIZE+1];
+ unsigned int key, newkey;
+ int i;
+
+ rc = sscanf(buf, "%"__stringify(SEC_CMD_SIZE)"s"
+ " %"__stringify(KEY_ID_SIZE)"s"
+ " %"__stringify(KEY_ID_SIZE)"s",
+ cmd, keystr, nkeystr);
+ if (rc < 1)
+ return -EINVAL;
+ for (i = 0; i < ARRAY_SIZE(ops); i++)
+ if (sysfs_streq(cmd, ops[i].name))
+ break;
+ if (i >= ARRAY_SIZE(ops))
+ return -EINVAL;
+ if (ops[i].args > 1)
+ rc = kstrtouint(keystr, 0, &key);
+ if (rc >= 0 && ops[i].args > 2)
+ rc = kstrtouint(nkeystr, 0, &newkey);
+ if (rc < 0)
+ return rc;
+
+ if (i == OP_FREEZE) {
+ dev_dbg(dev, "freeze\n");
+ rc = nvdimm_security_freeze(nvdimm);
+ } else if (i == OP_DISABLE) {
+ dev_dbg(dev, "disable %u\n", key);
+ rc = security_disable(nvdimm, key);
+ } else if (i == OP_UPDATE || i == OP_MASTER_UPDATE) {
+ dev_dbg(dev, "%s %u %u\n", ops[i].name, key, newkey);
+ rc = security_update(nvdimm, key, newkey, i == OP_UPDATE
+ ? NVDIMM_USER : NVDIMM_MASTER);
+ } else if (i == OP_ERASE || i == OP_MASTER_ERASE) {
+ dev_dbg(dev, "%s %u\n", ops[i].name, key);
+ if (atomic_read(&nvdimm->busy)) {
+ dev_dbg(dev, "Unable to secure erase while DIMM active.\n");
+ return -EBUSY;
+ }
+ rc = security_erase(nvdimm, key, i == OP_ERASE
+ ? NVDIMM_USER : NVDIMM_MASTER);
+ } else if (i == OP_OVERWRITE) {
+ dev_dbg(dev, "overwrite %u\n", key);
+ if (atomic_read(&nvdimm->busy)) {
+ dev_dbg(dev, "Unable to overwrite while DIMM active.\n");
+ return -EBUSY;
+ }
+ rc = security_overwrite(nvdimm, key);
+ } else
+ return -EINVAL;
+
+ if (rc == 0)
+ rc = len;
+ return rc;
+}