Diffstat (limited to 'drivers/nvme')
-rw-r--r--  drivers/nvme/host/Makefile      |  1
-rw-r--r--  drivers/nvme/host/core.c        |  1
-rw-r--r--  drivers/nvme/host/fc.c          | 37
-rw-r--r--  drivers/nvme/host/pci.c         | 14
-rw-r--r--  drivers/nvme/host/rdma.c        | 27
-rw-r--r--  drivers/nvme/target/Makefile    |  1
-rw-r--r--  drivers/nvme/target/configfs.c  | 30
-rw-r--r--  drivers/nvme/target/core.c      | 15
-rw-r--r--  drivers/nvme/target/nvmet.h     |  2
9 files changed, 90 insertions(+), 38 deletions(-)
diff --git a/drivers/nvme/host/Makefile b/drivers/nvme/host/Makefile
index b856f2f549cd..a25fd43650ad 100644
--- a/drivers/nvme/host/Makefile
+++ b/drivers/nvme/host/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_NVME_CORE) += nvme-core.o
obj-$(CONFIG_BLK_DEV_NVME) += nvme.o
obj-$(CONFIG_NVME_FABRICS) += nvme-fabrics.o
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 993813ccdc0b..25da74d310d1 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1407,6 +1407,7 @@ static int nvme_revalidate_disk(struct gendisk *disk)
goto out;
}
+ __nvme_revalidate_disk(disk, id);
nvme_report_ns_ids(ctrl, ns->head->ns_id, id, &ids);
if (!nvme_ns_ids_equal(&ns->head->ids, &ids)) {
dev_err(ctrl->device,
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 6eb460b117d6..7ab0be55c7d0 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2880,10 +2880,10 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
nvme_fc_abort_aen_ops(ctrl);
/* wait for all io that had to be aborted */
- spin_lock_irqsave(&ctrl->lock, flags);
+ spin_lock_irq(&ctrl->lock);
wait_event_lock_irq(ctrl->ioabort_wait, ctrl->iocnt == 0, ctrl->lock);
ctrl->flags &= ~FCCTRL_TERMIO;
- spin_unlock_irqrestore(&ctrl->lock, flags);
+ spin_unlock_irq(&ctrl->lock);
nvme_fc_term_aen_ops(ctrl);
@@ -3072,7 +3072,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
{
struct nvme_fc_ctrl *ctrl;
unsigned long flags;
- int ret, idx;
+ int ret, idx, retry;
if (!(rport->remoteport.port_role &
(FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) {
@@ -3105,6 +3105,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
ctrl->dev = lport->dev;
ctrl->cnum = idx;
ctrl->assoc_active = false;
+ init_waitqueue_head(&ctrl->ioabort_wait);
get_device(ctrl->dev);
kref_init(&ctrl->ref);
@@ -3170,9 +3171,37 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
spin_unlock_irqrestore(&rport->lock, flags);
- ret = nvme_fc_create_association(ctrl);
+ /*
+ * It's possible that transactions used to create the association
+ * may fail. Examples: CreateAssociation LS or CreateIOConnection
+ * LS gets dropped/corrupted/fails; or a frame gets dropped or a
+ * command times out for one of the actions to init the controller
+ * (Connect, Get/Set_Property, Set_Features, etc). Many of these
+ * transport errors (frame drop, LS failure) inherently must kill
+ * the association. The transport is coded so that any command used
+ * to create the association (prior to a LIVE state transition
+ * while NEW or RECONNECTING) will fail if it completes in error or
+ * times out.
+ *
+ * As such: as the connect request was most likely due to a
+ * udev event that discovered the remote port, meaning there is
+ * no admin or script present to restart it if the connect
+ * request fails, retry the initial connection creation up to
+ * three times before giving up and declaring failure.
+ */
+ for (retry = 0; retry < 3; retry++) {
+ ret = nvme_fc_create_association(ctrl);
+ if (!ret)
+ break;
+ }
+
if (ret) {
+ /* all connect retries exhausted - fail out */
+ dev_err(ctrl->ctrl.device,
+ "NVME-FC{%d}: Connect retry failed\n", ctrl->cnum);
+
ctrl->ctrl.opts = NULL;
+
/* initiate nvme ctrl ref counting teardown */
nvme_uninit_ctrl(&ctrl->ctrl);
nvme_put_ctrl(&ctrl->ctrl);
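The hunk above replaces the single nvme_fc_create_association() call with a
bounded retry loop. A minimal userspace C sketch of the same pattern (the
function names and failure behavior here are illustrative, not the driver's):

    #include <stdio.h>
    #include <errno.h>

    #define MAX_CONNECT_RETRIES 3

    /* Stand-in for nvme_fc_create_association(): fails twice, then
     * succeeds, mimicking e.g. a dropped CreateAssociation LS on the
     * early attempts. */
    static int create_association(int attempt)
    {
            return attempt < 2 ? -EIO : 0;
    }

    int main(void)
    {
            int ret = -EIO, retry;

            /* retry the initial association a fixed number of times */
            for (retry = 0; retry < MAX_CONNECT_RETRIES; retry++) {
                    ret = create_association(retry);
                    if (!ret)
                            break;
            }
            if (ret) {
                    fprintf(stderr, "connect retry failed\n");
                    return 1;
            }
            printf("associated after %d attempt(s)\n", retry + 1);
            return 0;
    }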
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 762b8402e04c..a11cfd470089 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -92,7 +92,7 @@ struct nvme_dev {
struct mutex shutdown_lock;
bool subsystem;
void __iomem *cmb;
- dma_addr_t cmb_dma_addr;
+ pci_bus_addr_t cmb_bus_addr;
u64 cmb_size;
u32 cmbsz;
u32 cmbloc;
@@ -1372,7 +1372,7 @@ static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
if (qid && dev->cmb && use_cmb_sqes && NVME_CMB_SQS(dev->cmbsz)) {
unsigned offset = (qid - 1) * roundup(SQ_SIZE(depth),
dev->ctrl.page_size);
- nvmeq->sq_dma_addr = dev->cmb_dma_addr + offset;
+ nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset;
nvmeq->sq_cmds_io = dev->cmb + offset;
} else {
nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
@@ -1669,7 +1669,7 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
resource_size_t bar_size;
struct pci_dev *pdev = to_pci_dev(dev->dev);
void __iomem *cmb;
- dma_addr_t dma_addr;
+ int bar;
dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
if (!(NVME_CMB_SZ(dev->cmbsz)))
@@ -1682,7 +1682,8 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
szu = (u64)1 << (12 + 4 * NVME_CMB_SZU(dev->cmbsz));
size = szu * NVME_CMB_SZ(dev->cmbsz);
offset = szu * NVME_CMB_OFST(dev->cmbloc);
- bar_size = pci_resource_len(pdev, NVME_CMB_BIR(dev->cmbloc));
+ bar = NVME_CMB_BIR(dev->cmbloc);
+ bar_size = pci_resource_len(pdev, bar);
if (offset > bar_size)
return NULL;
@@ -1695,12 +1696,11 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
if (size > bar_size - offset)
size = bar_size - offset;
- dma_addr = pci_resource_start(pdev, NVME_CMB_BIR(dev->cmbloc)) + offset;
- cmb = ioremap_wc(dma_addr, size);
+ cmb = ioremap_wc(pci_resource_start(pdev, bar) + offset, size);
if (!cmb)
return NULL;
- dev->cmb_dma_addr = dma_addr;
+ dev->cmb_bus_addr = pci_bus_address(pdev, bar) + offset;
dev->cmb_size = size;
return cmb;
}
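The pci.c change above hinges on the difference between the CPU and bus views
of a BAR: ioremap_wc() takes the CPU physical address from
pci_resource_start(), while the submission-queue address handed to the NVMe
controller must be the PCI bus address from pci_bus_address(), since the two
can differ behind host bridges without 1:1 address translation. A
kernel-style sketch of that split (compiles only against kernel headers;
map_cmb_sketch() is a hypothetical name, not the driver's function):

    #include <linux/pci.h>
    #include <linux/io.h>

    static void __iomem *map_cmb_sketch(struct pci_dev *pdev, int bar,
                                        resource_size_t offset,
                                        resource_size_t size,
                                        pci_bus_addr_t *bus_addr)
    {
            void __iomem *cmb;

            /* CPU physical address: what the CPU-side mapping needs */
            cmb = ioremap_wc(pci_resource_start(pdev, bar) + offset, size);
            if (!cmb)
                    return NULL;

            /* bus address: what the device must be programmed with */
            *bus_addr = pci_bus_address(pdev, bar) + offset;
            return cmb;
    }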
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index c8d854474a5b..4f9bf2f815c3 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -563,6 +563,12 @@ static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
if (!test_and_clear_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags))
return;
+ if (nvme_rdma_queue_idx(queue) == 0) {
+ nvme_rdma_free_qe(queue->device->dev,
+ &queue->ctrl->async_event_sqe,
+ sizeof(struct nvme_command), DMA_TO_DEVICE);
+ }
+
nvme_rdma_destroy_queue_ib(queue);
rdma_destroy_id(queue->cm_id);
}
@@ -731,8 +737,6 @@ out:
static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
bool remove)
{
- nvme_rdma_free_qe(ctrl->queues[0].device->dev, &ctrl->async_event_sqe,
- sizeof(struct nvme_command), DMA_TO_DEVICE);
nvme_rdma_stop_queue(&ctrl->queues[0]);
if (remove) {
blk_cleanup_queue(ctrl->ctrl.admin_q);
@@ -757,8 +761,10 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
if (new) {
ctrl->ctrl.admin_tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, true);
- if (IS_ERR(ctrl->ctrl.admin_tagset))
+ if (IS_ERR(ctrl->ctrl.admin_tagset)) {
+ error = PTR_ERR(ctrl->ctrl.admin_tagset);
goto out_free_queue;
+ }
ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
if (IS_ERR(ctrl->ctrl.admin_q)) {
@@ -837,8 +843,10 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
if (new) {
ctrl->ctrl.tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, false);
- if (IS_ERR(ctrl->ctrl.tagset))
+ if (IS_ERR(ctrl->ctrl.tagset)) {
+ ret = PTR_ERR(ctrl->ctrl.tagset);
goto out_free_io_queues;
+ }
ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
if (IS_ERR(ctrl->ctrl.connect_q)) {
@@ -1594,12 +1602,15 @@ nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue, struct request *rq)
/*
* reconnecting state means transport disruption, which
* can take a long time and even might fail permanently,
- * so we can't let incoming I/O be requeued forever.
- * fail it fast to allow upper layers a chance to
- * failover.
+ * so fail fast to give upper layers a chance to failover.
+ * deleting state means that the ctrl will never accept
+ * commands again; fail it permanently.
*/
- if (queue->ctrl->ctrl.state == NVME_CTRL_RECONNECTING)
+ if (queue->ctrl->ctrl.state == NVME_CTRL_RECONNECTING ||
+ queue->ctrl->ctrl.state == NVME_CTRL_DELETING) {
+ nvme_req(rq)->status = NVME_SC_ABORT_REQ;
return BLK_STS_IOERR;
+ }
return BLK_STS_RESOURCE; /* try again later */
}
}
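Two of the rdma.c hunks fix the same bug class: on tagset allocation failure
the error code was never extracted from the ERR_PTR-encoded pointer before
the goto, so the function could unwind and return a stale value. A
kernel-style sketch of the corrected ERR_PTR/PTR_ERR idiom
(alloc_tagset_sketch() and free_queue_sketch() are hypothetical helpers
standing in for the driver's own):

    #include <linux/err.h>
    #include <linux/blk-mq.h>

    struct blk_mq_tag_set *alloc_tagset_sketch(void *ctrl);
    void free_queue_sketch(void *ctrl);

    static int configure_sketch(void *ctrl)
    {
            struct blk_mq_tag_set *set;
            int error;

            set = alloc_tagset_sketch(ctrl);
            if (IS_ERR(set)) {
                    error = PTR_ERR(set);  /* propagate the encoded errno */
                    goto out_free_queue;
            }
            return 0;

    out_free_queue:
            free_queue_sketch(ctrl);
            return error;
    }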
diff --git a/drivers/nvme/target/Makefile b/drivers/nvme/target/Makefile
index fecc14f535b2..488250189c99 100644
--- a/drivers/nvme/target/Makefile
+++ b/drivers/nvme/target/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_NVME_TARGET) += nvmet.o
obj-$(CONFIG_NVME_TARGET_LOOP) += nvme-loop.o
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index b6aeb1d70951..e6b2d2af81b6 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -20,8 +20,8 @@
#include "nvmet.h"
-static struct config_item_type nvmet_host_type;
-static struct config_item_type nvmet_subsys_type;
+static const struct config_item_type nvmet_host_type;
+static const struct config_item_type nvmet_subsys_type;
/*
* nvmet_port Generic ConfigFS definitions.
@@ -425,7 +425,7 @@ static struct configfs_item_operations nvmet_ns_item_ops = {
.release = nvmet_ns_release,
};
-static struct config_item_type nvmet_ns_type = {
+static const struct config_item_type nvmet_ns_type = {
.ct_item_ops = &nvmet_ns_item_ops,
.ct_attrs = nvmet_ns_attrs,
.ct_owner = THIS_MODULE,
@@ -464,7 +464,7 @@ static struct configfs_group_operations nvmet_namespaces_group_ops = {
.make_group = nvmet_ns_make,
};
-static struct config_item_type nvmet_namespaces_type = {
+static const struct config_item_type nvmet_namespaces_type = {
.ct_group_ops = &nvmet_namespaces_group_ops,
.ct_owner = THIS_MODULE,
};
@@ -540,7 +540,7 @@ static struct configfs_item_operations nvmet_port_subsys_item_ops = {
.drop_link = nvmet_port_subsys_drop_link,
};
-static struct config_item_type nvmet_port_subsys_type = {
+static const struct config_item_type nvmet_port_subsys_type = {
.ct_item_ops = &nvmet_port_subsys_item_ops,
.ct_owner = THIS_MODULE,
};
@@ -613,7 +613,7 @@ static struct configfs_item_operations nvmet_allowed_hosts_item_ops = {
.drop_link = nvmet_allowed_hosts_drop_link,
};
-static struct config_item_type nvmet_allowed_hosts_type = {
+static const struct config_item_type nvmet_allowed_hosts_type = {
.ct_item_ops = &nvmet_allowed_hosts_item_ops,
.ct_owner = THIS_MODULE,
};
@@ -729,7 +729,7 @@ static struct configfs_item_operations nvmet_subsys_item_ops = {
.release = nvmet_subsys_release,
};
-static struct config_item_type nvmet_subsys_type = {
+static const struct config_item_type nvmet_subsys_type = {
.ct_item_ops = &nvmet_subsys_item_ops,
.ct_attrs = nvmet_subsys_attrs,
.ct_owner = THIS_MODULE,
@@ -767,7 +767,7 @@ static struct configfs_group_operations nvmet_subsystems_group_ops = {
.make_group = nvmet_subsys_make,
};
-static struct config_item_type nvmet_subsystems_type = {
+static const struct config_item_type nvmet_subsystems_type = {
.ct_group_ops = &nvmet_subsystems_group_ops,
.ct_owner = THIS_MODULE,
};
@@ -827,7 +827,7 @@ static struct configfs_item_operations nvmet_referral_item_ops = {
.release = nvmet_referral_release,
};
-static struct config_item_type nvmet_referral_type = {
+static const struct config_item_type nvmet_referral_type = {
.ct_owner = THIS_MODULE,
.ct_attrs = nvmet_referral_attrs,
.ct_item_ops = &nvmet_referral_item_ops,
@@ -852,7 +852,7 @@ static struct configfs_group_operations nvmet_referral_group_ops = {
.make_group = nvmet_referral_make,
};
-static struct config_item_type nvmet_referrals_type = {
+static const struct config_item_type nvmet_referrals_type = {
.ct_owner = THIS_MODULE,
.ct_group_ops = &nvmet_referral_group_ops,
};
@@ -880,7 +880,7 @@ static struct configfs_item_operations nvmet_port_item_ops = {
.release = nvmet_port_release,
};
-static struct config_item_type nvmet_port_type = {
+static const struct config_item_type nvmet_port_type = {
.ct_attrs = nvmet_port_attrs,
.ct_item_ops = &nvmet_port_item_ops,
.ct_owner = THIS_MODULE,
@@ -921,7 +921,7 @@ static struct configfs_group_operations nvmet_ports_group_ops = {
.make_group = nvmet_ports_make,
};
-static struct config_item_type nvmet_ports_type = {
+static const struct config_item_type nvmet_ports_type = {
.ct_group_ops = &nvmet_ports_group_ops,
.ct_owner = THIS_MODULE,
};
@@ -940,7 +940,7 @@ static struct configfs_item_operations nvmet_host_item_ops = {
.release = nvmet_host_release,
};
-static struct config_item_type nvmet_host_type = {
+static const struct config_item_type nvmet_host_type = {
.ct_item_ops = &nvmet_host_item_ops,
.ct_owner = THIS_MODULE,
};
@@ -963,14 +963,14 @@ static struct configfs_group_operations nvmet_hosts_group_ops = {
.make_group = nvmet_hosts_make_group,
};
-static struct config_item_type nvmet_hosts_type = {
+static const struct config_item_type nvmet_hosts_type = {
.ct_group_ops = &nvmet_hosts_group_ops,
.ct_owner = THIS_MODULE,
};
static struct config_group nvmet_hosts_group;
-static struct config_item_type nvmet_root_type = {
+static const struct config_item_type nvmet_root_type = {
.ct_owner = THIS_MODULE,
};
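All of the configfs.c hunks are one mechanical change: config_item_type
instances are only read by configfs, so once the configfs API accepts const
pointers they can be declared const and placed in read-only data, e.g.
(example_type is illustrative):

    static const struct config_item_type example_type = {
            .ct_owner = THIS_MODULE,
    };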
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 22a2a2bb40f9..b54748ad5f48 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -400,12 +400,21 @@ struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
{
+ u32 old_sqhd, new_sqhd;
+ u16 sqhd;
+
if (status)
nvmet_set_status(req, status);
- if (req->sq->size)
- req->sq->sqhd = (req->sq->sqhd + 1) % req->sq->size;
- req->rsp->sq_head = cpu_to_le16(req->sq->sqhd);
+ if (req->sq->size) {
+ do {
+ old_sqhd = req->sq->sqhd;
+ new_sqhd = (old_sqhd + 1) % req->sq->size;
+ } while (cmpxchg(&req->sq->sqhd, old_sqhd, new_sqhd) !=
+ old_sqhd);
+ }
+ sqhd = req->sq->sqhd & 0x0000FFFF;
+ req->rsp->sq_head = cpu_to_le16(sqhd);
req->rsp->sq_id = cpu_to_le16(req->sq->qid);
req->rsp->command_id = req->cmd->common.command_id;
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 194ebffc688c..417f6c0331cc 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -74,7 +74,7 @@ struct nvmet_sq {
struct percpu_ref ref;
u16 qid;
u16 size;
- u16 sqhd;
+ u32 sqhd;
struct completion free_done;
struct completion confirm_done;
};
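The target/core.c and nvmet.h hunks work together: sq->sqhd grows to 32 bits
(presumably so cmpxchg() can be used portably, as 16-bit cmpxchg is not
available on every architecture), and the head update becomes a
compare-and-swap loop so concurrent completions cannot lose increments. A
runnable userspace analogue, using the GCC/Clang __sync builtin in place of
the kernel's cmpxchg():

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t sqhd;               /* widened like the nvmet.h change */
    static const uint32_t sq_size = 128;

    static uint16_t advance_sqhd(void)
    {
            uint32_t old, new;

            /* retry until no other thread raced the update */
            do {
                    old = sqhd;
                    new = (old + 1) % sq_size;
            } while (__sync_val_compare_and_swap(&sqhd, old, new) != old);

            return (uint16_t)(new & 0xFFFF);  /* low 16 bits go in the CQE */
    }

    int main(void)
    {
            for (int i = 0; i < 130; i++)
                    advance_sqhd();
            printf("sqhd = %u\n", sqhd);      /* 130 % 128 == 2 */
            return 0;
    }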