Diffstat (limited to 'drivers/nvme/target')
-rw-r--r--  drivers/nvme/target/admin-cmd.c   |  6
-rw-r--r--  drivers/nvme/target/core.c        | 23
-rw-r--r--  drivers/nvme/target/discovery.c   |  4
-rw-r--r--  drivers/nvme/target/fabrics-cmd.c |  6
-rw-r--r--  drivers/nvme/target/fc.c          |  8
-rw-r--r--  drivers/nvme/target/loop.c        | 95
-rw-r--r--  drivers/nvme/target/nvmet.h       |  2
-rw-r--r--  drivers/nvme/target/rdma.c        | 15
8 files changed, 89 insertions, 70 deletions
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 95ae52390478..a7bcff45f437 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -13,6 +13,8 @@
  */
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 #include <linux/module.h>
+#include <linux/rculist.h>
+
 #include <generated/utsrelease.h>
 #include <asm/unaligned.h>
 #include "nvmet.h"
@@ -41,7 +43,7 @@ static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
     ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->get_log_page.nsid);
     if (!ns) {
         status = NVME_SC_INVALID_NS;
-        pr_err("nvmet : Counld not find namespace id : %d\n",
+        pr_err("nvmet : Could not find namespace id : %d\n",
                 le32_to_cpu(req->cmd->get_log_page.nsid));
         goto out;
     }
@@ -509,7 +511,7 @@ int nvmet_parse_admin_cmd(struct nvmet_req *req)
         break;
     case nvme_admin_identify:
         req->data_len = 4096;
-        switch (le32_to_cpu(cmd->identify.cns)) {
+        switch (cmd->identify.cns) {
         case NVME_ID_CNS_NS:
             req->execute = nvmet_execute_identify_ns;
             return 0;
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index fc5ba2f9e15f..798653b329b2 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -14,9 +14,12 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 #include <linux/module.h>
 #include <linux/random.h>
+#include <linux/rculist.h>
+
 #include "nvmet.h"

 static struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
+static DEFINE_IDA(cntlid_ida);

 /*
  * This read/write semaphore is used to synchronize access to configuration
@@ -422,6 +425,13 @@ void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
     ctrl->sqs[qid] = sq;
 }

+static void nvmet_confirm_sq(struct percpu_ref *ref)
+{
+    struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);
+
+    complete(&sq->confirm_done);
+}
+
 void nvmet_sq_destroy(struct nvmet_sq *sq)
 {
     /*
@@ -430,7 +440,8 @@ void nvmet_sq_destroy(struct nvmet_sq *sq)
      */
     if (sq->ctrl && sq->ctrl->sqs && sq->ctrl->sqs[0] == sq)
         nvmet_async_events_free(sq->ctrl);
-    percpu_ref_kill(&sq->ref);
+    percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
+    wait_for_completion(&sq->confirm_done);
     wait_for_completion(&sq->free_done);
     percpu_ref_exit(&sq->ref);
@@ -458,6 +469,7 @@ int nvmet_sq_init(struct nvmet_sq *sq)
         return ret;
     }
     init_completion(&sq->free_done);
+    init_completion(&sq->confirm_done);

     return 0;
 }
@@ -749,7 +761,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
     if (!ctrl->sqs)
         goto out_free_cqs;

-    ret = ida_simple_get(&subsys->cntlid_ida,
+    ret = ida_simple_get(&cntlid_ida,
                  NVME_CNTLID_MIN, NVME_CNTLID_MAX,
                  GFP_KERNEL);
     if (ret < 0) {
@@ -819,7 +831,7 @@ static void nvmet_ctrl_free(struct kref *ref)
     flush_work(&ctrl->async_event_work);
     cancel_work_sync(&ctrl->fatal_err_work);

-    ida_simple_remove(&subsys->cntlid_ida, ctrl->cntlid);
+    ida_simple_remove(&cntlid_ida, ctrl->cntlid);
     nvmet_subsys_put(subsys);

     kfree(ctrl->sqs);
@@ -918,9 +930,6 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
     mutex_init(&subsys->lock);
     INIT_LIST_HEAD(&subsys->namespaces);
     INIT_LIST_HEAD(&subsys->ctrls);
-
-    ida_init(&subsys->cntlid_ida);
-
     INIT_LIST_HEAD(&subsys->hosts);

     return subsys;
@@ -933,7 +942,6 @@ static void nvmet_subsys_free(struct kref *ref)

     WARN_ON_ONCE(!list_empty(&subsys->namespaces));

-    ida_destroy(&subsys->cntlid_ida);
     kfree(subsys->subsysnqn);
     kfree(subsys);
 }
@@ -976,6 +984,7 @@ static void __exit nvmet_exit(void)
 {
     nvmet_exit_configfs();
     nvmet_exit_discovery();
+    ida_destroy(&cntlid_ida);

     BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
     BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
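The core.c change above closes a teardown race: percpu_ref_kill() only begins the percpu-to-atomic mode switch, which completes after an RCU grace period, so a racing submitter could still take percpu-mode references while nvmet_sq_destroy() was already waiting on free_done. Killing with a confirmation callback and a second completion makes teardown first wait for the mode switch, then wait for the reference count to drain. A minimal sketch of that pattern, with hypothetical demo_* names (the in-tree code hangs both completions off struct nvmet_sq):

    #include <linux/percpu-refcount.h>
    #include <linux/completion.h>

    struct demo_queue {
        struct percpu_ref   ref;
        struct completion   confirm_done;  /* ref has switched to atomic mode */
        struct completion   free_done;     /* last reference was dropped */
    };

    static void demo_queue_confirm_kill(struct percpu_ref *ref)
    {
        struct demo_queue *q = container_of(ref, struct demo_queue, ref);

        /* Invoked once no CPU can take a percpu-mode reference anymore. */
        complete(&q->confirm_done);
    }

    static void demo_queue_release(struct percpu_ref *ref)
    {
        struct demo_queue *q = container_of(ref, struct demo_queue, ref);

        complete(&q->free_done);
    }

    static int demo_queue_init(struct demo_queue *q)
    {
        int ret;

        ret = percpu_ref_init(&q->ref, demo_queue_release, 0, GFP_KERNEL);
        if (ret)
            return ret;
        init_completion(&q->confirm_done);
        init_completion(&q->free_done);
        return 0;
    }

    static void demo_queue_destroy(struct demo_queue *q)
    {
        percpu_ref_kill_and_confirm(&q->ref, demo_queue_confirm_kill);
        wait_for_completion(&q->confirm_done);  /* mode switch finished */
        wait_for_completion(&q->free_done);     /* all references dropped */
        percpu_ref_exit(&q->ref);
    }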
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
index 12f39eea569f..af8aabf05335 100644
--- a/drivers/nvme/target/discovery.c
+++ b/drivers/nvme/target/discovery.c
@@ -186,14 +186,14 @@ int nvmet_parse_discovery_cmd(struct nvmet_req *req)
     }
     case nvme_admin_identify:
         req->data_len = 4096;
-        switch (le32_to_cpu(cmd->identify.cns)) {
+        switch (cmd->identify.cns) {
         case NVME_ID_CNS_CTRL:
             req->execute =
                 nvmet_execute_identify_disc_ctrl;
             return 0;
         default:
             pr_err("nvmet: unsupported identify cns %d\n",
-                le32_to_cpu(cmd->identify.cns));
+                cmd->identify.cns);
             return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
         }
     default:
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index f4088198cd0d..8bd022af3df6 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -153,8 +153,8 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
         goto out;
     }

-    pr_info("creating controller %d for NQN %s.\n",
-        ctrl->cntlid, ctrl->hostnqn);
+    pr_info("creating controller %d for subsystem %s for NQN %s.\n",
+        ctrl->cntlid, ctrl->subsys->subsysnqn, ctrl->hostnqn);
     req->rsp->result.u16 = cpu_to_le16(ctrl->cntlid);

 out:
@@ -220,7 +220,7 @@ int nvmet_parse_connect_cmd(struct nvmet_req *req)

     req->ns = NULL;

-    if (req->cmd->common.opcode != nvme_fabrics_command) {
+    if (cmd->common.opcode != nvme_fabrics_command) {
         pr_err("invalid command 0x%x on unconnected queue.\n",
             cmd->fabrics.opcode);
         return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index ba57f9852bde..8f483ee7868c 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -1817,16 +1817,14 @@ nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq)
         /* data no longer needed */
         nvmet_fc_free_tgt_pgs(fod);

-        if (fcpreq->fcp_error || abort)
-            nvmet_req_complete(&fod->req, fcpreq->fcp_error);
-
+        nvmet_req_complete(&fod->req, fcpreq->fcp_error);
         return;
     }

     switch (fcpreq->op) {

     case NVMET_FCOP_WRITEDATA:
-        if (abort || fcpreq->fcp_error ||
+        if (fcpreq->fcp_error ||
             fcpreq->transferred_length != fcpreq->transfer_length) {
             nvmet_req_complete(&fod->req,
                     NVME_SC_FC_TRANSPORT_ERROR);
@@ -1849,7 +1847,7 @@ nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq)

     case NVMET_FCOP_READDATA:
     case NVMET_FCOP_READDATA_RSP:
-        if (abort || fcpreq->fcp_error ||
+        if (fcpreq->fcp_error ||
             fcpreq->transferred_length != fcpreq->transfer_length) {
             /* data no longer needed */
             nvmet_fc_free_tgt_pgs(fod);
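The identify changes in admin-cmd.c and discovery.c above drop the le32_to_cpu() conversion because CNS is not a 32-bit field: in NVMe 1.2.1 it is the low byte of Command Dword 10, and the kernel models it as a plain __u8, so there is nothing to byte-swap. Roughly the layout the switch statements rely on (a sketch inferred from the surrounding change, not a verbatim copy of include/linux/nvme.h):

    /* Identify command SQE, as assumed by the switch statements above. */
    struct nvme_identify {
        __u8                opcode;
        __u8                flags;
        __u16               command_id;
        __le32              nsid;
        __u64               rsvd2[2];
        union nvme_data_ptr dptr;
        __u8                cns;    /* one byte: no endian conversion needed */
        __u8                rsvd3;
        __le16              ctrlid;
        __u32               rsvd11[5];
    };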
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 9aaa70071ae5..22f7bc6bac7f 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -104,7 +104,7 @@ static void nvme_loop_complete_rq(struct request *req)
         return;
     }

-    if (req->cmd_type == REQ_TYPE_DRV_PRIV)
+    if (blk_rq_is_passthrough(req))
         error = req->errors;
     else
         error = nvme_error_status(req->errors);
@@ -223,8 +223,6 @@ static void nvme_loop_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
 static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
         struct nvme_loop_iod *iod, unsigned int queue_idx)
 {
-    BUG_ON(queue_idx >= ctrl->queue_count);
-
     iod->req.cmd = &iod->cmd;
     iod->req.rsp = &iod->rsp;
     iod->queue = &ctrl->queues[queue_idx];
@@ -288,9 +286,9 @@ static struct blk_mq_ops nvme_loop_admin_mq_ops = {

 static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
 {
+    nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
     blk_cleanup_queue(ctrl->ctrl.admin_q);
     blk_mq_free_tag_set(&ctrl->admin_tag_set);
-    nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
 }

 static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
@@ -314,6 +312,43 @@ free_ctrl:
     kfree(ctrl);
 }

+static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
+{
+    int i;
+
+    for (i = 1; i < ctrl->queue_count; i++)
+        nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
+}
+
+static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
+{
+    struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
+    unsigned int nr_io_queues;
+    int ret, i;
+
+    nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
+    ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
+    if (ret || !nr_io_queues)
+        return ret;
+
+    dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues);
+
+    for (i = 1; i <= nr_io_queues; i++) {
+        ctrl->queues[i].ctrl = ctrl;
+        ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
+        if (ret)
+            goto out_destroy_queues;
+
+        ctrl->queue_count++;
+    }
+
+    return 0;
+
+out_destroy_queues:
+    nvme_loop_destroy_io_queues(ctrl);
+    return ret;
+}
+
 static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
 {
     int error;
@@ -385,17 +420,13 @@ out_free_sq:

 static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
 {
-    int i;
-
     nvme_stop_keep_alive(&ctrl->ctrl);

     if (ctrl->queue_count > 1) {
         nvme_stop_queues(&ctrl->ctrl);
         blk_mq_tagset_busy_iter(&ctrl->tag_set,
                     nvme_cancel_request, &ctrl->ctrl);
-
-        for (i = 1; i < ctrl->queue_count; i++)
-            nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
+        nvme_loop_destroy_io_queues(ctrl);
     }

     if (ctrl->ctrl.state == NVME_CTRL_LIVE)
@@ -467,19 +498,14 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
     if (ret)
         goto out_disable;

-    for (i = 1; i <= ctrl->ctrl.opts->nr_io_queues; i++) {
-        ctrl->queues[i].ctrl = ctrl;
-        ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
-        if (ret)
-            goto out_free_queues;
-
-        ctrl->queue_count++;
-    }
+    ret = nvme_loop_init_io_queues(ctrl);
+    if (ret)
+        goto out_destroy_admin;

-    for (i = 1; i <= ctrl->ctrl.opts->nr_io_queues; i++) {
+    for (i = 1; i < ctrl->queue_count; i++) {
         ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
         if (ret)
-            goto out_free_queues;
+            goto out_destroy_io;
     }

     changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
@@ -492,9 +518,9 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)

     return;

-out_free_queues:
-    for (i = 1; i < ctrl->queue_count; i++)
-        nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
+out_destroy_io:
+    nvme_loop_destroy_io_queues(ctrl);
+out_destroy_admin:
     nvme_loop_destroy_admin_queue(ctrl);
 out_disable:
     dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
@@ -533,25 +559,12 @@ static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {

 static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
 {
-    struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
     int ret, i;

-    ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues);
-    if (ret || !opts->nr_io_queues)
+    ret = nvme_loop_init_io_queues(ctrl);
+    if (ret)
         return ret;

-    dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n",
-         opts->nr_io_queues);
-
-    for (i = 1; i <= opts->nr_io_queues; i++) {
-        ctrl->queues[i].ctrl = ctrl;
-        ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
-        if (ret)
-            goto out_destroy_queues;
-
-        ctrl->queue_count++;
-    }
-
     memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
     ctrl->tag_set.ops = &nvme_loop_mq_ops;
     ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
@@ -575,7 +588,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
         goto out_free_tagset;
     }

-    for (i = 1; i <= opts->nr_io_queues; i++) {
+    for (i = 1; i < ctrl->queue_count; i++) {
         ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
         if (ret)
             goto out_cleanup_connect_q;
@@ -588,8 +601,7 @@ out_cleanup_connect_q:
 out_free_tagset:
     blk_mq_free_tag_set(&ctrl->tag_set);
 out_destroy_queues:
-    for (i = 1; i < ctrl->queue_count; i++)
-        nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
+    nvme_loop_destroy_io_queues(ctrl);
     return ret;
 }

@@ -724,8 +736,7 @@ static int __init nvme_loop_init_module(void)
     ret = nvmet_register_transport(&nvme_loop_ops);
     if (ret)
         return ret;
-    nvmf_register_transport(&nvme_loop_transport);
-    return 0;
+    return nvmf_register_transport(&nvme_loop_transport);
 }

 static void __exit nvme_loop_cleanup_module(void)
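The loop.c refactor above replaces three open-coded setup/teardown loops with one nvme_loop_init_io_queues()/nvme_loop_destroy_io_queues() pair, using ctrl->queue_count to record how many queues actually came up so the destroy helper is safe to call even after a partial failure. A self-contained sketch of that init/unwind pairing, with hypothetical demo_* names:

    struct demo_queue { int live; };

    struct demo_ctrl {
        int queue_count;           /* starts at 1: queue 0 is the admin queue */
        struct demo_queue *queues; /* array of nr_io_queues + 1 entries */
    };

    static int demo_queue_setup(struct demo_queue *q) { q->live = 1; return 0; }
    static void demo_queue_teardown(struct demo_queue *q) { q->live = 0; }

    /* Tears down exactly the queues that init brought up, no more. */
    static void demo_destroy_io_queues(struct demo_ctrl *ctrl)
    {
        int i;

        for (i = 1; i < ctrl->queue_count; i++)
            demo_queue_teardown(&ctrl->queues[i]);
        ctrl->queue_count = 1;
    }

    /* Counts successes as it goes; on error, unwinds through the same
     * helper every caller uses, instead of an open-coded loop per site.
     */
    static int demo_init_io_queues(struct demo_ctrl *ctrl, int nr_io_queues)
    {
        int ret, i;

        for (i = 1; i <= nr_io_queues; i++) {
            ret = demo_queue_setup(&ctrl->queues[i]);
            if (ret)
                goto out_destroy;
            ctrl->queue_count++;
        }
        return 0;

    out_destroy:
        demo_destroy_io_queues(ctrl);
        return ret;
    }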
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index cc7ad06b43a7..f7ff15f17ca9 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -73,6 +73,7 @@ struct nvmet_sq {
     u16                 qid;
     u16                 size;
     struct completion   free_done;
+    struct completion   confirm_done;
 };

 /**
@@ -142,7 +143,6 @@ struct nvmet_subsys {
     unsigned int        max_nsid;

     struct list_head    ctrls;
-    struct ida          cntlid_ida;

     struct list_head    hosts;
     bool                allow_any_host;
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 60990220bd83..ecc4fe862561 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -703,11 +703,6 @@ static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
 {
     u16 status;

-    cmd->queue = queue;
-    cmd->n_rdma = 0;
-    cmd->req.port = queue->port;
-
-
     ib_dma_sync_single_for_cpu(queue->dev->device,
         cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length,
         DMA_FROM_DEVICE);
@@ -760,9 +755,12 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
     cmd->queue = queue;
     rsp = nvmet_rdma_get_rsp(queue);
+    rsp->queue = queue;
     rsp->cmd = cmd;
     rsp->flags = 0;
     rsp->req.cmd = cmd->nvme_cmd;
+    rsp->req.port = queue->port;
+    rsp->n_rdma = 0;

     if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) {
         unsigned long flags;
@@ -1041,6 +1039,9 @@ static int nvmet_rdma_cm_reject(struct rdma_cm_id *cm_id,
 {
     struct nvme_rdma_cm_rej rej;

+    pr_debug("rejecting connect request: status %d (%s)\n",
+         status, nvme_rdma_cm_msg(status));
+
     rej.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
     rej.sts = cpu_to_le16(status);

@@ -1091,7 +1092,7 @@ nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
     queue->idx = ida_simple_get(&nvmet_rdma_queue_ida, 0, 0, GFP_KERNEL);
     if (queue->idx < 0) {
         ret = NVME_RDMA_CM_NO_RSC;
-        goto out_free_queue;
+        goto out_destroy_sq;
     }

     ret = nvmet_rdma_alloc_rsps(queue);
@@ -1135,7 +1136,6 @@ out_destroy_sq:
 out_free_queue:
     kfree(queue);
 out_reject:
-    pr_debug("rejecting connect request with status code %d\n", ret);
     nvmet_rdma_cm_reject(cm_id, ret);
     return NULL;
 }
@@ -1188,7 +1188,6 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,

     ndev = nvmet_rdma_find_get_device(cm_id);
     if (!ndev) {
-        pr_err("no client data!\n");
         nvmet_rdma_cm_reject(cm_id, NVME_RDMA_CM_NO_RSC);
         return -ECONNREFUSED;
     }
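Two of the hunks above (core.c and nvmet.h) also move cntlid allocation from a per-subsystem struct ida to a single module-global DEFINE_IDA: controller IDs become unique across the whole target rather than per subsystem, subsystem alloc/free no longer manage an allocator, and there is exactly one ida_destroy() at module exit. The lifecycle, sketched with hypothetical names and bounds (the in-tree code uses NVME_CNTLID_MIN/NVME_CNTLID_MAX):

    #include <linux/idr.h>
    #include <linux/module.h>

    static DEFINE_IDA(demo_cntlid_ida); /* one allocator for the whole module */

    static int demo_alloc_cntlid(void)
    {
        /* Returns an unused id in [1, 0xfffe] or a negative errno;
         * the end bound of ida_simple_get() is exclusive.
         */
        return ida_simple_get(&demo_cntlid_ida, 1, 0xffff, GFP_KERNEL);
    }

    static void demo_free_cntlid(int cntlid)
    {
        ida_simple_remove(&demo_cntlid_ida, cntlid);
    }

    static void __exit demo_exit(void)
    {
        /* Every id must already have been freed at this point. */
        ida_destroy(&demo_cntlid_ida);
    }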