Diffstat (limited to 'drivers/nvme/target')
 drivers/nvme/target/admin-cmd.c   | 165
 drivers/nvme/target/core.c        | 100
 drivers/nvme/target/discovery.c   |  74
 drivers/nvme/target/fabrics-cmd.c |  30
 drivers/nvme/target/fc.c          |  31
 drivers/nvme/target/fcloop.c      |   1
 drivers/nvme/target/io-cmd-bdev.c |  59
 drivers/nvme/target/io-cmd-file.c |  20
 drivers/nvme/target/loop.c        |  47
 drivers/nvme/target/nvmet.h       |  11
 drivers/nvme/target/rdma.c        |   8
 drivers/nvme/target/tcp.c         |  26
 drivers/nvme/target/trace.c       |  18
 13 files changed, 357 insertions(+), 233 deletions(-)
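Most of the churn in admin-cmd.c, discovery.c, fabrics-cmd.c and the I/O command backends below comes from one refactor: the command parsers no longer record an expected payload size in req->data_len; instead each ->execute() handler validates the SGL-derived req->transfer_len up front with nvmet_check_data_len(), or nvmet_check_data_len_lte() for commands such as DSM whose payload may legitimately be shorter than the buffer. Commands that carry no data (keep-alive, abort, flush, async event) now reject a stray payload at the top of their own handler rather than in the central nvmet_req_execute() wrapper, which this series removes. The following standalone, userspace-only sketch illustrates the pattern; fake_req is a hypothetical stand-in, not the kernel's struct nvmet_req, and a plain error flag replaces the real NVMe status codes.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct fake_req {
	size_t transfer_len;	/* length parsed from the SGL descriptor */
	int status;		/* completion status, 0 on success */
};

/* Mirrors the idea of nvmet_check_data_len(): exact-length check. */
static bool check_data_len(struct fake_req *req, size_t expected)
{
	if (expected != req->transfer_len) {
		req->status = -1;	/* would be NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR */
		return false;
	}
	return true;
}

/* Mirrors nvmet_check_data_len_lte(): payload may be shorter than the SGL. */
static bool check_data_len_lte(struct fake_req *req, size_t expected)
{
	if (expected > req->transfer_len) {
		req->status = -1;
		return false;
	}
	return true;
}

int main(void)
{
	struct fake_req req = { .transfer_len = 4096, .status = 0 };

	printf("identify (4096 expected): %s\n",
	       check_data_len(&req, 4096) ? "ok" : "rejected");
	printf("keep-alive (0 expected):  %s\n",
	       check_data_len(&req, 0) ? "ok" : "rejected");
	printf("dsm (<= transfer_len):    %s\n",
	       check_data_len_lte(&req, 16) ? "ok" : "rejected");
	return 0;
}

With the length check inside the handler, the transports (fc.c, loop.c, rdma.c, tcp.c) can invoke req->execute(req) directly, as the hunks further down show.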
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c index 4dc12ea52f23..72a7e41f3018 100644 --- a/drivers/nvme/target/admin-cmd.c +++ b/drivers/nvme/target/admin-cmd.c @@ -24,6 +24,16 @@ u32 nvmet_get_log_page_len(struct nvme_command *cmd) return len; } +static u32 nvmet_feat_data_len(struct nvmet_req *req, u32 cdw10) +{ + switch (cdw10 & 0xff) { + case NVME_FEAT_HOST_ID: + return sizeof(req->sq->ctrl->hostid); + default: + return 0; + } +} + u64 nvmet_get_log_page_offset(struct nvme_command *cmd) { return le64_to_cpu(cmd->get_log_page.lpo); @@ -31,13 +41,12 @@ u64 nvmet_get_log_page_offset(struct nvme_command *cmd) static void nvmet_execute_get_log_page_noop(struct nvmet_req *req) { - nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->data_len)); + nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->transfer_len)); } static void nvmet_execute_get_log_page_error(struct nvmet_req *req) { struct nvmet_ctrl *ctrl = req->sq->ctrl; - u16 status = NVME_SC_SUCCESS; unsigned long flags; off_t offset = 0; u64 slot; @@ -47,9 +56,8 @@ static void nvmet_execute_get_log_page_error(struct nvmet_req *req) slot = ctrl->err_counter % NVMET_ERROR_LOG_SLOTS; for (i = 0; i < NVMET_ERROR_LOG_SLOTS; i++) { - status = nvmet_copy_to_sgl(req, offset, &ctrl->slots[slot], - sizeof(struct nvme_error_slot)); - if (status) + if (nvmet_copy_to_sgl(req, offset, &ctrl->slots[slot], + sizeof(struct nvme_error_slot))) break; if (slot == 0) @@ -59,7 +67,7 @@ static void nvmet_execute_get_log_page_error(struct nvmet_req *req) offset += sizeof(struct nvme_error_slot); } spin_unlock_irqrestore(&ctrl->error_lock, flags); - nvmet_req_complete(req, status); + nvmet_req_complete(req, 0); } static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req, @@ -81,9 +89,11 @@ static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req, goto out; host_reads = part_stat_read(ns->bdev->bd_part, ios[READ]); - data_units_read = part_stat_read(ns->bdev->bd_part, sectors[READ]); + data_units_read = DIV_ROUND_UP(part_stat_read(ns->bdev->bd_part, + sectors[READ]), 1000); host_writes = part_stat_read(ns->bdev->bd_part, ios[WRITE]); - data_units_written = part_stat_read(ns->bdev->bd_part, sectors[WRITE]); + data_units_written = DIV_ROUND_UP(part_stat_read(ns->bdev->bd_part, + sectors[WRITE]), 1000); put_unaligned_le64(host_reads, &slog->host_reads[0]); put_unaligned_le64(data_units_read, &slog->data_units_read[0]); @@ -111,11 +121,11 @@ static u16 nvmet_get_smart_log_all(struct nvmet_req *req, if (!ns->bdev) continue; host_reads += part_stat_read(ns->bdev->bd_part, ios[READ]); - data_units_read += - part_stat_read(ns->bdev->bd_part, sectors[READ]); + data_units_read += DIV_ROUND_UP( + part_stat_read(ns->bdev->bd_part, sectors[READ]), 1000); host_writes += part_stat_read(ns->bdev->bd_part, ios[WRITE]); - data_units_written += - part_stat_read(ns->bdev->bd_part, sectors[WRITE]); + data_units_written += DIV_ROUND_UP( + part_stat_read(ns->bdev->bd_part, sectors[WRITE]), 1000); } rcu_read_unlock(); @@ -134,7 +144,7 @@ static void nvmet_execute_get_log_page_smart(struct nvmet_req *req) u16 status = NVME_SC_INTERNAL; unsigned long flags; - if (req->data_len != sizeof(*log)) + if (req->transfer_len != sizeof(*log)) goto out; log = kzalloc(sizeof(*log), GFP_KERNEL); @@ -196,7 +206,7 @@ static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req) u16 status = NVME_SC_INTERNAL; size_t len; - if (req->data_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32)) + if (req->transfer_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32)) 
goto out; mutex_lock(&ctrl->lock); @@ -206,7 +216,7 @@ static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req) len = ctrl->nr_changed_ns * sizeof(__le32); status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len); if (!status) - status = nvmet_zero_sgl(req, len, req->data_len - len); + status = nvmet_zero_sgl(req, len, req->transfer_len - len); ctrl->nr_changed_ns = 0; nvmet_clear_aen_bit(req, NVME_AEN_BIT_NS_ATTR); mutex_unlock(&ctrl->lock); @@ -282,6 +292,36 @@ out: nvmet_req_complete(req, status); } +static void nvmet_execute_get_log_page(struct nvmet_req *req) +{ + if (!nvmet_check_data_len(req, nvmet_get_log_page_len(req->cmd))) + return; + + switch (req->cmd->get_log_page.lid) { + case NVME_LOG_ERROR: + return nvmet_execute_get_log_page_error(req); + case NVME_LOG_SMART: + return nvmet_execute_get_log_page_smart(req); + case NVME_LOG_FW_SLOT: + /* + * We only support a single firmware slot which always is + * active, so we can zero out the whole firmware slot log and + * still claim to fully implement this mandatory log page. + */ + return nvmet_execute_get_log_page_noop(req); + case NVME_LOG_CHANGED_NS: + return nvmet_execute_get_log_changed_ns(req); + case NVME_LOG_CMD_EFFECTS: + return nvmet_execute_get_log_cmd_effects_ns(req); + case NVME_LOG_ANA: + return nvmet_execute_get_log_page_ana(req); + } + pr_err("unhandled lid %d on qid %d\n", + req->cmd->get_log_page.lid, req->sq->qid); + req->error_loc = offsetof(struct nvme_get_log_page_command, lid); + nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR); +} + static void nvmet_execute_identify_ctrl(struct nvmet_req *req) { struct nvmet_ctrl *ctrl = req->sq->ctrl; @@ -565,6 +605,28 @@ out: nvmet_req_complete(req, status); } +static void nvmet_execute_identify(struct nvmet_req *req) +{ + if (!nvmet_check_data_len(req, NVME_IDENTIFY_DATA_SIZE)) + return; + + switch (req->cmd->identify.cns) { + case NVME_ID_CNS_NS: + return nvmet_execute_identify_ns(req); + case NVME_ID_CNS_CTRL: + return nvmet_execute_identify_ctrl(req); + case NVME_ID_CNS_NS_ACTIVE_LIST: + return nvmet_execute_identify_nslist(req); + case NVME_ID_CNS_NS_DESC_LIST: + return nvmet_execute_identify_desclist(req); + } + + pr_err("unhandled identify cns %d on qid %d\n", + req->cmd->identify.cns, req->sq->qid); + req->error_loc = offsetof(struct nvme_identify, cns); + nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR); +} + /* * A "minimum viable" abort implementation: the command is mandatory in the * spec, but we are not required to do any useful work. 
We couldn't really @@ -574,6 +636,8 @@ out: */ static void nvmet_execute_abort(struct nvmet_req *req) { + if (!nvmet_check_data_len(req, 0)) + return; nvmet_set_result(req, 1); nvmet_req_complete(req, 0); } @@ -658,6 +722,9 @@ static void nvmet_execute_set_features(struct nvmet_req *req) u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10); u16 status = 0; + if (!nvmet_check_data_len(req, 0)) + return; + switch (cdw10 & 0xff) { case NVME_FEAT_NUM_QUEUES: nvmet_set_result(req, @@ -721,6 +788,9 @@ static void nvmet_execute_get_features(struct nvmet_req *req) u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10); u16 status = 0; + if (!nvmet_check_data_len(req, nvmet_feat_data_len(req, cdw10))) + return; + switch (cdw10 & 0xff) { /* * These features are mandatory in the spec, but we don't @@ -785,6 +855,9 @@ void nvmet_execute_async_event(struct nvmet_req *req) { struct nvmet_ctrl *ctrl = req->sq->ctrl; + if (!nvmet_check_data_len(req, 0)) + return; + mutex_lock(&ctrl->lock); if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) { mutex_unlock(&ctrl->lock); @@ -801,6 +874,9 @@ void nvmet_execute_keep_alive(struct nvmet_req *req) { struct nvmet_ctrl *ctrl = req->sq->ctrl; + if (!nvmet_check_data_len(req, 0)) + return; + pr_debug("ctrl %d update keep-alive timer for %d secs\n", ctrl->cntlid, ctrl->kato); @@ -813,77 +889,36 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req) struct nvme_command *cmd = req->cmd; u16 ret; + if (nvme_is_fabrics(cmd)) + return nvmet_parse_fabrics_cmd(req); + if (req->sq->ctrl->subsys->type == NVME_NQN_DISC) + return nvmet_parse_discovery_cmd(req); + ret = nvmet_check_ctrl_status(req, cmd); if (unlikely(ret)) return ret; switch (cmd->common.opcode) { case nvme_admin_get_log_page: - req->data_len = nvmet_get_log_page_len(cmd); - - switch (cmd->get_log_page.lid) { - case NVME_LOG_ERROR: - req->execute = nvmet_execute_get_log_page_error; - return 0; - case NVME_LOG_SMART: - req->execute = nvmet_execute_get_log_page_smart; - return 0; - case NVME_LOG_FW_SLOT: - /* - * We only support a single firmware slot which always - * is active, so we can zero out the whole firmware slot - * log and still claim to fully implement this mandatory - * log page. 
- */ - req->execute = nvmet_execute_get_log_page_noop; - return 0; - case NVME_LOG_CHANGED_NS: - req->execute = nvmet_execute_get_log_changed_ns; - return 0; - case NVME_LOG_CMD_EFFECTS: - req->execute = nvmet_execute_get_log_cmd_effects_ns; - return 0; - case NVME_LOG_ANA: - req->execute = nvmet_execute_get_log_page_ana; - return 0; - } - break; + req->execute = nvmet_execute_get_log_page; + return 0; case nvme_admin_identify: - req->data_len = NVME_IDENTIFY_DATA_SIZE; - switch (cmd->identify.cns) { - case NVME_ID_CNS_NS: - req->execute = nvmet_execute_identify_ns; - return 0; - case NVME_ID_CNS_CTRL: - req->execute = nvmet_execute_identify_ctrl; - return 0; - case NVME_ID_CNS_NS_ACTIVE_LIST: - req->execute = nvmet_execute_identify_nslist; - return 0; - case NVME_ID_CNS_NS_DESC_LIST: - req->execute = nvmet_execute_identify_desclist; - return 0; - } - break; + req->execute = nvmet_execute_identify; + return 0; case nvme_admin_abort_cmd: req->execute = nvmet_execute_abort; - req->data_len = 0; return 0; case nvme_admin_set_features: req->execute = nvmet_execute_set_features; - req->data_len = 0; return 0; case nvme_admin_get_features: req->execute = nvmet_execute_get_features; - req->data_len = 0; return 0; case nvme_admin_async_event: req->execute = nvmet_execute_async_event; - req->data_len = 0; return 0; case nvme_admin_keep_alive: req->execute = nvmet_execute_keep_alive; - req->data_len = 0; return 0; } diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 3a67e244e568..576de773b4db 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -129,27 +129,8 @@ static u32 nvmet_async_event_result(struct nvmet_async_event *aen) return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16); } -static void nvmet_async_events_free(struct nvmet_ctrl *ctrl) +static void nvmet_async_events_process(struct nvmet_ctrl *ctrl, u16 status) { - struct nvmet_req *req; - - while (1) { - mutex_lock(&ctrl->lock); - if (!ctrl->nr_async_event_cmds) { - mutex_unlock(&ctrl->lock); - return; - } - - req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds]; - mutex_unlock(&ctrl->lock); - nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR); - } -} - -static void nvmet_async_event_work(struct work_struct *work) -{ - struct nvmet_ctrl *ctrl = - container_of(work, struct nvmet_ctrl, async_event_work); struct nvmet_async_event *aen; struct nvmet_req *req; @@ -159,18 +140,41 @@ static void nvmet_async_event_work(struct work_struct *work) struct nvmet_async_event, entry); if (!aen || !ctrl->nr_async_event_cmds) { mutex_unlock(&ctrl->lock); - return; + break; } req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds]; - nvmet_set_result(req, nvmet_async_event_result(aen)); + if (status == 0) + nvmet_set_result(req, nvmet_async_event_result(aen)); list_del(&aen->entry); kfree(aen); mutex_unlock(&ctrl->lock); - nvmet_req_complete(req, 0); + nvmet_req_complete(req, status); + } +} + +static void nvmet_async_events_free(struct nvmet_ctrl *ctrl) +{ + struct nvmet_req *req; + + mutex_lock(&ctrl->lock); + while (ctrl->nr_async_event_cmds) { + req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds]; + mutex_unlock(&ctrl->lock); + nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR); + mutex_lock(&ctrl->lock); } + mutex_unlock(&ctrl->lock); +} + +static void nvmet_async_event_work(struct work_struct *work) +{ + struct nvmet_ctrl *ctrl = + container_of(work, struct nvmet_ctrl, async_event_work); + + nvmet_async_events_process(ctrl, 0); } void nvmet_add_async_event(struct 
nvmet_ctrl *ctrl, u8 event_type, @@ -555,7 +559,8 @@ int nvmet_ns_enable(struct nvmet_ns *ns) } else { struct nvmet_ns *old; - list_for_each_entry_rcu(old, &subsys->namespaces, dev_link) { + list_for_each_entry_rcu(old, &subsys->namespaces, dev_link, + lockdep_is_held(&subsys->lock)) { BUG_ON(ns->nsid == old->nsid); if (ns->nsid < old->nsid) break; @@ -752,19 +757,24 @@ static void nvmet_confirm_sq(struct percpu_ref *ref) void nvmet_sq_destroy(struct nvmet_sq *sq) { + u16 status = NVME_SC_INTERNAL | NVME_SC_DNR; + struct nvmet_ctrl *ctrl = sq->ctrl; + /* * If this is the admin queue, complete all AERs so that our * queue doesn't have outstanding requests on it. */ - if (sq->ctrl && sq->ctrl->sqs && sq->ctrl->sqs[0] == sq) - nvmet_async_events_free(sq->ctrl); + if (ctrl && ctrl->sqs && ctrl->sqs[0] == sq) { + nvmet_async_events_process(ctrl, status); + nvmet_async_events_free(ctrl); + } percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq); wait_for_completion(&sq->confirm_done); wait_for_completion(&sq->free_done); percpu_ref_exit(&sq->ref); - if (sq->ctrl) { - nvmet_ctrl_put(sq->ctrl); + if (ctrl) { + nvmet_ctrl_put(ctrl); sq->ctrl = NULL; /* allows reusing the queue later */ } } @@ -892,14 +902,10 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq, } if (unlikely(!req->sq->ctrl)) - /* will return an error for any Non-connect command: */ + /* will return an error for any non-connect command: */ status = nvmet_parse_connect_cmd(req); else if (likely(req->sq->qid != 0)) status = nvmet_parse_io_cmd(req); - else if (nvme_is_fabrics(req->cmd)) - status = nvmet_parse_fabrics_cmd(req); - else if (req->sq->ctrl->subsys->type == NVME_NQN_DISC) - status = nvmet_parse_discovery_cmd(req); else status = nvmet_parse_admin_cmd(req); @@ -930,15 +936,28 @@ void nvmet_req_uninit(struct nvmet_req *req) } EXPORT_SYMBOL_GPL(nvmet_req_uninit); -void nvmet_req_execute(struct nvmet_req *req) +bool nvmet_check_data_len(struct nvmet_req *req, size_t data_len) { - if (unlikely(req->data_len != req->transfer_len)) { + if (unlikely(data_len != req->transfer_len)) { req->error_loc = offsetof(struct nvme_common_command, dptr); nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR); - } else - req->execute(req); + return false; + } + + return true; +} +EXPORT_SYMBOL_GPL(nvmet_check_data_len); + +bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len) +{ + if (unlikely(data_len > req->transfer_len)) { + req->error_loc = offsetof(struct nvme_common_command, dptr); + nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR); + return false; + } + + return true; } -EXPORT_SYMBOL_GPL(nvmet_req_execute); int nvmet_req_alloc_sgl(struct nvmet_req *req) { @@ -966,7 +985,7 @@ int nvmet_req_alloc_sgl(struct nvmet_req *req) } req->sg = sgl_alloc(req->transfer_len, GFP_KERNEL, &req->sg_cnt); - if (!req->sg) + if (unlikely(!req->sg)) return -ENOMEM; return 0; @@ -1174,7 +1193,8 @@ static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl, ctrl->p2p_client = get_device(req->p2p_client); - list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) + list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link, + lockdep_is_held(&ctrl->subsys->lock)) nvmet_p2pmem_ns_add_p2p(ctrl, ns); } diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c index 8efca26b4776..0c2274b21e15 100644 --- a/drivers/nvme/target/discovery.c +++ b/drivers/nvme/target/discovery.c @@ -157,7 +157,7 @@ static size_t discovery_log_entries(struct nvmet_req *req) return entries; } -static 
void nvmet_execute_get_disc_log_page(struct nvmet_req *req) +static void nvmet_execute_disc_get_log_page(struct nvmet_req *req) { const int entry_size = sizeof(struct nvmf_disc_rsp_page_entry); struct nvmet_ctrl *ctrl = req->sq->ctrl; @@ -171,6 +171,16 @@ static void nvmet_execute_get_disc_log_page(struct nvmet_req *req) u16 status = 0; void *buffer; + if (!nvmet_check_data_len(req, data_len)) + return; + + if (req->cmd->get_log_page.lid != NVME_LOG_DISC) { + req->error_loc = + offsetof(struct nvme_get_log_page_command, lid); + status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR; + goto out; + } + /* Spec requires dword aligned offsets */ if (offset & 0x3) { status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; @@ -227,20 +237,35 @@ out: nvmet_req_complete(req, status); } -static void nvmet_execute_identify_disc_ctrl(struct nvmet_req *req) +static void nvmet_execute_disc_identify(struct nvmet_req *req) { struct nvmet_ctrl *ctrl = req->sq->ctrl; struct nvme_id_ctrl *id; + const char model[] = "Linux"; u16 status = 0; + if (!nvmet_check_data_len(req, NVME_IDENTIFY_DATA_SIZE)) + return; + + if (req->cmd->identify.cns != NVME_ID_CNS_CTRL) { + req->error_loc = offsetof(struct nvme_identify, cns); + status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR; + goto out; + } + id = kzalloc(sizeof(*id), GFP_KERNEL); if (!id) { status = NVME_SC_INTERNAL; goto out; } + memset(id->sn, ' ', sizeof(id->sn)); + bin2hex(id->sn, &ctrl->subsys->serial, + min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2)); memset(id->fr, ' ', sizeof(id->fr)); - strncpy((char *)id->fr, UTS_RELEASE, sizeof(id->fr)); + memcpy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1, ' '); + memcpy_and_pad(id->fr, sizeof(id->fr), + UTS_RELEASE, strlen(UTS_RELEASE), ' '); /* no limit on data transfer sizes for now */ id->mdts = 0; @@ -273,6 +298,9 @@ static void nvmet_execute_disc_set_features(struct nvmet_req *req) u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10); u16 stat; + if (!nvmet_check_data_len(req, 0)) + return; + switch (cdw10 & 0xff) { case NVME_FEAT_KATO: stat = nvmet_set_feat_kato(req); @@ -296,6 +324,9 @@ static void nvmet_execute_disc_get_features(struct nvmet_req *req) u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10); u16 stat = 0; + if (!nvmet_check_data_len(req, 0)) + return; + switch (cdw10 & 0xff) { case NVME_FEAT_KATO: nvmet_get_feat_kato(req); @@ -328,47 +359,22 @@ u16 nvmet_parse_discovery_cmd(struct nvmet_req *req) switch (cmd->common.opcode) { case nvme_admin_set_features: req->execute = nvmet_execute_disc_set_features; - req->data_len = 0; return 0; case nvme_admin_get_features: req->execute = nvmet_execute_disc_get_features; - req->data_len = 0; return 0; case nvme_admin_async_event: req->execute = nvmet_execute_async_event; - req->data_len = 0; return 0; case nvme_admin_keep_alive: req->execute = nvmet_execute_keep_alive; - req->data_len = 0; return 0; case nvme_admin_get_log_page: - req->data_len = nvmet_get_log_page_len(cmd); - - switch (cmd->get_log_page.lid) { - case NVME_LOG_DISC: - req->execute = nvmet_execute_get_disc_log_page; - return 0; - default: - pr_err("unsupported get_log_page lid %d\n", - cmd->get_log_page.lid); - req->error_loc = - offsetof(struct nvme_get_log_page_command, lid); - return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; - } + req->execute = nvmet_execute_disc_get_log_page; + return 0; case nvme_admin_identify: - req->data_len = NVME_IDENTIFY_DATA_SIZE; - switch (cmd->identify.cns) { - case NVME_ID_CNS_CTRL: - req->execute = - nvmet_execute_identify_disc_ctrl; - return 0; - default: - pr_err("unsupported 
identify cns %d\n", - cmd->identify.cns); - req->error_loc = offsetof(struct nvme_identify, cns); - return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; - } + req->execute = nvmet_execute_disc_identify; + return 0; default: pr_err("unhandled cmd %d\n", cmd->common.opcode); req->error_loc = offsetof(struct nvme_common_command, opcode); @@ -381,9 +387,7 @@ int __init nvmet_init_discovery(void) { nvmet_disc_subsys = nvmet_subsys_alloc(NVME_DISC_SUBSYS_NAME, NVME_NQN_DISC); - if (IS_ERR(nvmet_disc_subsys)) - return PTR_ERR(nvmet_disc_subsys); - return 0; + return PTR_ERR_OR_ZERO(nvmet_disc_subsys); } void nvmet_exit_discovery(void) diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c index d16b55ffe79f..feef15c38ec9 100644 --- a/drivers/nvme/target/fabrics-cmd.c +++ b/drivers/nvme/target/fabrics-cmd.c @@ -12,6 +12,9 @@ static void nvmet_execute_prop_set(struct nvmet_req *req) u64 val = le64_to_cpu(req->cmd->prop_set.value); u16 status = 0; + if (!nvmet_check_data_len(req, 0)) + return; + if (req->cmd->prop_set.attrib & 1) { req->error_loc = offsetof(struct nvmf_property_set_command, attrib); @@ -38,6 +41,9 @@ static void nvmet_execute_prop_get(struct nvmet_req *req) u16 status = 0; u64 val = 0; + if (!nvmet_check_data_len(req, 0)) + return; + if (req->cmd->prop_get.attrib & 1) { switch (le32_to_cpu(req->cmd->prop_get.offset)) { case NVME_REG_CAP: @@ -82,11 +88,9 @@ u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req) switch (cmd->fabrics.fctype) { case nvme_fabrics_type_property_set: - req->data_len = 0; req->execute = nvmet_execute_prop_set; break; case nvme_fabrics_type_property_get: - req->data_len = 0; req->execute = nvmet_execute_prop_get; break; default: @@ -105,6 +109,7 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req) u16 qid = le16_to_cpu(c->qid); u16 sqsize = le16_to_cpu(c->sqsize); struct nvmet_ctrl *old; + u16 ret; old = cmpxchg(&req->sq->ctrl, NULL, ctrl); if (old) { @@ -115,7 +120,8 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req) if (!sqsize) { pr_warn("queue size zero!\n"); req->error_loc = offsetof(struct nvmf_connect_command, sqsize); - return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR; + ret = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR; + goto err; } /* note: convert queue size from 0's-based value to 1's-based value */ @@ -128,16 +134,19 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req) } if (ctrl->ops->install_queue) { - u16 ret = ctrl->ops->install_queue(req->sq); - + ret = ctrl->ops->install_queue(req->sq); if (ret) { pr_err("failed to install queue %d cntlid %d ret %x\n", - qid, ret, ctrl->cntlid); - return ret; + qid, ctrl->cntlid, ret); + goto err; } } return 0; + +err: + req->sq->ctrl = NULL; + return ret; } static void nvmet_execute_admin_connect(struct nvmet_req *req) @@ -147,6 +156,9 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req) struct nvmet_ctrl *ctrl = NULL; u16 status = 0; + if (!nvmet_check_data_len(req, sizeof(struct nvmf_connect_data))) + return; + d = kmalloc(sizeof(*d), GFP_KERNEL); if (!d) { status = NVME_SC_INTERNAL; @@ -211,6 +223,9 @@ static void nvmet_execute_io_connect(struct nvmet_req *req) u16 qid = le16_to_cpu(c->qid); u16 status = 0; + if (!nvmet_check_data_len(req, sizeof(struct nvmf_connect_data))) + return; + d = kmalloc(sizeof(*d), GFP_KERNEL); if (!d) { status = NVME_SC_INTERNAL; @@ -281,7 +296,6 @@ u16 nvmet_parse_connect_cmd(struct nvmet_req *req) return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; } - 
req->data_len = sizeof(struct nvmf_connect_data); if (cmd->connect.qid == 0) req->execute = nvmet_execute_admin_connect; else diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c index ce8d819f86cc..a0db6371b43e 100644 --- a/drivers/nvme/target/fc.c +++ b/drivers/nvme/target/fc.c @@ -1495,20 +1495,20 @@ static void nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport, struct nvmet_fc_ls_iod *iod) { - struct fcnvme_ls_disconnect_rqst *rqst = - (struct fcnvme_ls_disconnect_rqst *)iod->rqstbuf; - struct fcnvme_ls_disconnect_acc *acc = - (struct fcnvme_ls_disconnect_acc *)iod->rspbuf; + struct fcnvme_ls_disconnect_assoc_rqst *rqst = + (struct fcnvme_ls_disconnect_assoc_rqst *)iod->rqstbuf; + struct fcnvme_ls_disconnect_assoc_acc *acc = + (struct fcnvme_ls_disconnect_assoc_acc *)iod->rspbuf; struct nvmet_fc_tgt_assoc *assoc; int ret = 0; memset(acc, 0, sizeof(*acc)); - if (iod->rqstdatalen < sizeof(struct fcnvme_ls_disconnect_rqst)) + if (iod->rqstdatalen < sizeof(struct fcnvme_ls_disconnect_assoc_rqst)) ret = VERR_DISCONN_LEN; else if (rqst->desc_list_len != fcnvme_lsdesc_len( - sizeof(struct fcnvme_ls_disconnect_rqst))) + sizeof(struct fcnvme_ls_disconnect_assoc_rqst))) ret = VERR_DISCONN_RQST_LEN; else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID)) ret = VERR_ASSOC_ID; @@ -1523,8 +1523,11 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport, fcnvme_lsdesc_len( sizeof(struct fcnvme_lsdesc_disconn_cmd))) ret = VERR_DISCONN_CMD_LEN; - else if ((rqst->discon_cmd.scope != FCNVME_DISCONN_ASSOCIATION) && - (rqst->discon_cmd.scope != FCNVME_DISCONN_CONNECTION)) + /* + * As the standard changed on the LS, check if old format and scope + * something other than Association (e.g. 0). + */ + else if (rqst->discon_cmd.rsvd8[0]) ret = VERR_DISCONN_SCOPE; else { /* match an active association */ @@ -1556,8 +1559,8 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport, nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC, fcnvme_lsdesc_len( - sizeof(struct fcnvme_ls_disconnect_acc)), - FCNVME_LS_DISCONNECT); + sizeof(struct fcnvme_ls_disconnect_assoc_acc)), + FCNVME_LS_DISCONNECT_ASSOC); /* release get taken in nvmet_fc_find_target_assoc */ nvmet_fc_tgt_a_put(iod->assoc); @@ -1632,7 +1635,7 @@ nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport, /* Creates an IO Queue/Connection */ nvmet_fc_ls_create_connection(tgtport, iod); break; - case FCNVME_LS_DISCONNECT: + case FCNVME_LS_DISCONNECT_ASSOC: /* Terminate a Queue/Connection or the Association */ nvmet_fc_ls_disconnect(tgtport, iod); break; @@ -2015,7 +2018,7 @@ nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod) } /* data transfer complete, resume with nvmet layer */ - nvmet_req_execute(&fod->req); + fod->req.execute(&fod->req); break; case NVMET_FCOP_READDATA: @@ -2231,7 +2234,7 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport, * can invoke the nvmet_layer now. 
If read data, cmd completion will * push the data */ - nvmet_req_execute(&fod->req); + fod->req.execute(&fod->req); return; transport_error: @@ -2299,7 +2302,7 @@ nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port, /* validate iu, so the connection id can be used to find the queue */ if ((cmdiubuf_len != sizeof(*cmdiu)) || - (cmdiu->scsi_id != NVME_CMD_SCSI_ID) || + (cmdiu->format_id != NVME_CMD_FORMAT_ID) || (cmdiu->fc_id != NVME_CMD_FC_ID) || (be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4))) return -EIO; diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c index b50b53db3746..1c50af6219f3 100644 --- a/drivers/nvme/target/fcloop.c +++ b/drivers/nvme/target/fcloop.c @@ -850,6 +850,7 @@ fcloop_targetport_delete(struct nvmet_fc_target_port *targetport) #define FCLOOP_DMABOUND_4G 0xFFFFFFFF static struct nvme_fc_port_template fctemplate = { + .module = THIS_MODULE, .localport_delete = fcloop_localport_delete, .remoteport_delete = fcloop_remoteport_delete, .create_queue = fcloop_create_queue, diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c index de0bff70ebb6..ea0e596be15d 100644 --- a/drivers/nvme/target/io-cmd-bdev.c +++ b/drivers/nvme/target/io-cmd-bdev.c @@ -11,10 +11,10 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id) { const struct queue_limits *ql = &bdev_get_queue(bdev)->limits; - /* Number of physical blocks per logical block. */ - const u32 ppl = ql->physical_block_size / ql->logical_block_size; - /* Physical blocks per logical block, 0's based. */ - const __le16 ppl0b = to0based(ppl); + /* Number of logical blocks per physical block. */ + const u32 lpp = ql->physical_block_size / ql->logical_block_size; + /* Logical blocks per physical block, 0's based. */ + const __le16 lpp0b = to0based(lpp); /* * For NVMe 1.2 and later, bit 1 indicates that the fields NAWUN, @@ -25,9 +25,9 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id) * field from the identify controller data structure should be used. */ id->nsfeat |= 1 << 1; - id->nawun = ppl0b; - id->nawupf = ppl0b; - id->nacwu = ppl0b; + id->nawun = lpp0b; + id->nawupf = lpp0b; + id->nacwu = lpp0b; /* * Bit 4 indicates that the fields NPWG, NPWA, NPDG, NPDA, and @@ -36,7 +36,7 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id) */ id->nsfeat |= 1 << 4; /* NPWG = Namespace Preferred Write Granularity. 0's based */ - id->npwg = ppl0b; + id->npwg = lpp0b; /* NPWA = Namespace Preferred Write Alignment. 0's based */ id->npwa = id->npwg; /* NPDG = Namespace Preferred Deallocate Granularity. 
0's based */ @@ -147,8 +147,12 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req) int sg_cnt = req->sg_cnt; struct bio *bio; struct scatterlist *sg; + struct blk_plug plug; sector_t sector; - int op, op_flags = 0, i; + int op, i; + + if (!nvmet_check_data_len(req, nvmet_rw_len(req))) + return; if (!req->sg_cnt) { nvmet_req_complete(req, 0); @@ -156,21 +160,20 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req) } if (req->cmd->rw.opcode == nvme_cmd_write) { - op = REQ_OP_WRITE; - op_flags = REQ_SYNC | REQ_IDLE; + op = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE; if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA)) - op_flags |= REQ_FUA; + op |= REQ_FUA; } else { op = REQ_OP_READ; } if (is_pci_p2pdma_page(sg_page(req->sg))) - op_flags |= REQ_NOMERGE; + op |= REQ_NOMERGE; sector = le64_to_cpu(req->cmd->rw.slba); sector <<= (req->ns->blksize_shift - 9); - if (req->data_len <= NVMET_MAX_INLINE_DATA_LEN) { + if (req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN) { bio = &req->b.inline_bio; bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec)); } else { @@ -180,8 +183,9 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req) bio->bi_iter.bi_sector = sector; bio->bi_private = req; bio->bi_end_io = nvmet_bio_done; - bio_set_op_attrs(bio, op, op_flags); + bio->bi_opf = op; + blk_start_plug(&plug); for_each_sg(req->sg, sg, req->sg_cnt, i) { while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset) != sg->length) { @@ -190,7 +194,7 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req) bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES)); bio_set_dev(bio, req->ns->bdev); bio->bi_iter.bi_sector = sector; - bio_set_op_attrs(bio, op, op_flags); + bio->bi_opf = op; bio_chain(bio, prev); submit_bio(prev); @@ -201,12 +205,16 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req) } submit_bio(bio); + blk_finish_plug(&plug); } static void nvmet_bdev_execute_flush(struct nvmet_req *req) { struct bio *bio = &req->b.inline_bio; + if (!nvmet_check_data_len(req, 0)) + return; + bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec)); bio_set_dev(bio, req->ns->bdev); bio->bi_private = req; @@ -261,12 +269,10 @@ static void nvmet_bdev_execute_discard(struct nvmet_req *req) if (bio) { bio->bi_private = req; bio->bi_end_io = nvmet_bio_done; - if (status) { - bio->bi_status = BLK_STS_IOERR; - bio_endio(bio); - } else { + if (status) + bio_io_error(bio); + else submit_bio(bio); - } } else { nvmet_req_complete(req, status); } @@ -274,6 +280,9 @@ static void nvmet_bdev_execute_discard(struct nvmet_req *req) static void nvmet_bdev_execute_dsm(struct nvmet_req *req) { + if (!nvmet_check_data_len_lte(req, nvmet_dsm_len(req))) + return; + switch (le32_to_cpu(req->cmd->dsm.attributes)) { case NVME_DSMGMT_AD: nvmet_bdev_execute_discard(req); @@ -295,6 +304,9 @@ static void nvmet_bdev_execute_write_zeroes(struct nvmet_req *req) sector_t nr_sector; int ret; + if (!nvmet_check_data_len(req, 0)) + return; + sector = le64_to_cpu(write_zeroes->slba) << (req->ns->blksize_shift - 9); nr_sector = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) << @@ -319,20 +331,15 @@ u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req) case nvme_cmd_read: case nvme_cmd_write: req->execute = nvmet_bdev_execute_rw; - req->data_len = nvmet_rw_len(req); return 0; case nvme_cmd_flush: req->execute = nvmet_bdev_execute_flush; - req->data_len = 0; return 0; case nvme_cmd_dsm: req->execute = nvmet_bdev_execute_dsm; - req->data_len = (le32_to_cpu(cmd->dsm.nr) + 1) * - sizeof(struct nvme_dsm_range); return 0; case 
nvme_cmd_write_zeroes: req->execute = nvmet_bdev_execute_write_zeroes; - req->data_len = 0; return 0; default: pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode, diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c index 05453f5d1448..cd5670b83118 100644 --- a/drivers/nvme/target/io-cmd-file.c +++ b/drivers/nvme/target/io-cmd-file.c @@ -126,7 +126,7 @@ static void nvmet_file_io_done(struct kiocb *iocb, long ret, long ret2) mempool_free(req->f.bvec, req->ns->bvec_pool); } - if (unlikely(ret != req->data_len)) + if (unlikely(ret != req->transfer_len)) status = errno_to_nvme_status(req, ret); nvmet_req_complete(req, status); } @@ -146,7 +146,7 @@ static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags) is_sync = true; pos = le64_to_cpu(req->cmd->rw.slba) << req->ns->blksize_shift; - if (unlikely(pos + req->data_len > req->ns->size)) { + if (unlikely(pos + req->transfer_len > req->ns->size)) { nvmet_req_complete(req, errno_to_nvme_status(req, -ENOSPC)); return true; } @@ -173,7 +173,7 @@ static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags) nr_bvec--; } - if (WARN_ON_ONCE(total_len != req->data_len)) { + if (WARN_ON_ONCE(total_len != req->transfer_len)) { ret = -EIO; goto complete; } @@ -232,6 +232,9 @@ static void nvmet_file_execute_rw(struct nvmet_req *req) { ssize_t nr_bvec = req->sg_cnt; + if (!nvmet_check_data_len(req, nvmet_rw_len(req))) + return; + if (!req->sg_cnt || !nr_bvec) { nvmet_req_complete(req, 0); return; @@ -273,6 +276,8 @@ static void nvmet_file_flush_work(struct work_struct *w) static void nvmet_file_execute_flush(struct nvmet_req *req) { + if (!nvmet_check_data_len(req, 0)) + return; INIT_WORK(&req->f.work, nvmet_file_flush_work); schedule_work(&req->f.work); } @@ -331,6 +336,8 @@ static void nvmet_file_dsm_work(struct work_struct *w) static void nvmet_file_execute_dsm(struct nvmet_req *req) { + if (!nvmet_check_data_len_lte(req, nvmet_dsm_len(req))) + return; INIT_WORK(&req->f.work, nvmet_file_dsm_work); schedule_work(&req->f.work); } @@ -359,6 +366,8 @@ static void nvmet_file_write_zeroes_work(struct work_struct *w) static void nvmet_file_execute_write_zeroes(struct nvmet_req *req) { + if (!nvmet_check_data_len(req, 0)) + return; INIT_WORK(&req->f.work, nvmet_file_write_zeroes_work); schedule_work(&req->f.work); } @@ -371,20 +380,15 @@ u16 nvmet_file_parse_io_cmd(struct nvmet_req *req) case nvme_cmd_read: case nvme_cmd_write: req->execute = nvmet_file_execute_rw; - req->data_len = nvmet_rw_len(req); return 0; case nvme_cmd_flush: req->execute = nvmet_file_execute_flush; - req->data_len = 0; return 0; case nvme_cmd_dsm: req->execute = nvmet_file_execute_dsm; - req->data_len = (le32_to_cpu(cmd->dsm.nr) + 1) * - sizeof(struct nvme_dsm_range); return 0; case nvme_cmd_write_zeroes: req->execute = nvmet_file_execute_write_zeroes; - req->data_len = 0; return 0; default: pr_err("unhandled cmd for file ns %d on qid %d\n", diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c index 0940c5024a34..4df4ebde208a 100644 --- a/drivers/nvme/target/loop.c +++ b/drivers/nvme/target/loop.c @@ -76,8 +76,7 @@ static void nvme_loop_complete_rq(struct request *req) { struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req); - nvme_cleanup_cmd(req); - sg_free_table_chained(&iod->sg_table, SG_CHUNK_SIZE); + sg_free_table_chained(&iod->sg_table, NVME_INLINE_SG_CNT); nvme_complete_rq(req); } @@ -102,8 +101,8 @@ static void nvme_loop_queue_response(struct nvmet_req *req) * aborts. 
We don't even bother to allocate a struct request * for them but rather special case them here. */ - if (unlikely(nvme_loop_queue_idx(queue) == 0 && - cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH)) { + if (unlikely(nvme_is_aen_req(nvme_loop_queue_idx(queue), + cqe->command_id))) { nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status, &cqe->result); } else { @@ -126,7 +125,7 @@ static void nvme_loop_execute_work(struct work_struct *work) struct nvme_loop_iod *iod = container_of(work, struct nvme_loop_iod, work); - nvmet_req_execute(&iod->req); + iod->req.execute(&iod->req); } static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx, @@ -157,8 +156,10 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx, iod->sg_table.sgl = iod->first_sgl; if (sg_alloc_table_chained(&iod->sg_table, blk_rq_nr_phys_segments(req), - iod->sg_table.sgl, SG_CHUNK_SIZE)) + iod->sg_table.sgl, NVME_INLINE_SG_CNT)) { + nvme_cleanup_cmd(req); return BLK_STS_RESOURCE; + } iod->req.sg = iod->sg_table.sgl; iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl); @@ -253,6 +254,7 @@ static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl) clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags); nvmet_sq_destroy(&ctrl->queues[0].nvme_sq); blk_cleanup_queue(ctrl->ctrl.admin_q); + blk_cleanup_queue(ctrl->ctrl.fabrics_q); blk_mq_free_tag_set(&ctrl->admin_tag_set); } @@ -340,7 +342,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl) ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */ ctrl->admin_tag_set.numa_node = NUMA_NO_NODE; ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) + - SG_CHUNK_SIZE * sizeof(struct scatterlist); + NVME_INLINE_SG_CNT * sizeof(struct scatterlist); ctrl->admin_tag_set.driver_data = ctrl; ctrl->admin_tag_set.nr_hw_queues = 1; ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT; @@ -357,10 +359,16 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl) goto out_free_sq; ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set; + ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set); + if (IS_ERR(ctrl->ctrl.fabrics_q)) { + error = PTR_ERR(ctrl->ctrl.fabrics_q); + goto out_free_tagset; + } + ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set); if (IS_ERR(ctrl->ctrl.admin_q)) { error = PTR_ERR(ctrl->ctrl.admin_q); - goto out_free_tagset; + goto out_cleanup_fabrics_q; } error = nvmf_connect_admin_queue(&ctrl->ctrl); @@ -369,23 +377,15 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl) set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags); - error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->ctrl.cap); - if (error) { - dev_err(ctrl->ctrl.device, - "prop_get NVME_REG_CAP failed\n"); - goto out_cleanup_queue; - } - - ctrl->ctrl.sqsize = - min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap), ctrl->ctrl.sqsize); - - error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap); + error = nvme_enable_ctrl(&ctrl->ctrl); if (error) goto out_cleanup_queue; ctrl->ctrl.max_hw_sectors = (NVME_LOOP_MAX_SEGMENTS - 1) << (PAGE_SHIFT - 9); + blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); + error = nvme_init_identify(&ctrl->ctrl); if (error) goto out_cleanup_queue; @@ -394,6 +394,8 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl) out_cleanup_queue: blk_cleanup_queue(ctrl->ctrl.admin_q); +out_cleanup_fabrics_q: + blk_cleanup_queue(ctrl->ctrl.fabrics_q); out_free_tagset: blk_mq_free_tag_set(&ctrl->admin_tag_set); out_free_sq: @@ -407,16 +409,17 @@ static void 
nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl) nvme_stop_queues(&ctrl->ctrl); blk_mq_tagset_busy_iter(&ctrl->tag_set, nvme_cancel_request, &ctrl->ctrl); + blk_mq_tagset_wait_completed_request(&ctrl->tag_set); nvme_loop_destroy_io_queues(ctrl); } + blk_mq_quiesce_queue(ctrl->ctrl.admin_q); if (ctrl->ctrl.state == NVME_CTRL_LIVE) nvme_shutdown_ctrl(&ctrl->ctrl); - blk_mq_quiesce_queue(ctrl->ctrl.admin_q); blk_mq_tagset_busy_iter(&ctrl->admin_tag_set, nvme_cancel_request, &ctrl->ctrl); - blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); + blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set); nvme_loop_destroy_admin_queue(ctrl); } @@ -513,7 +516,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl) ctrl->tag_set.numa_node = NUMA_NO_NODE; ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) + - SG_CHUNK_SIZE * sizeof(struct scatterlist); + NVME_INLINE_SG_CNT * sizeof(struct scatterlist); ctrl->tag_set.driver_data = ctrl; ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1; ctrl->tag_set.timeout = NVME_IO_TIMEOUT; diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h index c51f8dd01dc4..eda28b22a2c8 100644 --- a/drivers/nvme/target/nvmet.h +++ b/drivers/nvme/target/nvmet.h @@ -304,8 +304,6 @@ struct nvmet_req { } f; }; int sg_cnt; - /* data length as parsed from the command: */ - size_t data_len; /* data length as parsed from the SGL descriptor: */ size_t transfer_len; @@ -375,7 +373,8 @@ u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req); bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq, struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops); void nvmet_req_uninit(struct nvmet_req *req); -void nvmet_req_execute(struct nvmet_req *req); +bool nvmet_check_data_len(struct nvmet_req *req, size_t data_len); +bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len); void nvmet_req_complete(struct nvmet_req *req, u16 status); int nvmet_req_alloc_sgl(struct nvmet_req *req); void nvmet_req_free_sgl(struct nvmet_req *req); @@ -495,6 +494,12 @@ static inline u32 nvmet_rw_len(struct nvmet_req *req) req->ns->blksize_shift; } +static inline u32 nvmet_dsm_len(struct nvmet_req *req) +{ + return (le32_to_cpu(req->cmd->dsm.nr) + 1) * + sizeof(struct nvme_dsm_range); +} + u16 errno_to_nvme_status(struct nvmet_req *req, int errno); /* Convert a 32-bit number to a 16-bit 0's based number */ diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c index 36d906a7f70d..37d262a65877 100644 --- a/drivers/nvme/target/rdma.c +++ b/drivers/nvme/target/rdma.c @@ -603,7 +603,7 @@ static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc) return; } - nvmet_req_execute(&rsp->req); + rsp->req.execute(&rsp->req); } static void nvmet_rdma_use_inline_sg(struct nvmet_rdma_rsp *rsp, u32 len, @@ -672,13 +672,13 @@ static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp, return 0; ret = nvmet_req_alloc_sgl(&rsp->req); - if (ret < 0) + if (unlikely(ret < 0)) goto error_out; ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num, rsp->req.sg, rsp->req.sg_cnt, 0, addr, key, nvmet_data_dir(&rsp->req)); - if (ret < 0) + if (unlikely(ret < 0)) goto error_out; rsp->n_rdma += ret; @@ -746,7 +746,7 @@ static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp) queue->cm_id->port_num, &rsp->read_cqe, NULL)) nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR); } else { - nvmet_req_execute(&rsp->req); + rsp->req.execute(&rsp->req); } return true; diff --git 
a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c index 69b83fa0c76c..af674fc0bb1e 100644 --- a/drivers/nvme/target/tcp.c +++ b/drivers/nvme/target/tcp.c @@ -320,7 +320,7 @@ static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd) struct nvme_sgl_desc *sgl = &cmd->req.cmd->common.dptr.sgl; u32 len = le32_to_cpu(sgl->length); - if (!cmd->req.data_len) + if (!len) return 0; if (sgl->type == ((NVME_SGL_FMT_DATA_DESC << 4) | @@ -813,13 +813,11 @@ free_crypto: static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue, struct nvmet_tcp_cmd *cmd, struct nvmet_req *req) { + size_t data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length); int ret; - /* recover the expected data transfer length */ - req->data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length); - if (!nvme_is_write(cmd->req.cmd) || - req->data_len > cmd->req.port->inline_data_size) { + data_len > cmd->req.port->inline_data_size) { nvmet_prepare_receive_pdu(queue); return; } @@ -932,7 +930,7 @@ static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue) goto out; } - nvmet_req_execute(&queue->cmd->req); + queue->cmd->req.execute(&queue->cmd->req); out: nvmet_prepare_receive_pdu(queue); return ret; @@ -1052,7 +1050,7 @@ static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue) nvmet_tcp_prep_recv_ddgst(cmd); return 0; } - nvmet_req_execute(&cmd->req); + cmd->req.execute(&cmd->req); } nvmet_prepare_receive_pdu(queue); @@ -1092,7 +1090,7 @@ static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue) if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) && cmd->rbytes_done == cmd->req.transfer_len) - nvmet_req_execute(&cmd->req); + cmd->req.execute(&cmd->req); ret = 0; out: nvmet_prepare_receive_pdu(queue); @@ -1306,6 +1304,7 @@ static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd) { nvmet_req_uninit(&cmd->req); nvmet_tcp_unmap_pdu_iovec(cmd); + kfree(cmd->iov); sgl_free(cmd->req.sg); } @@ -1410,6 +1409,7 @@ done: static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue) { struct socket *sock = queue->sock; + struct inet_sock *inet = inet_sk(sock->sk); struct linger sol = { .l_onoff = 1, .l_linger = 0 }; int ret; @@ -1433,6 +1433,16 @@ static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue) if (ret) return ret; + /* Set socket type of service */ + if (inet->rcv_tos > 0) { + int tos = inet->rcv_tos; + + ret = kernel_setsockopt(sock, SOL_IP, IP_TOS, + (char *)&tos, sizeof(tos)); + if (ret) + return ret; + } + write_lock_bh(&sock->sk->sk_callback_lock); sock->sk->sk_user_data = queue; queue->data_ready = sock->sk->sk_data_ready; diff --git a/drivers/nvme/target/trace.c b/drivers/nvme/target/trace.c index 6af11d493271..1373a3c67962 100644 --- a/drivers/nvme/target/trace.c +++ b/drivers/nvme/target/trace.c @@ -33,6 +33,22 @@ static const char *nvmet_trace_admin_get_features(struct trace_seq *p, return ret; } +static const char *nvmet_trace_get_lba_status(struct trace_seq *p, + u8 *cdw10) +{ + const char *ret = trace_seq_buffer_ptr(p); + u64 slba = get_unaligned_le64(cdw10); + u32 mndw = get_unaligned_le32(cdw10 + 8); + u16 rl = get_unaligned_le16(cdw10 + 12); + u8 atype = cdw10[15]; + + trace_seq_printf(p, "slba=0x%llx, mndw=0x%x, rl=0x%x, atype=%u", + slba, mndw, rl, atype); + trace_seq_putc(p, 0); + + return ret; +} + static const char *nvmet_trace_read_write(struct trace_seq *p, u8 *cdw10) { const char *ret = trace_seq_buffer_ptr(p); @@ -80,6 +96,8 @@ const char *nvmet_trace_parse_admin_cmd(struct trace_seq *p, return nvmet_trace_admin_identify(p, cdw10); case nvme_admin_get_features: 
return nvmet_trace_admin_get_features(p, cdw10); + case nvme_admin_get_lba_status: + return nvmet_trace_get_lba_status(p, cdw10); default: return nvmet_trace_common(p, cdw10); } |
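As a second illustration, the SMART log hunks in admin-cmd.c stop reporting raw 512-byte sector counters and instead report data units in thousands of 512-byte units, rounded up, which is what the Data Units Read/Written fields expect. A minimal userspace sketch of that arithmetic, with DIV_ROUND_UP reimplemented locally and a made-up counter value:

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	uint64_t sectors_read = 123456789;	/* hypothetical per-partition counter */

	/* Before the change: the raw 512-byte sector count was reported directly. */
	printf("raw sectors:      %llu\n", (unsigned long long)sectors_read);
	/* After the change: thousands of 512-byte units, rounded up. */
	printf("SMART data units: %llu\n",
	       (unsigned long long)DIV_ROUND_UP(sectors_read, 1000));
	return 0;
}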