Diffstat (limited to 'drivers/nvme/target')
-rw-r--r--  drivers/nvme/target/admin-cmd.c     39
-rw-r--r--  drivers/nvme/target/configfs.c       2
-rw-r--r--  drivers/nvme/target/core.c          24
-rw-r--r--  drivers/nvme/target/fabrics-cmd.c   10
-rw-r--r--  drivers/nvme/target/fc.c            81
-rw-r--r--  drivers/nvme/target/fcloop.c       107
-rw-r--r--  drivers/nvme/target/io-cmd.c         6
-rw-r--r--  drivers/nvme/target/loop.c           1
-rw-r--r--  drivers/nvme/target/nvmet.h          2
-rw-r--r--  drivers/nvme/target/rdma.c           5
10 files changed, 115 insertions, 162 deletions
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 2d7a98ab53fb..c4a0bf36e752 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -100,7 +100,7 @@ static u16 nvmet_get_smart_log(struct nvmet_req *req,
u16 status;
WARN_ON(req == NULL || slog == NULL);
- if (req->cmd->get_log_page.nsid == cpu_to_le32(0xFFFFFFFF))
+ if (req->cmd->get_log_page.nsid == cpu_to_le32(NVME_NSID_ALL))
status = nvmet_get_smart_log_all(req, slog);
else
status = nvmet_get_smart_log_nsid(req, slog);
@@ -168,15 +168,6 @@ out:
nvmet_req_complete(req, status);
}
-static void copy_and_pad(char *dst, int dst_len, const char *src, int src_len)
-{
- int len = min(src_len, dst_len);
-
- memcpy(dst, src, len);
- if (dst_len > len)
- memset(dst + len, ' ', dst_len - len);
-}
-
static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
@@ -196,14 +187,9 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
bin2hex(id->sn, &ctrl->subsys->serial,
min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
- copy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1);
- copy_and_pad(id->fr, sizeof(id->fr), UTS_RELEASE, strlen(UTS_RELEASE));
-
- memset(id->mn, ' ', sizeof(id->mn));
- strncpy((char *)id->mn, "Linux", sizeof(id->mn));
-
- memset(id->fr, ' ', sizeof(id->fr));
- strncpy((char *)id->fr, UTS_RELEASE, sizeof(id->fr));
+ memcpy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1, ' ');
+ memcpy_and_pad(id->fr, sizeof(id->fr),
+ UTS_RELEASE, strlen(UTS_RELEASE), ' ');
id->rab = 6;
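The open-coded copy_and_pad() helper goes away in favour of memcpy_and_pad(), a generic string helper introduced alongside this series. A minimal user-space sketch of its semantics (the kernel version lives in include/linux/string.h; this re-implementation is illustrative):

#include <stdio.h>
#include <string.h>

/* Copy count bytes from src into a dest buffer of dest_len bytes,
 * filling any remainder with the pad byte. */
static void memcpy_and_pad(void *dest, size_t dest_len,
			   const void *src, size_t count, int pad)
{
	if (dest_len > count) {
		memcpy(dest, src, count);
		memset((char *)dest + count, pad, dest_len - count);
	} else {
		memcpy(dest, src, dest_len);
	}
}

int main(void)
{
	char mn[16];

	/* mirrors how id->mn is filled above: the model string plus space padding */
	memcpy_and_pad(mn, sizeof(mn), "Linux", 5, ' ');
	printf("[%.*s]\n", (int)sizeof(mn), mn);
	return 0;
}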
@@ -457,7 +443,7 @@ static void nvmet_execute_set_features(struct nvmet_req *req)
u32 val32;
u16 status = 0;
- switch (cdw10 & 0xf) {
+ switch (cdw10 & 0xff) {
case NVME_FEAT_NUM_QUEUES:
nvmet_set_result(req,
(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
@@ -467,6 +453,9 @@ static void nvmet_execute_set_features(struct nvmet_req *req)
req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
nvmet_set_result(req, req->sq->ctrl->kato);
break;
+ case NVME_FEAT_HOST_ID:
+ status = NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
+ break;
default:
status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
break;
@@ -481,7 +470,7 @@ static void nvmet_execute_get_features(struct nvmet_req *req)
u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10[0]);
u16 status = 0;
- switch (cdw10 & 0xf) {
+ switch (cdw10 & 0xff) {
/*
* These features are mandatory in the spec, but we don't
* have a useful way to implement them. We'll eventually
@@ -515,6 +504,16 @@ static void nvmet_execute_get_features(struct nvmet_req *req)
case NVME_FEAT_KATO:
nvmet_set_result(req, req->sq->ctrl->kato * 1000);
break;
+ case NVME_FEAT_HOST_ID:
+ /* need 128-bit host identifier flag */
+ if (!(req->cmd->common.cdw10[1] & cpu_to_le32(1 << 0))) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ break;
+ }
+
+ status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid,
+ sizeof(req->sq->ctrl->hostid));
+ break;
default:
status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
break;
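Widening the mask from 0xf to 0xff matters because the Feature Identifier occupies the low eight bits of CDW10; with only four bits, a FID such as NVME_FEAT_HOST_ID (0x81) would alias to a different feature entirely. A small sketch of the decode (user-space C, values per the NVMe spec):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t cdw10 = 0x81;	/* Get/Set Features with FID = Host Identifier */

	/* a 4-bit mask folds 0x81 down to 0x01, an unrelated FID */
	printf("0xf mask:  0x%02x\n", (unsigned)(cdw10 & 0xf));
	/* the full 8-bit field yields the real FID */
	printf("0xff mask: 0x%02x\n", (unsigned)(cdw10 & 0xff));
	return 0;
}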
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index 0a0067e771f5..b6aeb1d70951 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -444,7 +444,7 @@ static struct config_group *nvmet_ns_make(struct config_group *group,
goto out;
ret = -EINVAL;
- if (nsid == 0 || nsid == 0xffffffff)
+ if (nsid == 0 || nsid == NVME_NSID_ALL)
goto out;
ret = -ENOMEM;
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index f4b02bb4a1a8..1b208beeef50 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -390,10 +390,10 @@ static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
if (status)
nvmet_set_status(req, status);
- /* XXX: need to fill in something useful for sq_head */
- req->rsp->sq_head = 0;
- if (likely(req->sq)) /* may happen during early failure */
- req->rsp->sq_id = cpu_to_le16(req->sq->qid);
+ if (req->sq->size)
+ req->sq->sqhd = (req->sq->sqhd + 1) % req->sq->size;
+ req->rsp->sq_head = cpu_to_le16(req->sq->sqhd);
+ req->rsp->sq_id = cpu_to_le16(req->sq->qid);
req->rsp->command_id = req->cmd->common.command_id;
if (req->ns)
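Rather than reporting a hard-coded SQ head of zero, the target now keeps a per-queue sqhd counter that advances on every completion and wraps at the queue size. A toy demonstration of the wraparound, assuming a 4-entry queue:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t sqhd = 0;
	const uint16_t size = 4;

	/* same update as __nvmet_req_complete(): advance one slot per
	 * completion, modulo the queue size */
	for (int i = 0; i < 6; i++) {
		sqhd = (uint16_t)((sqhd + 1) % size);
		printf("completion %d -> sq_head %u\n", i, sqhd);
	}
	return 0;
}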
@@ -420,6 +420,7 @@ void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
u16 qid, u16 size)
{
+ sq->sqhd = 0;
sq->qid = qid;
sq->size = size;
@@ -538,37 +539,37 @@ EXPORT_SYMBOL_GPL(nvmet_req_uninit);
static inline bool nvmet_cc_en(u32 cc)
{
- return cc & 0x1;
+ return (cc >> NVME_CC_EN_SHIFT) & 0x1;
}
static inline u8 nvmet_cc_css(u32 cc)
{
- return (cc >> 4) & 0x7;
+ return (cc >> NVME_CC_CSS_SHIFT) & 0x7;
}
static inline u8 nvmet_cc_mps(u32 cc)
{
- return (cc >> 7) & 0xf;
+ return (cc >> NVME_CC_MPS_SHIFT) & 0xf;
}
static inline u8 nvmet_cc_ams(u32 cc)
{
- return (cc >> 11) & 0x7;
+ return (cc >> NVME_CC_AMS_SHIFT) & 0x7;
}
static inline u8 nvmet_cc_shn(u32 cc)
{
- return (cc >> 14) & 0x3;
+ return (cc >> NVME_CC_SHN_SHIFT) & 0x3;
}
static inline u8 nvmet_cc_iosqes(u32 cc)
{
- return (cc >> 16) & 0xf;
+ return (cc >> NVME_CC_IOSQES_SHIFT) & 0xf;
}
static inline u8 nvmet_cc_iocqes(u32 cc)
{
- return (cc >> 20) & 0xf;
+ return (cc >> NVME_CC_IOCQES_SHIFT) & 0xf;
}
static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
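The magic shift amounts are replaced by the NVME_CC_*_SHIFT constants from <linux/nvme.h>; the extracted fields are identical. A user-space sketch of the CC register decode using the spec-defined bit offsets (EN at bit 0, CSS at 4, MPS at 7, AMS at 11, SHN at 14, IOSQES at 16, IOCQES at 20):

#include <stdint.h>
#include <stdio.h>

enum {	/* mirrors NVME_CC_*_SHIFT */
	CC_EN_SHIFT	= 0,
	CC_CSS_SHIFT	= 4,
	CC_MPS_SHIFT	= 7,
	CC_AMS_SHIFT	= 11,
	CC_SHN_SHIFT	= 14,
	CC_IOSQES_SHIFT	= 16,
	CC_IOCQES_SHIFT	= 20,
};

int main(void)
{
	/* EN=1, MPS=0 (4 KiB pages), IOSQES=6 (64 B), IOCQES=4 (16 B) */
	uint32_t cc = 1u | (6u << CC_IOSQES_SHIFT) | (4u << CC_IOCQES_SHIFT);

	printf("en=%u iosqes=%u iocqes=%u\n",
	       (cc >> CC_EN_SHIFT) & 0x1,
	       (cc >> CC_IOSQES_SHIFT) & 0xf,
	       (cc >> CC_IOCQES_SHIFT) & 0xf);
	return 0;
}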
@@ -749,6 +750,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
hostnqn, subsysnqn);
req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
up_read(&nvmet_config_sem);
+ status = NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR;
goto out_put_subsystem;
}
up_read(&nvmet_config_sem);
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index 3cc17269504b..db3bf6b8bf9e 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -109,9 +109,14 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
pr_warn("queue already connected!\n");
return NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
}
+ if (!sqsize) {
+ pr_warn("queue size zero!\n");
+ return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+ }
- nvmet_cq_setup(ctrl, req->cq, qid, sqsize);
- nvmet_sq_setup(ctrl, req->sq, qid, sqsize);
+ /* note: convert queue size from 0's-based value to 1's-based value */
+ nvmet_cq_setup(ctrl, req->cq, qid, sqsize + 1);
+ nvmet_sq_setup(ctrl, req->sq, qid, sqsize + 1);
return 0;
}
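Fabrics Connect carries SQSIZE as a 0's-based value, so zero on the wire is meaningless and is now rejected, while valid values are converted by adding one before the queues are set up. In miniature:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t sqsize = 31;	/* 0's-based value from the Connect command */

	if (!sqsize)		/* a 0's-based size of 0 is invalid */
		return 1;

	/* 1's-based entry count, as passed to nvmet_{cq,sq}_setup() */
	printf("queue entries: %u\n", sqsize + 1);	/* prints 32 */
	return 0;
}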
@@ -154,6 +159,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
le32_to_cpu(c->kato), &ctrl);
if (status)
goto out;
+ uuid_copy(&ctrl->hostid, &d->hostid);
status = nvmet_install_queue(ctrl, req);
if (status) {
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 1b7f2520a20d..58e010bdda3e 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -58,7 +58,8 @@ struct nvmet_fc_ls_iod {
struct work_struct work;
} __aligned(sizeof(unsigned long long));
-#define NVMET_FC_MAX_KB_PER_XFR 256
+#define NVMET_FC_MAX_SEQ_LENGTH (256 * 1024)
+#define NVMET_FC_MAX_XFR_SGENTS (NVMET_FC_MAX_SEQ_LENGTH / PAGE_SIZE)
enum nvmet_fcp_datadir {
NVMET_FCP_NODATA,
@@ -74,9 +75,7 @@ struct nvmet_fc_fcp_iod {
struct nvme_fc_ersp_iu rspiubuf;
dma_addr_t rspdma;
struct scatterlist *data_sg;
- struct scatterlist *next_sg;
int data_sg_cnt;
- u32 next_sg_offset;
u32 total_length;
u32 offset;
enum nvmet_fcp_datadir io_dir;
@@ -112,6 +111,7 @@ struct nvmet_fc_tgtport {
struct ida assoc_cnt;
struct nvmet_port *port;
struct kref ref;
+ u32 max_sg_cnt;
};
struct nvmet_fc_defer_fcp_req {
@@ -148,7 +148,7 @@ struct nvmet_fc_tgt_assoc {
u32 a_id;
struct nvmet_fc_tgtport *tgtport;
struct list_head a_list;
- struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES];
+ struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES + 1];
struct kref ref;
};
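The extra array slot makes room for the admin queue: index 0 holds the admin queue and indices 1..NVMET_NR_QUEUES hold the I/O queues, which is also why the qid bounds checks below change from >= to > and the teardown loop starts at NVMET_NR_QUEUES. A sketch of the indexing invariant (the constant's value here is illustrative):

#include <stdio.h>

#define NVMET_NR_QUEUES	128	/* illustrative; see drivers/nvme/target/nvmet.h */

int main(void)
{
	void *queues[NVMET_NR_QUEUES + 1];	/* [0] admin, [1..128] I/O */
	int qid = NVMET_NR_QUEUES;

	/* with the +1 sizing, qid == NVMET_NR_QUEUES is the last valid index */
	printf("valid: %d\n", qid <= NVMET_NR_QUEUES);
	printf("slots: %zu\n", sizeof(queues) / sizeof(queues[0]));
	return 0;
}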
@@ -394,7 +394,7 @@ nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
static struct nvmet_fc_ls_iod *
nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport)
{
- static struct nvmet_fc_ls_iod *iod;
+ struct nvmet_fc_ls_iod *iod;
unsigned long flags;
spin_lock_irqsave(&tgtport->lock, flags);
@@ -471,7 +471,7 @@ nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
static struct nvmet_fc_fcp_iod *
nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
{
- static struct nvmet_fc_fcp_iod *fod;
+ struct nvmet_fc_fcp_iod *fod;
lockdep_assert_held(&queue->qlock);
@@ -608,7 +608,7 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
unsigned long flags;
int ret;
- if (qid >= NVMET_NR_QUEUES)
+ if (qid > NVMET_NR_QUEUES)
return NULL;
queue = kzalloc((sizeof(*queue) +
@@ -704,7 +704,7 @@ nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
{
struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport;
struct nvmet_fc_fcp_iod *fod = queue->fod;
- struct nvmet_fc_defer_fcp_req *deferfcp;
+ struct nvmet_fc_defer_fcp_req *deferfcp, *tempptr;
unsigned long flags;
int i, writedataactive;
bool disconnect;
@@ -735,7 +735,8 @@ nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
}
/* Cleanup defer'ed IOs in queue */
- list_for_each_entry(deferfcp, &queue->avail_defer_list, req_list) {
+ list_for_each_entry_safe(deferfcp, tempptr, &queue->avail_defer_list,
+ req_list) {
list_del(&deferfcp->req_list);
kfree(deferfcp);
}
@@ -782,6 +783,9 @@ nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
u16 qid = nvmet_fc_getqueueid(connection_id);
unsigned long flags;
+ if (qid > NVMET_NR_QUEUES)
+ return NULL;
+
spin_lock_irqsave(&tgtport->lock, flags);
list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
if (association_id == assoc->association_id) {
@@ -887,7 +891,7 @@ nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
int i;
spin_lock_irqsave(&tgtport->lock, flags);
- for (i = NVMET_NR_QUEUES - 1; i >= 0; i--) {
+ for (i = NVMET_NR_QUEUES; i >= 0; i--) {
queue = assoc->queues[i];
if (queue) {
if (!nvmet_fc_tgt_q_get(queue))
@@ -993,6 +997,8 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
INIT_LIST_HEAD(&newrec->assoc_list);
kref_init(&newrec->ref);
ida_init(&newrec->assoc_cnt);
+ newrec->max_sg_cnt = min_t(u32, NVMET_FC_MAX_XFR_SGENTS,
+ template->max_sgl_segments);
ret = nvmet_fc_alloc_ls_iodlist(newrec);
if (ret) {
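Pre-computing max_sg_cnt as the minimum of NVMET_FC_MAX_XFR_SGENTS and the LLDD's advertised max_sgl_segments lets the data-transfer path below clamp each transfer with a single min_t() and slice the per-transfer scatterlist directly out of data_sg. A sketch of the clamping, with a hypothetical LLDD limit and 4 KiB pages:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096u
#define MAX_SEQ_LENGTH	(256u * 1024)			/* NVMET_FC_MAX_SEQ_LENGTH */
#define MAX_XFR_SGENTS	(MAX_SEQ_LENGTH / PAGE_SIZE)	/* 64 pages */

static uint32_t min_u32(uint32_t a, uint32_t b) { return a < b ? a : b; }

int main(void)
{
	uint32_t lldd_max_segs = 32;	/* hypothetical template->max_sgl_segments */
	uint32_t max_sg_cnt = min_u32(MAX_XFR_SGENTS, lldd_max_segs);
	uint32_t total = 600u * 1024, offset = 0;

	/* each transfer moves at most max_sg_cnt pages of what remains */
	uint32_t tlen = min_u32(max_sg_cnt * PAGE_SIZE, total - offset);
	printf("max_sg_cnt=%u tlen=%u\n", max_sg_cnt, tlen);
	return 0;
}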
@@ -1865,51 +1871,23 @@ nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
struct nvmet_fc_fcp_iod *fod, u8 op)
{
struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
- struct scatterlist *sg, *datasg;
unsigned long flags;
- u32 tlen, sg_off;
+ u32 tlen;
int ret;
fcpreq->op = op;
fcpreq->offset = fod->offset;
fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;
- tlen = min_t(u32, (NVMET_FC_MAX_KB_PER_XFR * 1024),
+
+ tlen = min_t(u32, tgtport->max_sg_cnt * PAGE_SIZE,
(fod->total_length - fod->offset));
- tlen = min_t(u32, tlen, NVME_FC_MAX_SEGMENTS * PAGE_SIZE);
- tlen = min_t(u32, tlen, fod->tgtport->ops->max_sgl_segments
- * PAGE_SIZE);
fcpreq->transfer_length = tlen;
fcpreq->transferred_length = 0;
fcpreq->fcp_error = 0;
fcpreq->rsplen = 0;
- fcpreq->sg_cnt = 0;
-
- datasg = fod->next_sg;
- sg_off = fod->next_sg_offset;
-
- for (sg = fcpreq->sg ; tlen; sg++) {
- *sg = *datasg;
- if (sg_off) {
- sg->offset += sg_off;
- sg->length -= sg_off;
- sg->dma_address += sg_off;
- sg_off = 0;
- }
- if (tlen < sg->length) {
- sg->length = tlen;
- fod->next_sg = datasg;
- fod->next_sg_offset += tlen;
- } else if (tlen == sg->length) {
- fod->next_sg_offset = 0;
- fod->next_sg = sg_next(datasg);
- } else {
- fod->next_sg_offset = 0;
- datasg = sg_next(datasg);
- }
- tlen -= sg->length;
- fcpreq->sg_cnt++;
- }
+ fcpreq->sg = &fod->data_sg[fod->offset / PAGE_SIZE];
+ fcpreq->sg_cnt = DIV_ROUND_UP(tlen, PAGE_SIZE);
/*
* If the last READDATA request: check if LLDD supports
@@ -1935,8 +1913,7 @@ nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
spin_lock_irqsave(&fod->flock, flags);
fod->writedataactive = false;
spin_unlock_irqrestore(&fod->flock, flags);
- nvmet_req_complete(&fod->req,
- NVME_SC_FC_TRANSPORT_ERROR);
+ nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
} else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ {
fcpreq->fcp_error = ret;
fcpreq->transferred_length = 0;
@@ -1954,8 +1931,7 @@ __nvmet_fc_fod_op_abort(struct nvmet_fc_fcp_iod *fod, bool abort)
/* if in the middle of an io and we need to tear down */
if (abort) {
if (fcpreq->op == NVMET_FCOP_WRITEDATA) {
- nvmet_req_complete(&fod->req,
- NVME_SC_FC_TRANSPORT_ERROR);
+ nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
return true;
}
@@ -1993,8 +1969,7 @@ nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
fod->abort = true;
spin_unlock(&fod->flock);
- nvmet_req_complete(&fod->req,
- NVME_SC_FC_TRANSPORT_ERROR);
+ nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
return;
}
@@ -2224,8 +2199,6 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
fod->req.sg = fod->data_sg;
fod->req.sg_cnt = fod->data_sg_cnt;
fod->offset = 0;
- fod->next_sg = fod->data_sg;
- fod->next_sg_offset = 0;
if (fod->io_dir == NVMET_FCP_WRITE) {
/* pull the data over before invoking nvmet layer */
@@ -2560,13 +2533,17 @@ nvmet_fc_remove_port(struct nvmet_port *port)
{
struct nvmet_fc_tgtport *tgtport = port->priv;
unsigned long flags;
+ bool matched = false;
spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
if (tgtport->port == port) {
- nvmet_fc_tgtport_put(tgtport);
+ matched = true;
tgtport->port = NULL;
}
spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
+
+ if (matched)
+ nvmet_fc_tgtport_put(tgtport);
}
static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index 1bb9d5b311b1..7b75d9de55ab 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -193,9 +193,6 @@ out_free_options:
#define TGTPORT_OPTS (NVMF_OPT_WWNN | NVMF_OPT_WWPN)
-#define ALL_OPTS (NVMF_OPT_WWNN | NVMF_OPT_WWPN | NVMF_OPT_ROLES | \
- NVMF_OPT_FCADDR | NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN)
-
static DEFINE_SPINLOCK(fcloop_lock);
static LIST_HEAD(fcloop_lports);
@@ -227,8 +224,6 @@ struct fcloop_nport {
struct fcloop_lport *lport;
struct list_head nport_list;
struct kref ref;
- struct completion rport_unreg_done;
- struct completion tport_unreg_done;
u64 node_name;
u64 port_name;
u32 port_role;
@@ -579,7 +574,7 @@ fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
tfcp_req->aborted = true;
spin_unlock(&tfcp_req->reqlock);
- tfcp_req->status = NVME_SC_FC_TRANSPORT_ABORTED;
+ tfcp_req->status = NVME_SC_INTERNAL;
/*
* nothing more to do. If io wasn't active, the transport should
@@ -634,6 +629,32 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
}
static void
+fcloop_nport_free(struct kref *ref)
+{
+ struct fcloop_nport *nport =
+ container_of(ref, struct fcloop_nport, ref);
+ unsigned long flags;
+
+ spin_lock_irqsave(&fcloop_lock, flags);
+ list_del(&nport->nport_list);
+ spin_unlock_irqrestore(&fcloop_lock, flags);
+
+ kfree(nport);
+}
+
+static void
+fcloop_nport_put(struct fcloop_nport *nport)
+{
+ kref_put(&nport->ref, fcloop_nport_free);
+}
+
+static int
+fcloop_nport_get(struct fcloop_nport *nport)
+{
+ return kref_get_unless_zero(&nport->ref);
+}
+
+static void
fcloop_localport_delete(struct nvme_fc_local_port *localport)
{
struct fcloop_lport *lport = localport->private;
@@ -647,8 +668,7 @@ fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
struct fcloop_rport *rport = remoteport->private;
- /* release any threads waiting for the unreg to complete */
- complete(&rport->nport->rport_unreg_done);
+ fcloop_nport_put(rport->nport);
}
static void
@@ -656,8 +676,7 @@ fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
{
struct fcloop_tport *tport = targetport->private;
- /* release any threads waiting for the unreg to complete */
- complete(&tport->nport->tport_unreg_done);
+ fcloop_nport_put(tport->nport);
}
#define FCLOOP_HW_QUEUES 4
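With the completions gone, nport lifetime is handled purely by reference counting: the rport and tport delete callbacks each drop the reference they held, and the final put frees the nport. A deliberately simplified, single-threaded sketch of that pattern (the kernel uses kref, not a bare counter):

#include <stdio.h>

struct nport { int refcount; };

static void nport_free(struct nport *n)
{
	printf("nport %p freed\n", (void *)n);
}

static void nport_put(struct nport *n)
{
	/* the last put runs the release, like kref_put() */
	if (--n->refcount == 0)
		nport_free(n);
}

int main(void)
{
	struct nport n = { .refcount = 2 };	/* rport + tport each hold one */

	nport_put(&n);	/* fcloop_remoteport_delete */
	nport_put(&n);	/* fcloop_targetport_delete -> freed */
	return 0;
}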
@@ -725,6 +744,7 @@ fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
goto out_free_opts;
}
+ memset(&pinfo, 0, sizeof(pinfo));
pinfo.node_name = opts->wwnn;
pinfo.port_name = opts->wwpn;
pinfo.port_role = opts->roles;
@@ -807,32 +827,6 @@ fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
return ret ? ret : count;
}
-static void
-fcloop_nport_free(struct kref *ref)
-{
- struct fcloop_nport *nport =
- container_of(ref, struct fcloop_nport, ref);
- unsigned long flags;
-
- spin_lock_irqsave(&fcloop_lock, flags);
- list_del(&nport->nport_list);
- spin_unlock_irqrestore(&fcloop_lock, flags);
-
- kfree(nport);
-}
-
-static void
-fcloop_nport_put(struct fcloop_nport *nport)
-{
- kref_put(&nport->ref, fcloop_nport_free);
-}
-
-static int
-fcloop_nport_get(struct fcloop_nport *nport)
-{
- return kref_get_unless_zero(&nport->ref);
-}
-
static struct fcloop_nport *
fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
{
@@ -941,6 +935,7 @@ fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
if (!nport)
return -EIO;
+ memset(&pinfo, 0, sizeof(pinfo));
pinfo.node_name = nport->node_name;
pinfo.port_name = nport->port_name;
pinfo.port_role = nport->port_role;
@@ -982,24 +977,12 @@ __unlink_remote_port(struct fcloop_nport *nport)
}
static int
-__wait_remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport)
+__remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport)
{
- int ret;
-
if (!rport)
return -EALREADY;
- init_completion(&nport->rport_unreg_done);
-
- ret = nvme_fc_unregister_remoteport(rport->remoteport);
- if (ret)
- return ret;
-
- wait_for_completion(&nport->rport_unreg_done);
-
- fcloop_nport_put(nport);
-
- return ret;
+ return nvme_fc_unregister_remoteport(rport->remoteport);
}
static ssize_t
@@ -1032,7 +1015,7 @@ fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr,
if (!nport)
return -ENOENT;
- ret = __wait_remoteport_unreg(nport, rport);
+ ret = __remoteport_unreg(nport, rport);
return ret ? ret : count;
}
@@ -1089,24 +1072,12 @@ __unlink_target_port(struct fcloop_nport *nport)
}
static int
-__wait_targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport)
+__targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport)
{
- int ret;
-
if (!tport)
return -EALREADY;
- init_completion(&nport->tport_unreg_done);
-
- ret = nvmet_fc_unregister_targetport(tport->targetport);
- if (ret)
- return ret;
-
- wait_for_completion(&nport->tport_unreg_done);
-
- fcloop_nport_put(nport);
-
- return ret;
+ return nvmet_fc_unregister_targetport(tport->targetport);
}
static ssize_t
@@ -1139,7 +1110,7 @@ fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
if (!nport)
return -ENOENT;
- ret = __wait_targetport_unreg(nport, tport);
+ ret = __targetport_unreg(nport, tport);
return ret ? ret : count;
}
@@ -1226,11 +1197,11 @@ static void __exit fcloop_exit(void)
spin_unlock_irqrestore(&fcloop_lock, flags);
- ret = __wait_targetport_unreg(nport, tport);
+ ret = __targetport_unreg(nport, tport);
if (ret)
pr_warn("%s: Failed deleting target port\n", __func__);
- ret = __wait_remoteport_unreg(nport, rport);
+ ret = __remoteport_unreg(nport, rport);
if (ret)
pr_warn("%s: Failed deleting remote port\n", __func__);
diff --git a/drivers/nvme/target/io-cmd.c b/drivers/nvme/target/io-cmd.c
index 3b4d47a6abdb..0d4c23dc4532 100644
--- a/drivers/nvme/target/io-cmd.c
+++ b/drivers/nvme/target/io-cmd.c
@@ -68,7 +68,7 @@ static void nvmet_execute_rw(struct nvmet_req *req)
nvmet_inline_bio_init(req);
bio = &req->inline_bio;
- bio->bi_bdev = req->ns->bdev;
+ bio_set_dev(bio, req->ns->bdev);
bio->bi_iter.bi_sector = sector;
bio->bi_private = req;
bio->bi_end_io = nvmet_bio_done;
@@ -80,7 +80,7 @@ static void nvmet_execute_rw(struct nvmet_req *req)
struct bio *prev = bio;
bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
- bio->bi_bdev = req->ns->bdev;
+ bio_set_dev(bio, req->ns->bdev);
bio->bi_iter.bi_sector = sector;
bio_set_op_attrs(bio, op, op_flags);
@@ -104,7 +104,7 @@ static void nvmet_execute_flush(struct nvmet_req *req)
nvmet_inline_bio_init(req);
bio = &req->inline_bio;
- bio->bi_bdev = req->ns->bdev;
+ bio_set_dev(bio, req->ns->bdev);
bio->bi_private = req;
bio->bi_end_io = nvmet_bio_done;
bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 717ed7ddb2f6..92628c432926 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -375,6 +375,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
if (error)
goto out_free_sq;
+ ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;
ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
if (IS_ERR(ctrl->ctrl.admin_q)) {
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index e3b244c7e443..7b8e20adf760 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -74,6 +74,7 @@ struct nvmet_sq {
struct percpu_ref ref;
u16 qid;
u16 size;
+ u16 sqhd;
struct completion free_done;
struct completion confirm_done;
};
@@ -115,6 +116,7 @@ struct nvmet_ctrl {
u32 cc;
u32 csts;
+ uuid_t hostid;
u16 cntlid;
u32 kato;
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 56a4cba690b5..76d2bb793afe 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -1510,10 +1510,6 @@ static struct nvmet_fabrics_ops nvmet_rdma_ops = {
.delete_ctrl = nvmet_rdma_delete_ctrl,
};
-static void nvmet_rdma_add_one(struct ib_device *ib_device)
-{
-}
-
static void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data)
{
struct nvmet_rdma_queue *queue;
@@ -1534,7 +1530,6 @@ static void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data
static struct ib_client nvmet_rdma_ib_client = {
.name = "nvmet_rdma",
- .add = nvmet_rdma_add_one,
.remove = nvmet_rdma_remove_one
};