author		Linus Torvalds <torvalds@linux-foundation.org>	2017-05-01 10:39:57 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-05-01 10:39:57 -0700
commit		694752922b12bd318aa80191bd9d8c3dcfb39055 (patch)
tree		5afe83fd99100bea546dd5a1c1f778c58f41e5c0 /drivers/nvme/target/fc.c
parent		a351e9b9fc24e982ec2f0e76379a49826036da12 (diff)
parent		9438b3e080beccf6022138ea62192d55cc7dc4ed (diff)
Merge branch 'for-4.12/block' of git://git.kernel.dk/linux-block
Pull block layer updates from Jens Axboe:

 - Add BFQ IO scheduler under the new blk-mq scheduling framework. BFQ was initially a fork of CFQ, but subsequently changed to implement fairness based on B-WF2Q+, a modified variant of WF2Q. BFQ is meant to be used on desktop type single drives, providing good fairness. From Paolo.

 - Add Kyber IO scheduler. This is a full multiqueue aware scheduler, using a scalable token based algorithm that throttles IO based on live completion IO stats, similarly to blk-wbt. From Omar.

 - A series from Jan, moving users to separately allocated backing devices. This continues the work of separating backing device life times, solving various problems with hot removal.

 - A series of updates for lightnvm, mostly from Javier. Includes a 'pblk' target that exposes an open channel SSD as a physical block device.

 - A series of fixes and improvements for nbd from Josef.

 - A series from Omar, removing queue sharing between devices on mostly legacy drivers. This helps us clean up other bits, if we know that a queue only has a single device backing. This has been overdue for more than a decade.

 - Fixes for the blk-stats, and improvements to unify the stats and user windows. This both improves blk-wbt, and enables other users to register a need to receive IO stats for a device. From Omar.

 - blk-throttle improvements from Shaohua. This provides a scalable framework for implementing scalable prioritization - particularly for blk-mq, but applicable to any type of block device. The interface is marked experimental for now.

 - Bucketized IO stats for IO polling from Stephen Bates. This improves efficiency of polled workloads in the presence of mixed block size IO.

 - A few fixes for opal, from Scott.

 - A few pulls for NVMe, including a lot of fixes for NVMe-over-fabrics. From a variety of folks, mostly Sagi and James Smart.

 - A series from Bart, improving our exposed info and capabilities from the blk-mq debugfs support.

 - A series from Christoph, cleaning up how we handle WRITE_ZEROES.

 - A series from Christoph, cleaning up the block layer handling of how we track errors in a request. On top of being a nice cleanup, it also shrinks the size of struct request a bit.

 - Removal of mg_disk and hd (sorry Linus) by Christoph. The former was never used by platforms, and the latter has outlived its usefulness.

 - Various little bug fixes and cleanups from a wide variety of folks.
* 'for-4.12/block' of git://git.kernel.dk/linux-block: (329 commits)
  block: hide badblocks attribute by default
  blk-mq: unify hctx delay_work and run_work
  block: add kblock_mod_delayed_work_on()
  blk-mq: unify hctx delayed_run_work and run_work
  nbd: fix use after free on module unload
  MAINTAINERS: bfq: Add Paolo as maintainer for the BFQ I/O scheduler
  blk-mq-sched: alloate reserved tags out of normal pool
  mtip32xx: use runtime tag to initialize command header
  scsi: Implement blk_mq_ops.show_rq()
  blk-mq: Add blk_mq_ops.show_rq()
  blk-mq: Show operation, cmd_flags and rq_flags names
  blk-mq: Make blk_flags_show() callers append a newline character
  blk-mq: Move the "state" debugfs attribute one level down
  blk-mq: Unregister debugfs attributes earlier
  blk-mq: Only unregister hctxs for which registration succeeded
  blk-mq-debugfs: Rename functions for registering and unregistering the mq directory
  blk-mq: Let blk_mq_debugfs_register() look up the queue name
  blk-mq: Register <dev>/queue/mq after having registered <dev>/queue
  ide-pm: always pass 0 error to ide_complete_rq in ide_do_devset
  ide-pm: always pass 0 error to __blk_end_request_all
  ...
Diffstat (limited to 'drivers/nvme/target/fc.c')
-rw-r--r--	drivers/nvme/target/fc.c	274
1 file changed, 207 insertions(+), 67 deletions(-)
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 8f483ee7868c..074bd3743b5f 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -82,10 +82,13 @@ struct nvmet_fc_fcp_iod {
enum nvmet_fcp_datadir io_dir;
bool active;
bool abort;
+ bool aborted;
+ bool writedataactive;
spinlock_t flock;
struct nvmet_req req;
struct work_struct work;
+ struct work_struct done_work;
struct nvmet_fc_tgtport *tgtport;
struct nvmet_fc_tgt_queue *queue;
@@ -213,6 +216,7 @@ static DEFINE_IDA(nvmet_fc_tgtport_cnt);
static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work);
static void nvmet_fc_handle_fcp_rqst_work(struct work_struct *work);
+static void nvmet_fc_fcp_rqst_op_done_work(struct work_struct *work);
static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc);
static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
@@ -414,9 +418,13 @@ nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
for (i = 0; i < queue->sqsize; fod++, i++) {
INIT_WORK(&fod->work, nvmet_fc_handle_fcp_rqst_work);
+ INIT_WORK(&fod->done_work, nvmet_fc_fcp_rqst_op_done_work);
fod->tgtport = tgtport;
fod->queue = queue;
fod->active = false;
+ fod->abort = false;
+ fod->aborted = false;
+ fod->fcpreq = NULL;
list_add_tail(&fod->fcp_list, &queue->fod_list);
spin_lock_init(&fod->flock);
@@ -463,7 +471,6 @@ nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
if (fod) {
list_del(&fod->fcp_list);
fod->active = true;
- fod->abort = false;
/*
* no queue reference is taken, as it was taken by the
* queue lookup just prior to the allocation. The iod
@@ -479,17 +486,30 @@ static void
nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
struct nvmet_fc_fcp_iod *fod)
{
+ struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
+ struct nvmet_fc_tgtport *tgtport = fod->tgtport;
unsigned long flags;
+ fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
+ sizeof(fod->rspiubuf), DMA_TO_DEVICE);
+
+ fcpreq->nvmet_fc_private = NULL;
+
spin_lock_irqsave(&queue->qlock, flags);
list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
fod->active = false;
+ fod->abort = false;
+ fod->aborted = false;
+ fod->writedataactive = false;
+ fod->fcpreq = NULL;
spin_unlock_irqrestore(&queue->qlock, flags);
/*
* release the reference taken at queue lookup and fod allocation
*/
nvmet_fc_tgt_q_put(queue);
+
+ tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);
}
static int
@@ -616,32 +636,12 @@ nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue)
static void
-nvmet_fc_abort_op(struct nvmet_fc_tgtport *tgtport,
- struct nvmefc_tgt_fcp_req *fcpreq)
-{
- int ret;
-
- fcpreq->op = NVMET_FCOP_ABORT;
- fcpreq->offset = 0;
- fcpreq->timeout = 0;
- fcpreq->transfer_length = 0;
- fcpreq->transferred_length = 0;
- fcpreq->fcp_error = 0;
- fcpreq->sg_cnt = 0;
-
- ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fcpreq);
- if (ret)
- /* should never reach here !! */
- WARN_ON(1);
-}
-
-
-static void
nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
{
+ struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport;
struct nvmet_fc_fcp_iod *fod = queue->fod;
unsigned long flags;
- int i;
+ int i, writedataactive;
bool disconnect;
disconnect = atomic_xchg(&queue->connected, 0);
@@ -652,7 +652,20 @@ nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
if (fod->active) {
spin_lock(&fod->flock);
fod->abort = true;
+ writedataactive = fod->writedataactive;
spin_unlock(&fod->flock);
+ /*
+ * only call lldd abort routine if waiting for
+ * writedata. other outstanding ops should finish
+ * on their own.
+ */
+ if (writedataactive) {
+ spin_lock(&fod->flock);
+ fod->aborted = true;
+ spin_unlock(&fod->flock);
+ tgtport->ops->fcp_abort(
+ &tgtport->fc_target_port, fod->fcpreq);
+ }
}
}
spin_unlock_irqrestore(&queue->qlock, flags);
@@ -846,7 +859,8 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
int ret, idx;
if (!template->xmt_ls_rsp || !template->fcp_op ||
- !template->targetport_delete ||
+ !template->fcp_abort ||
+ !template->fcp_req_release || !template->targetport_delete ||
!template->max_hw_queues || !template->max_sgl_segments ||
!template->max_dif_sgl_segments || !template->dma_boundary) {
ret = -EINVAL;
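
For context, this check now also requires the LLDD to supply fcp_abort and fcp_req_release entry points alongside the previously mandatory ones. Below is a minimal sketch of a target template that would satisfy it, assuming the template layout and callback signatures of this era's include/linux/nvme-fc-driver.h; the handler bodies and the numeric limits are illustrative only, not taken from this patch.

/* hypothetical LLDD template sketch, not part of this patch */
#include <linux/nvme-fc-driver.h>

static void example_targetport_delete(struct nvmet_fc_target_port *tgtport)
{
	/* LLDD teardown of resources tied to the target port */
}

static int example_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
			      struct nvmefc_tgt_ls_req *lsreq)
{
	return 0;	/* post the LS response to the wire */
}

static int example_fcp_op(struct nvmet_fc_target_port *tgtport,
			  struct nvmefc_tgt_fcp_req *fcpreq)
{
	return 0;	/* start the data/response operation described by fcpreq->op */
}

static void example_fcp_abort(struct nvmet_fc_target_port *tgtport,
			      struct nvmefc_tgt_fcp_req *fcpreq)
{
	/* terminate the exchange and fail any outstanding op with error */
}

static void example_fcp_req_release(struct nvmet_fc_target_port *tgtport,
				    struct nvmefc_tgt_fcp_req *fcpreq)
{
	/* transport is done with fcpreq; the exchange context may be reused */
}

static struct nvmet_fc_target_template example_tgt_template = {
	.targetport_delete	= example_targetport_delete,
	.xmt_ls_rsp		= example_xmt_ls_rsp,
	.fcp_op			= example_fcp_op,
	.fcp_abort		= example_fcp_abort,		/* newly required */
	.fcp_req_release	= example_fcp_req_release,	/* newly required */
	.max_hw_queues		= 4,
	.max_sgl_segments	= 256,
	.max_dif_sgl_segments	= 256,
	.dma_boundary		= 0xFFFFFFFF,
};
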
@@ -1189,8 +1203,8 @@ nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
validation_errors[ret]);
iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
- ELS_RJT_LOGIC,
- ELS_EXPL_NONE, 0);
+ FCNVME_RJT_RC_LOGIC,
+ FCNVME_RJT_EXP_NONE, 0);
return;
}
@@ -1281,8 +1295,9 @@ nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport,
iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
(ret == VERR_NO_ASSOC) ?
- ELS_RJT_PROT : ELS_RJT_LOGIC,
- ELS_EXPL_NONE, 0);
+ FCNVME_RJT_RC_INV_ASSOC :
+ FCNVME_RJT_RC_LOGIC,
+ FCNVME_RJT_EXP_NONE, 0);
return;
}
@@ -1369,8 +1384,12 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
validation_errors[ret]);
iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
- (ret == 8) ? ELS_RJT_PROT : ELS_RJT_LOGIC,
- ELS_EXPL_NONE, 0);
+ (ret == VERR_NO_ASSOC) ?
+ FCNVME_RJT_RC_INV_ASSOC :
+ (ret == VERR_NO_CONN) ?
+ FCNVME_RJT_RC_INV_CONN :
+ FCNVME_RJT_RC_LOGIC,
+ FCNVME_RJT_EXP_NONE, 0);
return;
}
@@ -1479,7 +1498,7 @@ nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport,
default:
iod->lsreq->rsplen = nvmet_fc_format_rjt(iod->rspbuf,
NVME_FC_MAX_LS_BUFFER_SIZE, w0->ls_cmd,
- ELS_RJT_INVAL, ELS_EXPL_NONE, 0);
+ FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0);
}
nvmet_fc_xmt_ls_rsp(tgtport, iod);
@@ -1619,6 +1638,8 @@ nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
for_each_sg(fod->data_sg, sg, fod->data_sg_cnt, count)
__free_page(sg_page(sg));
kfree(fod->data_sg);
+ fod->data_sg = NULL;
+ fod->data_sg_cnt = 0;
}
@@ -1704,6 +1725,26 @@ nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
static void nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq);
static void
+nvmet_fc_abort_op(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_fcp_iod *fod)
+{
+ struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
+
+ /* data no longer needed */
+ nvmet_fc_free_tgt_pgs(fod);
+
+ /*
+ * if an ABTS was received or we issued the fcp_abort early
+ * don't call abort routine again.
+ */
+ /* no need to take lock - lock was taken earlier to get here */
+ if (!fod->aborted)
+ tgtport->ops->fcp_abort(&tgtport->fc_target_port, fcpreq);
+
+ nvmet_fc_free_fcp_iod(fod->queue, fod);
+}
+
+static void
nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
struct nvmet_fc_fcp_iod *fod)
{
@@ -1716,7 +1757,7 @@ nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
if (ret)
- nvmet_fc_abort_op(tgtport, fod->fcpreq);
+ nvmet_fc_abort_op(tgtport, fod);
}
static void
@@ -1725,6 +1766,7 @@ nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
{
struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
struct scatterlist *sg, *datasg;
+ unsigned long flags;
u32 tlen, sg_off;
int ret;
@@ -1789,10 +1831,13 @@ nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
*/
fod->abort = true;
- if (op == NVMET_FCOP_WRITEDATA)
+ if (op == NVMET_FCOP_WRITEDATA) {
+ spin_lock_irqsave(&fod->flock, flags);
+ fod->writedataactive = false;
+ spin_unlock_irqrestore(&fod->flock, flags);
nvmet_req_complete(&fod->req,
NVME_SC_FC_TRANSPORT_ERROR);
- else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ {
+ } else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ {
fcpreq->fcp_error = ret;
fcpreq->transferred_length = 0;
nvmet_fc_xmt_fcp_op_done(fod->fcpreq);
@@ -1800,32 +1845,54 @@ nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
}
}
+static inline bool
+__nvmet_fc_fod_op_abort(struct nvmet_fc_fcp_iod *fod, bool abort)
+{
+ struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
+ struct nvmet_fc_tgtport *tgtport = fod->tgtport;
+
+ /* if in the middle of an io and we need to tear down */
+ if (abort) {
+ if (fcpreq->op == NVMET_FCOP_WRITEDATA) {
+ nvmet_req_complete(&fod->req,
+ NVME_SC_FC_TRANSPORT_ERROR);
+ return true;
+ }
+
+ nvmet_fc_abort_op(tgtport, fod);
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * actual done handler for FCP operations when completed by the lldd
+ */
static void
-nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq)
+nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
{
- struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
+ struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
struct nvmet_fc_tgtport *tgtport = fod->tgtport;
unsigned long flags;
bool abort;
spin_lock_irqsave(&fod->flock, flags);
abort = fod->abort;
+ fod->writedataactive = false;
spin_unlock_irqrestore(&fod->flock, flags);
- /* if in the middle of an io and we need to tear down */
- if (abort && fcpreq->op != NVMET_FCOP_ABORT) {
- /* data no longer needed */
- nvmet_fc_free_tgt_pgs(fod);
-
- nvmet_req_complete(&fod->req, fcpreq->fcp_error);
- return;
- }
-
switch (fcpreq->op) {
case NVMET_FCOP_WRITEDATA:
+ if (__nvmet_fc_fod_op_abort(fod, abort))
+ return;
if (fcpreq->fcp_error ||
fcpreq->transferred_length != fcpreq->transfer_length) {
+ spin_lock(&fod->flock);
+ fod->abort = true;
+ spin_unlock(&fod->flock);
+
nvmet_req_complete(&fod->req,
NVME_SC_FC_TRANSPORT_ERROR);
return;
@@ -1833,6 +1900,10 @@ nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq)
fod->offset += fcpreq->transferred_length;
if (fod->offset != fod->total_length) {
+ spin_lock_irqsave(&fod->flock, flags);
+ fod->writedataactive = true;
+ spin_unlock_irqrestore(&fod->flock, flags);
+
/* transfer the next chunk */
nvmet_fc_transfer_fcp_data(tgtport, fod,
NVMET_FCOP_WRITEDATA);
@@ -1847,12 +1918,11 @@ nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq)
case NVMET_FCOP_READDATA:
case NVMET_FCOP_READDATA_RSP:
+ if (__nvmet_fc_fod_op_abort(fod, abort))
+ return;
if (fcpreq->fcp_error ||
fcpreq->transferred_length != fcpreq->transfer_length) {
- /* data no longer needed */
- nvmet_fc_free_tgt_pgs(fod);
-
- nvmet_fc_abort_op(tgtport, fod->fcpreq);
+ nvmet_fc_abort_op(tgtport, fod);
return;
}
@@ -1861,8 +1931,6 @@ nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq)
if (fcpreq->op == NVMET_FCOP_READDATA_RSP) {
/* data no longer needed */
nvmet_fc_free_tgt_pgs(fod);
- fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
- sizeof(fod->rspiubuf), DMA_TO_DEVICE);
nvmet_fc_free_fcp_iod(fod->queue, fod);
return;
}
@@ -1885,19 +1953,38 @@ nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq)
break;
case NVMET_FCOP_RSP:
- case NVMET_FCOP_ABORT:
- fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
- sizeof(fod->rspiubuf), DMA_TO_DEVICE);
+ if (__nvmet_fc_fod_op_abort(fod, abort))
+ return;
nvmet_fc_free_fcp_iod(fod->queue, fod);
break;
default:
- nvmet_fc_free_tgt_pgs(fod);
- nvmet_fc_abort_op(tgtport, fod->fcpreq);
break;
}
}
+static void
+nvmet_fc_fcp_rqst_op_done_work(struct work_struct *work)
+{
+ struct nvmet_fc_fcp_iod *fod =
+ container_of(work, struct nvmet_fc_fcp_iod, done_work);
+
+ nvmet_fc_fod_op_done(fod);
+}
+
+static void
+nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq)
+{
+ struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
+ struct nvmet_fc_tgt_queue *queue = fod->queue;
+
+ if (fod->tgtport->ops->target_features & NVMET_FCTGTFEAT_OPDONE_IN_ISR)
+ /* context switch so completion is not in ISR context */
+ queue_work_on(queue->cpu, queue->work_q, &fod->done_work);
+ else
+ nvmet_fc_fod_op_done(fod);
+}
+
/*
* actual completion handler after execution by the nvmet layer
*/
@@ -1919,10 +2006,7 @@ __nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport *tgtport,
fod->queue->sqhd = cqe->sq_head;
if (abort) {
- /* data no longer needed */
- nvmet_fc_free_tgt_pgs(fod);
-
- nvmet_fc_abort_op(tgtport, fod->fcpreq);
+ nvmet_fc_abort_op(tgtport, fod);
return;
}
@@ -2018,8 +2102,8 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
&fod->queue->nvme_cq,
&fod->queue->nvme_sq,
&nvmet_fc_tgt_fcp_ops);
- if (!ret) { /* bad SQE content */
- nvmet_fc_abort_op(tgtport, fod->fcpreq);
+ if (!ret) { /* bad SQE content or invalid ctrl state */
+ nvmet_fc_abort_op(tgtport, fod);
return;
}
@@ -2059,7 +2143,7 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
return;
transport_error:
- nvmet_fc_abort_op(tgtport, fod->fcpreq);
+ nvmet_fc_abort_op(tgtport, fod);
}
/*
@@ -2089,7 +2173,7 @@ nvmet_fc_handle_fcp_rqst_work(struct work_struct *work)
* If this routine returns error, the lldd should abort the exchange.
*
* @target_port: pointer to the (registered) target port the FCP CMD IU
- * was receive on.
+ * was received on.
* @fcpreq: pointer to a fcpreq request structure to be used to reference
* the exchange corresponding to the FCP Exchange.
* @cmdiubuf: pointer to the buffer containing the FCP CMD IU
@@ -2112,7 +2196,6 @@ nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
(be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4)))
return -EIO;
-
queue = nvmet_fc_find_target_queue(tgtport,
be64_to_cpu(cmdiu->connection_id));
if (!queue)
@@ -2142,12 +2225,68 @@ nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;
memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);
- queue_work_on(queue->cpu, queue->work_q, &fod->work);
+ if (tgtport->ops->target_features & NVMET_FCTGTFEAT_CMD_IN_ISR)
+ queue_work_on(queue->cpu, queue->work_q, &fod->work);
+ else
+ nvmet_fc_handle_fcp_rqst(tgtport, fod);
return 0;
}
EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req);
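
As a rough illustration of the contract described in the comment above, the sketch below shows an LLDD receive path handing a newly arrived FCP CMD IU to the transport; example_rcv_fcp_cmd() and example_hw_abort_exchange() are hypothetical names, and only nvmet_fc_rcv_fcp_req() comes from this file.

/* hypothetical LLDD command-reception path, not part of this patch */
static void example_hw_abort_exchange(struct nvmefc_tgt_fcp_req *fcpreq)
{
	/* LLDD-specific: terminate the FC exchange for this command */
}

static void example_rcv_fcp_cmd(struct nvmet_fc_target_port *targetport,
				struct nvmefc_tgt_fcp_req *fcpreq,
				void *cmdiubuf, u32 cmdiubuf_len)
{
	int ret;

	ret = nvmet_fc_rcv_fcp_req(targetport, fcpreq, cmdiubuf, cmdiubuf_len);
	if (ret)
		/* as documented above: on error the LLDD aborts the exchange */
		example_hw_abort_exchange(fcpreq);
}
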
+/**
+ * nvmet_fc_rcv_fcp_abort - transport entry point called by an LLDD
+ * upon the reception of an ABTS for a FCP command
+ *
+ * Notify the transport that an ABTS has been received for a FCP command
+ * that had been given to the transport via nvmet_fc_rcv_fcp_req(). The
+ * LLDD believes the command is still being worked on
+ * (template_ops->fcp_req_release() has not been called).
+ *
+ * The transport will wait for any outstanding work (an op to the LLDD,
+ * which the lldd should complete with error due to the ABTS; or the
+ * completion from the nvmet layer of the nvme command), then will
+ * stop processing and call the nvmet_fc_rcv_fcp_req() callback to
+ * return the i/o context to the LLDD. The LLDD may send the BA_ACC
+ * to the ABTS either after return from this function (assuming any
+ * outstanding op work has been terminated) or upon the callback being
+ * called.
+ *
+ * @target_port: pointer to the (registered) target port the FCP CMD IU
+ * was received on.
+ * @fcpreq: pointer to the fcpreq request structure that corresponds
+ * to the exchange that received the ABTS.
+ */
+void
+nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *target_port,
+ struct nvmefc_tgt_fcp_req *fcpreq)
+{
+ struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
+ struct nvmet_fc_tgt_queue *queue;
+ unsigned long flags;
+
+ if (!fod || fod->fcpreq != fcpreq)
+ /* job appears to have already completed, ignore abort */
+ return;
+
+ queue = fod->queue;
+
+ spin_lock_irqsave(&queue->qlock, flags);
+ if (fod->active) {
+ /*
+ * mark as abort. The abort handler, invoked upon completion
+ * of any work, will detect the aborted status and do the
+ * callback.
+ */
+ spin_lock(&fod->flock);
+ fod->abort = true;
+ fod->aborted = true;
+ spin_unlock(&fod->flock);
+ }
+ spin_unlock_irqrestore(&queue->qlock, flags);
+}
+EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_abort);
+
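
A matching sketch of the ABTS path the new kerneldoc describes; the ABTS entry point and the BA_ACC helper are hypothetical, and only nvmet_fc_rcv_fcp_abort() and the fcp_req_release() callback it refers to come from this patch.

/* hypothetical LLDD ABTS handling, not part of this patch */
static void example_send_ba_acc(struct nvmefc_tgt_fcp_req *fcpreq)
{
	/* LLDD-specific: send the BA_ACC for the aborted exchange */
}

static void example_rcv_abts(struct nvmet_fc_target_port *targetport,
			     struct nvmefc_tgt_fcp_req *fcpreq)
{
	/* tell the transport to stop working on this command */
	nvmet_fc_rcv_fcp_abort(targetport, fcpreq);

	/*
	 * Per the comment above, the BA_ACC may be sent now or deferred
	 * until the transport returns the i/o context through the
	 * fcp_req_release() callback.
	 */
	example_send_ba_acc(fcpreq);
}
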
enum {
FCT_TRADDR_ERR = 0,
FCT_TRADDR_WWNN = 1 << 0,
@@ -2177,7 +2316,7 @@ nvmet_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf)
if (!options)
return -ENOMEM;
- while ((p = strsep(&o, ",\n")) != NULL) {
+ while ((p = strsep(&o, ":\n")) != NULL) {
if (!*p)
continue;
@@ -2238,6 +2377,7 @@ nvmet_fc_add_port(struct nvmet_port *port)
if (!tgtport->port) {
tgtport->port = port;
port->priv = tgtport;
+ nvmet_fc_tgtport_get(tgtport);
ret = 0;
} else
ret = -EALREADY;