author    Linus Torvalds <torvalds@linux-foundation.org>  2017-05-01 10:39:57 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2017-05-01 10:39:57 -0700
commit    694752922b12bd318aa80191bd9d8c3dcfb39055 (patch)
tree      5afe83fd99100bea546dd5a1c1f778c58f41e5c0 /include/linux/nvme-fc-driver.h
parent    a351e9b9fc24e982ec2f0e76379a49826036da12 (diff)
parent    9438b3e080beccf6022138ea62192d55cc7dc4ed (diff)
Merge branch 'for-4.12/block' of git://git.kernel.dk/linux-block
Pull block layer updates from Jens Axboe:

 - Add BFQ IO scheduler under the new blk-mq scheduling framework. BFQ
   was initially a fork of CFQ, but subsequently changed to implement
   fairness based on B-WF2Q+, a modified variant of WF2Q. BFQ is meant
   to be used on desktop type single drives, providing good fairness.
   From Paolo.

 - Add Kyber IO scheduler. This is a full multiqueue aware scheduler,
   using a scalable token based algorithm that throttles IO based on
   live completion IO stats, similarly to blk-wbt. From Omar.

 - A series from Jan, moving users to separately allocated backing
   devices. This continues the work of separating backing device
   lifetimes, solving various problems with hot removal.

 - A series of updates for lightnvm, mostly from Javier. Includes a
   'pblk' target that exposes an open channel SSD as a physical block
   device.

 - A series of fixes and improvements for nbd from Josef.

 - A series from Omar, removing queue sharing between devices on mostly
   legacy drivers. This helps us clean up other bits, if we know that a
   queue only has a single device backing. This has been overdue for
   more than a decade.

 - Fixes for the blk-stats, and improvements to unify the stats and user
   windows. This both improves blk-wbt, and enables other users to
   register a need to receive IO stats for a device. From Omar.

 - blk-throttle improvements from Shaohua. This provides a scalable
   framework for implementing scalable prioritization - particularly for
   blk-mq, but applicable to any type of block device. The interface is
   marked experimental for now.

 - Bucketized IO stats for IO polling from Stephen Bates. This improves
   efficiency of polled workloads in the presence of mixed block size
   IO.

 - A few fixes for opal, from Scott.

 - A few pulls for NVMe, including a lot of fixes for NVMe-over-fabrics.
   From a variety of folks, mostly Sagi and James Smart.

 - A series from Bart, improving our exposed info and capabilities from
   the blk-mq debugfs support.

 - A series from Christoph, cleaning up how we handle WRITE_ZEROES.

 - A series from Christoph, cleaning up the block layer handling of how
   we track errors in a request. On top of being a nice cleanup, it also
   shrinks the size of struct request a bit.

 - Removal of mg_disk and hd (sorry Linus) by Christoph. The former was
   never used by platforms, and the latter has outlived its usefulness.

 - Various little bug fixes and cleanups from a wide variety of folks.
* 'for-4.12/block' of git://git.kernel.dk/linux-block: (329 commits)
  block: hide badblocks attribute by default
  blk-mq: unify hctx delay_work and run_work
  block: add kblock_mod_delayed_work_on()
  blk-mq: unify hctx delayed_run_work and run_work
  nbd: fix use after free on module unload
  MAINTAINERS: bfq: Add Paolo as maintainer for the BFQ I/O scheduler
  blk-mq-sched: alloate reserved tags out of normal pool
  mtip32xx: use runtime tag to initialize command header
  scsi: Implement blk_mq_ops.show_rq()
  blk-mq: Add blk_mq_ops.show_rq()
  blk-mq: Show operation, cmd_flags and rq_flags names
  blk-mq: Make blk_flags_show() callers append a newline character
  blk-mq: Move the "state" debugfs attribute one level down
  blk-mq: Unregister debugfs attributes earlier
  blk-mq: Only unregister hctxs for which registration succeeded
  blk-mq-debugfs: Rename functions for registering and unregistering the mq directory
  blk-mq: Let blk_mq_debugfs_register() look up the queue name
  blk-mq: Register <dev>/queue/mq after having registered <dev>/queue
  ide-pm: always pass 0 error to ide_complete_rq in ide_do_devset
  ide-pm: always pass 0 error to __blk_end_request_all
  ...
Diffstat (limited to 'include/linux/nvme-fc-driver.h')
-rw-r--r--  include/linux/nvme-fc-driver.h  104
1 file changed, 70 insertions(+), 34 deletions(-)
diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h
index f21471f7ee40..0db37158a61d 100644
--- a/include/linux/nvme-fc-driver.h
+++ b/include/linux/nvme-fc-driver.h
@@ -137,9 +137,9 @@ enum nvmefc_fcp_datadir {
* transferred. Should equal payload_length on success.
* @rcv_rsplen: length, in bytes, of the FCP RSP IU received.
* @status: Completion status of the FCP operation. must be 0 upon success,
- * NVME_SC_FC_xxx value upon failure. Note: this is NOT a
- * reflection of the NVME CQE completion status. Only the status
- * of the FCP operation at the NVME-FC level.
+ * negative errno value upon failure (ex: -EIO). Note: this is
+ * NOT a reflection of the NVME CQE completion status. Only the
+ * status of the FCP operation at the NVME-FC level.
*/
struct nvmefc_fcp_req {
void *cmdaddr;
@@ -533,9 +533,6 @@ enum {
* rsp as well
*/
NVMET_FCOP_RSP = 4, /* send rsp frame */
- NVMET_FCOP_ABORT = 5, /* abort exchange via ABTS */
- NVMET_FCOP_BA_ACC = 6, /* send BA_ACC */
- NVMET_FCOP_BA_RJT = 7, /* send BA_RJT */
};
/**
@@ -572,8 +569,6 @@ enum {
* upon compeletion of the operation. The nvmet-fc layer will also set a
* private pointer for its own use in the done routine.
*
- * Note: the LLDD must never fail a NVMET_FCOP_ABORT request !!
- *
* Values set by the NVMET-FC layer prior to calling the LLDD fcp_op
* entrypoint.
* @op: Indicates the FCP IU operation to perform (see NVMET_FCOP_xxx)
@@ -655,6 +650,22 @@ enum {
* on. The transport should pick a cpu to schedule the work
* on.
*/
+ NVMET_FCTGTFEAT_CMD_IN_ISR = (1 << 2),
+ /* Bit 2: When 0, the LLDD is calling the cmd rcv handler
+ * in a non-isr context, allowing the transport to finish
+ * op completion in the calling context. When 1, the LLDD
+ * is calling the cmd rcv handler in an ISR context,
+ * requiring the transport to transition to a workqueue
+ * for op completion.
+ */
+ NVMET_FCTGTFEAT_OPDONE_IN_ISR = (1 << 3),
+ /* Bit 3: When 0, the LLDD is calling the op done handler
+ * in a non-isr context, allowing the transport to finish
+ * op completion in the calling context. When 1, the LLDD
+ * is calling the op done handler in an ISR context,
+ * requiring the transport to transition to a workqueue
+ * for op completion.
+ */
};
@@ -725,12 +736,12 @@ struct nvmet_fc_target_port {
* be freed/released.
* Entrypoint is Mandatory.
*
- * @fcp_op: Called to perform a data transfer, transmit a response, or
- * abort an FCP opertion. The nvmefc_tgt_fcp_req structure is the same
- * LLDD-supplied exchange structure specified in the
- * nvmet_fc_rcv_fcp_req() call made when the FCP CMD IU was received.
- * The op field in the structure shall indicate the operation for
- * the LLDD to perform relative to the io.
+ * @fcp_op: Called to perform a data transfer or transmit a response.
+ * The nvmefc_tgt_fcp_req structure is the same LLDD-supplied
+ * exchange structure specified in the nvmet_fc_rcv_fcp_req() call
+ * made when the FCP CMD IU was received. The op field in the
+ * structure shall indicate the operation for the LLDD to perform
+ * relative to the io.
* NVMET_FCOP_READDATA operation: the LLDD is to send the
* payload data (described by sglist) to the host in 1 or
* more FC sequences (preferrably 1). Note: the fc-nvme layer
@@ -752,29 +763,31 @@ struct nvmet_fc_target_port {
* successfully, the LLDD is to update the nvmefc_tgt_fcp_req
* transferred_length field and may subsequently transmit the
* FCP_RSP iu payload (described by rspbuf, rspdma, rsplen).
- * The LLDD is to await FCP_CONF reception to confirm the RSP
- * reception by the host. The LLDD may retramsit the FCP_RSP iu
- * if necessary per FC-NVME. Upon reception of FCP_CONF, or upon
- * FCP_CONF failure, the LLDD is to set the nvmefc_tgt_fcp_req
- * fcp_error field and consider the operation complete..
+ * If FCP_CONF is supported, the LLDD is to await FCP_CONF
+ * reception to confirm the RSP reception by the host. The LLDD
+ * may retramsit the FCP_RSP iu if necessary per FC-NVME. Upon
+ * transmission of the FCP_RSP iu if FCP_CONF is not supported,
+ * or upon success/failure of FCP_CONF if it is supported, the
+ * LLDD is to set the nvmefc_tgt_fcp_req fcp_error field and
+ * consider the operation complete.
* NVMET_FCOP_RSP: the LLDD is to transmit the FCP_RSP iu payload
- * (described by rspbuf, rspdma, rsplen). The LLDD is to await
- * FCP_CONF reception to confirm the RSP reception by the host.
- * The LLDD may retramsit the FCP_RSP iu if necessary per FC-NVME.
- * Upon reception of FCP_CONF, or upon FCP_CONF failure, the
+ * (described by rspbuf, rspdma, rsplen). If FCP_CONF is
+ * supported, the LLDD is to await FCP_CONF reception to confirm
+ * the RSP reception by the host. The LLDD may retramsit the
+ * FCP_RSP iu if FCP_CONF is not received per FC-NVME. Upon
+ * transmission of the FCP_RSP iu if FCP_CONF is not supported,
+ * or upon success/failure of FCP_CONF if it is supported, the
* LLDD is to set the nvmefc_tgt_fcp_req fcp_error field and
- * consider the operation complete..
- * NVMET_FCOP_ABORT: the LLDD is to terminate the exchange
- * corresponding to the fcp operation. The LLDD shall send
- * ABTS and follow FC exchange abort-multi rules, including
- * ABTS retries and possible logout.
+ * consider the operation complete.
* Upon completing the indicated operation, the LLDD is to set the
* status fields for the operation (tranferred_length and fcp_error
- * status) in the request, then all the "done" routine
- * indicated in the fcp request. Upon return from the "done"
- * routine for either a NVMET_FCOP_RSP or NVMET_FCOP_ABORT operation
- * the fc-nvme layer will not longer reference the fcp request,
- * allowing the LLDD to free/release the fcp request.
+ * status) in the request, then call the "done" routine
+ * indicated in the fcp request. After the operation completes,
+ * regardless of whether the FCP_RSP iu was successfully transmit,
+ * the LLDD-supplied exchange structure must remain valid until the
+ * transport calls the fcp_req_release() callback to return ownership
+ * of the exchange structure back to the LLDD so that it may be used
+ * for another fcp command.
* Note: when calling the done routine for READDATA or WRITEDATA
* operations, the fc-nvme layer may immediate convert, in the same
* thread and before returning to the LLDD, the fcp operation to
@@ -786,6 +799,22 @@ struct nvmet_fc_target_port {
* Returns 0 on success, -<errno> on failure (Ex: -EIO)
* Entrypoint is Mandatory.
*
+ * @fcp_abort: Called by the transport to abort an active command.
+ * The command may be in-between operations (nothing active in LLDD)
+ * or may have an active WRITEDATA operation pending. The LLDD is to
+ * initiate the ABTS process for the command and return from the
+ * callback. The ABTS does not need to be complete on the command.
+ * The fcp_abort callback inherently cannot fail. After the
+ * fcp_abort() callback completes, the transport will wait for any
+ * outstanding operation (if there was one) to complete, then will
+ * call the fcp_req_release() callback to return the command's
+ * exchange context back to the LLDD.
+ *
+ * @fcp_req_release: Called by the transport to return a nvmefc_tgt_fcp_req
+ * to the LLDD after all operations on the fcp operation are complete.
+ * This may be due to the command completing or upon completion of
+ * abort cleanup.
+ *
* @max_hw_queues: indicates the maximum number of hw queues the LLDD
* supports for cpu affinitization.
* Value is Mandatory. Must be at least 1.
@@ -820,7 +849,11 @@ struct nvmet_fc_target_template {
int (*xmt_ls_rsp)(struct nvmet_fc_target_port *tgtport,
struct nvmefc_tgt_ls_req *tls_req);
int (*fcp_op)(struct nvmet_fc_target_port *tgtport,
- struct nvmefc_tgt_fcp_req *);
+ struct nvmefc_tgt_fcp_req *fcpreq);
+ void (*fcp_abort)(struct nvmet_fc_target_port *tgtport,
+ struct nvmefc_tgt_fcp_req *fcpreq);
+ void (*fcp_req_release)(struct nvmet_fc_target_port *tgtport,
+ struct nvmefc_tgt_fcp_req *fcpreq);
u32 max_hw_queues;
u16 max_sgl_segments;
@@ -848,4 +881,7 @@ int nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *tgtport,
struct nvmefc_tgt_fcp_req *fcpreq,
void *cmdiubuf, u32 cmdiubuf_len);
+void nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *tgtport,
+ struct nvmefc_tgt_fcp_req *fcpreq);
+
#endif /* _NVME_FC_DRIVER_H */
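
For context on how the reworked API in this patch fits together: the NVMET_FCOP_ABORT/BA_ACC/BA_RJT operations are dropped from fcp_op(), and the LLDD instead provides two dedicated callbacks, fcp_abort() and fcp_req_release(), alongside the new nvmet_fc_rcv_fcp_abort() entry point and the *_IN_ISR feature bits. The sketch below shows roughly how an LLDD might fill in the updated nvmet_fc_target_template; it is illustrative only: every my_lldd_* name is invented, hardware handling is omitted, and only the nvmet_fc_* / NVMET_* identifiers come from the header shown in the diff.

/*
 * Illustrative sketch only. my_lldd_* names are hypothetical; most
 * mandatory template fields and all hardware details are omitted.
 */
#include <linux/nvme-fc-driver.h>

static int my_lldd_fcp_op(struct nvmet_fc_target_port *tgtport,
			  struct nvmefc_tgt_fcp_req *fcpreq)
{
	switch (fcpreq->op) {
	case NVMET_FCOP_READDATA:
	case NVMET_FCOP_WRITEDATA:
	case NVMET_FCOP_READDATA_RSP:
	case NVMET_FCOP_RSP:
		/*
		 * Queue the data transfer / response to hardware. On
		 * completion, set fcpreq->transferred_length and
		 * fcpreq->fcp_error, then call fcpreq->done(fcpreq).
		 */
		return 0;
	default:
		return -EINVAL;
	}
}

static void my_lldd_fcp_abort(struct nvmet_fc_target_port *tgtport,
			      struct nvmefc_tgt_fcp_req *fcpreq)
{
	/*
	 * Initiate ABTS for the exchange and return; this callback
	 * cannot fail. The transport waits for any outstanding op to
	 * finish and then calls fcp_req_release().
	 */
}

static void my_lldd_fcp_req_release(struct nvmet_fc_target_port *tgtport,
				    struct nvmefc_tgt_fcp_req *fcpreq)
{
	/*
	 * Ownership of the exchange context returns to the LLDD here;
	 * it may now be reused for another FCP command.
	 */
}

static struct nvmet_fc_target_template my_lldd_tgt_template = {
	/* targetport_delete, xmt_ls_rsp, queue/sgl limits etc. omitted */
	.fcp_op		 = my_lldd_fcp_op,
	.fcp_abort	 = my_lldd_fcp_abort,
	.fcp_req_release = my_lldd_fcp_req_release,
	/* feature bits from the enum added in the hunk above */
	.target_features = NVMET_FCTGTFEAT_CMD_IN_ISR |
			   NVMET_FCTGTFEAT_OPDONE_IN_ISR,
};

On the wire side, an abort detected by the LLDD (e.g. a received ABTS) would be reported to the transport through the new nvmet_fc_rcv_fcp_abort(tgtport, fcpreq) entry point declared at the bottom of the diff; the transport then drives the fcp_abort()/fcp_req_release() sequence described in the comments above.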