Diffstat (limited to 'drivers/scsi/lpfc')
-rw-r--r--  drivers/scsi/lpfc/lpfc.h           |   54
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c      |  382
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.c       |   62
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h      |   22
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c        |  182
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c   |  340
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.h   |   61
-rw-r--r--  drivers/scsi/lpfc/lpfc_disc.h      |    3
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c       |  173
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c   |  380
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h        |    6
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h       |   68
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c      | 1818
-rw-r--r--  drivers/scsi/lpfc/lpfc_logmsg.h    |   17
-rw-r--r--  drivers/scsi/lpfc/lpfc_mbox.c      |    1
-rw-r--r--  drivers/scsi/lpfc/lpfc_mem.c       |   64
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c |  286
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvme.c      |  474
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvmet.c     |  129
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvmet.h     |    2
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c      |  624
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c       |  903
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.h       |   14
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h      |   95
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h   |    2
-rw-r--r--  drivers/scsi/lpfc/lpfc_vport.c     |    8
26 files changed, 4110 insertions(+), 2060 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 2c3bb8a966e5..04d73e2be373 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -51,6 +51,8 @@ struct lpfc_sli2_slim;
cmnd for menlo needs nearly twice as for firmware
downloads using bsg */
+#define LPFC_DEFAULT_XPSGL_SIZE 256
+#define LPFC_MAX_SG_TABLESIZE 0xffff
#define LPFC_MIN_SG_SLI4_BUF_SZ 0x800 /* based on LPFC_DEFAULT_SG_SEG_CNT */
#define LPFC_MAX_BG_SLI4_SEG_CNT_DIF 128 /* sg element count for BlockGuard */
#define LPFC_MAX_SG_SEG_CNT_DIF 512 /* sg element count per scsi cmnd */
@@ -603,6 +605,12 @@ struct lpfc_epd_pool {
spinlock_t lock; /* lock for expedite pool */
};
+enum ras_state {
+ INACTIVE,
+ REG_INPROGRESS,
+ ACTIVE
+};
+
struct lpfc_ras_fwlog {
uint8_t *fwlog_buff;
uint32_t fw_buffcount; /* Buffer size posted to FW */
@@ -619,7 +627,7 @@ struct lpfc_ras_fwlog {
bool ras_enabled; /* Ras Enabled for the function */
#define LPFC_RAS_DISABLE_LOGGING 0x00
#define LPFC_RAS_ENABLE_LOGGING 0x01
- bool ras_active; /* RAS logging running state */
+ enum ras_state state; /* RAS logging running state */
};
struct lpfc_hba {
@@ -723,6 +731,7 @@ struct lpfc_hba {
#define HBA_FCOE_MODE 0x4 /* HBA function in FCoE Mode */
#define HBA_SP_QUEUE_EVT 0x8 /* Slow-path qevt posted to worker thread*/
#define HBA_POST_RECEIVE_BUFFER 0x10 /* Rcv buffers need to be posted */
+#define HBA_PERSISTENT_TOPO 0x20 /* Persistent topology support in hba */
#define ELS_XRI_ABORT_EVENT 0x40
#define ASYNC_EVENT 0x80
#define LINK_DISABLED 0x100 /* Link disabled by user */
@@ -732,14 +741,13 @@ struct lpfc_hba {
#define HBA_AER_ENABLED 0x1000 /* AER enabled with HBA */
#define HBA_DEVLOSS_TMO 0x2000 /* HBA in devloss timeout */
#define HBA_RRQ_ACTIVE 0x4000 /* process the rrq active list */
-#define HBA_FCP_IOQ_FLUSH 0x8000 /* FCP I/O queues being flushed */
+#define HBA_IOQ_FLUSH 0x8000 /* FCP/NVME I/O queues being flushed */
#define HBA_FW_DUMP_OP 0x10000 /* Skips fn reset before FW dump */
#define HBA_RECOVERABLE_UE 0x20000 /* Firmware supports recoverable UE */
#define HBA_FORCED_LINK_SPEED 0x40000 /*
* Firmware supports Forced Link Speed
* capability
*/
-#define HBA_NVME_IOQ_FLUSH 0x80000 /* NVME IO queues flushed. */
#define HBA_FLOGI_ISSUED 0x100000 /* FLOGI was issued */
uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
@@ -795,10 +803,12 @@ struct lpfc_hba {
uint8_t mds_diags_support;
uint8_t bbcredit_support;
uint8_t enab_exp_wqcq_pages;
+ u8 nsler; /* Firmware supports FC-NVMe-2 SLER */
/* HBA Config Parameters */
uint32_t cfg_ack0;
uint32_t cfg_xri_rebalancing;
+ uint32_t cfg_xpsgl;
uint32_t cfg_enable_npiv;
uint32_t cfg_enable_rrq;
uint32_t cfg_topology;
@@ -824,8 +834,10 @@ struct lpfc_hba {
uint32_t cfg_cq_poll_threshold;
uint32_t cfg_cq_max_proc_limit;
uint32_t cfg_fcp_cpu_map;
+ uint32_t cfg_fcp_mq_threshold;
uint32_t cfg_hdw_queue;
uint32_t cfg_irq_chann;
+ uint32_t cfg_irq_numa;
uint32_t cfg_suppress_rsp;
uint32_t cfg_nvme_oas;
uint32_t cfg_nvme_embed_cmd;
@@ -868,7 +880,6 @@ struct lpfc_hba {
uint32_t cfg_aer_support;
uint32_t cfg_sriov_nr_virtfn;
uint32_t cfg_request_firmware_upgrade;
- uint32_t cfg_iocb_cnt;
uint32_t cfg_suppress_link_up;
uint32_t cfg_rrq_xri_bitmap_sz;
uint32_t cfg_delay_discovery;
@@ -904,6 +915,7 @@ struct lpfc_hba {
wait_queue_head_t work_waitq;
struct task_struct *worker_thread;
unsigned long data_flags;
+ uint32_t border_sge_num;
uint32_t hbq_in_use; /* HBQs in use flag */
uint32_t hbq_count; /* Count of configured HBQs */
@@ -985,7 +997,7 @@ struct lpfc_hba {
struct dma_pool *lpfc_drb_pool; /* data receive buffer pool */
struct dma_pool *lpfc_nvmet_drb_pool; /* data receive buffer pool */
struct dma_pool *lpfc_hbq_pool; /* SLI3 hbq buffer pool */
- struct dma_pool *txrdy_payload_pool;
+ struct dma_pool *lpfc_cmd_rsp_buf_pool;
struct lpfc_dma_pool lpfc_mbuf_safety_pool;
mempool_t *mbox_mem_pool;
@@ -1033,8 +1045,6 @@ struct lpfc_hba {
struct dentry *debug_hbqinfo;
struct dentry *debug_dumpHostSlim;
struct dentry *debug_dumpHBASlim;
- struct dentry *debug_dumpData; /* BlockGuard BPL */
- struct dentry *debug_dumpDif; /* BlockGuard BPL */
struct dentry *debug_InjErrLBA; /* LBA to inject errors at */
struct dentry *debug_InjErrNPortID; /* NPortID to inject errors at */
struct dentry *debug_InjErrWWPN; /* WWPN to inject errors at */
@@ -1051,6 +1061,7 @@ struct lpfc_hba {
#ifdef LPFC_HDWQ_LOCK_STAT
struct dentry *debug_lockstat;
#endif
+ struct dentry *debug_ras_log;
atomic_t nvmeio_trc_cnt;
uint32_t nvmeio_trc_size;
uint32_t nvmeio_trc_output_idx;
@@ -1205,6 +1216,15 @@ struct lpfc_hba {
uint64_t ktime_seg10_min;
uint64_t ktime_seg10_max;
#endif
+
+ struct hlist_node cpuhp; /* used for cpuhp per hba callback */
+ struct timer_list cpuhp_poll_timer;
+ struct list_head poll_list; /* slowpath eq polling list */
+#define LPFC_POLL_HB 1 /* slowpath heartbeat */
+#define LPFC_POLL_FASTPATH 0 /* called from fastpath */
+#define LPFC_POLL_SLOWPATH 1 /* called from slowpath */
+
+ char os_host_name[MAXHOSTNAMELEN];
};
static inline struct Scsi_Host *
@@ -1295,6 +1315,26 @@ lpfc_phba_elsring(struct lpfc_hba *phba)
}
/**
+ * lpfc_next_online_numa_cpu - Finds next online CPU on NUMA node
+ * @numa_mask: Pointer to phba's numa_mask member.
+ * @start: starting cpu index
+ *
+ * Note: If no valid cpu found, then nr_cpu_ids is returned.
+ *
+ **/
+static inline unsigned int
+lpfc_next_online_numa_cpu(const struct cpumask *numa_mask, unsigned int start)
+{
+ unsigned int cpu_it;
+
+ for_each_cpu_wrap(cpu_it, numa_mask, start) {
+ if (cpu_online(cpu_it))
+ break;
+ }
+
+ return cpu_it;
+}
+/**
* lpfc_sli4_mod_hba_eq_delay - update EQ delay
* @phba: Pointer to HBA context object.
* @q: The Event Queue to update.
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index ea62322ffe2b..46f56f30f77e 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -176,7 +176,6 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
int i;
int len = 0;
char tmp[LPFC_MAX_NVME_INFO_TMP_LEN] = {0};
- unsigned long iflags = 0;
if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
len = scnprintf(buf, PAGE_SIZE, "NVME Disabled\n");
@@ -347,7 +346,6 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
if (strlcat(buf, "\nNVME Initiator Enabled\n", PAGE_SIZE) >= PAGE_SIZE)
goto buffer_done;
- rcu_read_lock();
scnprintf(tmp, sizeof(tmp),
"XRI Dist lpfc%d Total %d IO %d ELS %d\n",
phba->brd_no,
@@ -355,7 +353,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
phba->sli4_hba.io_xri_max,
lpfc_sli4_get_els_iocb_cnt(phba));
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto buffer_done;
/* Port state is only one of two values for now. */
if (localport->port_id)
@@ -371,15 +369,17 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
wwn_to_u64(vport->fc_nodename.u.wwn),
localport->port_id, statep);
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto buffer_done;
+
+ spin_lock_irq(shost->host_lock);
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
nrport = NULL;
- spin_lock_irqsave(&vport->phba->hbalock, iflags);
+ spin_lock(&vport->phba->hbalock);
rport = lpfc_ndlp_get_nrport(ndlp);
if (rport)
nrport = rport->remoteport;
- spin_unlock_irqrestore(&vport->phba->hbalock, iflags);
+ spin_unlock(&vport->phba->hbalock);
if (!nrport)
continue;
@@ -398,39 +398,39 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
/* Tab in to show lport ownership. */
if (strlcat(buf, "NVME RPORT ", PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto unlock_buf_done;
if (phba->brd_no >= 10) {
if (strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto unlock_buf_done;
}
scnprintf(tmp, sizeof(tmp), "WWPN x%llx ",
nrport->port_name);
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto unlock_buf_done;
scnprintf(tmp, sizeof(tmp), "WWNN x%llx ",
nrport->node_name);
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto unlock_buf_done;
scnprintf(tmp, sizeof(tmp), "DID x%06x ",
nrport->port_id);
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto unlock_buf_done;
/* An NVME rport can have multiple roles. */
if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR) {
if (strlcat(buf, "INITIATOR ", PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto unlock_buf_done;
}
if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET) {
if (strlcat(buf, "TARGET ", PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto unlock_buf_done;
}
if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY) {
if (strlcat(buf, "DISCSRVC ", PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto unlock_buf_done;
}
if (nrport->port_role & ~(FC_PORT_ROLE_NVME_INITIATOR |
FC_PORT_ROLE_NVME_TARGET |
@@ -438,14 +438,14 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
scnprintf(tmp, sizeof(tmp), "UNKNOWN ROLE x%x",
nrport->port_role);
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto unlock_buf_done;
}
scnprintf(tmp, sizeof(tmp), "%s\n", statep);
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto unlock_buf_done;
}
- rcu_read_unlock();
+ spin_unlock_irq(shost->host_lock);
if (!lport)
goto buffer_done;
@@ -505,11 +505,11 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
atomic_read(&lport->cmpl_fcp_err));
strlcat(buf, tmp, PAGE_SIZE);
- /* RCU is already unlocked. */
+ /* host_lock is already unlocked. */
goto buffer_done;
- rcu_unlock_buf_done:
- rcu_read_unlock();
+ unlock_buf_done:
+ spin_unlock_irq(shost->host_lock);
buffer_done:
len = strnlen(buf, PAGE_SIZE);
@@ -841,7 +841,8 @@ lpfc_hdw_show(struct device *dev, struct device_attribute *attr, char *buf)
lpfc_vpd_t *vp = &phba->vpd;
lpfc_jedec_to_ascii(vp->rev.biuRev, hdw);
- return scnprintf(buf, PAGE_SIZE, "%s\n", hdw);
+ return scnprintf(buf, PAGE_SIZE, "%s %08x %08x\n", hdw,
+ vp->rev.smRev, vp->rev.smFwRev);
}
/**
@@ -1474,8 +1475,9 @@ lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *phba)
int i;
msleep(100);
- lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
- &portstat_reg.word0);
+ if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
+ &portstat_reg.word0))
+ return -EIO;
/* verify if privileged for the request operation */
if (!bf_get(lpfc_sliport_status_rn, &portstat_reg) &&
@@ -1485,8 +1487,9 @@ lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *phba)
/* wait for the SLI port firmware ready after firmware reset */
for (i = 0; i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT; i++) {
msleep(10);
- lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
- &portstat_reg.word0);
+ if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
+ &portstat_reg.word0))
+ continue;
if (!bf_get(lpfc_sliport_status_err, &portstat_reg))
continue;
if (!bf_get(lpfc_sliport_status_rn, &portstat_reg))
@@ -1641,7 +1644,7 @@ lpfc_set_trunking(struct lpfc_hba *phba, char *buff_out)
{
LPFC_MBOXQ_t *mbox = NULL;
unsigned long val = 0;
- char *pval = 0;
+ char *pval = NULL;
int rc = 0;
if (!strncmp("enable", buff_out,
@@ -3532,6 +3535,31 @@ LPFC_ATTR_R(enable_rrq, 2, 0, 2,
LPFC_ATTR_R(suppress_link_up, LPFC_INITIALIZE_LINK, LPFC_INITIALIZE_LINK,
LPFC_DELAY_INIT_LINK_INDEFINITELY,
"Suppress Link Up at initialization");
+
+static ssize_t
+lpfc_pls_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n",
+ phba->sli4_hba.pc_sli4_params.pls);
+}
+static DEVICE_ATTR(pls, 0444,
+ lpfc_pls_show, NULL);
+
+static ssize_t
+lpfc_pt_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n",
+ (phba->hba_flag & HBA_PERSISTENT_TOPO) ? 1 : 0);
+}
+static DEVICE_ATTR(pt, 0444,
+ lpfc_pt_show, NULL);
+
/*
# lpfc_cnt: Number of IOCBs allocated for ELS, CT, and ABTS
# 1 - (1024)
@@ -3579,9 +3607,6 @@ lpfc_txcmplq_hw_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(txcmplq_hw, S_IRUGO,
lpfc_txcmplq_hw_show, NULL);
-LPFC_ATTR_R(iocb_cnt, 2, 1, 5,
- "Number of IOCBs alloc for ELS, CT, and ABTS: 1k to 5k IOCBs");
-
/*
# lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear
# until the timer expires. Value range is [0,255]. Default value is 30.
@@ -3682,8 +3707,8 @@ lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
if (rport)
remoteport = rport->remoteport;
spin_unlock(&vport->phba->hbalock);
- if (remoteport)
- nvme_fc_set_remoteport_devloss(rport->remoteport,
+ if (rport && remoteport)
+ nvme_fc_set_remoteport_devloss(remoteport,
vport->cfg_devloss_tmo);
#endif
}
@@ -4095,8 +4120,16 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr,
val);
return -EINVAL;
}
- if ((phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC ||
- phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC) &&
+ /*
+ * The 'topology' is not a configurable parameter if :
+ * - persistent topology enabled
+ * - G7/G6 with no private loop support
+ */
+
+ if ((phba->hba_flag & HBA_PERSISTENT_TOPO ||
+ (!phba->sli4_hba.pc_sli4_params.pls &&
+ (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC ||
+ phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC))) &&
val == 4) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"3114 Loop mode not supported\n");
@@ -5297,7 +5330,7 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
len += scnprintf(buf + len, PAGE_SIZE - len,
"CPU %02d not present\n",
phba->sli4_hba.curr_disp_cpu);
- else if (cpup->irq == LPFC_VECTOR_MAP_EMPTY) {
+ else if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
if (cpup->hdwq == LPFC_VECTOR_MAP_EMPTY)
len += scnprintf(
buf + len, PAGE_SIZE - len,
@@ -5310,10 +5343,10 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
else
len += scnprintf(
buf + len, PAGE_SIZE - len,
- "CPU %02d EQ %04d hdwq %04d "
+ "CPU %02d EQ None hdwq %04d "
"physid %d coreid %d ht %d ua %d\n",
phba->sli4_hba.curr_disp_cpu,
- cpup->eq, cpup->hdwq, cpup->phys_id,
+ cpup->hdwq, cpup->phys_id,
cpup->core_id,
(cpup->flag & LPFC_CPU_MAP_HYPER),
(cpup->flag & LPFC_CPU_MAP_UNASSIGN));
@@ -5328,7 +5361,7 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
cpup->core_id,
(cpup->flag & LPFC_CPU_MAP_HYPER),
(cpup->flag & LPFC_CPU_MAP_UNASSIGN),
- cpup->irq);
+ lpfc_get_irq(cpup->eq));
else
len += scnprintf(
buf + len, PAGE_SIZE - len,
@@ -5339,7 +5372,7 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
cpup->core_id,
(cpup->flag & LPFC_CPU_MAP_HYPER),
(cpup->flag & LPFC_CPU_MAP_UNASSIGN),
- cpup->irq);
+ lpfc_get_irq(cpup->eq));
}
phba->sli4_hba.curr_disp_cpu++;
@@ -5467,15 +5500,12 @@ LPFC_ATTR_RW(nvmet_fb_size, 0, 0, 65536,
* lpfc_nvme_enable_fb: Enable NVME first burst on I and T functions.
* For the Initiator (I), enabling this parameter means that an NVMET
* PRLI response with FBA enabled and an FB_SIZE set to a nonzero value will be
- * processed by the initiator for subsequent NVME FCP IO. For the target
- * function (T), enabling this parameter qualifies the lpfc_nvmet_fb_size
- * driver parameter as the target function's first burst size returned to the
- * initiator in the target's NVME PRLI response. Parameter supported on physical
- * port only - no NPIV support.
+ * processed by the initiator for subsequent NVME FCP IO.
+ * Currently, this feature is not supported on the NVME target
* Value range is [0,1]. Default value is 0 (disabled).
*/
LPFC_ATTR_RW(nvme_enable_fb, 0, 0, 1,
- "Enable First Burst feature on I and T functions.");
+ "Enable First Burst feature for NVME Initiator.");
/*
# lpfc_max_scsicmpl_time: Use scsi command completion time to control I/O queue
@@ -5709,6 +5739,19 @@ LPFC_ATTR_RW(nvme_embed_cmd, 1, 0, 2,
"Embed NVME Command in WQE");
/*
+ * lpfc_fcp_mq_threshold: Set the maximum number of Hardware Queues
+ * the driver will advertise it supports to the SCSI layer.
+ *
+ * 0 = Set nr_hw_queues by the number of CPUs or HW queues.
+ * 1,256 = Manually specify nr_hw_queue value to be advertised,
+ *
+ * Value range is [0,256]. Default value is 8.
+ */
+LPFC_ATTR_R(fcp_mq_threshold, LPFC_FCP_MQ_THRESHOLD_DEF,
+ LPFC_FCP_MQ_THRESHOLD_MIN, LPFC_FCP_MQ_THRESHOLD_MAX,
+ "Set the number of SCSI Queues advertised");
+
+/*
* lpfc_hdw_queue: Set the number of Hardware Queues the driver
* will advertise it supports to the NVME and SCSI layers. This also
* will map to the number of CQ/WQ pairs the driver will create.
@@ -5718,30 +5761,130 @@ LPFC_ATTR_RW(nvme_embed_cmd, 1, 0, 2,
* A hardware IO queue maps (qidx) to a specific driver CQ/WQ.
*
* 0 = Configure the number of hdw queues to the number of active CPUs.
- * 1,128 = Manually specify how many hdw queues to use.
+ * 1,256 = Manually specify how many hdw queues to use.
*
- * Value range is [0,128]. Default value is 0.
+ * Value range is [0,256]. Default value is 0.
*/
LPFC_ATTR_R(hdw_queue,
LPFC_HBA_HDWQ_DEF,
LPFC_HBA_HDWQ_MIN, LPFC_HBA_HDWQ_MAX,
"Set the number of I/O Hardware Queues");
+static inline void
+lpfc_assign_default_irq_numa(struct lpfc_hba *phba)
+{
+#if IS_ENABLED(CONFIG_X86)
+ /* If AMD architecture, then default is LPFC_IRQ_CHANN_NUMA */
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+ phba->cfg_irq_numa = 1;
+ else
+ phba->cfg_irq_numa = 0;
+#else
+ phba->cfg_irq_numa = 0;
+#endif
+}
+
/*
* lpfc_irq_chann: Set the number of IRQ vectors that are available
* for Hardware Queues to utilize. This also will map to the number
* of EQ / MSI-X vectors the driver will create. This should never be
* more than the number of Hardware Queues
*
- * 0 = Configure number of IRQ Channels to the number of active CPUs.
- * 1,128 = Manually specify how many IRQ Channels to use.
+ * 0 = Configure number of IRQ Channels to:
+ * if AMD architecture, number of CPUs on HBA's NUMA node
+ * otherwise, number of active CPUs.
+ * [1,256] = Manually specify how many IRQ Channels to use.
*
- * Value range is [0,128]. Default value is 0.
+ * Value range is [0,256]. Default value is [0].
*/
-LPFC_ATTR_R(irq_chann,
- LPFC_HBA_HDWQ_DEF,
- LPFC_HBA_HDWQ_MIN, LPFC_HBA_HDWQ_MAX,
- "Set the number of I/O IRQ Channels");
+static uint lpfc_irq_chann = LPFC_IRQ_CHANN_DEF;
+module_param(lpfc_irq_chann, uint, 0444);
+MODULE_PARM_DESC(lpfc_irq_chann, "Set number of interrupt vectors to allocate");
+
+/* lpfc_irq_chann_init - Set the hba irq_chann initial value
+ * @phba: lpfc_hba pointer.
+ * @val: contains the initial value
+ *
+ * Description:
+ * Validates the initial value is within range and assigns it to the
+ * adapter. If not in range, an error message is posted and the
+ * default value is assigned.
+ *
+ * Returns:
+ * zero if value is in range and is set
+ * -EINVAL if value was out of range
+ **/
+static int
+lpfc_irq_chann_init(struct lpfc_hba *phba, uint32_t val)
+{
+ const struct cpumask *numa_mask;
+
+ if (phba->cfg_use_msi != 2) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "8532 use_msi = %u ignoring cfg_irq_numa\n",
+ phba->cfg_use_msi);
+ phba->cfg_irq_numa = 0;
+ phba->cfg_irq_chann = LPFC_IRQ_CHANN_MIN;
+ return 0;
+ }
+
+ /* Check if default setting was passed */
+ if (val == LPFC_IRQ_CHANN_DEF)
+ lpfc_assign_default_irq_numa(phba);
+
+ if (phba->cfg_irq_numa) {
+ numa_mask = &phba->sli4_hba.numa_mask;
+
+ if (cpumask_empty(numa_mask)) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "8533 Could not identify NUMA node, "
+ "ignoring cfg_irq_numa\n");
+ phba->cfg_irq_numa = 0;
+ phba->cfg_irq_chann = LPFC_IRQ_CHANN_MIN;
+ } else {
+ phba->cfg_irq_chann = cpumask_weight(numa_mask);
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "8543 lpfc_irq_chann set to %u "
+ "(numa)\n", phba->cfg_irq_chann);
+ }
+ } else {
+ if (val > LPFC_IRQ_CHANN_MAX) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "8545 lpfc_irq_chann attribute cannot "
+ "be set to %u, allowed range is "
+ "[%u,%u]\n",
+ val,
+ LPFC_IRQ_CHANN_MIN,
+ LPFC_IRQ_CHANN_MAX);
+ phba->cfg_irq_chann = LPFC_IRQ_CHANN_MIN;
+ return -EINVAL;
+ }
+ phba->cfg_irq_chann = val;
+ }
+
+ return 0;
+}
+
+/**
+ * lpfc_irq_chann_show - Display value of irq_chann
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains a string with the list sizes
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_irq_chann_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", phba->cfg_irq_chann);
+}
+
+static DEVICE_ATTR_RO(lpfc_irq_chann);
/*
# lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware.
@@ -5914,7 +6057,7 @@ lpfc_sg_seg_cnt_init(struct lpfc_hba *phba, int val)
* 1 = MDS Diagnostics enabled
* Value range is [0,1]. Default value is 0.
*/
-LPFC_ATTR_R(enable_mds_diags, 0, 0, 1, "Enable MDS Diagnostics");
+LPFC_ATTR_RW(enable_mds_diags, 0, 0, 1, "Enable MDS Diagnostics");
/*
* lpfc_ras_fwlog_buffsize: Firmware logging host buffer size
@@ -5922,7 +6065,53 @@ LPFC_ATTR_R(enable_mds_diags, 0, 0, 1, "Enable MDS Diagnostics");
* [1-4] = Multiple of 1/4th Mb of host memory for FW logging
* Value range [0..4]. Default value is 0
*/
-LPFC_ATTR_RW(ras_fwlog_buffsize, 0, 0, 4, "Host memory for FW logging");
+LPFC_ATTR(ras_fwlog_buffsize, 0, 0, 4, "Host memory for FW logging");
+lpfc_param_show(ras_fwlog_buffsize);
+
+static ssize_t
+lpfc_ras_fwlog_buffsize_set(struct lpfc_hba *phba, uint val)
+{
+ int ret = 0;
+ enum ras_state state;
+
+ if (!lpfc_rangecheck(val, 0, 4))
+ return -EINVAL;
+
+ if (phba->cfg_ras_fwlog_buffsize == val)
+ return 0;
+
+ if (phba->cfg_ras_fwlog_func != PCI_FUNC(phba->pcidev->devfn))
+ return -EINVAL;
+
+ spin_lock_irq(&phba->hbalock);
+ state = phba->ras_fwlog.state;
+ spin_unlock_irq(&phba->hbalock);
+
+ if (state == REG_INPROGRESS) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "6147 RAS Logging "
+ "registration is in progress\n");
+ return -EBUSY;
+ }
+
+ /* For disable logging: stop the logs and free the DMA.
+ * For ras_fwlog_buffsize size change we still need to free and
+ * reallocate the DMA in lpfc_sli4_ras_fwlog_init.
+ */
+ phba->cfg_ras_fwlog_buffsize = val;
+ if (state == ACTIVE) {
+ lpfc_ras_stop_fwlog(phba);
+ lpfc_sli4_ras_dma_free(phba);
+ }
+
+ lpfc_sli4_ras_init(phba);
+ if (phba->ras_fwlog.ras_enabled)
+ ret = lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
+ LPFC_RAS_ENABLE_LOGGING);
+ return ret;
+}
+
+lpfc_param_store(ras_fwlog_buffsize);
+static DEVICE_ATTR_RW(lpfc_ras_fwlog_buffsize);
/*
* lpfc_ras_fwlog_level: Firmware logging verbosity level
@@ -6030,6 +6219,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_cq_poll_threshold,
&dev_attr_lpfc_cq_max_proc_limit,
&dev_attr_lpfc_fcp_cpu_map,
+ &dev_attr_lpfc_fcp_mq_threshold,
&dev_attr_lpfc_hdw_queue,
&dev_attr_lpfc_irq_chann,
&dev_attr_lpfc_suppress_rsp,
@@ -6059,8 +6249,9 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_sriov_nr_virtfn,
&dev_attr_lpfc_req_fw_upgrade,
&dev_attr_lpfc_suppress_link_up,
- &dev_attr_lpfc_iocb_cnt,
&dev_attr_iocb_hw,
+ &dev_attr_pls,
+ &dev_attr_pt,
&dev_attr_txq_hw,
&dev_attr_txcmplq_hw,
&dev_attr_lpfc_fips_level,
@@ -6845,10 +7036,31 @@ lpfc_get_starget_port_name(struct scsi_target *starget)
static void
lpfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
{
+ struct lpfc_rport_data *rdata = rport->dd_data;
+ struct lpfc_nodelist *ndlp = rdata->pnode;
+#if (IS_ENABLED(CONFIG_NVME_FC))
+ struct lpfc_nvme_rport *nrport = NULL;
+#endif
+
if (timeout)
rport->dev_loss_tmo = timeout;
else
rport->dev_loss_tmo = 1;
+
+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+ dev_info(&rport->dev, "Cannot find remote node to "
+ "set rport dev loss tmo, port_id x%x\n",
+ rport->port_id);
+ return;
+ }
+
+#if (IS_ENABLED(CONFIG_NVME_FC))
+ nrport = lpfc_ndlp_get_nrport(ndlp);
+
+ if (nrport && nrport->remoteport)
+ nvme_fc_set_remoteport_devloss(nrport->remoteport,
+ rport->dev_loss_tmo);
+#endif
}
/**
@@ -7045,12 +7257,39 @@ struct fc_function_template lpfc_vport_transport_functions = {
};
/**
+ * lpfc_get_hba_function_mode - Used to determine the HBA function in FCoE
+ * Mode
+ * @phba: lpfc_hba pointer.
+ **/
+static void
+lpfc_get_hba_function_mode(struct lpfc_hba *phba)
+{
+ /* If the adapter supports FCoE mode */
+ switch (phba->pcidev->device) {
+ case PCI_DEVICE_ID_SKYHAWK:
+ case PCI_DEVICE_ID_SKYHAWK_VF:
+ case PCI_DEVICE_ID_LANCER_FCOE:
+ case PCI_DEVICE_ID_LANCER_FCOE_VF:
+ case PCI_DEVICE_ID_ZEPHYR_DCSP:
+ case PCI_DEVICE_ID_HORNET:
+ case PCI_DEVICE_ID_TIGERSHARK:
+ case PCI_DEVICE_ID_TOMCAT:
+ phba->hba_flag |= HBA_FCOE_MODE;
+ break;
+ default:
+ /* for others, clear the flag */
+ phba->hba_flag &= ~HBA_FCOE_MODE;
+ }
+}
+
+/**
* lpfc_get_cfgparam - Used during probe_one to init the adapter structure
* @phba: lpfc_hba pointer.
**/
void
lpfc_get_cfgparam(struct lpfc_hba *phba)
{
+ lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
lpfc_fcp_io_sched_init(phba, lpfc_fcp_io_sched);
lpfc_ns_query_init(phba, lpfc_ns_query);
lpfc_fcp2_no_tgt_reset_init(phba, lpfc_fcp2_no_tgt_reset);
@@ -7100,8 +7339,18 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
else
phba->cfg_poll = lpfc_poll;
- if (phba->cfg_enable_bg)
+ /* Get the function mode */
+ lpfc_get_hba_function_mode(phba);
+
+ /* BlockGuard allowed for FC only. */
+ if (phba->cfg_enable_bg && phba->hba_flag & HBA_FCOE_MODE) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "0581 BlockGuard feature not supported\n");
+ /* If set, clear the BlockGuard support param */
+ phba->cfg_enable_bg = 0;
+ } else if (phba->cfg_enable_bg) {
phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
+ }
lpfc_suppress_rsp_init(phba, lpfc_suppress_rsp);
@@ -7112,6 +7361,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
/* Initialize first burst. Target vs Initiator are different. */
lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb);
lpfc_nvmet_fb_size_init(phba, lpfc_nvmet_fb_size);
+ lpfc_fcp_mq_threshold_init(phba, lpfc_fcp_mq_threshold);
lpfc_hdw_queue_init(phba, lpfc_hdw_queue);
lpfc_irq_chann_init(phba, lpfc_irq_chann);
lpfc_enable_bbcr_init(phba, lpfc_enable_bbcr);
@@ -7146,12 +7396,10 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
phba->cfg_soft_wwpn = 0L;
lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
- lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
lpfc_aer_support_init(phba, lpfc_aer_support);
lpfc_sriov_nr_virtfn_init(phba, lpfc_sriov_nr_virtfn);
lpfc_request_firmware_upgrade_init(phba, lpfc_req_fw_upgrade);
lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up);
- lpfc_iocb_cnt_init(phba, lpfc_iocb_cnt);
lpfc_delay_discovery_init(phba, lpfc_delay_discovery);
lpfc_sli_mode_init(phba, lpfc_sli_mode);
phba->cfg_enable_dss = 1;
@@ -7160,16 +7408,6 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_ras_fwlog_level_init(phba, lpfc_ras_fwlog_level);
lpfc_ras_fwlog_func_init(phba, lpfc_ras_fwlog_func);
-
- /* If the NVME FC4 type is enabled, scale the sg_seg_cnt to
- * accommodate 512K and 1M IOs in a single nvme buf and supply
- * enough NVME LS iocb buffers for larger connectivity counts.
- */
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
- phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
- phba->cfg_iocb_cnt = 5;
- }
-
return;
}
@@ -7207,11 +7445,11 @@ lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
}
if (!phba->cfg_nvmet_mrq)
- phba->cfg_nvmet_mrq = phba->cfg_irq_chann;
+ phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
/* Adjust lpfc_nvmet_mrq to avoid running out of WQE slots */
- if (phba->cfg_nvmet_mrq > phba->cfg_irq_chann) {
- phba->cfg_nvmet_mrq = phba->cfg_irq_chann;
+ if (phba->cfg_nvmet_mrq > phba->cfg_hdw_queue) {
+ phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
"6018 Adjust lpfc_nvmet_mrq to %d\n",
phba->cfg_nvmet_mrq);
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index b7216d694bff..0ea03ae93d91 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -1040,7 +1040,7 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
if (!dmabuf) {
lpfc_printf_log(phba, KERN_ERR,
LOG_LIBDFC, "2616 No dmabuf "
- "found for iocbq 0x%p\n",
+ "found for iocbq x%px\n",
iocbq);
kfree(evt_dat->data);
kfree(evt_dat);
@@ -1276,9 +1276,7 @@ lpfc_bsg_hba_set_event(struct bsg_job *job)
return 0; /* call job done later */
job_error:
- if (dd_data != NULL)
- kfree(dd_data);
-
+ kfree(dd_data);
job->dd_data = NULL;
return rc;
}
@@ -1571,7 +1569,6 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct bsg_job *job, uint32_t tag,
"2722 Xmit CT response on exchange x%x Data: x%x x%x x%x\n",
icmd->ulpContext, icmd->ulpIoTag, tag, phba->link_state);
- ctiocb->iocb_cmpl = NULL;
ctiocb->iocb_flag |= LPFC_IO_LIBDFC;
ctiocb->vport = phba->pport;
ctiocb->context1 = dd_data;
@@ -4492,12 +4489,6 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job,
phba->mbox_ext_buf_ctx.seqNum++;
nemb_tp = phba->mbox_ext_buf_ctx.nembType;
- dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
- if (!dd_data) {
- rc = -ENOMEM;
- goto job_error;
- }
-
pbuf = (uint8_t *)dmabuf->virt;
size = job->request_payload.payload_len;
sg_copy_to_buffer(job->request_payload.sg_list,
@@ -4534,6 +4525,13 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job,
"2968 SLI_CONFIG ext-buffer wr all %d "
"ebuffers received\n",
phba->mbox_ext_buf_ctx.numBuf);
+
+ dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+ if (!dd_data) {
+ rc = -ENOMEM;
+ goto job_error;
+ }
+
/* mailbox command structure for base driver */
pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmboxq) {
@@ -4582,6 +4580,8 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job,
return SLI_CONFIG_HANDLED;
job_error:
+ if (pmboxq)
+ mempool_free(pmboxq, phba->mbox_mem_pool);
lpfc_bsg_dma_page_free(phba, dmabuf);
kfree(dd_data);
@@ -5438,10 +5438,12 @@ lpfc_bsg_get_ras_config(struct bsg_job *job)
bsg_reply->reply_data.vendor_reply.vendor_rsp;
/* Current logging state */
- if (ras_fwlog->ras_active == true)
+ spin_lock_irq(&phba->hbalock);
+ if (ras_fwlog->state == ACTIVE)
ras_reply->state = LPFC_RASLOG_STATE_RUNNING;
else
ras_reply->state = LPFC_RASLOG_STATE_STOPPED;
+ spin_unlock_irq(&phba->hbalock);
ras_reply->log_level = phba->ras_fwlog.fw_loglevel;
ras_reply->log_buff_sz = phba->cfg_ras_fwlog_buffsize;
@@ -5451,7 +5453,9 @@ ras_job_error:
bsg_reply->result = rc;
/* complete the job back to userspace */
- bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len);
+ if (!rc)
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rc;
}
@@ -5496,10 +5500,13 @@ lpfc_bsg_set_ras_config(struct bsg_job *job)
if (action == LPFC_RASACTION_STOP_LOGGING) {
/* Check if already disabled */
- if (ras_fwlog->ras_active == false) {
+ spin_lock_irq(&phba->hbalock);
+ if (ras_fwlog->state != ACTIVE) {
+ spin_unlock_irq(&phba->hbalock);
rc = -ESRCH;
goto ras_job_error;
}
+ spin_unlock_irq(&phba->hbalock);
/* Disable logging */
lpfc_ras_stop_fwlog(phba);
@@ -5510,8 +5517,10 @@ lpfc_bsg_set_ras_config(struct bsg_job *job)
* FW-logging with new log-level. Return status
* "Logging already Running" to caller.
**/
- if (ras_fwlog->ras_active)
+ spin_lock_irq(&phba->hbalock);
+ if (ras_fwlog->state != INACTIVE)
action_status = -EINPROGRESS;
+ spin_unlock_irq(&phba->hbalock);
/* Enable logging */
rc = lpfc_sli4_ras_fwlog_init(phba, log_level,
@@ -5530,8 +5539,9 @@ ras_job_error:
bsg_reply->result = rc;
/* complete the job back to userspace */
- bsg_job_done(job, bsg_reply->result,
- bsg_reply->reply_payload_rcv_len);
+ if (!rc)
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rc;
}
@@ -5591,7 +5601,9 @@ ras_job_error:
bsg_reply->result = rc;
/* complete the job back to userspace */
- bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len);
+ if (!rc)
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rc;
}
@@ -5624,10 +5636,13 @@ lpfc_bsg_get_ras_fwlog(struct bsg_job *job)
goto ras_job_error;
/* Logging to be stopped before reading */
- if (ras_fwlog->ras_active == true) {
+ spin_lock_irq(&phba->hbalock);
+ if (ras_fwlog->state == ACTIVE) {
+ spin_unlock_irq(&phba->hbalock);
rc = -EINPROGRESS;
goto ras_job_error;
}
+ spin_unlock_irq(&phba->hbalock);
if (job->request_len <
sizeof(struct fc_bsg_request) +
@@ -5673,7 +5688,9 @@ lpfc_bsg_get_ras_fwlog(struct bsg_job *job)
ras_job_error:
bsg_reply->result = rc;
- bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len);
+ if (!rc)
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rc;
}
@@ -5744,8 +5761,9 @@ lpfc_get_trunk_info(struct bsg_job *job)
phba->sli4_hba.link_state.logical_speed / 1000;
job_error:
bsg_reply->result = rc;
- bsg_job_done(job, bsg_reply->result,
- bsg_reply->reply_payload_rcv_len);
+ if (!rc)
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rc;
}
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 68e9f96242d3..25d3dd39bc05 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -180,7 +180,7 @@ int lpfc_issue_gidft(struct lpfc_vport *vport);
int lpfc_get_gidft_type(struct lpfc_vport *vport, struct lpfc_iocbq *iocbq);
int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t);
int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int, uint32_t);
-void lpfc_fdmi_num_disc_check(struct lpfc_vport *);
+void lpfc_fdmi_change_check(struct lpfc_vport *vport);
void lpfc_delayed_disc_tmo(struct timer_list *);
void lpfc_delayed_disc_timeout_handler(struct lpfc_vport *);
@@ -215,6 +215,12 @@ irqreturn_t lpfc_sli_fp_intr_handler(int, void *);
irqreturn_t lpfc_sli4_intr_handler(int, void *);
irqreturn_t lpfc_sli4_hba_intr_handler(int, void *);
+void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba);
+int lpfc_sli4_poll_eq(struct lpfc_queue *q, uint8_t path);
+void lpfc_sli4_poll_hbtimer(struct timer_list *t);
+void lpfc_sli4_start_polling(struct lpfc_queue *q);
+void lpfc_sli4_stop_polling(struct lpfc_queue *q);
+
void lpfc_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_sli4_swap_str(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_config_ring(struct lpfc_hba *, int, LPFC_MBOXQ_t *);
@@ -326,7 +332,7 @@ void lpfc_sli_bemem_bcopy(void *, void *, uint32_t);
void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *);
void lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba);
void lpfc_sli_hba_iocb_abort(struct lpfc_hba *);
-void lpfc_sli_flush_fcp_rings(struct lpfc_hba *);
+void lpfc_sli_flush_io_rings(struct lpfc_hba *phba);
int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *,
struct lpfc_dmabuf *);
struct lpfc_dmabuf *lpfc_sli_ringpostbuf_get(struct lpfc_hba *,
@@ -433,16 +439,6 @@ int lpfc_sli4_get_allocated_extnts(struct lpfc_hba *, uint16_t,
int lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *, uint16_t,
uint16_t *, uint16_t *);
-/* externs BlockGuard */
-extern char *_dump_buf_data;
-extern unsigned long _dump_buf_data_order;
-extern char *_dump_buf_dif;
-extern unsigned long _dump_buf_dif_order;
-extern spinlock_t _dump_buf_lock;
-extern int _dump_buf_done;
-extern spinlock_t pgcnt_lock;
-extern unsigned int pgcnt;
-
/* Interface exported by fabric iocb scheduler */
void lpfc_fabric_abort_nport(struct lpfc_nodelist *);
void lpfc_fabric_abort_hba(struct lpfc_hba *);
@@ -595,6 +591,8 @@ void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *ncmd,
struct lpfc_sli4_hdw_queue *qp);
void lpfc_nvme_cmd_template(void);
void lpfc_nvmet_cmd_template(void);
+void lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn);
+void lpfc_nvme_prep_abort_wqe(struct lpfc_iocbq *pwqeq, u16 xritag, u8 opt);
extern int lpfc_enable_nvmet_cnt;
extern unsigned long long lpfc_enable_nvmet[];
extern int lpfc_no_hba_reset_cnt;
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index ec72c39997d2..58b35a1442c1 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -462,6 +462,7 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
struct lpfc_nodelist *ndlp;
if ((vport->port_type != LPFC_NPIV_PORT) ||
+ (fc4_type == FC_TYPE_FCP) ||
!(vport->ct_flags & FC_CT_RFF_ID) || !vport->cfg_restrict_login) {
ndlp = lpfc_setup_disc_node(vport, Did);
@@ -480,10 +481,20 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0238 Process x%06x NameServer Rsp "
- "Data: x%x x%x x%x x%x\n", Did,
+ "Data: x%x x%x x%x x%x x%x\n", Did,
ndlp->nlp_flag, ndlp->nlp_fc4_type,
- vport->fc_flag,
+ ndlp->nlp_state, vport->fc_flag,
vport->fc_rscn_id_cnt);
+
+ /* if ndlp needs to be discovered and prior
+ * state of ndlp hit devloss, change state to
+ * allow rediscovery.
+ */
+ if (ndlp->nlp_flag & NLP_NPR_2B_DISC &&
+ ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
+ lpfc_nlp_set_state(vport, ndlp,
+ NLP_STE_NPR_NODE);
+ }
} else {
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
"Skip1 GID_FTrsp: did:x%x flg:x%x cnt:%d",
@@ -491,9 +502,9 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0239 Skip x%06x NameServer Rsp "
- "Data: x%x x%x\n", Did,
- vport->fc_flag,
- vport->fc_rscn_id_cnt);
+ "Data: x%x x%x %p\n",
+ Did, vport->fc_flag,
+ vport->fc_rscn_id_cnt, ndlp);
}
} else {
if (!(vport->fc_flag & FC_RSCN_MODE) ||
@@ -751,9 +762,13 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (CTrsp->CommandResponse.bits.CmdRsp ==
cpu_to_be16(SLI_CT_RESPONSE_FS_ACC)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "0208 NameServer Rsp Data: x%x x%x\n",
+ "0208 NameServer Rsp Data: x%x x%x "
+ "x%x x%x sz x%x\n",
vport->fc_flag,
- CTreq->un.gid.Fc4Type);
+ CTreq->un.gid.Fc4Type,
+ vport->num_disc_nodes,
+ vport->gidft_inp,
+ irsp->un.genreq64.bdl.bdeSize);
lpfc_ns_rsp(vport,
outp,
@@ -814,6 +829,11 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
vport->gidft_inp--;
}
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "4216 GID_FT cmpl inp %d disc %d\n",
+ vport->gidft_inp, vport->num_disc_nodes);
+
/* Link up / RSCN discovery */
if ((vport->num_disc_nodes == 0) &&
(vport->gidft_inp == 0)) {
@@ -943,9 +963,13 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (CTrsp->CommandResponse.bits.CmdRsp ==
cpu_to_be16(SLI_CT_RESPONSE_FS_ACC)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "4105 NameServer Rsp Data: x%x x%x\n",
+ "4105 NameServer Rsp Data: x%x x%x "
+ "x%x x%x sz x%x\n",
vport->fc_flag,
- CTreq->un.gid.Fc4Type);
+ CTreq->un.gid.Fc4Type,
+ vport->num_disc_nodes,
+ vport->gidft_inp,
+ irsp->un.genreq64.bdl.bdeSize);
lpfc_ns_rsp(vport,
outp,
@@ -1007,6 +1031,11 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
vport->gidft_inp--;
}
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "6450 GID_PT cmpl inp %d disc %d\n",
+ vport->gidft_inp, vport->num_disc_nodes);
+
/* Link up / RSCN discovery */
if ((vport->num_disc_nodes == 0) &&
(vport->gidft_inp == 0)) {
@@ -1141,6 +1170,11 @@ out:
/* Link up / RSCN discovery */
if (vport->num_disc_nodes)
vport->num_disc_nodes--;
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "6451 GFF_ID cmpl inp %d disc %d\n",
+ vport->gidft_inp, vport->num_disc_nodes);
+
if (vport->num_disc_nodes == 0) {
/*
* The driver has cycled through all Nports in the RSCN payload.
@@ -1209,14 +1243,34 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (fc4_data_1 & LPFC_FC4_TYPE_BITMASK)
ndlp->nlp_fc4_type |= NLP_FC4_NVME;
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "3064 Setting ndlp %p, DID x%06x with "
- "FC4 x%08x, Data: x%08x x%08x\n",
+ "3064 Setting ndlp x%px, DID x%06x "
+ "with FC4 x%08x, Data: x%08x x%08x "
+ "%d\n",
ndlp, did, ndlp->nlp_fc4_type,
- FC_TYPE_FCP, FC_TYPE_NVME);
- ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
-
- lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
- lpfc_issue_els_prli(vport, ndlp, 0);
+ FC_TYPE_FCP, FC_TYPE_NVME,
+ ndlp->nlp_state);
+
+ if (ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE &&
+ ndlp->nlp_fc4_type) {
+ ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
+
+ lpfc_nlp_set_state(vport, ndlp,
+ NLP_STE_PRLI_ISSUE);
+ lpfc_issue_els_prli(vport, ndlp, 0);
+ } else if (!ndlp->nlp_fc4_type) {
+ /* If fc4 type is still unknown, then LOGO */
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_DISCOVERY,
+ "6443 Sending LOGO ndlp x%px,"
+ "DID x%06x with fc4_type: "
+ "x%08x, state: %d\n",
+ ndlp, did, ndlp->nlp_fc4_type,
+ ndlp->nlp_state);
+ lpfc_issue_els_logo(vport, ndlp, 0);
+ ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
+ lpfc_nlp_set_state(vport, ndlp,
+ NLP_STE_NPR_NODE);
+ }
}
} else
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
@@ -1439,33 +1493,35 @@ int
lpfc_vport_symbolic_node_name(struct lpfc_vport *vport, char *symbol,
size_t size)
{
- char fwrev[FW_REV_STR_SIZE];
- int n;
+ char fwrev[FW_REV_STR_SIZE] = {0};
+ char tmp[MAXHOSTNAMELEN] = {0};
- lpfc_decode_firmware_rev(vport->phba, fwrev, 0);
+ memset(symbol, 0, size);
- n = scnprintf(symbol, size, "Emulex %s", vport->phba->ModelName);
- if (size < n)
- return n;
+ scnprintf(tmp, sizeof(tmp), "Emulex %s", vport->phba->ModelName);
+ if (strlcat(symbol, tmp, size) >= size)
+ goto buffer_done;
- n += scnprintf(symbol + n, size - n, " FV%s", fwrev);
- if (size < n)
- return n;
+ lpfc_decode_firmware_rev(vport->phba, fwrev, 0);
+ scnprintf(tmp, sizeof(tmp), " FV%s", fwrev);
+ if (strlcat(symbol, tmp, size) >= size)
+ goto buffer_done;
- n += scnprintf(symbol + n, size - n, " DV%s.",
- lpfc_release_version);
- if (size < n)
- return n;
+ scnprintf(tmp, sizeof(tmp), " DV%s", lpfc_release_version);
+ if (strlcat(symbol, tmp, size) >= size)
+ goto buffer_done;
- n += scnprintf(symbol + n, size - n, " HN:%s.",
- init_utsname()->nodename);
- if (size < n)
- return n;
+ scnprintf(tmp, sizeof(tmp), " HN:%s", vport->phba->os_host_name);
+ if (strlcat(symbol, tmp, size) >= size)
+ goto buffer_done;
/* Note :- OS name is "Linux" */
- n += scnprintf(symbol + n, size - n, " OS:%s",
- init_utsname()->sysname);
- return n;
+ scnprintf(tmp, sizeof(tmp), " OS:%s", init_utsname()->sysname);
+ strlcat(symbol, tmp, size);
+
+buffer_done:
+ return strnlen(symbol, size);
+
}
static uint32_t
@@ -1830,6 +1886,12 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
switch ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK)) {
case IOERR_SLI_ABORTED:
+ case IOERR_SLI_DOWN:
+ /* Driver aborted this IO. No retry as error
+ * is likely Offline->Online or some adapter
+ * error. Recovery will try again.
+ */
+ break;
case IOERR_ABORT_IN_PROGRESS:
case IOERR_SEQUENCE_TIMEOUT:
case IOERR_ILLEGAL_FRAME:
@@ -1938,14 +2000,16 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/**
- * lpfc_fdmi_num_disc_check - Check how many mapped NPorts we are connected to
+ * lpfc_fdmi_change_check - Check for changed FDMI parameters
* @vport: pointer to a host virtual N_Port data structure.
*
- * Called from hbeat timeout routine to check if the number of discovered
- * ports has changed. If so, re-register thar port Attribute.
+ * Check how many mapped NPorts we are connected to
+ * Check if our hostname changed
+ * Called from hbeat timeout routine to check if any FDMI parameters
+ * changed. If so, re-register those Attributes.
*/
void
-lpfc_fdmi_num_disc_check(struct lpfc_vport *vport)
+lpfc_fdmi_change_check(struct lpfc_vport *vport)
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *ndlp;
@@ -1958,17 +2022,41 @@ lpfc_fdmi_num_disc_check(struct lpfc_vport *vport)
if (!(vport->fc_flag & FC_FABRIC))
return;
+ ndlp = lpfc_findnode_did(vport, FDMI_DID);
+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+ return;
+
+ /* Check if system hostname changed */
+ if (strcmp(phba->os_host_name, init_utsname()->nodename)) {
+ memset(phba->os_host_name, 0, sizeof(phba->os_host_name));
+ scnprintf(phba->os_host_name, sizeof(phba->os_host_name), "%s",
+ init_utsname()->nodename);
+ lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
+
+ /* Since this effects multiple HBA and PORT attributes, we need
+ * de-register and go thru the whole FDMI registration cycle.
+ * DHBA -> DPRT -> RHBA -> RPA (physical port)
+ * DPRT -> RPRT (vports)
+ */
+ if (vport->port_type == LPFC_PHYSICAL_PORT)
+ lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0);
+ else
+ lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT, 0);
+
+ /* Since this code path registers all the port attributes
+ * we can just return without further checking.
+ */
+ return;
+ }
+
if (!(vport->fdmi_port_mask & LPFC_FDMI_PORT_ATTR_num_disc))
return;
+ /* Check if the number of mapped NPorts changed */
cnt = lpfc_find_map_node(vport);
if (cnt == vport->fdmi_num_disc)
return;
- ndlp = lpfc_findnode_did(vport, FDMI_DID);
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
- return;
-
if (vport->port_type == LPFC_PHYSICAL_PORT) {
lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPA,
LPFC_FDMI_PORT_ATTR_num_disc);
@@ -2515,7 +2603,7 @@ lpfc_fdmi_port_attr_max_frame(struct lpfc_vport *vport,
ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
hsp = (struct serv_parm *)&vport->fc_sparam;
- ae->un.AttrInt = (((uint32_t) hsp->cmn.bbRcvSizeMsb) << 8) |
+ ae->un.AttrInt = (((uint32_t) hsp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
(uint32_t) hsp->cmn.bbRcvSizeLsb;
ae->un.AttrInt = cpu_to_be32(ae->un.AttrInt);
size = FOURBYTES + sizeof(uint32_t);
@@ -2556,8 +2644,8 @@ lpfc_fdmi_port_attr_host_name(struct lpfc_vport *vport,
ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
memset(ae, 0, 256);
- snprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "%s",
- init_utsname()->nodename);
+ scnprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "%s",
+ vport->phba->os_host_name);
len = strnlen(ae->un.AttrString, sizeof(ae->un.AttrString));
len += (len & 3) ? (4 - (len & 3)) : 4;
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 1ee857d9d165..819335b16c2e 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -31,6 +31,7 @@
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
+#include <linux/vmalloc.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
@@ -361,7 +362,7 @@ lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size)
phys = ((uint64_t)hbq_buf->dbuf.phys & 0xffffffff);
if (phys == le32_to_cpu(hbqe->bde.addrLow)) {
len += scnprintf(buf+len, size-len,
- "Buf%d: %p %06x\n", i,
+ "Buf%d: x%px %06x\n", i,
hbq_buf->dbuf.virt, hbq_buf->tag);
found = 1;
break;
@@ -416,8 +417,7 @@ lpfc_debugfs_commonxripools_data(struct lpfc_hba *phba, char *buf, int size)
qp = &phba->sli4_hba.hdwq[lpfc_debugfs_last_xripool];
len += scnprintf(buf + len, size - len, "HdwQ %d Info ", i);
- spin_lock_irqsave(&qp->abts_scsi_buf_list_lock, iflag);
- spin_lock(&qp->abts_nvme_buf_list_lock);
+ spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
spin_lock(&qp->io_buf_list_get_lock);
spin_lock(&qp->io_buf_list_put_lock);
out = qp->total_io_bufs - (qp->get_io_bufs + qp->put_io_bufs +
@@ -430,8 +430,7 @@ lpfc_debugfs_commonxripools_data(struct lpfc_hba *phba, char *buf, int size)
qp->abts_nvme_io_bufs, out);
spin_unlock(&qp->io_buf_list_put_lock);
spin_unlock(&qp->io_buf_list_get_lock);
- spin_unlock(&qp->abts_nvme_buf_list_lock);
- spin_unlock_irqrestore(&qp->abts_scsi_buf_list_lock, iflag);
+ spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
lpfc_debugfs_last_xripool++;
if (lpfc_debugfs_last_xripool >= phba->cfg_hdw_queue)
@@ -533,9 +532,7 @@ lpfc_debugfs_multixripools_data(struct lpfc_hba *phba, char *buf, int size)
continue;
pbl_pool = &multixri_pool->pbl_pool;
pvt_pool = &multixri_pool->pvt_pool;
- txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt;
- if (qp->nvme_wq)
- txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt;
+ txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
scnprintf(tmp, sizeof(tmp),
"%03d: %4d %4d %4d %4d | %10d %10d ",
@@ -2082,8 +2079,49 @@ lpfc_debugfs_lockstat_write(struct file *file, const char __user *buf,
}
#endif
+static int lpfc_debugfs_ras_log_data(struct lpfc_hba *phba,
+ char *buffer, int size)
+{
+ int copied = 0;
+ struct lpfc_dmabuf *dmabuf, *next;
+
+ memset(buffer, 0, size);
+
+ spin_lock_irq(&phba->hbalock);
+ if (phba->ras_fwlog.state != ACTIVE) {
+ spin_unlock_irq(&phba->hbalock);
+ return -EINVAL;
+ }
+ spin_unlock_irq(&phba->hbalock);
+
+ list_for_each_entry_safe(dmabuf, next,
+ &phba->ras_fwlog.fwlog_buff_list, list) {
+ /* Check if copying will go over size and a '\0' char */
+ if ((copied + LPFC_RAS_MAX_ENTRY_SIZE) >= (size - 1)) {
+ memcpy(buffer + copied, dmabuf->virt,
+ size - copied - 1);
+ copied += size - copied - 1;
+ break;
+ }
+ memcpy(buffer + copied, dmabuf->virt, LPFC_RAS_MAX_ENTRY_SIZE);
+ copied += LPFC_RAS_MAX_ENTRY_SIZE;
+ }
+ return copied;
+}
+
+static int
+lpfc_debugfs_ras_log_release(struct inode *inode, struct file *file)
+{
+ struct lpfc_debug *debug = file->private_data;
+
+ vfree(debug->buffer);
+ kfree(debug);
+
+ return 0;
+}
+
/**
- * lpfc_debugfs_dumpHBASlim_open - Open the Dump HBA SLIM debugfs buffer
+ * lpfc_debugfs_ras_log_open - Open the RAS log debugfs buffer
* @inode: The inode pointer that contains a vport pointer.
* @file: The file pointer to attach the log output.
*
@@ -2098,34 +2136,48 @@ lpfc_debugfs_lockstat_write(struct file *file, const char __user *buf,
* error value.
**/
static int
-lpfc_debugfs_dumpHBASlim_open(struct inode *inode, struct file *file)
+lpfc_debugfs_ras_log_open(struct inode *inode, struct file *file)
{
struct lpfc_hba *phba = inode->i_private;
struct lpfc_debug *debug;
+ int size;
int rc = -ENOMEM;
+ spin_lock_irq(&phba->hbalock);
+ if (phba->ras_fwlog.state != ACTIVE) {
+ spin_unlock_irq(&phba->hbalock);
+ rc = -EINVAL;
+ goto out;
+ }
+ spin_unlock_irq(&phba->hbalock);
debug = kmalloc(sizeof(*debug), GFP_KERNEL);
if (!debug)
goto out;
- /* Round to page boundary */
- debug->buffer = kmalloc(LPFC_DUMPHBASLIM_SIZE, GFP_KERNEL);
- if (!debug->buffer) {
- kfree(debug);
- goto out;
- }
+ size = LPFC_RAS_MIN_BUFF_POST_SIZE * phba->cfg_ras_fwlog_buffsize;
+ debug->buffer = vmalloc(size);
+ if (!debug->buffer)
+ goto free_debug;
- debug->len = lpfc_debugfs_dumpHBASlim_data(phba, debug->buffer,
- LPFC_DUMPHBASLIM_SIZE);
+ debug->len = lpfc_debugfs_ras_log_data(phba, debug->buffer, size);
+ if (debug->len < 0) {
+ rc = -EINVAL;
+ goto free_buffer;
+ }
file->private_data = debug;
- rc = 0;
+ return 0;
+
+free_buffer:
+ vfree(debug->buffer);
+free_debug:
+ kfree(debug);
out:
return rc;
}
/**
- * lpfc_debugfs_dumpHostSlim_open - Open the Dump Host SLIM debugfs buffer
+ * lpfc_debugfs_dumpHBASlim_open - Open the Dump HBA SLIM debugfs buffer
* @inode: The inode pointer that contains a vport pointer.
* @file: The file pointer to attach the log output.
*
@@ -2140,7 +2192,7 @@ out:
* error value.
**/
static int
-lpfc_debugfs_dumpHostSlim_open(struct inode *inode, struct file *file)
+lpfc_debugfs_dumpHBASlim_open(struct inode *inode, struct file *file)
{
struct lpfc_hba *phba = inode->i_private;
struct lpfc_debug *debug;
@@ -2151,44 +2203,14 @@ lpfc_debugfs_dumpHostSlim_open(struct inode *inode, struct file *file)
goto out;
/* Round to page boundary */
- debug->buffer = kmalloc(LPFC_DUMPHOSTSLIM_SIZE, GFP_KERNEL);
- if (!debug->buffer) {
- kfree(debug);
- goto out;
- }
-
- debug->len = lpfc_debugfs_dumpHostSlim_data(phba, debug->buffer,
- LPFC_DUMPHOSTSLIM_SIZE);
- file->private_data = debug;
-
- rc = 0;
-out:
- return rc;
-}
-
-static int
-lpfc_debugfs_dumpData_open(struct inode *inode, struct file *file)
-{
- struct lpfc_debug *debug;
- int rc = -ENOMEM;
-
- if (!_dump_buf_data)
- return -EBUSY;
-
- debug = kmalloc(sizeof(*debug), GFP_KERNEL);
- if (!debug)
- goto out;
-
- /* Round to page boundary */
- pr_err("9059 BLKGRD: %s: _dump_buf_data=0x%p\n",
- __func__, _dump_buf_data);
- debug->buffer = _dump_buf_data;
+ debug->buffer = kmalloc(LPFC_DUMPHBASLIM_SIZE, GFP_KERNEL);
if (!debug->buffer) {
kfree(debug);
goto out;
}
- debug->len = (1 << _dump_buf_data_order) << PAGE_SHIFT;
+ debug->len = lpfc_debugfs_dumpHBASlim_data(phba, debug->buffer,
+ LPFC_DUMPHBASLIM_SIZE);
file->private_data = debug;
rc = 0;
@@ -2196,29 +2218,41 @@ out:
return rc;
}
+/**
+ * lpfc_debugfs_dumpHostSlim_open - Open the Dump Host SLIM debugfs buffer
+ * @inode: The inode pointer that contains a vport pointer.
+ * @file: The file pointer to attach the log output.
+ *
+ * Description:
+ * This routine is the entry point for the debugfs open file operation. It gets
+ * the vport from the i_private field in @inode, allocates the necessary buffer
+ * for the log, fills the buffer from the in-memory log for this vport, and then
+ * returns a pointer to that log in the private_data field in @file.
+ *
+ * Returns:
+ * This function returns zero if successful. On error it will return a negative
+ * error value.
+ **/
static int
-lpfc_debugfs_dumpDif_open(struct inode *inode, struct file *file)
+lpfc_debugfs_dumpHostSlim_open(struct inode *inode, struct file *file)
{
+ struct lpfc_hba *phba = inode->i_private;
struct lpfc_debug *debug;
int rc = -ENOMEM;
- if (!_dump_buf_dif)
- return -EBUSY;
-
debug = kmalloc(sizeof(*debug), GFP_KERNEL);
if (!debug)
goto out;
/* Round to page boundary */
- pr_err("9060 BLKGRD: %s: _dump_buf_dif=0x%p file=%pD\n",
- __func__, _dump_buf_dif, file);
- debug->buffer = _dump_buf_dif;
+ debug->buffer = kmalloc(LPFC_DUMPHOSTSLIM_SIZE, GFP_KERNEL);
if (!debug->buffer) {
kfree(debug);
goto out;
}
- debug->len = (1 << _dump_buf_dif_order) << PAGE_SHIFT;
+ debug->len = lpfc_debugfs_dumpHostSlim_data(phba, debug->buffer,
+ LPFC_DUMPHOSTSLIM_SIZE);
file->private_data = debug;
rc = 0;
@@ -2227,29 +2261,6 @@ out:
}
static ssize_t
-lpfc_debugfs_dumpDataDif_write(struct file *file, const char __user *buf,
- size_t nbytes, loff_t *ppos)
-{
- /*
- * The Data/DIF buffers only save one failing IO
- * The write op is used as a reset mechanism after an IO has
- * already been saved to the next one can be saved
- */
- spin_lock(&_dump_buf_lock);
-
- memset((void *)_dump_buf_data, 0,
- ((1 << PAGE_SHIFT) << _dump_buf_data_order));
- memset((void *)_dump_buf_dif, 0,
- ((1 << PAGE_SHIFT) << _dump_buf_dif_order));
-
- _dump_buf_done = 0;
-
- spin_unlock(&_dump_buf_lock);
-
- return nbytes;
-}
-
-static ssize_t
lpfc_debugfs_dif_err_read(struct file *file, char __user *buf,
size_t nbytes, loff_t *ppos)
{
@@ -2461,17 +2472,6 @@ lpfc_debugfs_release(struct inode *inode, struct file *file)
return 0;
}
-static int
-lpfc_debugfs_dumpDataDif_release(struct inode *inode, struct file *file)
-{
- struct lpfc_debug *debug = file->private_data;
-
- debug->buffer = NULL;
- kfree(debug);
-
- return 0;
-}
-
/**
* lpfc_debugfs_multixripools_write - Clear multi-XRI pools statistics
* @file: The file pointer to read from.
@@ -3786,23 +3786,13 @@ lpfc_idiag_wqs_for_cq(struct lpfc_hba *phba, char *wqtype, char *pbuffer,
int qidx;
for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
- qp = phba->sli4_hba.hdwq[qidx].fcp_wq;
+ qp = phba->sli4_hba.hdwq[qidx].io_wq;
if (qp->assoc_qid != cq_id)
continue;
*len = __lpfc_idiag_print_wq(qp, wqtype, pbuffer, *len);
if (*len >= max_cnt)
return 1;
}
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
- for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
- qp = phba->sli4_hba.hdwq[qidx].nvme_wq;
- if (qp->assoc_qid != cq_id)
- continue;
- *len = __lpfc_idiag_print_wq(qp, wqtype, pbuffer, *len);
- if (*len >= max_cnt)
- return 1;
- }
- }
return 0;
}
@@ -3868,9 +3858,9 @@ lpfc_idiag_cqs_for_eq(struct lpfc_hba *phba, char *pbuffer,
struct lpfc_queue *qp;
int rc;
- qp = phba->sli4_hba.hdwq[eqidx].fcp_cq;
+ qp = phba->sli4_hba.hdwq[eqidx].io_cq;
- *len = __lpfc_idiag_print_cq(qp, "FCP", pbuffer, *len);
+ *len = __lpfc_idiag_print_cq(qp, "IO", pbuffer, *len);
/* Reset max counter */
qp->CQ_max_cqe = 0;
@@ -3878,28 +3868,11 @@ lpfc_idiag_cqs_for_eq(struct lpfc_hba *phba, char *pbuffer,
if (*len >= max_cnt)
return 1;
- rc = lpfc_idiag_wqs_for_cq(phba, "FCP", pbuffer, len,
+ rc = lpfc_idiag_wqs_for_cq(phba, "IO", pbuffer, len,
max_cnt, qp->queue_id);
if (rc)
return 1;
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
- qp = phba->sli4_hba.hdwq[eqidx].nvme_cq;
-
- *len = __lpfc_idiag_print_cq(qp, "NVME", pbuffer, *len);
-
- /* Reset max counter */
- qp->CQ_max_cqe = 0;
-
- if (*len >= max_cnt)
- return 1;
-
- rc = lpfc_idiag_wqs_for_cq(phba, "NVME", pbuffer, len,
- max_cnt, qp->queue_id);
- if (rc)
- return 1;
- }
-
if ((eqidx < phba->cfg_nvmet_mrq) && phba->nvmet_support) {
/* NVMET CQset */
qp = phba->sli4_hba.nvmet_cqset[eqidx];
@@ -4348,7 +4321,7 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
if (phba->sli4_hba.hdwq) {
for (qidx = 0; qidx < phba->cfg_hdw_queue;
qidx++) {
- qp = phba->sli4_hba.hdwq[qidx].fcp_cq;
+ qp = phba->sli4_hba.hdwq[qidx].io_cq;
if (qp && qp->queue_id == queid) {
/* Sanity check */
rc = lpfc_idiag_que_param_check(
@@ -4360,22 +4333,6 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
}
}
}
- /* NVME complete queue */
- if (phba->sli4_hba.hdwq) {
- qidx = 0;
- do {
- qp = phba->sli4_hba.hdwq[qidx].nvme_cq;
- if (qp && qp->queue_id == queid) {
- /* Sanity check */
- rc = lpfc_idiag_que_param_check(
- qp, index, count);
- if (rc)
- goto error_out;
- idiag.ptr_private = qp;
- goto pass_check;
- }
- } while (++qidx < phba->cfg_hdw_queue);
- }
goto error_out;
break;
case LPFC_IDIAG_MQ:
@@ -4419,20 +4376,7 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
if (phba->sli4_hba.hdwq) {
/* FCP/SCSI work queue */
for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
- qp = phba->sli4_hba.hdwq[qidx].fcp_wq;
- if (qp && qp->queue_id == queid) {
- /* Sanity check */
- rc = lpfc_idiag_que_param_check(
- qp, index, count);
- if (rc)
- goto error_out;
- idiag.ptr_private = qp;
- goto pass_check;
- }
- }
- /* NVME work queue */
- for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
- qp = phba->sli4_hba.hdwq[qidx].nvme_wq;
+ qp = phba->sli4_hba.hdwq[qidx].io_wq;
if (qp && qp->queue_id == queid) {
/* Sanity check */
rc = lpfc_idiag_que_param_check(
@@ -5440,6 +5384,15 @@ static const struct file_operations lpfc_debugfs_op_lockstat = {
};
#endif
+#undef lpfc_debugfs_ras_log
+static const struct file_operations lpfc_debugfs_ras_log = {
+ .owner = THIS_MODULE,
+ .open = lpfc_debugfs_ras_log_open,
+ .llseek = lpfc_debugfs_lseek,
+ .read = lpfc_debugfs_read,
+ .release = lpfc_debugfs_ras_log_release,
+};
+
#undef lpfc_debugfs_op_dumpHBASlim
static const struct file_operations lpfc_debugfs_op_dumpHBASlim = {
.owner = THIS_MODULE,
@@ -5508,26 +5461,6 @@ static const struct file_operations lpfc_debugfs_op_cpucheck = {
.release = lpfc_debugfs_release,
};
-#undef lpfc_debugfs_op_dumpData
-static const struct file_operations lpfc_debugfs_op_dumpData = {
- .owner = THIS_MODULE,
- .open = lpfc_debugfs_dumpData_open,
- .llseek = lpfc_debugfs_lseek,
- .read = lpfc_debugfs_read,
- .write = lpfc_debugfs_dumpDataDif_write,
- .release = lpfc_debugfs_dumpDataDif_release,
-};
-
-#undef lpfc_debugfs_op_dumpDif
-static const struct file_operations lpfc_debugfs_op_dumpDif = {
- .owner = THIS_MODULE,
- .open = lpfc_debugfs_dumpDif_open,
- .llseek = lpfc_debugfs_lseek,
- .read = lpfc_debugfs_read,
- .write = lpfc_debugfs_dumpDataDif_write,
- .release = lpfc_debugfs_dumpDataDif_release,
-};
-
#undef lpfc_debugfs_op_dif_err
static const struct file_operations lpfc_debugfs_op_dif_err = {
.owner = THIS_MODULE,
@@ -5630,7 +5563,6 @@ static const struct file_operations lpfc_idiag_op_extAcc = {
.write = lpfc_idiag_extacc_write,
.release = lpfc_idiag_cmd_release,
};
-
#endif
/* lpfc_idiag_mbxacc_dump_bsg_mbox - idiag debugfs dump bsg mailbox command
@@ -5881,6 +5813,19 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
goto debug_failed;
}
+ /* RAS log */
+ snprintf(name, sizeof(name), "ras_log");
+ phba->debug_ras_log =
+ debugfs_create_file(name, 0644,
+ phba->hba_debugfs_root,
+ phba, &lpfc_debugfs_ras_log);
+ if (!phba->debug_ras_log) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "6148 Cannot create debugfs"
+ " ras_log\n");
+ goto debug_failed;
+ }
+
/* Setup hbqinfo */
snprintf(name, sizeof(name), "hbqinfo");
phba->debug_hbqinfo =
@@ -5924,20 +5869,6 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
} else
phba->debug_dumpHostSlim = NULL;
- /* Setup dumpData */
- snprintf(name, sizeof(name), "dumpData");
- phba->debug_dumpData =
- debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
- phba->hba_debugfs_root,
- phba, &lpfc_debugfs_op_dumpData);
-
- /* Setup dumpDif */
- snprintf(name, sizeof(name), "dumpDif");
- phba->debug_dumpDif =
- debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
- phba->hba_debugfs_root,
- phba, &lpfc_debugfs_op_dumpDif);
-
/* Setup DIF Error Injections */
snprintf(name, sizeof(name), "InjErrLBA");
phba->debug_InjErrLBA =
@@ -6305,6 +6236,9 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
debugfs_remove(phba->debug_hbqinfo); /* hbqinfo */
phba->debug_hbqinfo = NULL;
+ debugfs_remove(phba->debug_ras_log);
+ phba->debug_ras_log = NULL;
+
#ifdef LPFC_HDWQ_LOCK_STAT
debugfs_remove(phba->debug_lockstat); /* lockstat */
phba->debug_lockstat = NULL;
@@ -6315,12 +6249,6 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
debugfs_remove(phba->debug_dumpHostSlim); /* HostSlim */
phba->debug_dumpHostSlim = NULL;
- debugfs_remove(phba->debug_dumpData); /* dumpData */
- phba->debug_dumpData = NULL;
-
- debugfs_remove(phba->debug_dumpDif); /* dumpDif */
- phba->debug_dumpDif = NULL;
-
debugfs_remove(phba->debug_InjErrLBA); /* InjErrLBA */
phba->debug_InjErrLBA = NULL;
@@ -6442,12 +6370,7 @@ lpfc_debug_dump_all_queues(struct lpfc_hba *phba)
lpfc_debug_dump_wq(phba, DUMP_NVMELS, 0);
for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
- lpfc_debug_dump_wq(phba, DUMP_FCP, idx);
-
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
- for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
- lpfc_debug_dump_wq(phba, DUMP_NVME, idx);
- }
+ lpfc_debug_dump_wq(phba, DUMP_IO, idx);
lpfc_debug_dump_hdr_rq(phba);
lpfc_debug_dump_dat_rq(phba);
@@ -6459,12 +6382,7 @@ lpfc_debug_dump_all_queues(struct lpfc_hba *phba)
lpfc_debug_dump_cq(phba, DUMP_NVMELS, 0);
for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
- lpfc_debug_dump_cq(phba, DUMP_FCP, idx);
-
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
- for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
- lpfc_debug_dump_cq(phba, DUMP_NVME, idx);
- }
+ lpfc_debug_dump_cq(phba, DUMP_IO, idx);
/*
* Dump Event Queues (EQs)
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
index 34070874616d..20f2537af511 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.h
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -291,8 +291,7 @@ struct lpfc_idiag {
#define LPFC_DUMP_MULTIXRIPOOL_SIZE 8192
enum {
- DUMP_FCP,
- DUMP_NVME,
+ DUMP_IO,
DUMP_MBX,
DUMP_ELS,
DUMP_NVMELS,
@@ -415,12 +414,9 @@ lpfc_debug_dump_wq(struct lpfc_hba *phba, int qtype, int wqidx)
struct lpfc_queue *wq;
char *qtypestr;
- if (qtype == DUMP_FCP) {
- wq = phba->sli4_hba.hdwq[wqidx].fcp_wq;
- qtypestr = "FCP";
- } else if (qtype == DUMP_NVME) {
- wq = phba->sli4_hba.hdwq[wqidx].nvme_wq;
- qtypestr = "NVME";
+ if (qtype == DUMP_IO) {
+ wq = phba->sli4_hba.hdwq[wqidx].io_wq;
+ qtypestr = "IO";
} else if (qtype == DUMP_MBX) {
wq = phba->sli4_hba.mbx_wq;
qtypestr = "MBX";
@@ -433,7 +429,7 @@ lpfc_debug_dump_wq(struct lpfc_hba *phba, int qtype, int wqidx)
} else
return;
- if (qtype == DUMP_FCP || qtype == DUMP_NVME)
+ if (qtype == DUMP_IO)
pr_err("%s WQ: WQ[Idx:%d|Qid:%d]\n",
qtypestr, wqidx, wq->queue_id);
else
@@ -459,17 +455,13 @@ lpfc_debug_dump_cq(struct lpfc_hba *phba, int qtype, int wqidx)
char *qtypestr;
int eqidx;
- /* fcp/nvme wq and cq are 1:1, thus same indexes */
+ /* io wq and cq are 1:1, thus same indexes */
eq = NULL;
- if (qtype == DUMP_FCP) {
- wq = phba->sli4_hba.hdwq[wqidx].fcp_wq;
- cq = phba->sli4_hba.hdwq[wqidx].fcp_cq;
- qtypestr = "FCP";
- } else if (qtype == DUMP_NVME) {
- wq = phba->sli4_hba.hdwq[wqidx].nvme_wq;
- cq = phba->sli4_hba.hdwq[wqidx].nvme_cq;
- qtypestr = "NVME";
+ if (qtype == DUMP_IO) {
+ wq = phba->sli4_hba.hdwq[wqidx].io_wq;
+ cq = phba->sli4_hba.hdwq[wqidx].io_cq;
+ qtypestr = "IO";
} else if (qtype == DUMP_MBX) {
wq = phba->sli4_hba.mbx_wq;
cq = phba->sli4_hba.mbx_cq;
@@ -496,7 +488,7 @@ lpfc_debug_dump_cq(struct lpfc_hba *phba, int qtype, int wqidx)
eq = phba->sli4_hba.hdwq[0].hba_eq;
}
- if (qtype == DUMP_FCP || qtype == DUMP_NVME)
+ if (qtype == DUMP_IO)
pr_err("%s CQ: WQ[Idx:%d|Qid%d]->CQ[Idx%d|Qid%d]"
"->EQ[Idx:%d|Qid:%d]:\n",
qtypestr, wqidx, wq->queue_id, wqidx, cq->queue_id,
@@ -572,20 +564,11 @@ lpfc_debug_dump_wq_by_id(struct lpfc_hba *phba, int qid)
int wq_idx;
for (wq_idx = 0; wq_idx < phba->cfg_hdw_queue; wq_idx++)
- if (phba->sli4_hba.hdwq[wq_idx].fcp_wq->queue_id == qid)
+ if (phba->sli4_hba.hdwq[wq_idx].io_wq->queue_id == qid)
break;
if (wq_idx < phba->cfg_hdw_queue) {
- pr_err("FCP WQ[Idx:%d|Qid:%d]\n", wq_idx, qid);
- lpfc_debug_dump_q(phba->sli4_hba.hdwq[wq_idx].fcp_wq);
- return;
- }
-
- for (wq_idx = 0; wq_idx < phba->cfg_hdw_queue; wq_idx++)
- if (phba->sli4_hba.hdwq[wq_idx].nvme_wq->queue_id == qid)
- break;
- if (wq_idx < phba->cfg_hdw_queue) {
- pr_err("NVME WQ[Idx:%d|Qid:%d]\n", wq_idx, qid);
- lpfc_debug_dump_q(phba->sli4_hba.hdwq[wq_idx].nvme_wq);
+ pr_err("IO WQ[Idx:%d|Qid:%d]\n", wq_idx, qid);
+ lpfc_debug_dump_q(phba->sli4_hba.hdwq[wq_idx].io_wq);
return;
}
@@ -654,22 +637,12 @@ lpfc_debug_dump_cq_by_id(struct lpfc_hba *phba, int qid)
int cq_idx;
for (cq_idx = 0; cq_idx < phba->cfg_hdw_queue; cq_idx++)
- if (phba->sli4_hba.hdwq[cq_idx].fcp_cq->queue_id == qid)
- break;
-
- if (cq_idx < phba->cfg_hdw_queue) {
- pr_err("FCP CQ[Idx:%d|Qid:%d]\n", cq_idx, qid);
- lpfc_debug_dump_q(phba->sli4_hba.hdwq[cq_idx].fcp_cq);
- return;
- }
-
- for (cq_idx = 0; cq_idx < phba->cfg_hdw_queue; cq_idx++)
- if (phba->sli4_hba.hdwq[cq_idx].nvme_cq->queue_id == qid)
+ if (phba->sli4_hba.hdwq[cq_idx].io_cq->queue_id == qid)
break;
if (cq_idx < phba->cfg_hdw_queue) {
- pr_err("NVME CQ[Idx:%d|Qid:%d]\n", cq_idx, qid);
- lpfc_debug_dump_q(phba->sli4_hba.hdwq[cq_idx].nvme_cq);
+ pr_err("IO CQ[Idx:%d|Qid:%d]\n", cq_idx, qid);
+ lpfc_debug_dump_q(phba->sli4_hba.hdwq[cq_idx].io_cq);
return;
}
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 1c89c9f314fa..482e4a888dae 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -112,6 +112,8 @@ struct lpfc_nodelist {
uint8_t nlp_retry; /* used for ELS retries */
uint8_t nlp_fcp_info; /* class info, bits 0-3 */
#define NLP_FCP_2_DEVICE 0x10 /* FCP-2 device */
+ u8 nlp_nvme_info; /* NVME NSLER Support */
+#define NLP_NVME_NSLER 0x1 /* NVME NSLER device */
uint16_t nlp_usg_map; /* ndlp management usage bitmap */
#define NLP_USG_NODE_ACT_BIT 0x1 /* Indicate ndlp is actively used */
@@ -157,6 +159,7 @@ struct lpfc_node_rrq {
/* Defines for nlp_flag (uint32) */
#define NLP_IGNR_REG_CMPL 0x00000001 /* Rcvd rscn before we cmpl reg login */
#define NLP_REG_LOGIN_SEND 0x00000002 /* sent reglogin to adapter */
+#define NLP_RELEASE_RPI 0x00000004 /* Release RPI to free pool */
#define NLP_SUPPRESS_RSP 0x00000010 /* Remote NPort supports suppress rsp */
#define NLP_PLOGI_SND 0x00000020 /* sent PLOGI request for this entry */
#define NLP_PRLI_SND 0x00000040 /* sent PRLI request for this entry */
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index f12780f4cfbb..42a2bf38eaea 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1052,17 +1052,18 @@ stop_rr_fcf_flogi:
if (lpfc_els_retry(phba, cmdiocb, rspiocb))
goto out;
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
+ "0150 FLOGI failure Status:x%x/x%x "
+ "xri x%x TMO:x%x\n",
+ irsp->ulpStatus, irsp->un.ulpWord[4],
+ cmdiocb->sli4_xritag, irsp->ulpTimeout);
+
/* If this is not a loop open failure, bail out */
if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
IOERR_LOOP_OPEN_FAILURE)))
goto flogifail;
- lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
- "0150 FLOGI failure Status:x%x/x%x xri x%x TMO:x%x\n",
- irsp->ulpStatus, irsp->un.ulpWord[4],
- cmdiocb->sli4_xritag, irsp->ulpTimeout);
-
/* FLOGI failed, so there is no fabric */
spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
@@ -1207,6 +1208,39 @@ out:
}
/**
+ * lpfc_cmpl_els_link_down - Completion callback function for ELS command
+ * aborted during a link down
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ */
+static void
+lpfc_cmpl_els_link_down(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ IOCB_t *irsp;
+ uint32_t *pcmd;
+ uint32_t cmd;
+
+ pcmd = (uint32_t *)(((struct lpfc_dmabuf *)cmdiocb->context2)->virt);
+ cmd = *pcmd;
+ irsp = &rspiocb->iocb;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "6445 ELS completes after LINK_DOWN: "
+ " Status %x/%x cmd x%x flg x%x\n",
+ irsp->ulpStatus, irsp->un.ulpWord[4], cmd,
+ cmdiocb->iocb_flag);
+
+ if (cmdiocb->iocb_flag & LPFC_IO_FABRIC) {
+ cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC;
+ atomic_dec(&phba->fabric_iocb_count);
+ }
+ lpfc_els_free_iocb(phba, cmdiocb);
+}
+
+/**
* lpfc_issue_els_flogi - Issue an flogi iocb command for a vport
* @vport: pointer to a host virtual N_Port data structure.
* @ndlp: pointer to a node-list data structure.
@@ -2107,7 +2141,7 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
!(vport->fc_flag & FC_OFFLINE_MODE)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"4110 Issue PLOGI x%x deferred "
- "on NPort x%x rpi x%x Data: %p\n",
+ "on NPort x%x rpi x%x Data: x%px\n",
ndlp->nlp_defer_did, ndlp->nlp_DID,
ndlp->nlp_rpi, ndlp);
@@ -2202,6 +2236,7 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
IOCB_t *irsp;
struct lpfc_nodelist *ndlp;
+ char *mode;
/* we pass cmdiocb to state machine which needs rspiocb as well */
cmdiocb->context_un.rsp_iocb = rspiocb;
@@ -2239,8 +2274,17 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
goto out;
}
+ /* If we don't send GFT_ID to Fabric, a PRLI error
+ * could be expected.
+ */
+ if ((vport->fc_flag & FC_FABRIC) ||
+ (vport->cfg_enable_fc4_type != LPFC_ENABLE_BOTH))
+ mode = KERN_ERR;
+ else
+ mode = KERN_INFO;
+
/* PRLI failed */
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ lpfc_printf_vlog(vport, mode, LOG_ELS,
"2754 PRLI failure DID:%06X Status:x%x/x%x, "
"data: x%x\n",
ndlp->nlp_DID, irsp->ulpStatus,
@@ -2401,6 +2445,10 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
npr_nvme = (struct lpfc_nvme_prli *)pcmd;
bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE);
bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */
+ if (phba->nsler) {
+ bf_set(prli_nsler, npr_nvme, 1);
+ bf_set(prli_conf, npr_nvme, 1);
+ }
/* Only initiators request first burst. */
if ((phba->cfg_nvme_enable_fb) &&
@@ -4203,7 +4251,7 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
mempool_free(pmb, phba->mbox_mem_pool);
if (ndlp) {
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
- "0006 rpi%x DID:%x flg:%x %d map:%x %p\n",
+ "0006 rpi%x DID:%x flg:%x %d map:%x x%px\n",
ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
kref_read(&ndlp->kref),
ndlp->nlp_usg_map, ndlp);
@@ -4253,6 +4301,11 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
irsp = &rspiocb->iocb;
+ if (!vport) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ "3177 ELS response failed\n");
+ goto out;
+ }
if (cmdiocb->context_un.mbox)
mbox = cmdiocb->context_un.mbox;
@@ -4392,7 +4445,7 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
mempool_free(mbox, phba->mbox_mem_pool);
}
out:
- if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
+ if (ndlp && NLP_CHK_NODE_ACT(ndlp) && shost) {
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI);
spin_unlock_irq(shost->host_lock);
@@ -5222,6 +5275,11 @@ lpfc_els_disc_plogi(struct lpfc_vport *vport)
}
}
}
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "6452 Discover PLOGI %d flag x%x\n",
+ sentplogi, vport->fc_flag);
+
if (sentplogi) {
lpfc_set_disctmo(vport);
}
@@ -5634,16 +5692,16 @@ lpfc_rdp_res_attach_port_names(struct fc_rdp_port_name_desc *desc,
desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG);
if (vport->fc_flag & FC_FABRIC) {
memcpy(desc->port_names.wwnn, &vport->fabric_nodename,
- sizeof(desc->port_names.wwnn));
+ sizeof(desc->port_names.wwnn));
memcpy(desc->port_names.wwpn, &vport->fabric_portname,
- sizeof(desc->port_names.wwpn));
+ sizeof(desc->port_names.wwpn));
} else { /* Point to Point */
memcpy(desc->port_names.wwnn, &ndlp->nlp_nodename,
- sizeof(desc->port_names.wwnn));
+ sizeof(desc->port_names.wwnn));
- memcpy(desc->port_names.wwnn, &ndlp->nlp_portname,
- sizeof(desc->port_names.wwpn));
+ memcpy(desc->port_names.wwpn, &ndlp->nlp_portname,
+ sizeof(desc->port_names.wwpn));
}
desc->length = cpu_to_be32(sizeof(desc->port_names));
@@ -6327,7 +6385,11 @@ lpfc_rscn_recovery_check(struct lpfc_vport *vport)
continue;
}
- if (ndlp->nlp_fc4_type & NLP_FC4_NVME)
+ /* Check to see if we need to NVME rescan this target
+ * remoteport.
+ */
+ if (ndlp->nlp_fc4_type & NLP_FC4_NVME &&
+ ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY))
lpfc_nvme_rescan_port(vport, ndlp);
lpfc_disc_state_machine(vport, ndlp, NULL,
@@ -6413,7 +6475,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
uint32_t payload_len, length, nportid, *cmd;
int rscn_cnt;
int rscn_id = 0, hba_id = 0;
- int i;
+ int i, tmo;
pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
lp = (uint32_t *) pcmd->virt;
@@ -6441,7 +6503,11 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
*lp, vport->fc_flag, payload_len);
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
- if (ndlp->nlp_fc4_type & NLP_FC4_NVME)
+ /* Check to see if we need to NVME rescan this target
+ * remoteport.
+ */
+ if (ndlp->nlp_fc4_type & NLP_FC4_NVME &&
+ ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY))
lpfc_nvme_rescan_port(vport, ndlp);
return 0;
}
@@ -6515,6 +6581,13 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_RSCN_DEFERRED;
+
+ /* Restart disctmo if it's already running */
+ if (vport->fc_flag & FC_DISC_TMO) {
+ tmo = ((phba->fc_ratov * 3) + 3);
+ mod_timer(&vport->fc_disctmo,
+ jiffies + msecs_to_jiffies(1000 * tmo));
+ }
if ((rscn_cnt < FC_MAX_HOLD_RSCN) &&
!(vport->fc_flag & FC_RSCN_DISCOVERY)) {
vport->fc_flag |= FC_RSCN_MODE;
@@ -6617,9 +6690,10 @@ lpfc_els_handle_rscn(struct lpfc_vport *vport)
/* RSCN processed */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "0215 RSCN processed Data: x%x x%x x%x x%x\n",
+ "0215 RSCN processed Data: x%x x%x x%x x%x x%x x%x\n",
vport->fc_flag, 0, vport->fc_rscn_id_cnt,
- vport->port_state);
+ vport->port_state, vport->num_disc_nodes,
+ vport->gidft_inp);
/* To process RSCN, first compare RSCN data with NameServer */
vport->fc_ns_retry = 0;
@@ -7940,53 +8014,83 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
struct lpfc_sli_ring *pring;
struct lpfc_iocbq *tmp_iocb, *piocb;
IOCB_t *cmd = NULL;
+ unsigned long iflags = 0;
lpfc_fabric_abort_vport(vport);
+
/*
* For SLI3, only the hbalock is required. But SLI4 needs to coordinate
* with the ring insert operation. Because lpfc_sli_issue_abort_iotag
* ultimately grabs the ring_lock, the driver must splice the list into
* a working list and release the locks before calling the abort.
*/
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irqsave(&phba->hbalock, iflags);
pring = lpfc_phba_elsring(phba);
/* Bail out if we've no ELS wq, like in PCI error recovery case. */
if (unlikely(!pring)) {
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
return;
}
if (phba->sli_rev == LPFC_SLI_REV4)
spin_lock(&pring->ring_lock);
+ /* First we need to issue aborts to outstanding cmds on txcmpl */
list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
if (piocb->iocb_flag & LPFC_IO_LIBDFC)
continue;
if (piocb->vport != vport)
continue;
- list_add_tail(&piocb->dlist, &abort_list);
+
+ if (piocb->iocb_flag & LPFC_DRIVER_ABORTED)
+ continue;
+
+ /* On the ELS ring we can have ELS_REQUESTs or
+ * GEN_REQUESTs waiting for a response.
+ */
+ cmd = &piocb->iocb;
+ if (cmd->ulpCommand == CMD_ELS_REQUEST64_CR) {
+ list_add_tail(&piocb->dlist, &abort_list);
+
+ /* If the link is down when flushing ELS commands
+ * the firmware will not complete them till after
+ * the link comes back up. This may confuse
+ * discovery for the new link up, so we need to
+ * change the compl routine to just clean up the iocb
+ * and avoid any retry logic.
+ */
+ if (phba->link_state == LPFC_LINK_DOWN)
+ piocb->iocb_cmpl = lpfc_cmpl_els_link_down;
+ }
+ if (cmd->ulpCommand == CMD_GEN_REQUEST64_CR)
+ list_add_tail(&piocb->dlist, &abort_list);
}
+
if (phba->sli_rev == LPFC_SLI_REV4)
spin_unlock(&pring->ring_lock);
- spin_unlock_irq(&phba->hbalock);
- /* Abort each iocb on the aborted list and remove the dlist links. */
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+ /* Abort each txcmpl iocb on aborted list and remove the dlist links. */
list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irqsave(&phba->hbalock, iflags);
list_del_init(&piocb->dlist);
lpfc_sli_issue_abort_iotag(phba, pring, piocb);
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
}
if (!list_empty(&abort_list))
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
"3387 abort list for txq not empty\n");
INIT_LIST_HEAD(&abort_list);
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irqsave(&phba->hbalock, iflags);
if (phba->sli_rev == LPFC_SLI_REV4)
spin_lock(&pring->ring_lock);
+ /* No need to abort the txq list,
+ * just queue them up for lpfc_sli_cancel_iocbs
+ */
list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
cmd = &piocb->iocb;
@@ -8007,11 +8111,22 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
list_del_init(&piocb->list);
list_add_tail(&piocb->list, &abort_list);
}
+
+ /* The same holds true for any FLOGI/FDISC on the fabric_iocb_list */
+ if (vport == phba->pport) {
+ list_for_each_entry_safe(piocb, tmp_iocb,
+ &phba->fabric_iocb_list, list) {
+ cmd = &piocb->iocb;
+ list_del_init(&piocb->list);
+ list_add_tail(&piocb->list, &abort_list);
+ }
+ }
+
if (phba->sli_rev == LPFC_SLI_REV4)
spin_unlock(&pring->ring_lock);
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
- /* Cancell all the IOCBs from the completions list */
+ /* Cancel all the IOCBs from the completions list */
lpfc_sli_cancel_iocbs(phba, &abort_list,
IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
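
The reworked flush above follows a common idiom in this driver: gather the iocbs to act on onto a private list while holding hbalock (and ring_lock on SLI4), drop the locks, and only then issue aborts or cancel completions. A stripped-down sketch of that splice-then-process pattern for the txq path, with the per-command filtering from the patch omitted for brevity:

    LIST_HEAD(work_list);
    struct lpfc_iocbq *piocb, *tmp_iocb;
    unsigned long iflags;

    /* Phase 1: harvest matching iocbs under the lock; no callbacks here */
    spin_lock_irqsave(&phba->hbalock, iflags);
    list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
            if (piocb->vport != vport)
                    continue;
            list_move_tail(&piocb->list, &work_list);
    }
    spin_unlock_irqrestore(&phba->hbalock, iflags);

    /* Phase 2: complete the staged iocbs with no spinlocks held */
    lpfc_sli_cancel_iocbs(phba, &work_list,
                          IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);

This keeps lpfc_sli_cancel_iocbs, which may invoke completion handlers, out of the hbalock/ring_lock critical section; the same reasoning is why the txcmplq entries are staged on abort_list before lpfc_sli_issue_abort_iotag is called.
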
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 28ecaa7fc715..dcc8999c6a68 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -28,6 +28,7 @@
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/lockdep.h>
+#include <linux/utsname.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
@@ -118,6 +119,7 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
struct lpfc_work_evt *evtp;
int put_node;
int put_rport;
+ unsigned long iflags;
rdata = rport->dd_data;
ndlp = rdata->pnode;
@@ -132,7 +134,7 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
- "3181 dev_loss_callbk x%06x, rport %p flg x%x\n",
+ "3181 dev_loss_callbk x%06x, rport x%px flg x%x\n",
ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);
/* Don't defer this if we are in the process of deleting the vport
@@ -170,22 +172,22 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
}
shost = lpfc_shost_from_vport(vport);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irqsave(shost->host_lock, iflags);
ndlp->nlp_flag |= NLP_IN_DEV_LOSS;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(shost->host_lock, iflags);
/* We need to hold the node by incrementing the reference
* count until this queued work is done
*/
evtp->evt_arg1 = lpfc_nlp_get(ndlp);
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irqsave(&phba->hbalock, iflags);
if (evtp->evt_arg1) {
evtp->evt = LPFC_EVT_DEV_LOSS;
list_add_tail(&evtp->evt_listp, &phba->work_list);
lpfc_worker_wake_up(phba);
}
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
return;
}
@@ -212,14 +214,15 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
int put_node;
int warn_on = 0;
int fcf_inuse = 0;
+ unsigned long iflags;
rport = ndlp->rport;
vport = ndlp->vport;
shost = lpfc_shost_from_vport(vport);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irqsave(shost->host_lock, iflags);
ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(shost->host_lock, iflags);
if (!rport)
return fcf_inuse;
@@ -235,7 +238,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
- "3182 dev_loss_tmo_handler x%06x, rport %p flg x%x\n",
+ "3182 dev_loss_tmo_handler x%06x, rport x%px flg x%x\n",
ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);
/*
@@ -698,7 +701,10 @@ lpfc_work_done(struct lpfc_hba *phba)
if (!(phba->hba_flag & HBA_SP_QUEUE_EVT))
set_bit(LPFC_DATA_READY, &phba->data_flags);
} else {
- if (phba->link_state >= LPFC_LINK_UP ||
+ /* The driver could have an abort request completed in the queue
+ * when the link goes down. Allow for this transition.
+ */
+ if (phba->link_state >= LPFC_LINK_DOWN ||
phba->link_flag & LS_MDS_LOOPBACK) {
pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
lpfc_sli_handle_slow_ring_event(phba, pring,
@@ -903,6 +909,8 @@ lpfc_linkdown(struct lpfc_hba *phba)
phba->trunk_link.link1.state = 0;
phba->trunk_link.link2.state = 0;
phba->trunk_link.link3.state = 0;
+ phba->sli4_hba.link_state.logical_speed =
+ LPFC_LINK_SPEED_UNKNOWN;
}
spin_lock_irq(shost->host_lock);
phba->pport->fc_flag &= ~FC_LBIT;
@@ -1131,7 +1139,6 @@ void
lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
- uint8_t bbscn = 0;
if (pmb->u.mb.mbxStatus)
goto out;
@@ -1158,17 +1165,11 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
/* Start discovery by sending a FLOGI. port_state is identically
* LPFC_FLOGI while waiting for FLOGI cmpl
*/
- if (vport->port_state != LPFC_FLOGI) {
- if (phba->bbcredit_support && phba->cfg_enable_bbcr) {
- bbscn = bf_get(lpfc_bbscn_def,
- &phba->sli4_hba.bbscn_params);
- vport->fc_sparam.cmn.bbRcvSizeMsb &= 0xf;
- vport->fc_sparam.cmn.bbRcvSizeMsb |= (bbscn << 4);
- }
+ if (vport->port_state != LPFC_FLOGI)
lpfc_initial_flogi(vport);
- } else if (vport->fc_flag & FC_PT2PT) {
+ else if (vport->fc_flag & FC_PT2PT)
lpfc_disc_start(vport);
- }
+
return;
out:
@@ -3115,8 +3116,9 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
int rc;
struct fcf_record *fcf_record;
uint32_t fc_flags = 0;
+ unsigned long iflags;
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irqsave(&phba->hbalock, iflags);
phba->fc_linkspeed = bf_get(lpfc_mbx_read_top_link_spd, la);
if (!(phba->hba_flag & HBA_FCOE_MODE)) {
@@ -3213,12 +3215,12 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
vport->fc_myDID = phba->fc_pref_DID;
fc_flags |= FC_LBIT;
}
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
if (fc_flags) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irqsave(shost->host_lock, iflags);
vport->fc_flag |= fc_flags;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(shost->host_lock, iflags);
}
lpfc_linkup(phba);
@@ -3292,33 +3294,37 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
* The driver is expected to do FIP/FCF. Call the port
* and get the FCF Table.
*/
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irqsave(&phba->hbalock, iflags);
if (phba->hba_flag & FCF_TS_INPROG) {
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
return;
}
/* This is the initial FCF discovery scan */
phba->fcf.fcf_flag |= FCF_INIT_DISC;
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
"2778 Start FCF table scan at linkup\n");
rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
LPFC_FCOE_FCF_GET_FIRST);
if (rc) {
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irqsave(&phba->hbalock, iflags);
phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
goto out;
}
/* Reset FCF roundrobin bmask for new discovery */
lpfc_sli4_clear_fcf_rr_bmask(phba);
}
+ /* Prepare for LINK up registrations */
+ memset(phba->os_host_name, 0, sizeof(phba->os_host_name));
+ scnprintf(phba->os_host_name, sizeof(phba->os_host_name), "%s",
+ init_utsname()->nodename);
return;
out:
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
- "0263 Discovery Mailbox error: state: 0x%x : %p %p\n",
+ "0263 Discovery Mailbox error: state: 0x%x : x%px x%px\n",
vport->port_state, sparam_mbox, cfglink_mbox);
lpfc_issue_clear_la(phba, vport);
return;
@@ -3366,6 +3372,7 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
MAILBOX_t *mb = &pmb->u.mb;
struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
uint8_t attn_type;
+ unsigned long iflags;
/* Unblock ELS traffic */
pring = lpfc_phba_elsring(phba);
@@ -3387,12 +3394,12 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
memcpy(&phba->alpa_map[0], mp->virt, 128);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irqsave(shost->host_lock, iflags);
if (bf_get(lpfc_mbx_read_top_pb, la))
vport->fc_flag |= FC_BYPASSED_MODE;
else
vport->fc_flag &= ~FC_BYPASSED_MODE;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(shost->host_lock, iflags);
if (phba->fc_eventTag <= la->eventTag) {
phba->fc_stat.LinkMultiEvent++;
@@ -3403,12 +3410,12 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
phba->fc_eventTag = la->eventTag;
if (phba->sli_rev < LPFC_SLI_REV4) {
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irqsave(&phba->hbalock, iflags);
if (bf_get(lpfc_mbx_read_top_mm, la))
phba->sli.sli_flag |= LPFC_MENLO_MAINT;
else
phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
}
phba->link_events++;
@@ -3450,8 +3457,8 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
phba->pport->port_state, vport->fc_flag);
else if (attn_type == LPFC_ATT_UNEXP_WWPN)
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
- "1313 Link Down UNEXP WWPN Event x%x received "
- "Data: x%x x%x x%x x%x x%x\n",
+ "1313 Link Down Unexpected FA WWPN Event x%x "
+ "received Data: x%x x%x x%x x%x x%x\n",
la->eventTag, phba->fc_eventTag,
phba->pport->port_state, vport->fc_flag,
bf_get(lpfc_mbx_read_top_mm, la),
@@ -3529,7 +3536,7 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
pmb->ctx_ndlp = NULL;
lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
- "0002 rpi:%x DID:%x flg:%x %d map:%x %p\n",
+ "0002 rpi:%x DID:%x flg:%x %d map:%x x%px\n",
ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
kref_read(&ndlp->kref),
ndlp->nlp_usg_map, ndlp);
@@ -4040,8 +4047,8 @@ out:
ndlp->nlp_flag |= NLP_RPI_REGISTERED;
ndlp->nlp_type |= NLP_FABRIC;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
- lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
- "0003 rpi:%x DID:%x flg:%x %d map%x %p\n",
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY,
+ "0003 rpi:%x DID:%x flg:%x %d map%x x%px\n",
ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
kref_read(&ndlp->kref),
ndlp->nlp_usg_map, ndlp);
@@ -4160,7 +4167,7 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
fc_remote_port_rolechg(rport, rport_ids.roles);
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
- "3183 rport register x%06x, rport %p role x%x\n",
+ "3183 rport register x%06x, rport x%px role x%x\n",
ndlp->nlp_DID, rport, rport_ids.roles);
if ((rport->scsi_target_id != -1) &&
@@ -4184,7 +4191,7 @@ lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
- "3184 rport unregister x%06x, rport %p\n",
+ "3184 rport unregister x%06x, rport x%px\n",
ndlp->nlp_DID, rport);
fc_remote_port_delete(rport);
@@ -4196,8 +4203,9 @@ static void
lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ unsigned long iflags;
- spin_lock_irq(shost->host_lock);
+ spin_lock_irqsave(shost->host_lock, iflags);
switch (state) {
case NLP_STE_UNUSED_NODE:
vport->fc_unused_cnt += count;
@@ -4227,7 +4235,7 @@ lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
vport->fc_npr_cnt += count;
break;
}
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(shost->host_lock, iflags);
}
static void
@@ -4480,9 +4488,21 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
return NULL;
if (phba->sli_rev == LPFC_SLI_REV4) {
- rpi = lpfc_sli4_alloc_rpi(vport->phba);
- if (rpi == LPFC_RPI_ALLOC_ERROR)
+ if (ndlp->nlp_rpi == LPFC_RPI_ALLOC_ERROR)
+ rpi = lpfc_sli4_alloc_rpi(vport->phba);
+ else
+ rpi = ndlp->nlp_rpi;
+
+ if (rpi == LPFC_RPI_ALLOC_ERROR) {
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
+ "0359 %s: ndlp:x%px "
+ "usgmap:x%x refcnt:%d FAILED RPI "
+ " ALLOC\n",
+ __func__,
+ (void *)ndlp, ndlp->nlp_usg_map,
+ kref_read(&ndlp->kref));
return NULL;
+ }
}
spin_lock_irqsave(&phba->ndlp_lock, flags);
@@ -4490,9 +4510,9 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (NLP_CHK_FREE_REQ(ndlp)) {
spin_unlock_irqrestore(&phba->ndlp_lock, flags);
lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
- "0277 lpfc_enable_node: ndlp:x%p "
+ "0277 %s: ndlp:x%px "
"usgmap:x%x refcnt:%d\n",
- (void *)ndlp, ndlp->nlp_usg_map,
+ __func__, (void *)ndlp, ndlp->nlp_usg_map,
kref_read(&ndlp->kref));
goto free_rpi;
}
@@ -4500,9 +4520,9 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (NLP_CHK_NODE_ACT(ndlp)) {
spin_unlock_irqrestore(&phba->ndlp_lock, flags);
lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
- "0278 lpfc_enable_node: ndlp:x%p "
+ "0278 %s: ndlp:x%px "
"usgmap:x%x refcnt:%d\n",
- (void *)ndlp, ndlp->nlp_usg_map,
+ __func__, (void *)ndlp, ndlp->nlp_usg_map,
kref_read(&ndlp->kref));
goto free_rpi;
}
@@ -4532,7 +4552,7 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_rpi = rpi;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
"0008 rpi:%x DID:%x flg:%x refcnt:%d "
- "map:%x %p\n", ndlp->nlp_rpi, ndlp->nlp_DID,
+ "map:%x x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID,
ndlp->nlp_flag,
kref_read(&ndlp->kref),
ndlp->nlp_usg_map, ndlp);
@@ -4541,6 +4561,14 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (state != NLP_STE_UNUSED_NODE)
lpfc_nlp_set_state(vport, ndlp, state);
+ else
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+ "0013 rpi:%x DID:%x flg:%x refcnt:%d "
+ "map:%x x%px STATE=UNUSED\n",
+ ndlp->nlp_rpi, ndlp->nlp_DID,
+ ndlp->nlp_flag,
+ kref_read(&ndlp->kref),
+ ndlp->nlp_usg_map, ndlp);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
"node enable: did:x%x",
@@ -4548,8 +4576,10 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
return ndlp;
free_rpi:
- if (phba->sli_rev == LPFC_SLI_REV4)
+ if (phba->sli_rev == LPFC_SLI_REV4) {
lpfc_sli4_free_rpi(vport->phba, rpi);
+ ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
+ }
return NULL;
}
@@ -4797,7 +4827,7 @@ lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
(ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"1434 UNREG cmpl deferred logo x%x "
- "on NPort x%x Data: x%x %p\n",
+ "on NPort x%x Data: x%x x%px\n",
ndlp->nlp_rpi, ndlp->nlp_DID,
ndlp->nlp_defer_did, ndlp);
@@ -4805,11 +4835,54 @@ lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
} else {
+ if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
+ lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
+ ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
+ ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
+ }
ndlp->nlp_flag &= ~NLP_UNREG_INP;
}
}
/*
+ * Sets the mailbox completion handler to be used for the
+ * unreg_rpi command. The handler varies based on the state of
+ * the port and what will be happening to the rpi next.
+ */
+static void
+lpfc_set_unreg_login_mbx_cmpl(struct lpfc_hba *phba, struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp, LPFC_MBOXQ_t *mbox)
+{
+ unsigned long iflags;
+
+ if (ndlp->nlp_flag & NLP_ISSUE_LOGO) {
+ mbox->ctx_ndlp = ndlp;
+ mbox->mbox_cmpl = lpfc_nlp_logo_unreg;
+
+ } else if (phba->sli_rev == LPFC_SLI_REV4 &&
+ (!(vport->load_flag & FC_UNLOADING)) &&
+ (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
+ LPFC_SLI_INTF_IF_TYPE_2) &&
+ (kref_read(&ndlp->kref) > 0)) {
+ mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
+ mbox->mbox_cmpl = lpfc_sli4_unreg_rpi_cmpl_clr;
+ } else {
+ if (vport->load_flag & FC_UNLOADING) {
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ spin_lock_irqsave(&vport->phba->ndlp_lock,
+ iflags);
+ ndlp->nlp_flag |= NLP_RELEASE_RPI;
+ spin_unlock_irqrestore(&vport->phba->ndlp_lock,
+ iflags);
+ }
+ lpfc_nlp_get(ndlp);
+ }
+ mbox->ctx_ndlp = ndlp;
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ }
+}
+
+/*
* Free rpi associated with LPFC_NODELIST entry.
* This routine is called from lpfc_freenode(), when we are removing
* a LPFC_NODELIST entry. It is also called if the driver initiates a
@@ -4829,7 +4902,8 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
if (ndlp->nlp_flag & NLP_RPI_REGISTERED ||
ndlp->nlp_flag & NLP_REG_LOGIN_SEND) {
if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
- lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_NODE | LOG_DISCOVERY,
"3366 RPI x%x needs to be "
"unregistered nlp_flag x%x "
"did x%x\n",
@@ -4840,10 +4914,11 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
* no need to queue up another one.
*/
if (ndlp->nlp_flag & NLP_UNREG_INP) {
- lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_NODE | LOG_DISCOVERY,
"1436 unreg_rpi SKIP UNREG x%x on "
"NPort x%x deferred x%x flg x%x "
- "Data: %p\n",
+ "Data: x%px\n",
ndlp->nlp_rpi, ndlp->nlp_DID,
ndlp->nlp_defer_did,
ndlp->nlp_flag, ndlp);
@@ -4859,41 +4934,22 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
lpfc_unreg_login(phba, vport->vpi, rpi, mbox);
mbox->vport = vport;
- if (ndlp->nlp_flag & NLP_ISSUE_LOGO) {
- mbox->ctx_ndlp = ndlp;
- mbox->mbox_cmpl = lpfc_nlp_logo_unreg;
- } else {
- if (phba->sli_rev == LPFC_SLI_REV4 &&
- (!(vport->load_flag & FC_UNLOADING)) &&
- (bf_get(lpfc_sli_intf_if_type,
- &phba->sli4_hba.sli_intf) >=
- LPFC_SLI_INTF_IF_TYPE_2) &&
- (kref_read(&ndlp->kref) > 0)) {
- mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
- mbox->mbox_cmpl =
- lpfc_sli4_unreg_rpi_cmpl_clr;
- /*
- * accept PLOGIs after unreg_rpi_cmpl
- */
- acc_plogi = 0;
- } else if (vport->load_flag & FC_UNLOADING) {
- mbox->ctx_ndlp = NULL;
- mbox->mbox_cmpl =
- lpfc_sli_def_mbox_cmpl;
- } else {
- mbox->ctx_ndlp = ndlp;
- mbox->mbox_cmpl =
- lpfc_sli_def_mbox_cmpl;
- }
- }
+ lpfc_set_unreg_login_mbx_cmpl(phba, vport, ndlp, mbox);
+ if (mbox->mbox_cmpl == lpfc_sli4_unreg_rpi_cmpl_clr)
+ /*
+ * accept PLOGIs after unreg_rpi_cmpl
+ */
+ acc_plogi = 0;
if (((ndlp->nlp_DID & Fabric_DID_MASK) !=
Fabric_DID_MASK) &&
(!(vport->fc_flag & FC_OFFLINE_MODE)))
ndlp->nlp_flag |= NLP_UNREG_INP;
- lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_NODE | LOG_DISCOVERY,
"1433 unreg_rpi UNREG x%x on "
- "NPort x%x deferred flg x%x Data:%p\n",
+ "NPort x%x deferred flg x%x "
+ "Data:x%px\n",
ndlp->nlp_rpi, ndlp->nlp_DID,
ndlp->nlp_flag, ndlp);
@@ -5025,6 +5081,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
struct lpfc_hba *phba = vport->phba;
LPFC_MBOXQ_t *mb, *nextmb;
struct lpfc_dmabuf *mp;
+ unsigned long iflags;
/* Cleanup node for NPort <nlp_DID> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
@@ -5034,16 +5091,16 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
ndlp->nlp_state, ndlp->nlp_rpi);
if (NLP_CHK_FREE_REQ(ndlp)) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
- "0280 lpfc_cleanup_node: ndlp:x%p "
+ "0280 %s: ndlp:x%px "
"usgmap:x%x refcnt:%d\n",
- (void *)ndlp, ndlp->nlp_usg_map,
+ __func__, (void *)ndlp, ndlp->nlp_usg_map,
kref_read(&ndlp->kref));
lpfc_dequeue_node(vport, ndlp);
} else {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
- "0281 lpfc_cleanup_node: ndlp:x%p "
+ "0281 %s: ndlp:x%px "
"usgmap:x%x refcnt:%d\n",
- (void *)ndlp, ndlp->nlp_usg_map,
+ __func__, (void *)ndlp, ndlp->nlp_usg_map,
kref_read(&ndlp->kref));
lpfc_disable_node(vport, ndlp);
}
@@ -5104,8 +5161,22 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
list_del_init(&ndlp->els_retry_evt.evt_listp);
list_del_init(&ndlp->dev_loss_evt.evt_listp);
lpfc_cleanup_vports_rrqs(vport, ndlp);
- lpfc_unreg_rpi(vport, ndlp);
-
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ ndlp->nlp_flag |= NLP_RELEASE_RPI;
+ if (!lpfc_unreg_rpi(vport, ndlp)) {
+ /* Clean up unregistered and non-freed RPIs */
+ if ((ndlp->nlp_flag & NLP_RELEASE_RPI) &&
+ !(ndlp->nlp_rpi == LPFC_RPI_ALLOC_ERROR)) {
+ lpfc_sli4_free_rpi(vport->phba,
+ ndlp->nlp_rpi);
+ spin_lock_irqsave(&vport->phba->ndlp_lock,
+ iflags);
+ ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
+ ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
+ spin_unlock_irqrestore(&vport->phba->ndlp_lock,
+ iflags);
+ }
+ }
return 0;
}
@@ -5131,8 +5202,10 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
/* For this case we need to cleanup the default rpi
* allocated by the firmware.
*/
- lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
- "0005 rpi:%x DID:%x flg:%x %d map:%x %p\n",
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_NODE | LOG_DISCOVERY,
+ "0005 Cleanup Default rpi:x%x DID:x%x flg:x%x "
+ "ref %d map:x%x ndlp x%px\n",
ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
kref_read(&ndlp->kref),
ndlp->nlp_usg_map, ndlp);
@@ -5168,9 +5241,10 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
* for registered rport so need to cleanup rport
*/
lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
- "0940 removed node x%p DID x%x "
- " rport not null %p\n",
- ndlp, ndlp->nlp_DID, ndlp->rport);
+ "0940 removed node x%px DID x%x "
+ "rpi %d rport not null x%px\n",
+ ndlp, ndlp->nlp_DID, ndlp->nlp_rpi,
+ ndlp->rport);
rport = ndlp->rport;
rdata = rport->dd_data;
rdata->pnode = NULL;
@@ -5243,15 +5317,15 @@ __lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
if (lpfc_matchdid(vport, ndlp, did)) {
- data1 = (((uint32_t) ndlp->nlp_state << 24) |
- ((uint32_t) ndlp->nlp_xri << 16) |
- ((uint32_t) ndlp->nlp_type << 8) |
- ((uint32_t) ndlp->nlp_rpi & 0xff));
+ data1 = (((uint32_t)ndlp->nlp_state << 24) |
+ ((uint32_t)ndlp->nlp_xri << 16) |
+ ((uint32_t)ndlp->nlp_type << 8) |
+ ((uint32_t)ndlp->nlp_usg_map & 0xff));
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
"0929 FIND node DID "
- "Data: x%p x%x x%x x%x %p\n",
+ "Data: x%px x%x x%x x%x x%x x%px\n",
ndlp, ndlp->nlp_DID,
- ndlp->nlp_flag, data1,
+ ndlp->nlp_flag, data1, ndlp->nlp_rpi,
ndlp->active_rrqs_xri_bitmap);
return ndlp;
}
@@ -5296,7 +5370,7 @@ lpfc_findnode_mapped(struct lpfc_vport *vport)
spin_unlock_irqrestore(shost->host_lock, iflags);
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
"2025 FIND node DID "
- "Data: x%p x%x x%x x%x %p\n",
+ "Data: x%px x%x x%x x%x x%px\n",
ndlp, ndlp->nlp_DID,
ndlp->nlp_flag, data1,
ndlp->active_rrqs_xri_bitmap);
@@ -5328,6 +5402,13 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
if (!ndlp)
return NULL;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "6453 Setup New Node 2B_DISC x%x "
+ "Data:x%x x%x x%x\n",
+ ndlp->nlp_DID, ndlp->nlp_flag,
+ ndlp->nlp_state, vport->fc_flag);
+
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
spin_unlock_irq(shost->host_lock);
@@ -5336,8 +5417,17 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
if (vport->phba->nvmet_support)
return NULL;
ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE);
- if (!ndlp)
+ if (!ndlp) {
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI,
+ "0014 Could not enable ndlp\n");
return NULL;
+ }
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "6454 Setup Enabled Node 2B_DISC x%x "
+ "Data:x%x x%x x%x\n",
+ ndlp->nlp_DID, ndlp->nlp_flag,
+ ndlp->nlp_state, vport->fc_flag);
+
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
spin_unlock_irq(shost->host_lock);
@@ -5357,6 +5447,12 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
*/
lpfc_cancel_retry_delay_tmo(vport, ndlp);
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "6455 Setup RSCN Node 2B_DISC x%x "
+ "Data:x%x x%x x%x\n",
+ ndlp->nlp_DID, ndlp->nlp_flag,
+ ndlp->nlp_state, vport->fc_flag);
+
/* NVME Target mode waits until rport is known to be
* impacted by the RSCN before it transitions. No
* active management - just go to NPR provided the
@@ -5368,15 +5464,32 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
/* If we've already received a PLOGI from this NPort
* we don't need to try to discover it again.
*/
- if (ndlp->nlp_flag & NLP_RCV_PLOGI)
+ if (ndlp->nlp_flag & NLP_RCV_PLOGI &&
+ !(ndlp->nlp_type &
+ (NLP_FCP_TARGET | NLP_NVME_TARGET)))
return NULL;
+ ndlp->nlp_prev_state = ndlp->nlp_state;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
spin_unlock_irq(shost->host_lock);
- } else
+ } else {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "6456 Skip Setup RSCN Node x%x "
+ "Data:x%x x%x x%x\n",
+ ndlp->nlp_DID, ndlp->nlp_flag,
+ ndlp->nlp_state, vport->fc_flag);
ndlp = NULL;
+ }
} else {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "6457 Setup Active Node 2B_DISC x%x "
+ "Data:x%x x%x x%x\n",
+ ndlp->nlp_DID, ndlp->nlp_flag,
+ ndlp->nlp_state, vport->fc_flag);
+
/* If the initiator received a PLOGI from this NPort or if the
* initiator is already in the process of discovery on it,
* there's no need to try to discover it again.
@@ -5528,10 +5641,10 @@ lpfc_disc_start(struct lpfc_vport *vport)
/* Start Discovery state <hba_state> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "0202 Start Discovery hba state x%x "
- "Data: x%x x%x x%x\n",
+ "0202 Start Discovery port state x%x "
+ "flg x%x Data: x%x x%x x%x\n",
vport->port_state, vport->fc_flag, vport->fc_plogi_cnt,
- vport->fc_adisc_cnt);
+ vport->fc_adisc_cnt, vport->fc_npr_cnt);
/* First do ADISCs - if any */
num_sent = lpfc_els_disc_adisc(vport);
@@ -5959,8 +6072,8 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
ndlp->nlp_flag |= NLP_RPI_REGISTERED;
ndlp->nlp_type |= NLP_FABRIC;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
- lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
- "0004 rpi:%x DID:%x flg:%x %d map:%x %p\n",
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY,
+ "0004 rpi:%x DID:%x flg:%x %d map:%x x%px\n",
ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
kref_read(&ndlp->kref),
ndlp->nlp_usg_map, ndlp);
@@ -6014,8 +6127,8 @@ __lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
if (filter(ndlp, param)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
- "3185 FIND node filter %p DID "
- "ndlp %p did x%x flg x%x st x%x "
+ "3185 FIND node filter %ps DID "
+ "ndlp x%px did x%x flg x%x st x%x "
"xri x%x type x%x rpi x%x\n",
filter, ndlp, ndlp->nlp_DID,
ndlp->nlp_flag, ndlp->nlp_state,
@@ -6025,7 +6138,7 @@ __lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
}
}
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
- "3186 FIND node filter %p NOT FOUND.\n", filter);
+ "3186 FIND node filter %ps NOT FOUND.\n", filter);
return NULL;
}
@@ -6065,10 +6178,11 @@ lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_nodelist *ndlp;
+ unsigned long flags;
- spin_lock_irq(shost->host_lock);
+ spin_lock_irqsave(shost->host_lock, flags);
ndlp = __lpfc_findnode_rpi(vport, rpi);
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(shost->host_lock, flags);
return ndlp;
}
@@ -6147,12 +6261,12 @@ lpfc_nlp_init(struct lpfc_vport *vport, uint32_t did)
INIT_LIST_HEAD(&ndlp->nlp_listp);
if (vport->phba->sli_rev == LPFC_SLI_REV4) {
ndlp->nlp_rpi = rpi;
- lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
- "0007 rpi:%x DID:%x flg:%x refcnt:%d "
- "map:%x %p\n", ndlp->nlp_rpi, ndlp->nlp_DID,
- ndlp->nlp_flag,
- kref_read(&ndlp->kref),
- ndlp->nlp_usg_map, ndlp);
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY,
+ "0007 Init New ndlp x%px, rpi:x%x DID:%x "
+ "flg:x%x refcnt:%d map:x%x\n",
+ ndlp, ndlp->nlp_rpi, ndlp->nlp_DID,
+ ndlp->nlp_flag, kref_read(&ndlp->kref),
+ ndlp->nlp_usg_map);
ndlp->active_rrqs_xri_bitmap =
mempool_alloc(vport->phba->active_rrq_pool,
@@ -6187,8 +6301,9 @@ lpfc_nlp_release(struct kref *kref)
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
- "0279 lpfc_nlp_release: ndlp:x%p did %x "
+ "0279 %s: ndlp:x%px did %x "
"usgmap:x%x refcnt:%d rpi:%x\n",
+ __func__,
(void *)ndlp, ndlp->nlp_DID, ndlp->nlp_usg_map,
kref_read(&ndlp->kref), ndlp->nlp_rpi);
@@ -6200,8 +6315,6 @@ lpfc_nlp_release(struct kref *kref)
spin_lock_irqsave(&phba->ndlp_lock, flags);
NLP_CLR_NODE_ACT(ndlp);
spin_unlock_irqrestore(&phba->ndlp_lock, flags);
- if (phba->sli_rev == LPFC_SLI_REV4)
- lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
/* free ndlp memory for final ndlp release */
if (NLP_CHK_FREE_REQ(ndlp)) {
@@ -6237,9 +6350,9 @@ lpfc_nlp_get(struct lpfc_nodelist *ndlp)
if (!NLP_CHK_NODE_ACT(ndlp) || NLP_CHK_FREE_ACK(ndlp)) {
spin_unlock_irqrestore(&phba->ndlp_lock, flags);
lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
- "0276 lpfc_nlp_get: ndlp:x%p "
+ "0276 %s: ndlp:x%px "
"usgmap:x%x refcnt:%d\n",
- (void *)ndlp, ndlp->nlp_usg_map,
+ __func__, (void *)ndlp, ndlp->nlp_usg_map,
kref_read(&ndlp->kref));
return NULL;
} else
@@ -6265,9 +6378,9 @@ lpfc_nlp_put(struct lpfc_nodelist *ndlp)
return 1;
lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
- "node put: did:x%x flg:x%x refcnt:x%x",
- ndlp->nlp_DID, ndlp->nlp_flag,
- kref_read(&ndlp->kref));
+ "node put: did:x%x flg:x%x refcnt:x%x",
+ ndlp->nlp_DID, ndlp->nlp_flag,
+ kref_read(&ndlp->kref));
phba = ndlp->phba;
spin_lock_irqsave(&phba->ndlp_lock, flags);
/* Check the ndlp memory free acknowledge flag to avoid the
@@ -6277,9 +6390,9 @@ lpfc_nlp_put(struct lpfc_nodelist *ndlp)
if (NLP_CHK_FREE_ACK(ndlp)) {
spin_unlock_irqrestore(&phba->ndlp_lock, flags);
lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
- "0274 lpfc_nlp_put: ndlp:x%p "
+ "0274 %s: ndlp:x%px "
"usgmap:x%x refcnt:%d\n",
- (void *)ndlp, ndlp->nlp_usg_map,
+ __func__, (void *)ndlp, ndlp->nlp_usg_map,
kref_read(&ndlp->kref));
return 1;
}
@@ -6290,9 +6403,9 @@ lpfc_nlp_put(struct lpfc_nodelist *ndlp)
if (NLP_CHK_IACT_REQ(ndlp)) {
spin_unlock_irqrestore(&phba->ndlp_lock, flags);
lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
- "0275 lpfc_nlp_put: ndlp:x%p "
+ "0275 %s: ndlp:x%px "
"usgmap:x%x refcnt:%d\n",
- (void *)ndlp, ndlp->nlp_usg_map,
+ __func__, (void *)ndlp, ndlp->nlp_usg_map,
kref_read(&ndlp->kref));
return 1;
}
@@ -6382,7 +6495,8 @@ lpfc_fcf_inuse(struct lpfc_hba *phba)
goto out;
} else if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
ret = 1;
- lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_NODE | LOG_DISCOVERY,
"2624 RPI %x DID %x flag %x "
"still logged in\n",
ndlp->nlp_rpi, ndlp->nlp_DID,
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 5b439a6dcde1..436cdc8c5ef4 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -843,7 +843,7 @@ typedef struct _ADISC { /* Structure is in Big Endian format */
struct lpfc_name portName;
struct lpfc_name nodeName;
uint32_t DID;
-} ADISC;
+} __packed ADISC;
typedef struct _FARP { /* Structure is in Big Endian format */
uint32_t Mflags:8;
@@ -873,7 +873,7 @@ typedef struct _FAN { /* Structure is in Big Endian format */
uint32_t Fdid;
struct lpfc_name FportName;
struct lpfc_name FnodeName;
-} FAN;
+} __packed FAN;
typedef struct _SCR { /* Structure is in Big Endian format */
uint8_t resvd1;
@@ -917,7 +917,7 @@ typedef struct _RNID { /* Structure is in Big Endian format */
union {
RNID_TOP_DISC topologyDisc; /* topology disc (0xdf) */
} un;
-} RNID;
+} __packed RNID;
typedef struct _RPS { /* Structure is in Big Endian format */
union {
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 77f9a55a3f54..9a064b96e570 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -210,7 +210,6 @@ struct lpfc_sli_intf {
#define LPFC_MAX_IMAX 5000000
#define LPFC_DEF_IMAX 0
-#define LPFC_IMAX_THRESHOLD 1000
#define LPFC_MAX_AUTO_EQ_DELAY 120
#define LPFC_EQ_DELAY_STEP 15
#define LPFC_EQD_ISR_TRIGGER 20000
@@ -2050,6 +2049,23 @@ struct sli4_sge { /* SLI-4 */
uint32_t sge_len;
};
+struct sli4_hybrid_sgl {
+ struct list_head list_node;
+ struct sli4_sge *dma_sgl;
+ dma_addr_t dma_phys_sgl;
+};
+
+struct fcp_cmd_rsp_buf {
+ struct list_head list_node;
+
+ /* for storing cmd/rsp dma alloc'ed virt_addr */
+ struct fcp_cmnd *fcp_cmnd;
+ struct fcp_rsp *fcp_rsp;
+
+ /* for storing this cmd/rsp's dma mapped phys addr from per CPU pool */
+ dma_addr_t fcp_cmd_rsp_dma_handle;
+};
+
struct sli4_sge_diseed { /* SLI-4 */
uint32_t ref_tag;
uint32_t ref_tag_tran;
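
The two list-linked structures added above back the new extended-SGL (cfg_xpsgl) path: when a command needs more SGL or cmd/rsp space than the embedded buffer provides, a chunk is carved out of a per-hardware-queue DMA pool and chained onto the I/O buffer. A hedged sketch of one such allocation, with the pool and owning list passed in as placeholders rather than the driver's actual fields:

    /* illustrative helper; sgl_pool and buf_list are assumptions */
    static struct sli4_hybrid_sgl *
    example_get_hybrid_sgl(struct dma_pool *sgl_pool, struct list_head *buf_list)
    {
            struct sli4_hybrid_sgl *entry;

            entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
            if (!entry)
                    return NULL;

            /* carve one SGL chunk out of the per-hardware-queue DMA pool */
            entry->dma_sgl = dma_pool_alloc(sgl_pool, GFP_ATOMIC,
                                            &entry->dma_phys_sgl);
            if (!entry->dma_sgl) {
                    kfree(entry);
                    return NULL;
            }

            /* chain it onto the I/O buffer that needs the extra SGL space */
            list_add_tail(&entry->list_node, buf_list);
            return entry;
    }

fcp_cmd_rsp_buf entries would follow the same pattern, except each pool element carries both the FCP command and response frames plus their single DMA handle.
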
@@ -2303,6 +2319,7 @@ struct lpfc_mbx_redisc_fcf_tbl {
#define ADD_STATUS_OPERATION_ALREADY_ACTIVE 0x67
#define ADD_STATUS_FW_NOT_SUPPORTED 0xEB
#define ADD_STATUS_INVALID_REQUEST 0x4B
+#define ADD_STATUS_FW_DOWNLOAD_HW_DISABLED 0x58
struct lpfc_mbx_sli4_config {
struct mbox_header header;
@@ -2792,6 +2809,15 @@ struct lpfc_mbx_read_config {
#define lpfc_mbx_rd_conf_trunk_SHIFT 12
#define lpfc_mbx_rd_conf_trunk_MASK 0x0000000F
#define lpfc_mbx_rd_conf_trunk_WORD word2
+#define lpfc_mbx_rd_conf_pt_SHIFT 20
+#define lpfc_mbx_rd_conf_pt_MASK 0x00000003
+#define lpfc_mbx_rd_conf_pt_WORD word2
+#define lpfc_mbx_rd_conf_tf_SHIFT 22
+#define lpfc_mbx_rd_conf_tf_MASK 0x00000001
+#define lpfc_mbx_rd_conf_tf_WORD word2
+#define lpfc_mbx_rd_conf_ptv_SHIFT 23
+#define lpfc_mbx_rd_conf_ptv_MASK 0x00000001
+#define lpfc_mbx_rd_conf_ptv_WORD word2
#define lpfc_mbx_rd_conf_topology_SHIFT 24
#define lpfc_mbx_rd_conf_topology_MASK 0x000000FF
#define lpfc_mbx_rd_conf_topology_WORD word2
@@ -3449,6 +3475,9 @@ struct lpfc_sli4_parameters {
#define cfg_xib_SHIFT 4
#define cfg_xib_MASK 0x00000001
#define cfg_xib_WORD word19
+#define cfg_xpsgl_SHIFT 6
+#define cfg_xpsgl_MASK 0x00000001
+#define cfg_xpsgl_WORD word19
#define cfg_eqdr_SHIFT 8
#define cfg_eqdr_MASK 0x00000001
#define cfg_eqdr_WORD word19
@@ -3459,6 +3488,13 @@ struct lpfc_sli4_parameters {
#define cfg_bv1s_SHIFT 10
#define cfg_bv1s_MASK 0x00000001
#define cfg_bv1s_WORD word19
+#define cfg_pvl_SHIFT 13
+#define cfg_pvl_MASK 0x00000001
+#define cfg_pvl_WORD word19
+
+#define cfg_nsler_SHIFT 12
+#define cfg_nsler_MASK 0x00000001
+#define cfg_nsler_WORD word19
uint32_t word20;
#define cfg_max_tow_xri_SHIFT 0
@@ -3494,6 +3530,7 @@ struct lpfc_sli4_parameters {
#define LPFC_SET_UE_RECOVERY 0x10
#define LPFC_SET_MDS_DIAGS 0x11
+#define LPFC_SET_DUAL_DUMP 0x1e
struct lpfc_mbx_set_feature {
struct mbox_header header;
uint32_t feature;
@@ -3508,6 +3545,15 @@ struct lpfc_mbx_set_feature {
#define lpfc_mbx_set_feature_mds_deep_loopbk_SHIFT 1
#define lpfc_mbx_set_feature_mds_deep_loopbk_MASK 0x00000001
#define lpfc_mbx_set_feature_mds_deep_loopbk_WORD word6
+#define lpfc_mbx_set_feature_dd_SHIFT 0
+#define lpfc_mbx_set_feature_dd_MASK 0x00000001
+#define lpfc_mbx_set_feature_dd_WORD word6
+#define lpfc_mbx_set_feature_ddquery_SHIFT 1
+#define lpfc_mbx_set_feature_ddquery_MASK 0x00000001
+#define lpfc_mbx_set_feature_ddquery_WORD word6
+#define LPFC_DISABLE_DUAL_DUMP 0
+#define LPFC_ENABLE_DUAL_DUMP 1
+#define LPFC_QUERY_OP_DUAL_DUMP 2
uint32_t word7;
#define lpfc_mbx_set_feature_UERP_SHIFT 0
#define lpfc_mbx_set_feature_UERP_MASK 0x0000ffff
@@ -3879,6 +3925,9 @@ struct lpfc_mbx_wr_object {
#define LPFC_CHANGE_STATUS_FW_RESET 0x02
#define LPFC_CHANGE_STATUS_PORT_MIGRATION 0x04
#define LPFC_CHANGE_STATUS_PCI_RESET 0x05
+#define lpfc_wr_object_csf_SHIFT 8
+#define lpfc_wr_object_csf_MASK 0x00000001
+#define lpfc_wr_object_csf_WORD word5
} response;
} u;
};
@@ -4237,6 +4286,8 @@ struct lpfc_acqe_sli {
#define LPFC_SLI_EVENT_TYPE_DIAG_DUMP 0x5
#define LPFC_SLI_EVENT_TYPE_MISCONFIGURED 0x9
#define LPFC_SLI_EVENT_TYPE_REMOTE_DPORT 0xA
+#define LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN 0xF
+#define LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE 0x10
};
/*
@@ -4314,6 +4365,12 @@ struct wqe_common {
#define wqe_rcvoxid_SHIFT 16
#define wqe_rcvoxid_MASK 0x0000FFFF
#define wqe_rcvoxid_WORD word9
+#define wqe_sof_SHIFT 24
+#define wqe_sof_MASK 0x000000FF
+#define wqe_sof_WORD word9
+#define wqe_eof_SHIFT 16
+#define wqe_eof_MASK 0x000000FF
+#define wqe_eof_WORD word9
uint32_t word10;
#define wqe_ebde_cnt_SHIFT 0
#define wqe_ebde_cnt_MASK 0x0000000f
@@ -4595,6 +4652,7 @@ struct lpfc_nvme_prli {
#define prli_type_code_WORD word1
uint32_t word_rsvd2;
uint32_t word_rsvd3;
+
uint32_t word4;
#define prli_fba_SHIFT 0
#define prli_fba_MASK 0x00000001
@@ -4611,6 +4669,9 @@ struct lpfc_nvme_prli {
#define prli_conf_SHIFT 7
#define prli_conf_MASK 0x00000001
#define prli_conf_WORD word4
+#define prli_nsler_SHIFT 8
+#define prli_nsler_MASK 0x00000001
+#define prli_nsler_WORD word4
uint32_t word5;
#define prli_fb_sz_SHIFT 0
#define prli_fb_sz_MASK 0x0000ffff
@@ -4625,6 +4686,7 @@ struct create_xri_wqe {
uint32_t rsvd_12_15[4]; /* word 12-15 */
};
+#define INHIBIT_ABORT 1
#define T_REQUEST_TAG 3
#define T_XRI_TAG 1
@@ -4773,8 +4835,8 @@ union lpfc_wqe128 {
struct send_frame_wqe send_frame;
};
-#define MAGIC_NUMER_G6 0xFEAA0003
-#define MAGIC_NUMER_G7 0xFEAA0005
+#define MAGIC_NUMBER_G6 0xFEAA0003
+#define MAGIC_NUMBER_G7 0xFEAA0005
struct lpfc_grp_hdr {
uint32_t size;
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index a7549ae32542..5a605773dd0a 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -39,6 +39,9 @@
#include <linux/msi.h>
#include <linux/irq.h>
#include <linux/bitops.h>
+#include <linux/crash_dump.h>
+#include <linux/cpu.h>
+#include <linux/cpuhotplug.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
@@ -65,15 +68,13 @@
#include "lpfc_version.h"
#include "lpfc_ids.h"
-char *_dump_buf_data;
-unsigned long _dump_buf_data_order;
-char *_dump_buf_dif;
-unsigned long _dump_buf_dif_order;
-spinlock_t _dump_buf_lock;
-
+static enum cpuhp_state lpfc_cpuhp_state;
/* Used when mapping IRQ vectors in a driver centric manner */
static uint32_t lpfc_present_cpu;
+static void __lpfc_cpuhp_remove(struct lpfc_hba *phba);
+static void lpfc_cpuhp_remove(struct lpfc_hba *phba);
+static void lpfc_cpuhp_add(struct lpfc_hba *phba);
static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_verify(struct lpfc_hba *);
@@ -1081,8 +1082,8 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
qp = &phba->sli4_hba.hdwq[idx];
- spin_lock(&qp->abts_scsi_buf_list_lock);
- list_splice_init(&qp->lpfc_abts_scsi_buf_list,
+ spin_lock(&qp->abts_io_buf_list_lock);
+ list_splice_init(&qp->lpfc_abts_io_buf_list,
&aborts);
list_for_each_entry_safe(psb, psb_next, &aborts, list) {
@@ -1093,29 +1094,11 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
spin_lock(&qp->io_buf_list_put_lock);
list_splice_init(&aborts, &qp->lpfc_io_buf_list_put);
qp->put_io_bufs += qp->abts_scsi_io_bufs;
+ qp->put_io_bufs += qp->abts_nvme_io_bufs;
qp->abts_scsi_io_bufs = 0;
+ qp->abts_nvme_io_bufs = 0;
spin_unlock(&qp->io_buf_list_put_lock);
- spin_unlock(&qp->abts_scsi_buf_list_lock);
-
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
- spin_lock(&qp->abts_nvme_buf_list_lock);
- list_splice_init(&qp->lpfc_abts_nvme_buf_list,
- &nvme_aborts);
- list_for_each_entry_safe(psb, psb_next, &nvme_aborts,
- list) {
- psb->pCmd = NULL;
- psb->status = IOSTAT_SUCCESS;
- cnt++;
- }
- spin_lock(&qp->io_buf_list_put_lock);
- qp->put_io_bufs += qp->abts_nvme_io_bufs;
- qp->abts_nvme_io_bufs = 0;
- list_splice_init(&nvme_aborts,
- &qp->lpfc_io_buf_list_put);
- spin_unlock(&qp->io_buf_list_put_lock);
- spin_unlock(&qp->abts_nvme_buf_list_lock);
-
- }
+ spin_unlock(&qp->abts_io_buf_list_lock);
}
spin_unlock_irq(&phba->hbalock);
@@ -1258,7 +1241,7 @@ lpfc_hb_eq_delay_work(struct work_struct *work)
struct lpfc_hba, eq_delay_work);
struct lpfc_eq_intr_info *eqi, *eqi_new;
struct lpfc_queue *eq, *eq_next;
- unsigned char *eqcnt = NULL;
+ unsigned char *ena_delay = NULL;
uint32_t usdelay;
int i;
@@ -1269,35 +1252,36 @@ lpfc_hb_eq_delay_work(struct work_struct *work)
phba->pport->fc_flag & FC_OFFLINE_MODE)
goto requeue;
- eqcnt = kcalloc(num_possible_cpus(), sizeof(unsigned char),
- GFP_KERNEL);
- if (!eqcnt)
+ ena_delay = kcalloc(phba->sli4_hba.num_possible_cpu, sizeof(*ena_delay),
+ GFP_KERNEL);
+ if (!ena_delay)
goto requeue;
- /* Loop thru all IRQ vectors */
for (i = 0; i < phba->cfg_irq_chann; i++) {
/* Get the EQ corresponding to the IRQ vector */
eq = phba->sli4_hba.hba_eq_hdl[i].eq;
- if (eq && eqcnt[eq->last_cpu] < 2)
- eqcnt[eq->last_cpu]++;
- continue;
+ if (!eq)
+ continue;
+ if (eq->q_mode || eq->q_flag & HBA_EQ_DELAY_CHK) {
+ eq->q_flag &= ~HBA_EQ_DELAY_CHK;
+ ena_delay[eq->last_cpu] = 1;
+ }
}
for_each_present_cpu(i) {
- if (phba->cfg_irq_chann > 1 && eqcnt[i] < 2)
- continue;
-
eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
-
- usdelay = (eqi->icnt / LPFC_IMAX_THRESHOLD) *
- LPFC_EQ_DELAY_STEP;
- if (usdelay > LPFC_MAX_AUTO_EQ_DELAY)
- usdelay = LPFC_MAX_AUTO_EQ_DELAY;
+ if (ena_delay[i]) {
+ usdelay = (eqi->icnt >> 10) * LPFC_EQ_DELAY_STEP;
+ if (usdelay > LPFC_MAX_AUTO_EQ_DELAY)
+ usdelay = LPFC_MAX_AUTO_EQ_DELAY;
+ } else {
+ usdelay = 0;
+ }
eqi->icnt = 0;
list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) {
- if (eq->last_cpu != i) {
+ if (unlikely(eq->last_cpu != i)) {
eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info,
eq->last_cpu);
list_move_tail(&eq->cpu_list, &eqi_new->list);
@@ -1309,7 +1293,7 @@ lpfc_hb_eq_delay_work(struct work_struct *work)
}
}
- kfree(eqcnt);
+ kfree(ena_delay);
requeue:
queue_delayed_work(phba->wq, &phba->eq_delay_work,
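With LPFC_IMAX_THRESHOLD removed, the coalescing delay is now usdelay = (icnt >> 10) * LPFC_EQ_DELAY_STEP (roughly interrupts per sample window divided by 1024, times 15us), clamped to LPFC_MAX_AUTO_EQ_DELAY, and applied only to EQs whose q_mode or HBA_EQ_DELAY_CHK flag asked for it. A standalone check of the arithmetic, using the constants defined earlier in this patch:

#include <stdint.h>
#include <stdio.h>

#define EQ_DELAY_STEP        15    /* LPFC_EQ_DELAY_STEP */
#define MAX_AUTO_EQ_DELAY   120    /* LPFC_MAX_AUTO_EQ_DELAY */

static uint32_t eq_delay_usecs(uint32_t icnt, int ena_delay)
{
	uint32_t usdelay;

	if (!ena_delay)
		return 0;                        /* coalescing not requested */

	usdelay = (icnt >> 10) * EQ_DELAY_STEP;  /* ~icnt/1024 * 15us */
	if (usdelay > MAX_AUTO_EQ_DELAY)
		usdelay = MAX_AUTO_EQ_DELAY;
	return usdelay;
}

int main(void)
{
	/* 3000 interrupts -> (3000>>10)*15 = 30us;
	 * 50000 interrupts -> (50000>>10)*15 = 720us, clamped to 120us.
	 */
	printf("%u %u %u\n", eq_delay_usecs(3000, 1),
	       eq_delay_usecs(50000, 1), eq_delay_usecs(4000, 0));
	return 0;
}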
@@ -1378,7 +1362,7 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
if (vports != NULL)
for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
lpfc_rcv_seq_check_edtov(vports[i]);
- lpfc_fdmi_num_disc_check(vports[i]);
+ lpfc_fdmi_change_check(vports[i]);
}
lpfc_destroy_vport_work_array(phba, vports);
@@ -1535,6 +1519,7 @@ lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
spin_unlock_irq(&phba->hbalock);
lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
+ lpfc_sli_flush_io_rings(phba);
lpfc_offline(phba);
lpfc_hba_down_post(phba);
lpfc_unblock_mgmt_io(phba);
@@ -1796,6 +1781,7 @@ lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
"2887 Reset Needed: Attempting Port "
"Recovery...\n");
lpfc_offline_prep(phba, mbx_action);
+ lpfc_sli_flush_io_rings(phba);
lpfc_offline(phba);
/* release interrupt for possible resource change */
lpfc_sli4_disable_intr(phba);
@@ -1915,7 +1901,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"7624 Firmware not ready: Failing UE recovery,"
" waited %dSec", i);
- lpfc_sli4_offline_eratt(phba);
+ phba->link_state = LPFC_HBA_ERROR;
break;
case LPFC_SLI_INTF_IF_TYPE_2:
@@ -1989,9 +1975,8 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
}
/* fall through for not able to recover */
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "3152 Unrecoverable error, bring the port "
- "offline\n");
- lpfc_sli4_offline_eratt(phba);
+ "3152 Unrecoverable error\n");
+ phba->link_state = LPFC_HBA_ERROR;
break;
case LPFC_SLI_INTF_IF_TYPE_1:
default:
@@ -2863,7 +2848,7 @@ lpfc_cleanup(struct lpfc_vport *vport)
&vport->fc_nodes, nlp_listp) {
lpfc_printf_vlog(ndlp->vport, KERN_ERR,
LOG_NODE,
- "0282 did:x%x ndlp:x%p "
+ "0282 did:x%x ndlp:x%px "
"usgmap:x%x refcnt:%d\n",
ndlp->nlp_DID, (void *)ndlp,
ndlp->nlp_usg_map,
@@ -3065,11 +3050,12 @@ lpfc_sli4_node_prep(struct lpfc_hba *phba)
continue;
}
ndlp->nlp_rpi = rpi;
- lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
- "0009 rpi:%x DID:%x "
- "flg:%x map:%x %p\n", ndlp->nlp_rpi,
- ndlp->nlp_DID, ndlp->nlp_flag,
- ndlp->nlp_usg_map, ndlp);
+ lpfc_printf_vlog(ndlp->vport, KERN_INFO,
+ LOG_NODE | LOG_DISCOVERY,
+ "0009 Assign RPI x%x to ndlp x%px "
+ "DID:x%06x flg:x%x map:x%x\n",
+ ndlp->nlp_rpi, ndlp, ndlp->nlp_DID,
+ ndlp->nlp_flag, ndlp->nlp_usg_map);
}
}
lpfc_destroy_vport_work_array(phba, vports);
@@ -3252,12 +3238,8 @@ static void lpfc_destroy_multixri_pools(struct lpfc_hba *phba)
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
lpfc_destroy_expedite_pool(phba);
- if (!(phba->pport->load_flag & FC_UNLOADING)) {
- lpfc_sli_flush_fcp_rings(phba);
-
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
- lpfc_sli_flush_nvme_rings(phba);
- }
+ if (!(phba->pport->load_flag & FC_UNLOADING))
+ lpfc_sli_flush_io_rings(phba);
hwq_count = phba->cfg_hdw_queue;
@@ -3403,6 +3385,8 @@ lpfc_online(struct lpfc_hba *phba)
if (phba->cfg_xri_rebalancing)
lpfc_create_multixri_pools(phba);
+ lpfc_cpuhp_add(phba);
+
lpfc_unblock_mgmt_io(phba);
return 0;
}
@@ -3469,10 +3453,15 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
list_for_each_entry_safe(ndlp, next_ndlp,
&vports[i]->fc_nodes,
nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp))
- continue;
- if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
+ if ((!NLP_CHK_NODE_ACT(ndlp)) ||
+ ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
+ /* Driver must assume RPI is invalid for
+ * any unused or inactive node.
+ */
+ ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
continue;
+ }
+
if (ndlp->nlp_type & NLP_FABRIC) {
lpfc_disc_state_machine(vports[i], ndlp,
NULL, NLP_EVT_DEVICE_RECOVERY);
@@ -3488,16 +3477,16 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
* comes back online.
*/
if (phba->sli_rev == LPFC_SLI_REV4) {
- lpfc_printf_vlog(ndlp->vport,
- KERN_INFO, LOG_NODE,
- "0011 lpfc_offline: "
- "ndlp:x%p did %x "
- "usgmap:x%x rpi:%x\n",
- ndlp, ndlp->nlp_DID,
- ndlp->nlp_usg_map,
- ndlp->nlp_rpi);
-
+ lpfc_printf_vlog(ndlp->vport, KERN_INFO,
+ LOG_NODE | LOG_DISCOVERY,
+ "0011 Free RPI x%x on "
+ "ndlp:x%px did x%x "
+ "usgmap:x%x\n",
+ ndlp->nlp_rpi, ndlp,
+ ndlp->nlp_DID,
+ ndlp->nlp_usg_map);
lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
+ ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
}
lpfc_unreg_rpi(vports[i], ndlp);
}
@@ -3561,6 +3550,7 @@ lpfc_offline(struct lpfc_hba *phba)
spin_unlock_irq(shost->host_lock);
}
lpfc_destroy_vport_work_array(phba, vports);
+ __lpfc_cpuhp_remove(phba);
if (phba->cfg_xri_rebalancing)
lpfc_destroy_multixri_pools(phba);
@@ -3636,6 +3626,9 @@ lpfc_io_free(struct lpfc_hba *phba)
qp->put_io_bufs--;
dma_pool_free(phba->lpfc_sg_dma_buf_pool,
lpfc_ncmd->data, lpfc_ncmd->dma_handle);
+ if (phba->cfg_xpsgl && !phba->nvmet_support)
+ lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
+ lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
kfree(lpfc_ncmd);
qp->total_io_bufs--;
}
@@ -3649,6 +3642,9 @@ lpfc_io_free(struct lpfc_hba *phba)
qp->get_io_bufs--;
dma_pool_free(phba->lpfc_sg_dma_buf_pool,
lpfc_ncmd->data, lpfc_ncmd->dma_handle);
+ if (phba->cfg_xpsgl && !phba->nvmet_support)
+ lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
+ lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
kfree(lpfc_ncmd);
qp->total_io_bufs--;
}
@@ -4097,18 +4093,9 @@ lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
LIST_HEAD(post_nblist);
LIST_HEAD(nvme_nblist);
- /* Sanity check to ensure our sizing is right for both SCSI and NVME */
- if (sizeof(struct lpfc_io_buf) > LPFC_COMMON_IO_BUF_SZ) {
- lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
- "6426 Common buffer size %zd exceeds %d\n",
- sizeof(struct lpfc_io_buf),
- LPFC_COMMON_IO_BUF_SZ);
- return 0;
- }
-
phba->sli4_hba.io_xri_cnt = 0;
for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
- lpfc_ncmd = kzalloc(LPFC_COMMON_IO_BUF_SZ, GFP_KERNEL);
+ lpfc_ncmd = kzalloc(sizeof(*lpfc_ncmd), GFP_KERNEL);
if (!lpfc_ncmd)
break;
/*
@@ -4124,22 +4111,30 @@ lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
break;
}
- /*
- * 4K Page alignment is CRITICAL to BlockGuard, double check
- * to be sure.
- */
- if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
- (((unsigned long)(lpfc_ncmd->data) &
- (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
- "3369 Memory alignment err: addr=%lx\n",
- (unsigned long)lpfc_ncmd->data);
- dma_pool_free(phba->lpfc_sg_dma_buf_pool,
- lpfc_ncmd->data, lpfc_ncmd->dma_handle);
- kfree(lpfc_ncmd);
- break;
+ if (phba->cfg_xpsgl && !phba->nvmet_support) {
+ INIT_LIST_HEAD(&lpfc_ncmd->dma_sgl_xtra_list);
+ } else {
+ /*
+ * 4K Page alignment is CRITICAL to BlockGuard, double
+ * check to be sure.
+ */
+ if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
+ (((unsigned long)(lpfc_ncmd->data) &
+ (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+ "3369 Memory alignment err: "
+ "addr=%lx\n",
+ (unsigned long)lpfc_ncmd->data);
+ dma_pool_free(phba->lpfc_sg_dma_buf_pool,
+ lpfc_ncmd->data,
+ lpfc_ncmd->dma_handle);
+ kfree(lpfc_ncmd);
+ break;
+ }
}
+ INIT_LIST_HEAD(&lpfc_ncmd->dma_cmd_rsp_list);
+
lxri = lpfc_sli4_next_xritag(phba);
if (lxri == NO_XRI) {
dma_pool_free(phba->lpfc_sg_dma_buf_pool,
@@ -4309,14 +4304,20 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
shost->max_cmd_len = 16;
if (phba->sli_rev == LPFC_SLI_REV4) {
- if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ)
- shost->nr_hw_queues = phba->cfg_hdw_queue;
- else
- shost->nr_hw_queues = phba->sli4_hba.num_present_cpu;
+ if (!phba->cfg_fcp_mq_threshold ||
+ phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue)
+ phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue;
+
+ shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(),
+ phba->cfg_fcp_mq_threshold);
shost->dma_boundary =
phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
- shost->sg_tablesize = phba->cfg_scsi_seg_cnt;
+
+ if (phba->cfg_xpsgl && !phba->nvmet_support)
+ shost->sg_tablesize = LPFC_MAX_SG_TABLESIZE;
+ else
+ shost->sg_tablesize = phba->cfg_scsi_seg_cnt;
} else
/* SLI-3 has a limited number of hardware queues (3),
* thus there is only one for FCP processing.
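For SLI-4 the host's nr_hw_queues is now min(2 * num_possible_nodes(), cfg_fcp_mq_threshold), with the threshold first clamped to cfg_hdw_queue (0 meaning "unset"). A standalone illustration of that clamping, with made-up values:

#include <stdio.h>

static int nr_hw_queues(int mq_threshold, int hdw_queue, int nodes)
{
	/* Mirror of the clamping above: 0 means "unset", and the threshold
	 * can never exceed the number of hardware queues.
	 */
	if (!mq_threshold || mq_threshold > hdw_queue)
		mq_threshold = hdw_queue;

	return (2 * nodes < mq_threshold) ? 2 * nodes : mq_threshold;
}

int main(void)
{
	/* threshold unset, 32 hdw queues, 2 NUMA nodes -> 4 queues;
	 * threshold 8, 32 hdw queues, 8 nodes -> 8 queues.
	 */
	printf("%d %d\n", nr_hw_queues(0, 32, 2), nr_hw_queues(8, 32, 8));
	return 0;
}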
@@ -5288,10 +5289,10 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
evt_type = bf_get(lpfc_trailer_type, acqe_sli);
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
- "2901 Async SLI event - Event Data1:x%08x Event Data2:"
- "x%08x SLI Event Type:%d\n",
+ "2901 Async SLI event - Type:%d, Event Data: x%08x "
+ "x%08x x%08x x%08x\n", evt_type,
acqe_sli->event_data1, acqe_sli->event_data2,
- evt_type);
+ acqe_sli->reserved, acqe_sli->trailer);
port_name = phba->Port[0];
if (port_name == 0x00)
@@ -5438,11 +5439,26 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
"Event Data1:x%08x Event Data2: x%08x\n",
acqe_sli->event_data1, acqe_sli->event_data2);
break;
+ case LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN:
+ /* Misconfigured WWN. Reports that the SLI Port is configured
+ * to use FA-WWN, but the attached device doesn't support it.
+ * No driver action is required.
+ * Event Data1 - N.A, Event Data2 - N.A
+ */
+ lpfc_log_msg(phba, KERN_WARNING, LOG_SLI,
+ "2699 Misconfigured FA-WWN - Attached device does "
+ "not support FA-WWN\n");
+ break;
+ case LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE:
+ /* EEPROM failure. No driver action is required */
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "2518 EEPROM failure - "
+ "Event Data1: x%08x Event Data2: x%08x\n",
+ acqe_sli->event_data1, acqe_sli->event_data2);
+ break;
default:
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
- "3193 Async SLI event - Event Data1:x%08x Event Data2:"
- "x%08x SLI Event Type:%d\n",
- acqe_sli->event_data1, acqe_sli->event_data2,
+ "3193 Unrecognized SLI event, type: 0x%x",
evt_type);
break;
}
@@ -5867,7 +5883,7 @@ void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
break;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "1804 Invalid asynchrous event code: "
+ "1804 Invalid asynchronous event code: "
"x%x\n", bf_get(lpfc_trailer_code,
&cq_event->cqe.mcqe_cmpl));
break;
@@ -5981,6 +5997,29 @@ static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
}
/**
+ * lpfc_cpumask_of_node_init - initializes cpumask of phba's NUMA node
+ * @phba: Pointer to HBA context object.
+ *
+ **/
+static void
+lpfc_cpumask_of_node_init(struct lpfc_hba *phba)
+{
+ unsigned int cpu, numa_node;
+ struct cpumask *numa_mask = &phba->sli4_hba.numa_mask;
+
+ cpumask_clear(numa_mask);
+
+ /* Check if we're a NUMA architecture */
+ numa_node = dev_to_node(&phba->pcidev->dev);
+ if (numa_node == NUMA_NO_NODE)
+ return;
+
+ for_each_possible_cpu(cpu)
+ if (cpu_to_node(cpu) == numa_node)
+ cpumask_set_cpu(cpu, numa_mask);
+}
+
+/**
* lpfc_enable_pci_dev - Enable a generic PCI device.
* @phba: pointer to lpfc hba data structure.
*
@@ -6334,6 +6373,24 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
return -ENOMEM;
+ phba->lpfc_sg_dma_buf_pool =
+ dma_pool_create("lpfc_sg_dma_buf_pool",
+ &phba->pcidev->dev, phba->cfg_sg_dma_buf_size,
+ BPL_ALIGN_SZ, 0);
+
+ if (!phba->lpfc_sg_dma_buf_pool)
+ goto fail_free_mem;
+
+ phba->lpfc_cmd_rsp_buf_pool =
+ dma_pool_create("lpfc_cmd_rsp_buf_pool",
+ &phba->pcidev->dev,
+ sizeof(struct fcp_cmnd) +
+ sizeof(struct fcp_rsp),
+ BPL_ALIGN_SZ, 0);
+
+ if (!phba->lpfc_cmd_rsp_buf_pool)
+ goto fail_free_dma_buf_pool;
+
/*
* Enable sr-iov virtual functions if supported and configured
* through the module parameter.
@@ -6352,6 +6409,13 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
}
return 0;
+
+fail_free_dma_buf_pool:
+ dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
+ phba->lpfc_sg_dma_buf_pool = NULL;
+fail_free_mem:
+ lpfc_mem_free(phba);
+ return -ENOMEM;
}
/**
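Buffers are later carved out of these two pools one command at a time. As a reminder of the dma_pool API used here, a minimal allocation/free sketch against the cmd/rsp pool could look like the code below; the function and variable names are placeholders, error handling is trimmed, and this is not the driver's actual code path.

/* Sketch only: draw one FCP cmd/rsp buffer from the pool created above and
 * return it. Assumes the driver's lpfc.h for struct lpfc_hba; "buf"/"pdma"
 * are placeholder names.
 */
#include <linux/dmapool.h>
#include <linux/gfp.h>
#include <linux/errno.h>

static int example_use_cmd_rsp_pool(struct lpfc_hba *phba)
{
	dma_addr_t pdma;
	void *buf;

	buf = dma_pool_alloc(phba->lpfc_cmd_rsp_buf_pool, GFP_KERNEL, &pdma);
	if (!buf)
		return -ENOMEM;

	/* ... build the FCP_CMND/FCP_RSP at buf, hand pdma to the WQE ... */

	dma_pool_free(phba->lpfc_cmd_rsp_buf_pool, buf, pdma);
	return 0;
}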
@@ -6396,8 +6460,9 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
u32 if_fam;
phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
- phba->sli4_hba.num_possible_cpu = num_possible_cpus();
+ phba->sli4_hba.num_possible_cpu = cpumask_last(cpu_possible_mask) + 1;
phba->sli4_hba.curr_disp_cpu = 0;
+ lpfc_cpumask_of_node_init(phba);
/* Get all the module params for configuring this host */
lpfc_get_cfgparam(phba);
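Sizing num_possible_cpu as cpumask_last(cpu_possible_mask) + 1 rather than num_possible_cpus() matters when the possible mask is sparse: arrays indexed by CPU id must cover the highest id, not just the population count. A tiny standalone illustration, treating the mask as a plain bit pattern:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical possible mask with CPUs {0, 2, 5} present. */
	uint32_t possible = (1u << 0) | (1u << 2) | (1u << 5);
	int count = __builtin_popcount(possible);  /* like num_possible_cpus() */
	int last = 31 - __builtin_clz(possible);   /* like cpumask_last()      */

	/* Arrays indexed by CPU id need last+1 slots, not count slots. */
	printf("count=%d, needed array size=%d\n", count, last + 1); /* 3 vs 6 */
	return 0;
}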
@@ -6412,6 +6477,11 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
if (rc)
return -ENODEV;
+ /* Allocate all driver workqueues here */
+
+ /* The lpfc_wq workqueue for deferred irq use */
+ phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
+
/*
* Initialize timers used by driver
*/
@@ -6446,102 +6516,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
* The WQ create will allocate the ring.
*/
- /*
- * 1 for cmd, 1 for rsp, NVME adds an extra one
- * for boundary conditions in its max_sgl_segment template.
- */
- extra = 2;
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
- extra++;
-
- /*
- * It doesn't matter what family our adapter is in, we are
- * limited to 2 Pages, 512 SGEs, for our SGL.
- * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
- */
- max_buf_size = (2 * SLI4_PAGE_SIZE);
-
- /*
- * Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size
- * used to create the sg_dma_buf_pool must be calculated.
- */
- if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
- /*
- * The scsi_buf for a T10-DIF I/O holds the FCP cmnd,
- * the FCP rsp, and a SGE. Sice we have no control
- * over how many protection segments the SCSI Layer
- * will hand us (ie: there could be one for every block
- * in the IO), just allocate enough SGEs to accomidate
- * our max amount and we need to limit lpfc_sg_seg_cnt
- * to minimize the risk of running out.
- */
- phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
- sizeof(struct fcp_rsp) + max_buf_size;
-
- /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
- phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
-
- /*
- * If supporting DIF, reduce the seg count for scsi to
- * allow room for the DIF sges.
- */
- if (phba->cfg_enable_bg &&
- phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF)
- phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF;
- else
- phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
-
- } else {
- /*
- * The scsi_buf for a regular I/O holds the FCP cmnd,
- * the FCP rsp, a SGE for each, and a SGE for up to
- * cfg_sg_seg_cnt data segments.
- */
- phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
- sizeof(struct fcp_rsp) +
- ((phba->cfg_sg_seg_cnt + extra) *
- sizeof(struct sli4_sge));
-
- /* Total SGEs for scsi_sg_list */
- phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
- phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
-
- /*
- * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only
- * need to post 1 page for the SGL.
- */
- }
-
- /* Limit to LPFC_MAX_NVME_SEG_CNT for NVME. */
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
- if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
- lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
- "6300 Reducing NVME sg segment "
- "cnt to %d\n",
- LPFC_MAX_NVME_SEG_CNT);
- phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
- } else
- phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
- }
-
- /* Initialize the host templates with the updated values. */
- lpfc_vport_template.sg_tablesize = phba->cfg_scsi_seg_cnt;
- lpfc_template.sg_tablesize = phba->cfg_scsi_seg_cnt;
- lpfc_template_no_hr.sg_tablesize = phba->cfg_scsi_seg_cnt;
-
- if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
- phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
- else
- phba->cfg_sg_dma_buf_size =
- SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
-
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
- "9087 sg_seg_cnt:%d dmabuf_size:%d "
- "total:%d scsi:%d nvme:%d\n",
- phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
- phba->cfg_total_seg_cnt, phba->cfg_scsi_seg_cnt,
- phba->cfg_nvme_seg_cnt);
-
/* Initialize buffer queue management fields */
INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list);
phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
@@ -6550,11 +6524,9 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
/*
* Initialize the SLI Layer to run with lpfc SLI4 HBAs.
*/
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
- /* Initialize the Abort scsi buffer list used by driver */
- spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
- INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
- }
+ /* Initialize the Abort buffer list used by driver */
+ spin_lock_init(&phba->sli4_hba.abts_io_buf_list_lock);
+ INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_io_buf_list);
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
/* Initialize the Abort nvme buffer list used by driver */
@@ -6762,6 +6734,131 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
}
}
+ /*
+ * 1 for cmd, 1 for rsp, NVME adds an extra one
+ * for boundary conditions in its max_sgl_segment template.
+ */
+ extra = 2;
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+ extra++;
+
+ /*
+ * It doesn't matter what family our adapter is in, we are
+ * limited to 2 Pages, 512 SGEs, for our SGL.
+ * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
+ */
+ max_buf_size = (2 * SLI4_PAGE_SIZE);
+
+ /*
+ * Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size
+ * used to create the sg_dma_buf_pool must be calculated.
+ */
+ if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
+ /* Both cfg_enable_bg and cfg_external_dif code paths */
+
+ /*
+ * The scsi_buf for a T10-DIF I/O holds the FCP cmnd,
+ * the FCP rsp, and a SGE. Since we have no control
+ * over how many protection segments the SCSI Layer
+ * will hand us (i.e. there could be one for every block
+ * in the IO), just allocate enough SGEs to accommodate
+ * our max amount and we need to limit lpfc_sg_seg_cnt
+ * to minimize the risk of running out.
+ */
+ phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
+ sizeof(struct fcp_rsp) + max_buf_size;
+
+ /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
+ phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
+
+ /*
+ * If supporting DIF, reduce the seg count for scsi to
+ * allow room for the DIF sges.
+ */
+ if (phba->cfg_enable_bg &&
+ phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF)
+ phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF;
+ else
+ phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
+
+ } else {
+ /*
+ * The scsi_buf for a regular I/O holds the FCP cmnd,
+ * the FCP rsp, a SGE for each, and a SGE for up to
+ * cfg_sg_seg_cnt data segments.
+ */
+ phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
+ sizeof(struct fcp_rsp) +
+ ((phba->cfg_sg_seg_cnt + extra) *
+ sizeof(struct sli4_sge));
+
+ /* Total SGEs for scsi_sg_list */
+ phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
+ phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
+
+ /*
+ * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only
+ * need to post 1 page for the SGL.
+ */
+ }
+
+ if (phba->cfg_xpsgl && !phba->nvmet_support)
+ phba->cfg_sg_dma_buf_size = LPFC_DEFAULT_XPSGL_SIZE;
+ else if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
+ phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
+ else
+ phba->cfg_sg_dma_buf_size =
+ SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
+
+ phba->border_sge_num = phba->cfg_sg_dma_buf_size /
+ sizeof(struct sli4_sge);
+
+ /* Limit to LPFC_MAX_NVME_SEG_CNT for NVME. */
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+ if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
+ "6300 Reducing NVME sg segment "
+ "cnt to %d\n",
+ LPFC_MAX_NVME_SEG_CNT);
+ phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
+ } else
+ phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
+ }
+
+ /* Initialize the host templates with the updated values. */
+ lpfc_vport_template.sg_tablesize = phba->cfg_scsi_seg_cnt;
+ lpfc_template.sg_tablesize = phba->cfg_scsi_seg_cnt;
+ lpfc_template_no_hr.sg_tablesize = phba->cfg_scsi_seg_cnt;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
+ "9087 sg_seg_cnt:%d dmabuf_size:%d "
+ "total:%d scsi:%d nvme:%d\n",
+ phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
+ phba->cfg_total_seg_cnt, phba->cfg_scsi_seg_cnt,
+ phba->cfg_nvme_seg_cnt);
+
+ if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE)
+ i = phba->cfg_sg_dma_buf_size;
+ else
+ i = SLI4_PAGE_SIZE;
+
+ phba->lpfc_sg_dma_buf_pool =
+ dma_pool_create("lpfc_sg_dma_buf_pool",
+ &phba->pcidev->dev,
+ phba->cfg_sg_dma_buf_size,
+ i, 0);
+ if (!phba->lpfc_sg_dma_buf_pool)
+ goto out_free_bsmbx;
+
+ phba->lpfc_cmd_rsp_buf_pool =
+ dma_pool_create("lpfc_cmd_rsp_buf_pool",
+ &phba->pcidev->dev,
+ sizeof(struct fcp_cmnd) +
+ sizeof(struct fcp_rsp),
+ i, 0);
+ if (!phba->lpfc_cmd_rsp_buf_pool)
+ goto out_free_sg_dma_buf;
+
mempool_free(mboxq, phba->mbox_mem_pool);
/* Verify OAS is supported */
@@ -6773,12 +6870,12 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
/* Verify all the SLI4 queues */
rc = lpfc_sli4_queue_verify(phba);
if (rc)
- goto out_free_bsmbx;
+ goto out_free_cmd_rsp_buf;
/* Create driver internal CQE event pool */
rc = lpfc_sli4_cq_event_pool_create(phba);
if (rc)
- goto out_free_bsmbx;
+ goto out_free_cmd_rsp_buf;
/* Initialize sgl lists per host */
lpfc_init_sgl_list(phba);
@@ -6869,6 +6966,12 @@ out_free_active_sgl:
lpfc_free_active_sgl(phba);
out_destroy_cq_event_pool:
lpfc_sli4_cq_event_pool_destroy(phba);
+out_free_cmd_rsp_buf:
+ dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool);
+ phba->lpfc_cmd_rsp_buf_pool = NULL;
+out_free_sg_dma_buf:
+ dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
+ phba->lpfc_sg_dma_buf_pool = NULL;
out_free_bsmbx:
lpfc_destroy_bootstrap_mbox(phba);
out_free_mem:
@@ -6895,6 +6998,7 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
phba->sli4_hba.num_possible_cpu = 0;
phba->sli4_hba.num_present_cpu = 0;
phba->sli4_hba.curr_disp_cpu = 0;
+ cpumask_clear(&phba->sli4_hba.numa_mask);
/* Free memory allocated for fast-path work queue handles */
kfree(phba->sli4_hba.hba_eq_hdl);
@@ -6995,12 +7099,6 @@ lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
return error;
}
- /* The lpfc_wq workqueue for deferred irq use, is only used for SLI4 */
- if (phba->sli_rev == LPFC_SLI_REV4)
- phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
- else
- phba->wq = NULL;
-
return 0;
}
@@ -7074,7 +7172,7 @@ lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
if (iocbq_entry == NULL) {
printk(KERN_ERR "%s: only allocated %d iocbs of "
"expected %d count. Unloading driver.\n",
- __func__, i, LPFC_IOCB_LIST_CNT);
+ __func__, i, iocb_count);
goto out_free_iocbq;
}
@@ -7493,18 +7591,10 @@ lpfc_create_shost(struct lpfc_hba *phba)
if (phba->nvmet_support) {
/* Only 1 vport (pport) will support NVME target */
- if (phba->txrdy_payload_pool == NULL) {
- phba->txrdy_payload_pool = dma_pool_create(
- "txrdy_pool", &phba->pcidev->dev,
- TXRDY_PAYLOAD_LEN, 16, 0);
- if (phba->txrdy_payload_pool) {
- phba->targetport = NULL;
- phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME;
- lpfc_printf_log(phba, KERN_INFO,
- LOG_INIT | LOG_NVME_DISC,
- "6076 NVME Target Found\n");
- }
- }
+ phba->targetport = NULL;
+ phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME;
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME_DISC,
+ "6076 NVME Target Found\n");
}
lpfc_debugfs_initialize(vport);
@@ -7561,7 +7651,6 @@ lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
uint32_t old_mask;
uint32_t old_guard;
- int pagecnt = 10;
if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"1478 Registering BlockGuard with the "
@@ -7598,56 +7687,6 @@ lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
"layer, Bad protection parameters: %d %d\n",
old_mask, old_guard);
}
-
- if (!_dump_buf_data) {
- while (pagecnt) {
- spin_lock_init(&_dump_buf_lock);
- _dump_buf_data =
- (char *) __get_free_pages(GFP_KERNEL, pagecnt);
- if (_dump_buf_data) {
- lpfc_printf_log(phba, KERN_ERR, LOG_BG,
- "9043 BLKGRD: allocated %d pages for "
- "_dump_buf_data at 0x%p\n",
- (1 << pagecnt), _dump_buf_data);
- _dump_buf_data_order = pagecnt;
- memset(_dump_buf_data, 0,
- ((1 << PAGE_SHIFT) << pagecnt));
- break;
- } else
- --pagecnt;
- }
- if (!_dump_buf_data_order)
- lpfc_printf_log(phba, KERN_ERR, LOG_BG,
- "9044 BLKGRD: ERROR unable to allocate "
- "memory for hexdump\n");
- } else
- lpfc_printf_log(phba, KERN_ERR, LOG_BG,
- "9045 BLKGRD: already allocated _dump_buf_data=0x%p"
- "\n", _dump_buf_data);
- if (!_dump_buf_dif) {
- while (pagecnt) {
- _dump_buf_dif =
- (char *) __get_free_pages(GFP_KERNEL, pagecnt);
- if (_dump_buf_dif) {
- lpfc_printf_log(phba, KERN_ERR, LOG_BG,
- "9046 BLKGRD: allocated %d pages for "
- "_dump_buf_dif at 0x%p\n",
- (1 << pagecnt), _dump_buf_dif);
- _dump_buf_dif_order = pagecnt;
- memset(_dump_buf_dif, 0,
- ((1 << PAGE_SHIFT) << pagecnt));
- break;
- } else
- --pagecnt;
- }
- if (!_dump_buf_dif_order)
- lpfc_printf_log(phba, KERN_ERR, LOG_BG,
- "9047 BLKGRD: ERROR unable to allocate "
- "memory for hexdump\n");
- } else
- lpfc_printf_log(phba, KERN_ERR, LOG_BG,
- "9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n",
- _dump_buf_dif);
}
/**
@@ -8234,6 +8273,86 @@ lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
}
+static const char * const lpfc_topo_to_str[] = {
+ "Loop then P2P",
+ "Loopback",
+ "P2P Only",
+ "Unsupported",
+ "Loop Only",
+ "Unsupported",
+ "P2P then Loop",
+};
+
+/**
+ * lpfc_map_topology - Map the topology read from READ_CONFIG
+ * @phba: pointer to lpfc hba data structure.
+ * @rd_config: pointer to read config data
+ *
+ * This routine is invoked to map the topology values as read
+ * from the read config mailbox command. If the persistent
+ * topology feature is supported, the firmware will provide the
+ * saved topology information to be used in INIT_LINK
+ *
+ **/
+#define LINK_FLAGS_DEF 0x0
+#define LINK_FLAGS_P2P 0x1
+#define LINK_FLAGS_LOOP 0x2
+static void
+lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config)
+{
+ u8 ptv, tf, pt;
+
+ ptv = bf_get(lpfc_mbx_rd_conf_ptv, rd_config);
+ tf = bf_get(lpfc_mbx_rd_conf_tf, rd_config);
+ pt = bf_get(lpfc_mbx_rd_conf_pt, rd_config);
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "2027 Read Config Data : ptv:0x%x, tf:0x%x pt:0x%x",
+ ptv, tf, pt);
+ if (!ptv) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "2019 FW does not support persistent topology "
+ "Using driver parameter defined value [%s]",
+ lpfc_topo_to_str[phba->cfg_topology]);
+ return;
+ }
+ /* FW supports persistent topology - override module parameter value */
+ phba->hba_flag |= HBA_PERSISTENT_TOPO;
+ switch (phba->pcidev->device) {
+ case PCI_DEVICE_ID_LANCER_G7_FC:
+ case PCI_DEVICE_ID_LANCER_G6_FC:
+ if (!tf) {
+ phba->cfg_topology = ((pt == LINK_FLAGS_LOOP)
+ ? FLAGS_TOPOLOGY_MODE_LOOP
+ : FLAGS_TOPOLOGY_MODE_PT_PT);
+ } else {
+ phba->hba_flag &= ~HBA_PERSISTENT_TOPO;
+ }
+ break;
+ default: /* G5 */
+ if (tf) {
+ /* If topology failover set - pt is '0' or '1' */
+ phba->cfg_topology = (pt ? FLAGS_TOPOLOGY_MODE_PT_LOOP :
+ FLAGS_TOPOLOGY_MODE_LOOP_PT);
+ } else {
+ phba->cfg_topology = ((pt == LINK_FLAGS_P2P)
+ ? FLAGS_TOPOLOGY_MODE_PT_PT
+ : FLAGS_TOPOLOGY_MODE_LOOP);
+ }
+ break;
+ }
+ if (phba->hba_flag & HBA_PERSISTENT_TOPO) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "2020 Using persistent topology value [%s]",
+ lpfc_topo_to_str[phba->cfg_topology]);
+ } else {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "2021 Invalid topology values from FW "
+ "Using driver parameter defined value [%s]",
+ lpfc_topo_to_str[phba->cfg_topology]);
+ }
+}
+
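Worked example of the mapping above for a G6/G7 adapter: with ptv=1, tf=0 and pt=LINK_FLAGS_LOOP the driver keeps HBA_PERSISTENT_TOPO set and forces loop topology; if tf is set, the persistent flag is dropped and the module parameter wins. A standalone mirror of just that branch follows; the enum values are stand-ins that merely mirror the index order of lpfc_topo_to_str above, not the driver's FLAGS_TOPOLOGY_MODE_* definitions.

#include <stdio.h>

enum topo { LOOP_PT, LOOPBACK, PT_PT, UNSUP1, LOOP, UNSUP2, PT_LOOP };

/* Standalone mirror of the G6/G7 branch: tf set means topology failover was
 * requested, which those adapters do not honor persistently.
 */
static int g7_persistent_topology(int tf, int pt, enum topo *out)
{
	if (tf)
		return 0;                 /* keep the module parameter value */
	*out = (pt == 2 /* LINK_FLAGS_LOOP */) ? LOOP : PT_PT;
	return 1;                         /* persistent topology honored */
}

int main(void)
{
	enum topo t = LOOP_PT;
	int persistent = g7_persistent_topology(0, 2, &t);

	/* ptv=1, tf=0, pt=LINK_FLAGS_LOOP -> persistent "Loop Only" (topo=4) */
	printf("persistent=%d topo=%d\n", persistent, t);
	return 0;
}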
/**
* lpfc_sli4_read_config - Get the config parameters.
* @phba: pointer to lpfc hba data structure.
@@ -8307,6 +8426,10 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
phba->sli4_hba.max_cfg_param.max_xri =
bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
+ /* Reduce resource usage in kdump environment */
+ if (is_kdump_kernel() &&
+ phba->sli4_hba.max_cfg_param.max_xri > 512)
+ phba->sli4_hba.max_cfg_param.max_xri = 512;
phba->sli4_hba.max_cfg_param.xri_base =
bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
phba->sli4_hba.max_cfg_param.max_vpi =
@@ -8341,6 +8464,7 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
(phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
phba->max_vports = phba->max_vpi;
+ lpfc_map_topology(phba, rd_config);
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"2003 cfg params Extents? %d "
"XRI(B:%d M:%d), "
@@ -8380,11 +8504,6 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
*/
qmin -= 4;
- /* If NVME is configured, double the number of CQ/WQs needed */
- if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
- !phba->nvmet_support)
- qmin /= 2;
-
/* Check to see if there is enough for NVME */
if ((phba->cfg_irq_chann > qmin) ||
(phba->cfg_hdw_queue > qmin)) {
@@ -8619,8 +8738,8 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
*/
if (phba->nvmet_support) {
- if (phba->cfg_irq_chann < phba->cfg_nvmet_mrq)
- phba->cfg_nvmet_mrq = phba->cfg_irq_chann;
+ if (phba->cfg_hdw_queue < phba->cfg_nvmet_mrq)
+ phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
}
@@ -8641,51 +8760,14 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
}
static int
-lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx)
+lpfc_alloc_io_wq_cq(struct lpfc_hba *phba, int idx)
{
struct lpfc_queue *qdesc;
+ u32 wqesize;
int cpu;
- cpu = lpfc_find_cpu_handle(phba, wqidx, LPFC_FIND_BY_HDWQ);
- qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
- phba->sli4_hba.cq_esize,
- LPFC_CQE_EXP_COUNT, cpu);
- if (!qdesc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0508 Failed allocate fast-path NVME CQ (%d)\n",
- wqidx);
- return 1;
- }
- qdesc->qe_valid = 1;
- qdesc->hdwq = wqidx;
- qdesc->chann = cpu;
- phba->sli4_hba.hdwq[wqidx].nvme_cq = qdesc;
-
- qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
- LPFC_WQE128_SIZE, LPFC_WQE_EXP_COUNT,
- cpu);
- if (!qdesc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0509 Failed allocate fast-path NVME WQ (%d)\n",
- wqidx);
- return 1;
- }
- qdesc->hdwq = wqidx;
- qdesc->chann = wqidx;
- phba->sli4_hba.hdwq[wqidx].nvme_wq = qdesc;
- list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
- return 0;
-}
-
-static int
-lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx)
-{
- struct lpfc_queue *qdesc;
- uint32_t wqesize;
- int cpu;
-
- cpu = lpfc_find_cpu_handle(phba, wqidx, LPFC_FIND_BY_HDWQ);
- /* Create Fast Path FCP CQs */
+ cpu = lpfc_find_cpu_handle(phba, idx, LPFC_FIND_BY_HDWQ);
+ /* Create Fast Path IO CQs */
if (phba->enab_exp_wqcq_pages)
/* Increase the CQ size when WQEs contain an embedded cdb */
qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
@@ -8698,15 +8780,15 @@ lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx)
phba->sli4_hba.cq_ecount, cpu);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0499 Failed allocate fast-path FCP CQ (%d)\n", wqidx);
+ "0499 Failed allocate fast-path IO CQ (%d)\n", idx);
return 1;
}
qdesc->qe_valid = 1;
- qdesc->hdwq = wqidx;
+ qdesc->hdwq = idx;
qdesc->chann = cpu;
- phba->sli4_hba.hdwq[wqidx].fcp_cq = qdesc;
+ phba->sli4_hba.hdwq[idx].io_cq = qdesc;
- /* Create Fast Path FCP WQs */
+ /* Create Fast Path IO WQs */
if (phba->enab_exp_wqcq_pages) {
/* Increase the WQ size when WQEs contain an embedded cdb */
wqesize = (phba->fcp_embed_io) ?
@@ -8721,13 +8803,13 @@ lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx)
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0503 Failed allocate fast-path FCP WQ (%d)\n",
- wqidx);
+ "0503 Failed allocate fast-path IO WQ (%d)\n",
+ idx);
return 1;
}
- qdesc->hdwq = wqidx;
- qdesc->chann = wqidx;
- phba->sli4_hba.hdwq[wqidx].fcp_wq = qdesc;
+ qdesc->hdwq = idx;
+ qdesc->chann = cpu;
+ phba->sli4_hba.hdwq[idx].io_wq = qdesc;
list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
return 0;
}
@@ -8791,12 +8873,13 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
qp->get_io_bufs = 0;
qp->put_io_bufs = 0;
qp->total_io_bufs = 0;
- spin_lock_init(&qp->abts_scsi_buf_list_lock);
- INIT_LIST_HEAD(&qp->lpfc_abts_scsi_buf_list);
+ spin_lock_init(&qp->abts_io_buf_list_lock);
+ INIT_LIST_HEAD(&qp->lpfc_abts_io_buf_list);
qp->abts_scsi_io_bufs = 0;
- spin_lock_init(&qp->abts_nvme_buf_list_lock);
- INIT_LIST_HEAD(&qp->lpfc_abts_nvme_buf_list);
qp->abts_nvme_io_bufs = 0;
+ INIT_LIST_HEAD(&qp->sgl_list);
+ INIT_LIST_HEAD(&qp->cmd_rsp_buf_list);
+ spin_lock_init(&qp->hdwq_lock);
}
}
@@ -8862,7 +8945,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
}
qdesc->qe_valid = 1;
qdesc->hdwq = cpup->hdwq;
- qdesc->chann = cpu; /* First CPU this EQ is affinitised to */
+ qdesc->chann = cpu; /* First CPU this EQ is affinitized to */
qdesc->last_cpu = qdesc->chann;
/* Save the allocated EQ in the Hardware Queue */
@@ -8893,41 +8976,31 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
qp->hba_eq = phba->sli4_hba.hdwq[eqcpup->hdwq].hba_eq;
}
- /* Allocate SCSI SLI4 CQ/WQs */
+ /* Allocate IO Path SLI4 CQ/WQs */
for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
- if (lpfc_alloc_fcp_wq_cq(phba, idx))
+ if (lpfc_alloc_io_wq_cq(phba, idx))
goto out_error;
}
- /* Allocate NVME SLI4 CQ/WQs */
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
- for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
- if (lpfc_alloc_nvme_wq_cq(phba, idx))
- goto out_error;
- }
-
- if (phba->nvmet_support) {
- for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
- cpu = lpfc_find_cpu_handle(phba, idx,
- LPFC_FIND_BY_HDWQ);
- qdesc = lpfc_sli4_queue_alloc(
- phba,
+ if (phba->nvmet_support) {
+ for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
+ cpu = lpfc_find_cpu_handle(phba, idx,
+ LPFC_FIND_BY_HDWQ);
+ qdesc = lpfc_sli4_queue_alloc(phba,
LPFC_DEFAULT_PAGE_SIZE,
phba->sli4_hba.cq_esize,
phba->sli4_hba.cq_ecount,
cpu);
- if (!qdesc) {
- lpfc_printf_log(
- phba, KERN_ERR, LOG_INIT,
+ if (!qdesc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3142 Failed allocate NVME "
"CQ Set (%d)\n", idx);
- goto out_error;
- }
- qdesc->qe_valid = 1;
- qdesc->hdwq = idx;
- qdesc->chann = cpu;
- phba->sli4_hba.nvmet_cqset[idx] = qdesc;
+ goto out_error;
}
+ qdesc->qe_valid = 1;
+ qdesc->hdwq = idx;
+ qdesc->chann = cpu;
+ phba->sli4_hba.nvmet_cqset[idx] = qdesc;
}
}
@@ -8958,7 +9031,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
goto out_error;
}
qdesc->qe_valid = 1;
- qdesc->chann = 0;
+ qdesc->chann = cpu;
phba->sli4_hba.els_cq = qdesc;
@@ -8976,7 +9049,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
"0505 Failed allocate slow-path MQ\n");
goto out_error;
}
- qdesc->chann = 0;
+ qdesc->chann = cpu;
phba->sli4_hba.mbx_wq = qdesc;
/*
@@ -8992,7 +9065,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
"0504 Failed allocate slow-path ELS WQ\n");
goto out_error;
}
- qdesc->chann = 0;
+ qdesc->chann = cpu;
phba->sli4_hba.els_wq = qdesc;
list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
@@ -9006,7 +9079,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
"6079 Failed allocate NVME LS CQ\n");
goto out_error;
}
- qdesc->chann = 0;
+ qdesc->chann = cpu;
qdesc->qe_valid = 1;
phba->sli4_hba.nvmels_cq = qdesc;
@@ -9019,7 +9092,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
"6080 Failed allocate NVME LS WQ\n");
goto out_error;
}
- qdesc->chann = 0;
+ qdesc->chann = cpu;
phba->sli4_hba.nvmels_wq = qdesc;
list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
}
@@ -9101,7 +9174,6 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
}
}
-#if defined(BUILD_NVME)
/* Clear NVME stats */
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
@@ -9109,7 +9181,6 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat));
}
}
-#endif
/* Clear SCSI stats */
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
@@ -9162,15 +9233,13 @@ lpfc_sli4_release_hdwq(struct lpfc_hba *phba)
/* Loop thru all Hardware Queues */
for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
/* Free the CQ/WQ corresponding to the Hardware Queue */
- lpfc_sli4_queue_free(hdwq[idx].fcp_cq);
- lpfc_sli4_queue_free(hdwq[idx].nvme_cq);
- lpfc_sli4_queue_free(hdwq[idx].fcp_wq);
- lpfc_sli4_queue_free(hdwq[idx].nvme_wq);
- hdwq[idx].hba_eq = NULL;
- hdwq[idx].fcp_cq = NULL;
- hdwq[idx].nvme_cq = NULL;
- hdwq[idx].fcp_wq = NULL;
- hdwq[idx].nvme_wq = NULL;
+ lpfc_sli4_queue_free(hdwq[idx].io_cq);
+ lpfc_sli4_queue_free(hdwq[idx].io_wq);
+ hdwq[idx].io_cq = NULL;
+ hdwq[idx].io_wq = NULL;
+ if (phba->cfg_xpsgl && !phba->nvmet_support)
+ lpfc_free_sgl_per_hdwq(phba, &hdwq[idx]);
+ lpfc_free_cmd_rsp_buf_per_hdwq(phba, &hdwq[idx]);
}
/* Loop thru all IRQ vectors */
for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
@@ -9210,6 +9279,8 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
}
spin_unlock_irq(&phba->hbalock);
+ lpfc_sli4_cleanup_poll_list(phba);
+
/* Release HBA eqs */
if (phba->sli4_hba.hdwq)
lpfc_sli4_release_hdwq(phba);
@@ -9370,8 +9441,7 @@ lpfc_setup_cq_lookup(struct lpfc_hba *phba)
list_for_each_entry(childq, &eq->child_list, list) {
if (childq->queue_id > phba->sli4_hba.cq_max)
continue;
- if ((childq->subtype == LPFC_FCP) ||
- (childq->subtype == LPFC_NVME))
+ if (childq->subtype == LPFC_IO)
phba->sli4_hba.cq_lookup[childq->queue_id] =
childq;
}
@@ -9497,31 +9567,6 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
}
/* Loop thru all Hardware Queues */
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
- for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
- cpu = lpfc_find_cpu_handle(phba, qidx,
- LPFC_FIND_BY_HDWQ);
- cpup = &phba->sli4_hba.cpu_map[cpu];
-
- /* Create the CQ/WQ corresponding to the
- * Hardware Queue
- */
- rc = lpfc_create_wq_cq(phba,
- phba->sli4_hba.hdwq[cpup->hdwq].hba_eq,
- qp[qidx].nvme_cq,
- qp[qidx].nvme_wq,
- &phba->sli4_hba.hdwq[qidx].nvme_cq_map,
- qidx, LPFC_NVME);
- if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "6123 Failed to setup fastpath "
- "NVME WQ/CQ (%d), rc = 0x%x\n",
- qidx, (uint32_t)rc);
- goto out_destroy;
- }
- }
- }
-
for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
cpu = lpfc_find_cpu_handle(phba, qidx, LPFC_FIND_BY_HDWQ);
cpup = &phba->sli4_hba.cpu_map[cpu];
@@ -9529,14 +9574,15 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
/* Create the CQ/WQ corresponding to the Hardware Queue */
rc = lpfc_create_wq_cq(phba,
phba->sli4_hba.hdwq[cpup->hdwq].hba_eq,
- qp[qidx].fcp_cq,
- qp[qidx].fcp_wq,
- &phba->sli4_hba.hdwq[qidx].fcp_cq_map,
- qidx, LPFC_FCP);
+ qp[qidx].io_cq,
+ qp[qidx].io_wq,
+ &phba->sli4_hba.hdwq[qidx].io_cq_map,
+ qidx,
+ LPFC_IO);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0535 Failed to setup fastpath "
- "FCP WQ/CQ (%d), rc = 0x%x\n",
+ "IO WQ/CQ (%d), rc = 0x%x\n",
qidx, (uint32_t)rc);
goto out_destroy;
}
@@ -9836,10 +9882,8 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
/* Destroy the CQ/WQ corresponding to Hardware Queue */
qp = &phba->sli4_hba.hdwq[qidx];
- lpfc_wq_destroy(phba, qp->fcp_wq);
- lpfc_wq_destroy(phba, qp->nvme_wq);
- lpfc_cq_destroy(phba, qp->fcp_cq);
- lpfc_cq_destroy(phba, qp->nvme_cq);
+ lpfc_wq_destroy(phba, qp->io_wq);
+ lpfc_cq_destroy(phba, qp->io_cq);
}
/* Loop thru all IRQ vectors */
for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
@@ -10397,6 +10441,8 @@ lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
case LPFC_SLI_INTF_IF_TYPE_6:
iounmap(phba->sli4_hba.drbl_regs_memmap_p);
iounmap(phba->sli4_hba.conf_regs_memmap_p);
+ if (phba->sli4_hba.dpp_regs_memmap_p)
+ iounmap(phba->sli4_hba.dpp_regs_memmap_p);
break;
case LPFC_SLI_INTF_IF_TYPE_1:
default:
@@ -10658,7 +10704,6 @@ lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match)
*/
if ((match == LPFC_FIND_BY_EQ) &&
(cpup->flag & LPFC_CPU_FIRST_IRQ) &&
- (cpup->irq != LPFC_VECTOR_MAP_EMPTY) &&
(cpup->eq == id))
return cpu;
@@ -10696,6 +10741,75 @@ lpfc_find_hyper(struct lpfc_hba *phba, int cpu,
}
#endif
+/*
+ * lpfc_assign_eq_map_info - Assigns eq for vector_map structure
+ * @phba: pointer to lpfc hba data structure.
+ * @eqidx: index for eq and irq vector
+ * @flag: flags to set for vector_map structure
+ * @cpu: cpu used to index vector_map structure
+ *
+ * The routine assigns eq info into vector_map structure
+ */
+static inline void
+lpfc_assign_eq_map_info(struct lpfc_hba *phba, uint16_t eqidx, uint16_t flag,
+ unsigned int cpu)
+{
+ struct lpfc_vector_map_info *cpup = &phba->sli4_hba.cpu_map[cpu];
+ struct lpfc_hba_eq_hdl *eqhdl = lpfc_get_eq_hdl(eqidx);
+
+ cpup->eq = eqidx;
+ cpup->flag |= flag;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "3336 Set Affinity: CPU %d irq %d eq %d flag x%x\n",
+ cpu, eqhdl->irq, cpup->eq, cpup->flag);
+}
+
+/**
+ * lpfc_cpu_map_array_init - Initialize cpu_map structure
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * The routine initializes the cpu_map array structure
+ */
+static void
+lpfc_cpu_map_array_init(struct lpfc_hba *phba)
+{
+ struct lpfc_vector_map_info *cpup;
+ struct lpfc_eq_intr_info *eqi;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ cpup = &phba->sli4_hba.cpu_map[cpu];
+ cpup->phys_id = LPFC_VECTOR_MAP_EMPTY;
+ cpup->core_id = LPFC_VECTOR_MAP_EMPTY;
+ cpup->hdwq = LPFC_VECTOR_MAP_EMPTY;
+ cpup->eq = LPFC_VECTOR_MAP_EMPTY;
+ cpup->flag = 0;
+ eqi = per_cpu_ptr(phba->sli4_hba.eq_info, cpu);
+ INIT_LIST_HEAD(&eqi->list);
+ eqi->icnt = 0;
+ }
+}
+
+/**
+ * lpfc_hba_eq_hdl_array_init - Initialize hba_eq_hdl structure
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * The routine initializes the hba_eq_hdl array structure
+ */
+static void
+lpfc_hba_eq_hdl_array_init(struct lpfc_hba *phba)
+{
+ struct lpfc_hba_eq_hdl *eqhdl;
+ int i;
+
+ for (i = 0; i < phba->cfg_irq_chann; i++) {
+ eqhdl = lpfc_get_eq_hdl(i);
+ eqhdl->irq = LPFC_VECTOR_MAP_EMPTY;
+ eqhdl->phba = phba;
+ }
+}
+
/**
* lpfc_cpu_affinity_check - Check vector CPU affinity mappings
* @phba: pointer to lpfc hba data structure.
@@ -10709,27 +10823,15 @@ lpfc_find_hyper(struct lpfc_hba *phba, int cpu,
static void
lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
{
- int i, cpu, idx, new_cpu, start_cpu, first_cpu;
+ int i, cpu, idx, next_idx, new_cpu, start_cpu, first_cpu;
int max_phys_id, min_phys_id;
int max_core_id, min_core_id;
struct lpfc_vector_map_info *cpup;
struct lpfc_vector_map_info *new_cpup;
- const struct cpumask *maskp;
#ifdef CONFIG_X86
struct cpuinfo_x86 *cpuinfo;
#endif
- /* Init cpu_map array */
- for_each_possible_cpu(cpu) {
- cpup = &phba->sli4_hba.cpu_map[cpu];
- cpup->phys_id = LPFC_VECTOR_MAP_EMPTY;
- cpup->core_id = LPFC_VECTOR_MAP_EMPTY;
- cpup->hdwq = LPFC_VECTOR_MAP_EMPTY;
- cpup->eq = LPFC_VECTOR_MAP_EMPTY;
- cpup->irq = LPFC_VECTOR_MAP_EMPTY;
- cpup->flag = 0;
- }
-
max_phys_id = 0;
min_phys_id = LPFC_VECTOR_MAP_EMPTY;
max_core_id = 0;
@@ -10751,8 +10853,8 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
#endif
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "3328 CPU physid %d coreid %d\n",
- cpup->phys_id, cpup->core_id);
+ "3328 CPU %d physid %d coreid %d flag x%x\n",
+ cpu, cpup->phys_id, cpup->core_id, cpup->flag);
if (cpup->phys_id > max_phys_id)
max_phys_id = cpup->phys_id;
@@ -10765,65 +10867,6 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
min_core_id = cpup->core_id;
}
- for_each_possible_cpu(i) {
- struct lpfc_eq_intr_info *eqi =
- per_cpu_ptr(phba->sli4_hba.eq_info, i);
-
- INIT_LIST_HEAD(&eqi->list);
- eqi->icnt = 0;
- }
-
- /* This loop sets up all CPUs that are affinitized with a
- * irq vector assigned to the driver. All affinitized CPUs
- * will get a link to that vectors IRQ and EQ.
- *
- * NULL affinity mask handling:
- * If irq count is greater than one, log an error message.
- * If the null mask is received for the first irq, find the
- * first present cpu, and assign the eq index to ensure at
- * least one EQ is assigned.
- */
- for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
- /* Get a CPU mask for all CPUs affinitized to this vector */
- maskp = pci_irq_get_affinity(phba->pcidev, idx);
- if (!maskp) {
- if (phba->cfg_irq_chann > 1)
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "3329 No affinity mask found "
- "for vector %d (%d)\n",
- idx, phba->cfg_irq_chann);
- if (!idx) {
- cpu = cpumask_first(cpu_present_mask);
- cpup = &phba->sli4_hba.cpu_map[cpu];
- cpup->eq = idx;
- cpup->irq = pci_irq_vector(phba->pcidev, idx);
- cpup->flag |= LPFC_CPU_FIRST_IRQ;
- }
- break;
- }
-
- i = 0;
- /* Loop through all CPUs associated with vector idx */
- for_each_cpu_and(cpu, maskp, cpu_present_mask) {
- /* Set the EQ index and IRQ for that vector */
- cpup = &phba->sli4_hba.cpu_map[cpu];
- cpup->eq = idx;
- cpup->irq = pci_irq_vector(phba->pcidev, idx);
-
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "3336 Set Affinity: CPU %d "
- "irq %d eq %d\n",
- cpu, cpup->irq, cpup->eq);
-
- /* If this is the first CPU thats assigned to this
- * vector, set LPFC_CPU_FIRST_IRQ.
- */
- if (!i)
- cpup->flag |= LPFC_CPU_FIRST_IRQ;
- i++;
- }
- }
-
/* After looking at each irq vector assigned to this pcidev, its
* possible to see that not ALL CPUs have been accounted for.
* Next we will set any unassigned (unaffinitized) cpu map
@@ -10849,7 +10892,7 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
- (new_cpup->irq != LPFC_VECTOR_MAP_EMPTY) &&
+ (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY) &&
(new_cpup->phys_id == cpup->phys_id))
goto found_same;
new_cpu = cpumask_next(
@@ -10862,7 +10905,6 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
found_same:
/* We found a matching phys_id, so copy the IRQ info */
cpup->eq = new_cpup->eq;
- cpup->irq = new_cpup->irq;
/* Bump start_cpu to the next slot to minmize the
* chance of having multiple unassigned CPU entries
@@ -10874,9 +10916,10 @@ found_same:
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"3337 Set Affinity: CPU %d "
- "irq %d from id %d same "
+ "eq %d from peer cpu %d same "
"phys_id (%d)\n",
- cpu, cpup->irq, new_cpu, cpup->phys_id);
+ cpu, cpup->eq, new_cpu,
+ cpup->phys_id);
}
}
@@ -10900,7 +10943,7 @@ found_same:
for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
- (new_cpup->irq != LPFC_VECTOR_MAP_EMPTY))
+ (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY))
goto found_any;
new_cpu = cpumask_next(
new_cpu, cpu_present_mask);
@@ -10910,13 +10953,12 @@ found_same:
/* We should never leave an entry unassigned */
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3339 Set Affinity: CPU %d "
- "irq %d UNASSIGNED\n",
- cpup->hdwq, cpup->irq);
+ "eq %d UNASSIGNED\n",
+ cpup->hdwq, cpup->eq);
continue;
found_any:
/* We found an available entry, copy the IRQ info */
cpup->eq = new_cpup->eq;
- cpup->irq = new_cpup->irq;
/* Bump start_cpu to the next slot to minmize the
* chance of having multiple unassigned CPU entries
@@ -10928,75 +10970,126 @@ found_any:
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"3338 Set Affinity: CPU %d "
- "irq %d from id %d (%d/%d)\n",
- cpu, cpup->irq, new_cpu,
+ "eq %d from peer cpu %d (%d/%d)\n",
+ cpu, cpup->eq, new_cpu,
new_cpup->phys_id, new_cpup->core_id);
}
}
- /* Finally we need to associate a hdwq with each cpu_map entry
+ /* Assign hdwq indices that are unique across all cpus in the map
+ * that are also FIRST_CPUs.
+ */
+ idx = 0;
+ for_each_present_cpu(cpu) {
+ cpup = &phba->sli4_hba.cpu_map[cpu];
+
+ /* Only FIRST IRQs get a hdwq index assignment. */
+ if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
+ continue;
+
+ /* 1 to 1, the first LPFC_CPU_FIRST_IRQ cpus to a unique hdwq */
+ cpup->hdwq = idx;
+ idx++;
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3333 Set Affinity: CPU %d (phys %d core %d): "
+ "hdwq %d eq %d flg x%x\n",
+ cpu, cpup->phys_id, cpup->core_id,
+ cpup->hdwq, cpup->eq, cpup->flag);
+ }
+ /* Associate a hdwq with each cpu_map entry
* This will be 1 to 1 - hdwq to cpu, unless there are less
* hardware queues then CPUs. For that case we will just round-robin
* the available hardware queues as they get assigned to CPUs.
+ * The next_idx is the idx from the FIRST_CPU loop above to account
+ * for irq_chann < hdwq. The idx is used for round-robin assignments
+ * and needs to start at 0.
*/
- idx = 0;
+ next_idx = idx;
start_cpu = 0;
+ idx = 0;
for_each_present_cpu(cpu) {
cpup = &phba->sli4_hba.cpu_map[cpu];
- if (idx >= phba->cfg_hdw_queue) {
- /* We need to reuse a Hardware Queue for another CPU,
- * so be smart about it and pick one that has its
- * IRQ/EQ mapped to the same phys_id (CPU package).
- * and core_id.
- */
- new_cpu = start_cpu;
- for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
- new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
- if ((new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY) &&
- (new_cpup->phys_id == cpup->phys_id) &&
- (new_cpup->core_id == cpup->core_id))
- goto found_hdwq;
- new_cpu = cpumask_next(
- new_cpu, cpu_present_mask);
- if (new_cpu == nr_cpumask_bits)
- new_cpu = first_cpu;
- }
- /* If we can't match both phys_id and core_id,
- * settle for just a phys_id match.
- */
- new_cpu = start_cpu;
- for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
- new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
- if ((new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY) &&
- (new_cpup->phys_id == cpup->phys_id))
- goto found_hdwq;
- new_cpu = cpumask_next(
- new_cpu, cpu_present_mask);
- if (new_cpu == nr_cpumask_bits)
- new_cpu = first_cpu;
+ /* FIRST cpus are already mapped. */
+ if (cpup->flag & LPFC_CPU_FIRST_IRQ)
+ continue;
+
+ /* If the cfg_irq_chann < cfg_hdw_queue, set the hdwq
+ * of the unassigned cpus to the next idx so that all
+ * hdw queues are fully utilized.
+ */
+ if (next_idx < phba->cfg_hdw_queue) {
+ cpup->hdwq = next_idx;
+ next_idx++;
+ continue;
+ }
+
+ /* Not a First CPU and all hdw_queues are used. Reuse a
+ * Hardware Queue for another CPU, so be smart about it
+ * and pick one that has its IRQ/EQ mapped to the same phys_id
+ * (CPU package) and core_id.
+ */
+ new_cpu = start_cpu;
+ for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+ new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
+ if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
+ new_cpup->phys_id == cpup->phys_id &&
+ new_cpup->core_id == cpup->core_id) {
+ goto found_hdwq;
}
+ new_cpu = cpumask_next(new_cpu, cpu_present_mask);
+ if (new_cpu == nr_cpumask_bits)
+ new_cpu = first_cpu;
+ }
- /* Otherwise just round robin on cfg_hdw_queue */
- cpup->hdwq = idx % phba->cfg_hdw_queue;
- goto logit;
-found_hdwq:
- /* We found an available entry, copy the IRQ info */
- start_cpu = cpumask_next(new_cpu, cpu_present_mask);
- if (start_cpu == nr_cpumask_bits)
- start_cpu = first_cpu;
- cpup->hdwq = new_cpup->hdwq;
- } else {
- /* 1 to 1, CPU to hdwq */
- cpup->hdwq = idx;
+ /* If we can't match both phys_id and core_id,
+ * settle for just a phys_id match.
+ */
+ new_cpu = start_cpu;
+ for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+ new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
+ if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
+ new_cpup->phys_id == cpup->phys_id)
+ goto found_hdwq;
+
+ new_cpu = cpumask_next(new_cpu, cpu_present_mask);
+ if (new_cpu == nr_cpumask_bits)
+ new_cpu = first_cpu;
}
-logit:
+
+ /* Otherwise just round robin on cfg_hdw_queue */
+ cpup->hdwq = idx % phba->cfg_hdw_queue;
+ idx++;
+ goto logit;
+ found_hdwq:
+ /* We found an available entry, copy the IRQ info */
+ start_cpu = cpumask_next(new_cpu, cpu_present_mask);
+ if (start_cpu == nr_cpumask_bits)
+ start_cpu = first_cpu;
+ cpup->hdwq = new_cpup->hdwq;
+ logit:
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3335 Set Affinity: CPU %d (phys %d core %d): "
- "hdwq %d eq %d irq %d flg x%x\n",
+ "hdwq %d eq %d flg x%x\n",
cpu, cpup->phys_id, cpup->core_id,
- cpup->hdwq, cpup->eq, cpup->irq, cpup->flag);
- idx++;
+ cpup->hdwq, cpup->eq, cpup->flag);
+ }
+
+ /*
+ * Initialize the cpu_map slots for not-present cpus in case
+ * a cpu is hot-added. Perform a simple hdwq round robin assignment.
+ */
+ idx = 0;
+ for_each_possible_cpu(cpu) {
+ cpup = &phba->sli4_hba.cpu_map[cpu];
+ if (cpup->hdwq != LPFC_VECTOR_MAP_EMPTY)
+ continue;
+
+ cpup->hdwq = idx++ % phba->cfg_hdw_queue;
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "3340 Set Affinity: not present "
+ "CPU %d hdwq %d\n",
+ cpu, cpup->hdwq);
}
/* The cpu_map array will be used later during initialization
@@ -11006,11 +11099,280 @@ logit:
}
/**
+ * lpfc_cpuhp_get_eq - collect the EQs that need polling when @cpu goes offline
+ *
+ * @phba: pointer to lpfc hba data structure.
+ * @cpu: cpu going offline
+ * @eqlist: list head used to collect the EQs that must be polled
+ */
+static void
+lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu,
+ struct list_head *eqlist)
+{
+ const struct cpumask *maskp;
+ struct lpfc_queue *eq;
+ cpumask_t tmp;
+ u16 idx;
+
+ for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
+ maskp = pci_irq_get_affinity(phba->pcidev, idx);
+ if (!maskp)
+ continue;
+ /*
+ * if the irq is not affinitized to the cpu going
+ * offline then we don't need to poll the eq attached
+ * to it.
+ */
+ if (!cpumask_and(&tmp, maskp, cpumask_of(cpu)))
+ continue;
+ /* get the cpus that are online and are affinitized
+ * to this irq vector.  If the count is more than 1
+ * then cpuhp is not going to shut down this vector.
+ * Since this cpu has not gone offline yet, we need >1.
+ */
+ cpumask_and(&tmp, maskp, cpu_online_mask);
+ if (cpumask_weight(&tmp) > 1)
+ continue;
+
+ /* Now that we have an irq to shut down, get the eq
+ * mapped to this irq.  Note: multiple hdwq's in
+ * the software can share an eq, but eventually
+ * only one eq will be mapped to this vector.
+ */
+ eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
+ list_add(&eq->_poll_list, eqlist);
+ }
+}
+
+static void __lpfc_cpuhp_remove(struct lpfc_hba *phba)
+{
+ if (phba->sli_rev != LPFC_SLI_REV4)
+ return;
+
+ cpuhp_state_remove_instance_nocalls(lpfc_cpuhp_state,
+ &phba->cpuhp);
+ /*
+ * unregistering the instance doesn't stop the polling
+ * timer. Wait for the poll timer to retire.
+ */
+ synchronize_rcu();
+ del_timer_sync(&phba->cpuhp_poll_timer);
+}
+
+static void lpfc_cpuhp_remove(struct lpfc_hba *phba)
+{
+ if (phba->pport->fc_flag & FC_OFFLINE_MODE)
+ return;
+
+ __lpfc_cpuhp_remove(phba);
+}
+
+static void lpfc_cpuhp_add(struct lpfc_hba *phba)
+{
+ if (phba->sli_rev != LPFC_SLI_REV4)
+ return;
+
+ rcu_read_lock();
+
+ if (!list_empty(&phba->poll_list)) {
+ timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
+ mod_timer(&phba->cpuhp_poll_timer,
+ jiffies + msecs_to_jiffies(LPFC_POLL_HB));
+ }
+
+ rcu_read_unlock();
+
+ cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state,
+ &phba->cpuhp);
+}
+
+static int __lpfc_cpuhp_checks(struct lpfc_hba *phba, int *retval)
+{
+ if (phba->pport->load_flag & FC_UNLOADING) {
+ *retval = -EAGAIN;
+ return true;
+ }
+
+ if (phba->sli_rev != LPFC_SLI_REV4) {
+ *retval = 0;
+ return true;
+ }
+
+ /* proceed with the hotplug */
+ return false;
+}
+
+/**
+ * lpfc_irq_set_aff - set IRQ affinity
+ * @eqhdl: EQ handle
+ * @cpu: cpu to set affinity
+ *
+ **/
+static inline void
+lpfc_irq_set_aff(struct lpfc_hba_eq_hdl *eqhdl, unsigned int cpu)
+{
+ cpumask_clear(&eqhdl->aff_mask);
+ cpumask_set_cpu(cpu, &eqhdl->aff_mask);
+ irq_set_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
+ irq_set_affinity_hint(eqhdl->irq, &eqhdl->aff_mask);
+}
+
+/**
+ * lpfc_irq_clear_aff - clear IRQ affinity
+ * @eqhdl: EQ handle
+ *
+ **/
+static inline void
+lpfc_irq_clear_aff(struct lpfc_hba_eq_hdl *eqhdl)
+{
+ cpumask_clear(&eqhdl->aff_mask);
+ irq_clear_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
+ irq_set_affinity_hint(eqhdl->irq, &eqhdl->aff_mask);
+}
+
+/**
+ * lpfc_irq_rebalance - rebalances IRQ affinity according to cpuhp event
+ * @phba: pointer to HBA context object.
+ * @cpu: cpu going offline/online
+ * @offline: true, cpu is going offline. false, cpu is coming online.
+ *
+ * If the cpu is going offline, we make a best-effort attempt to find the next
+ * online cpu on the phba's NUMA node and migrate all offlining IRQ affinities.
+ *
+ * If the cpu is coming online, reaffinitize the IRQ back to the onlining cpu.
+ *
+ * Note: Call only if cfg_irq_numa is enabled, otherwise rely on
+ * PCI_IRQ_AFFINITY to auto-manage IRQ affinity.
+ *
+ **/
+static void
+lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline)
+{
+ struct lpfc_vector_map_info *cpup;
+ struct cpumask *aff_mask;
+ unsigned int cpu_select, cpu_next, idx;
+ const struct cpumask *numa_mask;
+
+ if (!phba->cfg_irq_numa)
+ return;
+
+ numa_mask = &phba->sli4_hba.numa_mask;
+
+ if (!cpumask_test_cpu(cpu, numa_mask))
+ return;
+
+ cpup = &phba->sli4_hba.cpu_map[cpu];
+
+ if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
+ return;
+
+ if (offline) {
+ /* Find next online CPU on NUMA node */
+ cpu_next = cpumask_next_wrap(cpu, numa_mask, cpu, true);
+ cpu_select = lpfc_next_online_numa_cpu(numa_mask, cpu_next);
+
+ /* Found a valid CPU */
+ if ((cpu_select < nr_cpu_ids) && (cpu_select != cpu)) {
+ /* Go through each eqhdl and ensure offlining
+ * cpu aff_mask is migrated
+ */
+ for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
+ aff_mask = lpfc_get_aff_mask(idx);
+
+ /* Migrate affinity */
+ if (cpumask_test_cpu(cpu, aff_mask))
+ lpfc_irq_set_aff(lpfc_get_eq_hdl(idx),
+ cpu_select);
+ }
+ } else {
+ /* Rely on irqbalance if no online CPUs left on NUMA */
+ for (idx = 0; idx < phba->cfg_irq_chann; idx++)
+ lpfc_irq_clear_aff(lpfc_get_eq_hdl(idx));
+ }
+ } else {
+ /* Migrate affinity back to this CPU */
+ lpfc_irq_set_aff(lpfc_get_eq_hdl(cpup->eq), cpu);
+ }
+}
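A minimal sketch, assuming only standard cpumask helpers: the rebalance path above leans on lpfc_next_online_numa_cpu(), which is not part of this hunk, so the helper below (example_next_online_node_cpu, a hypothetical name) only illustrates the "next online cpu on the node, wrapping around" selection it relies on.

#include <linux/cpumask.h>

/* Illustrative only -- not the driver's lpfc_next_online_numa_cpu(). */
static unsigned int example_next_online_node_cpu(const struct cpumask *numa_mask,
						 unsigned int start)
{
	unsigned int cpu;

	/* first online cpu in numa_mask at or after 'start' */
	for_each_cpu_and(cpu, numa_mask, cpu_online_mask)
		if (cpu >= start)
			return cpu;

	/* wrap around: first online cpu on the node, or >= nr_cpu_ids if none */
	return cpumask_first_and(numa_mask, cpu_online_mask);
}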
+
+static int lpfc_cpu_offline(unsigned int cpu, struct hlist_node *node)
+{
+ struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
+ struct lpfc_queue *eq, *next;
+ LIST_HEAD(eqlist);
+ int retval;
+
+ if (!phba) {
+ WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
+ return 0;
+ }
+
+ if (__lpfc_cpuhp_checks(phba, &retval))
+ return retval;
+
+ lpfc_irq_rebalance(phba, cpu, true);
+
+ lpfc_cpuhp_get_eq(phba, cpu, &eqlist);
+
+ /* start polling on these eq's */
+ list_for_each_entry_safe(eq, next, &eqlist, _poll_list) {
+ list_del_init(&eq->_poll_list);
+ lpfc_sli4_start_polling(eq);
+ }
+
+ return 0;
+}
+
+static int lpfc_cpu_online(unsigned int cpu, struct hlist_node *node)
+{
+ struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
+ struct lpfc_queue *eq, *next;
+ unsigned int n;
+ int retval;
+
+ if (!phba) {
+ WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
+ return 0;
+ }
+
+ if (__lpfc_cpuhp_checks(phba, &retval))
+ return retval;
+
+ lpfc_irq_rebalance(phba, cpu, false);
+
+ list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list) {
+ n = lpfc_find_cpu_handle(phba, eq->hdwq, LPFC_FIND_BY_HDWQ);
+ if (n == cpu)
+ lpfc_sli4_stop_polling(eq);
+ }
+
+ return 0;
+}
+
+/**
* lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
* @phba: pointer to lpfc hba data structure.
*
* This routine is invoked to enable the MSI-X interrupt vectors to device
- * with SLI-4 interface spec.
+ * with SLI-4 interface spec. It also allocates MSI-X vectors and maps them
+ * to cpus on the system.
+ *
+ * When cfg_irq_numa is enabled, the driver limits the number of vectors
+ * allocated to the number of cpus on the adapter's numa node. The vectors are
+ * allocated without requesting OS affinity mapping. A vector will be
+ * allocated and assigned to each online and offline cpu. If the cpu is
+ * online, then affinity will be set to that cpu. If the cpu is offline, then
+ * affinity will be set to the nearest peer cpu within the numa node that is
+ * online. If there are no online cpus within the numa node, affinity is not
+ * assigned and the OS may do as it pleases. Note: this cpu vector affinity
+ * mapping is consistent with the way cpu online/offline is handled when
+ * cfg_irq_numa is configured.
+ *
+ * If numa mode is not enabled and more than 1 vector is allocated, then
+ * the driver relies on the managed irq interface where the OS assigns the
+ * vector-to-cpu affinity. The driver will then use that affinity mapping to
+ * set up its cpu mapping table.
*
* Return codes
* 0 - successful
@@ -11021,13 +11383,31 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
{
int vectors, rc, index;
char *name;
+ const struct cpumask *numa_mask = NULL;
+ unsigned int cpu = 0, cpu_cnt = 0, cpu_select = nr_cpu_ids;
+ struct lpfc_hba_eq_hdl *eqhdl;
+ const struct cpumask *maskp;
+ bool first;
+ unsigned int flags = PCI_IRQ_MSIX;
/* Set up MSI-X multi-message vectors */
vectors = phba->cfg_irq_chann;
- rc = pci_alloc_irq_vectors(phba->pcidev,
- 1,
- vectors, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
+ if (phba->cfg_irq_numa) {
+ numa_mask = &phba->sli4_hba.numa_mask;
+ cpu_cnt = cpumask_weight(numa_mask);
+ vectors = min(phba->cfg_irq_chann, cpu_cnt);
+
+ /* cpu: iterates over numa_mask, both offline and online cpus
+ * cpu_select: iterates over the online cpus in numa_mask to set affinity
+ */
+ cpu = cpumask_first(numa_mask);
+ cpu_select = lpfc_next_online_numa_cpu(numa_mask, cpu);
+ } else {
+ flags |= PCI_IRQ_AFFINITY;
+ }
+
+ rc = pci_alloc_irq_vectors(phba->pcidev, 1, vectors, flags);
if (rc < 0) {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0484 PCI enable MSI-X failed (%d)\n", rc);
@@ -11037,23 +11417,61 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
/* Assign MSI-X vectors to interrupt handlers */
for (index = 0; index < vectors; index++) {
- name = phba->sli4_hba.hba_eq_hdl[index].handler_name;
+ eqhdl = lpfc_get_eq_hdl(index);
+ name = eqhdl->handler_name;
memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ);
snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ,
LPFC_DRIVER_HANDLER_NAME"%d", index);
- phba->sli4_hba.hba_eq_hdl[index].idx = index;
- phba->sli4_hba.hba_eq_hdl[index].phba = phba;
+ eqhdl->idx = index;
rc = request_irq(pci_irq_vector(phba->pcidev, index),
&lpfc_sli4_hba_intr_handler, 0,
- name,
- &phba->sli4_hba.hba_eq_hdl[index]);
+ name, eqhdl);
if (rc) {
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"0486 MSI-X fast-path (%d) "
"request_irq failed (%d)\n", index, rc);
goto cfg_fail_out;
}
+
+ eqhdl->irq = pci_irq_vector(phba->pcidev, index);
+
+ if (phba->cfg_irq_numa) {
+ /* If found a neighboring online cpu, set affinity */
+ if (cpu_select < nr_cpu_ids)
+ lpfc_irq_set_aff(eqhdl, cpu_select);
+
+ /* Assign EQ to cpu_map */
+ lpfc_assign_eq_map_info(phba, index,
+ LPFC_CPU_FIRST_IRQ,
+ cpu);
+
+ /* Iterate to next offline or online cpu in numa_mask */
+ cpu = cpumask_next(cpu, numa_mask);
+
+ /* Find next online cpu in numa_mask to set affinity */
+ cpu_select = lpfc_next_online_numa_cpu(numa_mask, cpu);
+ } else if (vectors == 1) {
+ cpu = cpumask_first(cpu_present_mask);
+ lpfc_assign_eq_map_info(phba, index, LPFC_CPU_FIRST_IRQ,
+ cpu);
+ } else {
+ maskp = pci_irq_get_affinity(phba->pcidev, index);
+
+ first = true;
+ /* Loop through all CPUs associated with vector index */
+ for_each_cpu_and(cpu, maskp, cpu_present_mask) {
+ /* If this is the first CPU that's assigned to
+ * this vector, set LPFC_CPU_FIRST_IRQ.
+ */
+ lpfc_assign_eq_map_info(phba, index,
+ first ?
+ LPFC_CPU_FIRST_IRQ : 0,
+ cpu);
+ if (first)
+ first = false;
+ }
+ }
}
if (vectors != phba->cfg_irq_chann) {
@@ -11063,17 +11481,18 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
phba->cfg_irq_chann, vectors);
if (phba->cfg_irq_chann > vectors)
phba->cfg_irq_chann = vectors;
- if (phba->nvmet_support && (phba->cfg_nvmet_mrq > vectors))
- phba->cfg_nvmet_mrq = vectors;
}
return rc;
cfg_fail_out:
/* free the irq already requested */
- for (--index; index >= 0; index--)
- free_irq(pci_irq_vector(phba->pcidev, index),
- &phba->sli4_hba.hba_eq_hdl[index]);
+ for (--index; index >= 0; index--) {
+ eqhdl = lpfc_get_eq_hdl(index);
+ lpfc_irq_clear_aff(eqhdl);
+ irq_set_affinity_hint(eqhdl->irq, NULL);
+ free_irq(eqhdl->irq, eqhdl);
+ }
/* Unconfigure MSI-X capability structure */
pci_free_irq_vectors(phba->pcidev);
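A hedged sketch of the two allocation policies described in the function header above (example_alloc_msix is a hypothetical helper, not driver code): with irq_numa the vector count is capped at the node's cpu count and affinity is driver-managed, otherwise PCI_IRQ_AFFINITY lets the PCI core spread the vectors.

#include <linux/pci.h>
#include <linux/cpumask.h>

static int example_alloc_msix(struct pci_dev *pdev, int want,
			      const struct cpumask *node_mask, bool irq_numa)
{
	unsigned int flags = PCI_IRQ_MSIX;
	int nvec = want;

	if (irq_numa) {
		int node_cpus = cpumask_weight(node_mask);

		/* never ask for more vectors than cpus on the node */
		if (node_cpus < nvec)
			nvec = node_cpus;
	} else {
		flags |= PCI_IRQ_AFFINITY;	/* kernel-managed spreading */
	}

	return pci_alloc_irq_vectors(pdev, 1, nvec, flags);
}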
@@ -11087,10 +11506,10 @@ vec_fail_out:
* @phba: pointer to lpfc hba data structure.
*
* This routine is invoked to enable the MSI interrupt mode to device with
- * SLI-4 interface spec. The kernel function pci_enable_msi() is called
- * to enable the MSI vector. The device driver is responsible for calling
- * the request_irq() to register MSI vector with a interrupt the handler,
- * which is done in this function.
+ * SLI-4 interface spec. The kernel function pci_alloc_irq_vectors() is
+ * called to enable the MSI vector. The device driver is responsible for
+ * calling request_irq() to register the MSI vector with an interrupt
+ * handler, which is done in this function.
*
* Return codes
* 0 - successful
@@ -11100,29 +11519,38 @@ static int
lpfc_sli4_enable_msi(struct lpfc_hba *phba)
{
int rc, index;
+ unsigned int cpu;
+ struct lpfc_hba_eq_hdl *eqhdl;
- rc = pci_enable_msi(phba->pcidev);
- if (!rc)
+ rc = pci_alloc_irq_vectors(phba->pcidev, 1, 1,
+ PCI_IRQ_MSI | PCI_IRQ_AFFINITY);
+ if (rc > 0)
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0487 PCI enable MSI mode success.\n");
else {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0488 PCI enable MSI mode failed (%d)\n", rc);
- return rc;
+ return rc ? rc : -1;
}
rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
0, LPFC_DRIVER_NAME, phba);
if (rc) {
- pci_disable_msi(phba->pcidev);
+ pci_free_irq_vectors(phba->pcidev);
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"0490 MSI request_irq failed (%d)\n", rc);
return rc;
}
+ eqhdl = lpfc_get_eq_hdl(0);
+ eqhdl->irq = pci_irq_vector(phba->pcidev, 0);
+
+ cpu = cpumask_first(cpu_present_mask);
+ lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, cpu);
+
for (index = 0; index < phba->cfg_irq_chann; index++) {
- phba->sli4_hba.hba_eq_hdl[index].idx = index;
- phba->sli4_hba.hba_eq_hdl[index].phba = phba;
+ eqhdl = lpfc_get_eq_hdl(index);
+ eqhdl->idx = index;
}
return 0;
@@ -11180,15 +11608,21 @@ lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
IRQF_SHARED, LPFC_DRIVER_NAME, phba);
if (!retval) {
struct lpfc_hba_eq_hdl *eqhdl;
+ unsigned int cpu;
/* Indicate initialization to INTx mode */
phba->intr_type = INTx;
intr_mode = 0;
+ eqhdl = lpfc_get_eq_hdl(0);
+ eqhdl->irq = pci_irq_vector(phba->pcidev, 0);
+
+ cpu = cpumask_first(cpu_present_mask);
+ lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ,
+ cpu);
for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
- eqhdl = &phba->sli4_hba.hba_eq_hdl[idx];
+ eqhdl = lpfc_get_eq_hdl(idx);
eqhdl->idx = idx;
- eqhdl->phba = phba;
}
}
}
@@ -11210,14 +11644,14 @@ lpfc_sli4_disable_intr(struct lpfc_hba *phba)
/* Disable the currently initialized interrupt mode */
if (phba->intr_type == MSIX) {
int index;
+ struct lpfc_hba_eq_hdl *eqhdl;
/* Free up MSI-X multi-message vectors */
for (index = 0; index < phba->cfg_irq_chann; index++) {
- irq_set_affinity_hint(
- pci_irq_vector(phba->pcidev, index),
- NULL);
- free_irq(pci_irq_vector(phba->pcidev, index),
- &phba->sli4_hba.hba_eq_hdl[index]);
+ eqhdl = lpfc_get_eq_hdl(index);
+ lpfc_irq_clear_aff(eqhdl);
+ irq_set_affinity_hint(eqhdl->irq, NULL);
+ free_irq(eqhdl->irq, eqhdl);
}
} else {
free_irq(phba->pcidev->irq, phba);
@@ -11280,11 +11714,10 @@ static void
lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
{
struct lpfc_sli4_hdw_queue *qp;
- int idx, ccnt, fcnt;
+ int idx, ccnt;
int wait_time = 0;
int io_xri_cmpl = 1;
int nvmet_xri_cmpl = 1;
- int fcp_xri_cmpl = 1;
int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
/* Driver just aborted IOs during the hba_unset process. Pause
@@ -11298,32 +11731,21 @@ lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
lpfc_nvme_wait_for_io_drain(phba);
ccnt = 0;
- fcnt = 0;
for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
qp = &phba->sli4_hba.hdwq[idx];
- fcp_xri_cmpl = list_empty(
- &qp->lpfc_abts_scsi_buf_list);
- if (!fcp_xri_cmpl) /* if list is NOT empty */
- fcnt++;
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
- io_xri_cmpl = list_empty(
- &qp->lpfc_abts_nvme_buf_list);
- if (!io_xri_cmpl) /* if list is NOT empty */
- ccnt++;
- }
+ io_xri_cmpl = list_empty(&qp->lpfc_abts_io_buf_list);
+ if (!io_xri_cmpl) /* if list is NOT empty */
+ ccnt++;
}
if (ccnt)
io_xri_cmpl = 0;
- if (fcnt)
- fcp_xri_cmpl = 0;
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
nvmet_xri_cmpl =
list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
}
- while (!fcp_xri_cmpl || !els_xri_cmpl || !io_xri_cmpl ||
- !nvmet_xri_cmpl) {
+ while (!els_xri_cmpl || !io_xri_cmpl || !nvmet_xri_cmpl) {
if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
if (!nvmet_xri_cmpl)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -11332,12 +11754,7 @@ lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
wait_time/1000);
if (!io_xri_cmpl)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "6100 NVME XRI exchange busy "
- "wait time: %d seconds.\n",
- wait_time/1000);
- if (!fcp_xri_cmpl)
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2877 FCP XRI exchange busy "
+ "6100 IO XRI exchange busy "
"wait time: %d seconds.\n",
wait_time/1000);
if (!els_xri_cmpl)
@@ -11353,24 +11770,15 @@ lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
}
ccnt = 0;
- fcnt = 0;
for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
qp = &phba->sli4_hba.hdwq[idx];
- fcp_xri_cmpl = list_empty(
- &qp->lpfc_abts_scsi_buf_list);
- if (!fcp_xri_cmpl) /* if list is NOT empty */
- fcnt++;
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
- io_xri_cmpl = list_empty(
- &qp->lpfc_abts_nvme_buf_list);
- if (!io_xri_cmpl) /* if list is NOT empty */
- ccnt++;
- }
+ io_xri_cmpl = list_empty(
+ &qp->lpfc_abts_io_buf_list);
+ if (!io_xri_cmpl) /* if list is NOT empty */
+ ccnt++;
}
if (ccnt)
io_xri_cmpl = 0;
- if (fcnt)
- fcp_xri_cmpl = 0;
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
nvmet_xri_cmpl = list_empty(
@@ -11435,6 +11843,9 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
/* Wait for completion of device XRI exchange busy */
lpfc_sli4_xri_exchange_busy_wait(phba);
+ /* per-phba callback de-registration for hotplug event */
+ lpfc_cpuhp_remove(phba);
+
/* Disable PCI subsystem interrupt */
lpfc_sli4_disable_intr(phba);
@@ -11606,6 +12017,7 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters);
sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters);
+ sli4_params->pls = bf_get(cfg_pvl, mbx_sli4_parameters);
sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
mbx_sli4_parameters);
sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters);
@@ -11614,6 +12026,9 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
+ /* Check for Extended Pre-Registered SGL support */
+ phba->cfg_xpsgl = bf_get(cfg_xpsgl, mbx_sli4_parameters);
+
/* Check for firmware nvme support */
rc = (bf_get(cfg_nvme, mbx_sli4_parameters) &&
bf_get(cfg_xib, mbx_sli4_parameters));
@@ -11644,6 +12059,7 @@ fcponly:
phba->nvme_support = 0;
phba->nvmet_support = 0;
phba->cfg_nvmet_mrq = 0;
+ phba->cfg_nvme_seg_cnt = 0;
/* If no FC4 type support, move to just SCSI support */
if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
@@ -11652,6 +12068,12 @@ fcponly:
}
}
+ /* If the NVME FC4 type is enabled, scale the sg_seg_cnt to
+ * accommodate 512K and 1M IOs in a single nvme buf.
+ */
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+ phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
+
/* Only embed PBDE for if_type 6, PBDE support requires xib be set */
if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
LPFC_SLI_INTF_IF_TYPE_6) || (!bf_get(cfg_xib, mbx_sli4_parameters)))
@@ -11716,6 +12138,14 @@ fcponly:
else
phba->mds_diags_support = 0;
+ /*
+ * Check if the SLI port supports NSLER
+ */
+ if (bf_get(cfg_nsler, mbx_sli4_parameters))
+ phba->nsler = 1;
+ else
+ phba->nsler = 0;
+
return 0;
}
@@ -12144,7 +12574,7 @@ lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
lpfc_scsi_dev_block(phba);
/* Flush all driver's outstanding SCSI I/Os as we are to reset */
- lpfc_sli_flush_fcp_rings(phba);
+ lpfc_sli_flush_io_rings(phba);
/* stop all timers */
lpfc_stop_hba_timers(phba);
@@ -12174,7 +12604,7 @@ lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
lpfc_stop_hba_timers(phba);
/* Clean up all driver's outstanding SCSI I/Os */
- lpfc_sli_flush_fcp_rings(phba);
+ lpfc_sli_flush_io_rings(phba);
}
/**
@@ -12359,35 +12789,57 @@ lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba)
}
-static void
+static int
lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset,
uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize,
const struct firmware *fw)
{
- if ((offset == ADD_STATUS_FW_NOT_SUPPORTED) ||
+ int rc;
+
+ /* Three cases: (1) FW was not supported on the detected adapter.
+ * (2) FW update has been locked out administratively.
+ * (3) Some other error during FW update.
+ * In each case, an unmaskable message is written to the console
+ * for admin diagnosis.
+ */
+ if (offset == ADD_STATUS_FW_NOT_SUPPORTED ||
(phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC &&
- magic_number != MAGIC_NUMER_G6) ||
+ magic_number != MAGIC_NUMBER_G6) ||
(phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC &&
- magic_number != MAGIC_NUMER_G7))
+ magic_number != MAGIC_NUMBER_G7)) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "3030 This firmware version is not supported on "
- "this HBA model. Device:%x Magic:%x Type:%x "
- "ID:%x Size %d %zd\n",
- phba->pcidev->device, magic_number, ftype, fid,
- fsize, fw->size);
- else
+ "3030 This firmware version is not supported on"
+ " this HBA model. Device:%x Magic:%x Type:%x "
+ "ID:%x Size %d %zd\n",
+ phba->pcidev->device, magic_number, ftype, fid,
+ fsize, fw->size);
+ rc = -EINVAL;
+ } else if (offset == ADD_STATUS_FW_DOWNLOAD_HW_DISABLED) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3021 Firmware downloads have been prohibited "
+ "by a system configuration setting on "
+ "Device:%x Magic:%x Type:%x ID:%x Size %d "
+ "%zd\n",
+ phba->pcidev->device, magic_number, ftype, fid,
+ fsize, fw->size);
+ rc = -EACCES;
+ } else {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "3022 FW Download failed. Device:%x Magic:%x Type:%x "
- "ID:%x Size %d %zd\n",
- phba->pcidev->device, magic_number, ftype, fid,
- fsize, fw->size);
+ "3022 FW Download failed. Add Status x%x "
+ "Device:%x Magic:%x Type:%x ID:%x Size %d "
+ "%zd\n",
+ offset, phba->pcidev->device, magic_number,
+ ftype, fid, fsize, fw->size);
+ rc = -EIO;
+ }
+ return rc;
}
-
/**
* lpfc_write_firmware - attempt to write a firmware image to the port
* @fw: pointer to firmware image returned from request_firmware.
- * @phba: pointer to lpfc hba data structure.
+ * @context: pointer to the lpfc hba data structure (passed through as void *).
*
**/
static void
@@ -12456,8 +12908,12 @@ lpfc_write_firmware(const struct firmware *fw, void *context)
rc = lpfc_wr_object(phba, &dma_buffer_list,
(fw->size - offset), &offset);
if (rc) {
- lpfc_log_write_firmware_error(phba, offset,
- magic_number, ftype, fid, fsize, fw);
+ rc = lpfc_log_write_firmware_error(phba, offset,
+ magic_number,
+ ftype,
+ fid,
+ fsize,
+ fw);
goto release_out;
}
}
@@ -12477,9 +12933,12 @@ release_out:
}
release_firmware(fw);
out:
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "3024 Firmware update done: %d.\n", rc);
- return;
+ if (rc < 0)
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3062 Firmware update error, status %d.\n", rc);
+ else
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3024 Firmware update success: size %d.\n", rc);
}
/**
@@ -12598,6 +13057,12 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
phba->pport = NULL;
lpfc_stop_port(phba);
+ /* Init cpu_map array */
+ lpfc_cpu_map_array_init(phba);
+
+ /* Init hba_eq_hdl array */
+ lpfc_hba_eq_hdl_array_init(phba);
+
/* Configure and enable interrupt */
intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
if (intr_mode == LPFC_INTR_ERROR) {
@@ -12679,6 +13144,9 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
/* Enable RAS FW log support */
lpfc_sli4_ras_setup(phba);
+ INIT_LIST_HEAD(&phba->poll_list);
+ cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp);
+
return 0;
out_free_sysfs_attr:
@@ -12946,12 +13414,8 @@ lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
/* Block all SCSI devices' I/Os on the host */
lpfc_scsi_dev_block(phba);
- /* Flush all driver's outstanding SCSI I/Os as we are to reset */
- lpfc_sli_flush_fcp_rings(phba);
-
- /* Flush the outstanding NVME IOs if fc4 type enabled. */
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
- lpfc_sli_flush_nvme_rings(phba);
+ /* Flush all driver's outstanding I/Os as we are to reset */
+ lpfc_sli_flush_io_rings(phba);
/* stop all timers */
lpfc_stop_hba_timers(phba);
@@ -12982,12 +13446,8 @@ lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
/* stop all timers */
lpfc_stop_hba_timers(phba);
- /* Clean up all driver's outstanding SCSI I/Os */
- lpfc_sli_flush_fcp_rings(phba);
-
- /* Flush the outstanding NVME IOs if fc4 type enabled. */
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
- lpfc_sli_flush_nvme_rings(phba);
+ /* Clean up all driver's outstanding I/Os */
+ lpfc_sli_flush_io_rings(phba);
}
/**
@@ -13399,8 +13859,7 @@ lpfc_sli4_oas_verify(struct lpfc_hba *phba)
phba->cfg_fof = 1;
} else {
phba->cfg_fof = 0;
- if (phba->device_data_mem_pool)
- mempool_destroy(phba->device_data_mem_pool);
+ mempool_destroy(phba->device_data_mem_pool);
phba->device_data_mem_pool = NULL;
}
@@ -13505,11 +13964,24 @@ lpfc_init(void)
/* Initialize in case vector mapping is needed */
lpfc_present_cpu = num_present_cpus();
+ error = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+ "lpfc/sli4:online",
+ lpfc_cpu_online, lpfc_cpu_offline);
+ if (error < 0)
+ goto cpuhp_failure;
+ lpfc_cpuhp_state = error;
+
error = pci_register_driver(&lpfc_driver);
- if (error) {
- fc_release_transport(lpfc_transport_template);
- fc_release_transport(lpfc_vport_transport_template);
- }
+ if (error)
+ goto unwind;
+
+ return error;
+
+unwind:
+ cpuhp_remove_multi_state(lpfc_cpuhp_state);
+cpuhp_failure:
+ fc_release_transport(lpfc_transport_template);
+ fc_release_transport(lpfc_vport_transport_template);
return error;
}
@@ -13526,21 +13998,9 @@ lpfc_exit(void)
{
misc_deregister(&lpfc_mgmt_dev);
pci_unregister_driver(&lpfc_driver);
+ cpuhp_remove_multi_state(lpfc_cpuhp_state);
fc_release_transport(lpfc_transport_template);
fc_release_transport(lpfc_vport_transport_template);
- if (_dump_buf_data) {
- printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for "
- "_dump_buf_data at 0x%p\n",
- (1L << _dump_buf_data_order), _dump_buf_data);
- free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
- }
-
- if (_dump_buf_dif) {
- printk(KERN_ERR "9049 BLKGRD: freeing %lu pages for "
- "_dump_buf_dif at 0x%p\n",
- (1L << _dump_buf_dif_order), _dump_buf_dif);
- free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
- }
idr_destroy(&lpfc_hba_index);
}
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h
index ea10f03437f5..148d02a27b58 100644
--- a/drivers/scsi/lpfc/lpfc_logmsg.h
+++ b/drivers/scsi/lpfc/lpfc_logmsg.h
@@ -46,6 +46,23 @@
#define LOG_NVME_IOERR 0x00800000 /* NVME IO Error events. */
#define LOG_ALL_MSG 0xffffffff /* LOG all messages */
+/* generate message by verbose log setting or severity */
+#define lpfc_vlog_msg(vport, level, mask, fmt, arg...) \
+{ if (((mask) & (vport)->cfg_log_verbose) || (level[1] <= '4')) \
+ dev_printk(level, &((vport)->phba->pcidev)->dev, "%d:(%d):" \
+ fmt, (vport)->phba->brd_no, vport->vpi, ##arg); }
+
+#define lpfc_log_msg(phba, level, mask, fmt, arg...) \
+do { \
+ { uint32_t log_verbose = (phba)->pport ? \
+ (phba)->pport->cfg_log_verbose : \
+ (phba)->cfg_log_verbose; \
+ if (((mask) & log_verbose) || (level[1] <= '4')) \
+ dev_printk(level, &((phba)->pcidev)->dev, "%d:" \
+ fmt, phba->brd_no, ##arg); \
+ } \
+} while (0)
+
#define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \
do { \
{ if (((mask) & (vport)->cfg_log_verbose) || (level[1] <= '3')) \
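A usage sketch of the new macros above (the call sites and message IDs are hypothetical, not taken from this patch): anything at severity '4' (warning) or more severe always prints, while less severe messages are gated by the verbosity mask.

/* Hypothetical call sites, shown only to illustrate the gating above. */
static void example_log_usage(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
	/* KERN_WARNING is level "4", so level[1] <= '4' holds and this
	 * prints even when LOG_INIT is not set in cfg_log_verbose.
	 */
	lpfc_log_msg(phba, KERN_WARNING, LOG_INIT,
		     "0000 example warning, always printed\n");

	/* KERN_INFO is level "6", so this prints only when LOG_DISCOVERY
	 * is set in the vport's cfg_log_verbose.
	 */
	lpfc_vlog_msg(vport, KERN_INFO, LOG_DISCOVERY,
		      "0000 example info, gated by verbosity\n");
}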
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 8abe933bad09..d1773c01d2b3 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -515,6 +515,7 @@ lpfc_init_link(struct lpfc_hba * phba,
if ((phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC ||
phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC) &&
+ !(phba->sli4_hba.pc_sli4_params.pls) &&
mb->un.varInitLnk.link_flags & FLAGS_TOPOLOGY_MODE_LOOP) {
mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT;
phba->cfg_topology = FLAGS_TOPOLOGY_MODE_PT_PT;
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 66191fa35f63..7082279e4c01 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -72,8 +72,8 @@ lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *phba) {
* lpfc_mem_alloc - create and allocate all PCI and memory pools
* @phba: HBA to allocate pools for
*
- * Description: Creates and allocates PCI pools lpfc_sg_dma_buf_pool,
- * lpfc_mbuf_pool, lpfc_hrb_pool. Creates and allocates kmalloc-backed mempools
+ * Description: Creates and allocates PCI pools lpfc_mbuf_pool,
+ * lpfc_hrb_pool. Creates and allocates kmalloc-backed mempools
* for LPFC_MBOXQ_t and lpfc_nodelist. Also allocates the VPI bitmask.
*
* Notes: Not interrupt-safe. Must be called with no locks held. If any
@@ -89,36 +89,12 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
int i;
- if (phba->sli_rev == LPFC_SLI_REV4) {
- /* Calculate alignment */
- if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE)
- i = phba->cfg_sg_dma_buf_size;
- else
- i = SLI4_PAGE_SIZE;
-
- phba->lpfc_sg_dma_buf_pool =
- dma_pool_create("lpfc_sg_dma_buf_pool",
- &phba->pcidev->dev,
- phba->cfg_sg_dma_buf_size,
- i, 0);
- if (!phba->lpfc_sg_dma_buf_pool)
- goto fail;
-
- } else {
- phba->lpfc_sg_dma_buf_pool =
- dma_pool_create("lpfc_sg_dma_buf_pool",
- &phba->pcidev->dev, phba->cfg_sg_dma_buf_size,
- align, 0);
-
- if (!phba->lpfc_sg_dma_buf_pool)
- goto fail;
- }
phba->lpfc_mbuf_pool = dma_pool_create("lpfc_mbuf_pool", &phba->pcidev->dev,
LPFC_BPL_SIZE,
align, 0);
if (!phba->lpfc_mbuf_pool)
- goto fail_free_dma_buf_pool;
+ goto fail;
pool->elements = kmalloc_array(LPFC_MBUF_POOL_SIZE,
sizeof(struct lpfc_dmabuf),
@@ -208,9 +184,6 @@ fail_free_drb_pool:
fail_free_lpfc_mbuf_pool:
dma_pool_destroy(phba->lpfc_mbuf_pool);
phba->lpfc_mbuf_pool = NULL;
- fail_free_dma_buf_pool:
- dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
- phba->lpfc_sg_dma_buf_pool = NULL;
fail:
return -ENOMEM;
}
@@ -248,25 +221,19 @@ lpfc_mem_free(struct lpfc_hba *phba)
/* Free HBQ pools */
lpfc_sli_hbqbuf_free_all(phba);
- if (phba->lpfc_nvmet_drb_pool)
- dma_pool_destroy(phba->lpfc_nvmet_drb_pool);
+ dma_pool_destroy(phba->lpfc_nvmet_drb_pool);
phba->lpfc_nvmet_drb_pool = NULL;
- if (phba->lpfc_drb_pool)
- dma_pool_destroy(phba->lpfc_drb_pool);
+
+ dma_pool_destroy(phba->lpfc_drb_pool);
phba->lpfc_drb_pool = NULL;
- if (phba->lpfc_hrb_pool)
- dma_pool_destroy(phba->lpfc_hrb_pool);
+
+ dma_pool_destroy(phba->lpfc_hrb_pool);
phba->lpfc_hrb_pool = NULL;
- if (phba->txrdy_payload_pool)
- dma_pool_destroy(phba->txrdy_payload_pool);
- phba->txrdy_payload_pool = NULL;
- if (phba->lpfc_hbq_pool)
- dma_pool_destroy(phba->lpfc_hbq_pool);
+ dma_pool_destroy(phba->lpfc_hbq_pool);
phba->lpfc_hbq_pool = NULL;
- if (phba->rrq_pool)
- mempool_destroy(phba->rrq_pool);
+ mempool_destroy(phba->rrq_pool);
phba->rrq_pool = NULL;
/* Free NLP memory pool */
@@ -290,10 +257,6 @@ lpfc_mem_free(struct lpfc_hba *phba)
dma_pool_destroy(phba->lpfc_mbuf_pool);
phba->lpfc_mbuf_pool = NULL;
- /* Free DMA buffer memory pool */
- dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
- phba->lpfc_sg_dma_buf_pool = NULL;
-
/* Free Device Data memory pool */
if (phba->device_data_mem_pool) {
/* Ensure all objects have been returned to the pool */
@@ -366,6 +329,13 @@ lpfc_mem_free_all(struct lpfc_hba *phba)
/* Free and destroy all the allocated memory pools */
lpfc_mem_free(phba);
+ /* Free DMA buffer memory pool */
+ dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
+ phba->lpfc_sg_dma_buf_pool = NULL;
+
+ dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool);
+ phba->lpfc_cmd_rsp_buf_pool = NULL;
+
/* Free the iocb lookup array */
kfree(psli->iocbq_lookup);
psli->iocbq_lookup = NULL;
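A short note on the pattern in the hunks above, with a hedged sketch (example_pool_teardown is hypothetical, not driver code): dma_pool_destroy() and mempool_destroy() both accept a NULL pointer and return quietly, which is why the explicit NULL checks around them could be dropped.

static void example_pool_teardown(struct lpfc_hba *phba)
{
	dma_pool_destroy(phba->lpfc_hrb_pool);	/* no-op if the pool was never created */
	phba->lpfc_hrb_pool = NULL;

	mempool_destroy(phba->rrq_pool);	/* likewise NULL-safe */
	phba->rrq_pool = NULL;
}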
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 59252bfca14e..a024e5a3918f 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -279,6 +279,109 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
}
+/* lpfc_defer_pt2pt_acc - Complete SLI3 pt2pt processing on link up
+ * @phba: pointer to lpfc hba data structure.
+ * @link_mbox: pointer to CONFIG_LINK mailbox object
+ *
+ * This routine is only called if we are SLI3, direct connect pt2pt
+ * mode and the remote NPort issues the PLOGI after link up.
+ */
+static void
+lpfc_defer_pt2pt_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *link_mbox)
+{
+ LPFC_MBOXQ_t *login_mbox;
+ MAILBOX_t *mb = &link_mbox->u.mb;
+ struct lpfc_iocbq *save_iocb;
+ struct lpfc_nodelist *ndlp;
+ int rc;
+
+ ndlp = link_mbox->ctx_ndlp;
+ login_mbox = link_mbox->context3;
+ save_iocb = login_mbox->context3;
+ link_mbox->context3 = NULL;
+ login_mbox->context3 = NULL;
+
+ /* Check for CONFIG_LINK error */
+ if (mb->mbxStatus) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+ "4575 CONFIG_LINK fails pt2pt discovery: %x\n",
+ mb->mbxStatus);
+ mempool_free(login_mbox, phba->mbox_mem_pool);
+ mempool_free(link_mbox, phba->mbox_mem_pool);
+ kfree(save_iocb);
+ return;
+ }
+
+ /* Now that CONFIG_LINK completed, and our SID is configured,
+ * we can now proceed with sending the PLOGI ACC.
+ */
+ rc = lpfc_els_rsp_acc(link_mbox->vport, ELS_CMD_PLOGI,
+ save_iocb, ndlp, login_mbox);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+ "4576 PLOGI ACC fails pt2pt discovery: %x\n",
+ rc);
+ mempool_free(login_mbox, phba->mbox_mem_pool);
+ }
+
+ mempool_free(link_mbox, phba->mbox_mem_pool);
+ kfree(save_iocb);
+}
+
+/**
+ * lpfc_defer_acc_rsp - Progress SLI4 target rcv PLOGI handler
+ * @phba: Pointer to HBA context object.
+ * @pmb: Pointer to mailbox object.
+ *
+ * This function provides the unreg rpi mailbox completion handler for a tgt.
+ * The routine frees the memory resources associated with the completed
+ * mailbox command and transmits the ELS ACC.
+ *
+ * This routine is only called if we are SLI4, acting in target
+ * mode and the remote NPort issues the PLOGI after link up.
+ **/
+static void
+lpfc_defer_acc_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+ struct lpfc_vport *vport = pmb->vport;
+ struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
+ LPFC_MBOXQ_t *mbox = pmb->context3;
+ struct lpfc_iocbq *piocb = NULL;
+ int rc;
+
+ if (mbox) {
+ pmb->context3 = NULL;
+ piocb = mbox->context3;
+ mbox->context3 = NULL;
+ }
+
+ /*
+ * Complete the unreg rpi mbx request, and update flags.
+ * This will also restart any deferred events.
+ */
+ lpfc_nlp_get(ndlp);
+ lpfc_sli4_unreg_rpi_cmpl_clr(phba, pmb);
+
+ if (!piocb) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY | LOG_ELS,
+ "4578 PLOGI ACC fail\n");
+ if (mbox)
+ mempool_free(mbox, phba->mbox_mem_pool);
+ goto out;
+ }
+
+ rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, piocb, ndlp, mbox);
+ if (rc) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY | LOG_ELS,
+ "4579 PLOGI ACC fail %x\n", rc);
+ if (mbox)
+ mempool_free(mbox, phba->mbox_mem_pool);
+ }
+ kfree(piocb);
+out:
+ lpfc_nlp_put(ndlp);
+}
+
static int
lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct lpfc_iocbq *cmdiocb)
@@ -291,10 +394,13 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
IOCB_t *icmd;
struct serv_parm *sp;
uint32_t ed_tov;
- LPFC_MBOXQ_t *mbox;
+ LPFC_MBOXQ_t *link_mbox;
+ LPFC_MBOXQ_t *login_mbox;
+ struct lpfc_iocbq *save_iocb;
struct ls_rjt stat;
uint32_t vid, flag;
- int rc;
+ u16 rpi;
+ int rc, defer_acc;
memset(&stat, 0, sizeof (struct ls_rjt));
pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
@@ -343,6 +449,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
else
ndlp->nlp_fcp_info |= CLASS3;
+ defer_acc = 0;
ndlp->nlp_class_sup = 0;
if (sp->cls1.classValid)
ndlp->nlp_class_sup |= FC_COS_CLASS1;
@@ -354,7 +461,6 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_class_sup |= FC_COS_CLASS4;
ndlp->nlp_maxframe =
((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
-
/* if already logged in, do implicit logout */
switch (ndlp->nlp_state) {
case NLP_STE_NPR_NODE:
@@ -396,6 +502,10 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
ndlp->nlp_flag &= ~NLP_FIRSTBURST;
+ login_mbox = NULL;
+ link_mbox = NULL;
+ save_iocb = NULL;
+
/* Check for Nport to NPort pt2pt protocol */
if ((vport->fc_flag & FC_PT2PT) &&
!(vport->fc_flag & FC_PT2PT_PLOGI)) {
@@ -423,17 +533,22 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (phba->sli_rev == LPFC_SLI_REV4)
lpfc_issue_reg_vfi(vport);
else {
- mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (mbox == NULL)
+ defer_acc = 1;
+ link_mbox = mempool_alloc(phba->mbox_mem_pool,
+ GFP_KERNEL);
+ if (!link_mbox)
goto out;
- lpfc_config_link(phba, mbox);
- mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
- mbox->vport = vport;
- rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
- if (rc == MBX_NOT_FINISHED) {
- mempool_free(mbox, phba->mbox_mem_pool);
+ lpfc_config_link(phba, link_mbox);
+ link_mbox->mbox_cmpl = lpfc_defer_pt2pt_acc;
+ link_mbox->vport = vport;
+ link_mbox->ctx_ndlp = ndlp;
+
+ save_iocb = kzalloc(sizeof(*save_iocb), GFP_KERNEL);
+ if (!save_iocb)
goto out;
- }
+ /* Save info from cmd IOCB used in rsp */
+ memcpy((uint8_t *)save_iocb, (uint8_t *)cmdiocb,
+ sizeof(struct lpfc_iocbq));
}
lpfc_can_disctmo(vport);
@@ -448,30 +563,57 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_flag |= NLP_SUPPRESS_RSP;
}
- mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (!mbox)
+ login_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!login_mbox)
goto out;
/* Registering an existing RPI behaves differently for SLI3 vs SLI4 */
- if (phba->sli_rev == LPFC_SLI_REV4)
+ if (phba->nvmet_support && !defer_acc) {
+ link_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!link_mbox)
+ goto out;
+
+ /* As unique identifiers such as iotag would be overwritten
+ * with those from the cmdiocb, allocate separate temporary
+ * storage for the copy.
+ */
+ save_iocb = kzalloc(sizeof(*save_iocb), GFP_KERNEL);
+ if (!save_iocb)
+ goto out;
+
+ /* Unreg RPI is required for SLI4. */
+ rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+ lpfc_unreg_login(phba, vport->vpi, rpi, link_mbox);
+ link_mbox->vport = vport;
+ link_mbox->ctx_ndlp = ndlp;
+ link_mbox->mbox_cmpl = lpfc_defer_acc_rsp;
+
+ if (((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
+ (!(vport->fc_flag & FC_OFFLINE_MODE)))
+ ndlp->nlp_flag |= NLP_UNREG_INP;
+
+ /* Save info from cmd IOCB used in rsp */
+ memcpy(save_iocb, cmdiocb, sizeof(*save_iocb));
+
+ /* Delay sending ACC till unreg RPI completes. */
+ defer_acc = 1;
+ } else if (phba->sli_rev == LPFC_SLI_REV4)
lpfc_unreg_rpi(vport, ndlp);
rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID,
- (uint8_t *) sp, mbox, ndlp->nlp_rpi);
- if (rc) {
- mempool_free(mbox, phba->mbox_mem_pool);
+ (uint8_t *)sp, login_mbox, ndlp->nlp_rpi);
+ if (rc)
goto out;
- }
/* ACC PLOGI rsp command needs to execute first,
- * queue this mbox command to be processed later.
+ * queue this login_mbox command to be processed later.
*/
- mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
+ login_mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
/*
- * mbox->ctx_ndlp = lpfc_nlp_get(ndlp) deferred until mailbox
+ * login_mbox->ctx_ndlp = lpfc_nlp_get(ndlp) deferred until mailbox
* command issued in lpfc_cmpl_els_acc().
*/
- mbox->vport = vport;
+ login_mbox->vport = vport;
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
spin_unlock_irq(shost->host_lock);
@@ -484,8 +626,10 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
* single discovery thread, this will cause a huge delay in
* discovery. Also this will cause multiple state machines
* running in parallel for this node.
+ * This only applies to a fabric environment.
*/
- if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) {
+ if ((ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) &&
+ (vport->fc_flag & FC_FABRIC)) {
/* software abort outstanding PLOGI */
lpfc_els_abort(phba, ndlp);
}
@@ -493,6 +637,9 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if ((vport->port_type == LPFC_NPIV_PORT &&
vport->cfg_restrict_login)) {
+ /* no deferred ACC */
+ kfree(save_iocb);
+
/* In order to preserve RPIs, we want to cleanup
* the default RPI the firmware created to rcv
* this ELS request. The only way to do this is
@@ -504,16 +651,50 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD;
stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
rc = lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
- ndlp, mbox);
+ ndlp, login_mbox);
if (rc)
- mempool_free(mbox, phba->mbox_mem_pool);
+ mempool_free(login_mbox, phba->mbox_mem_pool);
+ return 1;
+ }
+ if (defer_acc) {
+ /* So the order here should be:
+ * SLI3 pt2pt
+ * Issue CONFIG_LINK mbox
+ * CONFIG_LINK cmpl
+ * SLI4 tgt
+ * Issue UNREG RPI mbx
+ * UNREG RPI cmpl
+ * Issue PLOGI ACC
+ * PLOGI ACC cmpl
+ * Issue REG_LOGIN mbox
+ */
+
+ /* Save the REG_LOGIN mbox and the rcv IOCB copy for later use */
+ link_mbox->context3 = login_mbox;
+ login_mbox->context3 = save_iocb;
+
+ /* Start the ball rolling by issuing CONFIG_LINK here */
+ rc = lpfc_sli_issue_mbox(phba, link_mbox, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED)
+ goto out;
return 1;
}
- rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox);
+
+ rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, login_mbox);
if (rc)
- mempool_free(mbox, phba->mbox_mem_pool);
+ mempool_free(login_mbox, phba->mbox_mem_pool);
return 1;
out:
+ if (defer_acc)
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+ "4577 discovery failure: %p %p %p\n",
+ save_iocb, link_mbox, login_mbox);
+ kfree(save_iocb);
+ if (link_mbox)
+ mempool_free(link_mbox, phba->mbox_mem_pool);
+ if (login_mbox)
+ mempool_free(login_mbox, phba->mbox_mem_pool);
+
stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE;
lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
@@ -614,7 +795,7 @@ lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
}
out:
/* If we are authenticated, move to the proper state */
- if (ndlp->nlp_type & NLP_FCP_TARGET)
+ if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET))
lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
else
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
@@ -799,9 +980,15 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (npr->writeXferRdyDis)
ndlp->nlp_flag |= NLP_FIRSTBURST;
}
- if (npr->Retry)
+ if (npr->Retry && ndlp->nlp_type &
+ (NLP_FCP_INITIATOR | NLP_FCP_TARGET))
ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
+ if (npr->Retry && phba->nsler &&
+ ndlp->nlp_type & (NLP_NVME_INITIATOR | NLP_NVME_TARGET))
+ ndlp->nlp_nvme_info |= NLP_NVME_NSLER;
+
/* If this driver is in nvme target mode, set the ndlp's fc4
* type to NVME provided the PRLI response claims NVME FC4
* type. Target mode does not issue gft_id so doesn't get
@@ -845,9 +1032,9 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
if (!(vport->fc_flag & FC_PT2PT)) {
/* Check config parameter use-adisc or FCP-2 */
- if ((vport->cfg_use_adisc && (vport->fc_flag & FC_RSCN_MODE)) ||
+ if (vport->cfg_use_adisc && ((vport->fc_flag & FC_RSCN_MODE) ||
((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) &&
- (ndlp->nlp_type & NLP_FCP_TARGET))) {
+ (ndlp->nlp_type & NLP_FCP_TARGET)))) {
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_ADISC;
spin_unlock_irq(shost->host_lock);
@@ -885,7 +1072,7 @@ lpfc_release_rpi(struct lpfc_hba *phba, struct lpfc_vport *vport,
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"1435 release_rpi SKIP UNREG x%x on "
"NPort x%x deferred x%x flg x%x "
- "Data: %p\n",
+ "Data: x%px\n",
ndlp->nlp_rpi, ndlp->nlp_DID,
ndlp->nlp_defer_did,
ndlp->nlp_flag, ndlp);
@@ -1661,6 +1848,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
LPFC_MBOXQ_t *mb;
LPFC_MBOXQ_t *nextmb;
struct lpfc_dmabuf *mp;
+ struct lpfc_nodelist *ns_ndlp;
cmdiocb = (struct lpfc_iocbq *) arg;
@@ -1693,6 +1881,13 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
}
spin_unlock_irq(&phba->hbalock);
+ /* software abort if any GID_FT is outstanding */
+ if (vport->cfg_enable_fc4_type != LPFC_ENABLE_FCP) {
+ ns_ndlp = lpfc_findnode_did(vport, NameServer_DID);
+ if (ns_ndlp && NLP_CHK_NODE_ACT(ns_ndlp))
+ lpfc_els_abort(phba, ns_ndlp);
+ }
+
lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
return ndlp->nlp_state;
}
@@ -1814,7 +2009,11 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
- lpfc_issue_els_prli(vport, ndlp, 0);
+ if (lpfc_issue_els_prli(vport, ndlp, 0)) {
+ lpfc_issue_els_logo(vport, ndlp, 0);
+ ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ }
} else {
if ((vport->fc_flag & FC_PT2PT) && phba->nvmet_support)
phba->targetport->port_id = vport->fc_myDID;
@@ -2012,6 +2211,13 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (bf_get_be32(prli_init, nvpr))
ndlp->nlp_type |= NLP_NVME_INITIATOR;
+ if (phba->nsler && bf_get_be32(prli_nsler, nvpr) &&
+ bf_get_be32(prli_conf, nvpr))
+ ndlp->nlp_nvme_info |= NLP_NVME_NSLER;
+ else
+ ndlp->nlp_nvme_info &= ~NLP_NVME_NSLER;
+
/* Target driver cannot solicit NVME FB. */
if (bf_get_be32(prli_tgt, nvpr)) {
/* Complete the nvme target roles. The transport
@@ -2891,18 +3097,21 @@ lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
uint32_t(*func) (struct lpfc_vport *, struct lpfc_nodelist *, void *,
uint32_t);
uint32_t got_ndlp = 0;
+ uint32_t data1;
if (lpfc_nlp_get(ndlp))
got_ndlp = 1;
cur_state = ndlp->nlp_state;
+ data1 = (((uint32_t)ndlp->nlp_fc4_type << 16) |
+ ((uint32_t)ndlp->nlp_type));
/* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0211 DSM in event x%x on NPort x%x in "
"state %d rpi x%x Data: x%x x%x\n",
evt, ndlp->nlp_DID, cur_state, ndlp->nlp_rpi,
- ndlp->nlp_flag, ndlp->nlp_fc4_type);
+ ndlp->nlp_flag, data1);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
"DSM in: evt:%d ste:%d did:x%x",
@@ -2913,10 +3122,13 @@ lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* DSM out state <rc> on NPort <nlp_DID> */
if (got_ndlp) {
+ data1 = (((uint32_t)ndlp->nlp_fc4_type << 16) |
+ ((uint32_t)ndlp->nlp_type));
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0212 DSM out state %d on NPort x%x "
- "rpi x%x Data: x%x\n",
- rc, ndlp->nlp_DID, ndlp->nlp_rpi, ndlp->nlp_flag);
+ "rpi x%x Data: x%x x%x\n",
+ rc, ndlp->nlp_DID, ndlp->nlp_rpi, ndlp->nlp_flag,
+ data1);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
"DSM out: ste:%d did:x%x flg:x%x",
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 946642cee3df..f6c8963c915d 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -196,6 +196,46 @@ lpfc_nvme_cmd_template(void)
}
/**
+ * lpfc_nvme_prep_abort_wqe - set up 'abort' work queue entry.
+ * @pwqeq: Pointer to command iocb.
+ * @xritag: Tag that uniquely identifies the local exchange resource.
+ * @opt: Option bits -
+ * bit 0 = inhibit sending abts on the link
+ *
+ * This function is called with hbalock held.
+ **/
+void
+lpfc_nvme_prep_abort_wqe(struct lpfc_iocbq *pwqeq, u16 xritag, u8 opt)
+{
+ union lpfc_wqe128 *wqe = &pwqeq->wqe;
+
+ /* WQEs are reused. Clear stale data and set key fields to
+ * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
+ */
+ memset(wqe, 0, sizeof(*wqe));
+
+ if (opt & INHIBIT_ABORT)
+ bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
+ /* Abort specified xri tag, with the mask deliberately zeroed */
+ bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
+
+ bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
+
+ /* Abort the IO associated with this outstanding exchange ID. */
+ wqe->abort_cmd.wqe_com.abort_tag = xritag;
+
+ /* iotag for the wqe completion. */
+ bf_set(wqe_reqtag, &wqe->abort_cmd.wqe_com, pwqeq->iotag);
+
+ bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
+ bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
+
+ bf_set(wqe_cmd_type, &wqe->abort_cmd.wqe_com, OTHER_COMMAND);
+ bf_set(wqe_wqec, &wqe->abort_cmd.wqe_com, 1);
+ bf_set(wqe_cqid, &wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
+}
+
+/**
* lpfc_nvme_create_queue -
* @lpfc_pnvme: Pointer to the driver's nvme instance data
* @qidx: A cpu index used to affinitize IO queues and MSIX vectors.
@@ -247,7 +287,7 @@ lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
"6073 Binding %s HdwQueue %d (cpu %d) to "
- "hdw_queue %d qhandle %p\n", str,
+ "hdw_queue %d qhandle x%px\n", str,
qidx, qhandle->cpu_id, qhandle->index, qhandle);
*handle = (void *)qhandle;
return 0;
@@ -282,7 +322,7 @@ lpfc_nvme_delete_queue(struct nvme_fc_local_port *pnvme_lport,
vport = lport->vport;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
- "6001 ENTER. lpfc_pnvme %p, qidx x%x qhandle %p\n",
+ "6001 ENTER. lpfc_pnvme x%px, qidx x%x qhandle x%px\n",
lport, qidx, handle);
kfree(handle);
}
@@ -293,7 +333,7 @@ lpfc_nvme_localport_delete(struct nvme_fc_local_port *localport)
struct lpfc_nvme_lport *lport = localport->private;
lpfc_printf_vlog(lport->vport, KERN_INFO, LOG_NVME,
- "6173 localport %p delete complete\n",
+ "6173 localport x%px delete complete\n",
lport);
/* release any threads waiting for the unreg to complete */
@@ -332,7 +372,7 @@ lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
* calling state machine to remove the node.
*/
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
- "6146 remoteport delete of remoteport %p\n",
+ "6146 remoteport delete of remoteport x%px\n",
remoteport);
spin_lock_irq(&vport->phba->hbalock);
@@ -383,8 +423,8 @@ lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
ndlp = (struct lpfc_nodelist *)cmdwqe->context1;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
"6047 nvme cmpl Enter "
- "Data %p DID %x Xri: %x status %x reason x%x cmd:%p "
- "lsreg:%p bmp:%p ndlp:%p\n",
+ "Data %px DID %x Xri: %x status %x reason x%x "
+ "cmd:x%px lsreg:x%px bmp:x%px ndlp:x%px\n",
pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
cmdwqe->sli4_xritag, status,
(wcqe->parameter & 0xffff),
@@ -404,7 +444,7 @@ lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
else
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
"6046 nvme cmpl without done call back? "
- "Data %p DID %x Xri: %x status %x\n",
+ "Data %px DID %x Xri: %x status %x\n",
pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
cmdwqe->sli4_xritag, status);
if (ndlp) {
@@ -436,6 +476,7 @@ lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
return 1;
wqe = &genwqe->wqe;
+ /* Initialize only 64 bytes */
memset(wqe, 0, sizeof(union lpfc_wqe));
genwqe->context3 = (uint8_t *)bmp;
@@ -516,7 +557,8 @@ lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
/* Issue GEN REQ WQE for NPORT <did> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"6050 Issue GEN REQ WQE to NPORT x%x "
- "Data: x%x x%x wq:%p lsreq:%p bmp:%p xmit:%d 1st:%d\n",
+ "Data: x%x x%x wq:x%px lsreq:x%px bmp:x%px "
+ "xmit:%d 1st:%d\n",
ndlp->nlp_DID, genwqe->iotag,
vport->port_state,
genwqe, pnvme_lsreq, bmp, xmit_len, first_len);
@@ -594,7 +636,7 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
ndlp = rport->ndlp;
if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
- "6051 Remoteport %p, rport has invalid ndlp. "
+ "6051 Remoteport x%px, rport has invalid ndlp. "
"Failing LS Req\n", pnvme_rport);
return -ENODEV;
}
@@ -646,10 +688,10 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
/* Expand print to include key fields. */
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
- "6149 Issue LS Req to DID 0x%06x lport %p, rport %p "
- "lsreq%p rqstlen:%d rsplen:%d %pad %pad\n",
- ndlp->nlp_DID,
- pnvme_lport, pnvme_rport,
+ "6149 Issue LS Req to DID 0x%06x lport x%px, "
+ "rport x%px lsreq x%px rqstlen:%d rsplen:%d "
+ "%pad %pad\n",
+ ndlp->nlp_DID, pnvme_lport, pnvme_rport,
pnvme_lsreq, pnvme_lsreq->rqstlen,
pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
&pnvme_lsreq->rspdma);
@@ -665,8 +707,8 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
if (ret != WQE_SUCCESS) {
atomic_inc(&lport->xmt_ls_err);
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
- "6052 EXIT. issue ls wqe failed lport %p, "
- "rport %p lsreq%p Status %x DID %x\n",
+ "6052 EXIT. issue ls wqe failed lport x%px, "
+ "rport x%px lsreq x%px Status %x DID %x\n",
pnvme_lport, pnvme_rport, pnvme_lsreq,
ret, ndlp->nlp_DID);
lpfc_mbuf_free(vport->phba, bmp->virt, bmp->phys);
@@ -723,7 +765,7 @@ lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,
/* Expand print to include key fields. */
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
- "6040 ENTER. lport %p, rport %p lsreq %p rqstlen:%d "
+ "6040 ENTER. lport x%px, rport x%px lsreq x%px rqstlen:%d "
"rsplen:%d %pad %pad\n",
pnvme_lport, pnvme_rport,
pnvme_lsreq, pnvme_lsreq->rqstlen,
@@ -984,8 +1026,8 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
if (!lpfc_ncmd->nvmeCmd) {
spin_unlock(&lpfc_ncmd->buf_lock);
lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
- "6066 Missing cmpl ptrs: lpfc_ncmd %p, "
- "nvmeCmd %p\n",
+ "6066 Missing cmpl ptrs: lpfc_ncmd x%px, "
+ "nvmeCmd x%px\n",
lpfc_ncmd, lpfc_ncmd->nvmeCmd);
/* Release the lpfc_ncmd regardless of the missing elements. */
@@ -998,9 +1040,9 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
idx = lpfc_ncmd->cur_iocbq.hba_wqidx;
phba->sli4_hba.hdwq[idx].nvme_cstat.io_cmpls++;
- if (vport->localport) {
+ if (unlikely(status && vport->localport)) {
lport = (struct lpfc_nvme_lport *)vport->localport->private;
- if (lport && status) {
+ if (lport) {
if (bf_get(lpfc_wcqe_c_xb, wcqe))
atomic_inc(&lport->cmpl_fcp_xb);
atomic_inc(&lport->cmpl_fcp_err);
@@ -1100,8 +1142,8 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
if (lpfc_ncmd->result == IOERR_ABORT_REQUESTED)
lpfc_printf_vlog(vport, KERN_INFO,
LOG_NVME_IOERR,
- "6032 Delay Aborted cmd %p "
- "nvme cmd %p, xri x%x, "
+ "6032 Delay Aborted cmd x%px "
+ "nvme cmd x%px, xri x%x, "
"xb %d\n",
lpfc_ncmd, nCmd,
lpfc_ncmd->cur_iocbq.sli4_xritag,
@@ -1140,7 +1182,7 @@ out_err:
phba->ktime_last_cmd = lpfc_ncmd->ts_data_nvme;
lpfc_nvme_ktime(phba, lpfc_ncmd);
}
- if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) {
+ if (unlikely(phba->cpucheck_on & LPFC_CHECK_NVME_IO)) {
uint32_t cpu;
idx = lpfc_ncmd->cur_iocbq.hba_wqidx;
cpu = raw_smp_processor_id();
@@ -1253,6 +1295,9 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
sizeof(uint32_t) * 8);
cstat->control_requests++;
}
+
+ if (pnode->nlp_nvme_info & NLP_NVME_NSLER)
+ bf_set(wqe_erp, &wqe->generic.wqe_com, 1);
/*
* Finish initializing those WQE fields that are independent
* of the nvme_cmnd request_buffer
@@ -1304,14 +1349,16 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
union lpfc_wqe128 *wqe = &lpfc_ncmd->cur_iocbq.wqe;
struct sli4_sge *sgl = lpfc_ncmd->dma_sgl;
+ struct sli4_hybrid_sgl *sgl_xtra = NULL;
struct scatterlist *data_sg;
struct sli4_sge *first_data_sgl;
struct ulp_bde64 *bde;
- dma_addr_t physaddr;
+ dma_addr_t physaddr = 0;
uint32_t num_bde = 0;
- uint32_t dma_len;
+ uint32_t dma_len = 0;
uint32_t dma_offset = 0;
- int nseg, i;
+ int nseg, i, j;
+ bool lsp_just_set = false;
/* Fix up the command and response DMA stuff. */
lpfc_nvme_adj_fcp_sgls(vport, lpfc_ncmd, nCmd);
@@ -1348,6 +1395,9 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
*/
nseg = nCmd->sg_cnt;
data_sg = nCmd->first_sgl;
+
+ /* for tracking the segment boundaries */
+ j = 2;
for (i = 0; i < nseg; i++) {
if (data_sg == NULL) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
@@ -1356,23 +1406,76 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
lpfc_ncmd->seg_cnt = 0;
return 1;
}
- physaddr = data_sg->dma_address;
- dma_len = data_sg->length;
- sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
- sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
- sgl->word2 = le32_to_cpu(sgl->word2);
- if ((num_bde + 1) == nseg)
+
+ sgl->word2 = 0;
+ if ((num_bde + 1) == nseg) {
bf_set(lpfc_sli4_sge_last, sgl, 1);
- else
+ bf_set(lpfc_sli4_sge_type, sgl,
+ LPFC_SGE_TYPE_DATA);
+ } else {
bf_set(lpfc_sli4_sge_last, sgl, 0);
- bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
- bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
- sgl->word2 = cpu_to_le32(sgl->word2);
- sgl->sge_len = cpu_to_le32(dma_len);
-
- dma_offset += dma_len;
- data_sg = sg_next(data_sg);
- sgl++;
+
+ /* expand the segment */
+ if (!lsp_just_set &&
+ !((j + 1) % phba->border_sge_num) &&
+ ((nseg - 1) != i)) {
+ /* set LSP type */
+ bf_set(lpfc_sli4_sge_type, sgl,
+ LPFC_SGE_TYPE_LSP);
+
+ sgl_xtra = lpfc_get_sgl_per_hdwq(
+ phba, lpfc_ncmd);
+
+ if (unlikely(!sgl_xtra)) {
+ lpfc_ncmd->seg_cnt = 0;
+ return 1;
+ }
+ sgl->addr_lo = cpu_to_le32(putPaddrLow(
+ sgl_xtra->dma_phys_sgl));
+ sgl->addr_hi = cpu_to_le32(putPaddrHigh(
+ sgl_xtra->dma_phys_sgl));
+
+ } else {
+ bf_set(lpfc_sli4_sge_type, sgl,
+ LPFC_SGE_TYPE_DATA);
+ }
+ }
+
+ if (!(bf_get(lpfc_sli4_sge_type, sgl) &
+ LPFC_SGE_TYPE_LSP)) {
+ if ((nseg - 1) == i)
+ bf_set(lpfc_sli4_sge_last, sgl, 1);
+
+ physaddr = data_sg->dma_address;
+ dma_len = data_sg->length;
+ sgl->addr_lo = cpu_to_le32(
+ putPaddrLow(physaddr));
+ sgl->addr_hi = cpu_to_le32(
+ putPaddrHigh(physaddr));
+
+ bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
+ sgl->word2 = cpu_to_le32(sgl->word2);
+ sgl->sge_len = cpu_to_le32(dma_len);
+
+ dma_offset += dma_len;
+ data_sg = sg_next(data_sg);
+
+ sgl++;
+
+ lsp_just_set = false;
+ } else {
+ sgl->word2 = cpu_to_le32(sgl->word2);
+
+ sgl->sge_len = cpu_to_le32(
+ phba->cfg_sg_dma_buf_size);
+
+ sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
+ i = i - 1;
+
+ lsp_just_set = true;
+ }
+
+ j++;
}
if (phba->cfg_enable_pbde) {
/* Use PBDE support for first SGL only, offset == 0 */
@@ -1474,7 +1577,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
goto out_fail;
}
- if (vport->load_flag & FC_UNLOADING) {
+ if (unlikely(vport->load_flag & FC_UNLOADING)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
"6124 Fail IO, Driver unload\n");
atomic_inc(&lport->xmt_fcp_err);
@@ -1505,8 +1608,8 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
ndlp = rport->ndlp;
if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR,
- "6053 Fail IO, ndlp not ready: rport %p "
- "ndlp %p, DID x%06x\n",
+ "6053 Busy IO, ndlp not ready: rport x%px "
+ "ndlp x%px, DID x%06x\n",
rport, ndlp, pnvme_rport->port_id);
atomic_inc(&lport->xmt_fcp_err);
ret = -EBUSY;
@@ -1728,7 +1831,6 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
struct lpfc_iocbq *abts_buf;
struct lpfc_iocbq *nvmereq_wqe;
struct lpfc_nvme_fcpreq_priv *freqpriv;
- union lpfc_wqe128 *abts_wqe;
unsigned long flags;
int ret_val;
@@ -1758,7 +1860,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
/* Announce entry to new IO submit field. */
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
"6002 Abort Request to rport DID x%06x "
- "for nvme_fc_req %p\n",
+ "for nvme_fc_req x%px\n",
pnvme_rport->port_id,
pnvme_fcreq);
@@ -1767,7 +1869,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
*/
spin_lock_irqsave(&phba->hbalock, flags);
/* driver queued commands are in process of being flushed */
- if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) {
+ if (phba->hba_flag & HBA_IOQ_FLUSH) {
spin_unlock_irqrestore(&phba->hbalock, flags);
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
"6139 Driver in reset cleanup - flushing "
@@ -1805,8 +1907,8 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
if (lpfc_nbuf->nvmeCmd != pnvme_fcreq) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
"6143 NVME req mismatch: "
- "lpfc_nbuf %p nvmeCmd %p, "
- "pnvme_fcreq %p. Skipping Abort xri x%x\n",
+ "lpfc_nbuf x%px nvmeCmd x%px, "
+ "pnvme_fcreq x%px. Skipping Abort xri x%x\n",
lpfc_nbuf, lpfc_nbuf->nvmeCmd,
pnvme_fcreq, nvmereq_wqe->sli4_xritag);
goto out_unlock;
@@ -1815,7 +1917,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
/* Don't abort IOs no longer on the pending queue. */
if (!(nvmereq_wqe->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
- "6142 NVME IO req %p not queued - skipping "
+ "6142 NVME IO req x%px not queued - skipping "
"abort req xri x%x\n",
pnvme_fcreq, nvmereq_wqe->sli4_xritag);
goto out_unlock;
@@ -1830,8 +1932,8 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
if (nvmereq_wqe->iocb_flag & LPFC_DRIVER_ABORTED) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
"6144 Outstanding NVME I/O Abort Request "
- "still pending on nvme_fcreq %p, "
- "lpfc_ncmd %p xri x%x\n",
+ "still pending on nvme_fcreq x%px, "
+ "lpfc_ncmd %px xri x%x\n",
pnvme_fcreq, lpfc_nbuf,
nvmereq_wqe->sli4_xritag);
goto out_unlock;
@@ -1841,7 +1943,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
if (!abts_buf) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
"6136 No available abort wqes. Skipping "
- "Abts req for nvme_fcreq %p xri x%x\n",
+ "Abts req for nvme_fcreq x%px xri x%x\n",
pnvme_fcreq, nvmereq_wqe->sli4_xritag);
goto out_unlock;
}
@@ -1849,37 +1951,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
/* Ready - mark outstanding as aborted by driver. */
nvmereq_wqe->iocb_flag |= LPFC_DRIVER_ABORTED;
- /* Complete prepping the abort wqe and issue to the FW. */
- abts_wqe = &abts_buf->wqe;
-
- /* WQEs are reused. Clear stale data and set key fields to
- * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
- */
- memset(abts_wqe, 0, sizeof(union lpfc_wqe));
- bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
-
- /* word 7 */
- bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
- bf_set(wqe_class, &abts_wqe->abort_cmd.wqe_com,
- nvmereq_wqe->iocb.ulpClass);
-
- /* word 8 - tell the FW to abort the IO associated with this
- * outstanding exchange ID.
- */
- abts_wqe->abort_cmd.wqe_com.abort_tag = nvmereq_wqe->sli4_xritag;
-
- /* word 9 - this is the iotag for the abts_wqe completion. */
- bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
- abts_buf->iotag);
-
- /* word 10 */
- bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
- bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
-
- /* word 11 */
- bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
- bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
- bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
+ lpfc_nvme_prep_abort_wqe(abts_buf, nvmereq_wqe->sli4_xritag, 0);
/* ABTS WQE must go to the same WQ as the WQE to be aborted */
abts_buf->iocb_flag |= LPFC_IO_NVME;
@@ -1892,7 +1964,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
if (ret_val) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
"6137 Failed abts issue_wqe with status x%x "
- "for nvme_fcreq %p.\n",
+ "for nvme_fcreq x%px.\n",
ret_val, pnvme_fcreq);
lpfc_sli_release_iocbq(phba, abts_buf);
return;
@@ -1913,6 +1985,8 @@ out_unlock:
/* Declare and initialize an instance of the FC NVME template. */
static struct nvme_fc_port_template lpfc_nvme_template = {
+ .module = THIS_MODULE,
+
/* initiator-based functions */
.localport_delete = lpfc_nvme_localport_delete,
.remoteport_delete = lpfc_nvme_remoteport_delete,
@@ -1982,7 +2056,7 @@ lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
sgl->word2 = cpu_to_le32(sgl->word2);
/* Fill in word 3 / sgl_len during cmd submission */
- /* Initialize WQE */
+ /* Initialize 64 bytes only */
memset(wqe, 0, sizeof(union lpfc_wqe));
if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
@@ -2021,18 +2095,18 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd)
lpfc_ncmd->flags &= ~LPFC_SBUF_BUMP_QDEPTH;
qp = lpfc_ncmd->hdwq;
- if (lpfc_ncmd->flags & LPFC_SBUF_XBUSY) {
+ if (unlikely(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) {
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
"6310 XB release deferred for "
"ox_id x%x on reqtag x%x\n",
lpfc_ncmd->cur_iocbq.sli4_xritag,
lpfc_ncmd->cur_iocbq.iotag);
- spin_lock_irqsave(&qp->abts_nvme_buf_list_lock, iflag);
+ spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
list_add_tail(&lpfc_ncmd->list,
- &qp->lpfc_abts_nvme_buf_list);
+ &qp->lpfc_abts_io_buf_list);
qp->abts_nvme_io_bufs++;
- spin_unlock_irqrestore(&qp->abts_nvme_buf_list_lock, iflag);
+ spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
} else
lpfc_release_io_buf(phba, (struct lpfc_io_buf *)lpfc_ncmd, qp);
}
@@ -2076,12 +2150,10 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
*/
lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
- /* Advertise how many hw queues we support based on fcp_io_sched */
- if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ)
- lpfc_nvme_template.max_hw_queues = phba->cfg_hdw_queue;
- else
- lpfc_nvme_template.max_hw_queues =
- phba->sli4_hba.num_present_cpu;
+ /* Advertise how many hw queues we support based on cfg_hdw_queue,
+ * which will not exceed cpu count.
+ */
+ lpfc_nvme_template.max_hw_queues = phba->cfg_hdw_queue;
if (!IS_ENABLED(CONFIG_NVME_FC))
return ret;
@@ -2095,8 +2167,8 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
if (!ret) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_NVME_DISC,
"6005 Successfully registered local "
- "NVME port num %d, localP %p, private %p, "
- "sg_seg %d\n",
+ "NVME port num %d, localP x%px, private "
+ "x%px, sg_seg %d\n",
localport->port_num, localport,
localport->private,
lpfc_nvme_template.max_sgl_segments);
@@ -2157,14 +2229,14 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
if (unlikely(!ret)) {
pending = 0;
for (i = 0; i < phba->cfg_hdw_queue; i++) {
- pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;
+ pring = phba->sli4_hba.hdwq[i].io_wq->pring;
if (!pring)
continue;
if (pring->txcmplq_cnt)
pending += pring->txcmplq_cnt;
}
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
- "6176 Lport %p Localport %p wait "
+ "6176 Lport x%px Localport x%px wait "
"timed out. Pending %d. Renewing.\n",
lport, vport->localport, pending);
continue;
@@ -2172,7 +2244,7 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
break;
}
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
- "6177 Lport %p Localport %p Complete Success\n",
+ "6177 Lport x%px Localport x%px Complete Success\n",
lport, vport->localport);
}
#endif
@@ -2203,7 +2275,7 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
lport = (struct lpfc_nvme_lport *)localport->private;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
- "6011 Destroying NVME localport %p\n",
+ "6011 Destroying NVME localport x%px\n",
localport);
/* lport's rport list is clear. Unregister
@@ -2253,12 +2325,12 @@ lpfc_nvme_update_localport(struct lpfc_vport *vport)
lport = (struct lpfc_nvme_lport *)localport->private;
if (!lport) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME,
- "6171 Update NVME fail. localP %p, No lport\n",
+ "6171 Update NVME fail. localP x%px, No lport\n",
localport);
return;
}
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
- "6012 Update NVME lport %p did x%x\n",
+ "6012 Update NVME lport x%px did x%x\n",
localport, vport->fc_myDID);
localport->port_id = vport->fc_myDID;
@@ -2268,7 +2340,7 @@ lpfc_nvme_update_localport(struct lpfc_vport *vport)
localport->port_role = FC_PORT_ROLE_NVME_INITIATOR;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
- "6030 bound lport %p to DID x%06x\n",
+ "6030 bound lport x%px to DID x%06x\n",
lport, localport->port_id);
#endif
}
@@ -2317,9 +2389,13 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
spin_lock_irq(&vport->phba->hbalock);
oldrport = lpfc_ndlp_get_nrport(ndlp);
- spin_unlock_irq(&vport->phba->hbalock);
- if (!oldrport)
+ if (oldrport) {
+ prev_ndlp = oldrport->ndlp;
+ spin_unlock_irq(&vport->phba->hbalock);
+ } else {
+ spin_unlock_irq(&vport->phba->hbalock);
lpfc_nlp_get(ndlp);
+ }
ret = nvme_fc_register_remoteport(localport, &rpinfo, &remote_port);
if (!ret) {
@@ -2338,25 +2414,34 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
/* New remoteport record does not guarantee valid
* host private memory area.
*/
- prev_ndlp = oldrport->ndlp;
if (oldrport == remote_port->private) {
/* Same remoteport - ndlp should match.
* Just reuse.
*/
lpfc_printf_vlog(ndlp->vport, KERN_INFO,
LOG_NVME_DISC,
- "6014 Rebinding lport to "
- "remoteport %p wwpn 0x%llx, "
- "Data: x%x x%x %p %p x%x x%06x\n",
+ "6014 Rebind lport to current "
+ "remoteport x%px wwpn 0x%llx, "
+ "Data: x%x x%x x%px x%px x%x "
+ " x%06x\n",
remote_port,
remote_port->port_name,
remote_port->port_id,
remote_port->port_role,
- prev_ndlp,
+ oldrport->ndlp,
ndlp,
ndlp->nlp_type,
ndlp->nlp_DID);
- return 0;
+
+ /* It's a complete rebind only if the driver
+ * is registering with the same ndlp. Otherwise
+ * the driver likely executed a node swap
+ * prior to this registration and the ndlp to
+ * remoteport binding needs to be redone.
+ */
+ if (prev_ndlp == ndlp)
+ return 0;
+
}
/* Sever the ndlp<->rport association
@@ -2390,10 +2475,10 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
spin_unlock_irq(&vport->phba->hbalock);
lpfc_printf_vlog(vport, KERN_INFO,
LOG_NVME_DISC | LOG_NODE,
- "6022 Binding new rport to "
- "lport %p Remoteport %p rport %p WWNN 0x%llx, "
+ "6022 Bind lport x%px to remoteport x%px "
+ "rport x%px WWNN 0x%llx, "
"Rport WWPN 0x%llx DID "
- "x%06x Role x%x, ndlp %p prev_ndlp %p\n",
+ "x%06x Role x%x, ndlp %p prev_ndlp x%px\n",
lport, remote_port, rport,
rpinfo.node_name, rpinfo.port_name,
rpinfo.port_id, rpinfo.port_role,
@@ -2423,20 +2508,23 @@ void
lpfc_nvme_rescan_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
- struct lpfc_nvme_rport *rport;
- struct nvme_fc_remote_port *remoteport;
+ struct lpfc_nvme_rport *nrport;
+ struct nvme_fc_remote_port *remoteport = NULL;
- rport = ndlp->nrport;
+ spin_lock_irq(&vport->phba->hbalock);
+ nrport = lpfc_ndlp_get_nrport(ndlp);
+ if (nrport)
+ remoteport = nrport->remoteport;
+ spin_unlock_irq(&vport->phba->hbalock);
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
"6170 Rescan NPort DID x%06x type x%x "
- "state x%x rport %p\n",
- ndlp->nlp_DID, ndlp->nlp_type, ndlp->nlp_state, rport);
- if (!rport)
- goto input_err;
- remoteport = rport->remoteport;
- if (!remoteport)
- goto input_err;
+ "state x%x nrport x%px remoteport x%px\n",
+ ndlp->nlp_DID, ndlp->nlp_type, ndlp->nlp_state,
+ nrport, remoteport);
+
+ if (!nrport || !remoteport)
+ goto rescan_exit;
/* Only rescan if we are an NVME target in the MAPPED state */
if (remoteport->port_role & FC_PORT_ROLE_NVME_DISCOVERY &&
@@ -2449,10 +2537,10 @@ lpfc_nvme_rescan_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
ndlp->nlp_DID, remoteport->port_state);
}
return;
-input_err:
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
- "6169 State error: lport %p, rport%p FCID x%06x\n",
- vport->localport, ndlp->rport, ndlp->nlp_DID);
+ rescan_exit:
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
+ "6169 Skip NVME Rport Rescan, NVME remoteport "
+ "unregistered\n");
#endif
}
@@ -2499,7 +2587,7 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
goto input_err;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
- "6033 Unreg nvme remoteport %p, portname x%llx, "
+ "6033 Unreg nvme remoteport x%px, portname x%llx, "
"port_id x%06x, portstate x%x port type x%x\n",
remoteport, remoteport->port_name,
remoteport->port_id, remoteport->port_state,
@@ -2537,7 +2625,7 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
input_err:
#endif
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
- "6168 State error: lport %p, rport%p FCID x%06x\n",
+ "6168 State error: lport x%px, rport x%px FCID x%06x\n",
vport->localport, ndlp->rport, ndlp->nlp_DID);
}
@@ -2545,6 +2633,7 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
* lpfc_sli4_nvme_xri_aborted - Fast-path process of NVME xri abort
* @phba: pointer to lpfc hba data structure.
* @axri: pointer to the fcp xri abort wcqe structure.
+ * @lpfc_ncmd: The nvme job structure for the request being aborted.
*
* This routine is invoked by the worker thread to process a SLI4 fast-path
* NVME aborted xri. Aborted NVME IO commands are completed to the transport
@@ -2552,59 +2641,33 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
**/
void
lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
- struct sli4_wcqe_xri_aborted *axri, int idx)
+ struct sli4_wcqe_xri_aborted *axri,
+ struct lpfc_io_buf *lpfc_ncmd)
{
uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
- struct lpfc_io_buf *lpfc_ncmd, *next_lpfc_ncmd;
struct nvmefc_fcp_req *nvme_cmd = NULL;
- struct lpfc_nodelist *ndlp;
- struct lpfc_sli4_hdw_queue *qp;
- unsigned long iflag = 0;
+ struct lpfc_nodelist *ndlp = lpfc_ncmd->ndlp;
- if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
- return;
- qp = &phba->sli4_hba.hdwq[idx];
- spin_lock_irqsave(&phba->hbalock, iflag);
- spin_lock(&qp->abts_nvme_buf_list_lock);
- list_for_each_entry_safe(lpfc_ncmd, next_lpfc_ncmd,
- &qp->lpfc_abts_nvme_buf_list, list) {
- if (lpfc_ncmd->cur_iocbq.sli4_xritag == xri) {
- list_del_init(&lpfc_ncmd->list);
- qp->abts_nvme_io_bufs--;
- lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
- lpfc_ncmd->status = IOSTAT_SUCCESS;
- spin_unlock(&qp->abts_nvme_buf_list_lock);
-
- spin_unlock_irqrestore(&phba->hbalock, iflag);
- ndlp = lpfc_ncmd->ndlp;
- if (ndlp)
- lpfc_sli4_abts_err_handler(phba, ndlp, axri);
-
- lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
- "6311 nvme_cmd %p xri x%x tag x%x "
- "abort complete and xri released\n",
- lpfc_ncmd->nvmeCmd, xri,
- lpfc_ncmd->cur_iocbq.iotag);
-
- /* Aborted NVME commands are required to not complete
- * before the abort exchange command fully completes.
- * Once completed, it is available via the put list.
- */
- if (lpfc_ncmd->nvmeCmd) {
- nvme_cmd = lpfc_ncmd->nvmeCmd;
- nvme_cmd->done(nvme_cmd);
- lpfc_ncmd->nvmeCmd = NULL;
- }
- lpfc_release_nvme_buf(phba, lpfc_ncmd);
- return;
- }
- }
- spin_unlock(&qp->abts_nvme_buf_list_lock);
- spin_unlock_irqrestore(&phba->hbalock, iflag);
- lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
- "6312 XRI Aborted xri x%x not found\n", xri);
+ if (ndlp)
+ lpfc_sli4_abts_err_handler(phba, ndlp, axri);
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
+ "6311 nvme_cmd %p xri x%x tag x%x abort complete and "
+ "xri released\n",
+ lpfc_ncmd->nvmeCmd, xri,
+ lpfc_ncmd->cur_iocbq.iotag);
+
+ /* Aborted NVME commands are required to not complete
+ * before the abort exchange command fully completes.
+ * Once completed, it is available via the put list.
+ */
+ if (lpfc_ncmd->nvmeCmd) {
+ nvme_cmd = lpfc_ncmd->nvmeCmd;
+ nvme_cmd->done(nvme_cmd);
+ lpfc_ncmd->nvmeCmd = NULL;
+ }
+ lpfc_release_nvme_buf(phba, lpfc_ncmd);
}
/**
@@ -2626,13 +2689,13 @@ lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba)
if (phba->sli_rev < LPFC_SLI_REV4 || !phba->sli4_hba.hdwq)
return;
- /* Cycle through all NVME rings and make sure all outstanding
+ /* Cycle through all IO rings and make sure all outstanding
* WQEs have been removed from the txcmplqs.
*/
for (i = 0; i < phba->cfg_hdw_queue; i++) {
- if (!phba->sli4_hba.hdwq[i].nvme_wq)
+ if (!phba->sli4_hba.hdwq[i].io_wq)
continue;
- pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;
+ pring = phba->sli4_hba.hdwq[i].io_wq->pring;
if (!pring)
continue;
@@ -2653,3 +2716,50 @@ lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba)
}
}
}
+
+void
+lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn)
+{
+#if (IS_ENABLED(CONFIG_NVME_FC))
+ struct lpfc_io_buf *lpfc_ncmd;
+ struct nvmefc_fcp_req *nCmd;
+ struct lpfc_nvme_fcpreq_priv *freqpriv;
+
+ if (!pwqeIn->context1) {
+ lpfc_sli_release_iocbq(phba, pwqeIn);
+ return;
+ }
+ /* For abort iocb just return, IO iocb will do a done call */
+ if (bf_get(wqe_cmnd, &pwqeIn->wqe.gen_req.wqe_com) ==
+ CMD_ABORT_XRI_CX) {
+ lpfc_sli_release_iocbq(phba, pwqeIn);
+ return;
+ }
+ lpfc_ncmd = (struct lpfc_io_buf *)pwqeIn->context1;
+
+ spin_lock(&lpfc_ncmd->buf_lock);
+ if (!lpfc_ncmd->nvmeCmd) {
+ spin_unlock(&lpfc_ncmd->buf_lock);
+ lpfc_release_nvme_buf(phba, lpfc_ncmd);
+ return;
+ }
+
+ nCmd = lpfc_ncmd->nvmeCmd;
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
+ "6194 NVME Cancel xri %x\n",
+ lpfc_ncmd->cur_iocbq.sli4_xritag);
+
+ nCmd->transferred_length = 0;
+ nCmd->rcv_rsplen = 0;
+ nCmd->status = NVME_SC_INTERNAL;
+ freqpriv = nCmd->private;
+ freqpriv->nvme_buf = NULL;
+ lpfc_ncmd->nvmeCmd = NULL;
+
+ spin_unlock(&lpfc_ncmd->buf_lock);
+ nCmd->done(nCmd);
+
+ /* Call release with XB=1 to queue the IO into the abort list. */
+ lpfc_release_nvme_buf(phba, lpfc_ncmd);
+#endif
+}
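The reworked lpfc_nvme_prep_io_dma() above no longer assumes the whole scatterlist fits in one SGL page: when the next data entry would land on the page boundary (phba->border_sge_num), it writes an LPFC_SGE_TYPE_LSP entry that points at an extra per-hardware-queue SGL page obtained from lpfc_get_sgl_per_hdwq() and keeps filling entries there, re-processing the current data segment on the new page. The same pattern reappears in the SCSI prep routines further down. A minimal sketch of that chaining decision, using the names visible in the hunks above; the helper name and the simplified bookkeeping are illustrative only:

/* Illustrative sketch only (not driver source): the chaining rule used by
 * lpfc_nvme_prep_io_dma() and the SCSI equivalents.  'j' counts SGEs laid
 * down so far on the current page (the first two slots hold cmd/rsp), and
 * the caller has already zeroed sgl->word2 for this slot.
 */
static struct sli4_sge *
sketch_chain_to_extra_sgl(struct lpfc_hba *phba, struct lpfc_io_buf *buf,
			  struct sli4_sge *sgl, int j, int i, int nseg)
{
	struct sli4_hybrid_sgl *sgl_xtra;

	/* still room on this page, or this is the last data segment */
	if (((j + 1) % phba->border_sge_num) || i == (nseg - 1))
		return sgl;

	/* bridge: this slot becomes an LSP SGE pointing at a new page */
	bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP);
	sgl_xtra = lpfc_get_sgl_per_hdwq(phba, buf);
	if (unlikely(!sgl_xtra))
		return NULL;		/* caller fails the mapping */

	sgl->addr_lo = cpu_to_le32(putPaddrLow(sgl_xtra->dma_phys_sgl));
	sgl->addr_hi = cpu_to_le32(putPaddrHigh(sgl_xtra->dma_phys_sgl));
	sgl->word2 = cpu_to_le32(sgl->word2);
	sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size);

	/* caller re-processes segment i starting at the new page */
	return (struct sli4_sge *)sgl_xtra->dma_sgl;
}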
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index faa596f9e861..9dc9afe1c255 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -378,13 +378,6 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
int cpu;
unsigned long iflag;
- if (ctxp->txrdy) {
- dma_pool_free(phba->txrdy_payload_pool, ctxp->txrdy,
- ctxp->txrdy_phys);
- ctxp->txrdy = NULL;
- ctxp->txrdy_phys = 0;
- }
-
if (ctxp->state == LPFC_NVMET_STE_FREE) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6411 NVMET free, already free IO x%x: %d %d\n",
@@ -430,7 +423,6 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
ctxp->wqeq = NULL;
- ctxp->txrdy = NULL;
ctxp->offset = 0;
ctxp->phba = phba;
ctxp->size = size;
@@ -1026,7 +1018,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
* WQE release CQE
*/
ctxp->flag |= LPFC_NVMET_DEFER_WQFULL;
- wq = ctxp->hdwq->nvme_wq;
+ wq = ctxp->hdwq->io_wq;
pring = wq->pring;
spin_lock_irqsave(&pring->ring_lock, iflags);
list_add_tail(&nvmewqeq->list, &wq->wqfull_list);
@@ -1104,7 +1096,7 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
ctxp->oxid);
- wq = ctxp->hdwq->nvme_wq;
+ wq = ctxp->hdwq->io_wq;
lpfc_nvmet_wqfull_flush(phba, wq, ctxp);
return;
}
@@ -1437,7 +1429,7 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
infop = lpfc_get_ctx_list(phba, i, j);
lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
"6408 TOTAL NVMET ctx for CPU %d "
- "MRQ %d: cnt %d nextcpu %p\n",
+ "MRQ %d: cnt %d nextcpu x%px\n",
i, j, infop->nvmet_ctx_list_cnt,
infop->nvmet_ctx_next_cpu);
}
@@ -1500,7 +1492,7 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
"6026 Registered NVME "
- "targetport: %p, private %p "
+ "targetport: x%px, private x%px "
"portnm %llx nodenm %llx segs %d qs %d\n",
phba->targetport, tgtp,
pinfo.port_name, pinfo.node_name,
@@ -1555,7 +1547,7 @@ lpfc_nvmet_update_targetport(struct lpfc_hba *phba)
return 0;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
- "6007 Update NVMET port %p did x%x\n",
+ "6007 Update NVMET port x%px did x%x\n",
phba->targetport, vport->fc_myDID);
phba->targetport->port_id = vport->fc_myDID;
@@ -1790,12 +1782,8 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
lpfc_nvmet_defer_release(phba, ctxp);
spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
}
- if (ctxp->state == LPFC_NVMET_STE_RCV)
- lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
- ctxp->oxid);
- else
- lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
- ctxp->oxid);
+ lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
+ ctxp->oxid);
lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
return 0;
@@ -1922,7 +1910,7 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
if (phba->targetport) {
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
- wq = phba->sli4_hba.hdwq[qidx].nvme_wq;
+ wq = phba->sli4_hba.hdwq[qidx].io_wq;
lpfc_nvmet_wqfull_flush(phba, wq, NULL);
}
tgtp->tport_unreg_cmp = &tport_unreg_cmp;
@@ -1930,7 +1918,7 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
if (!wait_for_completion_timeout(tgtp->tport_unreg_cmp,
msecs_to_jiffies(LPFC_NVMET_WAIT_TMO)))
lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
- "6179 Unreg targetport %p timeout "
+ "6179 Unreg targetport x%px timeout "
"reached.\n", phba->targetport);
lpfc_nvmet_cleanup_io_context(phba);
}
@@ -1962,12 +1950,10 @@ lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
uint32_t *payload;
uint32_t size, oxid, sid, rc;
- fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
- oxid = be16_to_cpu(fc_hdr->fh_ox_id);
- if (!phba->targetport) {
+ if (!nvmebuf || !phba->targetport) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
- "6154 LS Drop IO x%x\n", oxid);
+ "6154 LS Drop IO\n");
oxid = 0;
size = 0;
sid = 0;
@@ -1975,6 +1961,9 @@ lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
goto dropit;
}
+ fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
+ oxid = be16_to_cpu(fc_hdr->fh_ox_id);
+
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
payload = (uint32_t *)(nvmebuf->dbuf.virt);
size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);
@@ -2330,7 +2319,6 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
ctxp->state, ctxp->entry_cnt, ctxp->oxid);
}
ctxp->wqeq = NULL;
- ctxp->txrdy = NULL;
ctxp->offset = 0;
ctxp->phba = phba;
ctxp->size = size;
@@ -2405,6 +2393,11 @@ lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
d_buf = piocb->context2;
nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
+ if (!nvmebuf) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ "3015 LS Drop IO\n");
+ return;
+ }
if (phba->nvmet_support == 0) {
lpfc_in_buf_free(phba, &nvmebuf->dbuf);
return;
@@ -2433,6 +2426,11 @@ lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
uint64_t isr_timestamp,
uint8_t cqflag)
{
+ if (!nvmebuf) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ "3167 NVMET FCP Drop IO\n");
+ return;
+ }
if (phba->nvmet_support == 0) {
lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
return;
@@ -2599,7 +2597,6 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
struct scatterlist *sgel;
union lpfc_wqe128 *wqe;
struct ulp_bde64 *bde;
- uint32_t *txrdy;
dma_addr_t physaddr;
int i, cnt;
int do_pbde;
@@ -2761,23 +2758,11 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
&lpfc_treceive_cmd_template.words[3],
sizeof(uint32_t) * 9);
- /* Words 0 - 2 : The first sg segment */
- txrdy = dma_pool_alloc(phba->txrdy_payload_pool,
- GFP_KERNEL, &physaddr);
- if (!txrdy) {
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
- "6041 Bad txrdy buffer: oxid x%x\n",
- ctxp->oxid);
- return NULL;
- }
- ctxp->txrdy = txrdy;
- ctxp->txrdy_phys = physaddr;
- wqe->fcp_treceive.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
- wqe->fcp_treceive.bde.tus.f.bdeSize = TXRDY_PAYLOAD_LEN;
- wqe->fcp_treceive.bde.addrLow =
- cpu_to_le32(putPaddrLow(physaddr));
- wqe->fcp_treceive.bde.addrHigh =
- cpu_to_le32(putPaddrHigh(physaddr));
+ /* Words 0 - 2 : First SGE is skipped, set invalid BDE type */
+ wqe->fcp_treceive.bde.tus.f.bdeFlags = LPFC_SGE_TYPE_SKIP;
+ wqe->fcp_treceive.bde.tus.f.bdeSize = 0;
+ wqe->fcp_treceive.bde.addrLow = 0;
+ wqe->fcp_treceive.bde.addrHigh = 0;
/* Word 4 */
wqe->fcp_treceive.relative_offset = ctxp->offset;
@@ -2812,17 +2797,13 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
/* Word 12 */
wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
- /* Setup 1 TXRDY and 1 SKIP SGE */
- txrdy[0] = 0;
- txrdy[1] = cpu_to_be32(rsp->transfer_length);
- txrdy[2] = 0;
-
- sgl->addr_hi = putPaddrHigh(physaddr);
- sgl->addr_lo = putPaddrLow(physaddr);
+ /* Setup 2 SKIP SGEs */
+ sgl->addr_hi = 0;
+ sgl->addr_lo = 0;
sgl->word2 = 0;
- bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
+ bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
sgl->word2 = cpu_to_le32(sgl->word2);
- sgl->sge_len = cpu_to_le32(TXRDY_PAYLOAD_LEN);
+ sgl->sge_len = 0;
sgl++;
sgl->addr_hi = 0;
sgl->addr_lo = 0;
@@ -3113,7 +3094,7 @@ lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
atomic_inc(&tgtp->xmt_ls_abort_cmpl);
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
- "6083 Abort cmpl: ctx %p WCQE:%08x %08x %08x %08x\n",
+ "6083 Abort cmpl: ctx x%px WCQE:%08x %08x %08x %08x\n",
ctxp, wcqe->word0, wcqe->total_data_placed,
result, wcqe->word3);
@@ -3243,9 +3224,9 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
{
struct lpfc_nvmet_tgtport *tgtp;
struct lpfc_iocbq *abts_wqeq;
- union lpfc_wqe128 *abts_wqe;
struct lpfc_nodelist *ndlp;
unsigned long flags;
+ u8 opt;
int rc;
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
@@ -3284,8 +3265,8 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
return 0;
}
abts_wqeq = ctxp->abort_wqeq;
- abts_wqe = &abts_wqeq->wqe;
ctxp->state = LPFC_NVMET_STE_ABORT;
+ opt = (ctxp->flag & LPFC_NVMET_ABTS_RCV) ? INHIBIT_ABORT : 0;
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
/* Announce entry to new IO submit field. */
@@ -3299,7 +3280,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
*/
spin_lock_irqsave(&phba->hbalock, flags);
/* driver queued commands are in process of being flushed */
- if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) {
+ if (phba->hba_flag & HBA_IOQ_FLUSH) {
spin_unlock_irqrestore(&phba->hbalock, flags);
atomic_inc(&tgtp->xmt_abort_rsp_error);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
@@ -3331,40 +3312,12 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
/* Ready - mark outstanding as aborted by driver. */
abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED;
- /* WQEs are reused. Clear stale data and set key fields to
- * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
- */
- memset(abts_wqe, 0, sizeof(union lpfc_wqe));
-
- /* word 3 */
- bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
-
- /* word 7 */
- bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0);
- bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
-
- /* word 8 - tell the FW to abort the IO associated with this
- * outstanding exchange ID.
- */
- abts_wqe->abort_cmd.wqe_com.abort_tag = ctxp->wqeq->sli4_xritag;
-
- /* word 9 - this is the iotag for the abts_wqe completion. */
- bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
- abts_wqeq->iotag);
-
- /* word 10 */
- bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
- bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
-
- /* word 11 */
- bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
- bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
- bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
+ lpfc_nvme_prep_abort_wqe(abts_wqeq, ctxp->wqeq->sli4_xritag, opt);
/* ABTS WQE must go to the same WQ as the WQE to be aborted */
abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
- abts_wqeq->iocb_cmpl = 0;
+ abts_wqeq->iocb_cmpl = NULL;
abts_wqeq->iocb_flag |= LPFC_IO_NVME;
abts_wqeq->context2 = ctxp;
abts_wqeq->vport = phba->pport;
@@ -3499,7 +3452,7 @@ lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
spin_lock_irqsave(&phba->hbalock, flags);
abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
- abts_wqeq->iocb_cmpl = 0;
+ abts_wqeq->iocb_cmpl = NULL;
abts_wqeq->iocb_flag |= LPFC_IO_NVME_LS;
rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
spin_unlock_irqrestore(&phba->hbalock, flags);
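Both abort paths above (lpfc_nvme_fcp_abort and lpfc_nvmet_sol_fcp_issue_abort) now delegate the abort WQE setup to lpfc_nvme_prep_abort_wqe() instead of building it inline. Reconstructed from the deleted inline code, the helper plausibly looks like the sketch below; the exact signature and the handling of the opt/INHIBIT_ABORT argument are inferred from the call sites, not quoted from this patch:

/* Sketch of lpfc_nvme_prep_abort_wqe(), reassembled from the inline code
 * removed above; field names come from those hunks, and the 'ia' handling
 * for INHIBIT_ABORT is an assumption based on the nvmet call site.
 */
static void
sketch_prep_abort_wqe(struct lpfc_iocbq *pwqeq, u16 xritag, u8 opt)
{
	union lpfc_wqe128 *wqe = &pwqeq->wqe;

	/* WQEs are reused: clear stale ia, iaab, iaar, xri_tag, ctxt_tag */
	memset(wqe, 0, sizeof(*wqe));

	/* word 3: abort by XRI tag; inhibit the ABTS if one was received */
	if (opt & INHIBIT_ABORT)
		bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
	bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);

	/* word 7 */
	bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);

	/* word 8: exchange ID of the IO to abort */
	wqe->abort_cmd.wqe_com.abort_tag = xritag;

	/* word 9: iotag used for the abort completion */
	bf_set(wqe_reqtag, &wqe->abort_cmd.wqe_com, pwqeq->iotag);

	/* word 10 */
	bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
	bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);

	/* word 11 */
	bf_set(wqe_cmd_type, &wqe->abort_cmd.wqe_com, OTHER_COMMAND);
	bf_set(wqe_wqec, &wqe->abort_cmd.wqe_com, 1);
	bf_set(wqe_cqid, &wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
}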
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h
index 8ff67deac10a..b80b1639b9a7 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.h
+++ b/drivers/scsi/lpfc/lpfc_nvmet.h
@@ -112,9 +112,7 @@ struct lpfc_nvmet_rcv_ctx {
struct lpfc_hba *phba;
struct lpfc_iocbq *wqeq;
struct lpfc_iocbq *abort_wqeq;
- dma_addr_t txrdy_phys;
spinlock_t ctxlock; /* protect flag access */
- uint32_t *txrdy;
uint32_t sid;
uint32_t offset;
uint16_t oxid;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index f9df800e7067..2c7e0b22db2f 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -53,8 +53,6 @@
#define LPFC_RESET_WAIT 2
#define LPFC_ABORT_WAIT 2
-int _dump_buf_done = 1;
-
static char *dif_op_str[] = {
"PROT_NORMAL",
"PROT_READ_INSERT",
@@ -89,63 +87,6 @@ lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
static int
lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc);
-static void
-lpfc_debug_save_data(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
-{
- void *src, *dst;
- struct scatterlist *sgde = scsi_sglist(cmnd);
-
- if (!_dump_buf_data) {
- lpfc_printf_log(phba, KERN_ERR, LOG_BG,
- "9050 BLKGRD: ERROR %s _dump_buf_data is NULL\n",
- __func__);
- return;
- }
-
-
- if (!sgde) {
- lpfc_printf_log(phba, KERN_ERR, LOG_BG,
- "9051 BLKGRD: ERROR: data scatterlist is null\n");
- return;
- }
-
- dst = (void *) _dump_buf_data;
- while (sgde) {
- src = sg_virt(sgde);
- memcpy(dst, src, sgde->length);
- dst += sgde->length;
- sgde = sg_next(sgde);
- }
-}
-
-static void
-lpfc_debug_save_dif(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
-{
- void *src, *dst;
- struct scatterlist *sgde = scsi_prot_sglist(cmnd);
-
- if (!_dump_buf_dif) {
- lpfc_printf_log(phba, KERN_ERR, LOG_BG,
- "9052 BLKGRD: ERROR %s _dump_buf_data is NULL\n",
- __func__);
- return;
- }
-
- if (!sgde) {
- lpfc_printf_log(phba, KERN_ERR, LOG_BG,
- "9053 BLKGRD: ERROR: prot scatterlist is null\n");
- return;
- }
-
- dst = _dump_buf_dif;
- while (sgde) {
- src = sg_virt(sgde);
- memcpy(dst, src, sgde->length);
- dst += sgde->length;
- sgde = sg_next(sgde);
- }
-}
-
static inline unsigned
lpfc_cmd_blksize(struct scsi_cmnd *sc)
{
@@ -193,21 +134,21 @@ lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
/**
* lpfc_update_stats - Update statistical data for the command completion
- * @phba: Pointer to HBA object.
+ * @vport: The virtual port on which this call is executing.
* @lpfc_cmd: lpfc scsi command object pointer.
*
* This function is called when there is a command completion and this
* function updates the statistical data for the command completion.
**/
static void
-lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
+lpfc_update_stats(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd)
{
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_rport_data *rdata;
struct lpfc_nodelist *pnode;
struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
unsigned long flags;
- struct Scsi_Host *shost = cmd->device->host;
- struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
unsigned long latency;
int i;
@@ -537,29 +478,32 @@ lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
qp = &phba->sli4_hba.hdwq[idx];
- spin_lock(&qp->abts_scsi_buf_list_lock);
+ spin_lock(&qp->abts_io_buf_list_lock);
list_for_each_entry_safe(psb, next_psb,
- &qp->lpfc_abts_scsi_buf_list, list) {
+ &qp->lpfc_abts_io_buf_list, list) {
+ if (psb->cur_iocbq.iocb_flag & LPFC_IO_NVME)
+ continue;
+
if (psb->rdata && psb->rdata->pnode &&
psb->rdata->pnode->vport == vport)
psb->rdata = NULL;
}
- spin_unlock(&qp->abts_scsi_buf_list_lock);
+ spin_unlock(&qp->abts_io_buf_list_lock);
}
spin_unlock_irqrestore(&phba->hbalock, iflag);
}
/**
- * lpfc_sli4_fcp_xri_aborted - Fast-path process of fcp xri abort
+ * lpfc_sli4_io_xri_aborted - Fast-path process of fcp xri abort
* @phba: pointer to lpfc hba data structure.
* @axri: pointer to the fcp xri abort wcqe structure.
*
* This routine is invoked by the worker thread to process a SLI4 fast-path
- * FCP aborted xri.
+ * FCP or NVME aborted xri.
**/
void
-lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
- struct sli4_wcqe_xri_aborted *axri, int idx)
+lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
+ struct sli4_wcqe_xri_aborted *axri, int idx)
{
uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
@@ -577,16 +521,23 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
qp = &phba->sli4_hba.hdwq[idx];
spin_lock_irqsave(&phba->hbalock, iflag);
- spin_lock(&qp->abts_scsi_buf_list_lock);
+ spin_lock(&qp->abts_io_buf_list_lock);
list_for_each_entry_safe(psb, next_psb,
- &qp->lpfc_abts_scsi_buf_list, list) {
+ &qp->lpfc_abts_io_buf_list, list) {
if (psb->cur_iocbq.sli4_xritag == xri) {
- list_del(&psb->list);
- qp->abts_scsi_io_bufs--;
- psb->exch_busy = 0;
+ list_del_init(&psb->list);
+ psb->flags &= ~LPFC_SBUF_XBUSY;
psb->status = IOSTAT_SUCCESS;
- spin_unlock(
- &qp->abts_scsi_buf_list_lock);
+ if (psb->cur_iocbq.iocb_flag & LPFC_IO_NVME) {
+ qp->abts_nvme_io_bufs--;
+ spin_unlock(&qp->abts_io_buf_list_lock);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ lpfc_sli4_nvme_xri_aborted(phba, axri, psb);
+ return;
+ }
+ qp->abts_scsi_io_bufs--;
+ spin_unlock(&qp->abts_io_buf_list_lock);
+
if (psb->rdata && psb->rdata->pnode)
ndlp = psb->rdata->pnode;
else
@@ -605,17 +556,17 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
return;
}
}
- spin_unlock(&qp->abts_scsi_buf_list_lock);
+ spin_unlock(&qp->abts_io_buf_list_lock);
for (i = 1; i <= phba->sli.last_iotag; i++) {
iocbq = phba->sli.iocbq_lookup[i];
- if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
- (iocbq->iocb_flag & LPFC_IO_LIBDFC))
+ if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
+ (iocbq->iocb_flag & LPFC_IO_LIBDFC))
continue;
if (iocbq->sli4_xritag != xri)
continue;
psb = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
- psb->exch_busy = 0;
+ psb->flags &= ~LPFC_SBUF_XBUSY;
spin_unlock_irqrestore(&phba->hbalock, iflag);
if (!list_empty(&pring->txq))
lpfc_worker_wake_up(phba);
@@ -685,8 +636,9 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
IOCB_t *iocb;
dma_addr_t pdma_phys_fcp_rsp;
dma_addr_t pdma_phys_fcp_cmd;
- uint32_t sgl_size, cpu, idx;
+ uint32_t cpu, idx;
int tag;
+ struct fcp_cmd_rsp_buf *tmp = NULL;
cpu = raw_smp_processor_id();
if (cmnd && phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
@@ -704,9 +656,6 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
return NULL;
}
- sgl_size = phba->cfg_sg_dma_buf_size -
- (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
-
/* Setup key fields in buffer that may have been changed
* if other protocols used this buffer.
*/
@@ -721,9 +670,12 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
lpfc_cmd->prot_data_type = 0;
#endif
- lpfc_cmd->fcp_cmnd = (lpfc_cmd->data + sgl_size);
- lpfc_cmd->fcp_rsp = (struct fcp_rsp *)((uint8_t *)lpfc_cmd->fcp_cmnd +
- sizeof(struct fcp_cmnd));
+ tmp = lpfc_get_cmd_rsp_buf_per_hdwq(phba, lpfc_cmd);
+ if (!tmp)
+ return NULL;
+
+ lpfc_cmd->fcp_cmnd = tmp->fcp_cmnd;
+ lpfc_cmd->fcp_rsp = tmp->fcp_rsp;
/*
* The first two SGEs are the FCP_CMD and FCP_RSP.
@@ -731,7 +683,7 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
* first two and leave the rest for queuecommand.
*/
sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
- pdma_phys_fcp_cmd = (lpfc_cmd->dma_handle + sgl_size);
+ pdma_phys_fcp_cmd = tmp->fcp_cmd_rsp_dma_handle;
sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
sgl->word2 = le32_to_cpu(sgl->word2);
@@ -834,12 +786,12 @@ lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
psb->prot_seg_cnt = 0;
qp = psb->hdwq;
- if (psb->exch_busy) {
- spin_lock_irqsave(&qp->abts_scsi_buf_list_lock, iflag);
+ if (psb->flags & LPFC_SBUF_XBUSY) {
+ spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
psb->pCmd = NULL;
- list_add_tail(&psb->list, &qp->lpfc_abts_scsi_buf_list);
+ list_add_tail(&psb->list, &qp->lpfc_abts_io_buf_list);
qp->abts_scsi_io_bufs++;
- spin_unlock_irqrestore(&qp->abts_scsi_buf_list_lock, iflag);
+ spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
} else {
lpfc_release_io_buf(phba, (struct lpfc_io_buf *)psb, qp);
}
@@ -918,9 +870,10 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
"dma_map_sg. Config %d, seg_cnt %d\n",
__func__, phba->cfg_sg_seg_cnt,
lpfc_cmd->seg_cnt);
+ WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
lpfc_cmd->seg_cnt = 0;
scsi_dma_unmap(scsi_cmnd);
- return 1;
+ return 2;
}
/*
@@ -1774,7 +1727,7 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
if (!sgpe || !sgde) {
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
- "9020 Invalid s/g entry: data=0x%p prot=0x%p\n",
+ "9020 Invalid s/g entry: data=x%px prot=x%px\n",
sgpe, sgde);
return 0;
}
@@ -1989,7 +1942,8 @@ out:
**/
static int
lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
- struct sli4_sge *sgl, int datasegcnt)
+ struct sli4_sge *sgl, int datasegcnt,
+ struct lpfc_io_buf *lpfc_cmd)
{
struct scatterlist *sgde = NULL; /* s/g data entry */
struct sli4_sge_diseed *diseed = NULL;
@@ -2003,6 +1957,9 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
uint32_t checking = 1;
uint32_t dma_len;
uint32_t dma_offset = 0;
+ struct sli4_hybrid_sgl *sgl_xtra = NULL;
+ int j;
+ bool lsp_just_set = false;
status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
if (status)
@@ -2062,23 +2019,64 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
sgl++;
/* assumption: caller has already run dma_map_sg on command data */
- scsi_for_each_sg(sc, sgde, datasegcnt, i) {
- physaddr = sg_dma_address(sgde);
- dma_len = sg_dma_len(sgde);
- sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
- sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
- if ((i + 1) == datasegcnt)
- bf_set(lpfc_sli4_sge_last, sgl, 1);
- else
- bf_set(lpfc_sli4_sge_last, sgl, 0);
- bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
- bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
+ sgde = scsi_sglist(sc);
+ j = 3;
+ for (i = 0; i < datasegcnt; i++) {
+ /* clear it */
+ sgl->word2 = 0;
- sgl->sge_len = cpu_to_le32(dma_len);
- dma_offset += dma_len;
+ /* do we need to expand the segment */
+ if (!lsp_just_set && !((j + 1) % phba->border_sge_num) &&
+ ((datasegcnt - 1) != i)) {
+ /* set LSP type */
+ bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP);
+
+ sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd);
+
+ if (unlikely(!sgl_xtra)) {
+ lpfc_cmd->seg_cnt = 0;
+ return 0;
+ }
+ sgl->addr_lo = cpu_to_le32(putPaddrLow(
+ sgl_xtra->dma_phys_sgl));
+ sgl->addr_hi = cpu_to_le32(putPaddrHigh(
+ sgl_xtra->dma_phys_sgl));
+
+ } else {
+ bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
+ }
+
+ if (!(bf_get(lpfc_sli4_sge_type, sgl) & LPFC_SGE_TYPE_LSP)) {
+ if ((datasegcnt - 1) == i)
+ bf_set(lpfc_sli4_sge_last, sgl, 1);
+ physaddr = sg_dma_address(sgde);
+ dma_len = sg_dma_len(sgde);
+ sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
+ sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
+
+ bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
+ sgl->word2 = cpu_to_le32(sgl->word2);
+ sgl->sge_len = cpu_to_le32(dma_len);
+
+ dma_offset += dma_len;
+ sgde = sg_next(sgde);
+
+ sgl++;
+ num_sge++;
+ lsp_just_set = false;
+
+ } else {
+ sgl->word2 = cpu_to_le32(sgl->word2);
+ sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size);
+
+ sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
+ i = i - 1;
+
+ lsp_just_set = true;
+ }
+
+ j++;
- sgl++;
- num_sge++;
}
out:
@@ -2124,7 +2122,8 @@ out:
**/
static int
lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
- struct sli4_sge *sgl, int datacnt, int protcnt)
+ struct sli4_sge *sgl, int datacnt, int protcnt,
+ struct lpfc_io_buf *lpfc_cmd)
{
struct scatterlist *sgde = NULL; /* s/g data entry */
struct scatterlist *sgpe = NULL; /* s/g prot entry */
@@ -2146,14 +2145,15 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
#endif
uint32_t checking = 1;
uint32_t dma_offset = 0;
- int num_sge = 0;
+ int num_sge = 0, j = 2;
+ struct sli4_hybrid_sgl *sgl_xtra = NULL;
sgpe = scsi_prot_sglist(sc);
sgde = scsi_sglist(sc);
if (!sgpe || !sgde) {
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
- "9082 Invalid s/g entry: data=0x%p prot=0x%p\n",
+ "9082 Invalid s/g entry: data=x%px prot=x%px\n",
sgpe, sgde);
return 0;
}
@@ -2179,9 +2179,37 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
split_offset = 0;
do {
/* Check to see if we ran out of space */
- if (num_sge >= (phba->cfg_total_seg_cnt - 2))
+ if ((num_sge >= (phba->cfg_total_seg_cnt - 2)) &&
+ !(phba->cfg_xpsgl))
return num_sge + 3;
+ /* DISEED and DIF have to be together */
+ if (!((j + 1) % phba->border_sge_num) ||
+ !((j + 2) % phba->border_sge_num) ||
+ !((j + 3) % phba->border_sge_num)) {
+ sgl->word2 = 0;
+
+ /* set LSP type */
+ bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP);
+
+ sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd);
+
+ if (unlikely(!sgl_xtra)) {
+ goto out;
+ } else {
+ sgl->addr_lo = cpu_to_le32(putPaddrLow(
+ sgl_xtra->dma_phys_sgl));
+ sgl->addr_hi = cpu_to_le32(putPaddrHigh(
+ sgl_xtra->dma_phys_sgl));
+ }
+
+ sgl->word2 = cpu_to_le32(sgl->word2);
+ sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size);
+
+ sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
+ j = 0;
+ }
+
/* setup DISEED with what we have */
diseed = (struct sli4_sge_diseed *) sgl;
memset(diseed, 0, sizeof(struct sli4_sge_diseed));
@@ -2228,7 +2256,9 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
/* advance sgl and increment bde count */
num_sge++;
+
sgl++;
+ j++;
/* setup the first BDE that points to protection buffer */
protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
@@ -2243,6 +2273,7 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
sgl->addr_hi = le32_to_cpu(putPaddrHigh(protphysaddr));
sgl->addr_lo = le32_to_cpu(putPaddrLow(protphysaddr));
sgl->word2 = cpu_to_le32(sgl->word2);
+ sgl->sge_len = 0;
protgrp_blks = protgroup_len / 8;
protgrp_bytes = protgrp_blks * blksize;
@@ -2263,9 +2294,14 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
/* setup SGE's for data blocks associated with DIF data */
pgdone = 0;
subtotal = 0; /* total bytes processed for current prot grp */
+
+ sgl++;
+ j++;
+
while (!pgdone) {
/* Check to see if we ran out of space */
- if (num_sge >= phba->cfg_total_seg_cnt)
+ if ((num_sge >= phba->cfg_total_seg_cnt) &&
+ !phba->cfg_xpsgl)
return num_sge + 1;
if (!sgde) {
@@ -2274,60 +2310,101 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
__func__);
return 0;
}
- sgl++;
- dataphysaddr = sg_dma_address(sgde) + split_offset;
- remainder = sg_dma_len(sgde) - split_offset;
+ if (!((j + 1) % phba->border_sge_num)) {
+ sgl->word2 = 0;
- if ((subtotal + remainder) <= protgrp_bytes) {
- /* we can use this whole buffer */
- dma_len = remainder;
- split_offset = 0;
+ /* set LSP type */
+ bf_set(lpfc_sli4_sge_type, sgl,
+ LPFC_SGE_TYPE_LSP);
- if ((subtotal + remainder) == protgrp_bytes)
- pgdone = 1;
+ sgl_xtra = lpfc_get_sgl_per_hdwq(phba,
+ lpfc_cmd);
+
+ if (unlikely(!sgl_xtra)) {
+ goto out;
+ } else {
+ sgl->addr_lo = cpu_to_le32(
+ putPaddrLow(sgl_xtra->dma_phys_sgl));
+ sgl->addr_hi = cpu_to_le32(
+ putPaddrHigh(sgl_xtra->dma_phys_sgl));
+ }
+
+ sgl->word2 = cpu_to_le32(sgl->word2);
+ sgl->sge_len = cpu_to_le32(
+ phba->cfg_sg_dma_buf_size);
+
+ sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
} else {
- /* must split this buffer with next prot grp */
- dma_len = protgrp_bytes - subtotal;
- split_offset += dma_len;
- }
+ dataphysaddr = sg_dma_address(sgde) +
+ split_offset;
- subtotal += dma_len;
+ remainder = sg_dma_len(sgde) - split_offset;
- sgl->addr_lo = cpu_to_le32(putPaddrLow(dataphysaddr));
- sgl->addr_hi = cpu_to_le32(putPaddrHigh(dataphysaddr));
- bf_set(lpfc_sli4_sge_last, sgl, 0);
- bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
- bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
+ if ((subtotal + remainder) <= protgrp_bytes) {
+ /* we can use this whole buffer */
+ dma_len = remainder;
+ split_offset = 0;
- sgl->sge_len = cpu_to_le32(dma_len);
- dma_offset += dma_len;
+ if ((subtotal + remainder) ==
+ protgrp_bytes)
+ pgdone = 1;
+ } else {
+ /* must split this buffer with next
+ * prot grp
+ */
+ dma_len = protgrp_bytes - subtotal;
+ split_offset += dma_len;
+ }
- num_sge++;
- curr_data++;
+ subtotal += dma_len;
- if (split_offset)
- break;
+ sgl->word2 = 0;
+ sgl->addr_lo = cpu_to_le32(putPaddrLow(
+ dataphysaddr));
+ sgl->addr_hi = cpu_to_le32(putPaddrHigh(
+ dataphysaddr));
+ bf_set(lpfc_sli4_sge_last, sgl, 0);
+ bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
+ bf_set(lpfc_sli4_sge_type, sgl,
+ LPFC_SGE_TYPE_DATA);
- /* Move to the next s/g segment if possible */
- sgde = sg_next(sgde);
+ sgl->sge_len = cpu_to_le32(dma_len);
+ dma_offset += dma_len;
+
+ num_sge++;
+ curr_data++;
+
+ if (split_offset) {
+ sgl++;
+ j++;
+ break;
+ }
+
+ /* Move to the next s/g segment if possible */
+ sgde = sg_next(sgde);
+
+ sgl++;
+ }
+
+ j++;
}
if (protgroup_offset) {
/* update the reference tag */
reftag += protgrp_blks;
- sgl++;
continue;
}
/* are we done ? */
if (curr_prot == protcnt) {
+ /* mark the last SGL */
+ sgl--;
bf_set(lpfc_sli4_sge_last, sgl, 1);
alldone = 1;
} else if (curr_prot < protcnt) {
/* advance to next prot buffer */
sgpe = sg_next(sgpe);
- sgl++;
/* update the reference tag */
reftag += protgrp_blks;
@@ -2430,7 +2507,10 @@ lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
*
* This is the protection/DIF aware version of
* lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
- * two functions eventually, but for now, it's here
+ * two functions eventually, but for now, it's here.
+ * RETURNS 0 - SUCCESS,
+ * 1 - Failed DMA map, retry.
+ * 2 - Invalid scsi cmd or prot-type. Do not retry.
**/
static int
lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
@@ -2444,6 +2524,7 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
int prot_group_type = 0;
int fcpdl;
+ int ret = 1;
struct lpfc_vport *vport = phba->pport;
/*
@@ -2467,8 +2548,11 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
lpfc_cmd->seg_cnt = datasegcnt;
/* First check if data segment count from SCSI Layer is good */
- if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt)
+ if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
+ WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
+ ret = 2;
goto err;
+ }
prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
@@ -2476,14 +2560,18 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
case LPFC_PG_TYPE_NO_DIF:
/* Here we need to add a PDE5 and PDE6 to the count */
- if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt)
+ if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt) {
+ ret = 2;
goto err;
+ }
num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
datasegcnt);
/* we should have 2 or more entries in buffer list */
- if (num_bde < 2)
+ if (num_bde < 2) {
+ ret = 2;
goto err;
+ }
break;
case LPFC_PG_TYPE_DIF_BUF:
@@ -2507,15 +2595,19 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
* protection data segment.
*/
if ((lpfc_cmd->prot_seg_cnt * 4) >
- (phba->cfg_total_seg_cnt - 2))
+ (phba->cfg_total_seg_cnt - 2)) {
+ ret = 2;
goto err;
+ }
num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
datasegcnt, protsegcnt);
/* we should have 3 or more entries in buffer list */
if ((num_bde < 3) ||
- (num_bde > phba->cfg_total_seg_cnt))
+ (num_bde > phba->cfg_total_seg_cnt)) {
+ ret = 2;
goto err;
+ }
break;
case LPFC_PG_TYPE_INVALID:
@@ -2526,7 +2618,7 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
"9022 Unexpected protection group %i\n",
prot_group_type);
- return 1;
+ return 2;
}
}
@@ -2576,7 +2668,7 @@ err:
lpfc_cmd->seg_cnt = 0;
lpfc_cmd->prot_seg_cnt = 0;
- return 1;
+ return ret;
}
/*
@@ -2809,26 +2901,6 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
uint32_t bgstat = bgf->bgstat;
uint64_t failing_sector = 0;
- spin_lock(&_dump_buf_lock);
- if (!_dump_buf_done) {
- lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9070 BLKGRD: Saving"
- " Data for %u blocks to debugfs\n",
- (cmd->cmnd[7] << 8 | cmd->cmnd[8]));
- lpfc_debug_save_data(phba, cmd);
-
- /* If we have a prot sgl, save the DIF buffer */
- if (lpfc_prot_group_type(phba, cmd) ==
- LPFC_PG_TYPE_DIF_BUF) {
- lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9071 BLKGRD: "
- "Saving DIF for %u blocks to debugfs\n",
- (cmd->cmnd[7] << 8 | cmd->cmnd[8]));
- lpfc_debug_save_dif(phba, cmd);
- }
-
- _dump_buf_done = 1;
- }
- spin_unlock(&_dump_buf_lock);
-
if (lpfc_bgs_get_invalid_prof(bgstat)) {
cmd->result = DID_ERROR << 16;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
@@ -2962,7 +3034,8 @@ out:
* field of @lpfc_cmd for device with SLI-4 interface spec.
*
* Return codes:
- * 1 - Error
+ * 2 - Error - Do not retry
+ * 1 - Error - Retry
* 0 - Success
**/
static int
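The hunk above introduces a three-way return convention for the SLI-4 DMA-prep routines: 0 = mapped, 1 = transient failure worth retrying, 2 = malformed request, do not retry. A queuecommand-path caller would be expected to translate those roughly as below; this is a sketch, and the label names plus the exact host-status plumbing are assumptions rather than code from this patch:

	/* Sketch of how a caller consumes the 0 / 1 / 2 convention. */
	err = lpfc_scsi_prep_dma_buf_s4(phba, lpfc_cmd);
	switch (err) {
	case 0:
		break;				/* mapped; go build and post the WQE */
	case 1:
		ret = SCSI_MLQUEUE_HOST_BUSY;	/* transient; let the midlayer retry */
		goto out_host_busy_release_buf;	/* hypothetical label */
	default:
		cmnd->result = DID_ERROR << 16;	/* bad request; complete with error */
		goto out_fail_command_release_buf;	/* hypothetical label */
	}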
@@ -2978,8 +3051,10 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
uint32_t num_bde = 0;
uint32_t dma_len;
uint32_t dma_offset = 0;
- int nseg;
+ int nseg, i, j;
struct ulp_bde64 *bde;
+ bool lsp_just_set = false;
+ struct sli4_hybrid_sgl *sgl_xtra = NULL;
/*
* There are three possibilities here - use scatter-gather segment, use
@@ -3006,15 +3081,17 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
sgl += 1;
first_data_sgl = sgl;
lpfc_cmd->seg_cnt = nseg;
- if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
+ if (!phba->cfg_xpsgl &&
+ lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9074 BLKGRD:"
" %s: Too many sg segments from "
"dma_map_sg. Config %d, seg_cnt %d\n",
__func__, phba->cfg_sg_seg_cnt,
lpfc_cmd->seg_cnt);
+ WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
lpfc_cmd->seg_cnt = 0;
scsi_dma_unmap(scsi_cmnd);
- return 1;
+ return 2;
}
/*
@@ -3026,22 +3103,80 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
* the IOCB. If it can't then the BDEs get added to a BPL as it
* does for SLI-2 mode.
*/
- scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
- physaddr = sg_dma_address(sgel);
- dma_len = sg_dma_len(sgel);
- sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
- sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
- sgl->word2 = le32_to_cpu(sgl->word2);
- if ((num_bde + 1) == nseg)
+
+ /* for tracking segment boundaries */
+ sgel = scsi_sglist(scsi_cmnd);
+ j = 2;
+ for (i = 0; i < nseg; i++) {
+ sgl->word2 = 0;
+ if ((num_bde + 1) == nseg) {
bf_set(lpfc_sli4_sge_last, sgl, 1);
- else
+ bf_set(lpfc_sli4_sge_type, sgl,
+ LPFC_SGE_TYPE_DATA);
+ } else {
bf_set(lpfc_sli4_sge_last, sgl, 0);
- bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
- bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
- sgl->word2 = cpu_to_le32(sgl->word2);
- sgl->sge_len = cpu_to_le32(dma_len);
- dma_offset += dma_len;
- sgl++;
+
+ /* do we need to expand the segment */
+ if (!lsp_just_set &&
+ !((j + 1) % phba->border_sge_num) &&
+ ((nseg - 1) != i)) {
+ /* set LSP type */
+ bf_set(lpfc_sli4_sge_type, sgl,
+ LPFC_SGE_TYPE_LSP);
+
+ sgl_xtra = lpfc_get_sgl_per_hdwq(
+ phba, lpfc_cmd);
+
+ if (unlikely(!sgl_xtra)) {
+ lpfc_cmd->seg_cnt = 0;
+ scsi_dma_unmap(scsi_cmnd);
+ return 1;
+ }
+ sgl->addr_lo = cpu_to_le32(putPaddrLow(
+ sgl_xtra->dma_phys_sgl));
+ sgl->addr_hi = cpu_to_le32(putPaddrHigh(
+ sgl_xtra->dma_phys_sgl));
+
+ } else {
+ bf_set(lpfc_sli4_sge_type, sgl,
+ LPFC_SGE_TYPE_DATA);
+ }
+ }
+
+ if (!(bf_get(lpfc_sli4_sge_type, sgl) &
+ LPFC_SGE_TYPE_LSP)) {
+ if ((nseg - 1) == i)
+ bf_set(lpfc_sli4_sge_last, sgl, 1);
+
+ physaddr = sg_dma_address(sgel);
+ dma_len = sg_dma_len(sgel);
+ sgl->addr_lo = cpu_to_le32(putPaddrLow(
+ physaddr));
+ sgl->addr_hi = cpu_to_le32(putPaddrHigh(
+ physaddr));
+
+ bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
+ sgl->word2 = cpu_to_le32(sgl->word2);
+ sgl->sge_len = cpu_to_le32(dma_len);
+
+ dma_offset += dma_len;
+ sgel = sg_next(sgel);
+
+ sgl++;
+ lsp_just_set = false;
+
+ } else {
+ sgl->word2 = cpu_to_le32(sgl->word2);
+ sgl->sge_len = cpu_to_le32(
+ phba->cfg_sg_dma_buf_size);
+
+ sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
+ i = i - 1;
+
+ lsp_just_set = true;
+ }
+
+ j++;
}
/*
* Setup the first Payload BDE. For FCoE we just key off
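
As a reading aid (not part of the patch), the new scatter-gather walk above relies on a small piece of boundary arithmetic: with the slot counter j starting at 2, an LSP (link) SGE is emitted whenever (j + 1) is a multiple of phba->border_sge_num and the current segment is not the last one, so the hardware can chain into a freshly allocated SGL chunk; the segment index is then rewound so the data segment itself lands in the new chunk. A minimal standalone sketch of that arithmetic, with assumed values for the counts:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	int border_sge_num = 8;		/* SGEs per chunk; assumed value */
	int nseg = 12;			/* segments from dma_map_sg(); assumed value */
	bool lsp_just_set = false;
	int i, j = 2;			/* j counts SGE slots, as in the patch */

	for (i = 0; i < nseg; i++) {
		if (!lsp_just_set && !((j + 1) % border_sge_num) &&
		    i != nseg - 1) {
			printf("slot %d: LSP entry, chain to a fresh SGL chunk\n", j);
			i--;		/* segment i is emitted in the next chunk */
			lsp_just_set = true;
		} else {
			printf("slot %d: DATA entry for segment %d\n", j, i);
			lsp_just_set = false;
		}
		j++;
	}
	return 0;
}
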
@@ -3110,6 +3245,10 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
* This is the protection/DIF aware version of
* lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
* two functions eventually, but for now, it's here
+ * Return codes:
+ * 2 - Error - Do not retry
+ * 1 - Error - Retry
+ * 0 - Success
**/
static int
lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
@@ -3123,6 +3262,7 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
int prot_group_type = 0;
int fcpdl;
+ int ret = 1;
struct lpfc_vport *vport = phba->pport;
/*
@@ -3152,23 +3292,33 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
lpfc_cmd->seg_cnt = datasegcnt;
/* First check if data segment count from SCSI Layer is good */
- if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt)
+ if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt &&
+ !phba->cfg_xpsgl) {
+ WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
+ ret = 2;
goto err;
+ }
prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
switch (prot_group_type) {
case LPFC_PG_TYPE_NO_DIF:
/* Here we need to add a DISEED to the count */
- if ((lpfc_cmd->seg_cnt + 1) > phba->cfg_total_seg_cnt)
+ if (((lpfc_cmd->seg_cnt + 1) >
+ phba->cfg_total_seg_cnt) &&
+ !phba->cfg_xpsgl) {
+ ret = 2;
goto err;
+ }
num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl,
- datasegcnt);
+ datasegcnt, lpfc_cmd);
/* we should have 2 or more entries in buffer list */
- if (num_sge < 2)
+ if (num_sge < 2) {
+ ret = 2;
goto err;
+ }
break;
case LPFC_PG_TYPE_DIF_BUF:
@@ -3190,17 +3340,23 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
* There is a minimum of 3 SGEs used for every
* protection data segment.
*/
- if ((lpfc_cmd->prot_seg_cnt * 3) >
- (phba->cfg_total_seg_cnt - 2))
+ if (((lpfc_cmd->prot_seg_cnt * 3) >
+ (phba->cfg_total_seg_cnt - 2)) &&
+ !phba->cfg_xpsgl) {
+ ret = 2;
goto err;
+ }
num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl,
- datasegcnt, protsegcnt);
+ datasegcnt, protsegcnt, lpfc_cmd);
/* we should have 3 or more entries in buffer list */
- if ((num_sge < 3) ||
- (num_sge > phba->cfg_total_seg_cnt))
+ if (num_sge < 3 ||
+ (num_sge > phba->cfg_total_seg_cnt &&
+ !phba->cfg_xpsgl)) {
+ ret = 2;
goto err;
+ }
break;
case LPFC_PG_TYPE_INVALID:
@@ -3211,7 +3367,7 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
"9083 Unexpected protection group %i\n",
prot_group_type);
- return 1;
+ return 2;
}
}
@@ -3273,7 +3429,7 @@ err:
lpfc_cmd->seg_cnt = 0;
lpfc_cmd->prot_seg_cnt = 0;
- return 1;
+ return ret;
}
/**
@@ -3656,7 +3812,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
/* Sanity check on return of outstanding command */
cmd = lpfc_cmd->pCmd;
- if (!cmd) {
+ if (!cmd || !phba) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"2621 IO completion: Not an active IO\n");
spin_unlock(&lpfc_cmd->buf_lock);
@@ -3668,7 +3824,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- if (phba->cpucheck_on & LPFC_CHECK_SCSI_IO) {
+ if (unlikely(phba->cpucheck_on & LPFC_CHECK_SCSI_IO)) {
cpu = raw_smp_processor_id();
if (cpu < LPFC_CHECK_CPU_CNT && phba->sli4_hba.hdwq)
phba->sli4_hba.hdwq[idx].cpucheck_cmpl_io[cpu]++;
@@ -3679,7 +3835,10 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK);
lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
/* pick up SLI4 exhange busy status from HBA */
- lpfc_cmd->exch_busy = pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY;
+ if (pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY)
+ lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
+ else
+ lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (lpfc_cmd->prot_data_type) {
@@ -3713,7 +3872,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
}
#endif
- if (lpfc_cmd->status) {
+ if (unlikely(lpfc_cmd->status)) {
if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
(lpfc_cmd->result & IOERR_DRVR_MASK))
lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
@@ -3839,14 +3998,14 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
uint32_t *lp = (uint32_t *)cmd->sense_buffer;
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
- "0710 Iodone <%d/%llu> cmd %p, error "
+ "0710 Iodone <%d/%llu> cmd x%px, error "
"x%x SNS x%x x%x Data: x%x x%x\n",
cmd->device->id, cmd->device->lun, cmd,
cmd->result, *lp, *(lp + 3), cmd->retries,
scsi_get_resid(cmd));
}
- lpfc_update_stats(phba, lpfc_cmd);
+ lpfc_update_stats(vport, lpfc_cmd);
if (vport->cfg_max_scsicmpl_time &&
time_after(jiffies, lpfc_cmd->start_time +
msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
@@ -4454,13 +4613,18 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
}
- if (err)
+ if (unlikely(err)) {
+ if (err == 2) {
+ cmnd->result = DID_ERROR << 16;
+ goto out_fail_command_release_buf;
+ }
goto out_host_busy_free_buf;
+ }
lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- if (phba->cpucheck_on & LPFC_CHECK_SCSI_IO) {
+ if (unlikely(phba->cpucheck_on & LPFC_CHECK_SCSI_IO)) {
cpu = raw_smp_processor_id();
if (cpu < LPFC_CHECK_CPU_CNT) {
struct lpfc_sli4_hdw_queue *hdwq =
@@ -4526,6 +4690,9 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
out_tgt_busy:
return SCSI_MLQUEUE_TARGET_BUSY;
+ out_fail_command_release_buf:
+ lpfc_release_scsi_buf(phba, lpfc_cmd);
+
out_fail_command:
cmnd->scsi_done(cmnd);
return 0;
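
To summarize the new three-valued return convention used by the prep-DMA routines and consumed in lpfc_queuecommand() above (illustrative sketch only, not part of the patch; the enum names are hypothetical): 0 lets the command proceed, 1 asks the midlayer to retry, and 2 fails the command outright with DID_ERROR.

#include <stdio.h>

enum prep_rc { PREP_OK = 0, PREP_RETRY = 1, PREP_NO_RETRY = 2 };

static const char *prep_rc_action(enum prep_rc rc)
{
	switch (rc) {
	case PREP_OK:
		return "issue the command to the adapter";
	case PREP_RETRY:
		return "return SCSI_MLQUEUE_HOST_BUSY so the midlayer retries";
	case PREP_NO_RETRY:
	default:
		return "set DID_ERROR, release the buffer, complete the command";
	}
}

int main(void)
{
	int rc;

	for (rc = 0; rc <= 2; rc++)
		printf("rc=%d -> %s\n", rc, prep_rc_action((enum prep_rc)rc));
	return 0;
}
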
@@ -4568,7 +4735,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
spin_lock_irqsave(&phba->hbalock, flags);
/* driver queued commands are in process of being flushed */
- if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
+ if (phba->hba_flag & HBA_IOQ_FLUSH) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
"3168 SCSI Layer abort requested I/O has been "
"flushed by LLD.\n");
@@ -4589,7 +4756,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
iocb = &lpfc_cmd->cur_iocbq;
if (phba->sli_rev == LPFC_SLI_REV4) {
- pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].fcp_wq->pring;
+ pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq->pring;
if (!pring_s4) {
ret = FAILED;
goto out_unlock_buf;
@@ -4680,20 +4847,21 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
ret_val = __lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
abtsiocb, 0);
}
- /* no longer need the lock after this point */
- spin_unlock_irqrestore(&phba->hbalock, flags);
if (ret_val == IOCB_ERROR) {
/* Indicate the IO is not being aborted by the driver. */
iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
lpfc_cmd->waitq = NULL;
spin_unlock(&lpfc_cmd->buf_lock);
+ spin_unlock_irqrestore(&phba->hbalock, flags);
lpfc_sli_release_iocbq(phba, abtsiocb);
ret = FAILED;
goto out;
}
+ /* no longer need the lock after this point */
spin_unlock(&lpfc_cmd->buf_lock);
+ spin_unlock_irqrestore(&phba->hbalock, flags);
if (phba->cfg_poll & DISABLE_FCP_RING_INT)
lpfc_sli_handle_fast_ring_event(phba,
@@ -4956,7 +5124,7 @@ lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
if (!rdata) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
- "0797 Tgt Map rport failure: rdata x%p\n", rdata);
+ "0797 Tgt Map rport failure: rdata x%px\n", rdata);
return FAILED;
}
pnode = rdata->pnode;
@@ -5054,7 +5222,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
if (!rdata || !rdata->pnode) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
- "0798 Device Reset rdata failure: rdata x%p\n",
+ "0798 Device Reset rdata failure: rdata x%px\n",
rdata);
return FAILED;
}
@@ -5066,7 +5234,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
status = lpfc_chk_tgt_mapped(vport, cmnd);
if (status == FAILED) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
- "0721 Device Reset rport failure: rdata x%p\n", rdata);
+ "0721 Device Reset rport failure: rdata x%px\n", rdata);
return FAILED;
}
@@ -5125,7 +5293,7 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
if (!rdata || !rdata->pnode) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
- "0799 Target Reset rdata failure: rdata x%p\n",
+ "0799 Target Reset rdata failure: rdata x%px\n",
rdata);
return FAILED;
}
@@ -5137,7 +5305,7 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
status = lpfc_chk_tgt_mapped(vport, cmnd);
if (status == FAILED) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
- "0722 Target Reset rport failure: rdata x%p\n", rdata);
+ "0722 Target Reset rport failure: rdata x%px\n", rdata);
if (pnode) {
spin_lock_irq(shost->host_lock);
pnode->nlp_flag &= ~NLP_NPR_ADISC;
@@ -5295,18 +5463,20 @@ lpfc_host_reset_handler(struct scsi_cmnd *cmnd)
lpfc_offline(phba);
rc = lpfc_sli_brdrestart(phba);
if (rc)
- ret = FAILED;
+ goto error;
+
rc = lpfc_online(phba);
if (rc)
- ret = FAILED;
+ goto error;
+
lpfc_unblock_mgmt_io(phba);
- if (ret == FAILED) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
- "3323 Failed host reset, bring it offline\n");
- lpfc_sli4_offline_eratt(phba);
- }
return ret;
+error:
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+ "3323 Failed host reset\n");
+ lpfc_unblock_mgmt_io(phba);
+ return FAILED;
}
/**
@@ -5870,7 +6040,7 @@ struct scsi_host_template lpfc_template_no_hr = {
.sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
.cmd_per_lun = LPFC_CMD_PER_LUN,
.shost_attrs = lpfc_hba_attrs,
- .max_sectors = 0xFFFF,
+ .max_sectors = 0xFFFFFFFF,
.vendor_id = LPFC_NL_VENDOR_ID,
.change_queue_depth = scsi_change_queue_depth,
.track_queue_depth = 1,
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index f9e6a135d656..64002b0cb02d 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -87,6 +87,10 @@ static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
struct lpfc_eqe *eqe);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
+static struct lpfc_cqe *lpfc_sli4_cq_get(struct lpfc_queue *q);
+static void __lpfc_sli4_consume_cqe(struct lpfc_hba *phba,
+ struct lpfc_queue *cq,
+ struct lpfc_cqe *cqe);
static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
@@ -467,25 +471,52 @@ __lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
}
static void
-lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
+lpfc_sli4_eqcq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
- struct lpfc_eqe *eqe;
- uint32_t count = 0;
+ struct lpfc_eqe *eqe = NULL;
+ u32 eq_count = 0, cq_count = 0;
+ struct lpfc_cqe *cqe = NULL;
+ struct lpfc_queue *cq = NULL, *childq = NULL;
+ int cqid = 0;
/* walk all the EQ entries and drop on the floor */
eqe = lpfc_sli4_eq_get(eq);
while (eqe) {
+ /* Get the reference to the corresponding CQ */
+ cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
+ cq = NULL;
+
+ list_for_each_entry(childq, &eq->child_list, list) {
+ if (childq->queue_id == cqid) {
+ cq = childq;
+ break;
+ }
+ }
+ /* If CQ is valid, iterate through it and drop all the CQEs */
+ if (cq) {
+ cqe = lpfc_sli4_cq_get(cq);
+ while (cqe) {
+ __lpfc_sli4_consume_cqe(phba, cq, cqe);
+ cq_count++;
+ cqe = lpfc_sli4_cq_get(cq);
+ }
+ /* Clear and re-arm the CQ */
+ phba->sli4_hba.sli4_write_cq_db(phba, cq, cq_count,
+ LPFC_QUEUE_REARM);
+ cq_count = 0;
+ }
__lpfc_sli4_consume_eqe(phba, eq, eqe);
- count++;
+ eq_count++;
eqe = lpfc_sli4_eq_get(eq);
}
/* Clear and re-arm the EQ */
- phba->sli4_hba.sli4_write_eq_db(phba, eq, count, LPFC_QUEUE_REARM);
+ phba->sli4_hba.sli4_write_eq_db(phba, eq, eq_count, LPFC_QUEUE_REARM);
}
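
For context (not part of the patch), the flush above now walks each pending event-queue entry, looks up the child completion queue by the id carried in the EQE, drains and re-arms that CQ, and only then consumes the EQE. A standalone sketch of that parent/child drain order, with hypothetical names and counts:

#include <stdio.h>

#define NCQ 2

struct model_cq { int id; int pending; };

int main(void)
{
	struct model_cq cqs[NCQ] = { { 10, 3 }, { 11, 1 } };
	int eqes[] = { 10, 11, 10 };	/* CQ ids reported by pending EQEs */
	int i, j;

	for (i = 0; i < (int)(sizeof(eqes) / sizeof(eqes[0])); i++) {
		for (j = 0; j < NCQ; j++) {
			if (cqs[j].id != eqes[i])
				continue;
			/* drop every CQE, then clear and re-arm the CQ */
			printf("EQE %d: drained %d CQEs from CQ %d\n",
			       i, cqs[j].pending, cqs[j].id);
			cqs[j].pending = 0;
			break;
		}
		/* then consume the EQE itself; the EQ is re-armed after the loop */
	}
	return 0;
}
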
static int
-lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq)
+lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq,
+ uint8_t rearm)
{
struct lpfc_eqe *eqe;
int count = 0, consumed = 0;
@@ -519,8 +550,8 @@ lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq)
eq->queue_claimed = 0;
rearm_and_exit:
- /* Always clear and re-arm the EQ */
- phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, LPFC_QUEUE_REARM);
+ /* Always clear the EQ. */
+ phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, rearm);
return count;
}
@@ -1391,9 +1422,12 @@ lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
while (!list_empty(iocblist)) {
list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
- if (!piocb->iocb_cmpl)
- lpfc_sli_release_iocbq(phba, piocb);
- else {
+ if (!piocb->iocb_cmpl) {
+ if (piocb->iocb_flag & LPFC_IO_NVME)
+ lpfc_nvme_cancel_iocb(phba, piocb);
+ else
+ lpfc_sli_release_iocbq(phba, piocb);
+ } else {
piocb->iocb.ulpStatus = ulpstatus;
piocb->iocb.un.ulpWord[4] = ulpWord4;
(piocb->iocb_cmpl) (phba, piocb, piocb);
@@ -2426,6 +2460,20 @@ lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
return;
}
+static void
+__lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+ unsigned long iflags;
+
+ if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
+ lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
+ spin_lock_irqsave(&vport->phba->ndlp_lock, iflags);
+ ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
+ ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
+ spin_unlock_irqrestore(&vport->phba->ndlp_lock, iflags);
+ }
+ ndlp->nlp_flag &= ~NLP_UNREG_INP;
+}
/**
* lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
@@ -2497,7 +2545,7 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
vport,
KERN_INFO, LOG_MBOX | LOG_DISCOVERY,
"1438 UNREG cmpl deferred mbox x%x "
- "on NPort x%x Data: x%x x%x %p\n",
+ "on NPort x%x Data: x%x x%x %px\n",
ndlp->nlp_rpi, ndlp->nlp_DID,
ndlp->nlp_flag, ndlp->nlp_defer_did, ndlp);
@@ -2507,8 +2555,10 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
} else {
- ndlp->nlp_flag &= ~NLP_UNREG_INP;
+ __lpfc_sli_rpi_release(vport, ndlp);
}
+ if (vport->load_flag & FC_UNLOADING)
+ lpfc_nlp_put(ndlp);
pmb->ctx_ndlp = NULL;
}
}
@@ -2555,7 +2605,7 @@ lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
vport, KERN_INFO, LOG_MBOX | LOG_SLI,
"0010 UNREG_LOGIN vpi:%x "
"rpi:%x DID:%x defer x%x flg x%x "
- "map:%x %p\n",
+ "map:%x %px\n",
vport->vpi, ndlp->nlp_rpi,
ndlp->nlp_DID, ndlp->nlp_defer_did,
ndlp->nlp_flag,
@@ -2573,7 +2623,7 @@ lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
vport, KERN_INFO, LOG_DISCOVERY,
"4111 UNREG cmpl deferred "
"clr x%x on "
- "NPort x%x Data: x%x %p\n",
+ "NPort x%x Data: x%x x%px\n",
ndlp->nlp_rpi, ndlp->nlp_DID,
ndlp->nlp_defer_did, ndlp);
ndlp->nlp_flag &= ~NLP_UNREG_INP;
@@ -2582,7 +2632,7 @@ lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
lpfc_issue_els_plogi(
vport, ndlp->nlp_DID, 0);
} else {
- ndlp->nlp_flag &= ~NLP_UNREG_INP;
+ __lpfc_sli_rpi_release(vport, ndlp);
}
}
}
@@ -2655,7 +2705,8 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
"(%d):0323 Unknown Mailbox command "
"x%x (x%x/x%x) Cmpl\n",
- pmb->vport ? pmb->vport->vpi : 0,
+ pmb->vport ? pmb->vport->vpi :
+ LPFC_VPORT_UNKNOWN,
pmbox->mbxCommand,
lpfc_sli_config_mbox_subsys_get(phba,
pmb),
@@ -2676,7 +2727,8 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
"(%d):0305 Mbox cmd cmpl "
"error - RETRYing Data: x%x "
"(x%x/x%x) x%x x%x x%x\n",
- pmb->vport ? pmb->vport->vpi : 0,
+ pmb->vport ? pmb->vport->vpi :
+ LPFC_VPORT_UNKNOWN,
pmbox->mbxCommand,
lpfc_sli_config_mbox_subsys_get(phba,
pmb),
@@ -2684,7 +2736,8 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
pmb),
pmbox->mbxStatus,
pmbox->un.varWords[0],
- pmb->vport->port_state);
+ pmb->vport ? pmb->vport->port_state :
+ LPFC_VPORT_UNKNOWN);
pmbox->mbxStatus = 0;
pmbox->mbxOwner = OWN_HOST;
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
@@ -2695,7 +2748,7 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
/* Mailbox cmd <cmd> Cmpl <cmpl> */
lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
- "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p "
+ "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl %ps "
"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
"x%x x%x x%x\n",
pmb->vport ? pmb->vport->vpi : 0,
@@ -3961,7 +4014,7 @@ lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
/* Look on all the FCP Rings for the iotag */
if (phba->sli_rev >= LPFC_SLI_REV4) {
for (i = 0; i < phba->cfg_hdw_queue; i++) {
- pring = phba->sli4_hba.hdwq[i].fcp_wq->pring;
+ pring = phba->sli4_hba.hdwq[i].io_wq->pring;
lpfc_sli_abort_iocb_ring(phba, pring);
}
} else {
@@ -3971,17 +4024,17 @@ lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
}
/**
- * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring
+ * lpfc_sli_flush_io_rings - flush all iocbs in the IO rings
* @phba: Pointer to HBA context object.
*
- * This function flushes all iocbs in the fcp ring and frees all the iocb
+ * This function flushes all iocbs in the IO rings and frees all the iocb
* objects in txq and txcmplq. This function will not issue abort iocbs
* for all the iocb commands in txcmplq, they will just be returned with
* IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI
* slot has been permanently disabled.
**/
void
-lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
+lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
{
LIST_HEAD(txq);
LIST_HEAD(txcmplq);
@@ -3992,13 +4045,13 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
spin_lock_irq(&phba->hbalock);
/* Indicate the I/O queues are flushed */
- phba->hba_flag |= HBA_FCP_IOQ_FLUSH;
+ phba->hba_flag |= HBA_IOQ_FLUSH;
spin_unlock_irq(&phba->hbalock);
/* Look on all the FCP Rings for the iotag */
if (phba->sli_rev >= LPFC_SLI_REV4) {
for (i = 0; i < phba->cfg_hdw_queue; i++) {
- pring = phba->sli4_hba.hdwq[i].fcp_wq->pring;
+ pring = phba->sli4_hba.hdwq[i].io_wq->pring;
spin_lock_irq(&pring->ring_lock);
/* Retrieve everything on txq */
@@ -4046,56 +4099,6 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
}
/**
- * lpfc_sli_flush_nvme_rings - flush all wqes in the nvme rings
- * @phba: Pointer to HBA context object.
- *
- * This function flushes all wqes in the nvme rings and frees all resources
- * in the txcmplq. This function does not issue abort wqes for the IO
- * commands in txcmplq, they will just be returned with
- * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI
- * slot has been permanently disabled.
- **/
-void
-lpfc_sli_flush_nvme_rings(struct lpfc_hba *phba)
-{
- LIST_HEAD(txcmplq);
- struct lpfc_sli_ring *pring;
- uint32_t i;
- struct lpfc_iocbq *piocb, *next_iocb;
-
- if ((phba->sli_rev < LPFC_SLI_REV4) ||
- !(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
- return;
-
- /* Hint to other driver operations that a flush is in progress. */
- spin_lock_irq(&phba->hbalock);
- phba->hba_flag |= HBA_NVME_IOQ_FLUSH;
- spin_unlock_irq(&phba->hbalock);
-
- /* Cycle through all NVME rings and complete each IO with
- * a local driver reason code. This is a flush so no
- * abort exchange to FW.
- */
- for (i = 0; i < phba->cfg_hdw_queue; i++) {
- pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;
-
- spin_lock_irq(&pring->ring_lock);
- list_for_each_entry_safe(piocb, next_iocb,
- &pring->txcmplq, list)
- piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
- /* Retrieve everything on the txcmplq */
- list_splice_init(&pring->txcmplq, &txcmplq);
- pring->txcmplq_cnt = 0;
- spin_unlock_irq(&pring->ring_lock);
-
- /* Flush the txcmpq &&&PAE */
- lpfc_sli_cancel_iocbs(phba, &txcmplq,
- IOSTAT_LOCAL_REJECT,
- IOERR_SLI_DOWN);
- }
-}
-
-/**
* lpfc_sli_brdready_s3 - Check for sli3 host ready status
* @phba: Pointer to HBA context object.
* @mask: Bit mask to be checked.
@@ -4495,7 +4498,7 @@ lpfc_sli_brdreset(struct lpfc_hba *phba)
* checking during resets the device. The caller is not required to hold
* any locks.
*
- * This function returns 0 always.
+ * This function returns 0 on success else returns negative error code.
**/
int
lpfc_sli4_brdreset(struct lpfc_hba *phba)
@@ -4652,8 +4655,10 @@ lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
rc = lpfc_sli4_brdreset(phba);
- if (rc)
- return rc;
+ if (rc) {
+ phba->link_state = LPFC_HBA_ERROR;
+ goto hba_down_queue;
+ }
spin_lock_irq(&phba->hbalock);
phba->pport->stopped = 0;
@@ -4668,6 +4673,7 @@ lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
if (hba_aer_enabled)
pci_disable_pcie_error_reporting(phba->pcidev);
+hba_down_queue:
lpfc_hba_down_post(phba);
lpfc_sli4_queue_destroy(phba);
@@ -4912,8 +4918,17 @@ static int
lpfc_sli4_rb_setup(struct lpfc_hba *phba)
{
phba->hbq_in_use = 1;
- phba->hbqs[LPFC_ELS_HBQ].entry_count =
- lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
+ /*
+ * Specific case when MDS diagnostics are enabled and supported.
+ * The receive buffer count is halved to manage the incoming
+ * traffic.
+ */
+ if (phba->cfg_enable_mds_diags && phba->mds_diags_support)
+ phba->hbqs[LPFC_ELS_HBQ].entry_count =
+ lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count >> 1;
+ else
+ phba->hbqs[LPFC_ELS_HBQ].entry_count =
+ lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
phba->hbq_count = 1;
lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
/* Initially populate or replenish the HBQs */
@@ -5584,10 +5599,8 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
qp = &sli4_hba->hdwq[qidx];
/* ARM the corresponding CQ */
- sli4_hba->sli4_write_cq_db(phba, qp->fcp_cq, 0,
- LPFC_QUEUE_REARM);
- sli4_hba->sli4_write_cq_db(phba, qp->nvme_cq, 0,
- LPFC_QUEUE_REARM);
+ sli4_hba->sli4_write_cq_db(phba, qp->io_cq, 0,
+ LPFC_QUEUE_REARM);
}
/* Loop thru all IRQ vectors */
@@ -6199,6 +6212,14 @@ lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
mbox->u.mqe.un.set_feature.param_len = 8;
break;
+ case LPFC_SET_DUAL_DUMP:
+ bf_set(lpfc_mbx_set_feature_dd,
+ &mbox->u.mqe.un.set_feature, LPFC_ENABLE_DUAL_DUMP);
+ bf_set(lpfc_mbx_set_feature_ddquery,
+ &mbox->u.mqe.un.set_feature, 0);
+ mbox->u.mqe.un.set_feature.feature = LPFC_SET_DUAL_DUMP;
+ mbox->u.mqe.un.set_feature.param_len = 4;
+ break;
}
return;
@@ -6216,11 +6237,16 @@ lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
{
struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
- ras_fwlog->ras_active = false;
+ spin_lock_irq(&phba->hbalock);
+ ras_fwlog->state = INACTIVE;
+ spin_unlock_irq(&phba->hbalock);
/* Disable FW logging to host memory */
writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
+
+ /* Wait 10ms for firmware to stop using DMA buffer */
+ usleep_range(10 * 1000, 20 * 1000);
}
/**
@@ -6256,7 +6282,9 @@ lpfc_sli4_ras_dma_free(struct lpfc_hba *phba)
ras_fwlog->lwpd.virt = NULL;
}
- ras_fwlog->ras_active = false;
+ spin_lock_irq(&phba->hbalock);
+ ras_fwlog->state = INACTIVE;
+ spin_unlock_irq(&phba->hbalock);
}
/**
@@ -6358,7 +6386,9 @@ lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
goto disable_ras;
}
- ras_fwlog->ras_active = true;
+ spin_lock_irq(&phba->hbalock);
+ ras_fwlog->state = ACTIVE;
+ spin_unlock_irq(&phba->hbalock);
mempool_free(pmb, phba->mbox_mem_pool);
return;
@@ -6390,6 +6420,10 @@ lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
uint32_t len = 0, fwlog_buffsize, fwlog_entry_count;
int rc = 0;
+ spin_lock_irq(&phba->hbalock);
+ ras_fwlog->state = INACTIVE;
+ spin_unlock_irq(&phba->hbalock);
+
fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE *
phba->cfg_ras_fwlog_buffsize);
fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE);
@@ -6449,6 +6483,9 @@ lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys);
mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys);
+ spin_lock_irq(&phba->hbalock);
+ ras_fwlog->state = REG_INPROGRESS;
+ spin_unlock_irq(&phba->hbalock);
mbox->vport = phba->pport;
mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl;
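
A minimal sketch (illustration only, not part of the patch) of the firmware-log state machine introduced by the new enum ras_state: the state moves INACTIVE -> REG_INPROGRESS when the registration mailbox is posted, REG_INPROGRESS -> ACTIVE when the mailbox completes successfully, and back to INACTIVE on stop, teardown, or failure; every transition in the driver is made under phba->hbalock. The event names below are hypothetical.

#include <stdio.h>

enum ras_state { INACTIVE, REG_INPROGRESS, ACTIVE };

enum ras_event { EV_POST_REG_MBOX, EV_REG_MBOX_OK, EV_REG_MBOX_FAIL, EV_STOP };

static enum ras_state ras_next(enum ras_state s, enum ras_event ev)
{
	switch (ev) {
	case EV_POST_REG_MBOX:
		return (s == INACTIVE) ? REG_INPROGRESS : s;
	case EV_REG_MBOX_OK:
		return (s == REG_INPROGRESS) ? ACTIVE : s;
	case EV_REG_MBOX_FAIL:
	case EV_STOP:
	default:
		return INACTIVE;
	}
}

int main(void)
{
	enum ras_state s = INACTIVE;

	s = ras_next(s, EV_POST_REG_MBOX);	/* lpfc_sli4_ras_fwlog_init() */
	s = ras_next(s, EV_REG_MBOX_OK);	/* lpfc_sli4_ras_mbox_cmpl() */
	s = ras_next(s, EV_STOP);		/* lpfc_ras_stop_fwlog() */
	printf("final state: %d\n", s);
	return 0;
}
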
@@ -7180,7 +7217,7 @@ lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
int
lpfc_sli4_hba_setup(struct lpfc_hba *phba)
{
- int rc, i, cnt, len;
+ int rc, i, cnt, len, dd;
LPFC_MBOXQ_t *mboxq;
struct lpfc_mqe *mqe;
uint8_t *vpd;
@@ -7243,7 +7280,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
else
phba->hba_flag &= ~HBA_FIP_SUPPORT;
- phba->hba_flag &= ~HBA_FCP_IOQ_FLUSH;
+ phba->hba_flag &= ~HBA_IOQ_FLUSH;
if (phba->sli_rev != LPFC_SLI_REV4) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
@@ -7431,6 +7468,23 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
spin_unlock_irq(&phba->hbalock);
+ /* Always try to enable dual dump feature if we can */
+ lpfc_set_features(phba, mboxq, LPFC_SET_DUAL_DUMP);
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ dd = bf_get(lpfc_mbx_set_feature_dd, &mboxq->u.mqe.un.set_feature);
+ if ((rc == MBX_SUCCESS) && (dd == LPFC_ENABLE_DUAL_DUMP))
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_INIT,
+ "6448 Dual Dump is enabled\n");
+ else
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_INIT,
+ "6447 Dual Dump Mailbox x%x (x%x/x%x) failed, "
+ "rc:x%x dd:x%x\n",
+ bf_get(lpfc_mqe_command, &mboxq->u.mqe),
+ lpfc_sli_config_mbox_subsys_get(
+ phba, mboxq),
+ lpfc_sli_config_mbox_opcode_get(
+ phba, mboxq),
+ rc, dd);
/*
* Allocate all resources (xri,rpi,vpi,vfi) now. Subsequent
* calls depends on these resources to complete port setup.
@@ -7555,9 +7609,11 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
}
phba->sli4_hba.nvmet_xri_cnt = rc;
- cnt = phba->cfg_iocb_cnt * 1024;
- /* We need 1 iocbq for every SGL, for IO processing */
- cnt += phba->sli4_hba.nvmet_xri_cnt;
+ /* We allocate an iocbq for every receive context SGL.
+ * The additional allocation is for abort and ls handling.
+ */
+ cnt = phba->sli4_hba.nvmet_xri_cnt +
+ phba->sli4_hba.max_cfg_param.max_xri;
} else {
/* update host common xri-sgl sizes and mappings */
rc = lpfc_sli4_io_sgl_update(phba);
@@ -7579,14 +7635,17 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
rc = -ENODEV;
goto out_destroy_queue;
}
- cnt = phba->cfg_iocb_cnt * 1024;
+ /* Each lpfc_io_buf job structure has an iocbq element.
+ * This cnt provides for abort, els, ct and ls requests.
+ */
+ cnt = phba->sli4_hba.max_cfg_param.max_xri;
}
if (!phba->sli.iocbq_lookup) {
/* Initialize and populate the iocb list per host */
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "2821 initialize iocb list %d total %d\n",
- phba->cfg_iocb_cnt, cnt);
+ "2821 initialize iocb list with %d entries\n",
+ cnt);
rc = lpfc_init_iocb_list(phba, cnt);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -7898,7 +7957,7 @@ lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
if (sli4_hba->hdwq) {
for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++) {
eq = phba->sli4_hba.hba_eq_hdl[eqidx].eq;
- if (eq->queue_id == sli4_hba->mbx_cq->assoc_qid) {
+ if (eq && eq->queue_id == sli4_hba->mbx_cq->assoc_qid) {
fpeq = eq;
break;
}
@@ -7924,7 +7983,7 @@ lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
if (mbox_pending)
/* process and rearm the EQ */
- lpfc_sli4_process_eq(phba, fpeq);
+ lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
else
/* Always clear and re-arm the EQ */
sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM);
@@ -7972,7 +8031,7 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
/* Mbox cmd <mbxCommand> timeout */
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
- "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
+ "0310 Mailbox command x%x timeout Data: x%x x%x x%px\n",
mb->mbxCommand,
phba->pport->port_state,
phba->sli.sli_flag,
@@ -8505,7 +8564,7 @@ lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
spin_unlock_irq(&phba->hbalock);
- /* wake up worker thread to post asynchronlous mailbox command */
+ /* wake up worker thread to post asynchronous mailbox command */
lpfc_worker_wake_up(phba);
}
@@ -8773,7 +8832,7 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
return rc;
}
- /* Now, interrupt mode asynchrous mailbox command */
+ /* Now, interrupt mode asynchronous mailbox command */
rc = lpfc_mbox_cmd_check(phba, mboxq);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
@@ -8996,7 +9055,8 @@ lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
* @pring: Pointer to driver SLI ring object.
* @piocb: Pointer to address of newly added command iocb.
*
- * This function is called with hbalock held to add a command
+ * This function is called with hbalock held for SLI3 ports or
+ * the ring lock held for SLI4 ports to add a command
* iocb to the txq when SLI layer cannot submit the command iocb
* to the ring.
**/
@@ -9004,7 +9064,10 @@ void
__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *piocb)
{
- lockdep_assert_held(&phba->hbalock);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ lockdep_assert_held(&pring->ring_lock);
+ else
+ lockdep_assert_held(&phba->hbalock);
/* Insert the caller's iocb in the txq tail for later processing. */
list_add_tail(&piocb->list, &pring->txq);
}
@@ -9333,11 +9396,9 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
memset(wqe, 0, sizeof(union lpfc_wqe128));
/* Some of the fields are in the right position already */
memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
- if (iocbq->iocb.ulpCommand != CMD_SEND_FRAME) {
- /* The ct field has moved so reset */
- wqe->generic.wqe_com.word7 = 0;
- wqe->generic.wqe_com.word10 = 0;
- }
+ /* The ct field has moved so reset */
+ wqe->generic.wqe_com.word7 = 0;
+ wqe->generic.wqe_com.word10 = 0;
abort_tag = (uint32_t) iocbq->iotag;
xritag = iocbq->sli4_xritag;
@@ -9796,7 +9857,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
* we re-construct this WQE here based on information in
* iocbq from scratch.
*/
- memset(wqe, 0, sizeof(union lpfc_wqe));
+ memset(wqe, 0, sizeof(*wqe));
/* OX_ID is invariable to who sent ABTS to CT exchange */
bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
@@ -9843,6 +9904,15 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
break;
case CMD_SEND_FRAME:
+ bf_set(wqe_cmnd, &wqe->generic.wqe_com, CMD_SEND_FRAME);
+ bf_set(wqe_sof, &wqe->generic.wqe_com, 0x2E); /* SOF byte */
+ bf_set(wqe_eof, &wqe->generic.wqe_com, 0x41); /* EOF byte */
+ bf_set(wqe_lenloc, &wqe->generic.wqe_com, 1);
+ bf_set(wqe_xbl, &wqe->generic.wqe_com, 1);
+ bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
+ bf_set(wqe_xc, &wqe->generic.wqe_com, 1);
+ bf_set(wqe_cmd_type, &wqe->generic.wqe_com, 0xA);
+ bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
return 0;
@@ -9888,7 +9958,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
* __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
* an iocb command to an HBA with SLI-4 interface spec.
*
- * This function is called with hbalock held. The function will return success
+ * This function is called with ringlock held. The function will return success
* after it successfully submit the iocb to firmware or after adding to the
* txq.
**/
@@ -9904,7 +9974,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
/* Get the WQ */
if ((piocb->iocb_flag & LPFC_IO_FCP) ||
(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
- wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].fcp_wq;
+ wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq;
} else {
wq = phba->sli4_hba.els_wq;
}
@@ -10051,7 +10121,7 @@ lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
lpfc_cmd = (struct lpfc_io_buf *)piocb->context1;
piocb->hba_wqidx = lpfc_cmd->hdwq_no;
}
- return phba->sli4_hba.hdwq[piocb->hba_wqidx].fcp_wq->pring;
+ return phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq->pring;
} else {
if (unlikely(!phba->sli4_hba.els_wq))
return NULL;
@@ -10078,10 +10148,13 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
struct lpfc_iocbq *piocb, uint32_t flag)
{
struct lpfc_sli_ring *pring;
+ struct lpfc_queue *eq;
unsigned long iflags;
int rc;
if (phba->sli_rev == LPFC_SLI_REV4) {
+ eq = phba->sli4_hba.hdwq[piocb->hba_wqidx].hba_eq;
+
pring = lpfc_sli4_calc_ring(phba, piocb);
if (unlikely(pring == NULL))
return IOCB_ERROR;
@@ -10089,6 +10162,8 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
spin_lock_irqsave(&pring->ring_lock, iflags);
rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
spin_unlock_irqrestore(&pring->ring_lock, iflags);
+
+ lpfc_sli4_poll_eq(eq, LPFC_POLL_FASTPATH);
} else {
/* For now, SLI2/3 will still use hbalock */
spin_lock_irqsave(&phba->hbalock, iflags);
@@ -10504,7 +10579,7 @@ lpfc_sli4_queue_init(struct lpfc_hba *phba)
INIT_LIST_HEAD(&psli->mboxq_cmpl);
/* Initialize list headers for txq and txcmplq as double linked lists */
for (i = 0; i < phba->cfg_hdw_queue; i++) {
- pring = phba->sli4_hba.hdwq[i].fcp_wq->pring;
+ pring = phba->sli4_hba.hdwq[i].io_wq->pring;
pring->flag = 0;
pring->ringno = LPFC_FCP_RING;
pring->txcmplq_cnt = 0;
@@ -10523,16 +10598,6 @@ lpfc_sli4_queue_init(struct lpfc_hba *phba)
spin_lock_init(&pring->ring_lock);
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
- for (i = 0; i < phba->cfg_hdw_queue; i++) {
- pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;
- pring->flag = 0;
- pring->ringno = LPFC_FCP_RING;
- pring->txcmplq_cnt = 0;
- INIT_LIST_HEAD(&pring->txq);
- INIT_LIST_HEAD(&pring->txcmplq);
- INIT_LIST_HEAD(&pring->iocb_continueq);
- spin_lock_init(&pring->ring_lock);
- }
pring = phba->sli4_hba.nvmels_wq->pring;
pring->flag = 0;
pring->ringno = LPFC_ELS_RING;
@@ -10713,14 +10778,14 @@ lpfc_sli_host_down(struct lpfc_vport *vport)
set_bit(LPFC_DATA_READY, &phba->data_flags);
}
prev_pring_flag = pring->flag;
- spin_lock_irq(&pring->ring_lock);
+ spin_lock(&pring->ring_lock);
list_for_each_entry_safe(iocb, next_iocb,
&pring->txq, list) {
if (iocb->vport != vport)
continue;
list_move_tail(&iocb->list, &completions);
}
- spin_unlock_irq(&pring->ring_lock);
+ spin_unlock(&pring->ring_lock);
list_for_each_entry_safe(iocb, next_iocb,
&pring->txcmplq, list) {
if (iocb->vport != vport)
@@ -10796,9 +10861,9 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
pring = qp->pring;
if (!pring)
continue;
- spin_lock_irq(&pring->ring_lock);
+ spin_lock(&pring->ring_lock);
list_splice_init(&pring->txq, &completions);
- spin_unlock_irq(&pring->ring_lock);
+ spin_unlock(&pring->ring_lock);
if (pring == phba->sli4_hba.els_wq->pring) {
pring->flag |= LPFC_DEFERRED_RING_EVENT;
/* Set the lpfc data pending flag */
@@ -10979,7 +11044,7 @@ lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
spin_unlock_irq(&phba->hbalock);
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0402 Cannot find virtual addr for buffer tag on "
- "ring %d Data x%lx x%p x%p x%x\n",
+ "ring %d Data x%lx x%px x%px x%x\n",
pring->ringno, (unsigned long) tag,
slp->next, slp->prev, pring->postbufq_cnt);
@@ -11023,7 +11088,7 @@ lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
spin_unlock_irq(&phba->hbalock);
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0410 Cannot find virtual addr for mapped buf on "
- "ring %d Data x%llx x%p x%p x%x\n",
+ "ring %d Data x%llx x%px x%px x%x\n",
pring->ringno, (unsigned long long)phys,
slp->next, slp->prev, pring->postbufq_cnt);
return NULL;
@@ -11078,7 +11143,7 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
abort_iocb = phba->sli.iocbq_lookup[abort_context];
lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
- "0327 Cannot abort els iocb %p "
+ "0327 Cannot abort els iocb x%px "
"with tag %x context %x, abort status %x, "
"abort code %x\n",
abort_iocb, abort_iotag, abort_context,
@@ -11493,7 +11558,7 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
int i;
/* all I/Os are in process of being flushed */
- if (phba->hba_flag & HBA_FCP_IOQ_FLUSH)
+ if (phba->hba_flag & HBA_IOQ_FLUSH)
return errcnt;
for (i = 1; i <= phba->sli.last_iotag; i++) {
@@ -11603,7 +11668,7 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
spin_lock_irqsave(&phba->hbalock, iflags);
/* all I/Os are in process of being flushed */
- if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
+ if (phba->hba_flag & HBA_IOQ_FLUSH) {
spin_unlock_irqrestore(&phba->hbalock, iflags);
return 0;
}
@@ -11627,7 +11692,7 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
if (phba->sli_rev == LPFC_SLI_REV4) {
pring_s4 =
- phba->sli4_hba.hdwq[iocbq->hba_wqidx].fcp_wq->pring;
+ phba->sli4_hba.hdwq[iocbq->hba_wqidx].io_wq->pring;
if (!pring_s4) {
spin_unlock(&lpfc_cmd->buf_lock);
continue;
@@ -11768,7 +11833,10 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
!(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf,
cur_iocbq);
- lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY;
+ if (rspiocbq && (rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY))
+ lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
+ else
+ lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
}
pdone_q = cmdiocbq->context_un.wait_queue;
@@ -13053,11 +13121,11 @@ lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
}
/**
- * lpfc_sli4_sp_handle_async_event - Handle an asynchroous event
+ * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
* @phba: Pointer to HBA context object.
* @cqe: Pointer to mailbox completion queue entry.
*
- * This routine process a mailbox completion queue entry with asynchrous
+ * This routine process a mailbox completion queue entry with asynchronous
* event.
*
* Return: true if work posted to worker thread, otherwise false.
@@ -13190,13 +13258,19 @@ send_current_mbox:
phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
/* Setting active mailbox pointer need to be in sync to flag clear */
phba->sli.mbox_active = NULL;
+ if (bf_get(lpfc_trailer_consumed, mcqe))
+ lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
spin_unlock_irqrestore(&phba->hbalock, iflags);
/* Wake up worker thread to post the next pending mailbox command */
lpfc_worker_wake_up(phba);
+ return workposted;
+
out_no_mqe_complete:
+ spin_lock_irqsave(&phba->hbalock, iflags);
if (bf_get(lpfc_trailer_consumed, mcqe))
lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
- return workposted;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ return false;
}
/**
@@ -13205,7 +13279,7 @@ out_no_mqe_complete:
* @cqe: Pointer to mailbox completion queue entry.
*
* This routine process a mailbox completion queue entry, it invokes the
- * proper mailbox complete handling or asynchrous event handling routine
+ * proper mailbox complete handling or asynchronous event handling routine
* according to the MCQE's async bit.
*
* Return: true if work posted to worker thread, otherwise false.
@@ -13249,7 +13323,6 @@ lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
struct lpfc_sli_ring *pring = cq->pring;
int txq_cnt = 0;
int txcmplq_cnt = 0;
- int fcp_txcmplq_cnt = 0;
/* Check for response status */
if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
@@ -13271,9 +13344,8 @@ lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
txcmplq_cnt++;
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
- "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n",
+ "els_txcmplq_cnt=%d\n",
txq_cnt, phba->iocb_cnt,
- fcp_txcmplq_cnt,
txcmplq_cnt);
return false;
}
@@ -13336,8 +13408,13 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
unsigned long iflags;
switch (cq->subtype) {
- case LPFC_FCP:
- lpfc_sli4_fcp_xri_aborted(phba, wcqe, cq->hdwq);
+ case LPFC_IO:
+ lpfc_sli4_io_xri_aborted(phba, wcqe, cq->hdwq);
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+ /* Notify aborted XRI for NVME work queue */
+ if (phba->nvmet_support)
+ lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
+ }
workposted = false;
break;
case LPFC_NVME_LS: /* NVME LS uses ELS resources */
@@ -13355,15 +13432,6 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
spin_unlock_irqrestore(&phba->hbalock, iflags);
workposted = true;
break;
- case LPFC_NVME:
- /* Notify aborted XRI for NVME work queue */
- if (phba->nvmet_support)
- lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
- else
- lpfc_sli4_nvme_xri_aborted(phba, wcqe, cq->hdwq);
-
- workposted = false;
- break;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0603 Invalid CQ subtype %d: "
@@ -13628,6 +13696,7 @@ __lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
LPFC_QUEUE_NOARM);
consumed = 0;
+ cq->assoc_qp->q_flag |= HBA_EQ_DELAY_CHK;
}
if (count == LPFC_NVMET_CQ_NOTIFY)
@@ -13691,7 +13760,7 @@ __lpfc_sli4_sp_process_cq(struct lpfc_queue *cq)
&delay);
break;
case LPFC_WCQ:
- if (cq->subtype == LPFC_FCP || cq->subtype == LPFC_NVME)
+ if (cq->subtype == LPFC_IO)
workposted |= __lpfc_sli4_process_cq(phba, cq,
lpfc_sli4_fp_handle_cqe,
&delay);
@@ -14008,10 +14077,7 @@ lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
cq->CQ_wq++;
/* Process the WQ complete event */
phba->last_completion_time = jiffies;
- if ((cq->subtype == LPFC_FCP) || (cq->subtype == LPFC_NVME))
- lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
- (struct lpfc_wcqe_complete *)&wcqe);
- if (cq->subtype == LPFC_NVME_LS)
+ if (cq->subtype == LPFC_IO || cq->subtype == LPFC_NVME_LS)
lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
(struct lpfc_wcqe_complete *)&wcqe);
break;
@@ -14259,7 +14325,7 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
spin_lock_irqsave(&phba->hbalock, iflag);
if (phba->link_state < LPFC_LINK_DOWN)
/* Flush, clear interrupt, and rearm the EQ */
- lpfc_sli4_eq_flush(phba, fpeq);
+ lpfc_sli4_eqcq_flush(phba, fpeq);
spin_unlock_irqrestore(&phba->hbalock, iflag);
return IRQ_NONE;
}
@@ -14269,14 +14335,14 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
fpeq->last_cpu = raw_smp_processor_id();
if (icnt > LPFC_EQD_ISR_TRIGGER &&
- phba->cfg_irq_chann == 1 &&
+ fpeq->q_flag & HBA_EQ_DELAY_CHK &&
phba->cfg_auto_imax &&
fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY);
/* process and rearm the EQ */
- ecount = lpfc_sli4_process_eq(phba, fpeq);
+ ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
if (unlikely(ecount == 0)) {
fpeq->EQ_no_entry++;
@@ -14336,6 +14402,147 @@ lpfc_sli4_intr_handler(int irq, void *dev_id)
return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
} /* lpfc_sli4_intr_handler */
+void lpfc_sli4_poll_hbtimer(struct timer_list *t)
+{
+ struct lpfc_hba *phba = from_timer(phba, t, cpuhp_poll_timer);
+ struct lpfc_queue *eq;
+ int i = 0;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(eq, &phba->poll_list, _poll_list)
+ i += lpfc_sli4_poll_eq(eq, LPFC_POLL_SLOWPATH);
+ if (!list_empty(&phba->poll_list))
+ mod_timer(&phba->cpuhp_poll_timer,
+ jiffies + msecs_to_jiffies(LPFC_POLL_HB));
+
+ rcu_read_unlock();
+}
+
+inline int lpfc_sli4_poll_eq(struct lpfc_queue *eq, uint8_t path)
+{
+ struct lpfc_hba *phba = eq->phba;
+ int i = 0;
+
+ /*
+ * Unlocking an irq is one of the entry points to check
+ * for re-scheduling, but we are fine on the I/O submission
+ * path as the midlayer does a get_cpu to glue us in. Flush
+ * out the invalidate queue so we can see the updated
+ * value for the mode flag.
+ */
+ smp_rmb();
+
+ if (READ_ONCE(eq->mode) == LPFC_EQ_POLL)
+ /* We will likely not get the completion for the caller
+ * during this iteration, but that's fine.
+ * Future I/Os coming in on this eq should be able to
+ * pick it up. Single I/Os will be handled through a
+ * schedule from the polling timer function, which is
+ * currently triggered every 1 msec.
+ */
+ i = lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM);
+
+ return i;
+}
+
+static inline void lpfc_sli4_add_to_poll_list(struct lpfc_queue *eq)
+{
+ struct lpfc_hba *phba = eq->phba;
+
+ if (list_empty(&phba->poll_list)) {
+ timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
+ /* kickstart slowpath processing for this eq */
+ mod_timer(&phba->cpuhp_poll_timer,
+ jiffies + msecs_to_jiffies(LPFC_POLL_HB));
+ }
+
+ list_add_rcu(&eq->_poll_list, &phba->poll_list);
+ synchronize_rcu();
+}
+
+static inline void lpfc_sli4_remove_from_poll_list(struct lpfc_queue *eq)
+{
+ struct lpfc_hba *phba = eq->phba;
+
+ /* Disable slowpath processing for this eq. Kick-start the eq
+ * by re-arming it as soon as possible.
+ */
+ list_del_rcu(&eq->_poll_list);
+ synchronize_rcu();
+
+ if (list_empty(&phba->poll_list))
+ del_timer_sync(&phba->cpuhp_poll_timer);
+}
+
+void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba)
+{
+ struct lpfc_queue *eq, *next;
+
+ list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list)
+ list_del(&eq->_poll_list);
+
+ INIT_LIST_HEAD(&phba->poll_list);
+ synchronize_rcu();
+}
+
+static inline void
+__lpfc_sli4_switch_eqmode(struct lpfc_queue *eq, uint8_t mode)
+{
+ if (mode == eq->mode)
+ return;
+ /*
+ * Currently this function is only called during a hotplug
+ * event and the cpu on which this function is executing
+ * is going offline. By now the hotplug code has instructed
+ * the scheduler to remove this cpu from the cpu active mask,
+ * so we don't need to worry about being set aside by the
+ * scheduler for a high-priority process. Interrupts could
+ * still come in, but they are known to retire ASAP.
+ */
+
+ /* Disable polling in the fastpath */
+ WRITE_ONCE(eq->mode, mode);
+ /* flush out the store buffer */
+ smp_wmb();
+
+ /*
+ * Add this eq to the polling list and start polling. For
+ * a grace period both the interrupt handler and the poller
+ * will try to process the eq, but that's fine. We have a
+ * synchronization mechanism in place (queue_claimed) to
+ * deal with it. This is just a draining phase for the
+ * interrupt handler (not the eq's) as we have guaranteed,
+ * through the barrier, that all the CPUs have seen the new
+ * CQ_POLLED state, which effectively disables re-arming of
+ * the EQ. The whole idea is that the eq's die off eventually
+ * as we are no longer re-arming them.
+ */
+ mode ? lpfc_sli4_add_to_poll_list(eq) :
+ lpfc_sli4_remove_from_poll_list(eq);
+}
+
+void lpfc_sli4_start_polling(struct lpfc_queue *eq)
+{
+ __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_POLL);
+}
+
+void lpfc_sli4_stop_polling(struct lpfc_queue *eq)
+{
+ struct lpfc_hba *phba = eq->phba;
+
+ __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_INTERRUPT);
+
+ /* Kick-start the pending I/Os in hardware.
+ * Once we switch back to interrupt processing on an eq,
+ * the I/O completion path will only arm the eq when it
+ * receives a completion. But since the eq is in a disarmed
+ * state it doesn't receive a completion, which creates a
+ * deadlock scenario.
+ */
+ phba->sli4_hba.sli4_write_eq_db(phba, eq, 0, LPFC_QUEUE_REARM);
+}
+
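
To summarize the mode switch added above (illustrative sketch only, not part of the patch): an EQ is either serviced from its interrupt handler or, around a CPU-hotplug event, from the slow-path heartbeat timer via the poll list; switching modes is just a flag write plus poll-list membership. The struct and function names below are hypothetical.

#include <stdbool.h>
#include <stdio.h>

enum eq_mode { EQ_INTERRUPT, EQ_POLL };

struct model_eq {
	enum eq_mode mode;
	bool on_poll_list;
};

static void switch_eqmode(struct model_eq *eq, enum eq_mode mode)
{
	if (eq->mode == mode)
		return;
	eq->mode = mode;			/* WRITE_ONCE + smp_wmb() in the driver */
	eq->on_poll_list = (mode == EQ_POLL);	/* add to / remove from poll_list */
}

int main(void)
{
	struct model_eq eq = { EQ_INTERRUPT, false };

	switch_eqmode(&eq, EQ_POLL);		/* CPU going offline: start polling */
	printf("polling: %d\n", eq.on_poll_list);
	switch_eqmode(&eq, EQ_INTERRUPT);	/* CPU back online: stop polling, re-arm */
	printf("polling: %d\n", eq.on_poll_list);
	return 0;
}
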
/**
* lpfc_sli4_queue_free - free a queue structure and associated memory
* @queue: The queue structure to free.
@@ -14410,6 +14617,7 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
return NULL;
INIT_LIST_HEAD(&queue->list);
+ INIT_LIST_HEAD(&queue->_poll_list);
INIT_LIST_HEAD(&queue->wq_list);
INIT_LIST_HEAD(&queue->wqfull_list);
INIT_LIST_HEAD(&queue->page_list);
@@ -16918,6 +17126,8 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
struct fc_vft_header *fc_vft_hdr;
uint32_t *header = (uint32_t *) fc_hdr;
+#define FC_RCTL_MDS_DIAGS 0xF4
+
switch (fc_hdr->fh_r_ctl) {
case FC_RCTL_DD_UNCAT: /* uncategorized information */
case FC_RCTL_DD_SOL_DATA: /* solicited data */
@@ -17445,7 +17655,6 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
ctiocb->context1 = lpfc_nlp_get(ndlp);
- ctiocb->iocb_cmpl = NULL;
ctiocb->vport = phba->pport;
ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
ctiocb->sli4_lxritag = NO_XRI;
@@ -17928,6 +18137,17 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
fcfi = bf_get(lpfc_rcqe_fcf_id,
&dmabuf->cq_event.cqe.rcqe_cmpl);
+ if (fc_hdr->fh_r_ctl == 0xF4 && fc_hdr->fh_type == 0xFF) {
+ vport = phba->pport;
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "2023 MDS Loopback %d bytes\n",
+ bf_get(lpfc_rcqe_length,
+ &dmabuf->cq_event.cqe.rcqe_cmpl));
+ /* Handle MDS Loopback frames */
+ lpfc_sli4_handle_mds_loopback(vport, dmabuf);
+ return;
+ }
+
/* d_id this frame is directed to */
did = sli4_did_from_fc_hdr(fc_hdr);
@@ -18151,8 +18371,9 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
phba->sli4_hba.max_cfg_param.rpi_used++;
phba->sli4_hba.rpi_count++;
}
- lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
- "0001 rpi:%x max:%x lim:%x\n",
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_NODE | LOG_DISCOVERY,
+ "0001 Allocated rpi:x%x max:x%x lim:x%x\n",
(int) rpi, max_rpi, rpi_limit);
/*
@@ -18208,9 +18429,21 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
static void
__lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
{
+ /*
+ * if the rpi value indicates a prior unreg has already
+ * been done, skip the unreg.
+ */
+ if (rpi == LPFC_RPI_ALLOC_ERROR)
+ return;
+
if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
phba->sli4_hba.rpi_count--;
phba->sli4_hba.max_cfg_param.rpi_used--;
+ } else {
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_NODE | LOG_DISCOVERY,
+ "2016 rpi %x not inuse\n",
+ rpi);
}
}
@@ -19225,7 +19458,7 @@ lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
struct lpfc_mbx_wr_object *wr_object;
LPFC_MBOXQ_t *mbox;
int rc = 0, i = 0;
- uint32_t shdr_status, shdr_add_status, shdr_change_status;
+ uint32_t shdr_status, shdr_add_status, shdr_change_status, shdr_csf;
uint32_t mbox_tmo;
struct lpfc_dmabuf *dmabuf;
uint32_t written = 0;
@@ -19282,6 +19515,16 @@ lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
if (check_change_status) {
shdr_change_status = bf_get(lpfc_wr_object_change_status,
&wr_object->u.response);
+
+ if (shdr_change_status == LPFC_CHANGE_STATUS_FW_RESET ||
+ shdr_change_status == LPFC_CHANGE_STATUS_PORT_MIGRATION) {
+ shdr_csf = bf_get(lpfc_wr_object_csf,
+ &wr_object->u.response);
+ if (shdr_csf)
+ shdr_change_status =
+ LPFC_CHANGE_STATUS_PCI_RESET;
+ }
+
switch (shdr_change_status) {
case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET):
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
@@ -19461,7 +19704,7 @@ lpfc_drain_txq(struct lpfc_hba *phba)
if (phba->link_flag & LS_MDS_LOOPBACK) {
/* MDS WQE are posted only to first WQ*/
- wq = phba->sli4_hba.hdwq[0].fcp_wq;
+ wq = phba->sli4_hba.hdwq[0].io_wq;
if (unlikely(!wq))
return 0;
pring = wq->pring;
@@ -19706,16 +19949,18 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
spin_unlock_irqrestore(&pring->ring_lock, iflags);
+
+ lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
return 0;
}
/* NVME_FCREQ and NVME_ABTS requests */
if (pwqe->iocb_flag & LPFC_IO_NVME) {
/* Get the IO distribution (hba_wqidx) for WQ assignment. */
- wq = qp->nvme_wq;
+ wq = qp->io_wq;
pring = wq->pring;
- bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->nvme_cq_map);
+ bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
qp, wq_access);
@@ -19726,13 +19971,15 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
}
lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
spin_unlock_irqrestore(&pring->ring_lock, iflags);
+
+ lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
return 0;
}
/* NVMET requests */
if (pwqe->iocb_flag & LPFC_IO_NVMET) {
/* Get the IO distribution (hba_wqidx) for WQ assignment. */
- wq = qp->nvme_wq;
+ wq = qp->io_wq;
pring = wq->pring;
ctxp = pwqe->context2;
@@ -19743,7 +19990,7 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
}
bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
pwqe->sli4_xritag);
- bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->nvme_cq_map);
+ bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
qp, wq_access);
@@ -19754,6 +20001,8 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
}
lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
spin_unlock_irqrestore(&pring->ring_lock, iflags);
+
+ lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
return 0;
}
return WQE_ERROR;
@@ -19790,9 +20039,7 @@ void lpfc_snapshot_mxp(struct lpfc_hba *phba, u32 hwqid)
if (multixri_pool->stat_snapshot_taken == LPFC_MXP_SNAPSHOT_TAKEN) {
pvt_pool = &qp->p_multixri_pool->pvt_pool;
pbl_pool = &qp->p_multixri_pool->pbl_pool;
- txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt;
- if (qp->nvme_wq)
- txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt;
+ txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
multixri_pool->stat_pbl_count = pbl_pool->count;
multixri_pool->stat_pvt_count = pvt_pool->count;
@@ -19862,12 +20109,9 @@ void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid)
watermark_max = xri_limit;
watermark_min = xri_limit / 2;
- txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt;
+ txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
abts_io_bufs = qp->abts_scsi_io_bufs;
- if (qp->nvme_wq) {
- txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt;
- abts_io_bufs += qp->abts_nvme_io_bufs;
- }
+ abts_io_bufs += qp->abts_nvme_io_bufs;
new_watermark = txcmplq_cnt + abts_io_bufs;
new_watermark = min(watermark_max, new_watermark);
@@ -20121,6 +20365,13 @@ void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
lpfc_ncmd->cur_iocbq.wqe_cmpl = NULL;
lpfc_ncmd->cur_iocbq.iocb_cmpl = NULL;
+ if (phba->cfg_xpsgl && !phba->nvmet_support &&
+ !list_empty(&lpfc_ncmd->dma_sgl_xtra_list))
+ lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
+
+ if (!list_empty(&lpfc_ncmd->dma_cmd_rsp_list))
+ lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
+
if (phba->cfg_xri_rebalancing) {
if (lpfc_ncmd->expedite) {
/* Return to expedite pool */
@@ -20142,12 +20393,9 @@ void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
pbl_pool = &qp->p_multixri_pool->pbl_pool;
pvt_pool = &qp->p_multixri_pool->pvt_pool;
- txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt;
+ txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
abts_io_bufs = qp->abts_scsi_io_bufs;
- if (qp->nvme_wq) {
- txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt;
- abts_io_bufs += qp->abts_nvme_io_bufs;
- }
+ abts_io_bufs += qp->abts_nvme_io_bufs;
xri_owned = pvt_pool->count + txcmplq_cnt + abts_io_bufs;
xri_limit = qp->p_multixri_pool->xri_limit;
@@ -20402,3 +20650,294 @@ struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
return lpfc_cmd;
}
+
+/**
+ * lpfc_get_sgl_per_hdwq - Get one SGL chunk from hdwq's pool
+ * @phba: The HBA for which this call is being executed.
+ * @lpfc_buf: IO buf structure to append the SGL chunk
+ *
+ * This routine gets one SGL chunk buffer from hdwq's SGL chunk pool,
+ * and will allocate an SGL chunk if the pool is empty.
+ *
+ * Return codes:
+ * NULL - Error
+ * Pointer to sli4_hybrid_sgl - Success
+ **/
+struct sli4_hybrid_sgl *
+lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
+{
+ struct sli4_hybrid_sgl *list_entry = NULL;
+ struct sli4_hybrid_sgl *tmp = NULL;
+ struct sli4_hybrid_sgl *allocated_sgl = NULL;
+ struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
+ struct list_head *buf_list = &hdwq->sgl_list;
+ unsigned long iflags;
+
+ spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
+
+ if (likely(!list_empty(buf_list))) {
+ /* break off 1 chunk from the sgl_list */
+ list_for_each_entry_safe(list_entry, tmp,
+ buf_list, list_node) {
+ list_move_tail(&list_entry->list_node,
+ &lpfc_buf->dma_sgl_xtra_list);
+ break;
+ }
+ } else {
+ /* allocate more */
+ spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
+ tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
+ cpu_to_node(hdwq->io_wq->chann));
+ if (!tmp) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "8353 error kmalloc memory for HDWQ "
+ "%d %s\n",
+ lpfc_buf->hdwq_no, __func__);
+ return NULL;
+ }
+
+ tmp->dma_sgl = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool,
+ GFP_ATOMIC, &tmp->dma_phys_sgl);
+ if (!tmp->dma_sgl) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "8354 error pool_alloc memory for HDWQ "
+ "%d %s\n",
+ lpfc_buf->hdwq_no, __func__);
+ kfree(tmp);
+ return NULL;
+ }
+
+ spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
+ list_add_tail(&tmp->list_node, &lpfc_buf->dma_sgl_xtra_list);
+ }
+
+ allocated_sgl = list_last_entry(&lpfc_buf->dma_sgl_xtra_list,
+ struct sli4_hybrid_sgl,
+ list_node);
+
+ spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
+
+ return allocated_sgl;
+}
+
+/**
+ * lpfc_put_sgl_per_hdwq - Put one SGL chunk into hdwq pool
+ * @phba: The HBA for which this call is being executed.
+ * @lpfc_buf: IO buf structure with the SGL chunk
+ *
+ * This routine puts one SGL chunk buffer into hdwq's SGL chunk pool.
+ *
+ * Return codes:
+ * 0 - Success
+ * -EINVAL - Error
+ **/
+int
+lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
+{
+ int rc = 0;
+ struct sli4_hybrid_sgl *list_entry = NULL;
+ struct sli4_hybrid_sgl *tmp = NULL;
+ struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
+ struct list_head *buf_list = &hdwq->sgl_list;
+ unsigned long iflags;
+
+ spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
+
+ if (likely(!list_empty(&lpfc_buf->dma_sgl_xtra_list))) {
+ list_for_each_entry_safe(list_entry, tmp,
+ &lpfc_buf->dma_sgl_xtra_list,
+ list_node) {
+ list_move_tail(&list_entry->list_node,
+ buf_list);
+ }
+ } else {
+ rc = -EINVAL;
+ }
+
+ spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
+ return rc;
+}
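
As an aside for readers of this hunk: a minimal caller-side sketch (not part of the patch; the lpfc_example_* name is hypothetical) of how the per-hdwq SGL chunk pool is meant to be used — a chunk is taken when an IO needs extra SGEs and handed back when the IO buffer is released.

static int lpfc_example_use_xtra_sgl(struct lpfc_hba *phba,
				     struct lpfc_io_buf *lpfc_cmd)
{
	struct sli4_hybrid_sgl *sgl_xtra;

	/* Take one chunk from this IO's hardware queue pool (a new
	 * chunk is allocated if the pool is empty).
	 */
	sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd);
	if (!sgl_xtra)
		return -ENOMEM;

	/* ... additional SGEs would be built in sgl_xtra->dma_sgl,
	 * chained via sgl_xtra->dma_phys_sgl ...
	 */

	/* Return the chunk to the hdwq pool when the IO is done. */
	return lpfc_put_sgl_per_hdwq(phba, lpfc_cmd);
}
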
+
+/**
+ * lpfc_free_sgl_per_hdwq - Free all SGL chunks of hdwq pool
+ * @phba: phba object
+ * @hdwq: hdwq to cleanup sgl buff resources on
+ *
+ * This routine frees all SGL chunks of the hdwq's SGL chunk pool.
+ *
+ * Return codes:
+ * None
+ **/
+void
+lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
+ struct lpfc_sli4_hdw_queue *hdwq)
+{
+ struct list_head *buf_list = &hdwq->sgl_list;
+ struct sli4_hybrid_sgl *list_entry = NULL;
+ struct sli4_hybrid_sgl *tmp = NULL;
+ unsigned long iflags;
+
+ spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
+
+ /* Free sgl pool */
+ list_for_each_entry_safe(list_entry, tmp,
+ buf_list, list_node) {
+ dma_pool_free(phba->lpfc_sg_dma_buf_pool,
+ list_entry->dma_sgl,
+ list_entry->dma_phys_sgl);
+ list_del(&list_entry->list_node);
+ kfree(list_entry);
+ }
+
+ spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
+}
+
+/**
+ * lpfc_get_cmd_rsp_buf_per_hdwq - Get one CMD/RSP buffer from hdwq
+ * @phba: The HBA for which this call is being executed.
+ * @lpfc_buf: IO buf structure to attach the CMD/RSP buffer
+ *
+ * This routine gets one CMD/RSP buffer from hdwq's CMD/RSP pool,
+ * and will allocate a CMD/RSP buffer if the pool is empty.
+ *
+ * Return codes:
+ * NULL - Error
+ * Pointer to fcp_cmd_rsp_buf - Success
+ **/
+struct fcp_cmd_rsp_buf *
+lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
+ struct lpfc_io_buf *lpfc_buf)
+{
+ struct fcp_cmd_rsp_buf *list_entry = NULL;
+ struct fcp_cmd_rsp_buf *tmp = NULL;
+ struct fcp_cmd_rsp_buf *allocated_buf = NULL;
+ struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
+ struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
+ unsigned long iflags;
+
+ spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
+
+ if (likely(!list_empty(buf_list))) {
+ /* break off 1 chunk from the list */
+ list_for_each_entry_safe(list_entry, tmp,
+ buf_list,
+ list_node) {
+ list_move_tail(&list_entry->list_node,
+ &lpfc_buf->dma_cmd_rsp_list);
+ break;
+ }
+ } else {
+ /* allocate more */
+ spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
+ tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
+ cpu_to_node(hdwq->io_wq->chann));
+ if (!tmp) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "8355 error kmalloc memory for HDWQ "
+ "%d %s\n",
+ lpfc_buf->hdwq_no, __func__);
+ return NULL;
+ }
+
+ tmp->fcp_cmnd = dma_pool_alloc(phba->lpfc_cmd_rsp_buf_pool,
+ GFP_ATOMIC,
+ &tmp->fcp_cmd_rsp_dma_handle);
+
+ if (!tmp->fcp_cmnd) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "8356 error pool_alloc memory for HDWQ "
+ "%d %s\n",
+ lpfc_buf->hdwq_no, __func__);
+ kfree(tmp);
+ return NULL;
+ }
+
+ tmp->fcp_rsp = (struct fcp_rsp *)((uint8_t *)tmp->fcp_cmnd +
+ sizeof(struct fcp_cmnd));
+
+ spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
+ list_add_tail(&tmp->list_node, &lpfc_buf->dma_cmd_rsp_list);
+ }
+
+ allocated_buf = list_last_entry(&lpfc_buf->dma_cmd_rsp_list,
+ struct fcp_cmd_rsp_buf,
+ list_node);
+
+ spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
+
+ return allocated_buf;
+}
+
+/**
+ * lpfc_put_cmd_rsp_buf_per_hdwq - Put one CMD/RSP buffer into hdwq pool
+ * @phba: The HBA for which this call is being executed.
+ * @lpfc_buf: IO buf structure with the CMD/RSP buf
+ *
+ * This routine puts one CMD/RSP buffer back into the hdwq's CMD/RSP pool.
+ *
+ * Return codes:
+ * 0 - Success
+ * -EINVAL - Error
+ **/
+int
+lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
+ struct lpfc_io_buf *lpfc_buf)
+{
+ int rc = 0;
+ struct fcp_cmd_rsp_buf *list_entry = NULL;
+ struct fcp_cmd_rsp_buf *tmp = NULL;
+ struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
+ struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
+ unsigned long iflags;
+
+ spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
+
+ if (likely(!list_empty(&lpfc_buf->dma_cmd_rsp_list))) {
+ list_for_each_entry_safe(list_entry, tmp,
+ &lpfc_buf->dma_cmd_rsp_list,
+ list_node) {
+ list_move_tail(&list_entry->list_node,
+ buf_list);
+ }
+ } else {
+ rc = -EINVAL;
+ }
+
+ spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
+ return rc;
+}
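
Again purely illustrative (hypothetical lpfc_example_* helper, not in the patch): the CMD/RSP pool follows the same get/put pattern, handing out a DMA-able FCP command/response pair per IO.

static int lpfc_example_use_cmd_rsp(struct lpfc_hba *phba,
				    struct lpfc_io_buf *lpfc_cmd)
{
	struct fcp_cmd_rsp_buf *buf;

	buf = lpfc_get_cmd_rsp_buf_per_hdwq(phba, lpfc_cmd);
	if (!buf)
		return -ENOMEM;

	/* buf->fcp_cmnd and buf->fcp_rsp sit back to back in one DMA
	 * allocation; buf->fcp_cmd_rsp_dma_handle maps its start.
	 */

	/* Hand the pair back to the hdwq pool on release. */
	return lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_cmd);
}
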
+
+/**
+ * lpfc_free_cmd_rsp_buf_per_hdwq - Free all CMD/RSP chunks of hdwq pool
+ * @phba: phba object
+ * @hdwq: hdwq to cleanup cmd rsp buff resources on
+ *
+ * This routine frees all CMD/RSP buffers of the hdwq's CMD/RSP buffer pool.
+ *
+ * Return codes:
+ * None
+ **/
+void
+lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
+ struct lpfc_sli4_hdw_queue *hdwq)
+{
+ struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
+ struct fcp_cmd_rsp_buf *list_entry = NULL;
+ struct fcp_cmd_rsp_buf *tmp = NULL;
+ unsigned long iflags;
+
+ spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
+
+ /* Free cmd_rsp buf pool */
+ list_for_each_entry_safe(list_entry, tmp,
+ buf_list,
+ list_node) {
+ dma_pool_free(phba->lpfc_cmd_rsp_buf_pool,
+ list_entry->fcp_cmnd,
+ list_entry->fcp_cmd_rsp_dma_handle);
+ list_del(&list_entry->list_node);
+ kfree(list_entry);
+ }
+
+ spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
+}
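
For context, a hedged sketch (hypothetical helper; the actual call sites live in lpfc_init.c, outside this hunk) of how both per-hdwq pools would be torn down when the hardware queues are destroyed.

static void lpfc_example_destroy_hdwq_pools(struct lpfc_hba *phba)
{
	int idx;

	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		struct lpfc_sli4_hdw_queue *hdwq = &phba->sli4_hba.hdwq[idx];

		/* Drain and free every chunk still parked in the pools. */
		lpfc_free_sgl_per_hdwq(phba, hdwq);
		lpfc_free_cmd_rsp_buf_per_hdwq(phba, hdwq);
	}
}
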
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 467b8270f7fd..7bcf922a8be2 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -365,9 +365,18 @@ struct lpfc_io_buf {
/* Common fields */
struct list_head list;
void *data;
+
dma_addr_t dma_handle;
dma_addr_t dma_phys_sgl;
- struct sli4_sge *dma_sgl;
+
+ struct sli4_sge *dma_sgl; /* initial segment chunk */
+
+ /* linked list of extra sli4_hybrid_sge */
+ struct list_head dma_sgl_xtra_list;
+
+ /* list head for fcp_cmd_rsp buf */
+ struct list_head dma_cmd_rsp_list;
+
struct lpfc_iocbq cur_iocbq;
struct lpfc_sli4_hdw_queue *hdwq;
uint16_t hdwq_no;
@@ -375,14 +384,13 @@ struct lpfc_io_buf {
struct lpfc_nodelist *ndlp;
uint32_t timeout;
- uint16_t flags; /* TBD convert exch_busy to flags */
+ uint16_t flags;
#define LPFC_SBUF_XBUSY 0x1 /* SLI4 hba reported XB on WCQE cmpl */
#define LPFC_SBUF_BUMP_QDEPTH 0x2 /* bumped queue depth counter */
/* External DIF device IO conversions */
#define LPFC_SBUF_NORMAL_DIF 0x4 /* normal mode to insert/strip */
#define LPFC_SBUF_PASS_DIF 0x8 /* insert/strip mode to passthru */
#define LPFC_SBUF_NOT_POSTED 0x10 /* SGL failed post to FW. */
- uint16_t exch_busy; /* SLI4 hba reported XB on complete WCQE */
uint16_t status; /* From IOCB Word 7- ulpStatus */
uint32_t result; /* From IOCB Word 4. */
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 3aeca387b22a..d963ca871383 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -41,11 +41,18 @@
/* Multi-queue arrangement for FCP EQ/CQ/WQ tuples */
#define LPFC_HBA_HDWQ_MIN 0
-#define LPFC_HBA_HDWQ_MAX 128
-#define LPFC_HBA_HDWQ_DEF 0
+#define LPFC_HBA_HDWQ_MAX 256
+#define LPFC_HBA_HDWQ_DEF LPFC_HBA_HDWQ_MIN
-/* Common buffer size to accomidate SCSI and NVME IO buffers */
-#define LPFC_COMMON_IO_BUF_SZ 768
+/* irq_chann range, values */
+#define LPFC_IRQ_CHANN_MIN 0
+#define LPFC_IRQ_CHANN_MAX 256
+#define LPFC_IRQ_CHANN_DEF LPFC_IRQ_CHANN_MIN
+
+/* FCP MQ queue count limiting */
+#define LPFC_FCP_MQ_THRESHOLD_MIN 0
+#define LPFC_FCP_MQ_THRESHOLD_MAX 256
+#define LPFC_FCP_MQ_THRESHOLD_DEF 8
/*
* Provide the default FCF Record attributes used by the driver
@@ -109,9 +116,8 @@ enum lpfc_sli4_queue_type {
enum lpfc_sli4_queue_subtype {
LPFC_NONE,
LPFC_MBOX,
- LPFC_FCP,
+ LPFC_IO,
LPFC_ELS,
- LPFC_NVME,
LPFC_NVMET,
LPFC_NVME_LS,
LPFC_USOL
@@ -132,6 +138,23 @@ struct lpfc_rqb {
struct lpfc_queue {
struct list_head list;
struct list_head wq_list;
+
+ /*
+ * If interrupts are in effect on _all_ the EQs, the footprint of
+ * the polling code is zero (except for the mode field). This
+ * memory is checked for every IO to see if the IO needs to be
+ * polled, and at completion to check if the EQ needs to be
+ * rearmed. Keep it in the same cacheline as the queue pointer to
+ * avoid CPU fetch stalls. Using 1 byte of memory leaves a 7-byte
+ * hole, so fill it with other frequently used members.
+ */
+ uint16_t last_cpu; /* most recent cpu */
+ uint16_t hdwq;
+ uint8_t qe_valid;
+ uint8_t mode; /* interrupt or polling */
+#define LPFC_EQ_INTERRUPT 0
+#define LPFC_EQ_POLL 1
+
struct list_head wqfull_list;
enum lpfc_sli4_queue_type type;
enum lpfc_sli4_queue_subtype subtype;
@@ -198,6 +221,7 @@ struct lpfc_queue {
uint8_t q_flag;
#define HBA_NVMET_WQFULL 0x1 /* We hit WQ Full condition for NVMET */
#define HBA_NVMET_CQ_NOTIFY 0x1 /* LPFC_NVMET_CQ_NOTIFY CQEs this EQE */
+#define HBA_EQ_DELAY_CHK 0x2 /* EQ is a candidate for coalescing */
#define LPFC_NVMET_CQ_NOTIFY 4
void __iomem *db_regaddr;
uint16_t dpp_enable;
@@ -238,10 +262,8 @@ struct lpfc_queue {
struct delayed_work sched_spwork;
uint64_t isr_timestamp;
- uint16_t hdwq;
- uint16_t last_cpu; /* most recent cpu */
- uint8_t qe_valid;
struct lpfc_queue *assoc_qp;
+ struct list_head _poll_list;
void **q_pgs; /* array to index entries per page */
};
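
To make the comment above concrete, a trivial (hypothetical) inline showing the kind of fast-path test the relocated mode byte enables:

static inline bool lpfc_example_eq_is_polling(struct lpfc_queue *eq)
{
	/* mode sits in the same cacheline as the hot queue members,
	 * so this check costs no extra fetch on the IO fast path.
	 */
	return eq->mode == LPFC_EQ_POLL;
}
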
@@ -450,11 +472,17 @@ struct lpfc_hba;
#define LPFC_SLI4_HANDLER_NAME_SZ 16
struct lpfc_hba_eq_hdl {
uint32_t idx;
+ uint16_t irq;
char handler_name[LPFC_SLI4_HANDLER_NAME_SZ];
struct lpfc_hba *phba;
struct lpfc_queue *eq;
+ struct cpumask aff_mask;
};
+#define lpfc_get_eq_hdl(eqidx) (&phba->sli4_hba.hba_eq_hdl[eqidx])
+#define lpfc_get_aff_mask(eqidx) (&phba->sli4_hba.hba_eq_hdl[eqidx].aff_mask)
+#define lpfc_get_irq(eqidx) (phba->sli4_hba.hba_eq_hdl[eqidx].irq)
+
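A small usage sketch for the new accessor macros (hypothetical lpfc_example_* function; note the macros assume a local variable named phba is in scope):

static void lpfc_example_hint_affinity(struct lpfc_hba *phba, int eqidx)
{
	struct lpfc_hba_eq_hdl *eqhdl = lpfc_get_eq_hdl(eqidx);

	/* Record an affinity mask on the handle, then hint the IRQ
	 * layer with it (illustrative only).
	 */
	cpumask_copy(&eqhdl->aff_mask, cpumask_of(raw_smp_processor_id()));
	irq_set_affinity_hint(lpfc_get_irq(eqidx), lpfc_get_aff_mask(eqidx));
}
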
/*BB Credit recovery value*/
struct lpfc_bbscn_params {
uint32_t word0;
@@ -512,6 +540,7 @@ struct lpfc_pc_sli4_params {
uint8_t cqav;
uint8_t wqsize;
uint8_t bv1s;
+ uint8_t pls;
#define LPFC_WQ_SZ64_SUPPORT 1
#define LPFC_WQ_SZ128_SUPPORT 2
uint8_t wqpcnt;
@@ -543,11 +572,10 @@ struct lpfc_sli4_lnk_info {
#define LPFC_SLI4_HANDLER_CNT (LPFC_HBA_IO_CHAN_MAX+ \
LPFC_FOF_IO_CHAN_NUM)
-/* Used for IRQ vector to CPU mapping */
+/* Used for tracking CPU mapping attributes */
struct lpfc_vector_map_info {
uint16_t phys_id;
uint16_t core_id;
- uint16_t irq;
uint16_t eq;
uint16_t hdwq;
uint16_t flag;
@@ -641,22 +669,17 @@ struct lpfc_eq_intr_info {
struct lpfc_sli4_hdw_queue {
/* Pointers to the constructed SLI4 queues */
struct lpfc_queue *hba_eq; /* Event queues for HBA */
- struct lpfc_queue *fcp_cq; /* Fast-path FCP compl queue */
- struct lpfc_queue *nvme_cq; /* Fast-path NVME compl queue */
- struct lpfc_queue *fcp_wq; /* Fast-path FCP work queue */
- struct lpfc_queue *nvme_wq; /* Fast-path NVME work queue */
- uint16_t fcp_cq_map;
- uint16_t nvme_cq_map;
+ struct lpfc_queue *io_cq; /* Fast-path FCP & NVME compl queue */
+ struct lpfc_queue *io_wq; /* Fast-path FCP & NVME work queue */
+ uint16_t io_cq_map;
/* Keep track of IO buffers for this hardware queue */
spinlock_t io_buf_list_get_lock; /* Common buf alloc list lock */
struct list_head lpfc_io_buf_list_get;
spinlock_t io_buf_list_put_lock; /* Common buf free list lock */
struct list_head lpfc_io_buf_list_put;
- spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
- struct list_head lpfc_abts_scsi_buf_list;
- spinlock_t abts_nvme_buf_list_lock; /* list of aborted NVME IOs */
- struct list_head lpfc_abts_nvme_buf_list;
+ spinlock_t abts_io_buf_list_lock; /* list of aborted IOs */
+ struct list_head lpfc_abts_io_buf_list;
uint32_t total_io_bufs;
uint32_t get_io_bufs;
uint32_t put_io_bufs;
@@ -680,6 +703,13 @@ struct lpfc_sli4_hdw_queue {
uint32_t cpucheck_xmt_io[LPFC_CHECK_CPU_CNT];
uint32_t cpucheck_cmpl_io[LPFC_CHECK_CPU_CNT];
#endif
+
+ /* Per HDWQ pool resources */
+ struct list_head sgl_list;
+ struct list_head cmd_rsp_buf_list;
+
+ /* Lock for syncing Per HDWQ pool resources */
+ spinlock_t hdwq_lock;
};
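
And a short (hypothetical) initialization sketch for the new per-hdwq pool members; the real setup is performed when the hardware queues are created in lpfc_init.c.

static void lpfc_example_init_hdwq_pools(struct lpfc_sli4_hdw_queue *hdwq)
{
	/* Empty pools and a fresh lock before any IO can use them. */
	INIT_LIST_HEAD(&hdwq->sgl_list);
	INIT_LIST_HEAD(&hdwq->cmd_rsp_buf_list);
	spin_lock_init(&hdwq->hdwq_lock);
}
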
#ifdef LPFC_HDWQ_LOCK_STAT
@@ -845,8 +875,8 @@ struct lpfc_sli4_hba {
struct lpfc_queue **cq_lookup;
struct list_head lpfc_els_sgl_list;
struct list_head lpfc_abts_els_sgl_list;
- spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
- struct list_head lpfc_abts_scsi_buf_list;
+ spinlock_t abts_io_buf_list_lock; /* list of aborted IOs */
+ struct list_head lpfc_abts_io_buf_list;
struct list_head lpfc_nvmet_sgl_list;
spinlock_t abts_nvmet_buf_list_lock; /* list of aborted NVMET IOs */
struct list_head lpfc_abts_nvmet_ctx_list;
@@ -888,6 +918,7 @@ struct lpfc_sli4_hba {
struct lpfc_vector_map_info *cpu_map;
uint16_t num_possible_cpu;
uint16_t num_present_cpu;
+ struct cpumask numa_mask;
uint16_t curr_disp_cpu;
struct lpfc_eq_intr_info __percpu *eq_info;
uint32_t conf_trunk;
@@ -1051,10 +1082,11 @@ int lpfc_sli4_resume_rpi(struct lpfc_nodelist *,
void (*)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *);
void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *);
void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *);
-void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *,
- struct sli4_wcqe_xri_aborted *, int);
void lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
- struct sli4_wcqe_xri_aborted *axri, int idx);
+ struct sli4_wcqe_xri_aborted *axri,
+ struct lpfc_io_buf *lpfc_ncmd);
+void lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
+ struct sli4_wcqe_xri_aborted *axri, int idx);
void lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
struct sli4_wcqe_xri_aborted *axri);
void lpfc_sli4_els_xri_aborted(struct lpfc_hba *,
@@ -1089,6 +1121,17 @@ int lpfc_sli4_post_status_check(struct lpfc_hba *);
uint8_t lpfc_sli_config_mbox_subsys_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
uint8_t lpfc_sli_config_mbox_opcode_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_sli4_ras_dma_free(struct lpfc_hba *phba);
+struct sli4_hybrid_sgl *lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba,
+ struct lpfc_io_buf *buf);
+struct fcp_cmd_rsp_buf *lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
+ struct lpfc_io_buf *buf);
+int lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *buf);
+int lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
+ struct lpfc_io_buf *buf);
+void lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
+ struct lpfc_sli4_hdw_queue *hdwq);
+void lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
+ struct lpfc_sli4_hdw_queue *hdwq);
static inline void *lpfc_sli4_qe(struct lpfc_queue *q, uint16_t idx)
{
return q->q_pgs[idx / q->entry_cnt_per_pg] +
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index f7e93aaf1e00..9563c49f36ab 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "12.2.0.3"
+#define LPFC_DRIVER_VERSION "12.6.0.3"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 343bc71d4615..b76646357980 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -527,9 +527,11 @@ disable_vport(struct fc_vport *fc_vport)
* scsi_host_put() to release the vport.
*/
lpfc_mbx_unreg_vpi(vport);
- spin_lock_irq(shost->host_lock);
- vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
- spin_unlock_irq(shost->host_lock);
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
+ spin_unlock_irq(shost->host_lock);
+ }
lpfc_vport_set_state(vport, FC_VPORT_DISABLED);
lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,