author		James Smart <James.Smart@Emulex.Com>			2009-05-22 14:51:39 -0400
committer	James Bottomley <James.Bottomley@HansenPartnership.com>	2009-06-08 11:21:29 -0500
commit		da0436e915a5c17ee79e72c1bf978a4ebb1cbf4d (patch)
tree		7784646b7627117fa7849a901c85294fae905505 /drivers/scsi/lpfc
parent		3772a99175f5378b5001e8da364341a8b8226a4a (diff)
[SCSI] lpfc 8.3.2 : Addition of SLI4 Interface - Base Support
Adds new hardware and interface definitions.
Adds new interface routines - utilizing the reorganized layout of the
driver. Adds SLI-4 specific functions for attachment, initialization,
teardown, etc.
Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
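Among the new interface definitions, the bit-field accessor macros added in lpfc_hw4.h (bf_get/bf_set) are used throughout the SLI-4 code. Below is a minimal, self-contained sketch of the pattern: the macros are copied verbatim from the patch, the struct and field names come from the worked example in the header's own comment block (they are illustrative, not real hardware fields), and the main() harness is added here only for demonstration.

    #include <stdint.h>

    /* Accessors as defined in lpfc_hw4.h: each bit field supplies
     * name##_SHIFT, name##_MASK and name##_WORD defines. */
    #define bf_get(name, ptr) \
            (((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
    #define bf_set(name, ptr, value) \
            ((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
            ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))

    /* Illustrative structure from the header's comment block: a 2-bit
     * field starting at bit 7 of field4. */
    struct temp {
            uint32_t field1;
            uint32_t field2;
            uint32_t field3;
            uint32_t field4;
    #define example_bit_field_SHIFT 7
    #define example_bit_field_MASK  0x03
    #define example_bit_field_WORD  field4
            uint32_t field5;
    };

    int main(void)
    {
            struct temp t1 = { 0 };
            bf_set(example_bit_field, &t1, 2);      /* field4 becomes 0x100 */
            return bf_get(example_bit_field, &t1);  /* reads back 2 */
    }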
Diffstat (limited to 'drivers/scsi/lpfc')
-rw-r--r-- | drivers/scsi/lpfc/lpfc.h            |   49
-rw-r--r-- | drivers/scsi/lpfc/lpfc_attr.c       |   83
-rw-r--r-- | drivers/scsi/lpfc/lpfc_crtn.h       |   59
-rw-r--r-- | drivers/scsi/lpfc/lpfc_ct.c         |    2
-rw-r--r-- | drivers/scsi/lpfc/lpfc_debugfs.c    |    2
-rw-r--r-- | drivers/scsi/lpfc/lpfc_els.c        |    4
-rw-r--r-- | drivers/scsi/lpfc/lpfc_hbadisc.c    |   34
-rw-r--r-- | drivers/scsi/lpfc/lpfc_hw.h         |  140
-rw-r--r-- | drivers/scsi/lpfc/lpfc_hw4.h        | 2141
-rw-r--r-- | drivers/scsi/lpfc/lpfc_init.c       | 3800
-rw-r--r-- | drivers/scsi/lpfc/lpfc_mbox.c       |    2
-rw-r--r-- | drivers/scsi/lpfc/lpfc_mem.c        |  204
-rw-r--r-- | drivers/scsi/lpfc/lpfc_nportdisc.c  |    2
-rw-r--r-- | drivers/scsi/lpfc/lpfc_scsi.c       |  492
-rw-r--r-- | drivers/scsi/lpfc/lpfc_scsi.h       |    2
-rw-r--r-- | drivers/scsi/lpfc/lpfc_sli.c        | 1263
-rw-r--r-- | drivers/scsi/lpfc/lpfc_sli.h        |   27
-rw-r--r-- | drivers/scsi/lpfc/lpfc_sli4.h       |  467
-rw-r--r-- | drivers/scsi/lpfc/lpfc_vport.c      |   23
19 files changed, 8485 insertions, 311 deletions
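A recurring pattern in the diff below is run-time dispatch between the legacy SLI-3 path and the new SLI-4 path via phba->sli_rev, as in lpfc_get_hba_info() in lpfc_attr.c. A minimal sketch of that dispatch under stated assumptions: the LPFC_SLI_REV* values are assumed (they are defined in lpfc_sli.h, which this diff does not show), and struct hba is a hypothetical stand-in for the driver's struct lpfc_hba.

    #include <stdio.h>

    #define LPFC_SLI_REV3 3   /* assumed values; defined in lpfc_sli.h */
    #define LPFC_SLI_REV4 4

    /* Hypothetical stand-in for struct lpfc_hba; only sli_rev matters here. */
    struct hba {
            int sli_rev;
    };

    /* Branch the same way lpfc_get_hba_info() does in the diff below:
     * SLI-4 reads resource counts from the READ_CONFIG mailbox via bf_get(),
     * SLI-3 reads them from the varRdConfig fields of the legacy mailbox. */
    static const char *resource_source(const struct hba *phba)
    {
            if (phba->sli_rev == LPFC_SLI_REV4)
                    return "SLI-4: bf_get() on the READ_CONFIG mailbox";
            return "SLI-3: varRdConfig fields of the legacy mailbox";
    }

    int main(void)
    {
            struct hba h = { .sli_rev = LPFC_SLI_REV4 };
            printf("%s\n", resource_source(&h));
            return 0;
    }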
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 6c24c9aabe7b..13ac108a244c 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -105,9 +105,11 @@ struct lpfc_dma_pool { }; struct hbq_dmabuf { + struct lpfc_dmabuf hbuf; struct lpfc_dmabuf dbuf; uint32_t size; uint32_t tag; + struct lpfc_rcqe rcqe; }; /* Priority bit. Set value to exceed low water mark in lpfc_mem. */ @@ -141,7 +143,10 @@ typedef struct lpfc_vpd { } rev; struct { #ifdef __BIG_ENDIAN_BITFIELD - uint32_t rsvd2 :24; /* Reserved */ + uint32_t rsvd3 :19; /* Reserved */ + uint32_t cdss : 1; /* Configure Data Security SLI */ + uint32_t rsvd2 : 3; /* Reserved */ + uint32_t cbg : 1; /* Configure BlockGuard */ uint32_t cmv : 1; /* Configure Max VPIs */ uint32_t ccrp : 1; /* Config Command Ring Polling */ uint32_t csah : 1; /* Configure Synchronous Abort Handling */ @@ -159,7 +164,10 @@ typedef struct lpfc_vpd { uint32_t csah : 1; /* Configure Synchronous Abort Handling */ uint32_t ccrp : 1; /* Config Command Ring Polling */ uint32_t cmv : 1; /* Configure Max VPIs */ - uint32_t rsvd2 :24; /* Reserved */ + uint32_t cbg : 1; /* Configure BlockGuard */ + uint32_t rsvd2 : 3; /* Reserved */ + uint32_t cdss : 1; /* Configure Data Security SLI */ + uint32_t rsvd3 :19; /* Reserved */ #endif } sli3Feat; } lpfc_vpd_t; @@ -280,6 +288,9 @@ struct lpfc_vport { enum discovery_state port_state; uint16_t vpi; + uint16_t vfi; + uint8_t vfi_state; +#define LPFC_VFI_REGISTERED 0x1 uint32_t fc_flag; /* FC flags */ /* Several of these flags are HBA centric and should be moved to @@ -392,6 +403,9 @@ struct lpfc_vport { #endif uint8_t stat_data_enabled; uint8_t stat_data_blocked; + struct list_head rcv_buffer_list; + uint32_t vport_flag; +#define STATIC_VPORT 1 }; struct hbq_s { @@ -494,6 +508,7 @@ struct lpfc_hba { #define LPFC_SLI3_CRP_ENABLED 0x08 #define LPFC_SLI3_INB_ENABLED 0x10 #define LPFC_SLI3_BG_ENABLED 0x20 +#define LPFC_SLI3_DSS_ENABLED 0x40 uint32_t iocb_cmd_size; uint32_t iocb_rsp_size; @@ -507,8 +522,13 @@ struct lpfc_hba { uint32_t hba_flag; /* hba generic flags */ #define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */ - -#define DEFER_ERATT 0x4 /* Deferred error attention in progress */ +#define DEFER_ERATT 0x2 /* Deferred error attention in progress */ +#define HBA_FCOE_SUPPORT 0x4 /* HBA function supports FCOE */ +#define HBA_RECEIVE_BUFFER 0x8 /* Rcv buffer posted to worker thread */ +#define HBA_POST_RECEIVE_BUFFER 0x10 /* Rcv buffers need to be posted */ +#define FCP_XRI_ABORT_EVENT 0x20 +#define ELS_XRI_ABORT_EVENT 0x40 +#define ASYNC_EVENT 0x80 struct lpfc_dmabuf slim2p; MAILBOX_t *mbox; @@ -567,6 +587,9 @@ struct lpfc_hba { uint32_t cfg_poll; uint32_t cfg_poll_tmo; uint32_t cfg_use_msi; + uint32_t cfg_fcp_imax; + uint32_t cfg_fcp_wq_count; + uint32_t cfg_fcp_eq_count; uint32_t cfg_sg_seg_cnt; uint32_t cfg_prot_sg_seg_cnt; uint32_t cfg_sg_dma_buf_size; @@ -576,6 +599,8 @@ struct lpfc_hba { uint32_t cfg_enable_hba_reset; uint32_t cfg_enable_hba_heartbeat; uint32_t cfg_enable_bg; + uint32_t cfg_enable_fip; + uint32_t cfg_log_verbose; lpfc_vpd_t vpd; /* vital product data */ @@ -659,7 +684,8 @@ struct lpfc_hba { /* pci_mem_pools */ struct pci_pool *lpfc_scsi_dma_buf_pool; struct pci_pool *lpfc_mbuf_pool; - struct pci_pool *lpfc_hbq_pool; + struct pci_pool *lpfc_hrb_pool; /* header receive buffer pool */ + struct pci_pool *lpfc_drb_pool; /* data receive buffer pool */ struct lpfc_dma_pool lpfc_mbuf_safety_pool; mempool_t *mbox_mem_pool; @@ -675,6 +701,14 @@ struct lpfc_hba { 
struct lpfc_vport *pport; /* physical lpfc_vport pointer */ uint16_t max_vpi; /* Maximum virtual nports */ #define LPFC_MAX_VPI 0xFFFF /* Max number of VPI supported */ + uint16_t max_vports; /* + * For IOV HBAs max_vpi can change + * after a reset. max_vports is max + * number of vports present. This can + * be greater than max_vpi. + */ + uint16_t vpi_base; + uint16_t vfi_base; unsigned long *vpi_bmask; /* vpi allocation table */ /* Data structure used by fabric iocb scheduler */ @@ -733,6 +767,11 @@ struct lpfc_hba { /* Maximum number of events that can be outstanding at any time*/ #define LPFC_MAX_EVT_COUNT 512 atomic_t fast_event_count; + struct lpfc_fcf fcf; + uint8_t fc_map[3]; + uint8_t valid_vlan; + uint16_t vlan_id; + struct list_head fcf_conn_rec_list; }; static inline struct Scsi_Host * diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index c14f0cbdb125..82016fc672b1 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -30,8 +30,10 @@ #include <scsi/scsi_tcq.h> #include <scsi/scsi_transport_fc.h> +#include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" +#include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc_scsi.h" @@ -828,18 +830,37 @@ lpfc_get_hba_info(struct lpfc_hba *phba, return 0; } - if (mrpi) - *mrpi = pmb->un.varRdConfig.max_rpi; - if (arpi) - *arpi = pmb->un.varRdConfig.avail_rpi; - if (mxri) - *mxri = pmb->un.varRdConfig.max_xri; - if (axri) - *axri = pmb->un.varRdConfig.avail_xri; - if (mvpi) - *mvpi = pmb->un.varRdConfig.max_vpi; - if (avpi) - *avpi = pmb->un.varRdConfig.avail_vpi; + if (phba->sli_rev == LPFC_SLI_REV4) { + rd_config = &pmboxq->u.mqe.un.rd_config; + if (mrpi) + *mrpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config); + if (arpi) + *arpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config) - + phba->sli4_hba.max_cfg_param.rpi_used; + if (mxri) + *mxri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config); + if (axri) + *axri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config) - + phba->sli4_hba.max_cfg_param.xri_used; + if (mvpi) + *mvpi = bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config); + if (avpi) + *avpi = bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) - + phba->sli4_hba.max_cfg_param.vpi_used; + } else { + if (mrpi) + *mrpi = pmb->un.varRdConfig.max_rpi; + if (arpi) + *arpi = pmb->un.varRdConfig.avail_rpi; + if (mxri) + *mxri = pmb->un.varRdConfig.max_xri; + if (axri) + *axri = pmb->un.varRdConfig.avail_xri; + if (mvpi) + *mvpi = pmb->un.varRdConfig.max_vpi; + if (avpi) + *avpi = pmb->un.varRdConfig.avail_vpi; + } mempool_free(pmboxq, phba->mbox_mem_pool); return 1; @@ -2844,15 +2865,39 @@ LPFC_ATTR_RW(poll_tmo, 10, 1, 255, /* # lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that # support this feature -# 0 = MSI disabled +# 0 = MSI disabled (default) # 1 = MSI enabled -# 2 = MSI-X enabled (default) -# Value range is [0,2]. Default value is 2. +# 2 = MSI-X enabled +# Value range is [0,2]. Default value is 0. */ -LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or " +LPFC_ATTR_R(use_msi, 0, 0, 2, "Use Message Signaled Interrupts (1) or " "MSI-X (2), if possible"); /* +# lpfc_fcp_imax: Set the maximum number of fast-path FCP interrupts per second +# +# Value range is [636,651042]. Default value is 10000. 
+*/ +LPFC_ATTR_R(fcp_imax, LPFC_FP_DEF_IMAX, LPFC_MIM_IMAX, LPFC_DMULT_CONST, + "Set the maximum number of fast-path FCP interrupts per second"); + +/* +# lpfc_fcp_wq_count: Set the number of fast-path FCP work queues +# +# Value range is [1,31]. Default value is 4. +*/ +LPFC_ATTR_R(fcp_wq_count, LPFC_FP_WQN_DEF, LPFC_FP_WQN_MIN, LPFC_FP_WQN_MAX, + "Set the number of fast-path FCP work queues, if possible"); + +/* +# lpfc_fcp_eq_count: Set the number of fast-path FCP event queues +# +# Value range is [1,7]. Default value is 1. +*/ +LPFC_ATTR_R(fcp_eq_count, LPFC_FP_EQN_DEF, LPFC_FP_EQN_MIN, LPFC_FP_EQN_MAX, + "Set the number of fast-path FCP event queues, if possible"); + +/* # lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware. # 0 = HBA resets disabled # 1 = HBA resets enabled (default) @@ -2969,6 +3014,9 @@ struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_lpfc_poll, &dev_attr_lpfc_poll_tmo, &dev_attr_lpfc_use_msi, + &dev_attr_lpfc_fcp_imax, + &dev_attr_lpfc_fcp_wq_count, + &dev_attr_lpfc_fcp_eq_count, &dev_attr_lpfc_enable_bg, &dev_attr_lpfc_soft_wwnn, &dev_attr_lpfc_soft_wwpn, @@ -4105,6 +4153,9 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) lpfc_poll_tmo_init(phba, lpfc_poll_tmo); lpfc_enable_npiv_init(phba, lpfc_enable_npiv); lpfc_use_msi_init(phba, lpfc_use_msi); + lpfc_fcp_imax_init(phba, lpfc_fcp_imax); + lpfc_fcp_wq_count_init(phba, lpfc_fcp_wq_count); + lpfc_fcp_eq_count_init(phba, lpfc_fcp_eq_count); lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset); lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat); lpfc_enable_bg_init(phba, lpfc_enable_bg); diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index f88ce3f26190..3802e455734f 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -35,17 +35,19 @@ int lpfc_config_msi(struct lpfc_hba *, LPFC_MBOXQ_t *); int lpfc_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *, int); void lpfc_read_config(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_read_lnk_stat(struct lpfc_hba *, LPFC_MBOXQ_t *); -int lpfc_reg_login(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *, - LPFC_MBOXQ_t *, uint32_t); +int lpfc_reg_rpi(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *, + LPFC_MBOXQ_t *, uint32_t); void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); -void lpfc_reg_vpi(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); +void lpfc_reg_vpi(struct lpfc_vport *, LPFC_MBOXQ_t *); void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *); void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t); +void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *); struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t); void lpfc_cleanup_rpis(struct lpfc_vport *, int); int lpfc_linkdown(struct lpfc_hba *); +void lpfc_linkdown_port(struct lpfc_vport *); void lpfc_port_link_failure(struct lpfc_vport *); void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *); @@ -54,6 +56,7 @@ void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); +void lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_enqueue_node(struct lpfc_vport *, struct lpfc_nodelist *); void lpfc_dequeue_node(struct lpfc_vport *, struct 
lpfc_nodelist *); struct lpfc_nodelist *lpfc_enable_node(struct lpfc_vport *, @@ -149,15 +152,19 @@ int lpfc_online(struct lpfc_hba *); void lpfc_unblock_mgmt_io(struct lpfc_hba *); void lpfc_offline_prep(struct lpfc_hba *); void lpfc_offline(struct lpfc_hba *); +void lpfc_reset_hba(struct lpfc_hba *); int lpfc_sli_setup(struct lpfc_hba *); int lpfc_sli_queue_setup(struct lpfc_hba *); void lpfc_handle_eratt(struct lpfc_hba *); void lpfc_handle_latt(struct lpfc_hba *); -irqreturn_t lpfc_intr_handler(int, void *); -irqreturn_t lpfc_sp_intr_handler(int, void *); -irqreturn_t lpfc_fp_intr_handler(int, void *); +irqreturn_t lpfc_sli_intr_handler(int, void *); +irqreturn_t lpfc_sli_sp_intr_handler(int, void *); +irqreturn_t lpfc_sli_fp_intr_handler(int, void *); +irqreturn_t lpfc_sli4_intr_handler(int, void *); +irqreturn_t lpfc_sli4_sp_intr_handler(int, void *); +irqreturn_t lpfc_sli4_fp_intr_handler(int, void *); void lpfc_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_config_ring(struct lpfc_hba *, int, LPFC_MBOXQ_t *); @@ -165,16 +172,32 @@ void lpfc_config_port(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_kill_board(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_mbox_put(struct lpfc_hba *, LPFC_MBOXQ_t *); LPFC_MBOXQ_t *lpfc_mbox_get(struct lpfc_hba *); +void __lpfc_mbox_cmpl_put(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_mbox_cmpl_put(struct lpfc_hba *, LPFC_MBOXQ_t *); +int lpfc_mbox_cmd_check(struct lpfc_hba *, LPFC_MBOXQ_t *); +int lpfc_mbox_dev_check(struct lpfc_hba *); int lpfc_mbox_tmo_val(struct lpfc_hba *, int); +void lpfc_init_vfi(struct lpfcMboxq *, struct lpfc_vport *); +void lpfc_reg_vfi(struct lpfcMboxq *, struct lpfc_vport *, dma_addr_t); +void lpfc_init_vpi(struct lpfcMboxq *, uint16_t); +void lpfc_unreg_vfi(struct lpfcMboxq *, uint16_t); +void lpfc_reg_fcfi(struct lpfc_hba *, struct lpfcMboxq *); +void lpfc_unreg_fcfi(struct lpfcMboxq *, uint16_t); +void lpfc_resume_rpi(struct lpfcMboxq *, struct lpfc_nodelist *); void lpfc_config_hbq(struct lpfc_hba *, uint32_t, struct lpfc_hbq_init *, uint32_t , LPFC_MBOXQ_t *); struct hbq_dmabuf *lpfc_els_hbq_alloc(struct lpfc_hba *); void lpfc_els_hbq_free(struct lpfc_hba *, struct hbq_dmabuf *); +struct hbq_dmabuf *lpfc_sli4_rb_alloc(struct lpfc_hba *); +void lpfc_sli4_rb_free(struct lpfc_hba *, struct hbq_dmabuf *); +void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *, struct fcf_record *, + uint16_t); +void lpfc_unregister_unused_fcf(struct lpfc_hba *); -int lpfc_mem_alloc(struct lpfc_hba *); +int lpfc_mem_alloc(struct lpfc_hba *, int align); void lpfc_mem_free(struct lpfc_hba *); +void lpfc_mem_free_all(struct lpfc_hba *); void lpfc_stop_vport_timers(struct lpfc_vport *); void lpfc_poll_timeout(unsigned long ptr); @@ -198,12 +221,13 @@ int lpfc_sli_host_down(struct lpfc_vport *); int lpfc_sli_hba_down(struct lpfc_hba *); int lpfc_sli_issue_mbox(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); int lpfc_sli_handle_mb_event(struct lpfc_hba *); -int lpfc_sli_flush_mbox_queue(struct lpfc_hba *); +void lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *); int lpfc_sli_check_eratt(struct lpfc_hba *); -int lpfc_sli_handle_slow_ring_event(struct lpfc_hba *, +void lpfc_sli_handle_slow_ring_event(struct lpfc_hba *, struct lpfc_sli_ring *, uint32_t); +int lpfc_sli4_handle_received_buffer(struct lpfc_hba *); void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); -int lpfc_sli_issue_iocb(struct lpfc_hba *, struct lpfc_sli_ring *, +int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t, struct lpfc_iocbq *, uint32_t); void 
lpfc_sli_pcimem_bcopy(void *, void *, uint32_t); void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *); @@ -237,7 +261,7 @@ struct lpfc_nodelist *lpfc_findnode_wwpn(struct lpfc_vport *, int lpfc_sli_issue_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); -int lpfc_sli_issue_iocb_wait(struct lpfc_hba *, struct lpfc_sli_ring *, +int lpfc_sli_issue_iocb_wait(struct lpfc_hba *, uint32_t, struct lpfc_iocbq *, struct lpfc_iocbq *, uint32_t); void lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *, struct lpfc_iocbq *, @@ -254,6 +278,12 @@ void lpfc_in_buf_free(struct lpfc_hba *, struct lpfc_dmabuf *); const char* lpfc_info(struct Scsi_Host *); int lpfc_scan_finished(struct Scsi_Host *, unsigned long); +int lpfc_init_api_table_setup(struct lpfc_hba *, uint8_t); +int lpfc_sli_api_table_setup(struct lpfc_hba *, uint8_t); +int lpfc_scsi_api_table_setup(struct lpfc_hba *, uint8_t); +int lpfc_mbox_api_table_setup(struct lpfc_hba *, uint8_t); +int lpfc_api_table_setup(struct lpfc_hba *, uint8_t); + void lpfc_get_cfgparam(struct lpfc_hba *); void lpfc_get_vport_cfgparam(struct lpfc_vport *); int lpfc_alloc_sysfs_attr(struct lpfc_vport *); @@ -314,8 +344,15 @@ lpfc_send_els_failure_event(struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *); struct lpfc_fast_path_event *lpfc_alloc_fast_evt(struct lpfc_hba *); void lpfc_free_fast_evt(struct lpfc_hba *, struct lpfc_fast_path_event *); +void lpfc_create_static_vport(struct lpfc_hba *); +void lpfc_stop_hba_timers(struct lpfc_hba *); +void lpfc_stop_port(struct lpfc_hba *); +void lpfc_parse_fcoe_conf(struct lpfc_hba *, uint8_t *, uint32_t); +int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int); +void lpfc_start_fdiscs(struct lpfc_hba *phba); #define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code) #define HBA_EVENT_RSCN 5 #define HBA_EVENT_LINK_UP 2 #define HBA_EVENT_LINK_DOWN 3 + diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index 4164b935ea9f..51990787796f 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -32,8 +32,10 @@ #include <scsi/scsi_host.h> #include <scsi/scsi_transport_fc.h> +#include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" +#include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc_scsi.h" diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index 5dd66925f4ca..42ef258c7d52 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -33,8 +33,10 @@ #include <scsi/scsi_host.h> #include <scsi/scsi_transport_fc.h> +#include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" +#include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc_scsi.h" diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 8c5c3aea4a19..9fe36bf6fd14 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -28,8 +28,10 @@ #include <scsi/scsi_host.h> #include <scsi/scsi_transport_fc.h> +#include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" +#include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc_scsi.h" @@ -220,7 +222,7 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp, icmd->un.elsreq64.myID = vport->fc_myDID; /* For ELS_REQUEST64_CR, use the VPI by default */ - icmd->ulpContext = vport->vpi; + icmd->ulpContext = vport->vpi + phba->vpi_base; icmd->ulpCt_h = 0; /* The CT field must be 0=INVALID_RPI for the ECHO cmd */ if (elscmd == ELS_CMD_ECHO) diff --git 
a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 25fc96c9081f..0fc66005d545 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -29,10 +29,12 @@ #include <scsi/scsi_host.h> #include <scsi/scsi_transport_fc.h> +#include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc_sli.h" +#include "lpfc_sli4.h" #include "lpfc_scsi.h" #include "lpfc.h" #include "lpfc_logmsg.h" @@ -491,6 +493,10 @@ lpfc_work_done(struct lpfc_hba *phba) phba->work_ha = 0; spin_unlock_irq(&phba->hbalock); + /* First, try to post the next mailbox command to SLI4 device */ + if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) + lpfc_sli4_post_async_mbox(phba); + if (ha_copy & HA_ERATT) /* Handle the error attention event */ lpfc_handle_eratt(phba); @@ -501,9 +507,27 @@ lpfc_work_done(struct lpfc_hba *phba) if (ha_copy & HA_LATT) lpfc_handle_latt(phba); + /* Process SLI4 events */ + if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) { + if (phba->hba_flag & FCP_XRI_ABORT_EVENT) + lpfc_sli4_fcp_xri_abort_event_proc(phba); + if (phba->hba_flag & ELS_XRI_ABORT_EVENT) + lpfc_sli4_els_xri_abort_event_proc(phba); + if (phba->hba_flag & ASYNC_EVENT) + lpfc_sli4_async_event_proc(phba); + if (phba->hba_flag & HBA_POST_RECEIVE_BUFFER) { + spin_lock_irq(&phba->hbalock); + phba->hba_flag &= ~HBA_POST_RECEIVE_BUFFER; + spin_unlock_irq(&phba->hbalock); + lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); + } + if (phba->hba_flag & HBA_RECEIVE_BUFFER) + lpfc_sli4_handle_received_buffer(phba); + } + vports = lpfc_create_vport_work_array(phba); if (vports != NULL) - for(i = 0; i <= phba->max_vpi; i++) { + for (i = 0; i <= phba->max_vports; i++) { /* * We could have no vports in array if unloading, so if * this happens then just use the pport @@ -2556,7 +2580,8 @@ lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport) * clear_la then don't send it. 
*/ if ((phba->link_state >= LPFC_CLEAR_LA) || - (vport->port_type != LPFC_PHYSICAL_PORT)) + (vport->port_type != LPFC_PHYSICAL_PORT) || + (phba->sli_rev == LPFC_SLI_REV4)) return; /* Link up discovery */ @@ -2585,7 +2610,7 @@ lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport) regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (regvpimbox) { - lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, regvpimbox); + lpfc_reg_vpi(vport, regvpimbox); regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi; regvpimbox->vport = vport; if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT) @@ -2645,7 +2670,8 @@ lpfc_disc_start(struct lpfc_vport *vport) */ if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && !(vport->fc_flag & FC_PT2PT) && - !(vport->fc_flag & FC_RSCN_MODE)) { + !(vport->fc_flag & FC_RSCN_MODE) && + (phba->sli_rev < LPFC_SLI_REV4)) { lpfc_issue_reg_vpi(phba, vport); return; } diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index 4168c7b498b8..a9d64cfbe5cc 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h @@ -471,6 +471,35 @@ struct serv_parm { /* Structure is in Big Endian format */ }; /* + * Virtual Fabric Tagging Header + */ +struct fc_vft_header { + uint32_t word0; +#define fc_vft_hdr_r_ctl_SHIFT 24 +#define fc_vft_hdr_r_ctl_MASK 0xFF +#define fc_vft_hdr_r_ctl_WORD word0 +#define fc_vft_hdr_ver_SHIFT 22 +#define fc_vft_hdr_ver_MASK 0x3 +#define fc_vft_hdr_ver_WORD word0 +#define fc_vft_hdr_type_SHIFT 18 +#define fc_vft_hdr_type_MASK 0xF +#define fc_vft_hdr_type_WORD word0 +#define fc_vft_hdr_e_SHIFT 16 +#define fc_vft_hdr_e_MASK 0x1 +#define fc_vft_hdr_e_WORD word0 +#define fc_vft_hdr_priority_SHIFT 13 +#define fc_vft_hdr_priority_MASK 0x7 +#define fc_vft_hdr_priority_WORD word0 +#define fc_vft_hdr_vf_id_SHIFT 1 +#define fc_vft_hdr_vf_id_MASK 0xFFF +#define fc_vft_hdr_vf_id_WORD word0 + uint32_t word1; +#define fc_vft_hdr_hopct_SHIFT 24 +#define fc_vft_hdr_hopct_MASK 0xFF +#define fc_vft_hdr_hopct_WORD word1 +}; + +/* * Extended Link Service LS_COMMAND codes (Payload Word 0) */ #ifdef __BIG_ENDIAN_BITFIELD @@ -1152,6 +1181,9 @@ typedef struct { #define PCI_DEVICE_ID_HORNET 0xfe05 #define PCI_DEVICE_ID_ZEPHYR_SCSP 0xfe11 #define PCI_DEVICE_ID_ZEPHYR_DCSP 0xfe12 +#define PCI_VENDOR_ID_SERVERENGINE 0x19a2 +#define PCI_DEVICE_ID_TIGERSHARK 0x0704 +#define PCI_DEVICE_ID_TIGERSHARK_S 0x0705 #define JEDEC_ID_ADDRESS 0x0080001c #define FIREFLY_JEDEC_ID 0x1ACC @@ -1342,15 +1374,21 @@ typedef struct { /* FireFly BIU registers */ #define MBX_READ_LA64 0x95 #define MBX_REG_VPI 0x96 #define MBX_UNREG_VPI 0x97 -#define MBX_REG_VNPID 0x96 -#define MBX_UNREG_VNPID 0x97 #define MBX_WRITE_WWN 0x98 #define MBX_SET_DEBUG 0x99 #define MBX_LOAD_EXP_ROM 0x9C - -#define MBX_MAX_CMDS 0x9D +#define MBX_SLI4_CONFIG 0x9B +#define MBX_SLI4_REQ_FTRS 0x9D +#define MBX_MAX_CMDS 0x9E +#define MBX_RESUME_RPI 0x9E #define MBX_SLI2_CMD_MASK 0x80 +#define MBX_REG_VFI 0x9F +#define MBX_REG_FCFI 0xA0 +#define MBX_UNREG_VFI 0xA1 +#define MBX_UNREG_FCFI 0xA2 +#define MBX_INIT_VFI 0xA3 +#define MBX_INIT_VPI 0xA4 /* IOCB Commands */ @@ -1440,6 +1478,16 @@ typedef struct { /* FireFly BIU registers */ #define CMD_IOCB_LOGENTRY_CN 0x94 #define CMD_IOCB_LOGENTRY_ASYNC_CN 0x96 +/* Unhandled Data Security SLI Commands */ +#define DSSCMD_IWRITE64_CR 0xD8 +#define DSSCMD_IWRITE64_CX 0xD9 +#define DSSCMD_IREAD64_CR 0xDA +#define DSSCMD_IREAD64_CX 0xDB +#define DSSCMD_INVALIDATE_DEK 0xDC +#define DSSCMD_SET_KEK 0xDD +#define DSSCMD_GET_KEK_ID 0xDE +#define DSSCMD_GEN_XFER 
0xDF + #define CMD_MAX_IOCB_CMD 0xE6 #define CMD_IOCB_MASK 0xff @@ -1466,6 +1514,7 @@ typedef struct { /* FireFly BIU registers */ #define MBXERR_BAD_RCV_LENGTH 14 #define MBXERR_DMA_ERROR 15 #define MBXERR_ERROR 16 +#define MBXERR_LINK_DOWN 0x33 #define MBX_NOT_FINISHED 255 #define MBX_BUSY 0xffffff /* Attempted cmd to busy Mailbox */ @@ -1504,32 +1553,6 @@ struct ulp_bde { #endif }; -struct ulp_bde64 { /* SLI-2 */ - union ULP_BDE_TUS { - uint32_t w; - struct { -#ifdef __BIG_ENDIAN_BITFIELD - uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED - VALUE !! */ - uint32_t bdeSize:24; /* Size of buffer (in bytes) */ -#else /* __LITTLE_ENDIAN_BITFIELD */ - uint32_t bdeSize:24; /* Size of buffer (in bytes) */ - uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED - VALUE !! */ -#endif -#define BUFF_TYPE_BDE_64 0x00 /* BDE (Host_resident) */ -#define BUFF_TYPE_BDE_IMMED 0x01 /* Immediate Data BDE */ -#define BUFF_TYPE_BDE_64P 0x02 /* BDE (Port-resident) */ -#define BUFF_TYPE_BDE_64I 0x08 /* Input BDE (Host-resident) */ -#define BUFF_TYPE_BDE_64IP 0x0A /* Input BDE (Port-resident) */ -#define BUFF_TYPE_BLP_64 0x40 /* BLP (Host-resident) */ -#define BUFF_TYPE_BLP_64P 0x42 /* BLP (Port-resident) */ - } f; - } tus; - uint32_t addrLow; - uint32_t addrHigh; -}; - typedef struct ULP_BDL { /* SLI-2 */ #ifdef __BIG_ENDIAN_BITFIELD uint32_t bdeFlags:8; /* BDL Flags */ @@ -2287,7 +2310,7 @@ typedef struct { uint32_t rsvd3; uint32_t rsvd4; uint32_t rsvd5; - uint16_t rsvd6; + uint16_t vfi; uint16_t vpi; #else /* __LITTLE_ENDIAN */ uint32_t rsvd1; @@ -2297,7 +2320,7 @@ typedef struct { uint32_t rsvd4; uint32_t rsvd5; uint16_t vpi; - uint16_t rsvd6; + uint16_t vfi; #endif } REG_VPI_VAR; @@ -2457,7 +2480,7 @@ typedef struct { uint32_t entry_index:16; #endif - uint32_t rsvd1; + uint32_t sli4_length; uint32_t word_cnt; uint32_t resp_offset; } DUMP_VAR; @@ -2470,9 +2493,32 @@ typedef struct { #define DMP_RSP_OFFSET 0x14 /* word 5 contains first word of rsp */ #define DMP_RSP_SIZE 0x6C /* maximum of 27 words of rsp data */ +#define DMP_REGION_VPORT 0x16 /* VPort info region */ +#define DMP_VPORT_REGION_SIZE 0x200 +#define DMP_MBOX_OFFSET_WORD 0x5 + +#define DMP_REGION_FCOEPARAM 0x17 /* fcoe param region */ +#define DMP_FCOEPARAM_RGN_SIZE 0x400 + #define WAKE_UP_PARMS_REGION_ID 4 #define WAKE_UP_PARMS_WORD_SIZE 15 +struct vport_rec { + uint8_t wwpn[8]; + uint8_t wwnn[8]; +}; + +#define VPORT_INFO_SIG 0x32324752 +#define VPORT_INFO_REV_MASK 0xff +#define VPORT_INFO_REV 0x1 +#define MAX_STATIC_VPORT_COUNT 16 +struct static_vport_info { + uint32_t signature; + uint32_t rev; + struct vport_rec vport_list[MAX_STATIC_VPORT_COUNT]; + uint32_t resvd[66]; +}; + /* Option rom version structure */ struct prog_id { #ifdef __BIG_ENDIAN_BITFIELD @@ -2697,7 +2743,9 @@ typedef struct { #endif #ifdef __BIG_ENDIAN_BITFIELD - uint32_t rsvd1 : 23; /* Reserved */ + uint32_t rsvd1 : 19; /* Reserved */ + uint32_t cdss : 1; /* Configure Data Security SLI */ + uint32_t rsvd2 : 3; /* Reserved */ uint32_t cbg : 1; /* Configure BlockGuard */ uint32_t cmv : 1; /* Configure Max VPIs */ uint32_t ccrp : 1; /* Config Command Ring Polling */ @@ -2717,10 +2765,14 @@ typedef struct { uint32_t ccrp : 1; /* Config Command Ring Polling */ uint32_t cmv : 1; /* Configure Max VPIs */ uint32_t cbg : 1; /* Configure BlockGuard */ - uint32_t rsvd1 : 23; /* Reserved */ + uint32_t rsvd2 : 3; /* Reserved */ + uint32_t cdss : 1; /* Configure Data Security SLI */ + uint32_t rsvd1 : 19; /* Reserved */ #endif #ifdef __BIG_ENDIAN_BITFIELD - uint32_t rsvd2 : 23; /* 
Reserved */ + uint32_t rsvd3 : 19; /* Reserved */ + uint32_t gdss : 1; /* Configure Data Security SLI */ + uint32_t rsvd4 : 3; /* Reserved */ uint32_t gbg : 1; /* Grant BlockGuard */ uint32_t gmv : 1; /* Grant Max VPIs */ uint32_t gcrp : 1; /* Grant Command Ring Polling */ @@ -2740,7 +2792,9 @@ typedef struct { uint32_t gcrp : 1; /* Grant Command Ring Polling */ uint32_t gmv : 1; /* Grant Max VPIs */ uint32_t gbg : 1; /* Grant BlockGuard */ - uint32_t rsvd2 : 23; /* Reserved */ + uint32_t rsvd4 : 3; /* Reserved */ + uint32_t gdss : 1; /* Configure Data Security SLI */ + uint32_t rsvd3 : 19; /* Reserved */ #endif #ifdef __BIG_ENDIAN_BITFIELD @@ -2753,20 +2807,20 @@ typedef struct { #ifdef __BIG_ENDIAN_BITFIELD uint32_t max_hbq : 16; /* Max HBQs Host expect to configure */ - uint32_t rsvd3 : 16; /* Max HBQs Host expect to configure */ + uint32_t rsvd5 : 16; /* Max HBQs Host expect to configure */ #else /* __LITTLE_ENDIAN */ - uint32_t rsvd3 : 16; /* Max HBQs Host expect to configure */ + uint32_t rsvd5 : 16; /* Max HBQs Host expect to configure */ uint32_t max_hbq : 16; /* Max HBQs Host expect to configure */ #endif - uint32_t rsvd4; /* Reserved */ + uint32_t rsvd6; /* Reserved */ #ifdef __BIG_ENDIAN_BITFIELD - uint32_t rsvd5 : 16; /* Reserved */ + uint32_t rsvd7 : 16; /* Reserved */ uint32_t max_vpi : 16; /* Max number of virt N-Ports */ #else /* __LITTLE_ENDIAN */ uint32_t max_vpi : 16; /* Max number of virt N-Ports */ - uint32_t rsvd5 : 16; /* Reserved */ + uint32_t rsvd7 : 16; /* Reserved */ #endif } CONFIG_PORT_VAR; @@ -3666,3 +3720,5 @@ lpfc_error_lost_link(IOCB_t *iocbp) #define MENLO_TIMEOUT 30 #define SETVAR_MLOMNT 0x103107 #define SETVAR_MLORST 0x103007 + +#define BPL_ALIGN_SZ 8 /* 8 byte alignment for bpl and mbufs */ diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h new file mode 100644 index 000000000000..39c34b3ad29d --- /dev/null +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -0,0 +1,2141 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2009 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. * + * www.emulex.com * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. * + *******************************************************************/ + +/* Macros to deal with bit fields. Each bit field must have 3 #defines + * associated with it (_SHIFT, _MASK, and _WORD). + * EG. 
For a bit field that is in the 7th bit of the "field4" field of a + * structure and is 2 bits in size the following #defines must exist: + * struct temp { + * uint32_t field1; + * uint32_t field2; + * uint32_t field3; + * uint32_t field4; + * #define example_bit_field_SHIFT 7 + * #define example_bit_field_MASK 0x03 + * #define example_bit_field_WORD field4 + * uint32_t field5; + * }; + * Then the macros below may be used to get or set the value of that field. + * EG. To get the value of the bit field from the above example: + * struct temp t1; + * value = bf_get(example_bit_field, &t1); + * And then to set that bit field: + * bf_set(example_bit_field, &t1, 2); + * Or clear that bit field: + * bf_set(example_bit_field, &t1, 0); + */ +#define bf_get(name, ptr) \ + (((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK) +#define bf_set(name, ptr, value) \ + ((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \ + ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT)))) + +struct dma_address { + uint32_t addr_lo; + uint32_t addr_hi; +}; + +#define LPFC_SLI4_BAR0 1 +#define LPFC_SLI4_BAR1 2 +#define LPFC_SLI4_BAR2 4 + +#define LPFC_SLI4_MBX_EMBED true +#define LPFC_SLI4_MBX_NEMBED false + +#define LPFC_SLI4_MB_WORD_COUNT 64 +#define LPFC_MAX_MQ_PAGE 8 +#define LPFC_MAX_WQ_PAGE 8 +#define LPFC_MAX_CQ_PAGE 4 +#define LPFC_MAX_EQ_PAGE 8 + +#define LPFC_VIR_FUNC_MAX 32 /* Maximum number of virtual functions */ +#define LPFC_PCI_FUNC_MAX 5 /* Maximum number of PCI functions */ +#define LPFC_VFR_PAGE_SIZE 0x1000 /* 4KB BAR2 per-VF register page size */ + +/* Define SLI4 Alignment requirements. */ +#define LPFC_ALIGN_16_BYTE 16 +#define LPFC_ALIGN_64_BYTE 64 + +/* Define SLI4 specific definitions. */ +#define LPFC_MQ_CQE_BYTE_OFFSET 256 +#define LPFC_MBX_CMD_HDR_LENGTH 16 +#define LPFC_MBX_ERROR_RANGE 0x4000 +#define LPFC_BMBX_BIT1_ADDR_HI 0x2 +#define LPFC_BMBX_BIT1_ADDR_LO 0 +#define LPFC_RPI_HDR_COUNT 64 +#define LPFC_HDR_TEMPLATE_SIZE 4096 +#define LPFC_RPI_ALLOC_ERROR 0xFFFF +#define LPFC_FCF_RECORD_WD_CNT 132 +#define LPFC_ENTIRE_FCF_DATABASE 0 +#define LPFC_DFLT_FCF_INDEX 0 + +/* Virtual function numbers */ +#define LPFC_VF0 0 +#define LPFC_VF1 1 +#define LPFC_VF2 2 +#define LPFC_VF3 3 +#define LPFC_VF4 4 +#define LPFC_VF5 5 +#define LPFC_VF6 6 +#define LPFC_VF7 7 +#define LPFC_VF8 8 +#define LPFC_VF9 9 +#define LPFC_VF10 10 +#define LPFC_VF11 11 +#define LPFC_VF12 12 +#define LPFC_VF13 13 +#define LPFC_VF14 14 +#define LPFC_VF15 15 +#define LPFC_VF16 16 +#define LPFC_VF17 17 +#define LPFC_VF18 18 +#define LPFC_VF19 19 +#define LPFC_VF20 20 +#define LPFC_VF21 21 +#define LPFC_VF22 22 +#define LPFC_VF23 23 +#define LPFC_VF24 24 +#define LPFC_VF25 25 +#define LPFC_VF26 26 +#define LPFC_VF27 27 +#define LPFC_VF28 28 +#define LPFC_VF29 29 +#define LPFC_VF30 30 +#define LPFC_VF31 31 + +/* PCI function numbers */ +#define LPFC_PCI_FUNC0 0 +#define LPFC_PCI_FUNC1 1 +#define LPFC_PCI_FUNC2 2 +#define LPFC_PCI_FUNC3 3 +#define LPFC_PCI_FUNC4 4 + +/* Active interrupt test count */ +#define LPFC_ACT_INTR_CNT 4 + +/* Delay Multiplier constant */ +#define LPFC_DMULT_CONST 651042 +#define LPFC_MIM_IMAX 636 +#define LPFC_FP_DEF_IMAX 10000 +#define LPFC_SP_DEF_IMAX 10000 + +struct ulp_bde64 { + union ULP_BDE_TUS { + uint32_t w; + struct { +#ifdef __BIG_ENDIAN_BITFIELD + uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED + VALUE !! 
*/ + uint32_t bdeSize:24; /* Size of buffer (in bytes) */ +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint32_t bdeSize:24; /* Size of buffer (in bytes) */ + uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED + VALUE !! */ +#endif +#define BUFF_TYPE_BDE_64 0x00 /* BDE (Host_resident) */ +#define BUFF_TYPE_BDE_IMMED 0x01 /* Immediate Data BDE */ +#define BUFF_TYPE_BDE_64P 0x02 /* BDE (Port-resident) */ +#define BUFF_TYPE_BDE_64I 0x08 /* Input BDE (Host-resident) */ +#define BUFF_TYPE_BDE_64IP 0x0A /* Input BDE (Port-resident) */ +#define BUFF_TYPE_BLP_64 0x40 /* BLP (Host-resident) */ +#define BUFF_TYPE_BLP_64P 0x42 /* BLP (Port-resident) */ + } f; + } tus; + uint32_t addrLow; + uint32_t addrHigh; +}; + +struct lpfc_sli4_flags { + uint32_t word0; +#define lpfc_fip_flag_SHIFT 0 +#define lpfc_fip_flag_MASK 0x00000001 +#define lpfc_fip_flag_WORD word0 +}; + +/* event queue entry structure */ +struct lpfc_eqe { + uint32_t word0; +#define lpfc_eqe_resource_id_SHIFT 16 +#define lpfc_eqe_resource_id_MASK 0x000000FF +#define lpfc_eqe_resource_id_WORD word0 +#define lpfc_eqe_minor_code_SHIFT 4 +#define lpfc_eqe_minor_code_MASK 0x00000FFF +#define lpfc_eqe_minor_code_WORD word0 +#define lpfc_eqe_major_code_SHIFT 1 +#define lpfc_eqe_major_code_MASK 0x00000007 +#define lpfc_eqe_major_code_WORD word0 +#define lpfc_eqe_valid_SHIFT 0 +#define lpfc_eqe_valid_MASK 0x00000001 +#define lpfc_eqe_valid_WORD word0 +}; + +/* completion queue entry structure (common fields for all cqe types) */ +struct lpfc_cqe { + uint32_t reserved0; + uint32_t reserved1; + uint32_t reserved2; + uint32_t word3; +#define lpfc_cqe_valid_SHIFT 31 +#define lpfc_cqe_valid_MASK 0x00000001 +#define lpfc_cqe_valid_WORD word3 +#define lpfc_cqe_code_SHIFT 16 +#define lpfc_cqe_code_MASK 0x000000FF +#define lpfc_cqe_code_WORD word3 +}; + +/* Completion Queue Entry Status Codes */ +#define CQE_STATUS_SUCCESS 0x0 +#define CQE_STATUS_FCP_RSP_FAILURE 0x1 +#define CQE_STATUS_REMOTE_STOP 0x2 +#define CQE_STATUS_LOCAL_REJECT 0x3 +#define CQE_STATUS_NPORT_RJT 0x4 +#define CQE_STATUS_FABRIC_RJT 0x5 +#define CQE_STATUS_NPORT_BSY 0x6 +#define CQE_STATUS_FABRIC_BSY 0x7 +#define CQE_STATUS_INTERMED_RSP 0x8 +#define CQE_STATUS_LS_RJT 0x9 +#define CQE_STATUS_CMD_REJECT 0xb +#define CQE_STATUS_FCP_TGT_LENCHECK 0xc +#define CQE_STATUS_NEED_BUFF_ENTRY 0xf + +/* Status returned by hardware (valid only if status = CQE_STATUS_SUCCESS). 
*/ +#define CQE_HW_STATUS_NO_ERR 0x0 +#define CQE_HW_STATUS_UNDERRUN 0x1 +#define CQE_HW_STATUS_OVERRUN 0x2 + +/* Completion Queue Entry Codes */ +#define CQE_CODE_COMPL_WQE 0x1 +#define CQE_CODE_RELEASE_WQE 0x2 +#define CQE_CODE_RECEIVE 0x4 +#define CQE_CODE_XRI_ABORTED 0x5 + +/* completion queue entry for wqe completions */ +struct lpfc_wcqe_complete { + uint32_t word0; +#define lpfc_wcqe_c_request_tag_SHIFT 16 +#define lpfc_wcqe_c_request_tag_MASK 0x0000FFFF +#define lpfc_wcqe_c_request_tag_WORD word0 +#define lpfc_wcqe_c_status_SHIFT 8 +#define lpfc_wcqe_c_status_MASK 0x000000FF +#define lpfc_wcqe_c_status_WORD word0 +#define lpfc_wcqe_c_hw_status_SHIFT 0 +#define lpfc_wcqe_c_hw_status_MASK 0x000000FF +#define lpfc_wcqe_c_hw_status_WORD word0 + uint32_t total_data_placed; + uint32_t parameter; + uint32_t word3; +#define lpfc_wcqe_c_valid_SHIFT lpfc_cqe_valid_SHIFT +#define lpfc_wcqe_c_valid_MASK lpfc_cqe_valid_MASK +#define lpfc_wcqe_c_valid_WORD lpfc_cqe_valid_WORD +#define lpfc_wcqe_c_xb_SHIFT 28 +#define lpfc_wcqe_c_xb_MASK 0x00000001 +#define lpfc_wcqe_c_xb_WORD word3 +#define lpfc_wcqe_c_pv_SHIFT 27 +#define lpfc_wcqe_c_pv_MASK 0x00000001 +#define lpfc_wcqe_c_pv_WORD word3 +#define lpfc_wcqe_c_priority_SHIFT 24 +#define lpfc_wcqe_c_priority_MASK 0x00000007 +#define lpfc_wcqe_c_priority_WORD word3 +#define lpfc_wcqe_c_code_SHIFT lpfc_cqe_code_SHIFT +#define lpfc_wcqe_c_code_MASK lpfc_cqe_code_MASK +#define lpfc_wcqe_c_code_WORD lpfc_cqe_code_WORD +}; + +/* completion queue entry for wqe release */ +struct lpfc_wcqe_release { + uint32_t reserved0; + uint32_t reserved1; + uint32_t word2; +#define lpfc_wcqe_r_wq_id_SHIFT 16 +#define lpfc_wcqe_r_wq_id_MASK 0x0000FFFF +#define lpfc_wcqe_r_wq_id_WORD word2 +#define lpfc_wcqe_r_wqe_index_SHIFT 0 +#define lpfc_wcqe_r_wqe_index_MASK 0x0000FFFF +#define lpfc_wcqe_r_wqe_index_WORD word2 + uint32_t word3; +#define lpfc_wcqe_r_valid_SHIFT lpfc_cqe_valid_SHIFT +#define lpfc_wcqe_r_valid_MASK lpfc_cqe_valid_MASK +#define lpfc_wcqe_r_valid_WORD lpfc_cqe_valid_WORD +#define lpfc_wcqe_r_code_SHIFT lpfc_cqe_code_SHIFT +#define lpfc_wcqe_r_code_MASK lpfc_cqe_code_MASK +#define lpfc_wcqe_r_code_WORD lpfc_cqe_code_WORD +}; + +struct sli4_wcqe_xri_aborted { + uint32_t word0; +#define lpfc_wcqe_xa_status_SHIFT 8 +#define lpfc_wcqe_xa_status_MASK 0x000000FF +#define lpfc_wcqe_xa_status_WORD word0 + uint32_t parameter; + uint32_t word2; +#define lpfc_wcqe_xa_remote_xid_SHIFT 16 +#define lpfc_wcqe_xa_remote_xid_MASK 0x0000FFFF +#define lpfc_wcqe_xa_remote_xid_WORD word2 +#define lpfc_wcqe_xa_xri_SHIFT 0 +#define lpfc_wcqe_xa_xri_MASK 0x0000FFFF +#define lpfc_wcqe_xa_xri_WORD word2 + uint32_t word3; +#define lpfc_wcqe_xa_valid_SHIFT lpfc_cqe_valid_SHIFT +#define lpfc_wcqe_xa_valid_MASK lpfc_cqe_valid_MASK +#define lpfc_wcqe_xa_valid_WORD lpfc_cqe_valid_WORD +#define lpfc_wcqe_xa_ia_SHIFT 30 +#define lpfc_wcqe_xa_ia_MASK 0x00000001 +#define lpfc_wcqe_xa_ia_WORD word3 +#define CQE_XRI_ABORTED_IA_REMOTE 0 +#define CQE_XRI_ABORTED_IA_LOCAL 1 +#define lpfc_wcqe_xa_br_SHIFT 29 +#define lpfc_wcqe_xa_br_MASK 0x00000001 +#define lpfc_wcqe_xa_br_WORD word3 +#define CQE_XRI_ABORTED_BR_BA_ACC 0 +#define CQE_XRI_ABORTED_BR_BA_RJT 1 +#define lpfc_wcqe_xa_eo_SHIFT 28 +#define lpfc_wcqe_xa_eo_MASK 0x00000001 +#define lpfc_wcqe_xa_eo_WORD word3 +#define CQE_XRI_ABORTED_EO_REMOTE 0 +#define CQE_XRI_ABORTED_EO_LOCAL 1 +#define lpfc_wcqe_xa_code_SHIFT lpfc_cqe_code_SHIFT +#define lpfc_wcqe_xa_code_MASK lpfc_cqe_code_MASK +#define lpfc_wcqe_xa_code_WORD lpfc_cqe_code_WORD +}; + 
+/* completion queue entry structure for rqe completion */ +struct lpfc_rcqe { + uint32_t word0; +#define lpfc_rcqe_bindex_SHIFT 16 +#define lpfc_rcqe_bindex_MASK 0x0000FFF +#define lpfc_rcqe_bindex_WORD word0 +#define lpfc_rcqe_status_SHIFT 8 +#define lpfc_rcqe_status_MASK 0x000000FF +#define lpfc_rcqe_status_WORD word0 +#define FC_STATUS_RQ_SUCCESS 0x10 /* Async receive successful */ +#define FC_STATUS_RQ_BUF_LEN_EXCEEDED 0x11 /* payload truncated */ +#define FC_STATUS_INSUFF_BUF_NEED_BUF 0x12 /* Insufficient buffers */ +#define FC_STATUS_INSUFF_BUF_FRM_DISC 0x13 /* Frame Discard */ + uint32_t reserved1; + uint32_t word2; +#define lpfc_rcqe_length_SHIFT 16 +#define lpfc_rcqe_length_MASK 0x0000FFFF +#define lpfc_rcqe_length_WORD word2 +#define lpfc_rcqe_rq_id_SHIFT 6 +#define lpfc_rcqe_rq_id_MASK 0x000003FF +#define lpfc_rcqe_rq_id_WORD word2 +#define lpfc_rcqe_fcf_id_SHIFT 0 +#define lpfc_rcqe_fcf_id_MASK 0x0000003F +#define lpfc_rcqe_fcf_id_WORD word2 + uint32_t word3; +#define lpfc_rcqe_valid_SHIFT lpfc_cqe_valid_SHIFT +#define lpfc_rcqe_valid_MASK lpfc_cqe_valid_MASK +#define lpfc_rcqe_valid_WORD lpfc_cqe_valid_WORD +#define lpfc_rcqe_port_SHIFT 30 +#define lpfc_rcqe_port_MASK 0x00000001 +#define lpfc_rcqe_port_WORD word3 +#define lpfc_rcqe_hdr_length_SHIFT 24 +#define lpfc_rcqe_hdr_length_MASK 0x0000001F +#define lpfc_rcqe_hdr_length_WORD word3 +#define lpfc_rcqe_code_SHIFT lpfc_cqe_code_SHIFT +#define lpfc_rcqe_code_MASK lpfc_cqe_code_MASK +#define lpfc_rcqe_code_WORD lpfc_cqe_code_WORD +#define lpfc_rcqe_eof_SHIFT 8 +#define lpfc_rcqe_eof_MASK 0x000000FF +#define lpfc_rcqe_eof_WORD word3 +#define FCOE_EOFn 0x41 +#define FCOE_EOFt 0x42 +#define FCOE_EOFni 0x49 +#define FCOE_EOFa 0x50 +#define lpfc_rcqe_sof_SHIFT 0 +#define lpfc_rcqe_sof_MASK 0x000000FF +#define lpfc_rcqe_sof_WORD word3 +#define FCOE_SOFi2 0x2d +#define FCOE_SOFi3 0x2e +#define FCOE_SOFn2 0x35 +#define FCOE_SOFn3 0x36 +}; + +struct lpfc_wqe_generic{ + struct ulp_bde64 bde; + uint32_t word3; + uint32_t word4; + uint32_t word5; + uint32_t word6; +#define lpfc_wqe_gen_context_SHIFT 16 +#define lpfc_wqe_gen_context_MASK 0x0000FFFF +#define lpfc_wqe_gen_context_WORD word6 +#define lpfc_wqe_gen_xri_SHIFT 0 +#define lpfc_wqe_gen_xri_MASK 0x0000FFFF +#define lpfc_wqe_gen_xri_WORD word6 + uint32_t word7; +#define lpfc_wqe_gen_lnk_SHIFT 23 +#define lpfc_wqe_gen_lnk_MASK 0x00000001 +#define lpfc_wqe_gen_lnk_WORD word7 +#define lpfc_wqe_gen_erp_SHIFT 22 +#define lpfc_wqe_gen_erp_MASK 0x00000001 +#define lpfc_wqe_gen_erp_WORD word7 +#define lpfc_wqe_gen_pu_SHIFT 20 +#define lpfc_wqe_gen_pu_MASK 0x00000003 +#define lpfc_wqe_gen_pu_WORD word7 +#define lpfc_wqe_gen_class_SHIFT 16 +#define lpfc_wqe_gen_class_MASK 0x00000007 +#define lpfc_wqe_gen_class_WORD word7 +#define lpfc_wqe_gen_command_SHIFT 8 +#define lpfc_wqe_gen_command_MASK 0x000000FF +#define lpfc_wqe_gen_command_WORD word7 +#define lpfc_wqe_gen_status_SHIFT 4 +#define lpfc_wqe_gen_status_MASK 0x0000000F +#define lpfc_wqe_gen_status_WORD word7 +#define lpfc_wqe_gen_ct_SHIFT 2 +#define lpfc_wqe_gen_ct_MASK 0x00000007 +#define lpfc_wqe_gen_ct_WORD word7 + uint32_t abort_tag; + uint32_t word9; +#define lpfc_wqe_gen_request_tag_SHIFT 0 +#define lpfc_wqe_gen_request_tag_MASK 0x0000FFFF +#define lpfc_wqe_gen_request_tag_WORD word9 + uint32_t word10; +#define lpfc_wqe_gen_ccp_SHIFT 24 +#define lpfc_wqe_gen_ccp_MASK 0x000000FF +#define lpfc_wqe_gen_ccp_WORD word10 +#define lpfc_wqe_gen_ccpe_SHIFT 23 +#define lpfc_wqe_gen_ccpe_MASK 0x00000001 +#define lpfc_wqe_gen_ccpe_WORD word10 
+#define lpfc_wqe_gen_pv_SHIFT 19 +#define lpfc_wqe_gen_pv_MASK 0x00000001 +#define lpfc_wqe_gen_pv_WORD word10 +#define lpfc_wqe_gen_pri_SHIFT 16 +#define lpfc_wqe_gen_pri_MASK 0x00000007 +#define lpfc_wqe_gen_pri_WORD word10 + uint32_t word11; +#define lpfc_wqe_gen_cq_id_SHIFT 16 +#define lpfc_wqe_gen_cq_id_MASK 0x000003FF +#define lpfc_wqe_gen_cq_id_WORD word11 +#define LPFC_WQE_CQ_ID_DEFAULT 0x3ff +#define lpfc_wqe_gen_wqec_SHIFT 7 +#define lpfc_wqe_gen_wqec_MASK 0x00000001 +#define lpfc_wqe_gen_wqec_WORD word11 +#define lpfc_wqe_gen_cmd_type_SHIFT 0 +#define lpfc_wqe_gen_cmd_type_MASK 0x0000000F +#define lpfc_wqe_gen_cmd_type_WORD word11 + uint32_t payload[4]; +}; + +struct lpfc_rqe { + uint32_t address_hi; + uint32_t address_lo; +}; + +/* buffer descriptors */ +struct lpfc_bde4 { + uint32_t addr_hi; + uint32_t addr_lo; + uint32_t word2; +#define lpfc_bde4_last_SHIFT 31 +#define lpfc_bde4_last_MASK 0x00000001 +#define lpfc_bde4_last_WORD word2 +#define lpfc_bde4_sge_offset_SHIFT 0 +#define lpfc_bde4_sge_offset_MASK 0x000003FF +#define lpfc_bde4_sge_offset_WORD word2 + uint32_t word3; +#define lpfc_bde4_length_SHIFT 0 +#define lpfc_bde4_length_MASK 0x000000FF +#define lpfc_bde4_length_WORD word3 +}; + +struct lpfc_register { + uint32_t word0; +}; + +#define LPFC_UERR_STATUS_HI 0x00A4 +#define LPFC_UERR_STATUS_LO 0x00A0 +#define LPFC_ONLINE0 0x00B0 +#define LPFC_ONLINE1 0x00B4 +#define LPFC_SCRATCHPAD 0x0058 + +/* BAR0 Registers */ +#define LPFC_HST_STATE 0x00AC +#define lpfc_hst_state_perr_SHIFT 31 +#define lpfc_hst_state_perr_MASK 0x1 +#define lpfc_hst_state_perr_WORD word0 +#define lpfc_hst_state_sfi_SHIFT 30 +#define lpfc_hst_state_sfi_MASK 0x1 +#define lpfc_hst_state_sfi_WORD word0 +#define lpfc_hst_state_nip_SHIFT 29 +#define lpfc_hst_state_nip_MASK 0x1 +#define lpfc_hst_state_nip_WORD word0 +#define lpfc_hst_state_ipc_SHIFT 28 +#define lpfc_hst_state_ipc_MASK 0x1 +#define lpfc_hst_state_ipc_WORD word0 +#define lpfc_hst_state_xrom_SHIFT 27 +#define lpfc_hst_state_xrom_MASK 0x1 +#define lpfc_hst_state_xrom_WORD word0 +#define lpfc_hst_state_dl_SHIFT 26 +#define lpfc_hst_state_dl_MASK 0x1 +#define lpfc_hst_state_dl_WORD word0 +#define lpfc_hst_state_port_status_SHIFT 0 +#define lpfc_hst_state_port_status_MASK 0xFFFF +#define lpfc_hst_state_port_status_WORD word0 + +#define LPFC_POST_STAGE_POWER_ON_RESET 0x0000 +#define LPFC_POST_STAGE_AWAITING_HOST_RDY 0x0001 +#define LPFC_POST_STAGE_HOST_RDY 0x0002 +#define LPFC_POST_STAGE_BE_RESET 0x0003 +#define LPFC_POST_STAGE_SEEPROM_CS_START 0x0100 +#define LPFC_POST_STAGE_SEEPROM_CS_DONE 0x0101 +#define LPFC_POST_STAGE_DDR_CONFIG_START 0x0200 +#define LPFC_POST_STAGE_DDR_CONFIG_DONE 0x0201 +#define LPFC_POST_STAGE_DDR_CALIBRATE_START 0x0300 +#define LPFC_POST_STAGE_DDR_CALIBRATE_DONE 0x0301 +#define LPFC_POST_STAGE_DDR_TEST_START 0x0400 +#define LPFC_POST_STAGE_DDR_TEST_DONE 0x0401 +#define LPFC_POST_STAGE_REDBOOT_INIT_START 0x0600 +#define LPFC_POST_STAGE_REDBOOT_INIT_DONE 0x0601 +#define LPFC_POST_STAGE_FW_IMAGE_LOAD_START 0x0700 +#define LPFC_POST_STAGE_FW_IMAGE_LOAD_DONE 0x0701 +#define LPFC_POST_STAGE_ARMFW_START 0x0800 +#define LPFC_POST_STAGE_DHCP_QUERY_START 0x0900 +#define LPFC_POST_STAGE_DHCP_QUERY_DONE 0x0901 +#define LPFC_POST_STAGE_BOOT_TARGET_DISCOVERY_START 0x0A00 +#define LPFC_POST_STAGE_BOOT_TARGET_DISCOVERY_DONE 0x0A01 +#define LPFC_POST_STAGE_RC_OPTION_SET 0x0B00 +#define LPFC_POST_STAGE_SWITCH_LINK 0x0B01 +#define LPFC_POST_STAGE_SEND_ICDS_MESSAGE 0x0B02 +#define LPFC_POST_STAGE_PERFROM_TFTP 0x0B03 +#define 
LPFC_POST_STAGE_PARSE_XML 0x0B04 +#define LPFC_POST_STAGE_DOWNLOAD_IMAGE 0x0B05 +#define LPFC_POST_STAGE_FLASH_IMAGE 0x0B06 +#define LPFC_POST_STAGE_RC_DONE 0x0B07 +#define LPFC_POST_STAGE_REBOOT_SYSTEM 0x0B08 +#define LPFC_POST_STAGE_MAC_ADDRESS 0x0C00 +#define LPFC_POST_STAGE_ARMFW_READY 0xC000 +#define LPFC_POST_STAGE_ARMFW_UE 0xF000 + +#define lpfc_scratchpad_slirev_SHIFT 4 +#define lpfc_scratchpad_slirev_MASK 0xF +#define lpfc_scratchpad_slirev_WORD word0 +#define lpfc_scratchpad_chiptype_SHIFT 8 +#define lpfc_scratchpad_chiptype_MASK 0xFF +#define lpfc_scratchpad_chiptype_WORD word0 +#define lpfc_scratchpad_featurelevel1_SHIFT 16 +#define lpfc_scratchpad_featurelevel1_MASK 0xFF +#define lpfc_scratchpad_featurelevel1_WORD word0 +#define lpfc_scratchpad_featurelevel2_SHIFT 24 +#define lpfc_scratchpad_featurelevel2_MASK 0xFF +#define lpfc_scratchpad_featurelevel2_WORD word0 + +/* BAR1 Registers */ +#define LPFC_IMR_MASK_ALL 0xFFFFFFFF +#define LPFC_ISCR_CLEAR_ALL 0xFFFFFFFF + +#define LPFC_HST_ISR0 0x0C18 +#define LPFC_HST_ISR1 0x0C1C +#define LPFC_HST_ISR2 0x0C20 +#define LPFC_HST_ISR3 0x0C24 +#define LPFC_HST_ISR4 0x0C28 + +#define LPFC_HST_IMR0 0x0C48 +#define LPFC_HST_IMR1 0x0C4C +#define LPFC_HST_IMR2 0x0C50 +#define LPFC_HST_IMR3 0x0C54 +#define LPFC_HST_IMR4 0x0C58 + +#define LPFC_HST_ISCR0 0x0C78 +#define LPFC_HST_ISCR1 0x0C7C +#define LPFC_HST_ISCR2 0x0C80 +#define LPFC_HST_ISCR3 0x0C84 +#define LPFC_HST_ISCR4 0x0C88 + +#define LPFC_SLI4_INTR0 BIT0 +#define LPFC_SLI4_INTR1 BIT1 +#define LPFC_SLI4_INTR2 BIT2 +#define LPFC_SLI4_INTR3 BIT3 +#define LPFC_SLI4_INTR4 BIT4 +#define LPFC_SLI4_INTR5 BIT5 +#define LPFC_SLI4_INTR6 BIT6 +#define LPFC_SLI4_INTR7 BIT7 +#define LPFC_SLI4_INTR8 BIT8 +#define LPFC_SLI4_INTR9 BIT9 +#define LPFC_SLI4_INTR10 BIT10 +#define LPFC_SLI4_INTR11 BIT11 +#define LPFC_SLI4_INTR12 BIT12 +#define LPFC_SLI4_INTR13 BIT13 +#define LPFC_SLI4_INTR14 BIT14 +#define LPFC_SLI4_INTR15 BIT15 +#define LPFC_SLI4_INTR16 BIT16 +#define LPFC_SLI4_INTR17 BIT17 +#define LPFC_SLI4_INTR18 BIT18 +#define LPFC_SLI4_INTR19 BIT19 +#define LPFC_SLI4_INTR20 BIT20 +#define LPFC_SLI4_INTR21 BIT21 +#define LPFC_SLI4_INTR22 BIT22 +#define LPFC_SLI4_INTR23 BIT23 +#define LPFC_SLI4_INTR24 BIT24 +#define LPFC_SLI4_INTR25 BIT25 +#define LPFC_SLI4_INTR26 BIT26 +#define LPFC_SLI4_INTR27 BIT27 +#define LPFC_SLI4_INTR28 BIT28 +#define LPFC_SLI4_INTR29 BIT29 +#define LPFC_SLI4_INTR30 BIT30 +#define LPFC_SLI4_INTR31 BIT31 + +/* BAR2 Registers */ +#define LPFC_RQ_DOORBELL 0x00A0 +#define lpfc_rq_doorbell_num_posted_SHIFT 16 +#define lpfc_rq_doorbell_num_posted_MASK 0x3FFF +#define lpfc_rq_doorbell_num_posted_WORD word0 +#define LPFC_RQ_POST_BATCH 8 /* RQEs to post at one time */ +#define lpfc_rq_doorbell_id_SHIFT 0 +#define lpfc_rq_doorbell_id_MASK 0x03FF +#define lpfc_rq_doorbell_id_WORD word0 + +#define LPFC_WQ_DOORBELL 0x0040 +#define lpfc_wq_doorbell_num_posted_SHIFT 24 +#define lpfc_wq_doorbell_num_posted_MASK 0x00FF +#define lpfc_wq_doorbell_num_posted_WORD word0 +#define lpfc_wq_doorbell_index_SHIFT 16 +#define lpfc_wq_doorbell_index_MASK 0x00FF +#define lpfc_wq_doorbell_index_WORD word0 +#define lpfc_wq_doorbell_id_SHIFT 0 +#define lpfc_wq_doorbell_id_MASK 0xFFFF +#define lpfc_wq_doorbell_id_WORD word0 + +#define LPFC_EQCQ_DOORBELL 0x0120 +#define lpfc_eqcq_doorbell_arm_SHIFT 29 +#define lpfc_eqcq_doorbell_arm_MASK 0x0001 +#define lpfc_eqcq_doorbell_arm_WORD word0 +#define lpfc_eqcq_doorbell_num_released_SHIFT 16 +#define lpfc_eqcq_doorbell_num_released_MASK 0x1FFF +#define 
lpfc_eqcq_doorbell_num_released_WORD word0 +#define lpfc_eqcq_doorbell_qt_SHIFT 10 +#define lpfc_eqcq_doorbell_qt_MASK 0x0001 +#define lpfc_eqcq_doorbell_qt_WORD word0 +#define LPFC_QUEUE_TYPE_COMPLETION 0 +#define LPFC_QUEUE_TYPE_EVENT 1 +#define lpfc_eqcq_doorbell_eqci_SHIFT 9 +#define lpfc_eqcq_doorbell_eqci_MASK 0x0001 +#define lpfc_eqcq_doorbell_eqci_WORD word0 +#define lpfc_eqcq_doorbell_cqid_SHIFT 0 +#define lpfc_eqcq_doorbell_cqid_MASK 0x03FF +#define lpfc_eqcq_doorbell_cqid_WORD word0 +#define lpfc_eqcq_doorbell_eqid_SHIFT 0 +#define lpfc_eqcq_doorbell_eqid_MASK 0x01FF +#define lpfc_eqcq_doorbell_eqid_WORD word0 + +#define LPFC_BMBX 0x0160 +#define lpfc_bmbx_addr_SHIFT 2 +#define lpfc_bmbx_addr_MASK 0x3FFFFFFF +#define lpfc_bmbx_addr_WORD word0 +#define lpfc_bmbx_hi_SHIFT 1 +#define lpfc_bmbx_hi_MASK 0x0001 +#define lpfc_bmbx_hi_WORD word0 +#define lpfc_bmbx_rdy_SHIFT 0 +#define lpfc_bmbx_rdy_MASK 0x0001 +#define lpfc_bmbx_rdy_WORD word0 + +#define LPFC_MQ_DOORBELL 0x0140 +#define lpfc_mq_doorbell_num_posted_SHIFT 16 +#define lpfc_mq_doorbell_num_posted_MASK 0x3FFF +#define lpfc_mq_doorbell_num_posted_WORD word0 +#define lpfc_mq_doorbell_id_SHIFT 0 +#define lpfc_mq_doorbell_id_MASK 0x03FF +#define lpfc_mq_doorbell_id_WORD word0 + +struct lpfc_sli4_cfg_mhdr { + uint32_t word1; +#define lpfc_mbox_hdr_emb_SHIFT 0 +#define lpfc_mbox_hdr_emb_MASK 0x00000001 +#define lpfc_mbox_hdr_emb_WORD word1 +#define lpfc_mbox_hdr_sge_cnt_SHIFT 3 +#define lpfc_mbox_hdr_sge_cnt_MASK 0x0000001F +#define lpfc_mbox_hdr_sge_cnt_WORD word1 + uint32_t payload_length; + uint32_t tag_lo; + uint32_t tag_hi; + uint32_t reserved5; +}; + +union lpfc_sli4_cfg_shdr { + struct { + uint32_t word6; +#define lpfc_mbox_hdr_opcode_SHIFT 0 +#define lpfc_mbox_hdr_opcode_MASK 0x000000FF +#define lpfc_mbox_hdr_opcode_WORD word6 +#define lpfc_mbox_hdr_subsystem_SHIFT 8 +#define lpfc_mbox_hdr_subsystem_MASK 0x000000FF +#define lpfc_mbox_hdr_subsystem_WORD word6 +#define lpfc_mbox_hdr_port_number_SHIFT 16 +#define lpfc_mbox_hdr_port_number_MASK 0x000000FF +#define lpfc_mbox_hdr_port_number_WORD word6 +#define lpfc_mbox_hdr_domain_SHIFT 24 +#define lpfc_mbox_hdr_domain_MASK 0x000000FF +#define lpfc_mbox_hdr_domain_WORD word6 + uint32_t timeout; + uint32_t request_length; + uint32_t reserved9; + } request; + struct { + uint32_t word6; +#define lpfc_mbox_hdr_opcode_SHIFT 0 +#define lpfc_mbox_hdr_opcode_MASK 0x000000FF +#define lpfc_mbox_hdr_opcode_WORD word6 +#define lpfc_mbox_hdr_subsystem_SHIFT 8 +#define lpfc_mbox_hdr_subsystem_MASK 0x000000FF +#define lpfc_mbox_hdr_subsystem_WORD word6 +#define lpfc_mbox_hdr_domain_SHIFT 24 +#define lpfc_mbox_hdr_domain_MASK 0x000000FF +#define lpfc_mbox_hdr_domain_WORD word6 + uint32_t word7; +#define lpfc_mbox_hdr_status_SHIFT 0 +#define lpfc_mbox_hdr_status_MASK 0x000000FF +#define lpfc_mbox_hdr_status_WORD word7 +#define lpfc_mbox_hdr_add_status_SHIFT 8 +#define lpfc_mbox_hdr_add_status_MASK 0x000000FF +#define lpfc_mbox_hdr_add_status_WORD word7 + uint32_t response_length; + uint32_t actual_response_length; + } response; +}; + +/* Mailbox structures */ +struct mbox_header { + struct lpfc_sli4_cfg_mhdr cfg_mhdr; + union lpfc_sli4_cfg_shdr cfg_shdr; +}; + +/* Subsystem Definitions */ +#define LPFC_MBOX_SUBSYSTEM_COMMON 0x1 +#define LPFC_MBOX_SUBSYSTEM_FCOE 0xC + +/* Device Specific Definitions */ + +/* The HOST ENDIAN defines are in Big Endian format. 
*/ +#define HOST_ENDIAN_LOW_WORD0 0xFF3412FF +#define HOST_ENDIAN_HIGH_WORD1 0xFF7856FF + +/* Common Opcodes */ +#define LPFC_MBOX_OPCODE_CQ_CREATE 0x0C +#define LPFC_MBOX_OPCODE_EQ_CREATE 0x0D +#define LPFC_MBOX_OPCODE_MQ_CREATE 0x15 +#define LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES 0x20 +#define LPFC_MBOX_OPCODE_NOP 0x21 +#define LPFC_MBOX_OPCODE_MQ_DESTROY 0x35 +#define LPFC_MBOX_OPCODE_CQ_DESTROY 0x36 +#define LPFC_MBOX_OPCODE_EQ_DESTROY 0x37 +#define LPFC_MBOX_OPCODE_FUNCTION_RESET 0x3D + +/* FCoE Opcodes */ +#define LPFC_MBOX_OPCODE_FCOE_WQ_CREATE 0x01 +#define LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY 0x02 +#define LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES 0x03 +#define LPFC_MBOX_OPCODE_FCOE_REMOVE_SGL_PAGES 0x04 +#define LPFC_MBOX_OPCODE_FCOE_RQ_CREATE 0x05 +#define LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY 0x06 +#define LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE 0x08 +#define LPFC_MBOX_OPCODE_FCOE_ADD_FCF 0x09 +#define LPFC_MBOX_OPCODE_FCOE_DELETE_FCF 0x0A +#define LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE 0x0B + +/* Mailbox command structures */ +struct eq_context { + uint32_t word0; +#define lpfc_eq_context_size_SHIFT 31 +#define lpfc_eq_context_size_MASK 0x00000001 +#define lpfc_eq_context_size_WORD word0 +#define LPFC_EQE_SIZE_4 0x0 +#define LPFC_EQE_SIZE_16 0x1 +#define lpfc_eq_context_valid_SHIFT 29 +#define lpfc_eq_context_valid_MASK 0x00000001 +#define lpfc_eq_context_valid_WORD word0 + uint32_t word1; +#define lpfc_eq_context_count_SHIFT 26 +#define lpfc_eq_context_count_MASK 0x00000003 +#define lpfc_eq_context_count_WORD word1 +#define LPFC_EQ_CNT_256 0x0 +#define LPFC_EQ_CNT_512 0x1 +#define LPFC_EQ_CNT_1024 0x2 +#define LPFC_EQ_CNT_2048 0x3 +#define LPFC_EQ_CNT_4096 0x4 + uint32_t word2; +#define lpfc_eq_context_delay_multi_SHIFT 13 +#define lpfc_eq_context_delay_multi_MASK 0x000003FF +#define lpfc_eq_context_delay_multi_WORD word2 + uint32_t reserved3; +}; + +struct sgl_page_pairs { + uint32_t sgl_pg0_addr_lo; + uint32_t sgl_pg0_addr_hi; + uint32_t sgl_pg1_addr_lo; + uint32_t sgl_pg1_addr_hi; +}; + +struct lpfc_mbx_post_sgl_pages { + struct mbox_header header; + uint32_t word0; +#define lpfc_post_sgl_pages_xri_SHIFT 0 +#define lpfc_post_sgl_pages_xri_MASK 0x0000FFFF +#define lpfc_post_sgl_pages_xri_WORD word0 +#define lpfc_post_sgl_pages_xricnt_SHIFT 16 +#define lpfc_post_sgl_pages_xricnt_MASK 0x0000FFFF +#define lpfc_post_sgl_pages_xricnt_WORD word0 + struct sgl_page_pairs sgl_pg_pairs[1]; +}; + +/* word0 of page-1 struct shares the same SHIFT/MASK/WORD defines as above */ +struct lpfc_mbx_post_uembed_sgl_page1 { + union lpfc_sli4_cfg_shdr cfg_shdr; + uint32_t word0; + struct sgl_page_pairs sgl_pg_pairs; +}; + +struct lpfc_mbx_sge { + uint32_t pa_lo; + uint32_t pa_hi; + uint32_t length; +}; + +struct lpfc_mbx_nembed_cmd { + struct lpfc_sli4_cfg_mhdr cfg_mhdr; +#define LPFC_SLI4_MBX_SGE_MAX_PAGES 19 + struct lpfc_mbx_sge sge[LPFC_SLI4_MBX_SGE_MAX_PAGES]; +}; + +struct lpfc_mbx_nembed_sge_virt { + void *addr[LPFC_SLI4_MBX_SGE_MAX_PAGES]; +}; + +struct lpfc_mbx_eq_create { + struct mbox_header header; + union { + struct { + uint32_t word0; +#define lpfc_mbx_eq_create_num_pages_SHIFT 0 +#define lpfc_mbx_eq_create_num_pages_MASK 0x0000FFFF +#define lpfc_mbx_eq_create_num_pages_WORD word0 + struct eq_context context; + struct dma_address page[LPFC_MAX_EQ_PAGE]; + } request; + struct { + uint32_t word0; +#define lpfc_mbx_eq_create_q_id_SHIFT 0 +#define lpfc_mbx_eq_create_q_id_MASK 0x0000FFFF +#define lpfc_mbx_eq_create_q_id_WORD word0 + } response; + } u; +}; + +struct lpfc_mbx_eq_destroy { + struct 
mbox_header header; + union { + struct { + uint32_t word0; +#define lpfc_mbx_eq_destroy_q_id_SHIFT 0 +#define lpfc_mbx_eq_destroy_q_id_MASK 0x0000FFFF +#define lpfc_mbx_eq_destroy_q_id_WORD word0 + } request; + struct { + uint32_t word0; + } response; + } u; +}; + +struct lpfc_mbx_nop { + struct mbox_header header; + uint32_t context[2]; +}; + +struct cq_context { + uint32_t word0; +#define lpfc_cq_context_event_SHIFT 31 +#define lpfc_cq_context_event_MASK 0x00000001 +#define lpfc_cq_context_event_WORD word0 +#define lpfc_cq_context_valid_SHIFT 29 +#define lpfc_cq_context_valid_MASK 0x00000001 +#define lpfc_cq_context_valid_WORD word0 +#define lpfc_cq_context_count_SHIFT 27 +#define lpfc_cq_context_count_MASK 0x00000003 +#define lpfc_cq_context_count_WORD word0 +#define LPFC_CQ_CNT_256 0x0 +#define LPFC_CQ_CNT_512 0x1 +#define LPFC_CQ_CNT_1024 0x2 + uint32_t word1; +#define lpfc_cq_eq_id_SHIFT 22 +#define lpfc_cq_eq_id_MASK 0x000000FF +#define lpfc_cq_eq_id_WORD word1 + uint32_t reserved0; + uint32_t reserved1; +}; + +struct lpfc_mbx_cq_create { + struct mbox_header header; + union { + struct { + uint32_t word0; +#define lpfc_mbx_cq_create_num_pages_SHIFT 0 +#define lpfc_mbx_cq_create_num_pages_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_num_pages_WORD word0 + struct cq_context context; + struct dma_address page[LPFC_MAX_CQ_PAGE]; + } request; + struct { + uint32_t word0; +#define lpfc_mbx_cq_create_q_id_SHIFT 0 +#define lpfc_mbx_cq_create_q_id_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_q_id_WORD word0 + } response; + } u; +}; + +struct lpfc_mbx_cq_destroy { + struct mbox_header header; + union { + struct { + uint32_t word0; +#define lpfc_mbx_cq_destroy_q_id_SHIFT 0 +#define lpfc_mbx_cq_destroy_q_id_MASK 0x0000FFFF +#define lpfc_mbx_cq_destroy_q_id_WORD word0 + } request; + struct { + uint32_t word0; + } response; + } u; +}; + +struct wq_context { + uint32_t reserved0; + uint32_t reserved1; + uint32_t reserved2; + uint32_t reserved3; +}; + +struct lpfc_mbx_wq_create { + struct mbox_header header; + union { + struct { + uint32_t word0; +#define lpfc_mbx_wq_create_num_pages_SHIFT 0 +#define lpfc_mbx_wq_create_num_pages_MASK 0x0000FFFF +#define lpfc_mbx_wq_create_num_pages_WORD word0 +#define lpfc_mbx_wq_create_cq_id_SHIFT 16 +#define lpfc_mbx_wq_create_cq_id_MASK 0x0000FFFF +#define lpfc_mbx_wq_create_cq_id_WORD word0 + struct dma_address page[LPFC_MAX_WQ_PAGE]; + } request; + struct { + uint32_t word0; +#define lpfc_mbx_wq_create_q_id_SHIFT 0 +#define lpfc_mbx_wq_create_q_id_MASK 0x0000FFFF +#define lpfc_mbx_wq_create_q_id_WORD word0 + } response; + } u; +}; + +struct lpfc_mbx_wq_destroy { + struct mbox_header header; + union { + struct { + uint32_t word0; +#define lpfc_mbx_wq_destroy_q_id_SHIFT 0 +#define lpfc_mbx_wq_destroy_q_id_MASK 0x0000FFFF +#define lpfc_mbx_wq_destroy_q_id_WORD word0 + } request; + struct { + uint32_t word0; + } response; + } u; +}; + +#define LPFC_HDR_BUF_SIZE 128 +#define LPFC_DATA_BUF_SIZE 4096 +struct rq_context { + uint32_t word0; +#define lpfc_rq_context_rq_size_SHIFT 16 +#define lpfc_rq_context_rq_size_MASK 0x0000000F +#define lpfc_rq_context_rq_size_WORD word0 +#define LPFC_RQ_RING_SIZE_512 9 /* 512 entries */ +#define LPFC_RQ_RING_SIZE_1024 10 /* 1024 entries */ +#define LPFC_RQ_RING_SIZE_2048 11 /* 2048 entries */ +#define LPFC_RQ_RING_SIZE_4096 12 /* 4096 entries */ + uint32_t reserved1; + uint32_t word2; +#define lpfc_rq_context_cq_id_SHIFT 16 +#define lpfc_rq_context_cq_id_MASK 0x000003FF +#define lpfc_rq_context_cq_id_WORD word2 +#define 
lpfc_rq_context_buf_size_SHIFT 0 +#define lpfc_rq_context_buf_size_MASK 0x0000FFFF +#define lpfc_rq_context_buf_size_WORD word2 + uint32_t reserved3; +}; + +struct lpfc_mbx_rq_create { + struct mbox_header header; + union { + struct { + uint32_t word0; +#define lpfc_mbx_rq_create_num_pages_SHIFT 0 +#define lpfc_mbx_rq_create_num_pages_MASK 0x0000FFFF +#define lpfc_mbx_rq_create_num_pages_WORD word0 + struct rq_context context; + struct dma_address page[LPFC_MAX_WQ_PAGE]; + } request; + struct { + uint32_t word0; +#define lpfc_mbx_rq_create_q_id_SHIFT 0 +#define lpfc_mbx_rq_create_q_id_MASK 0x0000FFFF +#define lpfc_mbx_rq_create_q_id_WORD word0 + } response; + } u; +}; + +struct lpfc_mbx_rq_destroy { + struct mbox_header header; + union { + struct { + uint32_t word0; +#define lpfc_mbx_rq_destroy_q_id_SHIFT 0 +#define lpfc_mbx_rq_destroy_q_id_MASK 0x0000FFFF +#define lpfc_mbx_rq_destroy_q_id_WORD word0 + } request; + struct { + uint32_t word0; + } response; + } u; +}; + +struct mq_context { + uint32_t word0; +#define lpfc_mq_context_cq_id_SHIFT 22 +#define lpfc_mq_context_cq_id_MASK 0x000003FF +#define lpfc_mq_context_cq_id_WORD word0 +#define lpfc_mq_context_count_SHIFT 16 +#define lpfc_mq_context_count_MASK 0x0000000F +#define lpfc_mq_context_count_WORD word0 +#define LPFC_MQ_CNT_16 0x5 +#define LPFC_MQ_CNT_32 0x6 +#define LPFC_MQ_CNT_64 0x7 +#define LPFC_MQ_CNT_128 0x8 + uint32_t word1; +#define lpfc_mq_context_valid_SHIFT 31 +#define lpfc_mq_context_valid_MASK 0x00000001 +#define lpfc_mq_context_valid_WORD word1 + uint32_t reserved2; + uint32_t reserved3; +}; + +struct lpfc_mbx_mq_create { + struct mbox_header header; + union { + struct { + uint32_t word0; +#define lpfc_mbx_mq_create_num_pages_SHIFT 0 +#define lpfc_mbx_mq_create_num_pages_MASK 0x0000FFFF +#define lpfc_mbx_mq_create_num_pages_WORD word0 + struct mq_context context; + struct dma_address page[LPFC_MAX_MQ_PAGE]; + } request; + struct { + uint32_t word0; +#define lpfc_mbx_mq_create_q_id_SHIFT 0 +#define lpfc_mbx_mq_create_q_id_MASK 0x0000FFFF +#define lpfc_mbx_mq_create_q_id_WORD word0 + } response; + } u; +}; + +struct lpfc_mbx_mq_destroy { + struct mbox_header header; + union { + struct { + uint32_t word0; +#define lpfc_mbx_mq_destroy_q_id_SHIFT 0 +#define lpfc_mbx_mq_destroy_q_id_MASK 0x0000FFFF +#define lpfc_mbx_mq_destroy_q_id_WORD word0 + } request; + struct { + uint32_t word0; + } response; + } u; +}; + +struct lpfc_mbx_post_hdr_tmpl { + struct mbox_header header; + uint32_t word10; +#define lpfc_mbx_post_hdr_tmpl_rpi_offset_SHIFT 0 +#define lpfc_mbx_post_hdr_tmpl_rpi_offset_MASK 0x0000FFFF +#define lpfc_mbx_post_hdr_tmpl_rpi_offset_WORD word10 +#define lpfc_mbx_post_hdr_tmpl_page_cnt_SHIFT 16 +#define lpfc_mbx_post_hdr_tmpl_page_cnt_MASK 0x0000FFFF +#define lpfc_mbx_post_hdr_tmpl_page_cnt_WORD word10 + uint32_t rpi_paddr_lo; + uint32_t rpi_paddr_hi; +}; + +struct sli4_sge { /* SLI-4 */ + uint32_t addr_hi; + uint32_t addr_lo; + + uint32_t word2; +#define lpfc_sli4_sge_offset_SHIFT 0 /* Offset of buffer - Not used*/ +#define lpfc_sli4_sge_offset_MASK 0x00FFFFFF +#define lpfc_sli4_sge_offset_WORD word2 +#define lpfc_sli4_sge_last_SHIFT 31 /* Last SEG in the SGL sets + this flag !! 
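For example, a routine that walks a DMA-mapped scatterlist to build an SGL sets that flag only on the final entry. A sketch, assuming the driver's usual putPaddrHigh()/putPaddrLow() address helpers and the length accessor defined just below (the sketch_* name is illustrative):

	static void sketch_fill_sgl(struct sli4_sge *sgl, struct scatterlist *sg,
				    int num_sge)
	{
		dma_addr_t physaddr;
		int i;

		for (i = 0; i < num_sge; i++, sg = sg_next(sg)) {
			physaddr = sg_dma_address(sg);
			sgl[i].addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
			sgl[i].addr_lo = cpu_to_le32(putPaddrLow(physaddr));
			sgl[i].word2 = 0;
			/* only the final SGE carries the "last" flag */
			bf_set(lpfc_sli4_sge_last, &sgl[i],
			       (i == num_sge - 1) ? 1 : 0);
			sgl[i].word2 = cpu_to_le32(sgl[i].word2);
			sgl[i].word3 = 0;
			bf_set(lpfc_sli4_sge_len, &sgl[i], sg_dma_len(sg));
			sgl[i].word3 = cpu_to_le32(sgl[i].word3);
		}
	}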
*/ +#define lpfc_sli4_sge_last_MASK 0x00000001 +#define lpfc_sli4_sge_last_WORD word2 + uint32_t word3; +#define lpfc_sli4_sge_len_SHIFT 0 +#define lpfc_sli4_sge_len_MASK 0x0001FFFF +#define lpfc_sli4_sge_len_WORD word3 +}; + +struct fcf_record { + uint32_t max_rcv_size; + uint32_t fka_adv_period; + uint32_t fip_priority; + uint32_t word3; +#define lpfc_fcf_record_mac_0_SHIFT 0 +#define lpfc_fcf_record_mac_0_MASK 0x000000FF +#define lpfc_fcf_record_mac_0_WORD word3 +#define lpfc_fcf_record_mac_1_SHIFT 8 +#define lpfc_fcf_record_mac_1_MASK 0x000000FF +#define lpfc_fcf_record_mac_1_WORD word3 +#define lpfc_fcf_record_mac_2_SHIFT 16 +#define lpfc_fcf_record_mac_2_MASK 0x000000FF +#define lpfc_fcf_record_mac_2_WORD word3 +#define lpfc_fcf_record_mac_3_SHIFT 24 +#define lpfc_fcf_record_mac_3_MASK 0x000000FF +#define lpfc_fcf_record_mac_3_WORD word3 + uint32_t word4; +#define lpfc_fcf_record_mac_4_SHIFT 0 +#define lpfc_fcf_record_mac_4_MASK 0x000000FF +#define lpfc_fcf_record_mac_4_WORD word4 +#define lpfc_fcf_record_mac_5_SHIFT 8 +#define lpfc_fcf_record_mac_5_MASK 0x000000FF +#define lpfc_fcf_record_mac_5_WORD word4 +#define lpfc_fcf_record_fcf_avail_SHIFT 16 +#define lpfc_fcf_record_fcf_avail_MASK 0x000000FF +#define lpfc_fcf_record_fcf_avail_WORD word4 +#define lpfc_fcf_record_mac_addr_prov_SHIFT 24 +#define lpfc_fcf_record_mac_addr_prov_MASK 0x000000FF +#define lpfc_fcf_record_mac_addr_prov_WORD word4 +#define LPFC_FCF_FPMA 1 /* Fabric Provided MAC Address */ +#define LPFC_FCF_SPMA 2 /* Server Provided MAC Address */ + uint32_t word5; +#define lpfc_fcf_record_fab_name_0_SHIFT 0 +#define lpfc_fcf_record_fab_name_0_MASK 0x000000FF +#define lpfc_fcf_record_fab_name_0_WORD word5 +#define lpfc_fcf_record_fab_name_1_SHIFT 8 +#define lpfc_fcf_record_fab_name_1_MASK 0x000000FF +#define lpfc_fcf_record_fab_name_1_WORD word5 +#define lpfc_fcf_record_fab_name_2_SHIFT 16 +#define lpfc_fcf_record_fab_name_2_MASK 0x000000FF +#define lpfc_fcf_record_fab_name_2_WORD word5 +#define lpfc_fcf_record_fab_name_3_SHIFT 24 +#define lpfc_fcf_record_fab_name_3_MASK 0x000000FF +#define lpfc_fcf_record_fab_name_3_WORD word5 + uint32_t word6; +#define lpfc_fcf_record_fab_name_4_SHIFT 0 +#define lpfc_fcf_record_fab_name_4_MASK 0x000000FF +#define lpfc_fcf_record_fab_name_4_WORD word6 +#define lpfc_fcf_record_fab_name_5_SHIFT 8 +#define lpfc_fcf_record_fab_name_5_MASK 0x000000FF +#define lpfc_fcf_record_fab_name_5_WORD word6 +#define lpfc_fcf_record_fab_name_6_SHIFT 16 +#define lpfc_fcf_record_fab_name_6_MASK 0x000000FF +#define lpfc_fcf_record_fab_name_6_WORD word6 +#define lpfc_fcf_record_fab_name_7_SHIFT 24 +#define lpfc_fcf_record_fab_name_7_MASK 0x000000FF +#define lpfc_fcf_record_fab_name_7_WORD word6 + uint32_t word7; +#define lpfc_fcf_record_fc_map_0_SHIFT 0 +#define lpfc_fcf_record_fc_map_0_MASK 0x000000FF +#define lpfc_fcf_record_fc_map_0_WORD word7 +#define lpfc_fcf_record_fc_map_1_SHIFT 8 +#define lpfc_fcf_record_fc_map_1_MASK 0x000000FF +#define lpfc_fcf_record_fc_map_1_WORD word7 +#define lpfc_fcf_record_fc_map_2_SHIFT 16 +#define lpfc_fcf_record_fc_map_2_MASK 0x000000FF +#define lpfc_fcf_record_fc_map_2_WORD word7 +#define lpfc_fcf_record_fcf_valid_SHIFT 24 +#define lpfc_fcf_record_fcf_valid_MASK 0x000000FF +#define lpfc_fcf_record_fcf_valid_WORD word7 + uint32_t word8; +#define lpfc_fcf_record_fcf_index_SHIFT 0 +#define lpfc_fcf_record_fcf_index_MASK 0x0000FFFF +#define lpfc_fcf_record_fcf_index_WORD word8 +#define lpfc_fcf_record_fcf_state_SHIFT 16 +#define lpfc_fcf_record_fcf_state_MASK 0x0000FFFF
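The FCF record above exposes the fabric MAC address, fabric name, and FC map one byte per bitfield, so consumers reassemble them with bf_get(). A sketch for the MAC address (function and buffer names are illustrative, not from this hunk):

	static void sketch_fcf_record_to_mac(struct fcf_record *fcf_rec,
					     uint8_t mac[6])
	{
		mac[0] = bf_get(lpfc_fcf_record_mac_0, fcf_rec);
		mac[1] = bf_get(lpfc_fcf_record_mac_1, fcf_rec);
		mac[2] = bf_get(lpfc_fcf_record_mac_2, fcf_rec);
		mac[3] = bf_get(lpfc_fcf_record_mac_3, fcf_rec);
		mac[4] = bf_get(lpfc_fcf_record_mac_4, fcf_rec);
		mac[5] = bf_get(lpfc_fcf_record_mac_5, fcf_rec);
	}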
+#define lpfc_fcf_record_fcf_state_WORD word8 + uint8_t vlan_bitmap[512]; +}; + +struct lpfc_mbx_read_fcf_tbl { + union lpfc_sli4_cfg_shdr cfg_shdr; + union { + struct { + uint32_t word10; +#define lpfc_mbx_read_fcf_tbl_indx_SHIFT 0 +#define lpfc_mbx_read_fcf_tbl_indx_MASK 0x0000FFFF +#define lpfc_mbx_read_fcf_tbl_indx_WORD word10 + } request; + struct { + uint32_t eventag; + } response; + } u; + uint32_t word11; +#define lpfc_mbx_read_fcf_tbl_nxt_vindx_SHIFT 0 +#define lpfc_mbx_read_fcf_tbl_nxt_vindx_MASK 0x0000FFFF +#define lpfc_mbx_read_fcf_tbl_nxt_vindx_WORD word11 +}; + +struct lpfc_mbx_add_fcf_tbl_entry { + union lpfc_sli4_cfg_shdr cfg_shdr; + uint32_t word10; +#define lpfc_mbx_add_fcf_tbl_fcfi_SHIFT 0 +#define lpfc_mbx_add_fcf_tbl_fcfi_MASK 0x0000FFFF +#define lpfc_mbx_add_fcf_tbl_fcfi_WORD word10 + struct lpfc_mbx_sge fcf_sge; +}; + +struct lpfc_mbx_del_fcf_tbl_entry { + struct mbox_header header; + uint32_t word10; +#define lpfc_mbx_del_fcf_tbl_count_SHIFT 0 +#define lpfc_mbx_del_fcf_tbl_count_MASK 0x0000FFFF +#define lpfc_mbx_del_fcf_tbl_count_WORD word10 +#define lpfc_mbx_del_fcf_tbl_index_SHIFT 16 +#define lpfc_mbx_del_fcf_tbl_index_MASK 0x0000FFFF +#define lpfc_mbx_del_fcf_tbl_index_WORD word10 +}; + +/* Status field for embedded SLI_CONFIG mailbox command */ +#define STATUS_SUCCESS 0x0 +#define STATUS_FAILED 0x1 +#define STATUS_ILLEGAL_REQUEST 0x2 +#define STATUS_ILLEGAL_FIELD 0x3 +#define STATUS_INSUFFICIENT_BUFFER 0x4 +#define STATUS_UNAUTHORIZED_REQUEST 0x5 +#define STATUS_FLASHROM_SAVE_FAILED 0x17 +#define STATUS_FLASHROM_RESTORE_FAILED 0x18 +#define STATUS_ICCBINDEX_ALLOC_FAILED 0x1a +#define STATUS_IOCTLHANDLE_ALLOC_FAILED 0x1b +#define STATUS_INVALID_PHY_ADDR_FROM_OSM 0x1c +#define STATUS_INVALID_PHY_ADDR_LEN_FROM_OSM 0x1d +#define STATUS_ASSERT_FAILED 0x1e +#define STATUS_INVALID_SESSION 0x1f +#define STATUS_INVALID_CONNECTION 0x20 +#define STATUS_BTL_PATH_EXCEEDS_OSM_LIMIT 0x21 +#define STATUS_BTL_NO_FREE_SLOT_PATH 0x24 +#define STATUS_BTL_NO_FREE_SLOT_TGTID 0x25 +#define STATUS_OSM_DEVSLOT_NOT_FOUND 0x26 +#define STATUS_FLASHROM_READ_FAILED 0x27 +#define STATUS_POLL_IOCTL_TIMEOUT 0x28 +#define STATUS_ERROR_ACITMAIN 0x2a +#define STATUS_REBOOT_REQUIRED 0x2c +#define STATUS_FCF_IN_USE 0x3a + +struct lpfc_mbx_sli4_config { + struct mbox_header header; +}; + +struct lpfc_mbx_init_vfi { + uint32_t word1; +#define lpfc_init_vfi_vr_SHIFT 31 +#define lpfc_init_vfi_vr_MASK 0x00000001 +#define lpfc_init_vfi_vr_WORD word1 +#define lpfc_init_vfi_vt_SHIFT 30 +#define lpfc_init_vfi_vt_MASK 0x00000001 +#define lpfc_init_vfi_vt_WORD word1 +#define lpfc_init_vfi_vf_SHIFT 29 +#define lpfc_init_vfi_vf_MASK 0x00000001 +#define lpfc_init_vfi_vf_WORD word1 +#define lpfc_init_vfi_vfi_SHIFT 0 +#define lpfc_init_vfi_vfi_MASK 0x0000FFFF +#define lpfc_init_vfi_vfi_WORD word1 + uint32_t word2; +#define lpfc_init_vfi_fcfi_SHIFT 0 +#define lpfc_init_vfi_fcfi_MASK 0x0000FFFF +#define lpfc_init_vfi_fcfi_WORD word2 + uint32_t word3; +#define lpfc_init_vfi_pri_SHIFT 13 +#define lpfc_init_vfi_pri_MASK 0x00000007 +#define lpfc_init_vfi_pri_WORD word3 +#define lpfc_init_vfi_vf_id_SHIFT 1 +#define lpfc_init_vfi_vf_id_MASK 0x00000FFF +#define lpfc_init_vfi_vf_id_WORD word3 + uint32_t word4; +#define lpfc_init_vfi_hop_count_SHIFT 24 +#define lpfc_init_vfi_hop_count_MASK 0x000000FF +#define lpfc_init_vfi_hop_count_WORD word4 +}; + +struct lpfc_mbx_reg_vfi { + uint32_t word1; +#define lpfc_reg_vfi_vp_SHIFT 28 +#define lpfc_reg_vfi_vp_MASK 0x00000001 +#define lpfc_reg_vfi_vp_WORD word1 +#define 
lpfc_reg_vfi_vfi_SHIFT 0 +#define lpfc_reg_vfi_vfi_MASK 0x0000FFFF +#define lpfc_reg_vfi_vfi_WORD word1 + uint32_t word2; +#define lpfc_reg_vfi_vpi_SHIFT 16 +#define lpfc_reg_vfi_vpi_MASK 0x0000FFFF +#define lpfc_reg_vfi_vpi_WORD word2 +#define lpfc_reg_vfi_fcfi_SHIFT 0 +#define lpfc_reg_vfi_fcfi_MASK 0x0000FFFF +#define lpfc_reg_vfi_fcfi_WORD word2 + uint32_t word3_rsvd; + uint32_t word4_rsvd; + struct ulp_bde64 bde; + uint32_t word8_rsvd; + uint32_t word9_rsvd; + uint32_t word10; +#define lpfc_reg_vfi_nport_id_SHIFT 0 +#define lpfc_reg_vfi_nport_id_MASK 0x00FFFFFF +#define lpfc_reg_vfi_nport_id_WORD word10 +}; + +struct lpfc_mbx_init_vpi { + uint32_t word1; +#define lpfc_init_vpi_vfi_SHIFT 16 +#define lpfc_init_vpi_vfi_MASK 0x0000FFFF +#define lpfc_init_vpi_vfi_WORD word1 +#define lpfc_init_vpi_vpi_SHIFT 0 +#define lpfc_init_vpi_vpi_MASK 0x0000FFFF +#define lpfc_init_vpi_vpi_WORD word1 +}; + +struct lpfc_mbx_read_vpi { + uint32_t word1_rsvd; + uint32_t word2; +#define lpfc_mbx_read_vpi_vnportid_SHIFT 0 +#define lpfc_mbx_read_vpi_vnportid_MASK 0x00FFFFFF +#define lpfc_mbx_read_vpi_vnportid_WORD word2 + uint32_t word3_rsvd; + uint32_t word4; +#define lpfc_mbx_read_vpi_acq_alpa_SHIFT 0 +#define lpfc_mbx_read_vpi_acq_alpa_MASK 0x000000FF +#define lpfc_mbx_read_vpi_acq_alpa_WORD word4 +#define lpfc_mbx_read_vpi_pb_SHIFT 15 +#define lpfc_mbx_read_vpi_pb_MASK 0x00000001 +#define lpfc_mbx_read_vpi_pb_WORD word4 +#define lpfc_mbx_read_vpi_spec_alpa_SHIFT 16 +#define lpfc_mbx_read_vpi_spec_alpa_MASK 0x000000FF +#define lpfc_mbx_read_vpi_spec_alpa_WORD word4 +#define lpfc_mbx_read_vpi_ns_SHIFT 30 +#define lpfc_mbx_read_vpi_ns_MASK 0x00000001 +#define lpfc_mbx_read_vpi_ns_WORD word4 +#define lpfc_mbx_read_vpi_hl_SHIFT 31 +#define lpfc_mbx_read_vpi_hl_MASK 0x00000001 +#define lpfc_mbx_read_vpi_hl_WORD word4 + uint32_t word5_rsvd; + uint32_t word6; +#define lpfc_mbx_read_vpi_vpi_SHIFT 0 +#define lpfc_mbx_read_vpi_vpi_MASK 0x0000FFFF +#define lpfc_mbx_read_vpi_vpi_WORD word6 + uint32_t word7; +#define lpfc_mbx_read_vpi_mac_0_SHIFT 0 +#define lpfc_mbx_read_vpi_mac_0_MASK 0x000000FF +#define lpfc_mbx_read_vpi_mac_0_WORD word7 +#define lpfc_mbx_read_vpi_mac_1_SHIFT 8 +#define lpfc_mbx_read_vpi_mac_1_MASK 0x000000FF +#define lpfc_mbx_read_vpi_mac_1_WORD word7 +#define lpfc_mbx_read_vpi_mac_2_SHIFT 16 +#define lpfc_mbx_read_vpi_mac_2_MASK 0x000000FF +#define lpfc_mbx_read_vpi_mac_2_WORD word7 +#define lpfc_mbx_read_vpi_mac_3_SHIFT 24 +#define lpfc_mbx_read_vpi_mac_3_MASK 0x000000FF +#define lpfc_mbx_read_vpi_mac_3_WORD word7 + uint32_t word8; +#define lpfc_mbx_read_vpi_mac_4_SHIFT 0 +#define lpfc_mbx_read_vpi_mac_4_MASK 0x000000FF +#define lpfc_mbx_read_vpi_mac_4_WORD word8 +#define lpfc_mbx_read_vpi_mac_5_SHIFT 8 +#define lpfc_mbx_read_vpi_mac_5_MASK 0x000000FF +#define lpfc_mbx_read_vpi_mac_5_WORD word8 +#define lpfc_mbx_read_vpi_vlan_tag_SHIFT 16 +#define lpfc_mbx_read_vpi_vlan_tag_MASK 0x00000FFF +#define lpfc_mbx_read_vpi_vlan_tag_WORD word8 +#define lpfc_mbx_read_vpi_vv_SHIFT 28 +#define lpfc_mbx_read_vpi_vv_MASK 0x0000001 +#define lpfc_mbx_read_vpi_vv_WORD word8 +}; + +struct lpfc_mbx_unreg_vfi { + uint32_t word1_rsvd; + uint32_t word2; +#define lpfc_unreg_vfi_vfi_SHIFT 0 +#define lpfc_unreg_vfi_vfi_MASK 0x0000FFFF +#define lpfc_unreg_vfi_vfi_WORD word2 +}; + +struct lpfc_mbx_resume_rpi { + uint32_t word1; +#define lpfc_resume_rpi_rpi_SHIFT 0 +#define lpfc_resume_rpi_rpi_MASK 0x0000FFFF +#define lpfc_resume_rpi_rpi_WORD word1 + uint32_t event_tag; + uint32_t word3_rsvd; + uint32_t word4_rsvd; + 
uint32_t word5_rsvd; + uint32_t word6; +#define lpfc_resume_rpi_vpi_SHIFT 0 +#define lpfc_resume_rpi_vpi_MASK 0x0000FFFF +#define lpfc_resume_rpi_vpi_WORD word6 +#define lpfc_resume_rpi_vfi_SHIFT 16 +#define lpfc_resume_rpi_vfi_MASK 0x0000FFFF +#define lpfc_resume_rpi_vfi_WORD word6 +}; + +#define REG_FCF_INVALID_QID 0xFFFF +struct lpfc_mbx_reg_fcfi { + uint32_t word1; +#define lpfc_reg_fcfi_info_index_SHIFT 0 +#define lpfc_reg_fcfi_info_index_MASK 0x0000FFFF +#define lpfc_reg_fcfi_info_index_WORD word1 +#define lpfc_reg_fcfi_fcfi_SHIFT 16 +#define lpfc_reg_fcfi_fcfi_MASK 0x0000FFFF +#define lpfc_reg_fcfi_fcfi_WORD word1 + uint32_t word2; +#define lpfc_reg_fcfi_rq_id1_SHIFT 0 +#define lpfc_reg_fcfi_rq_id1_MASK 0x0000FFFF +#define lpfc_reg_fcfi_rq_id1_WORD word2 +#define lpfc_reg_fcfi_rq_id0_SHIFT 16 +#define lpfc_reg_fcfi_rq_id0_MASK 0x0000FFFF +#define lpfc_reg_fcfi_rq_id0_WORD word2 + uint32_t word3; +#define lpfc_reg_fcfi_rq_id3_SHIFT 0 +#define lpfc_reg_fcfi_rq_id3_MASK 0x0000FFFF +#define lpfc_reg_fcfi_rq_id3_WORD word3 +#define lpfc_reg_fcfi_rq_id2_SHIFT 16 +#define lpfc_reg_fcfi_rq_id2_MASK 0x0000FFFF +#define lpfc_reg_fcfi_rq_id2_WORD word3 + uint32_t word4; +#define lpfc_reg_fcfi_type_match0_SHIFT 24 +#define lpfc_reg_fcfi_type_match0_MASK 0x000000FF +#define lpfc_reg_fcfi_type_match0_WORD word4 +#define lpfc_reg_fcfi_type_mask0_SHIFT 16 +#define lpfc_reg_fcfi_type_mask0_MASK 0x000000FF +#define lpfc_reg_fcfi_type_mask0_WORD word4 +#define lpfc_reg_fcfi_rctl_match0_SHIFT 8 +#define lpfc_reg_fcfi_rctl_match0_MASK 0x000000FF +#define lpfc_reg_fcfi_rctl_match0_WORD word4 +#define lpfc_reg_fcfi_rctl_mask0_SHIFT 0 +#define lpfc_reg_fcfi_rctl_mask0_MASK 0x000000FF +#define lpfc_reg_fcfi_rctl_mask0_WORD word4 + uint32_t word5; +#define lpfc_reg_fcfi_type_match1_SHIFT 24 +#define lpfc_reg_fcfi_type_match1_MASK 0x000000FF +#define lpfc_reg_fcfi_type_match1_WORD word5 +#define lpfc_reg_fcfi_type_mask1_SHIFT 16 +#define lpfc_reg_fcfi_type_mask1_MASK 0x000000FF +#define lpfc_reg_fcfi_type_mask1_WORD word5 +#define lpfc_reg_fcfi_rctl_match1_SHIFT 8 +#define lpfc_reg_fcfi_rctl_match1_MASK 0x000000FF +#define lpfc_reg_fcfi_rctl_match1_WORD word5 +#define lpfc_reg_fcfi_rctl_mask1_SHIFT 0 +#define lpfc_reg_fcfi_rctl_mask1_MASK 0x000000FF +#define lpfc_reg_fcfi_rctl_mask1_WORD word5 + uint32_t word6; +#define lpfc_reg_fcfi_type_match2_SHIFT 24 +#define lpfc_reg_fcfi_type_match2_MASK 0x000000FF +#define lpfc_reg_fcfi_type_match2_WORD word6 +#define lpfc_reg_fcfi_type_mask2_SHIFT 16 +#define lpfc_reg_fcfi_type_mask2_MASK 0x000000FF +#define lpfc_reg_fcfi_type_mask2_WORD word6 +#define lpfc_reg_fcfi_rctl_match2_SHIFT 8 +#define lpfc_reg_fcfi_rctl_match2_MASK 0x000000FF +#define lpfc_reg_fcfi_rctl_match2_WORD word6 +#define lpfc_reg_fcfi_rctl_mask2_SHIFT 0 +#define lpfc_reg_fcfi_rctl_mask2_MASK 0x000000FF +#define lpfc_reg_fcfi_rctl_mask2_WORD word6 + uint32_t word7; +#define lpfc_reg_fcfi_type_match3_SHIFT 24 +#define lpfc_reg_fcfi_type_match3_MASK 0x000000FF +#define lpfc_reg_fcfi_type_match3_WORD word7 +#define lpfc_reg_fcfi_type_mask3_SHIFT 16 +#define lpfc_reg_fcfi_type_mask3_MASK 0x000000FF +#define lpfc_reg_fcfi_type_mask3_WORD word7 +#define lpfc_reg_fcfi_rctl_match3_SHIFT 8 +#define lpfc_reg_fcfi_rctl_match3_MASK 0x000000FF +#define lpfc_reg_fcfi_rctl_match3_WORD word7 +#define lpfc_reg_fcfi_rctl_mask3_SHIFT 0 +#define lpfc_reg_fcfi_rctl_mask3_MASK 0x000000FF +#define lpfc_reg_fcfi_rctl_mask3_WORD word7 + uint32_t word8; +#define lpfc_reg_fcfi_mam_SHIFT 13 +#define lpfc_reg_fcfi_mam_MASK 
0x00000003 +#define lpfc_reg_fcfi_mam_WORD word8 +#define LPFC_MAM_BOTH 0 /* Both SPMA and FPMA */ +#define LPFC_MAM_SPMA 1 /* Server Provided MAC Address */ +#define LPFC_MAM_FPMA 2 /* Fabric Provided MAC Address */ +#define lpfc_reg_fcfi_vv_SHIFT 12 +#define lpfc_reg_fcfi_vv_MASK 0x00000001 +#define lpfc_reg_fcfi_vv_WORD word8 +#define lpfc_reg_fcfi_vlan_tag_SHIFT 0 +#define lpfc_reg_fcfi_vlan_tag_MASK 0x00000FFF +#define lpfc_reg_fcfi_vlan_tag_WORD word8 +}; + +struct lpfc_mbx_unreg_fcfi { + uint32_t word1_rsv; + uint32_t word2; +#define lpfc_unreg_fcfi_SHIFT 0 +#define lpfc_unreg_fcfi_MASK 0x0000FFFF +#define lpfc_unreg_fcfi_WORD word2 +}; + +struct lpfc_mbx_read_rev { + uint32_t word1; +#define lpfc_mbx_rd_rev_sli_lvl_SHIFT 16 +#define lpfc_mbx_rd_rev_sli_lvl_MASK 0x0000000F +#define lpfc_mbx_rd_rev_sli_lvl_WORD word1 +#define lpfc_mbx_rd_rev_fcoe_SHIFT 20 +#define lpfc_mbx_rd_rev_fcoe_MASK 0x00000001 +#define lpfc_mbx_rd_rev_fcoe_WORD word1 +#define lpfc_mbx_rd_rev_vpd_SHIFT 29 +#define lpfc_mbx_rd_rev_vpd_MASK 0x00000001 +#define lpfc_mbx_rd_rev_vpd_WORD word1 + uint32_t first_hw_rev; + uint32_t second_hw_rev; + uint32_t word4_rsvd; + uint32_t third_hw_rev; + uint32_t word6; +#define lpfc_mbx_rd_rev_fcph_low_SHIFT 0 +#define lpfc_mbx_rd_rev_fcph_low_MASK 0x000000FF +#define lpfc_mbx_rd_rev_fcph_low_WORD word6 +#define lpfc_mbx_rd_rev_fcph_high_SHIFT 8 +#define lpfc_mbx_rd_rev_fcph_high_MASK 0x000000FF +#define lpfc_mbx_rd_rev_fcph_high_WORD word6 +#define lpfc_mbx_rd_rev_ftr_lvl_low_SHIFT 16 +#define lpfc_mbx_rd_rev_ftr_lvl_low_MASK 0x000000FF +#define lpfc_mbx_rd_rev_ftr_lvl_low_WORD word6 +#define lpfc_mbx_rd_rev_ftr_lvl_high_SHIFT 24 +#define lpfc_mbx_rd_rev_ftr_lvl_high_MASK 0x000000FF +#define lpfc_mbx_rd_rev_ftr_lvl_high_WORD word6 + uint32_t word7_rsvd; + uint32_t fw_id_rev; + uint8_t fw_name[16]; + uint32_t ulp_fw_id_rev; + uint8_t ulp_fw_name[16]; + uint32_t word18_47_rsvd[30]; + uint32_t word48; +#define lpfc_mbx_rd_rev_avail_len_SHIFT 0 +#define lpfc_mbx_rd_rev_avail_len_MASK 0x00FFFFFF +#define lpfc_mbx_rd_rev_avail_len_WORD word48 + uint32_t vpd_paddr_low; + uint32_t vpd_paddr_high; + uint32_t avail_vpd_len; + uint32_t rsvd_52_63[12]; +}; + +struct lpfc_mbx_read_config { + uint32_t word1; +#define lpfc_mbx_rd_conf_max_bbc_SHIFT 0 +#define lpfc_mbx_rd_conf_max_bbc_MASK 0x000000FF +#define lpfc_mbx_rd_conf_max_bbc_WORD word1 +#define lpfc_mbx_rd_conf_init_bbc_SHIFT 8 +#define lpfc_mbx_rd_conf_init_bbc_MASK 0x000000FF +#define lpfc_mbx_rd_conf_init_bbc_WORD word1 + uint32_t word2; +#define lpfc_mbx_rd_conf_nport_did_SHIFT 0 +#define lpfc_mbx_rd_conf_nport_did_MASK 0x00FFFFFF +#define lpfc_mbx_rd_conf_nport_did_WORD word2 +#define lpfc_mbx_rd_conf_topology_SHIFT 24 +#define lpfc_mbx_rd_conf_topology_MASK 0x000000FF +#define lpfc_mbx_rd_conf_topology_WORD word2 + uint32_t word3; +#define lpfc_mbx_rd_conf_ao_SHIFT 0 +#define lpfc_mbx_rd_conf_ao_MASK 0x00000001 +#define lpfc_mbx_rd_conf_ao_WORD word3 +#define lpfc_mbx_rd_conf_bb_scn_SHIFT 8 +#define lpfc_mbx_rd_conf_bb_scn_MASK 0x0000000F +#define lpfc_mbx_rd_conf_bb_scn_WORD word3 +#define lpfc_mbx_rd_conf_cbb_scn_SHIFT 12 +#define lpfc_mbx_rd_conf_cbb_scn_MASK 0x0000000F +#define lpfc_mbx_rd_conf_cbb_scn_WORD word3 +#define lpfc_mbx_rd_conf_mc_SHIFT 29 +#define lpfc_mbx_rd_conf_mc_MASK 0x00000001 +#define lpfc_mbx_rd_conf_mc_WORD word3 + uint32_t word4; +#define lpfc_mbx_rd_conf_e_d_tov_SHIFT 0 +#define lpfc_mbx_rd_conf_e_d_tov_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_e_d_tov_WORD word4 + uint32_t word5; +#define 
lpfc_mbx_rd_conf_lp_tov_SHIFT 0 +#define lpfc_mbx_rd_conf_lp_tov_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_lp_tov_WORD word5 + uint32_t word6; +#define lpfc_mbx_rd_conf_r_a_tov_SHIFT 0 +#define lpfc_mbx_rd_conf_r_a_tov_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_r_a_tov_WORD word6 + uint32_t word7; +#define lpfc_mbx_rd_conf_r_t_tov_SHIFT 0 +#define lpfc_mbx_rd_conf_r_t_tov_MASK 0x000000FF +#define lpfc_mbx_rd_conf_r_t_tov_WORD word7 + uint32_t word8; +#define lpfc_mbx_rd_conf_al_tov_SHIFT 0 +#define lpfc_mbx_rd_conf_al_tov_MASK 0x0000000F +#define lpfc_mbx_rd_conf_al_tov_WORD word8 + uint32_t word9; +#define lpfc_mbx_rd_conf_lmt_SHIFT 0 +#define lpfc_mbx_rd_conf_lmt_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_lmt_WORD word9 + uint32_t word10; +#define lpfc_mbx_rd_conf_max_alpa_SHIFT 0 +#define lpfc_mbx_rd_conf_max_alpa_MASK 0x000000FF +#define lpfc_mbx_rd_conf_max_alpa_WORD word10 + uint32_t word11_rsvd; + uint32_t word12; +#define lpfc_mbx_rd_conf_xri_base_SHIFT 0 +#define lpfc_mbx_rd_conf_xri_base_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_xri_base_WORD word12 +#define lpfc_mbx_rd_conf_xri_count_SHIFT 16 +#define lpfc_mbx_rd_conf_xri_count_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_xri_count_WORD word12 + uint32_t word13; +#define lpfc_mbx_rd_conf_rpi_base_SHIFT 0 +#define lpfc_mbx_rd_conf_rpi_base_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_rpi_base_WORD word13 +#define lpfc_mbx_rd_conf_rpi_count_SHIFT 16 +#define lpfc_mbx_rd_conf_rpi_count_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_rpi_count_WORD word13 + uint32_t word14; +#define lpfc_mbx_rd_conf_vpi_base_SHIFT 0 +#define lpfc_mbx_rd_conf_vpi_base_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_vpi_base_WORD word14 +#define lpfc_mbx_rd_conf_vpi_count_SHIFT 16 +#define lpfc_mbx_rd_conf_vpi_count_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_vpi_count_WORD word14 + uint32_t word15; +#define lpfc_mbx_rd_conf_vfi_base_SHIFT 0 +#define lpfc_mbx_rd_conf_vfi_base_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_vfi_base_WORD word15 +#define lpfc_mbx_rd_conf_vfi_count_SHIFT 16 +#define lpfc_mbx_rd_conf_vfi_count_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_vfi_count_WORD word15 + uint32_t word16; +#define lpfc_mbx_rd_conf_fcfi_base_SHIFT 0 +#define lpfc_mbx_rd_conf_fcfi_base_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_fcfi_base_WORD word16 +#define lpfc_mbx_rd_conf_fcfi_count_SHIFT 16 +#define lpfc_mbx_rd_conf_fcfi_count_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_fcfi_count_WORD word16 + uint32_t word17; +#define lpfc_mbx_rd_conf_rq_count_SHIFT 0 +#define lpfc_mbx_rd_conf_rq_count_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_rq_count_WORD word17 +#define lpfc_mbx_rd_conf_eq_count_SHIFT 16 +#define lpfc_mbx_rd_conf_eq_count_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_eq_count_WORD word17 + uint32_t word18; +#define lpfc_mbx_rd_conf_wq_count_SHIFT 0 +#define lpfc_mbx_rd_conf_wq_count_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_wq_count_WORD word18 +#define lpfc_mbx_rd_conf_cq_count_SHIFT 16 +#define lpfc_mbx_rd_conf_cq_count_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_cq_count_WORD word18 +}; + +struct lpfc_mbx_request_features { + uint32_t word1; +#define lpfc_mbx_rq_ftr_qry_SHIFT 0 +#define lpfc_mbx_rq_ftr_qry_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_qry_WORD word1 + uint32_t word2; +#define lpfc_mbx_rq_ftr_rq_iaab_SHIFT 0 +#define lpfc_mbx_rq_ftr_rq_iaab_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rq_iaab_WORD word2 +#define lpfc_mbx_rq_ftr_rq_npiv_SHIFT 1 +#define lpfc_mbx_rq_ftr_rq_npiv_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rq_npiv_WORD word2 +#define lpfc_mbx_rq_ftr_rq_dif_SHIFT 
2 +#define lpfc_mbx_rq_ftr_rq_dif_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rq_dif_WORD word2 +#define lpfc_mbx_rq_ftr_rq_vf_SHIFT 3 +#define lpfc_mbx_rq_ftr_rq_vf_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rq_vf_WORD word2 +#define lpfc_mbx_rq_ftr_rq_fcpi_SHIFT 4 +#define lpfc_mbx_rq_ftr_rq_fcpi_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rq_fcpi_WORD word2 +#define lpfc_mbx_rq_ftr_rq_fcpt_SHIFT 5 +#define lpfc_mbx_rq_ftr_rq_fcpt_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rq_fcpt_WORD word2 +#define lpfc_mbx_rq_ftr_rq_fcpc_SHIFT 6 +#define lpfc_mbx_rq_ftr_rq_fcpc_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rq_fcpc_WORD word2 +#define lpfc_mbx_rq_ftr_rq_ifip_SHIFT 7 +#define lpfc_mbx_rq_ftr_rq_ifip_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rq_ifip_WORD word2 + uint32_t word3; +#define lpfc_mbx_rq_ftr_rsp_iaab_SHIFT 0 +#define lpfc_mbx_rq_ftr_rsp_iaab_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rsp_iaab_WORD word3 +#define lpfc_mbx_rq_ftr_rsp_npiv_SHIFT 1 +#define lpfc_mbx_rq_ftr_rsp_npiv_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rsp_npiv_WORD word3 +#define lpfc_mbx_rq_ftr_rsp_dif_SHIFT 2 +#define lpfc_mbx_rq_ftr_rsp_dif_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rsp_dif_WORD word3 +#define lpfc_mbx_rq_ftr_rsp_vf_SHIFT 3 +#define lpfc_mbx_rq_ftr_rsp_vf_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rsp_vf_WORD word3 +#define lpfc_mbx_rq_ftr_rsp_fcpi_SHIFT 4 +#define lpfc_mbx_rq_ftr_rsp_fcpi_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rsp_fcpi_WORD word3 +#define lpfc_mbx_rq_ftr_rsp_fcpt_SHIFT 5 +#define lpfc_mbx_rq_ftr_rsp_fcpt_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rsp_fcpt_WORD word3 +#define lpfc_mbx_rq_ftr_rsp_fcpc_SHIFT 6 +#define lpfc_mbx_rq_ftr_rsp_fcpc_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rsp_fcpc_WORD word3 +#define lpfc_mbx_rq_ftr_rsp_ifip_SHIFT 7 +#define lpfc_mbx_rq_ftr_rsp_ifip_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rsp_ifip_WORD word3 +}; + +/* Mailbox Completion Queue Error Messages */ +#define MB_CQE_STATUS_SUCCESS 0x0 +#define MB_CQE_STATUS_INSUFFICIENT_PRIVILEGES 0x1 +#define MB_CQE_STATUS_INVALID_PARAMETER 0x2 +#define MB_CQE_STATUS_INSUFFICIENT_RESOURCES 0x3 +#define MB_CEQ_STATUS_QUEUE_FLUSHING 0x4 +#define MB_CQE_STATUS_DMA_FAILED 0x5 + +/* mailbox queue entry structure */ +struct lpfc_mqe { + uint32_t word0; +#define lpfc_mqe_status_SHIFT 16 +#define lpfc_mqe_status_MASK 0x0000FFFF +#define lpfc_mqe_status_WORD word0 +#define lpfc_mqe_command_SHIFT 8 +#define lpfc_mqe_command_MASK 0x000000FF +#define lpfc_mqe_command_WORD word0 + union { + uint32_t mb_words[LPFC_SLI4_MB_WORD_COUNT - 1]; + /* sli4 mailbox commands */ + struct lpfc_mbx_sli4_config sli4_config; + struct lpfc_mbx_init_vfi init_vfi; + struct lpfc_mbx_reg_vfi reg_vfi; + struct lpfc_mbx_unreg_vfi unreg_vfi; + struct lpfc_mbx_init_vpi init_vpi; + struct lpfc_mbx_resume_rpi resume_rpi; + struct lpfc_mbx_read_fcf_tbl read_fcf_tbl; + struct lpfc_mbx_add_fcf_tbl_entry add_fcf_entry; + struct lpfc_mbx_del_fcf_tbl_entry del_fcf_entry; + struct lpfc_mbx_reg_fcfi reg_fcfi; + struct lpfc_mbx_unreg_fcfi unreg_fcfi; + struct lpfc_mbx_mq_create mq_create; + struct lpfc_mbx_eq_create eq_create; + struct lpfc_mbx_cq_create cq_create; + struct lpfc_mbx_wq_create wq_create; + struct lpfc_mbx_rq_create rq_create; + struct lpfc_mbx_mq_destroy mq_destroy; + struct lpfc_mbx_eq_destroy eq_destroy; + struct lpfc_mbx_cq_destroy cq_destroy; + struct lpfc_mbx_wq_destroy wq_destroy; + struct lpfc_mbx_rq_destroy rq_destroy; + struct lpfc_mbx_post_sgl_pages post_sgl_pages; + struct lpfc_mbx_nembed_cmd nembed_cmd; + struct lpfc_mbx_read_rev read_rev; +
struct lpfc_mbx_read_vpi read_vpi; + struct lpfc_mbx_read_config rd_config; + struct lpfc_mbx_request_features req_ftrs; + struct lpfc_mbx_post_hdr_tmpl hdr_tmpl; + struct lpfc_mbx_nop nop; + } un; +}; + +struct lpfc_mcqe { + uint32_t word0; +#define lpfc_mcqe_status_SHIFT 0 +#define lpfc_mcqe_status_MASK 0x0000FFFF +#define lpfc_mcqe_status_WORD word0 +#define lpfc_mcqe_ext_status_SHIFT 16 +#define lpfc_mcqe_ext_status_MASK 0x0000FFFF +#define lpfc_mcqe_ext_status_WORD word0 + uint32_t mcqe_tag0; + uint32_t mcqe_tag1; + uint32_t trailer; +#define lpfc_trailer_valid_SHIFT 31 +#define lpfc_trailer_valid_MASK 0x00000001 +#define lpfc_trailer_valid_WORD trailer +#define lpfc_trailer_async_SHIFT 30 +#define lpfc_trailer_async_MASK 0x00000001 +#define lpfc_trailer_async_WORD trailer +#define lpfc_trailer_hpi_SHIFT 29 +#define lpfc_trailer_hpi_MASK 0x00000001 +#define lpfc_trailer_hpi_WORD trailer +#define lpfc_trailer_completed_SHIFT 28 +#define lpfc_trailer_completed_MASK 0x00000001 +#define lpfc_trailer_completed_WORD trailer +#define lpfc_trailer_consumed_SHIFT 27 +#define lpfc_trailer_consumed_MASK 0x00000001 +#define lpfc_trailer_consumed_WORD trailer +#define lpfc_trailer_type_SHIFT 16 +#define lpfc_trailer_type_MASK 0x000000FF +#define lpfc_trailer_type_WORD trailer +#define lpfc_trailer_code_SHIFT 8 +#define lpfc_trailer_code_MASK 0x000000FF +#define lpfc_trailer_code_WORD trailer +#define LPFC_TRAILER_CODE_LINK 0x1 +#define LPFC_TRAILER_CODE_FCOE 0x2 +#define LPFC_TRAILER_CODE_DCBX 0x3 +}; + +struct lpfc_acqe_link { + uint32_t word0; +#define lpfc_acqe_link_speed_SHIFT 24 +#define lpfc_acqe_link_speed_MASK 0x000000FF +#define lpfc_acqe_link_speed_WORD word0 +#define LPFC_ASYNC_LINK_SPEED_ZERO 0x0 +#define LPFC_ASYNC_LINK_SPEED_10MBPS 0x1 +#define LPFC_ASYNC_LINK_SPEED_100MBPS 0x2 +#define LPFC_ASYNC_LINK_SPEED_1GBPS 0x3 +#define LPFC_ASYNC_LINK_SPEED_10GBPS 0x4 +#define lpfc_acqe_link_duplex_SHIFT 16 +#define lpfc_acqe_link_duplex_MASK 0x000000FF +#define lpfc_acqe_link_duplex_WORD word0 +#define LPFC_ASYNC_LINK_DUPLEX_NONE 0x0 +#define LPFC_ASYNC_LINK_DUPLEX_HALF 0x1 +#define LPFC_ASYNC_LINK_DUPLEX_FULL 0x2 +#define lpfc_acqe_link_status_SHIFT 8 +#define lpfc_acqe_link_status_MASK 0x000000FF +#define lpfc_acqe_link_status_WORD word0 +#define LPFC_ASYNC_LINK_STATUS_DOWN 0x0 +#define LPFC_ASYNC_LINK_STATUS_UP 0x1 +#define LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN 0x2 +#define LPFC_ASYNC_LINK_STATUS_LOGICAL_UP 0x3 +#define lpfc_acqe_link_physical_SHIFT 0 +#define lpfc_acqe_link_physical_MASK 0x000000FF +#define lpfc_acqe_link_physical_WORD word0 +#define LPFC_ASYNC_LINK_PORT_A 0x0 +#define LPFC_ASYNC_LINK_PORT_B 0x1 + uint32_t word1; +#define lpfc_acqe_link_fault_SHIFT 0 +#define lpfc_acqe_link_fault_MASK 0x000000FF +#define lpfc_acqe_link_fault_WORD word1 +#define LPFC_ASYNC_LINK_FAULT_NONE 0x0 +#define LPFC_ASYNC_LINK_FAULT_LOCAL 0x1 +#define LPFC_ASYNC_LINK_FAULT_REMOTE 0x2 + uint32_t event_tag; + uint32_t trailer; +}; + +struct lpfc_acqe_fcoe { + uint32_t fcf_index; + uint32_t word1; +#define lpfc_acqe_fcoe_fcf_count_SHIFT 0 +#define lpfc_acqe_fcoe_fcf_count_MASK 0x0000FFFF +#define lpfc_acqe_fcoe_fcf_count_WORD word1 +#define lpfc_acqe_fcoe_event_type_SHIFT 16 +#define lpfc_acqe_fcoe_event_type_MASK 0x0000FFFF +#define lpfc_acqe_fcoe_event_type_WORD word1 +#define LPFC_FCOE_EVENT_TYPE_NEW_FCF 0x1 +#define LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL 0x2 +#define LPFC_FCOE_EVENT_TYPE_FCF_DEAD 0x3 + uint32_t event_tag; + uint32_t trailer; +}; + +struct lpfc_acqe_dcbx { + uint32_t tlv_ttl; + uint32_t 
reserved; + uint32_t event_tag; + uint32_t trailer; +}; + +/* + * Define the bootstrap mailbox (bmbx) region used to communicate + * mailbox command between the host and port. The mailbox consists + * of a payload area of 256 bytes and a completion queue of length + * 16 bytes. + */ +struct lpfc_bmbx_create { + struct lpfc_mqe mqe; + struct lpfc_mcqe mcqe; +}; + +#define SGL_ALIGN_SZ 64 +#define SGL_PAGE_SIZE 4096 +/* align SGL addr on a size boundary - adjust address up */ +#define NO_XRI ((uint16_t)-1) +struct wqe_common { + uint32_t word6; +#define wqe_xri_SHIFT 0 +#define wqe_xri_MASK 0x0000FFFF +#define wqe_xri_WORD word6 +#define wqe_ctxt_tag_SHIFT 16 +#define wqe_ctxt_tag_MASK 0x0000FFFF +#define wqe_ctxt_tag_WORD word6 + uint32_t word7; +#define wqe_ct_SHIFT 2 +#define wqe_ct_MASK 0x00000003 +#define wqe_ct_WORD word7 +#define wqe_status_SHIFT 4 +#define wqe_status_MASK 0x0000000f +#define wqe_status_WORD word7 +#define wqe_cmnd_SHIFT 8 +#define wqe_cmnd_MASK 0x000000ff +#define wqe_cmnd_WORD word7 +#define wqe_class_SHIFT 16 +#define wqe_class_MASK 0x00000007 +#define wqe_class_WORD word7 +#define wqe_pu_SHIFT 20 +#define wqe_pu_MASK 0x00000003 +#define wqe_pu_WORD word7 +#define wqe_erp_SHIFT 22 +#define wqe_erp_MASK 0x00000001 +#define wqe_erp_WORD word7 +#define wqe_lnk_SHIFT 23 +#define wqe_lnk_MASK 0x00000001 +#define wqe_lnk_WORD word7 +#define wqe_tmo_SHIFT 24 +#define wqe_tmo_MASK 0x000000ff +#define wqe_tmo_WORD word7 + uint32_t abort_tag; /* word 8 in WQE */ + uint32_t word9; +#define wqe_reqtag_SHIFT 0 +#define wqe_reqtag_MASK 0x0000FFFF +#define wqe_reqtag_WORD word9 +#define wqe_rcvoxid_SHIFT 16 +#define wqe_rcvoxid_MASK 0x0000FFFF +#define wqe_rcvoxid_WORD word9 + uint32_t word10; +#define wqe_pri_SHIFT 16 +#define wqe_pri_MASK 0x00000007 +#define wqe_pri_WORD word10 +#define wqe_pv_SHIFT 19 +#define wqe_pv_MASK 0x00000001 +#define wqe_pv_WORD word10 +#define wqe_xc_SHIFT 21 +#define wqe_xc_MASK 0x00000001 +#define wqe_xc_WORD word10 +#define wqe_ccpe_SHIFT 23 +#define wqe_ccpe_MASK 0x00000001 +#define wqe_ccpe_WORD word10 +#define wqe_ccp_SHIFT 24 +#define wqe_ccp_MASK 0x000000ff +#define wqe_ccp_WORD word10 + uint32_t word11; +#define wqe_cmd_type_SHIFT 0 +#define wqe_cmd_type_MASK 0x0000000f +#define wqe_cmd_type_WORD word11 +#define wqe_wqec_SHIFT 7 +#define wqe_wqec_MASK 0x00000001 +#define wqe_wqec_WORD word11 +#define wqe_cqid_SHIFT 16 +#define wqe_cqid_MASK 0x000003ff +#define wqe_cqid_WORD word11 +}; + +struct wqe_did { + uint32_t word5; +#define wqe_els_did_SHIFT 0 +#define wqe_els_did_MASK 0x00FFFFFF +#define wqe_els_did_WORD word5 +#define wqe_xmit_bls_ar_SHIFT 30 +#define wqe_xmit_bls_ar_MASK 0x00000001 +#define wqe_xmit_bls_ar_WORD word5 +#define wqe_xmit_bls_xo_SHIFT 31 +#define wqe_xmit_bls_xo_MASK 0x00000001 +#define wqe_xmit_bls_xo_WORD word5 +}; + +struct els_request64_wqe { + struct ulp_bde64 bde; + uint32_t payload_len; + uint32_t word4; +#define els_req64_sid_SHIFT 0 +#define els_req64_sid_MASK 0x00FFFFFF +#define els_req64_sid_WORD word4 +#define els_req64_sp_SHIFT 24 +#define els_req64_sp_MASK 0x00000001 +#define els_req64_sp_WORD word4 +#define els_req64_vf_SHIFT 25 +#define els_req64_vf_MASK 0x00000001 +#define els_req64_vf_WORD word4 + struct wqe_did wqe_dest; + struct wqe_common wqe_com; /* words 6-11 */ + uint32_t word12; +#define els_req64_vfid_SHIFT 1 +#define els_req64_vfid_MASK 0x00000FFF +#define els_req64_vfid_WORD word12 +#define els_req64_pri_SHIFT 13 +#define els_req64_pri_MASK 0x00000007 +#define els_req64_pri_WORD word12 + 
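/*
 * Words 6-11 of every WQE variant are the shared wqe_common block above,
 * so a command-specific prep routine fills its own words 0-5 and 12-15 and
 * then the common fields. Illustrative only - the xritag/iotag/timeout
 * sources, the CLASS3 code, and the els_req member of the union defined
 * further below are assumptions, not taken from this hunk:
 *
 *	bf_set(wqe_xri, &wqe->els_req.wqe_com, xritag);
 *	bf_set(wqe_reqtag, &wqe->els_req.wqe_com, iocbq->iotag);
 *	bf_set(wqe_tmo, &wqe->els_req.wqe_com, timeout);
 *	bf_set(wqe_class, &wqe->els_req.wqe_com, CLASS3);
 */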
uint32_t word13; +#define els_req64_hopcnt_SHIFT 24 +#define els_req64_hopcnt_MASK 0x000000ff +#define els_req64_hopcnt_WORD word13 + uint32_t reserved[2]; +}; + +struct xmit_els_rsp64_wqe { + struct ulp_bde64 bde; + uint32_t rsvd3; + uint32_t rsvd4; + struct wqe_did wqe_dest; + struct wqe_common wqe_com; /* words 6-11 */ + uint32_t rsvd_12_15[4]; +}; + +struct xmit_bls_rsp64_wqe { + uint32_t payload0; + uint32_t word1; +#define xmit_bls_rsp64_rxid_SHIFT 0 +#define xmit_bls_rsp64_rxid_MASK 0x0000ffff +#define xmit_bls_rsp64_rxid_WORD word1 +#define xmit_bls_rsp64_oxid_SHIFT 16 +#define xmit_bls_rsp64_oxid_MASK 0x0000ffff +#define xmit_bls_rsp64_oxid_WORD word1 + uint32_t word2; +#define xmit_bls_rsp64_seqcntlo_SHIFT 0 +#define xmit_bls_rsp64_seqcntlo_MASK 0x0000ffff +#define xmit_bls_rsp64_seqcntlo_WORD word2 +#define xmit_bls_rsp64_seqcnthi_SHIFT 16 +#define xmit_bls_rsp64_seqcnthi_MASK 0x0000ffff +#define xmit_bls_rsp64_seqcnthi_WORD word2 + uint32_t rsrvd3; + uint32_t rsrvd4; + struct wqe_did wqe_dest; + struct wqe_common wqe_com; /* words 6-11 */ + uint32_t rsvd_12_15[4]; +}; +struct wqe_rctl_dfctl { + uint32_t word5; +#define wqe_si_SHIFT 2 +#define wqe_si_MASK 0x00000001 +#define wqe_si_WORD word5 +#define wqe_la_SHIFT 3 +#define wqe_la_MASK 0x00000001 +#define wqe_la_WORD word5 +#define wqe_ls_SHIFT 7 +#define wqe_ls_MASK 0x00000001 +#define wqe_ls_WORD word5 +#define wqe_dfctl_SHIFT 8 +#define wqe_dfctl_MASK 0x000000ff +#define wqe_dfctl_WORD word5 +#define wqe_type_SHIFT 16 +#define wqe_type_MASK 0x000000ff +#define wqe_type_WORD word5 +#define wqe_rctl_SHIFT 24 +#define wqe_rctl_MASK 0x000000ff +#define wqe_rctl_WORD word5 +}; + +struct xmit_seq64_wqe { + struct ulp_bde64 bde; + uint32_t payload_offset; + uint32_t relative_offset; + struct wqe_rctl_dfctl wqe_ctl; + struct wqe_common wqe_com; /* words 6-11 */ + /* Note: word10 different REVISIT */ + uint32_t xmit_len; + uint32_t rsvd_12_15[3]; +}; +struct xmit_bcast64_wqe { + struct ulp_bde64 bde; + uint32_t payload_len; + uint32_t rsvd4; + struct wqe_rctl_dfctl wqe_ctl; /* word 5 */ + struct wqe_common wqe_com; /* words 6-11 */ + uint32_t rsvd_12_15[4]; +}; + +struct gen_req64_wqe { + struct ulp_bde64 bde; + uint32_t command_len; + uint32_t payload_len; + struct wqe_rctl_dfctl wqe_ctl; /* word 5 */ + struct wqe_common wqe_com; /* words 6-11 */ + uint32_t rsvd_12_15[4]; +}; + +struct create_xri_wqe { + uint32_t rsrvd[5]; /* words 0-4 */ + struct wqe_did wqe_dest; /* word 5 */ + struct wqe_common wqe_com; /* words 6-11 */ + uint32_t rsvd_12_15[4]; /* word 12-15 */ +}; + +#define T_REQUEST_TAG 3 +#define T_XRI_TAG 1 + +struct abort_cmd_wqe { + uint32_t rsrvd[3]; + uint32_t word3; +#define abort_cmd_ia_SHIFT 0 +#define abort_cmd_ia_MASK 0x00000001 +#define abort_cmd_ia_WORD word3 +#define abort_cmd_criteria_SHIFT 8 +#define abort_cmd_criteria_MASK 0x000000ff +#define abort_cmd_criteria_WORD word3 + uint32_t rsrvd4; + uint32_t rsrvd5; + struct wqe_common wqe_com; /* words 6-11 */ + uint32_t rsvd_12_15[4]; /* word 12-15 */ +}; + +struct fcp_iwrite64_wqe { + struct ulp_bde64 bde; + uint32_t payload_len; + uint32_t total_xfer_len; + uint32_t initial_xfer_len; + struct wqe_common wqe_com; /* words 6-11 */ + uint32_t rsvd_12_15[4]; /* word 12-15 */ +}; + +struct fcp_iread64_wqe { + struct ulp_bde64 bde; + uint32_t payload_len; /* word 3 */ + uint32_t total_xfer_len; /* word 4 */ + uint32_t rsrvd5; /* word 5 */ + struct wqe_common wqe_com; /* words 6-11 */ + uint32_t rsvd_12_15[4]; /* word 12-15 */ +}; + +struct fcp_icmnd64_wqe { + 
struct ulp_bde64 bde; /* words 0-2 */ + uint32_t rsrvd[3]; /* words 3-5 */ + struct wqe_common wqe_com; /* words 6-11 */ + uint32_t rsvd_12_15[4]; /* word 12-15 */ +}; + + +union lpfc_wqe { + uint32_t words[16]; + struct lpfc_wqe_generic generic; + struct fcp_icmnd64_wqe fcp_icmd; + struct fcp_iread64_wqe fcp_iread; + struct fcp_iwrite64_wqe fcp_iwrite; + struct abort_cmd_wqe abort_cmd; + struct create_xri_wqe create_xri; + struct xmit_bcast64_wqe xmit_bcast64; + struct xmit_seq64_wqe xmit_sequence; + struct xmit_bls_rsp64_wqe xmit_bls_rsp; + struct xmit_els_rsp64_wqe xmit_els_rsp; + struct els_request64_wqe els_req; + struct gen_req64_wqe gen_req; +}; + +#define FCP_COMMAND 0x0 +#define FCP_COMMAND_DATA_OUT 0x1 +#define ELS_COMMAND_NON_FIP 0xC +#define ELS_COMMAND_FIP 0xD +#define OTHER_COMMAND 0x8 + diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 3f06ce2becf5..e9e4a1df8989 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -34,8 +34,10 @@ #include <scsi/scsi_host.h> #include <scsi/scsi_transport_fc.h> +#include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" +#include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc_scsi.h" @@ -51,9 +53,23 @@ char *_dump_buf_dif; unsigned long _dump_buf_dif_order; spinlock_t _dump_buf_lock; -static int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int); static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); static int lpfc_post_rcv_buf(struct lpfc_hba *); +static int lpfc_sli4_queue_create(struct lpfc_hba *); +static void lpfc_sli4_queue_destroy(struct lpfc_hba *); +static int lpfc_create_bootstrap_mbox(struct lpfc_hba *); +static int lpfc_setup_endian_order(struct lpfc_hba *); +static int lpfc_sli4_read_config(struct lpfc_hba *); +static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *); +static void lpfc_free_sgl_list(struct lpfc_hba *); +static int lpfc_init_sgl_list(struct lpfc_hba *); +static int lpfc_init_active_sgl_array(struct lpfc_hba *); +static void lpfc_free_active_sgl(struct lpfc_hba *); +static int lpfc_hba_down_post_s3(struct lpfc_hba *phba); +static int lpfc_hba_down_post_s4(struct lpfc_hba *phba); +static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *); +static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *); +static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *); static struct scsi_transport_template *lpfc_transport_template = NULL; static struct scsi_transport_template *lpfc_vport_transport_template = NULL; @@ -646,6 +662,77 @@ lpfc_hba_down_post_s3(struct lpfc_hba *phba) return 0; } +/** + * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset + * @phba: pointer to lpfc HBA data structure. + * + * This routine will do uninitialization after the HBA is reset when bringing + * down the SLI Layer. + * + * Return codes + * 0 - success. + * Any other value - error. + **/ +static int +lpfc_hba_down_post_s4(struct lpfc_hba *phba) +{ + struct lpfc_scsi_buf *psb, *psb_next; + LIST_HEAD(aborts); + int ret; + unsigned long iflag = 0; + ret = lpfc_hba_down_post_s3(phba); + if (ret) + return ret; + /* At this point in time the HBA is either reset or DOA. Either + * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be + * on the lpfc_sgl_list so that it can either be freed if the + * driver is unloading or reposted if the driver is restarting + * the port. 
+ */ + spin_lock_irq(&phba->hbalock); /* required for lpfc_sgl_list and */ + /* scsi_buf_list */ + /* abts_sgl_list_lock required because worker thread uses this + * list. + */ + spin_lock(&phba->sli4_hba.abts_sgl_list_lock); + list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list, + &phba->sli4_hba.lpfc_sgl_list); + spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); + /* abts_scsi_buf_list_lock required because worker thread uses this + * list. + */ + spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock); + list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list, + &aborts); + spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock); + spin_unlock_irq(&phba->hbalock); + + list_for_each_entry_safe(psb, psb_next, &aborts, list) { + psb->pCmd = NULL; + psb->status = IOSTAT_SUCCESS; + } + spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); + list_splice(&aborts, &phba->lpfc_scsi_buf_list); + spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); + return 0; +} + +/** + * lpfc_hba_down_post - Wrapper func for hba down post routine + * @phba: pointer to lpfc HBA data structure. + * + * This routine wraps the actual SLI3 or SLI4 routine for performing + * uninitialization after the HBA is reset when bringing down the SLI Layer. + * + * Return codes + * 0 - success. + * Any other value - error. + **/ +int +lpfc_hba_down_post(struct lpfc_hba *phba) +{ + return (*phba->lpfc_hba_down_post)(phba); +} /** * lpfc_hb_timeout - The HBA-timer timeout handler @@ -853,6 +940,25 @@ lpfc_offline_eratt(struct lpfc_hba *phba) } /** + * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention + * @phba: pointer to lpfc hba data structure. + * + * This routine is called to bring a SLI4 HBA offline when HBA hardware error + * other than Port Error 6 has been detected. + **/ +static void +lpfc_sli4_offline_eratt(struct lpfc_hba *phba) +{ + lpfc_offline_prep(phba); + lpfc_offline(phba); + lpfc_sli4_brdreset(phba); + lpfc_hba_down_post(phba); + lpfc_sli4_post_status_check(phba); + lpfc_unblock_mgmt_io(phba); + phba->link_state = LPFC_HBA_ERROR; +} + +/** * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler * @phba: pointer to lpfc hba data structure. * @@ -1057,6 +1163,65 @@ lpfc_handle_eratt_s3(struct lpfc_hba *phba) } /** + * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to handle the SLI4 HBA hardware error attention + * conditions. + **/ +static void +lpfc_handle_eratt_s4(struct lpfc_hba *phba) +{ + struct lpfc_vport *vport = phba->pport; + uint32_t event_data; + struct Scsi_Host *shost; + + /* If the pci channel is offline, ignore possible errors, since + * we cannot communicate with the pci card anyway. 
+ */ + if (pci_channel_offline(phba->pcidev)) + return; + /* If resets are disabled then leave the HBA alone and return */ + if (!phba->cfg_enable_hba_reset) + return; + + /* Send an internal error event to mgmt application */ + lpfc_board_errevt_to_mgmt(phba); + + /* For now, the actual action for SLI4 device handling is not + * specified yet; just treat it as an adapter hardware failure + */ + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0143 SLI4 Adapter Hardware Error Data: x%x x%x\n", + phba->work_status[0], phba->work_status[1]); + + event_data = FC_REG_DUMP_EVENT; + shost = lpfc_shost_from_vport(vport); + fc_host_post_vendor_event(shost, fc_get_event_number(), + sizeof(event_data), (char *) &event_data, + SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); + + lpfc_sli4_offline_eratt(phba); +} + +/** + * lpfc_handle_eratt - Wrapper func for handling hba error attention + * @phba: pointer to lpfc HBA data structure. + * + * This routine wraps the actual SLI3 or SLI4 hba error attention handling + * routine from the API jump table function pointer from the lpfc_hba struct. + * + * Return codes + * 0 - success. + * Any other value - error. + **/ +void +lpfc_handle_eratt(struct lpfc_hba *phba) +{ + (*phba->lpfc_handle_eratt)(phba); +} + +/** * lpfc_handle_latt - The HBA link event handler * @phba: pointer to lpfc hba data structure. * @@ -1312,6 +1477,7 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) uint16_t dev_id = phba->pcidev->device; int max_speed; int GE = 0; + int oneConnect = 0; /* default is not a oneConnect */ struct { char * name; int max_speed; @@ -1457,6 +1623,14 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) case PCI_DEVICE_ID_PROTEUS_S: m = (typeof(m)) {"LPemv12002-S", max_speed, "PCIe IOV"}; break; + case PCI_DEVICE_ID_TIGERSHARK: + oneConnect = 1; + m = (typeof(m)) {"OCe10100-F", max_speed, "PCIe"}; + break; + case PCI_DEVICE_ID_TIGERSHARK_S: + oneConnect = 1; + m = (typeof(m)) {"OCe10100-F-S", max_speed, "PCIe"}; + break; default: m = (typeof(m)){ NULL }; break; @@ -1464,13 +1638,24 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) if (mdp && mdp[0] == '\0') snprintf(mdp, 79,"%s", m.name); - if (descp && descp[0] == '\0') - snprintf(descp, 255, - "Emulex %s %d%s %s %s", - m.name, m.max_speed, - (GE) ? "GE" : "Gb", - m.bus, - (GE) ? "FCoE Adapter" : "Fibre Channel Adapter"); + /* oneConnect HBAs require special processing; they are all initiators + * and we put the port number on the end + */ + if (descp && descp[0] == '\0') { + if (oneConnect) + snprintf(descp, 255, + "Emulex OneConnect %s, FCoE Initiator, Port %s", + m.name, + phba->Port); + else + snprintf(descp, 255, + "Emulex %s %d%s %s %s", + m.name, m.max_speed, + (GE) ? "GE" : "Gb", + m.bus, + (GE) ? 
"FCoE Adapter" : + "Fibre Channel Adapter"); + } } /** @@ -1911,14 +2096,21 @@ lpfc_online(struct lpfc_hba *phba) return 1; } - if (lpfc_sli_hba_setup(phba)) { /* Initialize the HBA */ - lpfc_unblock_mgmt_io(phba); - return 1; + if (phba->sli_rev == LPFC_SLI_REV4) { + if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */ + lpfc_unblock_mgmt_io(phba); + return 1; + } + } else { + if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */ + lpfc_unblock_mgmt_io(phba); + return 1; + } } vports = lpfc_create_vport_work_array(phba); if (vports != NULL) - for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { struct Scsi_Host *shost; shost = lpfc_shost_from_vport(vports[i]); spin_lock_irq(shost->host_lock); @@ -1980,11 +2172,12 @@ lpfc_offline_prep(struct lpfc_hba * phba) /* Issue an unreg_login to all nodes on all vports */ vports = lpfc_create_vport_work_array(phba); if (vports != NULL) { - for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { struct Scsi_Host *shost; if (vports[i]->load_flag & FC_UNLOADING) continue; + vports[i]->vfi_state &= ~LPFC_VFI_REGISTERED; shost = lpfc_shost_from_vport(vports[i]); list_for_each_entry_safe(ndlp, next_ndlp, &vports[i]->fc_nodes, @@ -2029,11 +2222,11 @@ lpfc_offline(struct lpfc_hba *phba) if (phba->pport->fc_flag & FC_OFFLINE_MODE) return; - /* stop all timers associated with this hba */ - lpfc_stop_phba_timers(phba); + /* stop port and all timers associated with this hba */ + lpfc_stop_port(phba); vports = lpfc_create_vport_work_array(phba); if (vports != NULL) - for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) lpfc_stop_vport_timers(vports[i]); lpfc_destroy_vport_work_array(phba, vports); lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, @@ -2046,7 +2239,7 @@ lpfc_offline(struct lpfc_hba *phba) spin_unlock_irq(&phba->hbalock); vports = lpfc_create_vport_work_array(phba); if (vports != NULL) - for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { shost = lpfc_shost_from_vport(vports[i]); spin_lock_irq(shost->host_lock); vports[i]->work_port_events = 0; @@ -2139,6 +2332,10 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) shost->max_lun = vport->cfg_max_luns; shost->this_id = -1; shost->max_cmd_len = 16; + if (phba->sli_rev == LPFC_SLI_REV4) { + shost->dma_boundary = LPFC_SLI4_MAX_SEGMENT_SIZE; + shost->sg_tablesize = phba->cfg_sg_seg_cnt; + } /* * Set initial can_queue value since 0 is no longer supported and @@ -2156,6 +2353,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) /* Initialize all internally managed lists. */ INIT_LIST_HEAD(&vport->fc_nodes); + INIT_LIST_HEAD(&vport->rcv_buffer_list); spin_lock_init(&vport->work_port_lock); init_timer(&vport->fc_disctmo); @@ -2347,192 +2545,501 @@ void lpfc_host_attrib_init(struct Scsi_Host *shost) } /** - * lpfc_enable_msix - Enable MSI-X interrupt mode + * lpfc_stop_port_s3 - Stop SLI3 device port * @phba: pointer to lpfc hba data structure. * - * This routine is invoked to enable the MSI-X interrupt vectors. The kernel - * function pci_enable_msix() is called to enable the MSI-X vectors. Note that - * pci_enable_msix(), once invoked, enables either all or nothing, depending - * on the current availability of PCI vector resources. 
The device driver is - responsible for calling the individual request_irq() to register each MSI-X - vector with a interrupt handler, which is done in this function. Note that - later when device is unloading, the driver should always call free_irq() - on all MSI-X vectors it has done request_irq() on before calling - pci_disable_msix(). Failure to do so results in a BUG_ON() and a device - will be left with MSI-X enabled and leaks its vectors. + * This routine is invoked to stop an SLI3 device port; it stops the device + * from generating interrupts and stops the device driver's timers for the + * device. + **/ +static void +lpfc_stop_port_s3(struct lpfc_hba *phba) +{ + /* Clear all interrupt enable conditions */ + writel(0, phba->HCregaddr); + readl(phba->HCregaddr); /* flush */ + /* Clear all pending interrupts */ + writel(0xffffffff, phba->HAregaddr); + readl(phba->HAregaddr); /* flush */ + + /* Reset some HBA SLI setup states */ + lpfc_stop_hba_timers(phba); + phba->pport->work_port_events = 0; +} + +/** + * lpfc_stop_port_s4 - Stop SLI4 device port + * @phba: pointer to lpfc hba data structure. * - * Return codes - * 0 - sucessful - * other values - error + * This routine is invoked to stop an SLI4 device port; it stops the device + * from generating interrupts and stops the device driver's timers for the + * device. **/ -static int -lpfc_enable_msix(struct lpfc_hba *phba) +static void +lpfc_stop_port_s4(struct lpfc_hba *phba) { - int rc, i; - LPFC_MBOXQ_t *pmb; + /* Reset some HBA SLI4 setup states */ + lpfc_stop_hba_timers(phba); + phba->pport->work_port_events = 0; + phba->sli4_hba.intr_enable = 0; + /* Hard clear it for now, shall have more graceful way to wait later */ + phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; +} - /* Set up MSI-X multi-message vectors */ - for (i = 0; i < LPFC_MSIX_VECTORS; i++) - phba->msix_entries[i].entry = i; +/** + * lpfc_stop_port - Wrapper function for stopping hba port + * @phba: Pointer to HBA context object. + * + * This routine wraps the actual SLI3 or SLI4 hba stop port routine from + * the API jump table function pointer from the lpfc_hba struct. + **/ +void +lpfc_stop_port(struct lpfc_hba *phba) +{ + phba->lpfc_stop_port(phba); +} - /* Configure MSI-X capability structure */ - rc = pci_enable_msix(phba->pcidev, phba->msix_entries, - ARRAY_SIZE(phba->msix_entries)); - if (rc) { - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "0420 PCI enable MSI-X failed (%d)\n", rc); - goto msi_fail_out; - } else - for (i = 0; i < LPFC_MSIX_VECTORS; i++) - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "0477 MSI-X entry[%d]: vector=x%x " - "message=%d\n", i, - phba->msix_entries[i].vector, - phba->msix_entries[i].entry); +/** + * lpfc_sli_remove_dflt_fcf - Remove the driver default fcf record from the port. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to remove the driver default fcf record from + * the port. This routine currently acts on FCF Index 0. 
+ * + **/ +void +lpfc_sli_remove_dflt_fcf(struct lpfc_hba *phba) +{ + int rc = 0; + LPFC_MBOXQ_t *mboxq; + struct lpfc_mbx_del_fcf_tbl_entry *del_fcf_record; + uint32_t mbox_tmo, req_len; + uint32_t shdr_status, shdr_add_status; + + mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mboxq) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2020 Failed to allocate mbox for ADD_FCF cmd\n"); + return; + } + + req_len = sizeof(struct lpfc_mbx_del_fcf_tbl_entry) - + sizeof(struct lpfc_sli4_cfg_mhdr); + rc = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, + LPFC_MBOX_OPCODE_FCOE_DELETE_FCF, + req_len, LPFC_SLI4_MBX_EMBED); /* - * Assign MSI-X vectors to interrupt handlers + * In phase 1, there is a single FCF index, 0. In phase2, the driver + * supports multiple FCF indices. */ + del_fcf_record = &mboxq->u.mqe.un.del_fcf_entry; + bf_set(lpfc_mbx_del_fcf_tbl_count, del_fcf_record, 1); + bf_set(lpfc_mbx_del_fcf_tbl_index, del_fcf_record, + phba->fcf.fcf_indx); - /* vector-0 is associated to slow-path handler */ - rc = request_irq(phba->msix_entries[0].vector, &lpfc_sp_intr_handler, - IRQF_SHARED, LPFC_SP_DRIVER_HANDLER_NAME, phba); - if (rc) { - lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, - "0421 MSI-X slow-path request_irq failed " - "(%d)\n", rc); - goto msi_fail_out; + if (!phba->sli4_hba.intr_enable) + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + else { + mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); + rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); + } + /* The IOCTL status is embedded in the mailbox subheader. */ + shdr_status = bf_get(lpfc_mbox_hdr_status, + &del_fcf_record->header.cfg_shdr.response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, + &del_fcf_record->header.cfg_shdr.response); + if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "2516 DEL FCF of default FCF Index failed " + "mbx status x%x, status x%x add_status x%x\n", + rc, shdr_status, shdr_add_status); } + if (rc != MBX_TIMEOUT) + mempool_free(mboxq, phba->mbox_mem_pool); +} - /* vector-1 is associated to fast-path handler */ - rc = request_irq(phba->msix_entries[1].vector, &lpfc_fp_intr_handler, - IRQF_SHARED, LPFC_FP_DRIVER_HANDLER_NAME, phba); +/** + * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code + * @phba: pointer to lpfc hba data structure. + * @acqe_link: pointer to the async link completion queue entry. + * + * This routine is to parse the SLI4 link-attention link fault code and + * translate it into the base driver's read link attention mailbox command + * status. + * + * Return: Link-attention status in terms of base driver's coding. 
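lpfc_sli_remove_dflt_fcf() and the routines that follow read and write hardware words only through the driver's bf_get()/bf_set() accessors. A self-contained sketch of how such shift-and-mask helpers behave; the field layout below is invented for illustration:

#include <stdint.h>
#include <stdio.h>

/* Generic shift/mask helpers in the spirit of the driver's bf_set/bf_get. */
#define FIELD_SET(word, shift, mask, val) \
	((word) = ((word) & ~((mask) << (shift))) | (((val) & (mask)) << (shift)))
#define FIELD_GET(word, shift, mask) \
	(((word) >> (shift)) & (mask))

int main(void)
{
	uint32_t w = 0;

	/* Hypothetical layout: status in bits 0..7, add_status in bits 8..15. */
	FIELD_SET(w, 0, 0xffu, 0x2a);
	FIELD_SET(w, 8, 0xffu, 0x01);
	printf("status=0x%x add_status=0x%x\n",
	       FIELD_GET(w, 0, 0xffu), FIELD_GET(w, 8, 0xffu));
	return 0;
}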
+ **/ +static uint16_t +lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba, + struct lpfc_acqe_link *acqe_link) +{ + uint16_t latt_fault; - if (rc) { - lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, - "0429 MSI-X fast-path request_irq failed " - "(%d)\n", rc); - goto irq_fail_out; + switch (bf_get(lpfc_acqe_link_fault, acqe_link)) { + case LPFC_ASYNC_LINK_FAULT_NONE: + case LPFC_ASYNC_LINK_FAULT_LOCAL: + case LPFC_ASYNC_LINK_FAULT_REMOTE: + latt_fault = 0; + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0398 Invalid link fault code: x%x\n", + bf_get(lpfc_acqe_link_fault, acqe_link)); + latt_fault = MBXERR_ERROR; + break; } + return latt_fault; +} - /* - * Configure HBA MSI-X attention conditions to messages - */ - pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); +/** + * lpfc_sli4_parse_latt_type - Parse sli4 link attention type + * @phba: pointer to lpfc hba data structure. + * @acqe_link: pointer to the async link completion queue entry. + * + * This routine is to parse the SLI4 link attention type and translate it + * into the base driver's link attention type coding. + * + * Return: Link attention type in terms of base driver's coding. + **/ +static uint8_t +lpfc_sli4_parse_latt_type(struct lpfc_hba *phba, + struct lpfc_acqe_link *acqe_link) +{ + uint8_t att_type; - if (!pmb) { - rc = -ENOMEM; + switch (bf_get(lpfc_acqe_link_status, acqe_link)) { + case LPFC_ASYNC_LINK_STATUS_DOWN: + case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN: + att_type = AT_LINK_DOWN; + break; + case LPFC_ASYNC_LINK_STATUS_UP: + /* Ignore physical link up events - wait for logical link up */ + att_type = AT_RESERVED; + break; + case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP: + att_type = AT_LINK_UP; + break; + default: lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0474 Unable to allocate memory for issuing " - "MBOX_CONFIG_MSI command\n"); - goto mem_fail_out; + "0399 Invalid link attention type: x%x\n", + bf_get(lpfc_acqe_link_status, acqe_link)); + att_type = AT_RESERVED; + break; } - rc = lpfc_config_msi(phba, pmb); - if (rc) - goto mbx_fail_out; - rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); - if (rc != MBX_SUCCESS) { - lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, - "0351 Config MSI mailbox command failed, " - "mbxCmd x%x, mbxStatus x%x\n", - pmb->mb.mbxCommand, pmb->mb.mbxStatus); - goto mbx_fail_out; + return att_type; +} + +/** + * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed + * @phba: pointer to lpfc hba data structure. + * @acqe_link: pointer to the async link completion queue entry. + * + * This routine is to parse the SLI4 link-attention link speed and translate + * it into the base driver's link-attention link speed coding. + * + * Return: Link-attention link speed in terms of base driver's coding. 
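lpfc_sli4_parse_latt_fault() and lpfc_sli4_parse_latt_type() are pure translation routines from ACQE codes to the base driver's coding. The same mapping can be written as a lookup table; a sketch with invented enum values (the switch form in the patch is arguably clearer where cases need logging):

#include <stdio.h>

/* Illustrative event codes, not the driver's values. */
enum acqe_status { ST_DOWN, ST_UP, ST_LOGICAL_UP, ST_LOGICAL_DOWN, ST_MAX };
enum att_type    { AT_RESERVED, AT_LINK_UP, AT_LINK_DOWN };

/* Table form of the switch-based translation used above. */
static const enum att_type acqe_to_att[ST_MAX] = {
	[ST_DOWN]         = AT_LINK_DOWN,
	[ST_LOGICAL_DOWN] = AT_LINK_DOWN,
	[ST_UP]           = AT_RESERVED,	/* wait for logical link up */
	[ST_LOGICAL_UP]   = AT_LINK_UP,
};

static enum att_type parse_latt_type(unsigned int status)
{
	return status < ST_MAX ? acqe_to_att[status] : AT_RESERVED;
}

int main(void)
{
	printf("%d\n", parse_latt_type(ST_LOGICAL_UP));	/* prints 1 (AT_LINK_UP) */
	return 0;
}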
+ **/ +static uint8_t +lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba, + struct lpfc_acqe_link *acqe_link) +{ + uint8_t link_speed; + + switch (bf_get(lpfc_acqe_link_speed, acqe_link)) { + case LPFC_ASYNC_LINK_SPEED_ZERO: + link_speed = LA_UNKNW_LINK; + break; + case LPFC_ASYNC_LINK_SPEED_10MBPS: + link_speed = LA_UNKNW_LINK; + break; + case LPFC_ASYNC_LINK_SPEED_100MBPS: + link_speed = LA_UNKNW_LINK; + break; + case LPFC_ASYNC_LINK_SPEED_1GBPS: + link_speed = LA_1GHZ_LINK; + break; + case LPFC_ASYNC_LINK_SPEED_10GBPS: + link_speed = LA_10GHZ_LINK; + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0483 Invalid link-attention link speed: x%x\n", + bf_get(lpfc_acqe_link_speed, acqe_link)); + link_speed = LA_UNKNW_LINK; + break; } + return link_speed; +} - /* Free memory allocated for mailbox command */ - mempool_free(pmb, phba->mbox_mem_pool); - return rc; +/** + * lpfc_sli4_async_link_evt - Process the asynchronous link event + * @phba: pointer to lpfc hba data structure. + * @acqe_link: pointer to the async link completion queue entry. + * + * This routine is to handle the SLI4 asynchronous link event. + **/ +static void +lpfc_sli4_async_link_evt(struct lpfc_hba *phba, + struct lpfc_acqe_link *acqe_link) +{ + struct lpfc_dmabuf *mp; + LPFC_MBOXQ_t *pmb; + MAILBOX_t *mb; + READ_LA_VAR *la; + uint8_t att_type; -mbx_fail_out: - /* Free memory allocated for mailbox command */ - mempool_free(pmb, phba->mbox_mem_pool); + att_type = lpfc_sli4_parse_latt_type(phba, acqe_link); + if (att_type != AT_LINK_DOWN && att_type != AT_LINK_UP) + return; + pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!pmb) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "0395 The mboxq allocation failed\n"); + return; + } + mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); + if (!mp) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "0396 The lpfc_dmabuf allocation failed\n"); + goto out_free_pmb; + } + mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); + if (!mp->virt) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "0397 The mbuf allocation failed\n"); + goto out_free_dmabuf; + } -mem_fail_out: - /* free the irq already requested */ - free_irq(phba->msix_entries[1].vector, phba); + /* Cleanup any outstanding ELS commands */ + lpfc_els_flush_all_cmd(phba); -irq_fail_out: - /* free the irq already requested */ - free_irq(phba->msix_entries[0].vector, phba); + /* Block ELS IOCBs until we have done process link event */ + phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; -msi_fail_out: - /* Unconfigure MSI-X capability structure */ - pci_disable_msix(phba->pcidev); - return rc; + /* Update link event statistics */ + phba->sli.slistat.link_event++; + + /* Create pseudo lpfc_handle_latt mailbox command from link ACQE */ + lpfc_read_la(phba, pmb, mp); + pmb->vport = phba->pport; + + /* Parse and translate status field */ + mb = &pmb->u.mb; + mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link); + + /* Parse and translate link attention fields */ + la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA; + la->eventTag = acqe_link->event_tag; + la->attType = att_type; + la->UlnkSpeed = lpfc_sli4_parse_latt_link_speed(phba, acqe_link); + + /* Fake the the following irrelvant fields */ + la->topology = TOPOLOGY_PT_PT; + la->granted_AL_PA = 0; + la->il = 0; + la->pb = 0; + la->fa = 0; + la->mm = 0; + + /* Keep the link status for extra SLI4 state machine reference */ + phba->sli4_hba.link_state.speed = + bf_get(lpfc_acqe_link_speed, acqe_link); + 
phba->sli4_hba.link_state.duplex = + bf_get(lpfc_acqe_link_duplex, acqe_link); + phba->sli4_hba.link_state.status = + bf_get(lpfc_acqe_link_status, acqe_link); + phba->sli4_hba.link_state.physical = + bf_get(lpfc_acqe_link_physical, acqe_link); + phba->sli4_hba.link_state.fault = + bf_get(lpfc_acqe_link_fault, acqe_link); + + /* Invoke the lpfc_handle_latt mailbox command callback function */ + lpfc_mbx_cmpl_read_la(phba, pmb); + + return; + +out_free_dmabuf: + kfree(mp); +out_free_pmb: + mempool_free(pmb, phba->mbox_mem_pool); } /** - * lpfc_disable_msix - Disable MSI-X interrupt mode + * lpfc_sli4_async_fcoe_evt - Process the asynchronous fcoe event * @phba: pointer to lpfc hba data structure. + * @acqe_link: pointer to the async fcoe completion queue entry. * - * This routine is invoked to release the MSI-X vectors and then disable the - * MSI-X interrupt mode. + * This routine is to handle the SLI4 asynchronous fcoe event. **/ static void -lpfc_disable_msix(struct lpfc_hba *phba) +lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, + struct lpfc_acqe_fcoe *acqe_fcoe) { - int i; + uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe); + int rc; - /* Free up MSI-X multi-message vectors */ - for (i = 0; i < LPFC_MSIX_VECTORS; i++) - free_irq(phba->msix_entries[i].vector, phba); - /* Disable MSI-X */ - pci_disable_msix(phba->pcidev); + switch (event_type) { + case LPFC_FCOE_EVENT_TYPE_NEW_FCF: + lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, + "2546 New FCF found index 0x%x tag 0x%x \n", + acqe_fcoe->fcf_index, + acqe_fcoe->event_tag); + /* + * If the current FCF is in discovered state, + * do nothing. + */ + spin_lock_irq(&phba->hbalock); + if (phba->fcf.fcf_flag & FCF_DISCOVERED) { + spin_unlock_irq(&phba->hbalock); + break; + } + spin_unlock_irq(&phba->hbalock); + + /* Read the FCF table and re-discover SAN. */ + rc = lpfc_sli4_read_fcf_record(phba, + LPFC_FCOE_FCF_GET_FIRST); + if (rc) + lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, + "2547 Read FCF record failed 0x%x\n", + rc); + break; + + case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL: + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "2548 FCF Table full count 0x%x tag 0x%x \n", + bf_get(lpfc_acqe_fcoe_fcf_count, acqe_fcoe), + acqe_fcoe->event_tag); + break; + + case LPFC_FCOE_EVENT_TYPE_FCF_DEAD: + lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, + "2549 FCF disconnected fron network index 0x%x" + " tag 0x%x \n", acqe_fcoe->fcf_index, + acqe_fcoe->event_tag); + /* If the event is not for currently used fcf do nothing */ + if (phba->fcf.fcf_indx != acqe_fcoe->fcf_index) + break; + /* + * Currently, driver support only one FCF - so treat this as + * a link down. + */ + lpfc_linkdown(phba); + /* Unregister FCF if no devices connected to it */ + lpfc_unregister_unused_fcf(phba); + break; + + default: + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "0288 Unknown FCoE event type 0x%x event tag " + "0x%x\n", event_type, acqe_fcoe->event_tag); + break; + } } /** - * lpfc_enable_msi - Enable MSI interrupt mode + * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event * @phba: pointer to lpfc hba data structure. + * @acqe_link: pointer to the async dcbx completion queue entry. * - * This routine is invoked to enable the MSI interrupt mode. The kernel - * function pci_enable_msi() is called to enable the MSI vector. The - * device driver is responsible for calling the request_irq() to register - * MSI vector with a interrupt the handler, which is done in this function. 
- * - * Return codes - * 0 - sucessful - * other values - error - */ -static int -lpfc_enable_msi(struct lpfc_hba *phba) + * This routine is to handle the SLI4 asynchronous dcbx event. + **/ +static void +lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba, + struct lpfc_acqe_dcbx *acqe_dcbx) { - int rc; + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "0290 The SLI4 DCBX asynchronous event is not " + "handled yet\n"); +} - rc = pci_enable_msi(phba->pcidev); - if (!rc) - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "0462 PCI enable MSI mode success.\n"); - else { - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "0471 PCI enable MSI mode failed (%d)\n", rc); - return rc; - } +/** + * lpfc_sli4_async_event_proc - Process all the pending asynchronous event + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked by the worker thread to process all the pending + * SLI4 asynchronous events. + **/ +void lpfc_sli4_async_event_proc(struct lpfc_hba *phba) +{ + struct lpfc_cq_event *cq_event; - rc = request_irq(phba->pcidev->irq, lpfc_intr_handler, - IRQF_SHARED, LPFC_DRIVER_NAME, phba); - if (rc) { - pci_disable_msi(phba->pcidev); - lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, - "0478 MSI request_irq failed (%d)\n", rc); + /* First, declare the async event has been handled */ + spin_lock_irq(&phba->hbalock); + phba->hba_flag &= ~ASYNC_EVENT; + spin_unlock_irq(&phba->hbalock); + /* Now, handle all the async events */ + while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) { + /* Get the first event from the head of the event queue */ + spin_lock_irq(&phba->hbalock); + list_remove_head(&phba->sli4_hba.sp_asynce_work_queue, + cq_event, struct lpfc_cq_event, list); + spin_unlock_irq(&phba->hbalock); + /* Process the asynchronous event */ + switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) { + case LPFC_TRAILER_CODE_LINK: + lpfc_sli4_async_link_evt(phba, + &cq_event->cqe.acqe_link); + break; + case LPFC_TRAILER_CODE_FCOE: + lpfc_sli4_async_fcoe_evt(phba, + &cq_event->cqe.acqe_fcoe); + break; + case LPFC_TRAILER_CODE_DCBX: + lpfc_sli4_async_dcbx_evt(phba, + &cq_event->cqe.acqe_dcbx); + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "1804 Invalid asynchrous event code: " + "x%x\n", bf_get(lpfc_trailer_code, + &cq_event->cqe.mcqe_cmpl)); + break; + } + /* Free the completion event processed to the free pool */ + lpfc_sli4_cq_event_release(phba, cq_event); } - return rc; } /** - * lpfc_disable_msi - Disable MSI interrupt mode + * lpfc_api_table_setup - Set up per hba pci-device group func api jump table * @phba: pointer to lpfc hba data structure. + * @dev_grp: The HBA PCI-Device group number. * - * This routine is invoked to disable the MSI interrupt mode. The driver - * calls free_irq() on MSI vector it has done request_irq() on before - * calling pci_disable_msi(). Failure to do so results in a BUG_ON() and - * a device will be left with MSI enabled and leaks its vector. - */ - -static void -lpfc_disable_msi(struct lpfc_hba *phba) + * This routine is invoked to set up the per HBA PCI-Device group function + * API jump table entries. 
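lpfc_sli4_async_event_proc() above shows the worker-thread drain pattern: clear the ASYNC_EVENT flag under the lock, then repeatedly detach one queued event under the lock and process it unlocked. A standalone sketch of the same shape using a pthread mutex and a singly linked list, all names invented:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct cq_event { int code; struct cq_event *next; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct cq_event *queue;	/* stand-in for sp_asynce_work_queue */

/* Drain pattern: hold the lock only to detach one node, process unlocked. */
static void async_event_proc(void)
{
	struct cq_event *ev;

	for (;;) {
		pthread_mutex_lock(&lock);
		ev = queue;
		if (ev)
			queue = ev->next;
		pthread_mutex_unlock(&lock);
		if (!ev)
			break;
		printf("handling event code %d\n", ev->code);
		free(ev);	/* the driver returns it to a free pool instead */
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct cq_event *ev = malloc(sizeof(*ev));
		ev->code = i;
		ev->next = queue;
		queue = ev;
	}
	async_event_proc();
	return 0;
}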
+ * + * Return: 0 if success, otherwise -ENODEV + **/ +int +lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) { - free_irq(phba->pcidev->irq, phba); - pci_disable_msi(phba->pcidev); - return; + int rc; + + /* Set up lpfc PCI-device group */ + phba->pci_dev_grp = dev_grp; + + /* The LPFC_PCI_DEV_OC uses SLI4 */ + if (dev_grp == LPFC_PCI_DEV_OC) + phba->sli_rev = LPFC_SLI_REV4; + + /* Set up device INIT API function jump table */ + rc = lpfc_init_api_table_setup(phba, dev_grp); + if (rc) + return -ENODEV; + /* Set up SCSI API function jump table */ + rc = lpfc_scsi_api_table_setup(phba, dev_grp); + if (rc) + return -ENODEV; + /* Set up SLI API function jump table */ + rc = lpfc_sli_api_table_setup(phba, dev_grp); + if (rc) + return -ENODEV; + /* Set up MBOX API function jump table */ + rc = lpfc_mbox_api_table_setup(phba, dev_grp); + if (rc) + return -ENODEV; + + return 0; } /** @@ -2764,6 +3271,292 @@ lpfc_sli_driver_resource_unset(struct lpfc_hba *phba) } /** + * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to set up the driver internal resources specific to + * support the SLI-4 HBA device it attached to. + * + * Return codes + * 0 - sucessful + * other values - error + **/ +static int +lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) +{ + struct lpfc_sli *psli; + int rc; + int i, hbq_count; + + /* Before proceed, wait for POST done and device ready */ + rc = lpfc_sli4_post_status_check(phba); + if (rc) + return -ENODEV; + + /* + * Initialize timers used by driver + */ + + /* Heartbeat timer */ + init_timer(&phba->hb_tmofunc); + phba->hb_tmofunc.function = lpfc_hb_timeout; + phba->hb_tmofunc.data = (unsigned long)phba; + + psli = &phba->sli; + /* MBOX heartbeat timer */ + init_timer(&psli->mbox_tmo); + psli->mbox_tmo.function = lpfc_mbox_timeout; + psli->mbox_tmo.data = (unsigned long) phba; + /* Fabric block timer */ + init_timer(&phba->fabric_block_timer); + phba->fabric_block_timer.function = lpfc_fabric_block_timeout; + phba->fabric_block_timer.data = (unsigned long) phba; + /* EA polling mode timer */ + init_timer(&phba->eratt_poll); + phba->eratt_poll.function = lpfc_poll_eratt; + phba->eratt_poll.data = (unsigned long) phba; + /* + * We need to do a READ_CONFIG mailbox command here before + * calling lpfc_get_cfgparam. For VFs this will report the + * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings. + * All of the resources allocated + * for this Port are tied to these values. + */ + /* Get all the module params for configuring this host */ + lpfc_get_cfgparam(phba); + phba->max_vpi = LPFC_MAX_VPI; + /* This will be set to correct value after the read_config mbox */ + phba->max_vports = 0; + + /* Program the default value of vlan_id and fc_map */ + phba->valid_vlan = 0; + phba->fc_map[0] = LPFC_FCOE_FCF_MAP0; + phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; + phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; + + /* + * Since the sg_tablesize is module parameter, the sg_dma_buf_size + * used to create the sg_dma_buf_pool must be dynamically calculated. + * 2 segments are added since the IOCB needs a command and response bde. + * To insure that the scsi sgl does not cross a 4k page boundary only + * sgl sizes of 1k, 2k, 4k, and 8k are supported. 
+ * Table of sgl sizes and seg_cnt: + * sgl size, sg_seg_cnt total seg + * 1k 50 52 + * 2k 114 116 + * 4k 242 244 + * 8k 498 500 + * cmd(32) + rsp(160) + (52 * sizeof(sli4_sge)) = 1024 + * cmd(32) + rsp(160) + (116 * sizeof(sli4_sge)) = 2048 + * cmd(32) + rsp(160) + (244 * sizeof(sli4_sge)) = 4096 + * cmd(32) + rsp(160) + (500 * sizeof(sli4_sge)) = 8192 + */ + if (phba->cfg_sg_seg_cnt <= LPFC_DEFAULT_SG_SEG_CNT) + phba->cfg_sg_seg_cnt = 50; + else if (phba->cfg_sg_seg_cnt <= 114) + phba->cfg_sg_seg_cnt = 114; + else if (phba->cfg_sg_seg_cnt <= 242) + phba->cfg_sg_seg_cnt = 242; + else + phba->cfg_sg_seg_cnt = 498; + + phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + + sizeof(struct fcp_rsp); + phba->cfg_sg_dma_buf_size += + ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge)); + + /* Initialize buffer queue management fields */ + hbq_count = lpfc_sli_hbq_count(); + for (i = 0; i < hbq_count; ++i) + INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); + INIT_LIST_HEAD(&phba->rb_pend_list); + phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc; + phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free; + + /* + * Initialize the SLI Layer to run with lpfc SLI4 HBAs. + */ + /* Initialize the Abort scsi buffer list used by driver */ + spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock); + INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list); + /* This abort list is used by the worker thread */ + spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock); + + /* + * Initialize driver internal slow-path work queues + */ + + /* Driver internal slow-path CQ Event pool */ + INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool); + /* Response IOCB work queue list */ + INIT_LIST_HEAD(&phba->sli4_hba.sp_rspiocb_work_queue); + /* Asynchronous event CQ Event work queue list */ + INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue); + /* Fast-path XRI aborted CQ Event work queue list */ + INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue); + /* Slow-path XRI aborted CQ Event work queue list */ + INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue); + /* Receive queue CQ Event work queue list */ + INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue); + + /* Initialize the driver internal SLI layer lists. */ + lpfc_sli_setup(phba); + lpfc_sli_queue_setup(phba); + + /* Allocate device driver memory */ + rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ); + if (rc) + return -ENOMEM; + + /* Create the bootstrap mailbox command */ + rc = lpfc_create_bootstrap_mbox(phba); + if (unlikely(rc)) + goto out_free_mem; + + /* Set up the host's endian order with the device. */ + rc = lpfc_setup_endian_order(phba); + if (unlikely(rc)) + goto out_free_bsmbx; + + /* Set up the hba's configuration parameters.
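The segment-count tiers in the comment above can be checked by arithmetic: with a 32-byte command, a 160-byte response, and 16-byte SGEs, each tier plus its two BDE entries lands exactly on a power-of-two buffer. A small program reproducing the table; the constants are restated from the comment, and sizeof(struct sli4_sge) is assumed to be 16 as the table implies:

#include <stdio.h>

#define CMD_SZ 32
#define RSP_SZ 160
#define SGE_SZ 16

/* Round a requested segment count up to one of the supported tiers. */
static int round_seg_cnt(int req)
{
	if (req <= 50)  return 50;
	if (req <= 114) return 114;
	if (req <= 242) return 242;
	return 498;
}

int main(void)
{
	for (int req = 1; req <= 498; req += 100) {
		int cnt = round_seg_cnt(req);
		/* +2 covers the command and response BDEs. */
		int buf = CMD_SZ + RSP_SZ + (cnt + 2) * SGE_SZ;
		printf("req=%3d -> sg_seg_cnt=%3d, dma_buf=%4d bytes\n",
		       req, cnt, buf);	/* lands exactly on 1k/2k/4k/8k */
	}
	return 0;
}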
*/ + rc = lpfc_sli4_read_config(phba); + if (unlikely(rc)) + goto out_free_bsmbx; + + /* Perform a function reset */ + rc = lpfc_pci_function_reset(phba); + if (unlikely(rc)) + goto out_free_bsmbx; + + /* Create all the SLI4 queues */ + rc = lpfc_sli4_queue_create(phba); + if (rc) + goto out_free_bsmbx; + + /* Create driver internal CQE event pool */ + rc = lpfc_sli4_cq_event_pool_create(phba); + if (rc) + goto out_destroy_queue; + + /* Initialize and populate the iocb list per host */ + rc = lpfc_init_sgl_list(phba); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1400 Failed to initialize sgl list.\n"); + goto out_destroy_cq_event_pool; + } + rc = lpfc_init_active_sgl_array(phba); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1430 Failed to initialize sgl list.\n"); + goto out_free_sgl_list; + } + + rc = lpfc_sli4_init_rpi_hdrs(phba); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1432 Failed to initialize rpi headers.\n"); + goto out_free_active_sgl; + } + + phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) * + phba->cfg_fcp_eq_count), GFP_KERNEL); + if (!phba->sli4_hba.fcp_eq_hdl) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2572 Failed allocate memory for fast-path " + "per-EQ handle array\n"); + goto out_remove_rpi_hdrs; + } + + phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) * + phba->sli4_hba.cfg_eqn), GFP_KERNEL); + if (!phba->sli4_hba.msix_entries) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2573 Failed allocate memory for msi-x " + "interrupt vector entries\n"); + goto out_free_fcp_eq_hdl; + } + + return rc; + +out_free_fcp_eq_hdl: + kfree(phba->sli4_hba.fcp_eq_hdl); +out_remove_rpi_hdrs: + lpfc_sli4_remove_rpi_hdrs(phba); +out_free_active_sgl: + lpfc_free_active_sgl(phba); +out_free_sgl_list: + lpfc_free_sgl_list(phba); +out_destroy_cq_event_pool: + lpfc_sli4_cq_event_pool_destroy(phba); +out_destroy_queue: + lpfc_sli4_queue_destroy(phba); +out_free_bsmbx: + lpfc_destroy_bootstrap_mbox(phba); +out_free_mem: + lpfc_mem_free(phba); + return rc; +} + +/** + * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to unset the driver internal resources set up + * specific for supporting the SLI-4 HBA device it attached to. + **/ +static void +lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba) +{ + struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry; + + /* unregister default FCFI from the HBA */ + lpfc_sli4_fcfi_unreg(phba, phba->fcf.fcfi); + + /* Free the default FCR table */ + lpfc_sli_remove_dflt_fcf(phba); + + /* Free memory allocated for msi-x interrupt vector entries */ + kfree(phba->sli4_hba.msix_entries); + + /* Free memory allocated for fast-path work queue handles */ + kfree(phba->sli4_hba.fcp_eq_hdl); + + /* Free the allocated rpi headers. */ + lpfc_sli4_remove_rpi_hdrs(phba); + + /* Free the ELS sgl list */ + lpfc_free_active_sgl(phba); + lpfc_free_sgl_list(phba); + + /* Free the SCSI sgl management array */ + kfree(phba->sli4_hba.lpfc_scsi_psb_array); + + /* Free the SLI4 queues */ + lpfc_sli4_queue_destroy(phba); + + /* Free the completion queue EQ event pool */ + lpfc_sli4_cq_event_release_all(phba); + lpfc_sli4_cq_event_pool_destroy(phba); + + /* Reset SLI4 HBA FCoE function */ + lpfc_pci_function_reset(phba); + + /* Free the bsmbx region. 
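The tail of lpfc_sli4_driver_resource_setup() above is the classic kernel unwind ladder: each allocation failure jumps to the label that releases exactly what was acquired before it, in reverse order. A runnable miniature of the idiom, resource names invented:

#include <stdio.h>
#include <stdlib.h>

static int resource_setup(void)
{
	void *bsmbx, *queues, *sgls;

	bsmbx = malloc(64);
	if (!bsmbx)
		goto out;
	queues = malloc(64);
	if (!queues)
		goto out_free_bsmbx;
	sgls = malloc(64);
	if (!sgls)
		goto out_free_queues;

	puts("setup complete");
	/* Demo only: a real driver keeps these until teardown. */
	free(sgls);
	free(queues);
	free(bsmbx);
	return 0;

out_free_queues:
	free(queues);
out_free_bsmbx:
	free(bsmbx);
out:
	return -1;
}

int main(void) { return resource_setup(); }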
*/ + lpfc_destroy_bootstrap_mbox(phba); + + /* Free the SLI Layer memory with SLI4 HBAs */ + lpfc_mem_free_all(phba); + + /* Free the current connect table */ + list_for_each_entry_safe(conn_entry, next_conn_entry, + &phba->fcf_conn_rec_list, list) + kfree(conn_entry); + + return; +} + +/** * lpfc_init_api_table_setup - Set up init api fucntion jump table * @phba: The hba struct for which this call is being executed. * @dev_grp: The HBA PCI-Device group number. @@ -2782,6 +3575,11 @@ lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) phba->lpfc_handle_eratt = lpfc_handle_eratt_s3; phba->lpfc_stop_port = lpfc_stop_port_s3; break; + case LPFC_PCI_DEV_OC: + phba->lpfc_hba_down_post = lpfc_hba_down_post_s4; + phba->lpfc_handle_eratt = lpfc_handle_eratt_s4; + phba->lpfc_stop_port = lpfc_stop_port_s4; + break; default: lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "1431 Invalid HBA PCI-device group: 0x%x\n", @@ -2956,6 +3754,346 @@ out_free_iocbq: } /** + * lpfc_free_sgl_list - Free sgl list. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to free the driver's sgl list and memory. + **/ +static void +lpfc_free_sgl_list(struct lpfc_hba *phba) +{ + struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; + LIST_HEAD(sglq_list); + int rc = 0; + + spin_lock_irq(&phba->hbalock); + list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list); + spin_unlock_irq(&phba->hbalock); + + list_for_each_entry_safe(sglq_entry, sglq_next, + &sglq_list, list) { + list_del(&sglq_entry->list); + lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys); + kfree(sglq_entry); + phba->sli4_hba.total_sglq_bufs--; + } + rc = lpfc_sli4_remove_all_sgl_pages(phba); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "2005 Unable to deregister pages from HBA: %x", rc); + } + kfree(phba->sli4_hba.lpfc_els_sgl_array); +} + +/** + * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to allocate the driver's active sgl memory. + * This array will hold the sglq_entry's for active IOs. + **/ +static int +lpfc_init_active_sgl_array(struct lpfc_hba *phba) +{ + int size; + size = sizeof(struct lpfc_sglq *); + size *= phba->sli4_hba.max_cfg_param.max_xri; + + phba->sli4_hba.lpfc_sglq_active_list = + kzalloc(size, GFP_KERNEL); + if (!phba->sli4_hba.lpfc_sglq_active_list) + return -ENOMEM; + return 0; +} + +/** + * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to walk through the array of active sglq entries + * and free all of the resources. + * This is just a place holder for now. + **/ +static void +lpfc_free_active_sgl(struct lpfc_hba *phba) +{ + kfree(phba->sli4_hba.lpfc_sglq_active_list); +} + +/** + * lpfc_init_sgl_list - Allocate and initialize sgl list. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to allocate and initizlize the driver's sgl + * list and set up the sgl xritag tag array accordingly. + * + * Return codes + * 0 - sucessful + * other values - error + **/ +static int +lpfc_init_sgl_list(struct lpfc_hba *phba) +{ + struct lpfc_sglq *sglq_entry = NULL; + int i; + int els_xri_cnt; + + els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "2400 lpfc_init_sgl_list els %d.\n", + els_xri_cnt); + /* Initialize and populate the sglq list per host/VF. 
*/ + INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list); + INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list); + + /* Sanity check on XRI management */ + if (phba->sli4_hba.max_cfg_param.max_xri <= els_xri_cnt) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "2562 No room left for SCSI XRI allocation: " + "max_xri=%d, els_xri=%d\n", + phba->sli4_hba.max_cfg_param.max_xri, + els_xri_cnt); + return -ENOMEM; + } + + /* Allocate memory for the ELS XRI management array */ + phba->sli4_hba.lpfc_els_sgl_array = + kzalloc((sizeof(struct lpfc_sglq *) * els_xri_cnt), + GFP_KERNEL); + + if (!phba->sli4_hba.lpfc_els_sgl_array) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "2401 Failed to allocate memory for ELS " + "XRI management array of size %d.\n", + els_xri_cnt); + return -ENOMEM; + } + + /* Keep the SCSI XRIs in the XRI management array */ + phba->sli4_hba.scsi_xri_max = + phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; + phba->sli4_hba.scsi_xri_cnt = 0; + + phba->sli4_hba.lpfc_scsi_psb_array = + kzalloc((sizeof(struct lpfc_scsi_buf *) * + phba->sli4_hba.scsi_xri_max), GFP_KERNEL); + + if (!phba->sli4_hba.lpfc_scsi_psb_array) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "2563 Failed to allocate memory for SCSI " + "XRI management array of size %d.\n", + phba->sli4_hba.scsi_xri_max); + kfree(phba->sli4_hba.lpfc_els_sgl_array); + return -ENOMEM; + } + + for (i = 0; i < els_xri_cnt; i++) { + sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL); + if (sglq_entry == NULL) { + printk(KERN_ERR "%s: only allocated %d sgls of " + "expected %d count. Unloading driver.\n", + __func__, i, els_xri_cnt); + goto out_free_mem; + } + + sglq_entry->sli4_xritag = lpfc_sli4_next_xritag(phba); + if (sglq_entry->sli4_xritag == NO_XRI) { + kfree(sglq_entry); + printk(KERN_ERR "%s: failed to allocate XRI.\n" + "Unloading driver.\n", __func__); + goto out_free_mem; + } + sglq_entry->buff_type = GEN_BUFF_TYPE; + sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys); + if (sglq_entry->virt == NULL) { + kfree(sglq_entry); + printk(KERN_ERR "%s: failed to allocate mbuf.\n" + "Unloading driver.\n", __func__); + goto out_free_mem; + } + sglq_entry->sgl = sglq_entry->virt; + memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE); + + /* The list order is used by later block SGL registration */ + spin_lock_irq(&phba->hbalock); + list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list); + phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry; + phba->sli4_hba.total_sglq_bufs++; + spin_unlock_irq(&phba->hbalock); + } + return 0; + +out_free_mem: + kfree(phba->sli4_hba.lpfc_scsi_psb_array); + lpfc_free_sgl_list(phba); + return -ENOMEM; +} + +/** + * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to post rpi header templates to the + * HBA consistent with the SLI-4 interface spec. This routine + * posts a PAGE_SIZE memory region to the port to hold up to + * PAGE_SIZE modulo 64 rpi context headers. + * No locks are held here because this is an initialization routine + * called only from probe or lpfc_online when interrupts are not + * enabled and the driver is reinitializing the device. + * + * Return codes + * 0 - successful + * ENOMEM - No available memory + * EIO - The mailbox failed to complete successfully.
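lpfc_init_sgl_list() above budgets the XRI space: ELS gets els_xri_cnt entries and SCSI gets whatever remains, with a sanity check that something does remain. The arithmetic in isolation, with invented counts:

#include <stdio.h>

int main(void)
{
	int max_xri = 1024;	/* as granted by READ_CONFIG, invented here */
	int els_xri_cnt = 64;	/* reserved for ELS sgls, invented here */

	if (max_xri <= els_xri_cnt) {
		fprintf(stderr, "no room left for SCSI XRI allocation\n");
		return 1;
	}
	printf("scsi_xri_max = %d\n", max_xri - els_xri_cnt);
	return 0;
}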
+ **/ +int +lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba) +{ + int rc = 0; + int longs; + uint16_t rpi_count; + struct lpfc_rpi_hdr *rpi_hdr; + + INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list); + + /* + * Provision an rpi bitmask range for discovery. The total count + * is the difference between max and base + 1. + */ + rpi_count = phba->sli4_hba.max_cfg_param.rpi_base + + phba->sli4_hba.max_cfg_param.max_rpi - 1; + + longs = ((rpi_count) + BITS_PER_LONG - 1) / BITS_PER_LONG; + phba->sli4_hba.rpi_bmask = kzalloc(longs * sizeof(unsigned long), + GFP_KERNEL); + if (!phba->sli4_hba.rpi_bmask) + return -ENOMEM; + + rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); + if (!rpi_hdr) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "0391 Error during rpi post operation\n"); + lpfc_sli4_remove_rpis(phba); + rc = -ENODEV; + } + + return rc; +} + +/** + * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to allocate a single 4KB memory region to + * support rpis and stores them in the phba. This single region + * provides support for up to 64 rpis. The region is used globally + * by the device. + * + * Returns: + * A valid rpi hdr on success. + * A NULL pointer on any failure. + **/ +struct lpfc_rpi_hdr * +lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba) +{ + uint16_t rpi_limit, curr_rpi_range; + struct lpfc_dmabuf *dmabuf; + struct lpfc_rpi_hdr *rpi_hdr; + + rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base + + phba->sli4_hba.max_cfg_param.max_rpi - 1; + + spin_lock_irq(&phba->hbalock); + curr_rpi_range = phba->sli4_hba.next_rpi; + spin_unlock_irq(&phba->hbalock); + + /* + * The port has a limited number of rpis. The increment here + * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value + * and to allow the full max_rpi range per port. + */ + if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit) + return NULL; + + /* + * First allocate the protocol header region for the port. The + * port expects a 4KB DMA-mapped memory region that is 4K aligned. + */ + dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); + if (!dmabuf) + return NULL; + + dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, + LPFC_HDR_TEMPLATE_SIZE, + &dmabuf->phys, + GFP_KERNEL); + if (!dmabuf->virt) { + rpi_hdr = NULL; + goto err_free_dmabuf; + } + + memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE); + if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) { + rpi_hdr = NULL; + goto err_free_coherent; + } + + /* Save the rpi header data for cleanup later. */ + rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL); + if (!rpi_hdr) + goto err_free_coherent; + + rpi_hdr->dmabuf = dmabuf; + rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE; + rpi_hdr->page_count = 1; + spin_lock_irq(&phba->hbalock); + rpi_hdr->start_rpi = phba->sli4_hba.next_rpi; + list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list); + + /* + * The next_rpi stores the next module-64 rpi value to post + * in any subsequent rpi memory region postings. + */ + phba->sli4_hba.next_rpi += LPFC_RPI_HDR_COUNT; + spin_unlock_irq(&phba->hbalock); + return rpi_hdr; + + err_free_coherent: + dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE, + dmabuf->virt, dmabuf->phys); + err_free_dmabuf: + kfree(dmabuf); + return NULL; +} + +/** + * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to remove all memory resources allocated + * to support rpis. 
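lpfc_sli4_init_rpi_hdrs() above provisions one bit per rpi, rounded up to whole longs, the standard kernel bitmap-sizing idiom. A userspace equivalent; the count is invented and BITS_PER_LONG is recomputed locally rather than taken from kernel headers:

#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

int main(void)
{
	unsigned int rpi_count = 64;	/* invented */
	size_t longs = (rpi_count + BITS_PER_LONG - 1) / BITS_PER_LONG;
	unsigned long *bmask = calloc(longs, sizeof(*bmask));

	if (!bmask)
		return 1;
	printf("%u rpis -> %zu long(s) of bitmap\n", rpi_count, longs);
	free(bmask);
	return 0;
}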
This routine presumes the caller has released all + * rpis consumed by fabric or port logins and is prepared to have + * the header pages removed. + **/ +void +lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba) +{ + struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr; + + list_for_each_entry_safe(rpi_hdr, next_rpi_hdr, + &phba->sli4_hba.lpfc_rpi_hdr_list, list) { + list_del(&rpi_hdr->list); + dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len, + rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys); + kfree(rpi_hdr->dmabuf); + kfree(rpi_hdr); + } + + phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base; + memset(phba->sli4_hba.rpi_bmask, 0, sizeof(*phba->sli4_hba.rpi_bmask)); +} + +/** * lpfc_hba_alloc - Allocate driver hba data structure for a device. * @pdev: pointer to pci device data structure. * @@ -3316,6 +4454,1542 @@ lpfc_sli_pci_mem_unset(struct lpfc_hba *phba) } /** + * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to wait for SLI4 device Power On Self Test (POST) + * done and check status. + * + * Return 0 if successful, otherwise -ENODEV. + **/ +int +lpfc_sli4_post_status_check(struct lpfc_hba *phba) +{ + struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg, scratchpad; + uint32_t onlnreg0, onlnreg1; + int i, port_error = -ENODEV; + + if (!phba->sli4_hba.STAregaddr) + return -ENODEV; + + /* With uncoverable error, log the error message and return error */ + onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr); + onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr); + if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) { + uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr); + uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr); + if (uerrlo_reg.word0 || uerrhi_reg.word0) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1422 HBA Unrecoverable error: " + "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, " + "online0_reg=0x%x, online1_reg=0x%x\n", + uerrlo_reg.word0, uerrhi_reg.word0, + onlnreg0, onlnreg1); + } + return -ENODEV; + } + + /* Wait up to 30 seconds for the SLI Port POST done and ready */ + for (i = 0; i < 3000; i++) { + sta_reg.word0 = readl(phba->sli4_hba.STAregaddr); + /* Encounter fatal POST error, break out */ + if (bf_get(lpfc_hst_state_perr, &sta_reg)) { + port_error = -ENODEV; + break; + } + if (LPFC_POST_STAGE_ARMFW_READY == + bf_get(lpfc_hst_state_port_status, &sta_reg)) { + port_error = 0; + break; + } + msleep(10); + } + + if (port_error) + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1408 Failure HBA POST Status: sta_reg=0x%x, " + "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, xrom=x%x, " + "dl=x%x, pstatus=x%x\n", sta_reg.word0, + bf_get(lpfc_hst_state_perr, &sta_reg), + bf_get(lpfc_hst_state_sfi, &sta_reg), + bf_get(lpfc_hst_state_nip, &sta_reg), + bf_get(lpfc_hst_state_ipc, &sta_reg), + bf_get(lpfc_hst_state_xrom, &sta_reg), + bf_get(lpfc_hst_state_dl, &sta_reg), + bf_get(lpfc_hst_state_port_status, &sta_reg)); + + /* Log device information */ + scratchpad.word0 = readl(phba->sli4_hba.SCRATCHPADregaddr); + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "2534 Device Info: ChipType=0x%x, SliRev=0x%x, " + "FeatureL1=0x%x, FeatureL2=0x%x\n", + bf_get(lpfc_scratchpad_chiptype, &scratchpad), + bf_get(lpfc_scratchpad_slirev, &scratchpad), + bf_get(lpfc_scratchpad_featurelevel1, &scratchpad), + bf_get(lpfc_scratchpad_featurelevel2, &scratchpad)); + + return port_error; +} + +/** + * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map. 
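lpfc_sli4_post_status_check() above polls the status register up to 3000 times at 10 ms intervals, a 30-second budget, before declaring POST failed. The poll-with-timeout skeleton in isolation; the readiness probe here is faked:

#include <stdio.h>
#include <unistd.h>

static int port_ready(int i) { return i > 5; }	/* stands in for the ARMFW_READY test */

int main(void)
{
	int rc = -1;

	for (int i = 0; i < 3000; i++) {
		if (port_ready(i)) {
			rc = 0;
			break;
		}
		usleep(10 * 1000);	/* 10 ms between probes */
	}
	printf("POST %s\n", rc ? "failed" : "done");
	return rc ? 1 : 0;
}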
+ * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to set up SLI4 BAR0 PCI config space register + * memory map. + **/ +static void +lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba) +{ + phba->sli4_hba.UERRLOregaddr = phba->sli4_hba.conf_regs_memmap_p + + LPFC_UERR_STATUS_LO; + phba->sli4_hba.UERRHIregaddr = phba->sli4_hba.conf_regs_memmap_p + + LPFC_UERR_STATUS_HI; + phba->sli4_hba.ONLINE0regaddr = phba->sli4_hba.conf_regs_memmap_p + + LPFC_ONLINE0; + phba->sli4_hba.ONLINE1regaddr = phba->sli4_hba.conf_regs_memmap_p + + LPFC_ONLINE1; + phba->sli4_hba.SCRATCHPADregaddr = phba->sli4_hba.conf_regs_memmap_p + + LPFC_SCRATCHPAD; +} + +/** + * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to set up SLI4 BAR1 control status register (CSR) + * memory map. + **/ +static void +lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba) +{ + + phba->sli4_hba.STAregaddr = phba->sli4_hba.ctrl_regs_memmap_p + + LPFC_HST_STATE; + phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + + LPFC_HST_ISR0; + phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + + LPFC_HST_IMR0; + phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + + LPFC_HST_ISCR0; + return; +} + +/** + * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map. + * @phba: pointer to lpfc hba data structure. + * @vf: virtual function number + * + * This routine is invoked to set up SLI4 BAR2 doorbell register memory map + * based on the given viftual function number, @vf. + * + * Return 0 if successful, otherwise -ENODEV. + **/ +static int +lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf) +{ + if (vf > LPFC_VIR_FUNC_MAX) + return -ENODEV; + + phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + + vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL); + phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + + vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL); + phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + + vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL); + phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + + vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL); + phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p + + vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX); + return 0; +} + +/** + * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to create the bootstrap mailbox + * region consistent with the SLI-4 interface spec. This + * routine allocates all memory necessary to communicate + * mailbox commands to the port and sets up all alignment + * needs. No locks are expected to be held when calling + * this routine. + * + * Return codes + * 0 - sucessful + * ENOMEM - could not allocated memory. + **/ +static int +lpfc_create_bootstrap_mbox(struct lpfc_hba *phba) +{ + uint32_t bmbx_size; + struct lpfc_dmabuf *dmabuf; + struct dma_address *dma_address; + uint32_t pa_addr; + uint64_t phys_addr; + + dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); + if (!dmabuf) + return -ENOMEM; + + /* + * The bootstrap mailbox region is comprised of 2 parts + * plus an alignment restriction of 16 bytes. 
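lpfc_sli4_bar2_register_memmap() above computes each doorbell address as BAR2 base + vf * page size + register offset, one doorbell page per virtual function. A sketch of just the offset arithmetic; the offsets and page size below are invented, not the part's real values:

#include <stdio.h>

#define VFR_PAGE_SIZE 0x1000u	/* invented */
#define RQ_DOORBELL   0x00a0u	/* invented */
#define WQ_DOORBELL   0x0040u	/* invented */

static unsigned doorbell_off(unsigned vf, unsigned reg)
{
	return vf * VFR_PAGE_SIZE + reg;
}

int main(void)
{
	for (unsigned vf = 0; vf < 3; vf++)
		printf("vf%u: RQ@0x%04x WQ@0x%04x\n", vf,
		       doorbell_off(vf, RQ_DOORBELL),
		       doorbell_off(vf, WQ_DOORBELL));
	return 0;
}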
+ */ + bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1); + dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, + bmbx_size, + &dmabuf->phys, + GFP_KERNEL); + if (!dmabuf->virt) { + kfree(dmabuf); + return -ENOMEM; + } + memset(dmabuf->virt, 0, bmbx_size); + + /* + * Initialize the bootstrap mailbox pointers now so that the register + * operations are simple later. The mailbox dma address is required + * to be 16-byte aligned. Also align the virtual memory as each + * mailbox is copied into the bmbx mailbox region before issuing the + * command to the port. + */ + phba->sli4_hba.bmbx.dmabuf = dmabuf; + phba->sli4_hba.bmbx.bmbx_size = bmbx_size; + + phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt, + LPFC_ALIGN_16_BYTE); + phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys, + LPFC_ALIGN_16_BYTE); + + /* + * Set the high and low physical addresses now. The SLI4 alignment + * requirement is 16 bytes and the mailbox is posted to the port + * as two 30-bit addresses. The other data is a bit marking whether + * the 30-bit address is the high or low address. + * Upcast bmbx aphys to 64bits so shift instruction compiles + * clean on 32 bit machines. + */ + dma_address = &phba->sli4_hba.bmbx.dma_address; + phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys; + pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff); + dma_address->addr_hi = (uint32_t) ((pa_addr << 2) | + LPFC_BMBX_BIT1_ADDR_HI); + + pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff); + dma_address->addr_lo = (uint32_t) ((pa_addr << 2) | + LPFC_BMBX_BIT1_ADDR_LO); + return 0; +} + +/** + * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to tear down the bootstrap mailbox + * region and release all host resources. This routine requires + * the caller to ensure all mailbox commands are recovered, no + * additional mailbox commands are sent, and interrupts are disabled + * before calling this routine. + * + **/ +static void +lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba) +{ + dma_free_coherent(&phba->pcidev->dev, + phba->sli4_hba.bmbx.bmbx_size, + phba->sli4_hba.bmbx.dmabuf->virt, + phba->sli4_hba.bmbx.dmabuf->phys); + + kfree(phba->sli4_hba.bmbx.dmabuf); + memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx)); +} + +/** + * lpfc_sli4_read_config - Get the config parameters. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to read the configuration parameters from the HBA. + * The configuration parameters are used to set the base and maximum values + * for RPIs, XRIs, VPIs, VFIs, and FCFIs. These values also affect the resource + * allocation for the port. + * + * Return codes + * 0 - successful + * ENOMEM - No available memory + * EIO - The mailbox failed to complete successfully.
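The address split in lpfc_create_bootstrap_mbox() above is worth seeing with concrete numbers: the 16-byte-aligned physical address is posted as two 30-bit halves (bits 63:34 and 33:4), each shifted left by two with a marker bit distinguishing high from low. A standalone reproduction; the marker values are invented:

#include <stdint.h>
#include <stdio.h>

#define ALIGN16(x)	(((x) + 15ULL) & ~15ULL)
#define BIT1_ADDR_HI	1u	/* invented marker value */
#define BIT1_ADDR_LO	0u	/* invented marker value */

int main(void)
{
	uint64_t phys = ALIGN16(0x12345678abcdULL);
	uint32_t hi = (uint32_t)((phys >> 34) & 0x3fffffff);	/* bits 63:34 */
	uint32_t lo = (uint32_t)((phys >> 4) & 0x3fffffff);	/* bits 33:4 */

	printf("addr_hi=0x%08x addr_lo=0x%08x\n",
	       (hi << 2) | BIT1_ADDR_HI, (lo << 2) | BIT1_ADDR_LO);
	return 0;
}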
+ **/ +static int +lpfc_sli4_read_config(struct lpfc_hba *phba) +{ + LPFC_MBOXQ_t *pmb; + struct lpfc_mbx_read_config *rd_config; + uint32_t rc = 0; + + pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!pmb) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "2011 Unable to allocate memory for issuing " + "SLI_CONFIG_SPECIAL mailbox command\n"); + return -ENOMEM; + } + + lpfc_read_config(phba, pmb); + + rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); + if (rc != MBX_SUCCESS) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "2012 Mailbox failed , mbxCmd x%x " + "READ_CONFIG, mbxStatus x%x\n", + bf_get(lpfc_mqe_command, &pmb->u.mqe), + bf_get(lpfc_mqe_status, &pmb->u.mqe)); + rc = -EIO; + } else { + rd_config = &pmb->u.mqe.un.rd_config; + phba->sli4_hba.max_cfg_param.max_xri = + bf_get(lpfc_mbx_rd_conf_xri_count, rd_config); + phba->sli4_hba.max_cfg_param.xri_base = + bf_get(lpfc_mbx_rd_conf_xri_base, rd_config); + phba->sli4_hba.max_cfg_param.max_vpi = + bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config); + phba->sli4_hba.max_cfg_param.vpi_base = + bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config); + phba->sli4_hba.max_cfg_param.max_rpi = + bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config); + phba->sli4_hba.max_cfg_param.rpi_base = + bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config); + phba->sli4_hba.max_cfg_param.max_vfi = + bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config); + phba->sli4_hba.max_cfg_param.vfi_base = + bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config); + phba->sli4_hba.max_cfg_param.max_fcfi = + bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config); + phba->sli4_hba.max_cfg_param.fcfi_base = + bf_get(lpfc_mbx_rd_conf_fcfi_base, rd_config); + phba->sli4_hba.max_cfg_param.max_eq = + bf_get(lpfc_mbx_rd_conf_eq_count, rd_config); + phba->sli4_hba.max_cfg_param.max_rq = + bf_get(lpfc_mbx_rd_conf_rq_count, rd_config); + phba->sli4_hba.max_cfg_param.max_wq = + bf_get(lpfc_mbx_rd_conf_wq_count, rd_config); + phba->sli4_hba.max_cfg_param.max_cq = + bf_get(lpfc_mbx_rd_conf_cq_count, rd_config); + phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config); + phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base; + phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base; + phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base; + phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base; + phba->max_vpi = phba->sli4_hba.max_cfg_param.max_vpi; + phba->max_vports = phba->max_vpi; + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "2003 cfg params XRI(B:%d M:%d), " + "VPI(B:%d M:%d) " + "VFI(B:%d M:%d) " + "RPI(B:%d M:%d) " + "FCFI(B:%d M:%d)\n", + phba->sli4_hba.max_cfg_param.xri_base, + phba->sli4_hba.max_cfg_param.max_xri, + phba->sli4_hba.max_cfg_param.vpi_base, + phba->sli4_hba.max_cfg_param.max_vpi, + phba->sli4_hba.max_cfg_param.vfi_base, + phba->sli4_hba.max_cfg_param.max_vfi, + phba->sli4_hba.max_cfg_param.rpi_base, + phba->sli4_hba.max_cfg_param.max_rpi, + phba->sli4_hba.max_cfg_param.fcfi_base, + phba->sli4_hba.max_cfg_param.max_fcfi); + } + mempool_free(pmb, phba->mbox_mem_pool); + + /* Reset the DFT_HBA_Q_DEPTH to the max xri */ + if (phba->cfg_hba_queue_depth > (phba->sli4_hba.max_cfg_param.max_xri)) + phba->cfg_hba_queue_depth = + phba->sli4_hba.max_cfg_param.max_xri; + return rc; +} + +/** + * lpfc_dev_endian_order_setup - Notify the port of the host's endian order. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to setup the host-side endian order to the + * HBA consistent with the SLI-4 interface spec. 
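lpfc_sli4_read_config() above ends by clamping the configured HBA queue depth to the XRI count the port actually granted, a pattern repeated for several parameters. In isolation, with invented numbers:

#include <stdio.h>

int main(void)
{
	unsigned int cfg_hba_queue_depth = 8192;	/* module parameter, invented */
	unsigned int max_xri = 1024;	/* granted by READ_CONFIG, invented */

	/* The depth can never exceed the exchange resources backing it. */
	if (cfg_hba_queue_depth > max_xri)
		cfg_hba_queue_depth = max_xri;
	printf("effective queue depth: %u\n", cfg_hba_queue_depth);
	return 0;
}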
+ * + * Return codes + * 0 - sucessful + * ENOMEM - No availble memory + * EIO - The mailbox failed to complete successfully. + **/ +static int +lpfc_setup_endian_order(struct lpfc_hba *phba) +{ + LPFC_MBOXQ_t *mboxq; + uint32_t rc = 0; + uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0, + HOST_ENDIAN_HIGH_WORD1}; + + mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mboxq) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0492 Unable to allocate memory for issuing " + "SLI_CONFIG_SPECIAL mailbox command\n"); + return -ENOMEM; + } + + /* + * The SLI4_CONFIG_SPECIAL mailbox command requires the first two + * words to contain special data values and no other data. + */ + memset(mboxq, 0, sizeof(LPFC_MBOXQ_t)); + memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data)); + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + if (rc != MBX_SUCCESS) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0493 SLI_CONFIG_SPECIAL mailbox failed with " + "status x%x\n", + rc); + rc = -EIO; + } + + mempool_free(mboxq, phba->mbox_mem_pool); + return rc; +} + +/** + * lpfc_sli4_queue_create - Create all the SLI4 queues + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA + * operation. For each SLI4 queue type, the parameters such as queue entry + * count (queue depth) shall be taken from the module parameter. For now, + * we just use some constant number as place holder. + * + * Return codes + * 0 - sucessful + * ENOMEM - No availble memory + * EIO - The mailbox failed to complete successfully. + **/ +static int +lpfc_sli4_queue_create(struct lpfc_hba *phba) +{ + struct lpfc_queue *qdesc; + int fcp_eqidx, fcp_cqidx, fcp_wqidx; + int cfg_fcp_wq_count; + int cfg_fcp_eq_count; + + /* + * Sanity check for confiugred queue parameters against the run-time + * device parameters + */ + + /* Sanity check on FCP fast-path WQ parameters */ + cfg_fcp_wq_count = phba->cfg_fcp_wq_count; + if (cfg_fcp_wq_count > + (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) { + cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq - + LPFC_SP_WQN_DEF; + if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2581 Not enough WQs (%d) from " + "the pci function for supporting " + "FCP WQs (%d)\n", + phba->sli4_hba.max_cfg_param.max_wq, + phba->cfg_fcp_wq_count); + goto out_error; + } + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "2582 Not enough WQs (%d) from the pci " + "function for supporting the requested " + "FCP WQs (%d), the actual FCP WQs can " + "be supported: %d\n", + phba->sli4_hba.max_cfg_param.max_wq, + phba->cfg_fcp_wq_count, cfg_fcp_wq_count); + } + /* The actual number of FCP work queues adopted */ + phba->cfg_fcp_wq_count = cfg_fcp_wq_count; + + /* Sanity check on FCP fast-path EQ parameters */ + cfg_fcp_eq_count = phba->cfg_fcp_eq_count; + if (cfg_fcp_eq_count > + (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) { + cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq - + LPFC_SP_EQN_DEF; + if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2574 Not enough EQs (%d) from the " + "pci function for supporting FCP " + "EQs (%d)\n", + phba->sli4_hba.max_cfg_param.max_eq, + phba->cfg_fcp_eq_count); + goto out_error; + } + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "2575 Not enough EQs (%d) from the pci " + "function for supporting the requested " + "FCP EQs (%d), the actual FCP EQs can " + "be supported: %d\n", + 
phba->sli4_hba.max_cfg_param.max_eq, + phba->cfg_fcp_eq_count, cfg_fcp_eq_count); + } + /* It does not make sense to have more EQs than WQs */ + if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) { + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "2593 The number of FCP EQs (%d) is more " + "than the number of FCP WQs (%d), take " + "the number of FCP EQs same as than of " + "WQs (%d)\n", cfg_fcp_eq_count, + phba->cfg_fcp_wq_count, + phba->cfg_fcp_wq_count); + cfg_fcp_eq_count = phba->cfg_fcp_wq_count; + } + /* The actual number of FCP event queues adopted */ + phba->cfg_fcp_eq_count = cfg_fcp_eq_count; + /* The overall number of event queues used */ + phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF; + + /* + * Create Event Queues (EQs) + */ + + /* Get EQ depth from module parameter, fake the default for now */ + phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; + phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; + + /* Create slow path event queue */ + qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize, + phba->sli4_hba.eq_ecount); + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0496 Failed allocate slow-path EQ\n"); + goto out_error; + } + phba->sli4_hba.sp_eq = qdesc; + + /* Create fast-path FCP Event Queue(s) */ + phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) * + phba->cfg_fcp_eq_count), GFP_KERNEL); + if (!phba->sli4_hba.fp_eq) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2576 Failed allocate memory for fast-path " + "EQ record array\n"); + goto out_free_sp_eq; + } + for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) { + qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize, + phba->sli4_hba.eq_ecount); + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0497 Failed allocate fast-path EQ\n"); + goto out_free_fp_eq; + } + phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc; + } + + /* + * Create Complete Queues (CQs) + */ + + /* Get CQ depth from module parameter, fake the default for now */ + phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; + phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; + + /* Create slow-path Mailbox Command Complete Queue */ + qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, + phba->sli4_hba.cq_ecount); + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0500 Failed allocate slow-path mailbox CQ\n"); + goto out_free_fp_eq; + } + phba->sli4_hba.mbx_cq = qdesc; + + /* Create slow-path ELS Complete Queue */ + qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, + phba->sli4_hba.cq_ecount); + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0501 Failed allocate slow-path ELS CQ\n"); + goto out_free_mbx_cq; + } + phba->sli4_hba.els_cq = qdesc; + + /* Create slow-path Unsolicited Receive Complete Queue */ + qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, + phba->sli4_hba.cq_ecount); + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0502 Failed allocate slow-path USOL RX CQ\n"); + goto out_free_els_cq; + } + phba->sli4_hba.rxq_cq = qdesc; + + /* Create fast-path FCP Completion Queue(s), one-to-one with EQs */ + phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) * + phba->cfg_fcp_eq_count), GFP_KERNEL); + if (!phba->sli4_hba.fcp_cq) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2577 Failed allocate memory for fast-path " + "CQ record array\n"); + goto out_free_rxq_cq; + } + for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) { + qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, + 
phba->sli4_hba.cq_ecount); + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0499 Failed allocate fast-path FCP " + "CQ (%d)\n", fcp_cqidx); + goto out_free_fcp_cq; + } + phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc; + } + + /* Create Mailbox Command Queue */ + phba->sli4_hba.mq_esize = LPFC_MQE_SIZE; + phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT; + + qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize, + phba->sli4_hba.mq_ecount); + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0505 Failed allocate slow-path MQ\n"); + goto out_free_fcp_cq; + } + phba->sli4_hba.mbx_wq = qdesc; + + /* + * Create all the Work Queues (WQs) + */ + phba->sli4_hba.wq_esize = LPFC_WQE_SIZE; + phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT; + + /* Create slow-path ELS Work Queue */ + qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, + phba->sli4_hba.wq_ecount); + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0504 Failed allocate slow-path ELS WQ\n"); + goto out_free_mbx_wq; + } + phba->sli4_hba.els_wq = qdesc; + + /* Create fast-path FCP Work Queue(s) */ + phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) * + phba->cfg_fcp_wq_count), GFP_KERNEL); + if (!phba->sli4_hba.fcp_wq) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2578 Failed allocate memory for fast-path " + "WQ record array\n"); + goto out_free_els_wq; + } + for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) { + qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, + phba->sli4_hba.wq_ecount); + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0503 Failed allocate fast-path FCP " + "WQ (%d)\n", fcp_wqidx); + goto out_free_fcp_wq; + } + phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc; + } + + /* + * Create Receive Queue (RQ) + */ + phba->sli4_hba.rq_esize = LPFC_RQE_SIZE; + phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT; + + /* Create Receive Queue for header */ + qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize, + phba->sli4_hba.rq_ecount); + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0506 Failed allocate receive HRQ\n"); + goto out_free_fcp_wq; + } + phba->sli4_hba.hdr_rq = qdesc; + + /* Create Receive Queue for data */ + qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize, + phba->sli4_hba.rq_ecount); + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0507 Failed allocate receive DRQ\n"); + goto out_free_hdr_rq; + } + phba->sli4_hba.dat_rq = qdesc; + + return 0; + +out_free_hdr_rq: + lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq); + phba->sli4_hba.hdr_rq = NULL; +out_free_fcp_wq: + for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) { + lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]); + phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL; + } + kfree(phba->sli4_hba.fcp_wq); +out_free_els_wq: + lpfc_sli4_queue_free(phba->sli4_hba.els_wq); + phba->sli4_hba.els_wq = NULL; +out_free_mbx_wq: + lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq); + phba->sli4_hba.mbx_wq = NULL; +out_free_fcp_cq: + for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) { + lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]); + phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL; + } + kfree(phba->sli4_hba.fcp_cq); +out_free_rxq_cq: + lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq); + phba->sli4_hba.rxq_cq = NULL; +out_free_els_cq: + lpfc_sli4_queue_free(phba->sli4_hba.els_cq); + phba->sli4_hba.els_cq = NULL; +out_free_mbx_cq: + lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq); + phba->sli4_hba.mbx_cq = NULL; +out_free_fp_eq: + for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) { 
+		lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]);
+		phba->sli4_hba.fp_eq[fcp_eqidx] = NULL;
+	}
+	kfree(phba->sli4_hba.fp_eq);
+out_free_sp_eq:
+	lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
+	phba->sli4_hba.sp_eq = NULL;
+out_error:
+	return -ENOMEM;
+}
+
+/**
+ * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to release all the SLI4 queues used for the
+ * FCoE HBA operation.
+ **/
+static void
+lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
+{
+	int fcp_qidx;
+
+	/* Release mailbox command work queue */
+	lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
+	phba->sli4_hba.mbx_wq = NULL;
+
+	/* Release ELS work queue */
+	lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
+	phba->sli4_hba.els_wq = NULL;
+
+	/* Release FCP work queue */
+	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
+		lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
+	kfree(phba->sli4_hba.fcp_wq);
+	phba->sli4_hba.fcp_wq = NULL;
+
+	/* Release unsolicited receive queue */
+	lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
+	phba->sli4_hba.hdr_rq = NULL;
+	lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
+	phba->sli4_hba.dat_rq = NULL;
+
+	/* Release unsolicited receive complete queue */
+	lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq);
+	phba->sli4_hba.rxq_cq = NULL;
+
+	/* Release ELS complete queue */
+	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
+	phba->sli4_hba.els_cq = NULL;
+
+	/* Release mailbox command complete queue */
+	lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
+	phba->sli4_hba.mbx_cq = NULL;
+
+	/* Release FCP response complete queue */
+	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
+		lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
+	kfree(phba->sli4_hba.fcp_cq);
+	phba->sli4_hba.fcp_cq = NULL;
+
+	/* Release fast-path event queue */
+	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
+		lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
+	kfree(phba->sli4_hba.fp_eq);
+	phba->sli4_hba.fp_eq = NULL;
+
+	/* Release slow-path event queue */
+	lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
+	phba->sli4_hba.sp_eq = NULL;
+
+	return;
+}
+
+/**
+ * lpfc_sli4_queue_setup - Set up all the SLI4 queues
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
+ * operation.
+ *
+ * Return codes
+ *      0 - successful
+ *      ENOMEM - No available memory
+ *      EIO - The mailbox failed to complete successfully.
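+ *
+ * The resulting queue topology (an illustrative summary of the setup code
+ * below, not an additional interface) is:
+ *
+ *   sp_eq -----> mbx_cq -----> mbx_wq (MQ)
+ *         -----> els_cq -----> els_wq
+ *         -----> rxq_cq -----> hdr_rq/dat_rq (RQ pair)
+ *   fp_eq[i] --> fcp_cq[i] --> fcp_wq[j] (WQs assigned round-robin over CQs)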
+ **/ +int +lpfc_sli4_queue_setup(struct lpfc_hba *phba) +{ + int rc = -ENOMEM; + int fcp_eqidx, fcp_cqidx, fcp_wqidx; + int fcp_cq_index = 0; + + /* + * Set up Event Queues (EQs) + */ + + /* Set up slow-path event queue */ + if (!phba->sli4_hba.sp_eq) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0520 Slow-path EQ not allocated\n"); + goto out_error; + } + rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq, + LPFC_SP_DEF_IMAX); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0521 Failed setup of slow-path EQ: " + "rc = 0x%x\n", rc); + goto out_error; + } + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "2583 Slow-path EQ setup: queue-id=%d\n", + phba->sli4_hba.sp_eq->queue_id); + + /* Set up fast-path event queue */ + for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) { + if (!phba->sli4_hba.fp_eq[fcp_eqidx]) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0522 Fast-path EQ (%d) not " + "allocated\n", fcp_eqidx); + goto out_destroy_fp_eq; + } + rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx], + phba->cfg_fcp_imax); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0523 Failed setup of fast-path EQ " + "(%d), rc = 0x%x\n", fcp_eqidx, rc); + goto out_destroy_fp_eq; + } + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "2584 Fast-path EQ setup: " + "queue[%d]-id=%d\n", fcp_eqidx, + phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id); + } + + /* + * Set up Complete Queues (CQs) + */ + + /* Set up slow-path MBOX Complete Queue as the first CQ */ + if (!phba->sli4_hba.mbx_cq) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0528 Mailbox CQ not allocated\n"); + goto out_destroy_fp_eq; + } + rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq, + LPFC_MCQ, LPFC_MBOX); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0529 Failed setup of slow-path mailbox CQ: " + "rc = 0x%x\n", rc); + goto out_destroy_fp_eq; + } + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n", + phba->sli4_hba.mbx_cq->queue_id, + phba->sli4_hba.sp_eq->queue_id); + + /* Set up slow-path ELS Complete Queue */ + if (!phba->sli4_hba.els_cq) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0530 ELS CQ not allocated\n"); + goto out_destroy_mbx_cq; + } + rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq, + LPFC_WCQ, LPFC_ELS); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0531 Failed setup of slow-path ELS CQ: " + "rc = 0x%x\n", rc); + goto out_destroy_mbx_cq; + } + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n", + phba->sli4_hba.els_cq->queue_id, + phba->sli4_hba.sp_eq->queue_id); + + /* Set up slow-path Unsolicited Receive Complete Queue */ + if (!phba->sli4_hba.rxq_cq) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0532 USOL RX CQ not allocated\n"); + goto out_destroy_els_cq; + } + rc = lpfc_cq_create(phba, phba->sli4_hba.rxq_cq, phba->sli4_hba.sp_eq, + LPFC_RCQ, LPFC_USOL); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0533 Failed setup of slow-path USOL RX CQ: " + "rc = 0x%x\n", rc); + goto out_destroy_els_cq; + } + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "2587 USL CQ setup: cq-id=%d, parent eq-id=%d\n", + phba->sli4_hba.rxq_cq->queue_id, + phba->sli4_hba.sp_eq->queue_id); + + /* Set up fast-path FCP Response Complete Queue */ + for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) { + if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0526 Fast-path FCP CQ (%d) not " + 
"allocated\n", fcp_cqidx); + goto out_destroy_fcp_cq; + } + rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx], + phba->sli4_hba.fp_eq[fcp_cqidx], + LPFC_WCQ, LPFC_FCP); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0527 Failed setup of fast-path FCP " + "CQ (%d), rc = 0x%x\n", fcp_cqidx, rc); + goto out_destroy_fcp_cq; + } + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "2588 FCP CQ setup: cq[%d]-id=%d, " + "parent eq[%d]-id=%d\n", + fcp_cqidx, + phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id, + fcp_cqidx, + phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id); + } + + /* + * Set up all the Work Queues (WQs) + */ + + /* Set up Mailbox Command Queue */ + if (!phba->sli4_hba.mbx_wq) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0538 Slow-path MQ not allocated\n"); + goto out_destroy_fcp_cq; + } + rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq, + phba->sli4_hba.mbx_cq, LPFC_MBOX); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0539 Failed setup of slow-path MQ: " + "rc = 0x%x\n", rc); + goto out_destroy_fcp_cq; + } + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n", + phba->sli4_hba.mbx_wq->queue_id, + phba->sli4_hba.mbx_cq->queue_id); + + /* Set up slow-path ELS Work Queue */ + if (!phba->sli4_hba.els_wq) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0536 Slow-path ELS WQ not allocated\n"); + goto out_destroy_mbx_wq; + } + rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq, + phba->sli4_hba.els_cq, LPFC_ELS); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0537 Failed setup of slow-path ELS WQ: " + "rc = 0x%x\n", rc); + goto out_destroy_mbx_wq; + } + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n", + phba->sli4_hba.els_wq->queue_id, + phba->sli4_hba.els_cq->queue_id); + + /* Set up fast-path FCP Work Queue */ + for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) { + if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0534 Fast-path FCP WQ (%d) not " + "allocated\n", fcp_wqidx); + goto out_destroy_fcp_wq; + } + rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx], + phba->sli4_hba.fcp_cq[fcp_cq_index], + LPFC_FCP); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0535 Failed setup of fast-path FCP " + "WQ (%d), rc = 0x%x\n", fcp_wqidx, rc); + goto out_destroy_fcp_wq; + } + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "2591 FCP WQ setup: wq[%d]-id=%d, " + "parent cq[%d]-id=%d\n", + fcp_wqidx, + phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id, + fcp_cq_index, + phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id); + /* Round robin FCP Work Queue's Completion Queue assignment */ + fcp_cq_index = ((fcp_cq_index + 1) % phba->cfg_fcp_eq_count); + } + + /* + * Create Receive Queue (RQ) + */ + if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0540 Receive Queue not allocated\n"); + goto out_destroy_fcp_wq; + } + rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, + phba->sli4_hba.rxq_cq, LPFC_USOL); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0541 Failed setup of Receive Queue: " + "rc = 0x%x\n", rc); + goto out_destroy_fcp_wq; + } + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d " + "parent cq-id=%d\n", + phba->sli4_hba.hdr_rq->queue_id, + phba->sli4_hba.dat_rq->queue_id, + phba->sli4_hba.rxq_cq->queue_id); + return 0; + +out_destroy_fcp_wq: + for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) 
+		lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
+	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
+out_destroy_mbx_wq:
+	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
+out_destroy_fcp_cq:
+	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
+		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
+	lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq);
+out_destroy_els_cq:
+	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
+out_destroy_mbx_cq:
+	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
+out_destroy_fp_eq:
+	for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
+		lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]);
+	lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
+out_error:
+	return rc;
+}
+
+/**
+ * lpfc_sli4_queue_unset - Unset all the SLI4 queues
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to unset all the SLI4 queues used for the
+ * FCoE HBA operation.
+ **/
+void
+lpfc_sli4_queue_unset(struct lpfc_hba *phba)
+{
+	int fcp_qidx;
+
+	/* Unset mailbox command work queue */
+	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
+	/* Unset ELS work queue */
+	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
+	/* Unset unsolicited receive queue */
+	lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
+	/* Unset FCP work queue */
+	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
+		lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
+	/* Unset mailbox command complete queue */
+	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
+	/* Unset ELS complete queue */
+	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
+	/* Unset unsolicited receive complete queue */
+	lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq);
+	/* Unset FCP response complete queue */
+	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
+		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
+	/* Unset fast-path event queue */
+	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
+		lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
+	/* Unset slow-path event queue */
+	lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
+}
+
+/**
+ * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to allocate and set up a pool of completion queue
+ * events. The body of the completion queue event is a completion queue entry
+ * (CQE). For now, this pool is used for the interrupt service routine to
+ * queue the following HBA completion queue events for the worker thread to
+ * process:
+ *   - Mailbox asynchronous events
+ *   - Receive queue completion unsolicited events
+ * Later, this can be used for all the slow-path events.
+ *
+ * Return codes
+ *      0 - successful
+ *      -ENOMEM - No available memory
+ **/
+static int
+lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
+{
+	struct lpfc_cq_event *cq_event;
+	int i;
+
+	for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
+		cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
+		if (!cq_event)
+			goto out_pool_create_fail;
+		list_add_tail(&cq_event->list,
+			      &phba->sli4_hba.sp_cqe_event_pool);
+	}
+	return 0;
+
+out_pool_create_fail:
+	lpfc_sli4_cq_event_pool_destroy(phba);
+	return -ENOMEM;
+}
+
+/**
+ * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to free the pool of completion queue events at
+ * driver unload time. Note that it is the responsibility of the driver
+ * cleanup routine to free all the outstanding completion-queue events
+ * allocated from this pool back into the pool before invoking this routine
+ * to destroy the pool.
+ **/
+static void
+lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
+{
+	struct lpfc_cq_event *cq_event, *next_cq_event;
+
+	list_for_each_entry_safe(cq_event, next_cq_event,
+				 &phba->sli4_hba.sp_cqe_event_pool, list) {
+		list_del(&cq_event->list);
+		kfree(cq_event);
+	}
+}
+
+/**
+ * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is the lock-free version of the API invoked to allocate a
+ * completion-queue event from the free pool.
+ *
+ * Return: Pointer to the newly allocated completion-queue event if successful
+ *         NULL otherwise.
+ **/
+struct lpfc_cq_event *
+__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
+{
+	struct lpfc_cq_event *cq_event = NULL;
+
+	list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
+			 struct lpfc_cq_event, list);
+	return cq_event;
+}
+
+/**
+ * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is the locked version of the API invoked to allocate a
+ * completion-queue event from the free pool.
+ *
+ * Return: Pointer to the newly allocated completion-queue event if successful
+ *         NULL otherwise.
+ **/
+struct lpfc_cq_event *
+lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
+{
+	struct lpfc_cq_event *cq_event;
+	unsigned long iflags;
+
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	cq_event = __lpfc_sli4_cq_event_alloc(phba);
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	return cq_event;
+}
+
+/**
+ * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
+ * @phba: pointer to lpfc hba data structure.
+ * @cq_event: pointer to the completion queue event to be freed.
+ *
+ * This routine is the lock-free version of the API invoked to release a
+ * completion-queue event back into the free pool.
+ **/
+void
+__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
+			     struct lpfc_cq_event *cq_event)
+{
+	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
+}
+
+/**
+ * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
+ * @phba: pointer to lpfc hba data structure.
+ * @cq_event: pointer to the completion queue event to be freed.
+ *
+ * This routine is the locked version of the API invoked to release a
+ * completion-queue event back into the free pool.
+ **/
+void
+lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
+			   struct lpfc_cq_event *cq_event)
+{
+	unsigned long iflags;
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	__lpfc_sli4_cq_event_release(phba, cq_event);
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+}
+
+/**
+ * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to free all the pending completion-queue events
+ * back into the free pool for device reset.
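+ *
+ * An illustrative sketch of the pool's intended use, per the description
+ * in lpfc_sli4_cq_event_pool_create() above (the surrounding handler
+ * context is an assumption, not code added by this patch):
+ *
+ *   In the interrupt service routine:
+ *       struct lpfc_cq_event *ev = lpfc_sli4_cq_event_alloc(phba);
+ *       (copy the CQE into ev, then queue ev to the worker thread)
+ *   In the worker thread, once the event has been processed:
+ *       lpfc_sli4_cq_event_release(phba, ev);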
+ **/
+static void
+lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
+{
+	LIST_HEAD(cqelist);
+	struct lpfc_cq_event *cqe;
+	unsigned long iflags;
+
+	/* Retrieve all the pending WCQEs from pending WCQE lists */
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	/* Pending FCP XRI abort events */
+	list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
+			 &cqelist);
+	/* Pending ELS XRI abort events */
+	list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
+			 &cqelist);
+	/* Pending async events */
+	list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
+			 &cqelist);
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+	while (!list_empty(&cqelist)) {
+		list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
+		lpfc_sli4_cq_event_release(phba, cqe);
+	}
+}
+
+/**
+ * lpfc_pci_function_reset - Reset PCI function.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to request a PCI function reset. It destroys all
+ * resources assigned to the PCI function which originates this request.
+ *
+ * Return codes
+ *      0 - successful
+ *      ENOMEM - No available memory
+ *      EIO - The mailbox failed to complete successfully.
+ **/
+int
+lpfc_pci_function_reset(struct lpfc_hba *phba)
+{
+	LPFC_MBOXQ_t *mboxq;
+	uint32_t rc = 0;
+	uint32_t shdr_status, shdr_add_status;
+	union lpfc_sli4_cfg_shdr *shdr;
+
+	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mboxq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0494 Unable to allocate memory for issuing "
+				"SLI_FUNCTION_RESET mailbox command\n");
+		return -ENOMEM;
+	}
+
+	/* Set up PCI function reset SLI4_CONFIG mailbox-ioctl command */
+	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
+			 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
+			 LPFC_SLI4_MBX_EMBED);
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+	shdr = (union lpfc_sli4_cfg_shdr *)
+		&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (rc != MBX_TIMEOUT)
+		mempool_free(mboxq, phba->mbox_mem_pool);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0495 SLI_FUNCTION_RESET mailbox failed with "
+				"status x%x add_status x%x, mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		rc = -ENXIO;
+	}
+	return rc;
+}
+
+/**
+ * lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands
+ * @phba: pointer to lpfc hba data structure.
+ * @cnt: number of nop mailbox commands to send.
+ *
+ * This routine is invoked to send @cnt NOP mailbox commands and wait for
+ * each command to complete.
+ *
+ * Return: the number of NOP mailbox commands completed.
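+ *
+ * For example, the probe path later in this file uses this routine to
+ * verify that a newly configured MSI/MSI-X mode actually raises
+ * interrupts (shown here only as an illustration of that usage):
+ *
+ *   mcnt = lpfc_sli4_send_nop_mbox_cmds(phba, LPFC_ACT_INTR_CNT);
+ *   if (phba->sli.slistat.sli_intr < LPFC_ACT_INTR_CNT)
+ *           (the active-interrupt test failed; fall back to a lower mode)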
+ **/
+static int
+lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt)
+{
+	LPFC_MBOXQ_t *mboxq;
+	int length, cmdsent;
+	uint32_t mbox_tmo;
+	uint32_t rc = 0;
+	uint32_t shdr_status, shdr_add_status;
+	union lpfc_sli4_cfg_shdr *shdr;
+
+	if (cnt == 0) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+				"2518 Requested to send 0 NOP mailbox cmd\n");
+		return cnt;
+	}
+
+	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mboxq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2519 Unable to allocate memory for issuing "
+				"NOP mailbox command\n");
+		return 0;
+	}
+
+	/* Set up NOP SLI4_CONFIG mailbox-ioctl command */
+	length = (sizeof(struct lpfc_mbx_nop) -
+		  sizeof(struct lpfc_sli4_cfg_mhdr));
+	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
+			 LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED);
+
+	mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+	for (cmdsent = 0; cmdsent < cnt; cmdsent++) {
+		if (!phba->sli4_hba.intr_enable)
+			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+		else
+			rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
+		if (rc == MBX_TIMEOUT)
+			break;
+		/* Check return status */
+		shdr = (union lpfc_sli4_cfg_shdr *)
+			&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
+		shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+		shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
+					 &shdr->response);
+		if (shdr_status || shdr_add_status || rc) {
+			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+					"2520 NOP mailbox command failed "
+					"status x%x add_status x%x mbx "
+					"status x%x\n", shdr_status,
+					shdr_add_status, rc);
+			break;
+		}
+	}
+
+	if (rc != MBX_TIMEOUT)
+		mempool_free(mboxq, phba->mbox_mem_pool);
+
+	return cmdsent;
+}
+
+/**
+ * lpfc_sli4_fcfi_unreg - Unregister fcfi from device
+ * @phba: pointer to lpfc hba data structure.
+ * @fcfi: fcf index.
+ *
+ * This routine is invoked to unregister an FCFI from the device.
+ **/
+void
+lpfc_sli4_fcfi_unreg(struct lpfc_hba *phba, uint16_t fcfi)
+{
+	LPFC_MBOXQ_t *mbox;
+	uint32_t mbox_tmo;
+	int rc;
+	unsigned long flags;
+
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+
+	if (!mbox)
+		return;
+
+	lpfc_unreg_fcfi(mbox, fcfi);
+
+	if (!phba->sli4_hba.intr_enable)
+		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+	else {
+		mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+	}
+	if (rc != MBX_TIMEOUT)
+		mempool_free(mbox, phba->mbox_mem_pool);
+	if (rc != MBX_SUCCESS)
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2517 Unregister FCFI command failed "
+				"status %d, mbxStatus x%x\n", rc,
+				bf_get(lpfc_mqe_status, &mbox->u.mqe));
+	else {
+		spin_lock_irqsave(&phba->hbalock, flags);
+		/* Mark the FCFI as no longer registered */
+		phba->fcf.fcf_flag &=
+			~(FCF_AVAILABLE | FCF_REGISTERED | FCF_DISCOVERED);
+		spin_unlock_irqrestore(&phba->hbalock, flags);
+	}
+}
+
+/**
+ * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up the PCI device memory space for a
+ * device with the SLI-4 interface spec.
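+ *
+ * The three BARs are mapped as follows (a summary of the code below):
+ *
+ *   LPFC_SLI4_BAR0 -> conf_regs_memmap_p (SLI4 PCI config space registers)
+ *   LPFC_SLI4_BAR1 -> ctrl_regs_memmap_p (SLI4 HBA control registers)
+ *   LPFC_SLI4_BAR2 -> drbl_regs_memmap_p (SLI4 HBA doorbell registers)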
+ *
+ * Return codes
+ *      0 - successful
+ *      other values - error
+ **/
+static int
+lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
+{
+	struct pci_dev *pdev;
+	unsigned long bar0map_len, bar1map_len, bar2map_len;
+	int error = -ENODEV;
+
+	/* Obtain PCI device reference */
+	if (!phba->pcidev)
+		return error;
+	else
+		pdev = phba->pcidev;
+
+	/* Set the device DMA mask size */
+	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
+		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
+			return error;
+
+	/* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the
+	 * number of bytes required by each mapping. They actually map to
+	 * PCI BAR regions 1, 2, and 4 of the SLI4 device.
+	 */
+	phba->pci_bar0_map = pci_resource_start(pdev, LPFC_SLI4_BAR0);
+	bar0map_len = pci_resource_len(pdev, LPFC_SLI4_BAR0);
+
+	phba->pci_bar1_map = pci_resource_start(pdev, LPFC_SLI4_BAR1);
+	bar1map_len = pci_resource_len(pdev, LPFC_SLI4_BAR1);
+
+	phba->pci_bar2_map = pci_resource_start(pdev, LPFC_SLI4_BAR2);
+	bar2map_len = pci_resource_len(pdev, LPFC_SLI4_BAR2);
+
+	/* Map SLI4 PCI Config Space Register base to a kernel virtual addr */
+	phba->sli4_hba.conf_regs_memmap_p =
+		ioremap(phba->pci_bar0_map, bar0map_len);
+	if (!phba->sli4_hba.conf_regs_memmap_p) {
+		dev_printk(KERN_ERR, &pdev->dev,
+			   "ioremap failed for SLI4 PCI config registers.\n");
+		goto out;
+	}
+
+	/* Map SLI4 HBA Control Register base to a kernel virtual address. */
+	phba->sli4_hba.ctrl_regs_memmap_p =
+		ioremap(phba->pci_bar1_map, bar1map_len);
+	if (!phba->sli4_hba.ctrl_regs_memmap_p) {
+		dev_printk(KERN_ERR, &pdev->dev,
+			   "ioremap failed for SLI4 HBA control registers.\n");
+		goto out_iounmap_conf;
+	}
+
+	/* Map SLI4 HBA Doorbell Register base to a kernel virtual address. */
+	phba->sli4_hba.drbl_regs_memmap_p =
+		ioremap(phba->pci_bar2_map, bar2map_len);
+	if (!phba->sli4_hba.drbl_regs_memmap_p) {
+		dev_printk(KERN_ERR, &pdev->dev,
+			   "ioremap failed for SLI4 HBA doorbell registers.\n");
+		goto out_iounmap_ctrl;
+	}
+
+	/* Set up BAR0 PCI config space register memory map */
+	lpfc_sli4_bar0_register_memmap(phba);
+
+	/* Set up BAR1 register memory map */
+	lpfc_sli4_bar1_register_memmap(phba);
+
+	/* Set up BAR2 register memory map */
+	error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
+	if (error)
+		goto out_iounmap_all;
+
+	return 0;
+
+out_iounmap_all:
+	iounmap(phba->sli4_hba.drbl_regs_memmap_p);
+out_iounmap_ctrl:
+	iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
+out_iounmap_conf:
+	iounmap(phba->sli4_hba.conf_regs_memmap_p);
+out:
+	return error;
+}
+
+/**
+ * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to unset the PCI device memory space for a device
+ * with the SLI-4 interface spec.
+ **/
+static void
+lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
+{
+	struct pci_dev *pdev;
+
+	/* Obtain PCI device reference */
+	if (!phba->pcidev)
+		return;
+	else
+		pdev = phba->pcidev;
+
+	/* Free coherent DMA memory allocated */
+
+	/* Unmap I/O memory space */
+	iounmap(phba->sli4_hba.drbl_regs_memmap_p);
+	iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
+	iounmap(phba->sli4_hba.conf_regs_memmap_p);
+
+	return;
+}
+
+/**
 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
 * @phba: pointer to lpfc hba data structure.
 *
@@ -3597,6 +6271,276 @@ lpfc_sli_disable_intr(struct lpfc_hba *phba)
 }
 
 /**
+ * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to enable the MSI-X interrupt vectors to device
+ * with SLI-4 interface spec. The kernel function pci_enable_msix() is called
+ * to enable the MSI-X vectors. Note that pci_enable_msix(), once invoked,
+ * enables either all or nothing, depending on the current availability of
+ * PCI vector resources. The device driver is responsible for calling the
+ * individual request_irq() to register each MSI-X vector with an interrupt
+ * handler, which is done in this function. Note that later, when the device
+ * is unloading, the driver should always call free_irq() on all MSI-X
+ * vectors it has done request_irq() on before calling pci_disable_msix().
+ * Failure to do so results in a BUG_ON() and leaves the device with MSI-X
+ * enabled and its vectors leaked.
+ *
+ * Return codes
+ *      0 - successful
+ *      other values - error
+ **/
+static int
+lpfc_sli4_enable_msix(struct lpfc_hba *phba)
+{
+	int rc, index;
+
+	/* Set up MSI-X multi-message vectors */
+	for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
+		phba->sli4_hba.msix_entries[index].entry = index;
+
+	/* Configure MSI-X capability structure */
+	rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
+			     phba->sli4_hba.cfg_eqn);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"0484 PCI enable MSI-X failed (%d)\n", rc);
+		goto msi_fail_out;
+	}
+	/* Log MSI-X vector assignment */
+	for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"0489 MSI-X entry[%d]: vector=x%x "
+				"message=%d\n", index,
+				phba->sli4_hba.msix_entries[index].vector,
+				phba->sli4_hba.msix_entries[index].entry);
+	/*
+	 * Assign MSI-X vectors to interrupt handlers
+	 */
+
+	/* The first vector must be associated with the slow-path handler
+	 * for the MQ */
+	rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
+			 &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
+			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+				"0485 MSI-X slow-path request_irq failed "
+				"(%d)\n", rc);
+		goto msi_fail_out;
+	}
+
+	/* The rest of the vector(s) are associated with fast-path handler(s) */
+	for (index = 1; index < phba->sli4_hba.cfg_eqn; index++) {
+		phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1;
+		phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba;
+		rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
+				 &lpfc_sli4_fp_intr_handler, IRQF_SHARED,
+				 LPFC_FP_DRIVER_HANDLER_NAME,
+				 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
+		if (rc) {
+			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+					"0486 MSI-X fast-path (%d) "
+					"request_irq failed (%d)\n", index, rc);
+			goto cfg_fail_out;
+		}
+	}
+
+	return rc;
+
+cfg_fail_out:
+	/* free the irq already requested */
+	for (--index; index >= 1; index--)
+		free_irq(phba->sli4_hba.msix_entries[index - 1].vector,
+			 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
+
+	/* free the irq already requested */
+	free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
+
+msi_fail_out:
+	/* Unconfigure MSI-X capability structure */
+	pci_disable_msix(phba->pcidev);
+	return rc;
+}
+
+/**
+ * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to release the MSI-X vectors and then disable the
+ * MSI-X interrupt mode to device with SLI-4 interface spec.
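+ *
+ * Note the teardown ordering (mirroring the requirement described in
+ * lpfc_sli4_enable_msix() above; shown only as an illustration of the
+ * code below):
+ *
+ *   free_irq(phba->sli4_hba.msix_entries[index].vector, ...);
+ *                                    (for every vector requested)
+ *   pci_disable_msix(phba->pcidev);  (only after all free_irq() calls)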
+ **/
+static void
+lpfc_sli4_disable_msix(struct lpfc_hba *phba)
+{
+	int index;
+
+	/* Free up MSI-X multi-message vectors */
+	free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
+
+	for (index = 1; index < phba->sli4_hba.cfg_eqn; index++)
+		free_irq(phba->sli4_hba.msix_entries[index].vector,
+			 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
+	/* Disable MSI-X */
+	pci_disable_msix(phba->pcidev);
+
+	return;
+}
+
+/**
+ * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to enable the MSI interrupt mode to device with
+ * SLI-4 interface spec. The kernel function pci_enable_msi() is called
+ * to enable the MSI vector. The device driver is responsible for calling
+ * request_irq() to register the MSI vector with an interrupt handler,
+ * which is done in this function.
+ *
+ * Return codes
+ *      0 - successful
+ *      other values - error
+ **/
+static int
+lpfc_sli4_enable_msi(struct lpfc_hba *phba)
+{
+	int rc, index;
+
+	rc = pci_enable_msi(phba->pcidev);
+	if (!rc)
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"0487 PCI enable MSI mode success.\n");
+	else {
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"0488 PCI enable MSI mode failed (%d)\n", rc);
+		return rc;
+	}
+
+	rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
+			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
+	if (rc) {
+		pci_disable_msi(phba->pcidev);
+		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+				"0490 MSI request_irq failed (%d)\n", rc);
+	}
+
+	for (index = 0; index < phba->cfg_fcp_eq_count; index++) {
+		phba->sli4_hba.fcp_eq_hdl[index].idx = index;
+		phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
+	}
+
+	return rc;
+}
+
+/**
+ * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to disable the MSI interrupt mode to device with
+ * SLI-4 interface spec. The driver calls free_irq() on the MSI vector it
+ * has done request_irq() on before calling pci_disable_msi(). Failure to
+ * do so results in a BUG_ON() and leaves the device with MSI enabled and
+ * its vector leaked.
+ **/
+static void
+lpfc_sli4_disable_msi(struct lpfc_hba *phba)
+{
+	free_irq(phba->pcidev->irq, phba);
+	pci_disable_msi(phba->pcidev);
+	return;
+}
+
+/**
+ * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
+ * @phba: pointer to lpfc hba data structure.
+ * @cfg_mode: configured interrupt mode to start from (2 = MSI-X, 1 = MSI,
+ *            0 = INTx).
+ *
+ * This routine is invoked to enable device interrupt and associate driver's
+ * interrupt handler(s) to interrupt vector(s) to device with SLI-4
+ * interface spec. Depending on the interrupt mode configured for the
+ * driver, the driver will try to fall back from the configured interrupt
+ * mode to an interrupt mode which is supported by the platform, kernel,
+ * and device, in the order of:
+ * MSI-X -> MSI -> IRQ.
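+ *
+ * For example, with @cfg_mode == 2 the driver attempts MSI-X first; if
+ * that fails (or later fails the active-interrupt test), the probe path
+ * below retries with MSI and finally INTx (an illustration of the loop in
+ * lpfc_pci_probe_one_s4()):
+ *
+ *   intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
+ *   (active-interrupt test with NOP mailbox commands)
+ *   cfg_mode = --intr_mode;  (on failure, try the next lower mode)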
+ *
+ * Return codes
+ *      0 - successful
+ *      other values - error
+ **/
+static uint32_t
+lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
+{
+	uint32_t intr_mode = LPFC_INTR_ERROR;
+	int retval, index;
+
+	if (cfg_mode == 2) {
+		/* Preparation before conf_msi mbox cmd */
+		retval = 0;
+		if (!retval) {
+			/* Now, try to enable MSI-X interrupt mode */
+			retval = lpfc_sli4_enable_msix(phba);
+			if (!retval) {
+				/* Indicate initialization to MSI-X mode */
+				phba->intr_type = MSIX;
+				intr_mode = 2;
+			}
+		}
+	}
+
+	/* Fall back to MSI if MSI-X initialization failed */
+	if (cfg_mode >= 1 && phba->intr_type == NONE) {
+		retval = lpfc_sli4_enable_msi(phba);
+		if (!retval) {
+			/* Indicate initialization to MSI mode */
+			phba->intr_type = MSI;
+			intr_mode = 1;
+		}
+	}
+
+	/* Fall back to INTx if both MSI-X and MSI initialization failed */
+	if (phba->intr_type == NONE) {
+		retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
+				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
+		if (!retval) {
+			/* Indicate initialization to INTx mode */
+			phba->intr_type = INTx;
+			intr_mode = 0;
+			for (index = 0; index < phba->cfg_fcp_eq_count;
+			     index++) {
+				phba->sli4_hba.fcp_eq_hdl[index].idx = index;
+				phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
+			}
+		}
+	}
+	return intr_mode;
+}
+
+/**
+ * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to disable device interrupt and disassociate
+ * the driver's interrupt handler(s) from interrupt vector(s) to device
+ * with SLI-4 interface spec. Depending on the interrupt mode, the driver
+ * will release the interrupt vector(s) for the message signaled interrupt.
+ **/
+static void
+lpfc_sli4_disable_intr(struct lpfc_hba *phba)
+{
+	/* Disable the currently initialized interrupt mode */
+	if (phba->intr_type == MSIX)
+		lpfc_sli4_disable_msix(phba);
+	else if (phba->intr_type == MSI)
+		lpfc_sli4_disable_msi(phba);
+	else if (phba->intr_type == INTx)
+		free_irq(phba->pcidev->irq, phba);
+
+	/* Reset interrupt management states */
+	phba->intr_type = NONE;
+	phba->sli.slistat.sli_intr = 0;
+
+	return;
+}
+
+/**
 * lpfc_unset_hba - Unset SLI3 hba device initialization
 * @phba: pointer to lpfc hba data structure.
 *
@@ -3627,6 +6571,90 @@ lpfc_unset_hba(struct lpfc_hba *phba)
 }
 
 /**
+ * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to unset the HBA device initialization steps for
+ * a device with the SLI-4 interface spec.
+ **/
+static void
+lpfc_sli4_unset_hba(struct lpfc_hba *phba)
+{
+	struct lpfc_vport *vport = phba->pport;
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	spin_lock_irq(shost->host_lock);
+	vport->load_flag |= FC_UNLOADING;
+	spin_unlock_irq(shost->host_lock);
+
+	phba->pport->work_port_events = 0;
+
+	lpfc_sli4_hba_down(phba);
+
+	lpfc_sli4_disable_intr(phba);
+
+	return;
+}
+
+/**
+ * lpfc_sli4_hba_unset - Unset the fcoe hba
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called in the SLI4 code path to reset the HBA's FCoE
+ * function. The caller is not required to hold any lock. This routine
+ * issues the PCI function reset mailbox command to reset the FCoE function.
+ * At the end of the function, it calls the lpfc_hba_down_post function to
+ * free any pending commands.
+ **/
+static void
+lpfc_sli4_hba_unset(struct lpfc_hba *phba)
+{
+	int wait_cnt = 0;
+	LPFC_MBOXQ_t *mboxq;
+
+	lpfc_stop_hba_timers(phba);
+	phba->sli4_hba.intr_enable = 0;
+
+	/*
+	 * Gracefully wait out the potential current outstanding asynchronous
+	 * mailbox command.
+	 */
+
+	/* First, block any pending async mailbox command from being posted */
+	spin_lock_irq(&phba->hbalock);
+	phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
+	spin_unlock_irq(&phba->hbalock);
+	/* Now, try to wait it out if we can */
+	while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
+		msleep(10);
+		if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
+			break;
+	}
+	/* Forcefully release the outstanding mailbox command if timed out */
+	if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
+		spin_lock_irq(&phba->hbalock);
+		mboxq = phba->sli.mbox_active;
+		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
+		__lpfc_mbox_cmpl_put(phba, mboxq);
+		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+		phba->sli.mbox_active = NULL;
+		spin_unlock_irq(&phba->hbalock);
+	}
+
+	/* Tear down the queues in the HBA */
+	lpfc_sli4_queue_unset(phba);
+
+	/* Disable PCI subsystem interrupt */
+	lpfc_sli4_disable_intr(phba);
+
+	/* Stop kthread signal shall trigger work_done one more time */
+	kthread_stop(phba->worker_thread);
+
+	/* Stop the SLI4 device port */
+	phba->pport->work_port_events = 0;
+}
+
+/**
 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
@@ -4127,6 +7155,446 @@ lpfc_io_resume_s3(struct pci_dev *pdev)
 }
 
 /**
+ * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * returns the number of ELS/CT IOCBs to reserve
+ **/
+int
+lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
+{
+	int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
+
+	if (max_xri <= 100)
+		return 4;
+	else if (max_xri <= 256)
+		return 8;
+	else if (max_xri <= 512)
+		return 16;
+	else if (max_xri <= 1024)
+		return 32;
+	else
+		return 48;
+}
+
+/**
+ * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
+ * @pdev: pointer to PCI device
+ * @pid: pointer to PCI device identifier
+ *
+ * This routine is called from the kernel's PCI subsystem for a device with
+ * the SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
+ * presented on the PCI bus, the kernel PCI subsystem looks at the PCI
+ * device-specific information of the device to see if the driver can
+ * support this kind of device. If the match is successful, the driver core
+ * invokes this routine. If this routine determines it can claim the HBA, it
+ * does all the initialization that it needs to do to handle the HBA
+ * properly.
+ * + * Return code + * 0 - driver can claim the device + * negative value - driver can not claim the device + **/ +static int __devinit +lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) +{ + struct lpfc_hba *phba; + struct lpfc_vport *vport = NULL; + int error; + uint32_t cfg_mode, intr_mode; + int mcnt; + + /* Allocate memory for HBA structure */ + phba = lpfc_hba_alloc(pdev); + if (!phba) + return -ENOMEM; + + /* Perform generic PCI device enabling operation */ + error = lpfc_enable_pci_dev(phba); + if (error) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1409 Failed to enable pci device.\n"); + goto out_free_phba; + } + + /* Set up SLI API function jump table for PCI-device group-1 HBAs */ + error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC); + if (error) + goto out_disable_pci_dev; + + /* Set up SLI-4 specific device PCI memory space */ + error = lpfc_sli4_pci_mem_setup(phba); + if (error) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1410 Failed to set up pci memory space.\n"); + goto out_disable_pci_dev; + } + + /* Set up phase-1 common device driver resources */ + error = lpfc_setup_driver_resource_phase1(phba); + if (error) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1411 Failed to set up driver resource.\n"); + goto out_unset_pci_mem_s4; + } + + /* Set up SLI-4 Specific device driver resources */ + error = lpfc_sli4_driver_resource_setup(phba); + if (error) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1412 Failed to set up driver resource.\n"); + goto out_unset_pci_mem_s4; + } + + /* Initialize and populate the iocb list per host */ + error = lpfc_init_iocb_list(phba, + phba->sli4_hba.max_cfg_param.max_xri); + if (error) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1413 Failed to initialize iocb list.\n"); + goto out_unset_driver_resource_s4; + } + + /* Set up common device driver resources */ + error = lpfc_setup_driver_resource_phase2(phba); + if (error) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1414 Failed to set up driver resource.\n"); + goto out_free_iocb_list; + } + + /* Create SCSI host to the physical port */ + error = lpfc_create_shost(phba); + if (error) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1415 Failed to create scsi host.\n"); + goto out_unset_driver_resource; + } + + /* Configure sysfs attributes */ + vport = phba->pport; + error = lpfc_alloc_sysfs_attr(vport); + if (error) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1416 Failed to allocate sysfs attr\n"); + goto out_destroy_shost; + } + + /* Now, trying to enable interrupt and bring up the device */ + cfg_mode = phba->cfg_use_msi; + while (true) { + /* Put device to a known state before enabling interrupt */ + lpfc_stop_port(phba); + /* Configure and enable interrupt */ + intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode); + if (intr_mode == LPFC_INTR_ERROR) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0426 Failed to enable interrupt.\n"); + error = -ENODEV; + goto out_free_sysfs_attr; + } + /* Set up SLI-4 HBA */ + if (lpfc_sli4_hba_setup(phba)) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1421 Failed to set up hba\n"); + error = -ENODEV; + goto out_disable_intr; + } + + /* Send NOP mbx cmds for non-INTx mode active interrupt test */ + if (intr_mode != 0) + mcnt = lpfc_sli4_send_nop_mbox_cmds(phba, + LPFC_ACT_INTR_CNT); + + /* Check active interrupts received only for MSI/MSI-X */ + if (intr_mode == 0 || + phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) { + /* Log the current active interrupt mode */ + phba->intr_mode = intr_mode; + 
lpfc_log_intr_mode(phba, intr_mode);
+			break;
+		}
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"0451 Configure interrupt mode (%d) "
+				"failed active interrupt test.\n",
+				intr_mode);
+		/* Unset the previous SLI-4 HBA setup */
+		lpfc_sli4_unset_hba(phba);
+		/* Try next level of interrupt mode */
+		cfg_mode = --intr_mode;
+	}
+
+	/* Perform post initialization setup */
+	lpfc_post_init_setup(phba);
+
+	return 0;
+
+out_disable_intr:
+	lpfc_sli4_disable_intr(phba);
+out_free_sysfs_attr:
+	lpfc_free_sysfs_attr(vport);
+out_destroy_shost:
+	lpfc_destroy_shost(phba);
+out_unset_driver_resource:
+	lpfc_unset_driver_resource_phase2(phba);
+out_free_iocb_list:
+	lpfc_free_iocb_list(phba);
+out_unset_driver_resource_s4:
+	lpfc_sli4_driver_resource_unset(phba);
+out_unset_pci_mem_s4:
+	lpfc_sli4_pci_mem_unset(phba);
+out_disable_pci_dev:
+	lpfc_disable_pci_dev(phba);
+out_free_phba:
+	lpfc_hba_free(phba);
+	return error;
+}
+
+/**
+ * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
+ * @pdev: pointer to PCI device
+ *
+ * This routine is called from the kernel's PCI subsystem for a device with
+ * the SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
+ * removed from the PCI bus, it performs all the necessary cleanup for the
+ * HBA device to be removed from the PCI subsystem properly.
+ **/
+static void __devexit
+lpfc_pci_remove_one_s4(struct pci_dev *pdev)
+{
+	struct Scsi_Host *shost = pci_get_drvdata(pdev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_vport **vports;
+	struct lpfc_hba *phba = vport->phba;
+	int i;
+
+	/* Mark the device unloading flag */
+	spin_lock_irq(&phba->hbalock);
+	vport->load_flag |= FC_UNLOADING;
+	spin_unlock_irq(&phba->hbalock);
+
+	/* Free the HBA sysfs attributes */
+	lpfc_free_sysfs_attr(vport);
+
+	/* Release all the vports against this physical port */
+	vports = lpfc_create_vport_work_array(phba);
+	if (vports != NULL)
+		for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
+			fc_vport_terminate(vports[i]->fc_vport);
+	lpfc_destroy_vport_work_array(phba, vports);
+
+	/* Remove FC host and then SCSI host with the physical port */
+	fc_remove_host(shost);
+	scsi_remove_host(shost);
+
+	/* Perform cleanup on the physical port */
+	lpfc_cleanup(vport);
+
+	/*
+	 * Bring down the SLI Layer. This step disables all interrupts,
+	 * clears the rings, discards all mailbox commands, and resets
+	 * the HBA FCoE function.
+	 */
+	lpfc_debugfs_terminate(vport);
+	lpfc_sli4_hba_unset(phba);
+
+	spin_lock_irq(&phba->hbalock);
+	list_del_init(&vport->listentry);
+	spin_unlock_irq(&phba->hbalock);
+
+	/* Call scsi_free before lpfc_sli4_driver_resource_unset since scsi
+	 * buffers are released to their corresponding pools here.
+	 */
+	lpfc_scsi_free(phba);
+	lpfc_sli4_driver_resource_unset(phba);
+
+	/* Unmap adapter Control and Doorbell registers */
+	lpfc_sli4_pci_mem_unset(phba);
+
+	/* Release PCI resources and disable device's PCI function */
+	scsi_host_put(shost);
+	lpfc_disable_pci_dev(phba);
+
+	/* Finally, free the driver's device data structure */
+	lpfc_hba_free(phba);
+
+	return;
+}
+
+/**
+ * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
+ * @pdev: pointer to PCI device
+ * @msg: power management message
+ *
+ * This routine is called from the kernel's PCI subsystem to support system
+ * Power Management (PM) for a device with the SLI-4 interface spec.
+ * When PM invokes this method, it quiesces the device by stopping the
+ * driver's worker thread for the device, turning off the device's
+ * interrupts and DMA, and bringing the device offline. Note that, because
+ * the driver implements only the minimum PM requirements for a power-aware
+ * driver -- all possible PM messages (SUSPEND, HIBERNATE, FREEZE) passed to
+ * the suspend() method are treated as SUSPEND, and the driver fully
+ * reinitializes its device during the resume() method call -- the driver
+ * sets the device to the PCI_D3hot state in PCI config space instead of
+ * setting it according to the @msg provided by the PM.
+ *
+ * Return code
+ *      0 - driver suspended the device
+ *      Error otherwise
+ **/
+static int
+lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
+{
+	struct Scsi_Host *shost = pci_get_drvdata(pdev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+			"0298 PCI device Power Management suspend.\n");
+
+	/* Bring down the device */
+	lpfc_offline_prep(phba);
+	lpfc_offline(phba);
+	kthread_stop(phba->worker_thread);
+
+	/* Disable interrupt from device */
+	lpfc_sli4_disable_intr(phba);
+
+	/* Save device state to PCI config space */
+	pci_save_state(pdev);
+	pci_set_power_state(pdev, PCI_D3hot);
+
+	return 0;
+}
+
+/**
+ * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
+ * @pdev: pointer to PCI device
+ *
+ * This routine is called from the kernel's PCI subsystem to support system
+ * Power Management (PM) for a device with the SLI-4 interface spec. When PM
+ * invokes this method, it restores the device's PCI config space state,
+ * fully reinitializes the device, and brings it online. Note that, because
+ * the driver implements only the minimum PM requirements for a power-aware
+ * driver -- all possible PM messages (SUSPEND, HIBERNATE, FREEZE) passed to
+ * the suspend() method are treated as SUSPEND, and the driver fully
+ * reinitializes its device during the resume() method call -- the device
+ * will be set to PCI_D0 directly in PCI config space before restoring the
+ * state.
+ *
+ * Return code
+ *      0 - driver resumed the device
+ *      Error otherwise
+ **/
+static int
+lpfc_pci_resume_one_s4(struct pci_dev *pdev)
+{
+	struct Scsi_Host *shost = pci_get_drvdata(pdev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+	uint32_t intr_mode;
+	int error;
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+			"0292 PCI device Power Management resume.\n");
+
+	/* Restore device state from PCI config space */
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+	if (pdev->is_busmaster)
+		pci_set_master(pdev);
+
+	/* Start up the kernel thread for this host adapter. */
+	phba->worker_thread = kthread_run(lpfc_do_work, phba,
+					  "lpfc_worker_%d", phba->brd_no);
+	if (IS_ERR(phba->worker_thread)) {
+		error = PTR_ERR(phba->worker_thread);
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0293 PM resume failed to start worker "
+				"thread: error=x%x.\n", error);
+		return error;
+	}
+
+	/* Configure and enable interrupt */
+	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
+	if (intr_mode == LPFC_INTR_ERROR) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0294 PM resume Failed to enable interrupt\n");
+		return -EIO;
+	} else
+		phba->intr_mode = intr_mode;
+
+	/* Restart HBA and bring it online */
+	lpfc_sli_brdrestart(phba);
+	lpfc_online(phba);
+
+	/* Log the current active interrupt mode */
+	lpfc_log_intr_mode(phba, phba->intr_mode);
+
+	return 0;
+}
+
+/**
+ * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
+ * @pdev: pointer to PCI device.
+ * @state: the current PCI connection state.
+ *
+ * This routine is called from the PCI subsystem for error handling on a
+ * device with the SLI-4 interface spec. This function is called by the PCI
+ * subsystem after a PCI bus error affecting this device has been detected.
+ * When this function is invoked, it will need to stop all the I/Os and
+ * interrupt(s) to the device. Once that is done, it will return
+ * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper
+ * recovery as desired.
+ *
+ * Return codes
+ *      PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
+ *      PCI_ERS_RESULT_DISCONNECT - device could not be recovered
+ **/
+static pci_ers_result_t
+lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
+{
+	return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * lpfc_io_slot_reset_s4 - Method for restarting PCI SLI-4 device from scratch
+ * @pdev: pointer to PCI device.
+ *
+ * This routine is called from the PCI subsystem for error handling on a
+ * device with the SLI-4 interface spec. It is called after the PCI bus has
+ * been reset to restart the PCI card from scratch, as if from a cold-boot.
+ * During the PCI subsystem error recovery, after the driver returns
+ * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
+ * recovery and then call this routine before calling the .resume method to
+ * recover the device. This function will initialize the HBA device and
+ * enable the interrupt, but it will just put the HBA in an offline state
+ * without passing any I/O traffic.
+ *
+ * Return codes
+ *      PCI_ERS_RESULT_RECOVERED - the device has been recovered
+ *      PCI_ERS_RESULT_DISCONNECT - device could not be recovered
+ **/
+static pci_ers_result_t
+lpfc_io_slot_reset_s4(struct pci_dev *pdev)
+{
+	return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
+ * @pdev: pointer to PCI device
+ *
+ * This routine is called from the PCI subsystem for error handling on a
+ * device with the SLI-4 interface spec. It is called when kernel error
+ * recovery tells the lpfc driver that it is ok to resume normal PCI
+ * operation after PCI bus error recovery. After this call, traffic can
+ * start to flow from this device again.
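+ *
+ * These _s4 methods are reached through the driver's generic PCI error
+ * handling entry points (lpfc_io_error_detected, lpfc_io_slot_reset and
+ * lpfc_io_resume below), which the driver exposes to the PCI core through
+ * a pci_error_handlers table along the lines of the following sketch (the
+ * table name here is illustrative, not part of this patch):
+ *
+ *   static struct pci_error_handlers lpfc_err_handler = {
+ *           .error_detected = lpfc_io_error_detected,
+ *           .slot_reset     = lpfc_io_slot_reset,
+ *           .resume         = lpfc_io_resume,
+ *   };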
+ **/ +static void +lpfc_io_resume_s4(struct pci_dev *pdev) +{ + return; +} + +/** * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem * @pdev: pointer to PCI device * @pid: pointer to PCI device identifier @@ -4154,6 +7622,10 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) return -ENODEV; switch (dev_id) { + case PCI_DEVICE_ID_TIGERSHARK: + case PCI_DEVICE_ID_TIGERSHARK_S: + rc = lpfc_pci_probe_one_s4(pdev, pid); + break; default: rc = lpfc_pci_probe_one_s3(pdev, pid); break; @@ -4181,6 +7653,9 @@ lpfc_pci_remove_one(struct pci_dev *pdev) case LPFC_PCI_DEV_LP: lpfc_pci_remove_one_s3(pdev); break; + case LPFC_PCI_DEV_OC: + lpfc_pci_remove_one_s4(pdev); + break; default: lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "1424 Invalid PCI device group: 0x%x\n", @@ -4215,6 +7690,9 @@ lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg) case LPFC_PCI_DEV_LP: rc = lpfc_pci_suspend_one_s3(pdev, msg); break; + case LPFC_PCI_DEV_OC: + rc = lpfc_pci_suspend_one_s4(pdev, msg); + break; default: lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "1425 Invalid PCI device group: 0x%x\n", @@ -4248,6 +7726,9 @@ lpfc_pci_resume_one(struct pci_dev *pdev) case LPFC_PCI_DEV_LP: rc = lpfc_pci_resume_one_s3(pdev); break; + case LPFC_PCI_DEV_OC: + rc = lpfc_pci_resume_one_s4(pdev); + break; default: lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "1426 Invalid PCI device group: 0x%x\n", @@ -4283,6 +7764,9 @@ lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) case LPFC_PCI_DEV_LP: rc = lpfc_io_error_detected_s3(pdev, state); break; + case LPFC_PCI_DEV_OC: + rc = lpfc_io_error_detected_s4(pdev, state); + break; default: lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "1427 Invalid PCI device group: 0x%x\n", @@ -4317,6 +7801,9 @@ lpfc_io_slot_reset(struct pci_dev *pdev) case LPFC_PCI_DEV_LP: rc = lpfc_io_slot_reset_s3(pdev); break; + case LPFC_PCI_DEV_OC: + rc = lpfc_io_slot_reset_s4(pdev); + break; default: lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "1428 Invalid PCI device group: 0x%x\n", @@ -4346,6 +7833,9 @@ lpfc_io_resume(struct pci_dev *pdev) case LPFC_PCI_DEV_LP: lpfc_io_resume_s3(pdev); break; + case LPFC_PCI_DEV_OC: + lpfc_io_resume_s4(pdev); + break; default: lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "1429 Invalid PCI device group: 0x%x\n", diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index 134fc7fc2127..7f5899b70bd2 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c @@ -28,8 +28,10 @@ #include <scsi/scsi.h> +#include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" +#include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc_scsi.h" diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c index 35a976733398..516f4802f84e 100644 --- a/drivers/scsi/lpfc/lpfc_mem.c +++ b/drivers/scsi/lpfc/lpfc_mem.c @@ -28,8 +28,10 @@ #include <scsi/scsi.h> +#include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" +#include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc_scsi.h" @@ -45,7 +47,7 @@ * @phba: HBA to allocate pools for * * Description: Creates and allocates PCI pools lpfc_scsi_dma_buf_pool, - * lpfc_mbuf_pool, lpfc_hbq_pool. Creates and allocates kmalloc-backed mempools + * lpfc_mbuf_pool, lpfc_hrb_pool. Creates and allocates kmalloc-backed mempools * for LPFC_MBOXQ_t and lpfc_nodelist. Also allocates the VPI bitmask. * * Notes: Not interrupt-safe. Must be called with no locks held. 
If any
@@ -56,19 +58,30 @@
 * -ENOMEM on failure (if any memory allocations fail)
 **/
 int
-lpfc_mem_alloc(struct lpfc_hba * phba)
+lpfc_mem_alloc(struct lpfc_hba *phba, int align)
 {
 	struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
 	int longs;
 	int i;
 
-	phba->lpfc_scsi_dma_buf_pool = pci_pool_create("lpfc_scsi_dma_buf_pool",
-				phba->pcidev, phba->cfg_sg_dma_buf_size, 8, 0);
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		phba->lpfc_scsi_dma_buf_pool =
+			pci_pool_create("lpfc_scsi_dma_buf_pool",
+					phba->pcidev,
+					phba->cfg_sg_dma_buf_size,
+					phba->cfg_sg_dma_buf_size,
+					0);
+	else
+		phba->lpfc_scsi_dma_buf_pool =
+			pci_pool_create("lpfc_scsi_dma_buf_pool",
+					phba->pcidev, phba->cfg_sg_dma_buf_size,
+					align, 0);
 	if (!phba->lpfc_scsi_dma_buf_pool)
 		goto fail;
 
 	phba->lpfc_mbuf_pool = pci_pool_create("lpfc_mbuf_pool", phba->pcidev,
-					LPFC_BPL_SIZE, 8,0);
+					       LPFC_BPL_SIZE,
+					       align, 0);
 	if (!phba->lpfc_mbuf_pool)
 		goto fail_free_dma_buf_pool;
 
@@ -97,23 +110,31 @@ lpfc_mem_alloc(struct lpfc_hba * phba)
 			sizeof(struct lpfc_nodelist));
 	if (!phba->nlp_mem_pool)
 		goto fail_free_mbox_pool;
-
-	phba->lpfc_hbq_pool = pci_pool_create("lpfc_hbq_pool",phba->pcidev,
-					      LPFC_BPL_SIZE, 8, 0);
-	if (!phba->lpfc_hbq_pool)
+	phba->lpfc_hrb_pool = pci_pool_create("lpfc_hrb_pool",
+					      phba->pcidev,
+					      LPFC_HDR_BUF_SIZE, align, 0);
+	if (!phba->lpfc_hrb_pool)
 		goto fail_free_nlp_mem_pool;
+	phba->lpfc_drb_pool = pci_pool_create("lpfc_drb_pool",
+					      phba->pcidev,
+					      LPFC_DATA_BUF_SIZE, align, 0);
+	if (!phba->lpfc_drb_pool)
+		goto fail_free_hbq_pool;
 
 	/* vpi zero is reserved for the physical port so add 1 to max */
 	longs = ((phba->max_vpi + 1) + BITS_PER_LONG - 1) / BITS_PER_LONG;
 	phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long), GFP_KERNEL);
 	if (!phba->vpi_bmask)
-		goto fail_free_hbq_pool;
+		goto fail_free_dbq_pool;
 
 	return 0;
 
+ fail_free_dbq_pool:
+	pci_pool_destroy(phba->lpfc_drb_pool);
+	phba->lpfc_drb_pool = NULL;
 fail_free_hbq_pool:
-	lpfc_sli_hbqbuf_free_all(phba);
-	pci_pool_destroy(phba->lpfc_hbq_pool);
+	pci_pool_destroy(phba->lpfc_hrb_pool);
+	phba->lpfc_hrb_pool = NULL;
 fail_free_nlp_mem_pool:
 	mempool_destroy(phba->nlp_mem_pool);
 	phba->nlp_mem_pool = NULL;
@@ -136,27 +157,73 @@ lpfc_mem_alloc(struct lpfc_hba * phba)
 }
 
 /**
- * lpfc_mem_free - Frees all PCI and memory allocated by lpfc_mem_alloc
+ * lpfc_mem_free - Frees memory allocated by lpfc_mem_alloc
 * @phba: HBA to free memory for
 *
- * Description: Frees PCI pools lpfc_scsi_dma_buf_pool, lpfc_mbuf_pool,
- * lpfc_hbq_pool. Frees kmalloc-backed mempools for LPFC_MBOXQ_t and
- * lpfc_nodelist. Also frees the VPI bitmask
+ * Description: Free the memory allocated by the lpfc_mem_alloc routine.
+ * This routine is the counterpart of lpfc_mem_alloc.
* * Returns: None **/ void -lpfc_mem_free(struct lpfc_hba * phba) +lpfc_mem_free(struct lpfc_hba *phba) { - struct lpfc_sli *psli = &phba->sli; - struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; - LPFC_MBOXQ_t *mbox, *next_mbox; - struct lpfc_dmabuf *mp; int i; + struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; + /* Free VPI bitmask memory */ kfree(phba->vpi_bmask); + + /* Free HBQ pools */ lpfc_sli_hbqbuf_free_all(phba); + pci_pool_destroy(phba->lpfc_drb_pool); + phba->lpfc_drb_pool = NULL; + pci_pool_destroy(phba->lpfc_hrb_pool); + phba->lpfc_hrb_pool = NULL; + + /* Free NLP memory pool */ + mempool_destroy(phba->nlp_mem_pool); + phba->nlp_mem_pool = NULL; + + /* Free mbox memory pool */ + mempool_destroy(phba->mbox_mem_pool); + phba->mbox_mem_pool = NULL; + + /* Free MBUF memory pool */ + for (i = 0; i < pool->current_count; i++) + pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt, + pool->elements[i].phys); + kfree(pool->elements); + + pci_pool_destroy(phba->lpfc_mbuf_pool); + phba->lpfc_mbuf_pool = NULL; + /* Free DMA buffer memory pool */ + pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool); + phba->lpfc_scsi_dma_buf_pool = NULL; + + return; +} + +/** + * lpfc_mem_free_all - Frees all PCI and driver memory + * @phba: HBA to free memory for + * + * Description: Free memory from PCI and driver memory pools and also those + * used: lpfc_scsi_dma_buf_pool, lpfc_mbuf_pool, lpfc_hrb_pool. Frees + * kmalloc-backed mempools for LPFC_MBOXQ_t and lpfc_nodelist. Also frees + * the VPI bitmask. + * + * Returns: None + **/ +void +lpfc_mem_free_all(struct lpfc_hba *phba) +{ + struct lpfc_sli *psli = &phba->sli; + LPFC_MBOXQ_t *mbox, *next_mbox; + struct lpfc_dmabuf *mp; + + /* Free memory used in mailbox queue back to mailbox memory pool */ list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq, list) { mp = (struct lpfc_dmabuf *) (mbox->context1); if (mp) { @@ -166,6 +233,7 @@ lpfc_mem_free(struct lpfc_hba * phba) list_del(&mbox->list); mempool_free(mbox, phba->mbox_mem_pool); } + /* Free memory used in mailbox cmpl list back to mailbox memory pool */ list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq_cmpl, list) { mp = (struct lpfc_dmabuf *) (mbox->context1); if (mp) { @@ -175,8 +243,10 @@ lpfc_mem_free(struct lpfc_hba * phba) list_del(&mbox->list); mempool_free(mbox, phba->mbox_mem_pool); } - + /* Free the active mailbox command back to the mailbox memory pool */ + spin_lock_irq(&phba->hbalock); psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; + spin_unlock_irq(&phba->hbalock); if (psli->mbox_active) { mbox = psli->mbox_active; mp = (struct lpfc_dmabuf *) (mbox->context1); @@ -188,27 +258,14 @@ lpfc_mem_free(struct lpfc_hba * phba) psli->mbox_active = NULL; } - for (i = 0; i < pool->current_count; i++) - pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt, - pool->elements[i].phys); - kfree(pool->elements); - - pci_pool_destroy(phba->lpfc_hbq_pool); - mempool_destroy(phba->nlp_mem_pool); - mempool_destroy(phba->mbox_mem_pool); - - pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool); - pci_pool_destroy(phba->lpfc_mbuf_pool); - - phba->lpfc_hbq_pool = NULL; - phba->nlp_mem_pool = NULL; - phba->mbox_mem_pool = NULL; - phba->lpfc_scsi_dma_buf_pool = NULL; - phba->lpfc_mbuf_pool = NULL; + /* Free and destroy all the allocated memory pools */ + lpfc_mem_free(phba); /* Free the iocb lookup array */ kfree(psli->iocbq_lookup); psli->iocbq_lookup = NULL; + + return; } /** @@ -305,7 +362,7 @@ lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma) * lpfc_els_hbq_alloc -
Allocate an HBQ buffer * @phba: HBA to allocate HBQ buffer for * - * Description: Allocates a DMA-mapped HBQ buffer from the lpfc_hbq_pool PCI + * Description: Allocates a DMA-mapped HBQ buffer from the lpfc_hrb_pool PCI * pool along with a non-DMA-mapped container for it. * * Notes: Not interrupt-safe. Must be called with no locks held. @@ -323,7 +380,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba) if (!hbqbp) return NULL; - hbqbp->dbuf.virt = pci_pool_alloc(phba->lpfc_hbq_pool, GFP_KERNEL, + hbqbp->dbuf.virt = pci_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL, &hbqbp->dbuf.phys); if (!hbqbp->dbuf.virt) { kfree(hbqbp); @@ -334,7 +391,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba) } /** - * lpfc_mem_hbq_free - Frees an HBQ buffer allocated with lpfc_els_hbq_alloc + * lpfc_els_hbq_free - Frees an HBQ buffer allocated with lpfc_els_hbq_alloc * @phba: HBA buffer was allocated for * @hbqbp: HBQ container returned by lpfc_els_hbq_alloc * @@ -348,12 +405,73 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba) void lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp) { - pci_pool_free(phba->lpfc_hbq_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys); + pci_pool_free(phba->lpfc_hrb_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys); kfree(hbqbp); return; } /** + * lpfc_sli4_rb_alloc - Allocate an SLI4 Receive buffer + * @phba: HBA to allocate a receive buffer for + * + * Description: Allocates a DMA-mapped receive buffer from the lpfc_hrb_pool PCI + * pool along with a non-DMA-mapped container for it. + * + * Notes: Not interrupt-safe. Must be called with no locks held. + * + * Returns: + * pointer to HBQ on success + * NULL on failure + **/ +struct hbq_dmabuf * +lpfc_sli4_rb_alloc(struct lpfc_hba *phba) +{ + struct hbq_dmabuf *dma_buf; + + dma_buf = kmalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL); + if (!dma_buf) + return NULL; + + dma_buf->hbuf.virt = pci_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL, + &dma_buf->hbuf.phys); + if (!dma_buf->hbuf.virt) { + kfree(dma_buf); + return NULL; + } + dma_buf->dbuf.virt = pci_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL, + &dma_buf->dbuf.phys); + if (!dma_buf->dbuf.virt) { + pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt, + dma_buf->hbuf.phys); + kfree(dma_buf); + return NULL; + } + dma_buf->size = LPFC_BPL_SIZE; + return dma_buf; +} + +/** + * lpfc_sli4_rb_free - Frees a receive buffer + * @phba: HBA buffer was allocated for + * @dmab: DMA Buffer container returned by lpfc_sli4_rb_alloc + * + * Description: Frees both the container and the DMA-mapped buffers returned by + * lpfc_sli4_rb_alloc. + * + * Notes: Can be called with or without locks held.
+ * + * Returns: None + **/ +void +lpfc_sli4_rb_free(struct lpfc_hba *phba, struct hbq_dmabuf *dmab) +{ + pci_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys); + pci_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys); + kfree(dmab); + return; +} + +/** * lpfc_in_buf_free - Free a DMA buffer * @phba: HBA buffer is associated with * @mp: Buffer to free diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index 08cdc77af41c..6ba5a72f6049 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -28,8 +28,10 @@ #include <scsi/scsi_host.h> #include <scsi/scsi_transport_fc.h> +#include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" +#include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc_scsi.h" diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index a226c053c0f4..9af2db355bc6 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -31,8 +31,10 @@ #include <scsi/scsi_transport_fc.h> #include "lpfc_version.h" +#include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" +#include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc_scsi.h" @@ -57,6 +59,8 @@ static char *dif_op_str[] = { "SCSI_PROT_READ_CONVERT", "SCSI_PROT_WRITE_CONVERT" }; +static void +lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb); static void lpfc_debug_save_data(struct scsi_cmnd *cmnd) @@ -565,6 +569,8 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc) } iocb->ulpClass = CLASS3; psb->status = IOSTAT_SUCCESS; + /* Put it back into the SCSI buffer list */ + lpfc_release_scsi_buf_s4(phba, psb); } @@ -572,6 +578,271 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc) } /** + * lpfc_sli4_fcp_xri_aborted - Fast-path process of fcp xri abort + * @phba: pointer to lpfc hba data structure. + * @axri: pointer to the fcp xri abort wcqe structure. + * + * This routine is invoked by the worker thread to process a SLI4 fast-path + * FCP aborted xri. + **/ +void +lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba, + struct sli4_wcqe_xri_aborted *axri) +{ + uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); + struct lpfc_scsi_buf *psb, *next_psb; + unsigned long iflag = 0; + + spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock, iflag); + list_for_each_entry_safe(psb, next_psb, + &phba->sli4_hba.lpfc_abts_scsi_buf_list, list) { + if (psb->cur_iocbq.sli4_xritag == xri) { + list_del(&psb->list); + psb->status = IOSTAT_SUCCESS; + spin_unlock_irqrestore( + &phba->sli4_hba.abts_scsi_buf_list_lock, + iflag); + lpfc_release_scsi_buf_s4(phba, psb); + return; + } + } + spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock, + iflag); +} + +/** + * lpfc_sli4_repost_scsi_sgl_list - Repost the Scsi buffers sgl pages as a block + * @phba: pointer to lpfc hba data structure. + * + * This routine walks the list of scsi buffers that have been allocated and + * reposts them to the HBA by using SGL block post. This is needed after a + * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine + * is responsible for moving all scsi buffers on the lpfc_abts_scsi_sgl_list + * to the lpfc_scsi_buf_list. If the repost fails, reject all scsi buffers. + * + * Returns: 0 = success, non-zero failure.
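[Editor's note: illustrative sketch, not part of the patch. lpfc_sli4_fcp_xri_aborted() above follows a pattern used throughout this change: walk a lock-protected list with the safe iterator and drop the spinlock before handing the matched element to a release routine that may take other locks. A condensed standalone sketch; xri_buf and release_matching_xri are illustrative names, not driver symbols.]

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Illustrative element type; stands in for struct lpfc_scsi_buf. */
struct xri_buf {
	struct list_head list;
	u16 xri;
};

/* Find the entry owning @xri and hand it to @release. The lock is
 * dropped before the callback runs, as in lpfc_sli4_fcp_xri_aborted(). */
static void release_matching_xri(struct list_head *head, spinlock_t *lock,
				 u16 xri, void (*release)(struct xri_buf *))
{
	struct xri_buf *e, *next;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_for_each_entry_safe(e, next, head, list) {
		if (e->xri != xri)
			continue;
		list_del(&e->list);
		spin_unlock_irqrestore(lock, flags);
		release(e);
		return;
	}
	spin_unlock_irqrestore(lock, flags);
}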
+ **/ +int +lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba) +{ + struct lpfc_scsi_buf *psb; + int index, status, bcnt = 0, rcnt = 0, rc = 0; + LIST_HEAD(sblist); + + for (index = 0; index < phba->sli4_hba.scsi_xri_cnt; index++) { + psb = phba->sli4_hba.lpfc_scsi_psb_array[index]; + if (psb) { + /* Remove from SCSI buffer list */ + list_del(&psb->list); + /* Add it to a local SCSI buffer list */ + list_add_tail(&psb->list, &sblist); + if (++rcnt == LPFC_NEMBED_MBOX_SGL_CNT) { + bcnt = rcnt; + rcnt = 0; + } + } else + /* A hole present in the XRI array, need to skip */ + bcnt = rcnt; + + if (index == phba->sli4_hba.scsi_xri_cnt - 1) + /* End of XRI array for SCSI buffer, complete */ + bcnt = rcnt; + + /* Continue until we collect up to a nembed page worth of sgls */ + if (bcnt == 0) + continue; + /* Now, post the SCSI buffer list sgls as a block */ + status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt); + /* Reset SCSI buffer count for next round of posting */ + bcnt = 0; + while (!list_empty(&sblist)) { + list_remove_head(&sblist, psb, struct lpfc_scsi_buf, + list); + if (status) { + /* Put this back on the abort scsi list */ + psb->status = IOSTAT_LOCAL_REJECT; + psb->result = IOERR_ABORT_REQUESTED; + rc++; + } else + psb->status = IOSTAT_SUCCESS; + /* Put it back into the SCSI buffer list */ + lpfc_release_scsi_buf_s4(phba, psb); + } + } + return rc; +} + +/** + * lpfc_new_scsi_buf_s4 - Scsi buffer allocator for HBA with SLI4 IF spec + * @vport: The virtual port for which this call is being executed. + * @num_to_alloc: The requested number of buffers to allocate. + * + * This routine allocates a scsi buffer for device with SLI-4 interface spec, + * the scsi buffer contains all the necessary information needed to initiate + * a SCSI I/O. + * + * Return codes: + * int - number of scsi buffers that were allocated. + * 0 = failure, less than num_to_alloc is a partial failure. + **/ +static int +lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_scsi_buf *psb; + struct sli4_sge *sgl; + IOCB_t *iocb; + dma_addr_t pdma_phys_fcp_cmd; + dma_addr_t pdma_phys_fcp_rsp; + dma_addr_t pdma_phys_bpl, pdma_phys_bpl1; + uint16_t iotag, last_xritag = NO_XRI; + int status = 0, index; + int bcnt; + int non_sequential_xri = 0; + int rc = 0; + LIST_HEAD(sblist); + + for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { + psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL); + if (!psb) + break; + + /* + * Get memory from the pci pool to map the virt space to pci bus + * space for an I/O. The DMA buffer includes space for the + * struct fcp_cmnd, struct fcp_rsp and the number of bde's + * necessary to support the sg_tablesize. + */ + psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, + GFP_KERNEL, &psb->dma_handle); + if (!psb->data) { + kfree(psb); + break; + } + + /* Initialize virtual ptrs to dma_buf region. */ + memset(psb->data, 0, phba->cfg_sg_dma_buf_size); + + /* Allocate iotag for psb->cur_iocbq.
*/ + iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq); + if (iotag == 0) { + kfree(psb); + break; + } + + psb->cur_iocbq.sli4_xritag = lpfc_sli4_next_xritag(phba); + if (psb->cur_iocbq.sli4_xritag == NO_XRI) { + pci_pool_free(phba->lpfc_scsi_dma_buf_pool, + psb->data, psb->dma_handle); + kfree(psb); + break; + } + if (last_xritag != NO_XRI + && psb->cur_iocbq.sli4_xritag != (last_xritag+1)) { + non_sequential_xri = 1; + } else + list_add_tail(&psb->list, &sblist); + last_xritag = psb->cur_iocbq.sli4_xritag; + + index = phba->sli4_hba.scsi_xri_cnt++; + psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP; + + psb->fcp_bpl = psb->data; + psb->fcp_cmnd = (psb->data + phba->cfg_sg_dma_buf_size) + - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); + psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd + + sizeof(struct fcp_cmnd)); + + /* Initialize local short-hand pointers. */ + sgl = (struct sli4_sge *)psb->fcp_bpl; + pdma_phys_bpl = psb->dma_handle; + pdma_phys_fcp_cmd = + (psb->dma_handle + phba->cfg_sg_dma_buf_size) + - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); + pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd); + + /* + * The first two bdes are the FCP_CMD and FCP_RSP. The balance + * are sg list bdes. Initialize the first two and leave the + * rest for queuecommand. + */ + sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd)); + sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd)); + bf_set(lpfc_sli4_sge_len, sgl, sizeof(struct fcp_cmnd)); + bf_set(lpfc_sli4_sge_last, sgl, 0); + sgl->word2 = cpu_to_le32(sgl->word2); + sgl->word3 = cpu_to_le32(sgl->word3); + sgl++; + + /* Setup the physical region for the FCP RSP */ + sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp)); + sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp)); + bf_set(lpfc_sli4_sge_len, sgl, sizeof(struct fcp_rsp)); + bf_set(lpfc_sli4_sge_last, sgl, 1); + sgl->word2 = cpu_to_le32(sgl->word2); + sgl->word3 = cpu_to_le32(sgl->word3); + + /* + * Since the IOCB for the FCP I/O is built into this + * lpfc_scsi_buf, initialize it with all known data now. + */ + iocb = &psb->cur_iocbq.iocb; + iocb->un.fcpi64.bdl.ulpIoTag32 = 0; + iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64; + /* setting the BPL size to 2 * sizeof BDE may not be correct. + * We are setting the bpl to point to our sgl. An sgl's + * entries are 16 bytes, a bpl's entries are 12 bytes.
+ */ + iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd); + iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd); + iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd); + iocb->ulpBdeCount = 1; + iocb->ulpLe = 1; + iocb->ulpClass = CLASS3; + if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) + pdma_phys_bpl1 = pdma_phys_bpl + SGL_PAGE_SIZE; + else + pdma_phys_bpl1 = 0; + psb->dma_phys_bpl = pdma_phys_bpl; + phba->sli4_hba.lpfc_scsi_psb_array[index] = psb; + if (non_sequential_xri) { + status = lpfc_sli4_post_sgl(phba, pdma_phys_bpl, + pdma_phys_bpl1, + psb->cur_iocbq.sli4_xritag); + if (status) { + /* Put this back on the abort scsi list */ + psb->status = IOSTAT_LOCAL_REJECT; + psb->result = IOERR_ABORT_REQUESTED; + rc++; + } else + psb->status = IOSTAT_SUCCESS; + /* Put it back into the SCSI buffer list */ + lpfc_release_scsi_buf_s4(phba, psb); + break; + } + } + if (bcnt) { + status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt); + /* Reset SCSI buffer count for next round of posting */ + while (!list_empty(&sblist)) { + list_remove_head(&sblist, psb, struct lpfc_scsi_buf, + list); + if (status) { + /* Put this back on the abort scsi list */ + psb->status = IOSTAT_LOCAL_REJECT; + psb->result = IOERR_ABORT_REQUESTED; + rc++; + } else + psb->status = IOSTAT_SUCCESS; + /* Put it back into the SCSI buffer list */ + lpfc_release_scsi_buf_s4(phba, psb); + } + } + + return bcnt + non_sequential_xri - rc; +} + +/** + * lpfc_new_scsi_buf - Wrapper function for scsi buffer allocator + * @vport: The virtual port for which this call is being executed. + * @num_to_alloc: The requested number of buffers to allocate. @@ -638,6 +909,39 @@ lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) } /** + * lpfc_release_scsi_buf_s4 - Return a scsi buffer back to hba scsi buf list. + * @phba: The Hba for which this call is being executed. + * @psb: The scsi buffer which is being released. + * + * This routine releases @psb scsi buffer by adding it to tail of @phba + * lpfc_scsi_buf_list list. For SLI4 XRI's are tied to the scsi buffer + * and cannot be reused for at least RA_TOV amount of time if it was + * aborted. + **/ +static void +lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) +{ + unsigned long iflag = 0; + + if (psb->status == IOSTAT_LOCAL_REJECT + && psb->result == IOERR_ABORT_REQUESTED) { + spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock, + iflag); + psb->pCmd = NULL; + list_add_tail(&psb->list, + &phba->sli4_hba.lpfc_abts_scsi_buf_list); + spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock, + iflag); + } else { + + spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); + psb->pCmd = NULL; + list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list); + spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); + } +} + +/** + * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list. + * @phba: The Hba for which this call is being executed. + * @psb: The scsi buffer which is being released. @@ -1455,6 +1759,115 @@ out: } /** + * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec + * @phba: The Hba for which this call is being executed. + * @lpfc_cmd: The scsi buffer which is going to be mapped. + * + * This routine does the pci dma mapping for scatter-gather list of scsi cmnd + * field of @lpfc_cmd for device with SLI-4 interface spec.
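[Editor's note: illustrative sketch, not part of the patch. The release path above routes a buffer by how its I/O ended: an aborted command parks the buffer on lpfc_abts_scsi_buf_list, since its XRI must rest for RA_TOV before reuse, while everything else returns straight to the free list. A reduced standalone model of that two-list routing; all names here are illustrative.]

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Two destinations, each with its own lock, as in
 * lpfc_release_scsi_buf_s4(): quarantine aborted buffers, free the rest. */
struct buf_lists {
	spinlock_t free_lock, abts_lock;
	struct list_head free_list, abts_list;
};

static void release_buf(struct buf_lists *bl, struct list_head *entry,
			bool aborted)
{
	spinlock_t *lock = aborted ? &bl->abts_lock : &bl->free_lock;
	struct list_head *head = aborted ? &bl->abts_list : &bl->free_list;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_add_tail(entry, head);
	spin_unlock_irqrestore(lock, flags);
}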
+ * + * Return codes: + * 1 - Error + * 0 - Success + **/ +static int +lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) +{ + struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; + struct scatterlist *sgel = NULL; + struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; + struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl; + IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; + dma_addr_t physaddr; + uint32_t num_bde = 0; + uint32_t dma_len; + uint32_t dma_offset = 0; + int nseg; + + /* + * There are three possibilities here - use scatter-gather segment, use + * the single mapping, or neither. Start the lpfc command prep by + * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first + * data bde entry. + */ + if (scsi_sg_count(scsi_cmnd)) { + /* + * The driver stores the segment count returned from pci_map_sg + * because this is a count of dma-mappings used to map the use_sg + * pages. They are not guaranteed to be the same for those + * architectures that implement an IOMMU. + */ + + nseg = scsi_dma_map(scsi_cmnd); + if (unlikely(!nseg)) + return 1; + sgl += 1; + /* clear the last flag in the fcp_rsp map entry */ + sgl->word2 = le32_to_cpu(sgl->word2); + bf_set(lpfc_sli4_sge_last, sgl, 0); + sgl->word2 = cpu_to_le32(sgl->word2); + sgl += 1; + + lpfc_cmd->seg_cnt = nseg; + if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { + printk(KERN_ERR "%s: Too many sg segments from " + "dma_map_sg. Config %d, seg_cnt %d\n", + __func__, phba->cfg_sg_seg_cnt, + lpfc_cmd->seg_cnt); + scsi_dma_unmap(scsi_cmnd); + return 1; + } + + /* + * The driver established a maximum scatter-gather segment count + * during probe that limits the number of sg elements in any + * single scsi command. Just run through the seg_cnt and format + * the sge's. + * When using SLI-3 the driver will try to fit all the BDEs into + * the IOCB. If it can't then the BDEs get added to a BPL as it + * does for SLI-2 mode. + */ + scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) { + physaddr = sg_dma_address(sgel); + dma_len = sg_dma_len(sgel); + bf_set(lpfc_sli4_sge_len, sgl, sg_dma_len(sgel)); + sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr)); + sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr)); + if ((num_bde + 1) == nseg) + bf_set(lpfc_sli4_sge_last, sgl, 1); + else + bf_set(lpfc_sli4_sge_last, sgl, 0); + bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); + sgl->word2 = cpu_to_le32(sgl->word2); + sgl->word3 = cpu_to_le32(sgl->word3); + dma_offset += dma_len; + sgl++; + } + } else { + sgl += 1; + /* clear the last flag in the fcp_rsp map entry */ + sgl->word2 = le32_to_cpu(sgl->word2); + bf_set(lpfc_sli4_sge_last, sgl, 1); + sgl->word2 = cpu_to_le32(sgl->word2); + } + + /* + * Finish initializing those IOCB fields that are dependent on the + * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is + * explicitly reinitialized; all iocb memory resources are reused. + */ + fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd)); + + /* + * Due to difference in data length between DIF/non-DIF paths, + * we need to set word 4 of IOCB here + */ + iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd); + return 0; +} + +/** + * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer + * @phba: The Hba for which this call is being executed. + * @lpfc_cmd: The scsi buffer which is going to be mapped.
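[Editor's note: illustrative sketch, not part of the patch. A detail worth pulling out of lpfc_scsi_prep_dma_buf_s4() above: each SGE is filled in CPU byte order through the bf_set() accessors, converted to little endian exactly once, and only the final data SGE carries the last flag. A standalone sketch of that loop shape; the sge struct and the three helpers are stand-ins, not the driver's types.]

#include <stdint.h>

/* Reduced stand-in for struct sli4_sge; the real driver packs length,
 * offset and the last flag into word2/word3 with bf_set(). */
struct sge {
	uint32_t addr_hi, addr_lo;
	uint32_t word2, word3;
};

void sge_set_len(struct sge *s, uint32_t len);  /* hypothetical helper */
void sge_set_last(struct sge *s, int last);     /* hypothetical helper */
void sge_to_le(struct sge *s);                  /* hypothetical helper */

/* Build one SGE per mapped segment; only the final SGE is marked last,
 * and each entry is converted to little endian once, at the end. */
static void build_sges(struct sge *s, const uint64_t *addr,
		       const uint32_t *len, int nseg)
{
	for (int i = 0; i < nseg; i++, s++) {
		s->addr_hi = (uint32_t)(addr[i] >> 32);
		s->addr_lo = (uint32_t)addr[i];
		sge_set_len(s, len[i]);
		sge_set_last(s, i + 1 == nseg);
		sge_to_le(s);
	}
}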
@@ -1590,6 +2003,22 @@ lpfc_scsi_unprep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) } /** + * lpfc_scsi_unprep_dma_buf_s4 - Un-map DMA mapping of SG-list for SLI4 dev + * @phba: The Hba for which this call is being executed. + * @psb: The scsi buffer which is going to be un-mapped. + * + * This routine does DMA un-mapping of scatter gather list of scsi command + * field of @lpfc_cmd for device with SLI-4 interface spec. If we have to + * remove the sgl for this scsi buffer then we will do it here. For now + * we should be able to just call the sli3 unprep routine. + **/ +static void +lpfc_scsi_unprep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) +{ + lpfc_scsi_unprep_dma_buf_s3(phba, psb); +} + +/** * lpfc_scsi_unprep_dma_buf - Wrapper function for unmap DMA mapping of SG-list * @phba: The Hba for which this call is being executed. * @psb: The scsi buffer which is going to be un-mapped. @@ -2129,6 +2558,29 @@ lpfc_scsi_prep_cmnd_s3(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, } /** + * lpfc_scsi_prep_cmnd_s4 - Convert scsi cmnd to FCP info unit for SLI4 dev + * @vport: The virtual port for which this call is being executed. + * @lpfc_cmd: The scsi command which needs to be sent. + * @pnode: Pointer to lpfc_nodelist. + * + * This routine initializes fcp_cmnd and iocb data structure from scsi command + * to transfer for device with SLI4 interface spec. + **/ +static void +lpfc_scsi_prep_cmnd_s4(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, + struct lpfc_nodelist *pnode) +{ + /* + * The prep cmnd routines do not touch the sgl or its + * entries. We may not have to do anything different. + * I will leave this function in place until we can + * run some IO through the driver and determine if changes + * are needed. + */ + return lpfc_scsi_prep_cmnd_s3(vport, lpfc_cmd, pnode); +} + +/** * lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit + * @vport: The virtual port for which this call is being executed. + * @lpfc_cmd: The scsi command which needs to be sent. @@ -2209,6 +2661,37 @@ lpfc_scsi_prep_task_mgmt_cmd_s3(struct lpfc_vport *vport, } /** + * lpfc_scsi_prep_task_mgmt_cmnd_s4 - Convert SLI4 scsi TM cmd to FCP info unit + * @vport: The virtual port for which this call is being executed. + * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure. + * @lun: Logical unit number. + * @task_mgmt_cmd: SCSI task management command. + * + * This routine creates FCP information unit corresponding to @task_mgmt_cmd + * for device with SLI-4 interface spec. + * + * Return codes: + * 0 - Error + * 1 - Success + **/ +static int +lpfc_scsi_prep_task_mgmt_cmd_s4(struct lpfc_vport *vport, + struct lpfc_scsi_buf *lpfc_cmd, + unsigned int lun, + uint8_t task_mgmt_cmd) +{ + /* + * The prep cmnd routines do not touch the sgl or its + * entries. We may not have to do anything different. + * I will leave this function in place until we can + * run some IO through the driver and determine if changes + * are needed. + */ + return lpfc_scsi_prep_task_mgmt_cmd_s3(vport, lpfc_cmd, lun, + task_mgmt_cmd); +} + +/** * lpfc_scsi_prep_task_mgmt_cmnd - Wrapper func convert scsi TM cmd to FCP info * @vport: The virtual port for which this call is being executed. * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
@@ -2257,6 +2740,15 @@ lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) lpfc_scsi_prep_task_mgmt_cmd_s3; phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3; break; + case LPFC_PCI_DEV_OC: + phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4; + phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4; + phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd_s4; + phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf_s4; + phba->lpfc_scsi_prep_task_mgmt_cmd = + lpfc_scsi_prep_task_mgmt_cmd_s4; + phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4; + break; default: lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "1418 Invalid HBA PCI-device group: 0x%x\n", diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h index c7c440d5fa29..65dfc8bd5b49 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.h +++ b/drivers/scsi/lpfc/lpfc_scsi.h @@ -140,6 +140,8 @@ struct lpfc_scsi_buf { struct fcp_rsp *fcp_rsp; struct ulp_bde64 *fcp_bpl; + dma_addr_t dma_phys_bpl; + /* cur_iocbq has phys of the dma-able buffer. * Iotag is in here */ diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index e2d07d97fa8b..706bb22a6e8e 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -29,9 +29,12 @@ #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_transport_fc.h> +#include <scsi/fc/fc_fs.h> +#include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" +#include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc_scsi.h" @@ -121,6 +124,76 @@ __lpfc_sli_get_iocbq(struct lpfc_hba *phba) } /** + * __lpfc_clear_active_sglq - Remove the active sglq for this XRI. + * @phba: Pointer to HBA context object. + * @xritag: XRI value. + * + * This function clears the sglq pointer from the array of active + * sglq's. The xritag that is passed in is used to index into the + * array. Before the xritag can be used it needs to be adjusted + * by subtracting the xribase. + * + * Returns sglq pointer = success, NULL = Failure. + **/ +static struct lpfc_sglq * +__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag) +{ + uint16_t adj_xri; + struct lpfc_sglq *sglq; + adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base; + if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri) + return NULL; + sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri]; + phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = NULL; + return sglq; +} + +/** + * __lpfc_get_active_sglq - Get the active sglq for this XRI. + * @phba: Pointer to HBA context object. + * @xritag: XRI value. + * + * This function returns the sglq pointer from the array of active + * sglq's. The xritag that is passed in is used to index into the + * array. Before the xritag can be used it needs to be adjusted + * by subtracting the xribase. + * + * Returns sglq pointer = success, NULL = Failure. + **/ +static struct lpfc_sglq * +__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag) +{ + uint16_t adj_xri; + struct lpfc_sglq *sglq; + adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base; + if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri) + return NULL; + sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri]; + return sglq; +} + +/** + * __lpfc_sli_get_sglq - Allocates a sglq object from the sgl pool + * @phba: Pointer to HBA context object. + * + * This function is called with hbalock held. This function + * gets a new driver sglq object from the sglq list.
If the + * list is not empty then it is successful, it returns pointer to the newly + * allocated sglq object else it returns NULL. + **/ +static struct lpfc_sglq * +__lpfc_sli_get_sglq(struct lpfc_hba *phba) +{ + struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list; + struct lpfc_sglq *sglq = NULL; + uint16_t adj_xri; + list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list); + adj_xri = sglq->sli4_xritag - phba->sli4_hba.max_cfg_param.xri_base; + phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq; + return sglq; +} + +/** * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool * @phba: Pointer to HBA context object. * @@ -298,6 +371,14 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd) case CMD_GEN_REQUEST64_CR: case CMD_GEN_REQUEST64_CX: case CMD_XMIT_ELS_RSP64_CX: + case DSSCMD_IWRITE64_CR: + case DSSCMD_IWRITE64_CX: + case DSSCMD_IREAD64_CR: + case DSSCMD_IREAD64_CX: + case DSSCMD_INVALIDATE_DEK: + case DSSCMD_SET_KEK: + case DSSCMD_GET_KEK_ID: + case DSSCMD_GEN_XFER: type = LPFC_SOL_IOCB; break; case CMD_ABORT_XRI_CN: @@ -2629,6 +2710,56 @@ lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask) return retval; } +/** + * lpfc_sli_brdready_s4 - Check for sli4 host ready status + * @phba: Pointer to HBA context object. + * @mask: Bit mask to be checked. + * + * This function checks the host status register to check if HBA is + * ready. This function will wait in a loop for the HBA to be ready + * If the HBA is not ready , the function will will reset the HBA PCI + * function again. The function returns 1 when HBA fail to be ready + * otherwise returns zero. + **/ +static int +lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask) +{ + uint32_t status; + int retval = 0; + + /* Read the HBA Host Status Register */ + status = lpfc_sli4_post_status_check(phba); + + if (status) { + phba->pport->port_state = LPFC_VPORT_UNKNOWN; + lpfc_sli_brdrestart(phba); + status = lpfc_sli4_post_status_check(phba); + } + + /* Check to see if any errors occurred during init */ + if (status) { + phba->link_state = LPFC_HBA_ERROR; + retval = 1; + } else + phba->sli4_hba.intr_enable = 0; + + return retval; +} + +/** + * lpfc_sli_brdready - Wrapper func for checking the hba readyness + * @phba: Pointer to HBA context object. + * @mask: Bit mask to be checked. + * + * This routine wraps the actual SLI3 or SLI4 hba readyness check routine + * from the API jump table function pointer from the lpfc_hba struct. + **/ +int +lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask) +{ + return phba->lpfc_sli_brdready(phba, mask); +} + #define BARRIER_TEST_PATTERN (0xdeadbeef) /** @@ -2863,7 +2994,66 @@ lpfc_sli_brdreset(struct lpfc_hba *phba) } /** - * lpfc_sli_brdrestart - Restart the HBA + * lpfc_sli4_brdreset - Reset a sli-4 HBA + * @phba: Pointer to HBA context object. + * + * This function resets a SLI4 HBA. This function disables PCI layer parity + * checking during resets the device. The caller is not required to hold + * any locks. + * + * This function returns 0 always. 
+ **/ +int +lpfc_sli4_brdreset(struct lpfc_hba *phba) +{ + struct lpfc_sli *psli = &phba->sli; + uint16_t cfg_value; + uint8_t qindx; + + /* Reset HBA */ + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "0295 Reset HBA Data: x%x x%x\n", + phba->pport->port_state, psli->sli_flag); + + /* perform board reset */ + phba->fc_eventTag = 0; + phba->pport->fc_myDID = 0; + phba->pport->fc_prevDID = 0; + + /* Turn off parity checking and serr during the physical reset */ + pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value); + pci_write_config_word(phba->pcidev, PCI_COMMAND, + (cfg_value & + ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); + + spin_lock_irq(&phba->hbalock); + psli->sli_flag &= ~(LPFC_PROCESS_LA); + phba->fcf.fcf_flag = 0; + /* Clean up the child queue list for the CQs */ + list_del_init(&phba->sli4_hba.mbx_wq->list); + list_del_init(&phba->sli4_hba.els_wq->list); + list_del_init(&phba->sli4_hba.hdr_rq->list); + list_del_init(&phba->sli4_hba.dat_rq->list); + list_del_init(&phba->sli4_hba.mbx_cq->list); + list_del_init(&phba->sli4_hba.els_cq->list); + list_del_init(&phba->sli4_hba.rxq_cq->list); + for (qindx = 0; qindx < phba->cfg_fcp_wq_count; qindx++) + list_del_init(&phba->sli4_hba.fcp_wq[qindx]->list); + for (qindx = 0; qindx < phba->cfg_fcp_eq_count; qindx++) + list_del_init(&phba->sli4_hba.fcp_cq[qindx]->list); + spin_unlock_irq(&phba->hbalock); + + /* Now physically reset the device */ + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "0389 Performing PCI function reset!\n"); + /* Perform FCoE PCI function reset */ + lpfc_pci_function_reset(phba); + + return 0; +} + +/** + * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba * @phba: Pointer to HBA context object. * * This function is called in the SLI initialization code path to @@ -2875,8 +3065,8 @@ lpfc_sli_brdreset(struct lpfc_hba *phba) * The function does not guarantee completion of MBX_RESTART mailbox * command before the return of this function. **/ -int -lpfc_sli_brdrestart(struct lpfc_hba *phba) +static int +lpfc_sli_brdrestart_s3(struct lpfc_hba *phba) { MAILBOX_t *mb; struct lpfc_sli *psli; @@ -2915,7 +3105,7 @@ lpfc_sli_brdrestart(struct lpfc_hba *phba) lpfc_sli_brdreset(phba); phba->pport->stopped = 0; phba->link_state = LPFC_INIT_START; - + phba->hba_flag = 0; spin_unlock_irq(&phba->hbalock); memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); @@ -2930,6 +3120,55 @@ lpfc_sli_brdrestart(struct lpfc_hba *phba) } /** + * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba + * @phba: Pointer to HBA context object. + * + * This function is called in the SLI initialization code path to restart + * a SLI4 HBA. The caller is not required to hold any lock. + * At the end of the function, it calls lpfc_hba_down_post function to + * free any pending commands. + **/ +static int +lpfc_sli_brdrestart_s4(struct lpfc_hba *phba) +{ + struct lpfc_sli *psli = &phba->sli; + + + /* Restart HBA */ + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "0296 Restart HBA Data: x%x x%x\n", + phba->pport->port_state, psli->sli_flag); + + lpfc_sli4_brdreset(phba); + + spin_lock_irq(&phba->hbalock); + phba->pport->stopped = 0; + phba->link_state = LPFC_INIT_START; + phba->hba_flag = 0; + spin_unlock_irq(&phba->hbalock); + + memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); + psli->stats_start = get_seconds(); + + lpfc_hba_down_post(phba); + + return 0; +} + +/** + * lpfc_sli_brdrestart - Wrapper func for restarting hba + * @phba: Pointer to HBA context object. 
+ * + * This routine wraps the actual SLI3 or SLI4 hba restart routine from the + * API jump table function pointer from the lpfc_hba struct. +**/ +int +lpfc_sli_brdrestart(struct lpfc_hba *phba) +{ + return phba->lpfc_sli_brdrestart(phba); +} + +/** * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart * @phba: Pointer to HBA context object. * @@ -3353,6 +3592,488 @@ lpfc_sli_hba_setup_error: return rc; } +/** + * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region + * @phba: Pointer to HBA context object. + * @mboxq: mailbox pointer. + * This function issues a dump mailbox command to read config region + * 23 and parse the records in the region and populate the driver + * data structure. + **/ +static int +lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba, + LPFC_MBOXQ_t *mboxq) +{ + struct lpfc_dmabuf *mp; + struct lpfc_mqe *mqe; + uint32_t data_length; + int rc; + + /* Program the default value of vlan_id and fc_map */ + phba->valid_vlan = 0; + phba->fc_map[0] = LPFC_FCOE_FCF_MAP0; + phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; + phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; + + mqe = &mboxq->u.mqe; + if (lpfc_dump_fcoe_param(phba, mboxq)) + return -ENOMEM; + + mp = (struct lpfc_dmabuf *) mboxq->context1; + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + + lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, + "(%d):2571 Mailbox cmd x%x Status x%x " + "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x " + "x%x x%x x%x x%x x%x x%x x%x x%x x%x " + "CQ: x%x x%x x%x x%x\n", + mboxq->vport ? mboxq->vport->vpi : 0, + bf_get(lpfc_mqe_command, mqe), + bf_get(lpfc_mqe_status, mqe), + mqe->un.mb_words[0], mqe->un.mb_words[1], + mqe->un.mb_words[2], mqe->un.mb_words[3], + mqe->un.mb_words[4], mqe->un.mb_words[5], + mqe->un.mb_words[6], mqe->un.mb_words[7], + mqe->un.mb_words[8], mqe->un.mb_words[9], + mqe->un.mb_words[10], mqe->un.mb_words[11], + mqe->un.mb_words[12], mqe->un.mb_words[13], + mqe->un.mb_words[14], mqe->un.mb_words[15], + mqe->un.mb_words[16], mqe->un.mb_words[50], + mboxq->mcqe.word0, + mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1, + mboxq->mcqe.trailer); + + if (rc) { + lpfc_mbuf_free(phba, mp->virt, mp->phys); + kfree(mp); + return -EIO; + } + data_length = mqe->un.mb_words[5]; + if (data_length > DMP_FCOEPARAM_RGN_SIZE) + return -EIO; + + lpfc_parse_fcoe_conf(phba, mp->virt, data_length); + lpfc_mbuf_free(phba, mp->virt, mp->phys); + kfree(mp); + return 0; +} + +/** + * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data + * @phba: pointer to lpfc hba data structure. + * @mboxq: pointer to the LPFC_MBOXQ_t structure. + * @vpd: pointer to the memory to hold resulting port vpd data. + * @vpd_size: On input, the number of bytes allocated to @vpd. + * On output, the number of data bytes in @vpd. + * + * This routine executes a READ_REV SLI4 mailbox command. In + * addition, this routine gets the port vpd data. + * + * Return codes + * 0 - successful + * ENOMEM - could not allocate memory. + **/ +static int +lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, + uint8_t *vpd, uint32_t *vpd_size) +{ + int rc = 0; + uint32_t dma_size; + struct lpfc_dmabuf *dmabuf; + struct lpfc_mqe *mqe; + + dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); + if (!dmabuf) + return -ENOMEM; + + /* + * Get a DMA buffer for the vpd data resulting from the READ_REV + * mailbox command.
+ */ + dma_size = *vpd_size; + dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, + dma_size, + &dmabuf->phys, + GFP_KERNEL); + if (!dmabuf->virt) { + kfree(dmabuf); + return -ENOMEM; + } + memset(dmabuf->virt, 0, dma_size); + + /* + * The SLI4 implementation of READ_REV conflicts at word1, + * bits 31:16 and SLI4 adds vpd functionality not present + * in SLI3. This code corrects the conflicts. + */ + lpfc_read_rev(phba, mboxq); + mqe = &mboxq->u.mqe; + mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys); + mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys); + mqe->un.read_rev.word1 &= 0x0000FFFF; + bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1); + bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size); + + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + if (rc) { + dma_free_coherent(&phba->pcidev->dev, dma_size, + dmabuf->virt, dmabuf->phys); + return -EIO; + } + + lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, + "(%d):0380 Mailbox cmd x%x Status x%x " + "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x " + "x%x x%x x%x x%x x%x x%x x%x x%x x%x " + "CQ: x%x x%x x%x x%x\n", + mboxq->vport ? mboxq->vport->vpi : 0, + bf_get(lpfc_mqe_command, mqe), + bf_get(lpfc_mqe_status, mqe), + mqe->un.mb_words[0], mqe->un.mb_words[1], + mqe->un.mb_words[2], mqe->un.mb_words[3], + mqe->un.mb_words[4], mqe->un.mb_words[5], + mqe->un.mb_words[6], mqe->un.mb_words[7], + mqe->un.mb_words[8], mqe->un.mb_words[9], + mqe->un.mb_words[10], mqe->un.mb_words[11], + mqe->un.mb_words[12], mqe->un.mb_words[13], + mqe->un.mb_words[14], mqe->un.mb_words[15], + mqe->un.mb_words[16], mqe->un.mb_words[50], + mboxq->mcqe.word0, + mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1, + mboxq->mcqe.trailer); + + /* + * The available vpd length cannot be bigger than the + * DMA buffer passed to the port. Catch the less than + * case and update the caller's size. + */ + if (mqe->un.read_rev.avail_vpd_len < *vpd_size) + *vpd_size = mqe->un.read_rev.avail_vpd_len; + + lpfc_sli_pcimem_bcopy(dmabuf->virt, vpd, *vpd_size); + dma_free_coherent(&phba->pcidev->dev, dma_size, + dmabuf->virt, dmabuf->phys); + kfree(dmabuf); + return 0; +} + +/** + * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues + * @phba: pointer to lpfc hba data structure. + * + * This routine is called to explicitly arm the SLI4 device's completion and + * event queues. + **/ +static void +lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba) +{ + uint8_t fcp_eqidx; + + lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM); + lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM); + lpfc_sli4_cq_release(phba->sli4_hba.rxq_cq, LPFC_QUEUE_REARM); + for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) + lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx], + LPFC_QUEUE_REARM); + lpfc_sli4_eq_release(phba->sli4_hba.sp_eq, LPFC_QUEUE_REARM); + for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) + lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx], + LPFC_QUEUE_REARM); +} + +/** + * lpfc_sli4_hba_setup - SLI4 device initialization PCI function + * @phba: Pointer to HBA context object. + * + * This function is the main SLI4 device initialization PCI function. This + * function is called by the HBA initialization code, HBA reset code and + * HBA error attention handler code. Caller is not required to hold any + * locks.
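[Editor's note: illustrative sketch, not part of the patch. lpfc_sli4_read_rev() above has a small contract worth noting: @vpd_size is in/out. The caller states how big its buffer is, the port reports how much vpd actually exists, and the copy takes the smaller of the two. A standalone sketch of that clamp; the function name and parameters are illustrative.]

#include <stdint.h>
#include <string.h>

/* Models the vpd size handshake in lpfc_sli4_read_rev(): never copy more
 * than the caller's buffer, and report back the true length copied. */
static void copy_vpd(uint8_t *dst, uint32_t *dst_size,
		     const uint8_t *dma_buf, uint32_t avail_len)
{
	if (avail_len < *dst_size)
		*dst_size = avail_len; /* port had less than we asked for */
	memcpy(dst, dma_buf, *dst_size);
}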
+ **/ +int +lpfc_sli4_hba_setup(struct lpfc_hba *phba) +{ + int rc; + LPFC_MBOXQ_t *mboxq; + struct lpfc_mqe *mqe; + uint8_t *vpd; + uint32_t vpd_size; + uint32_t ftr_rsp = 0; + struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport); + struct lpfc_vport *vport = phba->pport; + struct lpfc_dmabuf *mp; + + /* Perform a PCI function reset to start from clean */ + rc = lpfc_pci_function_reset(phba); + if (unlikely(rc)) + return -ENODEV; + + /* Check the HBA Host Status Register for readiness */ + rc = lpfc_sli4_post_status_check(phba); + if (unlikely(rc)) + return -ENODEV; + else { + spin_lock_irq(&phba->hbalock); + phba->sli.sli_flag |= LPFC_SLI_ACTIVE; + spin_unlock_irq(&phba->hbalock); + } + + /* + * Allocate a single mailbox container for initializing the + * port. + */ + mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mboxq) + return -ENOMEM; + + /* + * Continue initialization with default values even if driver failed + * to read FCoE param config regions + */ + if (lpfc_sli4_read_fcoe_params(phba, mboxq)) + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, + "2570 Failed to read FCoE parameters \n"); + + /* Issue READ_REV to collect vpd and FW information. */ + vpd_size = PAGE_SIZE; + vpd = kzalloc(vpd_size, GFP_KERNEL); + if (!vpd) { + rc = -ENOMEM; + goto out_free_mbox; + } + + rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size); + if (unlikely(rc)) + goto out_free_vpd; + + mqe = &mboxq->u.mqe; + if ((bf_get(lpfc_mbx_rd_rev_sli_lvl, + &mqe->un.read_rev) != LPFC_SLI_REV4) || + (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev) == 0)) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "0376 READ_REV Error. SLI Level %d " + "FCoE enabled %d\n", + bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev), + bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)); + rc = -EIO; + goto out_free_vpd; + } + /* Single threaded at this point, no need for lock */ + spin_lock_irq(&phba->hbalock); + phba->hba_flag |= HBA_FCOE_SUPPORT; + spin_unlock_irq(&phba->hbalock); + /* + * Evaluate the read rev and vpd data. Populate the driver + * state with the results. If this routine fails, the failure + * is not fatal as the driver will use generic values. + */ + rc = lpfc_parse_vpd(phba, vpd, vpd_size); + if (unlikely(!rc)) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "0377 Error %d parsing vpd. " + "Using defaults.\n", rc); + rc = 0; + } + + /* By now, we should determine the SLI revision, hard code for now */ + phba->sli_rev = LPFC_SLI_REV4; + + /* + * Discover the port's supported feature set and match it against the + * host's requests. + */ + lpfc_request_features(phba, mboxq); + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + if (unlikely(rc)) { + rc = -EIO; + goto out_free_vpd; + } + + /* + * The port must support FCP initiator mode as this is the + * only mode running in the host. + */ + if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) { + lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, + "0378 No support for fcpi mode.\n"); + ftr_rsp++; + } + + /* + * If the port cannot support the host's requested features + * then turn off the global config parameters to disable the + * feature in the driver. This is not a fatal error.
+ */ + if ((phba->cfg_enable_bg) && + !(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) + ftr_rsp++; + + if (phba->max_vpi && phba->cfg_enable_npiv && + !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs))) + ftr_rsp++; + + if (ftr_rsp) { + lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, + "0379 Feature Mismatch Data: x%08x %08x " + "x%x x%x x%x\n", mqe->un.req_ftrs.word2, + mqe->un.req_ftrs.word3, phba->cfg_enable_bg, + phba->cfg_enable_npiv, phba->max_vpi); + if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) + phba->cfg_enable_bg = 0; + if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs))) + phba->cfg_enable_npiv = 0; + } + + /* These SLI3 features are assumed in SLI4 */ + spin_lock_irq(&phba->hbalock); + phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED); + spin_unlock_irq(&phba->hbalock); + + /* Read the port's service parameters. */ + lpfc_read_sparam(phba, mboxq, vport->vpi); + mboxq->vport = vport; + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + mp = (struct lpfc_dmabuf *) mboxq->context1; + if (rc == MBX_SUCCESS) { + memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm)); + rc = 0; + } + + /* + * This memory was allocated by the lpfc_read_sparam routine. Release + * it to the mbuf pool. + */ + lpfc_mbuf_free(phba, mp->virt, mp->phys); + kfree(mp); + mboxq->context1 = NULL; + if (unlikely(rc)) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "0382 READ_SPARAM command failed " + "status %d, mbxStatus x%x\n", + rc, bf_get(lpfc_mqe_status, mqe)); + phba->link_state = LPFC_HBA_ERROR; + rc = -EIO; + goto out_free_vpd; + } + + if (phba->cfg_soft_wwnn) + u64_to_wwn(phba->cfg_soft_wwnn, + vport->fc_sparam.nodeName.u.wwn); + if (phba->cfg_soft_wwpn) + u64_to_wwn(phba->cfg_soft_wwpn, + vport->fc_sparam.portName.u.wwn); + memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName, + sizeof(struct lpfc_name)); + memcpy(&vport->fc_portname, &vport->fc_sparam.portName, + sizeof(struct lpfc_name)); + + /* Update the fc_host data structures with new wwn. */ + fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); + fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); + + /* Register SGL pool to the device using non-embedded mailbox command */ + rc = lpfc_sli4_post_sgl_list(phba); + if (unlikely(rc)) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "0582 Error %d during sgl post operation", rc); + rc = -ENODEV; + goto out_free_vpd; + } + + /* Register SCSI SGL pool to the device */ + rc = lpfc_sli4_repost_scsi_sgl_list(phba); + if (unlikely(rc)) { + lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, + "0383 Error %d during scsi sgl post opeation", + rc); + /* Some Scsi buffers were moved to the abort scsi list */ + /* A pci function reset will repost them */ + rc = -ENODEV; + goto out_free_vpd; + } + + /* Post the rpi header region to the device. 
*/ + rc = lpfc_sli4_post_all_rpi_hdrs(phba); + if (unlikely(rc)) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "0393 Error %d during rpi post operation\n", + rc); + rc = -ENODEV; + goto out_free_vpd; + } + /* Temporary initialization of lpfc_fip_flag to non-fip */ + bf_set(lpfc_fip_flag, &phba->sli4_hba.sli4_flags, 0); + + /* Set up all the queues to the device */ + rc = lpfc_sli4_queue_setup(phba); + if (unlikely(rc)) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "0381 Error %d during queue setup.\n ", rc); + goto out_stop_timers; + } + + /* Arm the CQs and then EQs on device */ + lpfc_sli4_arm_cqeq_intr(phba); + + /* Indicate device interrupt mode */ + phba->sli4_hba.intr_enable = 1; + + /* Allow asynchronous mailbox command to go through */ + spin_lock_irq(&phba->hbalock); + phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; + spin_unlock_irq(&phba->hbalock); + + /* Post receive buffers to the device */ + lpfc_sli4_rb_setup(phba); + + /* Start the ELS watchdog timer */ + /* + * The driver for SLI4 is not yet ready to process timeouts + * or interrupts. Once it is, the comment bars can be removed. + */ + /* mod_timer(&vport->els_tmofunc, + * jiffies + HZ * (phba->fc_ratov*2)); */ + + /* Start heart beat timer */ + mod_timer(&phba->hb_tmofunc, + jiffies + HZ * LPFC_HB_MBOX_INTERVAL); + phba->hb_outstanding = 0; + phba->last_completion_time = jiffies; + + /* Start error attention (ERATT) polling timer */ + mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL); + + /* + * The port is ready, set the host's link state to LINK_DOWN + * in preparation for link interrupts. + */ + lpfc_init_link(phba, mboxq, phba->cfg_topology, phba->cfg_link_speed); + mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + lpfc_set_loopback_flag(phba); + /* Change driver state to LPFC_LINK_DOWN right before init link */ + spin_lock_irq(&phba->hbalock); + phba->link_state = LPFC_LINK_DOWN; + spin_unlock_irq(&phba->hbalock); + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); + if (unlikely(rc != MBX_NOT_FINISHED)) { + kfree(vpd); + return 0; + } else + rc = -EIO; + + /* Unset all the queues set up in this routine on error */ + if (rc) + lpfc_sli4_queue_unset(phba); + +out_stop_timers: + if (rc) + lpfc_stop_hba_timers(phba); +out_free_vpd: + kfree(vpd); +out_free_mbox: + mempool_free(mboxq, phba->mbox_mem_pool); + return rc; +} /** * lpfc_mbox_timeout - Timeout call back function for mbox timer @@ -3812,13 +4533,420 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, out_not_finished: if (processing_queue) { - pmbox->mb.mbxStatus = MBX_NOT_FINISHED; + pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED; lpfc_mbox_cmpl_put(phba, pmbox); } return MBX_NOT_FINISHED; } /** + * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox + * @phba: Pointer to HBA context object. + * @mboxq: Pointer to mailbox object. + * + * The function posts a mailbox to the port. The mailbox is expected + * to be completely filled in and ready for the port to operate on it. + * This routine executes a synchronous completion operation on the + * mailbox by polling for its completion. + * + * The caller must not be holding any locks when calling this routine. + * + * Returns: + * MBX_SUCCESS - mailbox posted successfully + * Any of the MBX error values.
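[Editor's note: illustrative sketch, not part of the patch. lpfc_sli4_post_sync_mbox(), whose body follows, writes the high and then the low half of the mailbox DMA address and, after each write, polls a ready bit in the bootstrap doorbell until it sets or a per-command timeout lapses. The loop shape, reduced to its essentials; poll_ready and its arguments are illustrative, not driver symbols.]

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/types.h>

/* Poll @is_ready() until it reports true or @timeout_ms elapses; mirrors
 * the db_ready loops in lpfc_sli4_post_sync_mbox() below. */
static int poll_ready(bool (*is_ready)(void *), void *arg,
		      unsigned int timeout_ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	while (!is_ready(arg)) {
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		msleep(2); /* doorbell latency is in the millisecond range */
	}
	return 0;
}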
+ **/ +static int +lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) +{ + int rc = MBX_SUCCESS; + unsigned long iflag; + uint32_t db_ready; + uint32_t mcqe_status; + uint32_t mbx_cmnd; + unsigned long timeout; + struct lpfc_sli *psli = &phba->sli; + struct lpfc_mqe *mb = &mboxq->u.mqe; + struct lpfc_bmbx_create *mbox_rgn; + struct dma_address *dma_address; + struct lpfc_register bmbx_reg; + + /* + * Only one mailbox can be active to the bootstrap mailbox region + * at a time and there is no queueing provided. + */ + spin_lock_irqsave(&phba->hbalock, iflag); + if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { + spin_unlock_irqrestore(&phba->hbalock, iflag); + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "(%d):2532 Mailbox command x%x (x%x) " + "cannot issue Data: x%x x%x\n", + mboxq->vport ? mboxq->vport->vpi : 0, + mboxq->u.mb.mbxCommand, + lpfc_sli4_mbox_opcode_get(phba, mboxq), + psli->sli_flag, MBX_POLL); + return MBXERR_ERROR; + } + /* The server grabs the token and owns it until release */ + psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; + phba->sli.mbox_active = mboxq; + spin_unlock_irqrestore(&phba->hbalock, iflag); + + /* + * Initialize the bootstrap memory region to avoid stale data areas + * in the mailbox post. Then copy the caller's mailbox contents to + * the bmbx mailbox region. + */ + mbx_cmnd = bf_get(lpfc_mqe_command, mb); + memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create)); + lpfc_sli_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt, + sizeof(struct lpfc_mqe)); + + /* Post the high mailbox dma address to the port and wait for ready. */ + dma_address = &phba->sli4_hba.bmbx.dma_address; + writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr); + + timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mbx_cmnd) + * 1000) + jiffies; + do { + bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr); + db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg); + if (!db_ready) + msleep(2); + + if (time_after(jiffies, timeout)) { + rc = MBXERR_ERROR; + goto exit; + } + } while (!db_ready); + + /* Post the low mailbox dma address to the port. */ + writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr); + timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mbx_cmnd) + * 1000) + jiffies; + do { + bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr); + db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg); + if (!db_ready) + msleep(2); + + if (time_after(jiffies, timeout)) { + rc = MBXERR_ERROR; + goto exit; + } + } while (!db_ready); + + /* + * Read the CQ to ensure the mailbox has completed. + * If so, update the mailbox status so that the upper layers + * can complete the request normally. + */ + lpfc_sli_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb, + sizeof(struct lpfc_mqe)); + mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt; + lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe, + sizeof(struct lpfc_mcqe)); + mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe); + + /* Prefix the mailbox status with range x4000 to note SLI4 status. */ + if (mcqe_status != MB_CQE_STATUS_SUCCESS) { + bf_set(lpfc_mqe_status, mb, LPFC_MBX_ERROR_RANGE | mcqe_status); + rc = MBXERR_ERROR; + } + + lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, + "(%d):0356 Mailbox cmd x%x (x%x) Status x%x " + "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x" + " x%x x%x CQ: x%x x%x x%x x%x\n", + mboxq->vport ? 
mboxq->vport->vpi : 0, + mbx_cmnd, lpfc_sli4_mbox_opcode_get(phba, mboxq), + bf_get(lpfc_mqe_status, mb), + mb->un.mb_words[0], mb->un.mb_words[1], + mb->un.mb_words[2], mb->un.mb_words[3], + mb->un.mb_words[4], mb->un.mb_words[5], + mb->un.mb_words[6], mb->un.mb_words[7], + mb->un.mb_words[8], mb->un.mb_words[9], + mb->un.mb_words[10], mb->un.mb_words[11], + mb->un.mb_words[12], mboxq->mcqe.word0, + mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1, + mboxq->mcqe.trailer); +exit: + /* We are holding the token, no need for the lock on release */ + spin_lock_irqsave(&phba->hbalock, iflag); + psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; + phba->sli.mbox_active = NULL; + spin_unlock_irqrestore(&phba->hbalock, iflag); + return rc; +} + +/** + * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware + * @phba: Pointer to HBA context object. + * @pmbox: Pointer to mailbox object. + * @flag: Flag indicating how the mailbox needs to be processed. + * + * This function is called by discovery code and HBA management code to submit + * a mailbox command to firmware with SLI-4 interface spec. + * + * Return codes: the caller owns the mailbox command after the return of the + * function. + **/ +static int +lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, + uint32_t flag) +{ + struct lpfc_sli *psli = &phba->sli; + unsigned long iflags; + int rc; + + /* Detect polling mode and jump to a handler */ + if (!phba->sli4_hba.intr_enable) { + if (flag == MBX_POLL) + rc = lpfc_sli4_post_sync_mbox(phba, mboxq); + else + rc = -EIO; + if (rc != MBX_SUCCESS) + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "(%d):2541 Mailbox command x%x " + "(x%x) cannot issue Data: x%x x%x\n", + mboxq->vport ? mboxq->vport->vpi : 0, + mboxq->u.mb.mbxCommand, + lpfc_sli4_mbox_opcode_get(phba, mboxq), + psli->sli_flag, MBX_POLL); + return rc; + } else if (flag == MBX_POLL) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "(%d):2542 Mailbox command x%x (x%x) " + "cannot issue Data: x%x x%x\n", + mboxq->vport ? mboxq->vport->vpi : 0, + mboxq->u.mb.mbxCommand, + lpfc_sli4_mbox_opcode_get(phba, mboxq), + psli->sli_flag, flag); + return -EIO; + } + + /* Now, interrupt mode asynchronous mailbox command */ + rc = lpfc_mbox_cmd_check(phba, mboxq); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "(%d):2543 Mailbox command x%x (x%x) " + "cannot issue Data: x%x x%x\n", + mboxq->vport ? mboxq->vport->vpi : 0, + mboxq->u.mb.mbxCommand, + lpfc_sli4_mbox_opcode_get(phba, mboxq), + psli->sli_flag, flag); + goto out_not_finished; + } + rc = lpfc_mbox_dev_check(phba); + if (unlikely(rc)) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "(%d):2544 Mailbox command x%x (x%x) " + "cannot issue Data: x%x x%x\n", + mboxq->vport ? mboxq->vport->vpi : 0, + mboxq->u.mb.mbxCommand, + lpfc_sli4_mbox_opcode_get(phba, mboxq), + psli->sli_flag, flag); + goto out_not_finished; + } + + /* Put the mailbox command to the driver internal FIFO */ + psli->slistat.mbox_busy++; + spin_lock_irqsave(&phba->hbalock, iflags); + lpfc_mbox_put(phba, mboxq); + spin_unlock_irqrestore(&phba->hbalock, iflags); + lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, + "(%d):0354 Mbox cmd issue - Enqueue Data: " + "x%x (x%x) x%x x%x x%x\n", + mboxq->vport ? 
+/** + * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware + * @phba: Pointer to HBA context object. + * @mboxq: Pointer to mailbox object. + * @flag: Flag indicating how the mailbox needs to be processed. + * + * This function is called by discovery code and HBA management code to submit + * a mailbox command to firmware with SLI-4 interface spec. + * + * Return codes: the caller owns the mailbox command after the function + * returns. + **/ +static int +lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, + uint32_t flag) +{ + struct lpfc_sli *psli = &phba->sli; + unsigned long iflags; + int rc; + + /* Detect polling mode and jump to a handler */ + if (!phba->sli4_hba.intr_enable) { + if (flag == MBX_POLL) + rc = lpfc_sli4_post_sync_mbox(phba, mboxq); + else + rc = -EIO; + if (rc != MBX_SUCCESS) + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "(%d):2541 Mailbox command x%x " + "(x%x) cannot issue Data: x%x x%x\n", + mboxq->vport ? mboxq->vport->vpi : 0, + mboxq->u.mb.mbxCommand, + lpfc_sli4_mbox_opcode_get(phba, mboxq), + psli->sli_flag, flag); + return rc; + } else if (flag == MBX_POLL) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "(%d):2542 Mailbox command x%x (x%x) " + "cannot issue Data: x%x x%x\n", + mboxq->vport ? mboxq->vport->vpi : 0, + mboxq->u.mb.mbxCommand, + lpfc_sli4_mbox_opcode_get(phba, mboxq), + psli->sli_flag, flag); + return -EIO; + } + + /* Now handle the interrupt-mode asynchronous mailbox command */ + rc = lpfc_mbox_cmd_check(phba, mboxq); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "(%d):2543 Mailbox command x%x (x%x) " + "cannot issue Data: x%x x%x\n", + mboxq->vport ? mboxq->vport->vpi : 0, + mboxq->u.mb.mbxCommand, + lpfc_sli4_mbox_opcode_get(phba, mboxq), + psli->sli_flag, flag); + goto out_not_finished; + } + rc = lpfc_mbox_dev_check(phba); + if (unlikely(rc)) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "(%d):2544 Mailbox command x%x (x%x) " + "cannot issue Data: x%x x%x\n", + mboxq->vport ? mboxq->vport->vpi : 0, + mboxq->u.mb.mbxCommand, + lpfc_sli4_mbox_opcode_get(phba, mboxq), + psli->sli_flag, flag); + goto out_not_finished; + } + + /* Put the mailbox command to the driver internal FIFO */ + psli->slistat.mbox_busy++; + spin_lock_irqsave(&phba->hbalock, iflags); + lpfc_mbox_put(phba, mboxq); + spin_unlock_irqrestore(&phba->hbalock, iflags); + lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, + "(%d):0354 Mbox cmd issue - Enqueue Data: " + "x%x (x%x) x%x x%x x%x\n", + mboxq->vport ? mboxq->vport->vpi : 0xffffff, + bf_get(lpfc_mqe_command, &mboxq->u.mqe), + lpfc_sli4_mbox_opcode_get(phba, mboxq), + phba->pport->port_state, + psli->sli_flag, MBX_NOWAIT); + /* Wake up worker thread to transport mailbox command from head */ + lpfc_worker_wake_up(phba); + + return MBX_BUSY; + +out_not_finished: + return MBX_NOT_FINISHED; +} + +/** + * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device + * @phba: Pointer to HBA context object. + * + * This function is called by the worker thread to send a mailbox command to + * SLI4 HBA firmware. + * + **/ +int +lpfc_sli4_post_async_mbox(struct lpfc_hba *phba) +{ + struct lpfc_sli *psli = &phba->sli; + LPFC_MBOXQ_t *mboxq; + int rc = MBX_SUCCESS; + unsigned long iflags; + struct lpfc_mqe *mqe; + uint32_t mbx_cmnd; + + /* Check interrupt mode before posting an async mailbox command */ + if (unlikely(!phba->sli4_hba.intr_enable)) + return MBX_NOT_FINISHED; + + /* Check for mailbox command service token */ + spin_lock_irqsave(&phba->hbalock, iflags); + if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { + spin_unlock_irqrestore(&phba->hbalock, iflags); + return MBX_NOT_FINISHED; + } + if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { + spin_unlock_irqrestore(&phba->hbalock, iflags); + return MBX_NOT_FINISHED; + } + if (unlikely(phba->sli.mbox_active)) { + spin_unlock_irqrestore(&phba->hbalock, iflags); + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "0384 There is pending active mailbox cmd\n"); + return MBX_NOT_FINISHED; + } + /* Take the mailbox command service token */ + psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; + + /* Get the next mailbox command from head of queue */ + mboxq = lpfc_mbox_get(phba); + + /* If no more mailbox commands are waiting to be posted, we're done */ + if (!mboxq) { + psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; + spin_unlock_irqrestore(&phba->hbalock, iflags); + return MBX_SUCCESS; + } + phba->sli.mbox_active = mboxq; + spin_unlock_irqrestore(&phba->hbalock, iflags); + + /* Check device readiness for posting mailbox command */ + rc = lpfc_mbox_dev_check(phba); + if (unlikely(rc)) + /* Driver clean routine will clean up pending mailbox */ + goto out_not_finished; + + /* Prepare the mbox command to be posted */ + mqe = &mboxq->u.mqe; + mbx_cmnd = bf_get(lpfc_mqe_command, mqe); + + /* Start timer for the mbox_tmo and log some mailbox post messages */ + mod_timer(&psli->mbox_tmo, (jiffies + + (HZ * lpfc_mbox_tmo_val(phba, mbx_cmnd)))); + + lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, + "(%d):0355 Mailbox cmd x%x (x%x) issue Data: " + "x%x x%x\n", + mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd, + lpfc_sli4_mbox_opcode_get(phba, mboxq), + phba->pport->port_state, psli->sli_flag); + + if (mbx_cmnd != MBX_HEARTBEAT) { + if (mboxq->vport) { + lpfc_debugfs_disc_trc(mboxq->vport, + LPFC_DISC_TRC_MBOX_VPORT, + "MBOX Send vport: cmd:x%x mb:x%x x%x", + mbx_cmnd, mqe->un.mb_words[0], + mqe->un.mb_words[1]); + } else { + lpfc_debugfs_disc_trc(phba->pport, + LPFC_DISC_TRC_MBOX, + "MBOX Send: cmd:x%x mb:x%x x%x", + mbx_cmnd, mqe->un.mb_words[0], + mqe->un.mb_words[1]); + } + } + psli->slistat.mbox_cmd++; + + /* Post the mailbox command to the port */ + rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe); + if (rc != MBX_SUCCESS) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "(%d):2533 Mailbox command x%x (x%x) " + "cannot issue Data: x%x x%x\n", + mboxq->vport ? mboxq->vport->vpi : 0, + mboxq->u.mb.mbxCommand, + lpfc_sli4_mbox_opcode_get(phba, mboxq), + psli->sli_flag, MBX_NOWAIT); + goto out_not_finished; + } + + return rc; + +out_not_finished: + spin_lock_irqsave(&phba->hbalock, iflags); + mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; + __lpfc_mbox_cmpl_put(phba, mboxq); + /* Release the token */ + psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; + phba->sli.mbox_active = NULL; + spin_unlock_irqrestore(&phba->hbalock, iflags); + + return MBX_NOT_FINISHED; +} +
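From the caller's side, the two submission paths implemented above are selected by the flag argument: MBX_POLL is only legal while sli4_hba.intr_enable is clear (bring-up and teardown), and MBX_NOWAIT enqueues the command for the worker thread. A caller-side sketch follows; the mempool allocation mirrors the driver's mbox_mem_pool, but the fragment as a whole is illustrative:

	LPFC_MBOXQ_t *mboxq;
	int rc;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;
	/* ... fill in mboxq->u.mqe and mboxq->mbox_cmpl here ... */
	if (!phba->sli4_hba.intr_enable)
		/* Pre-interrupt bring-up: synchronous bootstrap mailbox */
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	else
		/* Normal operation: queued; the worker posts it to the MQ */
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_BUSY)
		/* Accepted; completion arrives through mboxq->mbox_cmpl */
		return 0;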
+/** + * lpfc_sli_issue_mbox - Wrapper function for issuing a mailbox command + * @phba: Pointer to HBA context object. + * @pmbox: Pointer to mailbox object. + * @flag: Flag indicating how the mailbox needs to be processed. + * + * This routine dispatches to the actual SLI3 or SLI4 mailbox issuing routine + * through the API jump table function pointer in the lpfc_hba struct. + * + * Return codes: the caller owns the mailbox command after the function + * returns. + **/ +int +lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) +{ + return phba->lpfc_sli_issue_mbox(phba, pmbox, flag); +} + +/** + * lpfc_mbox_api_table_setup - Set up mbox api function jump table + * @phba: The hba struct for which this call is being executed. + * @dev_grp: The HBA PCI-Device group number. + * + * This routine sets up the mbox interface API function jump table in @phba + * struct. + * Returns: 0 - success, -ENODEV - failure. + **/ +int +lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) +{ + + switch (dev_grp) { + case LPFC_PCI_DEV_LP: + phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3; + phba->lpfc_sli_handle_slow_ring_event = + lpfc_sli_handle_slow_ring_event_s3; + phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3; + phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3; + phba->lpfc_sli_brdready = lpfc_sli_brdready_s3; + break; + case LPFC_PCI_DEV_OC: + phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4; + phba->lpfc_sli_handle_slow_ring_event = + lpfc_sli_handle_slow_ring_event_s4; + phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4; + phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4; + phba->lpfc_sli_brdready = lpfc_sli_brdready_s4; + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1420 Invalid HBA PCI-device group: 0x%x\n", + dev_grp); + return -ENODEV; + } + return 0; +} +
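A probe-time sketch of how the jump table above is expected to be selected and then exercised; the wrapper function and the dev_grp constants come from the patch, while the surrounding example function is hypothetical:

static int
example_mbox_setup(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox)
{
	int rc;

	/* Bind the SLI-4 entry points once, at PCI probe time */
	rc = lpfc_mbox_api_table_setup(phba, LPFC_PCI_DEV_OC);
	if (rc)
		return rc;	/* -ENODEV for an unknown device group */

	/* Every later caller dispatches through the jump table */
	return lpfc_sli_issue_mbox(phba, pmbox, MBX_NOWAIT);
}

The same call with LPFC_PCI_DEV_LP binds the _s3 routines instead, which is what keeps the rest of the driver revision-neutral.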
+/** * __lpfc_sli_ringtx_put - Add an iocb to the txq * @phba: Pointer to HBA context object. * @pring: Pointer to driver SLI ring object. @@ -4501,28 +5629,42 @@ lpfc_sli_hba_down(struct lpfc_hba *phba) /* Return any active mbox cmds */ del_timer_sync(&psli->mbox_tmo); - spin_lock_irqsave(&phba->hbalock, flags); - spin_lock(&phba->pport->work_port_lock); + spin_lock_irqsave(&phba->pport->work_port_lock, flags); phba->pport->work_port_events &= ~WORKER_MBOX_TMO; - spin_unlock(&phba->pport->work_port_lock); + spin_unlock_irqrestore(&phba->pport->work_port_lock, flags); - /* Return any pending or completed mbox cmds */ - list_splice_init(&phba->sli.mboxq, &completions); - if (psli->mbox_active) { - list_add_tail(&psli->mbox_active->list, &completions); - psli->mbox_active = NULL; - psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; - } - list_splice_init(&phba->sli.mboxq_cmpl, &completions); - spin_unlock_irqrestore(&phba->hbalock, flags); + return 1; +} + +/** + * lpfc_sli4_hba_down - PCI function resource cleanup for the SLI4 HBA + * @phba: Pointer to HBA context object. + * + * This function cleans up all queues, iocbs, buffers, and mailbox commands + * while shutting down the SLI4 HBA FCoE function. It is called with no + * lock held and always returns 1. + * + * This function does the following to clean up driver FCoE function resources: + * - Free discovery resources for each virtual port + * - Clean up any pending fabric iocbs + * - Iterate through the iocb txq and free each entry in the list. + * - Free up any buffer posted to the HBA. + * - Clean up all the queue entries: WQ, RQ, MQ, EQ, CQ, etc. + * - Free mailbox commands in the mailbox queue. + **/ +int +lpfc_sli4_hba_down(struct lpfc_hba *phba) +{ + /* Stop the SLI4 device port */ + lpfc_stop_port(phba); + + /* Tear down the queues in the HBA */ + lpfc_sli4_queue_unset(phba); + + /* unregister default FCFI from the HBA */ + lpfc_sli4_fcfi_unreg(phba, phba->fcf.fcfi); - while (!list_empty(&completions)) { - list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list); - pmb->mb.mbxStatus = MBX_NOT_FINISHED; - if (pmb->mbox_cmpl) - pmb->mbox_cmpl(phba,pmb); - } return 1; } @@ -4853,7 +5995,10 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, iabt = &abtsiocbp->iocb; iabt->un.acxri.abortType = ABORT_TYPE_ABTS; iabt->un.acxri.abortContextTag = icmd->ulpContext; - iabt->un.acxri.abortIoTag = icmd->ulpIoTag; + if (phba->sli_rev == LPFC_SLI_REV4) + iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag; + else + iabt->un.acxri.abortIoTag = icmd->ulpIoTag; iabt->ulpLe = 1; iabt->ulpClass = icmd->ulpClass; @@ -4869,7 +6014,7 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, "abort cmd iotag x%x\n", iabt->un.acxri.abortContextTag, iabt->un.acxri.abortIoTag, abtsiocbp->iotag); - retval = __lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0); + retval = __lpfc_sli_issue_iocb(phba, pring->ringno, abtsiocbp, 0); if (retval) __lpfc_sli_release_iocbq(phba, abtsiocbp); @@ -5052,7 +6197,10 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, cmd = &iocbq->iocb; abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS; abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext; - abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag; + if (phba->sli_rev == LPFC_SLI_REV4) + abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag; + else + abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag; abtsiocb->iocb.ulpLe = 1; abtsiocb->iocb.ulpClass = cmd->ulpClass; abtsiocb->vport = phba->pport; @@ -5064,7 +6212,8 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, /* Setup callback routine and issue the command. 
*/ abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; - ret_val = lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0); + ret_val = lpfc_sli_issue_iocb(phba, pring->ringno, + abtsiocb, 0); if (ret_val == IOCB_ERROR) { lpfc_sli_release_iocbq(phba, abtsiocb); errcnt++; @@ -5145,7 +6294,7 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba, **/ int lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, - struct lpfc_sli_ring *pring, + uint32_t ring_number, struct lpfc_iocbq *piocb, struct lpfc_iocbq *prspiocbq, uint32_t timeout) @@ -5176,7 +6325,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, readl(phba->HCregaddr); /* flush */ } - retval = lpfc_sli_issue_iocb(phba, pring, piocb, 0); + retval = lpfc_sli_issue_iocb(phba, ring_number, piocb, 0); if (retval == IOCB_SUCCESS) { timeout_req = timeout * HZ; timeleft = wait_event_timeout(done_q, @@ -5385,6 +6534,58 @@ lpfc_sli_eratt_read(struct lpfc_hba *phba) } /** + * lpfc_sli4_eratt_read - read SLI-4 error attention events + * @phba: Pointer to HBA context. + * + * This function is called to read the SLI4 device error attention registers + * for possible error attention events. The caller must hold the hostlock + * with spin_lock_irq(). + * + * This function returns 1 when there is Error Attention in the Host Attention + * Register and returns 0 otherwise. + **/ +static int +lpfc_sli4_eratt_read(struct lpfc_hba *phba) +{ + uint32_t uerr_sta_hi, uerr_sta_lo; + uint32_t onlnreg0, onlnreg1; + + /* For now, use the SLI4 device internal unrecoverable error + * registers for error attention. This can be changed later. + */ + onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr); + onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr); + if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) { + uerr_sta_lo = readl(phba->sli4_hba.UERRLOregaddr); + uerr_sta_hi = readl(phba->sli4_hba.UERRHIregaddr); + if (uerr_sta_lo || uerr_sta_hi) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1423 HBA Unrecoverable error: " + "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, " + "online0_reg=0x%x, online1_reg=0x%x\n", + uerr_sta_lo, uerr_sta_hi, + onlnreg0, onlnreg1); + /* TEMP: as the driver error recovery logic is not + * fully developed, we just log the error message + * and the device error attention action is now + * temporarily disabled. + */ + return 0; + phba->work_status[0] = uerr_sta_lo; + phba->work_status[1] = uerr_sta_hi; + spin_lock_irq(&phba->hbalock); + /* Set the driver HA work bitmap */ + phba->work_ha |= HA_ERATT; + /* Indicate polling handles this ERATT */ + phba->hba_flag |= HBA_ERATT_HANDLED; + spin_unlock_irq(&phba->hbalock); + return 1; + } + } + return 0; +} +
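Condensed to its decision rule, lpfc_sli4_eratt_read() reports an unrecoverable error only when an online register deviates from the no-error signature and nonzero UERR status is actually latched. A boolean restatement follows, illustrative only, since the driver keeps the logic inline so it can also latch work_status[] and the HA_ERATT work flag:

static bool
lpfc_sli4_ue_present(struct lpfc_sli4_hba *sli4)
{
	uint32_t onlnreg0 = readl(sli4->ONLINE0regaddr);
	uint32_t onlnreg1 = readl(sli4->ONLINE1regaddr);

	/* Both online registers at the no-error signature: no UE */
	if (onlnreg0 == LPFC_ONLINE_NERR && onlnreg1 == LPFC_ONLINE_NERR)
		return false;
	/* Otherwise a UE exists only if status bits are latched */
	return readl(sli4->UERRLOregaddr) || readl(sli4->UERRHIregaddr);
}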
+/** * lpfc_sli_check_eratt - check error attention events * @phba: Pointer to HBA context. * @@ -5434,6 +6635,10 @@ lpfc_sli_check_eratt(struct lpfc_hba *phba) /* Read chip Host Attention (HA) register */ ha_copy = lpfc_sli_eratt_read(phba); break; + case LPFC_SLI_REV4: + /* Read device Unrecoverable Error (UERR) registers */ + ha_copy = lpfc_sli4_eratt_read(phba); + break; default: lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0299 Invalid SLI revision (%d)\n", diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h index 883938652a6a..e6c88ee8ee96 100644 --- a/drivers/scsi/lpfc/lpfc_sli.h +++ b/drivers/scsi/lpfc/lpfc_sli.h @@ -29,13 +29,23 @@ typedef enum _lpfc_ctx_cmd { LPFC_CTX_HOST } lpfc_ctx_cmd; +/* This structure is used to carry the needed response IOCB states */ +struct lpfc_sli4_rspiocb_info { + uint8_t hw_status; + uint8_t bfield; +#define LPFC_XB 0x1 +#define LPFC_PV 0x2 + uint8_t priority; + uint8_t reserved; +}; + /* This structure is used to handle IOCB requests / responses */ struct lpfc_iocbq { /* lpfc_iocbqs are used in double linked lists */ struct list_head list; struct list_head clist; uint16_t iotag; /* pre-assigned IO tag */ - uint16_t rsvd1; + uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */ IOCB_t iocb; /* IOCB cmd */ uint8_t retry; /* retry counter for IOCB cmd - if needed */ @@ -65,7 +75,7 @@ struct lpfc_iocbq { struct lpfc_iocbq *); void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *); - + struct lpfc_sli4_rspiocb_info sli4_info; }; #define SLI_IOCB_RET_IOCB 1 /* Return IOCB if cmd ring full */ @@ -81,14 +91,18 @@ struct lpfc_iocbq { typedef struct lpfcMboxq { /* MBOXQs are used in single linked lists */ struct list_head list; /* ptr to next mailbox command */ - MAILBOX_t mb; /* Mailbox cmd */ - struct lpfc_vport *vport;/* virutal port pointer */ + union { + MAILBOX_t mb; /* Mailbox cmd */ + struct lpfc_mqe mqe; + } u; + struct lpfc_vport *vport;/* virtual port pointer */ void *context1; /* caller context information */ void *context2; /* caller context information */ void (*mbox_cmpl) (struct lpfc_hba *, struct lpfcMboxq *); uint8_t mbox_flag; - + struct lpfc_mcqe mcqe; + struct lpfc_mbx_nembed_sge_virt *sge_array; } LPFC_MBOXQ_t; #define MBX_POLL 1 /* poll mailbox till command done, then @@ -234,6 +248,7 @@ struct lpfc_sli { #define LPFC_PROCESS_LA 0x400 /* Able to process link attention */ #define LPFC_BLOCK_MGMT_IO 0x800 /* Don't allow mgmt mbx or iocb cmds */ #define LPFC_MENLO_MAINT 0x1000 /* need for menl fw download */ +#define LPFC_SLI_ASYNC_MBX_BLK 0x2000 /* Async mailbox is blocked */ struct lpfc_sli_ring ring[LPFC_MAX_RING]; int fcp_ring; /* ring used for FCP initiator commands */ @@ -261,6 +276,8 @@ struct lpfc_sli { #define LPFC_MBOX_TMO 30 /* Sec tmo for outstanding mbox command */ +#define LPFC_MBOX_SLI4_CONFIG_TMO 60 /* Sec tmo for outstanding mbox + command */ #define LPFC_MBOX_TMO_FLASH_CMD 300 /* Sec tmo for outstanding FLASH write * or erase cmds. This is especially * long because of the potential of diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h new file mode 100644 index 000000000000..5196b46608d7 --- /dev/null +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -0,0 +1,467 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2009 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. 
* + * www.emulex.com * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. * + *******************************************************************/ + +#define LPFC_ACTIVE_MBOX_WAIT_CNT 100 +#define LPFC_RELEASE_NOTIFICATION_INTERVAL 32 +#define LPFC_GET_QE_REL_INT 32 +#define LPFC_RPI_LOW_WATER_MARK 10 +/* Number of SGL entries that can be posted in a 4KB non-embedded mbox command */ +#define LPFC_NEMBED_MBOX_SGL_CNT 254 + +/* Multi-queue arrangement for fast-path FCP work queues */ +#define LPFC_FN_EQN_MAX 8 +#define LPFC_SP_EQN_DEF 1 +#define LPFC_FP_EQN_DEF 1 +#define LPFC_FP_EQN_MIN 1 +#define LPFC_FP_EQN_MAX (LPFC_FN_EQN_MAX - LPFC_SP_EQN_DEF) + +#define LPFC_FN_WQN_MAX 32 +#define LPFC_SP_WQN_DEF 1 +#define LPFC_FP_WQN_DEF 4 +#define LPFC_FP_WQN_MIN 1 +#define LPFC_FP_WQN_MAX (LPFC_FN_WQN_MAX - LPFC_SP_WQN_DEF) + +/* + * Provide the default FCF Record attributes used by the driver + * when non-FIP mode is configured and no other default + * FCF Record attributes are available. + */ +#define LPFC_FCOE_FCF_DEF_INDEX 0 +#define LPFC_FCOE_FCF_GET_FIRST 0xFFFF +#define LPFC_FCOE_FCF_NEXT_NONE 0xFFFF + +/* First 3 bytes of default FCF MAC are specified by FC_MAP */ +#define LPFC_FCOE_FCF_MAC3 0xFF +#define LPFC_FCOE_FCF_MAC4 0xFF +#define LPFC_FCOE_FCF_MAC5 0xFE +#define LPFC_FCOE_FCF_MAP0 0x0E +#define LPFC_FCOE_FCF_MAP1 0xFC +#define LPFC_FCOE_FCF_MAP2 0x00 +#define LPFC_FCOE_MAX_RCV_SIZE 0x5AC +#define LPFC_FCOE_FKA_ADV_PER 0 +#define LPFC_FCOE_FIP_PRIORITY 0x80 + +enum lpfc_sli4_queue_type { + LPFC_EQ, + LPFC_GCQ, + LPFC_MCQ, + LPFC_WCQ, + LPFC_RCQ, + LPFC_MQ, + LPFC_WQ, + LPFC_HRQ, + LPFC_DRQ +}; + +/* The queue sub-type defines the functional purpose of the queue */ +enum lpfc_sli4_queue_subtype { + LPFC_NONE, + LPFC_MBOX, + LPFC_FCP, + LPFC_ELS, + LPFC_USOL +}; + +union sli4_qe { + void *address; + struct lpfc_eqe *eqe; + struct lpfc_cqe *cqe; + struct lpfc_mcqe *mcqe; + struct lpfc_wcqe_complete *wcqe_complete; + struct lpfc_wcqe_release *wcqe_release; + struct sli4_wcqe_xri_aborted *wcqe_xri_aborted; + struct lpfc_rcqe_complete *rcqe_complete; + struct lpfc_mqe *mqe; + union lpfc_wqe *wqe; + struct lpfc_rqe *rqe; +}; + +struct lpfc_queue { + struct list_head list; + enum lpfc_sli4_queue_type type; + enum lpfc_sli4_queue_subtype subtype; + struct lpfc_hba *phba; + struct list_head child_list; + uint32_t entry_count; /* Number of entries to support on the queue */ + uint32_t entry_size; /* Size of each queue entry. 
*/ + uint32_t queue_id; /* Queue ID assigned by the hardware */ + struct list_head page_list; + uint32_t page_count; /* Number of pages allocated for this queue */ + + uint32_t host_index; /* The host's index for putting or getting */ + uint32_t hba_index; /* The last known hba index for get or put */ + union sli4_qe qe[1]; /* array to index entries (must be last) */ +}; + +struct lpfc_cq_event { + struct list_head list; + union { + struct lpfc_mcqe mcqe_cmpl; + struct lpfc_acqe_link acqe_link; + struct lpfc_acqe_fcoe acqe_fcoe; + struct lpfc_acqe_dcbx acqe_dcbx; + struct lpfc_rcqe rcqe_cmpl; + struct sli4_wcqe_xri_aborted wcqe_axri; + } cqe; +}; + +struct lpfc_sli4_link { + uint8_t speed; + uint8_t duplex; + uint8_t status; + uint8_t physical; + uint8_t fault; +}; + +struct lpfc_fcf { + uint8_t fabric_name[8]; + uint8_t mac_addr[6]; + uint16_t fcf_indx; + uint16_t fcfi; + uint32_t fcf_flag; +#define FCF_AVAILABLE 0x01 /* FCF available for discovery */ +#define FCF_REGISTERED 0x02 /* FCF registered with FW */ +#define FCF_DISCOVERED 0x04 /* FCF discovery started */ +#define FCF_BOOT_ENABLE 0x08 /* Boot BIOS uses this FCF */ +#define FCF_IN_USE 0x10 /* At least one discovery completed */ +#define FCF_VALID_VLAN 0x20 /* Use the vlan id specified */ + uint32_t priority; + uint32_t addr_mode; + uint16_t vlan_id; +}; + +#define LPFC_REGION23_SIGNATURE "RG23" +#define LPFC_REGION23_VERSION 1 +#define LPFC_REGION23_LAST_REC 0xff +struct lpfc_fip_param_hdr { + uint8_t type; +#define FCOE_PARAM_TYPE 0xA0 + uint8_t length; +#define FCOE_PARAM_LENGTH 2 + uint8_t parm_version; +#define FIPP_VERSION 0x01 + uint8_t parm_flags; +#define lpfc_fip_param_hdr_fipp_mode_SHIFT 6 +#define lpfc_fip_param_hdr_fipp_mode_MASK 0x3 +#define lpfc_fip_param_hdr_fipp_mode_WORD parm_flags +#define FIPP_MODE_ON 0x2 +#define FIPP_MODE_OFF 0x0 +#define FIPP_VLAN_VALID 0x1 +}; + +struct lpfc_fcoe_params { + uint8_t fc_map[3]; + uint8_t reserved1; + uint16_t vlan_tag; + uint8_t reserved[2]; +}; + +struct lpfc_fcf_conn_hdr { + uint8_t type; +#define FCOE_CONN_TBL_TYPE 0xA1 + uint8_t length; /* words */ + uint8_t reserved[2]; +}; + +struct lpfc_fcf_conn_rec { + uint16_t flags; +#define FCFCNCT_VALID 0x0001 +#define FCFCNCT_BOOT 0x0002 +#define FCFCNCT_PRIMARY 0x0004 /* if not set, Secondary */ +#define FCFCNCT_FBNM_VALID 0x0008 +#define FCFCNCT_SWNM_VALID 0x0010 +#define FCFCNCT_VLAN_VALID 0x0020 +#define FCFCNCT_AM_VALID 0x0040 +#define FCFCNCT_AM_PREFERRED 0x0080 /* if not set, AM Required */ +#define FCFCNCT_AM_SPMA 0x0100 /* if not set, FPMA */ + + uint16_t vlan_tag; + uint8_t fabric_name[8]; + uint8_t switch_name[8]; +}; + +struct lpfc_fcf_conn_entry { + struct list_head list; + struct lpfc_fcf_conn_rec conn_rec; +}; + +/* + * Define the host's bootstrap mailbox. This structure contains + * the member attributes needed to create, use, and destroy the + * bootstrap mailbox region. + * + * The macro definitions for the bmbx data structure are defined + * in lpfc_hw4.h with the register definition. + */ +struct lpfc_bmbx { + struct lpfc_dmabuf *dmabuf; + struct dma_address dma_address; + void *avirt; + dma_addr_t aphys; + uint32_t bmbx_size; +}; +
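The lpfc_fip_param_hdr above encodes the FIP mode with the driver's SHIFT/MASK/WORD accessor convention, where bf_get(name, ptr) is assumed to expand to ((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK, as elsewhere in lpfc. A sketch of reading the field; the helper name is illustrative:

static int
lpfc_fipp_mode_on(struct lpfc_fip_param_hdr *hdr)
{
	/* Reads bits 7:6 of parm_flags and compares against FIPP_MODE_ON */
	return bf_get(lpfc_fip_param_hdr_fipp_mode, hdr) == FIPP_MODE_ON;
}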
+#define LPFC_EQE_SIZE LPFC_EQE_SIZE_4B + +#define LPFC_EQE_SIZE_4B 4 +#define LPFC_EQE_SIZE_16B 16 +#define LPFC_CQE_SIZE 16 +#define LPFC_WQE_SIZE 64 +#define LPFC_MQE_SIZE 256 +#define LPFC_RQE_SIZE 8 + +#define LPFC_EQE_DEF_COUNT 1024 +#define LPFC_CQE_DEF_COUNT 256 +#define LPFC_WQE_DEF_COUNT 64 +#define LPFC_MQE_DEF_COUNT 16 +#define LPFC_RQE_DEF_COUNT 512 + +#define LPFC_QUEUE_NOARM false +#define LPFC_QUEUE_REARM true + + +/* + * SLI4 CT field defines + */ +#define SLI4_CT_RPI 0 +#define SLI4_CT_VPI 1 +#define SLI4_CT_VFI 2 +#define SLI4_CT_FCFI 3 + +#define LPFC_SLI4_MAX_SEGMENT_SIZE 0x10000 + +/* + * SLI4 specific data structures + */ +struct lpfc_max_cfg_param { + uint16_t max_xri; + uint16_t xri_base; + uint16_t xri_used; + uint16_t max_rpi; + uint16_t rpi_base; + uint16_t rpi_used; + uint16_t max_vpi; + uint16_t vpi_base; + uint16_t vpi_used; + uint16_t max_vfi; + uint16_t vfi_base; + uint16_t vfi_used; + uint16_t max_fcfi; + uint16_t fcfi_base; + uint16_t fcfi_used; + uint16_t max_eq; + uint16_t max_rq; + uint16_t max_cq; + uint16_t max_wq; +}; + +struct lpfc_hba; +/* SLI4 HBA multi-fcp queue handler struct */ +struct lpfc_fcp_eq_hdl { + uint32_t idx; + struct lpfc_hba *phba; +}; + +/* SLI4 HBA data structure entries */ +struct lpfc_sli4_hba { + void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for + PCI BAR0, config space registers */ + void __iomem *ctrl_regs_memmap_p; /* Kernel memory mapped address for + PCI BAR1, control registers */ + void __iomem *drbl_regs_memmap_p; /* Kernel memory mapped address for + PCI BAR2, doorbell registers */ + /* BAR0 PCI config space register memory map */ + void __iomem *UERRLOregaddr; /* Address to UERR_STATUS_LO register */ + void __iomem *UERRHIregaddr; /* Address to UERR_STATUS_HI register */ + void __iomem *ONLINE0regaddr; /* Address to components of internal UE */ + void __iomem *ONLINE1regaddr; /* Address to components of internal UE */ +#define LPFC_ONLINE_NERR 0xFFFFFFFF + void __iomem *SCRATCHPADregaddr; /* Address to scratchpad register */ + /* BAR1 FCoE function CSR register memory map */ + void __iomem *STAregaddr; /* Address to HST_STATE register */ + void __iomem *ISRregaddr; /* Address to HST_ISR register */ + void __iomem *IMRregaddr; /* Address to HST_IMR register */ + void __iomem *ISCRregaddr; /* Address to HST_ISCR register */ + /* BAR2 VF-0 doorbell register memory map */ + void __iomem *RQDBregaddr; /* Address to RQ_DOORBELL register */ + void __iomem *WQDBregaddr; /* Address to WQ_DOORBELL register */ + void __iomem *EQCQDBregaddr; /* Address to EQCQ_DOORBELL register */ + void __iomem *MQDBregaddr; /* Address to MQ_DOORBELL register */ + void __iomem *BMBXregaddr; /* Address to BootStrap MBX register */ + + struct msix_entry *msix_entries; + uint32_t cfg_eqn; + struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */ + /* Pointers to the constructed SLI4 queues */ + struct lpfc_queue **fp_eq; /* Fast-path event queue */ + struct lpfc_queue *sp_eq; /* Slow-path event queue */ + struct lpfc_queue **fcp_wq;/* Fast-path FCP work queue */ + struct lpfc_queue *mbx_wq; /* Slow-path MBOX work queue */ + struct lpfc_queue *els_wq; /* Slow-path ELS work queue */ + struct lpfc_queue *hdr_rq; /* Slow-path Header Receive queue */ + struct lpfc_queue *dat_rq; /* Slow-path Data Receive queue */ + struct lpfc_queue **fcp_cq;/* Fast-path FCP compl queue */ + struct 
lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */ + struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */ + struct lpfc_queue *rxq_cq; /* Slow-path unsolicited complete queue */ + + /* Setup information for various queue parameters */ + int eq_esize; + int eq_ecount; + int cq_esize; + int cq_ecount; + int wq_esize; + int wq_ecount; + int mq_esize; + int mq_ecount; + int rq_esize; + int rq_ecount; +#define LPFC_SP_EQ_MAX_INTR_SEC 10000 +#define LPFC_FP_EQ_MAX_INTR_SEC 10000 + + uint32_t intr_enable; + struct lpfc_bmbx bmbx; + struct lpfc_max_cfg_param max_cfg_param; + uint16_t next_xri; /* last_xri - max_cfg_param.xri_base = used */ + uint16_t next_rpi; + uint16_t scsi_xri_max; + uint16_t scsi_xri_cnt; + struct list_head lpfc_free_sgl_list; + struct list_head lpfc_sgl_list; + struct lpfc_sglq **lpfc_els_sgl_array; + struct list_head lpfc_abts_els_sgl_list; + struct lpfc_scsi_buf **lpfc_scsi_psb_array; + struct list_head lpfc_abts_scsi_buf_list; + uint32_t total_sglq_bufs; + struct lpfc_sglq **lpfc_sglq_active_list; + struct list_head lpfc_rpi_hdr_list; + unsigned long *rpi_bmask; + uint16_t rpi_count; + struct lpfc_sli4_flags sli4_flags; + struct list_head sp_rspiocb_work_queue; + struct list_head sp_cqe_event_pool; + struct list_head sp_asynce_work_queue; + struct list_head sp_fcp_xri_aborted_work_queue; + struct list_head sp_els_xri_aborted_work_queue; + struct list_head sp_unsol_work_queue; + struct lpfc_sli4_link link_state; + spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */ + spinlock_t abts_sgl_list_lock; /* list of aborted els IOs */ +}; + +enum lpfc_sge_type { + GEN_BUFF_TYPE, + SCSI_BUFF_TYPE +}; + +struct lpfc_sglq { + /* lpfc_sglqs are used in double linked lists */ + struct list_head list; + struct list_head clist; + enum lpfc_sge_type buff_type; /* is this a scsi sgl */ + uint16_t iotag; /* pre-assigned IO tag */ + uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */ + struct sli4_sge *sgl; /* pre-assigned SGL */ + void *virt; /* virtual address. 
*/ + dma_addr_t phys; /* physical address */ +}; + +struct lpfc_rpi_hdr { + struct list_head list; + uint32_t len; + struct lpfc_dmabuf *dmabuf; + uint32_t page_count; + uint32_t start_rpi; +}; + +/* + * SLI4 specific function prototypes + */ +int lpfc_pci_function_reset(struct lpfc_hba *); +int lpfc_sli4_hba_setup(struct lpfc_hba *); +int lpfc_sli4_hba_down(struct lpfc_hba *); +int lpfc_sli4_config(struct lpfc_hba *, struct lpfcMboxq *, uint8_t, + uint8_t, uint32_t, bool); +void lpfc_sli4_mbox_cmd_free(struct lpfc_hba *, struct lpfcMboxq *); +void lpfc_sli4_mbx_sge_set(struct lpfcMboxq *, uint32_t, dma_addr_t, uint32_t); +void lpfc_sli4_mbx_sge_get(struct lpfcMboxq *, uint32_t, + struct lpfc_mbx_sge *); + +void lpfc_sli4_hba_reset(struct lpfc_hba *); +struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t, + uint32_t); +void lpfc_sli4_queue_free(struct lpfc_queue *); +uint32_t lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint16_t); +uint32_t lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *, + struct lpfc_queue *, uint32_t, uint32_t); +uint32_t lpfc_mq_create(struct lpfc_hba *, struct lpfc_queue *, + struct lpfc_queue *, uint32_t); +uint32_t lpfc_wq_create(struct lpfc_hba *, struct lpfc_queue *, + struct lpfc_queue *, uint32_t); +uint32_t lpfc_rq_create(struct lpfc_hba *, struct lpfc_queue *, + struct lpfc_queue *, struct lpfc_queue *, uint32_t); +uint32_t lpfc_eq_destroy(struct lpfc_hba *, struct lpfc_queue *); +uint32_t lpfc_cq_destroy(struct lpfc_hba *, struct lpfc_queue *); +uint32_t lpfc_mq_destroy(struct lpfc_hba *, struct lpfc_queue *); +uint32_t lpfc_wq_destroy(struct lpfc_hba *, struct lpfc_queue *); +uint32_t lpfc_rq_destroy(struct lpfc_hba *, struct lpfc_queue *, + struct lpfc_queue *); +int lpfc_sli4_queue_setup(struct lpfc_hba *); +void lpfc_sli4_queue_unset(struct lpfc_hba *); +int lpfc_sli4_post_sgl(struct lpfc_hba *, dma_addr_t, dma_addr_t, uint16_t); +int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *); +int lpfc_sli4_remove_all_sgl_pages(struct lpfc_hba *); +uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *); +int lpfc_sli4_post_async_mbox(struct lpfc_hba *); +int lpfc_sli4_post_sgl_list(struct lpfc_hba *phba); +int lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *, struct list_head *, int); +struct lpfc_cq_event *__lpfc_sli4_cq_event_alloc(struct lpfc_hba *); +struct lpfc_cq_event *lpfc_sli4_cq_event_alloc(struct lpfc_hba *); +void __lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *); +void lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *); +int lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *); +int lpfc_sli4_post_rpi_hdr(struct lpfc_hba *, struct lpfc_rpi_hdr *); +int lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *); +struct lpfc_rpi_hdr *lpfc_sli4_create_rpi_hdr(struct lpfc_hba *); +void lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *); +int lpfc_sli4_alloc_rpi(struct lpfc_hba *); +void lpfc_sli4_free_rpi(struct lpfc_hba *, int); +void lpfc_sli4_remove_rpis(struct lpfc_hba *); +void lpfc_sli4_async_event_proc(struct lpfc_hba *); +int lpfc_sli4_resume_rpi(struct lpfc_nodelist *); +void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *); +void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *); +void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *, + struct sli4_wcqe_xri_aborted *); +void lpfc_sli4_els_xri_aborted(struct lpfc_hba *, + struct sli4_wcqe_xri_aborted *); +int lpfc_sli4_brdreset(struct lpfc_hba *); +int lpfc_sli4_add_fcf_record(struct lpfc_hba *, struct fcf_record *); +void lpfc_sli_remove_dflt_fcf(struct lpfc_hba *);
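Taken together, the prototypes above imply a bring-up order: allocate queue memory, then create each queue on the port, with child CQs bound to their parent EQ. A sketch for one slow-path EQ/CQ pair follows; the imax argument to lpfc_eq_create() and the type/subtype pairing are read off the enums earlier in this header, and the whole function is illustrative rather than the driver's actual setup path:

static int
example_sp_queue_pair(struct lpfc_hba *phba)
{
	struct lpfc_queue *eq, *cq;

	eq = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
				   phba->sli4_hba.eq_ecount);
	if (!eq)
		return -ENOMEM;
	cq = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
				   phba->sli4_hba.cq_ecount);
	if (!cq) {
		lpfc_sli4_queue_free(eq);
		return -ENOMEM;
	}
	/* Create the EQ on the port, then a mailbox CQ as its child */
	if (lpfc_eq_create(phba, eq, phba->cfg_fcp_imax))
		return -EIO;
	if (lpfc_cq_create(phba, cq, eq, LPFC_MCQ, LPFC_MBOX))
		return -EIO;
	return 0;
}

Teardown runs the reverse way through lpfc_cq_destroy()/lpfc_eq_destroy() and lpfc_sli4_queue_free().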
+int lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *); +int lpfc_sli4_init_vpi(struct lpfc_hba *, uint16_t); +uint32_t lpfc_sli4_cq_release(struct lpfc_queue *, bool); +uint32_t lpfc_sli4_eq_release(struct lpfc_queue *, bool); +void lpfc_sli4_fcfi_unreg(struct lpfc_hba *, uint16_t); +int lpfc_sli4_read_fcf_record(struct lpfc_hba *, uint16_t); +void lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *, LPFC_MBOXQ_t *); +int lpfc_sli4_post_status_check(struct lpfc_hba *); +uint8_t lpfc_sli4_mbox_opcode_get(struct lpfc_hba *, struct lpfcMboxq *); + diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index 917ad56b0aff..59e67f7ee531 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c @@ -32,8 +32,10 @@ #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_transport_fc.h> +#include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" +#include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc_scsi.h" @@ -89,6 +91,8 @@ lpfc_alloc_vpi(struct lpfc_hba *phba) vpi = 0; else set_bit(vpi, phba->vpi_bmask); + if (phba->sli_rev == LPFC_SLI_REV4) + phba->sli4_hba.max_cfg_param.vpi_used++; spin_unlock_irq(&phba->hbalock); return vpi; } @@ -96,8 +100,12 @@ lpfc_alloc_vpi(struct lpfc_hba *phba) static void lpfc_free_vpi(struct lpfc_hba *phba, int vpi) { + if (vpi == 0) + return; spin_lock_irq(&phba->hbalock); clear_bit(vpi, phba->vpi_bmask); + if (phba->sli_rev == LPFC_SLI_REV4) + phba->sli4_hba.max_cfg_param.vpi_used--; spin_unlock_irq(&phba->hbalock); } @@ -308,6 +316,21 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable) goto error_out; } + /* + * In SLI4, the vpi must be activated before it can be used + * by the port. + */ + if (phba->sli_rev == LPFC_SLI_REV4) { + rc = lpfc_sli4_init_vpi(phba, vpi); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, + "1838 Failed to INIT_VPI on vpi %d " + "status %d\n", vpi, rc); + rc = VPORT_NORESOURCES; + lpfc_free_vpi(phba, vpi); + goto error_out; + } + } /* Assign an unused board number */ if ((instance = lpfc_get_instance()) < 0) {
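For the vport path just changed, the SLI-4 vpi lifecycle is: allocate (which now also counts vpi_used), activate with INIT_VPI, use, and free (which refuses vpi 0, the physical port's vpi). A compressed sketch of the create path above; labels and return codes mirror the surrounding function, but the fragment is illustrative, not a drop-in replacement:

	int vpi = lpfc_alloc_vpi(phba);	/* bumps vpi_used on SLI-4 */

	if (vpi == 0)
		return VPORT_NORESOURCES;	/* vpi pool exhausted */
	if (phba->sli_rev == LPFC_SLI_REV4 &&
	    lpfc_sli4_init_vpi(phba, vpi)) {	/* activate before use */
		lpfc_free_vpi(phba, vpi);	/* drops vpi_used again */
		return VPORT_NORESOURCES;
	}
	/* ... vport usable; on teardown, lpfc_free_vpi(phba, vpi) ... */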