author     Linus Torvalds <torvalds@woody.linux-foundation.org>   2007-07-15 16:51:54 -0700
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>   2007-07-15 16:51:54 -0700
commit     bc06cffdec85d487c77109dffcd2f285bdc502d3 (patch)
tree       adc6e6398243da87e66c56102840597a329183a0 /drivers/scsi/lpfc
parent     d3502d7f25b22cfc9762bf1781faa9db1bb3be2e (diff)
parent     9413d7b8aa777dd1fc7db9563ce5e80d769fe7b5 (diff)
Merge master.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
* master.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (166 commits)
[SCSI] ibmvscsi: convert to use the data buffer accessors
[SCSI] dc395x: convert to use the data buffer accessors
[SCSI] ncr53c8xx: convert to use the data buffer accessors
[SCSI] sym53c8xx: convert to use the data buffer accessors
[SCSI] ppa: coding police and printk levels
[SCSI] aic7xxx_old: remove redundant GFP_ATOMIC from kmalloc
[SCSI] i2o: remove redundant GFP_ATOMIC from kmalloc from device.c
[SCSI] remove the dead CYBERSTORMIII_SCSI option
[SCSI] don't build scsi_dma_{map,unmap} for !HAS_DMA
[SCSI] Clean up scsi_add_lun a bit
[SCSI] 53c700: Remove printk, which triggers because of low scsi clock on SNI RMs
[SCSI] sni_53c710: Cleanup
[SCSI] qla4xxx: Fix underrun/overrun conditions
[SCSI] megaraid_mbox: use mutex instead of semaphore
[SCSI] aacraid: add 51245, 51645 and 52245 adapters to documentation.
[SCSI] qla2xxx: update version to 8.02.00-k1.
[SCSI] qla2xxx: add support for NPIV
[SCSI] stex: use resid for xfer len information
[SCSI] Add Brownie 1200U3P to blacklist
[SCSI] scsi.c: convert to use the data buffer accessors
...
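
Several of the merged commits above are "convert to use the data buffer accessors" changes, which replace direct use of a command's use_sg/request_buffer fields with the SCSI midlayer accessors (scsi_dma_map, scsi_for_each_sg, scsi_dma_unmap). The following is only an illustrative sketch of the common shape of such a conversion; my_build_sgl() and my_done() are hypothetical driver hooks, not functions from any of the drivers listed above.

```c
/*
 * Illustrative sketch only: the general pattern behind a
 * "convert to use the data buffer accessors" change.
 */
#include <linux/scatterlist.h>
#include <scsi/scsi_cmnd.h>

static int my_build_sgl(struct scsi_cmnd *cmd)
{
	struct scatterlist *sg;
	int nseg, i;

	/* Map the command's scatter/gather list for DMA. */
	nseg = scsi_dma_map(cmd);
	if (nseg < 0)
		return -ENOMEM;		/* mapping failed */

	/* Walk the mapped segments with the accessor macro. */
	scsi_for_each_sg(cmd, sg, nseg, i) {
		/* hand sg_dma_address(sg) / sg_dma_len(sg) to the HBA */
	}

	return 0;
}

static void my_done(struct scsi_cmnd *cmd)
{
	/* Pairs with scsi_dma_map(); safe even when no data was mapped. */
	scsi_dma_unmap(cmd);
}
```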
Diffstat (limited to 'drivers/scsi/lpfc')
-rw-r--r--   drivers/scsi/lpfc/Makefile          |    5
-rw-r--r--   drivers/scsi/lpfc/lpfc.h            |  358
-rw-r--r--   drivers/scsi/lpfc/lpfc_attr.c       |  760
-rw-r--r--   drivers/scsi/lpfc/lpfc_crtn.h       |  182
-rw-r--r--   drivers/scsi/lpfc/lpfc_ct.c         |  971
-rw-r--r--   drivers/scsi/lpfc/lpfc_debugfs.c    |  508
-rw-r--r--   drivers/scsi/lpfc/lpfc_debugfs.h    |   50
-rw-r--r--   drivers/scsi/lpfc/lpfc_disc.h       |   15
-rw-r--r--   drivers/scsi/lpfc/lpfc_els.c        | 3377
-rw-r--r--   drivers/scsi/lpfc/lpfc_hbadisc.c    | 2262
-rw-r--r--   drivers/scsi/lpfc/lpfc_hw.h         |  558
-rw-r--r--   drivers/scsi/lpfc/lpfc_init.c       |  948
-rw-r--r--   drivers/scsi/lpfc/lpfc_logmsg.h     |    1
-rw-r--r--   drivers/scsi/lpfc/lpfc_mbox.c       |  306
-rw-r--r--   drivers/scsi/lpfc/lpfc_mem.c        |  101
-rw-r--r--   drivers/scsi/lpfc/lpfc_nportdisc.c  | 1325
-rw-r--r--   drivers/scsi/lpfc/lpfc_scsi.c       |  557
-rw-r--r--   drivers/scsi/lpfc/lpfc_scsi.h       |    3
-rw-r--r--   drivers/scsi/lpfc/lpfc_sli.c        | 2047
-rw-r--r--   drivers/scsi/lpfc/lpfc_sli.h        |   47
-rw-r--r--   drivers/scsi/lpfc/lpfc_version.h    |    2
-rw-r--r--   drivers/scsi/lpfc/lpfc_vport.c      |  523
-rw-r--r--   drivers/scsi/lpfc/lpfc_vport.h      |  113
23 files changed, 10453 insertions, 4566 deletions
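
Most of the lpfc changes below add NPIV virtual port support: per-port discovery state moves out of struct lpfc_hba into the new struct lpfc_vport, the Scsi_Host private data (hostdata) now holds the vport rather than the HBA, and the new lpfc_shost_from_vport() helper recovers the host via container_of(). Below is only a minimal sketch of that hostdata/container_of relationship, with simplified stand-in structures (my_vport, my_create_port); the real definitions appear in the lpfc.h hunk further down.

```c
/*
 * Minimal sketch of the Scsi_Host <-> vport layout introduced below.
 * struct my_vport stands in for struct lpfc_vport; only the
 * hostdata/container_of() relationship is illustrated.
 */
#include <linux/kernel.h>
#include <scsi/scsi_host.h>

struct my_hba;				/* shared HBA state (opaque here) */

struct my_vport {
	struct my_hba *phba;		/* back-pointer to the shared HBA */
};

/* The vport lives in the Scsi_Host's private area, hostdata[0]. */
static inline struct Scsi_Host *my_shost_from_vport(struct my_vport *vport)
{
	return container_of((void *)vport, struct Scsi_Host, hostdata[0]);
}

/* Allocation side: ask the midlayer for hostdata big enough for a vport. */
static struct my_vport *my_create_port(struct scsi_host_template *tmpl)
{
	struct Scsi_Host *shost = scsi_host_alloc(tmpl, sizeof(struct my_vport));

	return shost ? (struct my_vport *)shost->hostdata : NULL;
}
```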
diff --git a/drivers/scsi/lpfc/Makefile b/drivers/scsi/lpfc/Makefile index d1be465d5f55..1c286707dd5f 100644 --- a/drivers/scsi/lpfc/Makefile +++ b/drivers/scsi/lpfc/Makefile @@ -1,7 +1,7 @@ #/******************************************************************* # * This file is part of the Emulex Linux Device Driver for * # * Fibre Channel Host Bus Adapters. * -# * Copyright (C) 2004-2005 Emulex. All rights reserved. * +# * Copyright (C) 2004-2006 Emulex. All rights reserved. * # * EMULEX and SLI are trademarks of Emulex. * # * www.emulex.com * # * * @@ -27,4 +27,5 @@ endif obj-$(CONFIG_SCSI_LPFC) := lpfc.o lpfc-objs := lpfc_mem.o lpfc_sli.o lpfc_ct.o lpfc_els.o lpfc_hbadisc.o \ - lpfc_init.o lpfc_mbox.o lpfc_nportdisc.o lpfc_scsi.o lpfc_attr.o + lpfc_init.o lpfc_mbox.o lpfc_nportdisc.o lpfc_scsi.o lpfc_attr.o \ + lpfc_vport.o lpfc_debugfs.o diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 82e8f90c4617..f8f64d6485cd 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -19,8 +19,9 @@ * included with this package. * *******************************************************************/ -struct lpfc_sli2_slim; +#include <scsi/scsi_host.h> +struct lpfc_sli2_slim; #define LPFC_MAX_TARGET 256 /* max number of targets supported */ #define LPFC_MAX_DISC_THREADS 64 /* max outstanding discovery els @@ -32,6 +33,20 @@ struct lpfc_sli2_slim; #define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */ #define LPFC_Q_RAMP_UP_INTERVAL 120 /* lun q_depth ramp up interval */ +/* + * Following time intervals are used of adjusting SCSI device + * queue depths when there are driver resource error or Firmware + * resource error. + */ +#define QUEUE_RAMP_DOWN_INTERVAL (1 * HZ) /* 1 Second */ +#define QUEUE_RAMP_UP_INTERVAL (300 * HZ) /* 5 minutes */ + +/* Number of exchanges reserved for discovery to complete */ +#define LPFC_DISC_IOCB_BUFF_COUNT 20 + +#define LPFC_HB_MBOX_INTERVAL 5 /* Heart beat interval in seconds. */ +#define LPFC_HB_MBOX_TIMEOUT 30 /* Heart beat timeout in seconds. */ + /* Define macros for 64 bit support */ #define putPaddrLow(addr) ((uint32_t) (0xffffffff & (u64)(addr))) #define putPaddrHigh(addr) ((uint32_t) (0xffffffff & (((u64)(addr))>>32))) @@ -61,6 +76,11 @@ struct lpfc_dma_pool { uint32_t current_count; }; +struct hbq_dmabuf { + struct lpfc_dmabuf dbuf; + uint32_t tag; +}; + /* Priority bit. Set value to exceed low water mark in lpfc_mem. 
*/ #define MEM_PRI 0x100 @@ -90,6 +110,29 @@ typedef struct lpfc_vpd { uint32_t sli2FwRev; uint8_t sli2FwName[16]; } rev; + struct { +#ifdef __BIG_ENDIAN_BITFIELD + uint32_t rsvd2 :24; /* Reserved */ + uint32_t cmv : 1; /* Configure Max VPIs */ + uint32_t ccrp : 1; /* Config Command Ring Polling */ + uint32_t csah : 1; /* Configure Synchronous Abort Handling */ + uint32_t chbs : 1; /* Cofigure Host Backing store */ + uint32_t cinb : 1; /* Enable Interrupt Notification Block */ + uint32_t cerbm : 1; /* Configure Enhanced Receive Buf Mgmt */ + uint32_t cmx : 1; /* Configure Max XRIs */ + uint32_t cmr : 1; /* Configure Max RPIs */ +#else /* __LITTLE_ENDIAN */ + uint32_t cmr : 1; /* Configure Max RPIs */ + uint32_t cmx : 1; /* Configure Max XRIs */ + uint32_t cerbm : 1; /* Configure Enhanced Receive Buf Mgmt */ + uint32_t cinb : 1; /* Enable Interrupt Notification Block */ + uint32_t chbs : 1; /* Cofigure Host Backing store */ + uint32_t csah : 1; /* Configure Synchronous Abort Handling */ + uint32_t ccrp : 1; /* Config Command Ring Polling */ + uint32_t cmv : 1; /* Configure Max VPIs */ + uint32_t rsvd2 :24; /* Reserved */ +#endif + } sli3Feat; } lpfc_vpd_t; struct lpfc_scsi_buf; @@ -122,6 +165,7 @@ struct lpfc_stats { uint32_t elsRcvRPS; uint32_t elsRcvRPL; uint32_t elsXmitFLOGI; + uint32_t elsXmitFDISC; uint32_t elsXmitPLOGI; uint32_t elsXmitPRLI; uint32_t elsXmitADISC; @@ -165,50 +209,186 @@ struct lpfc_sysfs_mbox { struct lpfcMboxq * mbox; }; +struct lpfc_hba; + + +enum discovery_state { + LPFC_VPORT_UNKNOWN = 0, /* vport state is unknown */ + LPFC_VPORT_FAILED = 1, /* vport has failed */ + LPFC_LOCAL_CFG_LINK = 6, /* local NPORT Id configured */ + LPFC_FLOGI = 7, /* FLOGI sent to Fabric */ + LPFC_FDISC = 8, /* FDISC sent for vport */ + LPFC_FABRIC_CFG_LINK = 9, /* Fabric assigned NPORT Id + * configured */ + LPFC_NS_REG = 10, /* Register with NameServer */ + LPFC_NS_QRY = 11, /* Query NameServer for NPort ID list */ + LPFC_BUILD_DISC_LIST = 12, /* Build ADISC and PLOGI lists for + * device authentication / discovery */ + LPFC_DISC_AUTH = 13, /* Processing ADISC list */ + LPFC_VPORT_READY = 32, +}; + +enum hba_state { + LPFC_LINK_UNKNOWN = 0, /* HBA state is unknown */ + LPFC_WARM_START = 1, /* HBA state after selective reset */ + LPFC_INIT_START = 2, /* Initial state after board reset */ + LPFC_INIT_MBX_CMDS = 3, /* Initialize HBA with mbox commands */ + LPFC_LINK_DOWN = 4, /* HBA initialized, link is down */ + LPFC_LINK_UP = 5, /* Link is up - issue READ_LA */ + LPFC_CLEAR_LA = 6, /* authentication cmplt - issue + * CLEAR_LA */ + LPFC_HBA_READY = 32, + LPFC_HBA_ERROR = -1 +}; + +struct lpfc_vport { + struct list_head listentry; + struct lpfc_hba *phba; + uint8_t port_type; +#define LPFC_PHYSICAL_PORT 1 +#define LPFC_NPIV_PORT 2 +#define LPFC_FABRIC_PORT 3 + enum discovery_state port_state; + + uint16_t vpi; + + uint32_t fc_flag; /* FC flags */ +/* Several of these flags are HBA centric and should be moved to + * phba->link_flag (e.g. 
FC_PTP, FC_PUBLIC_LOOP) + */ +#define FC_PT2PT 0x1 /* pt2pt with no fabric */ +#define FC_PT2PT_PLOGI 0x2 /* pt2pt initiate PLOGI */ +#define FC_DISC_TMO 0x4 /* Discovery timer running */ +#define FC_PUBLIC_LOOP 0x8 /* Public loop */ +#define FC_LBIT 0x10 /* LOGIN bit in loopinit set */ +#define FC_RSCN_MODE 0x20 /* RSCN cmd rcv'ed */ +#define FC_NLP_MORE 0x40 /* More node to process in node tbl */ +#define FC_OFFLINE_MODE 0x80 /* Interface is offline for diag */ +#define FC_FABRIC 0x100 /* We are fabric attached */ +#define FC_ESTABLISH_LINK 0x200 /* Reestablish Link */ +#define FC_RSCN_DISCOVERY 0x400 /* Auth all devices after RSCN */ +#define FC_SCSI_SCAN_TMO 0x4000 /* scsi scan timer running */ +#define FC_ABORT_DISCOVERY 0x8000 /* we want to abort discovery */ +#define FC_NDISC_ACTIVE 0x10000 /* NPort discovery active */ +#define FC_BYPASSED_MODE 0x20000 /* NPort is in bypassed mode */ +#define FC_RFF_NOT_SUPPORTED 0x40000 /* RFF_ID was rejected by switch */ +#define FC_VPORT_NEEDS_REG_VPI 0x80000 /* Needs to have its vpi registered */ +#define FC_RSCN_DEFERRED 0x100000 /* A deferred RSCN being processed */ + + struct list_head fc_nodes; + + /* Keep counters for the number of entries in each list. */ + uint16_t fc_plogi_cnt; + uint16_t fc_adisc_cnt; + uint16_t fc_reglogin_cnt; + uint16_t fc_prli_cnt; + uint16_t fc_unmap_cnt; + uint16_t fc_map_cnt; + uint16_t fc_npr_cnt; + uint16_t fc_unused_cnt; + struct serv_parm fc_sparam; /* buffer for our service parameters */ + + uint32_t fc_myDID; /* fibre channel S_ID */ + uint32_t fc_prevDID; /* previous fibre channel S_ID */ + + int32_t stopped; /* HBA has not been restarted since last ERATT */ + uint8_t fc_linkspeed; /* Link speed after last READ_LA */ + + uint32_t num_disc_nodes; /*in addition to hba_state */ + + uint32_t fc_nlp_cnt; /* outstanding NODELIST requests */ + uint32_t fc_rscn_id_cnt; /* count of RSCNs payloads in list */ + struct lpfc_dmabuf *fc_rscn_id_list[FC_MAX_HOLD_RSCN]; + struct lpfc_name fc_nodename; /* fc nodename */ + struct lpfc_name fc_portname; /* fc portname */ + + struct lpfc_work_evt disc_timeout_evt; + + struct timer_list fc_disctmo; /* Discovery rescue timer */ + uint8_t fc_ns_retry; /* retries for fabric nameserver */ + uint32_t fc_prli_sent; /* cntr for outstanding PRLIs */ + + spinlock_t work_port_lock; + uint32_t work_port_events; /* Timeout to be handled */ +#define WORKER_DISC_TMO 0x1 /* vport: Discovery timeout */ +#define WORKER_ELS_TMO 0x2 /* vport: ELS timeout */ +#define WORKER_FDMI_TMO 0x4 /* vport: FDMI timeout */ + +#define WORKER_MBOX_TMO 0x100 /* hba: MBOX timeout */ +#define WORKER_HB_TMO 0x200 /* hba: Heart beat timeout */ +#define WORKER_FABRIC_BLOCK_TMO 0x400 /* hba: fabric block timout */ +#define WORKER_RAMP_DOWN_QUEUE 0x800 /* hba: Decrease Q depth */ +#define WORKER_RAMP_UP_QUEUE 0x1000 /* hba: Increase Q depth */ + + struct timer_list fc_fdmitmo; + struct timer_list els_tmofunc; + + int unreg_vpi_cmpl; + + uint8_t load_flag; +#define FC_LOADING 0x1 /* HBA in process of loading drvr */ +#define FC_UNLOADING 0x2 /* HBA in process of unloading drvr */ + char *vname; /* Application assigned name */ + struct fc_vport *fc_vport; + +#ifdef CONFIG_LPFC_DEBUG_FS + struct dentry *debug_disc_trc; + struct dentry *debug_nodelist; + struct dentry *vport_debugfs_root; + struct lpfc_disc_trc *disc_trc; + atomic_t disc_trc_cnt; +#endif +}; + +struct hbq_s { + uint16_t entry_count; /* Current number of HBQ slots */ + uint32_t next_hbqPutIdx; /* Index to next HBQ slot to use */ + uint32_t hbqPutIdx; /* 
HBQ slot to use */ + uint32_t local_hbqGetIdx; /* Local copy of Get index from Port */ +}; + +#define LPFC_MAX_HBQS 16 +/* this matches the possition in the lpfc_hbq_defs array */ +#define LPFC_ELS_HBQ 0 + struct lpfc_hba { struct lpfc_sli sli; + uint32_t sli_rev; /* SLI2 or SLI3 */ + uint32_t sli3_options; /* Mask of enabled SLI3 options */ +#define LPFC_SLI3_ENABLED 0x01 +#define LPFC_SLI3_HBQ_ENABLED 0x02 +#define LPFC_SLI3_NPIV_ENABLED 0x04 +#define LPFC_SLI3_VPORT_TEARDOWN 0x08 + uint32_t iocb_cmd_size; + uint32_t iocb_rsp_size; + + enum hba_state link_state; + uint32_t link_flag; /* link state flags */ +#define LS_LOOPBACK_MODE 0x1 /* NPort is in Loopback mode */ + /* This flag is set while issuing */ + /* INIT_LINK mailbox command */ +#define LS_NPIV_FAB_SUPPORTED 0x2 /* Fabric supports NPIV */ +#define LS_IGNORE_ERATT 0x3 /* intr handler should ignore ERATT */ + struct lpfc_sli2_slim *slim2p; + struct lpfc_dmabuf hbqslimp; + dma_addr_t slim2p_mapping; + uint16_t pci_cfg_value; - int32_t hba_state; - -#define LPFC_STATE_UNKNOWN 0 /* HBA state is unknown */ -#define LPFC_WARM_START 1 /* HBA state after selective reset */ -#define LPFC_INIT_START 2 /* Initial state after board reset */ -#define LPFC_INIT_MBX_CMDS 3 /* Initialize HBA with mbox commands */ -#define LPFC_LINK_DOWN 4 /* HBA initialized, link is down */ -#define LPFC_LINK_UP 5 /* Link is up - issue READ_LA */ -#define LPFC_LOCAL_CFG_LINK 6 /* local NPORT Id configured */ -#define LPFC_FLOGI 7 /* FLOGI sent to Fabric */ -#define LPFC_FABRIC_CFG_LINK 8 /* Fabric assigned NPORT Id - configured */ -#define LPFC_NS_REG 9 /* Register with NameServer */ -#define LPFC_NS_QRY 10 /* Query NameServer for NPort ID list */ -#define LPFC_BUILD_DISC_LIST 11 /* Build ADISC and PLOGI lists for - * device authentication / discovery */ -#define LPFC_DISC_AUTH 12 /* Processing ADISC list */ -#define LPFC_CLEAR_LA 13 /* authentication cmplt - issue - CLEAR_LA */ -#define LPFC_HBA_READY 32 -#define LPFC_HBA_ERROR -1 + uint8_t work_found; +#define LPFC_MAX_WORKER_ITERATION 4 - int32_t stopped; /* HBA has not been restarted since last ERATT */ uint8_t fc_linkspeed; /* Link speed after last READ_LA */ uint32_t fc_eventTag; /* event tag for link attention */ - uint32_t fc_prli_sent; /* cntr for outstanding PRLIs */ - uint32_t num_disc_nodes; /*in addition to hba_state */ struct timer_list fc_estabtmo; /* link establishment timer */ - struct timer_list fc_disctmo; /* Discovery rescue timer */ - struct timer_list fc_fdmitmo; /* fdmi timer */ /* These fields used to be binfo */ - struct lpfc_name fc_nodename; /* fc nodename */ - struct lpfc_name fc_portname; /* fc portname */ uint32_t fc_pref_DID; /* preferred D_ID */ - uint8_t fc_pref_ALPA; /* preferred AL_PA */ + uint8_t fc_pref_ALPA; /* preferred AL_PA */ uint32_t fc_edtov; /* E_D_TOV timer value */ uint32_t fc_arbtov; /* ARB_TOV timer value */ uint32_t fc_ratov; /* R_A_TOV timer value */ @@ -216,61 +396,21 @@ struct lpfc_hba { uint32_t fc_altov; /* AL_TOV timer value */ uint32_t fc_crtov; /* C_R_TOV timer value */ uint32_t fc_citov; /* C_I_TOV timer value */ - uint32_t fc_myDID; /* fibre channel S_ID */ - uint32_t fc_prevDID; /* previous fibre channel S_ID */ - struct serv_parm fc_sparam; /* buffer for our service parameters */ struct serv_parm fc_fabparam; /* fabric service parameters buffer */ uint8_t alpa_map[128]; /* AL_PA map from READ_LA */ - uint8_t fc_ns_retry; /* retries for fabric nameserver */ - uint32_t fc_nlp_cnt; /* outstanding NODELIST requests */ - uint32_t fc_rscn_id_cnt; /* count 
of RSCNs payloads in list */ - struct lpfc_dmabuf *fc_rscn_id_list[FC_MAX_HOLD_RSCN]; uint32_t lmt; - uint32_t fc_flag; /* FC flags */ -#define FC_PT2PT 0x1 /* pt2pt with no fabric */ -#define FC_PT2PT_PLOGI 0x2 /* pt2pt initiate PLOGI */ -#define FC_DISC_TMO 0x4 /* Discovery timer running */ -#define FC_PUBLIC_LOOP 0x8 /* Public loop */ -#define FC_LBIT 0x10 /* LOGIN bit in loopinit set */ -#define FC_RSCN_MODE 0x20 /* RSCN cmd rcv'ed */ -#define FC_NLP_MORE 0x40 /* More node to process in node tbl */ -#define FC_OFFLINE_MODE 0x80 /* Interface is offline for diag */ -#define FC_FABRIC 0x100 /* We are fabric attached */ -#define FC_ESTABLISH_LINK 0x200 /* Reestablish Link */ -#define FC_RSCN_DISCOVERY 0x400 /* Authenticate all devices after RSCN*/ -#define FC_BLOCK_MGMT_IO 0x800 /* Don't allow mgmt mbx or iocb cmds */ -#define FC_LOADING 0x1000 /* HBA in process of loading drvr */ -#define FC_UNLOADING 0x2000 /* HBA in process of unloading drvr */ -#define FC_SCSI_SCAN_TMO 0x4000 /* scsi scan timer running */ -#define FC_ABORT_DISCOVERY 0x8000 /* we want to abort discovery */ -#define FC_NDISC_ACTIVE 0x10000 /* NPort discovery active */ -#define FC_BYPASSED_MODE 0x20000 /* NPort is in bypassed mode */ -#define FC_LOOPBACK_MODE 0x40000 /* NPort is in Loopback mode */ - /* This flag is set while issuing */ - /* INIT_LINK mailbox command */ -#define FC_IGNORE_ERATT 0x80000 /* intr handler should ignore ERATT */ uint32_t fc_topology; /* link topology, from LINK INIT */ struct lpfc_stats fc_stat; - struct list_head fc_nodes; - - /* Keep counters for the number of entries in each list. */ - uint16_t fc_plogi_cnt; - uint16_t fc_adisc_cnt; - uint16_t fc_reglogin_cnt; - uint16_t fc_prli_cnt; - uint16_t fc_unmap_cnt; - uint16_t fc_map_cnt; - uint16_t fc_npr_cnt; - uint16_t fc_unused_cnt; struct lpfc_nodelist fc_fcpnodev; /* nodelist entry for no device */ uint32_t nport_event_cnt; /* timestamp for nlplist entry */ - uint32_t wwnn[2]; + uint8_t wwnn[8]; + uint8_t wwpn[8]; uint32_t RandomData[7]; uint32_t cfg_log_verbose; @@ -278,6 +418,9 @@ struct lpfc_hba { uint32_t cfg_nodev_tmo; uint32_t cfg_devloss_tmo; uint32_t cfg_hba_queue_depth; + uint32_t cfg_peer_port_login; + uint32_t cfg_vport_restrict_login; + uint32_t cfg_npiv_enable; uint32_t cfg_fcp_class; uint32_t cfg_use_adisc; uint32_t cfg_ack0; @@ -304,22 +447,20 @@ struct lpfc_hba { lpfc_vpd_t vpd; /* vital product data */ - struct Scsi_Host *host; struct pci_dev *pcidev; struct list_head work_list; uint32_t work_ha; /* Host Attention Bits for WT */ uint32_t work_ha_mask; /* HA Bits owned by WT */ uint32_t work_hs; /* HS stored in case of ERRAT */ uint32_t work_status[2]; /* Extra status from SLIM */ - uint32_t work_hba_events; /* Timeout to be handled */ -#define WORKER_DISC_TMO 0x1 /* Discovery timeout */ -#define WORKER_ELS_TMO 0x2 /* ELS timeout */ -#define WORKER_MBOX_TMO 0x4 /* MBOX timeout */ -#define WORKER_FDMI_TMO 0x8 /* FDMI timeout */ wait_queue_head_t *work_wait; struct task_struct *worker_thread; + struct list_head hbq_buffer_list; + uint32_t hbq_count; /* Count of configured HBQs */ + struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indicies */ + unsigned long pci_bar0_map; /* Physical address for PCI BAR0 */ unsigned long pci_bar2_map; /* Physical address for PCI BAR2 */ void __iomem *slim_memmap_p; /* Kernel memory mapped address for @@ -334,6 +475,10 @@ struct lpfc_hba { reg */ void __iomem *HCregaddr; /* virtual address for host ctl reg */ + struct lpfc_hgp __iomem *host_gp; /* Host side get/put pointers */ + uint32_t 
__iomem *hbq_put; /* Address in SLIM to HBQ put ptrs */ + uint32_t *hbq_get; /* Host mem address of HBQ get ptrs */ + int brd_no; /* FC board number */ char SerialNumber[32]; /* adapter Serial Number */ @@ -353,7 +498,6 @@ struct lpfc_hba { uint8_t soft_wwn_enable; struct timer_list fcp_poll_timer; - struct timer_list els_tmofunc; /* * stat counters @@ -370,31 +514,69 @@ struct lpfc_hba { uint32_t total_scsi_bufs; struct list_head lpfc_iocb_list; uint32_t total_iocbq_bufs; + spinlock_t hbalock; /* pci_mem_pools */ struct pci_pool *lpfc_scsi_dma_buf_pool; struct pci_pool *lpfc_mbuf_pool; + struct pci_pool *lpfc_hbq_pool; struct lpfc_dma_pool lpfc_mbuf_safety_pool; mempool_t *mbox_mem_pool; mempool_t *nlp_mem_pool; struct fc_host_statistics link_stats; + + struct list_head port_list; + struct lpfc_vport *pport; /* physical lpfc_vport pointer */ + uint16_t max_vpi; /* Maximum virtual nports */ +#define LPFC_MAX_VPI 100 /* Max number of VPorts supported */ + unsigned long *vpi_bmask; /* vpi allocation table */ + + /* Data structure used by fabric iocb scheduler */ + struct list_head fabric_iocb_list; + atomic_t fabric_iocb_count; + struct timer_list fabric_block_timer; + unsigned long bit_flags; +#define FABRIC_COMANDS_BLOCKED 0 + atomic_t num_rsrc_err; + atomic_t num_cmd_success; + unsigned long last_rsrc_error_time; + unsigned long last_ramp_down_time; + unsigned long last_ramp_up_time; +#ifdef CONFIG_LPFC_DEBUG_FS + struct dentry *hba_debugfs_root; + atomic_t debugfs_vport_count; +#endif + + /* Fields used for heart beat. */ + unsigned long last_completion_time; + struct timer_list hb_tmofunc; + uint8_t hb_outstanding; }; +static inline struct Scsi_Host * +lpfc_shost_from_vport(struct lpfc_vport *vport) +{ + return container_of((void *) vport, struct Scsi_Host, hostdata[0]); +} + static inline void -lpfc_set_loopback_flag(struct lpfc_hba *phba) { +lpfc_set_loopback_flag(struct lpfc_hba *phba) +{ if (phba->cfg_topology == FLAGS_LOCAL_LB) - phba->fc_flag |= FC_LOOPBACK_MODE; + phba->link_flag |= LS_LOOPBACK_MODE; else - phba->fc_flag &= ~FC_LOOPBACK_MODE; + phba->link_flag &= ~LS_LOOPBACK_MODE; } -struct rnidrsp { - void *buf; - uint32_t uniqueid; - struct list_head list; - uint32_t data; -}; +static inline int +lpfc_is_link_up(struct lpfc_hba *phba) +{ + return phba->link_state == LPFC_LINK_UP || + phba->link_state == LPFC_CLEAR_LA || + phba->link_state == LPFC_HBA_READY; +} #define FC_REG_DUMP_EVENT 0x10 /* Register for Dump events */ + diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 5dfda9778c80..860a52c090f4 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -39,6 +39,7 @@ #include "lpfc_version.h" #include "lpfc_compat.h" #include "lpfc_crtn.h" +#include "lpfc_vport.h" #define LPFC_DEF_DEVLOSS_TMO 30 #define LPFC_MIN_DEVLOSS_TMO 1 @@ -76,116 +77,156 @@ static ssize_t lpfc_info_show(struct class_device *cdev, char *buf) { struct Scsi_Host *host = class_to_shost(cdev); + return snprintf(buf, PAGE_SIZE, "%s\n",lpfc_info(host)); } static ssize_t lpfc_serialnum_show(struct class_device *cdev, char *buf) { - struct Scsi_Host *host = class_to_shost(cdev); - struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; + struct Scsi_Host *shost = class_to_shost(cdev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + return snprintf(buf, PAGE_SIZE, "%s\n",phba->SerialNumber); } static ssize_t lpfc_modeldesc_show(struct class_device *cdev, char *buf) { - struct Scsi_Host *host = 
class_to_shost(cdev); - struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; + struct Scsi_Host *shost = class_to_shost(cdev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + return snprintf(buf, PAGE_SIZE, "%s\n",phba->ModelDesc); } static ssize_t lpfc_modelname_show(struct class_device *cdev, char *buf) { - struct Scsi_Host *host = class_to_shost(cdev); - struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; + struct Scsi_Host *shost = class_to_shost(cdev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + return snprintf(buf, PAGE_SIZE, "%s\n",phba->ModelName); } static ssize_t lpfc_programtype_show(struct class_device *cdev, char *buf) { - struct Scsi_Host *host = class_to_shost(cdev); - struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; + struct Scsi_Host *shost = class_to_shost(cdev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + return snprintf(buf, PAGE_SIZE, "%s\n",phba->ProgramType); } static ssize_t -lpfc_portnum_show(struct class_device *cdev, char *buf) +lpfc_vportnum_show(struct class_device *cdev, char *buf) { - struct Scsi_Host *host = class_to_shost(cdev); - struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; + struct Scsi_Host *shost = class_to_shost(cdev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + return snprintf(buf, PAGE_SIZE, "%s\n",phba->Port); } static ssize_t lpfc_fwrev_show(struct class_device *cdev, char *buf) { - struct Scsi_Host *host = class_to_shost(cdev); - struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; + struct Scsi_Host *shost = class_to_shost(cdev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; char fwrev[32]; + lpfc_decode_firmware_rev(phba, fwrev, 1); - return snprintf(buf, PAGE_SIZE, "%s\n",fwrev); + return snprintf(buf, PAGE_SIZE, "%s, sli-%d\n", fwrev, phba->sli_rev); } static ssize_t lpfc_hdw_show(struct class_device *cdev, char *buf) { char hdw[9]; - struct Scsi_Host *host = class_to_shost(cdev); - struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; + struct Scsi_Host *shost = class_to_shost(cdev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; lpfc_vpd_t *vp = &phba->vpd; + lpfc_jedec_to_ascii(vp->rev.biuRev, hdw); return snprintf(buf, PAGE_SIZE, "%s\n", hdw); } static ssize_t lpfc_option_rom_version_show(struct class_device *cdev, char *buf) { - struct Scsi_Host *host = class_to_shost(cdev); - struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; + struct Scsi_Host *shost = class_to_shost(cdev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + return snprintf(buf, PAGE_SIZE, "%s\n", phba->OptionROMVersion); } static ssize_t lpfc_state_show(struct class_device *cdev, char *buf) { - struct Scsi_Host *host = class_to_shost(cdev); - struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; - int len = 0; - switch (phba->hba_state) { - case LPFC_STATE_UNKNOWN: + struct Scsi_Host *shost = class_to_shost(cdev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + int len = 0; + + switch (phba->link_state) { + case LPFC_LINK_UNKNOWN: case LPFC_WARM_START: case LPFC_INIT_START: case LPFC_INIT_MBX_CMDS: case LPFC_LINK_DOWN: + case 
LPFC_HBA_ERROR: len += snprintf(buf + len, PAGE_SIZE-len, "Link Down\n"); break; case LPFC_LINK_UP: - case LPFC_LOCAL_CFG_LINK: - len += snprintf(buf + len, PAGE_SIZE-len, "Link Up\n"); - break; - case LPFC_FLOGI: - case LPFC_FABRIC_CFG_LINK: - case LPFC_NS_REG: - case LPFC_NS_QRY: - case LPFC_BUILD_DISC_LIST: - case LPFC_DISC_AUTH: case LPFC_CLEAR_LA: - len += snprintf(buf + len, PAGE_SIZE-len, - "Link Up - Discovery\n"); - break; case LPFC_HBA_READY: - len += snprintf(buf + len, PAGE_SIZE-len, - "Link Up - Ready:\n"); + len += snprintf(buf + len, PAGE_SIZE-len, "Link Up - \n"); + + switch (vport->port_state) { + len += snprintf(buf + len, PAGE_SIZE-len, + "initializing\n"); + break; + case LPFC_LOCAL_CFG_LINK: + len += snprintf(buf + len, PAGE_SIZE-len, + "Configuring Link\n"); + break; + case LPFC_FDISC: + case LPFC_FLOGI: + case LPFC_FABRIC_CFG_LINK: + case LPFC_NS_REG: + case LPFC_NS_QRY: + case LPFC_BUILD_DISC_LIST: + case LPFC_DISC_AUTH: + len += snprintf(buf + len, PAGE_SIZE - len, + "Discovery\n"); + break; + case LPFC_VPORT_READY: + len += snprintf(buf + len, PAGE_SIZE - len, "Ready\n"); + break; + + case LPFC_VPORT_FAILED: + len += snprintf(buf + len, PAGE_SIZE - len, "Failed\n"); + break; + + case LPFC_VPORT_UNKNOWN: + len += snprintf(buf + len, PAGE_SIZE - len, + "Unknown\n"); + break; + } + if (phba->fc_topology == TOPOLOGY_LOOP) { - if (phba->fc_flag & FC_PUBLIC_LOOP) + if (vport->fc_flag & FC_PUBLIC_LOOP) len += snprintf(buf + len, PAGE_SIZE-len, " Public Loop\n"); else len += snprintf(buf + len, PAGE_SIZE-len, " Private Loop\n"); } else { - if (phba->fc_flag & FC_FABRIC) + if (vport->fc_flag & FC_FABRIC) len += snprintf(buf + len, PAGE_SIZE-len, " Fabric\n"); else @@ -193,29 +234,32 @@ lpfc_state_show(struct class_device *cdev, char *buf) " Point-2-Point\n"); } } + return len; } static ssize_t lpfc_num_discovered_ports_show(struct class_device *cdev, char *buf) { - struct Scsi_Host *host = class_to_shost(cdev); - struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; - return snprintf(buf, PAGE_SIZE, "%d\n", phba->fc_map_cnt + - phba->fc_unmap_cnt); + struct Scsi_Host *shost = class_to_shost(cdev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + + return snprintf(buf, PAGE_SIZE, "%d\n", + vport->fc_map_cnt + vport->fc_unmap_cnt); } static int -lpfc_issue_lip(struct Scsi_Host *host) +lpfc_issue_lip(struct Scsi_Host *shost) { - struct lpfc_hba *phba = (struct lpfc_hba *) host->hostdata; + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; LPFC_MBOXQ_t *pmboxq; int mbxstatus = MBXERR_ERROR; - if ((phba->fc_flag & FC_OFFLINE_MODE) || - (phba->fc_flag & FC_BLOCK_MGMT_IO) || - (phba->hba_state != LPFC_HBA_READY)) + if ((vport->fc_flag & FC_OFFLINE_MODE) || + (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) || + (vport->port_state != LPFC_VPORT_READY)) return -EPERM; pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL); @@ -238,9 +282,7 @@ lpfc_issue_lip(struct Scsi_Host *host) } lpfc_set_loopback_flag(phba); - if (mbxstatus == MBX_TIMEOUT) - pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; - else + if (mbxstatus != MBX_TIMEOUT) mempool_free(pmboxq, phba->mbox_mem_pool); if (mbxstatus == MBXERR_ERROR) @@ -320,8 +362,10 @@ lpfc_selective_reset(struct lpfc_hba *phba) static ssize_t lpfc_issue_reset(struct class_device *cdev, const char *buf, size_t count) { - struct Scsi_Host *host = class_to_shost(cdev); - struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; + struct Scsi_Host *shost = class_to_shost(cdev); 
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + int status = -EINVAL; if (strncmp(buf, "selective", sizeof("selective") - 1) == 0) @@ -336,23 +380,26 @@ lpfc_issue_reset(struct class_device *cdev, const char *buf, size_t count) static ssize_t lpfc_nport_evt_cnt_show(struct class_device *cdev, char *buf) { - struct Scsi_Host *host = class_to_shost(cdev); - struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; + struct Scsi_Host *shost = class_to_shost(cdev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + return snprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt); } static ssize_t lpfc_board_mode_show(struct class_device *cdev, char *buf) { - struct Scsi_Host *host = class_to_shost(cdev); - struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; + struct Scsi_Host *shost = class_to_shost(cdev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; char * state; - if (phba->hba_state == LPFC_HBA_ERROR) + if (phba->link_state == LPFC_HBA_ERROR) state = "error"; - else if (phba->hba_state == LPFC_WARM_START) + else if (phba->link_state == LPFC_WARM_START) state = "warm start"; - else if (phba->hba_state == LPFC_INIT_START) + else if (phba->link_state == LPFC_INIT_START) state = "offline"; else state = "online"; @@ -363,8 +410,9 @@ lpfc_board_mode_show(struct class_device *cdev, char *buf) static ssize_t lpfc_board_mode_store(struct class_device *cdev, const char *buf, size_t count) { - struct Scsi_Host *host = class_to_shost(cdev); - struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; + struct Scsi_Host *shost = class_to_shost(cdev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; struct completion online_compl; int status=0; @@ -389,11 +437,166 @@ lpfc_board_mode_store(struct class_device *cdev, const char *buf, size_t count) return -EIO; } +int +lpfc_get_hba_info(struct lpfc_hba *phba, + uint32_t *mxri, uint32_t *axri, + uint32_t *mrpi, uint32_t *arpi, + uint32_t *mvpi, uint32_t *avpi) +{ + struct lpfc_sli *psli = &phba->sli; + LPFC_MBOXQ_t *pmboxq; + MAILBOX_t *pmb; + int rc = 0; + + /* + * prevent udev from issuing mailbox commands until the port is + * configured. 
+ */ + if (phba->link_state < LPFC_LINK_DOWN || + !phba->mbox_mem_pool || + (phba->sli.sli_flag & LPFC_SLI2_ACTIVE) == 0) + return 0; + + if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) + return 0; + + pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!pmboxq) + return 0; + memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t)); + + pmb = &pmboxq->mb; + pmb->mbxCommand = MBX_READ_CONFIG; + pmb->mbxOwner = OWN_HOST; + pmboxq->context1 = NULL; + + if ((phba->pport->fc_flag & FC_OFFLINE_MODE) || + (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) + rc = MBX_NOT_FINISHED; + else + rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); + + if (rc != MBX_SUCCESS) { + if (rc != MBX_TIMEOUT) + mempool_free(pmboxq, phba->mbox_mem_pool); + return 0; + } + + if (mrpi) + *mrpi = pmb->un.varRdConfig.max_rpi; + if (arpi) + *arpi = pmb->un.varRdConfig.avail_rpi; + if (mxri) + *mxri = pmb->un.varRdConfig.max_xri; + if (axri) + *axri = pmb->un.varRdConfig.avail_xri; + if (mvpi) + *mvpi = pmb->un.varRdConfig.max_vpi; + if (avpi) + *avpi = pmb->un.varRdConfig.avail_vpi; + + mempool_free(pmboxq, phba->mbox_mem_pool); + return 1; +} + +static ssize_t +lpfc_max_rpi_show(struct class_device *cdev, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + uint32_t cnt; + + if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, NULL, NULL, NULL)) + return snprintf(buf, PAGE_SIZE, "%d\n", cnt); + return snprintf(buf, PAGE_SIZE, "Unknown\n"); +} + +static ssize_t +lpfc_used_rpi_show(struct class_device *cdev, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + uint32_t cnt, acnt; + + if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, &acnt, NULL, NULL)) + return snprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt)); + return snprintf(buf, PAGE_SIZE, "Unknown\n"); +} + +static ssize_t +lpfc_max_xri_show(struct class_device *cdev, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + uint32_t cnt; + + if (lpfc_get_hba_info(phba, &cnt, NULL, NULL, NULL, NULL, NULL)) + return snprintf(buf, PAGE_SIZE, "%d\n", cnt); + return snprintf(buf, PAGE_SIZE, "Unknown\n"); +} + +static ssize_t +lpfc_used_xri_show(struct class_device *cdev, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + uint32_t cnt, acnt; + + if (lpfc_get_hba_info(phba, &cnt, &acnt, NULL, NULL, NULL, NULL)) + return snprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt)); + return snprintf(buf, PAGE_SIZE, "Unknown\n"); +} + +static ssize_t +lpfc_max_vpi_show(struct class_device *cdev, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + uint32_t cnt; + + if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, NULL)) + return snprintf(buf, PAGE_SIZE, "%d\n", cnt); + return snprintf(buf, PAGE_SIZE, "Unknown\n"); +} + +static ssize_t +lpfc_used_vpi_show(struct class_device *cdev, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + uint32_t cnt, acnt; + + if (lpfc_get_hba_info(phba, NULL, 
NULL, NULL, NULL, &cnt, &acnt)) + return snprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt)); + return snprintf(buf, PAGE_SIZE, "Unknown\n"); +} + +static ssize_t +lpfc_npiv_info_show(struct class_device *cdev, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + + if (!(phba->max_vpi)) + return snprintf(buf, PAGE_SIZE, "NPIV Not Supported\n"); + if (vport->port_type == LPFC_PHYSICAL_PORT) + return snprintf(buf, PAGE_SIZE, "NPIV Physical\n"); + return snprintf(buf, PAGE_SIZE, "NPIV Virtual (VPI %d)\n", vport->vpi); +} + static ssize_t lpfc_poll_show(struct class_device *cdev, char *buf) { - struct Scsi_Host *host = class_to_shost(cdev); - struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; + struct Scsi_Host *shost = class_to_shost(cdev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; return snprintf(buf, PAGE_SIZE, "%#x\n", phba->cfg_poll); } @@ -402,8 +605,9 @@ static ssize_t lpfc_poll_store(struct class_device *cdev, const char *buf, size_t count) { - struct Scsi_Host *host = class_to_shost(cdev); - struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; + struct Scsi_Host *shost = class_to_shost(cdev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; uint32_t creg_val; uint32_t old_val; int val=0; @@ -417,7 +621,7 @@ lpfc_poll_store(struct class_device *cdev, const char *buf, if ((val & 0x3) != val) return -EINVAL; - spin_lock_irq(phba->host->host_lock); + spin_lock_irq(&phba->hbalock); old_val = phba->cfg_poll; @@ -432,16 +636,16 @@ lpfc_poll_store(struct class_device *cdev, const char *buf, lpfc_poll_start_timer(phba); } } else if (val != 0x0) { - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(&phba->hbalock); return -EINVAL; } if (!(val & DISABLE_FCP_RING_INT) && (old_val & DISABLE_FCP_RING_INT)) { - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(&phba->hbalock); del_timer(&phba->fcp_poll_timer); - spin_lock_irq(phba->host->host_lock); + spin_lock_irq(&phba->hbalock); creg_val = readl(phba->HCregaddr); creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); writel(creg_val, phba->HCregaddr); @@ -450,7 +654,7 @@ lpfc_poll_store(struct class_device *cdev, const char *buf, phba->cfg_poll = val; - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(&phba->hbalock); return strlen(buf); } @@ -459,8 +663,9 @@ lpfc_poll_store(struct class_device *cdev, const char *buf, static ssize_t \ lpfc_##attr##_show(struct class_device *cdev, char *buf) \ { \ - struct Scsi_Host *host = class_to_shost(cdev);\ - struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;\ + struct Scsi_Host *shost = class_to_shost(cdev);\ + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ + struct lpfc_hba *phba = vport->phba;\ int val = 0;\ val = phba->cfg_##attr;\ return snprintf(buf, PAGE_SIZE, "%d\n",\ @@ -471,8 +676,9 @@ lpfc_##attr##_show(struct class_device *cdev, char *buf) \ static ssize_t \ lpfc_##attr##_show(struct class_device *cdev, char *buf) \ { \ - struct Scsi_Host *host = class_to_shost(cdev);\ - struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;\ + struct Scsi_Host *shost = class_to_shost(cdev);\ + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ + struct lpfc_hba *phba = vport->phba;\ int val = 0;\ val = phba->cfg_##attr;\ return snprintf(buf, PAGE_SIZE, "%#x\n",\ @@ -514,8 +720,9 @@ lpfc_##attr##_set(struct 
lpfc_hba *phba, int val) \ static ssize_t \ lpfc_##attr##_store(struct class_device *cdev, const char *buf, size_t count) \ { \ - struct Scsi_Host *host = class_to_shost(cdev);\ - struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;\ + struct Scsi_Host *shost = class_to_shost(cdev);\ + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ + struct lpfc_hba *phba = vport->phba;\ int val=0;\ if (!isdigit(buf[0]))\ return -EINVAL;\ @@ -576,7 +783,7 @@ static CLASS_DEVICE_ATTR(serialnum, S_IRUGO, lpfc_serialnum_show, NULL); static CLASS_DEVICE_ATTR(modeldesc, S_IRUGO, lpfc_modeldesc_show, NULL); static CLASS_DEVICE_ATTR(modelname, S_IRUGO, lpfc_modelname_show, NULL); static CLASS_DEVICE_ATTR(programtype, S_IRUGO, lpfc_programtype_show, NULL); -static CLASS_DEVICE_ATTR(portnum, S_IRUGO, lpfc_portnum_show, NULL); +static CLASS_DEVICE_ATTR(portnum, S_IRUGO, lpfc_vportnum_show, NULL); static CLASS_DEVICE_ATTR(fwrev, S_IRUGO, lpfc_fwrev_show, NULL); static CLASS_DEVICE_ATTR(hdw, S_IRUGO, lpfc_hdw_show, NULL); static CLASS_DEVICE_ATTR(state, S_IRUGO, lpfc_state_show, NULL); @@ -592,6 +799,13 @@ static CLASS_DEVICE_ATTR(management_version, S_IRUGO, management_version_show, static CLASS_DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR, lpfc_board_mode_show, lpfc_board_mode_store); static CLASS_DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset); +static CLASS_DEVICE_ATTR(max_vpi, S_IRUGO, lpfc_max_vpi_show, NULL); +static CLASS_DEVICE_ATTR(used_vpi, S_IRUGO, lpfc_used_vpi_show, NULL); +static CLASS_DEVICE_ATTR(max_rpi, S_IRUGO, lpfc_max_rpi_show, NULL); +static CLASS_DEVICE_ATTR(used_rpi, S_IRUGO, lpfc_used_rpi_show, NULL); +static CLASS_DEVICE_ATTR(max_xri, S_IRUGO, lpfc_max_xri_show, NULL); +static CLASS_DEVICE_ATTR(used_xri, S_IRUGO, lpfc_used_xri_show, NULL); +static CLASS_DEVICE_ATTR(npiv_info, S_IRUGO, lpfc_npiv_info_show, NULL); static char *lpfc_soft_wwn_key = "C99G71SL8032A"; @@ -600,8 +814,9 @@ static ssize_t lpfc_soft_wwn_enable_store(struct class_device *cdev, const char *buf, size_t count) { - struct Scsi_Host *host = class_to_shost(cdev); - struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; + struct Scsi_Host *shost = class_to_shost(cdev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; unsigned int cnt = count; /* @@ -634,8 +849,10 @@ static CLASS_DEVICE_ATTR(lpfc_soft_wwn_enable, S_IWUSR, NULL, static ssize_t lpfc_soft_wwpn_show(struct class_device *cdev, char *buf) { - struct Scsi_Host *host = class_to_shost(cdev); - struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; + struct Scsi_Host *shost = class_to_shost(cdev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + return snprintf(buf, PAGE_SIZE, "0x%llx\n", (unsigned long long)phba->cfg_soft_wwpn); } @@ -644,8 +861,9 @@ lpfc_soft_wwpn_show(struct class_device *cdev, char *buf) static ssize_t lpfc_soft_wwpn_store(struct class_device *cdev, const char *buf, size_t count) { - struct Scsi_Host *host = class_to_shost(cdev); - struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; + struct Scsi_Host *shost = class_to_shost(cdev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; struct completion online_compl; int stat1=0, stat2=0; unsigned int i, j, cnt=count; @@ -680,9 +898,9 @@ lpfc_soft_wwpn_store(struct class_device *cdev, const char *buf, size_t count) } } phba->cfg_soft_wwpn = wwn_to_u64(wwpn); - fc_host_port_name(host) = 
phba->cfg_soft_wwpn; + fc_host_port_name(shost) = phba->cfg_soft_wwpn; if (phba->cfg_soft_wwnn) - fc_host_node_name(host) = phba->cfg_soft_wwnn; + fc_host_node_name(shost) = phba->cfg_soft_wwnn; dev_printk(KERN_NOTICE, &phba->pcidev->dev, "lpfc%d: Reinitializing to use soft_wwpn\n", phba->brd_no); @@ -777,6 +995,15 @@ MODULE_PARM_DESC(lpfc_poll, "FCP ring polling mode control:" static CLASS_DEVICE_ATTR(lpfc_poll, S_IRUGO | S_IWUSR, lpfc_poll_show, lpfc_poll_store); +int lpfc_sli_mode = 0; +module_param(lpfc_sli_mode, int, 0); +MODULE_PARM_DESC(lpfc_sli_mode, "SLI mode selector:" + " 0 - auto (SLI-3 if supported)," + " 2 - select SLI-2 even on SLI-3 capable HBAs," + " 3 - select SLI-3"); + +LPFC_ATTR_R(npiv_enable, 0, 0, 1, "Enable NPIV functionality"); + /* # lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear # until the timer expires. Value range is [0,255]. Default value is 30. @@ -790,8 +1017,9 @@ MODULE_PARM_DESC(lpfc_nodev_tmo, static ssize_t lpfc_nodev_tmo_show(struct class_device *cdev, char *buf) { - struct Scsi_Host *host = class_to_shost(cdev); - struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; + struct Scsi_Host *shost = class_to_shost(cdev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; int val = 0; val = phba->cfg_devloss_tmo; return snprintf(buf, PAGE_SIZE, "%d\n", @@ -832,13 +1060,19 @@ lpfc_nodev_tmo_init(struct lpfc_hba *phba, int val) static void lpfc_update_rport_devloss_tmo(struct lpfc_hba *phba) { + struct lpfc_vport *vport; + struct Scsi_Host *shost; struct lpfc_nodelist *ndlp; - spin_lock_irq(phba->host->host_lock); - list_for_each_entry(ndlp, &phba->fc_nodes, nlp_listp) - if (ndlp->rport) - ndlp->rport->dev_loss_tmo = phba->cfg_devloss_tmo; - spin_unlock_irq(phba->host->host_lock); + list_for_each_entry(vport, &phba->port_list, listentry) { + shost = lpfc_shost_from_vport(vport); + spin_lock_irq(shost->host_lock); + list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) + if (ndlp->rport) + ndlp->rport->dev_loss_tmo = + phba->cfg_devloss_tmo; + spin_unlock_irq(shost->host_lock); + } } static int @@ -946,6 +1180,33 @@ LPFC_ATTR_R(hba_queue_depth, 8192, 32, 8192, "Max number of FCP commands we can queue to a lpfc HBA"); /* +# peer_port_login: This parameter allows/prevents logins +# between peer ports hosted on the same physical port. +# When this parameter is set 0 peer ports of same physical port +# are not allowed to login to each other. +# When this parameter is set 1 peer ports of same physical port +# are allowed to login to each other. +# Default value of this parameter is 0. +*/ +LPFC_ATTR_R(peer_port_login, 0, 0, 1, + "Allow peer ports on the same physical port to login to each " + "other."); + +/* +# vport_restrict_login: This parameter allows/prevents logins +# between Virtual Ports and remote initiators. +# When this parameter is not set (0) Virtual Ports will accept PLOGIs from +# other initiators and will attempt to PLOGI all remote ports. +# When this parameter is set (1) Virtual Ports will reject PLOGIs from +# remote ports and will not attempt to PLOGI to other initiators. +# This parameter does not restrict to the physical port. +# This parameter does not restrict logins to Fabric resident remote ports. +# Default value of this parameter is 1. +*/ +LPFC_ATTR_RW(vport_restrict_login, 1, 0, 1, + "Restrict virtual ports login to remote initiators."); + +/* # Some disk devices have a "select ID" or "select Target" capability. 
# From a protocol standpoint "select ID" usually means select the # Fibre channel "ALPA". In the FC-AL Profile there is an "informative @@ -1088,7 +1349,8 @@ LPFC_ATTR_RW(poll_tmo, 10, 1, 255, LPFC_ATTR_R(use_msi, 0, 0, 1, "Use Message Signaled Interrupts, if possible"); -struct class_device_attribute *lpfc_host_attrs[] = { + +struct class_device_attribute *lpfc_hba_attrs[] = { &class_device_attr_info, &class_device_attr_serialnum, &class_device_attr_modeldesc, @@ -1104,6 +1366,8 @@ struct class_device_attribute *lpfc_host_attrs[] = { &class_device_attr_lpfc_log_verbose, &class_device_attr_lpfc_lun_queue_depth, &class_device_attr_lpfc_hba_queue_depth, + &class_device_attr_lpfc_peer_port_login, + &class_device_attr_lpfc_vport_restrict_login, &class_device_attr_lpfc_nodev_tmo, &class_device_attr_lpfc_devloss_tmo, &class_device_attr_lpfc_fcp_class, @@ -1119,9 +1383,17 @@ struct class_device_attribute *lpfc_host_attrs[] = { &class_device_attr_lpfc_multi_ring_type, &class_device_attr_lpfc_fdmi_on, &class_device_attr_lpfc_max_luns, + &class_device_attr_lpfc_npiv_enable, &class_device_attr_nport_evt_cnt, &class_device_attr_management_version, &class_device_attr_board_mode, + &class_device_attr_max_vpi, + &class_device_attr_used_vpi, + &class_device_attr_max_rpi, + &class_device_attr_used_rpi, + &class_device_attr_max_xri, + &class_device_attr_used_xri, + &class_device_attr_npiv_info, &class_device_attr_issue_reset, &class_device_attr_lpfc_poll, &class_device_attr_lpfc_poll_tmo, @@ -1137,9 +1409,11 @@ sysfs_ctlreg_write(struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { size_t buf_off; - struct Scsi_Host *host = class_to_shost(container_of(kobj, - struct class_device, kobj)); - struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; + struct class_device *cdev = container_of(kobj, struct class_device, + kobj); + struct Scsi_Host *shost = class_to_shost(cdev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; if ((off + count) > FF_REG_AREA_SIZE) return -ERANGE; @@ -1149,18 +1423,16 @@ sysfs_ctlreg_write(struct kobject *kobj, struct bin_attribute *bin_attr, if (off % 4 || count % 4 || (unsigned long)buf % 4) return -EINVAL; - spin_lock_irq(phba->host->host_lock); - - if (!(phba->fc_flag & FC_OFFLINE_MODE)) { - spin_unlock_irq(phba->host->host_lock); + if (!(vport->fc_flag & FC_OFFLINE_MODE)) { return -EPERM; } + spin_lock_irq(&phba->hbalock); for (buf_off = 0; buf_off < count; buf_off += sizeof(uint32_t)) writel(*((uint32_t *)(buf + buf_off)), phba->ctrl_regs_memmap_p + off + buf_off); - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(&phba->hbalock); return count; } @@ -1171,9 +1443,11 @@ sysfs_ctlreg_read(struct kobject *kobj, struct bin_attribute *bin_attr, { size_t buf_off; uint32_t * tmp_ptr; - struct Scsi_Host *host = class_to_shost(container_of(kobj, - struct class_device, kobj)); - struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; + struct class_device *cdev = container_of(kobj, struct class_device, + kobj); + struct Scsi_Host *shost = class_to_shost(cdev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; if (off > FF_REG_AREA_SIZE) return -ERANGE; @@ -1186,14 +1460,14 @@ sysfs_ctlreg_read(struct kobject *kobj, struct bin_attribute *bin_attr, if (off % 4 || count % 4 || (unsigned long)buf % 4) return -EINVAL; - spin_lock_irq(phba->host->host_lock); + spin_lock_irq(&phba->hbalock); for (buf_off = 0; buf_off < count; 
buf_off += sizeof(uint32_t)) { tmp_ptr = (uint32_t *)(buf + buf_off); *tmp_ptr = readl(phba->ctrl_regs_memmap_p + off + buf_off); } - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(&phba->hbalock); return count; } @@ -1210,7 +1484,7 @@ static struct bin_attribute sysfs_ctlreg_attr = { static void -sysfs_mbox_idle (struct lpfc_hba * phba) +sysfs_mbox_idle(struct lpfc_hba *phba) { phba->sysfs_mbox.state = SMBOX_IDLE; phba->sysfs_mbox.offset = 0; @@ -1226,10 +1500,12 @@ static ssize_t sysfs_mbox_write(struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { - struct Scsi_Host * host = - class_to_shost(container_of(kobj, struct class_device, kobj)); - struct lpfc_hba * phba = (struct lpfc_hba*)host->hostdata; - struct lpfcMboxq * mbox = NULL; + struct class_device *cdev = container_of(kobj, struct class_device, + kobj); + struct Scsi_Host *shost = class_to_shost(cdev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + struct lpfcMboxq *mbox = NULL; if ((count + off) > MAILBOX_CMD_SIZE) return -ERANGE; @@ -1247,7 +1523,7 @@ sysfs_mbox_write(struct kobject *kobj, struct bin_attribute *bin_attr, memset(mbox, 0, sizeof (LPFC_MBOXQ_t)); } - spin_lock_irq(host->host_lock); + spin_lock_irq(&phba->hbalock); if (off == 0) { if (phba->sysfs_mbox.mbox) @@ -1258,9 +1534,9 @@ sysfs_mbox_write(struct kobject *kobj, struct bin_attribute *bin_attr, } else { if (phba->sysfs_mbox.state != SMBOX_WRITING || phba->sysfs_mbox.offset != off || - phba->sysfs_mbox.mbox == NULL ) { + phba->sysfs_mbox.mbox == NULL) { sysfs_mbox_idle(phba); - spin_unlock_irq(host->host_lock); + spin_unlock_irq(&phba->hbalock); return -EAGAIN; } } @@ -1270,7 +1546,7 @@ sysfs_mbox_write(struct kobject *kobj, struct bin_attribute *bin_attr, phba->sysfs_mbox.offset = off + count; - spin_unlock_irq(host->host_lock); + spin_unlock_irq(&phba->hbalock); return count; } @@ -1279,10 +1555,11 @@ static ssize_t sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { - struct Scsi_Host *host = - class_to_shost(container_of(kobj, struct class_device, - kobj)); - struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; + struct class_device *cdev = container_of(kobj, struct class_device, + kobj); + struct Scsi_Host *shost = class_to_shost(cdev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; int rc; if (off > MAILBOX_CMD_SIZE) @@ -1297,7 +1574,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr, if (off && count == 0) return 0; - spin_lock_irq(phba->host->host_lock); + spin_lock_irq(&phba->hbalock); if (off == 0 && phba->sysfs_mbox.state == SMBOX_WRITING && @@ -1320,12 +1597,12 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr, case MBX_SET_MASK: case MBX_SET_SLIM: case MBX_SET_DEBUG: - if (!(phba->fc_flag & FC_OFFLINE_MODE)) { + if (!(vport->fc_flag & FC_OFFLINE_MODE)) { printk(KERN_WARNING "mbox_read:Command 0x%x " "is illegal in on-line state\n", phba->sysfs_mbox.mbox->mb.mbxCommand); sysfs_mbox_idle(phba); - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(&phba->hbalock); return -EPERM; } case MBX_LOAD_SM: @@ -1355,48 +1632,48 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr, printk(KERN_WARNING "mbox_read: Illegal Command 0x%x\n", phba->sysfs_mbox.mbox->mb.mbxCommand); sysfs_mbox_idle(phba); - spin_unlock_irq(phba->host->host_lock); + 
spin_unlock_irq(&phba->hbalock); return -EPERM; default: printk(KERN_WARNING "mbox_read: Unknown Command 0x%x\n", phba->sysfs_mbox.mbox->mb.mbxCommand); sysfs_mbox_idle(phba); - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(&phba->hbalock); return -EPERM; } - if (phba->fc_flag & FC_BLOCK_MGMT_IO) { + phba->sysfs_mbox.mbox->vport = vport; + + if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) { sysfs_mbox_idle(phba); - spin_unlock_irq(host->host_lock); + spin_unlock_irq(&phba->hbalock); return -EAGAIN; } - if ((phba->fc_flag & FC_OFFLINE_MODE) || + if ((vport->fc_flag & FC_OFFLINE_MODE) || (!(phba->sli.sli_flag & LPFC_SLI2_ACTIVE))){ - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(&phba->hbalock); rc = lpfc_sli_issue_mbox (phba, phba->sysfs_mbox.mbox, MBX_POLL); - spin_lock_irq(phba->host->host_lock); + spin_lock_irq(&phba->hbalock); } else { - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(&phba->hbalock); rc = lpfc_sli_issue_mbox_wait (phba, phba->sysfs_mbox.mbox, lpfc_mbox_tmo_val(phba, phba->sysfs_mbox.mbox->mb.mbxCommand) * HZ); - spin_lock_irq(phba->host->host_lock); + spin_lock_irq(&phba->hbalock); } if (rc != MBX_SUCCESS) { if (rc == MBX_TIMEOUT) { - phba->sysfs_mbox.mbox->mbox_cmpl = - lpfc_sli_def_mbox_cmpl; phba->sysfs_mbox.mbox = NULL; } sysfs_mbox_idle(phba); - spin_unlock_irq(host->host_lock); + spin_unlock_irq(&phba->hbalock); return (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV; } phba->sysfs_mbox.state = SMBOX_READING; @@ -1405,7 +1682,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr, phba->sysfs_mbox.state != SMBOX_READING) { printk(KERN_WARNING "mbox_read: Bad State\n"); sysfs_mbox_idle(phba); - spin_unlock_irq(host->host_lock); + spin_unlock_irq(&phba->hbalock); return -EAGAIN; } @@ -1416,7 +1693,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr, if (phba->sysfs_mbox.offset == MAILBOX_CMD_SIZE) sysfs_mbox_idle(phba); - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(&phba->hbalock); return count; } @@ -1432,35 +1709,35 @@ static struct bin_attribute sysfs_mbox_attr = { }; int -lpfc_alloc_sysfs_attr(struct lpfc_hba *phba) +lpfc_alloc_sysfs_attr(struct lpfc_vport *vport) { - struct Scsi_Host *host = phba->host; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); int error; - error = sysfs_create_bin_file(&host->shost_classdev.kobj, - &sysfs_ctlreg_attr); + error = sysfs_create_bin_file(&shost->shost_classdev.kobj, + &sysfs_ctlreg_attr); if (error) goto out; - error = sysfs_create_bin_file(&host->shost_classdev.kobj, - &sysfs_mbox_attr); + error = sysfs_create_bin_file(&shost->shost_classdev.kobj, + &sysfs_mbox_attr); if (error) goto out_remove_ctlreg_attr; return 0; out_remove_ctlreg_attr: - sysfs_remove_bin_file(&host->shost_classdev.kobj, &sysfs_ctlreg_attr); + sysfs_remove_bin_file(&shost->shost_classdev.kobj, &sysfs_ctlreg_attr); out: return error; } void -lpfc_free_sysfs_attr(struct lpfc_hba *phba) +lpfc_free_sysfs_attr(struct lpfc_vport *vport) { - struct Scsi_Host *host = phba->host; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); - sysfs_remove_bin_file(&host->shost_classdev.kobj, &sysfs_mbox_attr); - sysfs_remove_bin_file(&host->shost_classdev.kobj, &sysfs_ctlreg_attr); + sysfs_remove_bin_file(&shost->shost_classdev.kobj, &sysfs_mbox_attr); + sysfs_remove_bin_file(&shost->shost_classdev.kobj, &sysfs_ctlreg_attr); } @@ -1471,26 +1748,30 @@ lpfc_free_sysfs_attr(struct lpfc_hba *phba) static void lpfc_get_host_port_id(struct Scsi_Host *shost) { - struct lpfc_hba *phba = 
(struct lpfc_hba*)shost->hostdata; + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + /* note: fc_myDID already in cpu endianness */ - fc_host_port_id(shost) = phba->fc_myDID; + fc_host_port_id(shost) = vport->fc_myDID; } static void lpfc_get_host_port_type(struct Scsi_Host *shost) { - struct lpfc_hba *phba = (struct lpfc_hba*)shost->hostdata; + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; spin_lock_irq(shost->host_lock); - if (phba->hba_state == LPFC_HBA_READY) { + if (vport->port_type == LPFC_NPIV_PORT) { + fc_host_port_type(shost) = FC_PORTTYPE_NPIV; + } else if (lpfc_is_link_up(phba)) { if (phba->fc_topology == TOPOLOGY_LOOP) { - if (phba->fc_flag & FC_PUBLIC_LOOP) + if (vport->fc_flag & FC_PUBLIC_LOOP) fc_host_port_type(shost) = FC_PORTTYPE_NLPORT; else fc_host_port_type(shost) = FC_PORTTYPE_LPORT; } else { - if (phba->fc_flag & FC_FABRIC) + if (vport->fc_flag & FC_FABRIC) fc_host_port_type(shost) = FC_PORTTYPE_NPORT; else fc_host_port_type(shost) = FC_PORTTYPE_PTP; @@ -1504,29 +1785,20 @@ lpfc_get_host_port_type(struct Scsi_Host *shost) static void lpfc_get_host_port_state(struct Scsi_Host *shost) { - struct lpfc_hba *phba = (struct lpfc_hba*)shost->hostdata; + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; spin_lock_irq(shost->host_lock); - if (phba->fc_flag & FC_OFFLINE_MODE) + if (vport->fc_flag & FC_OFFLINE_MODE) fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE; else { - switch (phba->hba_state) { - case LPFC_STATE_UNKNOWN: - case LPFC_WARM_START: - case LPFC_INIT_START: - case LPFC_INIT_MBX_CMDS: + switch (phba->link_state) { + case LPFC_LINK_UNKNOWN: case LPFC_LINK_DOWN: fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN; break; case LPFC_LINK_UP: - case LPFC_LOCAL_CFG_LINK: - case LPFC_FLOGI: - case LPFC_FABRIC_CFG_LINK: - case LPFC_NS_REG: - case LPFC_NS_QRY: - case LPFC_BUILD_DISC_LIST: - case LPFC_DISC_AUTH: case LPFC_CLEAR_LA: case LPFC_HBA_READY: /* Links up, beyond this port_type reports state */ @@ -1547,11 +1819,12 @@ lpfc_get_host_port_state(struct Scsi_Host *shost) static void lpfc_get_host_speed(struct Scsi_Host *shost) { - struct lpfc_hba *phba = (struct lpfc_hba*)shost->hostdata; + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; spin_lock_irq(shost->host_lock); - if (phba->hba_state == LPFC_HBA_READY) { + if (lpfc_is_link_up(phba)) { switch(phba->fc_linkspeed) { case LA_1GHZ_LINK: fc_host_speed(shost) = FC_PORTSPEED_1GBIT; @@ -1577,39 +1850,31 @@ lpfc_get_host_speed(struct Scsi_Host *shost) static void lpfc_get_host_fabric_name (struct Scsi_Host *shost) { - struct lpfc_hba *phba = (struct lpfc_hba*)shost->hostdata; + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; u64 node_name; spin_lock_irq(shost->host_lock); - if ((phba->fc_flag & FC_FABRIC) || + if ((vport->fc_flag & FC_FABRIC) || ((phba->fc_topology == TOPOLOGY_LOOP) && - (phba->fc_flag & FC_PUBLIC_LOOP))) + (vport->fc_flag & FC_PUBLIC_LOOP))) node_name = wwn_to_u64(phba->fc_fabparam.nodeName.u.wwn); else /* fabric is local port if there is no F/FL_Port */ - node_name = wwn_to_u64(phba->fc_nodename.u.wwn); + node_name = wwn_to_u64(vport->fc_nodename.u.wwn); spin_unlock_irq(shost->host_lock); fc_host_fabric_name(shost) = node_name; } -static void -lpfc_get_host_symbolic_name (struct Scsi_Host *shost) -{ - struct lpfc_hba *phba = (struct lpfc_hba*)shost->hostdata; - 
- spin_lock_irq(shost->host_lock); - lpfc_get_hba_sym_node_name(phba, fc_host_symbolic_name(shost)); - spin_unlock_irq(shost->host_lock); -} - static struct fc_host_statistics * lpfc_get_stats(struct Scsi_Host *shost) { - struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata; - struct lpfc_sli *psli = &phba->sli; + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + struct lpfc_sli *psli = &phba->sli; struct fc_host_statistics *hs = &phba->link_stats; struct lpfc_lnk_stat * lso = &psli->lnk_stat_offsets; LPFC_MBOXQ_t *pmboxq; @@ -1617,7 +1882,16 @@ lpfc_get_stats(struct Scsi_Host *shost) unsigned long seconds; int rc = 0; - if (phba->fc_flag & FC_BLOCK_MGMT_IO) + /* + * prevent udev from issuing mailbox commands until the port is + * configured. + */ + if (phba->link_state < LPFC_LINK_DOWN || + !phba->mbox_mem_pool || + (phba->sli.sli_flag & LPFC_SLI2_ACTIVE) == 0) + return NULL; + + if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) return NULL; pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); @@ -1629,17 +1903,16 @@ lpfc_get_stats(struct Scsi_Host *shost) pmb->mbxCommand = MBX_READ_STATUS; pmb->mbxOwner = OWN_HOST; pmboxq->context1 = NULL; + pmboxq->vport = vport; - if ((phba->fc_flag & FC_OFFLINE_MODE) || + if ((vport->fc_flag & FC_OFFLINE_MODE) || (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); else rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); if (rc != MBX_SUCCESS) { - if (rc == MBX_TIMEOUT) - pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; - else + if (rc != MBX_TIMEOUT) mempool_free(pmboxq, phba->mbox_mem_pool); return NULL; } @@ -1655,18 +1928,17 @@ lpfc_get_stats(struct Scsi_Host *shost) pmb->mbxCommand = MBX_READ_LNK_STAT; pmb->mbxOwner = OWN_HOST; pmboxq->context1 = NULL; + pmboxq->vport = vport; - if ((phba->fc_flag & FC_OFFLINE_MODE) || + if ((vport->fc_flag & FC_OFFLINE_MODE) || (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); else rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); if (rc != MBX_SUCCESS) { - if (rc == MBX_TIMEOUT) - pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; - else - mempool_free( pmboxq, phba->mbox_mem_pool); + if (rc != MBX_TIMEOUT) + mempool_free(pmboxq, phba->mbox_mem_pool); return NULL; } @@ -1713,14 +1985,15 @@ lpfc_get_stats(struct Scsi_Host *shost) static void lpfc_reset_stats(struct Scsi_Host *shost) { - struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata; - struct lpfc_sli *psli = &phba->sli; - struct lpfc_lnk_stat * lso = &psli->lnk_stat_offsets; + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + struct lpfc_sli *psli = &phba->sli; + struct lpfc_lnk_stat *lso = &psli->lnk_stat_offsets; LPFC_MBOXQ_t *pmboxq; MAILBOX_t *pmb; int rc = 0; - if (phba->fc_flag & FC_BLOCK_MGMT_IO) + if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) return; pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); @@ -1733,17 +2006,16 @@ lpfc_reset_stats(struct Scsi_Host *shost) pmb->mbxOwner = OWN_HOST; pmb->un.varWords[0] = 0x1; /* reset request */ pmboxq->context1 = NULL; + pmboxq->vport = vport; - if ((phba->fc_flag & FC_OFFLINE_MODE) || + if ((vport->fc_flag & FC_OFFLINE_MODE) || (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); else rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); if (rc != MBX_SUCCESS) { - if (rc == MBX_TIMEOUT) - pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; - else 
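The READ_STATUS/READ_LNK_STAT changes in these hunks all follow the same vport-aware mailbox pattern: allocate from the mailbox mempool, stamp the new pmboxq->vport field, poll when the port is offline or SLI2 is not active, otherwise sleep-wait with a 2*RA_TOV timeout, and free the mailbox on failure only when the command did not time out (a timed-out mailbox is still owned by the completion path). A condensed sketch of that pattern, using the driver-internal types from the lpfc headers; the helper name is illustrative only, not a function in the driver:

/*
 * Sketch of the mailbox flow used by lpfc_get_stats()/lpfc_reset_stats()
 * above; simplified for illustration, types come from the lpfc headers.
 */
static int lpfc_issue_stat_mbox_sketch(struct lpfc_vport *vport, uint8_t mbx_cmd)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *pmboxq;
	MAILBOX_t *pmb;
	int rc;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq)
		return -ENOMEM;
	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));

	pmb = &pmboxq->mb;
	pmb->mbxCommand = mbx_cmd;	/* e.g. MBX_READ_STATUS */
	pmb->mbxOwner = OWN_HOST;
	pmboxq->context1 = NULL;
	pmboxq->vport = vport;		/* new: mailbox carries its vport */

	/* Poll when offline or SLI2 inactive, otherwise sleep-wait. */
	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
	    !(phba->sli.sli_flag & LPFC_SLI2_ACTIVE))
		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
	else
		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);

	if (rc != MBX_SUCCESS) {
		/* On timeout the completion handler still owns the mailbox. */
		if (rc != MBX_TIMEOUT)
			mempool_free(pmboxq, phba->mbox_mem_pool);
		return -EIO;
	}

	mempool_free(pmboxq, phba->mbox_mem_pool);
	return 0;
}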
+ if (rc != MBX_TIMEOUT) mempool_free(pmboxq, phba->mbox_mem_pool); return; } @@ -1752,17 +2024,16 @@ lpfc_reset_stats(struct Scsi_Host *shost) pmb->mbxCommand = MBX_READ_LNK_STAT; pmb->mbxOwner = OWN_HOST; pmboxq->context1 = NULL; + pmboxq->vport = vport; - if ((phba->fc_flag & FC_OFFLINE_MODE) || + if ((vport->fc_flag & FC_OFFLINE_MODE) || (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); else rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); if (rc != MBX_SUCCESS) { - if (rc == MBX_TIMEOUT) - pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; - else + if (rc != MBX_TIMEOUT) mempool_free( pmboxq, phba->mbox_mem_pool); return; } @@ -1791,13 +2062,13 @@ lpfc_reset_stats(struct Scsi_Host *shost) static struct lpfc_nodelist * lpfc_get_node_by_target(struct scsi_target *starget) { - struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); - struct lpfc_hba *phba = (struct lpfc_hba *) shost->hostdata; + struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_nodelist *ndlp; spin_lock_irq(shost->host_lock); /* Search for this, mapped, target ID */ - list_for_each_entry(ndlp, &phba->fc_nodes, nlp_listp) { + list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { if (ndlp->nlp_state == NLP_STE_MAPPED_NODE && starget->id == ndlp->nlp_sid) { spin_unlock_irq(shost->host_lock); @@ -1887,8 +2158,66 @@ struct fc_function_template lpfc_transport_functions = { .get_host_fabric_name = lpfc_get_host_fabric_name, .show_host_fabric_name = 1, - .get_host_symbolic_name = lpfc_get_host_symbolic_name, - .show_host_symbolic_name = 1, + /* + * The LPFC driver treats linkdown handling as target loss events + * so there are no sysfs handlers for link_down_tmo. 
+ */ + + .get_fc_host_stats = lpfc_get_stats, + .reset_fc_host_stats = lpfc_reset_stats, + + .dd_fcrport_size = sizeof(struct lpfc_rport_data), + .show_rport_maxframe_size = 1, + .show_rport_supported_classes = 1, + + .set_rport_dev_loss_tmo = lpfc_set_rport_loss_tmo, + .show_rport_dev_loss_tmo = 1, + + .get_starget_port_id = lpfc_get_starget_port_id, + .show_starget_port_id = 1, + + .get_starget_node_name = lpfc_get_starget_node_name, + .show_starget_node_name = 1, + + .get_starget_port_name = lpfc_get_starget_port_name, + .show_starget_port_name = 1, + + .issue_fc_host_lip = lpfc_issue_lip, + .dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk, + .terminate_rport_io = lpfc_terminate_rport_io, + + .vport_create = lpfc_vport_create, + .vport_delete = lpfc_vport_delete, + .dd_fcvport_size = sizeof(struct lpfc_vport *), +}; + +struct fc_function_template lpfc_vport_transport_functions = { + /* fixed attributes the driver supports */ + .show_host_node_name = 1, + .show_host_port_name = 1, + .show_host_supported_classes = 1, + .show_host_supported_fc4s = 1, + .show_host_supported_speeds = 1, + .show_host_maxframe_size = 1, + + /* dynamic attributes the driver supports */ + .get_host_port_id = lpfc_get_host_port_id, + .show_host_port_id = 1, + + .get_host_port_type = lpfc_get_host_port_type, + .show_host_port_type = 1, + + .get_host_port_state = lpfc_get_host_port_state, + .show_host_port_state = 1, + + /* active_fc4s is shown but doesn't change (thus no get function) */ + .show_host_active_fc4s = 1, + + .get_host_speed = lpfc_get_host_speed, + .show_host_speed = 1, + + .get_host_fabric_name = lpfc_get_host_fabric_name, + .show_host_fabric_name = 1, /* * The LPFC driver treats linkdown handling as target loss events @@ -1917,6 +2246,8 @@ struct fc_function_template lpfc_transport_functions = { .issue_fc_host_lip = lpfc_issue_lip, .dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk, .terminate_rport_io = lpfc_terminate_rport_io, + + .vport_disable = lpfc_vport_disable, }; void @@ -1939,6 +2270,9 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) lpfc_discovery_threads_init(phba, lpfc_discovery_threads); lpfc_max_luns_init(phba, lpfc_max_luns); lpfc_poll_tmo_init(phba, lpfc_poll_tmo); + lpfc_peer_port_login_init(phba, lpfc_peer_port_login); + lpfc_npiv_enable_init(phba, lpfc_npiv_enable); + lpfc_vport_restrict_login_init(phba, lpfc_vport_restrict_login); lpfc_use_msi_init(phba, lpfc_use_msi); lpfc_devloss_tmo_init(phba, lpfc_devloss_tmo); lpfc_nodev_tmo_init(phba, lpfc_nodev_tmo); diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index b8c2a8862d8c..e19d1a746586 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -23,92 +23,114 @@ typedef int (*node_filter)(struct lpfc_nodelist *ndlp, void *param); struct fc_rport; void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t); void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *); +void lpfc_heart_beat(struct lpfc_hba *, LPFC_MBOXQ_t *); int lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, struct lpfc_dmabuf *mp); void lpfc_clear_la(struct lpfc_hba *, LPFC_MBOXQ_t *); +void lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport); void lpfc_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *); -int lpfc_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *); +int lpfc_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *, int); void lpfc_read_config(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_read_lnk_stat(struct lpfc_hba *, LPFC_MBOXQ_t *); -int lpfc_reg_login(struct lpfc_hba *, uint32_t, uint8_t *, 
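With two templates in place, one for the physical port (exposing .vport_create/.vport_delete and sizing dd_fcvport_size for the fc_vport private data) and a reduced one for NPIV vports (exposing only .vport_disable), the FC transport class sees each as a separate attachment. A sketch of the assumed registration flow against the 2.6.22-era scsi_transport_fc API; the template-pointer and function names here are illustrative rather than the driver's own:

/* Sketch only: attach both templates to the FC transport class. */
#include <scsi/scsi_transport_fc.h>

/* Declared in lpfc_crtn.h (see the hunk above). */
extern struct fc_function_template lpfc_transport_functions;
extern struct fc_function_template lpfc_vport_transport_functions;

static struct scsi_transport_template *lpfc_transport_template;
static struct scsi_transport_template *lpfc_vport_transport_template;

static int lpfc_transport_setup_sketch(void)
{
	lpfc_transport_template =
		fc_attach_transport(&lpfc_transport_functions);
	if (!lpfc_transport_template)
		return -ENOMEM;

	lpfc_vport_transport_template =
		fc_attach_transport(&lpfc_vport_transport_functions);
	if (!lpfc_vport_transport_template) {
		fc_release_transport(lpfc_transport_template);
		return -ENOMEM;
	}
	return 0;
}

/* A physical port's Scsi_Host then uses the full template:      */
/*	shost->transportt = lpfc_transport_template;              */
/* while an NPIV vport's Scsi_Host uses the reduced one:         */
/*	shost->transportt = lpfc_vport_transport_template;        */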
LPFC_MBOXQ_t *, - uint32_t); -void lpfc_unreg_login(struct lpfc_hba *, uint32_t, LPFC_MBOXQ_t *); -void lpfc_unreg_did(struct lpfc_hba *, uint32_t, LPFC_MBOXQ_t *); +int lpfc_reg_login(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *, + LPFC_MBOXQ_t *, uint32_t); +void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); +void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); +void lpfc_reg_vpi(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); +void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *); void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t); - +void lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove); int lpfc_linkdown(struct lpfc_hba *); void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_mbx_cmpl_clear_la(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); +void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); -void lpfc_dequeue_node(struct lpfc_hba *, struct lpfc_nodelist *); -void lpfc_nlp_set_state(struct lpfc_hba *, struct lpfc_nodelist *, int); -void lpfc_drop_node(struct lpfc_hba *, struct lpfc_nodelist *); -void lpfc_set_disctmo(struct lpfc_hba *); -int lpfc_can_disctmo(struct lpfc_hba *); -int lpfc_unreg_rpi(struct lpfc_hba *, struct lpfc_nodelist *); +void lpfc_dequeue_node(struct lpfc_vport *, struct lpfc_nodelist *); +void lpfc_nlp_set_state(struct lpfc_vport *, struct lpfc_nodelist *, int); +void lpfc_drop_node(struct lpfc_vport *, struct lpfc_nodelist *); +void lpfc_set_disctmo(struct lpfc_vport *); +int lpfc_can_disctmo(struct lpfc_vport *); +int lpfc_unreg_rpi(struct lpfc_vport *, struct lpfc_nodelist *); +void lpfc_unreg_all_rpis(struct lpfc_vport *); +void lpfc_unreg_default_rpis(struct lpfc_vport *); +void lpfc_issue_reg_vpi(struct lpfc_hba *, struct lpfc_vport *); + int lpfc_check_sli_ndlp(struct lpfc_hba *, struct lpfc_sli_ring *, - struct lpfc_iocbq *, struct lpfc_nodelist *); -void lpfc_nlp_init(struct lpfc_hba *, struct lpfc_nodelist *, uint32_t); + struct lpfc_iocbq *, struct lpfc_nodelist *); +void lpfc_nlp_init(struct lpfc_vport *, struct lpfc_nodelist *, uint32_t); struct lpfc_nodelist *lpfc_nlp_get(struct lpfc_nodelist *); int lpfc_nlp_put(struct lpfc_nodelist *); -struct lpfc_nodelist *lpfc_setup_disc_node(struct lpfc_hba *, uint32_t); -void lpfc_disc_list_loopmap(struct lpfc_hba *); -void lpfc_disc_start(struct lpfc_hba *); -void lpfc_disc_flush_list(struct lpfc_hba *); +struct lpfc_nodelist *lpfc_setup_disc_node(struct lpfc_vport *, uint32_t); +void lpfc_disc_list_loopmap(struct lpfc_vport *); +void lpfc_disc_start(struct lpfc_vport *); +void lpfc_disc_flush_list(struct lpfc_vport *); +void lpfc_cleanup_discovery_resources(struct lpfc_vport *); void lpfc_disc_timeout(unsigned long); -struct lpfc_nodelist *__lpfc_findnode_rpi(struct lpfc_hba * phba, uint16_t rpi); -struct lpfc_nodelist *lpfc_findnode_rpi(struct lpfc_hba * phba, uint16_t rpi); +struct lpfc_nodelist *__lpfc_findnode_rpi(struct lpfc_vport *, uint16_t); +struct lpfc_nodelist *lpfc_findnode_rpi(struct lpfc_vport *, uint16_t); +void lpfc_worker_wake_up(struct lpfc_hba *); int lpfc_workq_post_event(struct lpfc_hba *, void *, void *, uint32_t); int lpfc_do_work(void *); -int lpfc_disc_state_machine(struct lpfc_hba *, 
struct lpfc_nodelist *, void *, +int lpfc_disc_state_machine(struct lpfc_vport *, struct lpfc_nodelist *, void *, uint32_t); -int lpfc_check_sparm(struct lpfc_hba *, struct lpfc_nodelist *, +void lpfc_register_new_vport(struct lpfc_hba *, struct lpfc_vport *, + struct lpfc_nodelist *); +void lpfc_do_scr_ns_plogi(struct lpfc_hba *, struct lpfc_vport *); +int lpfc_check_sparm(struct lpfc_vport *, struct lpfc_nodelist *, struct serv_parm *, uint32_t); -int lpfc_els_abort(struct lpfc_hba *, struct lpfc_nodelist * ndlp); +int lpfc_els_abort(struct lpfc_hba *, struct lpfc_nodelist *); +int lpfc_els_chk_latt(struct lpfc_vport *); int lpfc_els_abort_flogi(struct lpfc_hba *); -int lpfc_initial_flogi(struct lpfc_hba *); -int lpfc_issue_els_plogi(struct lpfc_hba *, uint32_t, uint8_t); -int lpfc_issue_els_prli(struct lpfc_hba *, struct lpfc_nodelist *, uint8_t); -int lpfc_issue_els_adisc(struct lpfc_hba *, struct lpfc_nodelist *, uint8_t); -int lpfc_issue_els_logo(struct lpfc_hba *, struct lpfc_nodelist *, uint8_t); -int lpfc_issue_els_scr(struct lpfc_hba *, uint32_t, uint8_t); +int lpfc_initial_flogi(struct lpfc_vport *); +int lpfc_initial_fdisc(struct lpfc_vport *); +int lpfc_issue_els_fdisc(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t); +int lpfc_issue_els_plogi(struct lpfc_vport *, uint32_t, uint8_t); +int lpfc_issue_els_prli(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t); +int lpfc_issue_els_adisc(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t); +int lpfc_issue_els_logo(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t); +int lpfc_issue_els_npiv_logo(struct lpfc_vport *, struct lpfc_nodelist *); +int lpfc_issue_els_scr(struct lpfc_vport *, uint32_t, uint8_t); int lpfc_els_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *); -int lpfc_els_rsp_acc(struct lpfc_hba *, uint32_t, struct lpfc_iocbq *, +int lpfc_ct_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *); +int lpfc_els_rsp_acc(struct lpfc_vport *, uint32_t, struct lpfc_iocbq *, struct lpfc_nodelist *, LPFC_MBOXQ_t *, uint8_t); -int lpfc_els_rsp_reject(struct lpfc_hba *, uint32_t, struct lpfc_iocbq *, - struct lpfc_nodelist *); -int lpfc_els_rsp_adisc_acc(struct lpfc_hba *, struct lpfc_iocbq *, +int lpfc_els_rsp_reject(struct lpfc_vport *, uint32_t, struct lpfc_iocbq *, + struct lpfc_nodelist *, LPFC_MBOXQ_t *); +int lpfc_els_rsp_adisc_acc(struct lpfc_vport *, struct lpfc_iocbq *, struct lpfc_nodelist *); -int lpfc_els_rsp_prli_acc(struct lpfc_hba *, struct lpfc_iocbq *, +int lpfc_els_rsp_prli_acc(struct lpfc_vport *, struct lpfc_iocbq *, struct lpfc_nodelist *); -void lpfc_cancel_retry_delay_tmo(struct lpfc_hba *, struct lpfc_nodelist *); +void lpfc_cancel_retry_delay_tmo(struct lpfc_vport *, struct lpfc_nodelist *); void lpfc_els_retry_delay(unsigned long); void lpfc_els_retry_delay_handler(struct lpfc_nodelist *); +void lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *); void lpfc_els_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *, struct lpfc_iocbq *); -int lpfc_els_handle_rscn(struct lpfc_hba *); -int lpfc_els_flush_rscn(struct lpfc_hba *); -int lpfc_rscn_payload_check(struct lpfc_hba *, uint32_t); -void lpfc_els_flush_cmd(struct lpfc_hba *); -int lpfc_els_disc_adisc(struct lpfc_hba *); -int lpfc_els_disc_plogi(struct lpfc_hba *); +int lpfc_els_handle_rscn(struct lpfc_vport *); +void lpfc_els_flush_rscn(struct lpfc_vport *); +int lpfc_rscn_payload_check(struct lpfc_vport *, uint32_t); +void lpfc_els_flush_cmd(struct lpfc_vport *); +int lpfc_els_disc_adisc(struct lpfc_vport *); +int lpfc_els_disc_plogi(struct 
lpfc_vport *); void lpfc_els_timeout(unsigned long); -void lpfc_els_timeout_handler(struct lpfc_hba *); +void lpfc_els_timeout_handler(struct lpfc_vport *); +void lpfc_hb_timeout(unsigned long); +void lpfc_hb_timeout_handler(struct lpfc_hba *); void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *, struct lpfc_iocbq *); -int lpfc_ns_cmd(struct lpfc_hba *, struct lpfc_nodelist *, int); -int lpfc_fdmi_cmd(struct lpfc_hba *, struct lpfc_nodelist *, int); +int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t); +int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int); void lpfc_fdmi_tmo(unsigned long); -void lpfc_fdmi_tmo_handler(struct lpfc_hba *); +void lpfc_fdmi_timeout_handler(struct lpfc_vport *vport); int lpfc_config_port_prep(struct lpfc_hba *); int lpfc_config_port_post(struct lpfc_hba *); @@ -136,16 +158,23 @@ void lpfc_config_port(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_kill_board(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_mbox_put(struct lpfc_hba *, LPFC_MBOXQ_t *); LPFC_MBOXQ_t *lpfc_mbox_get(struct lpfc_hba *); +void lpfc_mbox_cmpl_put(struct lpfc_hba *, LPFC_MBOXQ_t *); int lpfc_mbox_tmo_val(struct lpfc_hba *, int); +void lpfc_config_hbq(struct lpfc_hba *, struct lpfc_hbq_init *, uint32_t , + LPFC_MBOXQ_t *); +struct lpfc_hbq_entry * lpfc_sli_next_hbq_slot(struct lpfc_hba *, uint32_t); + int lpfc_mem_alloc(struct lpfc_hba *); void lpfc_mem_free(struct lpfc_hba *); +void lpfc_stop_vport_timers(struct lpfc_vport *); void lpfc_poll_timeout(unsigned long ptr); void lpfc_poll_start_timer(struct lpfc_hba * phba); void lpfc_sli_poll_fcp_ring(struct lpfc_hba * hba); struct lpfc_iocbq * lpfc_sli_get_iocbq(struct lpfc_hba *); void lpfc_sli_release_iocbq(struct lpfc_hba * phba, struct lpfc_iocbq * iocb); +void __lpfc_sli_release_iocbq(struct lpfc_hba * phba, struct lpfc_iocbq * iocb); uint16_t lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_iocbq * iocb); void lpfc_reset_barrier(struct lpfc_hba * phba); @@ -154,6 +183,7 @@ int lpfc_sli_brdkill(struct lpfc_hba *); int lpfc_sli_brdreset(struct lpfc_hba *); int lpfc_sli_brdrestart(struct lpfc_hba *); int lpfc_sli_hba_setup(struct lpfc_hba *); +int lpfc_sli_host_down(struct lpfc_vport *); int lpfc_sli_hba_down(struct lpfc_hba *); int lpfc_sli_issue_mbox(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); int lpfc_sli_handle_mb_event(struct lpfc_hba *); @@ -164,27 +194,36 @@ void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); int lpfc_sli_issue_iocb(struct lpfc_hba *, struct lpfc_sli_ring *, struct lpfc_iocbq *, uint32_t); void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t); -int lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *); +void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *); int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *, struct lpfc_dmabuf *); struct lpfc_dmabuf *lpfc_sli_ringpostbuf_get(struct lpfc_hba *, struct lpfc_sli_ring *, dma_addr_t); +int lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *, uint32_t); +int lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *, uint32_t); +void lpfc_sli_hbqbuf_free_all(struct lpfc_hba *); +struct hbq_dmabuf *lpfc_sli_hbqbuf_find(struct lpfc_hba *, uint32_t); +int lpfc_sli_hbq_size(void); int lpfc_sli_issue_abort_iotag(struct lpfc_hba *, struct lpfc_sli_ring *, struct lpfc_iocbq *); int lpfc_sli_sum_iocb(struct lpfc_hba *, struct lpfc_sli_ring *, uint16_t, - uint64_t, lpfc_ctx_cmd); + uint64_t, lpfc_ctx_cmd); int lpfc_sli_abort_iocb(struct lpfc_hba *, struct lpfc_sli_ring *, uint16_t, - uint64_t, 
uint32_t, lpfc_ctx_cmd); + uint64_t, uint32_t, lpfc_ctx_cmd); void lpfc_mbox_timeout(unsigned long); void lpfc_mbox_timeout_handler(struct lpfc_hba *); -struct lpfc_nodelist *lpfc_findnode_did(struct lpfc_hba *, uint32_t); -struct lpfc_nodelist *lpfc_findnode_wwpn(struct lpfc_hba *, struct lpfc_name *); +struct lpfc_nodelist *__lpfc_find_node(struct lpfc_vport *, node_filter, + void *); +struct lpfc_nodelist *lpfc_find_node(struct lpfc_vport *, node_filter, void *); +struct lpfc_nodelist *lpfc_findnode_did(struct lpfc_vport *, uint32_t); +struct lpfc_nodelist *lpfc_findnode_wwpn(struct lpfc_vport *, + struct lpfc_name *); int lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq, - uint32_t timeout); + uint32_t timeout); int lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, @@ -195,25 +234,56 @@ void lpfc_sli_abort_fcp_cmpl(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, struct lpfc_iocbq * rspiocb); +void *lpfc_hbq_alloc(struct lpfc_hba *, int, dma_addr_t *); +void lpfc_hbq_free(struct lpfc_hba *, void *, dma_addr_t); +void lpfc_sli_free_hbq(struct lpfc_hba *, struct hbq_dmabuf *); + void *lpfc_mbuf_alloc(struct lpfc_hba *, int, dma_addr_t *); +void __lpfc_mbuf_free(struct lpfc_hba *, void *, dma_addr_t); void lpfc_mbuf_free(struct lpfc_hba *, void *, dma_addr_t); +void lpfc_in_buf_free(struct lpfc_hba *, struct lpfc_dmabuf *); /* Function prototypes. */ const char* lpfc_info(struct Scsi_Host *); -void lpfc_scan_start(struct Scsi_Host *); int lpfc_scan_finished(struct Scsi_Host *, unsigned long); void lpfc_get_cfgparam(struct lpfc_hba *); -int lpfc_alloc_sysfs_attr(struct lpfc_hba *); -void lpfc_free_sysfs_attr(struct lpfc_hba *); -extern struct class_device_attribute *lpfc_host_attrs[]; +int lpfc_alloc_sysfs_attr(struct lpfc_vport *); +void lpfc_free_sysfs_attr(struct lpfc_vport *); +extern struct class_device_attribute *lpfc_hba_attrs[]; extern struct scsi_host_template lpfc_template; extern struct fc_function_template lpfc_transport_functions; +extern struct fc_function_template lpfc_vport_transport_functions; +extern int lpfc_sli_mode; -void lpfc_get_hba_sym_node_name(struct lpfc_hba * phba, uint8_t * symbp); +int lpfc_vport_symbolic_node_name(struct lpfc_vport *, char *, size_t); void lpfc_terminate_rport_io(struct fc_rport *); void lpfc_dev_loss_tmo_callbk(struct fc_rport *rport); +struct lpfc_vport *lpfc_create_port(struct lpfc_hba *, int, struct fc_vport *); +int lpfc_vport_disable(struct fc_vport *fc_vport, bool disable); +void lpfc_mbx_unreg_vpi(struct lpfc_vport *); +void destroy_port(struct lpfc_vport *); +int lpfc_get_instance(void); +void lpfc_host_attrib_init(struct Scsi_Host *); + +extern void lpfc_debugfs_initialize(struct lpfc_vport *); +extern void lpfc_debugfs_terminate(struct lpfc_vport *); +extern void lpfc_debugfs_disc_trc(struct lpfc_vport *, int, char *, uint32_t, + uint32_t, uint32_t); + +/* Interface exported by fabric iocb scheduler */ +int lpfc_issue_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *); +void lpfc_fabric_abort_vport(struct lpfc_vport *); +void lpfc_fabric_abort_nport(struct lpfc_nodelist *); +void lpfc_fabric_abort_hba(struct lpfc_hba *); +void lpfc_fabric_abort_flogi(struct lpfc_hba *); +void lpfc_fabric_block_timeout(unsigned long); +void lpfc_unblock_fabric_iocbs(struct lpfc_hba *); +void lpfc_adjust_queue_depth(struct lpfc_hba *); +void lpfc_ramp_down_queue_handler(struct lpfc_hba *); +void lpfc_ramp_up_queue_handler(struct lpfc_hba *); + #define ScsiResult(host_code, scsi_code) 
(((host_code) << 16) | scsi_code) #define HBA_EVENT_RSCN 5 #define HBA_EVENT_LINK_UP 2 diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index 34a9e3bb2614..ae9d6f385a6c 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -40,6 +40,8 @@ #include "lpfc_logmsg.h" #include "lpfc_crtn.h" #include "lpfc_version.h" +#include "lpfc_vport.h" +#include "lpfc_debugfs.h" #define HBA_PORTSPEED_UNKNOWN 0 /* Unknown - transceiver * incapable of reporting */ @@ -58,25 +60,69 @@ static char *lpfc_release_version = LPFC_DRIVER_VERSION; /* * lpfc_ct_unsol_event */ +static void +lpfc_ct_unsol_buffer(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, + struct lpfc_dmabuf *mp, uint32_t size) +{ + if (!mp) { + printk(KERN_ERR "%s (%d): Unsolited CT, no buffer, " + "piocbq = %p, status = x%x, mp = %p, size = %d\n", + __FUNCTION__, __LINE__, + piocbq, piocbq->iocb.ulpStatus, mp, size); + } + + printk(KERN_ERR "%s (%d): Ignoring unsolicted CT piocbq = %p, " + "buffer = %p, size = %d, status = x%x\n", + __FUNCTION__, __LINE__, + piocbq, mp, size, + piocbq->iocb.ulpStatus); + +} + +static void +lpfc_ct_ignore_hbq_buffer(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, + struct lpfc_dmabuf *mp, uint32_t size) +{ + if (!mp) { + printk(KERN_ERR "%s (%d): Unsolited CT, no " + "HBQ buffer, piocbq = %p, status = x%x\n", + __FUNCTION__, __LINE__, + piocbq, piocbq->iocb.ulpStatus); + } else { + lpfc_ct_unsol_buffer(phba, piocbq, mp, size); + printk(KERN_ERR "%s (%d): Ignoring unsolicted CT " + "piocbq = %p, buffer = %p, size = %d, " + "status = x%x\n", + __FUNCTION__, __LINE__, + piocbq, mp, size, piocbq->iocb.ulpStatus); + } +} + void -lpfc_ct_unsol_event(struct lpfc_hba * phba, - struct lpfc_sli_ring * pring, struct lpfc_iocbq * piocbq) +lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, + struct lpfc_iocbq *piocbq) { - struct lpfc_iocbq *next_piocbq; - struct lpfc_dmabuf *pmbuf = NULL; - struct lpfc_dmabuf *matp, *next_matp; - uint32_t ctx = 0, size = 0, cnt = 0; + struct lpfc_dmabuf *mp = NULL; IOCB_t *icmd = &piocbq->iocb; - IOCB_t *save_icmd = icmd; - int i, go_exit = 0; - struct list_head head; + int i; + struct lpfc_iocbq *iocbq; + dma_addr_t paddr; + uint32_t size; + struct lpfc_dmabuf *bdeBuf1 = piocbq->context2; + struct lpfc_dmabuf *bdeBuf2 = piocbq->context3; + + piocbq->context2 = NULL; + piocbq->context3 = NULL; - if ((icmd->ulpStatus == IOSTAT_LOCAL_REJECT) && + if (unlikely(icmd->ulpStatus == IOSTAT_NEED_BUFFER)) { + lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); + } else if ((icmd->ulpStatus == IOSTAT_LOCAL_REJECT) && ((icmd->un.ulpWord[4] & 0xff) == IOERR_RCV_BUFFER_WAITING)) { /* Not enough posted buffers; Try posting more buffers */ phba->fc_stat.NoRcvBuf++; - lpfc_post_buffer(phba, pring, 0, 1); + if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) + lpfc_post_buffer(phba, pring, 0, 1); return; } @@ -86,66 +132,56 @@ lpfc_ct_unsol_event(struct lpfc_hba * phba, if (icmd->ulpBdeCount == 0) return; - INIT_LIST_HEAD(&head); - list_add_tail(&head, &piocbq->list); - - list_for_each_entry_safe(piocbq, next_piocbq, &head, list) { - icmd = &piocbq->iocb; - if (ctx == 0) - ctx = (uint32_t) (icmd->ulpContext); - if (icmd->ulpBdeCount == 0) - continue; - - for (i = 0; i < icmd->ulpBdeCount; i++) { - matp = lpfc_sli_ringpostbuf_get(phba, pring, - getPaddr(icmd->un. - cont64[i]. - addrHigh, - icmd->un. - cont64[i]. 
- addrLow)); - if (!matp) { - /* Insert lpfc log message here */ - lpfc_post_buffer(phba, pring, cnt, 1); - go_exit = 1; - goto ct_unsol_event_exit_piocbq; + if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { + list_for_each_entry(iocbq, &piocbq->list, list) { + icmd = &iocbq->iocb; + if (icmd->ulpBdeCount == 0) { + printk(KERN_ERR "%s (%d): Unsolited CT, no " + "BDE, iocbq = %p, status = x%x\n", + __FUNCTION__, __LINE__, + iocbq, iocbq->iocb.ulpStatus); + continue; } - /* Typically for Unsolicited CT requests */ - if (!pmbuf) { - pmbuf = matp; - INIT_LIST_HEAD(&pmbuf->list); - } else - list_add_tail(&matp->list, &pmbuf->list); - - size += icmd->un.cont64[i].tus.f.bdeSize; - cnt++; + size = icmd->un.cont64[0].tus.f.bdeSize; + lpfc_ct_ignore_hbq_buffer(phba, piocbq, bdeBuf1, size); + lpfc_in_buf_free(phba, bdeBuf1); + if (icmd->ulpBdeCount == 2) { + lpfc_ct_ignore_hbq_buffer(phba, piocbq, bdeBuf2, + size); + lpfc_in_buf_free(phba, bdeBuf2); + } } + } else { + struct lpfc_iocbq *next; + + list_for_each_entry_safe(iocbq, next, &piocbq->list, list) { + icmd = &iocbq->iocb; + if (icmd->ulpBdeCount == 0) { + printk(KERN_ERR "%s (%d): Unsolited CT, no " + "BDE, iocbq = %p, status = x%x\n", + __FUNCTION__, __LINE__, + iocbq, iocbq->iocb.ulpStatus); + continue; + } - icmd->ulpBdeCount = 0; - } - - lpfc_post_buffer(phba, pring, cnt, 1); - if (save_icmd->ulpStatus) { - go_exit = 1; - } - -ct_unsol_event_exit_piocbq: - list_del(&head); - if (pmbuf) { - list_for_each_entry_safe(matp, next_matp, &pmbuf->list, list) { - lpfc_mbuf_free(phba, matp->virt, matp->phys); - list_del(&matp->list); - kfree(matp); + for (i = 0; i < icmd->ulpBdeCount; i++) { + paddr = getPaddr(icmd->un.cont64[i].addrHigh, + icmd->un.cont64[i].addrLow); + mp = lpfc_sli_ringpostbuf_get(phba, pring, + paddr); + size = icmd->un.cont64[i].tus.f.bdeSize; + lpfc_ct_unsol_buffer(phba, piocbq, mp, size); + lpfc_in_buf_free(phba, mp); + } + list_del(&iocbq->list); + lpfc_sli_release_iocbq(phba, iocbq); } - lpfc_mbuf_free(phba, pmbuf->virt, pmbuf->phys); - kfree(pmbuf); } - return; } static void -lpfc_free_ct_rsp(struct lpfc_hba * phba, struct lpfc_dmabuf * mlist) +lpfc_free_ct_rsp(struct lpfc_hba *phba, struct lpfc_dmabuf *mlist) { struct lpfc_dmabuf *mlast, *next_mlast; @@ -160,7 +196,7 @@ lpfc_free_ct_rsp(struct lpfc_hba * phba, struct lpfc_dmabuf * mlist) } static struct lpfc_dmabuf * -lpfc_alloc_ct_rsp(struct lpfc_hba * phba, int cmdcode, struct ulp_bde64 * bpl, +lpfc_alloc_ct_rsp(struct lpfc_hba *phba, int cmdcode, struct ulp_bde64 *bpl, uint32_t size, int *entries) { struct lpfc_dmabuf *mlist = NULL; @@ -181,7 +217,8 @@ lpfc_alloc_ct_rsp(struct lpfc_hba * phba, int cmdcode, struct ulp_bde64 * bpl, INIT_LIST_HEAD(&mp->list); - if (cmdcode == be16_to_cpu(SLI_CTNS_GID_FT)) + if (cmdcode == be16_to_cpu(SLI_CTNS_GID_FT) || + cmdcode == be16_to_cpu(SLI_CTNS_GFF_ID)) mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys)); else mp->virt = lpfc_mbuf_alloc(phba, 0, &(mp->phys)); @@ -201,8 +238,8 @@ lpfc_alloc_ct_rsp(struct lpfc_hba * phba, int cmdcode, struct ulp_bde64 * bpl, bpl->tus.f.bdeFlags = BUFF_USE_RCV; /* build buffer ptr list for IOCB */ - bpl->addrLow = le32_to_cpu( putPaddrLow(mp->phys) ); - bpl->addrHigh = le32_to_cpu( putPaddrHigh(mp->phys) ); + bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys) ); + bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys) ); bpl->tus.f.bdeSize = (uint16_t) cnt; bpl->tus.w = le32_to_cpu(bpl->tus.w); bpl++; @@ -215,24 +252,49 @@ lpfc_alloc_ct_rsp(struct lpfc_hba * phba, int cmdcode, struct ulp_bde64 * bpl, 
return mlist; } +int +lpfc_ct_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *ctiocb) +{ + struct lpfc_dmabuf *buf_ptr; + + if (ctiocb->context1) { + buf_ptr = (struct lpfc_dmabuf *) ctiocb->context1; + lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); + kfree(buf_ptr); + ctiocb->context1 = NULL; + } + if (ctiocb->context2) { + lpfc_free_ct_rsp(phba, (struct lpfc_dmabuf *) ctiocb->context2); + ctiocb->context2 = NULL; + } + + if (ctiocb->context3) { + buf_ptr = (struct lpfc_dmabuf *) ctiocb->context3; + lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); + kfree(buf_ptr); + ctiocb->context1 = NULL; + } + lpfc_sli_release_iocbq(phba, ctiocb); + return 0; +} + static int -lpfc_gen_req(struct lpfc_hba *phba, struct lpfc_dmabuf *bmp, +lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp, struct lpfc_dmabuf *inp, struct lpfc_dmabuf *outp, void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *), struct lpfc_nodelist *ndlp, uint32_t usr_flg, uint32_t num_entry, - uint32_t tmo) + uint32_t tmo, uint8_t retry) { - - struct lpfc_sli *psli = &phba->sli; + struct lpfc_hba *phba = vport->phba; + struct lpfc_sli *psli = &phba->sli; struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING]; IOCB_t *icmd; struct lpfc_iocbq *geniocb; + int rc; /* Allocate buffer for command iocb */ - spin_lock_irq(phba->host->host_lock); geniocb = lpfc_sli_get_iocbq(phba); - spin_unlock_irq(phba->host->host_lock); if (geniocb == NULL) return 1; @@ -272,31 +334,40 @@ lpfc_gen_req(struct lpfc_hba *phba, struct lpfc_dmabuf *bmp, icmd->ulpClass = CLASS3; icmd->ulpContext = ndlp->nlp_rpi; + if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { + /* For GEN_REQUEST64_CR, use the RPI */ + icmd->ulpCt_h = 0; + icmd->ulpCt_l = 0; + } + /* Issue GEN REQ IOCB for NPORT <did> */ lpfc_printf_log(phba, KERN_INFO, LOG_ELS, - "%d:0119 Issue GEN REQ IOCB for NPORT x%x " - "Data: x%x x%x\n", phba->brd_no, icmd->un.ulpWord[5], - icmd->ulpIoTag, phba->hba_state); + "%d (%d):0119 Issue GEN REQ IOCB to NPORT x%x " + "Data: x%x x%x\n", phba->brd_no, vport->vpi, + ndlp->nlp_DID, icmd->ulpIoTag, + vport->port_state); geniocb->iocb_cmpl = cmpl; geniocb->drvrTimeout = icmd->ulpTimeout + LPFC_DRVR_TIMEOUT; - spin_lock_irq(phba->host->host_lock); - if (lpfc_sli_issue_iocb(phba, pring, geniocb, 0) == IOCB_ERROR) { + geniocb->vport = vport; + geniocb->retry = retry; + rc = lpfc_sli_issue_iocb(phba, pring, geniocb, 0); + + if (rc == IOCB_ERROR) { lpfc_sli_release_iocbq(phba, geniocb); - spin_unlock_irq(phba->host->host_lock); return 1; } - spin_unlock_irq(phba->host->host_lock); return 0; } static int -lpfc_ct_cmd(struct lpfc_hba *phba, struct lpfc_dmabuf *inmp, +lpfc_ct_cmd(struct lpfc_vport *vport, struct lpfc_dmabuf *inmp, struct lpfc_dmabuf *bmp, struct lpfc_nodelist *ndlp, void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *), - uint32_t rsp_size) + uint32_t rsp_size, uint8_t retry) { + struct lpfc_hba *phba = vport->phba; struct ulp_bde64 *bpl = (struct ulp_bde64 *) bmp->virt; struct lpfc_dmabuf *outmp; int cnt = 0, status; @@ -310,8 +381,8 @@ lpfc_ct_cmd(struct lpfc_hba *phba, struct lpfc_dmabuf *inmp, if (!outmp) return -ENOMEM; - status = lpfc_gen_req(phba, bmp, inmp, outmp, cmpl, ndlp, 0, - cnt+1, 0); + status = lpfc_gen_req(vport, bmp, inmp, outmp, cmpl, ndlp, 0, + cnt+1, 0, retry); if (status) { lpfc_free_ct_rsp(phba, outmp); return -ENOMEM; @@ -319,20 +390,35 @@ lpfc_ct_cmd(struct lpfc_hba *phba, struct lpfc_dmabuf *inmp, return 0; } +static struct lpfc_vport * +lpfc_find_vport_by_did(struct 
lpfc_hba *phba, uint32_t did) { + + struct lpfc_vport *vport_curr; + + list_for_each_entry(vport_curr, &phba->port_list, listentry) { + if ((vport_curr->fc_myDID) && + (vport_curr->fc_myDID == did)) + return vport_curr; + } + + return NULL; +} + static int -lpfc_ns_rsp(struct lpfc_hba * phba, struct lpfc_dmabuf * mp, uint32_t Size) +lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint32_t Size) { + struct lpfc_hba *phba = vport->phba; struct lpfc_sli_ct_request *Response = (struct lpfc_sli_ct_request *) mp->virt; struct lpfc_nodelist *ndlp = NULL; struct lpfc_dmabuf *mlast, *next_mp; uint32_t *ctptr = (uint32_t *) & Response->un.gid.PortType; - uint32_t Did; - uint32_t CTentry; + uint32_t Did, CTentry; int Cnt; struct list_head head; - lpfc_set_disctmo(phba); + lpfc_set_disctmo(vport); + vport->num_disc_nodes = 0; list_add_tail(&head, &mp->list); @@ -350,39 +436,96 @@ lpfc_ns_rsp(struct lpfc_hba * phba, struct lpfc_dmabuf * mp, uint32_t Size) /* Loop through entire NameServer list of DIDs */ while (Cnt >= sizeof (uint32_t)) { - /* Get next DID from NameServer List */ CTentry = *ctptr++; Did = ((be32_to_cpu(CTentry)) & Mask_DID); ndlp = NULL; - if (Did != phba->fc_myDID) { - /* Check for rscn processing or not */ - ndlp = lpfc_setup_disc_node(phba, Did); - } - /* Mark all node table entries that are in the - Nameserver */ - if (ndlp) { - /* NameServer Rsp */ - lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, - "%d:0238 Process x%x NameServer" - " Rsp Data: x%x x%x x%x\n", - phba->brd_no, + + /* + * Check for rscn processing or not + * To conserve rpi's, filter out addresses for other + * vports on the same physical HBAs. + */ + if ((Did != vport->fc_myDID) && + ((lpfc_find_vport_by_did(phba, Did) == NULL) || + phba->cfg_peer_port_login)) { + if ((vport->port_type != LPFC_NPIV_PORT) || + (vport->fc_flag & FC_RFF_NOT_SUPPORTED) || + (!phba->cfg_vport_restrict_login)) { + ndlp = lpfc_setup_disc_node(vport, Did); + if (ndlp) { + lpfc_debugfs_disc_trc(vport, + LPFC_DISC_TRC_CT, + "Parse GID_FTrsp: " + "did:x%x flg:x%x x%x", Did, ndlp->nlp_flag, - phba->fc_flag, - phba->fc_rscn_id_cnt); - } else { - /* NameServer Rsp */ - lpfc_printf_log(phba, - KERN_INFO, - LOG_DISCOVERY, - "%d:0239 Skip x%x NameServer " - "Rsp Data: x%x x%x x%x\n", - phba->brd_no, - Did, Size, phba->fc_flag, - phba->fc_rscn_id_cnt); + vport->fc_flag); + + lpfc_printf_log(phba, KERN_INFO, + LOG_DISCOVERY, + "%d (%d):0238 Process " + "x%x NameServer Rsp" + "Data: x%x x%x x%x\n", + phba->brd_no, + vport->vpi, Did, + ndlp->nlp_flag, + vport->fc_flag, + vport->fc_rscn_id_cnt); + } else { + lpfc_debugfs_disc_trc(vport, + LPFC_DISC_TRC_CT, + "Skip1 GID_FTrsp: " + "did:x%x flg:x%x cnt:%d", + Did, vport->fc_flag, + vport->fc_rscn_id_cnt); + + lpfc_printf_log(phba, KERN_INFO, + LOG_DISCOVERY, + "%d (%d):0239 Skip x%x " + "NameServer Rsp Data: " + "x%x x%x\n", + phba->brd_no, + vport->vpi, Did, + vport->fc_flag, + vport->fc_rscn_id_cnt); + } + + } else { + if (!(vport->fc_flag & FC_RSCN_MODE) || + (lpfc_rscn_payload_check(vport, Did))) { + lpfc_debugfs_disc_trc(vport, + LPFC_DISC_TRC_CT, + "Query GID_FTrsp: " + "did:x%x flg:x%x cnt:%d", + Did, vport->fc_flag, + vport->fc_rscn_id_cnt); + + if (lpfc_ns_cmd(vport, + SLI_CTNS_GFF_ID, + 0, Did) == 0) + vport->num_disc_nodes++; + } + else { + lpfc_debugfs_disc_trc(vport, + LPFC_DISC_TRC_CT, + "Skip2 GID_FTrsp: " + "did:x%x flg:x%x cnt:%d", + Did, vport->fc_flag, + vport->fc_rscn_id_cnt); + + lpfc_printf_log(phba, KERN_INFO, + LOG_DISCOVERY, + "%d (%d):0245 Skip x%x " + "NameServer 
Rsp Data: " + "x%x x%x\n", + phba->brd_no, + vport->vpi, Did, + vport->fc_flag, + vport->fc_rscn_id_cnt); + } + } } - if (CTentry & (be32_to_cpu(SLI_CT_LAST_ENTRY))) goto nsout1; Cnt -= sizeof (uint32_t); @@ -393,190 +536,369 @@ lpfc_ns_rsp(struct lpfc_hba * phba, struct lpfc_dmabuf * mp, uint32_t Size) nsout1: list_del(&head); - - /* - * The driver has cycled through all Nports in the RSCN payload. - * Complete the handling by cleaning up and marking the - * current driver state. - */ - if (phba->hba_state == LPFC_HBA_READY) { - lpfc_els_flush_rscn(phba); - spin_lock_irq(phba->host->host_lock); - phba->fc_flag |= FC_RSCN_MODE; /* we are still in RSCN mode */ - spin_unlock_irq(phba->host->host_lock); - } return 0; } - - - static void -lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, - struct lpfc_iocbq * rspiocb) +lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) { + struct lpfc_vport *vport = cmdiocb->vport; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); IOCB_t *irsp; - struct lpfc_sli *psli; struct lpfc_dmabuf *bmp; - struct lpfc_dmabuf *inp; struct lpfc_dmabuf *outp; - struct lpfc_nodelist *ndlp; struct lpfc_sli_ct_request *CTrsp; + int rc; - psli = &phba->sli; /* we pass cmdiocb to state machine which needs rspiocb as well */ cmdiocb->context_un.rsp_iocb = rspiocb; - inp = (struct lpfc_dmabuf *) cmdiocb->context1; outp = (struct lpfc_dmabuf *) cmdiocb->context2; bmp = (struct lpfc_dmabuf *) cmdiocb->context3; - irsp = &rspiocb->iocb; - if (irsp->ulpStatus) { - if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && - ((irsp->un.ulpWord[4] == IOERR_SLI_DOWN) || - (irsp->un.ulpWord[4] == IOERR_SLI_ABORTED))) { - goto out; - } + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, + "GID_FT cmpl: status:x%x/x%x rtry:%d", + irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_ns_retry); + + /* Don't bother processing response if vport is being torn down. 
*/ + if (vport->load_flag & FC_UNLOADING) + goto out; + + + if (lpfc_els_chk_latt(vport) || lpfc_error_lost_link(irsp)) { + lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, + "%d (%d):0216 Link event during NS query\n", + phba->brd_no, vport->vpi); + lpfc_vport_set_state(vport, FC_VPORT_FAILED); + goto out; + } + + if (irsp->ulpStatus) { /* Check for retry */ - if (phba->fc_ns_retry < LPFC_MAX_NS_RETRY) { - phba->fc_ns_retry++; + if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) { + if ((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) || + (irsp->un.ulpWord[4] != IOERR_NO_RESOURCES)) + vport->fc_ns_retry++; /* CT command is being retried */ - ndlp = lpfc_findnode_did(phba, NameServer_DID); - if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { - if (lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT) == - 0) { - goto out; - } - } + rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, + vport->fc_ns_retry, 0); + if (rc == 0) + goto out; } + lpfc_vport_set_state(vport, FC_VPORT_FAILED); + lpfc_printf_log(phba, KERN_ERR, LOG_ELS, + "%d (%d):0257 GID_FT Query error: 0x%x 0x%x\n", + phba->brd_no, vport->vpi, irsp->ulpStatus, + vport->fc_ns_retry); } else { /* Good status, continue checking */ CTrsp = (struct lpfc_sli_ct_request *) outp->virt; if (CTrsp->CommandResponse.bits.CmdRsp == be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) { lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, - "%d:0208 NameServer Rsp " + "%d (%d):0208 NameServer Rsp " "Data: x%x\n", - phba->brd_no, - phba->fc_flag); - lpfc_ns_rsp(phba, outp, + phba->brd_no, vport->vpi, + vport->fc_flag); + lpfc_ns_rsp(vport, outp, (uint32_t) (irsp->un.genreq64.bdl.bdeSize)); } else if (CTrsp->CommandResponse.bits.CmdRsp == be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) { /* NameServer Rsp Error */ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, - "%d:0240 NameServer Rsp Error " + "%d (%d):0240 NameServer Rsp Error " "Data: x%x x%x x%x x%x\n", - phba->brd_no, + phba->brd_no, vport->vpi, CTrsp->CommandResponse.bits.CmdRsp, (uint32_t) CTrsp->ReasonCode, (uint32_t) CTrsp->Explanation, - phba->fc_flag); + vport->fc_flag); + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, + "GID_FT rsp err1 cmd:x%x rsn:x%x exp:x%x", + (uint32_t)CTrsp->CommandResponse.bits.CmdRsp, + (uint32_t) CTrsp->ReasonCode, + (uint32_t) CTrsp->Explanation); + } else { /* NameServer Rsp Error */ - lpfc_printf_log(phba, - KERN_INFO, - LOG_DISCOVERY, - "%d:0241 NameServer Rsp Error " + lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, + "%d (%d):0241 NameServer Rsp Error " "Data: x%x x%x x%x x%x\n", - phba->brd_no, + phba->brd_no, vport->vpi, CTrsp->CommandResponse.bits.CmdRsp, (uint32_t) CTrsp->ReasonCode, (uint32_t) CTrsp->Explanation, - phba->fc_flag); + vport->fc_flag); + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, + "GID_FT rsp err2 cmd:x%x rsn:x%x exp:x%x", + (uint32_t)CTrsp->CommandResponse.bits.CmdRsp, + (uint32_t) CTrsp->ReasonCode, + (uint32_t) CTrsp->Explanation); } } /* Link up / RSCN discovery */ - lpfc_disc_start(phba); + if (vport->num_disc_nodes == 0) { + /* + * The driver has cycled through all Nports in the RSCN payload. + * Complete the handling by cleaning up and marking the + * current driver state. 
+ */ + if (vport->port_state >= LPFC_DISC_AUTH) { + if (vport->fc_flag & FC_RSCN_MODE) { + lpfc_els_flush_rscn(vport); + spin_lock_irq(shost->host_lock); + vport->fc_flag |= FC_RSCN_MODE; /* RSCN still */ + spin_unlock_irq(shost->host_lock); + } + else + lpfc_els_flush_rscn(vport); + } + + lpfc_disc_start(vport); + } out: - lpfc_free_ct_rsp(phba, outp); - lpfc_mbuf_free(phba, inp->virt, inp->phys); - lpfc_mbuf_free(phba, bmp->virt, bmp->phys); - kfree(inp); - kfree(bmp); - spin_lock_irq(phba->host->host_lock); - lpfc_sli_release_iocbq(phba, cmdiocb); - spin_unlock_irq(phba->host->host_lock); + lpfc_ct_free_iocb(phba, cmdiocb); return; } +void +lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) +{ + struct lpfc_vport *vport = cmdiocb->vport; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + IOCB_t *irsp = &rspiocb->iocb; + struct lpfc_dmabuf *inp = (struct lpfc_dmabuf *) cmdiocb->context1; + struct lpfc_dmabuf *outp = (struct lpfc_dmabuf *) cmdiocb->context2; + struct lpfc_sli_ct_request *CTrsp; + int did; + uint8_t fbits; + struct lpfc_nodelist *ndlp; + + did = ((struct lpfc_sli_ct_request *) inp->virt)->un.gff.PortId; + did = be32_to_cpu(did); + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, + "GFF_ID cmpl: status:x%x/x%x did:x%x", + irsp->ulpStatus, irsp->un.ulpWord[4], did); + + if (irsp->ulpStatus == IOSTAT_SUCCESS) { + /* Good status, continue checking */ + CTrsp = (struct lpfc_sli_ct_request *) outp->virt; + fbits = CTrsp->un.gff_acc.fbits[FCP_TYPE_FEATURE_OFFSET]; + + if (CTrsp->CommandResponse.bits.CmdRsp == + be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) { + if ((fbits & FC4_FEATURE_INIT) && + !(fbits & FC4_FEATURE_TARGET)) { + lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, + "%d (%d):0245 Skip x%x GFF " + "NameServer Rsp Data: (init) " + "x%x x%x\n", phba->brd_no, + vport->vpi, did, fbits, + vport->fc_rscn_id_cnt); + goto out; + } + } + } + else { + lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, + "%d (%d):0267 NameServer GFF Rsp" + " x%x Error (%d %d) Data: x%x x%x\n", + phba->brd_no, vport->vpi, did, + irsp->ulpStatus, irsp->un.ulpWord[4], + vport->fc_flag, vport->fc_rscn_id_cnt) + } + + /* This is a target port, unregistered port, or the GFF_ID failed */ + ndlp = lpfc_setup_disc_node(vport, did); + if (ndlp) { + lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, + "%d (%d):0242 Process x%x GFF " + "NameServer Rsp Data: x%x x%x x%x\n", + phba->brd_no, vport->vpi, + did, ndlp->nlp_flag, vport->fc_flag, + vport->fc_rscn_id_cnt); + } else { + lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, + "%d (%d):0243 Skip x%x GFF " + "NameServer Rsp Data: x%x x%x\n", + phba->brd_no, vport->vpi, did, + vport->fc_flag, vport->fc_rscn_id_cnt); + } +out: + /* Link up / RSCN discovery */ + if (vport->num_disc_nodes) + vport->num_disc_nodes--; + if (vport->num_disc_nodes == 0) { + /* + * The driver has cycled through all Nports in the RSCN payload. + * Complete the handling by cleaning up and marking the + * current driver state. 
+ */ + if (vport->port_state >= LPFC_DISC_AUTH) { + if (vport->fc_flag & FC_RSCN_MODE) { + lpfc_els_flush_rscn(vport); + spin_lock_irq(shost->host_lock); + vport->fc_flag |= FC_RSCN_MODE; /* RSCN still */ + spin_unlock_irq(shost->host_lock); + } + else + lpfc_els_flush_rscn(vport); + } + lpfc_disc_start(vport); + } + lpfc_ct_free_iocb(phba, cmdiocb); + return; +} + + static void -lpfc_cmpl_ct_cmd_rft_id(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, - struct lpfc_iocbq * rspiocb) +lpfc_cmpl_ct_cmd_rft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) { - struct lpfc_sli *psli; - struct lpfc_dmabuf *bmp; + struct lpfc_vport *vport = cmdiocb->vport; struct lpfc_dmabuf *inp; struct lpfc_dmabuf *outp; IOCB_t *irsp; struct lpfc_sli_ct_request *CTrsp; + int cmdcode, rc; + uint8_t retry; + uint32_t latt; - psli = &phba->sli; /* we pass cmdiocb to state machine which needs rspiocb as well */ cmdiocb->context_un.rsp_iocb = rspiocb; inp = (struct lpfc_dmabuf *) cmdiocb->context1; outp = (struct lpfc_dmabuf *) cmdiocb->context2; - bmp = (struct lpfc_dmabuf *) cmdiocb->context3; irsp = &rspiocb->iocb; + cmdcode = be16_to_cpu(((struct lpfc_sli_ct_request *) inp->virt)-> + CommandResponse.bits.CmdRsp); CTrsp = (struct lpfc_sli_ct_request *) outp->virt; + latt = lpfc_els_chk_latt(vport); + /* RFT request completes status <ulpStatus> CmdRsp <CmdRsp> */ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, - "%d:0209 RFT request completes ulpStatus x%x " - "CmdRsp x%x\n", phba->brd_no, irsp->ulpStatus, - CTrsp->CommandResponse.bits.CmdRsp); + "%d (%d):0209 RFT request completes, latt %d, " + "ulpStatus x%x CmdRsp x%x, Context x%x, Tag x%x\n", + phba->brd_no, vport->vpi, latt, irsp->ulpStatus, + CTrsp->CommandResponse.bits.CmdRsp, + cmdiocb->iocb.ulpContext, cmdiocb->iocb.ulpIoTag); - lpfc_free_ct_rsp(phba, outp); - lpfc_mbuf_free(phba, inp->virt, inp->phys); - lpfc_mbuf_free(phba, bmp->virt, bmp->phys); - kfree(inp); - kfree(bmp); - spin_lock_irq(phba->host->host_lock); - lpfc_sli_release_iocbq(phba, cmdiocb); - spin_unlock_irq(phba->host->host_lock); + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, + "CT cmd cmpl: status:x%x/x%x cmd:x%x", + irsp->ulpStatus, irsp->un.ulpWord[4], cmdcode); + + if (irsp->ulpStatus) { + lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, + "%d (%d):0268 NS cmd %x Error (%d %d)\n", + phba->brd_no, vport->vpi, cmdcode, + irsp->ulpStatus, irsp->un.ulpWord[4]); + + if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && + ((irsp->un.ulpWord[4] == IOERR_SLI_DOWN) || + (irsp->un.ulpWord[4] == IOERR_SLI_ABORTED))) + goto out; + + retry = cmdiocb->retry; + if (retry >= LPFC_MAX_NS_RETRY) + goto out; + + retry++; + lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, + "%d (%d):0216 Retrying NS cmd %x\n", + phba->brd_no, vport->vpi, cmdcode); + rc = lpfc_ns_cmd(vport, cmdcode, retry, 0); + if (rc == 0) + goto out; + } + +out: + lpfc_ct_free_iocb(phba, cmdiocb); return; } static void -lpfc_cmpl_ct_cmd_rnn_id(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, - struct lpfc_iocbq * rspiocb) +lpfc_cmpl_ct_cmd_rnn_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) { lpfc_cmpl_ct_cmd_rft_id(phba, cmdiocb, rspiocb); return; } static void -lpfc_cmpl_ct_cmd_rsnn_nn(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, - struct lpfc_iocbq * rspiocb) +lpfc_cmpl_ct_cmd_rspn_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) { lpfc_cmpl_ct_cmd_rft_id(phba, cmdiocb, rspiocb); return; } static void 
-lpfc_cmpl_ct_cmd_rff_id(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, - struct lpfc_iocbq * rspiocb) +lpfc_cmpl_ct_cmd_rsnn_nn(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) { lpfc_cmpl_ct_cmd_rft_id(phba, cmdiocb, rspiocb); return; } -void -lpfc_get_hba_sym_node_name(struct lpfc_hba * phba, uint8_t * symbp) +static void +lpfc_cmpl_ct_cmd_rff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) { - char fwrev[16]; + IOCB_t *irsp = &rspiocb->iocb; + struct lpfc_vport *vport = cmdiocb->vport; - lpfc_decode_firmware_rev(phba, fwrev, 0); + if (irsp->ulpStatus != IOSTAT_SUCCESS) + vport->fc_flag |= FC_RFF_NOT_SUPPORTED; - sprintf(symbp, "Emulex %s FV%s DV%s", phba->ModelName, - fwrev, lpfc_release_version); + lpfc_cmpl_ct_cmd_rft_id(phba, cmdiocb, rspiocb); return; } +int +lpfc_vport_symbolic_port_name(struct lpfc_vport *vport, char *symbol, + size_t size) +{ + int n; + uint8_t *wwn = vport->phba->wwpn; + + n = snprintf(symbol, size, + "Emulex PPN-%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x", + wwn[0], wwn[1], wwn[2], wwn[3], + wwn[4], wwn[5], wwn[6], wwn[7]); + + if (vport->port_type == LPFC_PHYSICAL_PORT) + return n; + + if (n < size) + n += snprintf(symbol + n, size - n, " VPort-%d", vport->vpi); + + if (n < size && vport->vname) + n += snprintf(symbol + n, size - n, " VName-%s", vport->vname); + return n; +} + +int +lpfc_vport_symbolic_node_name(struct lpfc_vport *vport, char *symbol, + size_t size) +{ + char fwrev[16]; + int n; + + lpfc_decode_firmware_rev(vport->phba, fwrev, 0); + + n = snprintf(symbol, size, "Emulex %s FV%s DV%s", + vport->phba->ModelName, fwrev, lpfc_release_version); + return n; +} + /* * lpfc_ns_cmd * Description: @@ -585,55 +907,76 @@ lpfc_get_hba_sym_node_name(struct lpfc_hba * phba, uint8_t * symbp) * LI_CTNS_RFT_ID */ int -lpfc_ns_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode) +lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode, + uint8_t retry, uint32_t context) { + struct lpfc_nodelist * ndlp; + struct lpfc_hba *phba = vport->phba; struct lpfc_dmabuf *mp, *bmp; struct lpfc_sli_ct_request *CtReq; struct ulp_bde64 *bpl; void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *) = NULL; uint32_t rsp_size = 1024; + size_t size; + int rc = 0; + + ndlp = lpfc_findnode_did(vport, NameServer_DID); + if (ndlp == NULL || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) { + rc=1; + goto ns_cmd_exit; + } /* fill in BDEs for command */ /* Allocate buffer for command payload */ mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); - if (!mp) + if (!mp) { + rc=2; goto ns_cmd_exit; + } INIT_LIST_HEAD(&mp->list); mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys)); - if (!mp->virt) + if (!mp->virt) { + rc=3; goto ns_cmd_free_mp; + } /* Allocate buffer for Buffer ptr list */ bmp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); - if (!bmp) + if (!bmp) { + rc=4; goto ns_cmd_free_mpvirt; + } INIT_LIST_HEAD(&bmp->list); bmp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(bmp->phys)); - if (!bmp->virt) + if (!bmp->virt) { + rc=5; goto ns_cmd_free_bmp; + } /* NameServer Req */ - lpfc_printf_log(phba, - KERN_INFO, - LOG_DISCOVERY, - "%d:0236 NameServer Req Data: x%x x%x x%x\n", - phba->brd_no, cmdcode, phba->fc_flag, - phba->fc_rscn_id_cnt); + lpfc_printf_log(phba, KERN_INFO ,LOG_DISCOVERY, + "%d (%d):0236 NameServer Req Data: x%x x%x x%x\n", + phba->brd_no, vport->vpi, cmdcode, vport->fc_flag, + vport->fc_rscn_id_cnt); bpl = (struct ulp_bde64 *) bmp->virt; memset(bpl, 0, sizeof(struct 
ulp_bde64)); - bpl->addrHigh = le32_to_cpu( putPaddrHigh(mp->phys) ); - bpl->addrLow = le32_to_cpu( putPaddrLow(mp->phys) ); + bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys) ); + bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys) ); bpl->tus.f.bdeFlags = 0; if (cmdcode == SLI_CTNS_GID_FT) bpl->tus.f.bdeSize = GID_REQUEST_SZ; + else if (cmdcode == SLI_CTNS_GFF_ID) + bpl->tus.f.bdeSize = GFF_REQUEST_SZ; else if (cmdcode == SLI_CTNS_RFT_ID) bpl->tus.f.bdeSize = RFT_REQUEST_SZ; else if (cmdcode == SLI_CTNS_RNN_ID) bpl->tus.f.bdeSize = RNN_REQUEST_SZ; + else if (cmdcode == SLI_CTNS_RSPN_ID) + bpl->tus.f.bdeSize = RSPN_REQUEST_SZ; else if (cmdcode == SLI_CTNS_RSNN_NN) bpl->tus.f.bdeSize = RSNN_REQUEST_SZ; else if (cmdcode == SLI_CTNS_RFF_ID) @@ -654,56 +997,78 @@ lpfc_ns_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode) CtReq->CommandResponse.bits.CmdRsp = be16_to_cpu(SLI_CTNS_GID_FT); CtReq->un.gid.Fc4Type = SLI_CTPT_FCP; - if (phba->hba_state < LPFC_HBA_READY) - phba->hba_state = LPFC_NS_QRY; - lpfc_set_disctmo(phba); + if (vport->port_state < LPFC_NS_QRY) + vport->port_state = LPFC_NS_QRY; + lpfc_set_disctmo(vport); cmpl = lpfc_cmpl_ct_cmd_gid_ft; rsp_size = FC_MAX_NS_RSP; break; + case SLI_CTNS_GFF_ID: + CtReq->CommandResponse.bits.CmdRsp = + be16_to_cpu(SLI_CTNS_GFF_ID); + CtReq->un.gff.PortId = be32_to_cpu(context); + cmpl = lpfc_cmpl_ct_cmd_gff_id; + break; + case SLI_CTNS_RFT_ID: CtReq->CommandResponse.bits.CmdRsp = be16_to_cpu(SLI_CTNS_RFT_ID); - CtReq->un.rft.PortId = be32_to_cpu(phba->fc_myDID); + CtReq->un.rft.PortId = be32_to_cpu(vport->fc_myDID); CtReq->un.rft.fcpReg = 1; cmpl = lpfc_cmpl_ct_cmd_rft_id; break; - case SLI_CTNS_RFF_ID: - CtReq->CommandResponse.bits.CmdRsp = - be16_to_cpu(SLI_CTNS_RFF_ID); - CtReq->un.rff.PortId = be32_to_cpu(phba->fc_myDID); - CtReq->un.rff.feature_res = 0; - CtReq->un.rff.feature_tgt = 0; - CtReq->un.rff.type_code = FC_FCP_DATA; - CtReq->un.rff.feature_init = 1; - cmpl = lpfc_cmpl_ct_cmd_rff_id; - break; - case SLI_CTNS_RNN_ID: CtReq->CommandResponse.bits.CmdRsp = be16_to_cpu(SLI_CTNS_RNN_ID); - CtReq->un.rnn.PortId = be32_to_cpu(phba->fc_myDID); - memcpy(CtReq->un.rnn.wwnn, &phba->fc_nodename, + CtReq->un.rnn.PortId = be32_to_cpu(vport->fc_myDID); + memcpy(CtReq->un.rnn.wwnn, &vport->fc_nodename, sizeof (struct lpfc_name)); cmpl = lpfc_cmpl_ct_cmd_rnn_id; break; + case SLI_CTNS_RSPN_ID: + CtReq->CommandResponse.bits.CmdRsp = + be16_to_cpu(SLI_CTNS_RSPN_ID); + CtReq->un.rspn.PortId = be32_to_cpu(vport->fc_myDID); + size = sizeof(CtReq->un.rspn.symbname); + CtReq->un.rspn.len = + lpfc_vport_symbolic_port_name(vport, + CtReq->un.rspn.symbname, size); + cmpl = lpfc_cmpl_ct_cmd_rspn_id; + break; case SLI_CTNS_RSNN_NN: CtReq->CommandResponse.bits.CmdRsp = be16_to_cpu(SLI_CTNS_RSNN_NN); - memcpy(CtReq->un.rsnn.wwnn, &phba->fc_nodename, + memcpy(CtReq->un.rsnn.wwnn, &vport->fc_nodename, sizeof (struct lpfc_name)); - lpfc_get_hba_sym_node_name(phba, CtReq->un.rsnn.symbname); - CtReq->un.rsnn.len = strlen(CtReq->un.rsnn.symbname); + size = sizeof(CtReq->un.rsnn.symbname); + CtReq->un.rsnn.len = + lpfc_vport_symbolic_node_name(vport, + CtReq->un.rsnn.symbname, size); cmpl = lpfc_cmpl_ct_cmd_rsnn_nn; break; + case SLI_CTNS_RFF_ID: + vport->fc_flag &= ~FC_RFF_NOT_SUPPORTED; + CtReq->CommandResponse.bits.CmdRsp = + be16_to_cpu(SLI_CTNS_RFF_ID); + CtReq->un.rff.PortId = be32_to_cpu(vport->fc_myDID);; + CtReq->un.rff.fbits = FC4_FEATURE_INIT; + CtReq->un.rff.type_code = FC_FCP_DATA; + cmpl = lpfc_cmpl_ct_cmd_rff_id; + break; } - if 
(!lpfc_ct_cmd(phba, mp, bmp, ndlp, cmpl, rsp_size)) + if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, rsp_size, retry)) { /* On success, The cmpl function will free the buffers */ + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, + "Issue CT cmd: cmd:x%x did:x%x", + cmdcode, ndlp->nlp_DID, 0); return 0; + } + rc=6; lpfc_mbuf_free(phba, bmp->virt, bmp->phys); ns_cmd_free_bmp: kfree(bmp); @@ -712,14 +1077,17 @@ ns_cmd_free_mpvirt: ns_cmd_free_mp: kfree(mp); ns_cmd_exit: + lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, + "%d (%d):0266 Issue NameServer Req x%x err %d Data: x%x x%x\n", + phba->brd_no, vport->vpi, cmdcode, rc, vport->fc_flag, + vport->fc_rscn_id_cnt); return 1; } static void -lpfc_cmpl_ct_cmd_fdmi(struct lpfc_hba * phba, - struct lpfc_iocbq * cmdiocb, struct lpfc_iocbq * rspiocb) +lpfc_cmpl_ct_cmd_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq * rspiocb) { - struct lpfc_dmabuf *bmp = cmdiocb->context3; struct lpfc_dmabuf *inp = cmdiocb->context1; struct lpfc_dmabuf *outp = cmdiocb->context2; struct lpfc_sli_ct_request *CTrsp = outp->virt; @@ -727,48 +1095,60 @@ lpfc_cmpl_ct_cmd_fdmi(struct lpfc_hba * phba, struct lpfc_nodelist *ndlp; uint16_t fdmi_cmd = CTcmd->CommandResponse.bits.CmdRsp; uint16_t fdmi_rsp = CTrsp->CommandResponse.bits.CmdRsp; + struct lpfc_vport *vport = cmdiocb->vport; + IOCB_t *irsp = &rspiocb->iocb; + uint32_t latt; + + latt = lpfc_els_chk_latt(vport); + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, + "FDMI cmpl: status:x%x/x%x latt:%d", + irsp->ulpStatus, irsp->un.ulpWord[4], latt); + + if (latt || irsp->ulpStatus) { + lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, + "%d (%d):0229 FDMI cmd %04x failed, latt = %d " + "ulpStatus: x%x, rid x%x\n", + phba->brd_no, vport->vpi, + be16_to_cpu(fdmi_cmd), latt, irsp->ulpStatus, + irsp->un.ulpWord[4]); + lpfc_ct_free_iocb(phba, cmdiocb); + return; + } - ndlp = lpfc_findnode_did(phba, FDMI_DID); + ndlp = lpfc_findnode_did(vport, FDMI_DID); if (fdmi_rsp == be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) { /* FDMI rsp failed */ - lpfc_printf_log(phba, - KERN_INFO, - LOG_DISCOVERY, - "%d:0220 FDMI rsp failed Data: x%x\n", - phba->brd_no, - be16_to_cpu(fdmi_cmd)); + lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, + "%d (%d):0220 FDMI rsp failed Data: x%x\n", + phba->brd_no, vport->vpi, + be16_to_cpu(fdmi_cmd)); } switch (be16_to_cpu(fdmi_cmd)) { case SLI_MGMT_RHBA: - lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_RPA); + lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPA); break; case SLI_MGMT_RPA: break; case SLI_MGMT_DHBA: - lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_DPRT); + lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT); break; case SLI_MGMT_DPRT: - lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_RHBA); + lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RHBA); break; } - - lpfc_free_ct_rsp(phba, outp); - lpfc_mbuf_free(phba, inp->virt, inp->phys); - lpfc_mbuf_free(phba, bmp->virt, bmp->phys); - kfree(inp); - kfree(bmp); - spin_lock_irq(phba->host->host_lock); - lpfc_sli_release_iocbq(phba, cmdiocb); - spin_unlock_irq(phba->host->host_lock); + lpfc_ct_free_iocb(phba, cmdiocb); return; } + int -lpfc_fdmi_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode) +lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode) { + struct lpfc_hba *phba = vport->phba; struct lpfc_dmabuf *mp, *bmp; struct lpfc_sli_ct_request *CtReq; struct ulp_bde64 *bpl; @@ -805,12 +1185,10 @@ lpfc_fdmi_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode) INIT_LIST_HEAD(&bmp->list); /* FDMI request */ - lpfc_printf_log(phba, - KERN_INFO, 
- LOG_DISCOVERY, - "%d:0218 FDMI Request Data: x%x x%x x%x\n", - phba->brd_no, - phba->fc_flag, phba->hba_state, cmdcode); + lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, + "%d (%d):0218 FDMI Request Data: x%x x%x x%x\n", + phba->brd_no, vport->vpi, vport->fc_flag, + vport->port_state, cmdcode); CtReq = (struct lpfc_sli_ct_request *) mp->virt; @@ -833,11 +1211,11 @@ lpfc_fdmi_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode) be16_to_cpu(SLI_MGMT_RHBA); CtReq->CommandResponse.bits.Size = 0; rh = (REG_HBA *) & CtReq->un.PortID; - memcpy(&rh->hi.PortName, &phba->fc_sparam.portName, + memcpy(&rh->hi.PortName, &vport->fc_sparam.portName, sizeof (struct lpfc_name)); /* One entry (port) per adapter */ rh->rpl.EntryCnt = be32_to_cpu(1); - memcpy(&rh->rpl.pe, &phba->fc_sparam.portName, + memcpy(&rh->rpl.pe, &vport->fc_sparam.portName, sizeof (struct lpfc_name)); /* point to the HBA attribute block */ @@ -853,7 +1231,7 @@ lpfc_fdmi_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode) ae->ad.bits.AttrType = be16_to_cpu(NODE_NAME); ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + sizeof (struct lpfc_name)); - memcpy(&ae->un.NodeName, &phba->fc_sparam.nodeName, + memcpy(&ae->un.NodeName, &vport->fc_sparam.nodeName, sizeof (struct lpfc_name)); ab->EntryCnt++; size += FOURBYTES + sizeof (struct lpfc_name); @@ -991,7 +1369,7 @@ lpfc_fdmi_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode) pab = (REG_PORT_ATTRIBUTE *) & CtReq->un.PortID; size = sizeof (struct lpfc_name) + FOURBYTES; memcpy((uint8_t *) & pab->PortName, - (uint8_t *) & phba->fc_sparam.portName, + (uint8_t *) & vport->fc_sparam.portName, sizeof (struct lpfc_name)); pab->ab.EntryCnt = 0; @@ -1053,7 +1431,7 @@ lpfc_fdmi_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode) ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) pab + size); ae->ad.bits.AttrType = be16_to_cpu(MAX_FRAME_SIZE); ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 4); - hsp = (struct serv_parm *) & phba->fc_sparam; + hsp = (struct serv_parm *) & vport->fc_sparam; ae->un.MaxFrameSize = (((uint32_t) hsp->cmn. bbRcvSizeMsb) << 8) | (uint32_t) hsp->cmn. 
@@ -1097,7 +1475,7 @@ lpfc_fdmi_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode) CtReq->CommandResponse.bits.Size = 0; pe = (PORT_ENTRY *) & CtReq->un.PortID; memcpy((uint8_t *) & pe->PortName, - (uint8_t *) & phba->fc_sparam.portName, + (uint8_t *) & vport->fc_sparam.portName, sizeof (struct lpfc_name)); size = GID_REQUEST_SZ - 4 + sizeof (struct lpfc_name); break; @@ -1107,22 +1485,22 @@ lpfc_fdmi_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode) CtReq->CommandResponse.bits.Size = 0; pe = (PORT_ENTRY *) & CtReq->un.PortID; memcpy((uint8_t *) & pe->PortName, - (uint8_t *) & phba->fc_sparam.portName, + (uint8_t *) & vport->fc_sparam.portName, sizeof (struct lpfc_name)); size = GID_REQUEST_SZ - 4 + sizeof (struct lpfc_name); break; } bpl = (struct ulp_bde64 *) bmp->virt; - bpl->addrHigh = le32_to_cpu( putPaddrHigh(mp->phys) ); - bpl->addrLow = le32_to_cpu( putPaddrLow(mp->phys) ); + bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys) ); + bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys) ); bpl->tus.f.bdeFlags = 0; bpl->tus.f.bdeSize = size; bpl->tus.w = le32_to_cpu(bpl->tus.w); cmpl = lpfc_cmpl_ct_cmd_fdmi; - if (!lpfc_ct_cmd(phba, mp, bmp, ndlp, cmpl, FC_MAX_NS_RSP)) + if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, FC_MAX_NS_RSP, 0)) return 0; lpfc_mbuf_free(phba, bmp->virt, bmp->phys); @@ -1134,49 +1512,50 @@ fdmi_cmd_free_mp: kfree(mp); fdmi_cmd_exit: /* Issue FDMI request failed */ - lpfc_printf_log(phba, - KERN_INFO, - LOG_DISCOVERY, - "%d:0244 Issue FDMI request failed Data: x%x\n", - phba->brd_no, - cmdcode); + lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, + "%d (%d):0244 Issue FDMI request failed Data: x%x\n", + phba->brd_no, vport->vpi, cmdcode); return 1; } void lpfc_fdmi_tmo(unsigned long ptr) { - struct lpfc_hba *phba = (struct lpfc_hba *)ptr; + struct lpfc_vport *vport = (struct lpfc_vport *)ptr; + struct lpfc_hba *phba = vport->phba; unsigned long iflag; - spin_lock_irqsave(phba->host->host_lock, iflag); - if (!(phba->work_hba_events & WORKER_FDMI_TMO)) { - phba->work_hba_events |= WORKER_FDMI_TMO; + spin_lock_irqsave(&vport->work_port_lock, iflag); + if (!(vport->work_port_events & WORKER_FDMI_TMO)) { + vport->work_port_events |= WORKER_FDMI_TMO; + spin_unlock_irqrestore(&vport->work_port_lock, iflag); + + spin_lock_irqsave(&phba->hbalock, iflag); if (phba->work_wait) - wake_up(phba->work_wait); + lpfc_worker_wake_up(phba); + spin_unlock_irqrestore(&phba->hbalock, iflag); } - spin_unlock_irqrestore(phba->host->host_lock,iflag); + else + spin_unlock_irqrestore(&vport->work_port_lock, iflag); } void -lpfc_fdmi_tmo_handler(struct lpfc_hba *phba) +lpfc_fdmi_timeout_handler(struct lpfc_vport *vport) { struct lpfc_nodelist *ndlp; - ndlp = lpfc_findnode_did(phba, FDMI_DID); + ndlp = lpfc_findnode_did(vport, FDMI_DID); if (ndlp) { - if (init_utsname()->nodename[0] != '\0') { - lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_DHBA); - } else { - mod_timer(&phba->fc_fdmitmo, jiffies + HZ * 60); - } + if (init_utsname()->nodename[0] != '\0') + lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA); + else + mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60); } return; } - void -lpfc_decode_firmware_rev(struct lpfc_hba * phba, char *fwrevision, int flag) +lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag) { struct lpfc_sli *psli = &phba->sli; lpfc_vpd_t *vp = &phba->vpd; diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c new file mode 100644 index 000000000000..673cfe11cc2b --- /dev/null +++ 
b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -0,0 +1,508 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2007 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. * + * www.emulex.com * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. * + *******************************************************************/ + +#include <linux/blkdev.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/idr.h> +#include <linux/interrupt.h> +#include <linux/kthread.h> +#include <linux/pci.h> +#include <linux/spinlock.h> +#include <linux/ctype.h> +#include <linux/version.h> + +#include <scsi/scsi.h> +#include <scsi/scsi_device.h> +#include <scsi/scsi_host.h> +#include <scsi/scsi_transport_fc.h> + +#include "lpfc_hw.h" +#include "lpfc_sli.h" +#include "lpfc_disc.h" +#include "lpfc_scsi.h" +#include "lpfc.h" +#include "lpfc_logmsg.h" +#include "lpfc_crtn.h" +#include "lpfc_vport.h" +#include "lpfc_version.h" +#include "lpfc_vport.h" +#include "lpfc_debugfs.h" + +#ifdef CONFIG_LPFC_DEBUG_FS +/* debugfs interface + * + * To access this interface the user should: + * # mkdir /debug + * # mount -t debugfs none /debug + * + * The lpfc debugfs directory hierachy is: + * lpfc/lpfcX/vportY + * where X is the lpfc hba unique_id + * where Y is the vport VPI on that hba + * + * Debugging services available per vport: + * discovery_trace + * This is an ACSII readable file that contains a trace of the last + * lpfc_debugfs_max_disc_trc events that happened on a specific vport. + * See lpfc_debugfs.h for different categories of + * discovery events. To enable the discovery trace, the following + * module parameters must be set: + * lpfc_debugfs_enable=1 Turns on lpfc debugfs filesystem support + * lpfc_debugfs_max_disc_trc=X Where X is the event trace depth for + * EACH vport. X MUST also be a power of 2. + * lpfc_debugfs_mask_disc_trc=Y Where Y is an event mask as defined in + * lpfc_debugfs.h . 
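The discovery trace described in the comment above is a fixed-size ring: an ever-increasing atomic counter is masked with (depth - 1) to pick a slot, which is why lpfc_debugfs_max_disc_trc must be a power of two. A minimal stand-alone sketch of that indexing scheme, with hypothetical names in place of the driver's structures:

#include <stdatomic.h>

#define TRC_DEPTH 256                      /* must be a power of two */

struct trc_entry {
        const char *fmt;
        unsigned long data1, data2, data3;
};

static struct trc_entry trc_ring[TRC_DEPTH];
static atomic_uint trc_cnt;

static void trc_record(const char *fmt, unsigned long d1,
                       unsigned long d2, unsigned long d3)
{
        /* (cnt & (DEPTH - 1)) equals (cnt % DEPTH) only for DEPTH == 2^n */
        unsigned int idx = atomic_fetch_add(&trc_cnt, 1) & (TRC_DEPTH - 1);

        trc_ring[idx] = (struct trc_entry){ fmt, d1, d2, d3 };
}

Old entries are simply overwritten, so a dump walks from the oldest live slot to the newest, much as lpfc_debugfs_disc_trc_data() does with its two loops.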
+ */ +static int lpfc_debugfs_enable = 0; +module_param(lpfc_debugfs_enable, int, 0); +MODULE_PARM_DESC(lpfc_debugfs_enable, "Enable debugfs services"); + +static int lpfc_debugfs_max_disc_trc = 0; /* This MUST be a power of 2 */ +module_param(lpfc_debugfs_max_disc_trc, int, 0); +MODULE_PARM_DESC(lpfc_debugfs_max_disc_trc, + "Set debugfs discovery trace depth"); + +static int lpfc_debugfs_mask_disc_trc = 0; +module_param(lpfc_debugfs_mask_disc_trc, int, 0); +MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc, + "Set debugfs discovery trace mask"); + +#include <linux/debugfs.h> + +/* size of discovery_trace output line */ +#define LPFC_DISC_TRC_ENTRY_SIZE 80 + +/* nodelist output buffer size */ +#define LPFC_NODELIST_SIZE 8192 +#define LPFC_NODELIST_ENTRY_SIZE 120 + +struct lpfc_debug { + char *buffer; + int len; +}; + +atomic_t lpfc_debugfs_disc_trc_cnt = ATOMIC_INIT(0); +unsigned long lpfc_debugfs_start_time = 0L; + +static int +lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size) +{ + int i, index, len, enable; + uint32_t ms; + struct lpfc_disc_trc *dtp; + char buffer[80]; + + + enable = lpfc_debugfs_enable; + lpfc_debugfs_enable = 0; + + len = 0; + index = (atomic_read(&vport->disc_trc_cnt) + 1) & + (lpfc_debugfs_max_disc_trc - 1); + for (i = index; i < lpfc_debugfs_max_disc_trc; i++) { + dtp = vport->disc_trc + i; + if (!dtp->fmt) + continue; + ms = jiffies_to_msecs(dtp->jif - lpfc_debugfs_start_time); + snprintf(buffer, 80, "%010d:%010d ms:%s\n", + dtp->seq_cnt, ms, dtp->fmt); + len += snprintf(buf+len, size-len, buffer, + dtp->data1, dtp->data2, dtp->data3); + } + for (i = 0; i < index; i++) { + dtp = vport->disc_trc + i; + if (!dtp->fmt) + continue; + ms = jiffies_to_msecs(dtp->jif - lpfc_debugfs_start_time); + snprintf(buffer, 80, "%010d:%010d ms:%s\n", + dtp->seq_cnt, ms, dtp->fmt); + len += snprintf(buf+len, size-len, buffer, + dtp->data1, dtp->data2, dtp->data3); + } + + lpfc_debugfs_enable = enable; + return len; +} + +static int +lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size) +{ + int len = 0; + int cnt; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_nodelist *ndlp; + unsigned char *statep, *name; + + cnt = (LPFC_NODELIST_SIZE / LPFC_NODELIST_ENTRY_SIZE); + + spin_lock_irq(shost->host_lock); + list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { + if (!cnt) { + len += snprintf(buf+len, size-len, + "Missing Nodelist Entries\n"); + break; + } + cnt--; + switch (ndlp->nlp_state) { + case NLP_STE_UNUSED_NODE: + statep = "UNUSED"; + break; + case NLP_STE_PLOGI_ISSUE: + statep = "PLOGI "; + break; + case NLP_STE_ADISC_ISSUE: + statep = "ADISC "; + break; + case NLP_STE_REG_LOGIN_ISSUE: + statep = "REGLOG"; + break; + case NLP_STE_PRLI_ISSUE: + statep = "PRLI "; + break; + case NLP_STE_UNMAPPED_NODE: + statep = "UNMAP "; + break; + case NLP_STE_MAPPED_NODE: + statep = "MAPPED"; + break; + case NLP_STE_NPR_NODE: + statep = "NPR "; + break; + default: + statep = "UNKNOWN"; + } + len += snprintf(buf+len, size-len, "%s DID:x%06x ", + statep, ndlp->nlp_DID); + name = (unsigned char *)&ndlp->nlp_portname; + len += snprintf(buf+len, size-len, + "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x ", + *name, *(name+1), *(name+2), *(name+3), + *(name+4), *(name+5), *(name+6), *(name+7)); + name = (unsigned char *)&ndlp->nlp_nodename; + len += snprintf(buf+len, size-len, + "WWNN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x ", + *name, *(name+1), *(name+2), *(name+3), + *(name+4), *(name+5), *(name+6), *(name+7)); + len += 
snprintf(buf+len, size-len, "RPI:%03d flag:x%08x ", + ndlp->nlp_rpi, ndlp->nlp_flag); + if (!ndlp->nlp_type) + len += snprintf(buf+len, size-len, "UNKNOWN_TYPE"); + if (ndlp->nlp_type & NLP_FC_NODE) + len += snprintf(buf+len, size-len, "FC_NODE "); + if (ndlp->nlp_type & NLP_FABRIC) + len += snprintf(buf+len, size-len, "FABRIC "); + if (ndlp->nlp_type & NLP_FCP_TARGET) + len += snprintf(buf+len, size-len, "FCP_TGT sid:%d ", + ndlp->nlp_sid); + if (ndlp->nlp_type & NLP_FCP_INITIATOR) + len += snprintf(buf+len, size-len, "FCP_INITIATOR"); + len += snprintf(buf+len, size-len, "\n"); + } + spin_unlock_irq(shost->host_lock); + return len; +} +#endif + + +inline void +lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt, + uint32_t data1, uint32_t data2, uint32_t data3) +{ +#ifdef CONFIG_LPFC_DEBUG_FS + struct lpfc_disc_trc *dtp; + int index; + + if (!(lpfc_debugfs_mask_disc_trc & mask)) + return; + + if (!lpfc_debugfs_enable || !lpfc_debugfs_max_disc_trc || + !vport || !vport->disc_trc) + return; + + index = atomic_inc_return(&vport->disc_trc_cnt) & + (lpfc_debugfs_max_disc_trc - 1); + dtp = vport->disc_trc + index; + dtp->fmt = fmt; + dtp->data1 = data1; + dtp->data2 = data2; + dtp->data3 = data3; + dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_disc_trc_cnt); + dtp->jif = jiffies; +#endif + return; +} + +#ifdef CONFIG_LPFC_DEBUG_FS +static int +lpfc_debugfs_disc_trc_open(struct inode *inode, struct file *file) +{ + struct lpfc_vport *vport = inode->i_private; + struct lpfc_debug *debug; + int size; + int rc = -ENOMEM; + + if (!lpfc_debugfs_max_disc_trc) { + rc = -ENOSPC; + goto out; + } + + debug = kmalloc(sizeof(*debug), GFP_KERNEL); + if (!debug) + goto out; + + /* Round to page boundry */ + size = (lpfc_debugfs_max_disc_trc * LPFC_DISC_TRC_ENTRY_SIZE); + size = PAGE_ALIGN(size); + + debug->buffer = kmalloc(size, GFP_KERNEL); + if (!debug->buffer) { + kfree(debug); + goto out; + } + + debug->len = lpfc_debugfs_disc_trc_data(vport, debug->buffer, size); + file->private_data = debug; + + rc = 0; +out: + return rc; +} + +static int +lpfc_debugfs_nodelist_open(struct inode *inode, struct file *file) +{ + struct lpfc_vport *vport = inode->i_private; + struct lpfc_debug *debug; + int rc = -ENOMEM; + + debug = kmalloc(sizeof(*debug), GFP_KERNEL); + if (!debug) + goto out; + + /* Round to page boundry */ + debug->buffer = kmalloc(LPFC_NODELIST_SIZE, GFP_KERNEL); + if (!debug->buffer) { + kfree(debug); + goto out; + } + + debug->len = lpfc_debugfs_nodelist_data(vport, debug->buffer, + LPFC_NODELIST_SIZE); + file->private_data = debug; + + rc = 0; +out: + return rc; +} + +static loff_t +lpfc_debugfs_lseek(struct file *file, loff_t off, int whence) +{ + struct lpfc_debug *debug; + loff_t pos = -1; + + debug = file->private_data; + + switch (whence) { + case 0: + pos = off; + break; + case 1: + pos = file->f_pos + off; + break; + case 2: + pos = debug->len - off; + } + return (pos < 0 || pos > debug->len) ? 
-EINVAL : (file->f_pos = pos); +} + +static ssize_t +lpfc_debugfs_read(struct file *file, char __user *buf, + size_t nbytes, loff_t *ppos) +{ + struct lpfc_debug *debug = file->private_data; + return simple_read_from_buffer(buf, nbytes, ppos, debug->buffer, + debug->len); +} + +static int +lpfc_debugfs_release(struct inode *inode, struct file *file) +{ + struct lpfc_debug *debug = file->private_data; + + kfree(debug->buffer); + kfree(debug); + + return 0; +} + +#undef lpfc_debugfs_op_disc_trc +static struct file_operations lpfc_debugfs_op_disc_trc = { + .owner = THIS_MODULE, + .open = lpfc_debugfs_disc_trc_open, + .llseek = lpfc_debugfs_lseek, + .read = lpfc_debugfs_read, + .release = lpfc_debugfs_release, +}; + +#undef lpfc_debugfs_op_nodelist +static struct file_operations lpfc_debugfs_op_nodelist = { + .owner = THIS_MODULE, + .open = lpfc_debugfs_nodelist_open, + .llseek = lpfc_debugfs_lseek, + .read = lpfc_debugfs_read, + .release = lpfc_debugfs_release, +}; + +static struct dentry *lpfc_debugfs_root = NULL; +static atomic_t lpfc_debugfs_hba_count; +#endif + +inline void +lpfc_debugfs_initialize(struct lpfc_vport *vport) +{ +#ifdef CONFIG_LPFC_DEBUG_FS + struct lpfc_hba *phba = vport->phba; + char name[64]; + uint32_t num, i; + + if (!lpfc_debugfs_enable) + return; + + if (lpfc_debugfs_max_disc_trc) { + num = lpfc_debugfs_max_disc_trc - 1; + if (num & lpfc_debugfs_max_disc_trc) { + /* Change to be a power of 2 */ + num = lpfc_debugfs_max_disc_trc; + i = 0; + while (num > 1) { + num = num >> 1; + i++; + } + lpfc_debugfs_max_disc_trc = (1 << i); + printk(KERN_ERR + "lpfc_debugfs_max_disc_trc changed to %d\n", + lpfc_debugfs_max_disc_trc); + } + } + + if (!lpfc_debugfs_root) { + lpfc_debugfs_root = debugfs_create_dir("lpfc", NULL); + atomic_set(&lpfc_debugfs_hba_count, 0); + if (!lpfc_debugfs_root) + goto debug_failed; + } + + snprintf(name, sizeof(name), "lpfc%d", phba->brd_no); + if (!phba->hba_debugfs_root) { + phba->hba_debugfs_root = + debugfs_create_dir(name, lpfc_debugfs_root); + if (!phba->hba_debugfs_root) + goto debug_failed; + atomic_inc(&lpfc_debugfs_hba_count); + atomic_set(&phba->debugfs_vport_count, 0); + } + + snprintf(name, sizeof(name), "vport%d", vport->vpi); + if (!vport->vport_debugfs_root) { + vport->vport_debugfs_root = + debugfs_create_dir(name, phba->hba_debugfs_root); + if (!vport->vport_debugfs_root) + goto debug_failed; + atomic_inc(&phba->debugfs_vport_count); + } + + if (!lpfc_debugfs_start_time) + lpfc_debugfs_start_time = jiffies; + + vport->disc_trc = kmalloc( + (sizeof(struct lpfc_disc_trc) * lpfc_debugfs_max_disc_trc), + GFP_KERNEL); + + if (!vport->disc_trc) + goto debug_failed; + memset(vport->disc_trc, 0, + (sizeof(struct lpfc_disc_trc) * lpfc_debugfs_max_disc_trc)); + + snprintf(name, sizeof(name), "discovery_trace"); + vport->debug_disc_trc = + debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, + vport->vport_debugfs_root, + vport, &lpfc_debugfs_op_disc_trc); + if (!vport->debug_disc_trc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "%d:0409 Cannot create debugfs", + phba->brd_no); + goto debug_failed; + } + snprintf(name, sizeof(name), "nodelist"); + vport->debug_nodelist = + debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, + vport->vport_debugfs_root, + vport, &lpfc_debugfs_op_nodelist); + if (!vport->debug_nodelist) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "%d:0409 Cannot create debugfs", + phba->brd_no); + goto debug_failed; + } +debug_failed: + return; +#endif +} + + +inline void +lpfc_debugfs_terminate(struct lpfc_vport *vport) +{ 
+#ifdef CONFIG_LPFC_DEBUG_FS + struct lpfc_hba *phba = vport->phba; + + if (vport->disc_trc) { + kfree(vport->disc_trc); + vport->disc_trc = NULL; + } + if (vport->debug_disc_trc) { + debugfs_remove(vport->debug_disc_trc); /* discovery_trace */ + vport->debug_disc_trc = NULL; + } + if (vport->debug_nodelist) { + debugfs_remove(vport->debug_nodelist); /* nodelist */ + vport->debug_nodelist = NULL; + } + if (vport->vport_debugfs_root) { + debugfs_remove(vport->vport_debugfs_root); /* vportX */ + vport->vport_debugfs_root = NULL; + atomic_dec(&phba->debugfs_vport_count); + } + if (atomic_read(&phba->debugfs_vport_count) == 0) { + debugfs_remove(vport->phba->hba_debugfs_root); /* lpfcX */ + vport->phba->hba_debugfs_root = NULL; + atomic_dec(&lpfc_debugfs_hba_count); + if (atomic_read(&lpfc_debugfs_hba_count) == 0) { + debugfs_remove(lpfc_debugfs_root); /* lpfc */ + lpfc_debugfs_root = NULL; + } + } +#endif +} + + diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h new file mode 100644 index 000000000000..fffb678426a4 --- /dev/null +++ b/drivers/scsi/lpfc/lpfc_debugfs.h @@ -0,0 +1,50 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2007 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. * + * www.emulex.com * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. 
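When lpfc_debugfs_max_disc_trc is not already a power of two, the initialization path shown earlier shifts the value down to its top set bit and rebuilds it from there, i.e. it rounds down to the largest power of two that does not exceed the request. The same arithmetic in isolation (a sketch, not the driver's function):

/* Round v down to the largest power of two that does not exceed it. */
static unsigned int round_down_pow2(unsigned int v)
{
        unsigned int i = 0;

        if (v == 0)
                return 0;
        while (v > 1) {         /* count how many halvings reach 1 */
                v >>= 1;
                i++;
        }
        return 1U << i;         /* e.g. 100 -> 64, 128 -> 128 */
}

With the printk in the driver, a request of, say, 100 entries is adjusted to 64 and the new value is reported.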
* + *******************************************************************/ + +#ifndef _H_LPFC_DEBUG_FS +#define _H_LPFC_DEBUG_FS + +#ifdef CONFIG_LPFC_DEBUG_FS +struct lpfc_disc_trc { + char *fmt; + uint32_t data1; + uint32_t data2; + uint32_t data3; + uint32_t seq_cnt; + unsigned long jif; +}; +#endif + +/* Mask for discovery_trace */ +#define LPFC_DISC_TRC_ELS_CMD 0x1 /* Trace ELS commands */ +#define LPFC_DISC_TRC_ELS_RSP 0x2 /* Trace ELS response */ +#define LPFC_DISC_TRC_ELS_UNSOL 0x4 /* Trace ELS rcv'ed */ +#define LPFC_DISC_TRC_ELS_ALL 0x7 /* Trace ELS */ +#define LPFC_DISC_TRC_MBOX_VPORT 0x8 /* Trace vport MBOXs */ +#define LPFC_DISC_TRC_MBOX 0x10 /* Trace other MBOXs */ +#define LPFC_DISC_TRC_MBOX_ALL 0x18 /* Trace all MBOXs */ +#define LPFC_DISC_TRC_CT 0x20 /* Trace disc CT requests */ +#define LPFC_DISC_TRC_DSM 0x40 /* Trace DSM events */ +#define LPFC_DISC_TRC_RPORT 0x80 /* Trace rport events */ +#define LPFC_DISC_TRC_NODE 0x100 /* Trace ndlp state changes */ + +#define LPFC_DISC_TRC_DISCOVERY 0xef /* common mask for general + * discovery */ +#endif /* H_LPFC_DEBUG_FS */ diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h index 498059f3f7f4..aacac9ac5381 100644 --- a/drivers/scsi/lpfc/lpfc_disc.h +++ b/drivers/scsi/lpfc/lpfc_disc.h @@ -36,21 +36,23 @@ enum lpfc_work_type { LPFC_EVT_WARM_START, LPFC_EVT_KILL, LPFC_EVT_ELS_RETRY, + LPFC_EVT_DEV_LOSS_DELAY, + LPFC_EVT_DEV_LOSS, }; /* structure used to queue event to the discovery tasklet */ struct lpfc_work_evt { struct list_head evt_listp; - void * evt_arg1; - void * evt_arg2; + void *evt_arg1; + void *evt_arg2; enum lpfc_work_type evt; }; struct lpfc_nodelist { struct list_head nlp_listp; - struct lpfc_name nlp_portname; /* port name */ - struct lpfc_name nlp_nodename; /* node name */ + struct lpfc_name nlp_portname; + struct lpfc_name nlp_nodename; uint32_t nlp_flag; /* entry flags */ uint32_t nlp_DID; /* FC D_ID of entry */ uint32_t nlp_last_elscmd; /* Last ELS cmd sent */ @@ -75,8 +77,9 @@ struct lpfc_nodelist { struct timer_list nlp_delayfunc; /* Used for delayed ELS cmds */ struct fc_rport *rport; /* Corresponding FC transport port structure */ - struct lpfc_hba *nlp_phba; + struct lpfc_vport *vport; struct lpfc_work_evt els_retry_evt; + struct lpfc_work_evt dev_loss_evt; unsigned long last_ramp_up_time; /* jiffy of last ramp up */ unsigned long last_q_full_time; /* jiffy of last queue full */ struct kref kref; @@ -98,7 +101,9 @@ struct lpfc_nodelist { ACC */ #define NLP_NPR_ADISC 0x2000000 /* Issue ADISC when dq'ed from NPR list */ +#define NLP_RM_DFLT_RPI 0x4000000 /* need to remove leftover dflt RPI */ #define NLP_NODEV_REMOVE 0x8000000 /* Defer removal till discovery ends */ +#define NLP_TARGET_REMOVE 0x10000000 /* Target remove in process */ /* There are 4 different double linked lists nodelist entries can reside on. 
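The composite trace masks above are plain ORs of the per-category bits (0x1|0x2|0x4 gives LPFC_DISC_TRC_ELS_ALL, 0x8|0x10 gives LPFC_DISC_TRC_MBOX_ALL), so lpfc_debugfs_mask_disc_trc can mix categories freely. A small illustration of how such a mask gates an event, using placeholder names rather than the driver's hook:

#include <stdio.h>

#define TRC_ELS_CMD   0x01
#define TRC_ELS_RSP   0x02
#define TRC_ELS_UNSOL 0x04
#define TRC_CT        0x20

/* e.g. a mask of 0x27: all ELS traffic plus discovery CT requests */
static unsigned int trc_mask = TRC_ELS_CMD | TRC_ELS_RSP | TRC_ELS_UNSOL | TRC_CT;

static void trc_event(unsigned int category, const char *what)
{
        if (!(trc_mask & category))     /* category not enabled: drop it */
                return;
        printf("%s\n", what);
}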
* The Port Login (PLOGI) list and Address Discovery (ADISC) list are used diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 638b3cd677bd..33fbc1666946 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -35,38 +35,38 @@ #include "lpfc.h" #include "lpfc_logmsg.h" #include "lpfc_crtn.h" +#include "lpfc_vport.h" +#include "lpfc_debugfs.h" static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *); +static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *, + struct lpfc_iocbq *); + static int lpfc_max_els_tries = 3; -static int -lpfc_els_chk_latt(struct lpfc_hba * phba) +int +lpfc_els_chk_latt(struct lpfc_vport *vport) { - struct lpfc_sli *psli; - LPFC_MBOXQ_t *mbox; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_hba *phba = vport->phba; uint32_t ha_copy; - int rc; - psli = &phba->sli; - - if ((phba->hba_state >= LPFC_HBA_READY) || - (phba->hba_state == LPFC_LINK_DOWN)) + if (vport->port_state >= LPFC_VPORT_READY || + phba->link_state == LPFC_LINK_DOWN) return 0; /* Read the HBA Host Attention Register */ - spin_lock_irq(phba->host->host_lock); ha_copy = readl(phba->HAregaddr); - spin_unlock_irq(phba->host->host_lock); if (!(ha_copy & HA_LATT)) return 0; /* Pending Link Event during Discovery */ - lpfc_printf_log(phba, KERN_WARNING, LOG_DISCOVERY, - "%d:0237 Pending Link Event during " + lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, + "%d (%d):0237 Pending Link Event during " "Discovery: State x%x\n", - phba->brd_no, phba->hba_state); + phba->brd_no, vport->vpi, phba->pport->port_state); /* CLEAR_LA should re-enable link attention events and * we should then imediately take a LATT event. The @@ -74,48 +74,34 @@ lpfc_els_chk_latt(struct lpfc_hba * phba) * will cleanup any left over in-progress discovery * events. 
*/ - spin_lock_irq(phba->host->host_lock); - phba->fc_flag |= FC_ABORT_DISCOVERY; - spin_unlock_irq(phba->host->host_lock); - - if (phba->hba_state != LPFC_CLEAR_LA) { - if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) { - phba->hba_state = LPFC_CLEAR_LA; - lpfc_clear_la(phba, mbox); - mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la; - rc = lpfc_sli_issue_mbox (phba, mbox, - (MBX_NOWAIT | MBX_STOP_IOCB)); - if (rc == MBX_NOT_FINISHED) { - mempool_free(mbox, phba->mbox_mem_pool); - phba->hba_state = LPFC_HBA_ERROR; - } - } - } + spin_lock_irq(shost->host_lock); + vport->fc_flag |= FC_ABORT_DISCOVERY; + spin_unlock_irq(shost->host_lock); - return 1; + if (phba->link_state != LPFC_CLEAR_LA) + lpfc_issue_clear_la(phba, vport); + return 1; } static struct lpfc_iocbq * -lpfc_prep_els_iocb(struct lpfc_hba * phba, uint8_t expectRsp, - uint16_t cmdSize, uint8_t retry, struct lpfc_nodelist * ndlp, - uint32_t did, uint32_t elscmd) +lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp, + uint16_t cmdSize, uint8_t retry, + struct lpfc_nodelist *ndlp, uint32_t did, + uint32_t elscmd) { - struct lpfc_sli_ring *pring; + struct lpfc_hba *phba = vport->phba; struct lpfc_iocbq *elsiocb; struct lpfc_dmabuf *pcmd, *prsp, *pbuflist; struct ulp_bde64 *bpl; IOCB_t *icmd; - pring = &phba->sli.ring[LPFC_ELS_RING]; - if (phba->hba_state < LPFC_LINK_UP) - return NULL; + if (!lpfc_is_link_up(phba)) + return NULL; /* Allocate buffer for command iocb */ - spin_lock_irq(phba->host->host_lock); elsiocb = lpfc_sli_get_iocbq(phba); - spin_unlock_irq(phba->host->host_lock); if (elsiocb == NULL) return NULL; @@ -123,14 +109,12 @@ lpfc_prep_els_iocb(struct lpfc_hba * phba, uint8_t expectRsp, /* fill in BDEs for command */ /* Allocate buffer for command payload */ - if (((pcmd = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL)) == 0) || + if (((pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL)) == 0) || ((pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(pcmd->phys))) == 0)) { kfree(pcmd); - spin_lock_irq(phba->host->host_lock); lpfc_sli_release_iocbq(phba, elsiocb); - spin_unlock_irq(phba->host->host_lock); return NULL; } @@ -138,7 +122,7 @@ lpfc_prep_els_iocb(struct lpfc_hba * phba, uint8_t expectRsp, /* Allocate buffer for response payload */ if (expectRsp) { - prsp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); + prsp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); if (prsp) prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &prsp->phys); @@ -146,9 +130,7 @@ lpfc_prep_els_iocb(struct lpfc_hba * phba, uint8_t expectRsp, kfree(prsp); lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys); kfree(pcmd); - spin_lock_irq(phba->host->host_lock); lpfc_sli_release_iocbq(phba, elsiocb); - spin_unlock_irq(phba->host->host_lock); return NULL; } INIT_LIST_HEAD(&prsp->list); @@ -157,14 +139,12 @@ lpfc_prep_els_iocb(struct lpfc_hba * phba, uint8_t expectRsp, } /* Allocate buffer for Buffer ptr list */ - pbuflist = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); + pbuflist = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); if (pbuflist) - pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI, - &pbuflist->phys); + pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI, + &pbuflist->phys); if (pbuflist == 0 || pbuflist->virt == 0) { - spin_lock_irq(phba->host->host_lock); lpfc_sli_release_iocbq(phba, elsiocb); - spin_unlock_irq(phba->host->host_lock); lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys); lpfc_mbuf_free(phba, prsp->virt, prsp->phys); kfree(pcmd); @@ -178,20 +158,28 @@ lpfc_prep_els_iocb(struct lpfc_hba * phba, uint8_t expectRsp, 
icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys); icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys); icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BDL; + icmd->un.elsreq64.remoteID = did; /* DID */ if (expectRsp) { - icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64)); - icmd->un.elsreq64.remoteID = did; /* DID */ + icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64)); icmd->ulpCommand = CMD_ELS_REQUEST64_CR; icmd->ulpTimeout = phba->fc_ratov * 2; } else { - icmd->un.elsreq64.bdl.bdeSize = sizeof (struct ulp_bde64); + icmd->un.elsreq64.bdl.bdeSize = sizeof(struct ulp_bde64); icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX; } - icmd->ulpBdeCount = 1; icmd->ulpLe = 1; icmd->ulpClass = CLASS3; + if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { + icmd->un.elsreq64.myID = vport->fc_myDID; + + /* For ELS_REQUEST64_CR, use the VPI by default */ + icmd->ulpContext = vport->vpi; + icmd->ulpCt_h = 0; + icmd->ulpCt_l = 1; + } + bpl = (struct ulp_bde64 *) pbuflist->virt; bpl->addrLow = le32_to_cpu(putPaddrLow(pcmd->phys)); bpl->addrHigh = le32_to_cpu(putPaddrHigh(pcmd->phys)); @@ -209,10 +197,12 @@ lpfc_prep_els_iocb(struct lpfc_hba * phba, uint8_t expectRsp, } /* Save for completion so we can release these resources */ - elsiocb->context1 = lpfc_nlp_get(ndlp); + if (elscmd != ELS_CMD_LS_RJT) + elsiocb->context1 = lpfc_nlp_get(ndlp); elsiocb->context2 = pcmd; elsiocb->context3 = pbuflist; elsiocb->retry = retry; + elsiocb->vport = vport; elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT; if (prsp) { @@ -222,16 +212,16 @@ lpfc_prep_els_iocb(struct lpfc_hba * phba, uint8_t expectRsp, if (expectRsp) { /* Xmit ELS command <elsCmd> to remote NPORT <did> */ lpfc_printf_log(phba, KERN_INFO, LOG_ELS, - "%d:0116 Xmit ELS command x%x to remote " - "NPORT x%x I/O tag: x%x, HBA state: x%x\n", - phba->brd_no, elscmd, - did, elsiocb->iotag, phba->hba_state); + "%d (%d):0116 Xmit ELS command x%x to remote " + "NPORT x%x I/O tag: x%x, port state: x%x\n", + phba->brd_no, vport->vpi, elscmd, did, + elsiocb->iotag, vport->port_state); } else { /* Xmit ELS response <elsCmd> to remote NPORT <did> */ lpfc_printf_log(phba, KERN_INFO, LOG_ELS, - "%d:0117 Xmit ELS response x%x to remote " + "%d (%d):0117 Xmit ELS response x%x to remote " "NPORT x%x I/O tag: x%x, size: x%x\n", - phba->brd_no, elscmd, + phba->brd_no, vport->vpi, elscmd, ndlp->nlp_DID, elsiocb->iotag, cmdSize); } @@ -240,16 +230,79 @@ lpfc_prep_els_iocb(struct lpfc_hba * phba, uint8_t expectRsp, static int -lpfc_cmpl_els_flogi_fabric(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, - struct serv_parm *sp, IOCB_t *irsp) +lpfc_issue_fabric_reglogin(struct lpfc_vport *vport) { + struct lpfc_hba *phba = vport->phba; LPFC_MBOXQ_t *mbox; struct lpfc_dmabuf *mp; + struct lpfc_nodelist *ndlp; + struct serv_parm *sp; int rc; - spin_lock_irq(phba->host->host_lock); - phba->fc_flag |= FC_FABRIC; - spin_unlock_irq(phba->host->host_lock); + sp = &phba->fc_fabparam; + ndlp = lpfc_findnode_did(vport, Fabric_DID); + if (!ndlp) + goto fail; + + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) + goto fail; + + vport->port_state = LPFC_FABRIC_CFG_LINK; + lpfc_config_link(phba, mbox); + mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + mbox->vport = vport; + + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT | MBX_STOP_IOCB); + if (rc == MBX_NOT_FINISHED) + goto fail_free_mbox; + + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) + goto fail; + rc = lpfc_reg_login(phba, vport->vpi, Fabric_DID, 
(uint8_t *)sp, mbox, + 0); + if (rc) + goto fail_free_mbox; + + mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login; + mbox->vport = vport; + mbox->context2 = lpfc_nlp_get(ndlp); + + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT | MBX_STOP_IOCB); + if (rc == MBX_NOT_FINISHED) + goto fail_issue_reg_login; + + return 0; + +fail_issue_reg_login: + lpfc_nlp_put(ndlp); + mp = (struct lpfc_dmabuf *) mbox->context1; + lpfc_mbuf_free(phba, mp->virt, mp->phys); + kfree(mp); +fail_free_mbox: + mempool_free(mbox, phba->mbox_mem_pool); + +fail: + lpfc_vport_set_state(vport, FC_VPORT_FAILED); + lpfc_printf_log(phba, KERN_ERR, LOG_ELS, + "%d (%d):0249 Cannot issue Register Fabric login\n", + phba->brd_no, vport->vpi); + return -ENXIO; +} + +static int +lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + struct serv_parm *sp, IOCB_t *irsp) +{ + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_hba *phba = vport->phba; + struct lpfc_nodelist *np; + struct lpfc_nodelist *next_np; + + spin_lock_irq(shost->host_lock); + vport->fc_flag |= FC_FABRIC; + spin_unlock_irq(shost->host_lock); phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov); if (sp->cmn.edtovResolution) /* E_D_TOV ticks are in nanoseconds */ @@ -258,20 +311,20 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000; if (phba->fc_topology == TOPOLOGY_LOOP) { - spin_lock_irq(phba->host->host_lock); - phba->fc_flag |= FC_PUBLIC_LOOP; - spin_unlock_irq(phba->host->host_lock); + spin_lock_irq(shost->host_lock); + vport->fc_flag |= FC_PUBLIC_LOOP; + spin_unlock_irq(shost->host_lock); } else { /* * If we are a N-port connected to a Fabric, fixup sparam's so * logins to devices on remote loops work. 
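lpfc_issue_fabric_reglogin() above releases its partial allocations through a chain of labels, each one undoing exactly the steps that succeeded before the failure; on success nothing is freed because ownership passes to the mailbox completion handler. A stripped-down version of that unwinding pattern with placeholder resources:

#include <stdlib.h>

/* Hypothetical step that can fail; stands in for issuing the mailbox. */
static int use_resources(char *a, char *b)
{
        return (a && b) ? 0 : -1;
}

static int setup_two_resources(void)
{
        char *a, *b;

        a = malloc(64);
        if (!a)
                goto fail;

        b = malloc(64);
        if (!b)
                goto fail_free_a;

        if (use_resources(a, b))
                goto fail_free_b;

        return 0;               /* success: a and b are now owned elsewhere */

fail_free_b:
        free(b);
fail_free_a:
        free(a);
fail:
        return -1;
}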
*/ - phba->fc_sparam.cmn.altBbCredit = 1; + vport->fc_sparam.cmn.altBbCredit = 1; } - phba->fc_myDID = irsp->un.ulpWord[4] & Mask_DID; + vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID; memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name)); - memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof (struct lpfc_name)); + memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name)); ndlp->nlp_class_sup = 0; if (sp->cls1.classValid) ndlp->nlp_class_sup |= FC_COS_CLASS1; @@ -285,68 +338,85 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, sp->cmn.bbRcvSizeLsb; memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm)); - mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); - if (!mbox) - goto fail; - - phba->hba_state = LPFC_FABRIC_CFG_LINK; - lpfc_config_link(phba, mbox); - mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { + if (sp->cmn.response_multiple_NPort) { + lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_VPORT, + "%d:1816 FLOGI NPIV supported, " + "response data 0x%x\n", + phba->brd_no, + sp->cmn.response_multiple_NPort); + phba->link_flag |= LS_NPIV_FAB_SUPPORTED; - rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT | MBX_STOP_IOCB); - if (rc == MBX_NOT_FINISHED) - goto fail_free_mbox; - - mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); - if (!mbox) - goto fail; + } else { + /* Because we asked f/w for NPIV it still expects us + to call reg_vnpid atleast for the physcial host */ + lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_VPORT, + "%d:1817 Fabric does not support NPIV " + "- configuring single port mode.\n", + phba->brd_no); + phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED; + } + } - if (lpfc_reg_login(phba, Fabric_DID, (uint8_t *) sp, mbox, 0)) - goto fail_free_mbox; + if ((vport->fc_prevDID != vport->fc_myDID) && + !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { - mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login; - mbox->context2 = lpfc_nlp_get(ndlp); + /* If our NportID changed, we need to ensure all + * remaining NPORTs get unreg_login'ed. 
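Whether the fabric granted NPIV is read directly from the common service parameters returned in the FLOGI accept: response_multiple_NPort set means multiple N_Port_IDs were granted, clear means the driver falls back to single-port mode. A compact sketch of that decision; the bitfield layout and the flag value here are simplified stand-ins, not the driver's definitions:

struct csp_stub {                       /* simplified common service params */
        unsigned int request_multiple_Nport  : 1;  /* set in our FLOGI  */
        unsigned int response_multiple_NPort : 1;  /* set in the LS_ACC */
};

#define NPIV_FAB_SUPPORTED 0x1          /* placeholder for the link flag */

static unsigned int update_npiv_flag(unsigned int link_flag,
                                     const struct csp_stub *sp)
{
        if (sp->response_multiple_NPort)
                return link_flag | NPIV_FAB_SUPPORTED;
        /* fabric refused NPIV: run the physical port alone */
        return link_flag & ~NPIV_FAB_SUPPORTED;
}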
+ */ + list_for_each_entry_safe(np, next_np, + &vport->fc_nodes, nlp_listp) { + if ((np->nlp_state != NLP_STE_NPR_NODE) || + !(np->nlp_flag & NLP_NPR_ADISC)) + continue; + spin_lock_irq(shost->host_lock); + np->nlp_flag &= ~NLP_NPR_ADISC; + spin_unlock_irq(shost->host_lock); + lpfc_unreg_rpi(vport, np); + } + if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { + lpfc_mbx_unreg_vpi(vport); + vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; + } + } - rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT | MBX_STOP_IOCB); - if (rc == MBX_NOT_FINISHED) - goto fail_issue_reg_login; + ndlp->nlp_sid = irsp->un.ulpWord[4] & Mask_DID; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE); + if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED && + vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) { + lpfc_register_new_vport(phba, vport, ndlp); + return 0; + } + lpfc_issue_fabric_reglogin(vport); return 0; - - fail_issue_reg_login: - lpfc_nlp_put(ndlp); - mp = (struct lpfc_dmabuf *) mbox->context1; - lpfc_mbuf_free(phba, mp->virt, mp->phys); - kfree(mp); - fail_free_mbox: - mempool_free(mbox, phba->mbox_mem_pool); - fail: - return -ENXIO; } /* * We FLOGIed into an NPort, initiate pt2pt protocol */ static int -lpfc_cmpl_els_flogi_nport(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, - struct serv_parm *sp) +lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + struct serv_parm *sp) { + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_hba *phba = vport->phba; LPFC_MBOXQ_t *mbox; int rc; - spin_lock_irq(phba->host->host_lock); - phba->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); - spin_unlock_irq(phba->host->host_lock); + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); + spin_unlock_irq(shost->host_lock); phba->fc_edtov = FF_DEF_EDTOV; phba->fc_ratov = FF_DEF_RATOV; - rc = memcmp(&phba->fc_portname, &sp->portName, - sizeof(struct lpfc_name)); + rc = memcmp(&vport->fc_portname, &sp->portName, + sizeof(vport->fc_portname)); if (rc >= 0) { /* This side will initiate the PLOGI */ - spin_lock_irq(phba->host->host_lock); - phba->fc_flag |= FC_PT2PT_PLOGI; - spin_unlock_irq(phba->host->host_lock); + spin_lock_irq(shost->host_lock); + vport->fc_flag |= FC_PT2PT_PLOGI; + spin_unlock_irq(shost->host_lock); /* * N_Port ID cannot be 0, set our to LocalID the other @@ -355,7 +425,7 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, /* not equal */ if (rc) - phba->fc_myDID = PT2PT_LocalID; + vport->fc_myDID = PT2PT_LocalID; mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) @@ -364,15 +434,16 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, lpfc_config_link(phba, mbox); mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + mbox->vport = vport; rc = lpfc_sli_issue_mbox(phba, mbox, - MBX_NOWAIT | MBX_STOP_IOCB); + MBX_NOWAIT | MBX_STOP_IOCB); if (rc == MBX_NOT_FINISHED) { mempool_free(mbox, phba->mbox_mem_pool); goto fail; } lpfc_nlp_put(ndlp); - ndlp = lpfc_findnode_did(phba, PT2PT_RemoteID); + ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID); if (!ndlp) { /* * Cannot find existing Fabric ndlp, so allocate a @@ -382,28 +453,30 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, if (!ndlp) goto fail; - lpfc_nlp_init(phba, ndlp, PT2PT_RemoteID); + lpfc_nlp_init(vport, ndlp, PT2PT_RemoteID); } memcpy(&ndlp->nlp_portname, &sp->portName, - sizeof(struct lpfc_name)); + sizeof(struct lpfc_name)); memcpy(&ndlp->nlp_nodename, &sp->nodeName, - sizeof(struct lpfc_name)); - 
lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE); + sizeof(struct lpfc_name)); + lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); + spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_NPR_2B_DISC; + spin_unlock_irq(shost->host_lock); } else { /* This side will wait for the PLOGI */ lpfc_nlp_put(ndlp); } - spin_lock_irq(phba->host->host_lock); - phba->fc_flag |= FC_PT2PT; - spin_unlock_irq(phba->host->host_lock); + spin_lock_irq(shost->host_lock); + vport->fc_flag |= FC_PT2PT; + spin_unlock_irq(shost->host_lock); /* Start discovery - this should just do CLEAR_LA */ - lpfc_disc_start(phba); + lpfc_disc_start(vport); return 0; - fail: +fail: return -ENXIO; } @@ -411,6 +484,8 @@ static void lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *rspiocb) { + struct lpfc_vport *vport = cmdiocb->vport; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); IOCB_t *irsp = &rspiocb->iocb; struct lpfc_nodelist *ndlp = cmdiocb->context1; struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp; @@ -418,21 +493,25 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, int rc; /* Check to see if link went down during discovery */ - if (lpfc_els_chk_latt(phba)) { + if (lpfc_els_chk_latt(vport)) { lpfc_nlp_put(ndlp); goto out; } + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, + "FLOGI cmpl: status:x%x/x%x state:x%x", + irsp->ulpStatus, irsp->un.ulpWord[4], + vport->port_state); + if (irsp->ulpStatus) { /* Check for retry */ - if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { - /* ELS command is being retried */ + if (lpfc_els_retry(phba, cmdiocb, rspiocb)) goto out; - } + /* FLOGI failed, so there is no fabric */ - spin_lock_irq(phba->host->host_lock); - phba->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); - spin_unlock_irq(phba->host->host_lock); + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); + spin_unlock_irq(shost->host_lock); /* If private loop, then allow max outstanding els to be * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no @@ -443,11 +522,10 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, } /* FLOGI failure */ - lpfc_printf_log(phba, - KERN_INFO, - LOG_ELS, - "%d:0100 FLOGI failure Data: x%x x%x x%x\n", - phba->brd_no, + lpfc_printf_log(phba, KERN_INFO, LOG_ELS, + "%d (%d):0100 FLOGI failure Data: x%x x%x " + "x%x\n", + phba->brd_no, vport->vpi, irsp->ulpStatus, irsp->un.ulpWord[4], irsp->ulpTimeout); goto flogifail; @@ -463,21 +541,21 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* FLOGI completes successfully */ lpfc_printf_log(phba, KERN_INFO, LOG_ELS, - "%d:0101 FLOGI completes sucessfully " + "%d (%d):0101 FLOGI completes sucessfully " "Data: x%x x%x x%x x%x\n", - phba->brd_no, + phba->brd_no, vport->vpi, irsp->un.ulpWord[4], sp->cmn.e_d_tov, sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution); - if (phba->hba_state == LPFC_FLOGI) { + if (vport->port_state == LPFC_FLOGI) { /* * If Common Service Parameters indicate Nport * we are point to point, if Fport we are Fabric. 
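In point-to-point mode the side with the numerically higher WWPN takes the PLOGI initiator role, which is what the memcmp() of vport->fc_portname against the remote sp->portName decides a little further up. Since WWPNs are stored big-endian, a byte-wise compare gives the numeric ordering directly; a compact illustration with the names treated as plain 8-byte arrays:

#include <string.h>
#include <stdint.h>

/* Returns 1 when the local port should send PLOGI, 0 when it should wait. */
static int pt2pt_local_initiates(const uint8_t local_wwpn[8],
                                 const uint8_t remote_wwpn[8])
{
        return memcmp(local_wwpn, remote_wwpn, 8) >= 0;
}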
*/ if (sp->cmn.fPort) - rc = lpfc_cmpl_els_flogi_fabric(phba, ndlp, sp, irsp); + rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, irsp); else - rc = lpfc_cmpl_els_flogi_nport(phba, ndlp, sp); + rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp); if (!rc) goto out; @@ -486,14 +564,12 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, flogifail: lpfc_nlp_put(ndlp); - if (irsp->ulpStatus != IOSTAT_LOCAL_REJECT || - (irsp->un.ulpWord[4] != IOERR_SLI_ABORTED && - irsp->un.ulpWord[4] != IOERR_SLI_DOWN)) { + if (!lpfc_error_lost_link(irsp)) { /* FLOGI failed, so just use loop map to make discovery list */ - lpfc_disc_list_loopmap(phba); + lpfc_disc_list_loopmap(vport); /* Start discovery */ - lpfc_disc_start(phba); + lpfc_disc_start(vport); } out: @@ -501,9 +577,10 @@ out: } static int -lpfc_issue_els_flogi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, +lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, uint8_t retry) { + struct lpfc_hba *phba = vport->phba; struct serv_parm *sp; IOCB_t *icmd; struct lpfc_iocbq *elsiocb; @@ -515,9 +592,10 @@ lpfc_issue_els_flogi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, pring = &phba->sli.ring[LPFC_ELS_RING]; - cmdsize = (sizeof (uint32_t) + sizeof (struct serv_parm)); - elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, ndlp, - ndlp->nlp_DID, ELS_CMD_FLOGI); + cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); + elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, + ndlp->nlp_DID, ELS_CMD_FLOGI); + if (!elsiocb) return 1; @@ -526,8 +604,8 @@ lpfc_issue_els_flogi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, /* For FLOGI request, remainder of payload is service parameters */ *((uint32_t *) (pcmd)) = ELS_CMD_FLOGI; - pcmd += sizeof (uint32_t); - memcpy(pcmd, &phba->fc_sparam, sizeof (struct serv_parm)); + pcmd += sizeof(uint32_t); + memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm)); sp = (struct serv_parm *) pcmd; /* Setup CSPs accordingly for Fabric */ @@ -541,16 +619,32 @@ lpfc_issue_els_flogi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, if (sp->cmn.fcphHigh < FC_PH3) sp->cmn.fcphHigh = FC_PH3; + if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { + sp->cmn.request_multiple_Nport = 1; + + /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */ + icmd->ulpCt_h = 1; + icmd->ulpCt_l = 0; + } + + if (phba->fc_topology != TOPOLOGY_LOOP) { + icmd->un.elsreq64.myID = 0; + icmd->un.elsreq64.fl = 1; + } + tmo = phba->fc_ratov; phba->fc_ratov = LPFC_DISC_FLOGI_TMO; - lpfc_set_disctmo(phba); + lpfc_set_disctmo(vport); phba->fc_ratov = tmo; phba->fc_stat.elsXmitFLOGI++; elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi; - spin_lock_irq(phba->host->host_lock); - rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); - spin_unlock_irq(phba->host->host_lock); + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, + "Issue FLOGI: opt:x%x", + phba->sli3_options, 0, 0); + + rc = lpfc_issue_fabric_iocb(phba, elsiocb); if (rc == IOCB_ERROR) { lpfc_els_free_iocb(phba, elsiocb); return 1; @@ -559,7 +653,7 @@ lpfc_issue_els_flogi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, } int -lpfc_els_abort_flogi(struct lpfc_hba * phba) +lpfc_els_abort_flogi(struct lpfc_hba *phba) { struct lpfc_sli_ring *pring; struct lpfc_iocbq *iocb, *next_iocb; @@ -577,73 +671,99 @@ lpfc_els_abort_flogi(struct lpfc_hba * phba) * Check the txcmplq for an iocb that matches the nport the driver is * searching for. 
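Several completion handlers in this patch swap the open-coded "did we lose the link?" test for lpfc_error_lost_link(). Its body is not part of these hunks, but judging from the checks it replaces it amounts to something like the predicate below; the numeric values here are placeholders, the real codes live in lpfc_hw.h:

/* Placeholder values -- the driver's real codes are defined in lpfc_hw.h. */
#define IOSTAT_LOCAL_REJECT_STUB 0x03
#define IOERR_SLI_ABORTED_STUB   0x41
#define IOERR_LINK_DOWN_STUB     0x42
#define IOERR_SLI_DOWN_STUB      0x43

static int error_lost_link(unsigned int ulp_status, unsigned int ulp_word4)
{
        return ulp_status == IOSTAT_LOCAL_REJECT_STUB &&
               (ulp_word4 == IOERR_SLI_ABORTED_STUB ||
                ulp_word4 == IOERR_LINK_DOWN_STUB   ||
                ulp_word4 == IOERR_SLI_DOWN_STUB);
}

Centralizing the test keeps the FLOGI, PLOGI and PRLI completion paths from repeating the same three-way IOERR comparison.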
*/ - spin_lock_irq(phba->host->host_lock); + spin_lock_irq(&phba->hbalock); list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) { icmd = &iocb->iocb; - if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) { + if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR && + icmd->un.elsreq64.bdl.ulpIoTag32) { ndlp = (struct lpfc_nodelist *)(iocb->context1); - if (ndlp && (ndlp->nlp_DID == Fabric_DID)) + if (ndlp && (ndlp->nlp_DID == Fabric_DID)) { lpfc_sli_issue_abort_iotag(phba, pring, iocb); + } } } - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(&phba->hbalock); return 0; } int -lpfc_initial_flogi(struct lpfc_hba *phba) +lpfc_initial_flogi(struct lpfc_vport *vport) { + struct lpfc_hba *phba = vport->phba; struct lpfc_nodelist *ndlp; /* First look for the Fabric ndlp */ - ndlp = lpfc_findnode_did(phba, Fabric_DID); + ndlp = lpfc_findnode_did(vport, Fabric_DID); if (!ndlp) { /* Cannot find existing Fabric ndlp, so allocate a new one */ ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); if (!ndlp) return 0; - lpfc_nlp_init(phba, ndlp, Fabric_DID); + lpfc_nlp_init(vport, ndlp, Fabric_DID); } else { - lpfc_dequeue_node(phba, ndlp); + lpfc_dequeue_node(vport, ndlp); } - if (lpfc_issue_els_flogi(phba, ndlp, 0)) { + if (lpfc_issue_els_flogi(vport, ndlp, 0)) { lpfc_nlp_put(ndlp); } return 1; } +int +lpfc_initial_fdisc(struct lpfc_vport *vport) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_nodelist *ndlp; + + /* First look for the Fabric ndlp */ + ndlp = lpfc_findnode_did(vport, Fabric_DID); + if (!ndlp) { + /* Cannot find existing Fabric ndlp, so allocate a new one */ + ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); + if (!ndlp) + return 0; + lpfc_nlp_init(vport, ndlp, Fabric_DID); + } else { + lpfc_dequeue_node(vport, ndlp); + } + if (lpfc_issue_els_fdisc(vport, ndlp, 0)) { + lpfc_nlp_put(ndlp); + } + return 1; +} static void -lpfc_more_plogi(struct lpfc_hba * phba) +lpfc_more_plogi(struct lpfc_vport *vport) { int sentplogi; + struct lpfc_hba *phba = vport->phba; - if (phba->num_disc_nodes) - phba->num_disc_nodes--; + if (vport->num_disc_nodes) + vport->num_disc_nodes--; /* Continue discovery with <num_disc_nodes> PLOGIs to go */ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, - "%d:0232 Continue discovery with %d PLOGIs to go " + "%d (%d):0232 Continue discovery with %d PLOGIs to go " "Data: x%x x%x x%x\n", - phba->brd_no, phba->num_disc_nodes, phba->fc_plogi_cnt, - phba->fc_flag, phba->hba_state); + phba->brd_no, vport->vpi, vport->num_disc_nodes, + vport->fc_plogi_cnt, vport->fc_flag, vport->port_state); /* Check to see if there are more PLOGIs to be sent */ - if (phba->fc_flag & FC_NLP_MORE) { - /* go thru NPR list and issue any remaining ELS PLOGIs */ - sentplogi = lpfc_els_disc_plogi(phba); - } + if (vport->fc_flag & FC_NLP_MORE) + /* go thru NPR nodes and issue any remaining ELS PLOGIs */ + sentplogi = lpfc_els_disc_plogi(vport); + return; } static struct lpfc_nodelist * -lpfc_plogi_confirm_nport(struct lpfc_hba *phba, struct lpfc_dmabuf *prsp, +lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, struct lpfc_nodelist *ndlp) { + struct lpfc_vport *vport = ndlp->vport; struct lpfc_nodelist *new_ndlp; - uint32_t *lp; struct serv_parm *sp; - uint8_t name[sizeof (struct lpfc_name)]; + uint8_t name[sizeof(struct lpfc_name)]; uint32_t rc; /* Fabric nodes can have the same WWPN so we don't bother searching @@ -652,50 +772,51 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, struct lpfc_dmabuf *prsp, if (ndlp->nlp_type & NLP_FABRIC) return ndlp; - lp = (uint32_t 
*) prsp->virt; - sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t)); + sp = (struct serv_parm *) ((uint8_t *) prsp + sizeof(uint32_t)); memset(name, 0, sizeof(struct lpfc_name)); /* Now we find out if the NPort we are logging into, matches the WWPN * we have for that ndlp. If not, we have some work to do. */ - new_ndlp = lpfc_findnode_wwpn(phba, &sp->portName); + new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName); if (new_ndlp == ndlp) return ndlp; if (!new_ndlp) { - rc = - memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)); + rc = memcmp(&ndlp->nlp_portname, name, + sizeof(struct lpfc_name)); if (!rc) return ndlp; new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC); if (!new_ndlp) return ndlp; - lpfc_nlp_init(phba, new_ndlp, ndlp->nlp_DID); + lpfc_nlp_init(vport, new_ndlp, ndlp->nlp_DID); } - lpfc_unreg_rpi(phba, new_ndlp); + lpfc_unreg_rpi(vport, new_ndlp); new_ndlp->nlp_DID = ndlp->nlp_DID; new_ndlp->nlp_prev_state = ndlp->nlp_prev_state; - lpfc_nlp_set_state(phba, new_ndlp, ndlp->nlp_state); + lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state); - /* Move this back to NPR list */ + /* Move this back to NPR state */ if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) - lpfc_drop_node(phba, ndlp); + lpfc_drop_node(vport, ndlp); else { - lpfc_unreg_rpi(phba, ndlp); + lpfc_unreg_rpi(vport, ndlp); ndlp->nlp_DID = 0; /* Two ndlps cannot have the same did */ - lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE); + lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); } return new_ndlp; } static void -lpfc_cmpl_els_plogi(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, - struct lpfc_iocbq * rspiocb) +lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) { + struct lpfc_vport *vport = cmdiocb->vport; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); IOCB_t *irsp; struct lpfc_nodelist *ndlp; struct lpfc_dmabuf *prsp; @@ -705,32 +826,43 @@ lpfc_cmpl_els_plogi(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, cmdiocb->context_un.rsp_iocb = rspiocb; irsp = &rspiocb->iocb; - ndlp = lpfc_findnode_did(phba, irsp->un.elsreq64.remoteID); - if (!ndlp) + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, + "PLOGI cmpl: status:x%x/x%x did:x%x", + irsp->ulpStatus, irsp->un.ulpWord[4], + irsp->un.elsreq64.remoteID); + + ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID); + if (!ndlp) { + lpfc_printf_log(phba, KERN_ERR, LOG_ELS, + "%d (%d):0136 PLOGI completes to NPort x%x " + "with no ndlp. Data: x%x x%x x%x\n", + phba->brd_no, vport->vpi, irsp->un.elsreq64.remoteID, + irsp->ulpStatus, irsp->un.ulpWord[4], irsp->ulpIoTag); goto out; + } /* Since ndlp can be freed in the disc state machine, note if this node * is being used during discovery. 
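The pointer arithmetic in lpfc_plogi_confirm_nport() works because an ELS accept payload begins with a 4-byte command/response word followed immediately by the service parameter block. A sketch of pulling the parameters out of such a buffer; serv_parm_stub is a trimmed stand-in, the driver's struct serv_parm carries the full common service parameters as well:

#include <stdint.h>

struct serv_parm_stub {
        uint8_t portName[8];
        uint8_t nodeName[8];
};

/* Skip the leading 32-bit ELS command word to reach the service params. */
static const struct serv_parm_stub *els_acc_sparams(const void *payload)
{
        return (const struct serv_parm_stub *)
               ((const uint8_t *)payload + sizeof(uint32_t));
}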
*/ + spin_lock_irq(shost->host_lock); disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); - spin_lock_irq(phba->host->host_lock); ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(shost->host_lock); rc = 0; /* PLOGI completes to NPort <nlp_DID> */ lpfc_printf_log(phba, KERN_INFO, LOG_ELS, - "%d:0102 PLOGI completes to NPort x%x " + "%d (%d):0102 PLOGI completes to NPort x%x " "Data: x%x x%x x%x x%x x%x\n", - phba->brd_no, ndlp->nlp_DID, irsp->ulpStatus, - irsp->un.ulpWord[4], irsp->ulpTimeout, disc, - phba->num_disc_nodes); + phba->brd_no, vport->vpi, ndlp->nlp_DID, + irsp->ulpStatus, irsp->un.ulpWord[4], + irsp->ulpTimeout, disc, vport->num_disc_nodes); /* Check to see if link went down during discovery */ - if (lpfc_els_chk_latt(phba)) { - spin_lock_irq(phba->host->host_lock); + if (lpfc_els_chk_latt(vport)) { + spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_NPR_2B_DISC; - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(shost->host_lock); goto out; } @@ -743,56 +875,62 @@ lpfc_cmpl_els_plogi(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { /* ELS command is being retried */ if (disc) { - spin_lock_irq(phba->host->host_lock); + spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_NPR_2B_DISC; - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(shost->host_lock); } goto out; } /* PLOGI failed */ + if (ndlp->nlp_DID == NameServer_DID) { + lpfc_vport_set_state(vport, FC_VPORT_FAILED); + lpfc_printf_log(phba, KERN_ERR, LOG_ELS, + "%d (%d):0250 Nameserver login error: " + "0x%x / 0x%x\n", + phba->brd_no, vport->vpi, + irsp->ulpStatus, irsp->un.ulpWord[4]); + } + /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ - if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && - ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) || - (irsp->un.ulpWord[4] == IOERR_LINK_DOWN) || - (irsp->un.ulpWord[4] == IOERR_SLI_DOWN))) { + if (lpfc_error_lost_link(irsp)) { rc = NLP_STE_FREED_NODE; } else { - rc = lpfc_disc_state_machine(phba, ndlp, cmdiocb, - NLP_EVT_CMPL_PLOGI); + rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb, + NLP_EVT_CMPL_PLOGI); } } else { /* Good status, call state machine */ prsp = list_entry(((struct lpfc_dmabuf *) - cmdiocb->context2)->list.next, - struct lpfc_dmabuf, list); - ndlp = lpfc_plogi_confirm_nport(phba, prsp, ndlp); - rc = lpfc_disc_state_machine(phba, ndlp, cmdiocb, - NLP_EVT_CMPL_PLOGI); + cmdiocb->context2)->list.next, + struct lpfc_dmabuf, list); + ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp); + rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb, + NLP_EVT_CMPL_PLOGI); } - if (disc && phba->num_disc_nodes) { + if (disc && vport->num_disc_nodes) { /* Check to see if there are more PLOGIs to be sent */ - lpfc_more_plogi(phba); + lpfc_more_plogi(vport); - if (phba->num_disc_nodes == 0) { - spin_lock_irq(phba->host->host_lock); - phba->fc_flag &= ~FC_NDISC_ACTIVE; - spin_unlock_irq(phba->host->host_lock); + if (vport->num_disc_nodes == 0) { + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~FC_NDISC_ACTIVE; + spin_unlock_irq(shost->host_lock); - lpfc_can_disctmo(phba); - if (phba->fc_flag & FC_RSCN_MODE) { + lpfc_can_disctmo(vport); + if (vport->fc_flag & FC_RSCN_MODE) { /* * Check to see if more RSCNs came in while * we were processing this one. 
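The bookkeeping that follows a PLOGI completion has a simple shape: while num_disc_nodes is still non-zero more PLOGIs go out, and only when it reaches zero is FC_NDISC_ACTIVE cleared and any RSCN that arrived in the meantime replayed. A loose stand-alone sketch of that flow with hypothetical state:

struct disc_state {
        int num_disc_nodes;     /* PLOGIs still outstanding this round */
        int ndisc_active;
        int rscn_mode;
        int rscn_id_cnt;        /* RSCNs that arrived during discovery */
};

static void handle_rscn(struct disc_state *st)
{
        st->rscn_id_cnt = 0;    /* placeholder for re-running discovery */
}

static void discovery_step_done(struct disc_state *st)
{
        if (st->num_disc_nodes && --st->num_disc_nodes)
                return;                         /* more PLOGIs to wait for */

        st->ndisc_active = 0;                   /* this round is finished  */
        if (st->rscn_mode) {
                if (st->rscn_id_cnt)            /* new RSCNs queued up     */
                        handle_rscn(st);
                else
                        st->rscn_mode = 0;      /* nothing pending         */
        }
}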
*/ - if ((phba->fc_rscn_id_cnt == 0) && - (!(phba->fc_flag & FC_RSCN_DISCOVERY))) { - spin_lock_irq(phba->host->host_lock); - phba->fc_flag &= ~FC_RSCN_MODE; - spin_unlock_irq(phba->host->host_lock); + if ((vport->fc_rscn_id_cnt == 0) && + (!(vport->fc_flag & FC_RSCN_DISCOVERY))) { + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~FC_RSCN_MODE; + spin_unlock_irq(shost->host_lock); } else { - lpfc_els_handle_rscn(phba); + lpfc_els_handle_rscn(vport); } } } @@ -804,8 +942,9 @@ out: } int -lpfc_issue_els_plogi(struct lpfc_hba * phba, uint32_t did, uint8_t retry) +lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry) { + struct lpfc_hba *phba = vport->phba; struct serv_parm *sp; IOCB_t *icmd; struct lpfc_iocbq *elsiocb; @@ -813,13 +952,14 @@ lpfc_issue_els_plogi(struct lpfc_hba * phba, uint32_t did, uint8_t retry) struct lpfc_sli *psli; uint8_t *pcmd; uint16_t cmdsize; + int ret; psli = &phba->sli; pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ - cmdsize = (sizeof (uint32_t) + sizeof (struct serv_parm)); - elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, NULL, did, - ELS_CMD_PLOGI); + cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); + elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, NULL, did, + ELS_CMD_PLOGI); if (!elsiocb) return 1; @@ -828,8 +968,8 @@ lpfc_issue_els_plogi(struct lpfc_hba * phba, uint32_t did, uint8_t retry) /* For PLOGI request, remainder of payload is service parameters */ *((uint32_t *) (pcmd)) = ELS_CMD_PLOGI; - pcmd += sizeof (uint32_t); - memcpy(pcmd, &phba->fc_sparam, sizeof (struct serv_parm)); + pcmd += sizeof(uint32_t); + memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm)); sp = (struct serv_parm *) pcmd; if (sp->cmn.fcphLow < FC_PH_4_3) @@ -838,22 +978,27 @@ lpfc_issue_els_plogi(struct lpfc_hba * phba, uint32_t did, uint8_t retry) if (sp->cmn.fcphHigh < FC_PH3) sp->cmn.fcphHigh = FC_PH3; + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, + "Issue PLOGI: did:x%x", + did, 0, 0); + phba->fc_stat.elsXmitPLOGI++; elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi; - spin_lock_irq(phba->host->host_lock); - if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { - spin_unlock_irq(phba->host->host_lock); + ret = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); + + if (ret == IOCB_ERROR) { lpfc_els_free_iocb(phba, elsiocb); return 1; } - spin_unlock_irq(phba->host->host_lock); return 0; } static void -lpfc_cmpl_els_prli(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, - struct lpfc_iocbq * rspiocb) +lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) { + struct lpfc_vport *vport = cmdiocb->vport; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); IOCB_t *irsp; struct lpfc_sli *psli; struct lpfc_nodelist *ndlp; @@ -864,21 +1009,26 @@ lpfc_cmpl_els_prli(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, irsp = &(rspiocb->iocb); ndlp = (struct lpfc_nodelist *) cmdiocb->context1; - spin_lock_irq(phba->host->host_lock); + spin_lock_irq(shost->host_lock); ndlp->nlp_flag &= ~NLP_PRLI_SND; - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(shost->host_lock); + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, + "PRLI cmpl: status:x%x/x%x did:x%x", + irsp->ulpStatus, irsp->un.ulpWord[4], + ndlp->nlp_DID); /* PRLI completes to NPort <nlp_DID> */ lpfc_printf_log(phba, KERN_INFO, LOG_ELS, - "%d:0103 PRLI completes to NPort x%x " + "%d (%d):0103 PRLI completes to NPort x%x " "Data: x%x x%x x%x x%x\n", - phba->brd_no, ndlp->nlp_DID, irsp->ulpStatus, - 
irsp->un.ulpWord[4], irsp->ulpTimeout, - phba->num_disc_nodes); + phba->brd_no, vport->vpi, ndlp->nlp_DID, + irsp->ulpStatus, irsp->un.ulpWord[4], irsp->ulpTimeout, + vport->num_disc_nodes); - phba->fc_prli_sent--; + vport->fc_prli_sent--; /* Check to see if link went down during discovery */ - if (lpfc_els_chk_latt(phba)) + if (lpfc_els_chk_latt(vport)) goto out; if (irsp->ulpStatus) { @@ -889,18 +1039,16 @@ lpfc_cmpl_els_prli(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, } /* PRLI failed */ /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ - if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && - ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) || - (irsp->un.ulpWord[4] == IOERR_LINK_DOWN) || - (irsp->un.ulpWord[4] == IOERR_SLI_DOWN))) { + if (lpfc_error_lost_link(irsp)) { goto out; } else { - lpfc_disc_state_machine(phba, ndlp, cmdiocb, - NLP_EVT_CMPL_PRLI); + lpfc_disc_state_machine(vport, ndlp, cmdiocb, + NLP_EVT_CMPL_PRLI); } } else { /* Good status, call state machine */ - lpfc_disc_state_machine(phba, ndlp, cmdiocb, NLP_EVT_CMPL_PRLI); + lpfc_disc_state_machine(vport, ndlp, cmdiocb, + NLP_EVT_CMPL_PRLI); } out: @@ -909,9 +1057,11 @@ out: } int -lpfc_issue_els_prli(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, +lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, uint8_t retry) { + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_hba *phba = vport->phba; PRLI *npr; IOCB_t *icmd; struct lpfc_iocbq *elsiocb; @@ -923,9 +1073,9 @@ lpfc_issue_els_prli(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, psli = &phba->sli; pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ - cmdsize = (sizeof (uint32_t) + sizeof (PRLI)); - elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, ndlp, - ndlp->nlp_DID, ELS_CMD_PRLI); + cmdsize = (sizeof(uint32_t) + sizeof(PRLI)); + elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, + ndlp->nlp_DID, ELS_CMD_PRLI); if (!elsiocb) return 1; @@ -933,9 +1083,9 @@ lpfc_issue_els_prli(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); /* For PRLI request, remainder of payload is service parameters */ - memset(pcmd, 0, (sizeof (PRLI) + sizeof (uint32_t))); + memset(pcmd, 0, (sizeof(PRLI) + sizeof(uint32_t))); *((uint32_t *) (pcmd)) = ELS_CMD_PRLI; - pcmd += sizeof (uint32_t); + pcmd += sizeof(uint32_t); /* For PRLI, remainder of payload is PRLI parameter page */ npr = (PRLI *) pcmd; @@ -955,81 +1105,88 @@ lpfc_issue_els_prli(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, npr->prliType = PRLI_FCP_TYPE; npr->initiatorFunc = 1; + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, + "Issue PRLI: did:x%x", + ndlp->nlp_DID, 0, 0); + phba->fc_stat.elsXmitPRLI++; elsiocb->iocb_cmpl = lpfc_cmpl_els_prli; - spin_lock_irq(phba->host->host_lock); + spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_PRLI_SND; + spin_unlock_irq(shost->host_lock); if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { + spin_lock_irq(shost->host_lock); ndlp->nlp_flag &= ~NLP_PRLI_SND; - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(shost->host_lock); lpfc_els_free_iocb(phba, elsiocb); return 1; } - spin_unlock_irq(phba->host->host_lock); - phba->fc_prli_sent++; + vport->fc_prli_sent++; return 0; } static void -lpfc_more_adisc(struct lpfc_hba * phba) +lpfc_more_adisc(struct lpfc_vport *vport) { int sentadisc; + struct lpfc_hba *phba = vport->phba; - if (phba->num_disc_nodes) - phba->num_disc_nodes--; + if (vport->num_disc_nodes) + 
vport->num_disc_nodes--; /* Continue discovery with <num_disc_nodes> ADISCs to go */ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, - "%d:0210 Continue discovery with %d ADISCs to go " + "%d (%d):0210 Continue discovery with %d ADISCs to go " "Data: x%x x%x x%x\n", - phba->brd_no, phba->num_disc_nodes, phba->fc_adisc_cnt, - phba->fc_flag, phba->hba_state); + phba->brd_no, vport->vpi, vport->num_disc_nodes, + vport->fc_adisc_cnt, vport->fc_flag, vport->port_state); /* Check to see if there are more ADISCs to be sent */ - if (phba->fc_flag & FC_NLP_MORE) { - lpfc_set_disctmo(phba); - - /* go thru NPR list and issue any remaining ELS ADISCs */ - sentadisc = lpfc_els_disc_adisc(phba); + if (vport->fc_flag & FC_NLP_MORE) { + lpfc_set_disctmo(vport); + /* go thru NPR nodes and issue any remaining ELS ADISCs */ + sentadisc = lpfc_els_disc_adisc(vport); } return; } static void -lpfc_rscn_disc(struct lpfc_hba * phba) +lpfc_rscn_disc(struct lpfc_vport *vport) { + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + + lpfc_can_disctmo(vport); + /* RSCN discovery */ - /* go thru NPR list and issue ELS PLOGIs */ - if (phba->fc_npr_cnt) { - if (lpfc_els_disc_plogi(phba)) + /* go thru NPR nodes and issue ELS PLOGIs */ + if (vport->fc_npr_cnt) + if (lpfc_els_disc_plogi(vport)) return; - } - if (phba->fc_flag & FC_RSCN_MODE) { + + if (vport->fc_flag & FC_RSCN_MODE) { /* Check to see if more RSCNs came in while we were * processing this one. */ - if ((phba->fc_rscn_id_cnt == 0) && - (!(phba->fc_flag & FC_RSCN_DISCOVERY))) { - spin_lock_irq(phba->host->host_lock); - phba->fc_flag &= ~FC_RSCN_MODE; - spin_unlock_irq(phba->host->host_lock); + if ((vport->fc_rscn_id_cnt == 0) && + (!(vport->fc_flag & FC_RSCN_DISCOVERY))) { + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~FC_RSCN_MODE; + spin_unlock_irq(shost->host_lock); } else { - lpfc_els_handle_rscn(phba); + lpfc_els_handle_rscn(vport); } } } static void -lpfc_cmpl_els_adisc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, - struct lpfc_iocbq * rspiocb) +lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) { + struct lpfc_vport *vport = cmdiocb->vport; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); IOCB_t *irsp; - struct lpfc_sli *psli; struct lpfc_nodelist *ndlp; - LPFC_MBOXQ_t *mbox; - int disc, rc; - - psli = &phba->sli; + int disc; /* we pass cmdiocb to state machine which needs rspiocb as well */ cmdiocb->context_un.rsp_iocb = rspiocb; @@ -1037,27 +1194,32 @@ lpfc_cmpl_els_adisc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, irsp = &(rspiocb->iocb); ndlp = (struct lpfc_nodelist *) cmdiocb->context1; + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, + "ADISC cmpl: status:x%x/x%x did:x%x", + irsp->ulpStatus, irsp->un.ulpWord[4], + ndlp->nlp_DID); + /* Since ndlp can be freed in the disc state machine, note if this node * is being used during discovery. 
*/ + spin_lock_irq(shost->host_lock); disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); - spin_lock_irq(phba->host->host_lock); ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC); - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(shost->host_lock); /* ADISC completes to NPort <nlp_DID> */ lpfc_printf_log(phba, KERN_INFO, LOG_ELS, - "%d:0104 ADISC completes to NPort x%x " + "%d (%d):0104 ADISC completes to NPort x%x " "Data: x%x x%x x%x x%x x%x\n", - phba->brd_no, ndlp->nlp_DID, irsp->ulpStatus, - irsp->un.ulpWord[4], irsp->ulpTimeout, disc, - phba->num_disc_nodes); + phba->brd_no, vport->vpi, ndlp->nlp_DID, + irsp->ulpStatus, irsp->un.ulpWord[4], irsp->ulpTimeout, + disc, vport->num_disc_nodes); /* Check to see if link went down during discovery */ - if (lpfc_els_chk_latt(phba)) { - spin_lock_irq(phba->host->host_lock); + if (lpfc_els_chk_latt(vport)) { + spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_NPR_2B_DISC; - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(shost->host_lock); goto out; } @@ -1066,67 +1228,68 @@ lpfc_cmpl_els_adisc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { /* ELS command is being retried */ if (disc) { - spin_lock_irq(phba->host->host_lock); + spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_NPR_2B_DISC; - spin_unlock_irq(phba->host->host_lock); - lpfc_set_disctmo(phba); + spin_unlock_irq(shost->host_lock); + lpfc_set_disctmo(vport); } goto out; } /* ADISC failed */ /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ - if ((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) || - ((irsp->un.ulpWord[4] != IOERR_SLI_ABORTED) && - (irsp->un.ulpWord[4] != IOERR_LINK_DOWN) && - (irsp->un.ulpWord[4] != IOERR_SLI_DOWN))) { - lpfc_disc_state_machine(phba, ndlp, cmdiocb, - NLP_EVT_CMPL_ADISC); + if (!lpfc_error_lost_link(irsp)) { + lpfc_disc_state_machine(vport, ndlp, cmdiocb, + NLP_EVT_CMPL_ADISC); } } else { /* Good status, call state machine */ - lpfc_disc_state_machine(phba, ndlp, cmdiocb, + lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_ADISC); } - if (disc && phba->num_disc_nodes) { + if (disc && vport->num_disc_nodes) { /* Check to see if there are more ADISCs to be sent */ - lpfc_more_adisc(phba); + lpfc_more_adisc(vport); /* Check to see if we are done with ADISC authentication */ - if (phba->num_disc_nodes == 0) { - lpfc_can_disctmo(phba); - /* If we get here, there is nothing left to wait for */ - if ((phba->hba_state < LPFC_HBA_READY) && - (phba->hba_state != LPFC_CLEAR_LA)) { - /* Link up discovery */ - if ((mbox = mempool_alloc(phba->mbox_mem_pool, - GFP_KERNEL))) { - phba->hba_state = LPFC_CLEAR_LA; - lpfc_clear_la(phba, mbox); - mbox->mbox_cmpl = - lpfc_mbx_cmpl_clear_la; - rc = lpfc_sli_issue_mbox - (phba, mbox, - (MBX_NOWAIT | MBX_STOP_IOCB)); - if (rc == MBX_NOT_FINISHED) { - mempool_free(mbox, - phba->mbox_mem_pool); - lpfc_disc_flush_list(phba); - psli->ring[(psli->extra_ring)]. - flag &= - ~LPFC_STOP_IOCB_EVENT; - psli->ring[(psli->fcp_ring)]. - flag &= - ~LPFC_STOP_IOCB_EVENT; - psli->ring[(psli->next_ring)]. - flag &= - ~LPFC_STOP_IOCB_EVENT; - phba->hba_state = - LPFC_HBA_READY; + if (vport->num_disc_nodes == 0) { + /* If we get here, there is nothing left to ADISC */ + /* + * For NPIV, cmpl_reg_vpi will set port_state to READY, + * and continue discovery. 
+ */ + if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && + !(vport->fc_flag & FC_RSCN_MODE)) { + lpfc_issue_reg_vpi(phba, vport); + goto out; + } + /* + * For SLI2, we need to set port_state to READY + * and continue discovery. + */ + if (vport->port_state < LPFC_VPORT_READY) { + /* If we get here, there is nothing to ADISC */ + if (vport->port_type == LPFC_PHYSICAL_PORT) + lpfc_issue_clear_la(phba, vport); + + if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) { + vport->num_disc_nodes = 0; + /* go thru NPR list, issue ELS PLOGIs */ + if (vport->fc_npr_cnt) + lpfc_els_disc_plogi(vport); + + if (!vport->num_disc_nodes) { + spin_lock_irq(shost->host_lock); + vport->fc_flag &= + ~FC_NDISC_ACTIVE; + spin_unlock_irq( + shost->host_lock); + lpfc_can_disctmo(vport); } } + vport->port_state = LPFC_VPORT_READY; } else { - lpfc_rscn_disc(phba); + lpfc_rscn_disc(vport); } } } @@ -1136,23 +1299,22 @@ out: } int -lpfc_issue_els_adisc(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, +lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, uint8_t retry) { + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_hba *phba = vport->phba; ADISC *ap; IOCB_t *icmd; struct lpfc_iocbq *elsiocb; - struct lpfc_sli_ring *pring; - struct lpfc_sli *psli; + struct lpfc_sli *psli = &phba->sli; + struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING]; uint8_t *pcmd; uint16_t cmdsize; - psli = &phba->sli; - pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ - - cmdsize = (sizeof (uint32_t) + sizeof (ADISC)); - elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, ndlp, - ndlp->nlp_DID, ELS_CMD_ADISC); + cmdsize = (sizeof(uint32_t) + sizeof(ADISC)); + elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, + ndlp->nlp_DID, ELS_CMD_ADISC); if (!elsiocb) return 1; @@ -1161,81 +1323,97 @@ lpfc_issue_els_adisc(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, /* For ADISC request, remainder of payload is service parameters */ *((uint32_t *) (pcmd)) = ELS_CMD_ADISC; - pcmd += sizeof (uint32_t); + pcmd += sizeof(uint32_t); /* Fill in ADISC payload */ ap = (ADISC *) pcmd; ap->hardAL_PA = phba->fc_pref_ALPA; - memcpy(&ap->portName, &phba->fc_portname, sizeof (struct lpfc_name)); - memcpy(&ap->nodeName, &phba->fc_nodename, sizeof (struct lpfc_name)); - ap->DID = be32_to_cpu(phba->fc_myDID); + memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name)); + memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); + ap->DID = be32_to_cpu(vport->fc_myDID); + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, + "Issue ADISC: did:x%x", + ndlp->nlp_DID, 0, 0); phba->fc_stat.elsXmitADISC++; elsiocb->iocb_cmpl = lpfc_cmpl_els_adisc; - spin_lock_irq(phba->host->host_lock); + spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_ADISC_SND; + spin_unlock_irq(shost->host_lock); if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { + spin_lock_irq(shost->host_lock); ndlp->nlp_flag &= ~NLP_ADISC_SND; - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(shost->host_lock); lpfc_els_free_iocb(phba, elsiocb); return 1; } - spin_unlock_irq(phba->host->host_lock); return 0; } static void -lpfc_cmpl_els_logo(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, - struct lpfc_iocbq * rspiocb) +lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) { + struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; + struct lpfc_vport *vport = ndlp->vport; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 
IOCB_t *irsp; struct lpfc_sli *psli; - struct lpfc_nodelist *ndlp; psli = &phba->sli; /* we pass cmdiocb to state machine which needs rspiocb as well */ cmdiocb->context_un.rsp_iocb = rspiocb; irsp = &(rspiocb->iocb); - ndlp = (struct lpfc_nodelist *) cmdiocb->context1; - spin_lock_irq(phba->host->host_lock); + spin_lock_irq(shost->host_lock); ndlp->nlp_flag &= ~NLP_LOGO_SND; - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(shost->host_lock); + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, + "LOGO cmpl: status:x%x/x%x did:x%x", + irsp->ulpStatus, irsp->un.ulpWord[4], + ndlp->nlp_DID); /* LOGO completes to NPort <nlp_DID> */ lpfc_printf_log(phba, KERN_INFO, LOG_ELS, - "%d:0105 LOGO completes to NPort x%x " + "%d (%d):0105 LOGO completes to NPort x%x " "Data: x%x x%x x%x x%x\n", - phba->brd_no, ndlp->nlp_DID, irsp->ulpStatus, - irsp->un.ulpWord[4], irsp->ulpTimeout, - phba->num_disc_nodes); + phba->brd_no, vport->vpi, ndlp->nlp_DID, + irsp->ulpStatus, irsp->un.ulpWord[4], irsp->ulpTimeout, + vport->num_disc_nodes); /* Check to see if link went down during discovery */ - if (lpfc_els_chk_latt(phba)) + if (lpfc_els_chk_latt(vport)) goto out; + if (ndlp->nlp_flag & NLP_TARGET_REMOVE) { + /* NLP_EVT_DEVICE_RM should unregister the RPI + * which should abort all outstanding IOs. + */ + lpfc_disc_state_machine(vport, ndlp, cmdiocb, + NLP_EVT_DEVICE_RM); + goto out; + } + if (irsp->ulpStatus) { /* Check for retry */ - if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { + if (lpfc_els_retry(phba, cmdiocb, rspiocb)) /* ELS command is being retried */ goto out; - } /* LOGO failed */ /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ - if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && - ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) || - (irsp->un.ulpWord[4] == IOERR_LINK_DOWN) || - (irsp->un.ulpWord[4] == IOERR_SLI_DOWN))) { + if (lpfc_error_lost_link(irsp)) goto out; - } else { - lpfc_disc_state_machine(phba, ndlp, cmdiocb, - NLP_EVT_CMPL_LOGO); - } + else + lpfc_disc_state_machine(vport, ndlp, cmdiocb, + NLP_EVT_CMPL_LOGO); } else { /* Good status, call state machine. * This will unregister the rpi if needed. 
*/ - lpfc_disc_state_machine(phba, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO); + lpfc_disc_state_machine(vport, ndlp, cmdiocb, + NLP_EVT_CMPL_LOGO); } out: @@ -1244,75 +1422,91 @@ out: } int -lpfc_issue_els_logo(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, +lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, uint8_t retry) { + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_hba *phba = vport->phba; IOCB_t *icmd; struct lpfc_iocbq *elsiocb; struct lpfc_sli_ring *pring; struct lpfc_sli *psli; uint8_t *pcmd; uint16_t cmdsize; + int rc; psli = &phba->sli; pring = &psli->ring[LPFC_ELS_RING]; - cmdsize = (2 * sizeof (uint32_t)) + sizeof (struct lpfc_name); - elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, ndlp, - ndlp->nlp_DID, ELS_CMD_LOGO); + cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name); + elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, + ndlp->nlp_DID, ELS_CMD_LOGO); if (!elsiocb) return 1; icmd = &elsiocb->iocb; pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); *((uint32_t *) (pcmd)) = ELS_CMD_LOGO; - pcmd += sizeof (uint32_t); + pcmd += sizeof(uint32_t); /* Fill in LOGO payload */ - *((uint32_t *) (pcmd)) = be32_to_cpu(phba->fc_myDID); - pcmd += sizeof (uint32_t); - memcpy(pcmd, &phba->fc_portname, sizeof (struct lpfc_name)); + *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID); + pcmd += sizeof(uint32_t); + memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name)); + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, + "Issue LOGO: did:x%x", + ndlp->nlp_DID, 0, 0); phba->fc_stat.elsXmitLOGO++; elsiocb->iocb_cmpl = lpfc_cmpl_els_logo; - spin_lock_irq(phba->host->host_lock); + spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_LOGO_SND; - if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { + spin_unlock_irq(shost->host_lock); + rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); + + if (rc == IOCB_ERROR) { + spin_lock_irq(shost->host_lock); ndlp->nlp_flag &= ~NLP_LOGO_SND; - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(shost->host_lock); lpfc_els_free_iocb(phba, elsiocb); return 1; } - spin_unlock_irq(phba->host->host_lock); return 0; } static void -lpfc_cmpl_els_cmd(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, - struct lpfc_iocbq * rspiocb) +lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) { + struct lpfc_vport *vport = cmdiocb->vport; IOCB_t *irsp; irsp = &rspiocb->iocb; + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, + "ELS cmd cmpl: status:x%x/x%x did:x%x", + irsp->ulpStatus, irsp->un.ulpWord[4], + irsp->un.elsreq64.remoteID); + /* ELS cmd tag <ulpIoTag> completes */ - lpfc_printf_log(phba, - KERN_INFO, - LOG_ELS, - "%d:0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n", - phba->brd_no, + lpfc_printf_log(phba, KERN_INFO, LOG_ELS, + "%d (%d):0106 ELS cmd tag x%x completes Data: x%x x%x " + "x%x\n", + phba->brd_no, vport->vpi, irsp->ulpIoTag, irsp->ulpStatus, irsp->un.ulpWord[4], irsp->ulpTimeout); /* Check to see if link went down during discovery */ - lpfc_els_chk_latt(phba); + lpfc_els_chk_latt(vport); lpfc_els_free_iocb(phba, cmdiocb); return; } int -lpfc_issue_els_scr(struct lpfc_hba * phba, uint32_t nportid, uint8_t retry) +lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) { + struct lpfc_hba *phba = vport->phba; IOCB_t *icmd; struct lpfc_iocbq *elsiocb; struct lpfc_sli_ring *pring; @@ -1323,15 +1517,16 @@ lpfc_issue_els_scr(struct lpfc_hba * phba, uint32_t 
nportid, uint8_t retry) psli = &phba->sli; pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ - cmdsize = (sizeof (uint32_t) + sizeof (SCR)); + cmdsize = (sizeof(uint32_t) + sizeof(SCR)); ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); if (!ndlp) return 1; - lpfc_nlp_init(phba, ndlp, nportid); + lpfc_nlp_init(vport, ndlp, nportid); + + elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, + ndlp->nlp_DID, ELS_CMD_SCR); - elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, ndlp, - ndlp->nlp_DID, ELS_CMD_SCR); if (!elsiocb) { lpfc_nlp_put(ndlp); return 1; @@ -1341,29 +1536,31 @@ lpfc_issue_els_scr(struct lpfc_hba * phba, uint32_t nportid, uint8_t retry) pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); *((uint32_t *) (pcmd)) = ELS_CMD_SCR; - pcmd += sizeof (uint32_t); + pcmd += sizeof(uint32_t); /* For SCR, remainder of payload is SCR parameter page */ - memset(pcmd, 0, sizeof (SCR)); + memset(pcmd, 0, sizeof(SCR)); ((SCR *) pcmd)->Function = SCR_FUNC_FULL; + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, + "Issue SCR: did:x%x", + ndlp->nlp_DID, 0, 0); + phba->fc_stat.elsXmitSCR++; elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd; - spin_lock_irq(phba->host->host_lock); if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { - spin_unlock_irq(phba->host->host_lock); lpfc_nlp_put(ndlp); lpfc_els_free_iocb(phba, elsiocb); return 1; } - spin_unlock_irq(phba->host->host_lock); lpfc_nlp_put(ndlp); return 0; } static int -lpfc_issue_els_farpr(struct lpfc_hba * phba, uint32_t nportid, uint8_t retry) +lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) { + struct lpfc_hba *phba = vport->phba; IOCB_t *icmd; struct lpfc_iocbq *elsiocb; struct lpfc_sli_ring *pring; @@ -1377,14 +1574,15 @@ lpfc_issue_els_farpr(struct lpfc_hba * phba, uint32_t nportid, uint8_t retry) psli = &phba->sli; pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ - cmdsize = (sizeof (uint32_t) + sizeof (FARP)); + cmdsize = (sizeof(uint32_t) + sizeof(FARP)); ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); if (!ndlp) return 1; - lpfc_nlp_init(phba, ndlp, nportid); - elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, ndlp, - ndlp->nlp_DID, ELS_CMD_RNID); + lpfc_nlp_init(vport, ndlp, nportid); + + elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, + ndlp->nlp_DID, ELS_CMD_RNID); if (!elsiocb) { lpfc_nlp_put(ndlp); return 1; @@ -1394,44 +1592,71 @@ lpfc_issue_els_farpr(struct lpfc_hba * phba, uint32_t nportid, uint8_t retry) pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); *((uint32_t *) (pcmd)) = ELS_CMD_FARPR; - pcmd += sizeof (uint32_t); + pcmd += sizeof(uint32_t); /* Fill in FARPR payload */ fp = (FARP *) (pcmd); - memset(fp, 0, sizeof (FARP)); + memset(fp, 0, sizeof(FARP)); lp = (uint32_t *) pcmd; *lp++ = be32_to_cpu(nportid); - *lp++ = be32_to_cpu(phba->fc_myDID); + *lp++ = be32_to_cpu(vport->fc_myDID); fp->Rflags = 0; fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE); - memcpy(&fp->RportName, &phba->fc_portname, sizeof (struct lpfc_name)); - memcpy(&fp->RnodeName, &phba->fc_nodename, sizeof (struct lpfc_name)); - if ((ondlp = lpfc_findnode_did(phba, nportid))) { + memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name)); + memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); + ondlp = lpfc_findnode_did(vport, nportid); + if (ondlp) { memcpy(&fp->OportName, &ondlp->nlp_portname, - sizeof (struct lpfc_name)); + sizeof(struct lpfc_name)); memcpy(&fp->OnodeName, &ondlp->nlp_nodename, - sizeof 
(struct lpfc_name)); + sizeof(struct lpfc_name)); } + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, + "Issue FARPR: did:x%x", + ndlp->nlp_DID, 0, 0); + phba->fc_stat.elsXmitFARPR++; elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd; - spin_lock_irq(phba->host->host_lock); if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { - spin_unlock_irq(phba->host->host_lock); lpfc_nlp_put(ndlp); lpfc_els_free_iocb(phba, elsiocb); return 1; } - spin_unlock_irq(phba->host->host_lock); lpfc_nlp_put(ndlp); return 0; } +static void +lpfc_end_rscn(struct lpfc_vport *vport) +{ + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + + if (vport->fc_flag & FC_RSCN_MODE) { + /* + * Check to see if more RSCNs came in while we were + * processing this one. + */ + if (vport->fc_rscn_id_cnt || + (vport->fc_flag & FC_RSCN_DISCOVERY) != 0) + lpfc_els_handle_rscn(vport); + else { + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~FC_RSCN_MODE; + spin_unlock_irq(shost->host_lock); + } + } +} + void -lpfc_cancel_retry_delay_tmo(struct lpfc_hba *phba, struct lpfc_nodelist * nlp) +lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp) { + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + + spin_lock_irq(shost->host_lock); nlp->nlp_flag &= ~NLP_DELAY_TMO; + spin_unlock_irq(shost->host_lock); del_timer_sync(&nlp->nlp_delayfunc); nlp->nlp_last_elscmd = 0; @@ -1439,30 +1664,21 @@ lpfc_cancel_retry_delay_tmo(struct lpfc_hba *phba, struct lpfc_nodelist * nlp) list_del_init(&nlp->els_retry_evt.evt_listp); if (nlp->nlp_flag & NLP_NPR_2B_DISC) { + spin_lock_irq(shost->host_lock); nlp->nlp_flag &= ~NLP_NPR_2B_DISC; - if (phba->num_disc_nodes) { + spin_unlock_irq(shost->host_lock); + if (vport->num_disc_nodes) { /* Check to see if there are more * PLOGIs to be sent */ - lpfc_more_plogi(phba); - - if (phba->num_disc_nodes == 0) { - phba->fc_flag &= ~FC_NDISC_ACTIVE; - lpfc_can_disctmo(phba); - if (phba->fc_flag & FC_RSCN_MODE) { - /* - * Check to see if more RSCNs - * came in while we were - * processing this one. 
- */ - if((phba->fc_rscn_id_cnt==0) && - !(phba->fc_flag & FC_RSCN_DISCOVERY)) { - phba->fc_flag &= ~FC_RSCN_MODE; - } - else { - lpfc_els_handle_rscn(phba); - } - } + lpfc_more_plogi(vport); + + if (vport->num_disc_nodes == 0) { + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~FC_NDISC_ACTIVE; + spin_unlock_irq(shost->host_lock); + lpfc_can_disctmo(vport); + lpfc_end_rscn(vport); } } } @@ -1472,18 +1688,19 @@ lpfc_cancel_retry_delay_tmo(struct lpfc_hba *phba, struct lpfc_nodelist * nlp) void lpfc_els_retry_delay(unsigned long ptr) { - struct lpfc_nodelist *ndlp; - struct lpfc_hba *phba; - unsigned long iflag; - struct lpfc_work_evt *evtp; - - ndlp = (struct lpfc_nodelist *)ptr; - phba = ndlp->nlp_phba; + struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) ptr; + struct lpfc_vport *vport = ndlp->vport; + struct lpfc_hba *phba = vport->phba; + unsigned long flags; + struct lpfc_work_evt *evtp = &ndlp->els_retry_evt; + + ndlp = (struct lpfc_nodelist *) ptr; + phba = ndlp->vport->phba; evtp = &ndlp->els_retry_evt; - spin_lock_irqsave(phba->host->host_lock, iflag); + spin_lock_irqsave(&phba->hbalock, flags); if (!list_empty(&evtp->evt_listp)) { - spin_unlock_irqrestore(phba->host->host_lock, iflag); + spin_unlock_irqrestore(&phba->hbalock, flags); return; } @@ -1491,33 +1708,31 @@ lpfc_els_retry_delay(unsigned long ptr) evtp->evt = LPFC_EVT_ELS_RETRY; list_add_tail(&evtp->evt_listp, &phba->work_list); if (phba->work_wait) - wake_up(phba->work_wait); + lpfc_worker_wake_up(phba); - spin_unlock_irqrestore(phba->host->host_lock, iflag); + spin_unlock_irqrestore(&phba->hbalock, flags); return; } void lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp) { - struct lpfc_hba *phba; - uint32_t cmd; - uint32_t did; - uint8_t retry; + struct lpfc_vport *vport = ndlp->vport; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + uint32_t cmd, did, retry; - phba = ndlp->nlp_phba; - spin_lock_irq(phba->host->host_lock); + spin_lock_irq(shost->host_lock); did = ndlp->nlp_DID; cmd = ndlp->nlp_last_elscmd; ndlp->nlp_last_elscmd = 0; if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(shost->host_lock); return; } ndlp->nlp_flag &= ~NLP_DELAY_TMO; - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(shost->host_lock); /* * If a discovery event readded nlp_delayfunc after timer * firing and before processing the timer, cancel the @@ -1528,57 +1743,54 @@ lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp) switch (cmd) { case ELS_CMD_FLOGI: - lpfc_issue_els_flogi(phba, ndlp, retry); + lpfc_issue_els_flogi(vport, ndlp, retry); break; case ELS_CMD_PLOGI: - if(!lpfc_issue_els_plogi(phba, ndlp->nlp_DID, retry)) { + if (!lpfc_issue_els_plogi(vport, ndlp->nlp_DID, retry)) { ndlp->nlp_prev_state = ndlp->nlp_state; - lpfc_nlp_set_state(phba, ndlp, NLP_STE_PLOGI_ISSUE); + lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); } break; case ELS_CMD_ADISC: - if (!lpfc_issue_els_adisc(phba, ndlp, retry)) { + if (!lpfc_issue_els_adisc(vport, ndlp, retry)) { ndlp->nlp_prev_state = ndlp->nlp_state; - lpfc_nlp_set_state(phba, ndlp, NLP_STE_ADISC_ISSUE); + lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); } break; case ELS_CMD_PRLI: - if (!lpfc_issue_els_prli(phba, ndlp, retry)) { + if (!lpfc_issue_els_prli(vport, ndlp, retry)) { ndlp->nlp_prev_state = ndlp->nlp_state; - lpfc_nlp_set_state(phba, ndlp, NLP_STE_PRLI_ISSUE); + lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); } break; case ELS_CMD_LOGO: - if (!lpfc_issue_els_logo(phba, ndlp, retry)) { 
+ if (!lpfc_issue_els_logo(vport, ndlp, retry)) { ndlp->nlp_prev_state = ndlp->nlp_state; - lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE); + lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); } break; + case ELS_CMD_FDISC: + lpfc_issue_els_fdisc(vport, ndlp, retry); + break; } return; } static int -lpfc_els_retry(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, - struct lpfc_iocbq * rspiocb) +lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) { - IOCB_t *irsp; - struct lpfc_dmabuf *pcmd; - struct lpfc_nodelist *ndlp; + struct lpfc_vport *vport = cmdiocb->vport; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + IOCB_t *irsp = &rspiocb->iocb; + struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; + struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; uint32_t *elscmd; struct ls_rjt stat; - int retry, maxretry; - int delay; - uint32_t cmd; + int retry = 0, maxretry = lpfc_max_els_tries, delay = 0; + uint32_t cmd = 0; uint32_t did; - retry = 0; - delay = 0; - maxretry = lpfc_max_els_tries; - irsp = &rspiocb->iocb; - ndlp = (struct lpfc_nodelist *) cmdiocb->context1; - pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; - cmd = 0; /* Note: context2 may be 0 for internal driver abort * of delays ELS command. @@ -1594,11 +1806,15 @@ lpfc_els_retry(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, else { /* We should only hit this case for retrying PLOGI */ did = irsp->un.elsreq64.remoteID; - ndlp = lpfc_findnode_did(phba, did); + ndlp = lpfc_findnode_did(vport, did); if (!ndlp && (cmd != ELS_CMD_PLOGI)) return 1; } + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, + "Retry ELS: wd7:x%x wd4:x%x did:x%x", + *(((uint32_t *) irsp) + 7), irsp->un.ulpWord[4], ndlp->nlp_DID); + switch (irsp->ulpStatus) { case IOSTAT_FCP_RSP_ERROR: case IOSTAT_REMOTE_STOP: @@ -1607,25 +1823,37 @@ lpfc_els_retry(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, case IOSTAT_LOCAL_REJECT: switch ((irsp->un.ulpWord[4] & 0xff)) { case IOERR_LOOP_OPEN_FAILURE: - if (cmd == ELS_CMD_PLOGI) { - if (cmdiocb->retry == 0) { - delay = 1; - } - } + if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0) + delay = 1000; retry = 1; break; - case IOERR_SEQUENCE_TIMEOUT: - retry = 1; + case IOERR_ILLEGAL_COMMAND: + if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) && + (cmd == ELS_CMD_FDISC)) { + lpfc_printf_log(phba, KERN_ERR, LOG_ELS, + "%d (%d):0124 FDISC failed (3/6) retrying...\n", + phba->brd_no, vport->vpi); + lpfc_mbx_unreg_vpi(vport); + retry = 1; + /* Always retry for this case */ + cmdiocb->retry = 0; + } break; case IOERR_NO_RESOURCES: - if (cmd == ELS_CMD_PLOGI) { - delay = 1; - } + retry = 1; + if (cmdiocb->retry > 100) + delay = 100; + maxretry = 250; + break; + + case IOERR_ILLEGAL_FRAME: + delay = 100; retry = 1; break; + case IOERR_SEQUENCE_TIMEOUT: case IOERR_INVALID_RPI: retry = 1; break; @@ -1655,27 +1883,57 @@ lpfc_els_retry(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, if (stat.un.b.lsRjtRsnCodeExp == LSEXP_CMD_IN_PROGRESS) { if (cmd == ELS_CMD_PLOGI) { - delay = 1; + delay = 1000; maxretry = 48; } retry = 1; break; } if (cmd == ELS_CMD_PLOGI) { - delay = 1; + delay = 1000; maxretry = lpfc_max_els_tries + 1; retry = 1; break; } + if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && + (cmd == ELS_CMD_FDISC) && + (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){ + lpfc_printf_log(phba, KERN_ERR, LOG_ELS, + "%d (%d):0125 FDISC Failed (x%x)." 
+ " Fabric out of resources\n", + phba->brd_no, vport->vpi, stat.un.lsRjtError); + lpfc_vport_set_state(vport, + FC_VPORT_NO_FABRIC_RSCS); + } break; case LSRJT_LOGICAL_BSY: - if (cmd == ELS_CMD_PLOGI) { - delay = 1; + if ((cmd == ELS_CMD_PLOGI) || + (cmd == ELS_CMD_PRLI)) { + delay = 1000; maxretry = 48; + } else if (cmd == ELS_CMD_FDISC) { + /* Always retry for this case */ + cmdiocb->retry = 0; } retry = 1; break; + + case LSRJT_LOGICAL_ERR: + case LSRJT_PROTOCOL_ERR: + if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && + (cmd == ELS_CMD_FDISC) && + ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) || + (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID)) + ) { + lpfc_printf_log(phba, KERN_ERR, LOG_ELS, + "%d (%d):0123 FDISC Failed (x%x)." + " Fabric Detected Bad WWN\n", + phba->brd_no, vport->vpi, stat.un.lsRjtError); + lpfc_vport_set_state(vport, + FC_VPORT_FABRIC_REJ_WWN); + } + break; } break; @@ -1695,21 +1953,27 @@ lpfc_els_retry(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, retry = 0; } + if ((vport->load_flag & FC_UNLOADING) != 0) + retry = 0; + if (retry) { /* Retry ELS command <elsCmd> to remote NPORT <did> */ lpfc_printf_log(phba, KERN_INFO, LOG_ELS, - "%d:0107 Retry ELS command x%x to remote " + "%d (%d):0107 Retry ELS command x%x to remote " "NPORT x%x Data: x%x x%x\n", - phba->brd_no, + phba->brd_no, vport->vpi, cmd, did, cmdiocb->retry, delay); - if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) { + if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) && + ((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) || + ((irsp->un.ulpWord[4] & 0xff) != IOERR_NO_RESOURCES))) { + /* Don't reset timer for no resources */ + /* If discovery / RSCN timer is running, reset it */ - if (timer_pending(&phba->fc_disctmo) || - (phba->fc_flag & FC_RSCN_MODE)) { - lpfc_set_disctmo(phba); - } + if (timer_pending(&vport->fc_disctmo) || + (vport->fc_flag & FC_RSCN_MODE)) + lpfc_set_disctmo(vport); } phba->fc_stat.elsXmitRetry++; @@ -1717,50 +1981,62 @@ lpfc_els_retry(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, phba->fc_stat.elsDelayRetry++; ndlp->nlp_retry = cmdiocb->retry; - mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); + /* delay is specified in milliseconds */ + mod_timer(&ndlp->nlp_delayfunc, + jiffies + msecs_to_jiffies(delay)); + spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DELAY_TMO; + spin_unlock_irq(shost->host_lock); ndlp->nlp_prev_state = ndlp->nlp_state; - lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE); + if (cmd == ELS_CMD_PRLI) + lpfc_nlp_set_state(vport, ndlp, + NLP_STE_REG_LOGIN_ISSUE); + else + lpfc_nlp_set_state(vport, ndlp, + NLP_STE_NPR_NODE); ndlp->nlp_last_elscmd = cmd; return 1; } switch (cmd) { case ELS_CMD_FLOGI: - lpfc_issue_els_flogi(phba, ndlp, cmdiocb->retry); + lpfc_issue_els_flogi(vport, ndlp, cmdiocb->retry); + return 1; + case ELS_CMD_FDISC: + lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry); return 1; case ELS_CMD_PLOGI: if (ndlp) { ndlp->nlp_prev_state = ndlp->nlp_state; - lpfc_nlp_set_state(phba, ndlp, + lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); } - lpfc_issue_els_plogi(phba, did, cmdiocb->retry); + lpfc_issue_els_plogi(vport, did, cmdiocb->retry); return 1; case ELS_CMD_ADISC: ndlp->nlp_prev_state = ndlp->nlp_state; - lpfc_nlp_set_state(phba, ndlp, NLP_STE_ADISC_ISSUE); - lpfc_issue_els_adisc(phba, ndlp, cmdiocb->retry); + lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); + lpfc_issue_els_adisc(vport, ndlp, cmdiocb->retry); return 1; case ELS_CMD_PRLI: ndlp->nlp_prev_state = ndlp->nlp_state; - 
lpfc_nlp_set_state(phba, ndlp, NLP_STE_PRLI_ISSUE); - lpfc_issue_els_prli(phba, ndlp, cmdiocb->retry); + lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); + lpfc_issue_els_prli(vport, ndlp, cmdiocb->retry); return 1; case ELS_CMD_LOGO: ndlp->nlp_prev_state = ndlp->nlp_state; - lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE); - lpfc_issue_els_logo(phba, ndlp, cmdiocb->retry); + lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); + lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry); return 1; } } /* No retry ELS command <elsCmd> to remote NPORT <did> */ lpfc_printf_log(phba, KERN_INFO, LOG_ELS, - "%d:0108 No retry ELS command x%x to remote NPORT x%x " - "Data: x%x\n", - phba->brd_no, + "%d (%d):0108 No retry ELS command x%x to remote " + "NPORT x%x Data: x%x\n", + phba->brd_no, vport->vpi, cmd, did, cmdiocb->retry); return 0; @@ -1795,33 +2071,36 @@ lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb) lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); kfree(buf_ptr); } - spin_lock_irq(phba->host->host_lock); lpfc_sli_release_iocbq(phba, elsiocb); - spin_unlock_irq(phba->host->host_lock); return 0; } static void -lpfc_cmpl_els_logo_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, - struct lpfc_iocbq * rspiocb) +lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) { - struct lpfc_nodelist *ndlp; + struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; + struct lpfc_vport *vport = cmdiocb->vport; + IOCB_t *irsp; - ndlp = (struct lpfc_nodelist *) cmdiocb->context1; + irsp = &rspiocb->iocb; + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, + "ACC LOGO cmpl: status:x%x/x%x did:x%x", + irsp->ulpStatus, irsp->un.ulpWord[4], ndlp->nlp_DID); /* ACC to LOGO completes to NPort <nlp_DID> */ lpfc_printf_log(phba, KERN_INFO, LOG_ELS, - "%d:0109 ACC to LOGO completes to NPort x%x " + "%d (%d):0109 ACC to LOGO completes to NPort x%x " "Data: x%x x%x x%x\n", - phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag, - ndlp->nlp_state, ndlp->nlp_rpi); + phba->brd_no, vport->vpi, ndlp->nlp_DID, + ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); switch (ndlp->nlp_state) { case NLP_STE_UNUSED_NODE: /* node is just allocated */ - lpfc_drop_node(phba, ndlp); + lpfc_drop_node(vport, ndlp); break; case NLP_STE_NPR_NODE: /* NPort Recovery mode */ - lpfc_unreg_rpi(phba, ndlp); + lpfc_unreg_rpi(vport, ndlp); break; default: break; @@ -1830,24 +2109,38 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, return; } +void +lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) +{ + struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); + struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; + + pmb->context1 = NULL; + lpfc_mbuf_free(phba, mp->virt, mp->phys); + kfree(mp); + mempool_free(pmb, phba->mbox_mem_pool); + lpfc_nlp_put(ndlp); + return; +} + static void -lpfc_cmpl_els_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, +lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *rspiocb) { + struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; + struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL; + struct Scsi_Host *shost = vport ? 
lpfc_shost_from_vport(vport) : NULL; IOCB_t *irsp; - struct lpfc_nodelist *ndlp; LPFC_MBOXQ_t *mbox = NULL; - struct lpfc_dmabuf *mp; + struct lpfc_dmabuf *mp = NULL; irsp = &rspiocb->iocb; - ndlp = (struct lpfc_nodelist *) cmdiocb->context1; if (cmdiocb->context_un.mbox) mbox = cmdiocb->context_un.mbox; - /* Check to see if link went down during discovery */ - if (lpfc_els_chk_latt(phba) || !ndlp) { + if (!ndlp || lpfc_els_chk_latt(vport)) { if (mbox) { mp = (struct lpfc_dmabuf *) mbox->context1; if (mp) { @@ -1859,24 +2152,37 @@ lpfc_cmpl_els_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, goto out; } + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, + "ACC cmpl: status:x%x/x%x did:x%x", + irsp->ulpStatus, irsp->un.ulpWord[4], + irsp->un.rcvels.remoteID); + /* ELS response tag <ulpIoTag> completes */ lpfc_printf_log(phba, KERN_INFO, LOG_ELS, - "%d:0110 ELS response tag x%x completes " + "%d (%d):0110 ELS response tag x%x completes " "Data: x%x x%x x%x x%x x%x x%x x%x\n", - phba->brd_no, + phba->brd_no, vport->vpi, cmdiocb->iocb.ulpIoTag, rspiocb->iocb.ulpStatus, rspiocb->iocb.un.ulpWord[4], rspiocb->iocb.ulpTimeout, - ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, + ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); if (mbox) { if ((rspiocb->iocb.ulpStatus == 0) && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) { - lpfc_unreg_rpi(phba, ndlp); - mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; + lpfc_unreg_rpi(vport, ndlp); mbox->context2 = lpfc_nlp_get(ndlp); - ndlp->nlp_prev_state = ndlp->nlp_state; - lpfc_nlp_set_state(phba, ndlp, NLP_STE_REG_LOGIN_ISSUE); + mbox->vport = vport; + if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) { + mbox->mbox_flag |= LPFC_MBX_IMED_UNREG; + mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; + } + else { + mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; + ndlp->nlp_prev_state = ndlp->nlp_state; + lpfc_nlp_set_state(vport, ndlp, + NLP_STE_REG_LOGIN_ISSUE); + } if (lpfc_sli_issue_mbox(phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB)) != MBX_NOT_FINISHED) { @@ -1886,15 +2192,11 @@ lpfc_cmpl_els_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* NOTE: we should have messages for unsuccessful reglogin */ } else { - /* Do not call NO_LIST for lpfc_els_abort'ed ELS cmds */ - if (!((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && - ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) || - (irsp->un.ulpWord[4] == IOERR_LINK_DOWN) || - (irsp->un.ulpWord[4] == IOERR_SLI_DOWN)))) { - if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) { - lpfc_drop_node(phba, ndlp); - ndlp = NULL; - } + /* Do not drop node for lpfc_els_abort'ed ELS cmds */ + if (!lpfc_error_lost_link(irsp) && + ndlp->nlp_flag & NLP_ACC_REGLOGIN) { + lpfc_drop_node(vport, ndlp); + ndlp = NULL; } } mp = (struct lpfc_dmabuf *) mbox->context1; @@ -1906,19 +2208,21 @@ lpfc_cmpl_els_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, } out: if (ndlp) { - spin_lock_irq(phba->host->host_lock); - ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN; - spin_unlock_irq(phba->host->host_lock); + spin_lock_irq(shost->host_lock); + ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI); + spin_unlock_irq(shost->host_lock); } lpfc_els_free_iocb(phba, cmdiocb); return; } int -lpfc_els_rsp_acc(struct lpfc_hba * phba, uint32_t flag, - struct lpfc_iocbq * oldiocb, struct lpfc_nodelist * ndlp, - LPFC_MBOXQ_t * mbox, uint8_t newnode) +lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, + struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp, + LPFC_MBOXQ_t *mbox, uint8_t newnode) { + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_hba *phba = 
vport->phba; IOCB_t *icmd; IOCB_t *oldcmd; struct lpfc_iocbq *elsiocb; @@ -1935,23 +2239,30 @@ lpfc_els_rsp_acc(struct lpfc_hba * phba, uint32_t flag, switch (flag) { case ELS_CMD_ACC: - cmdsize = sizeof (uint32_t); - elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry, - ndlp, ndlp->nlp_DID, ELS_CMD_ACC); + cmdsize = sizeof(uint32_t); + elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, + ndlp, ndlp->nlp_DID, ELS_CMD_ACC); if (!elsiocb) { + spin_lock_irq(shost->host_lock); ndlp->nlp_flag &= ~NLP_LOGO_ACC; + spin_unlock_irq(shost->host_lock); return 1; } + icmd = &elsiocb->iocb; icmd->ulpContext = oldcmd->ulpContext; /* Xri */ pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); *((uint32_t *) (pcmd)) = ELS_CMD_ACC; - pcmd += sizeof (uint32_t); + pcmd += sizeof(uint32_t); + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, + "Issue ACC: did:x%x flg:x%x", + ndlp->nlp_DID, ndlp->nlp_flag, 0); break; case ELS_CMD_PLOGI: - cmdsize = (sizeof (struct serv_parm) + sizeof (uint32_t)); - elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry, - ndlp, ndlp->nlp_DID, ELS_CMD_ACC); + cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t)); + elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, + ndlp, ndlp->nlp_DID, ELS_CMD_ACC); if (!elsiocb) return 1; @@ -1963,12 +2274,16 @@ lpfc_els_rsp_acc(struct lpfc_hba * phba, uint32_t flag, elsiocb->context_un.mbox = mbox; *((uint32_t *) (pcmd)) = ELS_CMD_ACC; - pcmd += sizeof (uint32_t); - memcpy(pcmd, &phba->fc_sparam, sizeof (struct serv_parm)); + pcmd += sizeof(uint32_t); + memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm)); + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, + "Issue ACC PLOGI: did:x%x flg:x%x", + ndlp->nlp_DID, ndlp->nlp_flag, 0); break; case ELS_CMD_PRLO: - cmdsize = sizeof (uint32_t) + sizeof (PRLO); - elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry, + cmdsize = sizeof(uint32_t) + sizeof(PRLO); + elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, ndlp->nlp_DID, ELS_CMD_PRLO); if (!elsiocb) return 1; @@ -1978,10 +2293,14 @@ lpfc_els_rsp_acc(struct lpfc_hba * phba, uint32_t flag, pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt, - sizeof (uint32_t) + sizeof (PRLO)); + sizeof(uint32_t) + sizeof(PRLO)); *((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC; els_pkt_ptr = (ELS_PKT *) pcmd; els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED; + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, + "Issue ACC PRLO: did:x%x flg:x%x", + ndlp->nlp_DID, ndlp->nlp_flag, 0); break; default: return 1; @@ -1994,25 +2313,23 @@ lpfc_els_rsp_acc(struct lpfc_hba * phba, uint32_t flag, /* Xmit ELS ACC response tag <ulpIoTag> */ lpfc_printf_log(phba, KERN_INFO, LOG_ELS, - "%d:0128 Xmit ELS ACC response tag x%x, XRI: x%x, " + "%d (%d):0128 Xmit ELS ACC response tag x%x, XRI: x%x, " "DID: x%x, nlp_flag: x%x nlp_state: x%x RPI: x%x\n", - phba->brd_no, elsiocb->iotag, + phba->brd_no, vport->vpi, elsiocb->iotag, elsiocb->iocb.ulpContext, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); if (ndlp->nlp_flag & NLP_LOGO_ACC) { - spin_lock_irq(phba->host->host_lock); + spin_lock_irq(shost->host_lock); ndlp->nlp_flag &= ~NLP_LOGO_ACC; - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(shost->host_lock); elsiocb->iocb_cmpl = lpfc_cmpl_els_logo_acc; } else { - elsiocb->iocb_cmpl = lpfc_cmpl_els_acc; + elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; } phba->fc_stat.elsXmitACC++; - 
spin_lock_irq(phba->host->host_lock); rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); - spin_unlock_irq(phba->host->host_lock); if (rc == IOCB_ERROR) { lpfc_els_free_iocb(phba, elsiocb); return 1; @@ -2021,9 +2338,11 @@ lpfc_els_rsp_acc(struct lpfc_hba * phba, uint32_t flag, } int -lpfc_els_rsp_reject(struct lpfc_hba * phba, uint32_t rejectError, - struct lpfc_iocbq * oldiocb, struct lpfc_nodelist * ndlp) +lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError, + struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp, + LPFC_MBOXQ_t *mbox) { + struct lpfc_hba *phba = vport->phba; IOCB_t *icmd; IOCB_t *oldcmd; struct lpfc_iocbq *elsiocb; @@ -2036,9 +2355,9 @@ lpfc_els_rsp_reject(struct lpfc_hba * phba, uint32_t rejectError, psli = &phba->sli; pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ - cmdsize = 2 * sizeof (uint32_t); - elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry, - ndlp, ndlp->nlp_DID, ELS_CMD_LS_RJT); + cmdsize = 2 * sizeof(uint32_t); + elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, + ndlp->nlp_DID, ELS_CMD_LS_RJT); if (!elsiocb) return 1; @@ -2048,22 +2367,30 @@ lpfc_els_rsp_reject(struct lpfc_hba * phba, uint32_t rejectError, pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT; - pcmd += sizeof (uint32_t); + pcmd += sizeof(uint32_t); *((uint32_t *) (pcmd)) = rejectError; + if (mbox) { + elsiocb->context_un.mbox = mbox; + elsiocb->context1 = lpfc_nlp_get(ndlp); + } + /* Xmit ELS RJT <err> response tag <ulpIoTag> */ lpfc_printf_log(phba, KERN_INFO, LOG_ELS, - "%d:0129 Xmit ELS RJT x%x response tag x%x xri x%x, " - "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", - phba->brd_no, rejectError, elsiocb->iotag, + "%d (%d):0129 Xmit ELS RJT x%x response tag x%x " + "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, " + "rpi x%x\n", + phba->brd_no, vport->vpi, rejectError, elsiocb->iotag, elsiocb->iocb.ulpContext, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, + "Issue LS_RJT: did:x%x flg:x%x err:x%x", + ndlp->nlp_DID, ndlp->nlp_flag, rejectError); + phba->fc_stat.elsXmitLSRJT++; - elsiocb->iocb_cmpl = lpfc_cmpl_els_acc; - spin_lock_irq(phba->host->host_lock); + elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); - spin_unlock_irq(phba->host->host_lock); if (rc == IOCB_ERROR) { lpfc_els_free_iocb(phba, elsiocb); return 1; @@ -2072,25 +2399,22 @@ lpfc_els_rsp_reject(struct lpfc_hba * phba, uint32_t rejectError, } int -lpfc_els_rsp_adisc_acc(struct lpfc_hba * phba, - struct lpfc_iocbq * oldiocb, struct lpfc_nodelist * ndlp) +lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, + struct lpfc_nodelist *ndlp) { + struct lpfc_hba *phba = vport->phba; + struct lpfc_sli *psli = &phba->sli; + struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING]; ADISC *ap; - IOCB_t *icmd; - IOCB_t *oldcmd; + IOCB_t *icmd, *oldcmd; struct lpfc_iocbq *elsiocb; - struct lpfc_sli_ring *pring; - struct lpfc_sli *psli; uint8_t *pcmd; uint16_t cmdsize; int rc; - psli = &phba->sli; - pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ - - cmdsize = sizeof (uint32_t) + sizeof (ADISC); - elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry, - ndlp, ndlp->nlp_DID, ELS_CMD_ACC); + cmdsize = sizeof(uint32_t) + sizeof(ADISC); + elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, + ndlp->nlp_DID, ELS_CMD_ACC); if (!elsiocb) return 1; @@ -2100,28 +2424,30 
@@ lpfc_els_rsp_adisc_acc(struct lpfc_hba * phba, /* Xmit ADISC ACC response tag <ulpIoTag> */ lpfc_printf_log(phba, KERN_INFO, LOG_ELS, - "%d:0130 Xmit ADISC ACC response iotag x%x xri: " + "%d (%d):0130 Xmit ADISC ACC response iotag x%x xri: " "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n", - phba->brd_no, elsiocb->iotag, + phba->brd_no, vport->vpi, elsiocb->iotag, elsiocb->iocb.ulpContext, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); *((uint32_t *) (pcmd)) = ELS_CMD_ACC; - pcmd += sizeof (uint32_t); + pcmd += sizeof(uint32_t); ap = (ADISC *) (pcmd); ap->hardAL_PA = phba->fc_pref_ALPA; - memcpy(&ap->portName, &phba->fc_portname, sizeof (struct lpfc_name)); - memcpy(&ap->nodeName, &phba->fc_nodename, sizeof (struct lpfc_name)); - ap->DID = be32_to_cpu(phba->fc_myDID); + memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name)); + memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); + ap->DID = be32_to_cpu(vport->fc_myDID); + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, + "Issue ACC ADISC: did:x%x flg:x%x", + ndlp->nlp_DID, ndlp->nlp_flag, 0); phba->fc_stat.elsXmitACC++; - elsiocb->iocb_cmpl = lpfc_cmpl_els_acc; - spin_lock_irq(phba->host->host_lock); + elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); - spin_unlock_irq(phba->host->host_lock); if (rc == IOCB_ERROR) { lpfc_els_free_iocb(phba, elsiocb); return 1; @@ -2130,9 +2456,10 @@ lpfc_els_rsp_adisc_acc(struct lpfc_hba * phba, } int -lpfc_els_rsp_prli_acc(struct lpfc_hba *phba, struct lpfc_iocbq *oldiocb, +lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) { + struct lpfc_hba *phba = vport->phba; PRLI *npr; lpfc_vpd_t *vpd; IOCB_t *icmd; @@ -2147,8 +2474,8 @@ lpfc_els_rsp_prli_acc(struct lpfc_hba *phba, struct lpfc_iocbq *oldiocb, psli = &phba->sli; pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ - cmdsize = sizeof (uint32_t) + sizeof (PRLI); - elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry, ndlp, + cmdsize = sizeof(uint32_t) + sizeof(PRLI); + elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, ndlp->nlp_DID, (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK))); if (!elsiocb) return 1; @@ -2159,19 +2486,19 @@ lpfc_els_rsp_prli_acc(struct lpfc_hba *phba, struct lpfc_iocbq *oldiocb, /* Xmit PRLI ACC response tag <ulpIoTag> */ lpfc_printf_log(phba, KERN_INFO, LOG_ELS, - "%d:0131 Xmit PRLI ACC response tag x%x xri x%x, " + "%d (%d):0131 Xmit PRLI ACC response tag x%x xri x%x, " "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", - phba->brd_no, elsiocb->iotag, + phba->brd_no, vport->vpi, elsiocb->iotag, elsiocb->iocb.ulpContext, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); *((uint32_t *) (pcmd)) = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)); - pcmd += sizeof (uint32_t); + pcmd += sizeof(uint32_t); /* For PRLI, remainder of payload is PRLI parameter page */ - memset(pcmd, 0, sizeof (PRLI)); + memset(pcmd, 0, sizeof(PRLI)); npr = (PRLI *) pcmd; vpd = &phba->vpd; @@ -2193,12 +2520,14 @@ lpfc_els_rsp_prli_acc(struct lpfc_hba *phba, struct lpfc_iocbq *oldiocb, npr->prliType = PRLI_FCP_TYPE; npr->initiatorFunc = 1; + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, + "Issue ACC PRLI: did:x%x flg:x%x", + ndlp->nlp_DID, ndlp->nlp_flag, 0); + phba->fc_stat.elsXmitACC++; - elsiocb->iocb_cmpl = 
lpfc_cmpl_els_acc; + elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; - spin_lock_irq(phba->host->host_lock); rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); - spin_unlock_irq(phba->host->host_lock); if (rc == IOCB_ERROR) { lpfc_els_free_iocb(phba, elsiocb); return 1; @@ -2207,12 +2536,12 @@ lpfc_els_rsp_prli_acc(struct lpfc_hba *phba, struct lpfc_iocbq *oldiocb, } static int -lpfc_els_rsp_rnid_acc(struct lpfc_hba *phba, uint8_t format, +lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format, struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) { + struct lpfc_hba *phba = vport->phba; RNID *rn; - IOCB_t *icmd; - IOCB_t *oldcmd; + IOCB_t *icmd, *oldcmd; struct lpfc_iocbq *elsiocb; struct lpfc_sli_ring *pring; struct lpfc_sli *psli; @@ -2223,13 +2552,13 @@ lpfc_els_rsp_rnid_acc(struct lpfc_hba *phba, uint8_t format, psli = &phba->sli; pring = &psli->ring[LPFC_ELS_RING]; - cmdsize = sizeof (uint32_t) + sizeof (uint32_t) - + (2 * sizeof (struct lpfc_name)); + cmdsize = sizeof(uint32_t) + sizeof(uint32_t) + + (2 * sizeof(struct lpfc_name)); if (format) - cmdsize += sizeof (RNID_TOP_DISC); + cmdsize += sizeof(RNID_TOP_DISC); - elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry, - ndlp, ndlp->nlp_DID, ELS_CMD_ACC); + elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, + ndlp->nlp_DID, ELS_CMD_ACC); if (!elsiocb) return 1; @@ -2239,30 +2568,30 @@ lpfc_els_rsp_rnid_acc(struct lpfc_hba *phba, uint8_t format, /* Xmit RNID ACC response tag <ulpIoTag> */ lpfc_printf_log(phba, KERN_INFO, LOG_ELS, - "%d:0132 Xmit RNID ACC response tag x%x " + "%d (%d):0132 Xmit RNID ACC response tag x%x " "xri x%x\n", - phba->brd_no, elsiocb->iotag, + phba->brd_no, vport->vpi, elsiocb->iotag, elsiocb->iocb.ulpContext); pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); *((uint32_t *) (pcmd)) = ELS_CMD_ACC; - pcmd += sizeof (uint32_t); + pcmd += sizeof(uint32_t); - memset(pcmd, 0, sizeof (RNID)); + memset(pcmd, 0, sizeof(RNID)); rn = (RNID *) (pcmd); rn->Format = format; - rn->CommonLen = (2 * sizeof (struct lpfc_name)); - memcpy(&rn->portName, &phba->fc_portname, sizeof (struct lpfc_name)); - memcpy(&rn->nodeName, &phba->fc_nodename, sizeof (struct lpfc_name)); + rn->CommonLen = (2 * sizeof(struct lpfc_name)); + memcpy(&rn->portName, &vport->fc_portname, sizeof(struct lpfc_name)); + memcpy(&rn->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); switch (format) { case 0: rn->SpecificLen = 0; break; case RNID_TOPOLOGY_DISC: - rn->SpecificLen = sizeof (RNID_TOP_DISC); + rn->SpecificLen = sizeof(RNID_TOP_DISC); memcpy(&rn->un.topologyDisc.portName, - &phba->fc_portname, sizeof (struct lpfc_name)); + &vport->fc_portname, sizeof(struct lpfc_name)); rn->un.topologyDisc.unitType = RNID_HBA; rn->un.topologyDisc.physPort = 0; rn->un.topologyDisc.attachedNodes = 0; @@ -2273,15 +2602,17 @@ lpfc_els_rsp_rnid_acc(struct lpfc_hba *phba, uint8_t format, break; } + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, + "Issue ACC RNID: did:x%x flg:x%x", + ndlp->nlp_DID, ndlp->nlp_flag, 0); + phba->fc_stat.elsXmitACC++; - elsiocb->iocb_cmpl = lpfc_cmpl_els_acc; + elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; lpfc_nlp_put(ndlp); elsiocb->context1 = NULL; /* Don't need ndlp for cmpl, * it could be freed */ - spin_lock_irq(phba->host->host_lock); rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); - spin_unlock_irq(phba->host->host_lock); if (rc == IOCB_ERROR) { lpfc_els_free_iocb(phba, elsiocb); return 1; @@ -2290,168 +2621,153 @@ lpfc_els_rsp_rnid_acc(struct lpfc_hba *phba, uint8_t 
format, } int -lpfc_els_disc_adisc(struct lpfc_hba *phba) +lpfc_els_disc_adisc(struct lpfc_vport *vport) { - int sentadisc; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_nodelist *ndlp, *next_ndlp; + int sentadisc = 0; - sentadisc = 0; /* go thru NPR nodes and issue any remaining ELS ADISCs */ - list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes, nlp_listp) { + list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { if (ndlp->nlp_state == NLP_STE_NPR_NODE && (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 && (ndlp->nlp_flag & NLP_NPR_ADISC) != 0) { - spin_lock_irq(phba->host->host_lock); + spin_lock_irq(shost->host_lock); ndlp->nlp_flag &= ~NLP_NPR_ADISC; - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(shost->host_lock); ndlp->nlp_prev_state = ndlp->nlp_state; - lpfc_nlp_set_state(phba, ndlp, NLP_STE_ADISC_ISSUE); - lpfc_issue_els_adisc(phba, ndlp, 0); + lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); + lpfc_issue_els_adisc(vport, ndlp, 0); sentadisc++; - phba->num_disc_nodes++; - if (phba->num_disc_nodes >= - phba->cfg_discovery_threads) { - spin_lock_irq(phba->host->host_lock); - phba->fc_flag |= FC_NLP_MORE; - spin_unlock_irq(phba->host->host_lock); + vport->num_disc_nodes++; + if (vport->num_disc_nodes >= + vport->phba->cfg_discovery_threads) { + spin_lock_irq(shost->host_lock); + vport->fc_flag |= FC_NLP_MORE; + spin_unlock_irq(shost->host_lock); break; } } } if (sentadisc == 0) { - spin_lock_irq(phba->host->host_lock); - phba->fc_flag &= ~FC_NLP_MORE; - spin_unlock_irq(phba->host->host_lock); + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~FC_NLP_MORE; + spin_unlock_irq(shost->host_lock); } return sentadisc; } int -lpfc_els_disc_plogi(struct lpfc_hba * phba) +lpfc_els_disc_plogi(struct lpfc_vport *vport) { - int sentplogi; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_nodelist *ndlp, *next_ndlp; + int sentplogi = 0; - sentplogi = 0; - /* go thru NPR list and issue any remaining ELS PLOGIs */ - list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes, nlp_listp) { + /* go thru NPR nodes and issue any remaining ELS PLOGIs */ + list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { if (ndlp->nlp_state == NLP_STE_NPR_NODE && (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 && (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 && (ndlp->nlp_flag & NLP_NPR_ADISC) == 0) { ndlp->nlp_prev_state = ndlp->nlp_state; - lpfc_nlp_set_state(phba, ndlp, NLP_STE_PLOGI_ISSUE); - lpfc_issue_els_plogi(phba, ndlp->nlp_DID, 0); + lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); + lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); sentplogi++; - phba->num_disc_nodes++; - if (phba->num_disc_nodes >= - phba->cfg_discovery_threads) { - spin_lock_irq(phba->host->host_lock); - phba->fc_flag |= FC_NLP_MORE; - spin_unlock_irq(phba->host->host_lock); + vport->num_disc_nodes++; + if (vport->num_disc_nodes >= + vport->phba->cfg_discovery_threads) { + spin_lock_irq(shost->host_lock); + vport->fc_flag |= FC_NLP_MORE; + spin_unlock_irq(shost->host_lock); break; } } } if (sentplogi == 0) { - spin_lock_irq(phba->host->host_lock); - phba->fc_flag &= ~FC_NLP_MORE; - spin_unlock_irq(phba->host->host_lock); + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~FC_NLP_MORE; + spin_unlock_irq(shost->host_lock); } return sentplogi; } -int -lpfc_els_flush_rscn(struct lpfc_hba * phba) +void +lpfc_els_flush_rscn(struct lpfc_vport *vport) { - struct lpfc_dmabuf *mp; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_hba 
*phba = vport->phba; int i; - for (i = 0; i < phba->fc_rscn_id_cnt; i++) { - mp = phba->fc_rscn_id_list[i]; - lpfc_mbuf_free(phba, mp->virt, mp->phys); - kfree(mp); - phba->fc_rscn_id_list[i] = NULL; - } - phba->fc_rscn_id_cnt = 0; - spin_lock_irq(phba->host->host_lock); - phba->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY); - spin_unlock_irq(phba->host->host_lock); - lpfc_can_disctmo(phba); - return 0; + for (i = 0; i < vport->fc_rscn_id_cnt; i++) { + lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]); + vport->fc_rscn_id_list[i] = NULL; + } + spin_lock_irq(shost->host_lock); + vport->fc_rscn_id_cnt = 0; + vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY); + spin_unlock_irq(shost->host_lock); + lpfc_can_disctmo(vport); } int -lpfc_rscn_payload_check(struct lpfc_hba * phba, uint32_t did) +lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did) { D_ID ns_did; D_ID rscn_did; - struct lpfc_dmabuf *mp; uint32_t *lp; - uint32_t payload_len, cmd, i, match; + uint32_t payload_len, i; + struct lpfc_hba *phba = vport->phba; ns_did.un.word = did; - match = 0; /* Never match fabric nodes for RSCNs */ if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) - return(0); + return 0; /* If we are doing a FULL RSCN rediscovery, match everything */ - if (phba->fc_flag & FC_RSCN_DISCOVERY) { + if (vport->fc_flag & FC_RSCN_DISCOVERY) return did; - } - for (i = 0; i < phba->fc_rscn_id_cnt; i++) { - mp = phba->fc_rscn_id_list[i]; - lp = (uint32_t *) mp->virt; - cmd = *lp++; - payload_len = be32_to_cpu(cmd) & 0xffff; /* payload length */ - payload_len -= sizeof (uint32_t); /* take off word 0 */ + for (i = 0; i < vport->fc_rscn_id_cnt; i++) { + lp = vport->fc_rscn_id_list[i]->virt; + payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK); + payload_len -= sizeof(uint32_t); /* take off word 0 */ while (payload_len) { - rscn_did.un.word = *lp++; - rscn_did.un.word = be32_to_cpu(rscn_did.un.word); - payload_len -= sizeof (uint32_t); + rscn_did.un.word = be32_to_cpu(*lp++); + payload_len -= sizeof(uint32_t); switch (rscn_did.un.b.resv) { case 0: /* Single N_Port ID effected */ - if (ns_did.un.word == rscn_did.un.word) { - match = did; - } + if (ns_did.un.word == rscn_did.un.word) + return did; break; case 1: /* Whole N_Port Area effected */ if ((ns_did.un.b.domain == rscn_did.un.b.domain) && (ns_did.un.b.area == rscn_did.un.b.area)) - { - match = did; - } + return did; break; case 2: /* Whole N_Port Domain effected */ if (ns_did.un.b.domain == rscn_did.un.b.domain) - { - match = did; - } - break; - case 3: /* Whole Fabric effected */ - match = did; + return did; break; default: - /* Unknown Identifier in RSCN list */ + /* Unknown Identifier in RSCN node */ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, - "%d:0217 Unknown Identifier in " - "RSCN payload Data: x%x\n", - phba->brd_no, rscn_did.un.word); - break; - } - if (match) { - break; + "%d (%d):0217 Unknown " + "Identifier in RSCN payload " + "Data: x%x\n", + phba->brd_no, vport->vpi, + rscn_did.un.word); + case 3: /* Whole Fabric effected */ + return did; } } } - return match; + return 0; } static int -lpfc_rscn_recovery_check(struct lpfc_hba *phba) +lpfc_rscn_recovery_check(struct lpfc_vport *vport) { struct lpfc_nodelist *ndlp = NULL; @@ -2459,188 +2775,261 @@ lpfc_rscn_recovery_check(struct lpfc_hba *phba) * them to NPR state. 
*/ - list_for_each_entry(ndlp, &phba->fc_nodes, nlp_listp) { + list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { if (ndlp->nlp_state == NLP_STE_UNUSED_NODE || - lpfc_rscn_payload_check(phba, ndlp->nlp_DID) == 0) + lpfc_rscn_payload_check(vport, ndlp->nlp_DID) == 0) continue; - lpfc_disc_state_machine(phba, ndlp, NULL, - NLP_EVT_DEVICE_RECOVERY); + lpfc_disc_state_machine(vport, ndlp, NULL, + NLP_EVT_DEVICE_RECOVERY); /* * Make sure NLP_DELAY_TMO is NOT running after a device * recovery event. */ if (ndlp->nlp_flag & NLP_DELAY_TMO) - lpfc_cancel_retry_delay_tmo(phba, ndlp); + lpfc_cancel_retry_delay_tmo(vport, ndlp); } return 0; } static int -lpfc_els_rcv_rscn(struct lpfc_hba * phba, - struct lpfc_iocbq * cmdiocb, - struct lpfc_nodelist * ndlp, uint8_t newnode) +lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, + struct lpfc_nodelist *ndlp, uint8_t newnode) { + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_hba *phba = vport->phba; struct lpfc_dmabuf *pcmd; - uint32_t *lp; + struct lpfc_vport *next_vport; + uint32_t *lp, *datap; IOCB_t *icmd; - uint32_t payload_len, cmd; + uint32_t payload_len, length, nportid, *cmd; + int rscn_cnt = vport->fc_rscn_id_cnt; + int rscn_id = 0, hba_id = 0; int i; icmd = &cmdiocb->iocb; pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; lp = (uint32_t *) pcmd->virt; - cmd = *lp++; - payload_len = be32_to_cpu(cmd) & 0xffff; /* payload length */ - payload_len -= sizeof (uint32_t); /* take off word 0 */ - cmd &= ELS_CMD_MASK; + payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK); + payload_len -= sizeof(uint32_t); /* take off word 0 */ /* RSCN received */ - lpfc_printf_log(phba, - KERN_INFO, - LOG_DISCOVERY, - "%d:0214 RSCN received Data: x%x x%x x%x x%x\n", - phba->brd_no, - phba->fc_flag, payload_len, *lp, phba->fc_rscn_id_cnt); + lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, + "%d (%d):0214 RSCN received Data: x%x x%x x%x x%x\n", + phba->brd_no, vport->vpi, vport->fc_flag, payload_len, + *lp, rscn_cnt); for (i = 0; i < payload_len/sizeof(uint32_t); i++) - fc_host_post_event(phba->host, fc_get_event_number(), + fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_RSCN, lp[i]); /* If we are about to begin discovery, just ACC the RSCN. * Discovery processing will satisfy it. */ - if (phba->hba_state <= LPFC_NS_QRY) { - lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, - newnode); + if (vport->port_state <= LPFC_NS_QRY) { + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, + "RCV RSCN ignore: did:x%x/ste:x%x flg:x%x", + ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); + + lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL, + newnode); return 0; } + /* If this RSCN just contains NPortIDs for other vports on this HBA, + * just ACC and ignore it. 
+ */ + if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && + !(phba->cfg_peer_port_login)) { + i = payload_len; + datap = lp; + while (i > 0) { + nportid = *datap++; + nportid = ((be32_to_cpu(nportid)) & Mask_DID); + i -= sizeof(uint32_t); + rscn_id++; + list_for_each_entry(next_vport, &phba->port_list, + listentry) { + if (nportid == next_vport->fc_myDID) { + hba_id++; + break; + } + } + } + if (rscn_id == hba_id) { + /* ALL NPortIDs in RSCN are on HBA */ + lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, + "%d (%d):0214 Ignore RSCN Data: x%x x%x x%x x%x\n", + phba->brd_no, vport->vpi, vport->fc_flag, payload_len, + *lp, rscn_cnt); + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, + "RCV RSCN vport: did:x%x/ste:x%x flg:x%x", + ndlp->nlp_DID, vport->port_state, + ndlp->nlp_flag); + + lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, + ndlp, NULL, newnode); + return 0; + } + } + /* If we are already processing an RSCN, save the received * RSCN payload buffer, cmdiocb->context2 to process later. */ - if (phba->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) { - if ((phba->fc_rscn_id_cnt < FC_MAX_HOLD_RSCN) && - !(phba->fc_flag & FC_RSCN_DISCOVERY)) { - spin_lock_irq(phba->host->host_lock); - phba->fc_flag |= FC_RSCN_MODE; - spin_unlock_irq(phba->host->host_lock); - phba->fc_rscn_id_list[phba->fc_rscn_id_cnt++] = pcmd; - - /* If we zero, cmdiocb->context2, the calling - * routine will not try to free it. - */ - cmdiocb->context2 = NULL; + if (vport->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) { + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, + "RCV RSCN defer: did:x%x/ste:x%x flg:x%x", + ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); + + vport->fc_flag |= FC_RSCN_DEFERRED; + if ((rscn_cnt < FC_MAX_HOLD_RSCN) && + !(vport->fc_flag & FC_RSCN_DISCOVERY)) { + spin_lock_irq(shost->host_lock); + vport->fc_flag |= FC_RSCN_MODE; + spin_unlock_irq(shost->host_lock); + if (rscn_cnt) { + cmd = vport->fc_rscn_id_list[rscn_cnt-1]->virt; + length = be32_to_cpu(*cmd & ~ELS_CMD_MASK); + } + if ((rscn_cnt) && + (payload_len + length <= LPFC_BPL_SIZE)) { + *cmd &= ELS_CMD_MASK; + *cmd |= be32_to_cpu(payload_len + length); + memcpy(((uint8_t *)cmd) + length, lp, + payload_len); + } else { + vport->fc_rscn_id_list[rscn_cnt] = pcmd; + vport->fc_rscn_id_cnt++; + /* If we zero, cmdiocb->context2, the calling + * routine will not try to free it. 
+ */ + cmdiocb->context2 = NULL; + } /* Deferred RSCN */ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, - "%d:0235 Deferred RSCN " + "%d (%d):0235 Deferred RSCN " "Data: x%x x%x x%x\n", - phba->brd_no, phba->fc_rscn_id_cnt, - phba->fc_flag, phba->hba_state); + phba->brd_no, vport->vpi, + vport->fc_rscn_id_cnt, vport->fc_flag, + vport->port_state); } else { - spin_lock_irq(phba->host->host_lock); - phba->fc_flag |= FC_RSCN_DISCOVERY; - spin_unlock_irq(phba->host->host_lock); + spin_lock_irq(shost->host_lock); + vport->fc_flag |= FC_RSCN_DISCOVERY; + spin_unlock_irq(shost->host_lock); /* ReDiscovery RSCN */ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, - "%d:0234 ReDiscovery RSCN " + "%d (%d):0234 ReDiscovery RSCN " "Data: x%x x%x x%x\n", - phba->brd_no, phba->fc_rscn_id_cnt, - phba->fc_flag, phba->hba_state); + phba->brd_no, vport->vpi, + vport->fc_rscn_id_cnt, vport->fc_flag, + vport->port_state); } /* Send back ACC */ - lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, + lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL, newnode); /* send RECOVERY event for ALL nodes that match RSCN payload */ - lpfc_rscn_recovery_check(phba); + lpfc_rscn_recovery_check(vport); + vport->fc_flag &= ~FC_RSCN_DEFERRED; return 0; } - phba->fc_flag |= FC_RSCN_MODE; - phba->fc_rscn_id_list[phba->fc_rscn_id_cnt++] = pcmd; + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, + "RCV RSCN: did:x%x/ste:x%x flg:x%x", + ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); + + spin_lock_irq(shost->host_lock); + vport->fc_flag |= FC_RSCN_MODE; + spin_unlock_irq(shost->host_lock); + vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd; /* * If we zero, cmdiocb->context2, the calling routine will * not try to free it. */ cmdiocb->context2 = NULL; - lpfc_set_disctmo(phba); + lpfc_set_disctmo(vport); /* Send back ACC */ - lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, newnode); + lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL, newnode); /* send RECOVERY event for ALL nodes that match RSCN payload */ - lpfc_rscn_recovery_check(phba); + lpfc_rscn_recovery_check(vport); - return lpfc_els_handle_rscn(phba); + return lpfc_els_handle_rscn(vport); } int -lpfc_els_handle_rscn(struct lpfc_hba * phba) +lpfc_els_handle_rscn(struct lpfc_vport *vport) { struct lpfc_nodelist *ndlp; + struct lpfc_hba *phba = vport->phba; + + /* Ignore RSCN if the port is being torn down. 
*/ + if (vport->load_flag & FC_UNLOADING) { + lpfc_els_flush_rscn(vport); + return 0; + } /* Start timer for RSCN processing */ - lpfc_set_disctmo(phba); + lpfc_set_disctmo(vport); /* RSCN processed */ - lpfc_printf_log(phba, - KERN_INFO, - LOG_DISCOVERY, - "%d:0215 RSCN processed Data: x%x x%x x%x x%x\n", - phba->brd_no, - phba->fc_flag, 0, phba->fc_rscn_id_cnt, - phba->hba_state); + lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, + "%d (%d):0215 RSCN processed Data: x%x x%x x%x x%x\n", + phba->brd_no, vport->vpi, + vport->fc_flag, 0, vport->fc_rscn_id_cnt, + vport->port_state); /* To process RSCN, first compare RSCN data with NameServer */ - phba->fc_ns_retry = 0; - ndlp = lpfc_findnode_did(phba, NameServer_DID); + vport->fc_ns_retry = 0; + ndlp = lpfc_findnode_did(vport, NameServer_DID); if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { /* Good ndlp, issue CT Request to NameServer */ - if (lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT) == 0) { + if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0) == 0) /* Wait for NameServer query cmpl before we can continue */ return 1; - } } else { /* If login to NameServer does not exist, issue one */ /* Good status, issue PLOGI to NameServer */ - ndlp = lpfc_findnode_did(phba, NameServer_DID); - if (ndlp) { + ndlp = lpfc_findnode_did(vport, NameServer_DID); + if (ndlp) /* Wait for NameServer login cmpl before we can continue */ return 1; - } + ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); if (!ndlp) { - lpfc_els_flush_rscn(phba); + lpfc_els_flush_rscn(vport); return 0; } else { - lpfc_nlp_init(phba, ndlp, NameServer_DID); + lpfc_nlp_init(vport, ndlp, NameServer_DID); ndlp->nlp_type |= NLP_FABRIC; ndlp->nlp_prev_state = ndlp->nlp_state; - lpfc_nlp_set_state(phba, ndlp, NLP_STE_PLOGI_ISSUE); - lpfc_issue_els_plogi(phba, NameServer_DID, 0); + lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); + lpfc_issue_els_plogi(vport, NameServer_DID, 0); /* Wait for NameServer login cmpl before we can continue */ return 1; } } - lpfc_els_flush_rscn(phba); + lpfc_els_flush_rscn(vport); return 0; } static int -lpfc_els_rcv_flogi(struct lpfc_hba * phba, - struct lpfc_iocbq * cmdiocb, - struct lpfc_nodelist * ndlp, uint8_t newnode) +lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, + struct lpfc_nodelist *ndlp, uint8_t newnode) { + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_hba *phba = vport->phba; struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; uint32_t *lp = (uint32_t *) pcmd->virt; IOCB_t *icmd = &cmdiocb->iocb; @@ -2655,7 +3044,7 @@ lpfc_els_rcv_flogi(struct lpfc_hba * phba, /* FLOGI received */ - lpfc_set_disctmo(phba); + lpfc_set_disctmo(vport); if (phba->fc_topology == TOPOLOGY_LOOP) { /* We should never receive a FLOGI in loop mode, ignore it */ @@ -2664,33 +3053,34 @@ lpfc_els_rcv_flogi(struct lpfc_hba * phba, /* An FLOGI ELS command <elsCmd> was received from DID <did> in Loop Mode */ lpfc_printf_log(phba, KERN_ERR, LOG_ELS, - "%d:0113 An FLOGI ELS command x%x was received " - "from DID x%x in Loop Mode\n", - phba->brd_no, cmd, did); + "%d (%d):0113 An FLOGI ELS command x%x was " + "received from DID x%x in Loop Mode\n", + phba->brd_no, vport->vpi, cmd, did); return 1; } did = Fabric_DID; - if ((lpfc_check_sparm(phba, ndlp, sp, CLASS3))) { + if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3))) { /* For a FLOGI we accept, then if our portname is greater * then the remote portname we initiate Nport login. 
*/ - rc = memcmp(&phba->fc_portname, &sp->portName, - sizeof (struct lpfc_name)); + rc = memcmp(&vport->fc_portname, &sp->portName, + sizeof(struct lpfc_name)); if (!rc) { - if ((mbox = mempool_alloc(phba->mbox_mem_pool, - GFP_KERNEL)) == 0) { + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) return 1; - } + lpfc_linkdown(phba); lpfc_init_link(phba, mbox, phba->cfg_topology, phba->cfg_link_speed); mbox->mb.un.varInitLnk.lipsr_AL_PA = 0; mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + mbox->vport = vport; rc = lpfc_sli_issue_mbox (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB)); lpfc_set_loopback_flag(phba); @@ -2699,31 +3089,34 @@ lpfc_els_rcv_flogi(struct lpfc_hba * phba, } return 1; } else if (rc > 0) { /* greater than */ - spin_lock_irq(phba->host->host_lock); - phba->fc_flag |= FC_PT2PT_PLOGI; - spin_unlock_irq(phba->host->host_lock); + spin_lock_irq(shost->host_lock); + vport->fc_flag |= FC_PT2PT_PLOGI; + spin_unlock_irq(shost->host_lock); } - phba->fc_flag |= FC_PT2PT; - phba->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); + spin_lock_irq(shost->host_lock); + vport->fc_flag |= FC_PT2PT; + vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); + spin_unlock_irq(shost->host_lock); } else { /* Reject this request because invalid parameters */ stat.un.b.lsRjtRsvd0 = 0; stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS; stat.un.b.vendorUnique = 0; - lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp); + lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, + NULL); return 1; } /* Send back ACC */ - lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL, newnode); + lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL, newnode); return 0; } static int -lpfc_els_rcv_rnid(struct lpfc_hba * phba, - struct lpfc_iocbq * cmdiocb, struct lpfc_nodelist * ndlp) +lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, + struct lpfc_nodelist *ndlp) { struct lpfc_dmabuf *pcmd; uint32_t *lp; @@ -2746,7 +3139,7 @@ lpfc_els_rcv_rnid(struct lpfc_hba * phba, case 0: case RNID_TOPOLOGY_DISC: /* Send back ACC */ - lpfc_els_rsp_rnid_acc(phba, rn->Format, cmdiocb, ndlp); + lpfc_els_rsp_rnid_acc(vport, rn->Format, cmdiocb, ndlp); break; default: /* Reject this request because format not supported */ @@ -2754,14 +3147,15 @@ lpfc_els_rcv_rnid(struct lpfc_hba * phba, stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; stat.un.b.vendorUnique = 0; - lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp); + lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, + NULL); } return 0; } static int -lpfc_els_rcv_lirr(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, - struct lpfc_nodelist *ndlp) +lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, + struct lpfc_nodelist *ndlp) { struct ls_rjt stat; @@ -2770,15 +3164,15 @@ lpfc_els_rcv_lirr(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; stat.un.b.vendorUnique = 0; - lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp); + lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); return 0; } static void lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { - struct lpfc_sli *psli; - struct lpfc_sli_ring *pring; + struct lpfc_sli *psli = &phba->sli; + struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING]; MAILBOX_t *mb; IOCB_t *icmd; RPS_RSP *rps_rsp; @@ -2788,8 +3182,6 @@ 
lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) uint16_t xri, status; uint32_t cmdsize; - psli = &phba->sli; - pring = &psli->ring[LPFC_ELS_RING]; mb = &pmb->mb; ndlp = (struct lpfc_nodelist *) pmb->context2; @@ -2804,8 +3196,9 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) cmdsize = sizeof(RPS_RSP) + sizeof(uint32_t); mempool_free(pmb, phba->mbox_mem_pool); - elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, lpfc_max_els_tries, ndlp, - ndlp->nlp_DID, ELS_CMD_ACC); + elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, + lpfc_max_els_tries, ndlp, + ndlp->nlp_DID, ELS_CMD_ACC); lpfc_nlp_put(ndlp); if (!elsiocb) return; @@ -2815,14 +3208,14 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); *((uint32_t *) (pcmd)) = ELS_CMD_ACC; - pcmd += sizeof (uint32_t); /* Skip past command */ + pcmd += sizeof(uint32_t); /* Skip past command */ rps_rsp = (RPS_RSP *)pcmd; if (phba->fc_topology != TOPOLOGY_LOOP) status = 0x10; else status = 0x8; - if (phba->fc_flag & FC_FABRIC) + if (phba->pport->fc_flag & FC_FABRIC) status |= 0x4; rps_rsp->rsvd1 = 0; @@ -2836,25 +3229,25 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) /* Xmit ELS RPS ACC response tag <ulpIoTag> */ lpfc_printf_log(phba, KERN_INFO, LOG_ELS, - "%d:0118 Xmit ELS RPS ACC response tag x%x xri x%x, " - "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", - phba->brd_no, elsiocb->iotag, + "%d (%d):0118 Xmit ELS RPS ACC response tag x%x " + "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, " + "rpi x%x\n", + phba->brd_no, ndlp->vport->vpi, elsiocb->iotag, elsiocb->iocb.ulpContext, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); - elsiocb->iocb_cmpl = lpfc_cmpl_els_acc; + elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; phba->fc_stat.elsXmitACC++; - - if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { + if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) lpfc_els_free_iocb(phba, elsiocb); - } return; } static int -lpfc_els_rcv_rps(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, - struct lpfc_nodelist * ndlp) +lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, + struct lpfc_nodelist *ndlp) { + struct lpfc_hba *phba = vport->phba; uint32_t *lp; uint8_t flag; LPFC_MBOXQ_t *mbox; @@ -2868,7 +3261,8 @@ lpfc_els_rcv_rps(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; stat.un.b.vendorUnique = 0; - lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp); + lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, + NULL); } pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; @@ -2878,19 +3272,24 @@ lpfc_els_rcv_rps(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, if ((flag == 0) || ((flag == 1) && (be32_to_cpu(rps->un.portNum) == 0)) || - ((flag == 2) && (memcmp(&rps->un.portName, &phba->fc_portname, - sizeof (struct lpfc_name)) == 0))) { - if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC))) { + ((flag == 2) && (memcmp(&rps->un.portName, &vport->fc_portname, + sizeof(struct lpfc_name)) == 0))) { + + printk("Fix me....\n"); + dump_stack(); + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC); + if (mbox) { lpfc_read_lnk_stat(phba, mbox); mbox->context1 = - (void *)((unsigned long)cmdiocb->iocb.ulpContext); + (void *)((unsigned long) cmdiocb->iocb.ulpContext); mbox->context2 = lpfc_nlp_get(ndlp); + mbox->vport = vport; mbox->mbox_cmpl = 
lpfc_els_rsp_rps_acc; if (lpfc_sli_issue_mbox (phba, mbox, - (MBX_NOWAIT | MBX_STOP_IOCB)) != MBX_NOT_FINISHED) { + (MBX_NOWAIT | MBX_STOP_IOCB)) != MBX_NOT_FINISHED) /* Mbox completion will send ELS Response */ return 0; - } + lpfc_nlp_put(ndlp); mempool_free(mbox, phba->mbox_mem_pool); } @@ -2899,27 +3298,25 @@ lpfc_els_rcv_rps(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; stat.un.b.vendorUnique = 0; - lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp); + lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); return 0; } static int -lpfc_els_rsp_rpl_acc(struct lpfc_hba * phba, uint16_t cmdsize, - struct lpfc_iocbq * oldiocb, struct lpfc_nodelist * ndlp) +lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize, + struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) { - IOCB_t *icmd; - IOCB_t *oldcmd; + struct lpfc_hba *phba = vport->phba; + IOCB_t *icmd, *oldcmd; RPL_RSP rpl_rsp; struct lpfc_iocbq *elsiocb; - struct lpfc_sli_ring *pring; - struct lpfc_sli *psli; + struct lpfc_sli *psli = &phba->sli; + struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING]; uint8_t *pcmd; - psli = &phba->sli; - pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ + elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, + ndlp->nlp_DID, ELS_CMD_ACC); - elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry, - ndlp, ndlp->nlp_DID, ELS_CMD_ACC); if (!elsiocb) return 1; @@ -2929,7 +3326,7 @@ lpfc_els_rsp_rpl_acc(struct lpfc_hba * phba, uint16_t cmdsize, pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); *((uint32_t *) (pcmd)) = ELS_CMD_ACC; - pcmd += sizeof (uint16_t); + pcmd += sizeof(uint16_t); *((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize); pcmd += sizeof(uint16_t); @@ -2937,8 +3334,8 @@ lpfc_els_rsp_rpl_acc(struct lpfc_hba * phba, uint16_t cmdsize, rpl_rsp.listLen = be32_to_cpu(1); rpl_rsp.index = 0; rpl_rsp.port_num_blk.portNum = 0; - rpl_rsp.port_num_blk.portID = be32_to_cpu(phba->fc_myDID); - memcpy(&rpl_rsp.port_num_blk.portName, &phba->fc_portname, + rpl_rsp.port_num_blk.portID = be32_to_cpu(vport->fc_myDID); + memcpy(&rpl_rsp.port_num_blk.portName, &vport->fc_portname, sizeof(struct lpfc_name)); memcpy(pcmd, &rpl_rsp, cmdsize - sizeof(uint32_t)); @@ -2946,13 +3343,14 @@ lpfc_els_rsp_rpl_acc(struct lpfc_hba * phba, uint16_t cmdsize, /* Xmit ELS RPL ACC response tag <ulpIoTag> */ lpfc_printf_log(phba, KERN_INFO, LOG_ELS, - "%d:0120 Xmit ELS RPL ACC response tag x%x xri x%x, " - "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", - phba->brd_no, elsiocb->iotag, + "%d (%d):0120 Xmit ELS RPL ACC response tag x%x " + "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, " + "rpi x%x\n", + phba->brd_no, vport->vpi, elsiocb->iotag, elsiocb->iocb.ulpContext, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); - elsiocb->iocb_cmpl = lpfc_cmpl_els_acc; + elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; phba->fc_stat.elsXmitACC++; if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { @@ -2963,8 +3361,8 @@ lpfc_els_rsp_rpl_acc(struct lpfc_hba * phba, uint16_t cmdsize, } static int -lpfc_els_rcv_rpl(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, - struct lpfc_nodelist * ndlp) +lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, + struct lpfc_nodelist *ndlp) { struct lpfc_dmabuf *pcmd; uint32_t *lp; @@ -2979,7 +3377,8 @@ lpfc_els_rcv_rpl(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, stat.un.b.lsRjtRsnCode = 
LSRJT_UNABLE_TPC; stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; stat.un.b.vendorUnique = 0; - lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp); + lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, + NULL); } pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; @@ -2996,15 +3395,16 @@ lpfc_els_rcv_rpl(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, } else { cmdsize = sizeof(uint32_t) + maxsize * sizeof(uint32_t); } - lpfc_els_rsp_rpl_acc(phba, cmdsize, cmdiocb, ndlp); + lpfc_els_rsp_rpl_acc(vport, cmdsize, cmdiocb, ndlp); return 0; } static int -lpfc_els_rcv_farp(struct lpfc_hba * phba, - struct lpfc_iocbq * cmdiocb, struct lpfc_nodelist * ndlp) +lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, + struct lpfc_nodelist *ndlp) { + struct lpfc_hba *phba = vport->phba; struct lpfc_dmabuf *pcmd; uint32_t *lp; IOCB_t *icmd; @@ -3020,11 +3420,9 @@ lpfc_els_rcv_farp(struct lpfc_hba * phba, fp = (FARP *) lp; /* FARP-REQ received from DID <did> */ - lpfc_printf_log(phba, - KERN_INFO, - LOG_ELS, - "%d:0601 FARP-REQ received from DID x%x\n", - phba->brd_no, did); + lpfc_printf_log(phba, KERN_INFO, LOG_ELS, + "%d (%d):0601 FARP-REQ received from DID x%x\n", + phba->brd_no, vport->vpi, did); /* We will only support match on WWPN or WWNN */ if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) { @@ -3034,15 +3432,15 @@ lpfc_els_rcv_farp(struct lpfc_hba * phba, cnt = 0; /* If this FARP command is searching for my portname */ if (fp->Mflags & FARP_MATCH_PORT) { - if (memcmp(&fp->RportName, &phba->fc_portname, - sizeof (struct lpfc_name)) == 0) + if (memcmp(&fp->RportName, &vport->fc_portname, + sizeof(struct lpfc_name)) == 0) cnt = 1; } /* If this FARP command is searching for my nodename */ if (fp->Mflags & FARP_MATCH_NODE) { - if (memcmp(&fp->RnodeName, &phba->fc_nodename, - sizeof (struct lpfc_name)) == 0) + if (memcmp(&fp->RnodeName, &vport->fc_nodename, + sizeof(struct lpfc_name)) == 0) cnt = 1; } @@ -3052,28 +3450,28 @@ lpfc_els_rcv_farp(struct lpfc_hba * phba, /* Log back into the node before sending the FARP. 
*/ if (fp->Rflags & FARP_REQUEST_PLOGI) { ndlp->nlp_prev_state = ndlp->nlp_state; - lpfc_nlp_set_state(phba, ndlp, + lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); - lpfc_issue_els_plogi(phba, ndlp->nlp_DID, 0); + lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); } /* Send a FARP response to that node */ - if (fp->Rflags & FARP_REQUEST_FARPR) { - lpfc_issue_els_farpr(phba, did, 0); - } + if (fp->Rflags & FARP_REQUEST_FARPR) + lpfc_issue_els_farpr(vport, did, 0); } } return 0; } static int -lpfc_els_rcv_farpr(struct lpfc_hba * phba, - struct lpfc_iocbq * cmdiocb, struct lpfc_nodelist * ndlp) +lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, + struct lpfc_nodelist *ndlp) { struct lpfc_dmabuf *pcmd; uint32_t *lp; IOCB_t *icmd; uint32_t cmd, did; + struct lpfc_hba *phba = vport->phba; icmd = &cmdiocb->iocb; did = icmd->un.elsreq64.remoteID; @@ -3082,21 +3480,18 @@ lpfc_els_rcv_farpr(struct lpfc_hba * phba, cmd = *lp++; /* FARP-RSP received from DID <did> */ - lpfc_printf_log(phba, - KERN_INFO, - LOG_ELS, - "%d:0600 FARP-RSP received from DID x%x\n", - phba->brd_no, did); - + lpfc_printf_log(phba, KERN_INFO, LOG_ELS, + "%d (%d):0600 FARP-RSP received from DID x%x\n", + phba->brd_no, vport->vpi, did); /* ACCEPT the Farp resp request */ - lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0); + lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0); return 0; } static int -lpfc_els_rcv_fan(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, - struct lpfc_nodelist * fan_ndlp) +lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, + struct lpfc_nodelist *fan_ndlp) { struct lpfc_dmabuf *pcmd; uint32_t *lp; @@ -3104,10 +3499,12 @@ lpfc_els_rcv_fan(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, uint32_t cmd, did; FAN *fp; struct lpfc_nodelist *ndlp, *next_ndlp; + struct lpfc_hba *phba = vport->phba; /* FAN received */ - lpfc_printf_log(phba, KERN_INFO, LOG_ELS, "%d:0265 FAN received\n", - phba->brd_no); + lpfc_printf_log(phba, KERN_INFO, LOG_ELS, + "%d (%d):0265 FAN received\n", + phba->brd_no, vport->vpi); icmd = &cmdiocb->iocb; did = icmd->un.elsreq64.remoteID; @@ -3115,11 +3512,11 @@ lpfc_els_rcv_fan(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, lp = (uint32_t *)pcmd->virt; cmd = *lp++; - fp = (FAN *)lp; + fp = (FAN *) lp; /* FAN received; Fan does not have a reply sequence */ - if (phba->hba_state == LPFC_LOCAL_CFG_LINK) { + if (phba->pport->port_state == LPFC_LOCAL_CFG_LINK) { if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName, sizeof(struct lpfc_name)) != 0) || (memcmp(&phba->fc_fabparam.portName, &fp->FportName, @@ -3130,7 +3527,7 @@ lpfc_els_rcv_fan(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, */ list_for_each_entry_safe(ndlp, next_ndlp, - &phba->fc_nodes, nlp_listp) { + &vport->fc_nodes, nlp_listp) { if (ndlp->nlp_state != NLP_STE_NPR_NODE) continue; if (ndlp->nlp_type & NLP_FABRIC) { @@ -3138,24 +3535,24 @@ lpfc_els_rcv_fan(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, * Clean up old Fabric, Nameserver and * other NLP_FABRIC logins */ - lpfc_drop_node(phba, ndlp); + lpfc_drop_node(vport, ndlp); } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { /* Fail outstanding I/O now since this * device is marked for PLOGI */ - lpfc_unreg_rpi(phba, ndlp); + lpfc_unreg_rpi(vport, ndlp); } } - phba->hba_state = LPFC_FLOGI; - lpfc_set_disctmo(phba); - lpfc_initial_flogi(phba); + vport->port_state = LPFC_FLOGI; + lpfc_set_disctmo(vport); + lpfc_initial_flogi(vport); return 0; } /* Discovery not needed, * move the nodes 
to their original state. */ - list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes, + list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { if (ndlp->nlp_state != NLP_STE_NPR_NODE) continue; @@ -3163,13 +3560,13 @@ lpfc_els_rcv_fan(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, switch (ndlp->nlp_prev_state) { case NLP_STE_UNMAPPED_NODE: ndlp->nlp_prev_state = NLP_STE_NPR_NODE; - lpfc_nlp_set_state(phba, ndlp, + lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); break; case NLP_STE_MAPPED_NODE: ndlp->nlp_prev_state = NLP_STE_NPR_NODE; - lpfc_nlp_set_state(phba, ndlp, + lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE); break; @@ -3179,7 +3576,7 @@ lpfc_els_rcv_fan(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, } /* Start discovery - this should just do CLEAR_LA */ - lpfc_disc_start(phba); + lpfc_disc_start(vport); } return 0; } @@ -3187,42 +3584,42 @@ lpfc_els_rcv_fan(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, void lpfc_els_timeout(unsigned long ptr) { - struct lpfc_hba *phba; + struct lpfc_vport *vport = (struct lpfc_vport *) ptr; + struct lpfc_hba *phba = vport->phba; unsigned long iflag; - phba = (struct lpfc_hba *)ptr; - if (phba == 0) - return; - spin_lock_irqsave(phba->host->host_lock, iflag); - if (!(phba->work_hba_events & WORKER_ELS_TMO)) { - phba->work_hba_events |= WORKER_ELS_TMO; + spin_lock_irqsave(&vport->work_port_lock, iflag); + if ((vport->work_port_events & WORKER_ELS_TMO) == 0) { + vport->work_port_events |= WORKER_ELS_TMO; + spin_unlock_irqrestore(&vport->work_port_lock, iflag); + + spin_lock_irqsave(&phba->hbalock, iflag); if (phba->work_wait) - wake_up(phba->work_wait); + lpfc_worker_wake_up(phba); + spin_unlock_irqrestore(&phba->hbalock, iflag); } - spin_unlock_irqrestore(phba->host->host_lock, iflag); + else + spin_unlock_irqrestore(&vport->work_port_lock, iflag); return; } void -lpfc_els_timeout_handler(struct lpfc_hba *phba) +lpfc_els_timeout_handler(struct lpfc_vport *vport) { + struct lpfc_hba *phba = vport->phba; struct lpfc_sli_ring *pring; struct lpfc_iocbq *tmp_iocb, *piocb; IOCB_t *cmd = NULL; struct lpfc_dmabuf *pcmd; - uint32_t *elscmd; - uint32_t els_command=0; + uint32_t els_command = 0; uint32_t timeout; - uint32_t remote_ID; + uint32_t remote_ID = 0xffffffff; - if (phba == 0) - return; - spin_lock_irq(phba->host->host_lock); /* If the timer is already canceled do nothing */ - if (!(phba->work_hba_events & WORKER_ELS_TMO)) { - spin_unlock_irq(phba->host->host_lock); + if ((vport->work_port_events & WORKER_ELS_TMO) == 0) { return; } + spin_lock_irq(&phba->hbalock); timeout = (uint32_t)(phba->fc_ratov << 1); pring = &phba->sli.ring[LPFC_ELS_RING]; @@ -3230,63 +3627,70 @@ lpfc_els_timeout_handler(struct lpfc_hba *phba) list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { cmd = &piocb->iocb; - if ((piocb->iocb_flag & LPFC_IO_LIBDFC) || - (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN) || - (piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)) { + if ((piocb->iocb_flag & LPFC_IO_LIBDFC) != 0 || + piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || + piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN) continue; - } + + if (piocb->vport != vport) + continue; + pcmd = (struct lpfc_dmabuf *) piocb->context2; - if (pcmd) { - elscmd = (uint32_t *) (pcmd->virt); - els_command = *elscmd; - } + if (pcmd) + els_command = *(uint32_t *) (pcmd->virt); - if ((els_command == ELS_CMD_FARP) - || (els_command == ELS_CMD_FARPR)) { + if (els_command == ELS_CMD_FARP || + els_command == ELS_CMD_FARPR || + els_command == ELS_CMD_FDISC) + 
continue; + + if (vport != piocb->vport) continue; - } if (piocb->drvrTimeout > 0) { - if (piocb->drvrTimeout >= timeout) { + if (piocb->drvrTimeout >= timeout) piocb->drvrTimeout -= timeout; - } else { + else piocb->drvrTimeout = 0; - } continue; } - if (cmd->ulpCommand == CMD_GEN_REQUEST64_CR) { - struct lpfc_nodelist *ndlp; - ndlp = __lpfc_findnode_rpi(phba, cmd->ulpContext); - remote_ID = ndlp->nlp_DID; - } else { + remote_ID = 0xffffffff; + if (cmd->ulpCommand != CMD_GEN_REQUEST64_CR) remote_ID = cmd->un.elsreq64.remoteID; + else { + struct lpfc_nodelist *ndlp; + ndlp = __lpfc_findnode_rpi(vport, cmd->ulpContext); + if (ndlp) + remote_ID = ndlp->nlp_DID; } - lpfc_printf_log(phba, - KERN_ERR, - LOG_ELS, - "%d:0127 ELS timeout Data: x%x x%x x%x x%x\n", - phba->brd_no, els_command, + lpfc_printf_log(phba, KERN_ERR, LOG_ELS, + "%d (%d):0127 ELS timeout Data: x%x x%x x%x " + "x%x\n", + phba->brd_no, vport->vpi, els_command, remote_ID, cmd->ulpCommand, cmd->ulpIoTag); lpfc_sli_issue_abort_iotag(phba, pring, piocb); } - if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt) - mod_timer(&phba->els_tmofunc, jiffies + HZ * timeout); + spin_unlock_irq(&phba->hbalock); - spin_unlock_irq(phba->host->host_lock); + if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt) + mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout); } void -lpfc_els_flush_cmd(struct lpfc_hba *phba) +lpfc_els_flush_cmd(struct lpfc_vport *vport) { LIST_HEAD(completions); + struct lpfc_hba *phba = vport->phba; struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; struct lpfc_iocbq *tmp_iocb, *piocb; IOCB_t *cmd = NULL; - spin_lock_irq(phba->host->host_lock); + lpfc_fabric_abort_vport(vport); + + spin_lock_irq(&phba->hbalock); list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) { cmd = &piocb->iocb; @@ -3301,271 +3705,1042 @@ lpfc_els_flush_cmd(struct lpfc_hba *phba) cmd->ulpCommand == CMD_ABORT_XRI_CN) continue; + if (piocb->vport != vport) + continue; + list_move_tail(&piocb->list, &completions); pring->txq_cnt--; - } list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { - cmd = &piocb->iocb; - if (piocb->iocb_flag & LPFC_IO_LIBDFC) { continue; } + if (piocb->vport != vport) + continue; + lpfc_sli_issue_abort_iotag(phba, pring, piocb); } - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(&phba->hbalock); - while(!list_empty(&completions)) { + while (!list_empty(&completions)) { piocb = list_get_first(&completions, struct lpfc_iocbq, list); cmd = &piocb->iocb; - list_del(&piocb->list); + list_del_init(&piocb->list); - if (piocb->iocb_cmpl) { + if (!piocb->iocb_cmpl) + lpfc_sli_release_iocbq(phba, piocb); + else { cmd->ulpStatus = IOSTAT_LOCAL_REJECT; cmd->un.ulpWord[4] = IOERR_SLI_ABORTED; (piocb->iocb_cmpl) (phba, piocb, piocb); - } else - lpfc_sli_release_iocbq(phba, piocb); + } } return; } -void -lpfc_els_unsol_event(struct lpfc_hba * phba, - struct lpfc_sli_ring * pring, struct lpfc_iocbq * elsiocb) +static void +lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, + struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb) { - struct lpfc_sli *psli; struct lpfc_nodelist *ndlp; - struct lpfc_dmabuf *mp; - uint32_t *lp; - IOCB_t *icmd; struct ls_rjt stat; - uint32_t cmd; - uint32_t did; - uint32_t newnode; - uint32_t drop_cmd = 0; /* by default do NOT drop received cmd */ - uint32_t rjt_err = 0; - - psli = &phba->sli; - icmd = &elsiocb->iocb; - - if ((icmd->ulpStatus == IOSTAT_LOCAL_REJECT) && - ((icmd->un.ulpWord[4] & 0xff) == IOERR_RCV_BUFFER_WAITING)) { - /* Not enough posted 
buffers; Try posting more buffers */ - phba->fc_stat.NoRcvBuf++; - lpfc_post_buffer(phba, pring, 0, 1); - return; - } - - /* If there are no BDEs associated with this IOCB, - * there is nothing to do. - */ - if (icmd->ulpBdeCount == 0) - return; + uint32_t *payload; + uint32_t cmd, did, newnode, rjt_err = 0; + IOCB_t *icmd = &elsiocb->iocb; - /* type of ELS cmd is first 32bit word in packet */ - mp = lpfc_sli_ringpostbuf_get(phba, pring, getPaddr(icmd->un. - cont64[0]. - addrHigh, - icmd->un. - cont64[0].addrLow)); - if (mp == 0) { - drop_cmd = 1; + if (vport == NULL || elsiocb->context2 == NULL) goto dropit; - } newnode = 0; - lp = (uint32_t *) mp->virt; - cmd = *lp++; - lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], 1, 1); + payload = ((struct lpfc_dmabuf *)elsiocb->context2)->virt; + cmd = *payload; + if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0) + lpfc_post_buffer(phba, pring, 1, 1); + did = icmd->un.rcvels.remoteID; if (icmd->ulpStatus) { - lpfc_mbuf_free(phba, mp->virt, mp->phys); - kfree(mp); - drop_cmd = 1; + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, + "RCV Unsol ELS: status:x%x/x%x did:x%x", + icmd->ulpStatus, icmd->un.ulpWord[4], did); goto dropit; } /* Check to see if link went down during discovery */ - if (lpfc_els_chk_latt(phba)) { - lpfc_mbuf_free(phba, mp->virt, mp->phys); - kfree(mp); - drop_cmd = 1; + if (lpfc_els_chk_latt(vport)) goto dropit; - } - did = icmd->un.rcvels.remoteID; - ndlp = lpfc_findnode_did(phba, did); + /* Ignore traffic recevied during vport shutdown. */ + if (vport->load_flag & FC_UNLOADING) + goto dropit; + + ndlp = lpfc_findnode_did(vport, did); if (!ndlp) { /* Cannot find existing Fabric ndlp, so allocate a new one */ ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); - if (!ndlp) { - lpfc_mbuf_free(phba, mp->virt, mp->phys); - kfree(mp); - drop_cmd = 1; + if (!ndlp) goto dropit; - } - lpfc_nlp_init(phba, ndlp, did); + lpfc_nlp_init(vport, ndlp, did); newnode = 1; if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) { ndlp->nlp_type |= NLP_FABRIC; } - lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNUSED_NODE); + lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE); } phba->fc_stat.elsRcvFrame++; if (elsiocb->context1) lpfc_nlp_put(elsiocb->context1); elsiocb->context1 = lpfc_nlp_get(ndlp); - elsiocb->context2 = mp; + elsiocb->vport = vport; if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) { cmd &= ELS_CMD_MASK; } /* ELS command <elsCmd> received from NPORT <did> */ lpfc_printf_log(phba, KERN_INFO, LOG_ELS, - "%d:0112 ELS command x%x received from NPORT x%x " - "Data: x%x\n", phba->brd_no, cmd, did, phba->hba_state); + "%d (%d):0112 ELS command x%x received from NPORT x%x " + "Data: x%x\n", phba->brd_no, vport->vpi, cmd, did, + vport->port_state); switch (cmd) { case ELS_CMD_PLOGI: + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, + "RCV PLOGI: did:x%x/ste:x%x flg:x%x", + did, vport->port_state, ndlp->nlp_flag); + phba->fc_stat.elsRcvPLOGI++; - if (phba->hba_state < LPFC_DISC_AUTH) { - rjt_err = 1; + ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp); + + if (vport->port_state < LPFC_DISC_AUTH) { + rjt_err = LSRJT_UNABLE_TPC; break; } - ndlp = lpfc_plogi_confirm_nport(phba, mp, ndlp); - lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_PLOGI); + lpfc_disc_state_machine(vport, ndlp, elsiocb, + NLP_EVT_RCV_PLOGI); + break; case ELS_CMD_FLOGI: + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, + "RCV FLOGI: did:x%x/ste:x%x flg:x%x", + did, vport->port_state, ndlp->nlp_flag); + phba->fc_stat.elsRcvFLOGI++; - 
lpfc_els_rcv_flogi(phba, elsiocb, ndlp, newnode); + lpfc_els_rcv_flogi(vport, elsiocb, ndlp, newnode); if (newnode) - lpfc_drop_node(phba, ndlp); + lpfc_drop_node(vport, ndlp); break; case ELS_CMD_LOGO: + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, + "RCV LOGO: did:x%x/ste:x%x flg:x%x", + did, vport->port_state, ndlp->nlp_flag); + phba->fc_stat.elsRcvLOGO++; - if (phba->hba_state < LPFC_DISC_AUTH) { - rjt_err = 1; + if (vport->port_state < LPFC_DISC_AUTH) { + rjt_err = LSRJT_UNABLE_TPC; break; } - lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_LOGO); + lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO); break; case ELS_CMD_PRLO: + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, + "RCV PRLO: did:x%x/ste:x%x flg:x%x", + did, vport->port_state, ndlp->nlp_flag); + phba->fc_stat.elsRcvPRLO++; - if (phba->hba_state < LPFC_DISC_AUTH) { - rjt_err = 1; + if (vport->port_state < LPFC_DISC_AUTH) { + rjt_err = LSRJT_UNABLE_TPC; break; } - lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_PRLO); + lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO); break; case ELS_CMD_RSCN: phba->fc_stat.elsRcvRSCN++; - lpfc_els_rcv_rscn(phba, elsiocb, ndlp, newnode); + lpfc_els_rcv_rscn(vport, elsiocb, ndlp, newnode); if (newnode) - lpfc_drop_node(phba, ndlp); + lpfc_drop_node(vport, ndlp); break; case ELS_CMD_ADISC: + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, + "RCV ADISC: did:x%x/ste:x%x flg:x%x", + did, vport->port_state, ndlp->nlp_flag); + phba->fc_stat.elsRcvADISC++; - if (phba->hba_state < LPFC_DISC_AUTH) { - rjt_err = 1; + if (vport->port_state < LPFC_DISC_AUTH) { + rjt_err = LSRJT_UNABLE_TPC; break; } - lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_ADISC); + lpfc_disc_state_machine(vport, ndlp, elsiocb, + NLP_EVT_RCV_ADISC); break; case ELS_CMD_PDISC: + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, + "RCV PDISC: did:x%x/ste:x%x flg:x%x", + did, vport->port_state, ndlp->nlp_flag); + phba->fc_stat.elsRcvPDISC++; - if (phba->hba_state < LPFC_DISC_AUTH) { - rjt_err = 1; + if (vport->port_state < LPFC_DISC_AUTH) { + rjt_err = LSRJT_UNABLE_TPC; break; } - lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_PDISC); + lpfc_disc_state_machine(vport, ndlp, elsiocb, + NLP_EVT_RCV_PDISC); break; case ELS_CMD_FARPR: + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, + "RCV FARPR: did:x%x/ste:x%x flg:x%x", + did, vport->port_state, ndlp->nlp_flag); + phba->fc_stat.elsRcvFARPR++; - lpfc_els_rcv_farpr(phba, elsiocb, ndlp); + lpfc_els_rcv_farpr(vport, elsiocb, ndlp); break; case ELS_CMD_FARP: + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, + "RCV FARP: did:x%x/ste:x%x flg:x%x", + did, vport->port_state, ndlp->nlp_flag); + phba->fc_stat.elsRcvFARP++; - lpfc_els_rcv_farp(phba, elsiocb, ndlp); + lpfc_els_rcv_farp(vport, elsiocb, ndlp); break; case ELS_CMD_FAN: + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, + "RCV FAN: did:x%x/ste:x%x flg:x%x", + did, vport->port_state, ndlp->nlp_flag); + phba->fc_stat.elsRcvFAN++; - lpfc_els_rcv_fan(phba, elsiocb, ndlp); + lpfc_els_rcv_fan(vport, elsiocb, ndlp); break; case ELS_CMD_PRLI: + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, + "RCV PRLI: did:x%x/ste:x%x flg:x%x", + did, vport->port_state, ndlp->nlp_flag); + phba->fc_stat.elsRcvPRLI++; - if (phba->hba_state < LPFC_DISC_AUTH) { - rjt_err = 1; + if (vport->port_state < LPFC_DISC_AUTH) { + rjt_err = LSRJT_UNABLE_TPC; break; } - lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_PRLI); + 
lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI); break; case ELS_CMD_LIRR: + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, + "RCV LIRR: did:x%x/ste:x%x flg:x%x", + did, vport->port_state, ndlp->nlp_flag); + phba->fc_stat.elsRcvLIRR++; - lpfc_els_rcv_lirr(phba, elsiocb, ndlp); + lpfc_els_rcv_lirr(vport, elsiocb, ndlp); if (newnode) - lpfc_drop_node(phba, ndlp); + lpfc_drop_node(vport, ndlp); break; case ELS_CMD_RPS: + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, + "RCV RPS: did:x%x/ste:x%x flg:x%x", + did, vport->port_state, ndlp->nlp_flag); + phba->fc_stat.elsRcvRPS++; - lpfc_els_rcv_rps(phba, elsiocb, ndlp); + lpfc_els_rcv_rps(vport, elsiocb, ndlp); if (newnode) - lpfc_drop_node(phba, ndlp); + lpfc_drop_node(vport, ndlp); break; case ELS_CMD_RPL: + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, + "RCV RPL: did:x%x/ste:x%x flg:x%x", + did, vport->port_state, ndlp->nlp_flag); + phba->fc_stat.elsRcvRPL++; - lpfc_els_rcv_rpl(phba, elsiocb, ndlp); + lpfc_els_rcv_rpl(vport, elsiocb, ndlp); if (newnode) - lpfc_drop_node(phba, ndlp); + lpfc_drop_node(vport, ndlp); break; case ELS_CMD_RNID: + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, + "RCV RNID: did:x%x/ste:x%x flg:x%x", + did, vport->port_state, ndlp->nlp_flag); + phba->fc_stat.elsRcvRNID++; - lpfc_els_rcv_rnid(phba, elsiocb, ndlp); + lpfc_els_rcv_rnid(vport, elsiocb, ndlp); if (newnode) - lpfc_drop_node(phba, ndlp); + lpfc_drop_node(vport, ndlp); break; default: + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, + "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x", + cmd, did, vport->port_state); + /* Unsupported ELS command, reject */ - rjt_err = 1; + rjt_err = LSRJT_INVALID_CMD; /* Unknown ELS command <elsCmd> received from NPORT <did> */ lpfc_printf_log(phba, KERN_ERR, LOG_ELS, - "%d:0115 Unknown ELS command x%x received from " - "NPORT x%x\n", phba->brd_no, cmd, did); + "%d (%d):0115 Unknown ELS command x%x " + "received from NPORT x%x\n", + phba->brd_no, vport->vpi, cmd, did); if (newnode) - lpfc_drop_node(phba, ndlp); + lpfc_drop_node(vport, ndlp); break; } /* check if need to LS_RJT received ELS cmd */ if (rjt_err) { - stat.un.b.lsRjtRsvd0 = 0; - stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; + memset(&stat, 0, sizeof(stat)); + stat.un.b.lsRjtRsnCode = rjt_err; stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; - stat.un.b.vendorUnique = 0; - lpfc_els_rsp_reject(phba, stat.un.lsRjtError, elsiocb, ndlp); + lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp, + NULL); + if (newnode) + lpfc_drop_node(vport, ndlp); + } + + return; + +dropit: + lpfc_printf_log(phba, KERN_ERR, LOG_ELS, + "%d (%d):0111 Dropping received ELS cmd " + "Data: x%x x%x x%x\n", + phba->brd_no, vport ? 
vport->vpi : 0xffff, + icmd->ulpStatus, icmd->un.ulpWord[4], + icmd->ulpTimeout); + phba->fc_stat.elsRcvDrop++; +} + +static struct lpfc_vport * +lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi) +{ + struct lpfc_vport *vport; + + list_for_each_entry(vport, &phba->port_list, listentry) { + if (vport->vpi == vpi) + return vport; + } + return NULL; +} + +void +lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, + struct lpfc_iocbq *elsiocb) +{ + struct lpfc_vport *vport = phba->pport; + IOCB_t *icmd = &elsiocb->iocb; + dma_addr_t paddr; + struct lpfc_dmabuf *bdeBuf1 = elsiocb->context2; + struct lpfc_dmabuf *bdeBuf2 = elsiocb->context3; + + elsiocb->context2 = NULL; + elsiocb->context3 = NULL; + + if (icmd->ulpStatus == IOSTAT_NEED_BUFFER) { + lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); + } else if (icmd->ulpStatus == IOSTAT_LOCAL_REJECT && + (icmd->un.ulpWord[4] & 0xff) == IOERR_RCV_BUFFER_WAITING) { + phba->fc_stat.NoRcvBuf++; + /* Not enough posted buffers; Try posting more buffers */ + if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) + lpfc_post_buffer(phba, pring, 0, 1); + return; + } + + if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && + (icmd->ulpCommand == CMD_IOCB_RCV_ELS64_CX || + icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) { + if (icmd->unsli3.rcvsli3.vpi == 0xffff) + vport = phba->pport; + else { + uint16_t vpi = icmd->unsli3.rcvsli3.vpi; + vport = lpfc_find_vport_by_vpid(phba, vpi); + } + } + /* If there are no BDEs associated + * with this IOCB, there is nothing to do. + */ + if (icmd->ulpBdeCount == 0) + return; + + /* type of ELS cmd is first 32bit word + * in packet + */ + if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { + elsiocb->context2 = bdeBuf1; + } else { + paddr = getPaddr(icmd->un.cont64[0].addrHigh, + icmd->un.cont64[0].addrLow); + elsiocb->context2 = lpfc_sli_ringpostbuf_get(phba, pring, + paddr); } + lpfc_els_unsol_buffer(phba, pring, vport, elsiocb); + /* + * The different unsolicited event handlers would tell us + * if they are done with "mp" by setting context2 to NULL. 
+ */ lpfc_nlp_put(elsiocb->context1); elsiocb->context1 = NULL; if (elsiocb->context2) { - lpfc_mbuf_free(phba, mp->virt, mp->phys); - kfree(mp); + lpfc_in_buf_free(phba, (struct lpfc_dmabuf *)elsiocb->context2); + elsiocb->context2 = NULL; } -dropit: - /* check if need to drop received ELS cmd */ - if (drop_cmd == 1) { + + /* RCV_ELS64_CX provide for 2 BDEs - process 2nd if included */ + if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) && + icmd->ulpBdeCount == 2) { + elsiocb->context2 = bdeBuf2; + lpfc_els_unsol_buffer(phba, pring, vport, elsiocb); + /* free mp if we are done with it */ + if (elsiocb->context2) { + lpfc_in_buf_free(phba, elsiocb->context2); + elsiocb->context2 = NULL; + } + } +} + +void +lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport) +{ + struct lpfc_nodelist *ndlp, *ndlp_fdmi; + + ndlp = lpfc_findnode_did(vport, NameServer_DID); + if (!ndlp) { + ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); + if (!ndlp) { + if (phba->fc_topology == TOPOLOGY_LOOP) { + lpfc_disc_start(vport); + return; + } + lpfc_vport_set_state(vport, FC_VPORT_FAILED); + lpfc_printf_log(phba, KERN_ERR, LOG_ELS, + "%d (%d):0251 NameServer login: no memory\n", + phba->brd_no, vport->vpi); + return; + } + lpfc_nlp_init(vport, ndlp, NameServer_DID); + ndlp->nlp_type |= NLP_FABRIC; + } + + lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); + + if (lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) { + lpfc_vport_set_state(vport, FC_VPORT_FAILED); lpfc_printf_log(phba, KERN_ERR, LOG_ELS, - "%d:0111 Dropping received ELS cmd " - "Data: x%x x%x x%x\n", phba->brd_no, - icmd->ulpStatus, icmd->un.ulpWord[4], - icmd->ulpTimeout); - phba->fc_stat.elsRcvDrop++; + "%d (%d):0252 Cannot issue NameServer login\n", + phba->brd_no, vport->vpi); + return; + } + + if (phba->cfg_fdmi_on) { + ndlp_fdmi = mempool_alloc(phba->nlp_mem_pool, + GFP_KERNEL); + if (ndlp_fdmi) { + lpfc_nlp_init(vport, ndlp_fdmi, FDMI_DID); + ndlp_fdmi->nlp_type |= NLP_FABRIC; + ndlp_fdmi->nlp_state = + NLP_STE_PLOGI_ISSUE; + lpfc_issue_els_plogi(vport, ndlp_fdmi->nlp_DID, + 0); + } + } + return; +} + +static void +lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) +{ + struct lpfc_vport *vport = pmb->vport; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; + MAILBOX_t *mb = &pmb->mb; + + vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; + lpfc_nlp_put(ndlp); + + if (mb->mbxStatus) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, + "%d (%d):0915 Register VPI failed: 0x%x\n", + phba->brd_no, vport->vpi, mb->mbxStatus); + + switch (mb->mbxStatus) { + case 0x11: /* unsupported feature */ + case 0x9603: /* max_vpi exceeded */ + /* giving up on vport registration */ + lpfc_vport_set_state(vport, FC_VPORT_FAILED); + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); + spin_unlock_irq(shost->host_lock); + lpfc_can_disctmo(vport); + break; + default: + /* Try to recover from this error */ + lpfc_mbx_unreg_vpi(vport); + vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; + lpfc_initial_fdisc(vport); + break; + } + + } else { + if (vport == phba->pport) + lpfc_issue_fabric_reglogin(vport); + else + lpfc_do_scr_ns_plogi(phba, vport); } + mempool_free(pmb, phba->mbox_mem_pool); return; } + +void +lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp) +{ + LPFC_MBOXQ_t *mbox; + + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (mbox) { + lpfc_reg_vpi(phba, vport->vpi, 
vport->fc_myDID, mbox); + mbox->vport = vport; + mbox->context2 = lpfc_nlp_get(ndlp); + mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport; + if (lpfc_sli_issue_mbox(phba, mbox, + MBX_NOWAIT | MBX_STOP_IOCB) + == MBX_NOT_FINISHED) { + mempool_free(mbox, phba->mbox_mem_pool); + vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; + + lpfc_vport_set_state(vport, FC_VPORT_FAILED); + + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, + "%d (%d):0253 Register VPI: Cannot send mbox\n", + phba->brd_no, vport->vpi); + } + } else { + lpfc_vport_set_state(vport, FC_VPORT_FAILED); + + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, + "%d (%d):0254 Register VPI: no memory\n", + phba->brd_no, vport->vpi); + + vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; + lpfc_nlp_put(ndlp); + } +} + +static void +lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) +{ + struct lpfc_vport *vport = cmdiocb->vport; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; + struct lpfc_nodelist *np; + struct lpfc_nodelist *next_np; + IOCB_t *irsp = &rspiocb->iocb; + struct lpfc_iocbq *piocb; + + lpfc_printf_log(phba, KERN_INFO, LOG_ELS, + "%d (%d):0123 FDISC completes. x%x/x%x prevDID: x%x\n", + phba->brd_no, vport->vpi, + irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_prevDID); + + /* Since all FDISCs are being single threaded, we + * must reset the discovery timer for ALL vports + * waiting to send FDISC when one completes. + */ + list_for_each_entry(piocb, &phba->fabric_iocb_list, list) { + lpfc_set_disctmo(piocb->vport); + } + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, + "FDISC cmpl: status:x%x/x%x prevdid:x%x", + irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_prevDID); + + if (irsp->ulpStatus) { + /* Check for retry */ + if (lpfc_els_retry(phba, cmdiocb, rspiocb)) + goto out; + + /* FDISC failed */ + lpfc_printf_log(phba, KERN_ERR, LOG_ELS, + "%d (%d):0124 FDISC failed. (%d/%d)\n", + phba->brd_no, vport->vpi, + irsp->ulpStatus, irsp->un.ulpWord[4]); + + if (vport->fc_vport->vport_state == FC_VPORT_INITIALIZING) + lpfc_vport_set_state(vport, FC_VPORT_FAILED); + + lpfc_nlp_put(ndlp); + /* giving up on FDISC. Cancel discovery timer */ + lpfc_can_disctmo(vport); + } else { + spin_lock_irq(shost->host_lock); + vport->fc_flag |= FC_FABRIC; + if (vport->phba->fc_topology == TOPOLOGY_LOOP) + vport->fc_flag |= FC_PUBLIC_LOOP; + spin_unlock_irq(shost->host_lock); + + vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID; + lpfc_vport_set_state(vport, FC_VPORT_ACTIVE); + if ((vport->fc_prevDID != vport->fc_myDID) && + !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { + /* If our NportID changed, we need to ensure all + * remaining NPORTs get unreg_login'ed so we can + * issue unreg_vpi. 
+ */ + list_for_each_entry_safe(np, next_np, + &vport->fc_nodes, nlp_listp) { + if (np->nlp_state != NLP_STE_NPR_NODE + || !(np->nlp_flag & NLP_NPR_ADISC)) + continue; + spin_lock_irq(shost->host_lock); + np->nlp_flag &= ~NLP_NPR_ADISC; + spin_unlock_irq(shost->host_lock); + lpfc_unreg_rpi(vport, np); + } + lpfc_mbx_unreg_vpi(vport); + vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; + } + + if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) + lpfc_register_new_vport(phba, vport, ndlp); + else + lpfc_do_scr_ns_plogi(phba, vport); + + lpfc_nlp_put(ndlp); /* Free Fabric ndlp for vports */ + } + +out: + lpfc_els_free_iocb(phba, cmdiocb); +} + +int +lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + uint8_t retry) +{ + struct lpfc_hba *phba = vport->phba; + IOCB_t *icmd; + struct lpfc_iocbq *elsiocb; + struct serv_parm *sp; + uint8_t *pcmd; + uint16_t cmdsize; + int did = ndlp->nlp_DID; + int rc; + + cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); + elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, + ELS_CMD_FDISC); + if (!elsiocb) { + lpfc_vport_set_state(vport, FC_VPORT_FAILED); + + lpfc_printf_log(phba, KERN_ERR, LOG_ELS, + "%d (%d):0255 Issue FDISC: no IOCB\n", + phba->brd_no, vport->vpi); + return 1; + } + + icmd = &elsiocb->iocb; + icmd->un.elsreq64.myID = 0; + icmd->un.elsreq64.fl = 1; + + /* For FDISC, Let FDISC rsp set the NPortID for this VPI */ + icmd->ulpCt_h = 1; + icmd->ulpCt_l = 0; + + pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); + *((uint32_t *) (pcmd)) = ELS_CMD_FDISC; + pcmd += sizeof(uint32_t); /* CSP Word 1 */ + memcpy(pcmd, &vport->phba->pport->fc_sparam, sizeof(struct serv_parm)); + sp = (struct serv_parm *) pcmd; + /* Setup CSPs accordingly for Fabric */ + sp->cmn.e_d_tov = 0; + sp->cmn.w2.r_a_tov = 0; + sp->cls1.classValid = 0; + sp->cls2.seqDelivery = 1; + sp->cls3.seqDelivery = 1; + + pcmd += sizeof(uint32_t); /* CSP Word 2 */ + pcmd += sizeof(uint32_t); /* CSP Word 3 */ + pcmd += sizeof(uint32_t); /* CSP Word 4 */ + pcmd += sizeof(uint32_t); /* Port Name */ + memcpy(pcmd, &vport->fc_portname, 8); + pcmd += sizeof(uint32_t); /* Node Name */ + pcmd += sizeof(uint32_t); /* Node Name */ + memcpy(pcmd, &vport->fc_nodename, 8); + + lpfc_set_disctmo(vport); + + phba->fc_stat.elsXmitFDISC++; + elsiocb->iocb_cmpl = lpfc_cmpl_els_fdisc; + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, + "Issue FDISC: did:x%x", + did, 0, 0); + + rc = lpfc_issue_fabric_iocb(phba, elsiocb); + if (rc == IOCB_ERROR) { + lpfc_els_free_iocb(phba, elsiocb); + lpfc_vport_set_state(vport, FC_VPORT_FAILED); + + lpfc_printf_log(phba, KERN_ERR, LOG_ELS, + "%d (%d):0256 Issue FDISC: Cannot send IOCB\n", + phba->brd_no, vport->vpi); + + return 1; + } + lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING); + vport->port_state = LPFC_FDISC; + return 0; +} + +static void +lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) +{ + struct lpfc_vport *vport = cmdiocb->vport; + IOCB_t *irsp; + + irsp = &rspiocb->iocb; + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, + "LOGO npiv cmpl: status:x%x/x%x did:x%x", + irsp->ulpStatus, irsp->un.ulpWord[4], irsp->un.rcvels.remoteID); + + lpfc_els_free_iocb(phba, cmdiocb); + vport->unreg_vpi_cmpl = VPORT_ERROR; +} + +int +lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) +{ + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_hba *phba = vport->phba; + struct lpfc_sli_ring *pring = 
&phba->sli.ring[LPFC_ELS_RING]; + IOCB_t *icmd; + struct lpfc_iocbq *elsiocb; + uint8_t *pcmd; + uint16_t cmdsize; + + cmdsize = 2 * sizeof(uint32_t) + sizeof(struct lpfc_name); + elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, ndlp->nlp_DID, + ELS_CMD_LOGO); + if (!elsiocb) + return 1; + + icmd = &elsiocb->iocb; + pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); + *((uint32_t *) (pcmd)) = ELS_CMD_LOGO; + pcmd += sizeof(uint32_t); + + /* Fill in LOGO payload */ + *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID); + pcmd += sizeof(uint32_t); + memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name)); + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, + "Issue LOGO npiv did:x%x flg:x%x", + ndlp->nlp_DID, ndlp->nlp_flag, 0); + + elsiocb->iocb_cmpl = lpfc_cmpl_els_npiv_logo; + spin_lock_irq(shost->host_lock); + ndlp->nlp_flag |= NLP_LOGO_SND; + spin_unlock_irq(shost->host_lock); + if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { + spin_lock_irq(shost->host_lock); + ndlp->nlp_flag &= ~NLP_LOGO_SND; + spin_unlock_irq(shost->host_lock); + lpfc_els_free_iocb(phba, elsiocb); + return 1; + } + return 0; +} + +void +lpfc_fabric_block_timeout(unsigned long ptr) +{ + struct lpfc_hba *phba = (struct lpfc_hba *) ptr; + unsigned long iflags; + uint32_t tmo_posted; + spin_lock_irqsave(&phba->pport->work_port_lock, iflags); + tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO; + if (!tmo_posted) + phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO; + spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags); + + if (!tmo_posted) { + spin_lock_irqsave(&phba->hbalock, iflags); + if (phba->work_wait) + lpfc_worker_wake_up(phba); + spin_unlock_irqrestore(&phba->hbalock, iflags); + } +} + +static void +lpfc_resume_fabric_iocbs(struct lpfc_hba *phba) +{ + struct lpfc_iocbq *iocb; + unsigned long iflags; + int ret; + struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; + IOCB_t *cmd; + +repeat: + iocb = NULL; + spin_lock_irqsave(&phba->hbalock, iflags); + /* Post any pending iocb to the SLI layer */ + if (atomic_read(&phba->fabric_iocb_count) == 0) { + list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb), + list); + if (iocb) + atomic_inc(&phba->fabric_iocb_count); + } + spin_unlock_irqrestore(&phba->hbalock, iflags); + if (iocb) { + iocb->fabric_iocb_cmpl = iocb->iocb_cmpl; + iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb; + iocb->iocb_flag |= LPFC_IO_FABRIC; + + lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD, + "Fabric sched1: ste:x%x", + iocb->vport->port_state, 0, 0); + + ret = lpfc_sli_issue_iocb(phba, pring, iocb, 0); + + if (ret == IOCB_ERROR) { + iocb->iocb_cmpl = iocb->fabric_iocb_cmpl; + iocb->fabric_iocb_cmpl = NULL; + iocb->iocb_flag &= ~LPFC_IO_FABRIC; + cmd = &iocb->iocb; + cmd->ulpStatus = IOSTAT_LOCAL_REJECT; + cmd->un.ulpWord[4] = IOERR_SLI_ABORTED; + iocb->iocb_cmpl(phba, iocb, iocb); + + atomic_dec(&phba->fabric_iocb_count); + goto repeat; + } + } + + return; +} + +void +lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba) +{ + clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); + + lpfc_resume_fabric_iocbs(phba); + return; +} + +static void +lpfc_block_fabric_iocbs(struct lpfc_hba *phba) +{ + int blocked; + + blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); + /* Start a timer to unblock fabric + * iocbs after 100ms + */ + if (!blocked) + mod_timer(&phba->fabric_block_timer, jiffies + HZ/10 ); + + return; +} + +static void +lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, 
struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) +{ + struct ls_rjt stat; + + if ((cmdiocb->iocb_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC) + BUG(); + + switch (rspiocb->iocb.ulpStatus) { + case IOSTAT_NPORT_RJT: + case IOSTAT_FABRIC_RJT: + if (rspiocb->iocb.un.ulpWord[4] & RJT_UNAVAIL_TEMP) { + lpfc_block_fabric_iocbs(phba); + } + break; + + case IOSTAT_NPORT_BSY: + case IOSTAT_FABRIC_BSY: + lpfc_block_fabric_iocbs(phba); + break; + + case IOSTAT_LS_RJT: + stat.un.lsRjtError = + be32_to_cpu(rspiocb->iocb.un.ulpWord[4]); + if ((stat.un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC) || + (stat.un.b.lsRjtRsnCode == LSRJT_LOGICAL_BSY)) + lpfc_block_fabric_iocbs(phba); + break; + } + + if (atomic_read(&phba->fabric_iocb_count) == 0) + BUG(); + + cmdiocb->iocb_cmpl = cmdiocb->fabric_iocb_cmpl; + cmdiocb->fabric_iocb_cmpl = NULL; + cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC; + cmdiocb->iocb_cmpl(phba, cmdiocb, rspiocb); + + atomic_dec(&phba->fabric_iocb_count); + if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) { + /* Post any pending iocbs to HBA */ + lpfc_resume_fabric_iocbs(phba); + } +} + +int +lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb) +{ + unsigned long iflags; + struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; + int ready; + int ret; + + if (atomic_read(&phba->fabric_iocb_count) > 1) + BUG(); + + spin_lock_irqsave(&phba->hbalock, iflags); + ready = atomic_read(&phba->fabric_iocb_count) == 0 && + !test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); + + spin_unlock_irqrestore(&phba->hbalock, iflags); + if (ready) { + iocb->fabric_iocb_cmpl = iocb->iocb_cmpl; + iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb; + iocb->iocb_flag |= LPFC_IO_FABRIC; + + lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD, + "Fabric sched2: ste:x%x", + iocb->vport->port_state, 0, 0); + + atomic_inc(&phba->fabric_iocb_count); + ret = lpfc_sli_issue_iocb(phba, pring, iocb, 0); + + if (ret == IOCB_ERROR) { + iocb->iocb_cmpl = iocb->fabric_iocb_cmpl; + iocb->fabric_iocb_cmpl = NULL; + iocb->iocb_flag &= ~LPFC_IO_FABRIC; + atomic_dec(&phba->fabric_iocb_count); + } + } else { + spin_lock_irqsave(&phba->hbalock, iflags); + list_add_tail(&iocb->list, &phba->fabric_iocb_list); + spin_unlock_irqrestore(&phba->hbalock, iflags); + ret = IOCB_SUCCESS; + } + return ret; +} + + +void lpfc_fabric_abort_vport(struct lpfc_vport *vport) +{ + LIST_HEAD(completions); + struct lpfc_hba *phba = vport->phba; + struct lpfc_iocbq *tmp_iocb, *piocb; + IOCB_t *cmd; + + spin_lock_irq(&phba->hbalock); + list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list, + list) { + + if (piocb->vport != vport) + continue; + + list_move_tail(&piocb->list, &completions); + } + spin_unlock_irq(&phba->hbalock); + + while (!list_empty(&completions)) { + piocb = list_get_first(&completions, struct lpfc_iocbq, list); + list_del_init(&piocb->list); + + cmd = &piocb->iocb; + cmd->ulpStatus = IOSTAT_LOCAL_REJECT; + cmd->un.ulpWord[4] = IOERR_SLI_ABORTED; + (piocb->iocb_cmpl) (phba, piocb, piocb); + } +} + +void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp) +{ + LIST_HEAD(completions); + struct lpfc_hba *phba = ndlp->vport->phba; + struct lpfc_iocbq *tmp_iocb, *piocb; + struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; + IOCB_t *cmd; + + spin_lock_irq(&phba->hbalock); + list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list, + list) { + if ((lpfc_check_sli_ndlp(phba, pring, piocb, ndlp))) { + + list_move_tail(&piocb->list, &completions); + } + } + spin_unlock_irq(&phba->hbalock); + + 
while (!list_empty(&completions)) { + piocb = list_get_first(&completions, struct lpfc_iocbq, list); + list_del_init(&piocb->list); + + cmd = &piocb->iocb; + cmd->ulpStatus = IOSTAT_LOCAL_REJECT; + cmd->un.ulpWord[4] = IOERR_SLI_ABORTED; + (piocb->iocb_cmpl) (phba, piocb, piocb); + } +} + +void lpfc_fabric_abort_hba(struct lpfc_hba *phba) +{ + LIST_HEAD(completions); + struct lpfc_iocbq *piocb; + IOCB_t *cmd; + + spin_lock_irq(&phba->hbalock); + list_splice_init(&phba->fabric_iocb_list, &completions); + spin_unlock_irq(&phba->hbalock); + + while (!list_empty(&completions)) { + piocb = list_get_first(&completions, struct lpfc_iocbq, list); + list_del_init(&piocb->list); + + cmd = &piocb->iocb; + cmd->ulpStatus = IOSTAT_LOCAL_REJECT; + cmd->un.ulpWord[4] = IOERR_SLI_ABORTED; + (piocb->iocb_cmpl) (phba, piocb, piocb); + } +} + + +void lpfc_fabric_abort_flogi(struct lpfc_hba *phba) +{ + LIST_HEAD(completions); + struct lpfc_iocbq *tmp_iocb, *piocb; + IOCB_t *cmd; + struct lpfc_nodelist *ndlp; + + spin_lock_irq(&phba->hbalock); + list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list, + list) { + + cmd = &piocb->iocb; + ndlp = (struct lpfc_nodelist *) piocb->context1; + if (cmd->ulpCommand == CMD_ELS_REQUEST64_CR && + ndlp != NULL && + ndlp->nlp_DID == Fabric_DID) + list_move_tail(&piocb->list, &completions); + } + spin_unlock_irq(&phba->hbalock); + + while (!list_empty(&completions)) { + piocb = list_get_first(&completions, struct lpfc_iocbq, list); + list_del_init(&piocb->list); + + cmd = &piocb->iocb; + cmd->ulpStatus = IOSTAT_LOCAL_REJECT; + cmd->un.ulpWord[4] = IOERR_SLI_ABORTED; + (piocb->iocb_cmpl) (phba, piocb, piocb); + } +} + + diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 61caa8d379e2..f2f4639eab59 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -36,6 +36,8 @@ #include "lpfc.h" #include "lpfc_logmsg.h" #include "lpfc_crtn.h" +#include "lpfc_vport.h" +#include "lpfc_debugfs.h" /* AlpaArray for assignment of scsid for scan-down and bind_method */ static uint8_t lpfcAlpaArray[] = { @@ -54,7 +56,7 @@ static uint8_t lpfcAlpaArray[] = { 0x10, 0x0F, 0x08, 0x04, 0x02, 0x01 }; -static void lpfc_disc_timeout_handler(struct lpfc_hba *); +static void lpfc_disc_timeout_handler(struct lpfc_vport *); void lpfc_terminate_rport_io(struct fc_rport *rport) @@ -74,14 +76,16 @@ lpfc_terminate_rport_io(struct fc_rport *rport) return; } - phba = ndlp->nlp_phba; + phba = ndlp->vport->phba; + + lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT, + "rport terminate: sid:x%x did:x%x flg:x%x", + ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag); - spin_lock_irq(phba->host->host_lock); if (ndlp->nlp_sid != NLP_NO_SID) { lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring], ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT); } - spin_unlock_irq(phba->host->host_lock); return; } @@ -94,105 +98,213 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) { struct lpfc_rport_data *rdata; struct lpfc_nodelist * ndlp; - uint8_t *name; - int warn_on = 0; - struct lpfc_hba *phba; + struct lpfc_vport *vport; + struct lpfc_hba *phba; + struct completion devloss_compl; + struct lpfc_work_evt *evtp; rdata = rport->dd_data; ndlp = rdata->pnode; if (!ndlp) { - if (rport->roles & FC_RPORT_ROLE_FCP_TARGET) + if (rport->scsi_target_id != -1) { printk(KERN_ERR "Cannot find remote node" - " for rport in dev_loss_tmo_callbk x%x\n", - rport->port_id); + " for rport in dev_loss_tmo_callbk x%x\n", + rport->port_id); + } return; } - if (ndlp->nlp_state == 
NLP_STE_MAPPED_NODE) + vport = ndlp->vport; + phba = vport->phba; + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, + "rport devlosscb: sid:x%x did:x%x flg:x%x", + ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag); + + init_completion(&devloss_compl); + evtp = &ndlp->dev_loss_evt; + + if (!list_empty(&evtp->evt_listp)) + return; + + spin_lock_irq(&phba->hbalock); + evtp->evt_arg1 = ndlp; + evtp->evt_arg2 = &devloss_compl; + evtp->evt = LPFC_EVT_DEV_LOSS; + list_add_tail(&evtp->evt_listp, &phba->work_list); + if (phba->work_wait) + wake_up(phba->work_wait); + + spin_unlock_irq(&phba->hbalock); + + wait_for_completion(&devloss_compl); + + return; +} + +/* + * This function is called from the worker thread when dev_loss_tmo + * expire. + */ +void +lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) +{ + struct lpfc_rport_data *rdata; + struct fc_rport *rport; + struct lpfc_vport *vport; + struct lpfc_hba *phba; + uint8_t *name; + int warn_on = 0; + + rport = ndlp->rport; + + if (!rport) + return; + + rdata = rport->dd_data; + name = (uint8_t *) &ndlp->nlp_portname; + vport = ndlp->vport; + phba = vport->phba; + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, + "rport devlosstmo:did:x%x type:x%x id:x%x", + ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id); + + if (!(vport->load_flag & FC_UNLOADING) && + ndlp->nlp_state == NLP_STE_MAPPED_NODE) return; - name = (uint8_t *)&ndlp->nlp_portname; - phba = ndlp->nlp_phba; + if (ndlp->nlp_type & NLP_FABRIC) { + int put_node; + int put_rport; - spin_lock_irq(phba->host->host_lock); + /* We will clean up these Nodes in linkup */ + put_node = rdata->pnode != NULL; + put_rport = ndlp->rport != NULL; + rdata->pnode = NULL; + ndlp->rport = NULL; + if (put_node) + lpfc_nlp_put(ndlp); + if (put_rport) + put_device(&rport->dev); + return; + } if (ndlp->nlp_sid != NLP_NO_SID) { warn_on = 1; /* flush the target */ lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring], - ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT); + ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT); } - if (phba->fc_flag & FC_UNLOADING) + if (vport->load_flag & FC_UNLOADING) warn_on = 0; - spin_unlock_irq(phba->host->host_lock); - if (warn_on) { lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, - "%d:0203 Devloss timeout on " + "%d (%d):0203 Devloss timeout on " "WWPN %x:%x:%x:%x:%x:%x:%x:%x " "NPort x%x Data: x%x x%x x%x\n", - phba->brd_no, + phba->brd_no, vport->vpi, *name, *(name+1), *(name+2), *(name+3), *(name+4), *(name+5), *(name+6), *(name+7), ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); } else { lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, - "%d:0204 Devloss timeout on " + "%d (%d):0204 Devloss timeout on " "WWPN %x:%x:%x:%x:%x:%x:%x:%x " "NPort x%x Data: x%x x%x x%x\n", - phba->brd_no, + phba->brd_no, vport->vpi, *name, *(name+1), *(name+2), *(name+3), *(name+4), *(name+5), *(name+6), *(name+7), ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); } - if (!(phba->fc_flag & FC_UNLOADING) && + if (!(vport->load_flag & FC_UNLOADING) && !(ndlp->nlp_flag & NLP_DELAY_TMO) && !(ndlp->nlp_flag & NLP_NPR_2B_DISC) && (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)) - lpfc_disc_state_machine(phba, ndlp, NULL, NLP_EVT_DEVICE_RM); + lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); else { + int put_node; + int put_rport; + + put_node = rdata->pnode != NULL; + put_rport = ndlp->rport != NULL; rdata->pnode = NULL; ndlp->rport = NULL; - lpfc_nlp_put(ndlp); - put_device(&rport->dev); + if (put_node) + lpfc_nlp_put(ndlp); + if (put_rport) + put_device(&rport->dev); } +} + 
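The dev_loss path above changes shape: lpfc_dev_loss_tmo_callbk no longer tears the node down inline; it queues an LPFC_EVT_DEV_LOSS entry on the driver's work list, wakes the worker thread, and blocks on a completion until lpfc_dev_loss_tmo_handler has run. Below is a minimal sketch of that queue-then-wait hand-off, using invented names (work_evt, post_and_wait, drain_work_list) rather than the real lpfc structures; it shows only the shape of the pattern, not the driver's actual locking or event types.

#include <linux/completion.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct work_evt {                       /* one queued request for the worker */
	struct list_head   evt_listp;
	void              *evt_arg1;    /* e.g. the node being handled */
	struct completion *evt_done;    /* the caller sleeps on this   */
};

static LIST_HEAD(work_list);
static DEFINE_SPINLOCK(work_lock);
static DECLARE_WAIT_QUEUE_HEAD(work_waitq);

/* Caller side: queue the event, wake the worker, sleep until it is done. */
static void post_and_wait(struct work_evt *evtp, void *arg)
{
	struct completion done;

	init_completion(&done);
	evtp->evt_arg1 = arg;
	evtp->evt_done = &done;

	spin_lock_irq(&work_lock);
	list_add_tail(&evtp->evt_listp, &work_list);
	spin_unlock_irq(&work_lock);

	wake_up(&work_waitq);           /* kick the worker thread          */
	wait_for_completion(&done);     /* block until the handler has run */
}

/* Worker side: drain the list, run each handler, signal its completion. */
static void drain_work_list(void (*handler)(void *))
{
	struct work_evt *evtp;

	spin_lock_irq(&work_lock);
	while (!list_empty(&work_list)) {
		evtp = list_entry(work_list.next, struct work_evt, evt_listp);
		list_del_init(&evtp->evt_listp);
		spin_unlock_irq(&work_lock);

		handler(evtp->evt_arg1);    /* e.g. the dev_loss handler  */
		complete(evtp->evt_done);   /* release the waiting caller */

		spin_lock_irq(&work_lock);
	}
	spin_unlock_irq(&work_lock);
}

This mirrors why lpfc_work_list_done below frees most event types itself but sets free_evt = 0 for the DEV_LOSS case: that evt is embedded in the ndlp, and the blocked caller is released with complete() rather than by freeing the entry.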
+void +lpfc_worker_wake_up(struct lpfc_hba *phba) +{ + wake_up(phba->work_wait); return; } static void -lpfc_work_list_done(struct lpfc_hba * phba) +lpfc_work_list_done(struct lpfc_hba *phba) { struct lpfc_work_evt *evtp = NULL; struct lpfc_nodelist *ndlp; + struct lpfc_vport *vport; int free_evt; - spin_lock_irq(phba->host->host_lock); - while(!list_empty(&phba->work_list)) { + spin_lock_irq(&phba->hbalock); + while (!list_empty(&phba->work_list)) { list_remove_head((&phba->work_list), evtp, typeof(*evtp), evt_listp); - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(&phba->hbalock); free_evt = 1; switch (evtp->evt) { + case LPFC_EVT_DEV_LOSS_DELAY: + free_evt = 0; /* evt is part of ndlp */ + ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1); + vport = ndlp->vport; + if (!vport) + break; + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, + "rport devlossdly:did:x%x flg:x%x", + ndlp->nlp_DID, ndlp->nlp_flag, 0); + + if (!(vport->load_flag & FC_UNLOADING) && + !(ndlp->nlp_flag & NLP_DELAY_TMO) && + !(ndlp->nlp_flag & NLP_NPR_2B_DISC)) { + lpfc_disc_state_machine(vport, ndlp, NULL, + NLP_EVT_DEVICE_RM); + } + break; case LPFC_EVT_ELS_RETRY: - ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1); + ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1); lpfc_els_retry_delay_handler(ndlp); + free_evt = 0; /* evt is part of ndlp */ + break; + case LPFC_EVT_DEV_LOSS: + ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1); + lpfc_nlp_get(ndlp); + lpfc_dev_loss_tmo_handler(ndlp); free_evt = 0; + complete((struct completion *)(evtp->evt_arg2)); + lpfc_nlp_put(ndlp); break; case LPFC_EVT_ONLINE: - if (phba->hba_state < LPFC_LINK_DOWN) - *(int *)(evtp->evt_arg1) = lpfc_online(phba); + if (phba->link_state < LPFC_LINK_DOWN) + *(int *) (evtp->evt_arg1) = lpfc_online(phba); else - *(int *)(evtp->evt_arg1) = 0; + *(int *) (evtp->evt_arg1) = 0; complete((struct completion *)(evtp->evt_arg2)); break; case LPFC_EVT_OFFLINE_PREP: - if (phba->hba_state >= LPFC_LINK_DOWN) + if (phba->link_state >= LPFC_LINK_DOWN) lpfc_offline_prep(phba); *(int *)(evtp->evt_arg1) = 0; complete((struct completion *)(evtp->evt_arg2)); @@ -218,33 +330,31 @@ lpfc_work_list_done(struct lpfc_hba * phba) case LPFC_EVT_KILL: lpfc_offline(phba); *(int *)(evtp->evt_arg1) - = (phba->stopped) ? 0 : lpfc_sli_brdkill(phba); + = (phba->pport->stopped) + ? 
0 : lpfc_sli_brdkill(phba); lpfc_unblock_mgmt_io(phba); complete((struct completion *)(evtp->evt_arg2)); break; } if (free_evt) kfree(evtp); - spin_lock_irq(phba->host->host_lock); + spin_lock_irq(&phba->hbalock); } - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(&phba->hbalock); } -static void -lpfc_work_done(struct lpfc_hba * phba) +void +lpfc_work_done(struct lpfc_hba *phba) { struct lpfc_sli_ring *pring; - int i; - uint32_t ha_copy; - uint32_t control; - uint32_t work_hba_events; + uint32_t ha_copy, status, control, work_port_events; + struct lpfc_vport *vport; - spin_lock_irq(phba->host->host_lock); + spin_lock_irq(&phba->hbalock); ha_copy = phba->work_ha; phba->work_ha = 0; - work_hba_events=phba->work_hba_events; - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(&phba->hbalock); if (ha_copy & HA_ERATT) lpfc_handle_eratt(phba); @@ -255,66 +365,111 @@ lpfc_work_done(struct lpfc_hba * phba) if (ha_copy & HA_LATT) lpfc_handle_latt(phba); - if (work_hba_events & WORKER_DISC_TMO) - lpfc_disc_timeout_handler(phba); - - if (work_hba_events & WORKER_ELS_TMO) - lpfc_els_timeout_handler(phba); - - if (work_hba_events & WORKER_MBOX_TMO) - lpfc_mbox_timeout_handler(phba); - - if (work_hba_events & WORKER_FDMI_TMO) - lpfc_fdmi_tmo_handler(phba); - - spin_lock_irq(phba->host->host_lock); - phba->work_hba_events &= ~work_hba_events; - spin_unlock_irq(phba->host->host_lock); - - for (i = 0; i < phba->sli.num_rings; i++, ha_copy >>= 4) { - pring = &phba->sli.ring[i]; - if ((ha_copy & HA_RXATT) - || (pring->flag & LPFC_DEFERRED_RING_EVENT)) { - if (pring->flag & LPFC_STOP_IOCB_MASK) { - pring->flag |= LPFC_DEFERRED_RING_EVENT; - } else { - lpfc_sli_handle_slow_ring_event(phba, pring, - (ha_copy & - HA_RXMASK)); - pring->flag &= ~LPFC_DEFERRED_RING_EVENT; - } - /* - * Turn on Ring interrupts - */ - spin_lock_irq(phba->host->host_lock); - control = readl(phba->HCregaddr); - control |= (HC_R0INT_ENA << i); + spin_lock_irq(&phba->hbalock); + list_for_each_entry(vport, &phba->port_list, listentry) { + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + + if (!scsi_host_get(shost)) { + continue; + } + spin_unlock_irq(&phba->hbalock); + work_port_events = vport->work_port_events; + + if (work_port_events & WORKER_DISC_TMO) + lpfc_disc_timeout_handler(vport); + + if (work_port_events & WORKER_ELS_TMO) + lpfc_els_timeout_handler(vport); + + if (work_port_events & WORKER_HB_TMO) + lpfc_hb_timeout_handler(phba); + + if (work_port_events & WORKER_MBOX_TMO) + lpfc_mbox_timeout_handler(phba); + + if (work_port_events & WORKER_FABRIC_BLOCK_TMO) + lpfc_unblock_fabric_iocbs(phba); + + if (work_port_events & WORKER_FDMI_TMO) + lpfc_fdmi_timeout_handler(vport); + + if (work_port_events & WORKER_RAMP_DOWN_QUEUE) + lpfc_ramp_down_queue_handler(phba); + + if (work_port_events & WORKER_RAMP_UP_QUEUE) + lpfc_ramp_up_queue_handler(phba); + + spin_lock_irq(&vport->work_port_lock); + vport->work_port_events &= ~work_port_events; + spin_unlock_irq(&vport->work_port_lock); + scsi_host_put(shost); + spin_lock_irq(&phba->hbalock); + } + spin_unlock_irq(&phba->hbalock); + + pring = &phba->sli.ring[LPFC_ELS_RING]; + status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING))); + status >>= (4*LPFC_ELS_RING); + if ((status & HA_RXMASK) + || (pring->flag & LPFC_DEFERRED_RING_EVENT)) { + if (pring->flag & LPFC_STOP_IOCB_MASK) { + pring->flag |= LPFC_DEFERRED_RING_EVENT; + } else { + lpfc_sli_handle_slow_ring_event(phba, pring, + (status & + HA_RXMASK)); + pring->flag &= ~LPFC_DEFERRED_RING_EVENT; + } + /* + * Turn 
on Ring interrupts + */ + spin_lock_irq(&phba->hbalock); + control = readl(phba->HCregaddr); + if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) { + control |= (HC_R0INT_ENA << LPFC_ELS_RING); writel(control, phba->HCregaddr); readl(phba->HCregaddr); /* flush */ - spin_unlock_irq(phba->host->host_lock); } + spin_unlock_irq(&phba->hbalock); } - - lpfc_work_list_done (phba); - + lpfc_work_list_done(phba); } static int -check_work_wait_done(struct lpfc_hba *phba) { +check_work_wait_done(struct lpfc_hba *phba) +{ + struct lpfc_vport *vport; + struct lpfc_sli_ring *pring; + int rc = 0; + + spin_lock_irq(&phba->hbalock); + list_for_each_entry(vport, &phba->port_list, listentry) { + if (vport->work_port_events) { + rc = 1; + goto exit; + } + } - spin_lock_irq(phba->host->host_lock); - if (phba->work_ha || - phba->work_hba_events || - (!list_empty(&phba->work_list)) || + if (phba->work_ha || (!list_empty(&phba->work_list)) || kthread_should_stop()) { - spin_unlock_irq(phba->host->host_lock); - return 1; - } else { - spin_unlock_irq(phba->host->host_lock); - return 0; + rc = 1; + goto exit; } + + pring = &phba->sli.ring[LPFC_ELS_RING]; + if (pring->flag & LPFC_DEFERRED_RING_EVENT) + rc = 1; +exit: + if (rc) + phba->work_found++; + else + phba->work_found = 0; + + spin_unlock_irq(&phba->hbalock); + return rc; } + int lpfc_do_work(void *p) { @@ -324,11 +479,13 @@ lpfc_do_work(void *p) set_user_nice(current, -20); phba->work_wait = &work_waitq; + phba->work_found = 0; while (1) { rc = wait_event_interruptible(work_waitq, - check_work_wait_done(phba)); + check_work_wait_done(phba)); + BUG_ON(rc); if (kthread_should_stop()) @@ -336,6 +493,17 @@ lpfc_do_work(void *p) lpfc_work_done(phba); + /* If there is alot of slow ring work, like during link up + * check_work_wait_done() may cause this thread to not give + * up the CPU for very long periods of time. This may cause + * soft lockups or other problems. To avoid these situations + * give up the CPU here after LPFC_MAX_WORKER_ITERATION + * consecutive iterations. + */ + if (phba->work_found >= LPFC_MAX_WORKER_ITERATION) { + phba->work_found = 0; + schedule(); + } } phba->work_wait = NULL; return 0; @@ -347,16 +515,17 @@ lpfc_do_work(void *p) * embedding it in the IOCB. 
*/ int -lpfc_workq_post_event(struct lpfc_hba * phba, void *arg1, void *arg2, +lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2, uint32_t evt) { struct lpfc_work_evt *evtp; + unsigned long flags; /* * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will * be queued to worker thread for processing */ - evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_KERNEL); + evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC); if (!evtp) return 0; @@ -364,136 +533,210 @@ lpfc_workq_post_event(struct lpfc_hba * phba, void *arg1, void *arg2, evtp->evt_arg2 = arg2; evtp->evt = evt; - spin_lock_irq(phba->host->host_lock); + spin_lock_irqsave(&phba->hbalock, flags); list_add_tail(&evtp->evt_listp, &phba->work_list); if (phba->work_wait) - wake_up(phba->work_wait); - spin_unlock_irq(phba->host->host_lock); + lpfc_worker_wake_up(phba); + spin_unlock_irqrestore(&phba->hbalock, flags); return 1; } -int -lpfc_linkdown(struct lpfc_hba *phba) +void +lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove) { - struct lpfc_sli *psli; - struct lpfc_nodelist *ndlp, *next_ndlp; - LPFC_MBOXQ_t *mb; - int rc; + struct lpfc_hba *phba = vport->phba; + struct lpfc_nodelist *ndlp, *next_ndlp; + int rc; - psli = &phba->sli; - /* sysfs or selective reset may call this routine to clean up */ - if (phba->hba_state >= LPFC_LINK_DOWN) { - if (phba->hba_state == LPFC_LINK_DOWN) - return 0; + list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { + if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) + continue; - spin_lock_irq(phba->host->host_lock); - phba->hba_state = LPFC_LINK_DOWN; - spin_unlock_irq(phba->host->host_lock); + if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) + lpfc_unreg_rpi(vport, ndlp); + + /* Leave Fabric nodes alone on link down */ + if (!remove && ndlp->nlp_type & NLP_FABRIC) + continue; + rc = lpfc_disc_state_machine(vport, ndlp, NULL, + remove + ? NLP_EVT_DEVICE_RM + : NLP_EVT_DEVICE_RECOVERY); } + if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) { + lpfc_mbx_unreg_vpi(vport); + vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; + } +} + +static void +lpfc_linkdown_port(struct lpfc_vport *vport) +{ + struct lpfc_nodelist *ndlp, *next_ndlp; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); - fc_host_post_event(phba->host, fc_get_event_number(), - FCH_EVT_LINKDOWN, 0); + fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKDOWN, 0); - /* Clean up any firmware default rpi's */ - if ((mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) { - lpfc_unreg_did(phba, 0xffffffff, mb); - mb->mbox_cmpl=lpfc_sli_def_mbox_cmpl; - if (lpfc_sli_issue_mbox(phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB)) - == MBX_NOT_FINISHED) { - mempool_free( mb, phba->mbox_mem_pool); - } - } + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, + "Link Down: state:x%x rtry:x%x flg:x%x", + vport->port_state, vport->fc_ns_retry, vport->fc_flag); /* Cleanup any outstanding RSCN activity */ - lpfc_els_flush_rscn(phba); + lpfc_els_flush_rscn(vport); /* Cleanup any outstanding ELS commands */ - lpfc_els_flush_cmd(phba); + lpfc_els_flush_cmd(vport); - /* - * Issue a LINK DOWN event to all nodes. 
- */ - list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes, nlp_listp) { - /* free any ndlp's on unused list */ + lpfc_cleanup_rpis(vport, 0); + + /* free any ndlp's on unused list */ + list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) + /* free any ndlp's in unused state */ if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) - lpfc_drop_node(phba, ndlp); - else /* otherwise, force node recovery. */ - rc = lpfc_disc_state_machine(phba, ndlp, NULL, - NLP_EVT_DEVICE_RECOVERY); + lpfc_drop_node(vport, ndlp); + + /* Turn off discovery timer if its running */ + lpfc_can_disctmo(vport); +} + +int +lpfc_linkdown(struct lpfc_hba *phba) +{ + struct lpfc_vport *vport = phba->pport; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_vport *port_iterator; + LPFC_MBOXQ_t *mb; + + if (phba->link_state == LPFC_LINK_DOWN) { + return 0; + } + spin_lock_irq(&phba->hbalock); + if (phba->link_state > LPFC_LINK_DOWN) { + phba->link_state = LPFC_LINK_DOWN; + phba->pport->fc_flag &= ~FC_LBIT; + } + spin_unlock_irq(&phba->hbalock); + + list_for_each_entry(port_iterator, &phba->port_list, listentry) { + + /* Issue a LINK DOWN event to all nodes */ + lpfc_linkdown_port(port_iterator); + } + + /* Clean up any firmware default rpi's */ + mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (mb) { + lpfc_unreg_did(phba, 0xffff, 0xffffffff, mb); + mb->vport = vport; + mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + if (lpfc_sli_issue_mbox(phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB)) + == MBX_NOT_FINISHED) { + mempool_free(mb, phba->mbox_mem_pool); + } } /* Setup myDID for link up if we are in pt2pt mode */ - if (phba->fc_flag & FC_PT2PT) { - phba->fc_myDID = 0; - if ((mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) { + if (phba->pport->fc_flag & FC_PT2PT) { + phba->pport->fc_myDID = 0; + mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (mb) { lpfc_config_link(phba, mb); - mb->mbox_cmpl=lpfc_sli_def_mbox_cmpl; - if (lpfc_sli_issue_mbox - (phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB)) + mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + mb->vport = vport; + if (lpfc_sli_issue_mbox(phba, mb, + (MBX_NOWAIT | MBX_STOP_IOCB)) == MBX_NOT_FINISHED) { - mempool_free( mb, phba->mbox_mem_pool); + mempool_free(mb, phba->mbox_mem_pool); } } - spin_lock_irq(phba->host->host_lock); - phba->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI); - spin_unlock_irq(phba->host->host_lock); + spin_lock_irq(shost->host_lock); + phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI); + spin_unlock_irq(shost->host_lock); } - spin_lock_irq(phba->host->host_lock); - phba->fc_flag &= ~FC_LBIT; - spin_unlock_irq(phba->host->host_lock); - - /* Turn off discovery timer if its running */ - lpfc_can_disctmo(phba); - /* Must process IOCBs on all rings to handle ABORTed I/Os */ return 0; } -static int -lpfc_linkup(struct lpfc_hba *phba) +static void +lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport) { - struct lpfc_nodelist *ndlp, *next_ndlp; + struct lpfc_nodelist *ndlp; - fc_host_post_event(phba->host, fc_get_event_number(), - FCH_EVT_LINKUP, 0); - - spin_lock_irq(phba->host->host_lock); - phba->hba_state = LPFC_LINK_UP; - phba->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY | - FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY); - phba->fc_flag |= FC_NDISC_ACTIVE; - phba->fc_ns_retry = 0; - spin_unlock_irq(phba->host->host_lock); - - - if (phba->fc_flag & FC_LBIT) { - list_for_each_entry(ndlp, &phba->fc_nodes, nlp_listp) { - if (ndlp->nlp_state != NLP_STE_UNUSED_NODE) { - if (ndlp->nlp_type & NLP_FABRIC) { - /* - * On 
Linkup its safe to clean up the - * ndlp from Fabric connections. - */ - lpfc_nlp_set_state(phba, ndlp, - NLP_STE_UNUSED_NODE); - } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { - /* - * Fail outstanding IO now since - * device is marked for PLOGI. - */ - lpfc_unreg_rpi(phba, ndlp); - } - } + list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { + if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) + continue; + + if (ndlp->nlp_type & NLP_FABRIC) { + /* On Linkup its safe to clean up the ndlp + * from Fabric connections. + */ + if (ndlp->nlp_DID != Fabric_DID) + lpfc_unreg_rpi(vport, ndlp); + lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); + } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { + /* Fail outstanding IO now since device is + * marked for PLOGI. + */ + lpfc_unreg_rpi(vport, ndlp); } } +} - /* free any ndlp's on unused list */ - list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes, - nlp_listp) { +static void +lpfc_linkup_port(struct lpfc_vport *vport) +{ + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_nodelist *ndlp, *next_ndlp; + struct lpfc_hba *phba = vport->phba; + + if ((vport->load_flag & FC_UNLOADING) != 0) + return; + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, + "Link Up: top:x%x speed:x%x flg:x%x", + phba->fc_topology, phba->fc_linkspeed, phba->link_flag); + + /* If NPIV is not enabled, only bring the physical port up */ + if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && + (vport != phba->pport)) + return; + + fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKUP, 0); + + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY | + FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY); + vport->fc_flag |= FC_NDISC_ACTIVE; + vport->fc_ns_retry = 0; + spin_unlock_irq(shost->host_lock); + + if (vport->fc_flag & FC_LBIT) + lpfc_linkup_cleanup_nodes(vport); + + /* free any ndlp's in unused state */ + list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, + nlp_listp) if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) - lpfc_drop_node(phba, ndlp); + lpfc_drop_node(vport, ndlp); +} + +static int +lpfc_linkup(struct lpfc_hba *phba) +{ + struct lpfc_vport *vport; + + phba->link_state = LPFC_LINK_UP; + + /* Unblock fabric iocbs if they are blocked */ + clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); + del_timer_sync(&phba->fabric_block_timer); + + list_for_each_entry(vport, &phba->port_list, listentry) { + lpfc_linkup_port(vport); } + if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) + lpfc_issue_clear_la(phba, phba->pport); return 0; } @@ -505,14 +748,14 @@ lpfc_linkup(struct lpfc_hba *phba) * handed off to the SLI layer. 
*/ void -lpfc_mbx_cmpl_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) +lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { - struct lpfc_sli *psli; - MAILBOX_t *mb; + struct lpfc_vport *vport = pmb->vport; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_sli *psli = &phba->sli; + MAILBOX_t *mb = &pmb->mb; uint32_t control; - psli = &phba->sli; - mb = &pmb->mb; /* Since we don't do discovery right now, turn these off here */ psli->ring[psli->extra_ring].flag &= ~LPFC_STOP_IOCB_EVENT; psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT; @@ -522,69 +765,74 @@ lpfc_mbx_cmpl_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) { /* CLEAR_LA mbox error <mbxStatus> state <hba_state> */ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, - "%d:0320 CLEAR_LA mbxStatus error x%x hba " + "%d (%d):0320 CLEAR_LA mbxStatus error x%x hba " "state x%x\n", - phba->brd_no, mb->mbxStatus, phba->hba_state); + phba->brd_no, vport->vpi, mb->mbxStatus, + vport->port_state); - phba->hba_state = LPFC_HBA_ERROR; + phba->link_state = LPFC_HBA_ERROR; goto out; } - if (phba->fc_flag & FC_ABORT_DISCOVERY) - goto out; + if (vport->port_type == LPFC_PHYSICAL_PORT) + phba->link_state = LPFC_HBA_READY; - phba->num_disc_nodes = 0; - /* go thru NPR list and issue ELS PLOGIs */ - if (phba->fc_npr_cnt) { - lpfc_els_disc_plogi(phba); - } + spin_lock_irq(&phba->hbalock); + psli->sli_flag |= LPFC_PROCESS_LA; + control = readl(phba->HCregaddr); + control |= HC_LAINT_ENA; + writel(control, phba->HCregaddr); + readl(phba->HCregaddr); /* flush */ + spin_unlock_irq(&phba->hbalock); + return; + + vport->num_disc_nodes = 0; + /* go thru NPR nodes and issue ELS PLOGIs */ + if (vport->fc_npr_cnt) + lpfc_els_disc_plogi(vport); - if (!phba->num_disc_nodes) { - spin_lock_irq(phba->host->host_lock); - phba->fc_flag &= ~FC_NDISC_ACTIVE; - spin_unlock_irq(phba->host->host_lock); + if (!vport->num_disc_nodes) { + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~FC_NDISC_ACTIVE; + spin_unlock_irq(shost->host_lock); } - phba->hba_state = LPFC_HBA_READY; + vport->port_state = LPFC_VPORT_READY; out: /* Device Discovery completes */ - lpfc_printf_log(phba, - KERN_INFO, - LOG_DISCOVERY, - "%d:0225 Device Discovery completes\n", - phba->brd_no); + lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, + "%d (%d):0225 Device Discovery completes\n", + phba->brd_no, vport->vpi); - mempool_free( pmb, phba->mbox_mem_pool); + mempool_free(pmb, phba->mbox_mem_pool); - spin_lock_irq(phba->host->host_lock); - phba->fc_flag &= ~FC_ABORT_DISCOVERY; - if (phba->fc_flag & FC_ESTABLISH_LINK) { - phba->fc_flag &= ~FC_ESTABLISH_LINK; - } - spin_unlock_irq(phba->host->host_lock); + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~(FC_ABORT_DISCOVERY | FC_ESTABLISH_LINK); + spin_unlock_irq(shost->host_lock); del_timer_sync(&phba->fc_estabtmo); - lpfc_can_disctmo(phba); + lpfc_can_disctmo(vport); /* turn on Link Attention interrupts */ - spin_lock_irq(phba->host->host_lock); + + spin_lock_irq(&phba->hbalock); psli->sli_flag |= LPFC_PROCESS_LA; control = readl(phba->HCregaddr); control |= HC_LAINT_ENA; writel(control, phba->HCregaddr); readl(phba->HCregaddr); /* flush */ - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(&phba->hbalock); return; } + static void lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { - struct lpfc_sli *psli = &phba->sli; - int rc; + struct lpfc_vport *vport = pmb->vport; if (pmb->mb.mbxStatus) goto out; @@ -592,154 
+840,139 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) mempool_free(pmb, phba->mbox_mem_pool); if (phba->fc_topology == TOPOLOGY_LOOP && - phba->fc_flag & FC_PUBLIC_LOOP && - !(phba->fc_flag & FC_LBIT)) { + vport->fc_flag & FC_PUBLIC_LOOP && + !(vport->fc_flag & FC_LBIT)) { /* Need to wait for FAN - use discovery timer - * for timeout. hba_state is identically + * for timeout. port_state is identically * LPFC_LOCAL_CFG_LINK while waiting for FAN */ - lpfc_set_disctmo(phba); + lpfc_set_disctmo(vport); return; - } + } - /* Start discovery by sending a FLOGI. hba_state is identically + /* Start discovery by sending a FLOGI. port_state is identically * LPFC_FLOGI while waiting for FLOGI cmpl */ - phba->hba_state = LPFC_FLOGI; - lpfc_set_disctmo(phba); - lpfc_initial_flogi(phba); + if (vport->port_state != LPFC_FLOGI) { + vport->port_state = LPFC_FLOGI; + lpfc_set_disctmo(vport); + lpfc_initial_flogi(vport); + } return; out: lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, - "%d:0306 CONFIG_LINK mbxStatus error x%x " + "%d (%d):0306 CONFIG_LINK mbxStatus error x%x " "HBA state x%x\n", - phba->brd_no, pmb->mb.mbxStatus, phba->hba_state); + phba->brd_no, vport->vpi, pmb->mb.mbxStatus, + vport->port_state); - lpfc_linkdown(phba); + mempool_free(pmb, phba->mbox_mem_pool); - phba->hba_state = LPFC_HBA_ERROR; + lpfc_linkdown(phba); lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, - "%d:0200 CONFIG_LINK bad hba state x%x\n", - phba->brd_no, phba->hba_state); + "%d (%d):0200 CONFIG_LINK bad hba state x%x\n", + phba->brd_no, vport->vpi, vport->port_state); - lpfc_clear_la(phba, pmb); - pmb->mbox_cmpl = lpfc_mbx_cmpl_clear_la; - rc = lpfc_sli_issue_mbox(phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB)); - if (rc == MBX_NOT_FINISHED) { - mempool_free(pmb, phba->mbox_mem_pool); - lpfc_disc_flush_list(phba); - psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT; - psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT; - psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT; - phba->hba_state = LPFC_HBA_READY; - } + lpfc_issue_clear_la(phba, vport); return; } static void -lpfc_mbx_cmpl_read_sparam(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) +lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { - struct lpfc_sli *psli = &phba->sli; MAILBOX_t *mb = &pmb->mb; struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1; + struct lpfc_vport *vport = pmb->vport; /* Check for error */ if (mb->mbxStatus) { /* READ_SPARAM mbox error <mbxStatus> state <hba_state> */ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, - "%d:0319 READ_SPARAM mbxStatus error x%x " + "%d (%d):0319 READ_SPARAM mbxStatus error x%x " "hba state x%x>\n", - phba->brd_no, mb->mbxStatus, phba->hba_state); + phba->brd_no, vport->vpi, mb->mbxStatus, + vport->port_state); lpfc_linkdown(phba); - phba->hba_state = LPFC_HBA_ERROR; goto out; } - memcpy((uint8_t *) & phba->fc_sparam, (uint8_t *) mp->virt, + memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt, sizeof (struct serv_parm)); if (phba->cfg_soft_wwnn) - u64_to_wwn(phba->cfg_soft_wwnn, phba->fc_sparam.nodeName.u.wwn); + u64_to_wwn(phba->cfg_soft_wwnn, + vport->fc_sparam.nodeName.u.wwn); if (phba->cfg_soft_wwpn) - u64_to_wwn(phba->cfg_soft_wwpn, phba->fc_sparam.portName.u.wwn); - memcpy((uint8_t *) & phba->fc_nodename, - (uint8_t *) & phba->fc_sparam.nodeName, - sizeof (struct lpfc_name)); - memcpy((uint8_t *) & phba->fc_portname, - (uint8_t *) & phba->fc_sparam.portName, - sizeof (struct lpfc_name)); + u64_to_wwn(phba->cfg_soft_wwpn, + 
vport->fc_sparam.portName.u.wwn); + memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName, + sizeof(vport->fc_nodename)); + memcpy(&vport->fc_portname, &vport->fc_sparam.portName, + sizeof(vport->fc_portname)); + if (vport->port_type == LPFC_PHYSICAL_PORT) { + memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn)); + memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn)); + } + lpfc_mbuf_free(phba, mp->virt, mp->phys); kfree(mp); - mempool_free( pmb, phba->mbox_mem_pool); + mempool_free(pmb, phba->mbox_mem_pool); return; out: pmb->context1 = NULL; lpfc_mbuf_free(phba, mp->virt, mp->phys); kfree(mp); - if (phba->hba_state != LPFC_CLEAR_LA) { - lpfc_clear_la(phba, pmb); - pmb->mbox_cmpl = lpfc_mbx_cmpl_clear_la; - if (lpfc_sli_issue_mbox(phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB)) - == MBX_NOT_FINISHED) { - mempool_free( pmb, phba->mbox_mem_pool); - lpfc_disc_flush_list(phba); - psli->ring[(psli->extra_ring)].flag &= - ~LPFC_STOP_IOCB_EVENT; - psli->ring[(psli->fcp_ring)].flag &= - ~LPFC_STOP_IOCB_EVENT; - psli->ring[(psli->next_ring)].flag &= - ~LPFC_STOP_IOCB_EVENT; - phba->hba_state = LPFC_HBA_READY; - } - } else { - mempool_free( pmb, phba->mbox_mem_pool); - } + lpfc_issue_clear_la(phba, vport); + mempool_free(pmb, phba->mbox_mem_pool); return; } static void lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la) { - int i; + struct lpfc_vport *vport = phba->pport; LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox; + int i; struct lpfc_dmabuf *mp; int rc; sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); - spin_lock_irq(phba->host->host_lock); + spin_lock_irq(&phba->hbalock); switch (la->UlnkSpeed) { - case LA_1GHZ_LINK: - phba->fc_linkspeed = LA_1GHZ_LINK; - break; - case LA_2GHZ_LINK: - phba->fc_linkspeed = LA_2GHZ_LINK; - break; - case LA_4GHZ_LINK: - phba->fc_linkspeed = LA_4GHZ_LINK; - break; - case LA_8GHZ_LINK: - phba->fc_linkspeed = LA_8GHZ_LINK; - break; - default: - phba->fc_linkspeed = LA_UNKNW_LINK; - break; + case LA_1GHZ_LINK: + phba->fc_linkspeed = LA_1GHZ_LINK; + break; + case LA_2GHZ_LINK: + phba->fc_linkspeed = LA_2GHZ_LINK; + break; + case LA_4GHZ_LINK: + phba->fc_linkspeed = LA_4GHZ_LINK; + break; + case LA_8GHZ_LINK: + phba->fc_linkspeed = LA_8GHZ_LINK; + break; + default: + phba->fc_linkspeed = LA_UNKNW_LINK; + break; } phba->fc_topology = la->topology; + phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED; if (phba->fc_topology == TOPOLOGY_LOOP) { - /* Get Loop Map information */ + phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED; + /* Get Loop Map information */ if (la->il) - phba->fc_flag |= FC_LBIT; + vport->fc_flag |= FC_LBIT; - phba->fc_myDID = la->granted_AL_PA; + vport->fc_myDID = la->granted_AL_PA; i = la->un.lilpBde64.tus.f.bdeSize; if (i == 0) { @@ -769,29 +1002,35 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la) } /* Link Up Event ALPA map */ lpfc_printf_log(phba, - KERN_WARNING, - LOG_LINK_EVENT, - "%d:1304 Link Up Event " - "ALPA map Data: x%x " - "x%x x%x x%x\n", - phba->brd_no, - un.pa.wd1, un.pa.wd2, - un.pa.wd3, un.pa.wd4); + KERN_WARNING, + LOG_LINK_EVENT, + "%d:1304 Link Up Event " + "ALPA map Data: x%x " + "x%x x%x x%x\n", + phba->brd_no, + un.pa.wd1, un.pa.wd2, + un.pa.wd3, un.pa.wd4); } } } } else { - phba->fc_myDID = phba->fc_pref_DID; - phba->fc_flag |= FC_LBIT; + if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) { + if (phba->max_vpi && phba->cfg_npiv_enable && + (phba->sli_rev == 3)) + phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED; + } + 
vport->fc_myDID = phba->fc_pref_DID; + vport->fc_flag |= FC_LBIT; } - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(&phba->hbalock); lpfc_linkup(phba); if (sparam_mbox) { - lpfc_read_sparam(phba, sparam_mbox); + lpfc_read_sparam(phba, sparam_mbox, 0); + sparam_mbox->vport = vport; sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam; rc = lpfc_sli_issue_mbox(phba, sparam_mbox, - (MBX_NOWAIT | MBX_STOP_IOCB)); + (MBX_NOWAIT | MBX_STOP_IOCB)); if (rc == MBX_NOT_FINISHED) { mp = (struct lpfc_dmabuf *) sparam_mbox->context1; lpfc_mbuf_free(phba, mp->virt, mp->phys); @@ -799,36 +1038,48 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la) mempool_free(sparam_mbox, phba->mbox_mem_pool); if (cfglink_mbox) mempool_free(cfglink_mbox, phba->mbox_mem_pool); - return; + goto out; } } if (cfglink_mbox) { - phba->hba_state = LPFC_LOCAL_CFG_LINK; + vport->port_state = LPFC_LOCAL_CFG_LINK; lpfc_config_link(phba, cfglink_mbox); + cfglink_mbox->vport = vport; cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link; rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, - (MBX_NOWAIT | MBX_STOP_IOCB)); - if (rc == MBX_NOT_FINISHED) - mempool_free(cfglink_mbox, phba->mbox_mem_pool); + (MBX_NOWAIT | MBX_STOP_IOCB)); + if (rc != MBX_NOT_FINISHED) + return; + mempool_free(cfglink_mbox, phba->mbox_mem_pool); } +out: + lpfc_vport_set_state(vport, FC_VPORT_FAILED); + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, + "%d (%d):0263 Discovery Mailbox error: state: 0x%x : %p %p\n", + phba->brd_no, vport->vpi, + vport->port_state, sparam_mbox, cfglink_mbox); + + lpfc_issue_clear_la(phba, vport); + return; } static void -lpfc_mbx_issue_link_down(struct lpfc_hba *phba) { +lpfc_mbx_issue_link_down(struct lpfc_hba *phba) +{ uint32_t control; struct lpfc_sli *psli = &phba->sli; lpfc_linkdown(phba); /* turn on Link Attention interrupts - no CLEAR_LA needed */ - spin_lock_irq(phba->host->host_lock); + spin_lock_irq(&phba->hbalock); psli->sli_flag |= LPFC_PROCESS_LA; control = readl(phba->HCregaddr); control |= HC_LAINT_ENA; writel(control, phba->HCregaddr); readl(phba->HCregaddr); /* flush */ - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(&phba->hbalock); } /* @@ -838,22 +1089,21 @@ lpfc_mbx_issue_link_down(struct lpfc_hba *phba) { * handed off to the SLI layer. 
*/ void -lpfc_mbx_cmpl_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) +lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { + struct lpfc_vport *vport = pmb->vport; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); READ_LA_VAR *la; MAILBOX_t *mb = &pmb->mb; struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); /* Check for error */ if (mb->mbxStatus) { - lpfc_printf_log(phba, - KERN_INFO, - LOG_LINK_EVENT, + lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, "%d:1307 READ_LA mbox error x%x state x%x\n", - phba->brd_no, - mb->mbxStatus, phba->hba_state); + phba->brd_no, mb->mbxStatus, vport->port_state); lpfc_mbx_issue_link_down(phba); - phba->hba_state = LPFC_HBA_ERROR; + phba->link_state = LPFC_HBA_ERROR; goto lpfc_mbx_cmpl_read_la_free_mbuf; } @@ -861,27 +1111,26 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) memcpy(&phba->alpa_map[0], mp->virt, 128); - spin_lock_irq(phba->host->host_lock); + spin_lock_irq(shost->host_lock); if (la->pb) - phba->fc_flag |= FC_BYPASSED_MODE; + vport->fc_flag |= FC_BYPASSED_MODE; else - phba->fc_flag &= ~FC_BYPASSED_MODE; - spin_unlock_irq(phba->host->host_lock); + vport->fc_flag &= ~FC_BYPASSED_MODE; + spin_unlock_irq(shost->host_lock); if (((phba->fc_eventTag + 1) < la->eventTag) || - (phba->fc_eventTag == la->eventTag)) { + (phba->fc_eventTag == la->eventTag)) { phba->fc_stat.LinkMultiEvent++; - if (la->attType == AT_LINK_UP) { + if (la->attType == AT_LINK_UP) if (phba->fc_eventTag != 0) lpfc_linkdown(phba); - } } phba->fc_eventTag = la->eventTag; if (la->attType == AT_LINK_UP) { phba->fc_stat.LinkUp++; - if (phba->fc_flag & FC_LOOPBACK_MODE) { + if (phba->link_flag & LS_LOOPBACK_MODE) { lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, "%d:1306 Link Up Event in loop back mode " "x%x received Data: x%x x%x x%x x%x\n", @@ -903,7 +1152,7 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) "%d:1305 Link Down Event x%x received " "Data: x%x x%x x%x\n", phba->brd_no, la->eventTag, phba->fc_eventTag, - phba->hba_state, phba->fc_flag); + phba->pport->port_state, vport->fc_flag); lpfc_mbx_issue_link_down(phba); } @@ -921,31 +1170,115 @@ lpfc_mbx_cmpl_read_la_free_mbuf: * handed off to the SLI layer. 
*/ void -lpfc_mbx_cmpl_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) +lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { - struct lpfc_sli *psli; - MAILBOX_t *mb; - struct lpfc_dmabuf *mp; - struct lpfc_nodelist *ndlp; - - psli = &phba->sli; - mb = &pmb->mb; - - ndlp = (struct lpfc_nodelist *) pmb->context2; - mp = (struct lpfc_dmabuf *) (pmb->context1); + struct lpfc_vport *vport = pmb->vport; + struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); + struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; pmb->context1 = NULL; /* Good status, call state machine */ - lpfc_disc_state_machine(phba, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN); + lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN); lpfc_mbuf_free(phba, mp->virt, mp->phys); kfree(mp); - mempool_free( pmb, phba->mbox_mem_pool); + mempool_free(pmb, phba->mbox_mem_pool); lpfc_nlp_put(ndlp); return; } +static void +lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) +{ + MAILBOX_t *mb = &pmb->mb; + struct lpfc_vport *vport = pmb->vport; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + + switch (mb->mbxStatus) { + case 0x0011: + case 0x0020: + case 0x9700: + lpfc_printf_log(phba, KERN_INFO, LOG_NODE, + "%d (%d):0911 cmpl_unreg_vpi, " + "mb status = 0x%x\n", + phba->brd_no, vport->vpi, mb->mbxStatus); + break; + } + vport->unreg_vpi_cmpl = VPORT_OK; + mempool_free(pmb, phba->mbox_mem_pool); + /* + * This shost reference might have been taken at the beginning of + * lpfc_vport_delete() + */ + if (vport->load_flag & FC_UNLOADING) + scsi_host_put(shost); +} + +void +lpfc_mbx_unreg_vpi(struct lpfc_vport *vport) +{ + struct lpfc_hba *phba = vport->phba; + LPFC_MBOXQ_t *mbox; + int rc; + + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) + return; + + lpfc_unreg_vpi(phba, vport->vpi, mbox); + mbox->vport = vport; + mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi; + rc = lpfc_sli_issue_mbox(phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB)); + if (rc == MBX_NOT_FINISHED) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_VPORT, + "%d (%d):1800 Could not issue unreg_vpi\n", + phba->brd_no, vport->vpi); + mempool_free(mbox, phba->mbox_mem_pool); + vport->unreg_vpi_cmpl = VPORT_ERROR; + } +} + +static void +lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) +{ + struct lpfc_vport *vport = pmb->vport; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + MAILBOX_t *mb = &pmb->mb; + + switch (mb->mbxStatus) { + case 0x0011: + case 0x9601: + case 0x9602: + lpfc_printf_log(phba, KERN_INFO, LOG_NODE, + "%d (%d):0912 cmpl_reg_vpi, mb status = 0x%x\n", + phba->brd_no, vport->vpi, mb->mbxStatus); + lpfc_vport_set_state(vport, FC_VPORT_FAILED); + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); + spin_unlock_irq(shost->host_lock); + vport->fc_myDID = 0; + goto out; + } + + vport->num_disc_nodes = 0; + /* go thru NPR list and issue ELS PLOGIs */ + if (vport->fc_npr_cnt) + lpfc_els_disc_plogi(vport); + + if (!vport->num_disc_nodes) { + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~FC_NDISC_ACTIVE; + spin_unlock_irq(shost->host_lock); + lpfc_can_disctmo(vport); + } + vport->port_state = LPFC_VPORT_READY; + +out: + mempool_free(pmb, phba->mbox_mem_pool); + return; +} + /* * This routine handles processing a Fabric REG_LOGIN mailbox * command upon completion. It is setup in the LPFC_MBOXQ @@ -953,20 +1286,14 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) * handed off to the SLI layer. 
*/ void -lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) +lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { - struct lpfc_sli *psli; - MAILBOX_t *mb; - struct lpfc_dmabuf *mp; + struct lpfc_vport *vport = pmb->vport; + struct lpfc_vport *next_vport; + MAILBOX_t *mb = &pmb->mb; + struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); struct lpfc_nodelist *ndlp; - struct lpfc_nodelist *ndlp_fdmi; - - - psli = &phba->sli; - mb = &pmb->mb; - ndlp = (struct lpfc_nodelist *) pmb->context2; - mp = (struct lpfc_dmabuf *) (pmb->context1); pmb->context1 = NULL; pmb->context2 = NULL; @@ -977,60 +1304,46 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) mempool_free(pmb, phba->mbox_mem_pool); lpfc_nlp_put(ndlp); - /* FLOGI failed, so just use loop map to make discovery list */ - lpfc_disc_list_loopmap(phba); + if (phba->fc_topology == TOPOLOGY_LOOP) { + /* FLOGI failed, use loop map to make discovery list */ + lpfc_disc_list_loopmap(vport); + + /* Start discovery */ + lpfc_disc_start(vport); + return; + } + + lpfc_vport_set_state(vport, FC_VPORT_FAILED); + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, + "%d (%d):0258 Register Fabric login error: 0x%x\n", + phba->brd_no, vport->vpi, mb->mbxStatus); - /* Start discovery */ - lpfc_disc_start(phba); return; } ndlp->nlp_rpi = mb->un.varWords[0]; ndlp->nlp_type |= NLP_FABRIC; - lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNMAPPED_NODE); + lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); lpfc_nlp_put(ndlp); /* Drop the reference from the mbox */ - if (phba->hba_state == LPFC_FABRIC_CFG_LINK) { - /* This NPort has been assigned an NPort_ID by the fabric as a - * result of the completed fabric login. Issue a State Change - * Registration (SCR) ELS request to the fabric controller - * (SCR_DID) so that this NPort gets RSCN events from the - * fabric. - */ - lpfc_issue_els_scr(phba, SCR_DID, 0); - - ndlp = lpfc_findnode_did(phba, NameServer_DID); - if (!ndlp) { - /* Allocate a new node instance. If the pool is empty, - * start the discovery process and skip the Nameserver - * login process. This is attempted again later on. - * Otherwise, issue a Port Login (PLOGI) to NameServer. 
- */ - ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC); - if (!ndlp) { - lpfc_disc_start(phba); - lpfc_mbuf_free(phba, mp->virt, mp->phys); - kfree(mp); - mempool_free(pmb, phba->mbox_mem_pool); - return; - } else { - lpfc_nlp_init(phba, ndlp, NameServer_DID); - ndlp->nlp_type |= NLP_FABRIC; - } - } - lpfc_nlp_set_state(phba, ndlp, NLP_STE_PLOGI_ISSUE); - lpfc_issue_els_plogi(phba, NameServer_DID, 0); - if (phba->cfg_fdmi_on) { - ndlp_fdmi = mempool_alloc(phba->nlp_mem_pool, - GFP_KERNEL); - if (ndlp_fdmi) { - lpfc_nlp_init(phba, ndlp_fdmi, FDMI_DID); - ndlp_fdmi->nlp_type |= NLP_FABRIC; - ndlp_fdmi->nlp_state = NLP_STE_PLOGI_ISSUE; - lpfc_issue_els_plogi(phba, FDMI_DID, 0); + if (vport->port_state == LPFC_FABRIC_CFG_LINK) { + list_for_each_entry(next_vport, &phba->port_list, listentry) { + if (next_vport->port_type == LPFC_PHYSICAL_PORT) + continue; + + if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) + lpfc_initial_fdisc(next_vport); + else if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { + lpfc_vport_set_state(vport, + FC_VPORT_NO_FABRIC_SUPP); + lpfc_printf_log(phba, KERN_ERR, LOG_ELS, + "%d (%d):0259 No NPIV Fabric " + "support\n", + phba->brd_no, vport->vpi); } } + lpfc_do_scr_ns_plogi(phba, vport); } lpfc_mbuf_free(phba, mp->virt, mp->phys); @@ -1046,32 +1359,36 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) * handed off to the SLI layer. */ void -lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) +lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { - struct lpfc_sli *psli; - MAILBOX_t *mb; - struct lpfc_dmabuf *mp; - struct lpfc_nodelist *ndlp; - - psli = &phba->sli; - mb = &pmb->mb; - - ndlp = (struct lpfc_nodelist *) pmb->context2; - mp = (struct lpfc_dmabuf *) (pmb->context1); + MAILBOX_t *mb = &pmb->mb; + struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); + struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; + struct lpfc_vport *vport = pmb->vport; if (mb->mbxStatus) { +out: lpfc_nlp_put(ndlp); lpfc_mbuf_free(phba, mp->virt, mp->phys); kfree(mp); mempool_free(pmb, phba->mbox_mem_pool); - lpfc_drop_node(phba, ndlp); + lpfc_drop_node(vport, ndlp); - /* RegLogin failed, so just use loop map to make discovery - list */ - lpfc_disc_list_loopmap(phba); + if (phba->fc_topology == TOPOLOGY_LOOP) { + /* + * RegLogin failed, use loop map to make discovery + * list + */ + lpfc_disc_list_loopmap(vport); - /* Start discovery */ - lpfc_disc_start(phba); + /* Start discovery */ + lpfc_disc_start(vport); + return; + } + lpfc_vport_set_state(vport, FC_VPORT_FAILED); + lpfc_printf_log(phba, KERN_ERR, LOG_ELS, + "%d (%d):0260 Register NameServer error: 0x%x\n", + phba->brd_no, vport->vpi, mb->mbxStatus); return; } @@ -1079,37 +1396,43 @@ lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) ndlp->nlp_rpi = mb->un.varWords[0]; ndlp->nlp_type |= NLP_FABRIC; - lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNMAPPED_NODE); + lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); - if (phba->hba_state < LPFC_HBA_READY) { - /* Link up discovery requires Fabrib registration. */ - lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RNN_ID); - lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RSNN_NN); - lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RFT_ID); - lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RFF_ID); + if (vport->port_state < LPFC_VPORT_READY) { + /* Link up discovery requires Fabric registration. */ + lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, 0); /* Do this first! 
*/ + lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0); + lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0); + lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0); + lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0); + + /* Issue SCR just before NameServer GID_FT Query */ + lpfc_issue_els_scr(vport, SCR_DID, 0); } - phba->fc_ns_retry = 0; + vport->fc_ns_retry = 0; /* Good status, issue CT Request to NameServer */ - if (lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT)) { + if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0)) { /* Cannot issue NameServer Query, so finish up discovery */ - lpfc_disc_start(phba); + goto out; } lpfc_nlp_put(ndlp); lpfc_mbuf_free(phba, mp->virt, mp->phys); kfree(mp); - mempool_free( pmb, phba->mbox_mem_pool); + mempool_free(pmb, phba->mbox_mem_pool); return; } static void -lpfc_register_remote_port(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) +lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) { - struct fc_rport *rport; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct fc_rport *rport; struct lpfc_rport_data *rdata; struct fc_rport_identifiers rport_ids; + struct lpfc_hba *phba = vport->phba; /* Remote port has reappeared. Re-register w/ FC transport */ rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn); @@ -1125,10 +1448,15 @@ lpfc_register_remote_port(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) * registered the port. */ if (ndlp->rport && ndlp->rport->dd_data && - *(struct lpfc_rport_data **) ndlp->rport->dd_data) { + ((struct lpfc_rport_data *) ndlp->rport->dd_data)->pnode == ndlp) { lpfc_nlp_put(ndlp); } - ndlp->rport = rport = fc_remote_port_add(phba->host, 0, &rport_ids); + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, + "rport add: did:x%x flg:x%x type x%x", + ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); + + ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids); if (!rport || !get_device(&rport->dev)) { dev_printk(KERN_WARNING, &phba->pcidev->dev, "Warning: fc_remote_port_add failed\n"); @@ -1151,25 +1479,20 @@ lpfc_register_remote_port(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) fc_remote_port_rolechg(rport, rport_ids.roles); if ((rport->scsi_target_id != -1) && - (rport->scsi_target_id < LPFC_MAX_TARGET)) { + (rport->scsi_target_id < LPFC_MAX_TARGET)) { ndlp->nlp_sid = rport->scsi_target_id; } - return; } static void -lpfc_unregister_remote_port(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) +lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp) { struct fc_rport *rport = ndlp->rport; - struct lpfc_rport_data *rdata = rport->dd_data; - if (rport->scsi_target_id == -1) { - ndlp->rport = NULL; - rdata->pnode = NULL; - lpfc_nlp_put(ndlp); - put_device(&rport->dev); - } + lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT, + "rport delete: did:x%x flg:x%x type x%x", + ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); fc_remote_port_delete(rport); @@ -1177,42 +1500,46 @@ lpfc_unregister_remote_port(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) } static void -lpfc_nlp_counters(struct lpfc_hba *phba, int state, int count) +lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count) { - spin_lock_irq(phba->host->host_lock); + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + + spin_lock_irq(shost->host_lock); switch (state) { case NLP_STE_UNUSED_NODE: - phba->fc_unused_cnt += count; + vport->fc_unused_cnt += count; break; case NLP_STE_PLOGI_ISSUE: - phba->fc_plogi_cnt += count; + vport->fc_plogi_cnt += count; break; case NLP_STE_ADISC_ISSUE: - phba->fc_adisc_cnt += count; + 
vport->fc_adisc_cnt += count; break; case NLP_STE_REG_LOGIN_ISSUE: - phba->fc_reglogin_cnt += count; + vport->fc_reglogin_cnt += count; break; case NLP_STE_PRLI_ISSUE: - phba->fc_prli_cnt += count; + vport->fc_prli_cnt += count; break; case NLP_STE_UNMAPPED_NODE: - phba->fc_unmap_cnt += count; + vport->fc_unmap_cnt += count; break; case NLP_STE_MAPPED_NODE: - phba->fc_map_cnt += count; + vport->fc_map_cnt += count; break; case NLP_STE_NPR_NODE: - phba->fc_npr_cnt += count; + vport->fc_npr_cnt += count; break; } - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(shost->host_lock); } static void -lpfc_nlp_state_cleanup(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, +lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int old_state, int new_state) { + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + if (new_state == NLP_STE_UNMAPPED_NODE) { ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); ndlp->nlp_flag &= ~NLP_NODEV_REMOVE; @@ -1226,35 +1553,34 @@ lpfc_nlp_state_cleanup(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, /* Transport interface */ if (ndlp->rport && (old_state == NLP_STE_MAPPED_NODE || old_state == NLP_STE_UNMAPPED_NODE)) { - phba->nport_event_cnt++; - lpfc_unregister_remote_port(phba, ndlp); + vport->phba->nport_event_cnt++; + lpfc_unregister_remote_port(ndlp); } if (new_state == NLP_STE_MAPPED_NODE || new_state == NLP_STE_UNMAPPED_NODE) { - phba->nport_event_cnt++; - /* - * Tell the fc transport about the port, if we haven't - * already. If we have, and it's a scsi entity, be - * sure to unblock any attached scsi devices - */ - lpfc_register_remote_port(phba, ndlp); + vport->phba->nport_event_cnt++; + /* + * Tell the fc transport about the port, if we haven't + * already. If we have, and it's a scsi entity, be + * sure to unblock any attached scsi devices + */ + lpfc_register_remote_port(vport, ndlp); } - - /* - * if we added to Mapped list, but the remote port - * registration failed or assigned a target id outside - * our presentable range - move the node to the - * Unmapped List - */ + /* + * if we added to Mapped list, but the remote port + * registration failed or assigned a target id outside + * our presentable range - move the node to the + * Unmapped List + */ if (new_state == NLP_STE_MAPPED_NODE && (!ndlp->rport || ndlp->rport->scsi_target_id == -1 || ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) { - spin_lock_irq(phba->host->host_lock); + spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_TGT_NO_SCSIID; - spin_unlock_irq(phba->host->host_lock); - lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNMAPPED_NODE); + spin_unlock_irq(shost->host_lock); + lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); } } @@ -1280,61 +1606,74 @@ lpfc_nlp_state_name(char *buffer, size_t size, int state) } void -lpfc_nlp_set_state(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int state) +lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + int state) { + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); int old_state = ndlp->nlp_state; char name1[16], name2[16]; - lpfc_printf_log(phba, KERN_INFO, LOG_NODE, - "%d:0904 NPort state transition x%06x, %s -> %s\n", - phba->brd_no, + lpfc_printf_log(vport->phba, KERN_INFO, LOG_NODE, + "%d (%d):0904 NPort state transition x%06x, %s -> %s\n", + vport->phba->brd_no, vport->vpi, ndlp->nlp_DID, lpfc_nlp_state_name(name1, sizeof(name1), old_state), lpfc_nlp_state_name(name2, sizeof(name2), state)); + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, + "node 
statechg did:x%x old:%d ste:%d", + ndlp->nlp_DID, old_state, state); + if (old_state == NLP_STE_NPR_NODE && (ndlp->nlp_flag & NLP_DELAY_TMO) != 0 && state != NLP_STE_NPR_NODE) - lpfc_cancel_retry_delay_tmo(phba, ndlp); + lpfc_cancel_retry_delay_tmo(vport, ndlp); if (old_state == NLP_STE_UNMAPPED_NODE) { ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID; ndlp->nlp_type &= ~NLP_FC_NODE; } if (list_empty(&ndlp->nlp_listp)) { - spin_lock_irq(phba->host->host_lock); - list_add_tail(&ndlp->nlp_listp, &phba->fc_nodes); - spin_unlock_irq(phba->host->host_lock); + spin_lock_irq(shost->host_lock); + list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes); + spin_unlock_irq(shost->host_lock); } else if (old_state) - lpfc_nlp_counters(phba, old_state, -1); + lpfc_nlp_counters(vport, old_state, -1); ndlp->nlp_state = state; - lpfc_nlp_counters(phba, state, 1); - lpfc_nlp_state_cleanup(phba, ndlp, old_state, state); + lpfc_nlp_counters(vport, state, 1); + lpfc_nlp_state_cleanup(vport, ndlp, old_state, state); } void -lpfc_dequeue_node(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) +lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) { + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0) - lpfc_cancel_retry_delay_tmo(phba, ndlp); + lpfc_cancel_retry_delay_tmo(vport, ndlp); if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp)) - lpfc_nlp_counters(phba, ndlp->nlp_state, -1); - spin_lock_irq(phba->host->host_lock); + lpfc_nlp_counters(vport, ndlp->nlp_state, -1); + spin_lock_irq(shost->host_lock); list_del_init(&ndlp->nlp_listp); - spin_unlock_irq(phba->host->host_lock); - lpfc_nlp_state_cleanup(phba, ndlp, ndlp->nlp_state, 0); + spin_unlock_irq(shost->host_lock); + lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state, + NLP_STE_UNUSED_NODE); } void -lpfc_drop_node(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) +lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) { + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0) - lpfc_cancel_retry_delay_tmo(phba, ndlp); + lpfc_cancel_retry_delay_tmo(vport, ndlp); if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp)) - lpfc_nlp_counters(phba, ndlp->nlp_state, -1); - spin_lock_irq(phba->host->host_lock); + lpfc_nlp_counters(vport, ndlp->nlp_state, -1); + spin_lock_irq(shost->host_lock); list_del_init(&ndlp->nlp_listp); - spin_unlock_irq(phba->host->host_lock); + ndlp->nlp_flag &= ~NLP_TARGET_REMOVE; + spin_unlock_irq(shost->host_lock); lpfc_nlp_put(ndlp); } @@ -1342,11 +1681,13 @@ lpfc_drop_node(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) * Start / ReStart rescue timer for Discovery / RSCN handling */ void -lpfc_set_disctmo(struct lpfc_hba * phba) +lpfc_set_disctmo(struct lpfc_vport *vport) { + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_hba *phba = vport->phba; uint32_t tmo; - if (phba->hba_state == LPFC_LOCAL_CFG_LINK) { + if (vport->port_state == LPFC_LOCAL_CFG_LINK) { /* For FAN, timeout should be greater then edtov */ tmo = (((phba->fc_edtov + 999) / 1000) + 1); } else { @@ -1356,18 +1697,25 @@ lpfc_set_disctmo(struct lpfc_hba * phba) tmo = ((phba->fc_ratov * 3) + 3); } - mod_timer(&phba->fc_disctmo, jiffies + HZ * tmo); - spin_lock_irq(phba->host->host_lock); - phba->fc_flag |= FC_DISC_TMO; - spin_unlock_irq(phba->host->host_lock); + + if (!timer_pending(&vport->fc_disctmo)) { + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, + "set disc timer: tmo:x%x state:x%x flg:x%x", + tmo, vport->port_state, 
vport->fc_flag); + } + + mod_timer(&vport->fc_disctmo, jiffies + HZ * tmo); + spin_lock_irq(shost->host_lock); + vport->fc_flag |= FC_DISC_TMO; + spin_unlock_irq(shost->host_lock); /* Start Discovery Timer state <hba_state> */ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, - "%d:0247 Start Discovery Timer state x%x " + "%d (%d):0247 Start Discovery Timer state x%x " "Data: x%x x%lx x%x x%x\n", - phba->brd_no, - phba->hba_state, tmo, (unsigned long)&phba->fc_disctmo, - phba->fc_plogi_cnt, phba->fc_adisc_cnt); + phba->brd_no, vport->vpi, vport->port_state, tmo, + (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt, + vport->fc_adisc_cnt); return; } @@ -1376,23 +1724,34 @@ lpfc_set_disctmo(struct lpfc_hba * phba) * Cancel rescue timer for Discovery / RSCN handling */ int -lpfc_can_disctmo(struct lpfc_hba * phba) +lpfc_can_disctmo(struct lpfc_vport *vport) { + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_hba *phba = vport->phba; + unsigned long iflags; + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, + "can disc timer: state:x%x rtry:x%x flg:x%x", + vport->port_state, vport->fc_ns_retry, vport->fc_flag); + /* Turn off discovery timer if its running */ - if (phba->fc_flag & FC_DISC_TMO) { - spin_lock_irq(phba->host->host_lock); - phba->fc_flag &= ~FC_DISC_TMO; - spin_unlock_irq(phba->host->host_lock); - del_timer_sync(&phba->fc_disctmo); - phba->work_hba_events &= ~WORKER_DISC_TMO; + if (vport->fc_flag & FC_DISC_TMO) { + spin_lock_irqsave(shost->host_lock, iflags); + vport->fc_flag &= ~FC_DISC_TMO; + spin_unlock_irqrestore(shost->host_lock, iflags); + del_timer_sync(&vport->fc_disctmo); + spin_lock_irqsave(&vport->work_port_lock, iflags); + vport->work_port_events &= ~WORKER_DISC_TMO; + spin_unlock_irqrestore(&vport->work_port_lock, iflags); } /* Cancel Discovery Timer state <hba_state> */ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, - "%d:0248 Cancel Discovery Timer state x%x " + "%d (%d):0248 Cancel Discovery Timer state x%x " "Data: x%x x%x x%x\n", - phba->brd_no, phba->hba_state, phba->fc_flag, - phba->fc_plogi_cnt, phba->fc_adisc_cnt); + phba->brd_no, vport->vpi, vport->port_state, + vport->fc_flag, vport->fc_plogi_cnt, + vport->fc_adisc_cnt); return 0; } @@ -1402,15 +1761,18 @@ lpfc_can_disctmo(struct lpfc_hba * phba) * Return true if iocb matches the specified nport */ int -lpfc_check_sli_ndlp(struct lpfc_hba * phba, - struct lpfc_sli_ring * pring, - struct lpfc_iocbq * iocb, struct lpfc_nodelist * ndlp) +lpfc_check_sli_ndlp(struct lpfc_hba *phba, + struct lpfc_sli_ring *pring, + struct lpfc_iocbq *iocb, + struct lpfc_nodelist *ndlp) { - struct lpfc_sli *psli; - IOCB_t *icmd; + struct lpfc_sli *psli = &phba->sli; + IOCB_t *icmd = &iocb->iocb; + struct lpfc_vport *vport = ndlp->vport; + + if (iocb->vport != vport) + return 0; - psli = &phba->sli; - icmd = &iocb->iocb; if (pring->ringno == LPFC_ELS_RING) { switch (icmd->ulpCommand) { case CMD_GEN_REQUEST64_CR: @@ -1428,7 +1790,7 @@ lpfc_check_sli_ndlp(struct lpfc_hba * phba, } else if (pring->ringno == psli->fcp_ring) { /* Skip match check if waiting to relogin to FCP target */ if ((ndlp->nlp_type & NLP_FCP_TARGET) && - (ndlp->nlp_flag & NLP_DELAY_TMO)) { + (ndlp->nlp_flag & NLP_DELAY_TMO)) { return 0; } if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) { @@ -1445,7 +1807,7 @@ lpfc_check_sli_ndlp(struct lpfc_hba * phba, * associated with nlp_rpi in the LPFC_NODELIST entry. 
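 * Every iocb still sitting on a ring txq that matches this node (per
 * lpfc_check_sli_ndlp) is pulled onto a local completions list and finished
 * with IOSTAT_LOCAL_REJECT / IOERR_SLI_ABORTED, so nothing is left queued
 * against an RPI that is about to be unregistered.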
*/ static int -lpfc_no_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp) +lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) { LIST_HEAD(completions); struct lpfc_sli *psli; @@ -1454,6 +1816,8 @@ lpfc_no_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp) IOCB_t *icmd; uint32_t rpi, i; + lpfc_fabric_abort_nport(ndlp); + /* * Everything that matches on txcmplq will be returned * by firmware with a no rpi error. @@ -1465,15 +1829,15 @@ lpfc_no_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp) for (i = 0; i < psli->num_rings; i++) { pring = &psli->ring[i]; - spin_lock_irq(phba->host->host_lock); + spin_lock_irq(&phba->hbalock); list_for_each_entry_safe(iocb, next_iocb, &pring->txq, - list) { + list) { /* * Check to see if iocb matches the nport we are * looking for */ - if ((lpfc_check_sli_ndlp - (phba, pring, iocb, ndlp))) { + if ((lpfc_check_sli_ndlp(phba, pring, iocb, + ndlp))) { /* It matches, so deque and call compl with an error */ list_move_tail(&iocb->list, @@ -1481,22 +1845,22 @@ lpfc_no_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp) pring->txq_cnt--; } } - spin_unlock_irq(phba->host->host_lock); - + spin_unlock_irq(&phba->hbalock); } } while (!list_empty(&completions)) { iocb = list_get_first(&completions, struct lpfc_iocbq, list); - list_del(&iocb->list); + list_del_init(&iocb->list); - if (iocb->iocb_cmpl) { + if (!iocb->iocb_cmpl) + lpfc_sli_release_iocbq(phba, iocb); + else { icmd = &iocb->iocb; icmd->ulpStatus = IOSTAT_LOCAL_REJECT; icmd->un.ulpWord[4] = IOERR_SLI_ABORTED; - (iocb->iocb_cmpl) (phba, iocb, iocb); - } else - lpfc_sli_release_iocbq(phba, iocb); + (iocb->iocb_cmpl)(phba, iocb, iocb); + } } return 0; @@ -1512,19 +1876,22 @@ lpfc_no_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp) * we are waiting to PLOGI back to the remote NPort. 
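 * An UNREG_LOGIN mailbox is issued for the node's RPI (now qualified by the
 * vport's VPI), lpfc_no_rpi() flushes any iocbs still queued for the node,
 * and nlp_rpi is cleared so a later PLOGI can register a fresh login.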
*/ int -lpfc_unreg_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp) +lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) { - LPFC_MBOXQ_t *mbox; + struct lpfc_hba *phba = vport->phba; + LPFC_MBOXQ_t *mbox; int rc; if (ndlp->nlp_rpi) { - if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) { - lpfc_unreg_login(phba, ndlp->nlp_rpi, mbox); - mbox->mbox_cmpl=lpfc_sli_def_mbox_cmpl; - rc = lpfc_sli_issue_mbox - (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB)); + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (mbox) { + lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox); + mbox->vport = vport; + mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + rc = lpfc_sli_issue_mbox(phba, mbox, + (MBX_NOWAIT | MBX_STOP_IOCB)); if (rc == MBX_NOT_FINISHED) - mempool_free( mbox, phba->mbox_mem_pool); + mempool_free(mbox, phba->mbox_mem_pool); } lpfc_no_rpi(phba, ndlp); ndlp->nlp_rpi = 0; @@ -1533,25 +1900,70 @@ lpfc_unreg_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp) return 0; } +void +lpfc_unreg_all_rpis(struct lpfc_vport *vport) +{ + struct lpfc_hba *phba = vport->phba; + LPFC_MBOXQ_t *mbox; + int rc; + + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (mbox) { + lpfc_unreg_login(phba, vport->vpi, 0xffff, mbox); + mbox->vport = vport; + mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + rc = lpfc_sli_issue_mbox(phba, mbox, + (MBX_NOWAIT | MBX_STOP_IOCB)); + if (rc == MBX_NOT_FINISHED) { + mempool_free(mbox, phba->mbox_mem_pool); + } + } +} + +void +lpfc_unreg_default_rpis(struct lpfc_vport *vport) +{ + struct lpfc_hba *phba = vport->phba; + LPFC_MBOXQ_t *mbox; + int rc; + + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (mbox) { + lpfc_unreg_did(phba, vport->vpi, 0xffffffff, mbox); + mbox->vport = vport; + mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + rc = lpfc_sli_issue_mbox(phba, mbox, + (MBX_NOWAIT | MBX_STOP_IOCB)); + if (rc == MBX_NOT_FINISHED) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_VPORT, + "%d (%d):1815 Could not issue " + "unreg_did (default rpis)\n", + phba->brd_no, vport->vpi); + mempool_free(mbox, phba->mbox_mem_pool); + } + } +} + /* * Free resources associated with LPFC_NODELIST entry * so it can be freed. 
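 * The node is dequeued from the vport list, references held by the active
 * and queued REG_LOGIN mailboxes are dropped (and their DMA buffers freed),
 * outstanding ELS traffic is aborted, the retry-delay timer and any queued
 * retry/dev-loss events are cancelled, and the RPI is unregistered.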
*/ static int -lpfc_cleanup_node(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp) +lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) { - LPFC_MBOXQ_t *mb; - LPFC_MBOXQ_t *nextmb; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_hba *phba = vport->phba; + LPFC_MBOXQ_t *mb, *nextmb; struct lpfc_dmabuf *mp; /* Cleanup node for NPort <nlp_DID> */ lpfc_printf_log(phba, KERN_INFO, LOG_NODE, - "%d:0900 Cleanup node for NPort x%x " + "%d (%d):0900 Cleanup node for NPort x%x " "Data: x%x x%x x%x\n", - phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag, + phba->brd_no, vport->vpi, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); - lpfc_dequeue_node(phba, ndlp); + lpfc_dequeue_node(vport, ndlp); /* cleanup any ndlp on mbox q waiting for reglogin cmpl */ if ((mb = phba->sli.mbox_active)) { @@ -1562,13 +1974,13 @@ lpfc_cleanup_node(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp) } } - spin_lock_irq(phba->host->host_lock); + spin_lock_irq(&phba->hbalock); list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) && - (ndlp == (struct lpfc_nodelist *) mb->context2)) { + (ndlp == (struct lpfc_nodelist *) mb->context2)) { mp = (struct lpfc_dmabuf *) (mb->context1); if (mp) { - lpfc_mbuf_free(phba, mp->virt, mp->phys); + __lpfc_mbuf_free(phba, mp->virt, mp->phys); kfree(mp); } list_del(&mb->list); @@ -1576,20 +1988,27 @@ lpfc_cleanup_node(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp) lpfc_nlp_put(ndlp); } } - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(&phba->hbalock); lpfc_els_abort(phba,ndlp); - spin_lock_irq(phba->host->host_lock); + spin_lock_irq(shost->host_lock); ndlp->nlp_flag &= ~NLP_DELAY_TMO; - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(shost->host_lock); ndlp->nlp_last_elscmd = 0; del_timer_sync(&ndlp->nlp_delayfunc); if (!list_empty(&ndlp->els_retry_evt.evt_listp)) list_del_init(&ndlp->els_retry_evt.evt_listp); + if (!list_empty(&ndlp->dev_loss_evt.evt_listp)) + list_del_init(&ndlp->dev_loss_evt.evt_listp); + + if (!list_empty(&ndlp->dev_loss_evt.evt_listp)) { + list_del_init(&ndlp->dev_loss_evt.evt_listp); + complete((struct completion *)(ndlp->dev_loss_evt.evt_arg2)); + } - lpfc_unreg_rpi(phba, ndlp); + lpfc_unreg_rpi(vport, ndlp); return 0; } @@ -1600,18 +2019,22 @@ lpfc_cleanup_node(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp) * machine, defer the free till we reach the end of the state machine. */ static void -lpfc_nlp_remove(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) +lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) { struct lpfc_rport_data *rdata; if (ndlp->nlp_flag & NLP_DELAY_TMO) { - lpfc_cancel_retry_delay_tmo(phba, ndlp); + lpfc_cancel_retry_delay_tmo(vport, ndlp); } - lpfc_cleanup_node(phba, ndlp); + lpfc_cleanup_node(vport, ndlp); - if ((ndlp->rport) && !(phba->fc_flag & FC_UNLOADING)) { - put_device(&ndlp->rport->dev); + /* + * We can get here with a non-NULL ndlp->rport because when we + * unregister a rport we don't break the rport/node linkage. So if we + * do, make sure we don't leaving any dangling pointers behind. 
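 * Both rdata->pnode and ndlp->rport are cleared here so neither side is
 * left pointing at an object that is about to go away.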
+ */ + if (ndlp->rport) { rdata = ndlp->rport->dd_data; rdata->pnode = NULL; ndlp->rport = NULL; @@ -1619,11 +2042,10 @@ lpfc_nlp_remove(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) } static int -lpfc_matchdid(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, uint32_t did) +lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + uint32_t did) { - D_ID mydid; - D_ID ndlpdid; - D_ID matchdid; + D_ID mydid, ndlpdid, matchdid; if (did == Bcast_DID) return 0; @@ -1637,7 +2059,7 @@ lpfc_matchdid(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, uint32_t did) return 1; /* Next check for area/domain identically equals 0 match */ - mydid.un.word = phba->fc_myDID; + mydid.un.word = vport->fc_myDID; if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) { return 0; } @@ -1669,101 +2091,116 @@ lpfc_matchdid(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, uint32_t did) } /* Search for a nodelist entry */ -struct lpfc_nodelist * -lpfc_findnode_did(struct lpfc_hba *phba, uint32_t did) +static struct lpfc_nodelist * +__lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did) { + struct lpfc_hba *phba = vport->phba; struct lpfc_nodelist *ndlp; uint32_t data1; - spin_lock_irq(phba->host->host_lock); - list_for_each_entry(ndlp, &phba->fc_nodes, nlp_listp) { - if (lpfc_matchdid(phba, ndlp, did)) { + list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { + if (lpfc_matchdid(vport, ndlp, did)) { data1 = (((uint32_t) ndlp->nlp_state << 24) | ((uint32_t) ndlp->nlp_xri << 16) | ((uint32_t) ndlp->nlp_type << 8) | ((uint32_t) ndlp->nlp_rpi & 0xff)); lpfc_printf_log(phba, KERN_INFO, LOG_NODE, - "%d:0929 FIND node DID " + "%d (%d):0929 FIND node DID " " Data: x%p x%x x%x x%x\n", - phba->brd_no, + phba->brd_no, vport->vpi, ndlp, ndlp->nlp_DID, ndlp->nlp_flag, data1); - spin_unlock_irq(phba->host->host_lock); return ndlp; } } - spin_unlock_irq(phba->host->host_lock); /* FIND node did <did> NOT FOUND */ lpfc_printf_log(phba, KERN_INFO, LOG_NODE, - "%d:0932 FIND node did x%x NOT FOUND.\n", - phba->brd_no, did); + "%d (%d):0932 FIND node did x%x NOT FOUND.\n", + phba->brd_no, vport->vpi, did); return NULL; } struct lpfc_nodelist * -lpfc_setup_disc_node(struct lpfc_hba * phba, uint32_t did) +lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did) { + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_nodelist *ndlp; - ndlp = lpfc_findnode_did(phba, did); + spin_lock_irq(shost->host_lock); + ndlp = __lpfc_findnode_did(vport, did); + spin_unlock_irq(shost->host_lock); + return ndlp; +} + +struct lpfc_nodelist * +lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) +{ + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_nodelist *ndlp; + + ndlp = lpfc_findnode_did(vport, did); if (!ndlp) { - if ((phba->fc_flag & FC_RSCN_MODE) && - ((lpfc_rscn_payload_check(phba, did) == 0))) + if ((vport->fc_flag & FC_RSCN_MODE) != 0 && + lpfc_rscn_payload_check(vport, did) == 0) return NULL; ndlp = (struct lpfc_nodelist *) - mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); + mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL); if (!ndlp) return NULL; - lpfc_nlp_init(phba, ndlp, did); - lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE); + lpfc_nlp_init(vport, ndlp, did); + lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); + spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_NPR_2B_DISC; + spin_unlock_irq(shost->host_lock); return ndlp; } - if (phba->fc_flag & FC_RSCN_MODE) { - if (lpfc_rscn_payload_check(phba, did)) { + if (vport->fc_flag & FC_RSCN_MODE) { + if 
(lpfc_rscn_payload_check(vport, did)) { + spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_NPR_2B_DISC; + spin_unlock_irq(shost->host_lock); /* Since this node is marked for discovery, * delay timeout is not needed. */ if (ndlp->nlp_flag & NLP_DELAY_TMO) - lpfc_cancel_retry_delay_tmo(phba, ndlp); + lpfc_cancel_retry_delay_tmo(vport, ndlp); } else ndlp = NULL; } else { if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE || ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) return NULL; - lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE); + lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); + spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_NPR_2B_DISC; + spin_unlock_irq(shost->host_lock); } return ndlp; } /* Build a list of nodes to discover based on the loopmap */ void -lpfc_disc_list_loopmap(struct lpfc_hba * phba) +lpfc_disc_list_loopmap(struct lpfc_vport *vport) { + struct lpfc_hba *phba = vport->phba; int j; uint32_t alpa, index; - if (phba->hba_state <= LPFC_LINK_DOWN) { + if (!lpfc_is_link_up(phba)) return; - } - if (phba->fc_topology != TOPOLOGY_LOOP) { + + if (phba->fc_topology != TOPOLOGY_LOOP) return; - } /* Check for loop map present or not */ if (phba->alpa_map[0]) { for (j = 1; j <= phba->alpa_map[0]; j++) { alpa = phba->alpa_map[j]; - - if (((phba->fc_myDID & 0xff) == alpa) || (alpa == 0)) { + if (((vport->fc_myDID & 0xff) == alpa) || (alpa == 0)) continue; - } - lpfc_setup_disc_node(phba, alpa); + lpfc_setup_disc_node(vport, alpa); } } else { /* No alpamap, so try all alpa's */ @@ -1776,113 +2213,167 @@ lpfc_disc_list_loopmap(struct lpfc_hba * phba) else index = FC_MAXLOOP - j - 1; alpa = lpfcAlpaArray[index]; - if ((phba->fc_myDID & 0xff) == alpa) { + if ((vport->fc_myDID & 0xff) == alpa) continue; - } - - lpfc_setup_disc_node(phba, alpa); + lpfc_setup_disc_node(vport, alpa); } } return; } -/* Start Link up / RSCN discovery on NPR list */ void -lpfc_disc_start(struct lpfc_hba * phba) +lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport) { - struct lpfc_sli *psli; LPFC_MBOXQ_t *mbox; - struct lpfc_nodelist *ndlp, *next_ndlp; + struct lpfc_sli *psli = &phba->sli; + struct lpfc_sli_ring *extra_ring = &psli->ring[psli->extra_ring]; + struct lpfc_sli_ring *fcp_ring = &psli->ring[psli->fcp_ring]; + struct lpfc_sli_ring *next_ring = &psli->ring[psli->next_ring]; + int rc; + + /* + * if it's not a physical port or if we already send + * clear_la then don't send it. 
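 * When it is sent, link_state moves to LPFC_CLEAR_LA and completion is
 * handled by lpfc_mbx_cmpl_clear_la; if the mailbox cannot be issued the
 * discovery list is flushed, the STOP_IOCB flags are cleared on the
 * extra/FCP/next rings and the link is marked LPFC_HBA_ERROR.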
+ */ + if ((phba->link_state >= LPFC_CLEAR_LA) || + (vport->port_type != LPFC_PHYSICAL_PORT)) + return; + + /* Link up discovery */ + if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) { + phba->link_state = LPFC_CLEAR_LA; + lpfc_clear_la(phba, mbox); + mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la; + mbox->vport = vport; + rc = lpfc_sli_issue_mbox(phba, mbox, (MBX_NOWAIT | + MBX_STOP_IOCB)); + if (rc == MBX_NOT_FINISHED) { + mempool_free(mbox, phba->mbox_mem_pool); + lpfc_disc_flush_list(vport); + extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT; + fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT; + next_ring->flag &= ~LPFC_STOP_IOCB_EVENT; + phba->link_state = LPFC_HBA_ERROR; + } + } +} + +/* Reg_vpi to tell firmware to resume normal operations */ +void +lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport) +{ + LPFC_MBOXQ_t *regvpimbox; + + regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (regvpimbox) { + lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, regvpimbox); + regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi; + regvpimbox->vport = vport; + if (lpfc_sli_issue_mbox(phba, regvpimbox, + (MBX_NOWAIT | MBX_STOP_IOCB)) + == MBX_NOT_FINISHED) { + mempool_free(regvpimbox, phba->mbox_mem_pool); + } + } +} + +/* Start Link up / RSCN discovery on NPR nodes */ +void +lpfc_disc_start(struct lpfc_vport *vport) +{ + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_hba *phba = vport->phba; uint32_t num_sent; uint32_t clear_la_pending; int did_changed; - int rc; - psli = &phba->sli; - - if (phba->hba_state <= LPFC_LINK_DOWN) { + if (!lpfc_is_link_up(phba)) return; - } - if (phba->hba_state == LPFC_CLEAR_LA) + + if (phba->link_state == LPFC_CLEAR_LA) clear_la_pending = 1; else clear_la_pending = 0; - if (phba->hba_state < LPFC_HBA_READY) { - phba->hba_state = LPFC_DISC_AUTH; - } - lpfc_set_disctmo(phba); + if (vport->port_state < LPFC_VPORT_READY) + vport->port_state = LPFC_DISC_AUTH; - if (phba->fc_prevDID == phba->fc_myDID) { + lpfc_set_disctmo(vport); + + if (vport->fc_prevDID == vport->fc_myDID) did_changed = 0; - } else { + else did_changed = 1; - } - phba->fc_prevDID = phba->fc_myDID; - phba->num_disc_nodes = 0; + + vport->fc_prevDID = vport->fc_myDID; + vport->num_disc_nodes = 0; /* Start Discovery state <hba_state> */ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, - "%d:0202 Start Discovery hba state x%x " + "%d (%d):0202 Start Discovery hba state x%x " "Data: x%x x%x x%x\n", - phba->brd_no, phba->hba_state, phba->fc_flag, - phba->fc_plogi_cnt, phba->fc_adisc_cnt); - - /* If our did changed, we MUST do PLOGI */ - list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes, nlp_listp) { - if (ndlp->nlp_state == NLP_STE_NPR_NODE && - (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 && - did_changed) { - spin_lock_irq(phba->host->host_lock); - ndlp->nlp_flag &= ~NLP_NPR_ADISC; - spin_unlock_irq(phba->host->host_lock); - } - } + phba->brd_no, vport->vpi, vport->port_state, + vport->fc_flag, vport->fc_plogi_cnt, + vport->fc_adisc_cnt); /* First do ADISCs - if any */ - num_sent = lpfc_els_disc_adisc(phba); + num_sent = lpfc_els_disc_adisc(vport); if (num_sent) return; - if ((phba->hba_state < LPFC_HBA_READY) && (!clear_la_pending)) { + /* + * For SLI3, cmpl_reg_vpi will set port_state to READY, and + * continue discovery. + */ + if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && + !(vport->fc_flag & FC_RSCN_MODE)) { + lpfc_issue_reg_vpi(phba, vport); + return; + } + + /* + * For SLI2, we need to set port_state to READY and continue + * discovery. 
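 * When nothing is left to ADISC the physical port issues CLEAR_LA, any NPR
 * nodes are sent PLOGIs, and once no discovery nodes remain outstanding
 * FC_NDISC_ACTIVE is cleared and the discovery timer cancelled before the
 * vport is marked LPFC_VPORT_READY.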
+ */ + if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) { /* If we get here, there is nothing to ADISC */ - if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) { - phba->hba_state = LPFC_CLEAR_LA; - lpfc_clear_la(phba, mbox); - mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la; - rc = lpfc_sli_issue_mbox(phba, mbox, - (MBX_NOWAIT | MBX_STOP_IOCB)); - if (rc == MBX_NOT_FINISHED) { - mempool_free( mbox, phba->mbox_mem_pool); - lpfc_disc_flush_list(phba); - psli->ring[(psli->extra_ring)].flag &= - ~LPFC_STOP_IOCB_EVENT; - psli->ring[(psli->fcp_ring)].flag &= - ~LPFC_STOP_IOCB_EVENT; - psli->ring[(psli->next_ring)].flag &= - ~LPFC_STOP_IOCB_EVENT; - phba->hba_state = LPFC_HBA_READY; + if (vport->port_type == LPFC_PHYSICAL_PORT) + lpfc_issue_clear_la(phba, vport); + + if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) { + vport->num_disc_nodes = 0; + /* go thru NPR nodes and issue ELS PLOGIs */ + if (vport->fc_npr_cnt) + lpfc_els_disc_plogi(vport); + + if (!vport->num_disc_nodes) { + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~FC_NDISC_ACTIVE; + spin_unlock_irq(shost->host_lock); + lpfc_can_disctmo(vport); } } + vport->port_state = LPFC_VPORT_READY; } else { /* Next do PLOGIs - if any */ - num_sent = lpfc_els_disc_plogi(phba); + num_sent = lpfc_els_disc_plogi(vport); if (num_sent) return; - if (phba->fc_flag & FC_RSCN_MODE) { + if (vport->fc_flag & FC_RSCN_MODE) { /* Check to see if more RSCNs came in while we * were processing this one. */ - if ((phba->fc_rscn_id_cnt == 0) && - (!(phba->fc_flag & FC_RSCN_DISCOVERY))) { - spin_lock_irq(phba->host->host_lock); - phba->fc_flag &= ~FC_RSCN_MODE; - spin_unlock_irq(phba->host->host_lock); + if ((vport->fc_rscn_id_cnt == 0) && + (!(vport->fc_flag & FC_RSCN_DISCOVERY))) { + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~FC_RSCN_MODE; + spin_unlock_irq(shost->host_lock); + lpfc_can_disctmo(vport); } else - lpfc_els_handle_rscn(phba); + lpfc_els_handle_rscn(vport); } } return; @@ -1893,7 +2384,7 @@ lpfc_disc_start(struct lpfc_hba * phba) * ring the match the sppecified nodelist. */ static void -lpfc_free_tx(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp) +lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) { LIST_HEAD(completions); struct lpfc_sli *psli; @@ -1907,7 +2398,7 @@ lpfc_free_tx(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp) /* Error matching iocb on txq or txcmplq * First check the txq. 
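 * ELS requests and responses queued for this node are moved off the txq and
 * completed with IOSTAT_LOCAL_REJECT / IOERR_SLI_ABORTED; matching entries
 * already on the txcmplq are aborted via lpfc_sli_issue_abort_iotag.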
*/ - spin_lock_irq(phba->host->host_lock); + spin_lock_irq(&phba->hbalock); list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { if (iocb->context1 != ndlp) { continue; @@ -1927,36 +2418,36 @@ lpfc_free_tx(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp) continue; } icmd = &iocb->iocb; - if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) || - (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) { + if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR || + icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX) { lpfc_sli_issue_abort_iotag(phba, pring, iocb); } } - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(&phba->hbalock); while (!list_empty(&completions)) { iocb = list_get_first(&completions, struct lpfc_iocbq, list); - list_del(&iocb->list); + list_del_init(&iocb->list); - if (iocb->iocb_cmpl) { + if (!iocb->iocb_cmpl) + lpfc_sli_release_iocbq(phba, iocb); + else { icmd = &iocb->iocb; icmd->ulpStatus = IOSTAT_LOCAL_REJECT; icmd->un.ulpWord[4] = IOERR_SLI_ABORTED; (iocb->iocb_cmpl) (phba, iocb, iocb); - } else - lpfc_sli_release_iocbq(phba, iocb); + } } - - return; } void -lpfc_disc_flush_list(struct lpfc_hba * phba) +lpfc_disc_flush_list(struct lpfc_vport *vport) { struct lpfc_nodelist *ndlp, *next_ndlp; + struct lpfc_hba *phba = vport->phba; - if (phba->fc_plogi_cnt || phba->fc_adisc_cnt) { - list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes, + if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) { + list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || ndlp->nlp_state == NLP_STE_ADISC_ISSUE) { @@ -1967,6 +2458,14 @@ lpfc_disc_flush_list(struct lpfc_hba * phba) } } +void +lpfc_cleanup_discovery_resources(struct lpfc_vport *vport) +{ + lpfc_els_flush_rscn(vport); + lpfc_els_flush_cmd(vport); + lpfc_disc_flush_list(vport); +} + /*****************************************************************************/ /* * NAME: lpfc_disc_timeout @@ -1985,158 +2484,154 @@ lpfc_disc_flush_list(struct lpfc_hba * phba) void lpfc_disc_timeout(unsigned long ptr) { - struct lpfc_hba *phba = (struct lpfc_hba *)ptr; + struct lpfc_vport *vport = (struct lpfc_vport *) ptr; + struct lpfc_hba *phba = vport->phba; unsigned long flags = 0; if (unlikely(!phba)) return; - spin_lock_irqsave(phba->host->host_lock, flags); - if (!(phba->work_hba_events & WORKER_DISC_TMO)) { - phba->work_hba_events |= WORKER_DISC_TMO; + if ((vport->work_port_events & WORKER_DISC_TMO) == 0) { + spin_lock_irqsave(&vport->work_port_lock, flags); + vport->work_port_events |= WORKER_DISC_TMO; + spin_unlock_irqrestore(&vport->work_port_lock, flags); + + spin_lock_irqsave(&phba->hbalock, flags); if (phba->work_wait) - wake_up(phba->work_wait); + lpfc_worker_wake_up(phba); + spin_unlock_irqrestore(&phba->hbalock, flags); } - spin_unlock_irqrestore(phba->host->host_lock, flags); return; } static void -lpfc_disc_timeout_handler(struct lpfc_hba *phba) +lpfc_disc_timeout_handler(struct lpfc_vport *vport) { - struct lpfc_sli *psli; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_hba *phba = vport->phba; + struct lpfc_sli *psli = &phba->sli; struct lpfc_nodelist *ndlp, *next_ndlp; - LPFC_MBOXQ_t *clearlambox, *initlinkmbox; + LPFC_MBOXQ_t *initlinkmbox; int rc, clrlaerr = 0; - if (unlikely(!phba)) + if (!(vport->fc_flag & FC_DISC_TMO)) return; - if (!(phba->fc_flag & FC_DISC_TMO)) - return; + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~FC_DISC_TMO; + spin_unlock_irq(shost->host_lock); - psli = &phba->sli; + lpfc_debugfs_disc_trc(vport, 
LPFC_DISC_TRC_ELS_CMD, + "disc timeout: state:x%x rtry:x%x flg:x%x", + vport->port_state, vport->fc_ns_retry, vport->fc_flag); - spin_lock_irq(phba->host->host_lock); - phba->fc_flag &= ~FC_DISC_TMO; - spin_unlock_irq(phba->host->host_lock); - - switch (phba->hba_state) { + switch (vport->port_state) { case LPFC_LOCAL_CFG_LINK: - /* hba_state is identically LPFC_LOCAL_CFG_LINK while waiting for FAN */ - /* FAN timeout */ - lpfc_printf_log(phba, - KERN_WARNING, - LOG_DISCOVERY, - "%d:0221 FAN timeout\n", - phba->brd_no); + /* port_state is identically LPFC_LOCAL_CFG_LINK while waiting for + * FAN + */ + /* FAN timeout */ + lpfc_printf_log(phba, KERN_WARNING, LOG_DISCOVERY, + "%d (%d):0221 FAN timeout\n", + phba->brd_no, vport->vpi); /* Start discovery by sending FLOGI, clean up old rpis */ - list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes, + list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { if (ndlp->nlp_state != NLP_STE_NPR_NODE) continue; if (ndlp->nlp_type & NLP_FABRIC) { /* Clean up the ndlp on Fabric connections */ - lpfc_drop_node(phba, ndlp); + lpfc_drop_node(vport, ndlp); } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { /* Fail outstanding IO now since device * is marked for PLOGI. */ - lpfc_unreg_rpi(phba, ndlp); + lpfc_unreg_rpi(vport, ndlp); } } - phba->hba_state = LPFC_FLOGI; - lpfc_set_disctmo(phba); - lpfc_initial_flogi(phba); + if (vport->port_state != LPFC_FLOGI) { + vport->port_state = LPFC_FLOGI; + lpfc_set_disctmo(vport); + lpfc_initial_flogi(vport); + } break; + case LPFC_FDISC: case LPFC_FLOGI: - /* hba_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */ + /* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */ /* Initial FLOGI timeout */ - lpfc_printf_log(phba, - KERN_ERR, - LOG_DISCOVERY, - "%d:0222 Initial FLOGI timeout\n", - phba->brd_no); + lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, + "%d (%d):0222 Initial %s timeout\n", + phba->brd_no, vport->vpi, + vport->vpi ? "FLOGI" : "FDISC"); /* Assume no Fabric and go on with discovery. * Check for outstanding ELS FLOGI to abort. 
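 * The timed-out fabric login is simply abandoned: the loop map is used to
 * build a discovery list and discovery is restarted on this vport.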
*/ /* FLOGI failed, so just use loop map to make discovery list */ - lpfc_disc_list_loopmap(phba); + lpfc_disc_list_loopmap(vport); /* Start discovery */ - lpfc_disc_start(phba); + lpfc_disc_start(vport); break; case LPFC_FABRIC_CFG_LINK: /* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for NameServer login */ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, - "%d:0223 Timeout while waiting for NameServer " - "login\n", phba->brd_no); + "%d (%d):0223 Timeout while waiting for " + "NameServer login\n", + phba->brd_no, vport->vpi); /* Next look for NameServer ndlp */ - ndlp = lpfc_findnode_did(phba, NameServer_DID); + ndlp = lpfc_findnode_did(vport, NameServer_DID); if (ndlp) lpfc_nlp_put(ndlp); /* Start discovery */ - lpfc_disc_start(phba); + lpfc_disc_start(vport); break; case LPFC_NS_QRY: /* Check for wait for NameServer Rsp timeout */ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, - "%d:0224 NameServer Query timeout " + "%d (%d):0224 NameServer Query timeout " "Data: x%x x%x\n", - phba->brd_no, - phba->fc_ns_retry, LPFC_MAX_NS_RETRY); - - ndlp = lpfc_findnode_did(phba, NameServer_DID); - if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { - if (phba->fc_ns_retry < LPFC_MAX_NS_RETRY) { - /* Try it one more time */ - rc = lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT); - if (rc == 0) - break; - } - phba->fc_ns_retry = 0; + phba->brd_no, vport->vpi, + vport->fc_ns_retry, LPFC_MAX_NS_RETRY); + + if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) { + /* Try it one more time */ + vport->fc_ns_retry++; + rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, + vport->fc_ns_retry, 0); + if (rc == 0) + break; } + vport->fc_ns_retry = 0; - /* Nothing to authenticate, so CLEAR_LA right now */ - clearlambox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); - if (!clearlambox) { - clrlaerr = 1; - lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, - "%d:0226 Device Discovery " - "completion error\n", - phba->brd_no); - phba->hba_state = LPFC_HBA_ERROR; - break; - } - - phba->hba_state = LPFC_CLEAR_LA; - lpfc_clear_la(phba, clearlambox); - clearlambox->mbox_cmpl = lpfc_mbx_cmpl_clear_la; - rc = lpfc_sli_issue_mbox(phba, clearlambox, - (MBX_NOWAIT | MBX_STOP_IOCB)); - if (rc == MBX_NOT_FINISHED) { - mempool_free(clearlambox, phba->mbox_mem_pool); - clrlaerr = 1; - break; + /* + * Discovery is over. + * set port_state to PORT_READY if SLI2. + * cmpl_reg_vpi will set port_state to READY for SLI3. 
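 * With NPIV enabled REG_VPI resumes normal operation, otherwise CLEAR_LA is
 * issued and the vport goes READY; an INITIALIZE LINK mailbox is then sent
 * using the configured topology and link speed.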
+ */ + if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) + lpfc_issue_reg_vpi(phba, vport); + else { /* NPIV Not enabled */ + lpfc_issue_clear_la(phba, vport); + vport->port_state = LPFC_VPORT_READY; } /* Setup and issue mailbox INITIALIZE LINK command */ initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!initlinkmbox) { lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, - "%d:0206 Device Discovery " + "%d (%d):0206 Device Discovery " "completion error\n", - phba->brd_no); - phba->hba_state = LPFC_HBA_ERROR; + phba->brd_no, vport->vpi); + phba->link_state = LPFC_HBA_ERROR; break; } @@ -2144,6 +2639,8 @@ lpfc_disc_timeout_handler(struct lpfc_hba *phba) lpfc_init_link(phba, initlinkmbox, phba->cfg_topology, phba->cfg_link_speed); initlinkmbox->mb.un.varInitLnk.lipsr_AL_PA = 0; + initlinkmbox->vport = vport; + initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; rc = lpfc_sli_issue_mbox(phba, initlinkmbox, (MBX_NOWAIT | MBX_STOP_IOCB)); lpfc_set_loopback_flag(phba); @@ -2154,67 +2651,81 @@ lpfc_disc_timeout_handler(struct lpfc_hba *phba) case LPFC_DISC_AUTH: /* Node Authentication timeout */ - lpfc_printf_log(phba, - KERN_ERR, - LOG_DISCOVERY, - "%d:0227 Node Authentication timeout\n", - phba->brd_no); - lpfc_disc_flush_list(phba); - clearlambox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); - if (!clearlambox) { - clrlaerr = 1; - lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, - "%d:0207 Device Discovery " - "completion error\n", - phba->brd_no); - phba->hba_state = LPFC_HBA_ERROR; - break; + lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, + "%d (%d):0227 Node Authentication timeout\n", + phba->brd_no, vport->vpi); + lpfc_disc_flush_list(vport); + + /* + * set port_state to PORT_READY if SLI2. + * cmpl_reg_vpi will set port_state to READY for SLI3. 
+ */ + if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) + lpfc_issue_reg_vpi(phba, vport); + else { /* NPIV Not enabled */ + lpfc_issue_clear_la(phba, vport); + vport->port_state = LPFC_VPORT_READY; } - phba->hba_state = LPFC_CLEAR_LA; - lpfc_clear_la(phba, clearlambox); - clearlambox->mbox_cmpl = lpfc_mbx_cmpl_clear_la; - rc = lpfc_sli_issue_mbox(phba, clearlambox, - (MBX_NOWAIT | MBX_STOP_IOCB)); - if (rc == MBX_NOT_FINISHED) { - mempool_free(clearlambox, phba->mbox_mem_pool); - clrlaerr = 1; + break; + + case LPFC_VPORT_READY: + if (vport->fc_flag & FC_RSCN_MODE) { + lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, + "%d (%d):0231 RSCN timeout Data: x%x " + "x%x\n", + phba->brd_no, vport->vpi, + vport->fc_ns_retry, LPFC_MAX_NS_RETRY); + + /* Cleanup any outstanding ELS commands */ + lpfc_els_flush_cmd(vport); + + lpfc_els_flush_rscn(vport); + lpfc_disc_flush_list(vport); } break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, + "%d (%d):0229 Unexpected discovery timeout, " + "vport State x%x\n", + phba->brd_no, vport->vpi, vport->port_state); + + break; + } + + switch (phba->link_state) { case LPFC_CLEAR_LA: - /* CLEAR LA timeout */ - lpfc_printf_log(phba, - KERN_ERR, - LOG_DISCOVERY, - "%d:0228 CLEAR LA timeout\n", - phba->brd_no); + /* CLEAR LA timeout */ + lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, + "%d (%d):0228 CLEAR LA timeout\n", + phba->brd_no, vport->vpi); clrlaerr = 1; break; - case LPFC_HBA_READY: - if (phba->fc_flag & FC_RSCN_MODE) { - lpfc_printf_log(phba, - KERN_ERR, - LOG_DISCOVERY, - "%d:0231 RSCN timeout Data: x%x x%x\n", - phba->brd_no, - phba->fc_ns_retry, LPFC_MAX_NS_RETRY); - - /* Cleanup any outstanding ELS commands */ - lpfc_els_flush_cmd(phba); + case LPFC_LINK_UNKNOWN: + case LPFC_WARM_START: + case LPFC_INIT_START: + case LPFC_INIT_MBX_CMDS: + case LPFC_LINK_DOWN: + case LPFC_LINK_UP: + case LPFC_HBA_ERROR: + lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, + "%d (%d):0230 Unexpected timeout, hba link " + "state x%x\n", + phba->brd_no, vport->vpi, phba->link_state); + clrlaerr = 1; + break; - lpfc_els_flush_rscn(phba); - lpfc_disc_flush_list(phba); - } + case LPFC_HBA_READY: break; } if (clrlaerr) { - lpfc_disc_flush_list(phba); + lpfc_disc_flush_list(vport); psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT; psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT; psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT; - phba->hba_state = LPFC_HBA_READY; + vport->port_state = LPFC_VPORT_READY; } return; @@ -2227,37 +2738,29 @@ lpfc_disc_timeout_handler(struct lpfc_hba *phba) * handed off to the SLI layer. 
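 * The new RPI is recorded on the FDMI node, the node is marked as an
 * unmapped fabric node, and the DHBA request is either issued immediately
 * (fdmi-on=1) or deferred by arming fc_fdmitmo for 60 seconds.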
*/ void -lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) +lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { - struct lpfc_sli *psli; - MAILBOX_t *mb; - struct lpfc_dmabuf *mp; - struct lpfc_nodelist *ndlp; - - psli = &phba->sli; - mb = &pmb->mb; - - ndlp = (struct lpfc_nodelist *) pmb->context2; - mp = (struct lpfc_dmabuf *) (pmb->context1); + MAILBOX_t *mb = &pmb->mb; + struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); + struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; + struct lpfc_vport *vport = pmb->vport; pmb->context1 = NULL; ndlp->nlp_rpi = mb->un.varWords[0]; ndlp->nlp_type |= NLP_FABRIC; - lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNMAPPED_NODE); + lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); - /* Start issuing Fabric-Device Management Interface (FDMI) - * command to 0xfffffa (FDMI well known port) + /* + * Start issuing Fabric-Device Management Interface (FDMI) command to + * 0xfffffa (FDMI well known port) or Delay issuing FDMI command if + * fdmi-on=2 (supporting RPA/hostnmae) */ - if (phba->cfg_fdmi_on == 1) { - lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_DHBA); - } else { - /* - * Delay issuing FDMI command if fdmi-on=2 - * (supporting RPA/hostnmae) - */ - mod_timer(&phba->fc_fdmitmo, jiffies + HZ * 60); - } + + if (phba->cfg_fdmi_on == 1) + lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA); + else + mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60); /* Mailbox took a reference to the node */ lpfc_nlp_put(ndlp); @@ -2283,16 +2786,12 @@ lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param) sizeof(ndlp->nlp_portname)) == 0; } -/* - * Search node lists for a remote port matching filter criteria - * Caller needs to hold host_lock before calling this routine. - */ struct lpfc_nodelist * -__lpfc_find_node(struct lpfc_hba *phba, node_filter filter, void *param) +__lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param) { struct lpfc_nodelist *ndlp; - list_for_each_entry(ndlp, &phba->fc_nodes, nlp_listp) { + list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { if (ndlp->nlp_state != NLP_STE_UNUSED_NODE && filter(ndlp, param)) return ndlp; @@ -2302,68 +2801,104 @@ __lpfc_find_node(struct lpfc_hba *phba, node_filter filter, void *param) /* * Search node lists for a remote port matching filter criteria - * This routine is used when the caller does NOT have host_lock. + * Caller needs to hold host_lock before calling this routine. */ struct lpfc_nodelist * -lpfc_find_node(struct lpfc_hba *phba, node_filter filter, void *param) +lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param) { + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_nodelist *ndlp; - spin_lock_irq(phba->host->host_lock); - ndlp = __lpfc_find_node(phba, filter, param); - spin_unlock_irq(phba->host->host_lock); + spin_lock_irq(shost->host_lock); + ndlp = __lpfc_find_node(vport, filter, param); + spin_unlock_irq(shost->host_lock); return ndlp; } /* * This routine looks up the ndlp lists for the given RPI. If rpi found it - * returns the node list pointer else return NULL. + * returns the node list element pointer else return NULL. 
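 * Both the RPI and WWPN lookups below are thin wrappers around
 * __lpfc_find_node() with the corresponding filter; the lpfc_findnode_*
 * variants take shost->host_lock around the walk.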
*/ struct lpfc_nodelist * -__lpfc_findnode_rpi(struct lpfc_hba *phba, uint16_t rpi) +__lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi) { - return __lpfc_find_node(phba, lpfc_filter_by_rpi, &rpi); + return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi); } struct lpfc_nodelist * -lpfc_findnode_rpi(struct lpfc_hba * phba, uint16_t rpi) +lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi) { + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_nodelist *ndlp; - spin_lock_irq(phba->host->host_lock); - ndlp = __lpfc_findnode_rpi(phba, rpi); - spin_unlock_irq(phba->host->host_lock); + spin_lock_irq(shost->host_lock); + ndlp = __lpfc_findnode_rpi(vport, rpi); + spin_unlock_irq(shost->host_lock); return ndlp; } /* * This routine looks up the ndlp lists for the given WWPN. If WWPN found it - * returns the node list pointer else return NULL. + * returns the node element list pointer else return NULL. */ struct lpfc_nodelist * -lpfc_findnode_wwpn(struct lpfc_hba *phba, struct lpfc_name *wwpn) +lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn) { + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_nodelist *ndlp; - spin_lock_irq(phba->host->host_lock); - ndlp = __lpfc_find_node(phba, lpfc_filter_by_wwpn, wwpn); - spin_unlock_irq(phba->host->host_lock); - return NULL; + spin_lock_irq(shost->host_lock); + ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn); + spin_unlock_irq(shost->host_lock); + return ndlp; } void -lpfc_nlp_init(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, uint32_t did) +lpfc_dev_loss_delay(unsigned long ptr) +{ + struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) ptr; + struct lpfc_vport *vport = ndlp->vport; + struct lpfc_hba *phba = vport->phba; + struct lpfc_work_evt *evtp = &ndlp->dev_loss_evt; + unsigned long flags; + + evtp = &ndlp->dev_loss_evt; + + spin_lock_irqsave(&phba->hbalock, flags); + if (!list_empty(&evtp->evt_listp)) { + spin_unlock_irqrestore(&phba->hbalock, flags); + return; + } + + evtp->evt_arg1 = ndlp; + evtp->evt = LPFC_EVT_DEV_LOSS_DELAY; + list_add_tail(&evtp->evt_listp, &phba->work_list); + if (phba->work_wait) + lpfc_worker_wake_up(phba); + spin_unlock_irqrestore(&phba->hbalock, flags); + return; +} + +void +lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + uint32_t did) { memset(ndlp, 0, sizeof (struct lpfc_nodelist)); INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp); + INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp); init_timer(&ndlp->nlp_delayfunc); ndlp->nlp_delayfunc.function = lpfc_els_retry_delay; ndlp->nlp_delayfunc.data = (unsigned long)ndlp; ndlp->nlp_DID = did; - ndlp->nlp_phba = phba; + ndlp->vport = vport; ndlp->nlp_sid = NLP_NO_SID; INIT_LIST_HEAD(&ndlp->nlp_listp); kref_init(&ndlp->kref); + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, + "node init: did:x%x", + ndlp->nlp_DID, 0, 0); + return; } @@ -2372,8 +2907,13 @@ lpfc_nlp_release(struct kref *kref) { struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist, kref); - lpfc_nlp_remove(ndlp->nlp_phba, ndlp); - mempool_free(ndlp, ndlp->nlp_phba->nlp_mem_pool); + + lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, + "node release: did:x%x flg:x%x type:x%x", + ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); + + lpfc_nlp_remove(ndlp->vport, ndlp); + mempool_free(ndlp, ndlp->vport->phba->nlp_mem_pool); } struct lpfc_nodelist * diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index 2623a9bc7775..c2fb59f595f3 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ 
b/drivers/scsi/lpfc/lpfc_hw.h @@ -59,6 +59,12 @@ #define SLI2_IOCB_CMD_R3XTRA_ENTRIES 24 #define SLI2_IOCB_RSP_R3XTRA_ENTRIES 32 +#define SLI2_IOCB_CMD_SIZE 32 +#define SLI2_IOCB_RSP_SIZE 32 +#define SLI3_IOCB_CMD_SIZE 128 +#define SLI3_IOCB_RSP_SIZE 64 + + /* Common Transport structures and definitions */ union CtRevisionId { @@ -79,6 +85,9 @@ union CtCommandResponse { uint32_t word; }; +#define FC4_FEATURE_INIT 0x2 +#define FC4_FEATURE_TARGET 0x1 + struct lpfc_sli_ct_request { /* Structure is in Big Endian format */ union CtRevisionId RevisionId; @@ -121,20 +130,6 @@ struct lpfc_sli_ct_request { uint32_t rsvd[7]; } rft; - struct rff { - uint32_t PortId; - uint8_t reserved[2]; -#ifdef __BIG_ENDIAN_BITFIELD - uint8_t feature_res:6; - uint8_t feature_init:1; - uint8_t feature_tgt:1; -#else /* __LITTLE_ENDIAN_BITFIELD */ - uint8_t feature_tgt:1; - uint8_t feature_init:1; - uint8_t feature_res:6; -#endif - uint8_t type_code; /* type=8 for FCP */ - } rff; struct rnn { uint32_t PortId; /* For RNN_ID requests */ uint8_t wwnn[8]; @@ -144,15 +139,42 @@ struct lpfc_sli_ct_request { uint8_t len; uint8_t symbname[255]; } rsnn; + struct rspn { /* For RSPN_ID requests */ + uint32_t PortId; + uint8_t len; + uint8_t symbname[255]; + } rspn; + struct gff { + uint32_t PortId; + } gff; + struct gff_acc { + uint8_t fbits[128]; + } gff_acc; +#define FCP_TYPE_FEATURE_OFFSET 4 + struct rff { + uint32_t PortId; + uint8_t reserved[2]; + uint8_t fbits; + uint8_t type_code; /* type=8 for FCP */ + } rff; } un; }; #define SLI_CT_REVISION 1 -#define GID_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request) - 260) -#define RFT_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request) - 228) -#define RFF_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request) - 235) -#define RNN_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request) - 252) -#define RSNN_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request)) +#define GID_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \ + sizeof(struct gid)) +#define GFF_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \ + sizeof(struct gff)) +#define RFT_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \ + sizeof(struct rft)) +#define RFF_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \ + sizeof(struct rff)) +#define RNN_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \ + sizeof(struct rnn)) +#define RSNN_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \ + sizeof(struct rsnn)) +#define RSPN_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \ + sizeof(struct rspn)) /* * FsType Definitions @@ -227,6 +249,7 @@ struct lpfc_sli_ct_request { #define SLI_CTNS_GFT_ID 0x0117 #define SLI_CTNS_GSPN_ID 0x0118 #define SLI_CTNS_GPT_ID 0x011A +#define SLI_CTNS_GFF_ID 0x011F #define SLI_CTNS_GID_PN 0x0121 #define SLI_CTNS_GID_NN 0x0131 #define SLI_CTNS_GIP_NN 0x0135 @@ -240,9 +263,9 @@ struct lpfc_sli_ct_request { #define SLI_CTNS_RNN_ID 0x0213 #define SLI_CTNS_RCS_ID 0x0214 #define SLI_CTNS_RFT_ID 0x0217 -#define SLI_CTNS_RFF_ID 0x021F #define SLI_CTNS_RSPN_ID 0x0218 #define SLI_CTNS_RPT_ID 0x021A +#define SLI_CTNS_RFF_ID 0x021F #define SLI_CTNS_RIP_NN 0x0235 #define SLI_CTNS_RIPA_NN 0x0236 #define SLI_CTNS_RSNN_NN 0x0239 @@ -311,9 +334,9 @@ struct csp { uint8_t bbCreditlsb; /* FC Word 0, byte 3 */ #ifdef __BIG_ENDIAN_BITFIELD - uint16_t increasingOffset:1; /* FC Word 1, bit 31 */ + uint16_t request_multiple_Nport:1; /* FC Word 1, bit 31 */ uint16_t randomOffset:1; /* FC Word 1, bit 30 */ - uint16_t word1Reserved2:1; /* FC Word 1, bit 29 */ + uint16_t response_multiple_NPort:1; /* FC Word 1, bit 
29 */ uint16_t fPort:1; /* FC Word 1, bit 28 */ uint16_t altBbCredit:1; /* FC Word 1, bit 27 */ uint16_t edtovResolution:1; /* FC Word 1, bit 26 */ @@ -332,9 +355,9 @@ struct csp { uint16_t edtovResolution:1; /* FC Word 1, bit 26 */ uint16_t altBbCredit:1; /* FC Word 1, bit 27 */ uint16_t fPort:1; /* FC Word 1, bit 28 */ - uint16_t word1Reserved2:1; /* FC Word 1, bit 29 */ + uint16_t response_multiple_NPort:1; /* FC Word 1, bit 29 */ uint16_t randomOffset:1; /* FC Word 1, bit 30 */ - uint16_t increasingOffset:1; /* FC Word 1, bit 31 */ + uint16_t request_multiple_Nport:1; /* FC Word 1, bit 31 */ uint16_t payloadlength:1; /* FC Word 1, bit 16 */ uint16_t contIncSeqCnt:1; /* FC Word 1, bit 17 */ @@ -1255,7 +1278,9 @@ typedef struct { /* FireFly BIU registers */ #define MBX_KILL_BOARD 0x24 #define MBX_CONFIG_FARP 0x25 #define MBX_BEACON 0x2A +#define MBX_HEARTBEAT 0x31 +#define MBX_CONFIG_HBQ 0x7C #define MBX_LOAD_AREA 0x81 #define MBX_RUN_BIU_DIAG64 0x84 #define MBX_CONFIG_PORT 0x88 @@ -1263,6 +1288,10 @@ typedef struct { /* FireFly BIU registers */ #define MBX_READ_RPI64 0x8F #define MBX_REG_LOGIN64 0x93 #define MBX_READ_LA64 0x95 +#define MBX_REG_VPI 0x96 +#define MBX_UNREG_VPI 0x97 +#define MBX_REG_VNPID 0x96 +#define MBX_UNREG_VNPID 0x97 #define MBX_FLASH_WR_ULA 0x98 #define MBX_SET_DEBUG 0x99 @@ -1335,6 +1364,10 @@ typedef struct { /* FireFly BIU registers */ #define CMD_FCP_TRECEIVE64_CX 0xA1 #define CMD_FCP_TRSP64_CX 0xA3 +#define CMD_IOCB_RCV_SEQ64_CX 0xB5 +#define CMD_IOCB_RCV_ELS64_CX 0xB7 +#define CMD_IOCB_RCV_CONT64_CX 0xBB + #define CMD_GEN_REQUEST64_CR 0xC2 #define CMD_GEN_REQUEST64_CX 0xC3 @@ -1561,6 +1594,7 @@ typedef struct { #define FLAGS_TOPOLOGY_MODE_PT_PT 0x02 /* Attempt pt-pt only */ #define FLAGS_TOPOLOGY_MODE_LOOP 0x04 /* Attempt loop only */ #define FLAGS_TOPOLOGY_MODE_PT_LOOP 0x06 /* Attempt pt-pt then loop */ +#define FLAGS_UNREG_LOGIN_ALL 0x08 /* UNREG_LOGIN all on link down */ #define FLAGS_LIRP_LILP 0x80 /* LIRP / LILP is disabled */ #define FLAGS_TOPOLOGY_FAILOVER 0x0400 /* Bit 10 */ @@ -1744,8 +1778,6 @@ typedef struct { #define LMT_4Gb 0x040 #define LMT_8Gb 0x080 #define LMT_10Gb 0x100 - - uint32_t rsvd2; uint32_t rsvd3; uint32_t max_xri; @@ -1754,7 +1786,10 @@ typedef struct { uint32_t avail_xri; uint32_t avail_iocb; uint32_t avail_rpi; - uint32_t default_rpi; + uint32_t max_vpi; + uint32_t rsvd4; + uint32_t rsvd5; + uint32_t avail_vpi; } READ_CONFIG_VAR; /* Structure for MB Command READ_RCONFIG (12) */ @@ -1818,6 +1853,13 @@ typedef struct { structure */ struct ulp_bde64 sp64; } un; +#ifdef __BIG_ENDIAN_BITFIELD + uint16_t rsvd3; + uint16_t vpi; +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint16_t vpi; + uint16_t rsvd3; +#endif } READ_SPARM_VAR; /* Structure for MB Command READ_STATUS (14) */ @@ -1918,11 +1960,17 @@ typedef struct { #ifdef __BIG_ENDIAN_BITFIELD uint32_t cv:1; uint32_t rr:1; - uint32_t rsvd1:29; + uint32_t rsvd2:2; + uint32_t v3req:1; + uint32_t v3rsp:1; + uint32_t rsvd1:25; uint32_t rv:1; #else /* __LITTLE_ENDIAN_BITFIELD */ uint32_t rv:1; - uint32_t rsvd1:29; + uint32_t rsvd1:25; + uint32_t v3rsp:1; + uint32_t v3req:1; + uint32_t rsvd2:2; uint32_t rr:1; uint32_t cv:1; #endif @@ -1972,8 +2020,8 @@ typedef struct { uint8_t sli1FwName[16]; uint32_t sli2FwRev; uint8_t sli2FwName[16]; - uint32_t rsvd2; - uint32_t RandomData[7]; + uint32_t sli3Feat; + uint32_t RandomData[6]; } READ_REV_VAR; /* Structure for MB Command READ_LINK_STAT (18) */ @@ -2013,6 +2061,14 @@ typedef struct { struct ulp_bde64 sp64; } un; +#ifdef __BIG_ENDIAN_BITFIELD + uint16_t 
rsvd6; + uint16_t vpi; +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint16_t vpi; + uint16_t rsvd6; +#endif + } REG_LOGIN_VAR; /* Word 30 contents for REG_LOGIN */ @@ -2037,16 +2093,78 @@ typedef struct { #ifdef __BIG_ENDIAN_BITFIELD uint16_t rsvd1; uint16_t rpi; + uint32_t rsvd2; + uint32_t rsvd3; + uint32_t rsvd4; + uint32_t rsvd5; + uint16_t rsvd6; + uint16_t vpi; #else /* __LITTLE_ENDIAN_BITFIELD */ uint16_t rpi; uint16_t rsvd1; + uint32_t rsvd2; + uint32_t rsvd3; + uint32_t rsvd4; + uint32_t rsvd5; + uint16_t vpi; + uint16_t rsvd6; #endif } UNREG_LOGIN_VAR; +/* Structure for MB Command REG_VPI (0x96) */ +typedef struct { +#ifdef __BIG_ENDIAN_BITFIELD + uint32_t rsvd1; + uint32_t rsvd2:8; + uint32_t sid:24; + uint32_t rsvd3; + uint32_t rsvd4; + uint32_t rsvd5; + uint16_t rsvd6; + uint16_t vpi; +#else /* __LITTLE_ENDIAN */ + uint32_t rsvd1; + uint32_t sid:24; + uint32_t rsvd2:8; + uint32_t rsvd3; + uint32_t rsvd4; + uint32_t rsvd5; + uint16_t vpi; + uint16_t rsvd6; +#endif +} REG_VPI_VAR; + +/* Structure for MB Command UNREG_VPI (0x97) */ +typedef struct { + uint32_t rsvd1; + uint32_t rsvd2; + uint32_t rsvd3; + uint32_t rsvd4; + uint32_t rsvd5; +#ifdef __BIG_ENDIAN_BITFIELD + uint16_t rsvd6; + uint16_t vpi; +#else /* __LITTLE_ENDIAN */ + uint16_t vpi; + uint16_t rsvd6; +#endif +} UNREG_VPI_VAR; + /* Structure for MB Command UNREG_D_ID (0x23) */ typedef struct { uint32_t did; + uint32_t rsvd2; + uint32_t rsvd3; + uint32_t rsvd4; + uint32_t rsvd5; +#ifdef __BIG_ENDIAN_BITFIELD + uint16_t rsvd6; + uint16_t vpi; +#else + uint16_t vpi; + uint16_t rsvd6; +#endif } UNREG_D_ID_VAR; /* Structure for MB Command READ_LA (21) */ @@ -2178,13 +2296,240 @@ typedef struct { #define DMP_RSP_OFFSET 0x14 /* word 5 contains first word of rsp */ #define DMP_RSP_SIZE 0x6C /* maximum of 27 words of rsp data */ -/* Structure for MB Command CONFIG_PORT (0x88) */ +struct hbq_mask { +#ifdef __BIG_ENDIAN_BITFIELD + uint8_t tmatch; + uint8_t tmask; + uint8_t rctlmatch; + uint8_t rctlmask; +#else /* __LITTLE_ENDIAN */ + uint8_t rctlmask; + uint8_t rctlmatch; + uint8_t tmask; + uint8_t tmatch; +#endif +}; + + +/* Structure for MB Command CONFIG_HBQ (7c) */ + +struct config_hbq_var { +#ifdef __BIG_ENDIAN_BITFIELD + uint32_t rsvd1 :7; + uint32_t recvNotify :1; /* Receive Notification */ + uint32_t numMask :8; /* # Mask Entries */ + uint32_t profile :8; /* Selection Profile */ + uint32_t rsvd2 :8; +#else /* __LITTLE_ENDIAN */ + uint32_t rsvd2 :8; + uint32_t profile :8; /* Selection Profile */ + uint32_t numMask :8; /* # Mask Entries */ + uint32_t recvNotify :1; /* Receive Notification */ + uint32_t rsvd1 :7; +#endif + +#ifdef __BIG_ENDIAN_BITFIELD + uint32_t hbqId :16; + uint32_t rsvd3 :12; + uint32_t ringMask :4; +#else /* __LITTLE_ENDIAN */ + uint32_t ringMask :4; + uint32_t rsvd3 :12; + uint32_t hbqId :16; +#endif + +#ifdef __BIG_ENDIAN_BITFIELD + uint32_t entry_count :16; + uint32_t rsvd4 :8; + uint32_t headerLen :8; +#else /* __LITTLE_ENDIAN */ + uint32_t headerLen :8; + uint32_t rsvd4 :8; + uint32_t entry_count :16; +#endif + + uint32_t hbqaddrLow; + uint32_t hbqaddrHigh; + +#ifdef __BIG_ENDIAN_BITFIELD + uint32_t rsvd5 :31; + uint32_t logEntry :1; +#else /* __LITTLE_ENDIAN */ + uint32_t logEntry :1; + uint32_t rsvd5 :31; +#endif + + uint32_t rsvd6; /* w7 */ + uint32_t rsvd7; /* w8 */ + uint32_t rsvd8; /* w9 */ + + struct hbq_mask hbqMasks[6]; + + + union { + uint32_t allprofiles[12]; + + struct { + #ifdef __BIG_ENDIAN_BITFIELD + uint32_t seqlenoff :16; + uint32_t maxlen :16; + #else /* __LITTLE_ENDIAN */ + uint32_t 
maxlen :16; + uint32_t seqlenoff :16; + #endif + #ifdef __BIG_ENDIAN_BITFIELD + uint32_t rsvd1 :28; + uint32_t seqlenbcnt :4; + #else /* __LITTLE_ENDIAN */ + uint32_t seqlenbcnt :4; + uint32_t rsvd1 :28; + #endif + uint32_t rsvd[10]; + } profile2; + + struct { + #ifdef __BIG_ENDIAN_BITFIELD + uint32_t seqlenoff :16; + uint32_t maxlen :16; + #else /* __LITTLE_ENDIAN */ + uint32_t maxlen :16; + uint32_t seqlenoff :16; + #endif + #ifdef __BIG_ENDIAN_BITFIELD + uint32_t cmdcodeoff :28; + uint32_t rsvd1 :12; + uint32_t seqlenbcnt :4; + #else /* __LITTLE_ENDIAN */ + uint32_t seqlenbcnt :4; + uint32_t rsvd1 :12; + uint32_t cmdcodeoff :28; + #endif + uint32_t cmdmatch[8]; + + uint32_t rsvd[2]; + } profile3; + + struct { + #ifdef __BIG_ENDIAN_BITFIELD + uint32_t seqlenoff :16; + uint32_t maxlen :16; + #else /* __LITTLE_ENDIAN */ + uint32_t maxlen :16; + uint32_t seqlenoff :16; + #endif + #ifdef __BIG_ENDIAN_BITFIELD + uint32_t cmdcodeoff :28; + uint32_t rsvd1 :12; + uint32_t seqlenbcnt :4; + #else /* __LITTLE_ENDIAN */ + uint32_t seqlenbcnt :4; + uint32_t rsvd1 :12; + uint32_t cmdcodeoff :28; + #endif + uint32_t cmdmatch[8]; + + uint32_t rsvd[2]; + } profile5; + + } profiles; +}; + + + +/* Structure for MB Command CONFIG_PORT (0x88) */ typedef struct { - uint32_t pcbLen; +#ifdef __BIG_ENDIAN_BITFIELD + uint32_t cBE : 1; + uint32_t cET : 1; + uint32_t cHpcb : 1; + uint32_t cMA : 1; + uint32_t sli_mode : 4; + uint32_t pcbLen : 24; /* bit 23:0 of memory based port + * config block */ +#else /* __LITTLE_ENDIAN */ + uint32_t pcbLen : 24; /* bit 23:0 of memory based port + * config block */ + uint32_t sli_mode : 4; + uint32_t cMA : 1; + uint32_t cHpcb : 1; + uint32_t cET : 1; + uint32_t cBE : 1; +#endif + uint32_t pcbLow; /* bit 31:0 of memory based port config block */ uint32_t pcbHigh; /* bit 63:32 of memory based port config block */ - uint32_t hbainit[5]; + uint32_t hbainit[6]; + +#ifdef __BIG_ENDIAN_BITFIELD + uint32_t rsvd : 24; /* Reserved */ + uint32_t cmv : 1; /* Configure Max VPIs */ + uint32_t ccrp : 1; /* Config Command Ring Polling */ + uint32_t csah : 1; /* Configure Synchronous Abort Handling */ + uint32_t chbs : 1; /* Cofigure Host Backing store */ + uint32_t cinb : 1; /* Enable Interrupt Notification Block */ + uint32_t cerbm : 1; /* Configure Enhanced Receive Buf Mgmt */ + uint32_t cmx : 1; /* Configure Max XRIs */ + uint32_t cmr : 1; /* Configure Max RPIs */ +#else /* __LITTLE_ENDIAN */ + uint32_t cmr : 1; /* Configure Max RPIs */ + uint32_t cmx : 1; /* Configure Max XRIs */ + uint32_t cerbm : 1; /* Configure Enhanced Receive Buf Mgmt */ + uint32_t cinb : 1; /* Enable Interrupt Notification Block */ + uint32_t chbs : 1; /* Cofigure Host Backing store */ + uint32_t csah : 1; /* Configure Synchronous Abort Handling */ + uint32_t ccrp : 1; /* Config Command Ring Polling */ + uint32_t cmv : 1; /* Configure Max VPIs */ + uint32_t rsvd : 24; /* Reserved */ +#endif +#ifdef __BIG_ENDIAN_BITFIELD + uint32_t rsvd2 : 24; /* Reserved */ + uint32_t gmv : 1; /* Grant Max VPIs */ + uint32_t gcrp : 1; /* Grant Command Ring Polling */ + uint32_t gsah : 1; /* Grant Synchronous Abort Handling */ + uint32_t ghbs : 1; /* Grant Host Backing Store */ + uint32_t ginb : 1; /* Grant Interrupt Notification Block */ + uint32_t gerbm : 1; /* Grant ERBM Request */ + uint32_t gmx : 1; /* Grant Max XRIs */ + uint32_t gmr : 1; /* Grant Max RPIs */ +#else /* __LITTLE_ENDIAN */ + uint32_t gmr : 1; /* Grant Max RPIs */ + uint32_t gmx : 1; /* Grant Max XRIs */ + uint32_t gerbm : 1; /* Grant ERBM Request */ + uint32_t ginb 
: 1; /* Grant Interrupt Notification Block */ + uint32_t ghbs : 1; /* Grant Host Backing Store */ + uint32_t gsah : 1; /* Grant Synchronous Abort Handling */ + uint32_t gcrp : 1; /* Grant Command Ring Polling */ + uint32_t gmv : 1; /* Grant Max VPIs */ + uint32_t rsvd2 : 24; /* Reserved */ +#endif + +#ifdef __BIG_ENDIAN_BITFIELD + uint32_t max_rpi : 16; /* Max RPIs Port should configure */ + uint32_t max_xri : 16; /* Max XRIs Port should configure */ +#else /* __LITTLE_ENDIAN */ + uint32_t max_xri : 16; /* Max XRIs Port should configure */ + uint32_t max_rpi : 16; /* Max RPIs Port should configure */ +#endif + +#ifdef __BIG_ENDIAN_BITFIELD + uint32_t max_hbq : 16; /* Max HBQs Host expect to configure */ + uint32_t rsvd3 : 16; /* Max HBQs Host expect to configure */ +#else /* __LITTLE_ENDIAN */ + uint32_t rsvd3 : 16; /* Max HBQs Host expect to configure */ + uint32_t max_hbq : 16; /* Max HBQs Host expect to configure */ +#endif + + uint32_t rsvd4; /* Reserved */ + +#ifdef __BIG_ENDIAN_BITFIELD + uint32_t rsvd5 : 16; /* Reserved */ + uint32_t max_vpi : 16; /* Max number of virt N-Ports */ +#else /* __LITTLE_ENDIAN */ + uint32_t max_vpi : 16; /* Max number of virt N-Ports */ + uint32_t rsvd5 : 16; /* Reserved */ +#endif + } CONFIG_PORT_VAR; /* SLI-2 Port Control Block */ @@ -2262,33 +2607,40 @@ typedef struct { #define MAILBOX_CMD_SIZE (MAILBOX_CMD_WSIZE * sizeof(uint32_t)) typedef union { - uint32_t varWords[MAILBOX_CMD_WSIZE - 1]; - LOAD_SM_VAR varLdSM; /* cmd = 1 (LOAD_SM) */ - READ_NV_VAR varRDnvp; /* cmd = 2 (READ_NVPARMS) */ - WRITE_NV_VAR varWTnvp; /* cmd = 3 (WRITE_NVPARMS) */ - BIU_DIAG_VAR varBIUdiag; /* cmd = 4 (RUN_BIU_DIAG) */ - INIT_LINK_VAR varInitLnk; /* cmd = 5 (INIT_LINK) */ + uint32_t varWords[MAILBOX_CMD_WSIZE - 1]; /* first word is type/ + * feature/max ring number + */ + LOAD_SM_VAR varLdSM; /* cmd = 1 (LOAD_SM) */ + READ_NV_VAR varRDnvp; /* cmd = 2 (READ_NVPARMS) */ + WRITE_NV_VAR varWTnvp; /* cmd = 3 (WRITE_NVPARMS) */ + BIU_DIAG_VAR varBIUdiag; /* cmd = 4 (RUN_BIU_DIAG) */ + INIT_LINK_VAR varInitLnk; /* cmd = 5 (INIT_LINK) */ DOWN_LINK_VAR varDwnLnk; /* cmd = 6 (DOWN_LINK) */ - CONFIG_LINK varCfgLnk; /* cmd = 7 (CONFIG_LINK) */ - PART_SLIM_VAR varSlim; /* cmd = 8 (PART_SLIM) */ + CONFIG_LINK varCfgLnk; /* cmd = 7 (CONFIG_LINK) */ + PART_SLIM_VAR varSlim; /* cmd = 8 (PART_SLIM) */ CONFIG_RING_VAR varCfgRing; /* cmd = 9 (CONFIG_RING) */ RESET_RING_VAR varRstRing; /* cmd = 10 (RESET_RING) */ READ_CONFIG_VAR varRdConfig; /* cmd = 11 (READ_CONFIG) */ READ_RCONF_VAR varRdRConfig; /* cmd = 12 (READ_RCONFIG) */ READ_SPARM_VAR varRdSparm; /* cmd = 13 (READ_SPARM(64)) */ READ_STATUS_VAR varRdStatus; /* cmd = 14 (READ_STATUS) */ - READ_RPI_VAR varRdRPI; /* cmd = 15 (READ_RPI(64)) */ - READ_XRI_VAR varRdXRI; /* cmd = 16 (READ_XRI) */ - READ_REV_VAR varRdRev; /* cmd = 17 (READ_REV) */ - READ_LNK_VAR varRdLnk; /* cmd = 18 (READ_LNK_STAT) */ + READ_RPI_VAR varRdRPI; /* cmd = 15 (READ_RPI(64)) */ + READ_XRI_VAR varRdXRI; /* cmd = 16 (READ_XRI) */ + READ_REV_VAR varRdRev; /* cmd = 17 (READ_REV) */ + READ_LNK_VAR varRdLnk; /* cmd = 18 (READ_LNK_STAT) */ REG_LOGIN_VAR varRegLogin; /* cmd = 19 (REG_LOGIN(64)) */ UNREG_LOGIN_VAR varUnregLogin; /* cmd = 20 (UNREG_LOGIN) */ - READ_LA_VAR varReadLA; /* cmd = 21 (READ_LA(64)) */ + READ_LA_VAR varReadLA; /* cmd = 21 (READ_LA(64)) */ CLEAR_LA_VAR varClearLA; /* cmd = 22 (CLEAR_LA) */ - DUMP_VAR varDmp; /* Warm Start DUMP mbx cmd */ - UNREG_D_ID_VAR varUnregDID; /* cmd = 0x23 (UNREG_D_ID) */ - CONFIG_FARP_VAR varCfgFarp; /* cmd = 0x25 
(CONFIG_FARP) NEW_FEATURE */ - CONFIG_PORT_VAR varCfgPort; /* cmd = 0x88 (CONFIG_PORT) */ + DUMP_VAR varDmp; /* Warm Start DUMP mbx cmd */ + UNREG_D_ID_VAR varUnregDID; /* cmd = 0x23 (UNREG_D_ID) */ + CONFIG_FARP_VAR varCfgFarp; /* cmd = 0x25 (CONFIG_FARP) + * NEW_FEATURE + */ + struct config_hbq_var varCfgHbq;/* cmd = 0x7c (CONFIG_HBQ) */ + CONFIG_PORT_VAR varCfgPort; /* cmd = 0x88 (CONFIG_PORT) */ + REG_VPI_VAR varRegVpi; /* cmd = 0x96 (REG_VPI) */ + UNREG_VPI_VAR varUnregVpi; /* cmd = 0x97 (UNREG_VPI) */ } MAILVARIANTS; /* @@ -2305,14 +2657,27 @@ struct lpfc_pgp { __le32 rspPutInx; }; -typedef struct _SLI2_DESC { - struct lpfc_hgp host[MAX_RINGS]; +struct sli2_desc { uint32_t unused1[16]; + struct lpfc_hgp host[MAX_RINGS]; + struct lpfc_pgp port[MAX_RINGS]; +}; + +struct sli3_desc { + struct lpfc_hgp host[MAX_RINGS]; + uint32_t reserved[8]; + uint32_t hbq_put[16]; +}; + +struct sli3_pgp { struct lpfc_pgp port[MAX_RINGS]; -} SLI2_DESC; + uint32_t hbq_get[16]; +}; typedef union { - SLI2_DESC s2; + struct sli2_desc s2; + struct sli3_desc s3; + struct sli3_pgp s3_pgp; } SLI_VAR; typedef struct { @@ -2618,6 +2983,25 @@ typedef struct { uint32_t fcpt_Length; /* transfer ready for IWRITE */ } FCPT_FIELDS64; +/* IOCB Command template for CMD_IOCB_RCV_ELS64_CX (0xB7) + or CMD_IOCB_RCV_SEQ64_CX (0xB5) */ + +struct rcv_sli3 { + uint32_t word8Rsvd; +#ifdef __BIG_ENDIAN_BITFIELD + uint16_t vpi; + uint16_t word9Rsvd; +#else /* __LITTLE_ENDIAN */ + uint16_t word9Rsvd; + uint16_t vpi; +#endif + uint32_t word10Rsvd; + uint32_t acc_len; /* accumulated length */ + struct ulp_bde64 bde2; +}; + + + typedef struct _IOCB { /* IOCB structure */ union { GENERIC_RSP grsp; /* Generic response */ @@ -2632,8 +3016,8 @@ typedef struct _IOCB { /* IOCB structure */ /* SLI-2 structures */ - struct ulp_bde64 cont64[2]; /* up to 2 64 bit continuation - bde_64s */ + struct ulp_bde64 cont64[2]; /* up to 2 64 bit continuation + * bde_64s */ ELS_REQUEST64 elsreq64; /* ELS_REQUEST template */ GEN_REQUEST64 genreq64; /* GEN_REQUEST template */ RCV_ELS_REQ64 rcvels64; /* RCV_ELS_REQ template */ @@ -2695,9 +3079,20 @@ typedef struct _IOCB { /* IOCB structure */ uint32_t ulpTimeout:8; #endif + union { + struct rcv_sli3 rcvsli3; /* words 8 - 15 */ + uint32_t sli3Words[24]; /* 96 extra bytes for SLI-3 */ + } unsli3; + +#define ulpCt_h ulpXS +#define ulpCt_l ulpFCP2Rcvy + +#define IOCB_FCP 1 /* IOCB is used for FCP ELS cmds-ulpRsvByte */ +#define IOCB_IP 2 /* IOCB is used for IP ELS cmds */ #define PARM_UNUSED 0 /* PU field (Word 4) not used */ #define PARM_REL_OFF 1 /* PU field (Word 4) = R. O. 
*/ #define PARM_READ_CHECK 2 /* PU field (Word 4) = Data Transfer Length */ +#define PARM_NPIV_DID 3 #define CLASS1 0 /* Class 1 */ #define CLASS2 1 /* Class 2 */ #define CLASS3 2 /* Class 3 */ @@ -2718,39 +3113,51 @@ typedef struct _IOCB { /* IOCB structure */ #define IOSTAT_RSVD2 0xC #define IOSTAT_RSVD3 0xD #define IOSTAT_RSVD4 0xE -#define IOSTAT_RSVD5 0xF +#define IOSTAT_NEED_BUFFER 0xF #define IOSTAT_DRIVER_REJECT 0x10 /* ulpStatus - Driver defined */ #define IOSTAT_DEFAULT 0xF /* Same as rsvd5 for now */ #define IOSTAT_CNT 0x11 } IOCB_t; +/* Structure used for a single HBQ entry */ +struct lpfc_hbq_entry { + struct ulp_bde64 bde; + uint32_t buffer_tag; +}; + #define SLI1_SLIM_SIZE (4 * 1024) /* Up to 498 IOCBs will fit into 16k * 256 (MAILBOX_t) + 140 (PCB_t) + ( 32 (IOCB_t) * 498 ) = < 16384 */ -#define SLI2_SLIM_SIZE (16 * 1024) +#define SLI2_SLIM_SIZE (64 * 1024) /* Maximum IOCBs that will fit in SLI2 slim */ #define MAX_SLI2_IOCB 498 +#define MAX_SLIM_IOCB_SIZE (SLI2_SLIM_SIZE - \ + (sizeof(MAILBOX_t) + sizeof(PCB_t))) + +/* HBQ entries are 4 words each = 4k */ +#define LPFC_TOTAL_HBQ_SIZE (sizeof(struct lpfc_hbq_entry) * \ + lpfc_sli_hbq_count()) struct lpfc_sli2_slim { MAILBOX_t mbx; PCB_t pcb; - IOCB_t IOCBs[MAX_SLI2_IOCB]; + IOCB_t IOCBs[MAX_SLIM_IOCB_SIZE]; }; -/******************************************************************* -This macro check PCI device to allow special handling for LC HBAs. - -Parameters: -device : struct pci_dev 's device field - -return 1 => TRUE - 0 => FALSE - *******************************************************************/ +/* + * This function checks PCI device to allow special handling for LC HBAs. + * + * Parameters: + * device : struct pci_dev 's device field + * + * return 1 => TRUE + * 0 => FALSE + */ static inline int lpfc_is_LC_HBA(unsigned short device) { @@ -2766,3 +3173,16 @@ lpfc_is_LC_HBA(unsigned short device) else return 0; } + +/* + * Determine if an IOCB failed because of a link event or firmware reset. 
+ */ + +static inline int +lpfc_error_lost_link(IOCB_t *iocbp) +{ + return (iocbp->ulpStatus == IOSTAT_LOCAL_REJECT && + (iocbp->un.ulpWord[4] == IOERR_SLI_ABORTED || + iocbp->un.ulpWord[4] == IOERR_LINK_DOWN || + iocbp->un.ulpWord[4] == IOERR_SLI_DOWN)); +} diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 955b2e48d041..f81f85ee190f 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -27,6 +27,7 @@ #include <linux/kthread.h> #include <linux/pci.h> #include <linux/spinlock.h> +#include <linux/ctype.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> @@ -40,15 +41,20 @@ #include "lpfc.h" #include "lpfc_logmsg.h" #include "lpfc_crtn.h" +#include "lpfc_vport.h" #include "lpfc_version.h" +#include "lpfc_vport.h" static int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int); static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); static int lpfc_post_rcv_buf(struct lpfc_hba *); static struct scsi_transport_template *lpfc_transport_template = NULL; +static struct scsi_transport_template *lpfc_vport_transport_template = NULL; static DEFINE_IDR(lpfc_hba_index); + + /************************************************************************/ /* */ /* lpfc_config_port_prep */ @@ -61,7 +67,7 @@ static DEFINE_IDR(lpfc_hba_index); /* */ /************************************************************************/ int -lpfc_config_port_prep(struct lpfc_hba * phba) +lpfc_config_port_prep(struct lpfc_hba *phba) { lpfc_vpd_t *vp = &phba->vpd; int i = 0, rc; @@ -75,12 +81,12 @@ lpfc_config_port_prep(struct lpfc_hba * phba) pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!pmb) { - phba->hba_state = LPFC_HBA_ERROR; + phba->link_state = LPFC_HBA_ERROR; return -ENOMEM; } mb = &pmb->mb; - phba->hba_state = LPFC_INIT_MBX_CMDS; + phba->link_state = LPFC_INIT_MBX_CMDS; if (lpfc_is_LC_HBA(phba->pcidev->device)) { if (init_key) { @@ -100,9 +106,7 @@ lpfc_config_port_prep(struct lpfc_hba * phba) rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); if (rc != MBX_SUCCESS) { - lpfc_printf_log(phba, - KERN_ERR, - LOG_MBOX, + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, "%d:0324 Config Port initialization " "error, mbxCmd x%x READ_NVPARM, " "mbxStatus x%x\n", @@ -112,16 +116,18 @@ lpfc_config_port_prep(struct lpfc_hba * phba) return -ERESTART; } memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename, - sizeof (mb->un.varRDnvp.nodename)); + sizeof(phba->wwnn)); + memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname, + sizeof(phba->wwpn)); } + phba->sli3_options = 0x0; + /* Setup and issue mailbox READ REV command */ lpfc_read_rev(phba, pmb); rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); if (rc != MBX_SUCCESS) { - lpfc_printf_log(phba, - KERN_ERR, - LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "%d:0439 Adapter failed to init, mbxCmd x%x " "READ_REV, mbxStatus x%x\n", phba->brd_no, @@ -130,6 +136,7 @@ lpfc_config_port_prep(struct lpfc_hba * phba) return -ERESTART; } + /* * The value of rr must be 1 since the driver set the cv field to 1. * This setting requires the FW to set all revision fields. 
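The lpfc_error_lost_link() inline added to lpfc_hw.h above classifies an IOCB failure as the result of a link event or firmware reset: ulpStatus equal to IOSTAT_LOCAL_REJECT with word 4 carrying IOERR_SLI_ABORTED, IOERR_LINK_DOWN or IOERR_SLI_DOWN. A minimal caller sketch follows; example_should_retry() is hypothetical (it is not part of this patch) and assumes only the IOCB_t type and the IOERR_* codes defined in lpfc_hw.h.

/*
 * Hypothetical sketch of how a completion path might consume
 * lpfc_error_lost_link(): if the exchange was lost because the link
 * itself went away, retrying is pointless until rediscovery runs,
 * so the caller should drop the command instead of retrying.
 */
static int
example_should_retry(IOCB_t *rspiocb)
{
	/* Lost link or firmware reset: do not retry this exchange */
	if (lpfc_error_lost_link(rspiocb))
		return 0;

	/* Otherwise the caller may evaluate ulpStatus and retry */
	return 1;
}

The ELS and discovery completion handlers elsewhere in this series presumably share this test, which is why it is centralized as an inline here rather than open-coded; those call sites are not shown in this hunk.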
@@ -144,8 +151,12 @@ lpfc_config_port_prep(struct lpfc_hba * phba) return -ERESTART; } + if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) + return -EINVAL; + /* Save information as VPD data */ vp->rev.rBit = 1; + memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t)); vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev; memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16); vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev; @@ -161,6 +172,13 @@ lpfc_config_port_prep(struct lpfc_hba * phba) vp->rev.postKernRev = mb->un.varRdRev.postKernRev; vp->rev.opFwRev = mb->un.varRdRev.opFwRev; + /* If the sli feature level is less then 9, we must + * tear down all RPIs and VPIs on link down if NPIV + * is enabled. + */ + if (vp->rev.feaLevelHigh < 9) + phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN; + if (lpfc_is_LC_HBA(phba->pcidev->device)) memcpy(phba->RandomData, (char *)&mb->un.varWords[24], sizeof (phba->RandomData)); @@ -188,7 +206,7 @@ lpfc_config_port_prep(struct lpfc_hba * phba) if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset) mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset; lpfc_sli_pcimem_bcopy(pmb->context2, lpfc_vpd_data + offset, - mb->un.varDmp.word_cnt); + mb->un.varDmp.word_cnt); offset += mb->un.varDmp.word_cnt; } while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE); lpfc_parse_vpd(phba, lpfc_vpd_data, offset); @@ -212,48 +230,34 @@ out_free_mbox: /* */ /************************************************************************/ int -lpfc_config_port_post(struct lpfc_hba * phba) +lpfc_config_port_post(struct lpfc_hba *phba) { + struct lpfc_vport *vport = phba->pport; LPFC_MBOXQ_t *pmb; MAILBOX_t *mb; struct lpfc_dmabuf *mp; struct lpfc_sli *psli = &phba->sli; uint32_t status, timeout; - int i, j, rc; + int i, j; + int rc; pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!pmb) { - phba->hba_state = LPFC_HBA_ERROR; + phba->link_state = LPFC_HBA_ERROR; return -ENOMEM; } mb = &pmb->mb; - lpfc_config_link(phba, pmb); - rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); - if (rc != MBX_SUCCESS) { - lpfc_printf_log(phba, - KERN_ERR, - LOG_INIT, - "%d:0447 Adapter failed init, mbxCmd x%x " - "CONFIG_LINK mbxStatus x%x\n", - phba->brd_no, - mb->mbxCommand, mb->mbxStatus); - phba->hba_state = LPFC_HBA_ERROR; - mempool_free( pmb, phba->mbox_mem_pool); - return -EIO; - } - /* Get login parameters for NID. 
*/ - lpfc_read_sparam(phba, pmb); + lpfc_read_sparam(phba, pmb, 0); + pmb->vport = vport; if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { - lpfc_printf_log(phba, - KERN_ERR, - LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "%d:0448 Adapter failed init, mbxCmd x%x " "READ_SPARM mbxStatus x%x\n", phba->brd_no, mb->mbxCommand, mb->mbxStatus); - phba->hba_state = LPFC_HBA_ERROR; + phba->link_state = LPFC_HBA_ERROR; mp = (struct lpfc_dmabuf *) pmb->context1; mempool_free( pmb, phba->mbox_mem_pool); lpfc_mbuf_free(phba, mp->virt, mp->phys); @@ -263,25 +267,27 @@ lpfc_config_port_post(struct lpfc_hba * phba) mp = (struct lpfc_dmabuf *) pmb->context1; - memcpy(&phba->fc_sparam, mp->virt, sizeof (struct serv_parm)); + memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm)); lpfc_mbuf_free(phba, mp->virt, mp->phys); kfree(mp); pmb->context1 = NULL; if (phba->cfg_soft_wwnn) - u64_to_wwn(phba->cfg_soft_wwnn, phba->fc_sparam.nodeName.u.wwn); + u64_to_wwn(phba->cfg_soft_wwnn, + vport->fc_sparam.nodeName.u.wwn); if (phba->cfg_soft_wwpn) - u64_to_wwn(phba->cfg_soft_wwpn, phba->fc_sparam.portName.u.wwn); - memcpy(&phba->fc_nodename, &phba->fc_sparam.nodeName, + u64_to_wwn(phba->cfg_soft_wwpn, + vport->fc_sparam.portName.u.wwn); + memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName, sizeof (struct lpfc_name)); - memcpy(&phba->fc_portname, &phba->fc_sparam.portName, + memcpy(&vport->fc_portname, &vport->fc_sparam.portName, sizeof (struct lpfc_name)); /* If no serial number in VPD data, use low 6 bytes of WWNN */ /* This should be consolidated into parse_vpd ? - mr */ if (phba->SerialNumber[0] == 0) { uint8_t *outptr; - outptr = &phba->fc_nodename.u.s.IEEE[0]; + outptr = &vport->fc_nodename.u.s.IEEE[0]; for (i = 0; i < 12; i++) { status = *outptr++; j = ((status & 0xf0) >> 4); @@ -303,15 +309,14 @@ lpfc_config_port_post(struct lpfc_hba * phba) } lpfc_read_config(phba, pmb); + pmb->vport = vport; if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { - lpfc_printf_log(phba, - KERN_ERR, - LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "%d:0453 Adapter failed to init, mbxCmd x%x " "READ_CONFIG, mbxStatus x%x\n", phba->brd_no, mb->mbxCommand, mb->mbxStatus); - phba->hba_state = LPFC_HBA_ERROR; + phba->link_state = LPFC_HBA_ERROR; mempool_free( pmb, phba->mbox_mem_pool); return -EIO; } @@ -338,9 +343,7 @@ lpfc_config_port_post(struct lpfc_hba * phba) || ((phba->cfg_link_speed == LINK_SPEED_10G) && !(phba->lmt & LMT_10Gb))) { /* Reset link speed to auto */ - lpfc_printf_log(phba, - KERN_WARNING, - LOG_LINK_EVENT, + lpfc_printf_log(phba, KERN_WARNING, LOG_LINK_EVENT, "%d:1302 Invalid speed for this board: " "Reset link speed to auto: x%x\n", phba->brd_no, @@ -348,7 +351,7 @@ lpfc_config_port_post(struct lpfc_hba * phba) phba->cfg_link_speed = LINK_SPEED_AUTO; } - phba->hba_state = LPFC_LINK_DOWN; + phba->link_state = LPFC_LINK_DOWN; /* Only process IOCBs on ring 0 till hba_state is READY */ if (psli->ring[psli->extra_ring].cmdringaddr) @@ -359,10 +362,11 @@ lpfc_config_port_post(struct lpfc_hba * phba) psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT; /* Post receive buffers for desired rings */ - lpfc_post_rcv_buf(phba); + if (phba->sli_rev != 3) + lpfc_post_rcv_buf(phba); /* Enable appropriate host interrupts */ - spin_lock_irq(phba->host->host_lock); + spin_lock_irq(&phba->hbalock); status = readl(phba->HCregaddr); status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA; if (psli->num_rings > 0) @@ -380,22 +384,24 @@ lpfc_config_port_post(struct lpfc_hba * phba) 
writel(status, phba->HCregaddr); readl(phba->HCregaddr); /* flush */ - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(&phba->hbalock); /* * Setup the ring 0 (els) timeout handler */ timeout = phba->fc_ratov << 1; - mod_timer(&phba->els_tmofunc, jiffies + HZ * timeout); + mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout); + mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL); + phba->hb_outstanding = 0; + phba->last_completion_time = jiffies; lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed); pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + pmb->vport = vport; rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); lpfc_set_loopback_flag(phba); if (rc != MBX_SUCCESS) { - lpfc_printf_log(phba, - KERN_ERR, - LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "%d:0454 Adapter failed to init, mbxCmd x%x " "INIT_LINK, mbxStatus x%x\n", phba->brd_no, @@ -408,7 +414,7 @@ lpfc_config_port_post(struct lpfc_hba * phba) writel(0xffffffff, phba->HAregaddr); readl(phba->HAregaddr); /* flush */ - phba->hba_state = LPFC_HBA_ERROR; + phba->link_state = LPFC_HBA_ERROR; if (rc != MBX_BUSY) mempool_free(pmb, phba->mbox_mem_pool); return -EIO; @@ -429,18 +435,19 @@ lpfc_config_port_post(struct lpfc_hba * phba) /* */ /************************************************************************/ int -lpfc_hba_down_prep(struct lpfc_hba * phba) +lpfc_hba_down_prep(struct lpfc_hba *phba) { + struct lpfc_vport *vport = phba->pport; + /* Disable interrupts */ writel(0, phba->HCregaddr); readl(phba->HCregaddr); /* flush */ - /* Cleanup potential discovery resources */ - lpfc_els_flush_rscn(phba); - lpfc_els_flush_cmd(phba); - lpfc_disc_flush_list(phba); + list_for_each_entry(vport, &phba->port_list, listentry) { + lpfc_cleanup_discovery_resources(vport); + } - return (0); + return 0; } /************************************************************************/ @@ -453,20 +460,24 @@ lpfc_hba_down_prep(struct lpfc_hba * phba) /* */ /************************************************************************/ int -lpfc_hba_down_post(struct lpfc_hba * phba) +lpfc_hba_down_post(struct lpfc_hba *phba) { struct lpfc_sli *psli = &phba->sli; struct lpfc_sli_ring *pring; struct lpfc_dmabuf *mp, *next_mp; int i; - /* Cleanup preposted buffers on the ELS ring */ - pring = &psli->ring[LPFC_ELS_RING]; - list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { - list_del(&mp->list); - pring->postbufq_cnt--; - lpfc_mbuf_free(phba, mp->virt, mp->phys); - kfree(mp); + if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) + lpfc_sli_hbqbuf_free_all(phba); + else { + /* Cleanup preposted buffers on the ELS ring */ + pring = &psli->ring[LPFC_ELS_RING]; + list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { + list_del(&mp->list); + pring->postbufq_cnt--; + lpfc_mbuf_free(phba, mp->virt, mp->phys); + kfree(mp); + } } for (i = 0; i < psli->num_rings; i++) { @@ -477,6 +488,119 @@ lpfc_hba_down_post(struct lpfc_hba * phba) return 0; } +/* HBA heart beat timeout handler */ +void +lpfc_hb_timeout(unsigned long ptr) +{ + struct lpfc_hba *phba; + unsigned long iflag; + + phba = (struct lpfc_hba *)ptr; + spin_lock_irqsave(&phba->pport->work_port_lock, iflag); + if (!(phba->pport->work_port_events & WORKER_HB_TMO)) + phba->pport->work_port_events |= WORKER_HB_TMO; + spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); + + if (phba->work_wait) + wake_up(phba->work_wait); + return; +} + +static void +lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) +{ + unsigned long drvr_flag; + + 
spin_lock_irqsave(&phba->hbalock, drvr_flag); + phba->hb_outstanding = 0; + spin_unlock_irqrestore(&phba->hbalock, drvr_flag); + + mempool_free(pmboxq, phba->mbox_mem_pool); + if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) && + !(phba->link_state == LPFC_HBA_ERROR) && + !(phba->pport->fc_flag & FC_UNLOADING)) + mod_timer(&phba->hb_tmofunc, + jiffies + HZ * LPFC_HB_MBOX_INTERVAL); + return; +} + +void +lpfc_hb_timeout_handler(struct lpfc_hba *phba) +{ + LPFC_MBOXQ_t *pmboxq; + int retval; + struct lpfc_sli *psli = &phba->sli; + + if ((phba->link_state == LPFC_HBA_ERROR) || + (phba->pport->fc_flag & FC_UNLOADING) || + (phba->pport->fc_flag & FC_OFFLINE_MODE)) + return; + + spin_lock_irq(&phba->pport->work_port_lock); + /* If the timer is already canceled do nothing */ + if (!(phba->pport->work_port_events & WORKER_HB_TMO)) { + spin_unlock_irq(&phba->pport->work_port_lock); + return; + } + + if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ, + jiffies)) { + spin_unlock_irq(&phba->pport->work_port_lock); + if (!phba->hb_outstanding) + mod_timer(&phba->hb_tmofunc, + jiffies + HZ * LPFC_HB_MBOX_INTERVAL); + else + mod_timer(&phba->hb_tmofunc, + jiffies + HZ * LPFC_HB_MBOX_TIMEOUT); + return; + } + spin_unlock_irq(&phba->pport->work_port_lock); + + /* If there is no heart beat outstanding, issue a heartbeat command */ + if (!phba->hb_outstanding) { + pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL); + if (!pmboxq) { + mod_timer(&phba->hb_tmofunc, + jiffies + HZ * LPFC_HB_MBOX_INTERVAL); + return; + } + + lpfc_heart_beat(phba, pmboxq); + pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl; + pmboxq->vport = phba->pport; + retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); + + if (retval != MBX_BUSY && retval != MBX_SUCCESS) { + mempool_free(pmboxq, phba->mbox_mem_pool); + mod_timer(&phba->hb_tmofunc, + jiffies + HZ * LPFC_HB_MBOX_INTERVAL); + return; + } + mod_timer(&phba->hb_tmofunc, + jiffies + HZ * LPFC_HB_MBOX_TIMEOUT); + phba->hb_outstanding = 1; + return; + } else { + /* + * If heart beat timeout called with hb_outstanding set we + * need to take the HBA offline. + */ + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "%d:0459 Adapter heartbeat failure, taking " + "this port offline.\n", phba->brd_no); + + spin_lock_irq(&phba->hbalock); + psli->sli_flag &= ~LPFC_SLI2_ACTIVE; + spin_unlock_irq(&phba->hbalock); + + lpfc_offline_prep(phba); + lpfc_offline(phba); + lpfc_unblock_mgmt_io(phba); + phba->link_state = LPFC_HBA_ERROR; + lpfc_hba_down_post(phba); + } +} + /************************************************************************/ /* */ /* lpfc_handle_eratt */ @@ -486,11 +610,15 @@ lpfc_hba_down_post(struct lpfc_hba * phba) /* */ /************************************************************************/ void -lpfc_handle_eratt(struct lpfc_hba * phba) +lpfc_handle_eratt(struct lpfc_hba *phba) { - struct lpfc_sli *psli = &phba->sli; + struct lpfc_vport *vport = phba->pport; + struct lpfc_sli *psli = &phba->sli; struct lpfc_sli_ring *pring; + struct lpfc_vport *port_iterator; uint32_t event_data; + struct Scsi_Host *shost; + /* If the pci channel is offline, ignore possible errors, * since we cannot communicate with the pci card anyway. 
*/ if (pci_channel_offline(phba->pcidev)) @@ -504,10 +632,17 @@ lpfc_handle_eratt(struct lpfc_hba * phba) "Data: x%x x%x x%x\n", phba->brd_no, phba->work_hs, phba->work_status[0], phba->work_status[1]); - spin_lock_irq(phba->host->host_lock); - phba->fc_flag |= FC_ESTABLISH_LINK; + list_for_each_entry(port_iterator, &phba->port_list, + listentry) { + shost = lpfc_shost_from_vport(port_iterator); + + spin_lock_irq(shost->host_lock); + port_iterator->fc_flag |= FC_ESTABLISH_LINK; + spin_unlock_irq(shost->host_lock); + } + spin_lock_irq(&phba->hbalock); psli->sli_flag &= ~LPFC_SLI2_ACTIVE; - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(&phba->hbalock); /* * Firmware stops when it triggled erratt with HS_FFER6. @@ -544,15 +679,18 @@ lpfc_handle_eratt(struct lpfc_hba * phba) phba->work_status[0], phba->work_status[1]); event_data = FC_REG_DUMP_EVENT; - fc_host_post_vendor_event(phba->host, fc_get_event_number(), + shost = lpfc_shost_from_vport(vport); + fc_host_post_vendor_event(shost, fc_get_event_number(), sizeof(event_data), (char *) &event_data, SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); + spin_lock_irq(&phba->hbalock); psli->sli_flag &= ~LPFC_SLI2_ACTIVE; + spin_unlock_irq(&phba->hbalock); lpfc_offline_prep(phba); lpfc_offline(phba); lpfc_unblock_mgmt_io(phba); - phba->hba_state = LPFC_HBA_ERROR; + phba->link_state = LPFC_HBA_ERROR; lpfc_hba_down_post(phba); } } @@ -566,9 +704,11 @@ lpfc_handle_eratt(struct lpfc_hba * phba) /* */ /************************************************************************/ void -lpfc_handle_latt(struct lpfc_hba * phba) +lpfc_handle_latt(struct lpfc_hba *phba) { - struct lpfc_sli *psli = &phba->sli; + struct lpfc_vport *vport = phba->pport; + struct lpfc_sli *psli = &phba->sli; + struct lpfc_vport *port_iterator; LPFC_MBOXQ_t *pmb; volatile uint32_t control; struct lpfc_dmabuf *mp; @@ -589,20 +729,22 @@ lpfc_handle_latt(struct lpfc_hba * phba) rc = -EIO; /* Cleanup any outstanding ELS commands */ - lpfc_els_flush_cmd(phba); + list_for_each_entry(port_iterator, &phba->port_list, listentry) + lpfc_els_flush_cmd(port_iterator); psli->slistat.link_event++; lpfc_read_la(phba, pmb, mp); pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la; + pmb->vport = vport; rc = lpfc_sli_issue_mbox (phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB)); if (rc == MBX_NOT_FINISHED) goto lpfc_handle_latt_free_mbuf; /* Clear Link Attention in HA REG */ - spin_lock_irq(phba->host->host_lock); + spin_lock_irq(&phba->hbalock); writel(HA_LATT, phba->HAregaddr); readl(phba->HAregaddr); /* flush */ - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(&phba->hbalock); return; @@ -614,7 +756,7 @@ lpfc_handle_latt_free_pmb: mempool_free(pmb, phba->mbox_mem_pool); lpfc_handle_latt_err_exit: /* Enable Link attention interrupts */ - spin_lock_irq(phba->host->host_lock); + spin_lock_irq(&phba->hbalock); psli->sli_flag |= LPFC_PROCESS_LA; control = readl(phba->HCregaddr); control |= HC_LAINT_ENA; @@ -624,15 +766,13 @@ lpfc_handle_latt_err_exit: /* Clear Link Attention in HA REG */ writel(HA_LATT, phba->HAregaddr); readl(phba->HAregaddr); /* flush */ - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(&phba->hbalock); lpfc_linkdown(phba); - phba->hba_state = LPFC_HBA_ERROR; + phba->link_state = LPFC_HBA_ERROR; /* The other case is an error from issue_mbox */ if (rc == -ENOMEM) - lpfc_printf_log(phba, - KERN_WARNING, - LOG_MBOX, + lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, "%d:0300 READ_LA: no buffers\n", phba->brd_no); @@ -646,7 +786,7 @@ lpfc_handle_latt_err_exit: /* */ 
/************************************************************************/ static int -lpfc_parse_vpd(struct lpfc_hba * phba, uint8_t * vpd, int len) +lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len) { uint8_t lenlo, lenhi; int Length; @@ -658,9 +798,7 @@ lpfc_parse_vpd(struct lpfc_hba * phba, uint8_t * vpd, int len) return 0; /* Vital Product */ - lpfc_printf_log(phba, - KERN_INFO, - LOG_INIT, + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "%d:0455 Vital Product Data: x%x x%x x%x x%x\n", phba->brd_no, (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2], @@ -785,7 +923,7 @@ lpfc_parse_vpd(struct lpfc_hba * phba, uint8_t * vpd, int len) } static void -lpfc_get_hba_model_desc(struct lpfc_hba * phba, uint8_t * mdp, uint8_t * descp) +lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) { lpfc_vpd_t *vp; uint16_t dev_id = phba->pcidev->device; @@ -943,7 +1081,7 @@ lpfc_get_hba_model_desc(struct lpfc_hba * phba, uint8_t * mdp, uint8_t * descp) /* Returns the number of buffers NOT posted. */ /**************************************************/ int -lpfc_post_buffer(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, int cnt, +lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt, int type) { IOCB_t *icmd; @@ -955,9 +1093,7 @@ lpfc_post_buffer(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, int cnt, /* While there are buffers to post */ while (cnt > 0) { /* Allocate buffer for command iocb */ - spin_lock_irq(phba->host->host_lock); iocb = lpfc_sli_get_iocbq(phba); - spin_unlock_irq(phba->host->host_lock); if (iocb == NULL) { pring->missbufcnt = cnt; return cnt; @@ -972,9 +1108,7 @@ lpfc_post_buffer(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, int cnt, &mp1->phys); if (mp1 == 0 || mp1->virt == 0) { kfree(mp1); - spin_lock_irq(phba->host->host_lock); lpfc_sli_release_iocbq(phba, iocb); - spin_unlock_irq(phba->host->host_lock); pring->missbufcnt = cnt; return cnt; } @@ -990,9 +1124,7 @@ lpfc_post_buffer(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, int cnt, kfree(mp2); lpfc_mbuf_free(phba, mp1->virt, mp1->phys); kfree(mp1); - spin_lock_irq(phba->host->host_lock); lpfc_sli_release_iocbq(phba, iocb); - spin_unlock_irq(phba->host->host_lock); pring->missbufcnt = cnt; return cnt; } @@ -1018,7 +1150,6 @@ lpfc_post_buffer(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, int cnt, icmd->ulpCommand = CMD_QUE_RING_BUF64_CN; icmd->ulpLe = 1; - spin_lock_irq(phba->host->host_lock); if (lpfc_sli_issue_iocb(phba, pring, iocb, 0) == IOCB_ERROR) { lpfc_mbuf_free(phba, mp1->virt, mp1->phys); kfree(mp1); @@ -1030,14 +1161,11 @@ lpfc_post_buffer(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, int cnt, } lpfc_sli_release_iocbq(phba, iocb); pring->missbufcnt = cnt; - spin_unlock_irq(phba->host->host_lock); return cnt; } - spin_unlock_irq(phba->host->host_lock); lpfc_sli_ringpostbuf_put(phba, pring, mp1); - if (mp2) { + if (mp2) lpfc_sli_ringpostbuf_put(phba, pring, mp2); - } } pring->missbufcnt = 0; return 0; @@ -1050,7 +1178,7 @@ lpfc_post_buffer(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, int cnt, /* */ /************************************************************************/ static int -lpfc_post_rcv_buf(struct lpfc_hba * phba) +lpfc_post_rcv_buf(struct lpfc_hba *phba) { struct lpfc_sli *psli = &phba->sli; @@ -1151,7 +1279,7 @@ lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit) { int t; uint32_t *HashWorking; - uint32_t *pwwnn = phba->wwnn; + uint32_t *pwwnn = (uint32_t *) phba->wwnn; HashWorking = 
kmalloc(80 * sizeof(uint32_t), GFP_KERNEL); if (!HashWorking) @@ -1170,64 +1298,76 @@ lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit) } static void -lpfc_cleanup(struct lpfc_hba * phba) +lpfc_cleanup(struct lpfc_vport *vport) { struct lpfc_nodelist *ndlp, *next_ndlp; /* clean up phba - lpfc specific */ - lpfc_can_disctmo(phba); - list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes, nlp_listp) + lpfc_can_disctmo(vport); + list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) lpfc_nlp_put(ndlp); - - INIT_LIST_HEAD(&phba->fc_nodes); - return; } static void lpfc_establish_link_tmo(unsigned long ptr) { - struct lpfc_hba *phba = (struct lpfc_hba *)ptr; + struct lpfc_hba *phba = (struct lpfc_hba *) ptr; + struct lpfc_vport *vport = phba->pport; unsigned long iflag; - /* Re-establishing Link, timer expired */ lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, "%d:1300 Re-establishing Link, timer expired " "Data: x%x x%x\n", - phba->brd_no, phba->fc_flag, phba->hba_state); - spin_lock_irqsave(phba->host->host_lock, iflag); - phba->fc_flag &= ~FC_ESTABLISH_LINK; - spin_unlock_irqrestore(phba->host->host_lock, iflag); + phba->brd_no, vport->fc_flag, + vport->port_state); + list_for_each_entry(vport, &phba->port_list, listentry) { + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + + spin_lock_irqsave(shost->host_lock, iflag); + vport->fc_flag &= ~FC_ESTABLISH_LINK; + spin_unlock_irqrestore(shost->host_lock, iflag); + } } -static int -lpfc_stop_timer(struct lpfc_hba * phba) +void +lpfc_stop_vport_timers(struct lpfc_vport *vport) { - struct lpfc_sli *psli = &phba->sli; + del_timer_sync(&vport->els_tmofunc); + del_timer_sync(&vport->fc_fdmitmo); + lpfc_can_disctmo(vport); + return; +} + +static void +lpfc_stop_phba_timers(struct lpfc_hba *phba) +{ + struct lpfc_vport *vport; del_timer_sync(&phba->fcp_poll_timer); del_timer_sync(&phba->fc_estabtmo); - del_timer_sync(&phba->fc_disctmo); - del_timer_sync(&phba->fc_fdmitmo); - del_timer_sync(&phba->els_tmofunc); - psli = &phba->sli; - del_timer_sync(&psli->mbox_tmo); - return(1); + list_for_each_entry(vport, &phba->port_list, listentry) + lpfc_stop_vport_timers(vport); + del_timer_sync(&phba->sli.mbox_tmo); + del_timer_sync(&phba->fabric_block_timer); + phba->hb_outstanding = 0; + del_timer_sync(&phba->hb_tmofunc); + return; } int -lpfc_online(struct lpfc_hba * phba) +lpfc_online(struct lpfc_hba *phba) { + struct lpfc_vport *vport = phba->pport; + if (!phba) return 0; - if (!(phba->fc_flag & FC_OFFLINE_MODE)) + if (!(vport->fc_flag & FC_OFFLINE_MODE)) return 0; - lpfc_printf_log(phba, - KERN_WARNING, - LOG_INIT, + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "%d:0458 Bring Adapter online\n", phba->brd_no); @@ -1243,9 +1383,14 @@ lpfc_online(struct lpfc_hba * phba) return 1; } - spin_lock_irq(phba->host->host_lock); - phba->fc_flag &= ~FC_OFFLINE_MODE; - spin_unlock_irq(phba->host->host_lock); + list_for_each_entry(vport, &phba->port_list, listentry) { + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~FC_OFFLINE_MODE; + if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) + vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; + spin_unlock_irq(shost->host_lock); + } lpfc_unblock_mgmt_io(phba); return 0; @@ -1256,9 +1401,9 @@ lpfc_block_mgmt_io(struct lpfc_hba * phba) { unsigned long iflag; - spin_lock_irqsave(phba->host->host_lock, iflag); - phba->fc_flag |= FC_BLOCK_MGMT_IO; - spin_unlock_irqrestore(phba->host->host_lock, iflag); + spin_lock_irqsave(&phba->hbalock, iflag); 
+ phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO; + spin_unlock_irqrestore(&phba->hbalock, iflag); } void @@ -1266,17 +1411,18 @@ lpfc_unblock_mgmt_io(struct lpfc_hba * phba) { unsigned long iflag; - spin_lock_irqsave(phba->host->host_lock, iflag); - phba->fc_flag &= ~FC_BLOCK_MGMT_IO; - spin_unlock_irqrestore(phba->host->host_lock, iflag); + spin_lock_irqsave(&phba->hbalock, iflag); + phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO; + spin_unlock_irqrestore(&phba->hbalock, iflag); } void lpfc_offline_prep(struct lpfc_hba * phba) { + struct lpfc_vport *vport = phba->pport; struct lpfc_nodelist *ndlp, *next_ndlp; - if (phba->fc_flag & FC_OFFLINE_MODE) + if (vport->fc_flag & FC_OFFLINE_MODE) return; lpfc_block_mgmt_io(phba); @@ -1284,39 +1430,49 @@ lpfc_offline_prep(struct lpfc_hba * phba) lpfc_linkdown(phba); /* Issue an unreg_login to all nodes */ - list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes, nlp_listp) + list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) if (ndlp->nlp_state != NLP_STE_UNUSED_NODE) - lpfc_unreg_rpi(phba, ndlp); + lpfc_unreg_rpi(vport, ndlp); lpfc_sli_flush_mbox_queue(phba); } void -lpfc_offline(struct lpfc_hba * phba) +lpfc_offline(struct lpfc_hba *phba) { - unsigned long iflag; + struct lpfc_vport *vport = phba->pport; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_vport *port_iterator; - if (phba->fc_flag & FC_OFFLINE_MODE) + if (vport->fc_flag & FC_OFFLINE_MODE) return; /* stop all timers associated with this hba */ - lpfc_stop_timer(phba); + lpfc_stop_phba_timers(phba); + list_for_each_entry(port_iterator, &phba->port_list, listentry) { + port_iterator->work_port_events = 0; + } - lpfc_printf_log(phba, - KERN_WARNING, - LOG_INIT, + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "%d:0460 Bring Adapter offline\n", phba->brd_no); /* Bring down the SLI Layer and cleanup. The HBA is offline now. */ lpfc_sli_hba_down(phba); - lpfc_cleanup(phba); - spin_lock_irqsave(phba->host->host_lock, iflag); - phba->work_hba_events = 0; + spin_lock_irq(&phba->hbalock); phba->work_ha = 0; - phba->fc_flag |= FC_OFFLINE_MODE; - spin_unlock_irqrestore(phba->host->host_lock, iflag); + vport->fc_flag |= FC_OFFLINE_MODE; + spin_unlock_irq(&phba->hbalock); + list_for_each_entry(port_iterator, &phba->port_list, listentry) { + shost = lpfc_shost_from_vport(port_iterator); + + lpfc_cleanup(port_iterator); + spin_lock_irq(shost->host_lock); + vport->work_port_events = 0; + vport->fc_flag |= FC_OFFLINE_MODE; + spin_unlock_irq(shost->host_lock); + } } /****************************************************************************** @@ -1326,17 +1482,17 @@ lpfc_offline(struct lpfc_hba * phba) * ******************************************************************************/ static int -lpfc_scsi_free(struct lpfc_hba * phba) +lpfc_scsi_free(struct lpfc_hba *phba) { struct lpfc_scsi_buf *sb, *sb_next; struct lpfc_iocbq *io, *io_next; - spin_lock_irq(phba->host->host_lock); + spin_lock_irq(&phba->hbalock); /* Release all the lpfc_scsi_bufs maintained by this host. 
*/ list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) { list_del(&sb->list); pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data, - sb->dma_handle); + sb->dma_handle); kfree(sb); phba->total_scsi_bufs--; } @@ -1348,134 +1504,183 @@ lpfc_scsi_free(struct lpfc_hba * phba) phba->total_iocbq_bufs--; } - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(&phba->hbalock); return 0; } -void lpfc_remove_device(struct lpfc_hba *phba) -{ - unsigned long iflag; - lpfc_free_sysfs_attr(phba); - - spin_lock_irqsave(phba->host->host_lock, iflag); - phba->fc_flag |= FC_UNLOADING; +struct lpfc_vport * +lpfc_create_port(struct lpfc_hba *phba, int instance, struct fc_vport *fc_vport) +{ + struct lpfc_vport *vport; + struct Scsi_Host *shost; + int error = 0; - spin_unlock_irqrestore(phba->host->host_lock, iflag); + shost = scsi_host_alloc(&lpfc_template, sizeof(struct lpfc_vport)); + if (!shost) + goto out; - fc_remove_host(phba->host); - scsi_remove_host(phba->host); + vport = (struct lpfc_vport *) shost->hostdata; + vport->phba = phba; - kthread_stop(phba->worker_thread); + vport->load_flag |= FC_LOADING; + vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; + shost->unique_id = instance; + shost->max_id = LPFC_MAX_TARGET; + shost->max_lun = phba->cfg_max_luns; + shost->this_id = -1; + shost->max_cmd_len = 16; /* - * Bring down the SLI Layer. This step disable all interrupts, - * clears the rings, discards all mailbox commands, and resets - * the HBA. + * Set initial can_queue value since 0 is no longer supported and + * scsi_add_host will fail. This will be adjusted later based on the + * max xri value determined in hba setup. */ - lpfc_sli_hba_down(phba); - lpfc_sli_brdrestart(phba); + shost->can_queue = phba->cfg_hba_queue_depth - 10; + if (fc_vport != NULL) { + shost->transportt = lpfc_vport_transport_template; + vport->port_type = LPFC_NPIV_PORT; + } else { + shost->transportt = lpfc_transport_template; + vport->port_type = LPFC_PHYSICAL_PORT; + } - /* Release the irq reservation */ - free_irq(phba->pcidev->irq, phba); - pci_disable_msi(phba->pcidev); + /* Initialize all internally managed lists. */ + INIT_LIST_HEAD(&vport->fc_nodes); + spin_lock_init(&vport->work_port_lock); - lpfc_cleanup(phba); - lpfc_stop_timer(phba); - phba->work_hba_events = 0; + init_timer(&vport->fc_disctmo); + vport->fc_disctmo.function = lpfc_disc_timeout; + vport->fc_disctmo.data = (unsigned long)vport; - /* - * Call scsi_free before mem_free since scsi bufs are released to their - * corresponding pools here. 
- */ - lpfc_scsi_free(phba); - lpfc_mem_free(phba); + init_timer(&vport->fc_fdmitmo); + vport->fc_fdmitmo.function = lpfc_fdmi_tmo; + vport->fc_fdmitmo.data = (unsigned long)vport; - /* Free resources associated with SLI2 interface */ - dma_free_coherent(&phba->pcidev->dev, SLI2_SLIM_SIZE, - phba->slim2p, phba->slim2p_mapping); + init_timer(&vport->els_tmofunc); + vport->els_tmofunc.function = lpfc_els_timeout; + vport->els_tmofunc.data = (unsigned long)vport; - /* unmap adapter SLIM and Control Registers */ - iounmap(phba->ctrl_regs_memmap_p); - iounmap(phba->slim_memmap_p); + if (fc_vport != NULL) { + error = scsi_add_host(shost, &fc_vport->dev); + } else { + error = scsi_add_host(shost, &phba->pcidev->dev); + } + if (error) + goto out_put_shost; - pci_release_regions(phba->pcidev); - pci_disable_device(phba->pcidev); + list_add_tail(&vport->listentry, &phba->port_list); + return vport; - idr_remove(&lpfc_hba_index, phba->brd_no); - scsi_host_put(phba->host); +out_put_shost: + scsi_host_put(shost); +out: + return NULL; } -void lpfc_scan_start(struct Scsi_Host *host) +void +destroy_port(struct lpfc_vport *vport) { - struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_hba *phba = vport->phba; - if (lpfc_alloc_sysfs_attr(phba)) - goto error; + kfree(vport->vname); - phba->MBslimaddr = phba->slim_memmap_p; - phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; - phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; - phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; - phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; + lpfc_debugfs_terminate(vport); + fc_remove_host(shost); + scsi_remove_host(shost); - if (lpfc_sli_hba_setup(phba)) - goto error; + spin_lock_irq(&phba->hbalock); + list_del_init(&vport->listentry); + spin_unlock_irq(&phba->hbalock); - /* - * hba setup may have changed the hba_queue_depth so we need to adjust - * the value of can_queue. - */ - host->can_queue = phba->cfg_hba_queue_depth - 10; + lpfc_cleanup(vport); return; +} + +int +lpfc_get_instance(void) +{ + int instance = 0; -error: - lpfc_remove_device(phba); + /* Assign an unused number */ + if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL)) + return -1; + if (idr_get_new(&lpfc_hba_index, NULL, &instance)) + return -1; + return instance; } +/* + * Note: there is no scan_start function as adapter initialization + * will have asynchronously kicked off the link initialization. + */ + int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time) { - struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata; + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + int stat = 0; - if (!phba->host) - return 1; - if (time >= 30 * HZ) + spin_lock_irq(shost->host_lock); + + if (vport->fc_flag & FC_UNLOADING) { + stat = 1; + goto finished; + } + if (time >= 30 * HZ) { + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "%d:0461 Scanning longer than 30 " + "seconds. Continuing initialization\n", + phba->brd_no); + stat = 1; + goto finished; + } + if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) { + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "%d:0465 Link down longer than 15 " + "seconds. 
Continuing initialization\n", + phba->brd_no); + stat = 1; goto finished; + } - if (phba->hba_state != LPFC_HBA_READY) - return 0; - if (phba->num_disc_nodes || phba->fc_prli_sent) - return 0; - if ((phba->fc_map_cnt == 0) && (time < 2 * HZ)) - return 0; - if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) - return 0; - if ((phba->hba_state > LPFC_LINK_DOWN) || (time < 15 * HZ)) - return 0; + if (vport->port_state != LPFC_VPORT_READY) + goto finished; + if (vport->num_disc_nodes || vport->fc_prli_sent) + goto finished; + if (vport->fc_map_cnt == 0 && time < 2 * HZ) + goto finished; + if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0) + goto finished; + + stat = 1; finished: - if (phba->cfg_poll & DISABLE_FCP_RING_INT) { - spin_lock_irq(shost->host_lock); - lpfc_poll_start_timer(phba); - spin_unlock_irq(shost->host_lock); - } + spin_unlock_irq(shost->host_lock); + return stat; +} +void lpfc_host_attrib_init(struct Scsi_Host *shost) +{ + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; /* - * set fixed host attributes - * Must done after lpfc_sli_hba_setup() + * Set fixed host attributes. Must done after lpfc_sli_hba_setup(). */ - fc_host_node_name(shost) = wwn_to_u64(phba->fc_nodename.u.wwn); - fc_host_port_name(shost) = wwn_to_u64(phba->fc_portname.u.wwn); + fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); + fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); fc_host_supported_classes(shost) = FC_COS_CLASS3; memset(fc_host_supported_fc4s(shost), 0, - sizeof(fc_host_supported_fc4s(shost))); + sizeof(fc_host_supported_fc4s(shost))); fc_host_supported_fc4s(shost)[2] = 1; fc_host_supported_fc4s(shost)[7] = 1; - lpfc_get_hba_sym_node_name(phba, fc_host_symbolic_name(shost)); + lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost), + sizeof fc_host_symbolic_name(shost)); fc_host_supported_speeds(shost) = 0; if (phba->lmt & LMT_10Gb) @@ -1488,31 +1693,31 @@ finished: fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT; fc_host_maxframe_size(shost) = - ((((uint32_t) phba->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) | - (uint32_t) phba->fc_sparam.cmn.bbRcvSizeLsb); + (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) | + (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb; /* This value is also unchanging */ memset(fc_host_active_fc4s(shost), 0, - sizeof(fc_host_active_fc4s(shost))); + sizeof(fc_host_active_fc4s(shost))); fc_host_active_fc4s(shost)[2] = 1; fc_host_active_fc4s(shost)[7] = 1; + fc_host_max_npiv_vports(shost) = phba->max_vpi; spin_lock_irq(shost->host_lock); - phba->fc_flag &= ~FC_LOADING; + vport->fc_flag &= ~FC_LOADING; spin_unlock_irq(shost->host_lock); - - return 1; } static int __devinit lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) { - struct Scsi_Host *host; - struct lpfc_hba *phba; - struct lpfc_sli *psli; + struct lpfc_vport *vport = NULL; + struct lpfc_hba *phba; + struct lpfc_sli *psli; struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL; + struct Scsi_Host *shost = NULL; unsigned long bar0map_len, bar2map_len; - int error = -ENODEV, retval; + int error = -ENODEV; int i; uint16_t iotag; @@ -1521,61 +1726,46 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) if (pci_request_regions(pdev, LPFC_DRIVER_NAME)) goto out_disable_device; - host = scsi_host_alloc(&lpfc_template, sizeof (struct lpfc_hba)); - if (!host) + phba = kzalloc(sizeof (struct lpfc_hba), GFP_KERNEL); + if (!phba) goto out_release_regions; - phba = (struct 
lpfc_hba*)host->hostdata; - memset(phba, 0, sizeof (struct lpfc_hba)); - phba->host = host; + spin_lock_init(&phba->hbalock); - phba->fc_flag |= FC_LOADING; phba->pcidev = pdev; /* Assign an unused board number */ - if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL)) - goto out_put_host; + if ((phba->brd_no = lpfc_get_instance()) < 0) + goto out_free_phba; - error = idr_get_new(&lpfc_hba_index, NULL, &phba->brd_no); - if (error) - goto out_put_host; - - host->unique_id = phba->brd_no; + INIT_LIST_HEAD(&phba->port_list); + INIT_LIST_HEAD(&phba->hbq_buffer_list); + /* + * Get all the module params for configuring this host and then + * establish the host. + */ + lpfc_get_cfgparam(phba); + phba->max_vpi = LPFC_MAX_VPI; /* Initialize timers used by driver */ init_timer(&phba->fc_estabtmo); phba->fc_estabtmo.function = lpfc_establish_link_tmo; phba->fc_estabtmo.data = (unsigned long)phba; - init_timer(&phba->fc_disctmo); - phba->fc_disctmo.function = lpfc_disc_timeout; - phba->fc_disctmo.data = (unsigned long)phba; - - init_timer(&phba->fc_fdmitmo); - phba->fc_fdmitmo.function = lpfc_fdmi_tmo; - phba->fc_fdmitmo.data = (unsigned long)phba; - init_timer(&phba->els_tmofunc); - phba->els_tmofunc.function = lpfc_els_timeout; - phba->els_tmofunc.data = (unsigned long)phba; + + init_timer(&phba->hb_tmofunc); + phba->hb_tmofunc.function = lpfc_hb_timeout; + phba->hb_tmofunc.data = (unsigned long)phba; + psli = &phba->sli; init_timer(&psli->mbox_tmo); psli->mbox_tmo.function = lpfc_mbox_timeout; - psli->mbox_tmo.data = (unsigned long)phba; - + psli->mbox_tmo.data = (unsigned long) phba; init_timer(&phba->fcp_poll_timer); phba->fcp_poll_timer.function = lpfc_poll_timeout; - phba->fcp_poll_timer.data = (unsigned long)phba; - - /* - * Get all the module params for configuring this host and then - * establish the host parameters. - */ - lpfc_get_cfgparam(phba); - - host->max_id = LPFC_MAX_TARGET; - host->max_lun = phba->cfg_max_luns; - host->this_id = -1; - - INIT_LIST_HEAD(&phba->fc_nodes); + phba->fcp_poll_timer.data = (unsigned long) phba; + init_timer(&phba->fabric_block_timer); + phba->fabric_block_timer.function = lpfc_fabric_block_timeout; + phba->fabric_block_timer.data = (unsigned long) phba; pci_set_master(pdev); pci_try_set_mwi(pdev); @@ -1620,13 +1810,22 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) memset(phba->slim2p, 0, SLI2_SLIM_SIZE); + phba->hbqslimp.virt = dma_alloc_coherent(&phba->pcidev->dev, + lpfc_sli_hbq_size(), + &phba->hbqslimp.phys, + GFP_KERNEL); + if (!phba->hbqslimp.virt) + goto out_free_slim; + + memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); + /* Initialize the SLI Layer to run with lpfc HBAs. */ lpfc_sli_setup(phba); lpfc_sli_queue_setup(phba); error = lpfc_mem_alloc(phba); if (error) - goto out_free_slim; + goto out_free_hbqslimp; /* Initialize and populate the iocb list per host. */ INIT_LIST_HEAD(&phba->lpfc_iocb_list); @@ -1650,10 +1849,11 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) error = -ENOMEM; goto out_free_iocbq; } - spin_lock_irq(phba->host->host_lock); + + spin_lock_irq(&phba->hbalock); list_add(&iocbq_entry->list, &phba->lpfc_iocb_list); phba->total_iocbq_bufs++; - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(&phba->hbalock); } /* Initialize HBA structure */ @@ -1674,22 +1874,22 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) goto out_free_iocbq; } - /* - * Set initial can_queue value since 0 is no longer supported and - * scsi_add_host will fail. 
This will be adjusted later based on the - * max xri value determined in hba setup. - */ - host->can_queue = phba->cfg_hba_queue_depth - 10; - - /* Tell the midlayer we support 16 byte commands */ - host->max_cmd_len = 16; - /* Initialize the list of scsi buffers used by driver for scsi IO. */ spin_lock_init(&phba->scsi_buf_list_lock); INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list); - host->transportt = lpfc_transport_template; - pci_set_drvdata(pdev, host); + /* Initialize list of fabric iocbs */ + INIT_LIST_HEAD(&phba->fabric_iocb_list); + + vport = lpfc_create_port(phba, phba->brd_no, NULL); + if (!vport) + goto out_kthread_stop; + + shost = lpfc_shost_from_vport(vport); + phba->pport = vport; + lpfc_debugfs_initialize(vport); + + pci_set_drvdata(pdev, shost); if (phba->cfg_use_msi) { error = pci_enable_msi(phba->pcidev); @@ -1700,38 +1900,68 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) } error = request_irq(phba->pcidev->irq, lpfc_intr_handler, IRQF_SHARED, - LPFC_DRIVER_NAME, phba); + LPFC_DRIVER_NAME, phba); if (error) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "%d:0451 Enable interrupt handler failed\n", phba->brd_no); - goto out_kthread_stop; + goto out_disable_msi; } - error = scsi_add_host(host, &pdev->dev); - if (error) + phba->MBslimaddr = phba->slim_memmap_p; + phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; + phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; + phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; + phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; + + if (lpfc_alloc_sysfs_attr(vport)) goto out_free_irq; - scsi_scan_host(host); + if (lpfc_sli_hba_setup(phba)) + goto out_remove_device; + + /* + * hba setup may have changed the hba_queue_depth so we need to adjust + * the value of can_queue. 
+ */ + shost->can_queue = phba->cfg_hba_queue_depth - 10; + + lpfc_host_attrib_init(shost); + + if (phba->cfg_poll & DISABLE_FCP_RING_INT) { + spin_lock_irq(shost->host_lock); + lpfc_poll_start_timer(phba); + spin_unlock_irq(shost->host_lock); + } + + scsi_scan_host(shost); return 0; +out_remove_device: + lpfc_free_sysfs_attr(vport); + spin_lock_irq(shost->host_lock); + vport->fc_flag |= FC_UNLOADING; + spin_unlock_irq(shost->host_lock); out_free_irq: - lpfc_stop_timer(phba); - phba->work_hba_events = 0; + lpfc_stop_phba_timers(phba); + phba->pport->work_port_events = 0; free_irq(phba->pcidev->irq, phba); +out_disable_msi: pci_disable_msi(phba->pcidev); + destroy_port(vport); out_kthread_stop: kthread_stop(phba->worker_thread); out_free_iocbq: list_for_each_entry_safe(iocbq_entry, iocbq_next, &phba->lpfc_iocb_list, list) { - spin_lock_irq(phba->host->host_lock); kfree(iocbq_entry); phba->total_iocbq_bufs--; - spin_unlock_irq(phba->host->host_lock); } lpfc_mem_free(phba); +out_free_hbqslimp: + dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), phba->hbqslimp.virt, + phba->hbqslimp.phys); out_free_slim: dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, phba->slim2p, phba->slim2p_mapping); @@ -1741,27 +1971,85 @@ out_iounmap_slim: iounmap(phba->slim_memmap_p); out_idr_remove: idr_remove(&lpfc_hba_index, phba->brd_no); -out_put_host: - phba->host = NULL; - scsi_host_put(host); +out_free_phba: + kfree(phba); out_release_regions: pci_release_regions(pdev); out_disable_device: pci_disable_device(pdev); out: pci_set_drvdata(pdev, NULL); + if (shost) + scsi_host_put(shost); return error; } static void __devexit lpfc_pci_remove_one(struct pci_dev *pdev) { - struct Scsi_Host *host = pci_get_drvdata(pdev); - struct lpfc_hba *phba = (struct lpfc_hba *)host->hostdata; + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + struct lpfc_vport *port_iterator; + list_for_each_entry(port_iterator, &phba->port_list, listentry) + port_iterator->load_flag |= FC_UNLOADING; + + kfree(vport->vname); + lpfc_free_sysfs_attr(vport); + + fc_remove_host(shost); + scsi_remove_host(shost); + + /* + * Bring down the SLI Layer. This step disable all interrupts, + * clears the rings, discards all mailbox commands, and resets + * the HBA. + */ + lpfc_sli_hba_down(phba); + lpfc_sli_brdrestart(phba); + + lpfc_stop_phba_timers(phba); + spin_lock_irq(&phba->hbalock); + list_del_init(&vport->listentry); + spin_unlock_irq(&phba->hbalock); + - lpfc_remove_device(phba); + lpfc_debugfs_terminate(vport); + lpfc_cleanup(vport); + + kthread_stop(phba->worker_thread); + + /* Release the irq reservation */ + free_irq(phba->pcidev->irq, phba); + pci_disable_msi(phba->pcidev); pci_set_drvdata(pdev, NULL); + scsi_host_put(shost); + + /* + * Call scsi_free before mem_free since scsi bufs are released to their + * corresponding pools here. 
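As an aside on the hunk above: the probe and remove paths rely on the standard Linux goto-unwind idiom, where every allocation that succeeds gets a matching out_* label and a failure jumps to the label that releases only what has been set up so far, in reverse order (out_free_hbqslimp before out_free_slim, and so on). The following is a minimal standalone sketch of that idiom with made-up resource names; it is an editorial illustration, not part of this patch.

#include <stdlib.h>

/* Hypothetical three-step setup that unwinds in reverse order on
 * failure, mirroring the out_* ladder in lpfc_pci_probe_one() above.
 * On success the resources intentionally stay allocated, as they
 * would for the lifetime of a probed device. */
static int probe_sketch(void)
{
        void *slim, *hbq_slim, *iocb_pool;

        slim = malloc(256);
        if (!slim)
                goto out;

        hbq_slim = malloc(256);
        if (!hbq_slim)
                goto out_free_slim;

        iocb_pool = malloc(256);
        if (!iocb_pool)
                goto out_free_hbq_slim;

        return 0;

out_free_hbq_slim:
        free(hbq_slim);
out_free_slim:
        free(slim);
out:
        return -1;
}

int main(void)
{
        return probe_sketch() ? 1 : 0;
}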
+ */ + lpfc_scsi_free(phba); + lpfc_mem_free(phba); + + dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), phba->hbqslimp.virt, + phba->hbqslimp.phys); + + /* Free resources associated with SLI2 interface */ + dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, + phba->slim2p, phba->slim2p_mapping); + + /* unmap adapter SLIM and Control Registers */ + iounmap(phba->ctrl_regs_memmap_p); + iounmap(phba->slim_memmap_p); + + idr_remove(&lpfc_hba_index, phba->brd_no); + + kfree(phba); + + pci_release_regions(pdev); + pci_disable_device(pdev); } /** @@ -1819,10 +2107,13 @@ static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev) pci_set_master(pdev); /* Re-establishing Link */ - spin_lock_irq(phba->host->host_lock); - phba->fc_flag |= FC_ESTABLISH_LINK; + spin_lock_irq(host->host_lock); + phba->pport->fc_flag |= FC_ESTABLISH_LINK; + spin_unlock_irq(host->host_lock); + + spin_lock_irq(&phba->hbalock); psli->sli_flag &= ~LPFC_SLI2_ACTIVE; - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(&phba->hbalock); /* Take device offline; this will perform cleanup */ @@ -1932,7 +2223,7 @@ static struct pci_driver lpfc_driver = { .id_table = lpfc_id_table, .probe = lpfc_pci_probe_one, .remove = __devexit_p(lpfc_pci_remove_one), - .err_handler = &lpfc_err_handler, + .err_handler = &lpfc_err_handler, }; static int __init @@ -1945,11 +2236,15 @@ lpfc_init(void) lpfc_transport_template = fc_attach_transport(&lpfc_transport_functions); - if (!lpfc_transport_template) + lpfc_vport_transport_template = + fc_attach_transport(&lpfc_vport_transport_functions); + if (!lpfc_transport_template || !lpfc_vport_transport_template) return -ENOMEM; error = pci_register_driver(&lpfc_driver); - if (error) + if (error) { fc_release_transport(lpfc_transport_template); + fc_release_transport(lpfc_vport_transport_template); + } return error; } @@ -1959,6 +2254,7 @@ lpfc_exit(void) { pci_unregister_driver(&lpfc_driver); fc_release_transport(lpfc_transport_template); + fc_release_transport(lpfc_vport_transport_template); } module_init(lpfc_init); diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h index 438cbcd9eb13..8a6ceffeabcf 100644 --- a/drivers/scsi/lpfc/lpfc_logmsg.h +++ b/drivers/scsi/lpfc/lpfc_logmsg.h @@ -30,6 +30,7 @@ #define LOG_SLI 0x800 /* SLI events */ #define LOG_FCP_ERROR 0x1000 /* log errors, not underruns */ #define LOG_LIBDFC 0x2000 /* Libdfc events */ +#define LOG_VPORT 0x4000 /* NPIV events */ #define LOG_ALL_MSG 0xffff /* LOG all messages */ #define lpfc_printf_log(phba, level, mask, fmt, arg...) 
\ diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index 8041c3f06f7b..8f42fbfdd29e 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c @@ -82,6 +82,22 @@ lpfc_read_nv(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) } /**********************************************/ +/* lpfc_heart_beat Issue a HEART_BEAT */ +/* mailbox command */ +/**********************************************/ +void +lpfc_heart_beat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) +{ + MAILBOX_t *mb; + + mb = &pmb->mb; + memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); + mb->mbxCommand = MBX_HEARTBEAT; + mb->mbxOwner = OWN_HOST; + return; +} + +/**********************************************/ /* lpfc_read_la Issue a READ LA */ /* mailbox command */ /**********************************************/ @@ -134,6 +150,7 @@ lpfc_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) void lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) { + struct lpfc_vport *vport = phba->pport; MAILBOX_t *mb = &pmb->mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); @@ -147,7 +164,7 @@ lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) mb->un.varCfgLnk.cr_count = phba->cfg_cr_count; } - mb->un.varCfgLnk.myId = phba->fc_myDID; + mb->un.varCfgLnk.myId = vport->fc_myDID; mb->un.varCfgLnk.edtov = phba->fc_edtov; mb->un.varCfgLnk.arbtov = phba->fc_arbtov; mb->un.varCfgLnk.ratov = phba->fc_ratov; @@ -239,7 +256,7 @@ lpfc_init_link(struct lpfc_hba * phba, /* mailbox command */ /**********************************************/ int -lpfc_read_sparam(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) +lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi) { struct lpfc_dmabuf *mp; MAILBOX_t *mb; @@ -270,6 +287,7 @@ lpfc_read_sparam(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm); mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys); mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys); + mb->un.varRdSparm.vpi = vpi; /* save address for completion */ pmb->context1 = mp; @@ -282,7 +300,8 @@ lpfc_read_sparam(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) /* mailbox command */ /********************************************/ void -lpfc_unreg_did(struct lpfc_hba * phba, uint32_t did, LPFC_MBOXQ_t * pmb) +lpfc_unreg_did(struct lpfc_hba * phba, uint16_t vpi, uint32_t did, + LPFC_MBOXQ_t * pmb) { MAILBOX_t *mb; @@ -290,6 +309,7 @@ lpfc_unreg_did(struct lpfc_hba * phba, uint32_t did, LPFC_MBOXQ_t * pmb) memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); mb->un.varUnregDID.did = did; + mb->un.varUnregDID.vpi = vpi; mb->mbxCommand = MBX_UNREG_D_ID; mb->mbxOwner = OWN_HOST; @@ -335,19 +355,17 @@ lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) /* mailbox command */ /********************************************/ int -lpfc_reg_login(struct lpfc_hba * phba, - uint32_t did, uint8_t * param, LPFC_MBOXQ_t * pmb, uint32_t flag) +lpfc_reg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t did, + uint8_t *param, LPFC_MBOXQ_t *pmb, uint32_t flag) { + MAILBOX_t *mb = &pmb->mb; uint8_t *sparam; struct lpfc_dmabuf *mp; - MAILBOX_t *mb; - struct lpfc_sli *psli; - psli = &phba->sli; - mb = &pmb->mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); mb->un.varRegLogin.rpi = 0; + mb->un.varRegLogin.vpi = vpi; mb->un.varRegLogin.did = did; mb->un.varWords[30] = flag; /* Set flag to issue action on cmpl */ @@ -359,12 +377,10 @@ lpfc_reg_login(struct lpfc_hba * phba, kfree(mp); mb->mbxCommand = MBX_REG_LOGIN64; /* REG_LOGIN: no buffers */ - lpfc_printf_log(phba, - 
KERN_WARNING, - LOG_MBOX, - "%d:0302 REG_LOGIN: no buffers Data x%x x%x\n", - phba->brd_no, - (uint32_t) did, (uint32_t) flag); + lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, + "%d (%d):0302 REG_LOGIN: no buffers, DID x%x, " + "flag x%x\n", + phba->brd_no, vpi, did, flag); return (1); } INIT_LIST_HEAD(&mp->list); @@ -389,7 +405,8 @@ lpfc_reg_login(struct lpfc_hba * phba, /* mailbox command */ /**********************************************/ void -lpfc_unreg_login(struct lpfc_hba * phba, uint32_t rpi, LPFC_MBOXQ_t * pmb) +lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi, + LPFC_MBOXQ_t * pmb) { MAILBOX_t *mb; @@ -398,12 +415,52 @@ lpfc_unreg_login(struct lpfc_hba * phba, uint32_t rpi, LPFC_MBOXQ_t * pmb) mb->un.varUnregLogin.rpi = (uint16_t) rpi; mb->un.varUnregLogin.rsvd1 = 0; + mb->un.varUnregLogin.vpi = vpi; mb->mbxCommand = MBX_UNREG_LOGIN; mb->mbxOwner = OWN_HOST; return; } +/**************************************************/ +/* lpfc_reg_vpi Issue a REG_VPI */ +/* mailbox command */ +/**************************************************/ +void +lpfc_reg_vpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t sid, + LPFC_MBOXQ_t *pmb) +{ + MAILBOX_t *mb = &pmb->mb; + + memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); + + mb->un.varRegVpi.vpi = vpi; + mb->un.varRegVpi.sid = sid; + + mb->mbxCommand = MBX_REG_VPI; + mb->mbxOwner = OWN_HOST; + return; + +} + +/**************************************************/ +/* lpfc_unreg_vpi Issue a UNREG_VNPI */ +/* mailbox command */ +/**************************************************/ +void +lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb) +{ + MAILBOX_t *mb = &pmb->mb; + memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); + + mb->un.varUnregVpi.vpi = vpi; + + mb->mbxCommand = MBX_UNREG_VPI; + mb->mbxOwner = OWN_HOST; + return; + +} + static void lpfc_config_pcb_setup(struct lpfc_hba * phba) { @@ -412,14 +469,18 @@ lpfc_config_pcb_setup(struct lpfc_hba * phba) PCB_t *pcbp = &phba->slim2p->pcb; dma_addr_t pdma_addr; uint32_t offset; - uint32_t iocbCnt; + uint32_t iocbCnt = 0; int i; pcbp->maxRing = (psli->num_rings - 1); - iocbCnt = 0; for (i = 0; i < psli->num_rings; i++) { pring = &psli->ring[i]; + + pring->sizeCiocb = phba->sli_rev == 3 ? SLI3_IOCB_CMD_SIZE: + SLI2_IOCB_CMD_SIZE; + pring->sizeRiocb = phba->sli_rev == 3 ? 
SLI3_IOCB_RSP_SIZE: + SLI2_IOCB_RSP_SIZE; /* A ring MUST have both cmd and rsp entries defined to be valid */ if ((pring->numCiocb == 0) || (pring->numRiocb == 0)) { @@ -434,20 +495,18 @@ lpfc_config_pcb_setup(struct lpfc_hba * phba) continue; } /* Command ring setup for ring */ - pring->cmdringaddr = - (void *)&phba->slim2p->IOCBs[iocbCnt]; + pring->cmdringaddr = (void *) &phba->slim2p->IOCBs[iocbCnt]; pcbp->rdsc[i].cmdEntries = pring->numCiocb; - offset = (uint8_t *)&phba->slim2p->IOCBs[iocbCnt] - - (uint8_t *)phba->slim2p; + offset = (uint8_t *) &phba->slim2p->IOCBs[iocbCnt] - + (uint8_t *) phba->slim2p; pdma_addr = phba->slim2p_mapping + offset; pcbp->rdsc[i].cmdAddrHigh = putPaddrHigh(pdma_addr); pcbp->rdsc[i].cmdAddrLow = putPaddrLow(pdma_addr); iocbCnt += pring->numCiocb; /* Response ring setup for ring */ - pring->rspringaddr = - (void *)&phba->slim2p->IOCBs[iocbCnt]; + pring->rspringaddr = (void *) &phba->slim2p->IOCBs[iocbCnt]; pcbp->rdsc[i].rspEntries = pring->numRiocb; offset = (uint8_t *)&phba->slim2p->IOCBs[iocbCnt] - @@ -462,16 +521,108 @@ lpfc_config_pcb_setup(struct lpfc_hba * phba) void lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) { - MAILBOX_t *mb; - - mb = &pmb->mb; + MAILBOX_t *mb = &pmb->mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); mb->un.varRdRev.cv = 1; + mb->un.varRdRev.v3req = 1; /* Request SLI3 info */ mb->mbxCommand = MBX_READ_REV; mb->mbxOwner = OWN_HOST; return; } +static void +lpfc_build_hbq_profile2(struct config_hbq_var *hbqmb, + struct lpfc_hbq_init *hbq_desc) +{ + hbqmb->profiles.profile2.seqlenbcnt = hbq_desc->seqlenbcnt; + hbqmb->profiles.profile2.maxlen = hbq_desc->maxlen; + hbqmb->profiles.profile2.seqlenoff = hbq_desc->seqlenoff; +} + +static void +lpfc_build_hbq_profile3(struct config_hbq_var *hbqmb, + struct lpfc_hbq_init *hbq_desc) +{ + hbqmb->profiles.profile3.seqlenbcnt = hbq_desc->seqlenbcnt; + hbqmb->profiles.profile3.maxlen = hbq_desc->maxlen; + hbqmb->profiles.profile3.cmdcodeoff = hbq_desc->cmdcodeoff; + hbqmb->profiles.profile3.seqlenoff = hbq_desc->seqlenoff; + memcpy(&hbqmb->profiles.profile3.cmdmatch, hbq_desc->cmdmatch, + sizeof(hbqmb->profiles.profile3.cmdmatch)); +} + +static void +lpfc_build_hbq_profile5(struct config_hbq_var *hbqmb, + struct lpfc_hbq_init *hbq_desc) +{ + hbqmb->profiles.profile5.seqlenbcnt = hbq_desc->seqlenbcnt; + hbqmb->profiles.profile5.maxlen = hbq_desc->maxlen; + hbqmb->profiles.profile5.cmdcodeoff = hbq_desc->cmdcodeoff; + hbqmb->profiles.profile5.seqlenoff = hbq_desc->seqlenoff; + memcpy(&hbqmb->profiles.profile5.cmdmatch, hbq_desc->cmdmatch, + sizeof(hbqmb->profiles.profile5.cmdmatch)); +} + +void +lpfc_config_hbq(struct lpfc_hba *phba, struct lpfc_hbq_init *hbq_desc, + uint32_t hbq_entry_index, LPFC_MBOXQ_t *pmb) +{ + int i; + MAILBOX_t *mb = &pmb->mb; + struct config_hbq_var *hbqmb = &mb->un.varCfgHbq; + + memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); + hbqmb->entry_count = hbq_desc->entry_count; /* # entries in HBQ */ + hbqmb->recvNotify = hbq_desc->rn; /* Receive + * Notification */ + hbqmb->numMask = hbq_desc->mask_count; /* # R_CTL/TYPE masks + * # in words 0-19 */ + hbqmb->profile = hbq_desc->profile; /* Selection profile: + * 0 = all, + * 7 = logentry */ + hbqmb->ringMask = hbq_desc->ring_mask; /* Binds HBQ to a ring + * e.g. 
Ring0=b0001, + * ring2=b0100 */ + hbqmb->headerLen = hbq_desc->headerLen; /* 0 if not profile 4 + * or 5 */ + hbqmb->logEntry = hbq_desc->logEntry; /* Set to 1 if this + * HBQ will be used + * for LogEntry + * buffers */ + hbqmb->hbqaddrLow = putPaddrLow(phba->hbqslimp.phys) + + hbq_entry_index * sizeof(struct lpfc_hbq_entry); + hbqmb->hbqaddrHigh = putPaddrHigh(phba->hbqslimp.phys); + + mb->mbxCommand = MBX_CONFIG_HBQ; + mb->mbxOwner = OWN_HOST; + + /* Copy info for profiles 2,3,5. Other + * profiles this area is reserved + */ + if (hbq_desc->profile == 2) + lpfc_build_hbq_profile2(hbqmb, hbq_desc); + else if (hbq_desc->profile == 3) + lpfc_build_hbq_profile3(hbqmb, hbq_desc); + else if (hbq_desc->profile == 5) + lpfc_build_hbq_profile5(hbqmb, hbq_desc); + + /* Return if no rctl / type masks for this HBQ */ + if (!hbq_desc->mask_count) + return; + + /* Otherwise we setup specific rctl / type masks for this HBQ */ + for (i = 0; i < hbq_desc->mask_count; i++) { + hbqmb->hbqMasks[i].tmatch = hbq_desc->hbqMasks[i].tmatch; + hbqmb->hbqMasks[i].tmask = hbq_desc->hbqMasks[i].tmask; + hbqmb->hbqMasks[i].rctlmatch = hbq_desc->hbqMasks[i].rctlmatch; + hbqmb->hbqMasks[i].rctlmask = hbq_desc->hbqMasks[i].rctlmask; + } + + return; +} + + + void lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb) { @@ -514,15 +665,16 @@ lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb) } void -lpfc_config_port(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) +lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { + MAILBOX_t __iomem *mb_slim = (MAILBOX_t __iomem *) phba->MBslimaddr; MAILBOX_t *mb = &pmb->mb; dma_addr_t pdma_addr; uint32_t bar_low, bar_high; size_t offset; struct lpfc_hgp hgp; - void __iomem *to_slim; int i; + uint32_t pgp_offset; memset(pmb, 0, sizeof(LPFC_MBOXQ_t)); mb->mbxCommand = MBX_CONFIG_PORT; @@ -535,12 +687,29 @@ lpfc_config_port(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) mb->un.varCfgPort.pcbLow = putPaddrLow(pdma_addr); mb->un.varCfgPort.pcbHigh = putPaddrHigh(pdma_addr); + /* If HBA supports SLI=3 ask for it */ + + if (phba->sli_rev == 3 && phba->vpd.sli3Feat.cerbm) { + mb->un.varCfgPort.cerbm = 1; /* Request HBQs */ + mb->un.varCfgPort.max_hbq = 1; /* Requesting 2 HBQs */ + if (phba->max_vpi && phba->cfg_npiv_enable && + phba->vpd.sli3Feat.cmv) { + mb->un.varCfgPort.max_vpi = phba->max_vpi; + mb->un.varCfgPort.cmv = 1; + phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED; + } else + mb->un.varCfgPort.max_vpi = phba->max_vpi = 0; + } else + phba->sli_rev = 2; + mb->un.varCfgPort.sli_mode = phba->sli_rev; + /* Now setup pcb */ phba->slim2p->pcb.type = TYPE_NATIVE_SLI2; phba->slim2p->pcb.feature = FEATURE_INITIAL_SLI2; /* Setup Mailbox pointers */ - phba->slim2p->pcb.mailBoxSize = sizeof(MAILBOX_t); + phba->slim2p->pcb.mailBoxSize = offsetof(MAILBOX_t, us) + + sizeof(struct sli2_desc); offset = (uint8_t *)&phba->slim2p->mbx - (uint8_t *)phba->slim2p; pdma_addr = phba->slim2p_mapping + offset; phba->slim2p->pcb.mbAddrHigh = putPaddrHigh(pdma_addr); @@ -568,29 +737,70 @@ lpfc_config_port(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) pci_read_config_dword(phba->pcidev, PCI_BASE_ADDRESS_0, &bar_low); pci_read_config_dword(phba->pcidev, PCI_BASE_ADDRESS_1, &bar_high); + /* + * Set up HGP - Port Memory + * + * The port expects the host get/put pointers to reside in memory + * following the "non-diagnostic" mode mailbox (32 words, 0x80 bytes) + * area of SLIM. In SLI-2 mode, there's an additional 16 reserved + * words (0x40 bytes). 
This area is not reserved if HBQs are + * configured in SLI-3. + * + * CR0Put - SLI2(no HBQs) = 0xc0, With HBQs = 0x80 + * RR0Get 0xc4 0x84 + * CR1Put 0xc8 0x88 + * RR1Get 0xcc 0x8c + * CR2Put 0xd0 0x90 + * RR2Get 0xd4 0x94 + * CR3Put 0xd8 0x98 + * RR3Get 0xdc 0x9c + * + * Reserved 0xa0-0xbf + * If HBQs configured: + * HBQ 0 Put ptr 0xc0 + * HBQ 1 Put ptr 0xc4 + * HBQ 2 Put ptr 0xc8 + * ...... + * HBQ(M-1)Put Pointer 0xc0+(M-1)*4 + * + */ + + if (phba->sli_rev == 3) { + phba->host_gp = &mb_slim->us.s3.host[0]; + phba->hbq_put = &mb_slim->us.s3.hbq_put[0]; + } else { + phba->host_gp = &mb_slim->us.s2.host[0]; + phba->hbq_put = NULL; + } /* mask off BAR0's flag bits 0 - 3 */ phba->slim2p->pcb.hgpAddrLow = (bar_low & PCI_BASE_ADDRESS_MEM_MASK) + - (SLIMOFF*sizeof(uint32_t)); + (void __iomem *) phba->host_gp - + (void __iomem *)phba->MBslimaddr; if (bar_low & PCI_BASE_ADDRESS_MEM_TYPE_64) phba->slim2p->pcb.hgpAddrHigh = bar_high; else phba->slim2p->pcb.hgpAddrHigh = 0; /* write HGP data to SLIM at the required longword offset */ memset(&hgp, 0, sizeof(struct lpfc_hgp)); - to_slim = phba->MBslimaddr + (SLIMOFF*sizeof (uint32_t)); for (i=0; i < phba->sli.num_rings; i++) { - lpfc_memcpy_to_slim(to_slim, &hgp, sizeof(struct lpfc_hgp)); - to_slim += sizeof (struct lpfc_hgp); + lpfc_memcpy_to_slim(phba->host_gp + i, &hgp, + sizeof(*phba->host_gp)); } /* Setup Port Group ring pointer */ - offset = (uint8_t *)&phba->slim2p->mbx.us.s2.port - - (uint8_t *)phba->slim2p; - pdma_addr = phba->slim2p_mapping + offset; + if (phba->sli_rev == 3) + pgp_offset = (uint8_t *)&phba->slim2p->mbx.us.s3_pgp.port - + (uint8_t *)phba->slim2p; + else + pgp_offset = (uint8_t *)&phba->slim2p->mbx.us.s2.port - + (uint8_t *)phba->slim2p; + + pdma_addr = phba->slim2p_mapping + pgp_offset; phba->slim2p->pcb.pgpAddrHigh = putPaddrHigh(pdma_addr); phba->slim2p->pcb.pgpAddrLow = putPaddrLow(pdma_addr); + phba->hbq_get = &phba->slim2p->mbx.us.s3_pgp.hbq_get[0]; /* Use callback routine to setp rings in the pcb */ lpfc_config_pcb_setup(phba); @@ -606,11 +816,7 @@ lpfc_config_port(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) /* Swap PCB if needed */ lpfc_sli_pcimem_bcopy(&phba->slim2p->pcb, &phba->slim2p->pcb, - sizeof (PCB_t)); - - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "%d:0405 Service Level Interface (SLI) 2 selected\n", - phba->brd_no); + sizeof(PCB_t)); } void @@ -644,15 +850,23 @@ lpfc_mbox_get(struct lpfc_hba * phba) LPFC_MBOXQ_t *mbq = NULL; struct lpfc_sli *psli = &phba->sli; - list_remove_head((&psli->mboxq), mbq, LPFC_MBOXQ_t, - list); - if (mbq) { + list_remove_head((&psli->mboxq), mbq, LPFC_MBOXQ_t, list); + if (mbq) psli->mboxq_cnt--; - } return mbq; } +void +lpfc_mbox_cmpl_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq) +{ + /* This function expects to be called from interupt context */ + spin_lock(&phba->hbalock); + list_add_tail(&mbq->list, &phba->sli.mboxq_cmpl); + spin_unlock(&phba->hbalock); + return; +} + int lpfc_mbox_tmo_val(struct lpfc_hba *phba, int cmd) { diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c index ec3bbbde6f7a..3594c469494f 100644 --- a/drivers/scsi/lpfc/lpfc_mem.c +++ b/drivers/scsi/lpfc/lpfc_mem.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2005 Emulex. All rights reserved. * + * Copyright (C) 2004-2006 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
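To make the offset table in the lpfc_config_port() comment above concrete: the non-diagnostic mailbox is 32 words (0x80 bytes), SLI-2 keeps a further 16 reserved words (0x40 bytes) after it, and each ring then owns a 4-byte command Put and a 4-byte response Get. The small standalone calculation below (an editorial aid, not part of the patch) reproduces the quoted offsets.

#include <stdio.h>

int main(void)
{
        unsigned int mbx_bytes  = 32 * 4;  /* non-diagnostic mailbox */
        unsigned int rsvd_bytes = 16 * 4;  /* reserved in SLI-2 only */
        unsigned int sli2_base  = mbx_bytes + rsvd_bytes;  /* 0xc0 */
        unsigned int sli3_base  = mbx_bytes;                /* 0x80 */

        for (unsigned int ring = 0; ring < 4; ring++) {
                /* 8 bytes per ring: cmd Put followed by rsp Get */
                printf("ring %u: SLI-2 CR%uPut 0x%02x RR%uGet 0x%02x | "
                       "with HBQs 0x%02x/0x%02x\n",
                       ring,
                       ring, sli2_base + ring * 8,
                       ring, sli2_base + ring * 8 + 4,
                       sli3_base + ring * 8,
                       sli3_base + ring * 8 + 4);
        }
        return 0;
}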
* * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -38,10 +38,13 @@ #define LPFC_MBUF_POOL_SIZE 64 /* max elements in MBUF safety pool */ #define LPFC_MEM_POOL_SIZE 64 /* max elem in non-DMA safety pool */ + + int lpfc_mem_alloc(struct lpfc_hba * phba) { struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; + int longs; int i; phba->lpfc_scsi_dma_buf_pool = pci_pool_create("lpfc_scsi_dma_buf_pool", @@ -80,10 +83,27 @@ lpfc_mem_alloc(struct lpfc_hba * phba) if (!phba->nlp_mem_pool) goto fail_free_mbox_pool; + phba->lpfc_hbq_pool = pci_pool_create("lpfc_hbq_pool",phba->pcidev, + LPFC_BPL_SIZE, 8, 0); + if (!phba->lpfc_hbq_pool) + goto fail_free_nlp_mem_pool; + + /* vpi zero is reserved for the physical port so add 1 to max */ + longs = ((phba->max_vpi + 1) + BITS_PER_LONG - 1) / BITS_PER_LONG; + phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long), GFP_KERNEL); + if (!phba->vpi_bmask) + goto fail_free_hbq_pool; + return 0; + fail_free_hbq_pool: + lpfc_sli_hbqbuf_free_all(phba); + fail_free_nlp_mem_pool: + mempool_destroy(phba->nlp_mem_pool); + phba->nlp_mem_pool = NULL; fail_free_mbox_pool: mempool_destroy(phba->mbox_mem_pool); + phba->mbox_mem_pool = NULL; fail_free_mbuf_pool: while (i--) pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt, @@ -91,8 +111,10 @@ lpfc_mem_alloc(struct lpfc_hba * phba) kfree(pool->elements); fail_free_lpfc_mbuf_pool: pci_pool_destroy(phba->lpfc_mbuf_pool); + phba->lpfc_mbuf_pool = NULL; fail_free_dma_buf_pool: pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool); + phba->lpfc_scsi_dma_buf_pool = NULL; fail: return -ENOMEM; } @@ -106,6 +128,9 @@ lpfc_mem_free(struct lpfc_hba * phba) struct lpfc_dmabuf *mp; int i; + kfree(phba->vpi_bmask); + lpfc_sli_hbqbuf_free_all(phba); + list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq, list) { mp = (struct lpfc_dmabuf *) (mbox->context1); if (mp) { @@ -115,6 +140,15 @@ lpfc_mem_free(struct lpfc_hba * phba) list_del(&mbox->list); mempool_free(mbox, phba->mbox_mem_pool); } + list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq_cmpl, list) { + mp = (struct lpfc_dmabuf *) (mbox->context1); + if (mp) { + lpfc_mbuf_free(phba, mp->virt, mp->phys); + kfree(mp); + } + list_del(&mbox->list); + mempool_free(mbox, phba->mbox_mem_pool); + } psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; if (psli->mbox_active) { @@ -132,13 +166,21 @@ lpfc_mem_free(struct lpfc_hba * phba) pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt, pool->elements[i].phys); kfree(pool->elements); + + pci_pool_destroy(phba->lpfc_hbq_pool); mempool_destroy(phba->nlp_mem_pool); mempool_destroy(phba->mbox_mem_pool); pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool); pci_pool_destroy(phba->lpfc_mbuf_pool); - /* Free the iocb lookup array */ + phba->lpfc_hbq_pool = NULL; + phba->nlp_mem_pool = NULL; + phba->mbox_mem_pool = NULL; + phba->lpfc_scsi_dma_buf_pool = NULL; + phba->lpfc_mbuf_pool = NULL; + + /* Free the iocb lookup array */ kfree(psli->iocbq_lookup); psli->iocbq_lookup = NULL; @@ -148,20 +190,23 @@ void * lpfc_mbuf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle) { struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; + unsigned long iflags; void *ret; ret = pci_pool_alloc(phba->lpfc_mbuf_pool, GFP_KERNEL, handle); - if (!ret && ( mem_flags & MEM_PRI) && pool->current_count) { + spin_lock_irqsave(&phba->hbalock, iflags); + if (!ret && (mem_flags & MEM_PRI) && pool->current_count) { pool->current_count--; ret = pool->elements[pool->current_count].virt; *handle = 
pool->elements[pool->current_count].phys; } + spin_unlock_irqrestore(&phba->hbalock, iflags); return ret; } void -lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma) +__lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma) { struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; @@ -174,3 +219,51 @@ lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma) } return; } + +void +lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma) +{ + unsigned long iflags; + + spin_lock_irqsave(&phba->hbalock, iflags); + __lpfc_mbuf_free(phba, virt, dma); + spin_unlock_irqrestore(&phba->hbalock, iflags); + return; +} + +void * +lpfc_hbq_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle) +{ + void *ret; + ret = pci_pool_alloc(phba->lpfc_hbq_pool, GFP_ATOMIC, handle); + return ret; +} + +void +lpfc_hbq_free(struct lpfc_hba *phba, void *virt, dma_addr_t dma) +{ + pci_pool_free(phba->lpfc_hbq_pool, virt, dma); + return; +} + +void +lpfc_in_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp) +{ + struct hbq_dmabuf *hbq_entry; + + if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { + hbq_entry = container_of(mp, struct hbq_dmabuf, dbuf); + if (hbq_entry->tag == -1) { + lpfc_hbq_free(phba, hbq_entry->dbuf.virt, + hbq_entry->dbuf.phys); + kfree(hbq_entry); + } else { + lpfc_sli_free_hbq(phba, hbq_entry); + } + } else { + lpfc_mbuf_free(phba, mp->virt, mp->phys); + kfree(mp); + } + return; +} + diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index b309841e3846..bca2f5c9b4ba 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -1,4 +1,4 @@ -/******************************************************************* + /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * * Copyright (C) 2004-2007 Emulex. All rights reserved. * @@ -35,20 +35,22 @@ #include "lpfc.h" #include "lpfc_logmsg.h" #include "lpfc_crtn.h" +#include "lpfc_vport.h" +#include "lpfc_debugfs.h" /* Called to verify a rcv'ed ADISC was intended for us. */ static int -lpfc_check_adisc(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, - struct lpfc_name * nn, struct lpfc_name * pn) +lpfc_check_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + struct lpfc_name *nn, struct lpfc_name *pn) { /* Compare the ADISC rsp WWNN / WWPN matches our internal node * table entry for that node. 
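lpfc_in_buf_free() above recovers the HBQ descriptor from a pointer to its embedded lpfc_dmabuf with container_of(mp, struct hbq_dmabuf, dbuf). For readers unfamiliar with the idiom, here is a self-contained sketch of how that recovery works; the struct names below are invented for the illustration and are not the driver's.

#include <stddef.h>
#include <stdio.h>

/* An outer descriptor embeds a generic buffer header.  Given only a
 * pointer to the embedded member, the outer object is recovered by
 * subtracting the member's offset -- exactly what container_of() does. */
struct inner_buf {
        void *virt;
        unsigned long phys;
};

struct outer_desc {
        struct inner_buf buf;   /* embedded member, like dbuf above */
        unsigned int tag;
};

#define container_of_sketch(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
        struct outer_desc desc = { .buf = { NULL, 0 }, .tag = 42 };
        struct inner_buf *mp = &desc.buf;  /* what the free routine is handed */
        struct outer_desc *back;

        back = container_of_sketch(mp, struct outer_desc, buf);
        printf("recovered tag: %u\n", back->tag);  /* prints 42 */
        return 0;
}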
*/ - if (memcmp(nn, &ndlp->nlp_nodename, sizeof (struct lpfc_name)) != 0) + if (memcmp(nn, &ndlp->nlp_nodename, sizeof (struct lpfc_name))) return 0; - if (memcmp(pn, &ndlp->nlp_portname, sizeof (struct lpfc_name)) != 0) + if (memcmp(pn, &ndlp->nlp_portname, sizeof (struct lpfc_name))) return 0; /* we match, return success */ @@ -56,11 +58,10 @@ lpfc_check_adisc(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, } int -lpfc_check_sparm(struct lpfc_hba * phba, - struct lpfc_nodelist * ndlp, struct serv_parm * sp, - uint32_t class) +lpfc_check_sparm(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + struct serv_parm * sp, uint32_t class) { - volatile struct serv_parm *hsp = &phba->fc_sparam; + volatile struct serv_parm *hsp = &vport->fc_sparam; uint16_t hsp_value, ssp_value = 0; /* @@ -75,12 +76,14 @@ lpfc_check_sparm(struct lpfc_hba * phba, hsp->cls1.rcvDataSizeLsb; ssp_value = (sp->cls1.rcvDataSizeMsb << 8) | sp->cls1.rcvDataSizeLsb; + if (!ssp_value) + goto bad_service_param; if (ssp_value > hsp_value) { sp->cls1.rcvDataSizeLsb = hsp->cls1.rcvDataSizeLsb; sp->cls1.rcvDataSizeMsb = hsp->cls1.rcvDataSizeMsb; } } else if (class == CLASS1) { - return 0; + goto bad_service_param; } if (sp->cls2.classValid) { @@ -88,12 +91,14 @@ lpfc_check_sparm(struct lpfc_hba * phba, hsp->cls2.rcvDataSizeLsb; ssp_value = (sp->cls2.rcvDataSizeMsb << 8) | sp->cls2.rcvDataSizeLsb; + if (!ssp_value) + goto bad_service_param; if (ssp_value > hsp_value) { sp->cls2.rcvDataSizeLsb = hsp->cls2.rcvDataSizeLsb; sp->cls2.rcvDataSizeMsb = hsp->cls2.rcvDataSizeMsb; } } else if (class == CLASS2) { - return 0; + goto bad_service_param; } if (sp->cls3.classValid) { @@ -101,12 +106,14 @@ lpfc_check_sparm(struct lpfc_hba * phba, hsp->cls3.rcvDataSizeLsb; ssp_value = (sp->cls3.rcvDataSizeMsb << 8) | sp->cls3.rcvDataSizeLsb; + if (!ssp_value) + goto bad_service_param; if (ssp_value > hsp_value) { sp->cls3.rcvDataSizeLsb = hsp->cls3.rcvDataSizeLsb; sp->cls3.rcvDataSizeMsb = hsp->cls3.rcvDataSizeMsb; } } else if (class == CLASS3) { - return 0; + goto bad_service_param; } /* @@ -125,12 +132,22 @@ lpfc_check_sparm(struct lpfc_hba * phba, memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof (struct lpfc_name)); memcpy(&ndlp->nlp_portname, &sp->portName, sizeof (struct lpfc_name)); return 1; +bad_service_param: + lpfc_printf_log(vport->phba, KERN_ERR, LOG_DISCOVERY, + "%d (%d):0207 Device %x " + "(%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x) sent " + "invalid service parameters. Ignoring device.\n", + vport->phba->brd_no, ndlp->vport->vpi, ndlp->nlp_DID, + sp->nodeName.u.wwn[0], sp->nodeName.u.wwn[1], + sp->nodeName.u.wwn[2], sp->nodeName.u.wwn[3], + sp->nodeName.u.wwn[4], sp->nodeName.u.wwn[5], + sp->nodeName.u.wwn[6], sp->nodeName.u.wwn[7]); + return 0; } static void * -lpfc_check_elscmpl_iocb(struct lpfc_hba * phba, - struct lpfc_iocbq *cmdiocb, - struct lpfc_iocbq *rspiocb) +lpfc_check_elscmpl_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) { struct lpfc_dmabuf *pcmd, *prsp; uint32_t *lp; @@ -168,32 +185,29 @@ lpfc_check_elscmpl_iocb(struct lpfc_hba * phba, * routine effectively results in a "software abort". 
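The class checks in lpfc_check_sparm() above apply one rule per class: a zero receive-data-field size from the remote port is rejected outright (bad_service_param), and a size larger than what this port advertises is written back down to the local value. Below is a standalone sketch of that rule for a single class, with made-up sizes; it is an illustration, not part of the patch.

#include <stdio.h>

/* ours   = the size this port advertises for the class
 * theirs = the size the remote port advertised; clamped in place */
static int check_rcv_size(unsigned int ours, unsigned int *theirs)
{
        if (*theirs == 0)
                return 0;          /* invalid service parameter */
        if (*theirs > ours)
                *theirs = ours;    /* cap at the locally supported size */
        return 1;
}

int main(void)
{
        unsigned int remote = 4096;

        if (check_rcv_size(2048, &remote))
                printf("accepted, size clamped to %u\n", remote);  /* 2048 */

        remote = 0;
        if (!check_rcv_size(2048, &remote))
                printf("rejected: zero receive size\n");
        return 0;
}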
*/ int -lpfc_els_abort(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp) +lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) { LIST_HEAD(completions); - struct lpfc_sli *psli; - struct lpfc_sli_ring *pring; + struct lpfc_sli *psli = &phba->sli; + struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING]; struct lpfc_iocbq *iocb, *next_iocb; IOCB_t *cmd; /* Abort outstanding I/O on NPort <nlp_DID> */ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, - "%d:0205 Abort outstanding I/O on NPort x%x " + "%d (%d):0205 Abort outstanding I/O on NPort x%x " "Data: x%x x%x x%x\n", - phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag, - ndlp->nlp_state, ndlp->nlp_rpi); + phba->brd_no, ndlp->vport->vpi, ndlp->nlp_DID, + ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); - psli = &phba->sli; - pring = &psli->ring[LPFC_ELS_RING]; + lpfc_fabric_abort_nport(ndlp); /* First check the txq */ - spin_lock_irq(phba->host->host_lock); + spin_lock_irq(&phba->hbalock); list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { - /* Check to see if iocb matches the nport we are looking - for */ + /* Check to see if iocb matches the nport we are looking for */ if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) { - /* It matches, so deque and call compl with an - error */ + /* It matches, so deque and call compl with anp error */ list_move_tail(&iocb->list, &completions); pring->txq_cnt--; } @@ -201,37 +215,39 @@ lpfc_els_abort(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp) /* Next check the txcmplq */ list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) { - /* Check to see if iocb matches the nport we are looking - for */ - if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) + /* Check to see if iocb matches the nport we are looking for */ + if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) { lpfc_sli_issue_abort_iotag(phba, pring, iocb); + } } - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(&phba->hbalock); while (!list_empty(&completions)) { iocb = list_get_first(&completions, struct lpfc_iocbq, list); cmd = &iocb->iocb; - list_del(&iocb->list); + list_del_init(&iocb->list); - if (iocb->iocb_cmpl) { + if (!iocb->iocb_cmpl) + lpfc_sli_release_iocbq(phba, iocb); + else { cmd->ulpStatus = IOSTAT_LOCAL_REJECT; cmd->un.ulpWord[4] = IOERR_SLI_ABORTED; (iocb->iocb_cmpl) (phba, iocb, iocb); - } else - lpfc_sli_release_iocbq(phba, iocb); + } } /* If we are delaying issuing an ELS command, cancel it */ if (ndlp->nlp_flag & NLP_DELAY_TMO) - lpfc_cancel_retry_delay_tmo(phba, ndlp); + lpfc_cancel_retry_delay_tmo(phba->pport, ndlp); return 0; } static int -lpfc_rcv_plogi(struct lpfc_hba * phba, - struct lpfc_nodelist * ndlp, - struct lpfc_iocbq *cmdiocb) +lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + struct lpfc_iocbq *cmdiocb) { + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_hba *phba = vport->phba; struct lpfc_dmabuf *pcmd; uint32_t *lp; IOCB_t *icmd; @@ -241,14 +257,14 @@ lpfc_rcv_plogi(struct lpfc_hba * phba, int rc; memset(&stat, 0, sizeof (struct ls_rjt)); - if (phba->hba_state <= LPFC_FLOGI) { + if (vport->port_state <= LPFC_FLOGI) { /* Before responding to PLOGI, check for pt2pt mode. * If we are pt2pt, with an outstanding FLOGI, abort * the FLOGI and resend it first. 
*/ - if (phba->fc_flag & FC_PT2PT) { - lpfc_els_abort_flogi(phba); - if (!(phba->fc_flag & FC_PT2PT_PLOGI)) { + if (vport->fc_flag & FC_PT2PT) { + lpfc_els_abort_flogi(phba); + if (!(vport->fc_flag & FC_PT2PT_PLOGI)) { /* If the other side is supposed to initiate * the PLOGI anyway, just ACC it now and * move on with discovery. @@ -257,45 +273,42 @@ lpfc_rcv_plogi(struct lpfc_hba * phba, phba->fc_ratov = FF_DEF_RATOV; /* Start discovery - this should just do CLEAR_LA */ - lpfc_disc_start(phba); - } else { - lpfc_initial_flogi(phba); - } + lpfc_disc_start(vport); + } else + lpfc_initial_flogi(vport); } else { stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY; stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; - lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, - ndlp); + lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, + ndlp, NULL); return 0; } } pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; lp = (uint32_t *) pcmd->virt; sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t)); - if ((lpfc_check_sparm(phba, ndlp, sp, CLASS3) == 0)) { + if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3) == 0)) { /* Reject this request because invalid parameters */ stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS; - lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp); + lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, + NULL); return 0; } icmd = &cmdiocb->iocb; /* PLOGI chkparm OK */ - lpfc_printf_log(phba, - KERN_INFO, - LOG_ELS, - "%d:0114 PLOGI chkparm OK Data: x%x x%x x%x x%x\n", - phba->brd_no, + lpfc_printf_log(phba, KERN_INFO, LOG_ELS, + "%d (%d):0114 PLOGI chkparm OK Data: x%x x%x x%x x%x\n", + phba->brd_no, vport->vpi, ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag, ndlp->nlp_rpi); - if ((phba->cfg_fcp_class == 2) && - (sp->cls2.classValid)) { + if (phba->cfg_fcp_class == 2 && sp->cls2.classValid) ndlp->nlp_fcp_info |= CLASS2; - } else { + else ndlp->nlp_fcp_info |= CLASS3; - } + ndlp->nlp_class_sup = 0; if (sp->cls1.classValid) ndlp->nlp_class_sup |= FC_COS_CLASS1; @@ -317,35 +330,37 @@ lpfc_rcv_plogi(struct lpfc_hba * phba, case NLP_STE_PRLI_ISSUE: case NLP_STE_UNMAPPED_NODE: case NLP_STE_MAPPED_NODE: - lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL, 0); + lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL, 0); return 1; } - if ((phba->fc_flag & FC_PT2PT) - && !(phba->fc_flag & FC_PT2PT_PLOGI)) { + if ((vport->fc_flag & FC_PT2PT) && + !(vport->fc_flag & FC_PT2PT_PLOGI)) { /* rcv'ed PLOGI decides what our NPortId will be */ - phba->fc_myDID = icmd->un.rcvels.parmRo; + vport->fc_myDID = icmd->un.rcvels.parmRo; mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (mbox == NULL) goto out; lpfc_config_link(phba, mbox); mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + mbox->vport = vport; rc = lpfc_sli_issue_mbox (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB)); if (rc == MBX_NOT_FINISHED) { - mempool_free( mbox, phba->mbox_mem_pool); + mempool_free(mbox, phba->mbox_mem_pool); goto out; } - lpfc_can_disctmo(phba); + lpfc_can_disctmo(vport); } mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); - if (mbox == NULL) + if (!mbox) goto out; - if (lpfc_reg_login(phba, icmd->un.rcvels.remoteID, - (uint8_t *) sp, mbox, 0)) { - mempool_free( mbox, phba->mbox_mem_pool); + rc = lpfc_reg_login(phba, vport->vpi, icmd->un.rcvels.remoteID, + (uint8_t *) sp, mbox, 0); + if (rc) { + mempool_free(mbox, phba->mbox_mem_pool); goto out; } @@ -357,7 +372,10 @@ lpfc_rcv_plogi(struct lpfc_hba * phba, * mbox->context2 = lpfc_nlp_get(ndlp) 
deferred until mailbox * command issued in lpfc_cmpl_els_acc(). */ + mbox->vport = vport; + spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI); + spin_unlock_irq(shost->host_lock); /* * If there is an outstanding PLOGI issued, abort it before @@ -373,24 +391,41 @@ lpfc_rcv_plogi(struct lpfc_hba * phba, lpfc_els_abort(phba, ndlp); } - lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox, 0); + if ((vport->port_type == LPFC_NPIV_PORT && + phba->cfg_vport_restrict_login)) { + + /* In order to preserve RPIs, we want to cleanup + * the default RPI the firmware created to rcv + * this ELS request. The only way to do this is + * to register, then unregister the RPI. + */ + spin_lock_irq(shost->host_lock); + ndlp->nlp_flag |= NLP_RM_DFLT_RPI; + spin_unlock_irq(shost->host_lock); + stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD; + stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; + lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, + ndlp, mbox); + return 1; + } + lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox, 0); return 1; out: stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE; - lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp); + lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); return 0; } static int -lpfc_rcv_padisc(struct lpfc_hba * phba, - struct lpfc_nodelist * ndlp, +lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, struct lpfc_iocbq *cmdiocb) { + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_dmabuf *pcmd; - struct serv_parm *sp; - struct lpfc_name *pnn, *ppn; + struct serv_parm *sp; + struct lpfc_name *pnn, *ppn; struct ls_rjt stat; ADISC *ap; IOCB_t *icmd; @@ -412,13 +447,12 @@ lpfc_rcv_padisc(struct lpfc_hba * phba, } icmd = &cmdiocb->iocb; - if ((icmd->ulpStatus == 0) && - (lpfc_check_adisc(phba, ndlp, pnn, ppn))) { + if (icmd->ulpStatus == 0 && lpfc_check_adisc(vport, ndlp, pnn, ppn)) { if (cmd == ELS_CMD_ADISC) { - lpfc_els_rsp_adisc_acc(phba, cmdiocb, ndlp); + lpfc_els_rsp_adisc_acc(vport, cmdiocb, ndlp); } else { - lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp, - NULL, 0); + lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, + NULL, 0); } return 1; } @@ -427,55 +461,57 @@ lpfc_rcv_padisc(struct lpfc_hba * phba, stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS; stat.un.b.vendorUnique = 0; - lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp); + lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); /* 1 sec timeout */ mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); - spin_lock_irq(phba->host->host_lock); + spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DELAY_TMO; - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(shost->host_lock); ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; ndlp->nlp_prev_state = ndlp->nlp_state; - lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE); + lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); return 0; } static int -lpfc_rcv_logo(struct lpfc_hba * phba, - struct lpfc_nodelist * ndlp, - struct lpfc_iocbq *cmdiocb, - uint32_t els_cmd) +lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + struct lpfc_iocbq *cmdiocb, uint32_t els_cmd) { - /* Put ndlp on NPR list with 1 sec timeout for plogi, ACC logo */ + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + + /* Put ndlp in NPR state with 1 sec timeout for plogi, ACC logo */ /* Only call LOGO ACC for first LOGO, this avoids sending unnecessary 
* PLOGIs during LOGO storms from a device. */ + spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_LOGO_ACC; + spin_unlock_irq(shost->host_lock); if (els_cmd == ELS_CMD_PRLO) - lpfc_els_rsp_acc(phba, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0); + lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0); else - lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0); + lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0); if (!(ndlp->nlp_type & NLP_FABRIC) || - (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) { + (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) { /* Only try to re-login if this is NOT a Fabric Node */ mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); - spin_lock_irq(phba->host->host_lock); + spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DELAY_TMO; - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(shost->host_lock); ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; ndlp->nlp_prev_state = ndlp->nlp_state; - lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE); + lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); } else { ndlp->nlp_prev_state = ndlp->nlp_state; - lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNUSED_NODE); + lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE); } - spin_lock_irq(phba->host->host_lock); + spin_lock_irq(shost->host_lock); ndlp->nlp_flag &= ~NLP_NPR_ADISC; - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(shost->host_lock); /* The driver has to wait until the ACC completes before it continues * processing the LOGO. The action will resume in * lpfc_cmpl_els_logo_acc routine. Since part of processing includes an @@ -485,9 +521,8 @@ lpfc_rcv_logo(struct lpfc_hba * phba, } static void -lpfc_rcv_prli(struct lpfc_hba * phba, - struct lpfc_nodelist * ndlp, - struct lpfc_iocbq *cmdiocb) +lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + struct lpfc_iocbq *cmdiocb) { struct lpfc_dmabuf *pcmd; uint32_t *lp; @@ -501,8 +536,7 @@ lpfc_rcv_prli(struct lpfc_hba * phba, ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; - if ((npr->acceptRspCode == PRLI_REQ_EXECUTED) && - (npr->prliType == PRLI_FCP_TYPE)) { + if (npr->prliType == PRLI_FCP_TYPE) { if (npr->initiatorFunc) ndlp->nlp_type |= NLP_FCP_INITIATOR; if (npr->targetFunc) @@ -517,36 +551,42 @@ lpfc_rcv_prli(struct lpfc_hba * phba, roles |= FC_RPORT_ROLE_FCP_INITIATOR; if (ndlp->nlp_type & NLP_FCP_TARGET) roles |= FC_RPORT_ROLE_FCP_TARGET; + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, + "rport rolechg: role:x%x did:x%x flg:x%x", + roles, ndlp->nlp_DID, ndlp->nlp_flag); + fc_remote_port_rolechg(rport, roles); } } static uint32_t -lpfc_disc_set_adisc(struct lpfc_hba * phba, - struct lpfc_nodelist * ndlp) +lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) { + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_hba *phba = vport->phba; + /* Check config parameter use-adisc or FCP-2 */ - if ((phba->cfg_use_adisc == 0) && - !(phba->fc_flag & FC_RSCN_MODE)) { - if (!(ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE)) - return 0; + if ((phba->cfg_use_adisc && (vport->fc_flag & FC_RSCN_MODE)) || + ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) { + spin_lock_irq(shost->host_lock); + ndlp->nlp_flag |= NLP_NPR_ADISC; + spin_unlock_irq(shost->host_lock); + return 1; } - spin_lock_irq(phba->host->host_lock); - ndlp->nlp_flag |= NLP_NPR_ADISC; - spin_unlock_irq(phba->host->host_lock); - return 1; + ndlp->nlp_flag &= ~NLP_NPR_ADISC; + lpfc_unreg_rpi(vport, ndlp); + return 0; } static uint32_t -lpfc_disc_illegal(struct 
lpfc_hba * phba, - struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) -{ - lpfc_printf_log(phba, - KERN_ERR, - LOG_DISCOVERY, - "%d:0253 Illegal State Transition: node x%x event x%x, " - "state x%x Data: x%x x%x\n", - phba->brd_no, +lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + lpfc_printf_log(vport->phba, KERN_ERR, LOG_DISCOVERY, + "%d (%d):0253 Illegal State Transition: node x%x " + "event x%x, state x%x Data: x%x x%x\n", + vport->phba->brd_no, vport->vpi, ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi, ndlp->nlp_flag); return ndlp->nlp_state; @@ -555,151 +595,162 @@ lpfc_disc_illegal(struct lpfc_hba * phba, /* Start of Discovery State Machine routines */ static uint32_t -lpfc_rcv_plogi_unused_node(struct lpfc_hba * phba, - struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) +lpfc_rcv_plogi_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb; cmdiocb = (struct lpfc_iocbq *) arg; - if (lpfc_rcv_plogi(phba, ndlp, cmdiocb)) { + if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) { ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE; - lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNUSED_NODE); + lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE); return ndlp->nlp_state; } - lpfc_drop_node(phba, ndlp); + lpfc_drop_node(vport, ndlp); return NLP_STE_FREED_NODE; } static uint32_t -lpfc_rcv_els_unused_node(struct lpfc_hba * phba, - struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) +lpfc_rcv_els_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) { - lpfc_issue_els_logo(phba, ndlp, 0); - lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNUSED_NODE); + lpfc_issue_els_logo(vport, ndlp, 0); + lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE); return ndlp->nlp_state; } static uint32_t -lpfc_rcv_logo_unused_node(struct lpfc_hba * phba, - struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) +lpfc_rcv_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) { - struct lpfc_iocbq *cmdiocb; - - cmdiocb = (struct lpfc_iocbq *) arg; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; - spin_lock_irq(phba->host->host_lock); + spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_LOGO_ACC; - spin_unlock_irq(phba->host->host_lock); - lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0); - lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNUSED_NODE); + spin_unlock_irq(shost->host_lock); + lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0); + lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE); return ndlp->nlp_state; } static uint32_t -lpfc_cmpl_logo_unused_node(struct lpfc_hba * phba, - struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) +lpfc_cmpl_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) { - lpfc_drop_node(phba, ndlp); + lpfc_drop_node(vport, ndlp); return NLP_STE_FREED_NODE; } static uint32_t -lpfc_device_rm_unused_node(struct lpfc_hba * phba, - struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) +lpfc_device_rm_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) { - lpfc_drop_node(phba, ndlp); + lpfc_drop_node(vport, ndlp); return NLP_STE_FREED_NODE; } static uint32_t -lpfc_rcv_plogi_plogi_issue(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, +lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, 
uint32_t evt) { + struct lpfc_hba *phba = vport->phba; struct lpfc_iocbq *cmdiocb = arg; - struct lpfc_dmabuf *pcmd; - struct serv_parm *sp; - uint32_t *lp; + struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; + uint32_t *lp = (uint32_t *) pcmd->virt; + struct serv_parm *sp = (struct serv_parm *) (lp + 1); struct ls_rjt stat; int port_cmp; - pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; - lp = (uint32_t *) pcmd->virt; - sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t)); - memset(&stat, 0, sizeof (struct ls_rjt)); /* For a PLOGI, we only accept if our portname is less * than the remote portname. */ phba->fc_stat.elsLogiCol++; - port_cmp = memcmp(&phba->fc_portname, &sp->portName, - sizeof (struct lpfc_name)); + port_cmp = memcmp(&vport->fc_portname, &sp->portName, + sizeof(struct lpfc_name)); if (port_cmp >= 0) { /* Reject this request because the remote node will accept ours */ stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; stat.un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS; - lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp); + lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, + NULL); } else { - lpfc_rcv_plogi(phba, ndlp, cmdiocb); - } /* if our portname was less */ + lpfc_rcv_plogi(vport, ndlp, cmdiocb); + } /* If our portname was less */ return ndlp->nlp_state; } static uint32_t -lpfc_rcv_logo_plogi_issue(struct lpfc_hba * phba, - struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) +lpfc_rcv_prli_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) { - struct lpfc_iocbq *cmdiocb; + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; + struct ls_rjt stat; - cmdiocb = (struct lpfc_iocbq *) arg; + memset(&stat, 0, sizeof (struct ls_rjt)); + stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY; + stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; + lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); + return ndlp->nlp_state; +} - /* software abort outstanding PLOGI */ - lpfc_els_abort(phba, ndlp); +static uint32_t +lpfc_rcv_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) +{ + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; + + /* software abort outstanding PLOGI */ + lpfc_els_abort(vport->phba, ndlp); - lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO); + lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO); return ndlp->nlp_state; } static uint32_t -lpfc_rcv_els_plogi_issue(struct lpfc_hba * phba, - struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) +lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) { - struct lpfc_iocbq *cmdiocb; - - cmdiocb = (struct lpfc_iocbq *) arg; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_hba *phba = vport->phba; + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; /* software abort outstanding PLOGI */ lpfc_els_abort(phba, ndlp); if (evt == NLP_EVT_RCV_LOGO) { - lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0); + lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0); } else { - lpfc_issue_els_logo(phba, ndlp, 0); + lpfc_issue_els_logo(vport, ndlp, 0); } - /* Put ndlp in npr list set plogi timer for 1 sec */ + /* Put ndlp in npr state set plogi timer for 1 sec */ mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); - spin_lock_irq(phba->host->host_lock); + spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DELAY_TMO; - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(shost->host_lock); 
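lpfc_rcv_plogi_plogi_issue() above resolves a PLOGI collision (both sides have a PLOGI outstanding) by comparing the two port names as 8-byte strings: if memcmp() says the local WWPN is greater than or equal to the remote one, the incoming PLOGI is rejected on the expectation that the remote will accept ours; otherwise the incoming PLOGI is processed. A tiny standalone sketch with invented WWPN values, for illustration only:

#include <stdio.h>
#include <string.h>

int main(void)
{
        /* Made-up 8-byte world-wide port names, compared as big-endian
         * byte strings just as the driver compares struct lpfc_name. */
        unsigned char local_wwpn[8]  = { 0x10, 0x00, 0x00, 0x00,
                                         0xc9, 0x00, 0x00, 0x02 };
        unsigned char remote_wwpn[8] = { 0x10, 0x00, 0x00, 0x00,
                                         0xc9, 0x00, 0x00, 0x01 };

        if (memcmp(local_wwpn, remote_wwpn, sizeof(local_wwpn)) >= 0)
                printf("reject incoming PLOGI; our own PLOGI should win\n");
        else
                printf("process the incoming PLOGI\n");
        return 0;
}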
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE; - lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE); + lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); return ndlp->nlp_state; } static uint32_t -lpfc_cmpl_plogi_plogi_issue(struct lpfc_hba * phba, - struct lpfc_nodelist * ndlp, void *arg, +lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) { - struct lpfc_iocbq *cmdiocb, *rspiocb; + struct lpfc_hba *phba = vport->phba; + struct lpfc_iocbq *cmdiocb, *rspiocb; struct lpfc_dmabuf *pcmd, *prsp, *mp; uint32_t *lp; IOCB_t *irsp; @@ -721,31 +772,26 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_hba * phba, pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; - prsp = list_get_first(&pcmd->list, - struct lpfc_dmabuf, - list); - lp = (uint32_t *) prsp->virt; + prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); + lp = (uint32_t *) prsp->virt; sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t)); - if (!lpfc_check_sparm(phba, ndlp, sp, CLASS3)) + if (!lpfc_check_sparm(vport, ndlp, sp, CLASS3)) goto out; /* PLOGI chkparm OK */ - lpfc_printf_log(phba, - KERN_INFO, - LOG_ELS, - "%d:0121 PLOGI chkparm OK " + lpfc_printf_log(phba, KERN_INFO, LOG_ELS, + "%d (%d):0121 PLOGI chkparm OK " "Data: x%x x%x x%x x%x\n", - phba->brd_no, + phba->brd_no, vport->vpi, ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag, ndlp->nlp_rpi); - if ((phba->cfg_fcp_class == 2) && - (sp->cls2.classValid)) { + if (phba->cfg_fcp_class == 2 && (sp->cls2.classValid)) ndlp->nlp_fcp_info |= CLASS2; - } else { + else ndlp->nlp_fcp_info |= CLASS3; - } + ndlp->nlp_class_sup = 0; if (sp->cls1.classValid) ndlp->nlp_class_sup |= FC_COS_CLASS1; @@ -756,16 +802,23 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_hba * phba, if (sp->cls4.classValid) ndlp->nlp_class_sup |= FC_COS_CLASS4; ndlp->nlp_maxframe = - ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | - sp->cmn.bbRcvSizeLsb; + ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb; - if (!(mbox = mempool_alloc(phba->mbox_mem_pool, - GFP_KERNEL))) + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) { + lpfc_printf_log(phba, KERN_ERR, LOG_ELS, + "%d (%d):0133 PLOGI: no memory for reg_login " + "Data: x%x x%x x%x x%x\n", + phba->brd_no, vport->vpi, + ndlp->nlp_DID, ndlp->nlp_state, + ndlp->nlp_flag, ndlp->nlp_rpi); goto out; + } - lpfc_unreg_rpi(phba, ndlp); - if (lpfc_reg_login(phba, irsp->un.elsreq64.remoteID, (uint8_t *) sp, - mbox, 0) == 0) { + lpfc_unreg_rpi(vport, ndlp); + + if (lpfc_reg_login(phba, vport->vpi, irsp->un.elsreq64.remoteID, + (uint8_t *) sp, mbox, 0) == 0) { switch (ndlp->nlp_DID) { case NameServer_DID: mbox->mbox_cmpl = lpfc_mbx_cmpl_ns_reg_login; @@ -777,68 +830,104 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_hba * phba, mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; } mbox->context2 = lpfc_nlp_get(ndlp); + mbox->vport = vport; if (lpfc_sli_issue_mbox(phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB)) != MBX_NOT_FINISHED) { - lpfc_nlp_set_state(phba, ndlp, NLP_STE_REG_LOGIN_ISSUE); + lpfc_nlp_set_state(vport, ndlp, + NLP_STE_REG_LOGIN_ISSUE); return ndlp->nlp_state; } lpfc_nlp_put(ndlp); - mp = (struct lpfc_dmabuf *)mbox->context1; + mp = (struct lpfc_dmabuf *) mbox->context1; lpfc_mbuf_free(phba, mp->virt, mp->phys); kfree(mp); mempool_free(mbox, phba->mbox_mem_pool); + + lpfc_printf_log(phba, KERN_ERR, LOG_ELS, + "%d (%d):0134 PLOGI: cannot issue reg_login " + "Data: x%x x%x x%x x%x\n", + phba->brd_no, vport->vpi, + ndlp->nlp_DID, ndlp->nlp_state, + ndlp->nlp_flag, 
ndlp->nlp_rpi); } else { mempool_free(mbox, phba->mbox_mem_pool); + + lpfc_printf_log(phba, KERN_ERR, LOG_ELS, + "%d (%d):0135 PLOGI: cannot format reg_login " + "Data: x%x x%x x%x x%x\n", + phba->brd_no, vport->vpi, + ndlp->nlp_DID, ndlp->nlp_state, + ndlp->nlp_flag, ndlp->nlp_rpi); } - out: +out: + if (ndlp->nlp_DID == NameServer_DID) { + lpfc_vport_set_state(vport, FC_VPORT_FAILED); + lpfc_printf_log(phba, KERN_ERR, LOG_ELS, + "%d (%d):0261 Cannot Register NameServer login\n", + phba->brd_no, vport->vpi); + } + /* Free this node since the driver cannot login or has the wrong sparm */ - lpfc_drop_node(phba, ndlp); + lpfc_drop_node(vport, ndlp); return NLP_STE_FREED_NODE; } static uint32_t -lpfc_device_rm_plogi_issue(struct lpfc_hba * phba, - struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) +lpfc_device_rm_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) { - if(ndlp->nlp_flag & NLP_NPR_2B_DISC) { + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + + if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { + spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_NODEV_REMOVE; + spin_unlock_irq(shost->host_lock); return ndlp->nlp_state; - } - else { + } else { /* software abort outstanding PLOGI */ - lpfc_els_abort(phba, ndlp); + lpfc_els_abort(vport->phba, ndlp); - lpfc_drop_node(phba, ndlp); + lpfc_drop_node(vport, ndlp); return NLP_STE_FREED_NODE; } } static uint32_t -lpfc_device_recov_plogi_issue(struct lpfc_hba * phba, - struct lpfc_nodelist * ndlp, void *arg, - uint32_t evt) +lpfc_device_recov_plogi_issue(struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp, + void *arg, + uint32_t evt) { + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_hba *phba = vport->phba; + + /* Don't do anything that will mess up processing of the + * previous RSCN. 
+ */ + if (vport->fc_flag & FC_RSCN_DEFERRED) + return ndlp->nlp_state; + /* software abort outstanding PLOGI */ lpfc_els_abort(phba, ndlp); ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE; - lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE); - spin_lock_irq(phba->host->host_lock); + lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); + spin_lock_irq(shost->host_lock); ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(shost->host_lock); return ndlp->nlp_state; } static uint32_t -lpfc_rcv_plogi_adisc_issue(struct lpfc_hba * phba, - struct lpfc_nodelist * ndlp, void *arg, - uint32_t evt) +lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) { + struct lpfc_hba *phba = vport->phba; struct lpfc_iocbq *cmdiocb; /* software abort outstanding ADISC */ @@ -846,34 +935,31 @@ lpfc_rcv_plogi_adisc_issue(struct lpfc_hba * phba, cmdiocb = (struct lpfc_iocbq *) arg; - if (lpfc_rcv_plogi(phba, ndlp, cmdiocb)) { + if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) return ndlp->nlp_state; - } + ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; - lpfc_nlp_set_state(phba, ndlp, NLP_STE_PLOGI_ISSUE); - lpfc_issue_els_plogi(phba, ndlp->nlp_DID, 0); + lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); + lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); return ndlp->nlp_state; } static uint32_t -lpfc_rcv_prli_adisc_issue(struct lpfc_hba * phba, - struct lpfc_nodelist * ndlp, void *arg, - uint32_t evt) +lpfc_rcv_prli_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) { - struct lpfc_iocbq *cmdiocb; - - cmdiocb = (struct lpfc_iocbq *) arg; + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; - lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp); + lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp); return ndlp->nlp_state; } static uint32_t -lpfc_rcv_logo_adisc_issue(struct lpfc_hba * phba, - struct lpfc_nodelist * ndlp, void *arg, - uint32_t evt) +lpfc_rcv_logo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) { + struct lpfc_hba *phba = vport->phba; struct lpfc_iocbq *cmdiocb; cmdiocb = (struct lpfc_iocbq *) arg; @@ -881,42 +967,43 @@ lpfc_rcv_logo_adisc_issue(struct lpfc_hba * phba, /* software abort outstanding ADISC */ lpfc_els_abort(phba, ndlp); - lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO); + lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO); return ndlp->nlp_state; } static uint32_t -lpfc_rcv_padisc_adisc_issue(struct lpfc_hba * phba, - struct lpfc_nodelist * ndlp, void *arg, - uint32_t evt) +lpfc_rcv_padisc_adisc_issue(struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb; cmdiocb = (struct lpfc_iocbq *) arg; - lpfc_rcv_padisc(phba, ndlp, cmdiocb); + lpfc_rcv_padisc(vport, ndlp, cmdiocb); return ndlp->nlp_state; } static uint32_t -lpfc_rcv_prlo_adisc_issue(struct lpfc_hba * phba, - struct lpfc_nodelist * ndlp, void *arg, - uint32_t evt) +lpfc_rcv_prlo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb; cmdiocb = (struct lpfc_iocbq *) arg; /* Treat like rcv logo */ - lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_PRLO); + lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO); return ndlp->nlp_state; } static uint32_t -lpfc_cmpl_adisc_adisc_issue(struct lpfc_hba * phba, - struct lpfc_nodelist * ndlp, void *arg, - uint32_t evt) +lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp, + 
void *arg, uint32_t evt) { + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_hba *phba = vport->phba; struct lpfc_iocbq *cmdiocb, *rspiocb; IOCB_t *irsp; ADISC *ap; @@ -928,101 +1015,112 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_hba * phba, irsp = &rspiocb->iocb; if ((irsp->ulpStatus) || - (!lpfc_check_adisc(phba, ndlp, &ap->nodeName, &ap->portName))) { + (!lpfc_check_adisc(vport, ndlp, &ap->nodeName, &ap->portName))) { /* 1 sec timeout */ mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); - spin_lock_irq(phba->host->host_lock); + spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DELAY_TMO; - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(shost->host_lock); ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; - memset(&ndlp->nlp_nodename, 0, sizeof (struct lpfc_name)); - memset(&ndlp->nlp_portname, 0, sizeof (struct lpfc_name)); + memset(&ndlp->nlp_nodename, 0, sizeof(struct lpfc_name)); + memset(&ndlp->nlp_portname, 0, sizeof(struct lpfc_name)); ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; - lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE); - lpfc_unreg_rpi(phba, ndlp); + lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); + lpfc_unreg_rpi(vport, ndlp); return ndlp->nlp_state; } if (ndlp->nlp_type & NLP_FCP_TARGET) { ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; - lpfc_nlp_set_state(phba, ndlp, NLP_STE_MAPPED_NODE); + lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE); } else { ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; - lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNMAPPED_NODE); + lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); } return ndlp->nlp_state; } static uint32_t -lpfc_device_rm_adisc_issue(struct lpfc_hba * phba, - struct lpfc_nodelist * ndlp, void *arg, - uint32_t evt) +lpfc_device_rm_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) { - if(ndlp->nlp_flag & NLP_NPR_2B_DISC) { + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + + if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { + spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_NODEV_REMOVE; + spin_unlock_irq(shost->host_lock); return ndlp->nlp_state; - } - else { + } else { /* software abort outstanding ADISC */ - lpfc_els_abort(phba, ndlp); + lpfc_els_abort(vport->phba, ndlp); - lpfc_drop_node(phba, ndlp); + lpfc_drop_node(vport, ndlp); return NLP_STE_FREED_NODE; } } static uint32_t -lpfc_device_recov_adisc_issue(struct lpfc_hba * phba, - struct lpfc_nodelist * ndlp, void *arg, - uint32_t evt) +lpfc_device_recov_adisc_issue(struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp, + void *arg, + uint32_t evt) { + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_hba *phba = vport->phba; + + /* Don't do anything that will mess up processing of the + * previous RSCN. 
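/*
 * Aside (illustration only): the ADISC completion above has three outcomes --
 * schedule a 1-second delayed PLOGI retry when the ADISC failed or the
 * returned names do not match, otherwise MAPPED for FCP targets and UNMAPPED
 * for everything else.  A small model of just that decision; the state enum
 * is a stand-in for the driver's NLP_STE_* values.
 */
#include <stdio.h>

enum sketch_nlp_state { SK_NPR_NODE, SK_UNMAPPED_NODE, SK_MAPPED_NODE };

static enum sketch_nlp_state
sketch_adisc_cmpl_next_state(int adisc_ok, int names_match, int is_fcp_target,
			     int *schedule_delayed_plogi)
{
	if (!adisc_ok || !names_match) {
		/* The driver also clears the cached names and unregisters the RPI here. */
		*schedule_delayed_plogi = 1;	/* mod_timer(..., jiffies + HZ) in the patch */
		return SK_NPR_NODE;
	}
	*schedule_delayed_plogi = 0;
	return is_fcp_target ? SK_MAPPED_NODE : SK_UNMAPPED_NODE;
}

int main(void)
{
	int delay;

	printf("ok target -> %d delay=%d\n",
	       sketch_adisc_cmpl_next_state(1, 1, 1, &delay), delay);	/* MAPPED, 0 */
	printf("bad names -> %d delay=%d\n",
	       sketch_adisc_cmpl_next_state(1, 0, 1, &delay), delay);	/* NPR, 1 */
	return 0;
}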
+ */ + if (vport->fc_flag & FC_RSCN_DEFERRED) + return ndlp->nlp_state; + /* software abort outstanding ADISC */ lpfc_els_abort(phba, ndlp); ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; - lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE); - spin_lock_irq(phba->host->host_lock); + lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); + spin_lock_irq(shost->host_lock); ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); - ndlp->nlp_flag |= NLP_NPR_ADISC; - spin_unlock_irq(phba->host->host_lock); - + spin_unlock_irq(shost->host_lock); + lpfc_disc_set_adisc(vport, ndlp); return ndlp->nlp_state; } static uint32_t -lpfc_rcv_plogi_reglogin_issue(struct lpfc_hba * phba, - struct lpfc_nodelist * ndlp, void *arg, +lpfc_rcv_plogi_reglogin_issue(struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) { - struct lpfc_iocbq *cmdiocb; + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; - cmdiocb = (struct lpfc_iocbq *) arg; - - lpfc_rcv_plogi(phba, ndlp, cmdiocb); + lpfc_rcv_plogi(vport, ndlp, cmdiocb); return ndlp->nlp_state; } static uint32_t -lpfc_rcv_prli_reglogin_issue(struct lpfc_hba * phba, - struct lpfc_nodelist * ndlp, void *arg, +lpfc_rcv_prli_reglogin_issue(struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) { - struct lpfc_iocbq *cmdiocb; + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; - cmdiocb = (struct lpfc_iocbq *) arg; - - lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp); + lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp); return ndlp->nlp_state; } static uint32_t -lpfc_rcv_logo_reglogin_issue(struct lpfc_hba * phba, - struct lpfc_nodelist * ndlp, void *arg, +lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) { - struct lpfc_iocbq *cmdiocb; + struct lpfc_hba *phba = vport->phba; + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; LPFC_MBOXQ_t *mb; LPFC_MBOXQ_t *nextmb; struct lpfc_dmabuf *mp; @@ -1033,12 +1131,13 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_hba * phba, if ((mb = phba->sli.mbox_active)) { if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) && (ndlp == (struct lpfc_nodelist *) mb->context2)) { + lpfc_nlp_put(ndlp); mb->context2 = NULL; mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; } } - spin_lock_irq(phba->host->host_lock); + spin_lock_irq(&phba->hbalock); list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) && (ndlp == (struct lpfc_nodelist *) mb->context2)) { @@ -1047,61 +1146,61 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_hba * phba, lpfc_mbuf_free(phba, mp->virt, mp->phys); kfree(mp); } + lpfc_nlp_put(ndlp); list_del(&mb->list); mempool_free(mb, phba->mbox_mem_pool); } } - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(&phba->hbalock); - lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO); + lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO); return ndlp->nlp_state; } static uint32_t -lpfc_rcv_padisc_reglogin_issue(struct lpfc_hba * phba, - struct lpfc_nodelist * ndlp, void *arg, +lpfc_rcv_padisc_reglogin_issue(struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) { - struct lpfc_iocbq *cmdiocb; + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; - cmdiocb = (struct lpfc_iocbq *) arg; - - lpfc_rcv_padisc(phba, ndlp, cmdiocb); + lpfc_rcv_padisc(vport, ndlp, cmdiocb); return ndlp->nlp_state; } static uint32_t -lpfc_rcv_prlo_reglogin_issue(struct lpfc_hba * phba, - struct lpfc_nodelist * ndlp, void *arg, +lpfc_rcv_prlo_reglogin_issue(struct lpfc_vport *vport, + 
struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb; cmdiocb = (struct lpfc_iocbq *) arg; - lpfc_els_rsp_acc(phba, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0); + lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0); return ndlp->nlp_state; } static uint32_t -lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_hba * phba, - struct lpfc_nodelist * ndlp, - void *arg, uint32_t evt) +lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp, + void *arg, + uint32_t evt) { - LPFC_MBOXQ_t *pmb; - MAILBOX_t *mb; - uint32_t did; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_hba *phba = vport->phba; + LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg; + MAILBOX_t *mb = &pmb->mb; + uint32_t did = mb->un.varWords[1]; - pmb = (LPFC_MBOXQ_t *) arg; - mb = &pmb->mb; - did = mb->un.varWords[1]; if (mb->mbxStatus) { /* RegLogin failed */ - lpfc_printf_log(phba, - KERN_ERR, - LOG_DISCOVERY, - "%d:0246 RegLogin failed Data: x%x x%x x%x\n", - phba->brd_no, - did, mb->mbxStatus, phba->hba_state); + lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, + "%d (%d):0246 RegLogin failed Data: x%x x%x " + "x%x\n", + phba->brd_no, vport->vpi, + did, mb->mbxStatus, vport->port_state); /* * If RegLogin failed due to lack of HBA resources do not @@ -1109,20 +1208,20 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_hba * phba, */ if (mb->mbxStatus == MBXERR_RPI_FULL) { ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE; - lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNUSED_NODE); + lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE); return ndlp->nlp_state; } - /* Put ndlp in npr list set plogi timer for 1 sec */ + /* Put ndlp in npr state set plogi timer for 1 sec */ mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); - spin_lock_irq(phba->host->host_lock); + spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DELAY_TMO; - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(shost->host_lock); ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; - lpfc_issue_els_logo(phba, ndlp, 0); + lpfc_issue_els_logo(vport, ndlp, 0); ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; - lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE); + lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); return ndlp->nlp_state; } @@ -1131,91 +1230,99 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_hba * phba, /* Only if we are not a fabric nport do we issue PRLI */ if (!(ndlp->nlp_type & NLP_FABRIC)) { ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; - lpfc_nlp_set_state(phba, ndlp, NLP_STE_PRLI_ISSUE); - lpfc_issue_els_prli(phba, ndlp, 0); + lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); + lpfc_issue_els_prli(vport, ndlp, 0); } else { ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; - lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNMAPPED_NODE); + lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); } return ndlp->nlp_state; } static uint32_t -lpfc_device_rm_reglogin_issue(struct lpfc_hba * phba, - struct lpfc_nodelist * ndlp, void *arg, +lpfc_device_rm_reglogin_issue(struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) { - if(ndlp->nlp_flag & NLP_NPR_2B_DISC) { + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + + if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { + spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_NODEV_REMOVE; + spin_unlock_irq(shost->host_lock); return ndlp->nlp_state; - } - else { - lpfc_drop_node(phba, ndlp); + } else { + lpfc_drop_node(vport, ndlp); return NLP_STE_FREED_NODE; } } static uint32_t -lpfc_device_recov_reglogin_issue(struct lpfc_hba * 
phba, - struct lpfc_nodelist * ndlp, void *arg, - uint32_t evt) +lpfc_device_recov_reglogin_issue(struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp, + void *arg, + uint32_t evt) { + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + + /* Don't do anything that will mess up processing of the + * previous RSCN. + */ + if (vport->fc_flag & FC_RSCN_DEFERRED) + return ndlp->nlp_state; + ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; - lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE); - spin_lock_irq(phba->host->host_lock); + lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); + spin_lock_irq(shost->host_lock); ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(shost->host_lock); + lpfc_disc_set_adisc(vport, ndlp); return ndlp->nlp_state; } static uint32_t -lpfc_rcv_plogi_prli_issue(struct lpfc_hba * phba, - struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) +lpfc_rcv_plogi_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb; cmdiocb = (struct lpfc_iocbq *) arg; - lpfc_rcv_plogi(phba, ndlp, cmdiocb); + lpfc_rcv_plogi(vport, ndlp, cmdiocb); return ndlp->nlp_state; } static uint32_t -lpfc_rcv_prli_prli_issue(struct lpfc_hba * phba, - struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) +lpfc_rcv_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) { - struct lpfc_iocbq *cmdiocb; - - cmdiocb = (struct lpfc_iocbq *) arg; + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; - lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp); + lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp); return ndlp->nlp_state; } static uint32_t -lpfc_rcv_logo_prli_issue(struct lpfc_hba * phba, - struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) +lpfc_rcv_logo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) { - struct lpfc_iocbq *cmdiocb; - - cmdiocb = (struct lpfc_iocbq *) arg; + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; /* Software abort outstanding PRLI before sending acc */ - lpfc_els_abort(phba, ndlp); + lpfc_els_abort(vport->phba, ndlp); - lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO); + lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO); return ndlp->nlp_state; } static uint32_t -lpfc_rcv_padisc_prli_issue(struct lpfc_hba * phba, - struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) +lpfc_rcv_padisc_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) { - struct lpfc_iocbq *cmdiocb; - - cmdiocb = (struct lpfc_iocbq *) arg; + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; - lpfc_rcv_padisc(phba, ndlp, cmdiocb); + lpfc_rcv_padisc(vport, ndlp, cmdiocb); return ndlp->nlp_state; } @@ -1225,21 +1332,22 @@ lpfc_rcv_padisc_prli_issue(struct lpfc_hba * phba, * NEXT STATE = PRLI_ISSUE */ static uint32_t -lpfc_rcv_prlo_prli_issue(struct lpfc_hba * phba, - struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) +lpfc_rcv_prlo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) { - struct lpfc_iocbq *cmdiocb; + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; - cmdiocb = (struct lpfc_iocbq *) arg; - lpfc_els_rsp_acc(phba, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0); + lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0); return ndlp->nlp_state; } static uint32_t -lpfc_cmpl_prli_prli_issue(struct lpfc_hba * phba, - struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 
+lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) { + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_iocbq *cmdiocb, *rspiocb; + struct lpfc_hba *phba = vport->phba; IOCB_t *irsp; PRLI *npr; @@ -1249,8 +1357,12 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_hba * phba, irsp = &rspiocb->iocb; if (irsp->ulpStatus) { + if ((vport->port_type == LPFC_NPIV_PORT) && + phba->cfg_vport_restrict_login) { + goto out; + } ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE; - lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNMAPPED_NODE); + lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); return ndlp->nlp_state; } @@ -1266,319 +1378,329 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_hba * phba, if (npr->Retry) ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE; } + if (!(ndlp->nlp_type & NLP_FCP_TARGET) && + (vport->port_type == LPFC_NPIV_PORT) && + phba->cfg_vport_restrict_login) { +out: + spin_lock_irq(shost->host_lock); + ndlp->nlp_flag |= NLP_TARGET_REMOVE; + spin_unlock_irq(shost->host_lock); + lpfc_issue_els_logo(vport, ndlp, 0); + + ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE); + return ndlp->nlp_state; + } ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE; - lpfc_nlp_set_state(phba, ndlp, NLP_STE_MAPPED_NODE); + if (ndlp->nlp_type & NLP_FCP_TARGET) + lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE); + else + lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); return ndlp->nlp_state; } /*! lpfc_device_rm_prli_issue - * - * \pre - * \post - * \param phba - * \param ndlp - * \param arg - * \param evt - * \return uint32_t - * - * \b Description: - * This routine is envoked when we a request to remove a nport we are in the - * process of PRLIing. We should software abort outstanding prli, unreg - * login, send a logout. We will change node state to UNUSED_NODE, put it - * on plogi list so it can be freed when LOGO completes. - * - */ + * + * \pre + * \post + * \param phba + * \param ndlp + * \param arg + * \param evt + * \return uint32_t + * + * \b Description: + * This routine is envoked when we a request to remove a nport we are in the + * process of PRLIing. We should software abort outstanding prli, unreg + * login, send a logout. We will change node state to UNUSED_NODE, put it + * on plogi list so it can be freed when LOGO completes. + * + */ + static uint32_t -lpfc_device_rm_prli_issue(struct lpfc_hba * phba, - struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) +lpfc_device_rm_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) { - if(ndlp->nlp_flag & NLP_NPR_2B_DISC) { + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + + if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { + spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_NODEV_REMOVE; + spin_unlock_irq(shost->host_lock); return ndlp->nlp_state; - } - else { + } else { /* software abort outstanding PLOGI */ - lpfc_els_abort(phba, ndlp); + lpfc_els_abort(vport->phba, ndlp); - lpfc_drop_node(phba, ndlp); + lpfc_drop_node(vport, ndlp); return NLP_STE_FREED_NODE; } } /*! lpfc_device_recov_prli_issue - * - * \pre - * \post - * \param phba - * \param ndlp - * \param arg - * \param evt - * \return uint32_t - * - * \b Description: - * The routine is envoked when the state of a device is unknown, like - * during a link down. We should remove the nodelist entry from the - * unmapped list, issue a UNREG_LOGIN, do a software abort of the - * outstanding PRLI command, then free the node entry. 
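/*
 * Aside (illustration only): with NPIV, the PRLI completion above can now log
 * a node back out instead of keeping it around -- when the port is an NPIV
 * vport, vport_restrict_login is configured, and the peer did not turn out to
 * be an FCP target.  A compact model of that decision; the parameter and
 * state names are stand-ins for the driver's flags.
 */
#include <stdio.h>

enum sk_state { SK_UNUSED, SK_UNMAPPED, SK_MAPPED };

static enum sk_state sketch_prli_cmpl(int prli_ok, int is_fcp_target,
				      int is_npiv_port, int restrict_login,
				      int *send_logo)
{
	*send_logo = 0;
	if (is_npiv_port && restrict_login && !(prli_ok && is_fcp_target)) {
		*send_logo = 1;		/* lpfc_issue_els_logo() in the patch */
		return SK_UNUSED;
	}
	if (!prli_ok)
		return SK_UNMAPPED;	/* keep the login, but no FCP session */
	return is_fcp_target ? SK_MAPPED : SK_UNMAPPED;
}

int main(void)
{
	int logo;

	printf("npiv non-target -> %d logo=%d\n",
	       sketch_prli_cmpl(1, 0, 1, 1, &logo), logo);	/* UNUSED, 1 */
	printf("plain target    -> %d logo=%d\n",
	       sketch_prli_cmpl(1, 1, 0, 0, &logo), logo);	/* MAPPED, 0 */
	return 0;
}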
- */ + * + * \pre + * \post + * \param phba + * \param ndlp + * \param arg + * \param evt + * \return uint32_t + * + * \b Description: + * The routine is envoked when the state of a device is unknown, like + * during a link down. We should remove the nodelist entry from the + * unmapped list, issue a UNREG_LOGIN, do a software abort of the + * outstanding PRLI command, then free the node entry. + */ static uint32_t -lpfc_device_recov_prli_issue(struct lpfc_hba * phba, - struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) +lpfc_device_recov_prli_issue(struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp, + void *arg, + uint32_t evt) { + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_hba *phba = vport->phba; + + /* Don't do anything that will mess up processing of the + * previous RSCN. + */ + if (vport->fc_flag & FC_RSCN_DEFERRED) + return ndlp->nlp_state; + /* software abort outstanding PRLI */ lpfc_els_abort(phba, ndlp); ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE; - lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE); - spin_lock_irq(phba->host->host_lock); + lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); + spin_lock_irq(shost->host_lock); ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(shost->host_lock); + lpfc_disc_set_adisc(vport, ndlp); return ndlp->nlp_state; } static uint32_t -lpfc_rcv_plogi_unmap_node(struct lpfc_hba * phba, - struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) +lpfc_rcv_plogi_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) { - struct lpfc_iocbq *cmdiocb; - - cmdiocb = (struct lpfc_iocbq *) arg; + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; - lpfc_rcv_plogi(phba, ndlp, cmdiocb); + lpfc_rcv_plogi(vport, ndlp, cmdiocb); return ndlp->nlp_state; } static uint32_t -lpfc_rcv_prli_unmap_node(struct lpfc_hba * phba, - struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) +lpfc_rcv_prli_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) { - struct lpfc_iocbq *cmdiocb; + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; - cmdiocb = (struct lpfc_iocbq *) arg; - - lpfc_rcv_prli(phba, ndlp, cmdiocb); - lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp); + lpfc_rcv_prli(vport, ndlp, cmdiocb); + lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp); return ndlp->nlp_state; } static uint32_t -lpfc_rcv_logo_unmap_node(struct lpfc_hba * phba, - struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) +lpfc_rcv_logo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) { - struct lpfc_iocbq *cmdiocb; + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; - cmdiocb = (struct lpfc_iocbq *) arg; - - lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO); + lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO); return ndlp->nlp_state; } static uint32_t -lpfc_rcv_padisc_unmap_node(struct lpfc_hba * phba, - struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) +lpfc_rcv_padisc_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) { - struct lpfc_iocbq *cmdiocb; + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; - cmdiocb = (struct lpfc_iocbq *) arg; - - lpfc_rcv_padisc(phba, ndlp, cmdiocb); + lpfc_rcv_padisc(vport, ndlp, cmdiocb); return ndlp->nlp_state; } static uint32_t -lpfc_rcv_prlo_unmap_node(struct lpfc_hba * phba, - struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) +lpfc_rcv_prlo_unmap_node(struct lpfc_vport *vport, struct 
lpfc_nodelist *ndlp, + void *arg, uint32_t evt) { - struct lpfc_iocbq *cmdiocb; - - cmdiocb = (struct lpfc_iocbq *) arg; + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; - lpfc_els_rsp_acc(phba, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0); + lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0); return ndlp->nlp_state; } static uint32_t -lpfc_device_recov_unmap_node(struct lpfc_hba * phba, - struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) +lpfc_device_recov_unmap_node(struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp, + void *arg, + uint32_t evt) { + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + ndlp->nlp_prev_state = NLP_STE_UNMAPPED_NODE; - lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE); + lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); + spin_lock_irq(shost->host_lock); ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); - lpfc_disc_set_adisc(phba, ndlp); + spin_unlock_irq(shost->host_lock); + lpfc_disc_set_adisc(vport, ndlp); return ndlp->nlp_state; } static uint32_t -lpfc_rcv_plogi_mapped_node(struct lpfc_hba * phba, - struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) +lpfc_rcv_plogi_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) { - struct lpfc_iocbq *cmdiocb; - - cmdiocb = (struct lpfc_iocbq *) arg; + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; - lpfc_rcv_plogi(phba, ndlp, cmdiocb); + lpfc_rcv_plogi(vport, ndlp, cmdiocb); return ndlp->nlp_state; } static uint32_t -lpfc_rcv_prli_mapped_node(struct lpfc_hba * phba, - struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) +lpfc_rcv_prli_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) { - struct lpfc_iocbq *cmdiocb; + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; - cmdiocb = (struct lpfc_iocbq *) arg; - - lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp); + lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp); return ndlp->nlp_state; } static uint32_t -lpfc_rcv_logo_mapped_node(struct lpfc_hba * phba, - struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) +lpfc_rcv_logo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) { - struct lpfc_iocbq *cmdiocb; + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; - cmdiocb = (struct lpfc_iocbq *) arg; - - lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO); + lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO); return ndlp->nlp_state; } static uint32_t -lpfc_rcv_padisc_mapped_node(struct lpfc_hba * phba, - struct lpfc_nodelist * ndlp, void *arg, - uint32_t evt) +lpfc_rcv_padisc_mapped_node(struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) { - struct lpfc_iocbq *cmdiocb; - - cmdiocb = (struct lpfc_iocbq *) arg; + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; - lpfc_rcv_padisc(phba, ndlp, cmdiocb); + lpfc_rcv_padisc(vport, ndlp, cmdiocb); return ndlp->nlp_state; } static uint32_t -lpfc_rcv_prlo_mapped_node(struct lpfc_hba * phba, - struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) +lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) { - struct lpfc_iocbq *cmdiocb; - - cmdiocb = (struct lpfc_iocbq *) arg; + struct lpfc_hba *phba = vport->phba; + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; /* flush the target */ - spin_lock_irq(phba->host->host_lock); lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring], - ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT); - spin_unlock_irq(phba->host->host_lock); + 
ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT); /* Treat like rcv logo */ - lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_PRLO); + lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO); return ndlp->nlp_state; } static uint32_t -lpfc_device_recov_mapped_node(struct lpfc_hba * phba, - struct lpfc_nodelist * ndlp, void *arg, - uint32_t evt) +lpfc_device_recov_mapped_node(struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp, + void *arg, + uint32_t evt) { + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + ndlp->nlp_prev_state = NLP_STE_MAPPED_NODE; - lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE); - spin_lock_irq(phba->host->host_lock); + lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); + spin_lock_irq(shost->host_lock); ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); - spin_unlock_irq(phba->host->host_lock); - lpfc_disc_set_adisc(phba, ndlp); + spin_unlock_irq(shost->host_lock); + lpfc_disc_set_adisc(vport, ndlp); return ndlp->nlp_state; } static uint32_t -lpfc_rcv_plogi_npr_node(struct lpfc_hba * phba, - struct lpfc_nodelist * ndlp, void *arg, - uint32_t evt) +lpfc_rcv_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) { - struct lpfc_iocbq *cmdiocb; - - cmdiocb = (struct lpfc_iocbq *) arg; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; /* Ignore PLOGI if we have an outstanding LOGO */ - if (ndlp->nlp_flag & NLP_LOGO_SND) { + if (ndlp->nlp_flag & (NLP_LOGO_SND | NLP_LOGO_ACC)) { return ndlp->nlp_state; } - if (lpfc_rcv_plogi(phba, ndlp, cmdiocb)) { - spin_lock_irq(phba->host->host_lock); + if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) { + spin_lock_irq(shost->host_lock); ndlp->nlp_flag &= ~NLP_NPR_ADISC; - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(shost->host_lock); return ndlp->nlp_state; } /* send PLOGI immediately, move to PLOGI issue state */ if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { ndlp->nlp_prev_state = NLP_STE_NPR_NODE; - lpfc_nlp_set_state(phba, ndlp, NLP_STE_PLOGI_ISSUE); - lpfc_issue_els_plogi(phba, ndlp->nlp_DID, 0); + lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); + lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); } return ndlp->nlp_state; } static uint32_t -lpfc_rcv_prli_npr_node(struct lpfc_hba * phba, - struct lpfc_nodelist * ndlp, void *arg, - uint32_t evt) +lpfc_rcv_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) { - struct lpfc_iocbq *cmdiocb; - struct ls_rjt stat; - - cmdiocb = (struct lpfc_iocbq *) arg; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; + struct ls_rjt stat; memset(&stat, 0, sizeof (struct ls_rjt)); stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; - lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp); + lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { if (ndlp->nlp_flag & NLP_NPR_ADISC) { - spin_lock_irq(phba->host->host_lock); + spin_lock_irq(shost->host_lock); ndlp->nlp_flag &= ~NLP_NPR_ADISC; - spin_unlock_irq(phba->host->host_lock); ndlp->nlp_prev_state = NLP_STE_NPR_NODE; - lpfc_nlp_set_state(phba, ndlp, NLP_STE_ADISC_ISSUE); - lpfc_issue_els_adisc(phba, ndlp, 0); + spin_unlock_irq(shost->host_lock); + lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); + lpfc_issue_els_adisc(vport, ndlp, 0); } else { ndlp->nlp_prev_state = NLP_STE_NPR_NODE; - lpfc_nlp_set_state(phba, ndlp, 
NLP_STE_PLOGI_ISSUE); - lpfc_issue_els_plogi(phba, ndlp->nlp_DID, 0); + lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); + lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); } } return ndlp->nlp_state; } static uint32_t -lpfc_rcv_logo_npr_node(struct lpfc_hba * phba, - struct lpfc_nodelist * ndlp, void *arg, - uint32_t evt) +lpfc_rcv_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) { - struct lpfc_iocbq *cmdiocb; + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; - cmdiocb = (struct lpfc_iocbq *) arg; - - lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO); + lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO); return ndlp->nlp_state; } static uint32_t -lpfc_rcv_padisc_npr_node(struct lpfc_hba * phba, - struct lpfc_nodelist * ndlp, void *arg, - uint32_t evt) +lpfc_rcv_padisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) { - struct lpfc_iocbq *cmdiocb; - - cmdiocb = (struct lpfc_iocbq *) arg; + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; - lpfc_rcv_padisc(phba, ndlp, cmdiocb); + lpfc_rcv_padisc(vport, ndlp, cmdiocb); /* * Do not start discovery if discovery is about to start @@ -1586,53 +1708,52 @@ lpfc_rcv_padisc_npr_node(struct lpfc_hba * phba, * here will affect the counting of discovery threads. */ if (!(ndlp->nlp_flag & NLP_DELAY_TMO) && - !(ndlp->nlp_flag & NLP_NPR_2B_DISC)){ + !(ndlp->nlp_flag & NLP_NPR_2B_DISC)) { if (ndlp->nlp_flag & NLP_NPR_ADISC) { + ndlp->nlp_flag &= ~NLP_NPR_ADISC; ndlp->nlp_prev_state = NLP_STE_NPR_NODE; - lpfc_nlp_set_state(phba, ndlp, NLP_STE_ADISC_ISSUE); - lpfc_issue_els_adisc(phba, ndlp, 0); + lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); + lpfc_issue_els_adisc(vport, ndlp, 0); } else { ndlp->nlp_prev_state = NLP_STE_NPR_NODE; - lpfc_nlp_set_state(phba, ndlp, NLP_STE_PLOGI_ISSUE); - lpfc_issue_els_plogi(phba, ndlp->nlp_DID, 0); + lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); + lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); } } return ndlp->nlp_state; } static uint32_t -lpfc_rcv_prlo_npr_node(struct lpfc_hba * phba, - struct lpfc_nodelist * ndlp, void *arg, - uint32_t evt) +lpfc_rcv_prlo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) { - struct lpfc_iocbq *cmdiocb; - - cmdiocb = (struct lpfc_iocbq *) arg; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; - spin_lock_irq(phba->host->host_lock); + spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_LOGO_ACC; - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(shost->host_lock); - lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0); + lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0); - if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { + if ((ndlp->nlp_flag & NLP_DELAY_TMO) == 0) { mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); - spin_lock_irq(phba->host->host_lock); + spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DELAY_TMO; ndlp->nlp_flag &= ~NLP_NPR_ADISC; - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(shost->host_lock); ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; } else { - spin_lock_irq(phba->host->host_lock); + spin_lock_irq(shost->host_lock); ndlp->nlp_flag &= ~NLP_NPR_ADISC; - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(shost->host_lock); } return ndlp->nlp_state; } static uint32_t -lpfc_cmpl_plogi_npr_node(struct lpfc_hba * phba, - struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) +lpfc_cmpl_plogi_npr_node(struct 
lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb, *rspiocb; IOCB_t *irsp; @@ -1642,15 +1763,15 @@ lpfc_cmpl_plogi_npr_node(struct lpfc_hba * phba, irsp = &rspiocb->iocb; if (irsp->ulpStatus) { - lpfc_drop_node(phba, ndlp); + lpfc_drop_node(vport, ndlp); return NLP_STE_FREED_NODE; } return ndlp->nlp_state; } static uint32_t -lpfc_cmpl_prli_npr_node(struct lpfc_hba * phba, - struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) +lpfc_cmpl_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb, *rspiocb; IOCB_t *irsp; @@ -1660,25 +1781,24 @@ lpfc_cmpl_prli_npr_node(struct lpfc_hba * phba, irsp = &rspiocb->iocb; if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) { - lpfc_drop_node(phba, ndlp); + lpfc_drop_node(vport, ndlp); return NLP_STE_FREED_NODE; } return ndlp->nlp_state; } static uint32_t -lpfc_cmpl_logo_npr_node(struct lpfc_hba * phba, - struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) +lpfc_cmpl_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) { - lpfc_unreg_rpi(phba, ndlp); + lpfc_unreg_rpi(vport, ndlp); /* This routine does nothing, just return the current state */ return ndlp->nlp_state; } static uint32_t -lpfc_cmpl_adisc_npr_node(struct lpfc_hba * phba, - struct lpfc_nodelist * ndlp, void *arg, - uint32_t evt) +lpfc_cmpl_adisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb, *rspiocb; IOCB_t *irsp; @@ -1688,28 +1808,25 @@ lpfc_cmpl_adisc_npr_node(struct lpfc_hba * phba, irsp = &rspiocb->iocb; if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) { - lpfc_drop_node(phba, ndlp); + lpfc_drop_node(vport, ndlp); return NLP_STE_FREED_NODE; } return ndlp->nlp_state; } static uint32_t -lpfc_cmpl_reglogin_npr_node(struct lpfc_hba * phba, - struct lpfc_nodelist * ndlp, void *arg, - uint32_t evt) +lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) { - LPFC_MBOXQ_t *pmb; - MAILBOX_t *mb; - - pmb = (LPFC_MBOXQ_t *) arg; - mb = &pmb->mb; + LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg; + MAILBOX_t *mb = &pmb->mb; if (!mb->mbxStatus) ndlp->nlp_rpi = mb->un.varWords[0]; else { if (ndlp->nlp_flag & NLP_NODEV_REMOVE) { - lpfc_drop_node(phba, ndlp); + lpfc_drop_node(vport, ndlp); return NLP_STE_FREED_NODE; } } @@ -1717,28 +1834,38 @@ lpfc_cmpl_reglogin_npr_node(struct lpfc_hba * phba, } static uint32_t -lpfc_device_rm_npr_node(struct lpfc_hba * phba, - struct lpfc_nodelist * ndlp, void *arg, - uint32_t evt) +lpfc_device_rm_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) { + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { + spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_NODEV_REMOVE; + spin_unlock_irq(shost->host_lock); return ndlp->nlp_state; } - lpfc_drop_node(phba, ndlp); + lpfc_drop_node(vport, ndlp); return NLP_STE_FREED_NODE; } static uint32_t -lpfc_device_recov_npr_node(struct lpfc_hba * phba, - struct lpfc_nodelist * ndlp, void *arg, - uint32_t evt) +lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) { - spin_lock_irq(phba->host->host_lock); + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + + /* Don't do anything that will mess up processing of the + * previous RSCN. 
+ */ + if (vport->fc_flag & FC_RSCN_DEFERRED) + return ndlp->nlp_state; + + spin_lock_irq(shost->host_lock); ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(shost->host_lock); if (ndlp->nlp_flag & NLP_DELAY_TMO) { - lpfc_cancel_retry_delay_tmo(phba, ndlp); + lpfc_cancel_retry_delay_tmo(vport, ndlp); } return ndlp->nlp_state; } @@ -1801,7 +1928,7 @@ lpfc_device_recov_npr_node(struct lpfc_hba * phba, */ static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT]) - (struct lpfc_hba *, struct lpfc_nodelist *, void *, uint32_t) = { + (struct lpfc_vport *, struct lpfc_nodelist *, void *, uint32_t) = { /* Action routine Event Current State */ lpfc_rcv_plogi_unused_node, /* RCV_PLOGI UNUSED_NODE */ lpfc_rcv_els_unused_node, /* RCV_PRLI */ @@ -1818,7 +1945,7 @@ static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT]) lpfc_disc_illegal, /* DEVICE_RECOVERY */ lpfc_rcv_plogi_plogi_issue, /* RCV_PLOGI PLOGI_ISSUE */ - lpfc_rcv_els_plogi_issue, /* RCV_PRLI */ + lpfc_rcv_prli_plogi_issue, /* RCV_PRLI */ lpfc_rcv_logo_plogi_issue, /* RCV_LOGO */ lpfc_rcv_els_plogi_issue, /* RCV_ADISC */ lpfc_rcv_els_plogi_issue, /* RCV_PDISC */ @@ -1917,35 +2044,41 @@ static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT]) }; int -lpfc_disc_state_machine(struct lpfc_hba * phba, - struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) +lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + void *arg, uint32_t evt) { + struct lpfc_hba *phba = vport->phba; uint32_t cur_state, rc; - uint32_t(*func) (struct lpfc_hba *, struct lpfc_nodelist *, void *, + uint32_t(*func) (struct lpfc_vport *, struct lpfc_nodelist *, void *, uint32_t); lpfc_nlp_get(ndlp); cur_state = ndlp->nlp_state; /* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */ - lpfc_printf_log(phba, - KERN_INFO, - LOG_DISCOVERY, - "%d:0211 DSM in event x%x on NPort x%x in state %d " - "Data: x%x\n", - phba->brd_no, + lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, + "%d (%d):0211 DSM in event x%x on NPort x%x in " + "state %d Data: x%x\n", + phba->brd_no, vport->vpi, evt, ndlp->nlp_DID, cur_state, ndlp->nlp_flag); + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM, + "DSM in: evt:%d ste:%d did:x%x", + evt, cur_state, ndlp->nlp_DID); + func = lpfc_disc_action[(cur_state * NLP_EVT_MAX_EVENT) + evt]; - rc = (func) (phba, ndlp, arg, evt); + rc = (func) (vport, ndlp, arg, evt); /* DSM out state <rc> on NPort <nlp_DID> */ - lpfc_printf_log(phba, - KERN_INFO, - LOG_DISCOVERY, - "%d:0212 DSM out state %d on NPort x%x Data: x%x\n", - phba->brd_no, - rc, ndlp->nlp_DID, ndlp->nlp_flag); + lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, + "%d (%d):0212 DSM out state %d on NPort x%x " + "Data: x%x\n", + phba->brd_no, vport->vpi, + rc, ndlp->nlp_DID, ndlp->nlp_flag); + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM, + "DSM out: ste:%d did:x%x flg:x%x", + rc, ndlp->nlp_DID, ndlp->nlp_flag); lpfc_nlp_put(ndlp); diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 9a12d05e99e4..8f45bbc42126 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -37,10 +37,158 @@ #include "lpfc.h" #include "lpfc_logmsg.h" #include "lpfc_crtn.h" +#include "lpfc_vport.h" #define LPFC_RESET_WAIT 2 #define LPFC_ABORT_WAIT 2 +/* + * This function is called with no lock held when there is a resource + * error in driver or in firmware. 
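/*
 * Aside (illustration only): lpfc_disc_state_machine() above dispatches
 * through a flat array of function pointers indexed as
 * state * NLP_EVT_MAX_EVENT + event, with every handler now taking a vport
 * instead of the hba.  A self-contained miniature of that table-driven state
 * machine with made-up states and events, just to show the indexing scheme.
 */
#include <stdio.h>
#include <stdint.h>

#define SK_STE_MAX 2
#define SK_EVT_MAX 2

struct sk_node { uint32_t state; };

static uint32_t sk_ignore(struct sk_node *n, uint32_t evt)
{ (void)evt; return n->state; }		/* keep current state */
static uint32_t sk_goto_1(struct sk_node *n, uint32_t evt)
{ (void)n; (void)evt; return 1; }	/* transition to state 1 */

static uint32_t (*sk_actions[SK_STE_MAX * SK_EVT_MAX])(struct sk_node *, uint32_t) = {
	/* event 0   event 1      current state */
	sk_goto_1,   sk_ignore,	/* state 0 */
	sk_ignore,   sk_ignore,	/* state 1 */
};

static uint32_t sk_state_machine(struct sk_node *n, uint32_t evt)
{
	uint32_t cur = n->state;
	/* In the driver the handlers set the new state themselves; the
	 * sketch just stores the returned value. */
	uint32_t rc = sk_actions[cur * SK_EVT_MAX + evt](n, evt);

	n->state = rc;
	return rc;
}

int main(void)
{
	struct sk_node n = { .state = 0 };

	printf("evt0 in ste0 -> ste%u\n", (unsigned int)sk_state_machine(&n, 0)); /* 1 */
	printf("evt1 in ste1 -> ste%u\n", (unsigned int)sk_state_machine(&n, 1)); /* 1 */
	return 0;
}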
+ */ +void +lpfc_adjust_queue_depth(struct lpfc_hba *phba) +{ + unsigned long flags; + + spin_lock_irqsave(&phba->hbalock, flags); + atomic_inc(&phba->num_rsrc_err); + phba->last_rsrc_error_time = jiffies; + + if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) { + spin_unlock_irqrestore(&phba->hbalock, flags); + return; + } + + phba->last_ramp_down_time = jiffies; + + spin_unlock_irqrestore(&phba->hbalock, flags); + + spin_lock_irqsave(&phba->pport->work_port_lock, flags); + if ((phba->pport->work_port_events & + WORKER_RAMP_DOWN_QUEUE) == 0) { + phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE; + } + spin_unlock_irqrestore(&phba->pport->work_port_lock, flags); + + spin_lock_irqsave(&phba->hbalock, flags); + if (phba->work_wait) + wake_up(phba->work_wait); + spin_unlock_irqrestore(&phba->hbalock, flags); + + return; +} + +/* + * This function is called with no lock held when there is a successful + * SCSI command completion. + */ +static inline void +lpfc_rampup_queue_depth(struct lpfc_hba *phba, + struct scsi_device *sdev) +{ + unsigned long flags; + atomic_inc(&phba->num_cmd_success); + + if (phba->cfg_lun_queue_depth <= sdev->queue_depth) + return; + + spin_lock_irqsave(&phba->hbalock, flags); + if (((phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) > jiffies) || + ((phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL ) > jiffies)) { + spin_unlock_irqrestore(&phba->hbalock, flags); + return; + } + + phba->last_ramp_up_time = jiffies; + spin_unlock_irqrestore(&phba->hbalock, flags); + + spin_lock_irqsave(&phba->pport->work_port_lock, flags); + if ((phba->pport->work_port_events & + WORKER_RAMP_UP_QUEUE) == 0) { + phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE; + } + spin_unlock_irqrestore(&phba->pport->work_port_lock, flags); + + spin_lock_irqsave(&phba->hbalock, flags); + if (phba->work_wait) + wake_up(phba->work_wait); + spin_unlock_irqrestore(&phba->hbalock, flags); +} + +void +lpfc_ramp_down_queue_handler(struct lpfc_hba *phba) +{ + struct lpfc_vport *vport; + struct Scsi_Host *host; + struct scsi_device *sdev; + unsigned long new_queue_depth; + unsigned long num_rsrc_err, num_cmd_success; + + num_rsrc_err = atomic_read(&phba->num_rsrc_err); + num_cmd_success = atomic_read(&phba->num_cmd_success); + + spin_lock_irq(&phba->hbalock); + list_for_each_entry(vport, &phba->port_list, listentry) { + host = lpfc_shost_from_vport(vport); + if (!scsi_host_get(host)) + continue; + + spin_unlock_irq(&phba->hbalock); + + shost_for_each_device(sdev, host) { + new_queue_depth = sdev->queue_depth * num_rsrc_err / + (num_rsrc_err + num_cmd_success); + if (!new_queue_depth) + new_queue_depth = sdev->queue_depth - 1; + else + new_queue_depth = + sdev->queue_depth - new_queue_depth; + + if (sdev->ordered_tags) + scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, + new_queue_depth); + else + scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, + new_queue_depth); + } + spin_lock_irq(&phba->hbalock); + scsi_host_put(host); + } + spin_unlock_irq(&phba->hbalock); + atomic_set(&phba->num_rsrc_err, 0); + atomic_set(&phba->num_cmd_success, 0); +} + +void +lpfc_ramp_up_queue_handler(struct lpfc_hba *phba) +{ + struct lpfc_vport *vport; + struct Scsi_Host *host; + struct scsi_device *sdev; + + spin_lock_irq(&phba->hbalock); + list_for_each_entry(vport, &phba->port_list, listentry) { + host = lpfc_shost_from_vport(vport); + if (!scsi_host_get(host)) + continue; + + spin_unlock_irq(&phba->hbalock); + shost_for_each_device(sdev, host) { + if (sdev->ordered_tags) + 
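/*
 * Aside (illustration only): lpfc_ramp_down_queue_handler() above shrinks
 * each LUN queue depth in proportion to how many commands hit a resource
 * error versus how many succeeded since the last adjustment, and never by
 * less than one.  The arithmetic pulled out into a stand-alone helper so it
 * can be eyeballed; names are stand-ins.
 */
#include <stdio.h>

static unsigned long sketch_ramp_down_depth(unsigned long cur_depth,
					    unsigned long num_rsrc_err,
					    unsigned long num_cmd_success)
{
	unsigned long cut;

	/* Extra guards for the stand-alone sketch; the driver only runs this
	 * handler after a resource error has actually been counted. */
	if (cur_depth == 0 || num_rsrc_err == 0)
		return cur_depth;

	/* Proportional share of the current depth that ran into errors. */
	cut = cur_depth * num_rsrc_err / (num_rsrc_err + num_cmd_success);
	if (cut == 0)
		cut = 1;		/* the patch falls back to depth - 1 */
	return cur_depth - cut;
}

int main(void)
{
	/* 30-deep queue, 5 resource errors vs 95 successes -> cut of 1. */
	printf("%lu\n", sketch_ramp_down_depth(30, 5, 95));	/* 29 */
	/* Mostly errors -> cut most of the queue. */
	printf("%lu\n", sketch_ramp_down_depth(30, 80, 20));	/* 6 */
	return 0;
}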
scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, + sdev->queue_depth+1); + else + scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, + sdev->queue_depth+1); + } + spin_lock_irq(&phba->hbalock); + scsi_host_put(host); + } + spin_unlock_irq(&phba->hbalock); + atomic_set(&phba->num_rsrc_err, 0); + atomic_set(&phba->num_cmd_success, 0); +} /* * This routine allocates a scsi buffer, which contains all the necessary @@ -51,8 +199,9 @@ * and the BPL BDE is setup in the IOCB. */ static struct lpfc_scsi_buf * -lpfc_new_scsi_buf(struct lpfc_hba * phba) +lpfc_new_scsi_buf(struct lpfc_vport *vport) { + struct lpfc_hba *phba = vport->phba; struct lpfc_scsi_buf *psb; struct ulp_bde64 *bpl; IOCB_t *iocb; @@ -63,7 +212,6 @@ lpfc_new_scsi_buf(struct lpfc_hba * phba) if (!psb) return NULL; memset(psb, 0, sizeof (struct lpfc_scsi_buf)); - psb->scsi_hba = phba; /* * Get memory from the pci pool to map the virt space to pci bus space @@ -155,7 +303,7 @@ lpfc_get_scsi_buf(struct lpfc_hba * phba) } static void -lpfc_release_scsi_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb) +lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) { unsigned long iflag = 0; @@ -166,7 +314,7 @@ lpfc_release_scsi_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb) } static int -lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd) +lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) { struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; struct scatterlist *sgel = NULL; @@ -175,8 +323,7 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd) IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; dma_addr_t physaddr; uint32_t i, num_bde = 0; - int datadir = scsi_cmnd->sc_data_direction; - int dma_error; + int nseg, datadir = scsi_cmnd->sc_data_direction; /* * There are three possibilities here - use scatter-gather segment, use @@ -185,26 +332,26 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd) * data bde entry. */ bpl += 2; - if (scsi_cmnd->use_sg) { + if (scsi_sg_count(scsi_cmnd)) { /* * The driver stores the segment count returned from pci_map_sg * because this a count of dma-mappings used to map the use_sg * pages. They are not guaranteed to be the same for those * architectures that implement an IOMMU. */ - sgel = (struct scatterlist *)scsi_cmnd->request_buffer; - lpfc_cmd->seg_cnt = dma_map_sg(&phba->pcidev->dev, sgel, - scsi_cmnd->use_sg, datadir); - if (lpfc_cmd->seg_cnt == 0) + + nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd), + scsi_sg_count(scsi_cmnd), datadir); + if (unlikely(!nseg)) return 1; + lpfc_cmd->seg_cnt = nseg; if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { printk(KERN_ERR "%s: Too many sg segments from " "dma_map_sg. Config %d, seg_cnt %d", __FUNCTION__, phba->cfg_sg_seg_cnt, lpfc_cmd->seg_cnt); - dma_unmap_sg(&phba->pcidev->dev, sgel, - lpfc_cmd->seg_cnt, datadir); + scsi_dma_unmap(scsi_cmnd); return 1; } @@ -214,7 +361,7 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd) * single scsi command. Just run through the seg_cnt and format * the bde's. 
*/ - for (i = 0; i < lpfc_cmd->seg_cnt; i++) { + scsi_for_each_sg(scsi_cmnd, sgel, nseg, i) { physaddr = sg_dma_address(sgel); bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr)); bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr)); @@ -225,34 +372,8 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd) bpl->tus.f.bdeFlags = BUFF_USE_RCV; bpl->tus.w = le32_to_cpu(bpl->tus.w); bpl++; - sgel++; num_bde++; } - } else if (scsi_cmnd->request_buffer && scsi_cmnd->request_bufflen) { - physaddr = dma_map_single(&phba->pcidev->dev, - scsi_cmnd->request_buffer, - scsi_cmnd->request_bufflen, - datadir); - dma_error = dma_mapping_error(physaddr); - if (dma_error) { - lpfc_printf_log(phba, KERN_ERR, LOG_FCP, - "%d:0718 Unable to dma_map_single " - "request_buffer: x%x\n", - phba->brd_no, dma_error); - return 1; - } - - lpfc_cmd->nonsg_phys = physaddr; - bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr)); - bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr)); - bpl->tus.f.bdeSize = scsi_cmnd->request_bufflen; - if (datadir == DMA_TO_DEVICE) - bpl->tus.f.bdeFlags = 0; - else - bpl->tus.f.bdeFlags = BUFF_USE_RCV; - bpl->tus.w = le32_to_cpu(bpl->tus.w); - num_bde = 1; - bpl++; } /* @@ -266,7 +387,7 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd) (num_bde * sizeof (struct ulp_bde64)); iocb_cmd->ulpBdeCount = 1; iocb_cmd->ulpLe = 1; - fcp_cmnd->fcpDl = be32_to_cpu(scsi_cmnd->request_bufflen); + fcp_cmnd->fcpDl = be32_to_cpu(scsi_bufflen(scsi_cmnd)); return 0; } @@ -279,26 +400,20 @@ lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb) * a request buffer, but did not request use_sg. There is a third * case, but it does not require resource deallocation. */ - if ((psb->seg_cnt > 0) && (psb->pCmd->use_sg)) { - dma_unmap_sg(&phba->pcidev->dev, psb->pCmd->request_buffer, - psb->seg_cnt, psb->pCmd->sc_data_direction); - } else { - if ((psb->nonsg_phys) && (psb->pCmd->request_bufflen)) { - dma_unmap_single(&phba->pcidev->dev, psb->nonsg_phys, - psb->pCmd->request_bufflen, - psb->pCmd->sc_data_direction); - } - } + if (psb->seg_cnt > 0) + scsi_dma_unmap(psb->pCmd); } static void -lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) +lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, + struct lpfc_iocbq *rsp_iocb) { struct scsi_cmnd *cmnd = lpfc_cmd->pCmd; struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd; struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp; - struct lpfc_hba *phba = lpfc_cmd->scsi_hba; + struct lpfc_hba *phba = vport->phba; uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm; + uint32_t vpi = vport->vpi; uint32_t resp_info = fcprsp->rspStatus2; uint32_t scsi_status = fcprsp->rspStatus3; uint32_t *lp; @@ -331,9 +446,9 @@ lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) logit = LOG_FCP; lpfc_printf_log(phba, KERN_WARNING, logit, - "%d:0730 FCP command x%x failed: x%x SNS x%x x%x " + "%d (%d):0730 FCP command x%x failed: x%x SNS x%x x%x " "Data: x%x x%x x%x x%x x%x\n", - phba->brd_no, cmnd->cmnd[0], scsi_status, + phba->brd_no, vpi, cmnd->cmnd[0], scsi_status, be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info, be32_to_cpu(fcprsp->rspResId), be32_to_cpu(fcprsp->rspSnsLen), @@ -349,15 +464,16 @@ lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) } } - cmnd->resid = 0; + scsi_set_resid(cmnd, 0); if (resp_info & RESID_UNDER) { - cmnd->resid = be32_to_cpu(fcprsp->rspResId); + scsi_set_resid(cmnd, 
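/*
 * Aside (illustration only): the hunk above is part of the tree-wide move to
 * the scsi data buffer accessors -- every command is now treated as a scatter
 * list (scsi_sglist/scsi_sg_count/scsi_for_each_sg) and the old single-buffer
 * branch disappears.  A user-space model of what the mapping loop produces:
 * one 64-bit-address descriptor per segment, split into high/low words the
 * way putPaddrHigh/putPaddrLow do.
 */
#include <stdint.h>
#include <stdio.h>

struct sk_sg  { uint64_t dma_addr; uint32_t len; };
struct sk_bde { uint32_t addr_lo, addr_hi, size; };

static unsigned int sketch_build_bdes(const struct sk_sg *sgl, unsigned int nseg,
				      struct sk_bde *bpl, unsigned int max_bde,
				      unsigned int *total_len)
{
	unsigned int i, num_bde = 0;

	*total_len = 0;
	for (i = 0; i < nseg && num_bde < max_bde; i++) {
		bpl[num_bde].addr_lo = (uint32_t)(sgl[i].dma_addr & 0xffffffffu);
		bpl[num_bde].addr_hi = (uint32_t)(sgl[i].dma_addr >> 32);
		bpl[num_bde].size    = sgl[i].len;
		*total_len += sgl[i].len;
		num_bde++;
	}
	return num_bde;
}

int main(void)
{
	struct sk_sg sgl[2] = { { 0x100000000ull, 4096 }, { 0x2000ull, 512 } };
	struct sk_bde bpl[4];
	unsigned int dl;
	unsigned int n = sketch_build_bdes(sgl, 2, bpl, 4, &dl);

	printf("bdes=%u total=%u first_hi=0x%x first_lo=0x%x\n",
	       n, dl, (unsigned int)bpl[0].addr_hi, (unsigned int)bpl[0].addr_lo);
	return 0;	/* prints: bdes=2 total=4608 first_hi=0x1 first_lo=0x0 */
}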
be32_to_cpu(fcprsp->rspResId)); lpfc_printf_log(phba, KERN_INFO, LOG_FCP, - "%d:0716 FCP Read Underrun, expected %d, " - "residual %d Data: x%x x%x x%x\n", phba->brd_no, - be32_to_cpu(fcpcmd->fcpDl), cmnd->resid, - fcpi_parm, cmnd->cmnd[0], cmnd->underflow); + "%d (%d):0716 FCP Read Underrun, expected %d, " + "residual %d Data: x%x x%x x%x\n", + phba->brd_no, vpi, be32_to_cpu(fcpcmd->fcpDl), + scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0], + cmnd->underflow); /* * If there is an under run check if under run reported by @@ -366,15 +482,16 @@ lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) */ if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) && fcpi_parm && - (cmnd->resid != fcpi_parm)) { + (scsi_get_resid(cmnd) != fcpi_parm)) { lpfc_printf_log(phba, KERN_WARNING, - LOG_FCP | LOG_FCP_ERROR, - "%d:0735 FCP Read Check Error and Underrun " - "Data: x%x x%x x%x x%x\n", phba->brd_no, - be32_to_cpu(fcpcmd->fcpDl), - cmnd->resid, - fcpi_parm, cmnd->cmnd[0]); - cmnd->resid = cmnd->request_bufflen; + LOG_FCP | LOG_FCP_ERROR, + "%d (%d):0735 FCP Read Check Error " + "and Underrun Data: x%x x%x x%x x%x\n", + phba->brd_no, vpi, + be32_to_cpu(fcpcmd->fcpDl), + scsi_get_resid(cmnd), fcpi_parm, + cmnd->cmnd[0]); + scsi_set_resid(cmnd, scsi_bufflen(cmnd)); host_status = DID_ERROR; } /* @@ -385,22 +502,23 @@ lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) */ if (!(resp_info & SNS_LEN_VALID) && (scsi_status == SAM_STAT_GOOD) && - (cmnd->request_bufflen - cmnd->resid) < cmnd->underflow) { + (scsi_bufflen(cmnd) - scsi_get_resid(cmnd) + < cmnd->underflow)) { lpfc_printf_log(phba, KERN_INFO, LOG_FCP, - "%d:0717 FCP command x%x residual " + "%d (%d):0717 FCP command x%x residual " "underrun converted to error " - "Data: x%x x%x x%x\n", phba->brd_no, - cmnd->cmnd[0], cmnd->request_bufflen, - cmnd->resid, cmnd->underflow); - + "Data: x%x x%x x%x\n", + phba->brd_no, vpi, cmnd->cmnd[0], + scsi_bufflen(cmnd), + scsi_get_resid(cmnd), cmnd->underflow); host_status = DID_ERROR; } } else if (resp_info & RESID_OVER) { lpfc_printf_log(phba, KERN_WARNING, LOG_FCP, - "%d:0720 FCP command x%x residual " + "%d (%d):0720 FCP command x%x residual " "overrun error. 
Data: x%x x%x \n", - phba->brd_no, cmnd->cmnd[0], - cmnd->request_bufflen, cmnd->resid); + phba->brd_no, vpi, cmnd->cmnd[0], + scsi_bufflen(cmnd), scsi_get_resid(cmnd)); host_status = DID_ERROR; /* @@ -410,13 +528,14 @@ lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) } else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm && (cmnd->sc_data_direction == DMA_FROM_DEVICE)) { lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR, - "%d:0734 FCP Read Check Error Data: " - "x%x x%x x%x x%x\n", phba->brd_no, - be32_to_cpu(fcpcmd->fcpDl), - be32_to_cpu(fcprsp->rspResId), - fcpi_parm, cmnd->cmnd[0]); + "%d (%d):0734 FCP Read Check Error Data: " + "x%x x%x x%x x%x\n", + phba->brd_no, vpi, + be32_to_cpu(fcpcmd->fcpDl), + be32_to_cpu(fcprsp->rspResId), + fcpi_parm, cmnd->cmnd[0]); host_status = DID_ERROR; - cmnd->resid = cmnd->request_bufflen; + scsi_set_resid(cmnd, scsi_bufflen(cmnd)); } out: @@ -429,9 +548,13 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, { struct lpfc_scsi_buf *lpfc_cmd = (struct lpfc_scsi_buf *) pIocbIn->context1; + struct lpfc_vport *vport = pIocbIn->vport; struct lpfc_rport_data *rdata = lpfc_cmd->rdata; struct lpfc_nodelist *pnode = rdata->pnode; struct scsi_cmnd *cmd = lpfc_cmd->pCmd; + uint32_t vpi = (lpfc_cmd->cur_iocbq.vport + ? lpfc_cmd->cur_iocbq.vport->vpi + : 0); int result; struct scsi_device *sdev, *tmp_sdev; int depth = 0; @@ -447,22 +570,31 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, lpfc_cmd->status = IOSTAT_DEFAULT; lpfc_printf_log(phba, KERN_WARNING, LOG_FCP, - "%d:0729 FCP cmd x%x failed <%d/%d> status: " - "x%x result: x%x Data: x%x x%x\n", - phba->brd_no, cmd->cmnd[0], cmd->device->id, - cmd->device->lun, lpfc_cmd->status, - lpfc_cmd->result, pIocbOut->iocb.ulpContext, + "%d (%d):0729 FCP cmd x%x failed <%d/%d> " + "status: x%x result: x%x Data: x%x x%x\n", + phba->brd_no, vpi, cmd->cmnd[0], + cmd->device ? cmd->device->id : 0xffff, + cmd->device ? 
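/*
 * Aside (illustration only): the FCP response handling above now sets the
 * residual through scsi_set_resid()/scsi_get_resid() instead of poking
 * cmnd->resid directly.  A condensed model of the two underrun checks it
 * performs -- a mismatch between the target-reported residual and the
 * adapter's own transfer count, and an underrun that eats into the
 * midlayer's 'underflow' minimum -- both of which the driver turns into
 * DID_ERROR.  Field names are stand-ins, and the extra guards the driver
 * applies (sense length, SCSI status) are omitted.
 */
#include <stdio.h>
#include <stdint.h>

/* Returns 1 when the command should complete with DID_ERROR. */
static int sketch_check_underrun(uint32_t bufflen, uint32_t underflow,
				 uint32_t rsp_resid, uint32_t adapter_resid,
				 int is_read, uint32_t *resid_out)
{
	*resid_out = rsp_resid;

	/* Target-reported and adapter-reported residuals must agree on reads. */
	if (is_read && adapter_resid && rsp_resid != adapter_resid) {
		*resid_out = bufflen;	/* distrust the data, fail the whole transfer */
		return 1;
	}
	/* Fewer bytes than the midlayer's stated minimum is also an error. */
	if (bufflen - *resid_out < underflow)
		return 1;
	return 0;
}

int main(void)
{
	uint32_t resid;

	printf("agreeing underrun: err=%d resid=%u\n",
	       sketch_check_underrun(8192, 512, 4096, 4096, 1, &resid),
	       (unsigned int)resid);	/* 0 4096 */
	printf("mismatched counts: err=%d resid=%u\n",
	       sketch_check_underrun(8192, 512, 4096, 1024, 1, &resid),
	       (unsigned int)resid);	/* 1 8192 */
	return 0;
}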
cmd->device->lun : 0xffff, + lpfc_cmd->status, lpfc_cmd->result, + pIocbOut->iocb.ulpContext, lpfc_cmd->cur_iocbq.iocb.ulpIoTag); switch (lpfc_cmd->status) { case IOSTAT_FCP_RSP_ERROR: /* Call FCP RSP handler to determine result */ - lpfc_handle_fcp_err(lpfc_cmd,pIocbOut); + lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut); break; case IOSTAT_NPORT_BSY: case IOSTAT_FABRIC_BSY: cmd->result = ScsiResult(DID_BUS_BUSY, 0); break; + case IOSTAT_LOCAL_REJECT: + if (lpfc_cmd->result == RJT_UNAVAIL_PERM || + lpfc_cmd->result == IOERR_NO_RESOURCES || + lpfc_cmd->result == RJT_LOGIN_REQUIRED) { + cmd->result = ScsiResult(DID_REQUEUE, 0); + break; + } /* else: fall through */ default: cmd->result = ScsiResult(DID_ERROR, 0); break; @@ -479,11 +611,12 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, uint32_t *lp = (uint32_t *)cmd->sense_buffer; lpfc_printf_log(phba, KERN_INFO, LOG_FCP, - "%d:0710 Iodone <%d/%d> cmd %p, error x%x " - "SNS x%x x%x Data: x%x x%x\n", - phba->brd_no, cmd->device->id, + "%d (%d):0710 Iodone <%d/%d> cmd %p, error " + "x%x SNS x%x x%x Data: x%x x%x\n", + phba->brd_no, vpi, cmd->device->id, cmd->device->lun, cmd, cmd->result, - *lp, *(lp + 3), cmd->retries, cmd->resid); + *lp, *(lp + 3), cmd->retries, + scsi_get_resid(cmd)); } result = cmd->result; @@ -496,6 +629,10 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, return; } + + if (!result) + lpfc_rampup_queue_depth(phba, sdev); + if (!result && pnode != NULL && ((jiffies - pnode->last_ramp_up_time) > LPFC_Q_RAMP_UP_INTERVAL * HZ) && @@ -534,7 +671,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, tmp_sdev->queue_depth - 1); } /* - * The queue depth cannot be lowered any more. + * The queue depth cannot be lowered any more. * Modify the returned error code to store * the final depth value set by * scsi_track_queue_full. @@ -544,8 +681,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, if (depth) { lpfc_printf_log(phba, KERN_WARNING, LOG_FCP, - "%d:0711 detected queue full - lun queue depth " - " adjusted to %d.\n", phba->brd_no, depth); + "%d (%d):0711 detected queue full - " + "lun queue depth adjusted to %d.\n", + phba->brd_no, vpi, depth); } } @@ -553,9 +691,10 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, } static void -lpfc_scsi_prep_cmnd(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd, - struct lpfc_nodelist *pnode) +lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, + struct lpfc_nodelist *pnode) { + struct lpfc_hba *phba = vport->phba; struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; @@ -592,7 +731,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd, * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first * data bde entry. 
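The hunks above convert lpfc_handle_fcp_err() and the completion path from cmnd->resid / cmnd->request_bufflen to the scsi_get_resid(), scsi_set_resid() and scsi_bufflen() accessors. The following user-space sketch only models that underrun bookkeeping; the struct and helper names are illustrative stand-ins, not the real SCSI midlayer API.

/* Standalone model of the RESID_UNDER check; all types and names here are
 * illustrative only, not kernel definitions. */
#include <stdio.h>

struct cmd_model {
	unsigned int buflen;     /* total transfer length requested */
	unsigned int resid;      /* bytes NOT transferred, as reported by the HBA */
	unsigned int underflow;  /* smallest acceptable transfer */
};

static unsigned int model_bufflen(const struct cmd_model *c) { return c->buflen; }
static unsigned int model_get_resid(const struct cmd_model *c) { return c->resid; }
static void model_set_resid(struct cmd_model *c, unsigned int r) { c->resid = r; }

/* Returns 1 when a "good" SCSI status must still be turned into an error,
 * mirroring the underrun-to-error conversion in the hunk above. */
static int underrun_is_error(const struct cmd_model *c)
{
	return (model_bufflen(c) - model_get_resid(c)) < c->underflow;
}

int main(void)
{
	struct cmd_model c = { .buflen = 4096, .resid = 3072, .underflow = 2048 };
	unsigned int got = model_bufflen(&c) - model_get_resid(&c);

	if (underrun_is_error(&c)) {
		/* The driver would set DID_ERROR; here we just report and clear. */
		model_set_resid(&c, model_bufflen(&c));
		printf("underrun: %u of %u bytes, below underflow %u -> error\n",
		       got, model_bufflen(&c), c.underflow);
	}
	return 0;
}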
*/ - if (scsi_cmnd->use_sg) { + if (scsi_sg_count(scsi_cmnd)) { if (datadir == DMA_TO_DEVICE) { iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR; iocb_cmd->un.fcpi.fcpi_parm = 0; @@ -602,23 +741,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd, } else { iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR; iocb_cmd->ulpPU = PARM_READ_CHECK; - iocb_cmd->un.fcpi.fcpi_parm = - scsi_cmnd->request_bufflen; - fcp_cmnd->fcpCntl3 = READ_DATA; - phba->fc4InputRequests++; - } - } else if (scsi_cmnd->request_buffer && scsi_cmnd->request_bufflen) { - if (datadir == DMA_TO_DEVICE) { - iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR; - iocb_cmd->un.fcpi.fcpi_parm = 0; - iocb_cmd->ulpPU = 0; - fcp_cmnd->fcpCntl3 = WRITE_DATA; - phba->fc4OutputRequests++; - } else { - iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR; - iocb_cmd->ulpPU = PARM_READ_CHECK; - iocb_cmd->un.fcpi.fcpi_parm = - scsi_cmnd->request_bufflen; + iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd); fcp_cmnd->fcpCntl3 = READ_DATA; phba->fc4InputRequests++; } @@ -642,15 +765,15 @@ lpfc_scsi_prep_cmnd(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd, piocbq->context1 = lpfc_cmd; piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl; piocbq->iocb.ulpTimeout = lpfc_cmd->timeout; + piocbq->vport = vport; } static int -lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba, +lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, unsigned int lun, uint8_t task_mgmt_cmd) { - struct lpfc_sli *psli; struct lpfc_iocbq *piocbq; IOCB_t *piocb; struct fcp_cmnd *fcp_cmnd; @@ -661,8 +784,9 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba, return 0; } - psli = &phba->sli; piocbq = &(lpfc_cmd->cur_iocbq); + piocbq->vport = vport; + piocb = &piocbq->iocb; fcp_cmnd = lpfc_cmd->fcp_cmnd; @@ -688,7 +812,7 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba, piocb->ulpTimeout = lpfc_cmd->timeout; } - return (1); + return 1; } static void @@ -704,10 +828,11 @@ lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba, } static int -lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba, +lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport, unsigned tgt_id, unsigned int lun, struct lpfc_rport_data *rdata) { + struct lpfc_hba *phba = vport->phba; struct lpfc_iocbq *iocbq; struct lpfc_iocbq *iocbqrsp; int ret; @@ -716,12 +841,11 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba, return FAILED; lpfc_cmd->rdata = rdata; - ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, lun, + ret = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun, FCP_TARGET_RESET); if (!ret) return FAILED; - lpfc_cmd->scsi_hba = phba; iocbq = &lpfc_cmd->cur_iocbq; iocbqrsp = lpfc_sli_get_iocbq(phba); @@ -730,10 +854,10 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba, /* Issue Target Reset to TGT <num> */ lpfc_printf_log(phba, KERN_INFO, LOG_FCP, - "%d:0702 Issue Target Reset to TGT %d " + "%d (%d):0702 Issue Target Reset to TGT %d " "Data: x%x x%x\n", - phba->brd_no, tgt_id, rdata->pnode->nlp_rpi, - rdata->pnode->nlp_flag); + phba->brd_no, vport->vpi, tgt_id, + rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag); ret = lpfc_sli_issue_iocb_wait(phba, &phba->sli.ring[phba->sli.fcp_ring], @@ -758,7 +882,8 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba, const char * lpfc_info(struct Scsi_Host *host) { - struct lpfc_hba *phba = (struct lpfc_hba *) host->hostdata; + struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata; + struct 
lpfc_hba *phba = vport->phba; int len; static char lpfcinfobuf[384]; @@ -800,26 +925,22 @@ void lpfc_poll_start_timer(struct lpfc_hba * phba) void lpfc_poll_timeout(unsigned long ptr) { - struct lpfc_hba *phba = (struct lpfc_hba *)ptr; - unsigned long iflag; - - spin_lock_irqsave(phba->host->host_lock, iflag); + struct lpfc_hba *phba = (struct lpfc_hba *) ptr; if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { lpfc_sli_poll_fcp_ring (phba); if (phba->cfg_poll & DISABLE_FCP_RING_INT) lpfc_poll_rearm_timer(phba); } - - spin_unlock_irqrestore(phba->host->host_lock, iflag); } static int lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) { - struct lpfc_hba *phba = - (struct lpfc_hba *) cmnd->device->host->hostdata; - struct lpfc_sli *psli = &phba->sli; + struct Scsi_Host *shost = cmnd->device->host; + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + struct lpfc_sli *psli = &phba->sli; struct lpfc_rport_data *rdata = cmnd->device->hostdata; struct lpfc_nodelist *ndlp = rdata->pnode; struct lpfc_scsi_buf *lpfc_cmd; @@ -840,11 +961,14 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) cmnd->result = ScsiResult(DID_BUS_BUSY, 0); goto out_fail_command; } - lpfc_cmd = lpfc_get_scsi_buf (phba); + lpfc_cmd = lpfc_get_scsi_buf(phba); if (lpfc_cmd == NULL) { + lpfc_adjust_queue_depth(phba); + lpfc_printf_log(phba, KERN_INFO, LOG_FCP, - "%d:0707 driver's buffer pool is empty, " - "IO busied\n", phba->brd_no); + "%d (%d):0707 driver's buffer pool is empty, " + "IO busied\n", + phba->brd_no, vport->vpi); goto out_host_busy; } @@ -862,10 +986,10 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) if (err) goto out_host_busy_free_buf; - lpfc_scsi_prep_cmnd(phba, lpfc_cmd, ndlp); + lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp); err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring], - &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB); + &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB); if (err) goto out_host_busy_free_buf; @@ -907,8 +1031,9 @@ lpfc_block_error_handler(struct scsi_cmnd *cmnd) static int lpfc_abort_handler(struct scsi_cmnd *cmnd) { - struct Scsi_Host *shost = cmnd->device->host; - struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata; + struct Scsi_Host *shost = cmnd->device->host; + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring]; struct lpfc_iocbq *iocb; struct lpfc_iocbq *abtsiocb; @@ -918,8 +1043,6 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) int ret = SUCCESS; lpfc_block_error_handler(cmnd); - spin_lock_irq(shost->host_lock); - lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble; BUG_ON(!lpfc_cmd); @@ -956,12 +1079,13 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) icmd->ulpLe = 1; icmd->ulpClass = cmd->ulpClass; - if (phba->hba_state >= LPFC_LINK_UP) + if (lpfc_is_link_up(phba)) icmd->ulpCommand = CMD_ABORT_XRI_CN; else icmd->ulpCommand = CMD_CLOSE_XRI_CN; abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; + abtsiocb->vport = vport; if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) == IOCB_ERROR) { lpfc_sli_release_iocbq(phba, abtsiocb); ret = FAILED; @@ -977,9 +1101,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) if (phba->cfg_poll & DISABLE_FCP_RING_INT) lpfc_sli_poll_fcp_ring (phba); - spin_unlock_irq(phba->host->host_lock); - schedule_timeout_uninterruptible(LPFC_ABORT_WAIT*HZ); - spin_lock_irq(phba->host->host_lock); + 
schedule_timeout_uninterruptible(LPFC_ABORT_WAIT * HZ); if (++loop_count > (2 * phba->cfg_devloss_tmo)/LPFC_ABORT_WAIT) break; @@ -988,30 +1110,30 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) if (lpfc_cmd->pCmd == cmnd) { ret = FAILED; lpfc_printf_log(phba, KERN_ERR, LOG_FCP, - "%d:0748 abort handler timed out waiting for " - "abort to complete: ret %#x, ID %d, LUN %d, " - "snum %#lx\n", - phba->brd_no, ret, cmnd->device->id, - cmnd->device->lun, cmnd->serial_number); + "%d (%d):0748 abort handler timed out waiting " + "for abort to complete: ret %#x, ID %d, " + "LUN %d, snum %#lx\n", + phba->brd_no, vport->vpi, ret, + cmnd->device->id, cmnd->device->lun, + cmnd->serial_number); } out: lpfc_printf_log(phba, KERN_WARNING, LOG_FCP, - "%d:0749 SCSI Layer I/O Abort Request " + "%d (%d):0749 SCSI Layer I/O Abort Request " "Status x%x ID %d LUN %d snum %#lx\n", - phba->brd_no, ret, cmnd->device->id, + phba->brd_no, vport->vpi, ret, cmnd->device->id, cmnd->device->lun, cmnd->serial_number); - spin_unlock_irq(shost->host_lock); - return ret; } static int lpfc_device_reset_handler(struct scsi_cmnd *cmnd) { - struct Scsi_Host *shost = cmnd->device->host; - struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata; + struct Scsi_Host *shost = cmnd->device->host; + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; struct lpfc_scsi_buf *lpfc_cmd; struct lpfc_iocbq *iocbq, *iocbqrsp; struct lpfc_rport_data *rdata = cmnd->device->hostdata; @@ -1022,28 +1144,26 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd) int cnt, loopcnt; lpfc_block_error_handler(cmnd); - spin_lock_irq(shost->host_lock); loopcnt = 0; /* * If target is not in a MAPPED state, delay the reset until * target is rediscovered or devloss timeout expires. 
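Both the abort and device-reset paths in the surrounding hunks now sleep in a bounded poll loop without holding the host lock around the sleep. A minimal user-space sketch of that bounded-wait shape, with a caller-supplied predicate and illustrative interval/timeout values standing in for LPFC_ABORT_WAIT and the devloss-based limit:

/* Bounded polling wait, modelled in user space; interval and timeout are
 * illustrative stand-ins, not the driver's constants. */
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static bool wait_for(bool (*done)(void *), void *arg,
		     unsigned int interval_s, unsigned int timeout_s)
{
	unsigned int waited = 0;

	while (!done(arg)) {
		if (waited >= timeout_s)
			return false;      /* caller reports FAILED */
		sleep(interval_s);         /* never sleep with a spinlock held */
		waited += interval_s;
	}
	return true;
}

static bool always_done(void *arg) { (void)arg; return true; }

int main(void)
{
	printf("completed cleanly: %s\n",
	       wait_for(always_done, NULL, 1, 10) ? "yes" : "no");
	return 0;
}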
*/ - while ( 1 ) { + while (1) { if (!pnode) goto out; if (pnode->nlp_state != NLP_STE_MAPPED_NODE) { - spin_unlock_irq(phba->host->host_lock); schedule_timeout_uninterruptible(msecs_to_jiffies(500)); - spin_lock_irq(phba->host->host_lock); loopcnt++; rdata = cmnd->device->hostdata; if (!rdata || (loopcnt > ((phba->cfg_devloss_tmo * 2) + 1))) { lpfc_printf_log(phba, KERN_ERR, LOG_FCP, - "%d:0721 LUN Reset rport failure:" - " cnt x%x rdata x%p\n", - phba->brd_no, loopcnt, rdata); + "%d (%d):0721 LUN Reset rport " + "failure: cnt x%x rdata x%p\n", + phba->brd_no, vport->vpi, + loopcnt, rdata); goto out; } pnode = rdata->pnode; @@ -1054,15 +1174,14 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd) break; } - lpfc_cmd = lpfc_get_scsi_buf (phba); + lpfc_cmd = lpfc_get_scsi_buf(phba); if (lpfc_cmd == NULL) goto out; lpfc_cmd->timeout = 60; - lpfc_cmd->scsi_hba = phba; lpfc_cmd->rdata = rdata; - ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, cmnd->device->lun, + ret = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, cmnd->device->lun, FCP_TARGET_RESET); if (!ret) goto out_free_scsi_buf; @@ -1075,8 +1194,9 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd) goto out_free_scsi_buf; lpfc_printf_log(phba, KERN_INFO, LOG_FCP, - "%d:0703 Issue target reset to TGT %d LUN %d rpi x%x " - "nlp_flag x%x\n", phba->brd_no, cmnd->device->id, + "%d (%d):0703 Issue target reset to TGT %d LUN %d " + "rpi x%x nlp_flag x%x\n", + phba->brd_no, vport->vpi, cmnd->device->id, cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag); iocb_status = lpfc_sli_issue_iocb_wait(phba, @@ -1111,9 +1231,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd) 0, LPFC_CTX_LUN); loopcnt = 0; while(cnt) { - spin_unlock_irq(phba->host->host_lock); schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ); - spin_lock_irq(phba->host->host_lock); if (++loopcnt > (2 * phba->cfg_devloss_tmo)/LPFC_RESET_WAIT) @@ -1127,8 +1245,9 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd) if (cnt) { lpfc_printf_log(phba, KERN_ERR, LOG_FCP, - "%d:0719 device reset I/O flush failure: cnt x%x\n", - phba->brd_no, cnt); + "%d (%d):0719 device reset I/O flush failure: " + "cnt x%x\n", + phba->brd_no, vport->vpi, cnt); ret = FAILED; } @@ -1137,21 +1256,21 @@ out_free_scsi_buf: lpfc_release_scsi_buf(phba, lpfc_cmd); } lpfc_printf_log(phba, KERN_ERR, LOG_FCP, - "%d:0713 SCSI layer issued device reset (%d, %d) " + "%d (%d):0713 SCSI layer issued device reset (%d, %d) " "return x%x status x%x result x%x\n", - phba->brd_no, cmnd->device->id, cmnd->device->lun, - ret, cmd_status, cmd_result); + phba->brd_no, vport->vpi, cmnd->device->id, + cmnd->device->lun, ret, cmd_status, cmd_result); out: - spin_unlock_irq(shost->host_lock); return ret; } static int lpfc_bus_reset_handler(struct scsi_cmnd *cmnd) { - struct Scsi_Host *shost = cmnd->device->host; - struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata; + struct Scsi_Host *shost = cmnd->device->host; + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; struct lpfc_nodelist *ndlp = NULL; int match; int ret = FAILED, i, err_count = 0; @@ -1159,7 +1278,6 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd) struct lpfc_scsi_buf * lpfc_cmd; lpfc_block_error_handler(cmnd); - spin_lock_irq(shost->host_lock); lpfc_cmd = lpfc_get_scsi_buf(phba); if (lpfc_cmd == NULL) @@ -1167,7 +1285,6 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd) /* The lpfc_cmd storage is reused. Set all loop invariants. 
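The bus-reset handler in the surrounding hunks takes the Scsi_Host lock only around the walk of vport->fc_nodes that finds a mapped target, and drops it before issuing the reset. A small find-under-lock, act-outside-lock sketch, using a plain mutex and made-up node types purely for illustration:

#include <pthread.h>
#include <stdio.h>

struct node { int target_id; int mapped; struct node *next; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node nodes[] = {
	{ .target_id = 0, .mapped = 1, .next = &nodes[1] },
	{ .target_id = 1, .mapped = 0, .next = NULL },
};

static struct node *find_mapped(int target_id)
{
	struct node *n, *match = NULL;

	pthread_mutex_lock(&list_lock);        /* hold only for the search */
	for (n = nodes; n; n = n->next)
		if (n->mapped && n->target_id == target_id)
			match = n;
	pthread_mutex_unlock(&list_lock);
	return match;                          /* act on the match unlocked */
}

int main(void)
{
	printf("target 0: %s\n", find_mapped(0) ? "reset issued" : "skipped");
	return 0;
}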
*/ lpfc_cmd->timeout = 60; - lpfc_cmd->scsi_hba = phba; /* * Since the driver manages a single bus device, reset all @@ -1177,7 +1294,8 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd) for (i = 0; i < LPFC_MAX_TARGET; i++) { /* Search for mapped node by target ID */ match = 0; - list_for_each_entry(ndlp, &phba->fc_nodes, nlp_listp) { + spin_lock_irq(shost->host_lock); + list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { if (ndlp->nlp_state == NLP_STE_MAPPED_NODE && i == ndlp->nlp_sid && ndlp->rport) { @@ -1185,15 +1303,18 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd) break; } } + spin_unlock_irq(shost->host_lock); if (!match) continue; - ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba, i, cmnd->device->lun, + ret = lpfc_scsi_tgt_reset(lpfc_cmd, vport, i, + cmnd->device->lun, ndlp->rport->dd_data); if (ret != SUCCESS) { lpfc_printf_log(phba, KERN_ERR, LOG_FCP, - "%d:0700 Bus Reset on target %d failed\n", - phba->brd_no, i); + "%d (%d):0700 Bus Reset on target %d " + "failed\n", + phba->brd_no, vport->vpi, i); err_count++; break; } @@ -1219,9 +1340,7 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd) 0, 0, 0, LPFC_CTX_HOST); loopcnt = 0; while(cnt) { - spin_unlock_irq(phba->host->host_lock); schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ); - spin_lock_irq(phba->host->host_lock); if (++loopcnt > (2 * phba->cfg_devloss_tmo)/LPFC_RESET_WAIT) @@ -1234,25 +1353,24 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd) if (cnt) { lpfc_printf_log(phba, KERN_ERR, LOG_FCP, - "%d:0715 Bus Reset I/O flush failure: cnt x%x left x%x\n", - phba->brd_no, cnt, i); + "%d (%d):0715 Bus Reset I/O flush failure: " + "cnt x%x left x%x\n", + phba->brd_no, vport->vpi, cnt, i); ret = FAILED; } - lpfc_printf_log(phba, - KERN_ERR, - LOG_FCP, - "%d:0714 SCSI layer issued Bus Reset Data: x%x\n", - phba->brd_no, ret); + lpfc_printf_log(phba, KERN_ERR, LOG_FCP, + "%d (%d):0714 SCSI layer issued Bus Reset Data: x%x\n", + phba->brd_no, vport->vpi, ret); out: - spin_unlock_irq(shost->host_lock); return ret; } static int lpfc_slave_alloc(struct scsi_device *sdev) { - struct lpfc_hba *phba = (struct lpfc_hba *)sdev->host->hostdata; + struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata; + struct lpfc_hba *phba = vport->phba; struct lpfc_scsi_buf *scsi_buf = NULL; struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); uint32_t total = 0, i; @@ -1273,27 +1391,35 @@ lpfc_slave_alloc(struct scsi_device *sdev) */ total = phba->total_scsi_bufs; num_to_alloc = phba->cfg_lun_queue_depth + 2; - if (total >= phba->cfg_hba_queue_depth) { + + /* Allow some exchanges to be available always to complete discovery */ + if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) { lpfc_printf_log(phba, KERN_WARNING, LOG_FCP, - "%d:0704 At limitation of %d preallocated " - "command buffers\n", phba->brd_no, total); + "%d (%d):0704 At limitation of %d " + "preallocated command buffers\n", + phba->brd_no, vport->vpi, total); return 0; - } else if (total + num_to_alloc > phba->cfg_hba_queue_depth) { + + /* Allow some exchanges to be available always to complete discovery */ + } else if (total + num_to_alloc > + phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) { lpfc_printf_log(phba, KERN_WARNING, LOG_FCP, - "%d:0705 Allocation request of %d command " - "buffers will exceed max of %d. Reducing " - "allocation request to %d.\n", phba->brd_no, - num_to_alloc, phba->cfg_hba_queue_depth, + "%d (%d):0705 Allocation request of %d " + "command buffers will exceed max of %d. 
" + "Reducing allocation request to %d.\n", + phba->brd_no, vport->vpi, num_to_alloc, + phba->cfg_hba_queue_depth, (phba->cfg_hba_queue_depth - total)); num_to_alloc = phba->cfg_hba_queue_depth - total; } for (i = 0; i < num_to_alloc; i++) { - scsi_buf = lpfc_new_scsi_buf(phba); + scsi_buf = lpfc_new_scsi_buf(vport); if (!scsi_buf) { lpfc_printf_log(phba, KERN_ERR, LOG_FCP, - "%d:0706 Failed to allocate command " - "buffer\n", phba->brd_no); + "%d (%d):0706 Failed to allocate " + "command buffer\n", + phba->brd_no, vport->vpi); break; } @@ -1308,8 +1434,9 @@ lpfc_slave_alloc(struct scsi_device *sdev) static int lpfc_slave_configure(struct scsi_device *sdev) { - struct lpfc_hba *phba = (struct lpfc_hba *) sdev->host->hostdata; - struct fc_rport *rport = starget_to_rport(sdev->sdev_target); + struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata; + struct lpfc_hba *phba = vport->phba; + struct fc_rport *rport = starget_to_rport(sdev->sdev_target); if (sdev->tagged_supported) scsi_activate_tcq(sdev, phba->cfg_lun_queue_depth); @@ -1340,6 +1467,7 @@ lpfc_slave_destroy(struct scsi_device *sdev) return; } + struct scsi_host_template lpfc_template = { .module = THIS_MODULE, .name = LPFC_DRIVER_NAME, @@ -1352,11 +1480,10 @@ struct scsi_host_template lpfc_template = { .slave_configure = lpfc_slave_configure, .slave_destroy = lpfc_slave_destroy, .scan_finished = lpfc_scan_finished, - .scan_start = lpfc_scan_start, .this_id = -1, .sg_tablesize = LPFC_SG_SEG_CNT, .cmd_per_lun = LPFC_CMD_PER_LUN, .use_clustering = ENABLE_CLUSTERING, - .shost_attrs = lpfc_host_attrs, + .shost_attrs = lpfc_hba_attrs, .max_sectors = 0xFFFF, }; diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h index cdcd2535803f..31787bb6d53e 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.h +++ b/drivers/scsi/lpfc/lpfc_scsi.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2005 Emulex. All rights reserved. * + * Copyright (C) 2004-2006 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * @@ -110,7 +110,6 @@ struct fcp_cmnd { struct lpfc_scsi_buf { struct list_head list; struct scsi_cmnd *pCmd; - struct lpfc_hba *scsi_hba; struct lpfc_rport_data *rdata; uint32_t timeout; diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index a1e721459e2b..f4d5a6b00fde 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -38,23 +38,25 @@ #include "lpfc_crtn.h" #include "lpfc_logmsg.h" #include "lpfc_compat.h" +#include "lpfc_debugfs.h" /* * Define macro to log: Mailbox command x%x cannot issue Data * This allows multiple uses of lpfc_msgBlk0311 * w/o perturbing log msg utility. */ -#define LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag) \ +#define LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag) \ lpfc_printf_log(phba, \ KERN_INFO, \ LOG_MBOX | LOG_SLI, \ - "%d:0311 Mailbox command x%x cannot issue " \ - "Data: x%x x%x x%x\n", \ + "%d (%d):0311 Mailbox command x%x cannot " \ + "issue Data: x%x x%x x%x\n", \ phba->brd_no, \ - mb->mbxCommand, \ - phba->hba_state, \ + pmbox->vport ? pmbox->vport->vpi : 0, \ + pmbox->mb.mbxCommand, \ + phba->pport->port_state, \ psli->sli_flag, \ - flag); + flag) /* There are only four IOCB completion types. 
*/ @@ -65,8 +67,26 @@ typedef enum _lpfc_iocb_type { LPFC_ABORT_IOCB } lpfc_iocb_type; -struct lpfc_iocbq * -lpfc_sli_get_iocbq(struct lpfc_hba * phba) + /* SLI-2/SLI-3 provide different sized iocbs. Given a pointer + * to the start of the ring, and the slot number of the + * desired iocb entry, calc a pointer to that entry. + */ +static inline IOCB_t * +lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) +{ + return (IOCB_t *) (((char *) pring->cmdringaddr) + + pring->cmdidx * phba->iocb_cmd_size); +} + +static inline IOCB_t * +lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) +{ + return (IOCB_t *) (((char *) pring->rspringaddr) + + pring->rspidx * phba->iocb_rsp_size); +} + +static struct lpfc_iocbq * +__lpfc_sli_get_iocbq(struct lpfc_hba *phba) { struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list; struct lpfc_iocbq * iocbq = NULL; @@ -75,10 +95,22 @@ lpfc_sli_get_iocbq(struct lpfc_hba * phba) return iocbq; } +struct lpfc_iocbq * +lpfc_sli_get_iocbq(struct lpfc_hba *phba) +{ + struct lpfc_iocbq * iocbq = NULL; + unsigned long iflags; + + spin_lock_irqsave(&phba->hbalock, iflags); + iocbq = __lpfc_sli_get_iocbq(phba); + spin_unlock_irqrestore(&phba->hbalock, iflags); + return iocbq; +} + void -lpfc_sli_release_iocbq(struct lpfc_hba * phba, struct lpfc_iocbq * iocbq) +__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) { - size_t start_clean = (size_t)(&((struct lpfc_iocbq *)NULL)->iocb); + size_t start_clean = offsetof(struct lpfc_iocbq, iocb); /* * Clean all volatile data fields, preserve iotag and node struct. @@ -87,6 +119,19 @@ lpfc_sli_release_iocbq(struct lpfc_hba * phba, struct lpfc_iocbq * iocbq) list_add_tail(&iocbq->list, &phba->lpfc_iocb_list); } +void +lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) +{ + unsigned long iflags; + + /* + * Clean all volatile data fields, preserve iotag and node struct. + */ + spin_lock_irqsave(&phba->hbalock, iflags); + __lpfc_sli_release_iocbq(phba, iocbq); + spin_unlock_irqrestore(&phba->hbalock, iflags); +} + /* * Translate the iocb command to an iocb command type used to decide the final * disposition of each completed IOCB. 
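The iocbq helpers above follow the common kernel split: a double-underscore variant that assumes the caller already holds hbalock, plus a public wrapper that takes and releases the lock around it. A user-space model of that split, with a counter standing in for the iocb free list:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t hbalock = PTHREAD_MUTEX_INITIALIZER;
static int free_count = 4;               /* stands in for the iocb free list */

/* __ variant: caller must already hold hbalock */
static int __get_resource(void)
{
	return free_count > 0 ? free_count-- : -1;
}

/* public variant: wraps the unlocked helper in lock/unlock */
static int get_resource(void)
{
	int ret;

	pthread_mutex_lock(&hbalock);
	ret = __get_resource();
	pthread_mutex_unlock(&hbalock);
	return ret;
}

int main(void)
{
	printf("got slot %d\n", get_resource());
	return 0;
}

The same convention is used elsewhere in these hunks: code already inside a hbalock critical section calls the __ variant directly, as lpfc_sli_submit_iocb does with __lpfc_sli_release_iocbq().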
@@ -155,6 +200,9 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd) case CMD_RCV_ELS_REQ_CX: case CMD_RCV_SEQUENCE64_CX: case CMD_RCV_ELS_REQ64_CX: + case CMD_IOCB_RCV_SEQ64_CX: + case CMD_IOCB_RCV_ELS64_CX: + case CMD_IOCB_RCV_CONT64_CX: type = LPFC_UNSOL_IOCB; break; default: @@ -166,73 +214,77 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd) } static int -lpfc_sli_ring_map(struct lpfc_hba * phba, LPFC_MBOXQ_t *pmb) +lpfc_sli_ring_map(struct lpfc_hba *phba) { struct lpfc_sli *psli = &phba->sli; - MAILBOX_t *pmbox = &pmb->mb; - int i, rc; + LPFC_MBOXQ_t *pmb; + MAILBOX_t *pmbox; + int i, rc, ret = 0; + pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!pmb) + return -ENOMEM; + pmbox = &pmb->mb; + phba->link_state = LPFC_INIT_MBX_CMDS; for (i = 0; i < psli->num_rings; i++) { - phba->hba_state = LPFC_INIT_MBX_CMDS; lpfc_config_ring(phba, i, pmb); rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); if (rc != MBX_SUCCESS) { - lpfc_printf_log(phba, - KERN_ERR, - LOG_INIT, - "%d:0446 Adapter failed to init, " + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "%d:0446 Adapter failed to init (%d), " "mbxCmd x%x CFG_RING, mbxStatus x%x, " "ring %d\n", - phba->brd_no, + phba->brd_no, rc, pmbox->mbxCommand, pmbox->mbxStatus, i); - phba->hba_state = LPFC_HBA_ERROR; - return -ENXIO; + phba->link_state = LPFC_HBA_ERROR; + ret = -ENXIO; + break; } } - return 0; + mempool_free(pmb, phba->mbox_mem_pool); + return ret; } static int -lpfc_sli_ringtxcmpl_put(struct lpfc_hba * phba, - struct lpfc_sli_ring * pring, struct lpfc_iocbq * piocb) +lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, + struct lpfc_iocbq *piocb) { list_add_tail(&piocb->list, &pring->txcmplq); pring->txcmplq_cnt++; - if (unlikely(pring->ringno == LPFC_ELS_RING)) - mod_timer(&phba->els_tmofunc, - jiffies + HZ * (phba->fc_ratov << 1)); + if ((unlikely(pring->ringno == LPFC_ELS_RING)) && + (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && + (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) { + if (!piocb->vport) + BUG(); + else + mod_timer(&piocb->vport->els_tmofunc, + jiffies + HZ * (phba->fc_ratov << 1)); + } - return (0); + + return 0; } static struct lpfc_iocbq * -lpfc_sli_ringtx_get(struct lpfc_hba * phba, struct lpfc_sli_ring * pring) +lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) { - struct list_head *dlp; struct lpfc_iocbq *cmd_iocb; - dlp = &pring->txq; - cmd_iocb = NULL; - list_remove_head((&pring->txq), cmd_iocb, - struct lpfc_iocbq, - list); - if (cmd_iocb) { - /* If the first ptr is not equal to the list header, - * deque the IOCBQ_t and return it. - */ + list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list); + if (cmd_iocb != NULL) pring->txq_cnt--; - } - return (cmd_iocb); + return cmd_iocb; } static IOCB_t * lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring) { - struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno]; + struct lpfc_pgp *pgp = (phba->sli_rev == 3) ? 
+ &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] : + &phba->slim2p->mbx.us.s2.port[pring->ringno]; uint32_t max_cmd_idx = pring->numCiocb; - IOCB_t *iocb = NULL; if ((pring->next_cmdidx == pring->cmdidx) && (++pring->next_cmdidx >= max_cmd_idx)) @@ -249,15 +301,17 @@ lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring) phba->brd_no, pring->ringno, pring->local_getidx, max_cmd_idx); - phba->hba_state = LPFC_HBA_ERROR; + phba->link_state = LPFC_HBA_ERROR; /* * All error attention handlers are posted to * worker thread */ phba->work_ha |= HA_ERATT; phba->work_hs = HS_FFER3; + + /* hbalock should already be held */ if (phba->work_wait) - wake_up(phba->work_wait); + lpfc_worker_wake_up(phba); return NULL; } @@ -266,39 +320,34 @@ lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring) return NULL; } - iocb = IOCB_ENTRY(pring->cmdringaddr, pring->cmdidx); - - return iocb; + return lpfc_cmd_iocb(phba, pring); } uint16_t -lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_iocbq * iocbq) +lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) { - struct lpfc_iocbq ** new_arr; - struct lpfc_iocbq ** old_arr; + struct lpfc_iocbq **new_arr; + struct lpfc_iocbq **old_arr; size_t new_len; struct lpfc_sli *psli = &phba->sli; uint16_t iotag; - spin_lock_irq(phba->host->host_lock); + spin_lock_irq(&phba->hbalock); iotag = psli->last_iotag; if(++iotag < psli->iocbq_lookup_len) { psli->last_iotag = iotag; psli->iocbq_lookup[iotag] = iocbq; - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(&phba->hbalock); iocbq->iotag = iotag; return iotag; - } - else if (psli->iocbq_lookup_len < (0xffff + } else if (psli->iocbq_lookup_len < (0xffff - LPFC_IOCBQ_LOOKUP_INCREMENT)) { new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT; - spin_unlock_irq(phba->host->host_lock); - new_arr = kmalloc(new_len * sizeof (struct lpfc_iocbq *), + spin_unlock_irq(&phba->hbalock); + new_arr = kzalloc(new_len * sizeof (struct lpfc_iocbq *), GFP_KERNEL); if (new_arr) { - memset((char *)new_arr, 0, - new_len * sizeof (struct lpfc_iocbq *)); - spin_lock_irq(phba->host->host_lock); + spin_lock_irq(&phba->hbalock); old_arr = psli->iocbq_lookup; if (new_len <= psli->iocbq_lookup_len) { /* highly unprobable case */ @@ -307,11 +356,11 @@ lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_iocbq * iocbq) if(++iotag < psli->iocbq_lookup_len) { psli->last_iotag = iotag; psli->iocbq_lookup[iotag] = iocbq; - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(&phba->hbalock); iocbq->iotag = iotag; return iotag; } - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(&phba->hbalock); return 0; } if (psli->iocbq_lookup) @@ -322,13 +371,13 @@ lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_iocbq * iocbq) psli->iocbq_lookup_len = new_len; psli->last_iotag = iotag; psli->iocbq_lookup[iotag] = iocbq; - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(&phba->hbalock); iocbq->iotag = iotag; kfree(old_arr); return iotag; } } else - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(&phba->hbalock); lpfc_printf_log(phba, KERN_ERR,LOG_SLI, "%d:0318 Failed to allocate IOTAG.last IOTAG is %d\n", @@ -349,7 +398,7 @@ lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, /* * Issue iocb command to adapter */ - lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, sizeof (IOCB_t)); + lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size); wmb(); pring->stats.iocb_cmd++; @@ -361,20 +410,18 @@ lpfc_sli_submit_iocb(struct 
lpfc_hba *phba, struct lpfc_sli_ring *pring, if (nextiocb->iocb_cmpl) lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb); else - lpfc_sli_release_iocbq(phba, nextiocb); + __lpfc_sli_release_iocbq(phba, nextiocb); /* * Let the HBA know what IOCB slot will be the next one the * driver will put a command into. */ pring->cmdidx = pring->next_cmdidx; - writel(pring->cmdidx, phba->MBslimaddr - + (SLIMOFF + (pring->ringno * 2)) * 4); + writel(pring->cmdidx, &phba->host_gp[pring->ringno].cmdPutInx); } static void -lpfc_sli_update_full_ring(struct lpfc_hba * phba, - struct lpfc_sli_ring *pring) +lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) { int ringno = pring->ringno; @@ -393,8 +440,7 @@ lpfc_sli_update_full_ring(struct lpfc_hba * phba, } static void -lpfc_sli_update_ring(struct lpfc_hba * phba, - struct lpfc_sli_ring *pring) +lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) { int ringno = pring->ringno; @@ -407,7 +453,7 @@ lpfc_sli_update_ring(struct lpfc_hba * phba, } static void -lpfc_sli_resume_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring) +lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) { IOCB_t *iocb; struct lpfc_iocbq *nextiocb; @@ -420,7 +466,7 @@ lpfc_sli_resume_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring) * (d) IOCB processing is not blocked by the outstanding mbox command. */ if (pring->txq_cnt && - (phba->hba_state > LPFC_LINK_DOWN) && + lpfc_is_link_up(phba) && (pring->ringno != phba->sli.fcp_ring || phba->sli.sli_flag & LPFC_PROCESS_LA) && !(pring->flag & LPFC_STOP_IOCB_MBX)) { @@ -440,11 +486,15 @@ lpfc_sli_resume_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring) /* lpfc_sli_turn_on_ring is only called by lpfc_sli_handle_mb_event below */ static void -lpfc_sli_turn_on_ring(struct lpfc_hba * phba, int ringno) +lpfc_sli_turn_on_ring(struct lpfc_hba *phba, int ringno) { - struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[ringno]; + struct lpfc_pgp *pgp = (phba->sli_rev == 3) ? 
+ &phba->slim2p->mbx.us.s3_pgp.port[ringno] : + &phba->slim2p->mbx.us.s2.port[ringno]; + unsigned long iflags; /* If the ring is active, flag it */ + spin_lock_irqsave(&phba->hbalock, iflags); if (phba->sli.ring[ringno].cmdringaddr) { if (phba->sli.ring[ringno].flag & LPFC_STOP_IOCB_MBX) { phba->sli.ring[ringno].flag &= ~LPFC_STOP_IOCB_MBX; @@ -453,11 +503,176 @@ lpfc_sli_turn_on_ring(struct lpfc_hba * phba, int ringno) */ phba->sli.ring[ringno].local_getidx = le32_to_cpu(pgp->cmdGetInx); - spin_lock_irq(phba->host->host_lock); lpfc_sli_resume_iocb(phba, &phba->sli.ring[ringno]); - spin_unlock_irq(phba->host->host_lock); } } + spin_unlock_irqrestore(&phba->hbalock, iflags); +} + +struct lpfc_hbq_entry * +lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno) +{ + struct hbq_s *hbqp = &phba->hbqs[hbqno]; + + if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx && + ++hbqp->next_hbqPutIdx >= hbqp->entry_count) + hbqp->next_hbqPutIdx = 0; + + if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) { + uint32_t raw_index = phba->hbq_get[hbqno]; + uint32_t getidx = le32_to_cpu(raw_index); + + hbqp->local_hbqGetIdx = getidx; + + if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) { + lpfc_printf_log(phba, KERN_ERR, + LOG_SLI | LOG_VPORT, + "%d:1802 HBQ %d: local_hbqGetIdx " + "%u is > than hbqp->entry_count %u\n", + phba->brd_no, hbqno, + hbqp->local_hbqGetIdx, + hbqp->entry_count); + + phba->link_state = LPFC_HBA_ERROR; + return NULL; + } + + if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx) + return NULL; + } + + return (struct lpfc_hbq_entry *) phba->hbqslimp.virt + hbqp->hbqPutIdx; +} + +void +lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba) +{ + struct lpfc_dmabuf *dmabuf, *next_dmabuf; + struct hbq_dmabuf *hbq_buf; + + /* Return all memory used by all HBQs */ + list_for_each_entry_safe(dmabuf, next_dmabuf, + &phba->hbq_buffer_list, list) { + hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf); + list_del(&hbq_buf->dbuf.list); + lpfc_hbq_free(phba, hbq_buf->dbuf.virt, hbq_buf->dbuf.phys); + kfree(hbq_buf); + } +} + +static void +lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno, + struct hbq_dmabuf *hbq_buf) +{ + struct lpfc_hbq_entry *hbqe; + dma_addr_t physaddr = hbq_buf->dbuf.phys; + + /* Get next HBQ entry slot to use */ + hbqe = lpfc_sli_next_hbq_slot(phba, hbqno); + if (hbqe) { + struct hbq_s *hbqp = &phba->hbqs[hbqno]; + + hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr)); + hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr)); + hbqe->bde.tus.f.bdeSize = FCELSSIZE; + hbqe->bde.tus.f.bdeFlags = 0; + hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w); + hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag); + /* Sync SLIM */ + hbqp->hbqPutIdx = hbqp->next_hbqPutIdx; + writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno); + /* flush */ + readl(phba->hbq_put + hbqno); + list_add_tail(&hbq_buf->dbuf.list, &phba->hbq_buffer_list); + } +} + +static struct lpfc_hbq_init lpfc_els_hbq = { + .rn = 1, + .entry_count = 200, + .mask_count = 0, + .profile = 0, + .ring_mask = 1 << LPFC_ELS_RING, + .buffer_count = 0, + .init_count = 20, + .add_count = 5, +}; + +static struct lpfc_hbq_init *lpfc_hbq_defs[] = { + &lpfc_els_hbq, +}; + +int +lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count) +{ + uint32_t i, start, end; + struct hbq_dmabuf *hbq_buffer; + + start = lpfc_hbq_defs[hbqno]->buffer_count; + end = count + lpfc_hbq_defs[hbqno]->buffer_count; + if (end > lpfc_hbq_defs[hbqno]->entry_count) { + end = lpfc_hbq_defs[hbqno]->entry_count; + } + + /* 
Populate HBQ entries */ + for (i = start; i < end; i++) { + hbq_buffer = kmalloc(sizeof(struct hbq_dmabuf), + GFP_KERNEL); + if (!hbq_buffer) + return 1; + hbq_buffer->dbuf.virt = lpfc_hbq_alloc(phba, MEM_PRI, + &hbq_buffer->dbuf.phys); + if (hbq_buffer->dbuf.virt == NULL) + return 1; + hbq_buffer->tag = (i | (hbqno << 16)); + lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer); + lpfc_hbq_defs[hbqno]->buffer_count++; + } + return 0; +} + +int +lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno) +{ + return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno, + lpfc_hbq_defs[qno]->add_count)); +} + +int +lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno) +{ + return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno, + lpfc_hbq_defs[qno]->init_count)); +} + +struct hbq_dmabuf * +lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag) +{ + struct lpfc_dmabuf *d_buf; + struct hbq_dmabuf *hbq_buf; + + list_for_each_entry(d_buf, &phba->hbq_buffer_list, list) { + hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); + if ((hbq_buf->tag & 0xffff) == tag) { + return hbq_buf; + } + } + lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT, + "%d:1803 Bad hbq tag. Data: x%x x%x\n", + phba->brd_no, tag, + lpfc_hbq_defs[tag >> 16]->buffer_count); + return NULL; +} + +void +lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *sp) +{ + uint32_t hbqno; + + if (sp) { + hbqno = sp->tag >> 16; + lpfc_sli_hbq_to_firmware(phba, hbqno, sp); + } } static int @@ -511,32 +726,38 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand) case MBX_FLASH_WR_ULA: case MBX_SET_DEBUG: case MBX_LOAD_EXP_ROM: + case MBX_REG_VPI: + case MBX_UNREG_VPI: + case MBX_HEARTBEAT: ret = mbxCommand; break; default: ret = MBX_SHUTDOWN; break; } - return (ret); + return ret; } static void -lpfc_sli_wake_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) +lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) { wait_queue_head_t *pdone_q; + unsigned long drvr_flag; /* * If pdone_q is empty, the driver thread gave up waiting and * continued running. */ pmboxq->mbox_flag |= LPFC_MBX_WAKE; + spin_lock_irqsave(&phba->hbalock, drvr_flag); pdone_q = (wait_queue_head_t *) pmboxq->context1; if (pdone_q) wake_up_interruptible(pdone_q); + spin_unlock_irqrestore(&phba->hbalock, drvr_flag); return; } void -lpfc_sli_def_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) +lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { struct lpfc_dmabuf *mp; uint16_t rpi; @@ -553,79 +774,64 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) * If a REG_LOGIN succeeded after node is destroyed or node * is in re-discovery driver need to cleanup the RPI. 
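lpfc_sli_hbqbuf_fill_hbqs() above tags each posted buffer with the buffer index in the low 16 bits and the HBQ number in the high 16 bits, and lpfc_sli_hbqbuf_find() masks the low half back out when matching. A tiny sketch of that encoding, using no driver types:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t hbq_tag(uint32_t hbqno, uint32_t index)
{
	return index | (hbqno << 16);   /* index in low 16 bits, queue number in high 16 */
}

int main(void)
{
	uint32_t tag = hbq_tag(0, 37);

	assert((tag & 0xffff) == 37);   /* what lpfc_sli_hbqbuf_find() compares */
	assert((tag >> 16) == 0);       /* which HBQ the buffer belongs to */
	printf("tag x%x -> hbq %u, buffer %u\n",
	       (unsigned)tag, (unsigned)(tag >> 16), (unsigned)(tag & 0xffff));
	return 0;
}

lpfc_sli_free_hbq() relies on the same high half (sp->tag >> 16) to decide which HBQ to repost a consumed buffer to.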
*/ - if (!(phba->fc_flag & FC_UNLOADING) && - (pmb->mb.mbxCommand == MBX_REG_LOGIN64) && - (!pmb->mb.mbxStatus)) { + if (!(phba->pport->load_flag & FC_UNLOADING) && + pmb->mb.mbxCommand == MBX_REG_LOGIN64 && + !pmb->mb.mbxStatus) { rpi = pmb->mb.un.varWords[0]; - lpfc_unreg_login(phba, rpi, pmb); - pmb->mbox_cmpl=lpfc_sli_def_mbox_cmpl; + lpfc_unreg_login(phba, pmb->mb.un.varRegLogin.vpi, rpi, pmb); + pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); if (rc != MBX_NOT_FINISHED) return; } - mempool_free( pmb, phba->mbox_mem_pool); + mempool_free(pmb, phba->mbox_mem_pool); return; } int -lpfc_sli_handle_mb_event(struct lpfc_hba * phba) +lpfc_sli_handle_mb_event(struct lpfc_hba *phba) { - MAILBOX_t *mbox; MAILBOX_t *pmbox; LPFC_MBOXQ_t *pmb; - struct lpfc_sli *psli; - int i, rc; - uint32_t process_next; - - psli = &phba->sli; - /* We should only get here if we are in SLI2 mode */ - if (!(phba->sli.sli_flag & LPFC_SLI2_ACTIVE)) { - return (1); - } + int rc; + LIST_HEAD(cmplq); phba->sli.slistat.mbox_event++; + /* Get all completed mailboxe buffers into the cmplq */ + spin_lock_irq(&phba->hbalock); + list_splice_init(&phba->sli.mboxq_cmpl, &cmplq); + spin_unlock_irq(&phba->hbalock); + /* Get a Mailbox buffer to setup mailbox commands for callback */ - if ((pmb = phba->sli.mbox_active)) { - pmbox = &pmb->mb; - mbox = &phba->slim2p->mbx; + do { + list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list); + if (pmb == NULL) + break; - /* First check out the status word */ - lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof (uint32_t)); + pmbox = &pmb->mb; - /* Sanity check to ensure the host owns the mailbox */ - if (pmbox->mbxOwner != OWN_HOST) { - /* Lets try for a while */ - for (i = 0; i < 10240; i++) { - /* First copy command data */ - lpfc_sli_pcimem_bcopy(mbox, pmbox, - sizeof (uint32_t)); - if (pmbox->mbxOwner == OWN_HOST) - goto mbout; + if (pmbox->mbxCommand != MBX_HEARTBEAT) { + if (pmb->vport) { + lpfc_debugfs_disc_trc(pmb->vport, + LPFC_DISC_TRC_MBOX_VPORT, + "MBOX cmpl vport: cmd:x%x mb:x%x x%x", + (uint32_t)pmbox->mbxCommand, + pmbox->un.varWords[0], + pmbox->un.varWords[1]); + } + else { + lpfc_debugfs_disc_trc(phba->pport, + LPFC_DISC_TRC_MBOX, + "MBOX cmpl: cmd:x%x mb:x%x x%x", + (uint32_t)pmbox->mbxCommand, + pmbox->un.varWords[0], + pmbox->un.varWords[1]); } - /* Stray Mailbox Interrupt, mbxCommand <cmd> mbxStatus - <status> */ - lpfc_printf_log(phba, - KERN_WARNING, - LOG_MBOX | LOG_SLI, - "%d:0304 Stray Mailbox Interrupt " - "mbxCommand x%x mbxStatus x%x\n", - phba->brd_no, - pmbox->mbxCommand, - pmbox->mbxStatus); - - spin_lock_irq(phba->host->host_lock); - phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE; - spin_unlock_irq(phba->host->host_lock); - return (1); } - mbout: - del_timer_sync(&phba->sli.mbox_tmo); - phba->work_hba_events &= ~WORKER_MBOX_TMO; - /* * It is a fatal error if unknown mbox command completion. */ @@ -633,51 +839,50 @@ lpfc_sli_handle_mb_event(struct lpfc_hba * phba) MBX_SHUTDOWN) { /* Unknow mailbox command compl */ - lpfc_printf_log(phba, - KERN_ERR, - LOG_MBOX | LOG_SLI, - "%d:0323 Unknown Mailbox command %x Cmpl\n", - phba->brd_no, - pmbox->mbxCommand); - phba->hba_state = LPFC_HBA_ERROR; + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "%d (%d):0323 Unknown Mailbox command " + "%x Cmpl\n", + phba->brd_no, + pmb->vport ? 
pmb->vport->vpi : 0, + pmbox->mbxCommand); + phba->link_state = LPFC_HBA_ERROR; phba->work_hs = HS_FFER3; lpfc_handle_eratt(phba); - return (0); + continue; } - phba->sli.mbox_active = NULL; if (pmbox->mbxStatus) { phba->sli.slistat.mbox_stat_err++; if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) { /* Mbox cmd cmpl error - RETRYing */ - lpfc_printf_log(phba, - KERN_INFO, - LOG_MBOX | LOG_SLI, - "%d:0305 Mbox cmd cmpl error - " - "RETRYing Data: x%x x%x x%x x%x\n", - phba->brd_no, - pmbox->mbxCommand, - pmbox->mbxStatus, - pmbox->un.varWords[0], - phba->hba_state); + lpfc_printf_log(phba, KERN_INFO, + LOG_MBOX | LOG_SLI, + "%d (%d):0305 Mbox cmd cmpl " + "error - RETRYing Data: x%x " + "x%x x%x x%x\n", + phba->brd_no, + pmb->vport ? pmb->vport->vpi :0, + pmbox->mbxCommand, + pmbox->mbxStatus, + pmbox->un.varWords[0], + pmb->vport->port_state); pmbox->mbxStatus = 0; pmbox->mbxOwner = OWN_HOST; - spin_lock_irq(phba->host->host_lock); + spin_lock_irq(&phba->hbalock); phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(&phba->hbalock); rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); if (rc == MBX_SUCCESS) - return (0); + continue; } } /* Mailbox cmd <cmd> Cmpl <cmpl> */ - lpfc_printf_log(phba, - KERN_INFO, - LOG_MBOX | LOG_SLI, - "%d:0307 Mailbox cmd x%x Cmpl x%p " + lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, + "%d (%d):0307 Mailbox cmd x%x Cmpl x%p " "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n", phba->brd_no, + pmb->vport ? pmb->vport->vpi : 0, pmbox->mbxCommand, pmb->mbox_cmpl, *((uint32_t *) pmbox), @@ -690,39 +895,35 @@ lpfc_sli_handle_mb_event(struct lpfc_hba * phba) pmbox->un.varWords[6], pmbox->un.varWords[7]); - if (pmb->mbox_cmpl) { - lpfc_sli_pcimem_bcopy(mbox, pmbox, MAILBOX_CMD_SIZE); + if (pmb->mbox_cmpl) pmb->mbox_cmpl(phba,pmb); - } - } - - - do { - process_next = 0; /* by default don't loop */ - spin_lock_irq(phba->host->host_lock); - phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; - - /* Process next mailbox command if there is one */ - if ((pmb = lpfc_mbox_get(phba))) { - spin_unlock_irq(phba->host->host_lock); - rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); - if (rc == MBX_NOT_FINISHED) { - pmb->mb.mbxStatus = MBX_NOT_FINISHED; - pmb->mbox_cmpl(phba,pmb); - process_next = 1; - continue; /* loop back */ - } - } else { - spin_unlock_irq(phba->host->host_lock); - /* Turn on IOCB processing */ - for (i = 0; i < phba->sli.num_rings; i++) - lpfc_sli_turn_on_ring(phba, i); - } - - } while (process_next); + } while (1); + return 0; +} - return (0); +static struct lpfc_dmabuf * +lpfc_sli_replace_hbqbuff(struct lpfc_hba *phba, uint32_t tag) +{ + struct hbq_dmabuf *hbq_entry, *new_hbq_entry; + + hbq_entry = lpfc_sli_hbqbuf_find(phba, tag); + if (hbq_entry == NULL) + return NULL; + list_del(&hbq_entry->dbuf.list); + new_hbq_entry = kmalloc(sizeof(struct hbq_dmabuf), GFP_ATOMIC); + if (new_hbq_entry == NULL) + return &hbq_entry->dbuf; + new_hbq_entry->dbuf = hbq_entry->dbuf; + new_hbq_entry->tag = -1; + hbq_entry->dbuf.virt = lpfc_hbq_alloc(phba, 0, &hbq_entry->dbuf.phys); + if (hbq_entry->dbuf.virt == NULL) { + kfree(new_hbq_entry); + return &hbq_entry->dbuf; + } + lpfc_sli_free_hbq(phba, hbq_entry); + return &new_hbq_entry->dbuf; } + static int lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, struct lpfc_iocbq *saveq) @@ -735,7 +936,9 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, match = 0; irsp = &(saveq->iocb); if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) 
- || (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX)) { + || (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) + || (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX) + || (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX)) { Rctl = FC_ELS_REQ; Type = FC_ELS_DATA; } else { @@ -747,13 +950,24 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, /* Firmware Workaround */ if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) && - (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX)) { + (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX || + irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) { Rctl = FC_ELS_REQ; Type = FC_ELS_DATA; w5p->hcsw.Rctl = Rctl; w5p->hcsw.Type = Type; } } + + if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { + if (irsp->ulpBdeCount != 0) + saveq->context2 = lpfc_sli_replace_hbqbuff(phba, + irsp->un.ulpWord[3]); + if (irsp->ulpBdeCount == 2) + saveq->context3 = lpfc_sli_replace_hbqbuff(phba, + irsp->un.ulpWord[15]); + } + /* unSolicited Responses */ if (pring->prt[0].profile) { if (pring->prt[0].lpfc_sli_rcv_unsol_event) @@ -781,23 +995,21 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, /* Unexpected Rctl / Type received */ /* Ring <ringno> handler: unexpected Rctl <Rctl> Type <Type> received */ - lpfc_printf_log(phba, - KERN_WARNING, - LOG_SLI, + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, "%d:0313 Ring %d handler: unexpected Rctl x%x " - "Type x%x received \n", + "Type x%x received\n", phba->brd_no, pring->ringno, Rctl, Type); } - return(1); + return 1; } static struct lpfc_iocbq * -lpfc_sli_iocbq_lookup(struct lpfc_hba * phba, - struct lpfc_sli_ring * pring, - struct lpfc_iocbq * prspiocb) +lpfc_sli_iocbq_lookup(struct lpfc_hba *phba, + struct lpfc_sli_ring *pring, + struct lpfc_iocbq *prspiocb) { struct lpfc_iocbq *cmd_iocb = NULL; uint16_t iotag; @@ -806,7 +1018,7 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba * phba, if (iotag != 0 && iotag <= phba->sli.last_iotag) { cmd_iocb = phba->sli.iocbq_lookup[iotag]; - list_del(&cmd_iocb->list); + list_del_init(&cmd_iocb->list); pring->txcmplq_cnt--; return cmd_iocb; } @@ -821,16 +1033,18 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba * phba, } static int -lpfc_sli_process_sol_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, +lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, struct lpfc_iocbq *saveq) { - struct lpfc_iocbq * cmdiocbp; + struct lpfc_iocbq *cmdiocbp; int rc = 1; unsigned long iflag; /* Based on the iotag field, get the cmd IOCB from the txcmplq */ - spin_lock_irqsave(phba->host->host_lock, iflag); + spin_lock_irqsave(&phba->hbalock, iflag); cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq); + spin_unlock_irqrestore(&phba->hbalock, iflag); + if (cmdiocbp) { if (cmdiocbp->iocb_cmpl) { /* @@ -846,17 +1060,8 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, saveq->iocb.un.ulpWord[4] = IOERR_SLI_ABORTED; } - spin_unlock_irqrestore(phba->host->host_lock, - iflag); - (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq); - spin_lock_irqsave(phba->host->host_lock, iflag); - } - else { - spin_unlock_irqrestore(phba->host->host_lock, - iflag); - (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq); - spin_lock_irqsave(phba->host->host_lock, iflag); } + (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq); } else lpfc_sli_release_iocbq(phba, cmdiocbp); } else { @@ -870,29 +1075,30 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, * Ring <ringno> handler: unexpected completion IoTag * <IoTag> */ - lpfc_printf_log(phba, - KERN_WARNING, - 
LOG_SLI, - "%d:0322 Ring %d handler: unexpected " - "completion IoTag x%x Data: x%x x%x x%x x%x\n", - phba->brd_no, - pring->ringno, - saveq->iocb.ulpIoTag, - saveq->iocb.ulpStatus, - saveq->iocb.un.ulpWord[4], - saveq->iocb.ulpCommand, - saveq->iocb.ulpContext); + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, + "%d (%d):0322 Ring %d handler: " + "unexpected completion IoTag x%x " + "Data: x%x x%x x%x x%x\n", + phba->brd_no, + cmdiocbp->vport->vpi, + pring->ringno, + saveq->iocb.ulpIoTag, + saveq->iocb.ulpStatus, + saveq->iocb.un.ulpWord[4], + saveq->iocb.ulpCommand, + saveq->iocb.ulpContext); } } - spin_unlock_irqrestore(phba->host->host_lock, iflag); return rc; } -static void lpfc_sli_rsp_pointers_error(struct lpfc_hba * phba, - struct lpfc_sli_ring * pring) +static void +lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) { - struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno]; + struct lpfc_pgp *pgp = (phba->sli_rev == 3) ? + &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] : + &phba->slim2p->mbx.us.s2.port[pring->ringno]; /* * Ring <ringno> handler: portRspPut <portRspPut> is bigger then * rsp ring <portRspMax> @@ -904,7 +1110,7 @@ static void lpfc_sli_rsp_pointers_error(struct lpfc_hba * phba, le32_to_cpu(pgp->rspPutInx), pring->numRiocb); - phba->hba_state = LPFC_HBA_ERROR; + phba->link_state = LPFC_HBA_ERROR; /* * All error attention handlers are posted to @@ -912,16 +1118,18 @@ static void lpfc_sli_rsp_pointers_error(struct lpfc_hba * phba, */ phba->work_ha |= HA_ERATT; phba->work_hs = HS_FFER3; + + /* hbalock should already be held */ if (phba->work_wait) - wake_up(phba->work_wait); + lpfc_worker_wake_up(phba); return; } -void lpfc_sli_poll_fcp_ring(struct lpfc_hba * phba) +void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba) { - struct lpfc_sli * psli = &phba->sli; - struct lpfc_sli_ring * pring = &psli->ring[LPFC_FCP_RING]; + struct lpfc_sli *psli = &phba->sli; + struct lpfc_sli_ring *pring = &psli->ring[LPFC_FCP_RING]; IOCB_t *irsp = NULL; IOCB_t *entry = NULL; struct lpfc_iocbq *cmdiocbq = NULL; @@ -931,13 +1139,15 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba * phba) uint32_t portRspPut, portRspMax; int type; uint32_t rsp_cmpl = 0; - void __iomem *to_slim; uint32_t ha_copy; + unsigned long iflags; pring->stats.iocb_event++; - /* The driver assumes SLI-2 mode */ - pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno]; + pgp = (phba->sli_rev == 3) ? 
+ &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] : + &phba->slim2p->mbx.us.s2.port[pring->ringno]; + /* * The next available response entry should never exceed the maximum @@ -952,15 +1162,13 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba * phba) rmb(); while (pring->rspidx != portRspPut) { - - entry = IOCB_ENTRY(pring->rspringaddr, pring->rspidx); - + entry = lpfc_resp_iocb(phba, pring); if (++pring->rspidx >= portRspMax) pring->rspidx = 0; lpfc_sli_pcimem_bcopy((uint32_t *) entry, (uint32_t *) &rspiocbq.iocb, - sizeof (IOCB_t)); + phba->iocb_rsp_size); irsp = &rspiocbq.iocb; type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK); pring->stats.iocb_rsp++; @@ -998,8 +1206,10 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba * phba) break; } + spin_lock_irqsave(&phba->hbalock, iflags); cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring, &rspiocbq); + spin_unlock_irqrestore(&phba->hbalock, iflags); if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) { (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &rspiocbq); @@ -1033,9 +1243,7 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba * phba) * been updated, sync the pgp->rspPutInx and fetch the new port * response put pointer. */ - to_slim = phba->MBslimaddr + - (SLIMOFF + (pring->ringno * 2) + 1) * 4; - writeb(pring->rspidx, to_slim); + writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx); if (pring->rspidx == portRspPut) portRspPut = le32_to_cpu(pgp->rspPutInx); @@ -1045,13 +1253,16 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba * phba) ha_copy >>= (LPFC_FCP_RING * 4); if ((rsp_cmpl > 0) && (ha_copy & HA_R0RE_REQ)) { + spin_lock_irqsave(&phba->hbalock, iflags); pring->stats.iocb_rsp_full++; status = ((CA_R0ATT | CA_R0RE_RSP) << (LPFC_FCP_RING * 4)); writel(status, phba->CAregaddr); readl(phba->CAregaddr); + spin_unlock_irqrestore(&phba->hbalock, iflags); } if ((ha_copy & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) { + spin_lock_irqsave(&phba->hbalock, iflags); pring->flag &= ~LPFC_CALL_RING_AVAILABLE; pring->stats.iocb_cmd_empty++; @@ -1062,6 +1273,7 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba * phba) if ((pring->lpfc_sli_cmd_available)) (pring->lpfc_sli_cmd_available) (phba, pring); + spin_unlock_irqrestore(&phba->hbalock, iflags); } return; @@ -1072,10 +1284,12 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba * phba) * to check it explicitly. */ static int -lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba, - struct lpfc_sli_ring * pring, uint32_t mask) +lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, + struct lpfc_sli_ring *pring, uint32_t mask) { - struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno]; + struct lpfc_pgp *pgp = (phba->sli_rev == 3) ? 
+ &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] : + &phba->slim2p->mbx.us.s2.port[pring->ringno]; IOCB_t *irsp = NULL; IOCB_t *entry = NULL; struct lpfc_iocbq *cmdiocbq = NULL; @@ -1086,9 +1300,8 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba, lpfc_iocb_type type; unsigned long iflag; uint32_t rsp_cmpl = 0; - void __iomem *to_slim; - spin_lock_irqsave(phba->host->host_lock, iflag); + spin_lock_irqsave(&phba->hbalock, iflag); pring->stats.iocb_event++; /* @@ -1099,7 +1312,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba, portRspPut = le32_to_cpu(pgp->rspPutInx); if (unlikely(portRspPut >= portRspMax)) { lpfc_sli_rsp_pointers_error(phba, pring); - spin_unlock_irqrestore(phba->host->host_lock, iflag); + spin_unlock_irqrestore(&phba->hbalock, iflag); return 1; } @@ -1110,14 +1323,15 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba, * structure. The copy involves a byte-swap since the * network byte order and pci byte orders are different. */ - entry = IOCB_ENTRY(pring->rspringaddr, pring->rspidx); + entry = lpfc_resp_iocb(phba, pring); + phba->last_completion_time = jiffies; if (++pring->rspidx >= portRspMax) pring->rspidx = 0; lpfc_sli_pcimem_bcopy((uint32_t *) entry, (uint32_t *) &rspiocbq.iocb, - sizeof (IOCB_t)); + phba->iocb_rsp_size); INIT_LIST_HEAD(&(rspiocbq.list)); irsp = &rspiocbq.iocb; @@ -1126,16 +1340,30 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba, rsp_cmpl++; if (unlikely(irsp->ulpStatus)) { + /* + * If resource errors reported from HBA, reduce + * queuedepths of the SCSI device. + */ + if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && + (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) { + spin_unlock_irqrestore(&phba->hbalock, iflag); + lpfc_adjust_queue_depth(phba); + spin_lock_irqsave(&phba->hbalock, iflag); + } + /* Rsp ring <ringno> error: IOCB */ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, - "%d:0336 Rsp Ring %d error: IOCB Data: " - "x%x x%x x%x x%x x%x x%x x%x x%x\n", - phba->brd_no, pring->ringno, - irsp->un.ulpWord[0], irsp->un.ulpWord[1], - irsp->un.ulpWord[2], irsp->un.ulpWord[3], - irsp->un.ulpWord[4], irsp->un.ulpWord[5], - *(((uint32_t *) irsp) + 6), - *(((uint32_t *) irsp) + 7)); + "%d:0336 Rsp Ring %d error: IOCB Data: " + "x%x x%x x%x x%x x%x x%x x%x x%x\n", + phba->brd_no, pring->ringno, + irsp->un.ulpWord[0], + irsp->un.ulpWord[1], + irsp->un.ulpWord[2], + irsp->un.ulpWord[3], + irsp->un.ulpWord[4], + irsp->un.ulpWord[5], + *(((uint32_t *) irsp) + 6), + *(((uint32_t *) irsp) + 7)); } switch (type) { @@ -1149,7 +1377,8 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba, lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "%d:0333 IOCB cmd 0x%x" " processed. 
Skipping" - " completion\n", phba->brd_no, + " completion\n", + phba->brd_no, irsp->ulpCommand); break; } @@ -1161,19 +1390,19 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba, (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &rspiocbq); } else { - spin_unlock_irqrestore( - phba->host->host_lock, iflag); + spin_unlock_irqrestore(&phba->hbalock, + iflag); (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &rspiocbq); - spin_lock_irqsave(phba->host->host_lock, + spin_lock_irqsave(&phba->hbalock, iflag); } } break; case LPFC_UNSOL_IOCB: - spin_unlock_irqrestore(phba->host->host_lock, iflag); + spin_unlock_irqrestore(&phba->hbalock, iflag); lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq); - spin_lock_irqsave(phba->host->host_lock, iflag); + spin_lock_irqsave(&phba->hbalock, iflag); break; default: if (irsp->ulpCommand == CMD_ADAPTER_MSG) { @@ -1186,11 +1415,13 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba, } else { /* Unknown IOCB command */ lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "%d:0334 Unknown IOCB command " - "Data: x%x, x%x x%x x%x x%x\n", - phba->brd_no, type, irsp->ulpCommand, - irsp->ulpStatus, irsp->ulpIoTag, - irsp->ulpContext); + "%d:0334 Unknown IOCB command " + "Data: x%x, x%x x%x x%x x%x\n", + phba->brd_no, type, + irsp->ulpCommand, + irsp->ulpStatus, + irsp->ulpIoTag, + irsp->ulpContext); } break; } @@ -1201,9 +1432,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba, * been updated, sync the pgp->rspPutInx and fetch the new port * response put pointer. */ - to_slim = phba->MBslimaddr + - (SLIMOFF + (pring->ringno * 2) + 1) * 4; - writel(pring->rspidx, to_slim); + writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx); if (pring->rspidx == portRspPut) portRspPut = le32_to_cpu(pgp->rspPutInx); @@ -1228,31 +1457,31 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba, } - spin_unlock_irqrestore(phba->host->host_lock, iflag); + spin_unlock_irqrestore(&phba->hbalock, iflag); return rc; } - int -lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba, - struct lpfc_sli_ring * pring, uint32_t mask) +lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba, + struct lpfc_sli_ring *pring, uint32_t mask) { + struct lpfc_pgp *pgp = (phba->sli_rev == 3) ? 
+ &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] : + &phba->slim2p->mbx.us.s2.port[pring->ringno]; IOCB_t *entry; IOCB_t *irsp = NULL; struct lpfc_iocbq *rspiocbp = NULL; struct lpfc_iocbq *next_iocb; struct lpfc_iocbq *cmdiocbp; struct lpfc_iocbq *saveq; - struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno]; uint8_t iocb_cmd_type; lpfc_iocb_type type; uint32_t status, free_saveq; uint32_t portRspPut, portRspMax; int rc = 1; unsigned long iflag; - void __iomem *to_slim; - spin_lock_irqsave(phba->host->host_lock, iflag); + spin_lock_irqsave(&phba->hbalock, iflag); pring->stats.iocb_event++; /* @@ -1266,16 +1495,14 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba, * Ring <ringno> handler: portRspPut <portRspPut> is bigger then * rsp ring <portRspMax> */ - lpfc_printf_log(phba, - KERN_ERR, - LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "%d:0303 Ring %d handler: portRspPut %d " "is bigger then rsp ring %d\n", - phba->brd_no, - pring->ringno, portRspPut, portRspMax); + phba->brd_no, pring->ringno, portRspPut, + portRspMax); - phba->hba_state = LPFC_HBA_ERROR; - spin_unlock_irqrestore(phba->host->host_lock, iflag); + phba->link_state = LPFC_HBA_ERROR; + spin_unlock_irqrestore(&phba->hbalock, iflag); phba->work_hs = HS_FFER3; lpfc_handle_eratt(phba); @@ -1298,23 +1525,24 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba, * the ulpLe field is set, the entire Command has been * received. */ - entry = IOCB_ENTRY(pring->rspringaddr, pring->rspidx); - rspiocbp = lpfc_sli_get_iocbq(phba); + entry = lpfc_resp_iocb(phba, pring); + + phba->last_completion_time = jiffies; + rspiocbp = __lpfc_sli_get_iocbq(phba); if (rspiocbp == NULL) { printk(KERN_ERR "%s: out of buffers! Failing " "completion.\n", __FUNCTION__); break; } - lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb, sizeof (IOCB_t)); + lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb, + phba->iocb_rsp_size); irsp = &rspiocbp->iocb; if (++pring->rspidx >= portRspMax) pring->rspidx = 0; - to_slim = phba->MBslimaddr + (SLIMOFF + (pring->ringno * 2) - + 1) * 4; - writel(pring->rspidx, to_slim); + writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx); if (list_empty(&(pring->iocb_continueq))) { list_add(&rspiocbp->list, &(pring->iocb_continueq)); @@ -1338,23 +1566,44 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba, pring->stats.iocb_rsp++; + /* + * If resource errors reported from HBA, reduce + * queuedepths of the SCSI device. 
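A minimal sketch of the lock handling used by both response-ring handlers above: lpfc_adjust_queue_depth() is called with hbalock dropped and the lock is re-taken before the loop continues. handle_resource_error() and its pointer-to-flags calling convention are illustrative, not driver code.

static void
handle_resource_error(struct lpfc_hba *phba, IOCB_t *irsp, unsigned long *iflag)
{
        /* Only a local reject for lack of resources triggers ramp-down */
        if (irsp->ulpStatus != IOSTAT_LOCAL_REJECT ||
            irsp->un.ulpWord[4] != IOERR_NO_RESOURCES)
                return;

        /* Caller holds hbalock; release it around the ramp-down call,
         * which may take other locks, then re-acquire it. */
        spin_unlock_irqrestore(&phba->hbalock, *iflag);
        lpfc_adjust_queue_depth(phba);
        spin_lock_irqsave(&phba->hbalock, *iflag);
}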
+ */ + if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && + (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) { + spin_unlock_irqrestore(&phba->hbalock, iflag); + lpfc_adjust_queue_depth(phba); + spin_lock_irqsave(&phba->hbalock, iflag); + } + if (irsp->ulpStatus) { /* Rsp ring <ringno> error: IOCB */ - lpfc_printf_log(phba, - KERN_WARNING, - LOG_SLI, - "%d:0328 Rsp Ring %d error: IOCB Data: " - "x%x x%x x%x x%x x%x x%x x%x x%x\n", - phba->brd_no, - pring->ringno, - irsp->un.ulpWord[0], - irsp->un.ulpWord[1], - irsp->un.ulpWord[2], - irsp->un.ulpWord[3], - irsp->un.ulpWord[4], - irsp->un.ulpWord[5], - *(((uint32_t *) irsp) + 6), - *(((uint32_t *) irsp) + 7)); + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, + "%d:0328 Rsp Ring %d error: " + "IOCB Data: " + "x%x x%x x%x x%x " + "x%x x%x x%x x%x " + "x%x x%x x%x x%x " + "x%x x%x x%x x%x\n", + phba->brd_no, + pring->ringno, + irsp->un.ulpWord[0], + irsp->un.ulpWord[1], + irsp->un.ulpWord[2], + irsp->un.ulpWord[3], + irsp->un.ulpWord[4], + irsp->un.ulpWord[5], + *(((uint32_t *) irsp) + 6), + *(((uint32_t *) irsp) + 7), + *(((uint32_t *) irsp) + 8), + *(((uint32_t *) irsp) + 9), + *(((uint32_t *) irsp) + 10), + *(((uint32_t *) irsp) + 11), + *(((uint32_t *) irsp) + 12), + *(((uint32_t *) irsp) + 13), + *(((uint32_t *) irsp) + 14), + *(((uint32_t *) irsp) + 15)); } /* @@ -1366,17 +1615,17 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba, iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK; type = lpfc_sli_iocb_cmd_type(iocb_cmd_type); if (type == LPFC_SOL_IOCB) { - spin_unlock_irqrestore(phba->host->host_lock, + spin_unlock_irqrestore(&phba->hbalock, iflag); rc = lpfc_sli_process_sol_iocb(phba, pring, - saveq); - spin_lock_irqsave(phba->host->host_lock, iflag); + saveq); + spin_lock_irqsave(&phba->hbalock, iflag); } else if (type == LPFC_UNSOL_IOCB) { - spin_unlock_irqrestore(phba->host->host_lock, + spin_unlock_irqrestore(&phba->hbalock, iflag); rc = lpfc_sli_process_unsol_iocb(phba, pring, - saveq); - spin_lock_irqsave(phba->host->host_lock, iflag); + saveq); + spin_lock_irqsave(&phba->hbalock, iflag); } else if (type == LPFC_ABORT_IOCB) { if ((irsp->ulpCommand != CMD_XRI_ABORTED_CX) && ((cmdiocbp = @@ -1386,15 +1635,15 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba, routine */ if (cmdiocbp->iocb_cmpl) { spin_unlock_irqrestore( - phba->host->host_lock, + &phba->hbalock, iflag); (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq); spin_lock_irqsave( - phba->host->host_lock, + &phba->hbalock, iflag); } else - lpfc_sli_release_iocbq(phba, + __lpfc_sli_release_iocbq(phba, cmdiocbp); } } else if (type == LPFC_UNKNOWN_IOCB) { @@ -1411,32 +1660,28 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba, phba->brd_no, adaptermsg); } else { /* Unknown IOCB command */ - lpfc_printf_log(phba, - KERN_ERR, - LOG_SLI, - "%d:0335 Unknown IOCB command " - "Data: x%x x%x x%x x%x\n", - phba->brd_no, - irsp->ulpCommand, - irsp->ulpStatus, - irsp->ulpIoTag, - irsp->ulpContext); + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "%d:0335 Unknown IOCB " + "command Data: x%x " + "x%x x%x x%x\n", + phba->brd_no, + irsp->ulpCommand, + irsp->ulpStatus, + irsp->ulpIoTag, + irsp->ulpContext); } } if (free_saveq) { - if (!list_empty(&saveq->list)) { - list_for_each_entry_safe(rspiocbp, - next_iocb, - &saveq->list, - list) { - list_del(&rspiocbp->list); - lpfc_sli_release_iocbq(phba, - rspiocbp); - } + list_for_each_entry_safe(rspiocbp, next_iocb, + &saveq->list, list) { + list_del(&rspiocbp->list); + __lpfc_sli_release_iocbq(phba, + rspiocbp); } - 
lpfc_sli_release_iocbq(phba, saveq); + __lpfc_sli_release_iocbq(phba, saveq); } + rspiocbp = NULL; } /* @@ -1449,7 +1694,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba, } } /* while (pring->rspidx != portRspPut) */ - if ((rspiocbp != 0) && (mask & HA_R0RE_REQ)) { + if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) { /* At least one response entry has been freed */ pring->stats.iocb_rsp_full++; /* SET RxRE_RSP in Chip Att register */ @@ -1470,24 +1715,25 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba, } - spin_unlock_irqrestore(phba->host->host_lock, iflag); + spin_unlock_irqrestore(&phba->hbalock, iflag); return rc; } -int +void lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) { LIST_HEAD(completions); struct lpfc_iocbq *iocb, *next_iocb; IOCB_t *cmd = NULL; - int errcnt; - errcnt = 0; + if (pring->ringno == LPFC_ELS_RING) { + lpfc_fabric_abort_hba(phba); + } /* Error everything on txq and txcmplq * First do the txq. */ - spin_lock_irq(phba->host->host_lock); + spin_lock_irq(&phba->hbalock); list_splice_init(&pring->txq, &completions); pring->txq_cnt = 0; @@ -1495,26 +1741,25 @@ lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) lpfc_sli_issue_abort_iotag(phba, pring, iocb); - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(&phba->hbalock); while (!list_empty(&completions)) { iocb = list_get_first(&completions, struct lpfc_iocbq, list); cmd = &iocb->iocb; - list_del(&iocb->list); + list_del_init(&iocb->list); - if (iocb->iocb_cmpl) { + if (!iocb->iocb_cmpl) + lpfc_sli_release_iocbq(phba, iocb); + else { cmd->ulpStatus = IOSTAT_LOCAL_REJECT; cmd->un.ulpWord[4] = IOERR_SLI_ABORTED; (iocb->iocb_cmpl) (phba, iocb, iocb); - } else - lpfc_sli_release_iocbq(phba, iocb); + } } - - return errcnt; } int -lpfc_sli_brdready(struct lpfc_hba * phba, uint32_t mask) +lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask) { uint32_t status; int i = 0; @@ -1541,7 +1786,8 @@ lpfc_sli_brdready(struct lpfc_hba * phba, uint32_t mask) msleep(2500); if (i == 15) { - phba->hba_state = LPFC_STATE_UNKNOWN; /* Do post */ + /* Do post */ + phba->pport->port_state = LPFC_VPORT_UNKNOWN; lpfc_sli_brdrestart(phba); } /* Read the HBA Host Status Register */ @@ -1550,7 +1796,7 @@ lpfc_sli_brdready(struct lpfc_hba * phba, uint32_t mask) /* Check to see if any errors occurred during init */ if ((status & HS_FFERM) || (i >= 20)) { - phba->hba_state = LPFC_HBA_ERROR; + phba->link_state = LPFC_HBA_ERROR; retval = 1; } @@ -1559,7 +1805,7 @@ lpfc_sli_brdready(struct lpfc_hba * phba, uint32_t mask) #define BARRIER_TEST_PATTERN (0xdeadbeef) -void lpfc_reset_barrier(struct lpfc_hba * phba) +void lpfc_reset_barrier(struct lpfc_hba *phba) { uint32_t __iomem *resp_buf; uint32_t __iomem *mbox_buf; @@ -1584,12 +1830,12 @@ void lpfc_reset_barrier(struct lpfc_hba * phba) hc_copy = readl(phba->HCregaddr); writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr); readl(phba->HCregaddr); /* flush */ - phba->fc_flag |= FC_IGNORE_ERATT; + phba->link_flag |= LS_IGNORE_ERATT; if (readl(phba->HAregaddr) & HA_ERATT) { /* Clear Chip error bit */ writel(HA_ERATT, phba->HAregaddr); - phba->stopped = 1; + phba->pport->stopped = 1; } mbox = 0; @@ -1606,7 +1852,7 @@ void lpfc_reset_barrier(struct lpfc_hba * phba) if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) { if (phba->sli.sli_flag & LPFC_SLI2_ACTIVE || - phba->stopped) + phba->pport->stopped) goto restore_hc; else goto clear_errat; @@ -1623,17 +1869,17 
@@ clear_errat: if (readl(phba->HAregaddr) & HA_ERATT) { writel(HA_ERATT, phba->HAregaddr); - phba->stopped = 1; + phba->pport->stopped = 1; } restore_hc: - phba->fc_flag &= ~FC_IGNORE_ERATT; + phba->link_flag &= ~LS_IGNORE_ERATT; writel(hc_copy, phba->HCregaddr); readl(phba->HCregaddr); /* flush */ } int -lpfc_sli_brdkill(struct lpfc_hba * phba) +lpfc_sli_brdkill(struct lpfc_hba *phba) { struct lpfc_sli *psli; LPFC_MBOXQ_t *pmb; @@ -1645,26 +1891,22 @@ lpfc_sli_brdkill(struct lpfc_hba * phba) psli = &phba->sli; /* Kill HBA */ - lpfc_printf_log(phba, - KERN_INFO, - LOG_SLI, - "%d:0329 Kill HBA Data: x%x x%x\n", - phba->brd_no, - phba->hba_state, - psli->sli_flag); + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "%d:0329 Kill HBA Data: x%x x%x\n", + phba->brd_no, phba->pport->port_state, psli->sli_flag); if ((pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) == 0) return 1; /* Disable the error attention */ - spin_lock_irq(phba->host->host_lock); + spin_lock_irq(&phba->hbalock); status = readl(phba->HCregaddr); status &= ~HC_ERINT_ENA; writel(status, phba->HCregaddr); readl(phba->HCregaddr); /* flush */ - phba->fc_flag |= FC_IGNORE_ERATT; - spin_unlock_irq(phba->host->host_lock); + phba->link_flag |= LS_IGNORE_ERATT; + spin_unlock_irq(&phba->hbalock); lpfc_kill_board(phba, pmb); pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; @@ -1673,9 +1915,9 @@ lpfc_sli_brdkill(struct lpfc_hba * phba) if (retval != MBX_SUCCESS) { if (retval != MBX_BUSY) mempool_free(pmb, phba->mbox_mem_pool); - spin_lock_irq(phba->host->host_lock); - phba->fc_flag &= ~FC_IGNORE_ERATT; - spin_unlock_irq(phba->host->host_lock); + spin_lock_irq(&phba->hbalock); + phba->link_flag &= ~LS_IGNORE_ERATT; + spin_unlock_irq(&phba->hbalock); return 1; } @@ -1698,22 +1940,22 @@ lpfc_sli_brdkill(struct lpfc_hba * phba) del_timer_sync(&psli->mbox_tmo); if (ha_copy & HA_ERATT) { writel(HA_ERATT, phba->HAregaddr); - phba->stopped = 1; + phba->pport->stopped = 1; } - spin_lock_irq(phba->host->host_lock); + spin_lock_irq(&phba->hbalock); psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; - phba->fc_flag &= ~FC_IGNORE_ERATT; - spin_unlock_irq(phba->host->host_lock); + phba->link_flag &= ~LS_IGNORE_ERATT; + spin_unlock_irq(&phba->hbalock); psli->mbox_active = NULL; lpfc_hba_down_post(phba); - phba->hba_state = LPFC_HBA_ERROR; + phba->link_state = LPFC_HBA_ERROR; - return (ha_copy & HA_ERATT ? 0 : 1); + return ha_copy & HA_ERATT ? 
0 : 1; } int -lpfc_sli_brdreset(struct lpfc_hba * phba) +lpfc_sli_brdreset(struct lpfc_hba *phba) { struct lpfc_sli *psli; struct lpfc_sli_ring *pring; @@ -1725,12 +1967,12 @@ lpfc_sli_brdreset(struct lpfc_hba * phba) /* Reset HBA */ lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "%d:0325 Reset HBA Data: x%x x%x\n", phba->brd_no, - phba->hba_state, psli->sli_flag); + phba->pport->port_state, psli->sli_flag); /* perform board reset */ phba->fc_eventTag = 0; - phba->fc_myDID = 0; - phba->fc_prevDID = 0; + phba->pport->fc_myDID = 0; + phba->pport->fc_prevDID = 0; /* Turn off parity checking and serr during the physical reset */ pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value); @@ -1760,12 +2002,12 @@ lpfc_sli_brdreset(struct lpfc_hba * phba) pring->missbufcnt = 0; } - phba->hba_state = LPFC_WARM_START; + phba->link_state = LPFC_WARM_START; return 0; } int -lpfc_sli_brdrestart(struct lpfc_hba * phba) +lpfc_sli_brdrestart(struct lpfc_hba *phba) { MAILBOX_t *mb; struct lpfc_sli *psli; @@ -1773,14 +2015,14 @@ lpfc_sli_brdrestart(struct lpfc_hba * phba) volatile uint32_t word0; void __iomem *to_slim; - spin_lock_irq(phba->host->host_lock); + spin_lock_irq(&phba->hbalock); psli = &phba->sli; /* Restart HBA */ lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "%d:0337 Restart HBA Data: x%x x%x\n", phba->brd_no, - phba->hba_state, psli->sli_flag); + phba->pport->port_state, psli->sli_flag); word0 = 0; mb = (MAILBOX_t *) &word0; @@ -1794,7 +2036,7 @@ lpfc_sli_brdrestart(struct lpfc_hba * phba) readl(to_slim); /* flush */ /* Only skip post after fc_ffinit is completed */ - if (phba->hba_state) { + if (phba->pport->port_state) { skip_post = 1; word0 = 1; /* This is really setting up word1 */ } else { @@ -1806,10 +2048,10 @@ lpfc_sli_brdrestart(struct lpfc_hba * phba) readl(to_slim); /* flush */ lpfc_sli_brdreset(phba); - phba->stopped = 0; - phba->hba_state = LPFC_INIT_START; + phba->pport->stopped = 0; + phba->link_state = LPFC_INIT_START; - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(&phba->hbalock); memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); psli->stats_start = get_seconds(); @@ -1843,14 +2085,11 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba) if (i++ >= 20) { /* Adapter failed to init, timeout, status reg <status> */ - lpfc_printf_log(phba, - KERN_ERR, - LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "%d:0436 Adapter failed to init, " "timeout, status reg x%x\n", - phba->brd_no, - status); - phba->hba_state = LPFC_HBA_ERROR; + phba->brd_no, status); + phba->link_state = LPFC_HBA_ERROR; return -ETIMEDOUT; } @@ -1859,14 +2098,12 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba) /* ERROR: During chipset initialization */ /* Adapter failed to init, chipset, status reg <status> */ - lpfc_printf_log(phba, - KERN_ERR, - LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "%d:0437 Adapter failed to init, " "chipset, status reg x%x\n", phba->brd_no, status); - phba->hba_state = LPFC_HBA_ERROR; + phba->link_state = LPFC_HBA_ERROR; return -EIO; } @@ -1879,7 +2116,8 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba) } if (i == 15) { - phba->hba_state = LPFC_STATE_UNKNOWN; /* Do post */ + /* Do post */ + phba->pport->port_state = LPFC_VPORT_UNKNOWN; lpfc_sli_brdrestart(phba); } /* Read the HBA Host Status Register */ @@ -1890,14 +2128,12 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba) if (status & HS_FFERM) { /* ERROR: During chipset initialization */ /* Adapter failed to init, chipset, status reg <status> */ - lpfc_printf_log(phba, - KERN_ERR, - LOG_INIT, + 
lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "%d:0438 Adapter failed to init, chipset, " "status reg x%x\n", phba->brd_no, status); - phba->hba_state = LPFC_HBA_ERROR; + phba->link_state = LPFC_HBA_ERROR; return -EIO; } @@ -1911,80 +2147,253 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba) return 0; } +static int +lpfc_sli_hbq_count(void) +{ + return ARRAY_SIZE(lpfc_hbq_defs); +} + +static int +lpfc_sli_hbq_entry_count(void) +{ + int hbq_count = lpfc_sli_hbq_count(); + int count = 0; + int i; + + for (i = 0; i < hbq_count; ++i) + count += lpfc_hbq_defs[i]->entry_count; + return count; +} + int -lpfc_sli_hba_setup(struct lpfc_hba * phba) +lpfc_sli_hbq_size(void) +{ + return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry); +} + +static int +lpfc_sli_hbq_setup(struct lpfc_hba *phba) +{ + int hbq_count = lpfc_sli_hbq_count(); + LPFC_MBOXQ_t *pmb; + MAILBOX_t *pmbox; + uint32_t hbqno; + uint32_t hbq_entry_index; + + /* Get a Mailbox buffer to setup mailbox + * commands for HBA initialization + */ + pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + + if (!pmb) + return -ENOMEM; + + pmbox = &pmb->mb; + + /* Initialize the struct lpfc_sli_hbq structure for each hbq */ + phba->link_state = LPFC_INIT_MBX_CMDS; + + hbq_entry_index = 0; + for (hbqno = 0; hbqno < hbq_count; ++hbqno) { + phba->hbqs[hbqno].next_hbqPutIdx = 0; + phba->hbqs[hbqno].hbqPutIdx = 0; + phba->hbqs[hbqno].local_hbqGetIdx = 0; + phba->hbqs[hbqno].entry_count = + lpfc_hbq_defs[hbqno]->entry_count; + lpfc_config_hbq(phba, lpfc_hbq_defs[hbqno], hbq_entry_index, + pmb); + hbq_entry_index += phba->hbqs[hbqno].entry_count; + + if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { + /* Adapter failed to init, mbxCmd <cmd> CFG_RING, + mbxStatus <status>, ring <num> */ + + lpfc_printf_log(phba, KERN_ERR, + LOG_SLI | LOG_VPORT, + "%d:1805 Adapter failed to init. " + "Data: x%x x%x x%x\n", + phba->brd_no, pmbox->mbxCommand, + pmbox->mbxStatus, hbqno); + + phba->link_state = LPFC_HBA_ERROR; + mempool_free(pmb, phba->mbox_mem_pool); + return ENXIO; + } + } + phba->hbq_count = hbq_count; + + mempool_free(pmb, phba->mbox_mem_pool); + + /* Initially populate or replenish the HBQs */ + for (hbqno = 0; hbqno < hbq_count; ++hbqno) { + if (lpfc_sli_hbqbuf_init_hbqs(phba, hbqno)) + return -ENOMEM; + } + return 0; +} + +static int +lpfc_do_config_port(struct lpfc_hba *phba, int sli_mode) { LPFC_MBOXQ_t *pmb; uint32_t resetcount = 0, rc = 0, done = 0; pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!pmb) { - phba->hba_state = LPFC_HBA_ERROR; + phba->link_state = LPFC_HBA_ERROR; return -ENOMEM; } + phba->sli_rev = sli_mode; while (resetcount < 2 && !done) { - spin_lock_irq(phba->host->host_lock); + spin_lock_irq(&phba->hbalock); phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE; - spin_unlock_irq(phba->host->host_lock); - phba->hba_state = LPFC_STATE_UNKNOWN; + spin_unlock_irq(&phba->hbalock); + phba->pport->port_state = LPFC_VPORT_UNKNOWN; lpfc_sli_brdrestart(phba); msleep(2500); rc = lpfc_sli_chipset_init(phba); if (rc) break; - spin_lock_irq(phba->host->host_lock); + spin_lock_irq(&phba->hbalock); phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(&phba->hbalock); resetcount++; - /* Call pre CONFIG_PORT mailbox command initialization. A value of 0 - * means the call was successful. Any other nonzero value is a failure, - * but if ERESTART is returned, the driver may reset the HBA and try - * again. 
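The setup path above resets the HBA and retries CONFIG_PORT at most twice, treating -ERESTART from the pre-CONFIG_PORT step as "reset and try again". A trimmed skeleton of that control flow, assuming issue_config_port() stands in for the lpfc_config_port()/lpfc_sli_issue_mbox() pair, with the sli_flag locking omitted:

static int
config_port_with_retry(struct lpfc_hba *phba)           /* illustrative */
{
        uint32_t resetcount = 0, done = 0;
        int rc = 0;

        while (resetcount < 2 && !done) {
                phba->pport->port_state = LPFC_VPORT_UNKNOWN;
                lpfc_sli_brdrestart(phba);
                msleep(2500);
                rc = lpfc_sli_chipset_init(phba);
                if (rc)
                        break;
                resetcount++;

                rc = lpfc_config_port_prep(phba);
                if (rc == -ERESTART) {
                        phba->link_state = LPFC_LINK_UNKNOWN;
                        continue;               /* reset and retry once */
                } else if (rc)
                        break;

                if (issue_config_port(phba) == MBX_SUCCESS)     /* hypothetical */
                        done = 1;
        }
        return done ? 0 : -EINVAL;
}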
- */ + /* Call pre CONFIG_PORT mailbox command initialization. A + * value of 0 means the call was successful. Any other + * nonzero value is a failure, but if ERESTART is returned, + * the driver may reset the HBA and try again. + */ rc = lpfc_config_port_prep(phba); if (rc == -ERESTART) { - phba->hba_state = 0; + phba->link_state = LPFC_LINK_UNKNOWN; continue; } else if (rc) { break; } - phba->hba_state = LPFC_INIT_MBX_CMDS; + phba->link_state = LPFC_INIT_MBX_CMDS; lpfc_config_port(phba, pmb); rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); - if (rc == MBX_SUCCESS) - done = 1; - else { + if (rc != MBX_SUCCESS) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "%d:0442 Adapter failed to init, mbxCmd x%x " "CONFIG_PORT, mbxStatus x%x Data: x%x\n", phba->brd_no, pmb->mb.mbxCommand, pmb->mb.mbxStatus, 0); + spin_lock_irq(&phba->hbalock); phba->sli.sli_flag &= ~LPFC_SLI2_ACTIVE; + spin_unlock_irq(&phba->hbalock); + rc = -ENXIO; + } else { + done = 1; + phba->max_vpi = (phba->max_vpi && + pmb->mb.un.varCfgPort.gmv) != 0 + ? pmb->mb.un.varCfgPort.max_vpi + : 0; + } + } + + if (!done) { + rc = -EINVAL; + goto do_prep_failed; + } + + if ((pmb->mb.un.varCfgPort.sli_mode == 3) && + (!pmb->mb.un.varCfgPort.cMA)) { + rc = -ENXIO; + goto do_prep_failed; + } + return rc; + +do_prep_failed: + mempool_free(pmb, phba->mbox_mem_pool); + return rc; +} + +int +lpfc_sli_hba_setup(struct lpfc_hba *phba) +{ + uint32_t rc; + int mode = 3; + + switch (lpfc_sli_mode) { + case 2: + if (phba->cfg_npiv_enable) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, + "%d:1824 NPIV enabled: Override lpfc_sli_mode " + "parameter (%d) to auto (0).\n", + phba->brd_no, lpfc_sli_mode); + break; } + mode = 2; + break; + case 0: + case 3: + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, + "%d:1819 Unrecognized lpfc_sli_mode " + "parameter: %d.\n", + phba->brd_no, lpfc_sli_mode); + + break; } - if (!done) + + rc = lpfc_do_config_port(phba, mode); + if (rc && lpfc_sli_mode == 3) + lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, + "%d:1820 Unable to select SLI-3. " + "Not supported by adapter.\n", + phba->brd_no); + if (rc && mode != 2) + rc = lpfc_do_config_port(phba, 2); + if (rc) goto lpfc_sli_hba_setup_error; - rc = lpfc_sli_ring_map(phba, pmb); + if (phba->sli_rev == 3) { + phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE; + phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE; + phba->sli3_options |= LPFC_SLI3_ENABLED; + phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED; + + } else { + phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE; + phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE; + phba->sli3_options = 0; + } + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "%d:0444 Firmware in SLI %x mode. 
Max_vpi %d\n", + phba->brd_no, phba->sli_rev, phba->max_vpi); + rc = lpfc_sli_ring_map(phba); if (rc) goto lpfc_sli_hba_setup_error; + /* Init HBQs */ + + if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { + rc = lpfc_sli_hbq_setup(phba); + if (rc) + goto lpfc_sli_hba_setup_error; + } + phba->sli.sli_flag |= LPFC_PROCESS_LA; rc = lpfc_config_port_post(phba); if (rc) goto lpfc_sli_hba_setup_error; - goto lpfc_sli_hba_setup_exit; + return rc; + lpfc_sli_hba_setup_error: - phba->hba_state = LPFC_HBA_ERROR; -lpfc_sli_hba_setup_exit: - mempool_free(pmb, phba->mbox_mem_pool); + phba->link_state = LPFC_HBA_ERROR; + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "%d:0445 Firmware initialization failed\n", + phba->brd_no); return rc; } @@ -2004,56 +2413,58 @@ lpfc_sli_hba_setup_exit: void lpfc_mbox_timeout(unsigned long ptr) { - struct lpfc_hba *phba; + struct lpfc_hba *phba = (struct lpfc_hba *) ptr; unsigned long iflag; + uint32_t tmo_posted; - phba = (struct lpfc_hba *)ptr; - spin_lock_irqsave(phba->host->host_lock, iflag); - if (!(phba->work_hba_events & WORKER_MBOX_TMO)) { - phba->work_hba_events |= WORKER_MBOX_TMO; + spin_lock_irqsave(&phba->pport->work_port_lock, iflag); + tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO; + if (!tmo_posted) + phba->pport->work_port_events |= WORKER_MBOX_TMO; + spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); + + if (!tmo_posted) { + spin_lock_irqsave(&phba->hbalock, iflag); if (phba->work_wait) - wake_up(phba->work_wait); + lpfc_worker_wake_up(phba); + spin_unlock_irqrestore(&phba->hbalock, iflag); } - spin_unlock_irqrestore(phba->host->host_lock, iflag); } void lpfc_mbox_timeout_handler(struct lpfc_hba *phba) { - LPFC_MBOXQ_t *pmbox; - MAILBOX_t *mb; + LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active; + MAILBOX_t *mb = &pmbox->mb; struct lpfc_sli *psli = &phba->sli; struct lpfc_sli_ring *pring; - spin_lock_irq(phba->host->host_lock); - if (!(phba->work_hba_events & WORKER_MBOX_TMO)) { - spin_unlock_irq(phba->host->host_lock); + if (!(phba->pport->work_port_events & WORKER_MBOX_TMO)) { return; } - pmbox = phba->sli.mbox_active; - mb = &pmbox->mb; - /* Mbox cmd <mbxCommand> timeout */ - lpfc_printf_log(phba, - KERN_ERR, - LOG_MBOX | LOG_SLI, - "%d:0310 Mailbox command x%x timeout Data: x%x x%x x%p\n", - phba->brd_no, - mb->mbxCommand, - phba->hba_state, - phba->sli.sli_flag, - phba->sli.mbox_active); + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "%d:0310 Mailbox command x%x timeout Data: x%x x%x " + "x%p\n", + phba->brd_no, + mb->mbxCommand, + phba->pport->port_state, + phba->sli.sli_flag, + phba->sli.mbox_active); /* Setting state unknown so lpfc_sli_abort_iocb_ring * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing * it to fail all oustanding SCSI IO. 
*/ - phba->hba_state = LPFC_STATE_UNKNOWN; - phba->work_hba_events &= ~WORKER_MBOX_TMO; - phba->fc_flag |= FC_ESTABLISH_LINK; + spin_lock_irq(&phba->pport->work_port_lock); + phba->pport->work_port_events &= ~WORKER_MBOX_TMO; + spin_unlock_irq(&phba->pport->work_port_lock); + spin_lock_irq(&phba->hbalock); + phba->link_state = LPFC_LINK_UNKNOWN; + phba->pport->fc_flag |= FC_ESTABLISH_LINK; psli->sli_flag &= ~LPFC_SLI2_ACTIVE; - spin_unlock_irq(phba->host->host_lock); + spin_unlock_irq(&phba->hbalock); pring = &psli->ring[psli->fcp_ring]; lpfc_sli_abort_iocb_ring(phba, pring); @@ -2075,10 +2486,10 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba) } int -lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag) +lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) { MAILBOX_t *mb; - struct lpfc_sli *psli; + struct lpfc_sli *psli = &phba->sli; uint32_t status, evtctr; uint32_t ha_copy; int i; @@ -2086,31 +2497,44 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag) volatile uint32_t word0, ldata; void __iomem *to_slim; + if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl && + pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) { + if(!pmbox->vport) { + lpfc_printf_log(phba, KERN_ERR, + LOG_MBOX | LOG_VPORT, + "%d:1806 Mbox x%x failed. No vport\n", + phba->brd_no, + pmbox->mb.mbxCommand); + dump_stack(); + return MBXERR_ERROR; + } + } + + /* If the PCI channel is in offline state, do not post mbox. */ if (unlikely(pci_channel_offline(phba->pcidev))) return MBX_NOT_FINISHED; + spin_lock_irqsave(&phba->hbalock, drvr_flag); psli = &phba->sli; - spin_lock_irqsave(phba->host->host_lock, drvr_flag); - mb = &pmbox->mb; status = MBX_SUCCESS; - if (phba->hba_state == LPFC_HBA_ERROR) { - spin_unlock_irqrestore(phba->host->host_lock, drvr_flag); + if (phba->link_state == LPFC_HBA_ERROR) { + spin_unlock_irqrestore(&phba->hbalock, drvr_flag); /* Mbox command <mbxCommand> cannot issue */ - LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag) - return (MBX_NOT_FINISHED); + LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag) + return MBX_NOT_FINISHED; } if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT && !(readl(phba->HCregaddr) & HC_MBINT_ENA)) { - spin_unlock_irqrestore(phba->host->host_lock, drvr_flag); - LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag) - return (MBX_NOT_FINISHED); + spin_unlock_irqrestore(&phba->hbalock, drvr_flag); + LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag) + return MBX_NOT_FINISHED; } if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { @@ -2120,20 +2544,18 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag) */ if (flag & MBX_POLL) { - spin_unlock_irqrestore(phba->host->host_lock, - drvr_flag); + spin_unlock_irqrestore(&phba->hbalock, drvr_flag); /* Mbox command <mbxCommand> cannot issue */ - LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag) - return (MBX_NOT_FINISHED); + LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); + return MBX_NOT_FINISHED; } if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) { - spin_unlock_irqrestore(phba->host->host_lock, - drvr_flag); + spin_unlock_irqrestore(&phba->hbalock, drvr_flag); /* Mbox command <mbxCommand> cannot issue */ - LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag) - return (MBX_NOT_FINISHED); + LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); + return MBX_NOT_FINISHED; } /* Handle STOP IOCB processing flag. 
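With the vport checks added to lpfc_sli_issue_mbox() above, any mailbox that carries its own completion handler must also carry a vport pointer, otherwise the command is rejected with the 1806 log and MBXERR_ERROR. A hedged caller-side sketch; my_mbox_cmpl() and the surrounding context are illustrative:

        pmb->mbox_cmpl = my_mbox_cmpl;          /* hypothetical handler */
        pmb->vport = vport;                     /* required for a non-default cmpl */
        rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
        if (rc == MBX_NOT_FINISHED)
                mempool_free(pmb, phba->mbox_mem_pool);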
This is only meaningful @@ -2157,21 +2579,33 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag) lpfc_mbox_put(phba, pmbox); /* Mbox cmd issue - BUSY */ - lpfc_printf_log(phba, - KERN_INFO, - LOG_MBOX | LOG_SLI, - "%d:0308 Mbox cmd issue - BUSY Data: x%x x%x x%x x%x\n", - phba->brd_no, - mb->mbxCommand, - phba->hba_state, - psli->sli_flag, - flag); + lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, + "%d (%d):0308 Mbox cmd issue - BUSY Data: " + "x%x x%x x%x x%x\n", + phba->brd_no, + pmbox->vport ? pmbox->vport->vpi : 0xffffff, + mb->mbxCommand, phba->pport->port_state, + psli->sli_flag, flag); psli->slistat.mbox_busy++; - spin_unlock_irqrestore(phba->host->host_lock, - drvr_flag); + spin_unlock_irqrestore(&phba->hbalock, drvr_flag); + + if (pmbox->vport) { + lpfc_debugfs_disc_trc(pmbox->vport, + LPFC_DISC_TRC_MBOX_VPORT, + "MBOX Bsy vport: cmd:x%x mb:x%x x%x", + (uint32_t)mb->mbxCommand, + mb->un.varWords[0], mb->un.varWords[1]); + } + else { + lpfc_debugfs_disc_trc(phba->pport, + LPFC_DISC_TRC_MBOX, + "MBOX Bsy: cmd:x%x mb:x%x x%x", + (uint32_t)mb->mbxCommand, + mb->un.varWords[0], mb->un.varWords[1]); + } - return (MBX_BUSY); + return MBX_BUSY; } /* Handle STOP IOCB processing flag. This is only meaningful @@ -2198,11 +2632,10 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag) if (!(psli->sli_flag & LPFC_SLI2_ACTIVE) && (mb->mbxCommand != MBX_KILL_BOARD)) { psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; - spin_unlock_irqrestore(phba->host->host_lock, - drvr_flag); + spin_unlock_irqrestore(&phba->hbalock, drvr_flag); /* Mbox command <mbxCommand> cannot issue */ - LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag); - return (MBX_NOT_FINISHED); + LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); + return MBX_NOT_FINISHED; } /* timeout active mbox command */ mod_timer(&psli->mbox_tmo, (jiffies + @@ -2210,15 +2643,29 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag) } /* Mailbox cmd <cmd> issue */ - lpfc_printf_log(phba, - KERN_INFO, - LOG_MBOX | LOG_SLI, - "%d:0309 Mailbox cmd x%x issue Data: x%x x%x x%x\n", - phba->brd_no, - mb->mbxCommand, - phba->hba_state, - psli->sli_flag, - flag); + lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, + "%d (%d):0309 Mailbox cmd x%x issue Data: x%x x%x " + "x%x\n", + phba->brd_no, pmbox->vport ? 
pmbox->vport->vpi : 0, + mb->mbxCommand, phba->pport->port_state, + psli->sli_flag, flag); + + if (mb->mbxCommand != MBX_HEARTBEAT) { + if (pmbox->vport) { + lpfc_debugfs_disc_trc(pmbox->vport, + LPFC_DISC_TRC_MBOX_VPORT, + "MBOX Send vport: cmd:x%x mb:x%x x%x", + (uint32_t)mb->mbxCommand, + mb->un.varWords[0], mb->un.varWords[1]); + } + else { + lpfc_debugfs_disc_trc(phba->pport, + LPFC_DISC_TRC_MBOX, + "MBOX Send: cmd:x%x mb:x%x x%x", + (uint32_t)mb->mbxCommand, + mb->un.varWords[0], mb->un.varWords[1]); + } + } psli->slistat.mbox_cmd++; evtctr = psli->slistat.mbox_event; @@ -2233,7 +2680,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag) if (mb->mbxCommand == MBX_CONFIG_PORT) { /* copy command data into host mbox for cmpl */ lpfc_sli_pcimem_bcopy(mb, &phba->slim2p->mbx, - MAILBOX_CMD_SIZE); + MAILBOX_CMD_SIZE); } /* First copy mbox command data to HBA SLIM, skip past first @@ -2285,12 +2732,12 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag) /* Wait for command to complete */ while (((word0 & OWN_CHIP) == OWN_CHIP) || (!(ha_copy & HA_MBATT) && - (phba->hba_state > LPFC_WARM_START))) { + (phba->link_state > LPFC_WARM_START))) { if (i-- <= 0) { psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; - spin_unlock_irqrestore(phba->host->host_lock, + spin_unlock_irqrestore(&phba->hbalock, drvr_flag); - return (MBX_NOT_FINISHED); + return MBX_NOT_FINISHED; } /* Check if we took a mbox interrupt while we were @@ -2299,12 +2746,12 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag) && (evtctr != psli->slistat.mbox_event)) break; - spin_unlock_irqrestore(phba->host->host_lock, + spin_unlock_irqrestore(&phba->hbalock, drvr_flag); msleep(1); - spin_lock_irqsave(phba->host->host_lock, drvr_flag); + spin_lock_irqsave(&phba->hbalock, drvr_flag); if (psli->sli_flag & LPFC_SLI2_ACTIVE) { /* First copy command data */ @@ -2335,7 +2782,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag) if (psli->sli_flag & LPFC_SLI2_ACTIVE) { /* copy results back to user */ lpfc_sli_pcimem_bcopy(&phba->slim2p->mbx, mb, - MAILBOX_CMD_SIZE); + MAILBOX_CMD_SIZE); } else { /* First copy command data */ lpfc_memcpy_from_slim(mb, phba->MBslimaddr, @@ -2355,23 +2802,25 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag) status = mb->mbxStatus; } - spin_unlock_irqrestore(phba->host->host_lock, drvr_flag); - return (status); + spin_unlock_irqrestore(&phba->hbalock, drvr_flag); + return status; } -static int -lpfc_sli_ringtx_put(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, - struct lpfc_iocbq * piocb) +/* + * Caller needs to hold lock. + */ +static void +__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, + struct lpfc_iocbq *piocb) { /* Insert the caller's iocb in the txq tail for later processing. */ list_add_tail(&piocb->list, &pring->txq); pring->txq_cnt++; - return (0); } static struct lpfc_iocbq * lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, - struct lpfc_iocbq ** piocb) + struct lpfc_iocbq **piocb) { struct lpfc_iocbq * nextiocb; @@ -2384,13 +2833,29 @@ lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, return nextiocb; } +/* + * Lockless version of lpfc_sli_issue_iocb. 
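The double-underscore variants used throughout this patch (__lpfc_sli_get_iocbq, __lpfc_sli_release_iocbq, __lpfc_sli_issue_iocb) expect the caller to already hold hbalock, while the un-prefixed wrappers take the lock themselves. A small usage sketch, with queue_two_iocbs() purely illustrative, showing how a caller can batch work under one lock acquisition:

static int
queue_two_iocbs(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                struct lpfc_iocbq *a, struct lpfc_iocbq *b)
{
        unsigned long iflags;
        int rc;

        spin_lock_irqsave(&phba->hbalock, iflags);
        rc = __lpfc_sli_issue_iocb(phba, pring, a, 0);
        if (rc == IOCB_SUCCESS)
                rc = __lpfc_sli_issue_iocb(phba, pring, b, 0);
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        return rc;
}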
+ */ int -lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, +__lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, struct lpfc_iocbq *piocb, uint32_t flag) { struct lpfc_iocbq *nextiocb; IOCB_t *iocb; + if (piocb->iocb_cmpl && (!piocb->vport) && + (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && + (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) { + lpfc_printf_log(phba, KERN_ERR, + LOG_SLI | LOG_VPORT, + "%d:1807 IOCB x%x failed. No vport\n", + phba->brd_no, + piocb->iocb.ulpCommand); + dump_stack(); + return IOCB_ERROR; + } + + /* If the PCI channel is in offline state, do not post iocbs. */ if (unlikely(pci_channel_offline(phba->pcidev))) return IOCB_ERROR; @@ -2398,7 +2863,7 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, /* * We should never get an IOCB if we are in a < LINK_DOWN state */ - if (unlikely(phba->hba_state < LPFC_LINK_DOWN)) + if (unlikely(phba->link_state < LPFC_LINK_DOWN)) return IOCB_ERROR; /* @@ -2408,7 +2873,7 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, if (unlikely(pring->flag & LPFC_STOP_IOCB_MBX)) goto iocb_busy; - if (unlikely(phba->hba_state == LPFC_LINK_DOWN)) { + if (unlikely(phba->link_state == LPFC_LINK_DOWN)) { /* * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF * can be issued if the link is not up. @@ -2436,8 +2901,9 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, * attention events. */ } else if (unlikely(pring->ringno == phba->sli.fcp_ring && - !(phba->sli.sli_flag & LPFC_PROCESS_LA))) + !(phba->sli.sli_flag & LPFC_PROCESS_LA))) { goto iocb_busy; + } while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) && (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb))) @@ -2459,13 +2925,28 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, out_busy: if (!(flag & SLI_IOCB_RET_IOCB)) { - lpfc_sli_ringtx_put(phba, pring, piocb); + __lpfc_sli_ringtx_put(phba, pring, piocb); return IOCB_SUCCESS; } return IOCB_BUSY; } + +int +lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, + struct lpfc_iocbq *piocb, uint32_t flag) +{ + unsigned long iflags; + int rc; + + spin_lock_irqsave(&phba->hbalock, iflags); + rc = __lpfc_sli_issue_iocb(phba, pring, piocb, flag); + spin_unlock_irqrestore(&phba->hbalock, iflags); + + return rc; +} + static int lpfc_extra_ring_setup( struct lpfc_hba *phba) { @@ -2504,7 +2985,7 @@ lpfc_extra_ring_setup( struct lpfc_hba *phba) int lpfc_sli_setup(struct lpfc_hba *phba) { - int i, totiocb = 0; + int i, totiocbsize = 0; struct lpfc_sli *psli = &phba->sli; struct lpfc_sli_ring *pring; @@ -2529,6 +3010,12 @@ lpfc_sli_setup(struct lpfc_hba *phba) pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES; pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES; pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES; + pring->sizeCiocb = (phba->sli_rev == 3) ? + SLI3_IOCB_CMD_SIZE : + SLI2_IOCB_CMD_SIZE; + pring->sizeRiocb = (phba->sli_rev == 3) ? + SLI3_IOCB_RSP_SIZE : + SLI2_IOCB_RSP_SIZE; pring->iotag_ctr = 0; pring->iotag_max = (phba->cfg_hba_queue_depth * 2); @@ -2539,12 +3026,25 @@ lpfc_sli_setup(struct lpfc_hba *phba) /* numCiocb and numRiocb are used in config_port */ pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES; pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES; + pring->sizeCiocb = (phba->sli_rev == 3) ? + SLI3_IOCB_CMD_SIZE : + SLI2_IOCB_CMD_SIZE; + pring->sizeRiocb = (phba->sli_rev == 3) ? 
+ SLI3_IOCB_RSP_SIZE : + SLI2_IOCB_RSP_SIZE; + pring->iotag_max = phba->cfg_hba_queue_depth; pring->num_mask = 0; break; case LPFC_ELS_RING: /* ring 2 - ELS / CT */ /* numCiocb and numRiocb are used in config_port */ pring->numCiocb = SLI2_IOCB_CMD_R2_ENTRIES; pring->numRiocb = SLI2_IOCB_RSP_R2_ENTRIES; + pring->sizeCiocb = (phba->sli_rev == 3) ? + SLI3_IOCB_CMD_SIZE : + SLI2_IOCB_CMD_SIZE; + pring->sizeRiocb = (phba->sli_rev == 3) ? + SLI3_IOCB_RSP_SIZE : + SLI2_IOCB_RSP_SIZE; pring->fast_iotag = 0; pring->iotag_ctr = 0; pring->iotag_max = 4096; @@ -2575,14 +3075,16 @@ lpfc_sli_setup(struct lpfc_hba *phba) lpfc_ct_unsol_event; break; } - totiocb += (pring->numCiocb + pring->numRiocb); + totiocbsize += (pring->numCiocb * pring->sizeCiocb) + + (pring->numRiocb * pring->sizeRiocb); } - if (totiocb > MAX_SLI2_IOCB) { + if (totiocbsize > MAX_SLIM_IOCB_SIZE) { /* Too many cmd / rsp ring entries in SLI2 SLIM */ lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "%d:0462 Too many cmd / rsp ring entries in " - "SLI2 SLIM Data: x%x x%x\n", - phba->brd_no, totiocb, MAX_SLI2_IOCB); + "SLI2 SLIM Data: x%x x%lx\n", + phba->brd_no, totiocbsize, + (unsigned long) MAX_SLIM_IOCB_SIZE); } if (phba->cfg_multi_ring_support == 2) lpfc_extra_ring_setup(phba); @@ -2591,15 +3093,16 @@ lpfc_sli_setup(struct lpfc_hba *phba) } int -lpfc_sli_queue_setup(struct lpfc_hba * phba) +lpfc_sli_queue_setup(struct lpfc_hba *phba) { struct lpfc_sli *psli; struct lpfc_sli_ring *pring; int i; psli = &phba->sli; - spin_lock_irq(phba->host->host_lock); + spin_lock_irq(&phba->hbalock); INIT_LIST_HEAD(&psli->mboxq); + INIT_LIST_HEAD(&psli->mboxq_cmpl); /* Initialize list headers for txq and txcmplq as double linked lists */ for (i = 0; i < psli->num_rings; i++) { pring = &psli->ring[i]; @@ -2612,15 +3115,73 @@ lpfc_sli_queue_setup(struct lpfc_hba * phba) INIT_LIST_HEAD(&pring->iocb_continueq); INIT_LIST_HEAD(&pring->postbufq); } - spin_unlock_irq(phba->host->host_lock); - return (1); + spin_unlock_irq(&phba->hbalock); + return 1; } int -lpfc_sli_hba_down(struct lpfc_hba * phba) +lpfc_sli_host_down(struct lpfc_vport *vport) { LIST_HEAD(completions); - struct lpfc_sli *psli; + struct lpfc_hba *phba = vport->phba; + struct lpfc_sli *psli = &phba->sli; + struct lpfc_sli_ring *pring; + struct lpfc_iocbq *iocb, *next_iocb; + int i; + unsigned long flags = 0; + uint16_t prev_pring_flag; + + lpfc_cleanup_discovery_resources(vport); + + spin_lock_irqsave(&phba->hbalock, flags); + for (i = 0; i < psli->num_rings; i++) { + pring = &psli->ring[i]; + prev_pring_flag = pring->flag; + if (pring->ringno == LPFC_ELS_RING) /* Only slow rings */ + pring->flag |= LPFC_DEFERRED_RING_EVENT; + /* + * Error everything on the txq since these iocbs have not been + * given to the FW yet. 
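lpfc_sli_host_down() here and lpfc_sli_hba_down() below share one flush idiom: move the pending iocbs to a private list while holding hbalock, then complete them with IOSTAT_LOCAL_REJECT/IOERR_SLI_DOWN after the lock is dropped, so completion handlers never run under hbalock. A minimal sketch of that idiom; flush_ring_txq() is an illustrative name and the driver's list_remove_head() loop is replaced by an equivalent list_for_each_entry_safe():

static void
flush_ring_txq(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        LIST_HEAD(completions);
        struct lpfc_iocbq *iocb, *next_iocb;
        unsigned long flags;

        spin_lock_irqsave(&phba->hbalock, flags);
        list_splice_init(&pring->txq, &completions);
        pring->txq_cnt = 0;
        spin_unlock_irqrestore(&phba->hbalock, flags);

        list_for_each_entry_safe(iocb, next_iocb, &completions, list) {
                list_del_init(&iocb->list);
                if (!iocb->iocb_cmpl) {
                        lpfc_sli_release_iocbq(phba, iocb);
                } else {
                        iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
                        iocb->iocb.un.ulpWord[4] = IOERR_SLI_DOWN;
                        (iocb->iocb_cmpl)(phba, iocb, iocb);
                }
        }
}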
+ */ + list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { + if (iocb->vport != vport) + continue; + list_move_tail(&iocb->list, &completions); + pring->txq_cnt--; + } + + /* Next issue ABTS for everything on the txcmplq */ + list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, + list) { + if (iocb->vport != vport) + continue; + lpfc_sli_issue_abort_iotag(phba, pring, iocb); + } + + pring->flag = prev_pring_flag; + } + + spin_unlock_irqrestore(&phba->hbalock, flags); + + while (!list_empty(&completions)) { + list_remove_head(&completions, iocb, struct lpfc_iocbq, list); + + if (!iocb->iocb_cmpl) + lpfc_sli_release_iocbq(phba, iocb); + else { + iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT; + iocb->iocb.un.ulpWord[4] = IOERR_SLI_DOWN; + (iocb->iocb_cmpl) (phba, iocb, iocb); + } + } + return 1; +} + +int +lpfc_sli_hba_down(struct lpfc_hba *phba) +{ + LIST_HEAD(completions); + struct lpfc_sli *psli = &phba->sli; struct lpfc_sli_ring *pring; LPFC_MBOXQ_t *pmb; struct lpfc_iocbq *iocb; @@ -2628,13 +3189,15 @@ lpfc_sli_hba_down(struct lpfc_hba * phba) int i; unsigned long flags = 0; - psli = &phba->sli; lpfc_hba_down_prep(phba); - spin_lock_irqsave(phba->host->host_lock, flags); + lpfc_fabric_abort_hba(phba); + + spin_lock_irqsave(&phba->hbalock, flags); for (i = 0; i < psli->num_rings; i++) { pring = &psli->ring[i]; - pring->flag |= LPFC_DEFERRED_RING_EVENT; + if (pring->ringno == LPFC_ELS_RING) /* Only slow rings */ + pring->flag |= LPFC_DEFERRED_RING_EVENT; /* * Error everything on the txq since these iocbs have not been @@ -2644,51 +3207,50 @@ lpfc_sli_hba_down(struct lpfc_hba * phba) pring->txq_cnt = 0; } - spin_unlock_irqrestore(phba->host->host_lock, flags); + spin_unlock_irqrestore(&phba->hbalock, flags); while (!list_empty(&completions)) { - iocb = list_get_first(&completions, struct lpfc_iocbq, list); + list_remove_head(&completions, iocb, struct lpfc_iocbq, list); cmd = &iocb->iocb; - list_del(&iocb->list); - if (iocb->iocb_cmpl) { + if (!iocb->iocb_cmpl) + lpfc_sli_release_iocbq(phba, iocb); + else { cmd->ulpStatus = IOSTAT_LOCAL_REJECT; cmd->un.ulpWord[4] = IOERR_SLI_DOWN; (iocb->iocb_cmpl) (phba, iocb, iocb); - } else - lpfc_sli_release_iocbq(phba, iocb); + } } /* Return any active mbox cmds */ del_timer_sync(&psli->mbox_tmo); - spin_lock_irqsave(phba->host->host_lock, flags); - phba->work_hba_events &= ~WORKER_MBOX_TMO; + spin_lock_irqsave(&phba->hbalock, flags); + + spin_lock(&phba->pport->work_port_lock); + phba->pport->work_port_events &= ~WORKER_MBOX_TMO; + spin_unlock(&phba->pport->work_port_lock); + if (psli->mbox_active) { - pmb = psli->mbox_active; - pmb->mb.mbxStatus = MBX_NOT_FINISHED; - if (pmb->mbox_cmpl) { - spin_unlock_irqrestore(phba->host->host_lock, flags); - pmb->mbox_cmpl(phba,pmb); - spin_lock_irqsave(phba->host->host_lock, flags); - } + list_add_tail(&psli->mbox_active->list, &completions); + psli->mbox_active = NULL; + psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; } - psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; - psli->mbox_active = NULL; - /* Return any pending mbox cmds */ - while ((pmb = lpfc_mbox_get(phba)) != NULL) { + /* Return any pending or completed mbox cmds */ + list_splice_init(&phba->sli.mboxq, &completions); + list_splice_init(&phba->sli.mboxq_cmpl, &completions); + INIT_LIST_HEAD(&psli->mboxq); + INIT_LIST_HEAD(&psli->mboxq_cmpl); + + spin_unlock_irqrestore(&phba->hbalock, flags); + + while (!list_empty(&completions)) { + list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list); pmb->mb.mbxStatus = MBX_NOT_FINISHED; if (pmb->mbox_cmpl) { - 
spin_unlock_irqrestore(phba->host->host_lock, flags); pmb->mbox_cmpl(phba,pmb); - spin_lock_irqsave(phba->host->host_lock, flags); } } - - INIT_LIST_HEAD(&psli->mboxq); - - spin_unlock_irqrestore(phba->host->host_lock, flags); - return 1; } @@ -2710,14 +3272,15 @@ lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt) } int -lpfc_sli_ringpostbuf_put(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, - struct lpfc_dmabuf * mp) +lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, + struct lpfc_dmabuf *mp) { /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up later */ + spin_lock_irq(&phba->hbalock); list_add_tail(&mp->list, &pring->postbufq); - pring->postbufq_cnt++; + spin_unlock_irq(&phba->hbalock); return 0; } @@ -2730,14 +3293,17 @@ lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, struct list_head *slp = &pring->postbufq; /* Search postbufq, from the begining, looking for a match on phys */ + spin_lock_irq(&phba->hbalock); list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { if (mp->phys == phys) { list_del_init(&mp->list); pring->postbufq_cnt--; + spin_unlock_irq(&phba->hbalock); return mp; } } + spin_unlock_irq(&phba->hbalock); lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "%d:0410 Cannot find virtual addr for mapped buf on " "ring %d Data x%llx x%p x%p x%x\n", @@ -2747,92 +3313,110 @@ lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, } static void -lpfc_sli_abort_els_cmpl(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, - struct lpfc_iocbq * rspiocb) +lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) { - IOCB_t *irsp; + IOCB_t *irsp = &rspiocb->iocb; uint16_t abort_iotag, abort_context; - struct lpfc_iocbq *abort_iocb, *rsp_ab_iocb; + struct lpfc_iocbq *abort_iocb; struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; abort_iocb = NULL; - irsp = &rspiocb->iocb; - - spin_lock_irq(phba->host->host_lock); if (irsp->ulpStatus) { abort_context = cmdiocb->iocb.un.acxri.abortContextTag; abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag; + spin_lock_irq(&phba->hbalock); if (abort_iotag != 0 && abort_iotag <= phba->sli.last_iotag) abort_iocb = phba->sli.iocbq_lookup[abort_iotag]; - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "%d:0327 Cannot abort els iocb %p" - " with tag %x context %x\n", - phba->brd_no, abort_iocb, - abort_iotag, abort_context); + lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_SLI, + "%d:0327 Cannot abort els iocb %p " + "with tag %x context %x, abort status %x, " + "abort code %x\n", + phba->brd_no, abort_iocb, abort_iotag, + abort_context, irsp->ulpStatus, + irsp->un.ulpWord[4]); /* * make sure we have the right iocbq before taking it * off the txcmplq and try to call completion routine. 
*/ - if (abort_iocb && - abort_iocb->iocb.ulpContext == abort_context && - abort_iocb->iocb_flag & LPFC_DRIVER_ABORTED) { - list_del(&abort_iocb->list); + if (!abort_iocb || + abort_iocb->iocb.ulpContext != abort_context || + (abort_iocb->iocb_flag & LPFC_DRIVER_ABORTED) == 0) + spin_unlock_irq(&phba->hbalock); + else { + list_del_init(&abort_iocb->list); pring->txcmplq_cnt--; + spin_unlock_irq(&phba->hbalock); - rsp_ab_iocb = lpfc_sli_get_iocbq(phba); - if (rsp_ab_iocb == NULL) - lpfc_sli_release_iocbq(phba, abort_iocb); - else { - abort_iocb->iocb_flag &= - ~LPFC_DRIVER_ABORTED; - rsp_ab_iocb->iocb.ulpStatus = - IOSTAT_LOCAL_REJECT; - rsp_ab_iocb->iocb.un.ulpWord[4] = - IOERR_SLI_ABORTED; - spin_unlock_irq(phba->host->host_lock); - (abort_iocb->iocb_cmpl) - (phba, abort_iocb, rsp_ab_iocb); - spin_lock_irq(phba->host->host_lock); - lpfc_sli_release_iocbq(phba, rsp_ab_iocb); - } + abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED; + abort_iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT; + abort_iocb->iocb.un.ulpWord[4] = IOERR_SLI_ABORTED; + (abort_iocb->iocb_cmpl)(phba, abort_iocb, abort_iocb); } } lpfc_sli_release_iocbq(phba, cmdiocb); - spin_unlock_irq(phba->host->host_lock); + return; +} + +static void +lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) +{ + IOCB_t *irsp = &rspiocb->iocb; + + /* ELS cmd tag <ulpIoTag> completes */ + lpfc_printf_log(phba, KERN_INFO, LOG_ELS, + "%d (X):0133 Ignoring ELS cmd tag x%x completion Data: " + "x%x x%x x%x\n", + phba->brd_no, irsp->ulpIoTag, irsp->ulpStatus, + irsp->un.ulpWord[4], irsp->ulpTimeout); + if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) + lpfc_ct_free_iocb(phba, cmdiocb); + else + lpfc_els_free_iocb(phba, cmdiocb); return; } int -lpfc_sli_issue_abort_iotag(struct lpfc_hba * phba, - struct lpfc_sli_ring * pring, - struct lpfc_iocbq * cmdiocb) +lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, + struct lpfc_iocbq *cmdiocb) { + struct lpfc_vport *vport = cmdiocb->vport; struct lpfc_iocbq *abtsiocbp; IOCB_t *icmd = NULL; IOCB_t *iabt = NULL; int retval = IOCB_ERROR; - /* There are certain command types we don't want - * to abort. + /* + * There are certain command types we don't want to abort. And we + * don't want to abort commands that are already in the process of + * being aborted. */ icmd = &cmdiocb->iocb; - if ((icmd->ulpCommand == CMD_ABORT_XRI_CN) || - (icmd->ulpCommand == CMD_CLOSE_XRI_CN)) + if (icmd->ulpCommand == CMD_ABORT_XRI_CN || + icmd->ulpCommand == CMD_CLOSE_XRI_CN || + (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0) return 0; - /* If we're unloading, interrupts are disabled so we - * need to cleanup the iocb here. + /* If we're unloading, don't abort iocb on the ELS ring, but change the + * callback so that nothing happens when it finishes. 
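A condensed sketch of the abort construction in lpfc_sli_issue_abort_iotag(): allocate the ABTS iocb with the lockless helper (hbalock is held by the caller), describe the XRI being aborted, pick ABORT vs CLOSE based on link state, and return the issue status so the caller handles IOCB_ERROR itself. The acxri field assignments are assumed by analogy with the companion lpfc_sli_abort_iocb() routine; logging and the unload-time callback swap are trimmed.

        abtsiocbp = __lpfc_sli_get_iocbq(phba);
        if (abtsiocbp == NULL)
                return 0;

        iabt = &abtsiocbp->iocb;
        iabt->un.acxri.abortContextTag = icmd->ulpContext;      /* assumed */
        iabt->un.acxri.abortIoTag = icmd->ulpIoTag;             /* assumed */
        iabt->ulpLe = 1;
        iabt->ulpClass = icmd->ulpClass;
        /* ABORT_XRI needs a live link; otherwise just close the exchange */
        iabt->ulpCommand = (phba->link_state >= LPFC_LINK_UP) ?
                                CMD_ABORT_XRI_CN : CMD_CLOSE_XRI_CN;
        abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;

        retval = __lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0);
        return retval;          /* caller checks for IOCB_ERROR */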
*/ - if (phba->fc_flag & FC_UNLOADING) + if ((vport->load_flag & FC_UNLOADING) && + (pring->ringno == LPFC_ELS_RING)) { + if (cmdiocb->iocb_flag & LPFC_IO_FABRIC) + cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl; + else + cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl; goto abort_iotag_exit; + } /* issue ABTS for this IOCB based on iotag */ - abtsiocbp = lpfc_sli_get_iocbq(phba); + abtsiocbp = __lpfc_sli_get_iocbq(phba); if (abtsiocbp == NULL) return 0; @@ -2848,7 +3432,7 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba * phba, iabt->ulpLe = 1; iabt->ulpClass = icmd->ulpClass; - if (phba->hba_state >= LPFC_LINK_UP) + if (phba->link_state >= LPFC_LINK_UP) iabt->ulpCommand = CMD_ABORT_XRI_CN; else iabt->ulpCommand = CMD_CLOSE_XRI_CN; @@ -2856,32 +3440,20 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba * phba, abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl; lpfc_printf_log(phba, KERN_INFO, LOG_SLI, - "%d:0339 Abort xri x%x, original iotag x%x, abort " - "cmd iotag x%x\n", - phba->brd_no, iabt->un.acxri.abortContextTag, + "%d (%d):0339 Abort xri x%x, original iotag x%x, " + "abort cmd iotag x%x\n", + phba->brd_no, vport->vpi, + iabt->un.acxri.abortContextTag, iabt->un.acxri.abortIoTag, abtsiocbp->iotag); - retval = lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0); + retval = __lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0); abort_iotag_exit: - - /* If we could not issue an abort dequeue the iocb and handle - * the completion here. + /* + * Caller to this routine should check for IOCB_ERROR + * and handle it properly. This routine no longer removes + * iocb off txcmplq and call compl in case of IOCB_ERROR. */ - if (retval == IOCB_ERROR) { - list_del(&cmdiocb->list); - pring->txcmplq_cnt--; - - if (cmdiocb->iocb_cmpl) { - icmd->ulpStatus = IOSTAT_LOCAL_REJECT; - icmd->un.ulpWord[4] = IOERR_SLI_ABORTED; - spin_unlock_irq(phba->host->host_lock); - (cmdiocb->iocb_cmpl) (phba, cmdiocb, cmdiocb); - spin_lock_irq(phba->host->host_lock); - } else - lpfc_sli_release_iocbq(phba, cmdiocb); - } - - return 1; + return retval; } static int @@ -2930,7 +3502,7 @@ lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, uint16_t tgt_id, int lpfc_sli_sum_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, - uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd ctx_cmd) + uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd ctx_cmd) { struct lpfc_iocbq *iocbq; int sum, i; @@ -2947,14 +3519,10 @@ lpfc_sli_sum_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, } void -lpfc_sli_abort_fcp_cmpl(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, - struct lpfc_iocbq * rspiocb) +lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) { - unsigned long iflags; - - spin_lock_irqsave(phba->host->host_lock, iflags); lpfc_sli_release_iocbq(phba, cmdiocb); - spin_unlock_irqrestore(phba->host->host_lock, iflags); return; } @@ -2972,8 +3540,8 @@ lpfc_sli_abort_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, for (i = 1; i <= phba->sli.last_iotag; i++) { iocbq = phba->sli.iocbq_lookup[i]; - if (lpfc_sli_validate_fcp_iocb (iocbq, tgt_id, lun_id, - 0, abort_cmd) != 0) + if (lpfc_sli_validate_fcp_iocb(iocbq, tgt_id, lun_id, 0, + abort_cmd) != 0) continue; /* issue ABTS for this IOCB based on iotag */ @@ -2989,8 +3557,9 @@ lpfc_sli_abort_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag; abtsiocb->iocb.ulpLe = 1; abtsiocb->iocb.ulpClass = cmd->ulpClass; + abtsiocb->vport = phba->pport; - if (phba->hba_state >= LPFC_LINK_UP) + if 
(lpfc_is_link_up(phba)) abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN; else abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN; @@ -3016,16 +3585,16 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba, wait_queue_head_t *pdone_q; unsigned long iflags; - spin_lock_irqsave(phba->host->host_lock, iflags); + spin_lock_irqsave(&phba->hbalock, iflags); cmdiocbq->iocb_flag |= LPFC_IO_WAKE; if (cmdiocbq->context2 && rspiocbq) memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb, &rspiocbq->iocb, sizeof(IOCB_t)); pdone_q = cmdiocbq->context_un.wait_queue; - spin_unlock_irqrestore(phba->host->host_lock, iflags); if (pdone_q) wake_up(pdone_q); + spin_unlock_irqrestore(&phba->hbalock, iflags); return; } @@ -3035,11 +3604,12 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba, * lpfc_sli_issue_call since the wake routine sets a unique value and by * definition this is a wait function. */ + int -lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba, - struct lpfc_sli_ring * pring, - struct lpfc_iocbq * piocb, - struct lpfc_iocbq * prspiocbq, +lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, + struct lpfc_sli_ring *pring, + struct lpfc_iocbq *piocb, + struct lpfc_iocbq *prspiocbq, uint32_t timeout) { DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q); @@ -3071,11 +3641,9 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba, retval = lpfc_sli_issue_iocb(phba, pring, piocb, 0); if (retval == IOCB_SUCCESS) { timeout_req = timeout * HZ; - spin_unlock_irq(phba->host->host_lock); timeleft = wait_event_timeout(done_q, piocb->iocb_flag & LPFC_IO_WAKE, timeout_req); - spin_lock_irq(phba->host->host_lock); if (piocb->iocb_flag & LPFC_IO_WAKE) { lpfc_printf_log(phba, KERN_INFO, LOG_SLI, @@ -3117,16 +3685,16 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba, } int -lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq, +lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq, uint32_t timeout) { DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q); int retval; + unsigned long flag; /* The caller must leave context1 empty. */ - if (pmboxq->context1 != 0) { - return (MBX_NOT_FINISHED); - } + if (pmboxq->context1 != 0) + return MBX_NOT_FINISHED; /* setup wake call as IOCB callback */ pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait; @@ -3141,6 +3709,7 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq, pmboxq->mbox_flag & LPFC_MBX_WAKE, timeout * HZ); + spin_lock_irqsave(&phba->hbalock, flag); pmboxq->context1 = NULL; /* * if LPFC_MBX_WAKE flag is set the mailbox is completed @@ -3148,8 +3717,11 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq, */ if (pmboxq->mbox_flag & LPFC_MBX_WAKE) retval = MBX_SUCCESS; - else + else { retval = MBX_TIMEOUT; + pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + } + spin_unlock_irqrestore(&phba->hbalock, flag); } return retval; @@ -3158,14 +3730,27 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq, int lpfc_sli_flush_mbox_queue(struct lpfc_hba * phba) { + struct lpfc_vport *vport = phba->pport; int i = 0; + uint32_t ha_copy; - while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE && !phba->stopped) { + while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE && !vport->stopped) { if (i++ > LPFC_MBOX_TMO * 1000) return 1; - if (lpfc_sli_handle_mb_event(phba) == 0) - i = 0; + /* + * Call lpfc_sli_handle_mb_event only if a mailbox cmd + * did finish. This way we won't get the misleading + * "Stray Mailbox Interrupt" message. 
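When lpfc_sli_issue_mbox_wait() above returns MBX_TIMEOUT it has already redirected mbox_cmpl to lpfc_sli_def_mbox_cmpl, so the timed-out mailbox still belongs to the SLI layer and the caller must not free it. A hedged usage sketch; issue_mbox_sync() and prep_mbox_cmd() are illustrative stand-ins, not driver routines:

static int
issue_mbox_sync(struct lpfc_hba *phba)
{
        LPFC_MBOXQ_t *pmb;
        int rc;

        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb)
                return -ENOMEM;

        prep_mbox_cmd(phba, pmb);               /* hypothetical setup step */
        rc = lpfc_sli_issue_mbox_wait(phba, pmb, LPFC_MBOX_TMO);
        if (rc == MBX_TIMEOUT)
                return -ETIMEDOUT;      /* default cmpl releases pmb later */

        /* inspect pmb->mb results here on MBX_SUCCESS */
        mempool_free(pmb, phba->mbox_mem_pool);
        return (rc == MBX_SUCCESS) ? 0 : -EIO;
}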
+ */ + spin_lock_irq(&phba->hbalock); + ha_copy = phba->work_ha; + phba->work_ha &= ~HA_MBATT; + spin_unlock_irq(&phba->hbalock); + + if (ha_copy & HA_MBATT) + if (lpfc_sli_handle_mb_event(phba) == 0) + i = 0; msleep(1); } @@ -3176,13 +3761,20 @@ lpfc_sli_flush_mbox_queue(struct lpfc_hba * phba) irqreturn_t lpfc_intr_handler(int irq, void *dev_id) { - struct lpfc_hba *phba; + struct lpfc_hba *phba; uint32_t ha_copy; uint32_t work_ha_copy; unsigned long status; int i; uint32_t control; + MAILBOX_t *mbox, *pmbox; + struct lpfc_vport *vport; + struct lpfc_nodelist *ndlp; + struct lpfc_dmabuf *mp; + LPFC_MBOXQ_t *pmb; + int rc; + /* * Get the driver's phba structure from the dev_id and * assume the HBA is not interrupting. @@ -3204,7 +3796,7 @@ lpfc_intr_handler(int irq, void *dev_id) */ /* Ignore all interrupts during initialization. */ - if (unlikely(phba->hba_state < LPFC_LINK_DOWN)) + if (unlikely(phba->link_state < LPFC_LINK_DOWN)) return IRQ_NONE; /* @@ -3212,16 +3804,16 @@ lpfc_intr_handler(int irq, void *dev_id) * Clear Attention Sources, except Error Attention (to * preserve status) and Link Attention */ - spin_lock(phba->host->host_lock); + spin_lock(&phba->hbalock); ha_copy = readl(phba->HAregaddr); /* If somebody is waiting to handle an eratt don't process it * here. The brdkill function will do this. */ - if (phba->fc_flag & FC_IGNORE_ERATT) + if (phba->link_flag & LS_IGNORE_ERATT) ha_copy &= ~HA_ERATT; writel((ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr); readl(phba->HAregaddr); /* flush */ - spin_unlock(phba->host->host_lock); + spin_unlock(&phba->hbalock); if (unlikely(!ha_copy)) return IRQ_NONE; @@ -3235,36 +3827,41 @@ lpfc_intr_handler(int irq, void *dev_id) * Turn off Link Attention interrupts * until CLEAR_LA done */ - spin_lock(phba->host->host_lock); + spin_lock(&phba->hbalock); phba->sli.sli_flag &= ~LPFC_PROCESS_LA; control = readl(phba->HCregaddr); control &= ~HC_LAINT_ENA; writel(control, phba->HCregaddr); readl(phba->HCregaddr); /* flush */ - spin_unlock(phba->host->host_lock); + spin_unlock(&phba->hbalock); } else work_ha_copy &= ~HA_LATT; } if (work_ha_copy & ~(HA_ERATT|HA_MBATT|HA_LATT)) { - for (i = 0; i < phba->sli.num_rings; i++) { - if (work_ha_copy & (HA_RXATT << (4*i))) { - /* - * Turn off Slow Rings interrupts - */ - spin_lock(phba->host->host_lock); - control = readl(phba->HCregaddr); - control &= ~(HC_R0INT_ENA << i); + /* + * Turn off Slow Rings interrupts, LPFC_ELS_RING is + * the only slow ring. + */ + status = (work_ha_copy & + (HA_RXMASK << (4*LPFC_ELS_RING))); + status >>= (4*LPFC_ELS_RING); + if (status & HA_RXMASK) { + spin_lock(&phba->hbalock); + control = readl(phba->HCregaddr); + if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) { + control &= + ~(HC_R0INT_ENA << LPFC_ELS_RING); writel(control, phba->HCregaddr); readl(phba->HCregaddr); /* flush */ - spin_unlock(phba->host->host_lock); } + spin_unlock(&phba->hbalock); } } if (work_ha_copy & HA_ERATT) { - phba->hba_state = LPFC_HBA_ERROR; + phba->link_state = LPFC_HBA_ERROR; /* * There was a link/board error. 
Read the * status register to retrieve the error event @@ -3279,14 +3876,108 @@ lpfc_intr_handler(int irq, void *dev_id) /* Clear Chip error bit */ writel(HA_ERATT, phba->HAregaddr); readl(phba->HAregaddr); /* flush */ - phba->stopped = 1; + phba->pport->stopped = 1; + } + + if ((work_ha_copy & HA_MBATT) && + (phba->sli.mbox_active)) { + pmb = phba->sli.mbox_active; + pmbox = &pmb->mb; + mbox = &phba->slim2p->mbx; + vport = pmb->vport; + + /* First check out the status word */ + lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t)); + if (pmbox->mbxOwner != OWN_HOST) { + /* + * Stray Mailbox Interrupt, mbxCommand <cmd> + * mbxStatus <status> + */ + lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | + LOG_SLI, + "%d (%d):0304 Stray Mailbox " + "Interrupt mbxCommand x%x " + "mbxStatus x%x\n", + phba->brd_no, + (vport + ? vport->vpi : 0), + pmbox->mbxCommand, + pmbox->mbxStatus); + } + phba->last_completion_time = jiffies; + del_timer_sync(&phba->sli.mbox_tmo); + + phba->sli.mbox_active = NULL; + if (pmb->mbox_cmpl) { + lpfc_sli_pcimem_bcopy(mbox, pmbox, + MAILBOX_CMD_SIZE); + } + if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) { + pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG; + + lpfc_debugfs_disc_trc(vport, + LPFC_DISC_TRC_MBOX_VPORT, + "MBOX dflt rpi: : status:x%x rpi:x%x", + (uint32_t)pmbox->mbxStatus, + pmbox->un.varWords[0], 0); + + if ( !pmbox->mbxStatus) { + mp = (struct lpfc_dmabuf *) + (pmb->context1); + ndlp = (struct lpfc_nodelist *) + pmb->context2; + + /* Reg_LOGIN of dflt RPI was successful. + * new lets get rid of the RPI using the + * same mbox buffer. + */ + lpfc_unreg_login(phba, vport->vpi, + pmbox->un.varWords[0], pmb); + pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; + pmb->context1 = mp; + pmb->context2 = ndlp; + pmb->vport = vport; + spin_lock(&phba->hbalock); + phba->sli.sli_flag &= + ~LPFC_SLI_MBOX_ACTIVE; + spin_unlock(&phba->hbalock); + goto send_current_mbox; + } + } + spin_lock(&phba->pport->work_port_lock); + phba->pport->work_port_events &= ~WORKER_MBOX_TMO; + spin_unlock(&phba->pport->work_port_lock); + lpfc_mbox_cmpl_put(phba, pmb); + } + if ((work_ha_copy & HA_MBATT) && + (phba->sli.mbox_active == NULL)) { +send_next_mbox: + spin_lock(&phba->hbalock); + phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; + pmb = lpfc_mbox_get(phba); + spin_unlock(&phba->hbalock); +send_current_mbox: + /* Process next mailbox command if there is one */ + if (pmb != NULL) { + rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) { + pmb->mb.mbxStatus = MBX_NOT_FINISHED; + lpfc_mbox_cmpl_put(phba, pmb); + goto send_next_mbox; + } + } else { + /* Turn on IOCB processing */ + for (i = 0; i < phba->sli.num_rings; i++) + lpfc_sli_turn_on_ring(phba, i); + } + } - spin_lock(phba->host->host_lock); + spin_lock(&phba->hbalock); phba->work_ha |= work_ha_copy; if (phba->work_wait) - wake_up(phba->work_wait); - spin_unlock(phba->host->host_lock); + lpfc_worker_wake_up(phba); + spin_unlock(&phba->hbalock); } ha_copy &= ~(phba->work_ha_mask); @@ -3298,7 +3989,7 @@ lpfc_intr_handler(int irq, void *dev_id) */ status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING))); status >>= (4*LPFC_FCP_RING); - if (status & HA_RXATT) + if (status & HA_RXMASK) lpfc_sli_handle_fast_ring_event(phba, &phba->sli.ring[LPFC_FCP_RING], status); @@ -3311,7 +4002,7 @@ lpfc_intr_handler(int irq, void *dev_id) */ status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING))); status >>= (4*LPFC_EXTRA_RING); - if (status & HA_RXATT) { + if (status & HA_RXMASK) { lpfc_sli_handle_fast_ring_event(phba, &phba->sli.ring[LPFC_EXTRA_RING], 
status); diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h index 41c38d324ab0..76058505795e 100644 --- a/drivers/scsi/lpfc/lpfc_sli.h +++ b/drivers/scsi/lpfc/lpfc_sli.h @@ -20,6 +20,7 @@ /* forward declaration for LPFC_IOCB_t's use */ struct lpfc_hba; +struct lpfc_vport; /* Define the context types that SLI handles for abort and sums. */ typedef enum _lpfc_ctx_cmd { @@ -43,10 +44,12 @@ struct lpfc_iocbq { #define LPFC_IO_WAKE 2 /* High Priority Queue signal flag */ #define LPFC_IO_FCP 4 /* FCP command -- iocbq in scsi_buf */ #define LPFC_DRIVER_ABORTED 8 /* driver aborted this request */ +#define LPFC_IO_FABRIC 0x10 /* Iocb send using fabric scheduler */ uint8_t abort_count; uint8_t rsvd2; uint32_t drvrTimeout; /* driver timeout in seconds */ + struct lpfc_vport *vport;/* virtual port pointer */ void *context1; /* caller context information */ void *context2; /* caller context information */ void *context3; /* caller context information */ @@ -56,6 +59,8 @@ struct lpfc_iocbq { struct lpfcMboxq *mbox; } context_un; + void (*fabric_iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, + struct lpfc_iocbq *); void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *); @@ -68,12 +73,14 @@ struct lpfc_iocbq { #define IOCB_ERROR 2 #define IOCB_TIMEDOUT 3 -#define LPFC_MBX_WAKE 1 +#define LPFC_MBX_WAKE 1 +#define LPFC_MBX_IMED_UNREG 2 typedef struct lpfcMboxq { /* MBOXQs are used in single linked lists */ struct list_head list; /* ptr to next mailbox command */ MAILBOX_t mb; /* Mailbox cmd */ + struct lpfc_vport *vport;/* virutal port pointer */ void *context1; /* caller context information */ void *context2; /* caller context information */ @@ -135,6 +142,8 @@ struct lpfc_sli_ring { uint8_t ringno; /* ring number */ uint16_t numCiocb; /* number of command iocb's per ring */ uint16_t numRiocb; /* number of rsp iocb's per ring */ + uint16_t sizeCiocb; /* Size of command iocb's in this ring */ + uint16_t sizeRiocb; /* Size of response iocb's in this ring */ uint32_t fast_iotag; /* max fastlookup based iotag */ uint32_t iotag_ctr; /* keeps track of the next iotag to use */ @@ -165,6 +174,34 @@ struct lpfc_sli_ring { struct lpfc_sli_ring *); }; +/* Structure used for configuring rings to a specific profile or rctl / type */ +struct lpfc_hbq_init { + uint32_t rn; /* Receive buffer notification */ + uint32_t entry_count; /* max # of entries in HBQ */ + uint32_t headerLen; /* 0 if not profile 4 or 5 */ + uint32_t logEntry; /* Set to 1 if this HBQ used for LogEntry */ + uint32_t profile; /* Selection profile 0=all, 7=logentry */ + uint32_t ring_mask; /* Binds HBQ to a ring e.g. 
Ring0=b0001, + * ring2=b0100 */ + uint32_t hbq_index; /* index of this hbq in ring .HBQs[] */ + + uint32_t seqlenoff; + uint32_t maxlen; + uint32_t seqlenbcnt; + uint32_t cmdcodeoff; + uint32_t cmdmatch[8]; + uint32_t mask_count; /* number of mask entries in prt array */ + struct hbq_mask hbqMasks[6]; + + /* Non-config rings fields to keep track of buffer allocations */ + uint32_t buffer_count; /* number of buffers allocated */ + uint32_t init_count; /* number to allocate when initialized */ + uint32_t add_count; /* number to allocate when starved */ +} ; + +#define LPFC_MAX_HBQ 16 + + /* Structure used to hold SLI statistical counters and info */ struct lpfc_sli_stat { uint64_t mbox_stat_err; /* Mbox cmds completed status error */ @@ -197,6 +234,7 @@ struct lpfc_sli { #define LPFC_SLI_MBOX_ACTIVE 0x100 /* HBA mailbox is currently active */ #define LPFC_SLI2_ACTIVE 0x200 /* SLI2 overlay in firmware is active */ #define LPFC_PROCESS_LA 0x400 /* Able to process link attention */ +#define LPFC_BLOCK_MGMT_IO 0x800 /* Don't allow mgmt mbx or iocb cmds */ struct lpfc_sli_ring ring[LPFC_MAX_RING]; int fcp_ring; /* ring used for FCP initiator commands */ @@ -209,6 +247,7 @@ struct lpfc_sli { uint16_t mboxq_cnt; /* current length of queue */ uint16_t mboxq_max; /* max length */ LPFC_MBOXQ_t *mbox_active; /* active mboxq information */ + struct list_head mboxq_cmpl; struct timer_list mbox_tmo; /* Hold clk to timeout active mbox cmd */ @@ -221,12 +260,6 @@ struct lpfc_sli { struct lpfc_lnk_stat lnk_stat_offsets; }; -/* Given a pointer to the start of the ring, and the slot number of - * the desired iocb entry, calc a pointer to that entry. - * (assume iocb entry size is 32 bytes, or 8 words) - */ -#define IOCB_ENTRY(ring,slot) ((IOCB_t *)(((char *)(ring)) + ((slot) * 32))) - #define LPFC_MBOX_TMO 30 /* Sec tmo for outstanding mbox command */ #define LPFC_MBOX_TMO_FLASH_CMD 300 /* Sec tmo for outstanding FLASH write diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 92a9107019d2..a5bc79eef052 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -18,7 +18,7 @@ * included with this package. * *******************************************************************/ -#define LPFC_DRIVER_VERSION "8.1.12" +#define LPFC_DRIVER_VERSION "8.2.1" #define LPFC_DRIVER_NAME "lpfc" diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c new file mode 100644 index 000000000000..85797dbf5478 --- /dev/null +++ b/drivers/scsi/lpfc/lpfc_vport.c @@ -0,0 +1,523 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2004-2006 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. * + * www.emulex.com * + * Portions Copyright (C) 2004-2005 Christoph Hellwig * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. 
See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. * + *******************************************************************/ + +#include <linux/blkdev.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/idr.h> +#include <linux/interrupt.h> +#include <linux/kthread.h> +#include <linux/pci.h> +#include <linux/spinlock.h> + +#include <scsi/scsi.h> +#include <scsi/scsi_device.h> +#include <scsi/scsi_host.h> +#include <scsi/scsi_transport_fc.h> +#include "lpfc_hw.h" +#include "lpfc_sli.h" +#include "lpfc_disc.h" +#include "lpfc_scsi.h" +#include "lpfc.h" +#include "lpfc_logmsg.h" +#include "lpfc_crtn.h" +#include "lpfc_version.h" +#include "lpfc_vport.h" + +inline void lpfc_vport_set_state(struct lpfc_vport *vport, + enum fc_vport_state new_state) +{ + struct fc_vport *fc_vport = vport->fc_vport; + + if (fc_vport) { + /* + * When the transport defines fc_vport_set state we will replace + * this code with the following line + */ + /* fc_vport_set_state(fc_vport, new_state); */ + if (new_state != FC_VPORT_INITIALIZING) + fc_vport->vport_last_state = fc_vport->vport_state; + fc_vport->vport_state = new_state; + } + + /* for all the error states we will set the invternal state to FAILED */ + switch (new_state) { + case FC_VPORT_NO_FABRIC_SUPP: + case FC_VPORT_NO_FABRIC_RSCS: + case FC_VPORT_FABRIC_LOGOUT: + case FC_VPORT_FABRIC_REJ_WWN: + case FC_VPORT_FAILED: + vport->port_state = LPFC_VPORT_FAILED; + break; + case FC_VPORT_LINKDOWN: + vport->port_state = LPFC_VPORT_UNKNOWN; + break; + default: + /* do nothing */ + break; + } +} + +static int +lpfc_alloc_vpi(struct lpfc_hba *phba) +{ + int vpi; + + spin_lock_irq(&phba->hbalock); + /* Start at bit 1 because vpi zero is reserved for the physical port */ + vpi = find_next_zero_bit(phba->vpi_bmask, (phba->max_vpi + 1), 1); + if (vpi > phba->max_vpi) + vpi = 0; + else + set_bit(vpi, phba->vpi_bmask); + spin_unlock_irq(&phba->hbalock); + return vpi; +} + +static void +lpfc_free_vpi(struct lpfc_hba *phba, int vpi) +{ + spin_lock_irq(&phba->hbalock); + clear_bit(vpi, phba->vpi_bmask); + spin_unlock_irq(&phba->hbalock); +} + +static int +lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport) +{ + LPFC_MBOXQ_t *pmb; + MAILBOX_t *mb; + struct lpfc_dmabuf *mp; + int rc; + + pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!pmb) { + return -ENOMEM; + } + mb = &pmb->mb; + + lpfc_read_sparam(phba, pmb, vport->vpi); + /* + * Grab buffer pointer and clear context1 so we can use + * lpfc_sli_issue_box_wait + */ + mp = (struct lpfc_dmabuf *) pmb->context1; + pmb->context1 = NULL; + + pmb->vport = vport; + rc = lpfc_sli_issue_mbox_wait(phba, pmb, phba->fc_ratov * 2); + if (rc != MBX_SUCCESS) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, + "%d (%d):1818 VPort failed init, mbxCmd x%x " + "READ_SPARM mbxStatus x%x, rc = x%x\n", + phba->brd_no, vport->vpi, + mb->mbxCommand, mb->mbxStatus, rc); + lpfc_mbuf_free(phba, mp->virt, mp->phys); + kfree(mp); + if (rc != MBX_TIMEOUT) + mempool_free(pmb, phba->mbox_mem_pool); + return -EIO; + } + + memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm)); + memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName, + sizeof (struct lpfc_name)); + memcpy(&vport->fc_portname, &vport->fc_sparam.portName, + sizeof (struct lpfc_name)); + + lpfc_mbuf_free(phba, mp->virt, mp->phys); + kfree(mp); + mempool_free(pmb, phba->mbox_mem_pool); + + return 0; +} + +static int 
+lpfc_valid_wwn_format(struct lpfc_hba *phba, struct lpfc_name *wwn, + const char *name_type) +{ + /* ensure that IEEE format 1 addresses + * contain zeros in bits 59-48 + */ + if (!((wwn->u.wwn[0] >> 4) == 1 && + ((wwn->u.wwn[0] & 0xf) != 0 || (wwn->u.wwn[1] & 0xf) != 0))) + return 1; + + lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, + "%d:1822 Invalid %s: %02x:%02x:%02x:%02x:" + "%02x:%02x:%02x:%02x\n", + phba->brd_no, name_type, + wwn->u.wwn[0], wwn->u.wwn[1], + wwn->u.wwn[2], wwn->u.wwn[3], + wwn->u.wwn[4], wwn->u.wwn[5], + wwn->u.wwn[6], wwn->u.wwn[7]); + return 0; +} + +static int +lpfc_unique_wwpn(struct lpfc_hba *phba, struct lpfc_vport *new_vport) +{ + struct lpfc_vport *vport; + + list_for_each_entry(vport, &phba->port_list, listentry) { + if (vport == new_vport) + continue; + /* If they match, return not unique */ + if (memcmp(&vport->fc_sparam.portName, + &new_vport->fc_sparam.portName, + sizeof(struct lpfc_name)) == 0) + return 0; + } + return 1; +} + +int +lpfc_vport_create(struct fc_vport *fc_vport, bool disable) +{ + struct lpfc_nodelist *ndlp; + struct lpfc_vport *pport = + (struct lpfc_vport *) fc_vport->shost->hostdata; + struct lpfc_hba *phba = pport->phba; + struct lpfc_vport *vport = NULL; + int instance; + int vpi; + int rc = VPORT_ERROR; + + if ((phba->sli_rev < 3) || + !(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) { + lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, + "%d:1808 Create VPORT failed: " + "NPIV is not enabled: SLImode:%d\n", + phba->brd_no, phba->sli_rev); + rc = VPORT_INVAL; + goto error_out; + } + + vpi = lpfc_alloc_vpi(phba); + if (vpi == 0) { + lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, + "%d:1809 Create VPORT failed: " + "Max VPORTs (%d) exceeded\n", + phba->brd_no, phba->max_vpi); + rc = VPORT_NORESOURCES; + goto error_out; + } + + + /* Assign an unused board number */ + if ((instance = lpfc_get_instance()) < 0) { + lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, + "%d:1810 Create VPORT failed: Cannot get " + "instance number\n", phba->brd_no); + lpfc_free_vpi(phba, vpi); + rc = VPORT_NORESOURCES; + goto error_out; + } + + vport = lpfc_create_port(phba, instance, fc_vport); + if (!vport) { + lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, + "%d:1811 Create VPORT failed: vpi x%x\n", + phba->brd_no, vpi); + lpfc_free_vpi(phba, vpi); + rc = VPORT_NORESOURCES; + goto error_out; + } + + vport->vpi = vpi; + lpfc_debugfs_initialize(vport); + + if (lpfc_vport_sparm(phba, vport)) { + lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, + "%d:1813 Create VPORT failed: vpi:%d " + "Cannot get sparam\n", + phba->brd_no, vpi); + lpfc_free_vpi(phba, vpi); + destroy_port(vport); + rc = VPORT_NORESOURCES; + goto error_out; + } + + memcpy(vport->fc_portname.u.wwn, vport->fc_sparam.portName.u.wwn, 8); + memcpy(vport->fc_nodename.u.wwn, vport->fc_sparam.nodeName.u.wwn, 8); + + if (fc_vport->node_name != 0) + u64_to_wwn(fc_vport->node_name, vport->fc_nodename.u.wwn); + if (fc_vport->port_name != 0) + u64_to_wwn(fc_vport->port_name, vport->fc_portname.u.wwn); + + memcpy(&vport->fc_sparam.portName, vport->fc_portname.u.wwn, 8); + memcpy(&vport->fc_sparam.nodeName, vport->fc_nodename.u.wwn, 8); + + if (!lpfc_valid_wwn_format(phba, &vport->fc_sparam.nodeName, "WWNN") || + !lpfc_valid_wwn_format(phba, &vport->fc_sparam.portName, "WWPN")) { + lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, + "%d:1821 Create VPORT failed: vpi:%d " + "Invalid WWN format\n", + phba->brd_no, vpi); + lpfc_free_vpi(phba, vpi); + destroy_port(vport); + rc = VPORT_INVAL; + goto error_out; + } + + if (!lpfc_unique_wwpn(phba, 
vport)) { + lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, + "%d:1823 Create VPORT failed: vpi:%d " + "Duplicate WWN on HBA\n", + phba->brd_no, vpi); + lpfc_free_vpi(phba, vpi); + destroy_port(vport); + rc = VPORT_INVAL; + goto error_out; + } + + *(struct lpfc_vport **)fc_vport->dd_data = vport; + vport->fc_vport = fc_vport; + + if ((phba->link_state < LPFC_LINK_UP) || + (phba->fc_topology == TOPOLOGY_LOOP)) { + lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN); + rc = VPORT_OK; + goto out; + } + + if (disable) { + rc = VPORT_OK; + goto out; + } + + /* Use the Physical nodes Fabric NDLP to determine if the link is + * up and ready to FDISC. + */ + ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); + if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { + if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) { + lpfc_set_disctmo(vport); + lpfc_initial_fdisc(vport); + } else { + lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP); + lpfc_printf_log(phba, KERN_ERR, LOG_ELS, + "%d (%d):0262 No NPIV Fabric " + "support\n", + phba->brd_no, vport->vpi); + } + } else { + lpfc_vport_set_state(vport, FC_VPORT_FAILED); + } + rc = VPORT_OK; + +out: + lpfc_host_attrib_init(lpfc_shost_from_vport(vport)); +error_out: + return rc; +} + +int +disable_vport(struct fc_vport *fc_vport) +{ + struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data; + struct lpfc_hba *phba = vport->phba; + struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL; + long timeout; + + ndlp = lpfc_findnode_did(vport, Fabric_DID); + if (ndlp && phba->link_state >= LPFC_LINK_UP) { + vport->unreg_vpi_cmpl = VPORT_INVAL; + timeout = msecs_to_jiffies(phba->fc_ratov * 2000); + if (!lpfc_issue_els_npiv_logo(vport, ndlp)) + while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout) + timeout = schedule_timeout(timeout); + } + + lpfc_sli_host_down(vport); + + /* Mark all nodes for discovery so we can remove them by + * calling lpfc_cleanup_rpis(vport, 1) + */ + list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { + if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) + continue; + lpfc_disc_state_machine(vport, ndlp, NULL, + NLP_EVT_DEVICE_RECOVERY); + } + lpfc_cleanup_rpis(vport, 1); + + lpfc_stop_vport_timers(vport); + lpfc_unreg_all_rpis(vport); + lpfc_unreg_default_rpis(vport); + /* + * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi) does the + * scsi_host_put() to release the vport. + */ + lpfc_mbx_unreg_vpi(vport); + + lpfc_vport_set_state(vport, FC_VPORT_DISABLED); + return VPORT_OK; +} + +int +enable_vport(struct fc_vport *fc_vport) +{ + struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data; + struct lpfc_hba *phba = vport->phba; + struct lpfc_nodelist *ndlp = NULL; + + if ((phba->link_state < LPFC_LINK_UP) || + (phba->fc_topology == TOPOLOGY_LOOP)) { + lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN); + return VPORT_OK; + } + + vport->load_flag |= FC_LOADING; + vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; + + /* Use the Physical nodes Fabric NDLP to determine if the link is + * up and ready to FDISC. 
+ */ + ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); + if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { + if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) { + lpfc_set_disctmo(vport); + lpfc_initial_fdisc(vport); + } else { + lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP); + lpfc_printf_log(phba, KERN_ERR, LOG_ELS, + "%d (%d):0264 No NPIV Fabric " + "support\n", + phba->brd_no, vport->vpi); + } + } else { + lpfc_vport_set_state(vport, FC_VPORT_FAILED); + } + + return VPORT_OK; +} + +int +lpfc_vport_disable(struct fc_vport *fc_vport, bool disable) +{ + if (disable) + return disable_vport(fc_vport); + else + return enable_vport(fc_vport); +} + + +int +lpfc_vport_delete(struct fc_vport *fc_vport) +{ + struct lpfc_nodelist *ndlp = NULL; + struct lpfc_nodelist *next_ndlp; + struct Scsi_Host *shost = (struct Scsi_Host *) fc_vport->shost; + struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data; + struct lpfc_hba *phba = vport->phba; + long timeout; + int rc = VPORT_ERROR; + + /* + * This is a bit of a mess. We want to ensure the shost doesn't get + * torn down until we're done with the embedded lpfc_vport structure. + * + * Beyond holding a reference for this function, we also need a + * reference for outstanding I/O requests we schedule during delete + * processing. But once we scsi_remove_host() we can no longer obtain + * a reference through scsi_host_get(). + * + * So we take two references here. We release one reference at the + * bottom of the function -- after delinking the vport. And we + * release the other at the completion of the unreg_vpi that get's + * initiated after we've disposed of all other resources associated + * with the port. + */ + if (!scsi_host_get(shost) || !scsi_host_get(shost)) + return VPORT_INVAL; + + if (vport->port_type == LPFC_PHYSICAL_PORT) { + lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, + "%d:1812 vport_delete failed: Cannot delete " + "physical host\n", phba->brd_no); + goto out; + } + + vport->load_flag |= FC_UNLOADING; + + kfree(vport->vname); + lpfc_debugfs_terminate(vport); + fc_remove_host(lpfc_shost_from_vport(vport)); + scsi_remove_host(lpfc_shost_from_vport(vport)); + + ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); + if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE && + phba->link_state >= LPFC_LINK_UP) { + + /* First look for the Fabric ndlp */ + ndlp = lpfc_findnode_did(vport, Fabric_DID); + if (!ndlp) { + /* Cannot find existing Fabric ndlp, allocate one */ + ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); + if (!ndlp) + goto skip_logo; + lpfc_nlp_init(vport, ndlp, Fabric_DID); + } else { + lpfc_dequeue_node(vport, ndlp); + } + vport->unreg_vpi_cmpl = VPORT_INVAL; + timeout = msecs_to_jiffies(phba->fc_ratov * 2000); + if (!lpfc_issue_els_npiv_logo(vport, ndlp)) + while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout) + timeout = schedule_timeout(timeout); + } + +skip_logo: + lpfc_sli_host_down(vport); + + list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { + lpfc_disc_state_machine(vport, ndlp, NULL, + NLP_EVT_DEVICE_RECOVERY); + lpfc_disc_state_machine(vport, ndlp, NULL, + NLP_EVT_DEVICE_RM); + } + + lpfc_stop_vport_timers(vport); + lpfc_unreg_all_rpis(vport); + lpfc_unreg_default_rpis(vport); + /* + * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi) does the + * scsi_host_put() to release the vport. 
+ */ + lpfc_mbx_unreg_vpi(vport); + + lpfc_free_vpi(phba, vport->vpi); + vport->work_port_events = 0; + spin_lock_irq(&phba->hbalock); + list_del_init(&vport->listentry); + spin_unlock_irq(&phba->hbalock); + + rc = VPORT_OK; +out: + scsi_host_put(shost); + return rc; +} + + +EXPORT_SYMBOL(lpfc_vport_create); +EXPORT_SYMBOL(lpfc_vport_delete); diff --git a/drivers/scsi/lpfc/lpfc_vport.h b/drivers/scsi/lpfc/lpfc_vport.h new file mode 100644 index 000000000000..f223550f8cba --- /dev/null +++ b/drivers/scsi/lpfc/lpfc_vport.h @@ -0,0 +1,113 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2004-2006 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. * + * www.emulex.com * + * Portions Copyright (C) 2004-2005 Christoph Hellwig * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. * + *******************************************************************/ + +#ifndef _H_LPFC_VPORT +#define _H_LPFC_VPORT + +/* API version values (each will be an individual bit) */ +#define VPORT_API_VERSION_1 0x01 + +/* Values returned via lpfc_vport_getinfo() */ +struct vport_info { + + uint32_t api_versions; + uint8_t linktype; +#define VPORT_TYPE_PHYSICAL 0 +#define VPORT_TYPE_VIRTUAL 1 + + uint8_t state; +#define VPORT_STATE_OFFLINE 0 +#define VPORT_STATE_ACTIVE 1 +#define VPORT_STATE_FAILED 2 + + uint8_t fail_reason; + uint8_t prev_fail_reason; +#define VPORT_FAIL_UNKNOWN 0 +#define VPORT_FAIL_LINKDOWN 1 +#define VPORT_FAIL_FAB_UNSUPPORTED 2 +#define VPORT_FAIL_FAB_NORESOURCES 3 +#define VPORT_FAIL_FAB_LOGOUT 4 +#define VPORT_FAIL_ADAP_NORESOURCES 5 + + uint8_t node_name[8]; /* WWNN */ + uint8_t port_name[8]; /* WWPN */ + + struct Scsi_Host *shost; + +/* Following values are valid only on physical links */ + uint32_t vports_max; + uint32_t vports_inuse; + uint32_t rpi_max; + uint32_t rpi_inuse; +#define VPORT_CNT_INVALID 0xFFFFFFFF +}; + +/* data used in link creation */ +struct vport_data { + uint32_t api_version; + + uint32_t options; +#define VPORT_OPT_AUTORETRY 0x01 + + uint8_t node_name[8]; /* WWNN */ + uint8_t port_name[8]; /* WWPN */ + +/* + * Upon successful creation, vport_shost will point to the new Scsi_Host + * structure for the new virtual link. + */ + struct Scsi_Host *vport_shost; +}; + +/* API function return codes */ +#define VPORT_OK 0 +#define VPORT_ERROR -1 +#define VPORT_INVAL -2 +#define VPORT_NOMEM -3 +#define VPORT_NORESOURCES -4 + +int lpfc_vport_create(struct fc_vport *, bool); +int lpfc_vport_delete(struct fc_vport *); +int lpfc_vport_getinfo(struct Scsi_Host *, struct vport_info *); +int lpfc_vport_tgt_remove(struct Scsi_Host *, uint, uint); + +/* + * queuecommand VPORT-specific return codes. Specified in the host byte code. 
+ * Returned when the virtual link has failed or is not active.
+ */
+#define DID_VPORT_ERROR 0x0f
+
+#define VPORT_INFO 0x1
+#define VPORT_CREATE 0x2
+#define VPORT_DELETE 0x4
+
+struct vport_cmd_tag {
+ uint32_t cmd;
+ struct vport_data cdata;
+ struct vport_info cinfo;
+ void *vport;
+ int vport_num;
+};
+
+void lpfc_vport_set_state(struct lpfc_vport *vport,
+ enum fc_vport_state new_state);
+
+#endif /* H_LPFC_VPORT */
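The new lpfc_alloc_vpi()/lpfc_free_vpi() helpers added in lpfc_vport.c above hand out virtual-port indices from a bitmap: index 0 stays reserved for the physical port, a new vport claims the first free bit at or above 1, and a return value of 0 means the VPI pool is exhausted. The following is a minimal userspace sketch of that pattern only, under stated assumptions: it fixes MAX_VPI at 63 and open-codes the bit operations instead of using the kernel's find_next_zero_bit()/set_bit()/clear_bit() and the hbalock, so none of these names are driver code.

/*
 * Hypothetical userspace analogue of the VPI bitmap allocator pattern.
 * Not lpfc code: MAX_VPI, alloc_vpi() and free_vpi() are illustration-only
 * stand-ins for phba->max_vpi, lpfc_alloc_vpi() and lpfc_free_vpi().
 */
#include <limits.h>
#include <stdio.h>

#define MAX_VPI 63                      /* assumed limit, not the HBA's real max_vpi */
#define WORD_BITS (CHAR_BIT * sizeof(unsigned long))

static unsigned long vpi_bmask[(MAX_VPI / WORD_BITS) + 1];

/* Claim the first free VPI at or above 1; return 0 when none is available. */
static int alloc_vpi(void)
{
	int vpi;

	for (vpi = 1; vpi <= MAX_VPI; vpi++) {
		unsigned long *word = &vpi_bmask[vpi / WORD_BITS];
		unsigned long mask = 1UL << (vpi % WORD_BITS);

		if (!(*word & mask)) {
			*word |= mask;          /* like set_bit() */
			return vpi;
		}
	}
	return 0;                               /* vpi 0 doubles as "no VPI free" */
}

static void free_vpi(int vpi)
{
	vpi_bmask[vpi / WORD_BITS] &= ~(1UL << (vpi % WORD_BITS));  /* like clear_bit() */
}

int main(void)
{
	int a = alloc_vpi();
	int b = alloc_vpi();

	printf("allocated vpi %d and %d\n", a, b);            /* prints 1 and 2 */
	free_vpi(a);
	printf("after free, next vpi is %d\n", alloc_vpi());  /* 1 is reused */
	return 0;
}

Folding "reserved for the physical port" and "pool exhausted" into the single value 0 is what lets lpfc_vport_create() in the diff above treat vpi == 0 as VPORT_NORESOURCES without a separate error path.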