author    | Linus Torvalds <torvalds@linux-foundation.org> | 2011-03-25 21:06:13 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2011-03-25 21:06:13 -0700
commit    | 9f34217c846a96dea03f4418e2f27423658d3542 (patch)
tree      | 5b137af50db5758261700015911afb197ac8fc9f /drivers/scsi
parent    | 95e14ed7fc4b2db62eb597a70850a0fede48b78a (diff)
parent    | 3703b2c5d041a68095cdd22380c23ce27d449ad7 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (55 commits)
[SCSI] tcm_loop: Add multi-fabric Linux/SCSI LLD fabric module
[SCSI] qla4xxx: Use polling mode for disable interrupt mailbox completion
[SCSI] Revert "[SCSI] Retrieve the Caching mode page"
[SCSI] bnx2fc: IO completion not processed due to missed wakeup
[SCSI] qla4xxx: Update driver version to 5.02.00-k6
[SCSI] qla4xxx: masking required bits of add_fw_options during initialization
[SCSI] qla4xxx: added new function qla4xxx_relogin_all_devices
[SCSI] qla4xxx: add support for ql4xsess_recovery_tmo cmd line param
[SCSI] qla4xxx: Add support for ql4xmaxqdepth command line parameter
[SCSI] qla4xxx: cleanup function qla4xxx_process_ddb_changed
[SCSI] qla4xxx: Prevent other port reinitialization during remove_adapter
[SCSI] qla4xxx: remove unused ddb flag DF_NO_RELOGIN
[SCSI] qla4xxx: cleanup DDB relogin logic during initialization
[SCSI] qla4xxx: Do not retry ISP82XX initialization if H/W state is failed
[SCSI] qla4xxx: Do not send mbox command if FW is in failed state
[SCSI] qla4xxx: cleanup qla4xxx_initialize_ddb_list()
[SCSI] ses: add subenclosure support
[SCSI] bnx2fc: Bump version to 1.0.1
[SCSI] bnx2fc: Remove unnecessary module state checks
[SCSI] bnx2fc: Fix MTU issue by using static MTU
...
Diffstat (limited to 'drivers/scsi')
47 files changed, 1855 insertions(+), 772 deletions(-)
diff --git a/drivers/scsi/aacraid/Makefile b/drivers/scsi/aacraid/Makefile index 92df4d6b6147..1bd9fd18f7f3 100644 --- a/drivers/scsi/aacraid/Makefile +++ b/drivers/scsi/aacraid/Makefile @@ -3,6 +3,6 @@ obj-$(CONFIG_SCSI_AACRAID) := aacraid.o aacraid-objs := linit.o aachba.o commctrl.o comminit.o commsup.o \ - dpcsup.o rx.o sa.o rkt.o nark.o + dpcsup.o rx.o sa.o rkt.o nark.o src.o ccflags-y := -Idrivers/scsi diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index 7df2dd1d2c6f..118ce83a737c 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c @@ -5,7 +5,8 @@ * based on the old aacraid driver that is.. * Adaptec aacraid device driver for Linux. * - * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com) + * Copyright (c) 2000-2010 Adaptec, Inc. + * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -1486,7 +1487,9 @@ int aac_get_adapter_info(struct aac_dev* dev) dev->a_ops.adapter_write = aac_write_block; } dev->scsi_host_ptr->max_sectors = AAC_MAX_32BIT_SGBCOUNT; - if(!(dev->adapter_info.options & AAC_OPT_NEW_COMM)) { + if (dev->adapter_info.options & AAC_OPT_NEW_COMM_TYPE1) + dev->adapter_info.options |= AAC_OPT_NEW_COMM; + if (!(dev->adapter_info.options & AAC_OPT_NEW_COMM)) { /* * Worst case size that could cause sg overflow when * we break up SG elements that are larger than 64KB. diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index 4dbcc055ac78..29ab00016b78 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h @@ -12,7 +12,7 @@ *----------------------------------------------------------------------------*/ #ifndef AAC_DRIVER_BUILD -# define AAC_DRIVER_BUILD 26400 +# define AAC_DRIVER_BUILD 28000 # define AAC_DRIVER_BRANCH "-ms" #endif #define MAXIMUM_NUM_CONTAINERS 32 @@ -277,6 +277,16 @@ enum aac_queue_types { #define FsaNormal 1 +/* transport FIB header (PMC) */ +struct aac_fib_xporthdr { + u64 HostAddress; /* FIB host address w/o xport header */ + u32 Size; /* FIB size excluding xport header */ + u32 Handle; /* driver handle to reference the FIB */ + u64 Reserved[2]; +}; + +#define ALIGN32 32 + /* * Define the FIB. The FIB is the where all the requested data and * command information are put to the application on the FSA adapter. @@ -394,7 +404,9 @@ enum fib_xfer_state { AdapterMicroFib = (1<<17), BIOSFibPath = (1<<18), FastResponseCapable = (1<<19), - ApiFib = (1<<20) // Its an API Fib. 
+ ApiFib = (1<<20), /* Its an API Fib */ + /* PMC NEW COMM: There is no more AIF data pending */ + NoMoreAifDataAvailable = (1<<21) }; /* @@ -404,6 +416,7 @@ enum fib_xfer_state { #define ADAPTER_INIT_STRUCT_REVISION 3 #define ADAPTER_INIT_STRUCT_REVISION_4 4 // rocket science +#define ADAPTER_INIT_STRUCT_REVISION_6 6 /* PMC src */ struct aac_init { @@ -428,9 +441,15 @@ struct aac_init #define INITFLAGS_NEW_COMM_SUPPORTED 0x00000001 #define INITFLAGS_DRIVER_USES_UTC_TIME 0x00000010 #define INITFLAGS_DRIVER_SUPPORTS_PM 0x00000020 +#define INITFLAGS_NEW_COMM_TYPE1_SUPPORTED 0x00000041 __le32 MaxIoCommands; /* max outstanding commands */ __le32 MaxIoSize; /* largest I/O command */ __le32 MaxFibSize; /* largest FIB to adapter */ + /* ADAPTER_INIT_STRUCT_REVISION_5 begins here */ + __le32 MaxNumAif; /* max number of aif */ + /* ADAPTER_INIT_STRUCT_REVISION_6 begins here */ + __le32 HostRRQ_AddrLow; + __le32 HostRRQ_AddrHigh; /* Host RRQ (response queue) for SRC */ }; enum aac_log_level { @@ -685,7 +704,7 @@ struct rx_inbound { #define OutboundDoorbellReg MUnit.ODR struct rx_registers { - struct rx_mu_registers MUnit; /* 1300h - 1344h */ + struct rx_mu_registers MUnit; /* 1300h - 1347h */ __le32 reserved1[2]; /* 1348h - 134ch */ struct rx_inbound IndexRegs; }; @@ -703,7 +722,7 @@ struct rx_registers { #define rkt_inbound rx_inbound struct rkt_registers { - struct rkt_mu_registers MUnit; /* 1300h - 1344h */ + struct rkt_mu_registers MUnit; /* 1300h - 1347h */ __le32 reserved1[1006]; /* 1348h - 22fch */ struct rkt_inbound IndexRegs; /* 2300h - */ }; @@ -713,6 +732,44 @@ struct rkt_registers { #define rkt_writeb(AEP, CSR, value) writeb(value, &((AEP)->regs.rkt->CSR)) #define rkt_writel(AEP, CSR, value) writel(value, &((AEP)->regs.rkt->CSR)) +/* + * PMC SRC message unit registers + */ + +#define src_inbound rx_inbound + +struct src_mu_registers { + /* PCI*| Name */ + __le32 reserved0[8]; /* 00h | Reserved */ + __le32 IDR; /* 20h | Inbound Doorbell Register */ + __le32 IISR; /* 24h | Inbound Int. Status Register */ + __le32 reserved1[3]; /* 28h | Reserved */ + __le32 OIMR; /* 34h | Outbound Int. 
Mask Register */ + __le32 reserved2[25]; /* 38h | Reserved */ + __le32 ODR_R; /* 9ch | Outbound Doorbell Read */ + __le32 ODR_C; /* a0h | Outbound Doorbell Clear */ + __le32 reserved3[6]; /* a4h | Reserved */ + __le32 OMR; /* bch | Outbound Message Register */ + __le32 IQ_L; /* c0h | Inbound Queue (Low address) */ + __le32 IQ_H; /* c4h | Inbound Queue (High address) */ +}; + +struct src_registers { + struct src_mu_registers MUnit; /* 00h - c7h */ + __le32 reserved1[130790]; /* c8h - 7fc5fh */ + struct src_inbound IndexRegs; /* 7fc60h */ +}; + +#define src_readb(AEP, CSR) readb(&((AEP)->regs.src.bar0->CSR)) +#define src_readl(AEP, CSR) readl(&((AEP)->regs.src.bar0->CSR)) +#define src_writeb(AEP, CSR, value) writeb(value, \ + &((AEP)->regs.src.bar0->CSR)) +#define src_writel(AEP, CSR, value) writel(value, \ + &((AEP)->regs.src.bar0->CSR)) + +#define SRC_ODR_SHIFT 12 +#define SRC_IDR_SHIFT 9 + typedef void (*fib_callback)(void *ctxt, struct fib *fibctx); struct aac_fib_context { @@ -879,6 +936,7 @@ struct aac_supplement_adapter_info #define AAC_OPTION_MU_RESET cpu_to_le32(0x00000001) #define AAC_OPTION_IGNORE_RESET cpu_to_le32(0x00000002) #define AAC_OPTION_POWER_MANAGEMENT cpu_to_le32(0x00000004) +#define AAC_OPTION_DOORBELL_RESET cpu_to_le32(0x00004000) #define AAC_SIS_VERSION_V3 3 #define AAC_SIS_SLOT_UNKNOWN 0xFF @@ -940,6 +998,7 @@ struct aac_bus_info_response { #define AAC_OPT_SUPPLEMENT_ADAPTER_INFO cpu_to_le32(1<<16) #define AAC_OPT_NEW_COMM cpu_to_le32(1<<17) #define AAC_OPT_NEW_COMM_64 cpu_to_le32(1<<18) +#define AAC_OPT_NEW_COMM_TYPE1 cpu_to_le32(1<<28) struct aac_dev { @@ -952,6 +1011,7 @@ struct aac_dev */ unsigned max_fib_size; unsigned sg_tablesize; + unsigned max_num_aif; /* * Map for 128 fib objects (64k) @@ -980,10 +1040,21 @@ struct aac_dev struct adapter_ops a_ops; unsigned long fsrev; /* Main driver's revision number */ - unsigned base_size; /* Size of mapped in region */ + unsigned long dbg_base; /* address of UART + * debug buffer */ + + unsigned base_size, dbg_size; /* Size of + * mapped in region */ + struct aac_init *init; /* Holds initialization info to communicate with adapter */ dma_addr_t init_pa; /* Holds physical address of the init struct */ + u32 *host_rrq; /* response queue + * if AAC_COMM_MESSAGE_TYPE1 */ + + dma_addr_t host_rrq_pa; /* phys. 
address */ + u32 host_rrq_idx; /* index into rrq buffer */ + struct pci_dev *pdev; /* Our PCI interface */ void * printfbuf; /* pointer to buffer used for printf's from the adapter */ void * comm_addr; /* Base address of Comm area */ @@ -1003,14 +1074,20 @@ struct aac_dev */ #ifndef AAC_MIN_FOOTPRINT_SIZE # define AAC_MIN_FOOTPRINT_SIZE 8192 +# define AAC_MIN_SRC_BAR0_SIZE 0x400000 +# define AAC_MIN_SRC_BAR1_SIZE 0x800 #endif union { struct sa_registers __iomem *sa; struct rx_registers __iomem *rx; struct rkt_registers __iomem *rkt; + struct { + struct src_registers __iomem *bar0; + char __iomem *bar1; + } src; } regs; - volatile void __iomem *base; + volatile void __iomem *base, *dbg_base_mapped; volatile struct rx_inbound __iomem *IndexRegs; u32 OIMR; /* Mask Register Cache */ /* @@ -1031,9 +1108,8 @@ struct aac_dev u8 comm_interface; # define AAC_COMM_PRODUCER 0 # define AAC_COMM_MESSAGE 1 - /* macro side-effects BEWARE */ -# define raw_io_interface \ - init->InitStructRevision==cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4) +# define AAC_COMM_MESSAGE_TYPE1 3 + u8 raw_io_interface; u8 raw_io_64; u8 printf_enabled; u8 in_reset; @@ -1789,6 +1865,10 @@ extern struct aac_common aac_config; #define DoorBellAdapterNormCmdNotFull (1<<3) /* Adapter -> Host */ #define DoorBellAdapterNormRespNotFull (1<<4) /* Adapter -> Host */ #define DoorBellPrintfReady (1<<5) /* Adapter -> Host */ +#define DoorBellAifPending (1<<6) /* Adapter -> Host */ + +/* PMC specific outbound doorbell bits */ +#define PmDoorBellResponseSent (1<<1) /* Adapter -> Host */ /* * For FIB communication, we need all of the following things @@ -1831,6 +1911,9 @@ extern struct aac_common aac_config; #define AifReqAPIJobUpdate 109 /* Update a job report from the API */ #define AifReqAPIJobFinish 110 /* Finish a job from the API */ +/* PMC NEW COMM: Request the event data */ +#define AifReqEvent 200 + /* * Adapter Initiated FIB command structures. Start with the adapter * initiated FIBs that really come from the adapter, and get responded @@ -1886,10 +1969,13 @@ int aac_rx_init(struct aac_dev *dev); int aac_rkt_init(struct aac_dev *dev); int aac_nark_init(struct aac_dev *dev); int aac_sa_init(struct aac_dev *dev); +int aac_src_init(struct aac_dev *dev); int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify); unsigned int aac_response_normal(struct aac_queue * q); unsigned int aac_command_normal(struct aac_queue * q); -unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index); +unsigned int aac_intr_normal(struct aac_dev *dev, u32 Index, + int isAif, int isFastResponse, + struct hw_fib *aif_fib); int aac_reset_adapter(struct aac_dev * dev, int forced); int aac_check_health(struct aac_dev * dev); int aac_command_thread(void *data); diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c index 645ddd9d9b9e..8a0b33033177 100644 --- a/drivers/scsi/aacraid/commctrl.c +++ b/drivers/scsi/aacraid/commctrl.c @@ -5,7 +5,8 @@ * based on the old aacraid driver that is.. * Adaptec aacraid device driver for Linux. * - * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com) + * Copyright (c) 2000-2010 Adaptec, Inc. + * 2010 PMC-Sierra, Inc. 
(aacraid@pmc-sierra.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c index a7261486ccd4..7ac8fdb5577b 100644 --- a/drivers/scsi/aacraid/comminit.c +++ b/drivers/scsi/aacraid/comminit.c @@ -5,7 +5,8 @@ * based on the old aacraid driver that is.. * Adaptec aacraid device driver for Linux. * - * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com) + * Copyright (c) 2000-2010 Adaptec, Inc. + * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -52,12 +53,16 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co unsigned long size, align; const unsigned long fibsize = 4096; const unsigned long printfbufsiz = 256; + unsigned long host_rrq_size = 0; struct aac_init *init; dma_addr_t phys; unsigned long aac_max_hostphysmempages; - size = fibsize + sizeof(struct aac_init) + commsize + commalign + printfbufsiz; - + if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) + host_rrq_size = (dev->scsi_host_ptr->can_queue + + AAC_NUM_MGT_FIB) * sizeof(u32); + size = fibsize + sizeof(struct aac_init) + commsize + + commalign + printfbufsiz + host_rrq_size; base = pci_alloc_consistent(dev->pdev, size, &phys); @@ -70,8 +75,14 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co dev->comm_phys = phys; dev->comm_size = size; - dev->init = (struct aac_init *)(base + fibsize); - dev->init_pa = phys + fibsize; + if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) { + dev->host_rrq = (u32 *)(base + fibsize); + dev->host_rrq_pa = phys + fibsize; + memset(dev->host_rrq, 0, host_rrq_size); + } + + dev->init = (struct aac_init *)(base + fibsize + host_rrq_size); + dev->init_pa = phys + fibsize + host_rrq_size; init = dev->init; @@ -106,8 +117,13 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co init->InitFlags = 0; if (dev->comm_interface == AAC_COMM_MESSAGE) { - init->InitFlags = cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED); + init->InitFlags |= cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED); dprintk((KERN_WARNING"aacraid: New Comm Interface enabled\n")); + } else if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) { + init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_6); + init->InitFlags |= cpu_to_le32(INITFLAGS_NEW_COMM_TYPE1_SUPPORTED); + dprintk((KERN_WARNING + "aacraid: New Comm Interface type1 enabled\n")); } init->InitFlags |= cpu_to_le32(INITFLAGS_DRIVER_USES_UTC_TIME | INITFLAGS_DRIVER_SUPPORTS_PM); @@ -115,11 +131,18 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co init->MaxIoSize = cpu_to_le32(dev->scsi_host_ptr->max_sectors << 9); init->MaxFibSize = cpu_to_le32(dev->max_fib_size); + init->MaxNumAif = cpu_to_le32(dev->max_num_aif); + init->HostRRQ_AddrHigh = (u32)((u64)dev->host_rrq_pa >> 32); + init->HostRRQ_AddrLow = (u32)(dev->host_rrq_pa & 0xffffffff); + + /* * Increment the base address by the amount already used */ - base = base + fibsize + sizeof(struct aac_init); - phys = (dma_addr_t)((ulong)phys + fibsize + sizeof(struct aac_init)); + base = base + fibsize + host_rrq_size + sizeof(struct aac_init); + phys = (dma_addr_t)((ulong)phys + fibsize + host_rrq_size + + sizeof(struct aac_init)); + /* * Align the beginning of Headers to commalign */ @@ 
-314,15 +337,22 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev) - sizeof(struct aac_write) + sizeof(struct sgentry)) / sizeof(struct sgentry); dev->comm_interface = AAC_COMM_PRODUCER; - dev->raw_io_64 = 0; + dev->raw_io_interface = dev->raw_io_64 = 0; + if ((!aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES, 0, 0, 0, 0, 0, 0, status+0, status+1, status+2, NULL, NULL)) && (status[0] == 0x00000001)) { if (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_64)) dev->raw_io_64 = 1; - if (dev->a_ops.adapter_comm && - (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM))) - dev->comm_interface = AAC_COMM_MESSAGE; + if (dev->a_ops.adapter_comm) { + if (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE1)) { + dev->comm_interface = AAC_COMM_MESSAGE_TYPE1; + dev->raw_io_interface = 1; + } else if (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM)) { + dev->comm_interface = AAC_COMM_MESSAGE; + dev->raw_io_interface = 1; + } + } if ((dev->comm_interface == AAC_COMM_MESSAGE) && (status[2] > dev->base_size)) { aac_adapter_ioremap(dev, 0); @@ -350,10 +380,12 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev) * status[3] & 0xFFFF maximum number FIBs outstanding */ host->max_sectors = (status[1] >> 16) << 1; - dev->max_fib_size = status[1] & 0xFFFF; + /* Multiple of 32 for PMC */ + dev->max_fib_size = status[1] & 0xFFE0; host->sg_tablesize = status[2] >> 16; dev->sg_tablesize = status[2] & 0xFFFF; host->can_queue = (status[3] & 0xFFFF) - AAC_NUM_MGT_FIB; + dev->max_num_aif = status[4] & 0xFFFF; /* * NOTE: * All these overrides are based on a fixed internal diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index 060ac4bd5a14..dd7ad3ba2dad 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c @@ -5,7 +5,8 @@ * based on the old aacraid driver that is.. * Adaptec aacraid device driver for Linux. * - * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com) + * Copyright (c) 2000-2010 Adaptec, Inc. + * 2010 PMC-Sierra, Inc. 
(aacraid@pmc-sierra.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -63,9 +64,11 @@ static int fib_map_alloc(struct aac_dev *dev) "allocate hardware fibs pci_alloc_consistent(%p, %d * (%d + %d), %p)\n", dev->pdev, dev->max_fib_size, dev->scsi_host_ptr->can_queue, AAC_NUM_MGT_FIB, &dev->hw_fib_pa)); - if((dev->hw_fib_va = pci_alloc_consistent(dev->pdev, dev->max_fib_size - * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB), - &dev->hw_fib_pa))==NULL) + dev->hw_fib_va = pci_alloc_consistent(dev->pdev, + (dev->max_fib_size + sizeof(struct aac_fib_xporthdr)) + * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) + (ALIGN32 - 1), + &dev->hw_fib_pa); + if (dev->hw_fib_va == NULL) return -ENOMEM; return 0; } @@ -110,9 +113,22 @@ int aac_fib_setup(struct aac_dev * dev) if (i<0) return -ENOMEM; + /* 32 byte alignment for PMC */ + hw_fib_pa = (dev->hw_fib_pa + (ALIGN32 - 1)) & ~(ALIGN32 - 1); + dev->hw_fib_va = (struct hw_fib *)((unsigned char *)dev->hw_fib_va + + (hw_fib_pa - dev->hw_fib_pa)); + dev->hw_fib_pa = hw_fib_pa; + memset(dev->hw_fib_va, 0, + (dev->max_fib_size + sizeof(struct aac_fib_xporthdr)) * + (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB)); + + /* add Xport header */ + dev->hw_fib_va = (struct hw_fib *)((unsigned char *)dev->hw_fib_va + + sizeof(struct aac_fib_xporthdr)); + dev->hw_fib_pa += sizeof(struct aac_fib_xporthdr); + hw_fib = dev->hw_fib_va; hw_fib_pa = dev->hw_fib_pa; - memset(hw_fib, 0, dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB)); /* * Initialise the fibs */ @@ -129,8 +145,10 @@ int aac_fib_setup(struct aac_dev * dev) hw_fib->header.XferState = cpu_to_le32(0xffffffff); hw_fib->header.SenderSize = cpu_to_le16(dev->max_fib_size); fibptr->hw_fib_pa = hw_fib_pa; - hw_fib = (struct hw_fib *)((unsigned char *)hw_fib + dev->max_fib_size); - hw_fib_pa = hw_fib_pa + dev->max_fib_size; + hw_fib = (struct hw_fib *)((unsigned char *)hw_fib + + dev->max_fib_size + sizeof(struct aac_fib_xporthdr)); + hw_fib_pa = hw_fib_pa + + dev->max_fib_size + sizeof(struct aac_fib_xporthdr); } /* * Add the fib chain to the free list @@ -664,9 +682,14 @@ int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size) unsigned long nointr = 0; unsigned long qflags; + if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) { + kfree(hw_fib); + return 0; + } + if (hw_fib->header.XferState == 0) { if (dev->comm_interface == AAC_COMM_MESSAGE) - kfree (hw_fib); + kfree(hw_fib); return 0; } /* @@ -674,7 +697,7 @@ int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size) */ if (hw_fib->header.StructType != FIB_MAGIC) { if (dev->comm_interface == AAC_COMM_MESSAGE) - kfree (hw_fib); + kfree(hw_fib); return -EINVAL; } /* diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c index 9c7408fe8c7d..f0c66a80ad13 100644 --- a/drivers/scsi/aacraid/dpcsup.c +++ b/drivers/scsi/aacraid/dpcsup.c @@ -5,7 +5,8 @@ * based on the old aacraid driver that is.. * Adaptec aacraid device driver for Linux. * - * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com) + * Copyright (c) 2000-2010 Adaptec, Inc. + * 2010 PMC-Sierra, Inc. 
(aacraid@pmc-sierra.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -228,6 +229,48 @@ unsigned int aac_command_normal(struct aac_queue *q) return 0; } +/* + * + * aac_aif_callback + * @context: the context set in the fib - here it is scsi cmd + * @fibptr: pointer to the fib + * + * Handles the AIFs - new method (SRC) + * + */ + +static void aac_aif_callback(void *context, struct fib * fibptr) +{ + struct fib *fibctx; + struct aac_dev *dev; + struct aac_aifcmd *cmd; + int status; + + fibctx = (struct fib *)context; + BUG_ON(fibptr == NULL); + dev = fibptr->dev; + + if (fibptr->hw_fib_va->header.XferState & + cpu_to_le32(NoMoreAifDataAvailable)) { + aac_fib_complete(fibptr); + aac_fib_free(fibptr); + return; + } + + aac_intr_normal(dev, 0, 1, 0, fibptr->hw_fib_va); + + aac_fib_init(fibctx); + cmd = (struct aac_aifcmd *) fib_data(fibctx); + cmd->command = cpu_to_le32(AifReqEvent); + + status = aac_fib_send(AifRequest, + fibctx, + sizeof(struct hw_fib)-sizeof(struct aac_fibhdr), + FsaNormal, + 0, 1, + (fib_callback)aac_aif_callback, fibctx); +} + /** * aac_intr_normal - Handle command replies @@ -238,19 +281,17 @@ unsigned int aac_command_normal(struct aac_queue *q) * know there is a response on our normal priority queue. We will pull off * all QE there are and wake up all the waiters before exiting. */ - -unsigned int aac_intr_normal(struct aac_dev * dev, u32 index) +unsigned int aac_intr_normal(struct aac_dev *dev, u32 index, + int isAif, int isFastResponse, struct hw_fib *aif_fib) { unsigned long mflags; dprintk((KERN_INFO "aac_intr_normal(%p,%x)\n", dev, index)); - if ((index & 0x00000002L)) { + if (isAif == 1) { /* AIF - common */ struct hw_fib * hw_fib; struct fib * fib; struct aac_queue *q = &dev->queues->queue[HostNormCmdQueue]; unsigned long flags; - if (index == 0xFFFFFFFEL) /* Special Case */ - return 0; /* Do nothing */ /* * Allocate a FIB. For non queued stuff we can just use * the stack so we are happy. 
We need a fib object in order to @@ -263,8 +304,13 @@ unsigned int aac_intr_normal(struct aac_dev * dev, u32 index) kfree (fib); return 1; } - memcpy(hw_fib, (struct hw_fib *)(((uintptr_t)(dev->regs.sa)) + - (index & ~0x00000002L)), sizeof(struct hw_fib)); + if (aif_fib != NULL) { + memcpy(hw_fib, aif_fib, sizeof(struct hw_fib)); + } else { + memcpy(hw_fib, + (struct hw_fib *)(((uintptr_t)(dev->regs.sa)) + + index), sizeof(struct hw_fib)); + } INIT_LIST_HEAD(&fib->fiblink); fib->type = FSAFS_NTC_FIB_CONTEXT; fib->size = sizeof(struct fib); @@ -277,9 +323,26 @@ unsigned int aac_intr_normal(struct aac_dev * dev, u32 index) wake_up_interruptible(&q->cmdready); spin_unlock_irqrestore(q->lock, flags); return 1; + } else if (isAif == 2) { /* AIF - new (SRC) */ + struct fib *fibctx; + struct aac_aifcmd *cmd; + + fibctx = aac_fib_alloc(dev); + if (!fibctx) + return 1; + aac_fib_init(fibctx); + + cmd = (struct aac_aifcmd *) fib_data(fibctx); + cmd->command = cpu_to_le32(AifReqEvent); + + return aac_fib_send(AifRequest, + fibctx, + sizeof(struct hw_fib)-sizeof(struct aac_fibhdr), + FsaNormal, + 0, 1, + (fib_callback)aac_aif_callback, fibctx); } else { - int fast = index & 0x01; - struct fib * fib = &dev->fibs[index >> 2]; + struct fib *fib = &dev->fibs[index]; struct hw_fib * hwfib = fib->hw_fib_va; /* @@ -298,7 +361,7 @@ unsigned int aac_intr_normal(struct aac_dev * dev, u32 index) return 0; } - if (fast) { + if (isFastResponse) { /* * Doctor the fib */ diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 2c93d9496d62..4ff26521d75f 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -5,7 +5,8 @@ * based on the old aacraid driver that is.. * Adaptec aacraid device driver for Linux. * - * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com) + * Copyright (c) 2000-2010 Adaptec, Inc. + * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -54,7 +55,7 @@ #include "aacraid.h" -#define AAC_DRIVER_VERSION "1.1-5" +#define AAC_DRIVER_VERSION "1.1-7" #ifndef AAC_DRIVER_BRANCH #define AAC_DRIVER_BRANCH "" #endif @@ -161,6 +162,7 @@ static const struct pci_device_id aac_pci_tbl[] __devinitdata = { { 0x9005, 0x0285, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 59 }, /* Adaptec Catch All */ { 0x9005, 0x0286, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 60 }, /* Adaptec Rocket Catch All */ { 0x9005, 0x0288, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 61 }, /* Adaptec NEMER/ARK Catch All */ + { 0x9005, 0x028b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 62 }, /* Adaptec PMC Catch All */ { 0,} }; MODULE_DEVICE_TABLE(pci, aac_pci_tbl); @@ -235,7 +237,8 @@ static struct aac_driver_ident aac_drivers[] = { { aac_rx_init, "aacraid", "Legend ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend Catchall */ { aac_rx_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Catch All */ { aac_rkt_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Rocket Catch All */ - { aac_nark_init, "aacraid", "ADAPTEC ", "RAID ", 2 } /* Adaptec NEMER/ARK Catch All */ + { aac_nark_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec NEMER/ARK Catch All */ + { aac_src_init, "aacraid", "ADAPTEC ", "RAID ", 2 } /* Adaptec PMC Catch All */ }; /** @@ -653,8 +656,10 @@ static int aac_eh_reset(struct scsi_cmnd* cmd) * This adapter needs a blind reset, only do so for Adapters that * support a register, instead of a commanded, reset. 
*/ - if ((aac->supplement_adapter_info.SupportedOptions2 & - AAC_OPTION_MU_RESET) && + if (((aac->supplement_adapter_info.SupportedOptions2 & + AAC_OPTION_MU_RESET) || + (aac->supplement_adapter_info.SupportedOptions2 & + AAC_OPTION_DOORBELL_RESET)) && aac_check_reset && ((aac_check_reset != 1) || !(aac->supplement_adapter_info.SupportedOptions2 & diff --git a/drivers/scsi/aacraid/nark.c b/drivers/scsi/aacraid/nark.c index c55f7c862f0e..f397d21a0c06 100644 --- a/drivers/scsi/aacraid/nark.c +++ b/drivers/scsi/aacraid/nark.c @@ -4,7 +4,8 @@ * based on the old aacraid driver that is.. * Adaptec aacraid device driver for Linux. * - * Copyright (c) 2006-2007 Adaptec, Inc. (aacraid@adaptec.com) + * Copyright (c) 2000-2010 Adaptec, Inc. + * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/scsi/aacraid/rkt.c b/drivers/scsi/aacraid/rkt.c index 16d8db550027..be44de92429a 100644 --- a/drivers/scsi/aacraid/rkt.c +++ b/drivers/scsi/aacraid/rkt.c @@ -5,7 +5,8 @@ * based on the old aacraid driver that is.. * Adaptec aacraid device driver for Linux. * - * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com) + * Copyright (c) 2000-2010 Adaptec, Inc. + * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c index 84d77fd86e5b..ce530f113fdb 100644 --- a/drivers/scsi/aacraid/rx.c +++ b/drivers/scsi/aacraid/rx.c @@ -5,7 +5,8 @@ * based on the old aacraid driver that is.. * Adaptec aacraid device driver for Linux. * - * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com) + * Copyright (c) 2000-2010 Adaptec, Inc. + * 2010 PMC-Sierra, Inc. 
(aacraid@pmc-sierra.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -84,15 +85,35 @@ static irqreturn_t aac_rx_intr_producer(int irq, void *dev_id) static irqreturn_t aac_rx_intr_message(int irq, void *dev_id) { + int isAif, isFastResponse, isSpecial; struct aac_dev *dev = dev_id; u32 Index = rx_readl(dev, MUnit.OutboundQueue); if (unlikely(Index == 0xFFFFFFFFL)) Index = rx_readl(dev, MUnit.OutboundQueue); if (likely(Index != 0xFFFFFFFFL)) { do { - if (unlikely(aac_intr_normal(dev, Index))) { - rx_writel(dev, MUnit.OutboundQueue, Index); - rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespReady); + isAif = isFastResponse = isSpecial = 0; + if (Index & 0x00000002L) { + isAif = 1; + if (Index == 0xFFFFFFFEL) + isSpecial = 1; + Index &= ~0x00000002L; + } else { + if (Index & 0x00000001L) + isFastResponse = 1; + Index >>= 2; + } + if (!isSpecial) { + if (unlikely(aac_intr_normal(dev, + Index, isAif, + isFastResponse, NULL))) { + rx_writel(dev, + MUnit.OutboundQueue, + Index); + rx_writel(dev, + MUnit.ODR, + DoorBellAdapterNormRespReady); + } } Index = rx_readl(dev, MUnit.OutboundQueue); } while (Index != 0xFFFFFFFFL); @@ -631,6 +652,10 @@ int _aac_rx_init(struct aac_dev *dev) name, instance); goto error_iounmap; } + dev->dbg_base = dev->scsi_host_ptr->base; + dev->dbg_base_mapped = dev->base; + dev->dbg_size = dev->base_size; + aac_adapter_enable_int(dev); /* * Tell the adapter that all is configured, and it can diff --git a/drivers/scsi/aacraid/sa.c b/drivers/scsi/aacraid/sa.c index 622c21c68e65..e5d4457121ea 100644 --- a/drivers/scsi/aacraid/sa.c +++ b/drivers/scsi/aacraid/sa.c @@ -5,7 +5,8 @@ * based on the old aacraid driver that is.. * Adaptec aacraid device driver for Linux. * - * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com) + * Copyright (c) 2000-2010 Adaptec, Inc. + * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -391,6 +392,10 @@ int aac_sa_init(struct aac_dev *dev) name, instance); goto error_iounmap; } + dev->dbg_base = dev->scsi_host_ptr->base; + dev->dbg_base_mapped = dev->base; + dev->dbg_size = dev->base_size; + aac_adapter_enable_int(dev); /* diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c new file mode 100644 index 000000000000..c20494660603 --- /dev/null +++ b/drivers/scsi/aacraid/src.c @@ -0,0 +1,594 @@ +/* + * Adaptec AAC series RAID controller driver + * (c) Copyright 2001 Red Hat Inc. + * + * based on the old aacraid driver that is.. + * Adaptec aacraid device driver for Linux. + * + * Copyright (c) 2000-2010 Adaptec, Inc. + * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, write to + * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * + * Module Name: + * src.c + * + * Abstract: Hardware Device Interface for PMC SRC based controllers + * + */ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/spinlock.h> +#include <linux/slab.h> +#include <linux/blkdev.h> +#include <linux/delay.h> +#include <linux/version.h> +#include <linux/completion.h> +#include <linux/time.h> +#include <linux/interrupt.h> +#include <scsi/scsi_host.h> + +#include "aacraid.h" + +static irqreturn_t aac_src_intr_message(int irq, void *dev_id) +{ + struct aac_dev *dev = dev_id; + unsigned long bellbits, bellbits_shifted; + int our_interrupt = 0; + int isFastResponse; + u32 index, handle; + + bellbits = src_readl(dev, MUnit.ODR_R); + if (bellbits & PmDoorBellResponseSent) { + bellbits = PmDoorBellResponseSent; + /* handle async. status */ + our_interrupt = 1; + index = dev->host_rrq_idx; + if (dev->host_rrq[index] == 0) { + u32 old_index = index; + /* adjust index */ + do { + index++; + if (index == dev->scsi_host_ptr->can_queue + + AAC_NUM_MGT_FIB) + index = 0; + if (dev->host_rrq[index] != 0) + break; + } while (index != old_index); + dev->host_rrq_idx = index; + } + for (;;) { + isFastResponse = 0; + /* remove toggle bit (31) */ + handle = (dev->host_rrq[index] & 0x7fffffff); + /* check fast response bit (30) */ + if (handle & 0x40000000) + isFastResponse = 1; + handle &= 0x0000ffff; + if (handle == 0) + break; + + aac_intr_normal(dev, handle-1, 0, isFastResponse, NULL); + + dev->host_rrq[index++] = 0; + if (index == dev->scsi_host_ptr->can_queue + + AAC_NUM_MGT_FIB) + index = 0; + dev->host_rrq_idx = index; + } + } else { + bellbits_shifted = (bellbits >> SRC_ODR_SHIFT); + if (bellbits_shifted & DoorBellAifPending) { + our_interrupt = 1; + /* handle AIF */ + aac_intr_normal(dev, 0, 2, 0, NULL); + } + } + + if (our_interrupt) { + src_writel(dev, MUnit.ODR_C, bellbits); + return IRQ_HANDLED; + } + return IRQ_NONE; +} + +/** + * aac_src_disable_interrupt - Disable interrupts + * @dev: Adapter + */ + +static void aac_src_disable_interrupt(struct aac_dev *dev) +{ + src_writel(dev, MUnit.OIMR, dev->OIMR = 0xffffffff); +} + +/** + * aac_src_enable_interrupt_message - Enable interrupts + * @dev: Adapter + */ + +static void aac_src_enable_interrupt_message(struct aac_dev *dev) +{ + src_writel(dev, MUnit.OIMR, dev->OIMR = 0xfffffff8); +} + +/** + * src_sync_cmd - send a command and wait + * @dev: Adapter + * @command: Command to execute + * @p1: first parameter + * @ret: adapter status + * + * This routine will send a synchronous command to the adapter and wait + * for its completion. + */ + +static int src_sync_cmd(struct aac_dev *dev, u32 command, + u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6, + u32 *status, u32 * r1, u32 * r2, u32 * r3, u32 * r4) +{ + unsigned long start; + int ok; + + /* + * Write the command into Mailbox 0 + */ + writel(command, &dev->IndexRegs->Mailbox[0]); + /* + * Write the parameters into Mailboxes 1 - 6 + */ + writel(p1, &dev->IndexRegs->Mailbox[1]); + writel(p2, &dev->IndexRegs->Mailbox[2]); + writel(p3, &dev->IndexRegs->Mailbox[3]); + writel(p4, &dev->IndexRegs->Mailbox[4]); + + /* + * Clear the synch command doorbell to start on a clean slate. + */ + src_writel(dev, MUnit.ODR_C, OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT); + + /* + * Disable doorbell interrupts + */ + src_writel(dev, MUnit.OIMR, dev->OIMR = 0xffffffff); + + /* + * Force the completion of the mask register write before issuing + * the interrupt. 
+ */ + src_readl(dev, MUnit.OIMR); + + /* + * Signal that there is a new synch command + */ + src_writel(dev, MUnit.IDR, INBOUNDDOORBELL_0 << SRC_IDR_SHIFT); + + ok = 0; + start = jiffies; + + /* + * Wait up to 30 seconds + */ + while (time_before(jiffies, start+30*HZ)) { + /* Delay 5 microseconds to let Mon960 get info. */ + udelay(5); + + /* Mon960 will set doorbell0 bit + * when it has completed the command + */ + if ((src_readl(dev, MUnit.ODR_R) >> SRC_ODR_SHIFT) & OUTBOUNDDOORBELL_0) { + /* Clear the doorbell */ + src_writel(dev, + MUnit.ODR_C, + OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT); + ok = 1; + break; + } + + /* Yield the processor in case we are slow */ + msleep(1); + } + if (unlikely(ok != 1)) { + /* Restore interrupt mask even though we timed out */ + aac_adapter_enable_int(dev); + return -ETIMEDOUT; + } + + /* Pull the synch status from Mailbox 0 */ + if (status) + *status = readl(&dev->IndexRegs->Mailbox[0]); + if (r1) + *r1 = readl(&dev->IndexRegs->Mailbox[1]); + if (r2) + *r2 = readl(&dev->IndexRegs->Mailbox[2]); + if (r3) + *r3 = readl(&dev->IndexRegs->Mailbox[3]); + if (r4) + *r4 = readl(&dev->IndexRegs->Mailbox[4]); + + /* Clear the synch command doorbell */ + src_writel(dev, MUnit.ODR_C, OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT); + + /* Restore interrupt mask */ + aac_adapter_enable_int(dev); + return 0; + +} + +/** + * aac_src_interrupt_adapter - interrupt adapter + * @dev: Adapter + * + * Send an interrupt to the i960 and breakpoint it. + */ + +static void aac_src_interrupt_adapter(struct aac_dev *dev) +{ + src_sync_cmd(dev, BREAKPOINT_REQUEST, + 0, 0, 0, 0, 0, 0, + NULL, NULL, NULL, NULL, NULL); +} + +/** + * aac_src_notify_adapter - send an event to the adapter + * @dev: Adapter + * @event: Event to send + * + * Notify the i960 that something it probably cares about has + * happened. + */ + +static void aac_src_notify_adapter(struct aac_dev *dev, u32 event) +{ + switch (event) { + + case AdapNormCmdQue: + src_writel(dev, MUnit.ODR_C, + INBOUNDDOORBELL_1 << SRC_ODR_SHIFT); + break; + case HostNormRespNotFull: + src_writel(dev, MUnit.ODR_C, + INBOUNDDOORBELL_4 << SRC_ODR_SHIFT); + break; + case AdapNormRespQue: + src_writel(dev, MUnit.ODR_C, + INBOUNDDOORBELL_2 << SRC_ODR_SHIFT); + break; + case HostNormCmdNotFull: + src_writel(dev, MUnit.ODR_C, + INBOUNDDOORBELL_3 << SRC_ODR_SHIFT); + break; + case FastIo: + src_writel(dev, MUnit.ODR_C, + INBOUNDDOORBELL_6 << SRC_ODR_SHIFT); + break; + case AdapPrintfDone: + src_writel(dev, MUnit.ODR_C, + INBOUNDDOORBELL_5 << SRC_ODR_SHIFT); + break; + default: + BUG(); + break; + } +} + +/** + * aac_src_start_adapter - activate adapter + * @dev: Adapter + * + * Start up processing on an i960 based AAC adapter + */ + +static void aac_src_start_adapter(struct aac_dev *dev) +{ + struct aac_init *init; + + init = dev->init; + init->HostElapsedSeconds = cpu_to_le32(get_seconds()); + + /* We can only use a 32 bit address here */ + src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (u32)(ulong)dev->init_pa, + 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL); +} + +/** + * aac_src_check_health + * @dev: device to check if healthy + * + * Will attempt to determine if the specified adapter is alive and + * capable of handling requests, returning 0 if alive. + */ +static int aac_src_check_health(struct aac_dev *dev) +{ + u32 status = src_readl(dev, MUnit.OMR); + + /* + * Check to see if the board failed any self tests. + */ + if (unlikely(status & SELF_TEST_FAILED)) + return -1; + + /* + * Check to see if the board panic'd. 
+ */ + if (unlikely(status & KERNEL_PANIC)) + return (status >> 16) & 0xFF; + /* + * Wait for the adapter to be up and running. + */ + if (unlikely(!(status & KERNEL_UP_AND_RUNNING))) + return -3; + /* + * Everything is OK + */ + return 0; +} + +/** + * aac_src_deliver_message + * @fib: fib to issue + * + * Will send a fib, returning 0 if successful. + */ +static int aac_src_deliver_message(struct fib *fib) +{ + struct aac_dev *dev = fib->dev; + struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue]; + unsigned long qflags; + u32 fibsize; + u64 address; + struct aac_fib_xporthdr *pFibX; + + spin_lock_irqsave(q->lock, qflags); + q->numpending++; + spin_unlock_irqrestore(q->lock, qflags); + + /* Calculate the amount to the fibsize bits */ + fibsize = (sizeof(struct aac_fib_xporthdr) + + fib->hw_fib_va->header.Size + 127) / 128 - 1; + if (fibsize > (ALIGN32 - 1)) + fibsize = ALIGN32 - 1; + + /* Fill XPORT header */ + pFibX = (struct aac_fib_xporthdr *) + ((unsigned char *)fib->hw_fib_va - + sizeof(struct aac_fib_xporthdr)); + pFibX->Handle = fib->hw_fib_va->header.SenderData + 1; + pFibX->HostAddress = fib->hw_fib_pa; + pFibX->Size = fib->hw_fib_va->header.Size; + address = fib->hw_fib_pa - (u64)sizeof(struct aac_fib_xporthdr); + + src_writel(dev, MUnit.IQ_H, (u32)(address >> 32)); + src_writel(dev, MUnit.IQ_L, (u32)(address & 0xffffffff) + fibsize); + return 0; +} + +/** + * aac_src_ioremap + * @size: mapping resize request + * + */ +static int aac_src_ioremap(struct aac_dev *dev, u32 size) +{ + if (!size) { + iounmap(dev->regs.src.bar0); + dev->regs.src.bar0 = NULL; + iounmap(dev->base); + dev->base = NULL; + return 0; + } + dev->regs.src.bar1 = ioremap(pci_resource_start(dev->pdev, 2), + AAC_MIN_SRC_BAR1_SIZE); + dev->base = NULL; + if (dev->regs.src.bar1 == NULL) + return -1; + dev->base = dev->regs.src.bar0 = ioremap(dev->scsi_host_ptr->base, + size); + if (dev->base == NULL) { + iounmap(dev->regs.src.bar1); + dev->regs.src.bar1 = NULL; + return -1; + } + dev->IndexRegs = &((struct src_registers __iomem *) + dev->base)->IndexRegs; + return 0; +} + +static int aac_src_restart_adapter(struct aac_dev *dev, int bled) +{ + u32 var, reset_mask; + + if (bled >= 0) { + if (bled) + printk(KERN_ERR "%s%d: adapter kernel panic'd %x.\n", + dev->name, dev->id, bled); + bled = aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS, + 0, 0, 0, 0, 0, 0, &var, &reset_mask, NULL, NULL, NULL); + if (bled || (var != 0x00000001)) + bled = -EINVAL; + if (dev->supplement_adapter_info.SupportedOptions2 & + AAC_OPTION_DOORBELL_RESET) { + src_writel(dev, MUnit.IDR, reset_mask); + msleep(5000); /* Delay 5 seconds */ + } + } + + if (src_readl(dev, MUnit.OMR) & KERNEL_PANIC) + return -ENODEV; + + if (startup_timeout < 300) + startup_timeout = 300; + + return 0; +} + +/** + * aac_src_select_comm - Select communications method + * @dev: Adapter + * @comm: communications method + */ +int aac_src_select_comm(struct aac_dev *dev, int comm) +{ + switch (comm) { + case AAC_COMM_MESSAGE: + dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message; + dev->a_ops.adapter_intr = aac_src_intr_message; + dev->a_ops.adapter_deliver = aac_src_deliver_message; + break; + default: + return 1; + } + return 0; +} + +/** + * aac_src_init - initialize an Cardinal Frey Bar card + * @dev: device to configure + * + */ + +int aac_src_init(struct aac_dev *dev) +{ + unsigned long start; + unsigned long status; + int restart = 0; + int instance = dev->id; + const char *name = dev->name; + + dev->a_ops.adapter_ioremap = aac_src_ioremap; + 
dev->a_ops.adapter_comm = aac_src_select_comm; + + dev->base_size = AAC_MIN_SRC_BAR0_SIZE; + if (aac_adapter_ioremap(dev, dev->base_size)) { + printk(KERN_WARNING "%s: unable to map adapter.\n", name); + goto error_iounmap; + } + + /* Failure to reset here is an option ... */ + dev->a_ops.adapter_sync_cmd = src_sync_cmd; + dev->a_ops.adapter_enable_int = aac_src_disable_interrupt; + if ((aac_reset_devices || reset_devices) && + !aac_src_restart_adapter(dev, 0)) + ++restart; + /* + * Check to see if the board panic'd while booting. + */ + status = src_readl(dev, MUnit.OMR); + if (status & KERNEL_PANIC) { + if (aac_src_restart_adapter(dev, aac_src_check_health(dev))) + goto error_iounmap; + ++restart; + } + /* + * Check to see if the board failed any self tests. + */ + status = src_readl(dev, MUnit.OMR); + if (status & SELF_TEST_FAILED) { + printk(KERN_ERR "%s%d: adapter self-test failed.\n", + dev->name, instance); + goto error_iounmap; + } + /* + * Check to see if the monitor panic'd while booting. + */ + if (status & MONITOR_PANIC) { + printk(KERN_ERR "%s%d: adapter monitor panic.\n", + dev->name, instance); + goto error_iounmap; + } + start = jiffies; + /* + * Wait for the adapter to be up and running. Wait up to 3 minutes + */ + while (!((status = src_readl(dev, MUnit.OMR)) & + KERNEL_UP_AND_RUNNING)) { + if ((restart && + (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) || + time_after(jiffies, start+HZ*startup_timeout)) { + printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n", + dev->name, instance, status); + goto error_iounmap; + } + if (!restart && + ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) || + time_after(jiffies, start + HZ * + ((startup_timeout > 60) + ? (startup_timeout - 60) + : (startup_timeout / 2))))) { + if (likely(!aac_src_restart_adapter(dev, + aac_src_check_health(dev)))) + start = jiffies; + ++restart; + } + msleep(1); + } + if (restart && aac_commit) + aac_commit = 1; + /* + * Fill in the common function dispatch table. + */ + dev->a_ops.adapter_interrupt = aac_src_interrupt_adapter; + dev->a_ops.adapter_disable_int = aac_src_disable_interrupt; + dev->a_ops.adapter_notify = aac_src_notify_adapter; + dev->a_ops.adapter_sync_cmd = src_sync_cmd; + dev->a_ops.adapter_check_health = aac_src_check_health; + dev->a_ops.adapter_restart = aac_src_restart_adapter; + + /* + * First clear out all interrupts. Then enable the one's that we + * can handle. 
+ */ + aac_adapter_comm(dev, AAC_COMM_MESSAGE); + aac_adapter_disable_int(dev); + src_writel(dev, MUnit.ODR_C, 0xffffffff); + aac_adapter_enable_int(dev); + + if (aac_init_adapter(dev) == NULL) + goto error_iounmap; + if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE1) + goto error_iounmap; + + dev->msi = aac_msi && !pci_enable_msi(dev->pdev); + + if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr, + IRQF_SHARED|IRQF_DISABLED, "aacraid", dev) < 0) { + + if (dev->msi) + pci_disable_msi(dev->pdev); + + printk(KERN_ERR "%s%d: Interrupt unavailable.\n", + name, instance); + goto error_iounmap; + } + dev->dbg_base = pci_resource_start(dev->pdev, 2); + dev->dbg_base_mapped = dev->regs.src.bar1; + dev->dbg_size = AAC_MIN_SRC_BAR1_SIZE; + + aac_adapter_enable_int(dev); + /* + * Tell the adapter that all is configured, and it can + * start accepting requests + */ + aac_src_start_adapter(dev); + + return 0; + +error_iounmap: + + return -1; +} diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h index df2fc09ba479..b6d350ac4288 100644 --- a/drivers/scsi/bnx2fc/bnx2fc.h +++ b/drivers/scsi/bnx2fc/bnx2fc.h @@ -62,7 +62,7 @@ #include "bnx2fc_constants.h" #define BNX2FC_NAME "bnx2fc" -#define BNX2FC_VERSION "1.0.0" +#define BNX2FC_VERSION "1.0.1" #define PFX "bnx2fc: " @@ -84,9 +84,15 @@ #define BNX2FC_NUM_MAX_SESS 128 #define BNX2FC_NUM_MAX_SESS_LOG (ilog2(BNX2FC_NUM_MAX_SESS)) -#define BNX2FC_MAX_OUTSTANDING_CMNDS 4096 +#define BNX2FC_MAX_OUTSTANDING_CMNDS 2048 +#define BNX2FC_CAN_QUEUE BNX2FC_MAX_OUTSTANDING_CMNDS +#define BNX2FC_ELSTM_XIDS BNX2FC_CAN_QUEUE #define BNX2FC_MIN_PAYLOAD 256 #define BNX2FC_MAX_PAYLOAD 2048 +#define BNX2FC_MFS \ + (BNX2FC_MAX_PAYLOAD + sizeof(struct fc_frame_header)) +#define BNX2FC_MINI_JUMBO_MTU 2500 + #define BNX2FC_RQ_BUF_SZ 256 #define BNX2FC_RQ_BUF_LOG_SZ (ilog2(BNX2FC_RQ_BUF_SZ)) @@ -98,7 +104,8 @@ #define BNX2FC_CONFQ_WQE_SIZE (sizeof(struct fcoe_confqe)) #define BNX2FC_5771X_DB_PAGE_SIZE 128 -#define BNX2FC_MAX_TASKS BNX2FC_MAX_OUTSTANDING_CMNDS +#define BNX2FC_MAX_TASKS \ + (BNX2FC_MAX_OUTSTANDING_CMNDS + BNX2FC_ELSTM_XIDS) #define BNX2FC_TASK_SIZE 128 #define BNX2FC_TASKS_PER_PAGE (PAGE_SIZE/BNX2FC_TASK_SIZE) #define BNX2FC_TASK_CTX_ARR_SZ (BNX2FC_MAX_TASKS/BNX2FC_TASKS_PER_PAGE) @@ -112,10 +119,10 @@ #define BNX2FC_WRITE (1 << 0) #define BNX2FC_MIN_XID 0 -#define BNX2FC_MAX_XID (BNX2FC_MAX_OUTSTANDING_CMNDS - 1) -#define FCOE_MIN_XID (BNX2FC_MAX_OUTSTANDING_CMNDS) -#define FCOE_MAX_XID \ - (BNX2FC_MAX_OUTSTANDING_CMNDS + (nr_cpu_ids * 256)) +#define BNX2FC_MAX_XID \ + (BNX2FC_MAX_OUTSTANDING_CMNDS + BNX2FC_ELSTM_XIDS - 1) +#define FCOE_MIN_XID (BNX2FC_MAX_XID + 1) +#define FCOE_MAX_XID (FCOE_MIN_XID + 4095) #define BNX2FC_MAX_LUN 0xFFFF #define BNX2FC_MAX_FCP_TGT 256 #define BNX2FC_MAX_CMD_LEN 16 @@ -125,7 +132,6 @@ #define BNX2FC_WAIT_CNT 120 #define BNX2FC_FW_TIMEOUT (3 * HZ) - #define PORT_MAX 2 #define CMD_SCSI_STATUS(Cmnd) ((Cmnd)->SCp.Status) diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c index e476e8753079..e2e647509a73 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c @@ -21,7 +21,7 @@ DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu); #define DRV_MODULE_NAME "bnx2fc" #define DRV_MODULE_VERSION BNX2FC_VERSION -#define DRV_MODULE_RELDATE "Jan 25, 2011" +#define DRV_MODULE_RELDATE "Mar 17, 2011" static char version[] __devinitdata = @@ -437,17 +437,16 @@ static int bnx2fc_l2_rcv_thread(void *arg) set_current_state(TASK_INTERRUPTIBLE); while 
(!kthread_should_stop()) { schedule(); - set_current_state(TASK_RUNNING); spin_lock_bh(&bg->fcoe_rx_list.lock); while ((skb = __skb_dequeue(&bg->fcoe_rx_list)) != NULL) { spin_unlock_bh(&bg->fcoe_rx_list.lock); bnx2fc_recv_frame(skb); spin_lock_bh(&bg->fcoe_rx_list.lock); } + __set_current_state(TASK_INTERRUPTIBLE); spin_unlock_bh(&bg->fcoe_rx_list.lock); - set_current_state(TASK_INTERRUPTIBLE); } - set_current_state(TASK_RUNNING); + __set_current_state(TASK_RUNNING); return 0; } @@ -569,7 +568,6 @@ int bnx2fc_percpu_io_thread(void *arg) set_current_state(TASK_INTERRUPTIBLE); while (!kthread_should_stop()) { schedule(); - set_current_state(TASK_RUNNING); spin_lock_bh(&p->fp_work_lock); while (!list_empty(&p->work_list)) { list_splice_init(&p->work_list, &work_list); @@ -583,10 +581,10 @@ int bnx2fc_percpu_io_thread(void *arg) spin_lock_bh(&p->fp_work_lock); } + __set_current_state(TASK_INTERRUPTIBLE); spin_unlock_bh(&p->fp_work_lock); - set_current_state(TASK_INTERRUPTIBLE); } - set_current_state(TASK_RUNNING); + __set_current_state(TASK_RUNNING); return 0; } @@ -661,31 +659,6 @@ static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev) return 0; } -static int bnx2fc_mfs_update(struct fc_lport *lport) -{ - struct fcoe_port *port = lport_priv(lport); - struct bnx2fc_hba *hba = port->priv; - struct net_device *netdev = hba->netdev; - u32 mfs; - u32 max_mfs; - - mfs = netdev->mtu - (sizeof(struct fcoe_hdr) + - sizeof(struct fcoe_crc_eof)); - max_mfs = BNX2FC_MAX_PAYLOAD + sizeof(struct fc_frame_header); - BNX2FC_HBA_DBG(lport, "mfs = %d, max_mfs = %d\n", mfs, max_mfs); - if (mfs > max_mfs) - mfs = max_mfs; - - /* Adjust mfs to be a multiple of 256 bytes */ - mfs = (((mfs - sizeof(struct fc_frame_header)) / BNX2FC_MIN_PAYLOAD) * - BNX2FC_MIN_PAYLOAD); - mfs = mfs + sizeof(struct fc_frame_header); - - BNX2FC_HBA_DBG(lport, "Set MFS = %d\n", mfs); - if (fc_set_mfs(lport, mfs)) - return -EINVAL; - return 0; -} static void bnx2fc_link_speed_update(struct fc_lport *lport) { struct fcoe_port *port = lport_priv(lport); @@ -754,7 +727,7 @@ static int bnx2fc_net_config(struct fc_lport *lport) !hba->phys_dev->ethtool_ops->get_pauseparam) return -EOPNOTSUPP; - if (bnx2fc_mfs_update(lport)) + if (fc_set_mfs(lport, BNX2FC_MFS)) return -EINVAL; skb_queue_head_init(&port->fcoe_pending_queue); @@ -825,14 +798,6 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event) if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state)) printk(KERN_ERR "indicate_netevent: "\ "adapter is not UP!!\n"); - /* fall thru to update mfs if MTU has changed */ - case NETDEV_CHANGEMTU: - BNX2FC_HBA_DBG(lport, "NETDEV_CHANGEMTU event\n"); - bnx2fc_mfs_update(lport); - mutex_lock(&lport->lp_mutex); - list_for_each_entry(vport, &lport->vports, list) - bnx2fc_mfs_update(vport); - mutex_unlock(&lport->lp_mutex); break; case NETDEV_DOWN: @@ -1095,13 +1060,6 @@ static int bnx2fc_netdev_setup(struct bnx2fc_hba *hba) struct netdev_hw_addr *ha; int sel_san_mac = 0; - /* Do not support for bonding device */ - if ((netdev->priv_flags & IFF_MASTER_ALB) || - (netdev->priv_flags & IFF_SLAVE_INACTIVE) || - (netdev->priv_flags & IFF_MASTER_8023AD)) { - return -EOPNOTSUPP; - } - /* setup Source MAC Address */ rcu_read_lock(); for_each_dev_addr(physdev, ha) { @@ -1432,16 +1390,9 @@ static int bnx2fc_destroy(struct net_device *netdev) struct net_device *phys_dev; int rc = 0; - if (!rtnl_trylock()) - return restart_syscall(); + rtnl_lock(); mutex_lock(&bnx2fc_dev_lock); -#ifdef CONFIG_SCSI_BNX2X_FCOE_MODULE - if 
(THIS_MODULE->state != MODULE_STATE_LIVE) { - rc = -ENODEV; - goto netdev_err; - } -#endif /* obtain physical netdev */ if (netdev->priv_flags & IFF_802_1Q_VLAN) phys_dev = vlan_dev_real_dev(netdev); @@ -1805,18 +1756,10 @@ static int bnx2fc_disable(struct net_device *netdev) struct ethtool_drvinfo drvinfo; int rc = 0; - if (!rtnl_trylock()) { - printk(KERN_ERR PFX "retrying for rtnl_lock\n"); - return -EIO; - } + rtnl_lock(); mutex_lock(&bnx2fc_dev_lock); - if (THIS_MODULE->state != MODULE_STATE_LIVE) { - rc = -ENODEV; - goto nodev; - } - /* obtain physical netdev */ if (netdev->priv_flags & IFF_802_1Q_VLAN) phys_dev = vlan_dev_real_dev(netdev); @@ -1867,19 +1810,11 @@ static int bnx2fc_enable(struct net_device *netdev) struct ethtool_drvinfo drvinfo; int rc = 0; - if (!rtnl_trylock()) { - printk(KERN_ERR PFX "retrying for rtnl_lock\n"); - return -EIO; - } + rtnl_lock(); BNX2FC_MISC_DBG("Entered %s\n", __func__); mutex_lock(&bnx2fc_dev_lock); - if (THIS_MODULE->state != MODULE_STATE_LIVE) { - rc = -ENODEV; - goto nodev; - } - /* obtain physical netdev */ if (netdev->priv_flags & IFF_802_1Q_VLAN) phys_dev = vlan_dev_real_dev(netdev); @@ -1942,18 +1877,9 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode) return -EIO; } - if (!rtnl_trylock()) { - printk(KERN_ERR "trying for rtnl_lock\n"); - return -EIO; - } - mutex_lock(&bnx2fc_dev_lock); + rtnl_lock(); -#ifdef CONFIG_SCSI_BNX2X_FCOE_MODULE - if (THIS_MODULE->state != MODULE_STATE_LIVE) { - rc = -ENODEV; - goto mod_err; - } -#endif + mutex_lock(&bnx2fc_dev_lock); if (!try_module_get(THIS_MODULE)) { rc = -EINVAL; @@ -2506,7 +2432,7 @@ static struct scsi_host_template bnx2fc_shost_template = { .change_queue_type = fc_change_queue_type, .this_id = -1, .cmd_per_lun = 3, - .can_queue = (BNX2FC_MAX_OUTSTANDING_CMNDS/2), + .can_queue = BNX2FC_CAN_QUEUE, .use_clustering = ENABLE_CLUSTERING, .sg_tablesize = BNX2FC_MAX_BDS_PER_CMD, .max_sectors = 512, diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c index 4f4096836742..1b680e288c56 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c +++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c @@ -87,7 +87,7 @@ int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba) fcoe_init1.task_list_pbl_addr_lo = (u32) hba->task_ctx_bd_dma; fcoe_init1.task_list_pbl_addr_hi = (u32) ((u64) hba->task_ctx_bd_dma >> 32); - fcoe_init1.mtu = hba->netdev->mtu; + fcoe_init1.mtu = BNX2FC_MINI_JUMBO_MTU; fcoe_init1.flags = (PAGE_SHIFT << FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT); @@ -590,7 +590,10 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe) num_rq = (frame_len + BNX2FC_RQ_BUF_SZ - 1) / BNX2FC_RQ_BUF_SZ; + spin_lock_bh(&tgt->tgt_lock); rq_data = (unsigned char *)bnx2fc_get_next_rqe(tgt, num_rq); + spin_unlock_bh(&tgt->tgt_lock); + if (rq_data) { buf = rq_data; } else { @@ -603,8 +606,10 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe) } for (i = 0; i < num_rq; i++) { + spin_lock_bh(&tgt->tgt_lock); rq_data = (unsigned char *) bnx2fc_get_next_rqe(tgt, 1); + spin_unlock_bh(&tgt->tgt_lock); len = BNX2FC_RQ_BUF_SZ; memcpy(buf1, rq_data, len); buf1 += len; @@ -615,13 +620,15 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe) if (buf != rq_data) kfree(buf); + spin_lock_bh(&tgt->tgt_lock); bnx2fc_return_rqe(tgt, num_rq); + spin_unlock_bh(&tgt->tgt_lock); break; case FCOE_ERROR_DETECTION_CQE_TYPE: /* - *In case of error reporting CQE a single RQ entry - * is consumes. 
+ * In case of error reporting CQE a single RQ entry + * is consumed. */ spin_lock_bh(&tgt->tgt_lock); num_rq = 1; @@ -705,6 +712,7 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe) *In case of warning reporting CQE a single RQ entry * is consumes. */ + spin_lock_bh(&tgt->tgt_lock); num_rq = 1; err_entry = (struct fcoe_err_report_entry *) bnx2fc_get_next_rqe(tgt, 1); @@ -717,6 +725,7 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe) err_entry->tx_buf_off, err_entry->rx_buf_off); bnx2fc_return_rqe(tgt, 1); + spin_unlock_bh(&tgt->tgt_lock); break; default: diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c index 0f1dd23730db..d3fc302c241a 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_io.c +++ b/drivers/scsi/bnx2fc/bnx2fc_io.c @@ -11,6 +11,9 @@ */ #include "bnx2fc.h" + +#define RESERVE_FREE_LIST_INDEX num_possible_cpus() + static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len, int bd_index); static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req); @@ -242,8 +245,9 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba, u32 mem_size; u16 xid; int i; - int num_ios; + int num_ios, num_pri_ios; size_t bd_tbl_sz; + int arr_sz = num_possible_cpus() + 1; if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) { printk(KERN_ERR PFX "cmd_mgr_alloc: Invalid min_xid 0x%x \ @@ -263,14 +267,14 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba, } cmgr->free_list = kzalloc(sizeof(*cmgr->free_list) * - num_possible_cpus(), GFP_KERNEL); + arr_sz, GFP_KERNEL); if (!cmgr->free_list) { printk(KERN_ERR PFX "failed to alloc free_list\n"); goto mem_err; } cmgr->free_list_lock = kzalloc(sizeof(*cmgr->free_list_lock) * - num_possible_cpus(), GFP_KERNEL); + arr_sz, GFP_KERNEL); if (!cmgr->free_list_lock) { printk(KERN_ERR PFX "failed to alloc free_list_lock\n"); goto mem_err; @@ -279,13 +283,18 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba, cmgr->hba = hba; cmgr->cmds = (struct bnx2fc_cmd **)(cmgr + 1); - for (i = 0; i < num_possible_cpus(); i++) { + for (i = 0; i < arr_sz; i++) { INIT_LIST_HEAD(&cmgr->free_list[i]); spin_lock_init(&cmgr->free_list_lock[i]); } - /* Pre-allocated pool of bnx2fc_cmds */ + /* + * Pre-allocated pool of bnx2fc_cmds. + * Last entry in the free list array is the free list + * of slow path requests. 
+ */ xid = BNX2FC_MIN_XID; + num_pri_ios = num_ios - BNX2FC_ELSTM_XIDS; for (i = 0; i < num_ios; i++) { io_req = kzalloc(sizeof(*io_req), GFP_KERNEL); @@ -298,11 +307,13 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba, INIT_DELAYED_WORK(&io_req->timeout_work, bnx2fc_cmd_timeout); io_req->xid = xid++; - if (io_req->xid >= BNX2FC_MAX_OUTSTANDING_CMNDS) - printk(KERN_ERR PFX "ERROR allocating xids - 0x%x\n", - io_req->xid); - list_add_tail(&io_req->link, - &cmgr->free_list[io_req->xid % num_possible_cpus()]); + if (i < num_pri_ios) + list_add_tail(&io_req->link, + &cmgr->free_list[io_req->xid % + num_possible_cpus()]); + else + list_add_tail(&io_req->link, + &cmgr->free_list[num_possible_cpus()]); io_req++; } @@ -389,7 +400,7 @@ free_cmd_pool: if (!cmgr->free_list) goto free_cmgr; - for (i = 0; i < num_possible_cpus(); i++) { + for (i = 0; i < num_possible_cpus() + 1; i++) { struct list_head *list; struct list_head *tmp; @@ -413,6 +424,7 @@ struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type) struct bnx2fc_cmd *io_req; struct list_head *listp; struct io_bdt *bd_tbl; + int index = RESERVE_FREE_LIST_INDEX; u32 max_sqes; u16 xid; @@ -432,26 +444,26 @@ struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type) * NOTE: Free list insertions and deletions are protected with * cmgr lock */ - spin_lock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]); - if ((list_empty(&(cmd_mgr->free_list[smp_processor_id()]))) || + spin_lock_bh(&cmd_mgr->free_list_lock[index]); + if ((list_empty(&(cmd_mgr->free_list[index]))) || (tgt->num_active_ios.counter >= max_sqes)) { BNX2FC_TGT_DBG(tgt, "No free els_tm cmds available " "ios(%d):sqes(%d)\n", tgt->num_active_ios.counter, tgt->max_sqes); - if (list_empty(&(cmd_mgr->free_list[smp_processor_id()]))) + if (list_empty(&(cmd_mgr->free_list[index]))) printk(KERN_ERR PFX "elstm_alloc: list_empty\n"); - spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]); + spin_unlock_bh(&cmd_mgr->free_list_lock[index]); return NULL; } listp = (struct list_head *) - cmd_mgr->free_list[smp_processor_id()].next; + cmd_mgr->free_list[index].next; list_del_init(listp); io_req = (struct bnx2fc_cmd *) listp; xid = io_req->xid; cmd_mgr->cmds[xid] = io_req; atomic_inc(&tgt->num_active_ios); - spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]); + spin_unlock_bh(&cmd_mgr->free_list_lock[index]); INIT_LIST_HEAD(&io_req->link); @@ -479,27 +491,30 @@ static struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt) struct io_bdt *bd_tbl; u32 max_sqes; u16 xid; + int index = get_cpu(); max_sqes = BNX2FC_SCSI_MAX_SQES; /* * NOTE: Free list insertions and deletions are protected with * cmgr lock */ - spin_lock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]); - if ((list_empty(&cmd_mgr->free_list[smp_processor_id()])) || + spin_lock_bh(&cmd_mgr->free_list_lock[index]); + if ((list_empty(&cmd_mgr->free_list[index])) || (tgt->num_active_ios.counter >= max_sqes)) { - spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]); + spin_unlock_bh(&cmd_mgr->free_list_lock[index]); + put_cpu(); return NULL; } listp = (struct list_head *) - cmd_mgr->free_list[smp_processor_id()].next; + cmd_mgr->free_list[index].next; list_del_init(listp); io_req = (struct bnx2fc_cmd *) listp; xid = io_req->xid; cmd_mgr->cmds[xid] = io_req; atomic_inc(&tgt->num_active_ios); - spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]); + spin_unlock_bh(&cmd_mgr->free_list_lock[index]); + put_cpu(); INIT_LIST_HEAD(&io_req->link); @@ -522,8 +537,15 
@@ void bnx2fc_cmd_release(struct kref *ref) struct bnx2fc_cmd *io_req = container_of(ref, struct bnx2fc_cmd, refcount); struct bnx2fc_cmd_mgr *cmd_mgr = io_req->cmd_mgr; + int index; + + if (io_req->cmd_type == BNX2FC_SCSI_CMD) + index = io_req->xid % num_possible_cpus(); + else + index = RESERVE_FREE_LIST_INDEX; - spin_lock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]); + + spin_lock_bh(&cmd_mgr->free_list_lock[index]); if (io_req->cmd_type != BNX2FC_SCSI_CMD) bnx2fc_free_mp_resc(io_req); cmd_mgr->cmds[io_req->xid] = NULL; @@ -531,9 +553,10 @@ void bnx2fc_cmd_release(struct kref *ref) list_del_init(&io_req->link); /* Add it to the free list */ list_add(&io_req->link, - &cmd_mgr->free_list[smp_processor_id()]); + &cmd_mgr->free_list[index]); atomic_dec(&io_req->tgt->num_active_ios); - spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]); + spin_unlock_bh(&cmd_mgr->free_list_lock[index]); + } static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req) diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c index 7ea93af60260..7cc05e4e82d5 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c +++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c @@ -304,10 +304,8 @@ static void bnx2fc_upload_session(struct fcoe_port *port, " not sent to FW\n"); /* Free session resources */ - spin_lock_bh(&tgt->cq_lock); bnx2fc_free_session_resc(hba, tgt); bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id); - spin_unlock_bh(&tgt->cq_lock); } static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt, @@ -830,11 +828,13 @@ static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba, tgt->rq = NULL; } /* Free CQ */ + spin_lock_bh(&tgt->cq_lock); if (tgt->cq) { dma_free_coherent(&hba->pcidev->dev, tgt->cq_mem_size, tgt->cq, tgt->cq_dma); tgt->cq = NULL; } + spin_unlock_bh(&tgt->cq_lock); /* Free SQ */ if (tgt->sq) { dma_free_coherent(&hba->pcidev->dev, tgt->sq_mem_size, diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c index 8eeb39ffa37f..e98ae33f1295 100644 --- a/drivers/scsi/libiscsi_tcp.c +++ b/drivers/scsi/libiscsi_tcp.c @@ -132,14 +132,25 @@ static void iscsi_tcp_segment_map(struct iscsi_segment *segment, int recv) if (page_count(sg_page(sg)) >= 1 && !recv) return; - segment->sg_mapped = kmap_atomic(sg_page(sg), KM_SOFTIRQ0); + if (recv) { + segment->atomic_mapped = true; + segment->sg_mapped = kmap_atomic(sg_page(sg), KM_SOFTIRQ0); + } else { + segment->atomic_mapped = false; + /* the xmit path can sleep with the page mapped so use kmap */ + segment->sg_mapped = kmap(sg_page(sg)); + } + segment->data = segment->sg_mapped + sg->offset + segment->sg_offset; } void iscsi_tcp_segment_unmap(struct iscsi_segment *segment) { if (segment->sg_mapped) { - kunmap_atomic(segment->sg_mapped, KM_SOFTIRQ0); + if (segment->atomic_mapped) + kunmap_atomic(segment->sg_mapped, KM_SOFTIRQ0); + else + kunmap(sg_page(segment->sg)); segment->sg_mapped = NULL; segment->data = NULL; } diff --git a/drivers/scsi/lpfc/Makefile b/drivers/scsi/lpfc/Makefile index 14de249917f8..88928f00aa2d 100644 --- a/drivers/scsi/lpfc/Makefile +++ b/drivers/scsi/lpfc/Makefile @@ -1,7 +1,7 @@ #/******************************************************************* # * This file is part of the Emulex Linux Device Driver for * # * Fibre Channel Host Bus Adapters. * -# * Copyright (C) 2004-2006 Emulex. All rights reserved. * +# * Copyright (C) 2004-2011 Emulex. All rights reserved. * # * EMULEX and SLI are trademarks of Emulex. 
* # * www.emulex.com * # * * diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index b64c6da870d3..60e98a62f308 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -539,6 +539,8 @@ struct lpfc_hba { (struct lpfc_hba *, uint32_t); int (*lpfc_hba_down_link) (struct lpfc_hba *, uint32_t); + int (*lpfc_selective_reset) + (struct lpfc_hba *); /* SLI4 specific HBA data structure */ struct lpfc_sli4_hba sli4_hba; @@ -895,7 +897,18 @@ lpfc_worker_wake_up(struct lpfc_hba *phba) return; } -static inline void +static inline int +lpfc_readl(void __iomem *addr, uint32_t *data) +{ + uint32_t temp; + temp = readl(addr); + if (temp == 0xffffffff) + return -EIO; + *data = temp; + return 0; +} + +static inline int lpfc_sli_read_hs(struct lpfc_hba *phba) { /* @@ -904,15 +917,17 @@ lpfc_sli_read_hs(struct lpfc_hba *phba) */ phba->sli.slistat.err_attn_event++; - /* Save status info */ - phba->work_hs = readl(phba->HSregaddr); - phba->work_status[0] = readl(phba->MBslimaddr + 0xa8); - phba->work_status[1] = readl(phba->MBslimaddr + 0xac); + /* Save status info and check for unplug error */ + if (lpfc_readl(phba->HSregaddr, &phba->work_hs) || + lpfc_readl(phba->MBslimaddr + 0xa8, &phba->work_status[0]) || + lpfc_readl(phba->MBslimaddr + 0xac, &phba->work_status[1])) { + return -EIO; + } /* Clear chip Host Attention error bit */ writel(HA_ERATT, phba->HAregaddr); readl(phba->HAregaddr); /* flush */ phba->pport->stopped = 1; - return; + return 0; } diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index e7c020df12fa..4e0faa00b96f 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -685,7 +685,7 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type) * -EIO reset not configured or error posting the event * zero for success **/ -static int +int lpfc_selective_reset(struct lpfc_hba *phba) { struct completion online_compl; @@ -746,7 +746,7 @@ lpfc_issue_reset(struct device *dev, struct device_attribute *attr, int status = -EINVAL; if (strncmp(buf, "selective", sizeof("selective") - 1) == 0) - status = lpfc_selective_reset(phba); + status = phba->lpfc_selective_reset(phba); if (status == 0) return strlen(buf); @@ -1224,7 +1224,10 @@ lpfc_poll_store(struct device *dev, struct device_attribute *attr, if (val & ENABLE_FCP_RING_POLLING) { if ((val & DISABLE_FCP_RING_INT) && !(old_val & DISABLE_FCP_RING_INT)) { - creg_val = readl(phba->HCregaddr); + if (lpfc_readl(phba->HCregaddr, &creg_val)) { + spin_unlock_irq(&phba->hbalock); + return -EINVAL; + } creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING); writel(creg_val, phba->HCregaddr); readl(phba->HCregaddr); /* flush */ @@ -1242,7 +1245,10 @@ lpfc_poll_store(struct device *dev, struct device_attribute *attr, spin_unlock_irq(&phba->hbalock); del_timer(&phba->fcp_poll_timer); spin_lock_irq(&phba->hbalock); - creg_val = readl(phba->HCregaddr); + if (lpfc_readl(phba->HCregaddr, &creg_val)) { + spin_unlock_irq(&phba->hbalock); + return -EINVAL; + } creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); writel(creg_val, phba->HCregaddr); readl(phba->HCregaddr); /* flush */ diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index 0dd43bb91618..793b9f1131fb 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2009-2010 Emulex. All rights reserved. 
* + * Copyright (C) 2009-2011 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * @@ -348,7 +348,10 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job) dd_data->context_un.iocb.bmp = bmp; if (phba->cfg_poll & DISABLE_FCP_RING_INT) { - creg_val = readl(phba->HCregaddr); + if (lpfc_readl(phba->HCregaddr, &creg_val)) { + rc = -EIO ; + goto free_cmdiocbq; + } creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); writel(creg_val, phba->HCregaddr); readl(phba->HCregaddr); /* flush */ @@ -599,7 +602,10 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job) dd_data->context_un.iocb.ndlp = ndlp; if (phba->cfg_poll & DISABLE_FCP_RING_INT) { - creg_val = readl(phba->HCregaddr); + if (lpfc_readl(phba->HCregaddr, &creg_val)) { + rc = -EIO; + goto linkdown_err; + } creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); writel(creg_val, phba->HCregaddr); readl(phba->HCregaddr); /* flush */ @@ -613,6 +619,7 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job) else rc = -EIO; +linkdown_err: pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, job->request_payload.sg_cnt, DMA_TO_DEVICE); pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list, @@ -1357,7 +1364,10 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag, dd_data->context_un.iocb.ndlp = ndlp; if (phba->cfg_poll & DISABLE_FCP_RING_INT) { - creg_val = readl(phba->HCregaddr); + if (lpfc_readl(phba->HCregaddr, &creg_val)) { + rc = -IOCB_ERROR; + goto issue_ct_rsp_exit; + } creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); writel(creg_val, phba->HCregaddr); readl(phba->HCregaddr); /* flush */ @@ -2479,16 +2489,18 @@ lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) from = (uint8_t *)dd_data->context_un.mbox.mb; job = dd_data->context_un.mbox.set_job; - size = job->reply_payload.payload_len; - job->reply->reply_payload_rcv_len = - sg_copy_from_buffer(job->reply_payload.sg_list, - job->reply_payload.sg_cnt, - from, size); - job->reply->result = 0; + if (job) { + size = job->reply_payload.payload_len; + job->reply->reply_payload_rcv_len = + sg_copy_from_buffer(job->reply_payload.sg_list, + job->reply_payload.sg_cnt, + from, size); + job->reply->result = 0; + job->dd_data = NULL; + job->job_done(job); + } dd_data->context_un.mbox.set_job = NULL; - job->dd_data = NULL; - job->job_done(job); /* need to hold the lock until we call job done to hold off * the timeout handler returning to the midlayer while * we are stillprocessing the job diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index 3d40023f4804..f0b332f4eedb 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2010 Emulex. All rights reserved. * + * Copyright (C) 2004-2011 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* * www.emulex.com * * * @@ -254,8 +254,8 @@ uint16_t lpfc_sli_next_iotag(struct lpfc_hba *, struct lpfc_iocbq *); void lpfc_sli_cancel_iocbs(struct lpfc_hba *, struct list_head *, uint32_t, uint32_t); void lpfc_sli_wake_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *); - -void lpfc_reset_barrier(struct lpfc_hba * phba); +int lpfc_selective_reset(struct lpfc_hba *); +void lpfc_reset_barrier(struct lpfc_hba *); int lpfc_sli_brdready(struct lpfc_hba *, uint32_t); int lpfc_sli_brdkill(struct lpfc_hba *); int lpfc_sli_brdreset(struct lpfc_hba *); diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 8e28edf9801e..735028fedda5 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -89,7 +89,8 @@ lpfc_els_chk_latt(struct lpfc_vport *vport) return 0; /* Read the HBA Host Attention Register */ - ha_copy = readl(phba->HAregaddr); + if (lpfc_readl(phba->HAregaddr, &ha_copy)) + return 1; if (!(ha_copy & HA_LATT)) return 0; diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index 94ae37c5111a..95f11ed79463 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h @@ -1344,7 +1344,7 @@ typedef struct { /* FireFly BIU registers */ #define HS_FFER1 0x80000000 /* Bit 31 */ #define HS_CRIT_TEMP 0x00000100 /* Bit 8 */ #define HS_FFERM 0xFF000100 /* Mask for error bits 31:24 and 8 */ - +#define UNPLUG_ERR 0x00000001 /* Indicate pci hot unplug */ /* Host Control Register */ #define HC_REG_OFFSET 12 /* Byte offset from register base address */ @@ -1713,6 +1713,17 @@ struct lpfc_pde6 { #define pde6_apptagval_WORD word2 }; +struct lpfc_pde7 { + uint32_t word0; +#define pde7_type_SHIFT 24 +#define pde7_type_MASK 0x000000ff +#define pde7_type_WORD word0 +#define pde7_rsvd0_SHIFT 0 +#define pde7_rsvd0_MASK 0x00ffffff +#define pde7_rsvd0_WORD word0 + uint32_t addrHigh; + uint32_t addrLow; +}; /* Structure for MB Command LOAD_SM and DOWN_LOAD */ @@ -3621,7 +3632,7 @@ typedef struct _IOCB { /* IOCB structure */ ASYNCSTAT_FIELDS asyncstat; /* async_status iocb */ QUE_XRI64_CX_FIELDS quexri64cx; /* que_xri64_cx fields */ struct rcv_seq64 rcvseq64; /* RCV_SEQ64 and RCV_CONT64 */ - struct sli4_bls_acc bls_acc; /* UNSOL ABTS BLS_ACC params */ + struct sli4_bls_rsp bls_rsp; /* UNSOL ABTS BLS_RSP params */ uint32_t ulpWord[IOCB_WORD_SZ - 2]; /* generic 6 'words' */ } un; union { diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index c7178d60c7bf..8433ac0d9fb4 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -215,7 +215,7 @@ struct lpfc_sli4_flags { #define lpfc_fip_flag_WORD word0 }; -struct sli4_bls_acc { +struct sli4_bls_rsp { uint32_t word0_rsvd; /* Word0 must be reserved */ uint32_t word1; #define lpfc_abts_orig_SHIFT 0 @@ -231,6 +231,16 @@ struct sli4_bls_acc { #define lpfc_abts_oxid_MASK 0x0000FFFF #define lpfc_abts_oxid_WORD word2 uint32_t word3; +#define lpfc_vndr_code_SHIFT 0 +#define lpfc_vndr_code_MASK 0x000000FF +#define lpfc_vndr_code_WORD word3 +#define lpfc_rsn_expln_SHIFT 8 +#define lpfc_rsn_expln_MASK 0x000000FF +#define lpfc_rsn_expln_WORD word3 +#define lpfc_rsn_code_SHIFT 16 +#define lpfc_rsn_code_MASK 0x000000FF +#define lpfc_rsn_code_WORD word3 + uint32_t word4; uint32_t word5_rsvd; /* Word5 must be reserved */ }; @@ -711,21 +721,27 @@ struct lpfc_sli4_cfg_mhdr { union lpfc_sli4_cfg_shdr { struct { uint32_t word6; -#define lpfc_mbox_hdr_opcode_SHIFT 0 -#define lpfc_mbox_hdr_opcode_MASK 0x000000FF -#define lpfc_mbox_hdr_opcode_WORD word6 -#define 
lpfc_mbox_hdr_subsystem_SHIFT 8 -#define lpfc_mbox_hdr_subsystem_MASK 0x000000FF -#define lpfc_mbox_hdr_subsystem_WORD word6 -#define lpfc_mbox_hdr_port_number_SHIFT 16 -#define lpfc_mbox_hdr_port_number_MASK 0x000000FF -#define lpfc_mbox_hdr_port_number_WORD word6 -#define lpfc_mbox_hdr_domain_SHIFT 24 -#define lpfc_mbox_hdr_domain_MASK 0x000000FF -#define lpfc_mbox_hdr_domain_WORD word6 +#define lpfc_mbox_hdr_opcode_SHIFT 0 +#define lpfc_mbox_hdr_opcode_MASK 0x000000FF +#define lpfc_mbox_hdr_opcode_WORD word6 +#define lpfc_mbox_hdr_subsystem_SHIFT 8 +#define lpfc_mbox_hdr_subsystem_MASK 0x000000FF +#define lpfc_mbox_hdr_subsystem_WORD word6 +#define lpfc_mbox_hdr_port_number_SHIFT 16 +#define lpfc_mbox_hdr_port_number_MASK 0x000000FF +#define lpfc_mbox_hdr_port_number_WORD word6 +#define lpfc_mbox_hdr_domain_SHIFT 24 +#define lpfc_mbox_hdr_domain_MASK 0x000000FF +#define lpfc_mbox_hdr_domain_WORD word6 uint32_t timeout; uint32_t request_length; - uint32_t reserved9; + uint32_t word9; +#define lpfc_mbox_hdr_version_SHIFT 0 +#define lpfc_mbox_hdr_version_MASK 0x000000FF +#define lpfc_mbox_hdr_version_WORD word9 +#define LPFC_Q_CREATE_VERSION_2 2 +#define LPFC_Q_CREATE_VERSION_1 1 +#define LPFC_Q_CREATE_VERSION_0 0 } request; struct { uint32_t word6; @@ -917,9 +933,12 @@ struct cq_context { #define LPFC_CQ_CNT_512 0x1 #define LPFC_CQ_CNT_1024 0x2 uint32_t word1; -#define lpfc_cq_eq_id_SHIFT 22 +#define lpfc_cq_eq_id_SHIFT 22 /* Version 0 Only */ #define lpfc_cq_eq_id_MASK 0x000000FF #define lpfc_cq_eq_id_WORD word1 +#define lpfc_cq_eq_id_2_SHIFT 0 /* Version 2 Only */ +#define lpfc_cq_eq_id_2_MASK 0x0000FFFF +#define lpfc_cq_eq_id_2_WORD word1 uint32_t reserved0; uint32_t reserved1; }; @@ -929,6 +948,9 @@ struct lpfc_mbx_cq_create { union { struct { uint32_t word0; +#define lpfc_mbx_cq_create_page_size_SHIFT 16 /* Version 2 Only */ +#define lpfc_mbx_cq_create_page_size_MASK 0x000000FF +#define lpfc_mbx_cq_create_page_size_WORD word0 #define lpfc_mbx_cq_create_num_pages_SHIFT 0 #define lpfc_mbx_cq_create_num_pages_MASK 0x0000FFFF #define lpfc_mbx_cq_create_num_pages_WORD word0 @@ -969,7 +991,7 @@ struct wq_context { struct lpfc_mbx_wq_create { struct mbox_header header; union { - struct { + struct { /* Version 0 Request */ uint32_t word0; #define lpfc_mbx_wq_create_num_pages_SHIFT 0 #define lpfc_mbx_wq_create_num_pages_MASK 0x0000FFFF @@ -979,6 +1001,23 @@ struct lpfc_mbx_wq_create { #define lpfc_mbx_wq_create_cq_id_WORD word0 struct dma_address page[LPFC_MAX_WQ_PAGE]; } request; + struct { /* Version 1 Request */ + uint32_t word0; /* Word 0 is the same as in v0 */ + uint32_t word1; +#define lpfc_mbx_wq_create_page_size_SHIFT 0 +#define lpfc_mbx_wq_create_page_size_MASK 0x000000FF +#define lpfc_mbx_wq_create_page_size_WORD word1 +#define lpfc_mbx_wq_create_wqe_size_SHIFT 8 +#define lpfc_mbx_wq_create_wqe_size_MASK 0x0000000F +#define lpfc_mbx_wq_create_wqe_size_WORD word1 +#define LPFC_WQ_WQE_SIZE_64 0x5 +#define LPFC_WQ_WQE_SIZE_128 0x6 +#define lpfc_mbx_wq_create_wqe_count_SHIFT 16 +#define lpfc_mbx_wq_create_wqe_count_MASK 0x0000FFFF +#define lpfc_mbx_wq_create_wqe_count_WORD word1 + uint32_t word2; + struct dma_address page[LPFC_MAX_WQ_PAGE-1]; + } request_1; struct { uint32_t word0; #define lpfc_mbx_wq_create_q_id_SHIFT 0 @@ -1007,13 +1046,22 @@ struct lpfc_mbx_wq_destroy { #define LPFC_DATA_BUF_SIZE 2048 struct rq_context { uint32_t word0; -#define lpfc_rq_context_rq_size_SHIFT 16 -#define lpfc_rq_context_rq_size_MASK 0x0000000F -#define lpfc_rq_context_rq_size_WORD word0 +#define 
lpfc_rq_context_rqe_count_SHIFT 16 /* Version 0 Only */ +#define lpfc_rq_context_rqe_count_MASK 0x0000000F +#define lpfc_rq_context_rqe_count_WORD word0 #define LPFC_RQ_RING_SIZE_512 9 /* 512 entries */ #define LPFC_RQ_RING_SIZE_1024 10 /* 1024 entries */ #define LPFC_RQ_RING_SIZE_2048 11 /* 2048 entries */ #define LPFC_RQ_RING_SIZE_4096 12 /* 4096 entries */ +#define lpfc_rq_context_rqe_count_1_SHIFT 16 /* Version 1 Only */ +#define lpfc_rq_context_rqe_count_1_MASK 0x0000FFFF +#define lpfc_rq_context_rqe_count_1_WORD word0 +#define lpfc_rq_context_rqe_size_SHIFT 8 /* Version 1 Only */ +#define lpfc_rq_context_rqe_size_MASK 0x0000000F +#define lpfc_rq_context_rqe_size_WORD word0 +#define lpfc_rq_context_page_size_SHIFT 0 /* Version 1 Only */ +#define lpfc_rq_context_page_size_MASK 0x000000FF +#define lpfc_rq_context_page_size_WORD word0 uint32_t reserved1; uint32_t word2; #define lpfc_rq_context_cq_id_SHIFT 16 @@ -1022,7 +1070,7 @@ struct rq_context { #define lpfc_rq_context_buf_size_SHIFT 0 #define lpfc_rq_context_buf_size_MASK 0x0000FFFF #define lpfc_rq_context_buf_size_WORD word2 - uint32_t reserved3; + uint32_t buffer_size; /* Version 1 Only */ }; struct lpfc_mbx_rq_create { @@ -1062,16 +1110,16 @@ struct lpfc_mbx_rq_destroy { struct mq_context { uint32_t word0; -#define lpfc_mq_context_cq_id_SHIFT 22 +#define lpfc_mq_context_cq_id_SHIFT 22 /* Version 0 Only */ #define lpfc_mq_context_cq_id_MASK 0x000003FF #define lpfc_mq_context_cq_id_WORD word0 -#define lpfc_mq_context_count_SHIFT 16 -#define lpfc_mq_context_count_MASK 0x0000000F -#define lpfc_mq_context_count_WORD word0 -#define LPFC_MQ_CNT_16 0x5 -#define LPFC_MQ_CNT_32 0x6 -#define LPFC_MQ_CNT_64 0x7 -#define LPFC_MQ_CNT_128 0x8 +#define lpfc_mq_context_ring_size_SHIFT 16 +#define lpfc_mq_context_ring_size_MASK 0x0000000F +#define lpfc_mq_context_ring_size_WORD word0 +#define LPFC_MQ_RING_SIZE_16 0x5 +#define LPFC_MQ_RING_SIZE_32 0x6 +#define LPFC_MQ_RING_SIZE_64 0x7 +#define LPFC_MQ_RING_SIZE_128 0x8 uint32_t word1; #define lpfc_mq_context_valid_SHIFT 31 #define lpfc_mq_context_valid_MASK 0x00000001 @@ -1105,9 +1153,12 @@ struct lpfc_mbx_mq_create_ext { union { struct { uint32_t word0; -#define lpfc_mbx_mq_create_ext_num_pages_SHIFT 0 -#define lpfc_mbx_mq_create_ext_num_pages_MASK 0x0000FFFF -#define lpfc_mbx_mq_create_ext_num_pages_WORD word0 +#define lpfc_mbx_mq_create_ext_num_pages_SHIFT 0 +#define lpfc_mbx_mq_create_ext_num_pages_MASK 0x0000FFFF +#define lpfc_mbx_mq_create_ext_num_pages_WORD word0 +#define lpfc_mbx_mq_create_ext_cq_id_SHIFT 16 /* Version 1 Only */ +#define lpfc_mbx_mq_create_ext_cq_id_MASK 0x0000FFFF +#define lpfc_mbx_mq_create_ext_cq_id_WORD word0 uint32_t async_evt_bmap; #define lpfc_mbx_mq_create_ext_async_evt_link_SHIFT LPFC_TRAILER_CODE_LINK #define lpfc_mbx_mq_create_ext_async_evt_link_MASK 0x00000001 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 35665cfb5689..e6ebe516cfbb 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2010 Emulex. All rights reserved. * + * Copyright (C) 2004-2011 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -507,7 +507,10 @@ lpfc_config_port_post(struct lpfc_hba *phba) phba->hba_flag &= ~HBA_ERATT_HANDLED; /* Enable appropriate host interrupts */ - status = readl(phba->HCregaddr); + if (lpfc_readl(phba->HCregaddr, &status)) { + spin_unlock_irq(&phba->hbalock); + return -EIO; + } status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA; if (psli->num_rings > 0) status |= HC_R0INT_ENA; @@ -1222,7 +1225,10 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba) /* Wait for the ER1 bit to clear.*/ while (phba->work_hs & HS_FFER1) { msleep(100); - phba->work_hs = readl(phba->HSregaddr); + if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) { + phba->work_hs = UNPLUG_ERR ; + break; + } /* If driver is unloading let the worker thread continue */ if (phba->pport->load_flag & FC_UNLOADING) { phba->work_hs = 0; @@ -4474,6 +4480,7 @@ lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) { phba->lpfc_hba_init_link = lpfc_hba_init_link; phba->lpfc_hba_down_link = lpfc_hba_down_link; + phba->lpfc_selective_reset = lpfc_selective_reset; switch (dev_grp) { case LPFC_PCI_DEV_LP: phba->lpfc_hba_down_post = lpfc_hba_down_post_s3; @@ -5385,13 +5392,16 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba) int i, port_error = 0; uint32_t if_type; + memset(&portsmphr_reg, 0, sizeof(portsmphr_reg)); + memset(®_data, 0, sizeof(reg_data)); if (!phba->sli4_hba.PSMPHRregaddr) return -ENODEV; /* Wait up to 30 seconds for the SLI Port POST done and ready */ for (i = 0; i < 3000; i++) { - portsmphr_reg.word0 = readl(phba->sli4_hba.PSMPHRregaddr); - if (bf_get(lpfc_port_smphr_perr, &portsmphr_reg)) { + if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr, + &portsmphr_reg.word0) || + (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) { /* Port has a fatal POST error, break out */ port_error = -ENODEV; break; @@ -5472,9 +5482,9 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba) break; case LPFC_SLI_INTF_IF_TYPE_2: /* Final checks. The port status should be clean. */ - reg_data.word0 = - readl(phba->sli4_hba.u.if_type2.STATUSregaddr); - if (bf_get(lpfc_sliport_status_err, ®_data)) { + if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, + ®_data.word0) || + bf_get(lpfc_sliport_status_err, ®_data)) { phba->work_status[0] = readl(phba->sli4_hba.u.if_type2. ERR1regaddr); @@ -6760,9 +6770,11 @@ lpfc_pci_function_reset(struct lpfc_hba *phba) * the loop again. */ for (rdy_chk = 0; rdy_chk < 1000; rdy_chk++) { - reg_data.word0 = - readl(phba->sli4_hba.u.if_type2. - STATUSregaddr); + if (lpfc_readl(phba->sli4_hba.u.if_type2. + STATUSregaddr, ®_data.word0)) { + rc = -ENODEV; + break; + } if (bf_get(lpfc_sliport_status_rdy, ®_data)) break; if (bf_get(lpfc_sliport_status_rn, ®_data)) { @@ -6783,8 +6795,11 @@ lpfc_pci_function_reset(struct lpfc_hba *phba) } /* Detect any port errors. */ - reg_data.word0 = readl(phba->sli4_hba.u.if_type2. - STATUSregaddr); + if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, + ®_data.word0)) { + rc = -ENODEV; + break; + } if ((bf_get(lpfc_sliport_status_err, ®_data)) || (rdy_chk >= 1000)) { phba->work_status[0] = readl( diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index bf34178b80bf..2b962b020cfb 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2009 Emulex. 
All rights reserved. * + * Copyright (C) 2004-2011 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -1514,10 +1514,11 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, struct scatterlist *sgpe = NULL; /* s/g prot entry */ struct lpfc_pde5 *pde5 = NULL; struct lpfc_pde6 *pde6 = NULL; - struct ulp_bde64 *prot_bde = NULL; + struct lpfc_pde7 *pde7 = NULL; dma_addr_t dataphysaddr, protphysaddr; unsigned short curr_data = 0, curr_prot = 0; - unsigned int split_offset, protgroup_len; + unsigned int split_offset; + unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder; unsigned int protgrp_blks, protgrp_bytes; unsigned int remainder, subtotal; int status; @@ -1585,23 +1586,33 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, bpl++; /* setup the first BDE that points to protection buffer */ - prot_bde = (struct ulp_bde64 *) bpl; - protphysaddr = sg_dma_address(sgpe); - prot_bde->addrHigh = le32_to_cpu(putPaddrLow(protphysaddr)); - prot_bde->addrLow = le32_to_cpu(putPaddrHigh(protphysaddr)); - protgroup_len = sg_dma_len(sgpe); + protphysaddr = sg_dma_address(sgpe) + protgroup_offset; + protgroup_len = sg_dma_len(sgpe) - protgroup_offset; /* must be integer multiple of the DIF block length */ BUG_ON(protgroup_len % 8); + pde7 = (struct lpfc_pde7 *) bpl; + memset(pde7, 0, sizeof(struct lpfc_pde7)); + bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR); + + pde7->addrHigh = le32_to_cpu(putPaddrLow(protphysaddr)); + pde7->addrLow = le32_to_cpu(putPaddrHigh(protphysaddr)); + protgrp_blks = protgroup_len / 8; protgrp_bytes = protgrp_blks * blksize; - prot_bde->tus.f.bdeSize = protgroup_len; - prot_bde->tus.f.bdeFlags = LPFC_PDE7_DESCRIPTOR; - prot_bde->tus.w = le32_to_cpu(bpl->tus.w); + /* check if this pde is crossing the 4K boundary; if so split */ + if ((pde7->addrLow & 0xfff) + protgroup_len > 0x1000) { + protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff); + protgroup_offset += protgroup_remainder; + protgrp_blks = protgroup_remainder / 8; + protgrp_bytes = protgroup_remainder * blksize; + } else { + protgroup_offset = 0; + curr_prot++; + } - curr_prot++; num_bde++; /* setup BDE's for data blocks associated with DIF data */ @@ -1653,6 +1664,13 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, } + if (protgroup_offset) { + /* update the reference tag */ + reftag += protgrp_blks; + bpl++; + continue; + } + /* are we done ? */ if (curr_prot == protcnt) { alldone = 1; @@ -1675,6 +1693,7 @@ out: return num_bde; } + /* * Given a SCSI command that supports DIF, determine composition of protection * groups involved in setting up buffer lists diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 2ee0374a9908..4746dcd756dd 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2009 Emulex. All rights reserved. * + * Copyright (C) 2004-2011 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -3477,7 +3477,8 @@ lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask) int retval = 0; /* Read the HBA Host Status Register */ - status = readl(phba->HSregaddr); + if (lpfc_readl(phba->HSregaddr, &status)) + return 1; /* * Check status register every 100ms for 5 retries, then every @@ -3502,7 +3503,10 @@ lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask) lpfc_sli_brdrestart(phba); } /* Read the HBA Host Status Register */ - status = readl(phba->HSregaddr); + if (lpfc_readl(phba->HSregaddr, &status)) { + retval = 1; + break; + } } /* Check to see if any errors occurred during init */ @@ -3584,7 +3588,7 @@ void lpfc_reset_barrier(struct lpfc_hba *phba) uint32_t __iomem *resp_buf; uint32_t __iomem *mbox_buf; volatile uint32_t mbox; - uint32_t hc_copy; + uint32_t hc_copy, ha_copy, resp_data; int i; uint8_t hdrtype; @@ -3601,12 +3605,15 @@ void lpfc_reset_barrier(struct lpfc_hba *phba) resp_buf = phba->MBslimaddr; /* Disable the error attention */ - hc_copy = readl(phba->HCregaddr); + if (lpfc_readl(phba->HCregaddr, &hc_copy)) + return; writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr); readl(phba->HCregaddr); /* flush */ phba->link_flag |= LS_IGNORE_ERATT; - if (readl(phba->HAregaddr) & HA_ERATT) { + if (lpfc_readl(phba->HAregaddr, &ha_copy)) + return; + if (ha_copy & HA_ERATT) { /* Clear Chip error bit */ writel(HA_ERATT, phba->HAregaddr); phba->pport->stopped = 1; @@ -3620,11 +3627,18 @@ void lpfc_reset_barrier(struct lpfc_hba *phba) mbox_buf = phba->MBslimaddr; writel(mbox, mbox_buf); - for (i = 0; - readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN) && i < 50; i++) - mdelay(1); - - if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) { + for (i = 0; i < 50; i++) { + if (lpfc_readl((resp_buf + 1), &resp_data)) + return; + if (resp_data != ~(BARRIER_TEST_PATTERN)) + mdelay(1); + else + break; + } + resp_data = 0; + if (lpfc_readl((resp_buf + 1), &resp_data)) + return; + if (resp_data != ~(BARRIER_TEST_PATTERN)) { if (phba->sli.sli_flag & LPFC_SLI_ACTIVE || phba->pport->stopped) goto restore_hc; @@ -3633,13 +3647,26 @@ void lpfc_reset_barrier(struct lpfc_hba *phba) } ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST; - for (i = 0; readl(resp_buf) != mbox && i < 500; i++) - mdelay(1); + resp_data = 0; + for (i = 0; i < 500; i++) { + if (lpfc_readl(resp_buf, &resp_data)) + return; + if (resp_data != mbox) + mdelay(1); + else + break; + } clear_errat: - while (!(readl(phba->HAregaddr) & HA_ERATT) && ++i < 500) - mdelay(1); + while (++i < 500) { + if (lpfc_readl(phba->HAregaddr, &ha_copy)) + return; + if (!(ha_copy & HA_ERATT)) + mdelay(1); + else + break; + } if (readl(phba->HAregaddr) & HA_ERATT) { writel(HA_ERATT, phba->HAregaddr); @@ -3686,7 +3713,11 @@ lpfc_sli_brdkill(struct lpfc_hba *phba) /* Disable the error attention */ spin_lock_irq(&phba->hbalock); - status = readl(phba->HCregaddr); + if (lpfc_readl(phba->HCregaddr, &status)) { + spin_unlock_irq(&phba->hbalock); + mempool_free(pmb, phba->mbox_mem_pool); + return 1; + } status &= ~HC_ERINT_ENA; writel(status, phba->HCregaddr); readl(phba->HCregaddr); /* flush */ @@ -3720,11 +3751,12 @@ lpfc_sli_brdkill(struct lpfc_hba *phba) * 3 seconds we still set HBA_ERROR state because the status of the * board is now undefined. 
*/ - ha_copy = readl(phba->HAregaddr); - + if (lpfc_readl(phba->HAregaddr, &ha_copy)) + return 1; while ((i++ < 30) && !(ha_copy & HA_ERATT)) { mdelay(100); - ha_copy = readl(phba->HAregaddr); + if (lpfc_readl(phba->HAregaddr, &ha_copy)) + return 1; } del_timer_sync(&psli->mbox_tmo); @@ -4018,7 +4050,8 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba) uint32_t status, i = 0; /* Read the HBA Host Status Register */ - status = readl(phba->HSregaddr); + if (lpfc_readl(phba->HSregaddr, &status)) + return -EIO; /* Check status register to see what current state is */ i = 0; @@ -4073,7 +4106,8 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba) lpfc_sli_brdrestart(phba); } /* Read the HBA Host Status Register */ - status = readl(phba->HSregaddr); + if (lpfc_readl(phba->HSregaddr, &status)) + return -EIO; } /* Check to see if any errors occurred during init */ @@ -5136,7 +5170,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, MAILBOX_t *mb; struct lpfc_sli *psli = &phba->sli; uint32_t status, evtctr; - uint32_t ha_copy; + uint32_t ha_copy, hc_copy; int i; unsigned long timeout; unsigned long drvr_flag = 0; @@ -5202,15 +5236,17 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, goto out_not_finished; } - if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT && - !(readl(phba->HCregaddr) & HC_MBINT_ENA)) { - spin_unlock_irqrestore(&phba->hbalock, drvr_flag); - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) { + if (lpfc_readl(phba->HCregaddr, &hc_copy) || + !(hc_copy & HC_MBINT_ENA)) { + spin_unlock_irqrestore(&phba->hbalock, drvr_flag); + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, "(%d):2528 Mailbox command x%x cannot " "issue Data: x%x x%x\n", pmbox->vport ? pmbox->vport->vpi : 0, pmbox->u.mb.mbxCommand, psli->sli_flag, flag); - goto out_not_finished; + goto out_not_finished; + } } if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { @@ -5408,11 +5444,19 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, word0 = le32_to_cpu(word0); } else { /* First read mbox status word */ - word0 = readl(phba->MBslimaddr); + if (lpfc_readl(phba->MBslimaddr, &word0)) { + spin_unlock_irqrestore(&phba->hbalock, + drvr_flag); + goto out_not_finished; + } } /* Read the HBA Host Attention Register */ - ha_copy = readl(phba->HAregaddr); + if (lpfc_readl(phba->HAregaddr, &ha_copy)) { + spin_unlock_irqrestore(&phba->hbalock, + drvr_flag); + goto out_not_finished; + } timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mb->mbxCommand) * 1000) + jiffies; @@ -5463,7 +5507,11 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, word0 = readl(phba->MBslimaddr); } /* Read the HBA Host Attention Register */ - ha_copy = readl(phba->HAregaddr); + if (lpfc_readl(phba->HAregaddr, &ha_copy)) { + spin_unlock_irqrestore(&phba->hbalock, + drvr_flag); + goto out_not_finished; + } } if (psli->sli_flag & LPFC_SLI_ACTIVE) { @@ -6263,7 +6311,6 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, bf_set(lpfc_sli4_sge_last, sgl, 1); else bf_set(lpfc_sli4_sge_last, sgl, 0); - sgl->word2 = cpu_to_le32(sgl->word2); /* swap the size field back to the cpu so we * can assign it to the sgl. 
*/ @@ -6283,6 +6330,7 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, bf_set(lpfc_sli4_sge_offset, sgl, offset); offset += bde.tus.f.bdeSize; } + sgl->word2 = cpu_to_le32(sgl->word2); bpl++; sgl++; } @@ -6528,9 +6576,9 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize / sizeof(struct ulp_bde64); for (i = 0; i < numBdes; i++) { - if (bpl[i].tus.f.bdeFlags != BUFF_TYPE_BDE_64) - break; bde.tus.w = le32_to_cpu(bpl[i].tus.w); + if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64) + break; xmit_len += bde.tus.f.bdeSize; } /* word3 iocb=IO_TAG wqe=request_payload_len */ @@ -6620,15 +6668,15 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, xritag = 0; break; case CMD_XMIT_BLS_RSP64_CX: - /* As BLS ABTS-ACC WQE is very different from other WQEs, + /* As BLS ABTS RSP WQE is very different from other WQEs, * we re-construct this WQE here based on information in * iocbq from scratch. */ memset(wqe, 0, sizeof(union lpfc_wqe)); /* OX_ID is invariable to who sent ABTS to CT exchange */ bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp, - bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_acc)); - if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_acc) == + bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp)); + if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) == LPFC_ABTS_UNSOL_INT) { /* ABTS sent by initiator to CT exchange, the * RX_ID field will be filled with the newly @@ -6642,7 +6690,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, * RX_ID from ABTS. */ bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp, - bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_acc)); + bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp)); } bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff); bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1); @@ -6653,6 +6701,15 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, LPFC_WQE_LENLOC_NONE); /* Overwrite the pre-set comnd type with OTHER_COMMAND */ command_type = OTHER_COMMAND; + if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) { + bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp, + bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp)); + bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp, + bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp)); + bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp, + bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp)); + } + break; case CMD_XRI_ABORTED_CX: case CMD_CREATE_XRI_CR: /* Do we expect to use this? 
*/ @@ -6701,7 +6758,8 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, if (piocb->sli4_xritag == NO_XRI) { if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || - piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN) + piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN || + piocb->iocb.ulpCommand == CMD_XMIT_BLS_RSP64_CX) sglq = NULL; else { if (pring->txq_cnt) { @@ -8194,7 +8252,8 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, piocb->iocb_flag &= ~LPFC_IO_WAKE; if (phba->cfg_poll & DISABLE_FCP_RING_INT) { - creg_val = readl(phba->HCregaddr); + if (lpfc_readl(phba->HCregaddr, &creg_val)) + return IOCB_ERROR; creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); writel(creg_val, phba->HCregaddr); readl(phba->HCregaddr); /* flush */ @@ -8236,7 +8295,8 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, } if (phba->cfg_poll & DISABLE_FCP_RING_INT) { - creg_val = readl(phba->HCregaddr); + if (lpfc_readl(phba->HCregaddr, &creg_val)) + return IOCB_ERROR; creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING); writel(creg_val, phba->HCregaddr); readl(phba->HCregaddr); /* flush */ @@ -8387,10 +8447,13 @@ lpfc_sli_eratt_read(struct lpfc_hba *phba) uint32_t ha_copy; /* Read chip Host Attention (HA) register */ - ha_copy = readl(phba->HAregaddr); + if (lpfc_readl(phba->HAregaddr, &ha_copy)) + goto unplug_err; + if (ha_copy & HA_ERATT) { /* Read host status register to retrieve error event */ - lpfc_sli_read_hs(phba); + if (lpfc_sli_read_hs(phba)) + goto unplug_err; /* Check if there is a deferred error condition is active */ if ((HS_FFER1 & phba->work_hs) && @@ -8409,6 +8472,15 @@ lpfc_sli_eratt_read(struct lpfc_hba *phba) return 1; } return 0; + +unplug_err: + /* Set the driver HS work bitmap */ + phba->work_hs |= UNPLUG_ERR; + /* Set the driver HA work bitmap */ + phba->work_ha |= HA_ERATT; + /* Indicate polling handles this ERATT */ + phba->hba_flag |= HBA_ERATT_HANDLED; + return 1; } /** @@ -8436,8 +8508,15 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba) if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); switch (if_type) { case LPFC_SLI_INTF_IF_TYPE_0: - uerr_sta_lo = readl(phba->sli4_hba.u.if_type0.UERRLOregaddr); - uerr_sta_hi = readl(phba->sli4_hba.u.if_type0.UERRHIregaddr); + if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr, + &uerr_sta_lo) || + lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr, + &uerr_sta_hi)) { + phba->work_hs |= UNPLUG_ERR; + phba->work_ha |= HA_ERATT; + phba->hba_flag |= HBA_ERATT_HANDLED; + return 1; + } if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) || (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, @@ -8456,9 +8535,15 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba) } break; case LPFC_SLI_INTF_IF_TYPE_2: - portstat_reg.word0 = - readl(phba->sli4_hba.u.if_type2.STATUSregaddr); - portsmphr = readl(phba->sli4_hba.PSMPHRregaddr); + if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, + &portstat_reg.word0) || + lpfc_readl(phba->sli4_hba.PSMPHRregaddr, + &portsmphr)){ + phba->work_hs |= UNPLUG_ERR; + phba->work_ha |= HA_ERATT; + phba->hba_flag |= HBA_ERATT_HANDLED; + return 1; + } if (bf_get(lpfc_sliport_status_err, &portstat_reg)) { phba->work_status[0] = readl(phba->sli4_hba.u.if_type2.ERR1regaddr); @@ -8639,7 +8724,8 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id) return IRQ_NONE; /* Need to read HA REG for slow-path events */ spin_lock_irqsave(&phba->hbalock, iflag); - ha_copy = readl(phba->HAregaddr); + if (lpfc_readl(phba->HAregaddr, &ha_copy)) + goto unplug_error; /* If somebody is waiting to handle an eratt don't 
process it * here. The brdkill function will do this. */ @@ -8665,7 +8751,9 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id) } /* Clear up only attention source related to slow-path */ - hc_copy = readl(phba->HCregaddr); + if (lpfc_readl(phba->HCregaddr, &hc_copy)) + goto unplug_error; + writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA), phba->HCregaddr); @@ -8688,7 +8776,8 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id) */ spin_lock_irqsave(&phba->hbalock, iflag); phba->sli.sli_flag &= ~LPFC_PROCESS_LA; - control = readl(phba->HCregaddr); + if (lpfc_readl(phba->HCregaddr, &control)) + goto unplug_error; control &= ~HC_LAINT_ENA; writel(control, phba->HCregaddr); readl(phba->HCregaddr); /* flush */ @@ -8708,7 +8797,8 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id) status >>= (4*LPFC_ELS_RING); if (status & HA_RXMASK) { spin_lock_irqsave(&phba->hbalock, iflag); - control = readl(phba->HCregaddr); + if (lpfc_readl(phba->HCregaddr, &control)) + goto unplug_error; lpfc_debugfs_slow_ring_trc(phba, "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x", @@ -8741,7 +8831,8 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id) } spin_lock_irqsave(&phba->hbalock, iflag); if (work_ha_copy & HA_ERATT) { - lpfc_sli_read_hs(phba); + if (lpfc_sli_read_hs(phba)) + goto unplug_error; /* * Check if there is a deferred error condition * is active @@ -8872,6 +8963,9 @@ send_current_mbox: lpfc_worker_wake_up(phba); } return IRQ_HANDLED; +unplug_error: + spin_unlock_irqrestore(&phba->hbalock, iflag); + return IRQ_HANDLED; } /* lpfc_sli_sp_intr_handler */ @@ -8919,7 +9013,8 @@ lpfc_sli_fp_intr_handler(int irq, void *dev_id) if (lpfc_intr_state_check(phba)) return IRQ_NONE; /* Need to read HA REG for FCP ring and other ring events */ - ha_copy = readl(phba->HAregaddr); + if (lpfc_readl(phba->HAregaddr, &ha_copy)) + return IRQ_HANDLED; /* Clear up only attention source related to fast-path */ spin_lock_irqsave(&phba->hbalock, iflag); /* @@ -9004,7 +9099,11 @@ lpfc_sli_intr_handler(int irq, void *dev_id) return IRQ_NONE; spin_lock(&phba->hbalock); - phba->ha_copy = readl(phba->HAregaddr); + if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) { + spin_unlock(&phba->hbalock); + return IRQ_HANDLED; + } + if (unlikely(!phba->ha_copy)) { spin_unlock(&phba->hbalock); return IRQ_NONE; @@ -9026,7 +9125,10 @@ lpfc_sli_intr_handler(int irq, void *dev_id) } /* Clear attention sources except link and error attentions */ - hc_copy = readl(phba->HCregaddr); + if (lpfc_readl(phba->HCregaddr, &hc_copy)) { + spin_unlock(&phba->hbalock); + return IRQ_HANDLED; + } writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA), phba->HCregaddr); @@ -10403,7 +10505,6 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, if (!phba->sli4_hba.pc_sli4_params.supported) hw_page_size = SLI4_PAGE_SIZE; - mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) return -ENOMEM; @@ -10413,11 +10514,22 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, LPFC_MBOX_OPCODE_CQ_CREATE, length, LPFC_SLI4_MBX_EMBED); cq_create = &mbox->u.mqe.un.cq_create; + shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr; bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request, cq->page_count); bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1); bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1); - bf_set(lpfc_cq_eq_id, &cq_create->u.request.context, eq->queue_id); + bf_set(lpfc_mbox_hdr_version, &shdr->request, + 
phba->sli4_hba.pc_sli4_params.cqv); + if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) { + bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, + (PAGE_SIZE/SLI4_PAGE_SIZE)); + bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context, + eq->queue_id); + } else { + bf_set(lpfc_cq_eq_id, &cq_create->u.request.context, + eq->queue_id); + } switch (cq->entry_count) { default: lpfc_printf_log(phba, KERN_ERR, LOG_SLI, @@ -10449,7 +10561,6 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); /* The IOCTL status is embedded in the mailbox subheader. */ - shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr; shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); if (shdr_status || shdr_add_status || rc) { @@ -10515,20 +10626,20 @@ lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq, bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1); switch (mq->entry_count) { case 16: - bf_set(lpfc_mq_context_count, &mq_create->u.request.context, - LPFC_MQ_CNT_16); + bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, + LPFC_MQ_RING_SIZE_16); break; case 32: - bf_set(lpfc_mq_context_count, &mq_create->u.request.context, - LPFC_MQ_CNT_32); + bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, + LPFC_MQ_RING_SIZE_32); break; case 64: - bf_set(lpfc_mq_context_count, &mq_create->u.request.context, - LPFC_MQ_CNT_64); + bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, + LPFC_MQ_RING_SIZE_64); break; case 128: - bf_set(lpfc_mq_context_count, &mq_create->u.request.context, - LPFC_MQ_CNT_128); + bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, + LPFC_MQ_RING_SIZE_128); break; } list_for_each_entry(dmabuf, &mq->page_list, list) { @@ -10586,6 +10697,7 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, length, LPFC_SLI4_MBX_EMBED); mq_create_ext = &mbox->u.mqe.un.mq_create_ext; + shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr; bf_set(lpfc_mbx_mq_create_ext_num_pages, &mq_create_ext->u.request, mq->page_count); bf_set(lpfc_mbx_mq_create_ext_async_evt_link, @@ -10598,9 +10710,15 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, &mq_create_ext->u.request, 1); bf_set(lpfc_mbx_mq_create_ext_async_evt_sli, &mq_create_ext->u.request, 1); - bf_set(lpfc_mq_context_cq_id, - &mq_create_ext->u.request.context, cq->queue_id); bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1); + bf_set(lpfc_mbox_hdr_version, &shdr->request, + phba->sli4_hba.pc_sli4_params.mqv); + if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1) + bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request, + cq->queue_id); + else + bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context, + cq->queue_id); switch (mq->entry_count) { default: lpfc_printf_log(phba, KERN_ERR, LOG_SLI, @@ -10610,20 +10728,24 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, return -EINVAL; /* otherwise default to smallest count (drop through) */ case 16: - bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context, - LPFC_MQ_CNT_16); + bf_set(lpfc_mq_context_ring_size, + &mq_create_ext->u.request.context, + LPFC_MQ_RING_SIZE_16); break; case 32: - bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context, - LPFC_MQ_CNT_32); + bf_set(lpfc_mq_context_ring_size, + &mq_create_ext->u.request.context, + LPFC_MQ_RING_SIZE_32); break; 
case 64: - bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context, - LPFC_MQ_CNT_64); + bf_set(lpfc_mq_context_ring_size, + &mq_create_ext->u.request.context, + LPFC_MQ_RING_SIZE_64); break; case 128: - bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context, - LPFC_MQ_CNT_128); + bf_set(lpfc_mq_context_ring_size, + &mq_create_ext->u.request.context, + LPFC_MQ_RING_SIZE_128); break; } list_for_each_entry(dmabuf, &mq->page_list, list) { @@ -10634,7 +10756,6 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, putPaddrHigh(dmabuf->phys); } rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); - shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr; mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, &mq_create_ext->u.response); if (rc != MBX_SUCCESS) { @@ -10711,6 +10832,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, uint32_t shdr_status, shdr_add_status; union lpfc_sli4_cfg_shdr *shdr; uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; + struct dma_address *page; if (!phba->sli4_hba.pc_sli4_params.supported) hw_page_size = SLI4_PAGE_SIZE; @@ -10724,20 +10846,42 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, LPFC_MBOX_OPCODE_FCOE_WQ_CREATE, length, LPFC_SLI4_MBX_EMBED); wq_create = &mbox->u.mqe.un.wq_create; + shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr; bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request, wq->page_count); bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request, cq->queue_id); + bf_set(lpfc_mbox_hdr_version, &shdr->request, + phba->sli4_hba.pc_sli4_params.wqv); + if (phba->sli4_hba.pc_sli4_params.wqv == LPFC_Q_CREATE_VERSION_1) { + bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1, + wq->entry_count); + switch (wq->entry_size) { + default: + case 64: + bf_set(lpfc_mbx_wq_create_wqe_size, + &wq_create->u.request_1, + LPFC_WQ_WQE_SIZE_64); + break; + case 128: + bf_set(lpfc_mbx_wq_create_wqe_size, + &wq_create->u.request_1, + LPFC_WQ_WQE_SIZE_128); + break; + } + bf_set(lpfc_mbx_wq_create_page_size, &wq_create->u.request_1, + (PAGE_SIZE/SLI4_PAGE_SIZE)); + page = wq_create->u.request_1.page; + } else { + page = wq_create->u.request.page; + } list_for_each_entry(dmabuf, &wq->page_list, list) { memset(dmabuf->virt, 0, hw_page_size); - wq_create->u.request.page[dmabuf->buffer_tag].addr_lo = - putPaddrLow(dmabuf->phys); - wq_create->u.request.page[dmabuf->buffer_tag].addr_hi = - putPaddrHigh(dmabuf->phys); + page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys); + page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys); } rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); /* The IOCTL status is embedded in the mailbox subheader. */ - shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr; shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); if (shdr_status || shdr_add_status || rc) { @@ -10815,37 +10959,51 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length, LPFC_SLI4_MBX_EMBED); rq_create = &mbox->u.mqe.un.rq_create; - switch (hrq->entry_count) { - default: - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "2535 Unsupported RQ count. 
(%d)\n", - hrq->entry_count); - if (hrq->entry_count < 512) - return -EINVAL; - /* otherwise default to smallest count (drop through) */ - case 512: - bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, - LPFC_RQ_RING_SIZE_512); - break; - case 1024: - bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, - LPFC_RQ_RING_SIZE_1024); - break; - case 2048: - bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, - LPFC_RQ_RING_SIZE_2048); - break; - case 4096: - bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, - LPFC_RQ_RING_SIZE_4096); - break; + shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; + bf_set(lpfc_mbox_hdr_version, &shdr->request, + phba->sli4_hba.pc_sli4_params.rqv); + if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) { + bf_set(lpfc_rq_context_rqe_count_1, + &rq_create->u.request.context, + hrq->entry_count); + rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE; + } else { + switch (hrq->entry_count) { + default: + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "2535 Unsupported RQ count. (%d)\n", + hrq->entry_count); + if (hrq->entry_count < 512) + return -EINVAL; + /* otherwise default to smallest count (drop through) */ + case 512: + bf_set(lpfc_rq_context_rqe_count, + &rq_create->u.request.context, + LPFC_RQ_RING_SIZE_512); + break; + case 1024: + bf_set(lpfc_rq_context_rqe_count, + &rq_create->u.request.context, + LPFC_RQ_RING_SIZE_1024); + break; + case 2048: + bf_set(lpfc_rq_context_rqe_count, + &rq_create->u.request.context, + LPFC_RQ_RING_SIZE_2048); + break; + case 4096: + bf_set(lpfc_rq_context_rqe_count, + &rq_create->u.request.context, + LPFC_RQ_RING_SIZE_4096); + break; + } + bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, + LPFC_HDR_BUF_SIZE); } bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, cq->queue_id); bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, hrq->page_count); - bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, - LPFC_HDR_BUF_SIZE); list_for_each_entry(dmabuf, &hrq->page_list, list) { memset(dmabuf->virt, 0, hw_page_size); rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = @@ -10855,7 +11013,6 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, } rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); /* The IOCTL status is embedded in the mailbox subheader. */ - shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); if (shdr_status || shdr_add_status || rc) { @@ -10881,37 +11038,50 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length, LPFC_SLI4_MBX_EMBED); - switch (drq->entry_count) { - default: - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "2536 Unsupported RQ count. 
(%d)\n", - drq->entry_count); - if (drq->entry_count < 512) - return -EINVAL; - /* otherwise default to smallest count (drop through) */ - case 512: - bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, - LPFC_RQ_RING_SIZE_512); - break; - case 1024: - bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, - LPFC_RQ_RING_SIZE_1024); - break; - case 2048: - bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, - LPFC_RQ_RING_SIZE_2048); - break; - case 4096: - bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, - LPFC_RQ_RING_SIZE_4096); - break; + bf_set(lpfc_mbox_hdr_version, &shdr->request, + phba->sli4_hba.pc_sli4_params.rqv); + if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) { + bf_set(lpfc_rq_context_rqe_count_1, + &rq_create->u.request.context, + hrq->entry_count); + rq_create->u.request.context.buffer_size = LPFC_DATA_BUF_SIZE; + } else { + switch (drq->entry_count) { + default: + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "2536 Unsupported RQ count. (%d)\n", + drq->entry_count); + if (drq->entry_count < 512) + return -EINVAL; + /* otherwise default to smallest count (drop through) */ + case 512: + bf_set(lpfc_rq_context_rqe_count, + &rq_create->u.request.context, + LPFC_RQ_RING_SIZE_512); + break; + case 1024: + bf_set(lpfc_rq_context_rqe_count, + &rq_create->u.request.context, + LPFC_RQ_RING_SIZE_1024); + break; + case 2048: + bf_set(lpfc_rq_context_rqe_count, + &rq_create->u.request.context, + LPFC_RQ_RING_SIZE_2048); + break; + case 4096: + bf_set(lpfc_rq_context_rqe_count, + &rq_create->u.request.context, + LPFC_RQ_RING_SIZE_4096); + break; + } + bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, + LPFC_DATA_BUF_SIZE); } bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, cq->queue_id); bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, drq->page_count); - bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, - LPFC_DATA_BUF_SIZE); list_for_each_entry(dmabuf, &drq->page_list, list) { rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys); @@ -11580,6 +11750,7 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) static char *rctl_names[] = FC_RCTL_NAMES_INIT; char *type_names[] = FC_TYPE_NAMES_INIT; struct fc_vft_header *fc_vft_hdr; + uint32_t *header = (uint32_t *) fc_hdr; switch (fc_hdr->fh_r_ctl) { case FC_RCTL_DD_UNCAT: /* uncategorized information */ @@ -11628,10 +11799,15 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) default: goto drop; } + lpfc_printf_log(phba, KERN_INFO, LOG_ELS, - "2538 Received frame rctl:%s type:%s\n", + "2538 Received frame rctl:%s type:%s " + "Frame Data:%08x %08x %08x %08x %08x %08x\n", rctl_names[fc_hdr->fh_r_ctl], - type_names[fc_hdr->fh_type]); + type_names[fc_hdr->fh_type], + be32_to_cpu(header[0]), be32_to_cpu(header[1]), + be32_to_cpu(header[2]), be32_to_cpu(header[3]), + be32_to_cpu(header[4]), be32_to_cpu(header[5])); return 0; drop: lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, @@ -11928,17 +12104,17 @@ lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport, } /** - * lpfc_sli4_seq_abort_acc_cmpl - Accept seq abort iocb complete handler + * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler * @phba: Pointer to HBA context object. * @cmd_iocbq: pointer to the command iocbq structure. * @rsp_iocbq: pointer to the response iocbq structure. 
* - * This function handles the sequence abort accept iocb command complete + * This function handles the sequence abort response iocb command complete * event. It properly releases the memory allocated to the sequence abort * accept iocb. **/ static void -lpfc_sli4_seq_abort_acc_cmpl(struct lpfc_hba *phba, +lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmd_iocbq, struct lpfc_iocbq *rsp_iocbq) { @@ -11947,15 +12123,15 @@ lpfc_sli4_seq_abort_acc_cmpl(struct lpfc_hba *phba, } /** - * lpfc_sli4_seq_abort_acc - Accept sequence abort + * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort * @phba: Pointer to HBA context object. * @fc_hdr: pointer to a FC frame header. * - * This function sends a basic accept to a previous unsol sequence abort + * This function sends a basic response to a previous unsol sequence abort * event after aborting the sequence handling. **/ static void -lpfc_sli4_seq_abort_acc(struct lpfc_hba *phba, +lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) { struct lpfc_iocbq *ctiocb = NULL; @@ -11963,6 +12139,7 @@ lpfc_sli4_seq_abort_acc(struct lpfc_hba *phba, uint16_t oxid, rxid; uint32_t sid, fctl; IOCB_t *icmd; + int rc; if (!lpfc_is_link_up(phba)) return; @@ -11983,7 +12160,7 @@ lpfc_sli4_seq_abort_acc(struct lpfc_hba *phba, + phba->sli4_hba.max_cfg_param.xri_base)) lpfc_set_rrq_active(phba, ndlp, rxid, oxid, 0); - /* Allocate buffer for acc iocb */ + /* Allocate buffer for rsp iocb */ ctiocb = lpfc_sli_get_iocbq(phba); if (!ctiocb) return; @@ -12008,32 +12185,54 @@ lpfc_sli4_seq_abort_acc(struct lpfc_hba *phba, ctiocb->iocb_cmpl = NULL; ctiocb->vport = phba->pport; - ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_acc_cmpl; + ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl; + ctiocb->sli4_xritag = NO_XRI; + + /* If the oxid maps to the FCP XRI range or if it is out of range, + * send a BLS_RJT. The driver no longer has that exchange. + * Override the IOCB for a BA_RJT. + */ + if (oxid > (phba->sli4_hba.max_cfg_param.max_xri + + phba->sli4_hba.max_cfg_param.xri_base) || + oxid > (lpfc_sli4_get_els_iocb_cnt(phba) + + phba->sli4_hba.max_cfg_param.xri_base)) { + icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT; + bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0); + bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID); + bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE); + } if (fctl & FC_FC_EX_CTX) { /* ABTS sent by responder to CT exchange, construction * of BA_ACC will use OX_ID from ABTS for the XRI_TAG * field and RX_ID from ABTS for RX_ID field. */ - bf_set(lpfc_abts_orig, &icmd->un.bls_acc, LPFC_ABTS_UNSOL_RSP); - bf_set(lpfc_abts_rxid, &icmd->un.bls_acc, rxid); - ctiocb->sli4_xritag = oxid; + bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP); + bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid); } else { /* ABTS sent by initiator to CT exchange, construction * of BA_ACC will need to allocate a new XRI as for the * XRI_TAG and RX_ID fields. 
*/ - bf_set(lpfc_abts_orig, &icmd->un.bls_acc, LPFC_ABTS_UNSOL_INT); - bf_set(lpfc_abts_rxid, &icmd->un.bls_acc, NO_XRI); - ctiocb->sli4_xritag = NO_XRI; + bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT); + bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, NO_XRI); } - bf_set(lpfc_abts_oxid, &icmd->un.bls_acc, oxid); + bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid); - /* Xmit CT abts accept on exchange <xid> */ + /* Xmit CT abts response on exchange <xid> */ lpfc_printf_log(phba, KERN_INFO, LOG_ELS, - "1200 Xmit CT ABTS ACC on exchange x%x Data: x%x\n", - CMD_XMIT_BLS_RSP64_CX, phba->link_state); - lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0); + "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n", + icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state); + + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0); + if (rc == IOCB_ERROR) { + lpfc_printf_log(phba, KERN_ERR, LOG_ELS, + "2925 Failed to issue CT ABTS RSP x%x on " + "xri x%x, Data x%x\n", + icmd->un.xseq64.w5.hcsw.Rctl, oxid, + phba->link_state); + lpfc_sli_release_iocbq(phba, ctiocb); + } } /** @@ -12081,7 +12280,7 @@ lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport, lpfc_in_buf_free(phba, &dmabuf->dbuf); } /* Send basic accept (BA_ACC) to the abort requester */ - lpfc_sli4_seq_abort_acc(phba, &fc_hdr); + lpfc_sli4_seq_abort_rsp(phba, &fc_hdr); } /** diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index 595056b89608..1a3cbf88f2ce 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2009 Emulex. All rights reserved. * + * Copyright (C) 2009-2011 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 0a4d376dbca5..2404d1d65563 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -18,7 +18,7 @@ * included with this package. * *******************************************************************/ -#define LPFC_DRIVER_VERSION "8.3.21" +#define LPFC_DRIVER_VERSION "8.3.22" #define LPFC_DRIVER_NAME "lpfc" #define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" #define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp" diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c index e8a6f1cf1e4b..5e001ffd4c13 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c @@ -1748,6 +1748,54 @@ _base_display_intel_branding(struct MPT2SAS_ADAPTER *ioc) } /** + * _base_display_hp_branding - Display branding string + * @ioc: per adapter object + * + * Return nothing. 
+ */ +static void +_base_display_hp_branding(struct MPT2SAS_ADAPTER *ioc) +{ + if (ioc->pdev->subsystem_vendor != MPT2SAS_HP_3PAR_SSVID) + return; + + switch (ioc->pdev->device) { + case MPI2_MFGPAGE_DEVID_SAS2004: + switch (ioc->pdev->subsystem_device) { + case MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID: + printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, + MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING); + break; + default: + break; + } + case MPI2_MFGPAGE_DEVID_SAS2308_2: + switch (ioc->pdev->subsystem_device) { + case MPT2SAS_HP_2_4_INTERNAL_SSDID: + printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, + MPT2SAS_HP_2_4_INTERNAL_BRANDING); + break; + case MPT2SAS_HP_2_4_EXTERNAL_SSDID: + printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, + MPT2SAS_HP_2_4_EXTERNAL_BRANDING); + break; + case MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID: + printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, + MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING); + break; + case MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID: + printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, + MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING); + break; + default: + break; + } + default: + break; + } +} + +/** * _base_display_ioc_capabilities - Disply IOC's capabilities. * @ioc: per adapter object * @@ -1778,6 +1826,7 @@ _base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc) _base_display_dell_branding(ioc); _base_display_intel_branding(ioc); + _base_display_hp_branding(ioc); printk(MPT2SAS_INFO_FMT "Protocol=(", ioc->name); diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h index a3f8aa9baea4..500328245f61 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.h +++ b/drivers/scsi/mpt2sas/mpt2sas_base.h @@ -168,6 +168,26 @@ #define MPT2SAS_INTEL_RMS2LL080_SSDID 0x350E #define MPT2SAS_INTEL_RMS2LL040_SSDID 0x350F + +/* + * HP HBA branding + */ +#define MPT2SAS_HP_3PAR_SSVID 0x1590 +#define MPT2SAS_HP_2_4_INTERNAL_BRANDING "HP H220 Host Bus Adapter" +#define MPT2SAS_HP_2_4_EXTERNAL_BRANDING "HP H221 Host Bus Adapter" +#define MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING "HP H222 Host Bus Adapter" +#define MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING "HP H220i Host Bus Adapter" +#define MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING "HP H210i Host Bus Adapter" + +/* + * HO HBA SSDIDs + */ +#define MPT2SAS_HP_2_4_INTERNAL_SSDID 0x0041 +#define MPT2SAS_HP_2_4_EXTERNAL_SSDID 0x0042 +#define MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID 0x0043 +#define MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID 0x0044 +#define MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID 0x0046 + /* * per target private data */ diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c index 19ad34f381a5..938d045e4180 100644 --- a/drivers/scsi/mvsas/mv_init.c +++ b/drivers/scsi/mvsas/mv_init.c @@ -663,6 +663,13 @@ static struct pci_device_id __devinitdata mvs_pci_table[] = { { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1300), chip_1300 }, { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1320), chip_1320 }, { PCI_VDEVICE(ADAPTEC2, 0x0450), chip_6440 }, + { PCI_VDEVICE(TTI, 0x2710), chip_9480 }, + { PCI_VDEVICE(TTI, 0x2720), chip_9480 }, + { PCI_VDEVICE(TTI, 0x2721), chip_9480 }, + { PCI_VDEVICE(TTI, 0x2722), chip_9480 }, + { PCI_VDEVICE(TTI, 0x2740), chip_9480 }, + { PCI_VDEVICE(TTI, 0x2744), chip_9480 }, + { PCI_VDEVICE(TTI, 0x2760), chip_9480 }, { } /* terminate list */ }; diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h index 2fc0045b1a52..c1f8d1b150f7 100644 --- a/drivers/scsi/qla4xxx/ql4_def.h +++ b/drivers/scsi/qla4xxx/ql4_def.h @@ -53,6 +53,9 @@ #define PCI_DEVICE_ID_QLOGIC_ISP8022 
0x8022 #endif +#define ISP4XXX_PCI_FN_1 0x1 +#define ISP4XXX_PCI_FN_2 0x3 + #define QLA_SUCCESS 0 #define QLA_ERROR 1 @@ -233,9 +236,6 @@ struct ddb_entry { unsigned long flags; /* DDB Flags */ - unsigned long dev_scan_wait_to_start_relogin; - unsigned long dev_scan_wait_to_complete_relogin; - uint16_t fw_ddb_index; /* DDB firmware index */ uint16_t options; uint32_t fw_ddb_device_state; /* F/W Device State -- see ql4_fw.h */ @@ -289,8 +289,6 @@ struct ddb_entry { * DDB flags. */ #define DF_RELOGIN 0 /* Relogin to device */ -#define DF_NO_RELOGIN 1 /* Do not relogin if IOCTL - * logged it out */ #define DF_ISNS_DISCOVERED 2 /* Device was discovered via iSNS */ #define DF_FO_MASKED 3 @@ -376,7 +374,7 @@ struct scsi_qla_host { #define AF_LINK_UP 8 /* 0x00000100 */ #define AF_IRQ_ATTACHED 10 /* 0x00000400 */ #define AF_DISABLE_ACB_COMPLETE 11 /* 0x00000800 */ -#define AF_HBA_GOING_AWAY 12 /* 0x00001000 */ +#define AF_HA_REMOVAL 12 /* 0x00001000 */ #define AF_INTx_ENABLED 15 /* 0x00008000 */ #define AF_MSI_ENABLED 16 /* 0x00010000 */ #define AF_MSIX_ENABLED 17 /* 0x00020000 */ @@ -479,7 +477,6 @@ struct scsi_qla_host { uint32_t timer_active; /* Recovery Timers */ - uint32_t discovery_wait; atomic_t check_relogin_timeouts; uint32_t retry_reset_ha_cnt; uint32_t isp_reset_timer; /* reset test timer */ @@ -765,6 +762,5 @@ static inline void ql4xxx_unlock_drvr(struct scsi_qla_host *a) /* Defines for process_aen() */ #define PROCESS_ALL_AENS 0 #define FLUSH_DDB_CHANGED_AENS 1 -#define RELOGIN_DDB_CHANGED_AENS 2 #endif /*_QLA4XXX_H */ diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h index c1985792f034..31e2bf97198c 100644 --- a/drivers/scsi/qla4xxx/ql4_fw.h +++ b/drivers/scsi/qla4xxx/ql4_fw.h @@ -455,6 +455,7 @@ struct addr_ctrl_blk { uint8_t res0; /* 07 */ uint16_t eth_mtu_size; /* 08-09 */ uint16_t add_fw_options; /* 0A-0B */ +#define SERIALIZE_TASK_MGMT 0x0400 uint8_t hb_interval; /* 0C */ uint8_t inst_num; /* 0D */ diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h index 8fad99b7eef4..cc53e3fbd78c 100644 --- a/drivers/scsi/qla4xxx/ql4_glbl.h +++ b/drivers/scsi/qla4xxx/ql4_glbl.h @@ -136,7 +136,6 @@ void qla4_8xxx_clear_drv_active(struct scsi_qla_host *ha); void qla4_8xxx_set_drv_active(struct scsi_qla_host *ha); extern int ql4xextended_error_logging; -extern int ql4xdiscoverywait; extern int ql4xdontresethba; extern int ql4xenablemsix; diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c index 1629c48c35ef..bbb2e903d38a 100644 --- a/drivers/scsi/qla4xxx/ql4_init.c +++ b/drivers/scsi/qla4xxx/ql4_init.c @@ -723,13 +723,38 @@ int qla4_is_relogin_allowed(struct scsi_qla_host *ha, uint32_t conn_err) return relogin; } +static void qla4xxx_flush_AENS(struct scsi_qla_host *ha) +{ + unsigned long wtime; + + /* Flush the 0x8014 AEN from the firmware as a result of + * Auto connect. We are basically doing get_firmware_ddb() + * to determine whether we need to log back in or not. + * Trying to do a set ddb before we have processed 0x8014 + * will result in another set_ddb() for the same ddb. In other + * words there will be stale entries in the aen_q. 
+ */ + wtime = jiffies + (2 * HZ); + do { + if (qla4xxx_get_firmware_state(ha) == QLA_SUCCESS) + if (ha->firmware_state & (BIT_2 | BIT_0)) + return; + + if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags)) + qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); + + msleep(1000); + } while (!time_after_eq(jiffies, wtime)); +} + /** - * qla4xxx_configure_ddbs - builds driver ddb list + * qla4xxx_build_ddb_list - builds driver ddb list * @ha: Pointer to host adapter structure. * * This routine searches for all valid firmware ddb entries and builds * an internal ddb list. Ddbs that are considered valid are those with * a device state of SESSION_ACTIVE. + * A relogin (set_ddb) is issued for DDBs that are not online. **/ static int qla4xxx_build_ddb_list(struct scsi_qla_host *ha) { @@ -744,6 +769,8 @@ static int qla4xxx_build_ddb_list(struct scsi_qla_host *ha) uint32_t ipv6_device; uint32_t new_tgt; + qla4xxx_flush_AENS(ha); + fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), &fw_ddb_entry_dma, GFP_KERNEL); if (fw_ddb_entry == NULL) { @@ -847,144 +874,6 @@ exit_build_ddb_list_no_free: return status; } -struct qla4_relog_scan { - int halt_wait; - uint32_t conn_err; - uint32_t fw_ddb_index; - uint32_t next_fw_ddb_index; - uint32_t fw_ddb_device_state; -}; - -static int qla4_test_rdy(struct scsi_qla_host *ha, struct qla4_relog_scan *rs) -{ - struct ddb_entry *ddb_entry; - - if (qla4_is_relogin_allowed(ha, rs->conn_err)) { - /* We either have a device that is in - * the process of relogging in or a - * device that is waiting to be - * relogged in */ - rs->halt_wait = 0; - - ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, - rs->fw_ddb_index); - if (ddb_entry == NULL) - return QLA_ERROR; - - if (ddb_entry->dev_scan_wait_to_start_relogin != 0 - && time_after_eq(jiffies, - ddb_entry-> - dev_scan_wait_to_start_relogin)) - { - ddb_entry->dev_scan_wait_to_start_relogin = 0; - qla4xxx_set_ddb_entry(ha, rs->fw_ddb_index, 0); - } - } - return QLA_SUCCESS; -} - -static int qla4_scan_for_relogin(struct scsi_qla_host *ha, - struct qla4_relog_scan *rs) -{ - int error; - - /* scan for relogins - * ----------------- */ - for (rs->fw_ddb_index = 0; rs->fw_ddb_index < MAX_DDB_ENTRIES; - rs->fw_ddb_index = rs->next_fw_ddb_index) { - if (qla4xxx_get_fwddb_entry(ha, rs->fw_ddb_index, NULL, 0, - NULL, &rs->next_fw_ddb_index, - &rs->fw_ddb_device_state, - &rs->conn_err, NULL, NULL) - == QLA_ERROR) - return QLA_ERROR; - - if (rs->fw_ddb_device_state == DDB_DS_LOGIN_IN_PROCESS) - rs->halt_wait = 0; - - if (rs->fw_ddb_device_state == DDB_DS_SESSION_FAILED || - rs->fw_ddb_device_state == DDB_DS_NO_CONNECTION_ACTIVE) { - error = qla4_test_rdy(ha, rs); - if (error) - return error; - } - - /* We know we've reached the last device when - * next_fw_ddb_index is 0 */ - if (rs->next_fw_ddb_index == 0) - break; - } - return QLA_SUCCESS; -} - -/** - * qla4xxx_devices_ready - wait for target devices to be logged in - * @ha: pointer to adapter structure - * - * This routine waits up to ql4xdiscoverywait seconds - * F/W database during driver load time. - **/ -static int qla4xxx_devices_ready(struct scsi_qla_host *ha) -{ - int error; - unsigned long discovery_wtime; - struct qla4_relog_scan rs; - - discovery_wtime = jiffies + (ql4xdiscoverywait * HZ); - - DEBUG(printk("Waiting (%d) for devices ...\n", ql4xdiscoverywait)); - do { - /* poll for AEN. 
*/ - qla4xxx_get_firmware_state(ha); - if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags)) { - /* Set time-between-relogin timer */ - qla4xxx_process_aen(ha, RELOGIN_DDB_CHANGED_AENS); - } - - /* if no relogins active or needed, halt discvery wait */ - rs.halt_wait = 1; - - error = qla4_scan_for_relogin(ha, &rs); - - if (rs.halt_wait) { - DEBUG2(printk("scsi%ld: %s: Delay halted. Devices " - "Ready.\n", ha->host_no, __func__)); - return QLA_SUCCESS; - } - - msleep(2000); - } while (!time_after_eq(jiffies, discovery_wtime)); - - DEBUG3(qla4xxx_get_conn_event_log(ha)); - - return QLA_SUCCESS; -} - -static void qla4xxx_flush_AENS(struct scsi_qla_host *ha) -{ - unsigned long wtime; - - /* Flush the 0x8014 AEN from the firmware as a result of - * Auto connect. We are basically doing get_firmware_ddb() - * to determine whether we need to log back in or not. - * Trying to do a set ddb before we have processed 0x8014 - * will result in another set_ddb() for the same ddb. In other - * words there will be stale entries in the aen_q. - */ - wtime = jiffies + (2 * HZ); - do { - if (qla4xxx_get_firmware_state(ha) == QLA_SUCCESS) - if (ha->firmware_state & (BIT_2 | BIT_0)) - return; - - if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags)) - qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); - - msleep(1000); - } while (!time_after_eq(jiffies, wtime)); - -} - static int qla4xxx_initialize_ddb_list(struct scsi_qla_host *ha) { uint16_t fw_ddb_index; @@ -996,29 +885,12 @@ static int qla4xxx_initialize_ddb_list(struct scsi_qla_host *ha) for (fw_ddb_index = 0; fw_ddb_index < MAX_DDB_ENTRIES; fw_ddb_index++) ha->fw_ddb_index_map[fw_ddb_index] = - (struct ddb_entry *)INVALID_ENTRY; + (struct ddb_entry *)INVALID_ENTRY; ha->tot_ddbs = 0; - qla4xxx_flush_AENS(ha); - - /* Wait for an AEN */ - qla4xxx_devices_ready(ha); - - /* - * First perform device discovery for active - * fw ddb indexes and build - * ddb list. - */ - if ((status = qla4xxx_build_ddb_list(ha)) == QLA_ERROR) - return status; - - /* - * Targets can come online after the inital discovery, so processing - * the aens here will catch them. - */ - if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags)) - qla4xxx_process_aen(ha, PROCESS_ALL_AENS); + /* Perform device discovery and build ddb list. */ + status = qla4xxx_build_ddb_list(ha); return status; } @@ -1537,7 +1409,6 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index, uint32_t state, uint32_t conn_err) { struct ddb_entry * ddb_entry; - uint32_t old_fw_ddb_device_state; /* check for out of range index */ if (fw_ddb_index >= MAX_DDB_ENTRIES) @@ -1553,27 +1424,18 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index, } /* Device already exists in our database. */ - old_fw_ddb_device_state = ddb_entry->fw_ddb_device_state; DEBUG2(printk("scsi%ld: %s DDB - old state= 0x%x, new state=0x%x for " "index [%d]\n", ha->host_no, __func__, ddb_entry->fw_ddb_device_state, state, fw_ddb_index)); - if (old_fw_ddb_device_state == state && - state == DDB_DS_SESSION_ACTIVE) { - if (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE) { - atomic_set(&ddb_entry->state, DDB_STATE_ONLINE); - iscsi_unblock_session(ddb_entry->sess); - } - return QLA_SUCCESS; - } ddb_entry->fw_ddb_device_state = state; /* Device is back online. 
*/ - if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) { + if ((ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) && + (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE)) { atomic_set(&ddb_entry->state, DDB_STATE_ONLINE); atomic_set(&ddb_entry->relogin_retry_count, 0); atomic_set(&ddb_entry->relogin_timer, 0); clear_bit(DF_RELOGIN, &ddb_entry->flags); - clear_bit(DF_NO_RELOGIN, &ddb_entry->flags); iscsi_unblock_session(ddb_entry->sess); iscsi_session_event(ddb_entry->sess, ISCSI_KEVENT_CREATE_SESSION); @@ -1581,7 +1443,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index, * Change the lun state to READY in case the lun TIMEOUT before * the device came back. */ - } else { + } else if (ddb_entry->fw_ddb_device_state != DDB_DS_SESSION_ACTIVE) { /* Device went away, mark device missing */ if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE) { DEBUG2(ql4_printk(KERN_INFO, ha, "%s mark missing " @@ -1598,7 +1460,6 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index, */ if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_FAILED && !test_bit(DF_RELOGIN, &ddb_entry->flags) && - !test_bit(DF_NO_RELOGIN, &ddb_entry->flags) && qla4_is_relogin_allowed(ha, conn_err)) { /* * This triggers a relogin. After the relogin_timer diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c index 03e028e6e809..2f40ac761cd4 100644 --- a/drivers/scsi/qla4xxx/ql4_isr.c +++ b/drivers/scsi/qla4xxx/ql4_isr.c @@ -801,7 +801,7 @@ irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id) &ha->reg->ctrl_status); readl(&ha->reg->ctrl_status); - if (!test_bit(AF_HBA_GOING_AWAY, &ha->flags)) + if (!test_bit(AF_HA_REMOVAL, &ha->flags)) set_bit(DPC_RESET_HA_INTR, &ha->dpc_flags); break; @@ -1008,34 +1008,9 @@ void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen) mbox_sts[0], mbox_sts[2], mbox_sts[3])); break; - } else if (process_aen == RELOGIN_DDB_CHANGED_AENS) { - /* for use during init time, we only want to - * relogin non-active ddbs */ - struct ddb_entry *ddb_entry; - - ddb_entry = - /* FIXME: name length? */ - qla4xxx_lookup_ddb_by_fw_index(ha, - mbox_sts[2]); - if (!ddb_entry) - break; - - ddb_entry->dev_scan_wait_to_complete_relogin = - 0; - ddb_entry->dev_scan_wait_to_start_relogin = - jiffies + - ((ddb_entry->default_time2wait + - 4) * HZ); - - DEBUG2(printk("scsi%ld: ddb [%d] initiate" - " RELOGIN after %d seconds\n", - ha->host_no, - ddb_entry->fw_ddb_index, - ddb_entry->default_time2wait + - 4)); - break; } - + case PROCESS_ALL_AENS: + default: if (mbox_sts[1] == 0) { /* Global DB change. */ qla4xxx_reinitialize_ddb_list(ha); } else if (mbox_sts[1] == 1) { /* Specific device. 
*/ diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c index f65626aec7c1..f9d81c8372c3 100644 --- a/drivers/scsi/qla4xxx/ql4_mbx.c +++ b/drivers/scsi/qla4xxx/ql4_mbx.c @@ -32,6 +32,7 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount, u_long wait_count; uint32_t intr_status; unsigned long flags = 0; + uint32_t dev_state; /* Make sure that pointers are valid */ if (!mbx_cmd || !mbx_sts) { @@ -40,12 +41,23 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount, return status; } - if (is_qla8022(ha) && - test_bit(AF_FW_RECOVERY, &ha->flags)) { - DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: prematurely " - "completing mbx cmd as firmware recovery detected\n", - ha->host_no, __func__)); - return status; + if (is_qla8022(ha)) { + if (test_bit(AF_FW_RECOVERY, &ha->flags)) { + DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: " + "prematurely completing mbx cmd as firmware " + "recovery detected\n", ha->host_no, __func__)); + return status; + } + /* Do not send any mbx cmd if h/w is in failed state*/ + qla4_8xxx_idc_lock(ha); + dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE); + qla4_8xxx_idc_unlock(ha); + if (dev_state == QLA82XX_DEV_FAILED) { + ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: H/W is in " + "failed state, do not send any mailbox commands\n", + ha->host_no, __func__); + return status; + } } if ((is_aer_supported(ha)) && @@ -139,7 +151,7 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount, if (test_bit(AF_IRQ_ATTACHED, &ha->flags) && test_bit(AF_INTERRUPTS_ON, &ha->flags) && test_bit(AF_ONLINE, &ha->flags) && - !test_bit(AF_HBA_GOING_AWAY, &ha->flags)) { + !test_bit(AF_HA_REMOVAL, &ha->flags)) { /* Do not poll for completion. Use completion queue */ set_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags); wait_for_completion_timeout(&ha->mbx_intr_comp, MBOX_TOV * HZ); @@ -395,9 +407,6 @@ qla4xxx_update_local_ifcb(struct scsi_qla_host *ha, /*memcpy(ha->alias, init_fw_cb->Alias, min(sizeof(ha->alias), sizeof(init_fw_cb->Alias)));*/ - /* Save Command Line Paramater info */ - ha->discovery_wait = ql4xdiscoverywait; - if (ha->acb_version == ACB_SUPPORTED) { ha->ipv6_options = init_fw_cb->ipv6_opts; ha->ipv6_addl_options = init_fw_cb->ipv6_addtl_opts; @@ -467,6 +476,11 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha) init_fw_cb->fw_options &= __constant_cpu_to_le16(~FWOPT_TARGET_MODE); + /* Set bit for "serialize task mgmt" all other bits need to be zero */ + init_fw_cb->add_fw_options = 0; + init_fw_cb->add_fw_options |= + __constant_cpu_to_le16(SERIALIZE_TASK_MGMT); + if (qla4xxx_set_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma) != QLA_SUCCESS) { DEBUG2(printk(KERN_WARNING diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c index 3d5ef2df4134..35381cb0936e 100644 --- a/drivers/scsi/qla4xxx/ql4_nx.c +++ b/drivers/scsi/qla4xxx/ql4_nx.c @@ -2304,14 +2304,13 @@ qla4_8xxx_enable_intrs(struct scsi_qla_host *ha) void qla4_8xxx_disable_intrs(struct scsi_qla_host *ha) { - if (test_bit(AF_INTERRUPTS_ON, &ha->flags)) + if (test_and_clear_bit(AF_INTERRUPTS_ON, &ha->flags)) qla4_8xxx_mbx_intr_disable(ha); spin_lock_irq(&ha->hardware_lock); /* BIT 10 - set */ qla4_8xxx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400); spin_unlock_irq(&ha->hardware_lock); - clear_bit(AF_INTERRUPTS_ON, &ha->flags); } struct ql4_init_msix_entry { diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index 967836ef5ab2..a4acb0dd7beb 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ 
b/drivers/scsi/qla4xxx/ql4_os.c @@ -29,10 +29,6 @@ static struct kmem_cache *srb_cachep; /* * Module parameter information and variables */ -int ql4xdiscoverywait = 60; -module_param(ql4xdiscoverywait, int, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(ql4xdiscoverywait, "Discovery wait time"); - int ql4xdontresethba = 0; module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(ql4xdontresethba, @@ -55,6 +51,17 @@ MODULE_PARM_DESC(ql4xenablemsix, " 2 = enable MSI interrupt mechanism."); #define QL4_DEF_QDEPTH 32 +static int ql4xmaxqdepth = QL4_DEF_QDEPTH; +module_param(ql4xmaxqdepth, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(ql4xmaxqdepth, + "Maximum queue depth to report for target devices.\n" + " Default: 32."); + +static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO; +module_param(ql4xsess_recovery_tmo, int, S_IRUGO); +MODULE_PARM_DESC(ql4xsess_recovery_tmo, + "Target Session Recovery Timeout.\n" + " Default: 30 sec."); /* * SCSI host template entry points @@ -165,7 +172,7 @@ static void qla4xxx_recovery_timedout(struct iscsi_cls_session *session) DEBUG2(printk("scsi%ld: %s: ddb [%d] session recovery timeout " "of (%d) secs exhausted, marking device DEAD.\n", ha->host_no, __func__, ddb_entry->fw_ddb_index, - QL4_SESS_RECOVERY_TMO)); + ddb_entry->sess->recovery_tmo)); } } @@ -295,7 +302,7 @@ int qla4xxx_add_sess(struct ddb_entry *ddb_entry) { int err; - ddb_entry->sess->recovery_tmo = QL4_SESS_RECOVERY_TMO; + ddb_entry->sess->recovery_tmo = ql4xsess_recovery_tmo; err = iscsi_add_session(ddb_entry->sess, ddb_entry->fw_ddb_index); if (err) { @@ -753,12 +760,6 @@ static void qla4xxx_timer(struct scsi_qla_host *ha) if (!pci_channel_offline(ha->pdev)) pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w); - if (test_bit(AF_HBA_GOING_AWAY, &ha->flags)) { - DEBUG2(ql4_printk(KERN_INFO, ha, "%s exited. HBA GOING AWAY\n", - __func__)); - return; - } - if (is_qla8022(ha)) { qla4_8xxx_watchdog(ha); } @@ -1067,7 +1068,6 @@ void qla4xxx_dead_adapter_cleanup(struct scsi_qla_host *ha) /* Disable the board */ ql4_printk(KERN_INFO, ha, "Disabling the board\n"); - set_bit(AF_HBA_GOING_AWAY, &ha->flags); qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16); qla4xxx_mark_all_devices_missing(ha); @@ -1218,6 +1218,27 @@ recover_ha_init_adapter: return status; } +static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha) +{ + struct ddb_entry *ddb_entry, *dtemp; + + list_for_each_entry_safe(ddb_entry, dtemp, &ha->ddb_list, list) { + if ((atomic_read(&ddb_entry->state) == DDB_STATE_MISSING) || + (atomic_read(&ddb_entry->state) == DDB_STATE_DEAD)) { + if (ddb_entry->fw_ddb_device_state == + DDB_DS_SESSION_ACTIVE) { + atomic_set(&ddb_entry->state, DDB_STATE_ONLINE); + ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]" + " marked ONLINE\n", ha->host_no, __func__, + ddb_entry->fw_ddb_index); + + iscsi_unblock_session(ddb_entry->sess); + } else + qla4xxx_relogin_device(ha, ddb_entry); + } + } +} + void qla4xxx_wake_dpc(struct scsi_qla_host *ha) { if (ha->dpc_thread && @@ -1259,11 +1280,6 @@ static void qla4xxx_do_dpc(struct work_struct *work) goto do_dpc_exit; } - /* HBA is in the process of being permanently disabled. - * Don't process anything */ - if (test_bit(AF_HBA_GOING_AWAY, &ha->flags)) - return; - if (is_qla8022(ha)) { if (test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags)) { qla4_8xxx_idc_lock(ha); @@ -1331,13 +1347,7 @@ dpc_post_reset_ha: if (test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) { if (!test_bit(AF_LINK_UP, &ha->flags)) { /* ---- link down? 
--- */ - list_for_each_entry_safe(ddb_entry, dtemp, - &ha->ddb_list, list) { - if (atomic_read(&ddb_entry->state) == - DDB_STATE_ONLINE) - qla4xxx_mark_device_missing(ha, - ddb_entry); - } + qla4xxx_mark_all_devices_missing(ha); } else { /* ---- link up? --- * * F/W will auto login to all devices ONLY ONCE after @@ -1346,30 +1356,7 @@ dpc_post_reset_ha: * manually relogin to devices when recovering from * connection failures, logouts, expired KATO, etc. */ - list_for_each_entry_safe(ddb_entry, dtemp, - &ha->ddb_list, list) { - if ((atomic_read(&ddb_entry->state) == - DDB_STATE_MISSING) || - (atomic_read(&ddb_entry->state) == - DDB_STATE_DEAD)) { - if (ddb_entry->fw_ddb_device_state == - DDB_DS_SESSION_ACTIVE) { - atomic_set(&ddb_entry->state, - DDB_STATE_ONLINE); - ql4_printk(KERN_INFO, ha, - "scsi%ld: %s: ddb[%d]" - " marked ONLINE\n", - ha->host_no, __func__, - ddb_entry->fw_ddb_index); - - iscsi_unblock_session( - ddb_entry->sess); - } else - qla4xxx_relogin_device( - ha, ddb_entry); - } - - } + qla4xxx_relogin_all_devices(ha); } } @@ -1630,6 +1617,7 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev, uint8_t init_retry_count = 0; char buf[34]; struct qla4_8xxx_legacy_intr_set *nx_legacy_intr; + uint32_t dev_state; if (pci_enable_device(pdev)) return -1; @@ -1713,6 +1701,18 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev, status = qla4xxx_initialize_adapter(ha, REBUILD_DDB_LIST); while ((!test_bit(AF_ONLINE, &ha->flags)) && init_retry_count++ < MAX_INIT_RETRIES) { + + if (is_qla8022(ha)) { + qla4_8xxx_idc_lock(ha); + dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE); + qla4_8xxx_idc_unlock(ha); + if (dev_state == QLA82XX_DEV_FAILED) { + ql4_printk(KERN_WARNING, ha, "%s: don't retry " + "initialize adapter. H/W is in failed state\n", + __func__); + break; + } + } DEBUG2(printk("scsi: %s: retrying adapter initialization " "(%d)\n", __func__, init_retry_count)); @@ -1815,6 +1815,44 @@ probe_disable_device: } /** + * qla4xxx_prevent_other_port_reinit - prevent other port from re-initialize + * @ha: pointer to adapter structure + * + * Mark the other ISP-4xxx port to indicate that the driver is being removed, + * so that the other port will not re-initialize while in the process of + * removing the ha due to driver unload or hba hotplug. + **/ +static void qla4xxx_prevent_other_port_reinit(struct scsi_qla_host *ha) +{ + struct scsi_qla_host *other_ha = NULL; + struct pci_dev *other_pdev = NULL; + int fn = ISP4XXX_PCI_FN_2; + + /*iscsi function numbers for ISP4xxx is 1 and 3*/ + if (PCI_FUNC(ha->pdev->devfn) & BIT_1) + fn = ISP4XXX_PCI_FN_1; + + other_pdev = + pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus), + ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn), + fn)); + + /* Get other_ha if other_pdev is valid and state is enable*/ + if (other_pdev) { + if (atomic_read(&other_pdev->enable_cnt)) { + other_ha = pci_get_drvdata(other_pdev); + if (other_ha) { + set_bit(AF_HA_REMOVAL, &other_ha->flags); + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: " + "Prevent %s reinit\n", __func__, + dev_name(&other_ha->pdev->dev))); + } + } + pci_dev_put(other_pdev); + } +} + +/** * qla4xxx_remove_adapter - calback function to remove adapter. 
* @pci_dev: PCI device pointer **/ @@ -1824,7 +1862,8 @@ static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev) ha = pci_get_drvdata(pdev); - set_bit(AF_HBA_GOING_AWAY, &ha->flags); + if (!is_qla8022(ha)) + qla4xxx_prevent_other_port_reinit(ha); /* remove devs from iscsi_sessions to scsi_devices */ qla4xxx_free_ddb_list(ha); @@ -1868,10 +1907,15 @@ static int qla4xxx_slave_alloc(struct scsi_device *sdev) { struct iscsi_cls_session *sess = starget_to_session(sdev->sdev_target); struct ddb_entry *ddb = sess->dd_data; + int queue_depth = QL4_DEF_QDEPTH; sdev->hostdata = ddb; sdev->tagged_supported = 1; - scsi_activate_tcq(sdev, QL4_DEF_QDEPTH); + + if (ql4xmaxqdepth != 0 && ql4xmaxqdepth <= 0xffffU) + queue_depth = ql4xmaxqdepth; + + scsi_activate_tcq(sdev, queue_depth); return 0; } diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h index 8475b308e01b..603155769407 100644 --- a/drivers/scsi/qla4xxx/ql4_version.h +++ b/drivers/scsi/qla4xxx/ql4_version.h @@ -5,4 +5,4 @@ * See LICENSE.qla4xxx for copyright and licensing details. */ -#define QLA4XXX_DRIVER_VERSION "5.02.00-k5" +#define QLA4XXX_DRIVER_VERSION "5.02.00-k6" diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index b4218390941e..3fd16d7212de 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -1917,7 +1917,7 @@ store_priv_session_##field(struct device *dev, \ #define iscsi_priv_session_rw_attr(field, format) \ iscsi_priv_session_attr_show(field, format) \ iscsi_priv_session_attr_store(field) \ -static ISCSI_CLASS_ATTR(priv_sess, field, S_IRUGO | S_IWUGO, \ +static ISCSI_CLASS_ATTR(priv_sess, field, S_IRUGO | S_IWUSR, \ show_priv_session_##field, \ store_priv_session_##field) iscsi_priv_session_rw_attr(recovery_tmo, "%d"); diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 7ff61d76b4c5..b61ebec6bca7 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -2027,14 +2027,10 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer) int old_rcd = sdkp->RCD; int old_dpofua = sdkp->DPOFUA; - if (sdp->skip_ms_page_8) { - if (sdp->type == TYPE_RBC) - goto defaults; - else { - modepage = 0x3F; - dbd = 0; - } - } else if (sdp->type == TYPE_RBC) { + if (sdp->skip_ms_page_8) + goto defaults; + + if (sdp->type == TYPE_RBC) { modepage = 6; dbd = 8; } else { @@ -2062,11 +2058,13 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer) */ if (len < 3) goto bad_sense; - else if (len > SD_BUF_SIZE) { - sd_printk(KERN_NOTICE, sdkp, "Truncating mode parameter " - "data from %d to %d bytes\n", len, SD_BUF_SIZE); - len = SD_BUF_SIZE; - } + if (len > 20) + len = 20; + + /* Take headers and block descriptors into account */ + len += data.header_length + data.block_descriptor_length; + if (len > SD_BUF_SIZE) + goto bad_sense; /* Get the data */ res = sd_do_mode_sense(sdp, dbd, modepage, buffer, len, &data, &sshdr); @@ -2074,45 +2072,16 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer) if (scsi_status_is_good(res)) { int offset = data.header_length + data.block_descriptor_length; - while (offset < len) { - u8 page_code = buffer[offset] & 0x3F; - u8 spf = buffer[offset] & 0x40; - - if (page_code == 8 || page_code == 6) { - /* We're interested only in the first 3 bytes. 
- */ - if (len - offset <= 2) { - sd_printk(KERN_ERR, sdkp, "Incomplete " - "mode parameter data\n"); - goto defaults; - } else { - modepage = page_code; - goto Page_found; - } - } else { - /* Go to the next page */ - if (spf && len - offset > 3) - offset += 4 + (buffer[offset+2] << 8) + - buffer[offset+3]; - else if (!spf && len - offset > 1) - offset += 2 + buffer[offset+1]; - else { - sd_printk(KERN_ERR, sdkp, "Incomplete " - "mode parameter data\n"); - goto defaults; - } - } + if (offset >= SD_BUF_SIZE - 2) { + sd_printk(KERN_ERR, sdkp, "Malformed MODE SENSE response\n"); + goto defaults; } - if (modepage == 0x3F) { - sd_printk(KERN_ERR, sdkp, "No Caching mode page " - "present\n"); - goto defaults; - } else if ((buffer[offset] & 0x3f) != modepage) { + if ((buffer[offset] & 0x3f) != modepage) { sd_printk(KERN_ERR, sdkp, "Got wrong page\n"); goto defaults; } - Page_found: + if (modepage == 8) { sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0); sdkp->RCD = ((buffer[offset + 2] & 0x01) != 0); diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c index 7f5a6a86f820..eb7a3e85304f 100644 --- a/drivers/scsi/ses.c +++ b/drivers/scsi/ses.c @@ -35,9 +35,11 @@ struct ses_device { unsigned char *page1; + unsigned char *page1_types; unsigned char *page2; unsigned char *page10; short page1_len; + short page1_num_types; short page2_len; short page10_len; }; @@ -110,12 +112,12 @@ static int ses_set_page2_descriptor(struct enclosure_device *edev, int i, j, count = 0, descriptor = ecomp->number; struct scsi_device *sdev = to_scsi_device(edev->edev.parent); struct ses_device *ses_dev = edev->scratch; - unsigned char *type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11]; + unsigned char *type_ptr = ses_dev->page1_types; unsigned char *desc_ptr = ses_dev->page2 + 8; /* Clear everything */ memset(desc_ptr, 0, ses_dev->page2_len - 8); - for (i = 0; i < ses_dev->page1[10]; i++, type_ptr += 4) { + for (i = 0; i < ses_dev->page1_num_types; i++, type_ptr += 4) { for (j = 0; j < type_ptr[1]; j++) { desc_ptr += 4; if (type_ptr[0] != ENCLOSURE_COMPONENT_DEVICE && @@ -140,12 +142,12 @@ static unsigned char *ses_get_page2_descriptor(struct enclosure_device *edev, int i, j, count = 0, descriptor = ecomp->number; struct scsi_device *sdev = to_scsi_device(edev->edev.parent); struct ses_device *ses_dev = edev->scratch; - unsigned char *type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11]; + unsigned char *type_ptr = ses_dev->page1_types; unsigned char *desc_ptr = ses_dev->page2 + 8; ses_recv_diag(sdev, 2, ses_dev->page2, ses_dev->page2_len); - for (i = 0; i < ses_dev->page1[10]; i++, type_ptr += 4) { + for (i = 0; i < ses_dev->page1_num_types; i++, type_ptr += 4) { for (j = 0; j < type_ptr[1]; j++) { desc_ptr += 4; if (type_ptr[0] != ENCLOSURE_COMPONENT_DEVICE && @@ -358,7 +360,7 @@ static void ses_enclosure_data_process(struct enclosure_device *edev, unsigned char *buf = NULL, *type_ptr, *desc_ptr, *addl_desc_ptr = NULL; int i, j, page7_len, len, components; struct ses_device *ses_dev = edev->scratch; - int types = ses_dev->page1[10]; + int types = ses_dev->page1_num_types; unsigned char *hdr_buf = kzalloc(INIT_ALLOC_SIZE, GFP_KERNEL); if (!hdr_buf) @@ -390,10 +392,10 @@ static void ses_enclosure_data_process(struct enclosure_device *edev, len = (desc_ptr[2] << 8) + desc_ptr[3]; /* skip past overall descriptor */ desc_ptr += len + 4; - if (ses_dev->page10) - addl_desc_ptr = ses_dev->page10 + 8; } - type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11]; + if (ses_dev->page10) + addl_desc_ptr = ses_dev->page10 + 8; + type_ptr = 
ses_dev->page1_types; components = 0; for (i = 0; i < types; i++, type_ptr += 4) { for (j = 0; j < type_ptr[1]; j++) { @@ -503,6 +505,7 @@ static int ses_intf_add(struct device *cdev, u32 result; int i, types, len, components = 0; int err = -ENOMEM; + int num_enclosures; struct enclosure_device *edev; struct ses_component *scomp = NULL; @@ -530,16 +533,6 @@ static int ses_intf_add(struct device *cdev, if (result) goto recv_failed; - if (hdr_buf[1] != 0) { - /* FIXME: need subenclosure support; I've just never - * seen a device with subenclosures and it makes the - * traversal routines more complex */ - sdev_printk(KERN_ERR, sdev, - "FIXME driver has no support for subenclosures (%d)\n", - hdr_buf[1]); - goto err_free; - } - len = (hdr_buf[2] << 8) + hdr_buf[3] + 4; buf = kzalloc(len, GFP_KERNEL); if (!buf) @@ -549,11 +542,24 @@ static int ses_intf_add(struct device *cdev, if (result) goto recv_failed; - types = buf[10]; + types = 0; - type_ptr = buf + 12 + buf[11]; + /* we always have one main enclosure and the rest are referred + * to as secondary subenclosures */ + num_enclosures = buf[1] + 1; - for (i = 0; i < types; i++, type_ptr += 4) { + /* begin at the enclosure descriptor */ + type_ptr = buf + 8; + /* skip all the enclosure descriptors */ + for (i = 0; i < num_enclosures && type_ptr < buf + len; i++) { + types += type_ptr[2]; + type_ptr += type_ptr[3] + 4; + } + + ses_dev->page1_types = type_ptr; + ses_dev->page1_num_types = types; + + for (i = 0; i < types && type_ptr < buf + len; i++, type_ptr += 4) { if (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE || type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE) components += type_ptr[1]; |
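The reworked ses_intf_add() above no longer rejects Configuration pages that report subenclosures: it counts the primary enclosure plus buf[1] secondary subenclosures, skips each enclosure descriptor (length at byte 3, type-descriptor-header count at byte 2), and records where the 4-byte type descriptor headers begin. A standalone sketch of that walk (helper name and bounds checks are illustrative, not the driver's):

/*
 * Illustrative helper: locate the type descriptor headers in a SES
 * Configuration diagnostic page (01h) that may carry subenclosures.
 *
 *   page[1]   number of secondary subenclosures
 *   page + 8  first enclosure descriptor; each descriptor is
 *             page[off+3] + 4 bytes long and reports its
 *             type-descriptor-header count in page[off+2]
 *   after the descriptors: one 4-byte type descriptor header per type
 *
 * Returns the total number of type descriptor headers and points
 * *type_hdrs at the first one, or -1 if the page is truncated.
 */
static int ses_locate_type_headers(const unsigned char *page, int len,
				   const unsigned char **type_hdrs)
{
	const unsigned char *p = page + 8;
	int num_enclosures = page[1] + 1;	/* primary + secondary */
	int i, types = 0;

	for (i = 0; i < num_enclosures; i++) {
		if (p + 4 > page + len)
			return -1;		/* truncated descriptor list */
		types += p[2];
		p += p[3] + 4;
	}
	if (p + 4 * types > page + len)
		return -1;			/* truncated header list */
	*type_hdrs = p;
	return types;
}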