Diffstat (limited to 'drivers/scsi')
227 files changed, 21632 insertions, 11495 deletions
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c index 2940bd769936..25aba1613e21 100644 --- a/drivers/scsi/3w-xxxx.c +++ b/drivers/scsi/3w-xxxx.c @@ -1045,6 +1045,9 @@ static int tw_chrdev_open(struct inode *inode, struct file *file) static const struct file_operations tw_fops = { .owner = THIS_MODULE, .unlocked_ioctl = tw_chrdev_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = tw_chrdev_ioctl, +#endif .open = tw_chrdev_open, .release = NULL, .llseek = noop_llseek, diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 64eed87d34a8..e80768f8e579 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -194,6 +194,7 @@ config CHR_DEV_SCH config SCSI_ENCLOSURE tristate "SCSI Enclosure Support" depends on SCSI && ENCLOSURE_SERVICES + depends on m || SCSI_SAS_ATTRS != m help Enclosures are devices sitting on or in SCSI backplanes that manage devices. If you have a disk cage, the chances are that @@ -474,6 +475,7 @@ config SCSI_AACRAID source "drivers/scsi/aic7xxx/Kconfig.aic7xxx" source "drivers/scsi/aic7xxx/Kconfig.aic79xx" source "drivers/scsi/aic94xx/Kconfig" +source "drivers/scsi/hisi_sas/Kconfig" source "drivers/scsi/mvsas/Kconfig" config SCSI_MVUMI @@ -594,6 +596,7 @@ config XEN_SCSI_FRONTEND config HYPERV_STORAGE tristate "Microsoft Hyper-V virtual storage driver" depends on SCSI && HYPERV + depends on m || SCSI_FC_ATTRS != m default HYPERV help Select this option to enable the Hyper-V virtual storage driver. @@ -1104,6 +1107,7 @@ config SCSI_IPR tristate "IBM Power Linux RAID adapter support" depends on PCI && SCSI && ATA select FW_LOADER + select IRQ_POLL ---help--- This driver supports the IBM Power Linux family RAID adapters. This includes IBM pSeries 5712, 5703, 5709, and 570A, as well @@ -1618,23 +1622,6 @@ config ATARI_SCSI ST-DMA, replacing ACSI). It does NOT support other schemes, like in the Hades (without DMA). -config ATARI_SCSI_TOSHIBA_DELAY - bool "Long delays for Toshiba CD-ROMs" - depends on ATARI_SCSI - help - This option increases the delay after a SCSI arbitration to - accommodate some flaky Toshiba CD-ROM drives. Say Y if you intend to - use a Toshiba CD-ROM drive; otherwise, the option is not needed and - would impact performance a bit, so say N. - -config ATARI_SCSI_RESET_BOOT - bool "Reset SCSI-devices at boottime" - depends on ATARI_SCSI - help - Reset the devices on your Atari whenever it boots. This makes the - boot process fractionally longer but may assist recovery from errors - that leave the devices with SCSI operations partway completed. - config MAC_SCSI tristate "Macintosh NCR5380 SCSI" depends on MAC && SCSI=y diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index c14bca4a9675..862ab4efad61 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile @@ -157,6 +157,7 @@ obj-$(CONFIG_CHR_DEV_SCH) += ch.o obj-$(CONFIG_SCSI_ENCLOSURE) += ses.o obj-$(CONFIG_SCSI_OSD_INITIATOR) += osd/ +obj-$(CONFIG_SCSI_HISI_SAS) += hisi_sas/ # This goes last, so that "real" scsi devices probe earlier obj-$(CONFIG_SCSI_DEBUG) += scsi_debug.o diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c index a777e5c412df..3eff2a69fe08 100644 --- a/drivers/scsi/NCR5380.c +++ b/drivers/scsi/NCR5380.c @@ -1,17 +1,17 @@ -/* +/* * NCR 5380 generic driver routines. These should make it *trivial* - * to implement 5380 SCSI drivers under Linux with a non-trantor - * architecture. + * to implement 5380 SCSI drivers under Linux with a non-trantor + * architecture. * - * Note that these routines also work with NR53c400 family chips. 
+ * Note that these routines also work with NR53c400 family chips. * * Copyright 1993, Drew Eckhardt - * Visionary Computing - * (Unix and Linux consulting and custom programming) - * drew@colorado.edu - * +1 (303) 666-5836 + * Visionary Computing + * (Unix and Linux consulting and custom programming) + * drew@colorado.edu + * +1 (303) 666-5836 * - * For more information, please consult + * For more information, please consult * * NCR 5380 Family * SCSI Protocol Controller @@ -25,84 +25,28 @@ */ /* - * Revision 1.10 1998/9/2 Alan Cox - * (alan@lxorguk.ukuu.org.uk) - * Fixed up the timer lockups reported so far. Things still suck. Looking - * forward to 2.3 and per device request queues. Then it'll be possible to - * SMP thread this beast and improve life no end. - - * Revision 1.9 1997/7/27 Ronald van Cuijlenborg - * (ronald.van.cuijlenborg@tip.nl or nutty@dds.nl) - * (hopefully) fixed and enhanced USLEEP - * added support for DTC3181E card (for Mustek scanner) - * - - * Revision 1.8 Ingmar Baumgart - * (ingmar@gonzo.schwaben.de) - * added support for NCR53C400a card - * - - * Revision 1.7 1996/3/2 Ray Van Tassle (rayvt@comm.mot.com) - * added proc_info - * added support needed for DTC 3180/3280 - * fixed a couple of bugs - * - - * Revision 1.5 1994/01/19 09:14:57 drew - * Fixed udelay() hack that was being used on DATAOUT phases - * instead of a proper wait for the final handshake. - * - * Revision 1.4 1994/01/19 06:44:25 drew - * *** empty log message *** - * - * Revision 1.3 1994/01/19 05:24:40 drew - * Added support for TCR LAST_BYTE_SENT bit. - * - * Revision 1.2 1994/01/15 06:14:11 drew - * REAL DMA support, bug fixes. - * - * Revision 1.1 1994/01/15 06:00:54 drew - * Initial revision - * + * With contributions from Ray Van Tassle, Ingmar Baumgart, + * Ronald van Cuijlenborg, Alan Cox and others. */ /* - * Further development / testing that should be done : + * Further development / testing that should be done : * 1. Cleanup the NCR5380_transfer_dma function and DMA operation complete - * code so that everything does the same thing that's done at the - * end of a pseudo-DMA read operation. + * code so that everything does the same thing that's done at the + * end of a pseudo-DMA read operation. * * 2. Fix REAL_DMA (interrupt driven, polled works fine) - - * basically, transfer size needs to be reduced by one - * and the last byte read as is done with PSEUDO_DMA. - * - * 4. Test SCSI-II tagged queueing (I have no devices which support - * tagged queueing) - * - * 5. Test linked command handling code after Eric is ready with - * the high level code. + * basically, transfer size needs to be reduced by one + * and the last byte read as is done with PSEUDO_DMA. + * + * 4. Test SCSI-II tagged queueing (I have no devices which support + * tagged queueing) */ -#include <scsi/scsi_dbg.h> -#include <scsi/scsi_transport_spi.h> - -#if (NDEBUG & NDEBUG_LISTS) -#define LIST(x,y) {printk("LINE:%d Adding %p to %p\n", __LINE__, (void*)(x), (void*)(y)); if ((x)==(y)) udelay(5); } -#define REMOVE(w,x,y,z) {printk("LINE:%d Removing: %p->%p %p->%p \n", __LINE__, (void*)(w), (void*)(x), (void*)(y), (void*)(z)); if ((x)==(y)) udelay(5); } -#else -#define LIST(x,y) -#define REMOVE(w,x,y,z) -#endif #ifndef notyet -#undef LINKED #undef REAL_DMA #endif -#ifdef REAL_DMA_POLL -#undef READ_OVERRUNS -#define READ_OVERRUNS -#endif - #ifdef BOARD_REQUIRES_NO_DELAY #define io_recovery_delay(x) #else @@ -112,44 +56,28 @@ /* * Design * - * This is a generic 5380 driver. 
To use it on a different platform, + * This is a generic 5380 driver. To use it on a different platform, * one simply writes appropriate system specific macros (ie, data - * transfer - some PC's will use the I/O bus, 68K's must use + * transfer - some PC's will use the I/O bus, 68K's must use * memory mapped) and drops this file in their 'C' wrapper. * - * (Note from hch: unfortunately it was not enough for the different - * m68k folks and instead of improving this driver they copied it - * and hacked it up for their needs. As a consequence they lost - * most updates to this driver. Maybe someone will fix all these - * drivers to use a common core one day..) - * - * As far as command queueing, two queues are maintained for + * As far as command queueing, two queues are maintained for * each 5380 in the system - commands that haven't been issued yet, - * and commands that are currently executing. This means that an - * unlimited number of commands may be queued, letting - * more commands propagate from the higher driver levels giving higher - * throughput. Note that both I_T_L and I_T_L_Q nexuses are supported, - * allowing multiple commands to propagate all the way to a SCSI-II device + * and commands that are currently executing. This means that an + * unlimited number of commands may be queued, letting + * more commands propagate from the higher driver levels giving higher + * throughput. Note that both I_T_L and I_T_L_Q nexuses are supported, + * allowing multiple commands to propagate all the way to a SCSI-II device * while a command is already executing. * * - * Issues specific to the NCR5380 : + * Issues specific to the NCR5380 : * - * When used in a PIO or pseudo-dma mode, the NCR5380 is a braindead - * piece of hardware that requires you to sit in a loop polling for - * the REQ signal as long as you are connected. Some devices are - * brain dead (ie, many TEXEL CD ROM drives) and won't disconnect - * while doing long seek operations. - * - * The workaround for this is to keep track of devices that have - * disconnected. If the device hasn't disconnected, for commands that - * should disconnect, we do something like - * - * while (!REQ is asserted) { sleep for N usecs; poll for M usecs } - * - * Some tweaking of N and M needs to be done. An algorithm based - * on "time to data" would give the best results as long as short time - * to datas (ie, on the same track) were considered, however these + * When used in a PIO or pseudo-dma mode, the NCR5380 is a braindead + * piece of hardware that requires you to sit in a loop polling for + * the REQ signal as long as you are connected. Some devices are + * brain dead (ie, many TEXEL CD ROM drives) and won't disconnect + * while doing long seek operations. [...] These * broken devices are the exception rather than the rule and I'd rather * spend my time optimizing for the normal case. * @@ -159,23 +87,23 @@ * which is started from a workqueue for each NCR5380 host in the * system. It attempts to establish I_T_L or I_T_L_Q nexuses by * removing the commands from the issue queue and calling - * NCR5380_select() if a nexus is not established. + * NCR5380_select() if a nexus is not established. * * Once a nexus is established, the NCR5380_information_transfer() * phase goes through the various phases as instructed by the target. * if the target goes into MSG IN and sends a DISCONNECT message, * the command structure is placed into the per instance disconnected - * queue, and NCR5380_main tries to find more work. 
If the target is + * queue, and NCR5380_main tries to find more work. If the target is * idle for too long, the system will try to sleep. * * If a command has disconnected, eventually an interrupt will trigger, * calling NCR5380_intr() which will in turn call NCR5380_reselect * to reestablish a nexus. This will run main if necessary. * - * On command termination, the done function will be called as + * On command termination, the done function will be called as * appropriate. * - * SCSI pointers are maintained in the SCp field of SCSI command + * SCSI pointers are maintained in the SCp field of SCSI command * structures, being initialized after the command is connected * in NCR5380_select, and set as appropriate in NCR5380_information_transfer. * Note that in violation of the standard, an implicit SAVE POINTERS operation @@ -185,73 +113,48 @@ /* * Using this file : * This file a skeleton Linux SCSI driver for the NCR 5380 series - * of chips. To use it, you write an architecture specific functions + * of chips. To use it, you write an architecture specific functions * and macros and include this file in your driver. * - * These macros control options : - * AUTOPROBE_IRQ - if defined, the NCR5380_probe_irq() function will be - * defined. - * + * These macros control options : + * AUTOPROBE_IRQ - if defined, the NCR5380_probe_irq() function will be + * defined. + * * AUTOSENSE - if defined, REQUEST SENSE will be performed automatically - * for commands that return with a CHECK CONDITION status. + * for commands that return with a CHECK CONDITION status. * * DIFFERENTIAL - if defined, NCR53c81 chips will use external differential - * transceivers. + * transceivers. * * DONT_USE_INTR - if defined, never use interrupts, even if we probe or - * override-configure an IRQ. - * - * LIMIT_TRANSFERSIZE - if defined, limit the pseudo-dma transfers to 512 - * bytes at a time. Since interrupts are disabled by default during - * these transfers, we might need this to give reasonable interrupt - * service time if the transfer size gets too large. - * - * LINKED - if defined, linked commands are supported. + * override-configure an IRQ. * * PSEUDO_DMA - if defined, PSEUDO DMA is used during the data transfer phases. * * REAL_DMA - if defined, REAL DMA is used during the data transfer phases. * * REAL_DMA_POLL - if defined, REAL DMA is used but the driver doesn't - * rely on phase mismatch and EOP interrupts to determine end - * of phase. - * - * UNSAFE - leave interrupts enabled during pseudo-DMA transfers. You - * only really want to use this if you're having a problem with - * dropped characters during high speed communications, and even - * then, you're going to be better off twiddling with transfersize - * in the high level code. - * - * Defaults for these will be provided although the user may want to adjust - * these to allocate CPU resources to the SCSI driver or "real" code. - * - * USLEEP_SLEEP - amount of time, in jiffies, to sleep - * - * USLEEP_POLL - amount of time, in jiffies, to poll + * rely on phase mismatch and EOP interrupts to determine end + * of phase. * * These macros MUST be defined : - * NCR5380_local_declare() - declare any local variables needed for your - * transfer routines. 
* - * NCR5380_setup(instance) - initialize any local variables needed from a given - * instance of the host adapter for NCR5380_{read,write,pread,pwrite} - * * NCR5380_read(register) - read from the specified register * - * NCR5380_write(register, value) - write to the specific register + * NCR5380_write(register, value) - write to the specific register * - * NCR5380_implementation_fields - additional fields needed for this - * specific implementation of the NCR5380 + * NCR5380_implementation_fields - additional fields needed for this + * specific implementation of the NCR5380 * * Either real DMA *or* pseudo DMA may be implemented - * REAL functions : + * REAL functions : * NCR5380_REAL_DMA should be defined if real DMA is to be used. - * Note that the DMA setup functions should return the number of bytes - * that they were able to program the controller for. + * Note that the DMA setup functions should return the number of bytes + * that they were able to program the controller for. * - * Also note that generic i386/PC versions of these macros are - * available as NCR5380_i386_dma_write_setup, - * NCR5380_i386_dma_read_setup, and NCR5380_i386_dma_residual. + * Also note that generic i386/PC versions of these macros are + * available as NCR5380_i386_dma_write_setup, + * NCR5380_i386_dma_read_setup, and NCR5380_i386_dma_residual. * * NCR5380_dma_write_setup(instance, src, count) - initialize * NCR5380_dma_read_setup(instance, dst, count) - initialize @@ -262,25 +165,25 @@ * NCR5380_pread(instance, dst, count); * * The generic driver is initialized by calling NCR5380_init(instance), - * after setting the appropriate host specific fields and ID. If the + * after setting the appropriate host specific fields and ID. If the * driver wishes to autoprobe for an IRQ line, the NCR5380_probe_irq(instance, * possible) function may be used. */ -static int do_abort(struct Scsi_Host *host); -static void do_reset(struct Scsi_Host *host); +static int do_abort(struct Scsi_Host *); +static void do_reset(struct Scsi_Host *); -/* - * initialize_SCp - init the scsi pointer field - * @cmd: command block to set up +/** + * initialize_SCp - init the scsi pointer field + * @cmd: command block to set up * - * Set up the internal fields in the SCSI command. + * Set up the internal fields in the SCSI command. */ static inline void initialize_SCp(struct scsi_cmnd *cmd) { - /* - * Initialize the Scsi Pointer field so that all of the commands in the + /* + * Initialize the Scsi Pointer field so that all of the commands in the * various queues are valid. */ @@ -295,120 +198,123 @@ static inline void initialize_SCp(struct scsi_cmnd *cmd) cmd->SCp.ptr = NULL; cmd->SCp.this_residual = 0; } + + cmd->SCp.Status = 0; + cmd->SCp.Message = 0; } /** - * NCR5380_poll_politely - wait for NCR5380 status bits - * @instance: controller to poll - * @reg: 5380 register to poll - * @bit: Bitmask to check - * @val: Value required to exit - * - * Polls the NCR5380 in a reasonably efficient manner waiting for - * an event to occur, after a short quick poll we begin giving the - * CPU back in non IRQ contexts - * - * Returns the value of the register or a negative error code. 
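The reworked helper below polls in two phases: a short busy-wait bounded by the chip's measured access speed, then 1 ms sleeps until a jiffies deadline. A minimal sketch of that pattern, with a hypothetical read_reg() standing in for NCR5380_read() (an illustration, not the driver's code):

    #include <linux/errno.h>
    #include <linux/jiffies.h>
    #include <linux/sched.h>
    #include <linux/types.h>

    extern u8 read_reg(int reg);    /* hypothetical register accessor */

    /* Wait for (reg & bit) == val: spin briefly, then sleep until timeout. */
    static int poll_politely_sketch(int reg, u8 bit, u8 val, unsigned long wait)
    {
            unsigned long deadline = jiffies + wait;
            int n = 1000;                           /* short busy-wait phase */

            do {
                    if ((read_reg(reg) & bit) == val)
                            return 0;
                    cpu_relax();
            } while (n--);

            while (time_is_after_jiffies(deadline)) {
                    schedule_timeout_uninterruptible(1);    /* yield ~1 jiffy */
                    if ((read_reg(reg) & bit) == val)
                            return 0;
            }
            return -ETIMEDOUT;
    }

The real NCR5380_poll_politely2() additionally checks a second register/bit pair on every iteration and refuses to sleep in irq context, as its kernel-doc below explains.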
+ * NCR5380_poll_politely2 - wait for two chip register values + * @instance: controller to poll + * @reg1: 5380 register to poll + * @bit1: Bitmask to check + * @val1: Expected value + * @reg2: Second 5380 register to poll + * @bit2: Second bitmask to check + * @val2: Second expected value + * @wait: Time-out in jiffies + * + * Polls the chip in a reasonably efficient manner waiting for an + * event to occur. After a short quick poll we begin to yield the CPU + * (if possible). In irq contexts the time-out is arbitrarily limited. + * Callers may hold locks as long as they are held in irq mode. + * + * Returns 0 if either or both event(s) occurred otherwise -ETIMEDOUT. */ - -static int NCR5380_poll_politely(struct Scsi_Host *instance, int reg, int bit, int val, int t) + +static int NCR5380_poll_politely2(struct Scsi_Host *instance, + int reg1, int bit1, int val1, + int reg2, int bit2, int val2, int wait) { - NCR5380_local_declare(); - int n = 500; /* At about 8uS a cycle for the cpu access */ - unsigned long end = jiffies + t; - int r; - - NCR5380_setup(instance); - - while( n-- > 0) - { - r = NCR5380_read(reg); - if((r & bit) == val) + struct NCR5380_hostdata *hostdata = shost_priv(instance); + unsigned long deadline = jiffies + wait; + unsigned long n; + + /* Busy-wait for up to 10 ms */ + n = min(10000U, jiffies_to_usecs(wait)); + n *= hostdata->accesses_per_ms; + n /= 2000; + do { + if ((NCR5380_read(reg1) & bit1) == val1) + return 0; + if ((NCR5380_read(reg2) & bit2) == val2) return 0; cpu_relax(); - } - - /* t time yet ? */ - while(time_before(jiffies, end)) - { - r = NCR5380_read(reg); - if((r & bit) == val) + } while (n--); + + if (irqs_disabled() || in_interrupt()) + return -ETIMEDOUT; + + /* Repeatedly sleep for 1 ms until deadline */ + while (time_is_after_jiffies(deadline)) { + schedule_timeout_uninterruptible(1); + if ((NCR5380_read(reg1) & bit1) == val1) + return 0; + if ((NCR5380_read(reg2) & bit2) == val2) return 0; - if(!in_interrupt()) - cond_resched(); - else - cpu_relax(); } + return -ETIMEDOUT; } -static struct { - unsigned char value; - const char *name; -} phases[] __maybe_unused = { - {PHASE_DATAOUT, "DATAOUT"}, - {PHASE_DATAIN, "DATAIN"}, - {PHASE_CMDOUT, "CMDOUT"}, - {PHASE_STATIN, "STATIN"}, - {PHASE_MSGOUT, "MSGOUT"}, - {PHASE_MSGIN, "MSGIN"}, - {PHASE_UNKNOWN, "UNKNOWN"} -}; +static inline int NCR5380_poll_politely(struct Scsi_Host *instance, + int reg, int bit, int val, int wait) +{ + return NCR5380_poll_politely2(instance, reg, bit, val, + reg, bit, val, wait); +} #if NDEBUG static struct { unsigned char mask; const char *name; -} signals[] = { - {SR_DBP, "PARITY"}, - {SR_RST, "RST"}, - {SR_BSY, "BSY"}, - {SR_REQ, "REQ"}, - {SR_MSG, "MSG"}, - {SR_CD, "CD"}, - {SR_IO, "IO"}, - {SR_SEL, "SEL"}, +} signals[] = { + {SR_DBP, "PARITY"}, + {SR_RST, "RST"}, + {SR_BSY, "BSY"}, + {SR_REQ, "REQ"}, + {SR_MSG, "MSG"}, + {SR_CD, "CD"}, + {SR_IO, "IO"}, + {SR_SEL, "SEL"}, {0, NULL} -}, +}, basrs[] = { - {BASR_ATN, "ATN"}, - {BASR_ACK, "ACK"}, + {BASR_ATN, "ATN"}, + {BASR_ACK, "ACK"}, {0, NULL} -}, -icrs[] = { - {ICR_ASSERT_RST, "ASSERT RST"}, - {ICR_ASSERT_ACK, "ASSERT ACK"}, - {ICR_ASSERT_BSY, "ASSERT BSY"}, - {ICR_ASSERT_SEL, "ASSERT SEL"}, - {ICR_ASSERT_ATN, "ASSERT ATN"}, - {ICR_ASSERT_DATA, "ASSERT DATA"}, +}, +icrs[] = { + {ICR_ASSERT_RST, "ASSERT RST"}, + {ICR_ASSERT_ACK, "ASSERT ACK"}, + {ICR_ASSERT_BSY, "ASSERT BSY"}, + {ICR_ASSERT_SEL, "ASSERT SEL"}, + {ICR_ASSERT_ATN, "ASSERT ATN"}, + {ICR_ASSERT_DATA, "ASSERT DATA"}, {0, NULL} -}, -mrs[] = { - 
{MR_BLOCK_DMA_MODE, "MODE BLOCK DMA"}, - {MR_TARGET, "MODE TARGET"}, - {MR_ENABLE_PAR_CHECK, "MODE PARITY CHECK"}, - {MR_ENABLE_PAR_INTR, "MODE PARITY INTR"}, - {MR_MONITOR_BSY, "MODE MONITOR BSY"}, - {MR_DMA_MODE, "MODE DMA"}, - {MR_ARBITRATE, "MODE ARBITRATION"}, +}, +mrs[] = { + {MR_BLOCK_DMA_MODE, "MODE BLOCK DMA"}, + {MR_TARGET, "MODE TARGET"}, + {MR_ENABLE_PAR_CHECK, "MODE PARITY CHECK"}, + {MR_ENABLE_PAR_INTR, "MODE PARITY INTR"}, + {MR_ENABLE_EOP_INTR, "MODE EOP INTR"}, + {MR_MONITOR_BSY, "MODE MONITOR BSY"}, + {MR_DMA_MODE, "MODE DMA"}, + {MR_ARBITRATE, "MODE ARBITRATION"}, {0, NULL} }; /** - * NCR5380_print - print scsi bus signals - * @instance: adapter state to dump + * NCR5380_print - print scsi bus signals + * @instance: adapter state to dump * - * Print the SCSI bus signals for debugging purposes - * - * Locks: caller holds hostdata lock (not essential) + * Print the SCSI bus signals for debugging purposes */ static void NCR5380_print(struct Scsi_Host *instance) { - NCR5380_local_declare(); unsigned char status, data, basr, mr, icr, i; - NCR5380_setup(instance); data = NCR5380_read(CURRENT_SCSI_DATA_REG); status = NCR5380_read(STATUS_REG); @@ -435,117 +341,56 @@ static void NCR5380_print(struct Scsi_Host *instance) printk("\n"); } +static struct { + unsigned char value; + const char *name; +} phases[] = { + {PHASE_DATAOUT, "DATAOUT"}, + {PHASE_DATAIN, "DATAIN"}, + {PHASE_CMDOUT, "CMDOUT"}, + {PHASE_STATIN, "STATIN"}, + {PHASE_MSGOUT, "MSGOUT"}, + {PHASE_MSGIN, "MSGIN"}, + {PHASE_UNKNOWN, "UNKNOWN"} +}; -/* - * NCR5380_print_phase - show SCSI phase - * @instance: adapter to dump - * - * Print the current SCSI phase for debugging purposes +/** + * NCR5380_print_phase - show SCSI phase + * @instance: adapter to dump * - * Locks: none + * Print the current SCSI phase for debugging purposes */ static void NCR5380_print_phase(struct Scsi_Host *instance) { - NCR5380_local_declare(); unsigned char status; int i; - NCR5380_setup(instance); status = NCR5380_read(STATUS_REG); if (!(status & SR_REQ)) - printk("scsi%d : REQ not asserted, phase unknown.\n", instance->host_no); + shost_printk(KERN_DEBUG, instance, "REQ not asserted, phase unknown.\n"); else { - for (i = 0; (phases[i].value != PHASE_UNKNOWN) && (phases[i].value != (status & PHASE_MASK)); ++i); - printk("scsi%d : phase %s\n", instance->host_no, phases[i].name); + for (i = 0; (phases[i].value != PHASE_UNKNOWN) && + (phases[i].value != (status & PHASE_MASK)); ++i) + ; + shost_printk(KERN_DEBUG, instance, "phase %s\n", phases[i].name); } } #endif -/* - * These need tweaking, and would probably work best as per-device - * flags initialized differently for disk, tape, cd, etc devices. - * People with broken devices are free to experiment as to what gives - * the best results for them. - * - * USLEEP_SLEEP should be a minimum seek time. - * - * USLEEP_POLL should be a maximum rotational latency. - */ -#ifndef USLEEP_SLEEP -/* 20 ms (reasonable hard disk speed) */ -#define USLEEP_SLEEP msecs_to_jiffies(20) -#endif -/* 300 RPM (floppy speed) */ -#ifndef USLEEP_POLL -#define USLEEP_POLL msecs_to_jiffies(200) -#endif -#ifndef USLEEP_WAITLONG -/* RvC: (reasonable time to wait on select error) */ -#define USLEEP_WAITLONG USLEEP_SLEEP -#endif -/* - * Function : int should_disconnect (unsigned char cmd) - * - * Purpose : decide whether a command would normally disconnect or - * not, since if it won't disconnect we should go to sleep. 
- * - * Input : cmd - opcode of SCSI command - * - * Returns : DISCONNECT_LONG if we should disconnect for a really long - * time (ie always, sleep, look for REQ active, sleep), - * DISCONNECT_TIME_TO_DATA if we would only disconnect for a normal - * time-to-data delay, DISCONNECT_NONE if this command would return - * immediately. - * - * Future sleep algorithms based on time to data can exploit - * something like this so they can differentiate between "normal" - * (ie, read, write, seek) and unusual commands (ie, * format). - * - * Note : We don't deal with commands that handle an immediate disconnect, - * - */ - -static int should_disconnect(unsigned char cmd) -{ - switch (cmd) { - case READ_6: - case WRITE_6: - case SEEK_6: - case READ_10: - case WRITE_10: - case SEEK_10: - return DISCONNECT_TIME_TO_DATA; - case FORMAT_UNIT: - case SEARCH_HIGH: - case SEARCH_LOW: - case SEARCH_EQUAL: - return DISCONNECT_LONG; - default: - return DISCONNECT_NONE; - } -} - -static void NCR5380_set_timer(struct NCR5380_hostdata *hostdata, unsigned long timeout) -{ - hostdata->time_expires = jiffies + timeout; - schedule_delayed_work(&hostdata->coroutine, timeout); -} - - -static int probe_irq __initdata = 0; +static int probe_irq __initdata; /** - * probe_intr - helper for IRQ autoprobe - * @irq: interrupt number - * @dev_id: unused - * @regs: unused + * probe_intr - helper for IRQ autoprobe + * @irq: interrupt number + * @dev_id: unused + * @regs: unused * - * Set a flag to indicate the IRQ in question was received. This is - * used by the IRQ probe code. + * Set a flag to indicate the IRQ in question was received. This is + * used by the IRQ probe code. */ - + static irqreturn_t __init probe_intr(int irq, void *dev_id) { probe_irq = irq; @@ -553,24 +398,20 @@ static irqreturn_t __init probe_intr(int irq, void *dev_id) } /** - * NCR5380_probe_irq - find the IRQ of an NCR5380 - * @instance: NCR5380 controller - * @possible: bitmask of ISA IRQ lines - * - * Autoprobe for the IRQ line used by the NCR5380 by triggering an IRQ - * and then looking to see what interrupt actually turned up. + * NCR5380_probe_irq - find the IRQ of an NCR5380 + * @instance: NCR5380 controller + * @possible: bitmask of ISA IRQ lines * - * Locks: none, irqs must be enabled on entry + * Autoprobe for the IRQ line used by the NCR5380 by triggering an IRQ + * and then looking to see what interrupt actually turned up. */ static int __init __maybe_unused NCR5380_probe_irq(struct Scsi_Host *instance, int possible) { - NCR5380_local_declare(); - struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata; + struct NCR5380_hostdata *hostdata = shost_priv(instance); unsigned long timeout; int trying_irqs, i, mask; - NCR5380_setup(instance); for (trying_irqs = 0, i = 1, mask = 2; i < 16; ++i, mask <<= 1) if ((mask & possible) && (request_irq(i, &probe_intr, 0, "NCR-probe", NULL) == 0)) @@ -581,7 +422,7 @@ static int __init __maybe_unused NCR5380_probe_irq(struct Scsi_Host *instance, /* * A interrupt is triggered whenever BSY = false, SEL = true - * and a bit set in the SELECT_ENABLE_REG is asserted on the + * and a bit set in the SELECT_ENABLE_REG is asserted on the * SCSI bus. 
* * Note that the bus is only driven when the phase control signals @@ -596,7 +437,7 @@ static int __init __maybe_unused NCR5380_probe_irq(struct Scsi_Host *instance, while (probe_irq == NO_IRQ && time_before(jiffies, timeout)) schedule_timeout_uninterruptible(1); - + NCR5380_write(SELECT_ENABLE_REG, 0); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); @@ -608,12 +449,10 @@ static int __init __maybe_unused NCR5380_probe_irq(struct Scsi_Host *instance, } /** - * NCR58380_info - report driver and host information - * @instance: relevant scsi host instance + * NCR58380_info - report driver and host information + * @instance: relevant scsi host instance * - * For use as the host template info() handler. - * - * Locks: none + * For use as the host template info() handler. */ static const char *NCR5380_info(struct Scsi_Host *instance) @@ -633,20 +472,14 @@ static void prepare_info(struct Scsi_Host *instance) "can_queue %d, cmd_per_lun %d, " "sg_tablesize %d, this_id %d, " "flags { %s%s%s}, " -#if defined(USLEEP_POLL) && defined(USLEEP_WAITLONG) - "USLEEP_POLL %lu, USLEEP_WAITLONG %lu, " -#endif "options { %s} ", instance->hostt->name, instance->io_port, instance->n_io_port, instance->base, instance->irq, instance->can_queue, instance->cmd_per_lun, instance->sg_tablesize, instance->this_id, - hostdata->flags & FLAG_NCR53C400 ? "NCR53C400 " : "", - hostdata->flags & FLAG_DTC3181E ? "DTC3181E " : "", + hostdata->flags & FLAG_NO_DMA_FIXUP ? "NO_DMA_FIXUP " : "", hostdata->flags & FLAG_NO_PSEUDO_DMA ? "NO_PSEUDO_DMA " : "", -#if defined(USLEEP_POLL) && defined(USLEEP_WAITLONG) - USLEEP_POLL, USLEEP_WAITLONG, -#endif + hostdata->flags & FLAG_TOSHIBA_DELAY ? "TOSHIBA_DELAY " : "", #ifdef AUTOPROBE_IRQ "AUTOPROBE_IRQ " #endif @@ -665,46 +498,10 @@ static void prepare_info(struct Scsi_Host *instance) #ifdef PSEUDO_DMA "PSEUDO_DMA " #endif -#ifdef UNSAFE - "UNSAFE " -#endif -#ifdef NCR53C400 - "NCR53C400 " -#endif ""); } -/** - * NCR5380_print_status - dump controller info - * @instance: controller to dump - * - * Print commands in the various queues, called from NCR5380_abort - * and NCR5380_debug to aid debugging. 
- * - * Locks: called functions disable irqs - */ - -static void NCR5380_print_status(struct Scsi_Host *instance) -{ - NCR5380_dprint(NDEBUG_ANY, instance); - NCR5380_dprint_phase(NDEBUG_ANY, instance); -} - #ifdef PSEUDO_DMA -/******************************************/ -/* - * /proc/scsi/[dtc pas16 t128 generic]/[0-ASC_NUM_BOARD_SUPPORTED] - * - * *buffer: I/O buffer - * **start: if inout == FALSE pointer into buffer where user read should start - * offset: current offset - * length: length of buffer - * hostno: Scsi_Host host_no - * inout: TRUE - user is writing; FALSE - user is reading - * - * Return the number of bytes read from or written - */ - static int __maybe_unused NCR5380_write_info(struct Scsi_Host *instance, char *buffer, int length) { @@ -714,104 +511,41 @@ static int __maybe_unused NCR5380_write_info(struct Scsi_Host *instance, hostdata->spin_max_w = 0; return 0; } -#endif - -static -void lprint_Scsi_Cmnd(struct scsi_cmnd *cmd, struct seq_file *m); -static -void lprint_command(unsigned char *cmd, struct seq_file *m); -static -void lprint_opcode(int opcode, struct seq_file *m); static int __maybe_unused NCR5380_show_info(struct seq_file *m, - struct Scsi_Host *instance) + struct Scsi_Host *instance) { - struct NCR5380_hostdata *hostdata; - struct scsi_cmnd *ptr; - - hostdata = (struct NCR5380_hostdata *) instance->hostdata; + struct NCR5380_hostdata *hostdata = shost_priv(instance); -#ifdef PSEUDO_DMA seq_printf(m, "Highwater I/O busy spin counts: write %d, read %d\n", hostdata->spin_max_w, hostdata->spin_max_r); -#endif - spin_lock_irq(instance->host_lock); - if (!hostdata->connected) - seq_printf(m, "scsi%d: no currently connected command\n", instance->host_no); - else - lprint_Scsi_Cmnd((struct scsi_cmnd *) hostdata->connected, m); - seq_printf(m, "scsi%d: issue_queue\n", instance->host_no); - for (ptr = (struct scsi_cmnd *) hostdata->issue_queue; ptr; ptr = (struct scsi_cmnd *) ptr->host_scribble) - lprint_Scsi_Cmnd(ptr, m); - - seq_printf(m, "scsi%d: disconnected_queue\n", instance->host_no); - for (ptr = (struct scsi_cmnd *) hostdata->disconnected_queue; ptr; ptr = (struct scsi_cmnd *) ptr->host_scribble) - lprint_Scsi_Cmnd(ptr, m); - spin_unlock_irq(instance->host_lock); return 0; } - -static void lprint_Scsi_Cmnd(struct scsi_cmnd *cmd, struct seq_file *m) -{ - seq_printf(m, "scsi%d : destination target %d, lun %llu\n", cmd->device->host->host_no, cmd->device->id, cmd->device->lun); - seq_puts(m, " command = "); - lprint_command(cmd->cmnd, m); -} - -static void lprint_command(unsigned char *command, struct seq_file *m) -{ - int i, s; - lprint_opcode(command[0], m); - for (i = 1, s = COMMAND_SIZE(command[0]); i < s; ++i) - seq_printf(m, "%02x ", command[i]); - seq_putc(m, '\n'); -} - -static void lprint_opcode(int opcode, struct seq_file *m) -{ - seq_printf(m, "%2d (0x%02x)", opcode, opcode); -} - +#endif /** - * NCR5380_init - initialise an NCR5380 - * @instance: adapter to configure - * @flags: control flags - * - * Initializes *instance and corresponding 5380 chip, - * with flags OR'd into the initial flags value. + * NCR5380_init - initialise an NCR5380 + * @instance: adapter to configure + * @flags: control flags * - * Notes : I assume that the host, hostno, and id bits have been - * set correctly. I don't care about the irq and other fields. + * Initializes *instance and corresponding 5380 chip, + * with flags OR'd into the initial flags value. * - * Returns 0 for success + * Notes : I assume that the host, hostno, and id bits have been + * set correctly. 
I don't care about the irq and other fields. * - * Locks: interrupts must be enabled when we are called + * Returns 0 for success */ static int NCR5380_init(struct Scsi_Host *instance, int flags) { - NCR5380_local_declare(); - int i, pass; - unsigned long timeout; - struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata; - - if(in_interrupt()) - printk(KERN_ERR "NCR5380_init called with interrupts off!\n"); - /* - * On NCR53C400 boards, NCR5380 registers are mapped 8 past - * the base address. - */ - -#ifdef NCR53C400 - if (flags & FLAG_NCR53C400) - instance->NCR5380_instance_name += NCR53C400_address_adjust; -#endif - - NCR5380_setup(instance); + struct NCR5380_hostdata *hostdata = shost_priv(instance); + int i; + unsigned long deadline; - hostdata->aborted = 0; + hostdata->host = instance; hostdata->id_mask = 1 << instance->this_id; + hostdata->id_higher_mask = 0; for (i = hostdata->id_mask; i <= 0x80; i <<= 1) if (i > hostdata->id_mask) hostdata->id_higher_mask |= i; @@ -820,21 +554,21 @@ static int NCR5380_init(struct Scsi_Host *instance, int flags) #ifdef REAL_DMA hostdata->dmalen = 0; #endif - hostdata->targets_present = 0; + spin_lock_init(&hostdata->lock); hostdata->connected = NULL; - hostdata->issue_queue = NULL; - hostdata->disconnected_queue = NULL; - - INIT_DELAYED_WORK(&hostdata->coroutine, NCR5380_main); - - /* The CHECK code seems to break the 53C400. Will check it later maybe */ - if (flags & FLAG_NCR53C400) - hostdata->flags = FLAG_HAS_LAST_BYTE_SENT | flags; - else - hostdata->flags = FLAG_CHECK_LAST_BYTE_SENT | flags; + hostdata->sensing = NULL; + INIT_LIST_HEAD(&hostdata->autosense); + INIT_LIST_HEAD(&hostdata->unissued); + INIT_LIST_HEAD(&hostdata->disconnected); - hostdata->host = instance; - hostdata->time_expires = 0; + hostdata->flags = flags; + + INIT_WORK(&hostdata->main_task, NCR5380_main); + hostdata->work_q = alloc_workqueue("ncr5380_%d", + WQ_UNBOUND | WQ_MEM_RECLAIM, + 1, instance->host_no); + if (!hostdata->work_q) + return -ENOMEM; prepare_info(instance); @@ -843,43 +577,69 @@ static int NCR5380_init(struct Scsi_Host *instance, int flags) NCR5380_write(TARGET_COMMAND_REG, 0); NCR5380_write(SELECT_ENABLE_REG, 0); -#ifdef NCR53C400 - if (hostdata->flags & FLAG_NCR53C400) { - NCR5380_write(C400_CONTROL_STATUS_REG, CSR_BASE); - } -#endif + /* Calibrate register polling loop */ + i = 0; + deadline = jiffies + 1; + do { + cpu_relax(); + } while (time_is_after_jiffies(deadline)); + deadline += msecs_to_jiffies(256); + do { + NCR5380_read(STATUS_REG); + ++i; + cpu_relax(); + } while (time_is_after_jiffies(deadline)); + hostdata->accesses_per_ms = i / 256; - /* - * Detect and correct bus wedge problems. - * - * If the system crashed, it may have crashed in a state - * where a SCSI command was still executing, and the - * SCSI bus is not in a BUS FREE STATE. - * - * If this is the case, we'll try to abort the currently - * established nexus which we know nothing about, and that - * failing, do a hard reset of the SCSI bus - */ + return 0; +} + +/** + * NCR5380_maybe_reset_bus - Detect and correct bus wedge problems. + * @instance: adapter to check + * + * If the system crashed, it may have crashed with a connected target and + * the SCSI bus busy. Check for BUS FREE phase. If not, try to abort the + * currently established nexus, which we know nothing about. Failing that + * do a bus reset. + * + * Note that a bus reset will cause the chip to assert IRQ. + * + * Returns 0 if successful, otherwise -ENXIO. 
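Earlier in this hunk, NCR5380_init() measures how many chip register reads fit into a millisecond (hostdata->accesses_per_ms), so the busy-wait phase of NCR5380_poll_politely2() can be bounded in time rather than in iterations. The calibration idiom in isolation, again using the hypothetical read_reg() from the sketch above:

    #include <linux/jiffies.h>

    extern unsigned char read_reg(int reg);    /* hypothetical accessor */

    static unsigned long calibrate_accesses_per_ms(void)
    {
            unsigned long deadline = jiffies + 1;
            unsigned long i = 0;

            /* Spin to a jiffy edge so the measurement window starts cleanly. */
            do {
                    cpu_relax();
            } while (time_is_after_jiffies(deadline));

            /* Count register reads over a 256 ms window. */
            deadline += msecs_to_jiffies(256);
            do {
                    (void)read_reg(STATUS_REG);
                    i++;
                    cpu_relax();
            } while (time_is_after_jiffies(deadline));

            return i / 256;                 /* reads per millisecond */
    }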
+ */ + +static int NCR5380_maybe_reset_bus(struct Scsi_Host *instance) +{ + struct NCR5380_hostdata *hostdata = shost_priv(instance); + int pass; for (pass = 1; (NCR5380_read(STATUS_REG) & SR_BSY) && pass <= 6; ++pass) { switch (pass) { case 1: case 3: case 5: - printk(KERN_INFO "scsi%d: SCSI bus busy, waiting up to five seconds\n", instance->host_no); - timeout = jiffies + 5 * HZ; - NCR5380_poll_politely(instance, STATUS_REG, SR_BSY, 0, 5*HZ); + shost_printk(KERN_ERR, instance, "SCSI bus busy, waiting up to five seconds\n"); + NCR5380_poll_politely(instance, + STATUS_REG, SR_BSY, 0, 5 * HZ); break; case 2: - printk(KERN_WARNING "scsi%d: bus busy, attempting abort\n", instance->host_no); + shost_printk(KERN_ERR, instance, "bus busy, attempting abort\n"); do_abort(instance); break; case 4: - printk(KERN_WARNING "scsi%d: bus busy, attempting reset\n", instance->host_no); + shost_printk(KERN_ERR, instance, "bus busy, attempting reset\n"); do_reset(instance); + /* Wait after a reset; the SCSI standard calls for + * 250ms, we wait 500ms to be on the safe side. + * But some Toshiba CD-ROMs need ten times that. + */ + if (hostdata->flags & FLAG_TOSHIBA_DELAY) + msleep(2500); + else + msleep(500); break; case 6: - printk(KERN_ERR "scsi%d: bus locked solid or invalid override\n", instance->host_no); + shost_printk(KERN_ERR, instance, "bus locked solid\n"); return -ENXIO; } } @@ -887,450 +647,519 @@ static int NCR5380_init(struct Scsi_Host *instance, int flags) } /** - * NCR5380_exit - remove an NCR5380 - * @instance: adapter to remove + * NCR5380_exit - remove an NCR5380 + * @instance: adapter to remove + * + * Assumes that no more work can be queued (e.g. by NCR5380_intr). */ static void NCR5380_exit(struct Scsi_Host *instance) { - struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata; + struct NCR5380_hostdata *hostdata = shost_priv(instance); - cancel_delayed_work_sync(&hostdata->coroutine); + cancel_work_sync(&hostdata->main_task); + destroy_workqueue(hostdata->work_q); } /** - * NCR5380_queue_command - queue a command - * @cmd: SCSI command - * @done: completion handler - * - * cmd is added to the per instance issue_queue, with minor - * twiddling done to the host specific fields of cmd. If the - * main coroutine is not running, it is restarted. + * complete_cmd - finish processing a command and return it to the SCSI ML + * @instance: the host instance + * @cmd: command to complete + */ + +static void complete_cmd(struct Scsi_Host *instance, + struct scsi_cmnd *cmd) +{ + struct NCR5380_hostdata *hostdata = shost_priv(instance); + + dsprintk(NDEBUG_QUEUES, instance, "complete_cmd: cmd %p\n", cmd); + + if (hostdata->sensing == cmd) { + /* Autosense processing ends here */ + if ((cmd->result & 0xff) != SAM_STAT_GOOD) { + scsi_eh_restore_cmnd(cmd, &hostdata->ses); + set_host_byte(cmd, DID_ERROR); + } else + scsi_eh_restore_cmnd(cmd, &hostdata->ses); + hostdata->sensing = NULL; + } + + hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun); + + cmd->scsi_done(cmd); +} + +/** + * NCR5380_queue_command - queue a command + * @instance: the relevant SCSI adapter + * @cmd: SCSI command * - * Locks: host lock taken by caller + * cmd is added to the per-instance issue queue, with minor + * twiddling done to the host specific fields of cmd. If the + * main coroutine is not running, it is restarted. 
*/ -static int NCR5380_queue_command_lck(struct scsi_cmnd *cmd, void (*done) (struct scsi_cmnd *)) +static int NCR5380_queue_command(struct Scsi_Host *instance, + struct scsi_cmnd *cmd) { - struct Scsi_Host *instance = cmd->device->host; - struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata; - struct scsi_cmnd *tmp; + struct NCR5380_hostdata *hostdata = shost_priv(instance); + struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd); + unsigned long flags; #if (NDEBUG & NDEBUG_NO_WRITE) switch (cmd->cmnd[0]) { case WRITE_6: case WRITE_10: - printk("scsi%d : WRITE attempted with NO_WRITE debugging flag set\n", instance->host_no); + shost_printk(KERN_DEBUG, instance, "WRITE attempted with NDEBUG_NO_WRITE set\n"); cmd->result = (DID_ERROR << 16); - done(cmd); + cmd->scsi_done(cmd); return 0; } -#endif /* (NDEBUG & NDEBUG_NO_WRITE) */ - - /* - * We use the host_scribble field as a pointer to the next command - * in a queue - */ +#endif /* (NDEBUG & NDEBUG_NO_WRITE) */ - cmd->host_scribble = NULL; - cmd->scsi_done = done; cmd->result = 0; - /* - * Insert the cmd into the issue queue. Note that REQUEST SENSE + spin_lock_irqsave(&hostdata->lock, flags); + + /* + * Insert the cmd into the issue queue. Note that REQUEST SENSE * commands are added to the head of the queue since any command will - * clear the contingent allegiance condition that exists and the + * clear the contingent allegiance condition that exists and the * sense data is only guaranteed to be valid while the condition exists. */ - if (!(hostdata->issue_queue) || (cmd->cmnd[0] == REQUEST_SENSE)) { - LIST(cmd, hostdata->issue_queue); - cmd->host_scribble = (unsigned char *) hostdata->issue_queue; - hostdata->issue_queue = cmd; - } else { - for (tmp = (struct scsi_cmnd *) hostdata->issue_queue; tmp->host_scribble; tmp = (struct scsi_cmnd *) tmp->host_scribble); - LIST(cmd, tmp); - tmp->host_scribble = (unsigned char *) cmd; - } - dprintk(NDEBUG_QUEUES, "scsi%d : command added to %s of queue\n", instance->host_no, (cmd->cmnd[0] == REQUEST_SENSE) ? "head" : "tail"); + if (cmd->cmnd[0] == REQUEST_SENSE) + list_add(&ncmd->list, &hostdata->unissued); + else + list_add_tail(&ncmd->list, &hostdata->unissued); + + spin_unlock_irqrestore(&hostdata->lock, flags); + + dsprintk(NDEBUG_QUEUES, instance, "command %p added to %s of queue\n", + cmd, (cmd->cmnd[0] == REQUEST_SENSE) ? "head" : "tail"); - /* Run the coroutine if it isn't already running. */ /* Kick off command processing */ - schedule_delayed_work(&hostdata->coroutine, 0); + queue_work(hostdata->work_q, &hostdata->main_task); return 0; } -static DEF_SCSI_QCMD(NCR5380_queue_command) +/** + * dequeue_next_cmd - dequeue a command for processing + * @instance: the scsi host instance + * + * Priority is given to commands on the autosense queue. These commands + * need autosense because of a CHECK CONDITION result. + * + * Returns a command pointer if a command is found for a target that is + * not already busy. Otherwise returns NULL. 
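The implementation that follows skips any command whose target/LUN already has an outstanding command, tracked as one bitmask byte per target (hostdata->busy[target], with bit n meaning LUN n is in use). The bit operations involved, shown standalone with hypothetical helper names:

    #include <linux/types.h>

    static unsigned char busy[8];           /* one bit per LUN, per target */

    static bool lun_busy(int target, int lun)
    {
            return busy[target] & (1 << lun);
    }

    static void set_lun_busy(int target, int lun)
    {
            busy[target] |= 1 << lun;
    }

    static void clear_lun_busy(int target, int lun)  /* cf. complete_cmd() */
    {
            busy[target] &= ~(1 << lun);
    }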
+ */ + +static struct scsi_cmnd *dequeue_next_cmd(struct Scsi_Host *instance) +{ + struct NCR5380_hostdata *hostdata = shost_priv(instance); + struct NCR5380_cmd *ncmd; + struct scsi_cmnd *cmd; + + if (hostdata->sensing || list_empty(&hostdata->autosense)) { + list_for_each_entry(ncmd, &hostdata->unissued, list) { + cmd = NCR5380_to_scmd(ncmd); + dsprintk(NDEBUG_QUEUES, instance, "dequeue: cmd=%p target=%d busy=0x%02x lun=%llu\n", + cmd, scmd_id(cmd), hostdata->busy[scmd_id(cmd)], cmd->device->lun); + + if (!(hostdata->busy[scmd_id(cmd)] & (1 << cmd->device->lun))) { + list_del(&ncmd->list); + dsprintk(NDEBUG_QUEUES, instance, + "dequeue: removed %p from issue queue\n", cmd); + return cmd; + } + } + } else { + /* Autosense processing begins here */ + ncmd = list_first_entry(&hostdata->autosense, + struct NCR5380_cmd, list); + list_del(&ncmd->list); + cmd = NCR5380_to_scmd(ncmd); + dsprintk(NDEBUG_QUEUES, instance, + "dequeue: removed %p from autosense queue\n", cmd); + scsi_eh_prep_cmnd(cmd, &hostdata->ses, NULL, 0, ~0); + hostdata->sensing = cmd; + return cmd; + } + return NULL; +} + +static void requeue_cmd(struct Scsi_Host *instance, struct scsi_cmnd *cmd) +{ + struct NCR5380_hostdata *hostdata = shost_priv(instance); + struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd); + + if (hostdata->sensing == cmd) { + scsi_eh_restore_cmnd(cmd, &hostdata->ses); + list_add(&ncmd->list, &hostdata->autosense); + hostdata->sensing = NULL; + } else + list_add(&ncmd->list, &hostdata->unissued); +} /** - * NCR5380_main - NCR state machines - * - * NCR5380_main is a coroutine that runs as long as more work can - * be done on the NCR5380 host adapters in a system. Both - * NCR5380_queue_command() and NCR5380_intr() will try to start it - * in case it is not running. - * - * Locks: called as its own thread with no locks held. Takes the - * host lock and called routines may take the isa dma lock. + * NCR5380_main - NCR state machines + * + * NCR5380_main is a coroutine that runs as long as more work can + * be done on the NCR5380 host adapters in a system. Both + * NCR5380_queue_command() and NCR5380_intr() will try to start it + * in case it is not running. */ static void NCR5380_main(struct work_struct *work) { struct NCR5380_hostdata *hostdata = - container_of(work, struct NCR5380_hostdata, coroutine.work); + container_of(work, struct NCR5380_hostdata, main_task); struct Scsi_Host *instance = hostdata->host; - struct scsi_cmnd *tmp, *prev; int done; - - spin_lock_irq(instance->host_lock); + do { - /* Lock held here */ done = 1; - if (!hostdata->connected && !hostdata->selecting) { - dprintk(NDEBUG_MAIN, "scsi%d : not connected\n", instance->host_no); - /* - * Search through the issue_queue for a command destined - * for a target that's not busy. - */ - for (tmp = (struct scsi_cmnd *) hostdata->issue_queue, prev = NULL; tmp; prev = tmp, tmp = (struct scsi_cmnd *) tmp->host_scribble) - { - if (prev != tmp) - dprintk(NDEBUG_LISTS, "MAIN tmp=%p target=%d busy=%d lun=%llu\n", tmp, tmp->device->id, hostdata->busy[tmp->device->id], tmp->device->lun); - /* When we find one, remove it from the issue queue. 
*/ - if (!(hostdata->busy[tmp->device->id] & - (1 << (u8)(tmp->device->lun & 0xff)))) { - if (prev) { - REMOVE(prev, prev->host_scribble, tmp, tmp->host_scribble); - prev->host_scribble = tmp->host_scribble; - } else { - REMOVE(-1, hostdata->issue_queue, tmp, tmp->host_scribble); - hostdata->issue_queue = (struct scsi_cmnd *) tmp->host_scribble; - } - tmp->host_scribble = NULL; - /* - * Attempt to establish an I_T_L nexus here. - * On success, instance->hostdata->connected is set. - * On failure, we must add the command back to the - * issue queue so we can keep trying. - */ - dprintk(NDEBUG_MAIN|NDEBUG_QUEUES, "scsi%d : main() : command for target %d lun %llu removed from issue_queue\n", instance->host_no, tmp->device->id, tmp->device->lun); - - /* - * A successful selection is defined as one that - * leaves us with the command connected and - * in hostdata->connected, OR has terminated the - * command. - * - * With successful commands, we fall through - * and see if we can do an information transfer, - * with failures we will restart. - */ - hostdata->selecting = NULL; - /* RvC: have to preset this to indicate a new command is being performed */ + spin_lock_irq(&hostdata->lock); + while (!hostdata->connected && !hostdata->selecting) { + struct scsi_cmnd *cmd = dequeue_next_cmd(instance); - /* - * REQUEST SENSE commands are issued without tagged - * queueing, even on SCSI-II devices because the - * contingent allegiance condition exists for the - * entire unit. - */ + if (!cmd) + break; - if (!NCR5380_select(instance, tmp)) { - break; - } else { - LIST(tmp, hostdata->issue_queue); - tmp->host_scribble = (unsigned char *) hostdata->issue_queue; - hostdata->issue_queue = tmp; - done = 0; - dprintk(NDEBUG_MAIN|NDEBUG_QUEUES, "scsi%d : main(): select() failed, returned to issue_queue\n", instance->host_no); - } - /* lock held here still */ - } /* if target/lun is not busy */ - } /* for */ - /* exited locked */ - } /* if (!hostdata->connected) */ - if (hostdata->selecting) { - tmp = (struct scsi_cmnd *) hostdata->selecting; - /* Selection will drop and retake the lock */ - if (!NCR5380_select(instance, tmp)) { - /* Ok ?? */ + dsprintk(NDEBUG_MAIN, instance, "main: dequeued %p\n", cmd); + + /* + * Attempt to establish an I_T_L nexus here. + * On success, instance->hostdata->connected is set. + * On failure, we must add the command back to the + * issue queue so we can keep trying. + */ + /* + * REQUEST SENSE commands are issued without tagged + * queueing, even on SCSI-II devices because the + * contingent allegiance condition exists for the + * entire unit. 
+ */ + + if (!NCR5380_select(instance, cmd)) { + dsprintk(NDEBUG_MAIN, instance, "main: select complete\n"); } else { - /* RvC: device failed, so we wait a long time - this is needed for Mustek scanners, that - do not respond to commands immediately - after a scan */ - printk(KERN_DEBUG "scsi%d: device %d did not respond in time\n", instance->host_no, tmp->device->id); - LIST(tmp, hostdata->issue_queue); - tmp->host_scribble = (unsigned char *) hostdata->issue_queue; - hostdata->issue_queue = tmp; - NCR5380_set_timer(hostdata, USLEEP_WAITLONG); + dsprintk(NDEBUG_MAIN | NDEBUG_QUEUES, instance, + "main: select failed, returning %p to queue\n", cmd); + requeue_cmd(instance, cmd); } - } /* if hostdata->selecting */ + } if (hostdata->connected #ifdef REAL_DMA && !hostdata->dmalen #endif - && (!hostdata->time_expires || time_before_eq(hostdata->time_expires, jiffies)) ) { - dprintk(NDEBUG_MAIN, "scsi%d : main() : performing information transfer\n", instance->host_no); + dsprintk(NDEBUG_MAIN, instance, "main: performing information transfer\n"); NCR5380_information_transfer(instance); - dprintk(NDEBUG_MAIN, "scsi%d : main() : done set false\n", instance->host_no); done = 0; - } else - break; + } + spin_unlock_irq(&hostdata->lock); + if (!done) + cond_resched(); } while (!done); - - spin_unlock_irq(instance->host_lock); } #ifndef DONT_USE_INTR /** - * NCR5380_intr - generic NCR5380 irq handler - * @irq: interrupt number - * @dev_id: device info - * - * Handle interrupts, reestablishing I_T_L or I_T_L_Q nexuses - * from the disconnected queue, and restarting NCR5380_main() - * as required. - * - * Locks: takes the needed instance locks + * NCR5380_intr - generic NCR5380 irq handler + * @irq: interrupt number + * @dev_id: device info + * + * Handle interrupts, reestablishing I_T_L or I_T_L_Q nexuses + * from the disconnected queue, and restarting NCR5380_main() + * as required. + * + * The chip can assert IRQ in any of six different conditions. The IRQ flag + * is then cleared by reading the Reset Parity/Interrupt Register (RPIR). + * Three of these six conditions are latched in the Bus and Status Register: + * - End of DMA (cleared by ending DMA Mode) + * - Parity error (cleared by reading RPIR) + * - Loss of BSY (cleared by reading RPIR) + * Two conditions have flag bits that are not latched: + * - Bus phase mismatch (non-maskable in DMA Mode, cleared by ending DMA Mode) + * - Bus reset (non-maskable) + * The remaining condition has no flag bit at all: + * - Selection/reselection + * + * Hence, establishing the cause(s) of any interrupt is partly guesswork. + * In "The DP8490 and DP5380 Comparison Guide", National Semiconductor + * claimed that "the design of the [DP8490] interrupt logic ensures + * interrupts will not be lost (they can be on the DP5380)." + * The L5380/53C80 datasheet from LOGIC Devices has more details. + * + * Checking for bus reset by reading RST is futile because of interrupt + * latency, but a bus reset will reset chip logic. Checking for parity error + * is unnecessary because that interrupt is never enabled. A Loss of BSY + * condition will clear DMA Mode. We can tell when this occurs because the + * the Busy Monitor interrupt is enabled together with DMA Mode. 
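Because only some of those conditions are latched, the handler below reads BASR, MR and SR together and infers the most likely cause. Its decision structure, condensed (register and flag names from the driver header; read_reg() hypothetical; not a drop-in handler):

    #include <linux/interrupt.h>

    extern unsigned char read_reg(int reg);    /* hypothetical accessor */

    static irqreturn_t intr_sketch(int irq, void *dev_id)
    {
            int handled = 0;
            unsigned char basr = read_reg(BUS_AND_STATUS_REG);

            if (basr & BASR_IRQ) {
                    unsigned char mr = read_reg(MODE_REG);
                    unsigned char sr = read_reg(STATUS_REG);

                    if (mr & (MR_DMA_MODE | MR_MONITOR_BSY)) {
                            /* Probably end of DMA, phase mismatch or loss of BSY. */
                    } else if ((sr & (SR_SEL | SR_IO | SR_BSY | SR_RST)) ==
                               (SR_SEL | SR_IO)) {
                            /* Probably reselected by a target. */
                    } else {
                            /* Probably a bus reset; ack by reading RPIR. */
                            (void)read_reg(RESET_PARITY_INTERRUPT_REG);
                    }
                    handled = 1;
            }
            return IRQ_RETVAL(handled);
    }

The actual handler also verifies that the chip's own ID appears on the data bus before treating SEL+IO as a reselection.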
*/ -static irqreturn_t NCR5380_intr(int dummy, void *dev_id) +static irqreturn_t NCR5380_intr(int irq, void *dev_id) { - NCR5380_local_declare(); struct Scsi_Host *instance = dev_id; - struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata; - int done; + struct NCR5380_hostdata *hostdata = shost_priv(instance); + int handled = 0; unsigned char basr; unsigned long flags; - dprintk(NDEBUG_INTR, "scsi : NCR5380 irq %d triggered\n", - instance->irq); + spin_lock_irqsave(&hostdata->lock, flags); + + basr = NCR5380_read(BUS_AND_STATUS_REG); + if (basr & BASR_IRQ) { + unsigned char mr = NCR5380_read(MODE_REG); + unsigned char sr = NCR5380_read(STATUS_REG); + + dsprintk(NDEBUG_INTR, instance, "IRQ %d, BASR 0x%02x, SR 0x%02x, MR 0x%02x\n", + irq, basr, sr, mr); - do { - done = 1; - spin_lock_irqsave(instance->host_lock, flags); - /* Look for pending interrupts */ - NCR5380_setup(instance); - basr = NCR5380_read(BUS_AND_STATUS_REG); - /* XXX dispatch to appropriate routine if found and done=0 */ - if (basr & BASR_IRQ) { - NCR5380_dprint(NDEBUG_INTR, instance); - if ((NCR5380_read(STATUS_REG) & (SR_SEL | SR_IO)) == (SR_SEL | SR_IO)) { - done = 0; - dprintk(NDEBUG_INTR, "scsi%d : SEL interrupt\n", instance->host_no); - NCR5380_reselect(instance); - (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG); - } else if (basr & BASR_PARITY_ERROR) { - dprintk(NDEBUG_INTR, "scsi%d : PARITY interrupt\n", instance->host_no); - (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG); - } else if ((NCR5380_read(STATUS_REG) & SR_RST) == SR_RST) { - dprintk(NDEBUG_INTR, "scsi%d : RESET interrupt\n", instance->host_no); - (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG); - } else { #if defined(REAL_DMA) - /* - * We should only get PHASE MISMATCH and EOP interrupts - * if we have DMA enabled, so do a sanity check based on - * the current setting of the MODE register. - */ + if ((mr & MR_DMA_MODE) || (mr & MR_MONITOR_BSY)) { + /* Probably End of DMA, Phase Mismatch or Loss of BSY. + * We ack IRQ after clearing Mode Register. Workarounds + * for End of DMA errata need to happen in DMA Mode. + */ - if ((NCR5380_read(MODE_REG) & MR_DMA) && ((basr & BASR_END_DMA_TRANSFER) || !(basr & BASR_PHASE_MATCH))) { - int transferred; + dsprintk(NDEBUG_INTR, instance, "interrupt in DMA mode\n"); - if (!hostdata->connected) - panic("scsi%d : received end of DMA interrupt with no connected cmd\n", instance->hostno); + int transferred; - transferred = (hostdata->dmalen - NCR5380_dma_residual(instance)); - hostdata->connected->SCp.this_residual -= transferred; - hostdata->connected->SCp.ptr += transferred; - hostdata->dmalen = 0; + if (!hostdata->connected) + panic("scsi%d : DMA interrupt with no connected cmd\n", + instance->hostno); - (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG); - - /* FIXME: we need to poll briefly then defer a workqueue task ! */ - NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG, BASR_ACK, 0, 2*HZ); + transferred = hostdata->dmalen - NCR5380_dma_residual(instance); + hostdata->connected->SCp.this_residual -= transferred; + hostdata->connected->SCp.ptr += transferred; + hostdata->dmalen = 0; - NCR5380_write(MODE_REG, MR_BASE); - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - } -#else - dprintk(NDEBUG_INTR, "scsi : unknown interrupt, BASR 0x%X, MR 0x%X, SR 0x%x\n", basr, NCR5380_read(MODE_REG), NCR5380_read(STATUS_REG)); - (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG); -#endif + /* FIXME: we need to poll briefly then defer a workqueue task ! 
*/ + NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG, BASR_ACK, 0, 2 * HZ); + + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); + NCR5380_write(MODE_REG, MR_BASE); + NCR5380_read(RESET_PARITY_INTERRUPT_REG); + } else +#endif /* REAL_DMA */ + if ((NCR5380_read(CURRENT_SCSI_DATA_REG) & hostdata->id_mask) && + (sr & (SR_SEL | SR_IO | SR_BSY | SR_RST)) == (SR_SEL | SR_IO)) { + /* Probably reselected */ + NCR5380_write(SELECT_ENABLE_REG, 0); + NCR5380_read(RESET_PARITY_INTERRUPT_REG); + + dsprintk(NDEBUG_INTR, instance, "interrupt with SEL and IO\n"); + + if (!hostdata->connected) { + NCR5380_reselect(instance); + queue_work(hostdata->work_q, &hostdata->main_task); } - } /* if BASR_IRQ */ - spin_unlock_irqrestore(instance->host_lock, flags); - if(!done) - schedule_delayed_work(&hostdata->coroutine, 0); - } while (!done); - return IRQ_HANDLED; + if (!hostdata->connected) + NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); + } else { + /* Probably Bus Reset */ + NCR5380_read(RESET_PARITY_INTERRUPT_REG); + + dsprintk(NDEBUG_INTR, instance, "unknown interrupt\n"); + } + handled = 1; + } else { + shost_printk(KERN_NOTICE, instance, "interrupt without IRQ bit\n"); + } + + spin_unlock_irqrestore(&hostdata->lock, flags); + + return IRQ_RETVAL(handled); } -#endif +#endif -/* +/* * Function : int NCR5380_select(struct Scsi_Host *instance, - * struct scsi_cmnd *cmd) + * struct scsi_cmnd *cmd) * * Purpose : establishes I_T_L or I_T_L_Q nexus for new or existing command, - * including ARBITRATION, SELECTION, and initial message out for - * IDENTIFY and queue messages. - * - * Inputs : instance - instantiation of the 5380 driver on which this - * target lives, cmd - SCSI command to execute. - * - * Returns : -1 if selection could not execute for some reason, - * 0 if selection succeeded or failed because the target - * did not respond. - * - * Side effects : - * If bus busy, arbitration failed, etc, NCR5380_select() will exit - * with registers as they should have been on entry - ie - * SELECT_ENABLE will be set appropriately, the NCR5380 - * will cease to drive any SCSI bus signals. - * - * If successful : I_T_L or I_T_L_Q nexus will be established, - * instance->connected will be set to cmd. - * SELECT interrupt will be disabled. - * - * If failed (no target) : cmd->scsi_done() will be called, and the - * cmd->result host byte set to DID_BAD_TARGET. - * - * Locks: caller holds hostdata lock in IRQ mode + * including ARBITRATION, SELECTION, and initial message out for + * IDENTIFY and queue messages. + * + * Inputs : instance - instantiation of the 5380 driver on which this + * target lives, cmd - SCSI command to execute. + * + * Returns cmd if selection failed but should be retried, + * NULL if selection failed and should not be retried, or + * NULL if selection succeeded (hostdata->connected == cmd). + * + * Side effects : + * If bus busy, arbitration failed, etc, NCR5380_select() will exit + * with registers as they should have been on entry - ie + * SELECT_ENABLE will be set appropriately, the NCR5380 + * will cease to drive any SCSI bus signals. + * + * If successful : I_T_L or I_T_L_Q nexus will be established, + * instance->connected will be set to cmd. + * SELECT interrupt will be disabled. + * + * If failed (no target) : cmd->scsi_done() will be called, and the + * cmd->result host byte set to DID_BAD_TARGET. 
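Given those return semantics — the command pointer when selection failed but may be retried, NULL when it succeeded or must not be retried — the caller's pattern is roughly as follows (cf. NCR5380_main() earlier in this diff; try_next_command() is a hypothetical wrapper, and locking is elided):

    static void try_next_command(struct Scsi_Host *instance)
    {
            struct scsi_cmnd *cmd = dequeue_next_cmd(instance);

            if (!cmd)
                    return;
            if (NCR5380_select(instance, cmd))
                    requeue_cmd(instance, cmd);     /* retryable failure */
            /* else: cmd is now connected, or was completed (e.g. DID_BAD_TARGET) */
    }

In the driver itself this logic lives inline in the NCR5380_main() loop rather than in a separate function.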
*/ - -static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd) + +static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance, + struct scsi_cmnd *cmd) { - NCR5380_local_declare(); - struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata; + struct NCR5380_hostdata *hostdata = shost_priv(instance); unsigned char tmp[3], phase; unsigned char *data; int len; - unsigned long timeout; - unsigned char value; int err; - NCR5380_setup(instance); - - if (hostdata->selecting) - goto part2; - - hostdata->restart_select = 0; NCR5380_dprint(NDEBUG_ARBITRATION, instance); - dprintk(NDEBUG_ARBITRATION, "scsi%d : starting arbitration, id = %d\n", instance->host_no, instance->this_id); + dsprintk(NDEBUG_ARBITRATION, instance, "starting arbitration, id = %d\n", + instance->this_id); - /* - * Set the phase bits to 0, otherwise the NCR5380 won't drive the + /* + * Arbitration and selection phases are slow and involve dropping the + * lock, so we have to watch out for EH. An exception handler may + * change 'selecting' to NULL. This function will then return NULL + * so that the caller will forget about 'cmd'. (During information + * transfer phases, EH may change 'connected' to NULL.) + */ + hostdata->selecting = cmd; + + /* + * Set the phase bits to 0, otherwise the NCR5380 won't drive the * data bus during SELECTION. */ NCR5380_write(TARGET_COMMAND_REG, 0); - /* + /* * Start arbitration. */ NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask); NCR5380_write(MODE_REG, MR_ARBITRATE); + /* The chip now waits for BUS FREE phase. Then after the 800 ns + * Bus Free Delay, arbitration will begin. + */ - /* We can be relaxed here, interrupts are on, we are - in workqueue context, the birds are singing in the trees */ - spin_unlock_irq(instance->host_lock); - err = NCR5380_poll_politely(instance, INITIATOR_COMMAND_REG, ICR_ARBITRATION_PROGRESS, ICR_ARBITRATION_PROGRESS, 5*HZ); - spin_lock_irq(instance->host_lock); + spin_unlock_irq(&hostdata->lock); + err = NCR5380_poll_politely2(instance, MODE_REG, MR_ARBITRATE, 0, + INITIATOR_COMMAND_REG, ICR_ARBITRATION_PROGRESS, + ICR_ARBITRATION_PROGRESS, HZ); + spin_lock_irq(&hostdata->lock); + if (!(NCR5380_read(MODE_REG) & MR_ARBITRATE)) { + /* Reselection interrupt */ + goto out; + } + if (!hostdata->selecting) { + /* Command was aborted */ + NCR5380_write(MODE_REG, MR_BASE); + goto out; + } if (err < 0) { - printk(KERN_DEBUG "scsi: arbitration timeout at %d\n", __LINE__); NCR5380_write(MODE_REG, MR_BASE); - NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); - goto failed; + shost_printk(KERN_ERR, instance, + "select: arbitration timeout\n"); + goto out; } + spin_unlock_irq(&hostdata->lock); - dprintk(NDEBUG_ARBITRATION, "scsi%d : arbitration complete\n", instance->host_no); - - /* - * The arbitration delay is 2.2us, but this is a minimum and there is - * no maximum so we can safely sleep for ceil(2.2) usecs to accommodate - * the integral nature of udelay(). 
- * - */ - + /* The SCSI-2 arbitration delay is 2.4 us */ udelay(3); /* Check for lost arbitration */ - if ((NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) || (NCR5380_read(CURRENT_SCSI_DATA_REG) & hostdata->id_higher_mask) || (NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST)) { - NCR5380_write(MODE_REG, MR_BASE); - dprintk(NDEBUG_ARBITRATION, "scsi%d : lost arbitration, deasserting MR_ARBITRATE\n", instance->host_no); - goto failed; - } - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_SEL); - - if (!(hostdata->flags & FLAG_DTC3181E) && - /* RvC: DTC3181E has some trouble with this - * so we simply removed it. Seems to work with - * only Mustek scanner attached - */ + if ((NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) || + (NCR5380_read(CURRENT_SCSI_DATA_REG) & hostdata->id_higher_mask) || (NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST)) { NCR5380_write(MODE_REG, MR_BASE); - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - dprintk(NDEBUG_ARBITRATION, "scsi%d : lost arbitration, deasserting ICR_ASSERT_SEL\n", instance->host_no); - goto failed; + dsprintk(NDEBUG_ARBITRATION, instance, "lost arbitration, deasserting MR_ARBITRATE\n"); + spin_lock_irq(&hostdata->lock); + goto out; } - /* - * Again, bus clear + bus settle time is 1.2us, however, this is + + /* After/during arbitration, BSY should be asserted. + * IBM DPES-31080 Version S31Q works now + * Tnx to Thomas_Roesch@m2.maus.de for finding this! (Roman) + */ + NCR5380_write(INITIATOR_COMMAND_REG, + ICR_BASE | ICR_ASSERT_SEL | ICR_ASSERT_BSY); + + /* + * Again, bus clear + bus settle time is 1.2us, however, this is * a minimum so we'll udelay ceil(1.2) */ - udelay(2); + if (hostdata->flags & FLAG_TOSHIBA_DELAY) + udelay(15); + else + udelay(2); + + spin_lock_irq(&hostdata->lock); - dprintk(NDEBUG_ARBITRATION, "scsi%d : won arbitration\n", instance->host_no); + /* NCR5380_reselect() clears MODE_REG after a reselection interrupt */ + if (!(NCR5380_read(MODE_REG) & MR_ARBITRATE)) + goto out; + + if (!hostdata->selecting) { + NCR5380_write(MODE_REG, MR_BASE); + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); + goto out; + } - /* - * Now that we have won arbitration, start Selection process, asserting + dsprintk(NDEBUG_ARBITRATION, instance, "won arbitration\n"); + + /* + * Now that we have won arbitration, start Selection process, asserting * the host and target ID's on the SCSI bus. */ - NCR5380_write(OUTPUT_DATA_REG, (hostdata->id_mask | (1 << scmd_id(cmd)))); + NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask | (1 << scmd_id(cmd))); - /* + /* * Raise ATN while SEL is true before BSY goes false from arbitration, * since this is the only way to guarantee that we'll get a MESSAGE OUT * phase immediately after selection. */ - NCR5380_write(INITIATOR_COMMAND_REG, (ICR_BASE | ICR_ASSERT_BSY | ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_SEL)); + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_BSY | + ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_SEL); NCR5380_write(MODE_REG, MR_BASE); - /* + /* * Reselect interrupts must be turned off prior to the dropping of BSY, * otherwise we will trigger an interrupt. */ NCR5380_write(SELECT_ENABLE_REG, 0); + spin_unlock_irq(&hostdata->lock); + /* - * The initiator shall then wait at least two deskew delays and release + * The initiator shall then wait at least two deskew delays and release * the BSY signal. 
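 *
 * For reference, the SCSI-2 minimums being honoured around here, and
 * the whole-microsecond delays this function rounds them up to (a
 * worked summary of the comments in this function, not new timings):
 *
 *	arbitration delay	2400 ns   ->  udelay(3)
 *	bus clear + settle	1200 ns   ->  udelay(2)
 *	two deskew delays	2 * 45 ns ->  udelay(1)
 *	bus free delay		 800 ns   ->  timed by the chip itself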
*/ - udelay(1); /* wingel -- wait two bus deskew delay >2*45ns */ + udelay(1); /* wingel -- wait two bus deskew delay >2*45ns */ /* Reset BSY */ - NCR5380_write(INITIATOR_COMMAND_REG, (ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_SEL)); + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA | + ICR_ASSERT_ATN | ICR_ASSERT_SEL); - /* + /* * Something weird happens when we cease to drive BSY - looks - * like the board/chip is letting us do another read before the + * like the board/chip is letting us do another read before the * appropriate propagation delay has expired, and we're confusing * a BSY signal from ourselves as the target's response to SELECTION. * * A small delay (the 'C++' frontend breaks the pipeline with an * unnecessary jump, making it work on my 386-33/Trantor T128, the - * tighter 'C' code breaks and requires this) solves the problem - - * the 1 us delay is arbitrary, and only used because this delay will - * be the same on other platforms and since it works here, it should + * tighter 'C' code breaks and requires this) solves the problem - + * the 1 us delay is arbitrary, and only used because this delay will + * be the same on other platforms and since it works here, it should * work there. * * wingel suggests that this could be due to failing to wait @@ -1339,50 +1168,43 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd) udelay(1); - dprintk(NDEBUG_SELECTION, "scsi%d : selecting target %d\n", instance->host_no, scmd_id(cmd)); + dsprintk(NDEBUG_SELECTION, instance, "selecting target %d\n", scmd_id(cmd)); - /* - * The SCSI specification calls for a 250 ms timeout for the actual + /* + * The SCSI specification calls for a 250 ms timeout for the actual * selection. */ - timeout = jiffies + msecs_to_jiffies(250); - - /* - * XXX very interesting - we're seeing a bounce where the BSY we - * asserted is being reflected / still asserted (propagation delay?) - * and it's detecting as true. Sigh. 
- */ - - hostdata->select_time = 0; /* we count the clock ticks at which we polled */ - hostdata->selecting = cmd; + err = NCR5380_poll_politely(instance, STATUS_REG, SR_BSY, SR_BSY, + msecs_to_jiffies(250)); -part2: - /* RvC: here we enter after a sleeping period, or immediately after - execution of part 1 - we poll only once ech clock tick */ - value = NCR5380_read(STATUS_REG) & (SR_BSY | SR_IO); - - if (!value && (hostdata->select_time < HZ/4)) { - /* RvC: we still must wait for a device response */ - hostdata->select_time++; /* after 25 ticks the device has failed */ - NCR5380_set_timer(hostdata, 1); - return 0; /* RvC: we return here with hostdata->selecting set, - to go to sleep */ - } - - hostdata->selecting = NULL;/* clear this pointer, because we passed the - waiting period */ if ((NCR5380_read(STATUS_REG) & (SR_SEL | SR_IO)) == (SR_SEL | SR_IO)) { + spin_lock_irq(&hostdata->lock); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); NCR5380_reselect(instance); - printk("scsi%d : reselection after won arbitration?\n", instance->host_no); + if (!hostdata->connected) + NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); + shost_printk(KERN_ERR, instance, "reselection after won arbitration?\n"); + goto out; + } + + if (err < 0) { + spin_lock_irq(&hostdata->lock); + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); - return -1; + /* Can't touch cmd if it has been reclaimed by the scsi ML */ + if (hostdata->selecting) { + cmd->result = DID_BAD_TARGET << 16; + complete_cmd(instance, cmd); + dsprintk(NDEBUG_SELECTION, instance, "target did not respond within 250ms\n"); + cmd = NULL; + } + goto out; } - /* - * No less than two deskew delays after the initiator detects the - * BSY signal is true, it shall release the SEL signal and may + + /* + * No less than two deskew delays after the initiator detects the + * BSY signal is true, it shall release the SEL signal and may * change the DATA BUS. -wingel */ @@ -1390,53 +1212,38 @@ part2: NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); - if (!(NCR5380_read(STATUS_REG) & SR_BSY)) { - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - if (hostdata->targets_present & (1 << scmd_id(cmd))) { - printk(KERN_DEBUG "scsi%d : weirdness\n", instance->host_no); - if (hostdata->restart_select) - printk(KERN_DEBUG "\trestart select\n"); - NCR5380_dprint(NDEBUG_SELECTION, instance); - NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); - return -1; - } - cmd->result = DID_BAD_TARGET << 16; - cmd->scsi_done(cmd); - NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); - dprintk(NDEBUG_SELECTION, "scsi%d : target did not respond within 250ms\n", instance->host_no); - NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); - return 0; - } - hostdata->targets_present |= (1 << scmd_id(cmd)); - /* - * Since we followed the SCSI spec, and raised ATN while SEL + * Since we followed the SCSI spec, and raised ATN while SEL * was true but before BSY was false during selection, the information * transfer phase should be a MESSAGE OUT phase so that we can send the * IDENTIFY message. - * + * * If SCSI-II tagged queuing is enabled, we also send a SIMPLE_QUEUE_TAG * message (2 bytes) with a tag ID that we increment with every command * until it wraps back to 0. * * XXX - it turns out that there are some broken SCSI-II devices, - * which claim to support tagged queuing but fail when more than - * some number of commands are issued at once. 
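 *
 * For illustration, the layout of the IDENTIFY byte built below by the
 * IDENTIFY() macro (a sketch of the encoding, not new driver code):
 *
 *	bit 7		always set, marking the message as IDENTIFY
 *	bit 6		DiscPriv: set only when we have an IRQ, since
 *			disconnect/reselect depends on the interrupt
 *	bits 2..0	logical unit number
 *
 * so IDENTIFY(1, 0) == 0xc0 and IDENTIFY(0, 2) == 0x82.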
+ * which claim to support tagged queuing but fail when more than + * some number of commands are issued at once. */ /* Wait for start of REQ/ACK handshake */ - spin_unlock_irq(instance->host_lock); err = NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, SR_REQ, HZ); - spin_lock_irq(instance->host_lock); - - if(err) { - printk(KERN_ERR "scsi%d: timeout at NCR5380.c:%d\n", instance->host_no, __LINE__); + spin_lock_irq(&hostdata->lock); + if (err < 0) { + shost_printk(KERN_ERR, instance, "select: REQ timeout\n"); + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); - goto failed; + goto out; + } + if (!hostdata->selecting) { + do_abort(instance); + goto out; } - dprintk(NDEBUG_SELECTION, "scsi%d : target %d selected, going into MESSAGE OUT phase.\n", instance->host_no, cmd->device->id); + dsprintk(NDEBUG_SELECTION, instance, "target %d selected, going into MESSAGE OUT phase.\n", + scmd_id(cmd)); tmp[0] = IDENTIFY(((instance->irq == NO_IRQ) ? 0 : 1), cmd->device->lun); len = 1; @@ -1446,104 +1253,82 @@ part2: data = tmp; phase = PHASE_MSGOUT; NCR5380_transfer_pio(instance, &phase, &len, &data); - dprintk(NDEBUG_SELECTION, "scsi%d : nexus established.\n", instance->host_no); + dsprintk(NDEBUG_SELECTION, instance, "nexus established.\n"); /* XXX need to handle errors here */ + hostdata->connected = cmd; - hostdata->busy[cmd->device->id] |= (1 << (cmd->device->lun & 0xFF)); + hostdata->busy[cmd->device->id] |= 1 << cmd->device->lun; initialize_SCp(cmd); - return 0; - - /* Selection failed */ -failed: - return -1; + cmd = NULL; +out: + if (!hostdata->selecting) + return NULL; + hostdata->selecting = NULL; + return cmd; } -/* - * Function : int NCR5380_transfer_pio (struct Scsi_Host *instance, - * unsigned char *phase, int *count, unsigned char **data) +/* + * Function : int NCR5380_transfer_pio (struct Scsi_Host *instance, + * unsigned char *phase, int *count, unsigned char **data) * * Purpose : transfers data in given phase using polled I/O * - * Inputs : instance - instance of driver, *phase - pointer to - * what phase is expected, *count - pointer to number of - * bytes to transfer, **data - pointer to data pointer. - * + * Inputs : instance - instance of driver, *phase - pointer to + * what phase is expected, *count - pointer to number of + * bytes to transfer, **data - pointer to data pointer. + * * Returns : -1 when different phase is entered without transferring - * maximum number of bytes, 0 if all bytes or transferred or exit - * is in same phase. + * maximum number of bytes, 0 if all bytes are transferred or exit + * is in same phase. * - * Also, *phase, *count, *data are modified in place. + * Also, *phase, *count, *data are modified in place. * * XXX Note : handling for bus free may be useful. */ /* - * Note : this code is not as quick as it could be, however it + * Note : this code is not as quick as it could be, however it * IS 100% reliable, and for the actual data transfer where speed * counts, we will always do a pseudo DMA or DMA transfer. 
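 *
 * One REQ/ACK handshake from the loop below, reduced to its register
 * accesses (an illustrative sketch for the read direction; the ATN and
 * output-data variants are handled in the code that follows):
 *
 *	NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, SR_REQ, HZ);
 *	// read INPUT_DATA_REG (or drive OUTPUT_DATA_REG when sending)
 *	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK);
 *	NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, 0, 5 * HZ);
 *	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);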
*/ -static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data) { - NCR5380_local_declare(); +static int NCR5380_transfer_pio(struct Scsi_Host *instance, + unsigned char *phase, int *count, + unsigned char **data) +{ unsigned char p = *phase, tmp; int c = *count; unsigned char *d = *data; - /* - * RvC: some administrative data to process polling time - */ - int break_allowed = 0; - struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata; - NCR5380_setup(instance); - if (!(p & SR_IO)) - dprintk(NDEBUG_PIO, "scsi%d : pio write %d bytes\n", instance->host_no, c); - else - dprintk(NDEBUG_PIO, "scsi%d : pio read %d bytes\n", instance->host_no, c); - - /* - * The NCR5380 chip will only drive the SCSI bus when the + /* + * The NCR5380 chip will only drive the SCSI bus when the * phase specified in the appropriate bits of the TARGET COMMAND * REGISTER match the STATUS REGISTER */ - NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(p)); + NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(p)); - /* RvC: don't know if this is necessary, but other SCSI I/O is short - * so breaks are not necessary there - */ - if ((p == PHASE_DATAIN) || (p == PHASE_DATAOUT)) { - break_allowed = 1; - } do { - /* - * Wait for assertion of REQ, after which the phase bits will be - * valid - */ - - /* RvC: we simply poll once, after that we stop temporarily - * and let the device buffer fill up - * if breaking is not allowed, we keep polling as long as needed + /* + * Wait for assertion of REQ, after which the phase bits will be + * valid */ - /* FIXME */ - while (!((tmp = NCR5380_read(STATUS_REG)) & SR_REQ) && !break_allowed); - if (!(tmp & SR_REQ)) { - /* timeout condition */ - NCR5380_set_timer(hostdata, USLEEP_SLEEP); + if (NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, SR_REQ, HZ) < 0) break; - } - dprintk(NDEBUG_HANDSHAKE, "scsi%d : REQ detected\n", instance->host_no); + dsprintk(NDEBUG_HANDSHAKE, instance, "REQ asserted\n"); /* Check for phase mismatch */ - if ((tmp & PHASE_MASK) != p) { - dprintk(NDEBUG_HANDSHAKE, "scsi%d : phase mismatch\n", instance->host_no); - NCR5380_dprint_phase(NDEBUG_HANDSHAKE, instance); + if ((NCR5380_read(STATUS_REG) & PHASE_MASK) != p) { + dsprintk(NDEBUG_PIO, instance, "phase mismatch\n"); + NCR5380_dprint_phase(NDEBUG_PIO, instance); break; } + /* Do actual transfer from SCSI bus to / from memory */ if (!(p & SR_IO)) NCR5380_write(OUTPUT_DATA_REG, *d); @@ -1552,7 +1337,7 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase ++d; - /* + /* * The SCSI standard suggests that in MSGOUT phase, the initiator * should drop ATN on the last byte of the message phase * after REQ has been asserted for the handshake but before @@ -1563,29 +1348,34 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase if (!((p & SR_MSG) && c > 1)) { NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA); NCR5380_dprint(NDEBUG_PIO, instance); - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_ACK); + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | + ICR_ASSERT_DATA | ICR_ASSERT_ACK); } else { - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_ATN); + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | + ICR_ASSERT_DATA | ICR_ASSERT_ATN); NCR5380_dprint(NDEBUG_PIO, instance); - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_ACK); + 
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | + ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_ACK); } } else { NCR5380_dprint(NDEBUG_PIO, instance); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK); } - /* FIXME - if this fails bus reset ?? */ - NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, 0, 5*HZ); - dprintk(NDEBUG_HANDSHAKE, "scsi%d : req false, handshake complete\n", instance->host_no); + if (NCR5380_poll_politely(instance, + STATUS_REG, SR_REQ, 0, 5 * HZ) < 0) + break; + + dsprintk(NDEBUG_HANDSHAKE, instance, "REQ negated, handshake complete\n"); /* - * We have several special cases to consider during REQ/ACK handshaking : - * 1. We were in MSGOUT phase, and we are on the last byte of the - * message. ATN must be dropped as ACK is dropped. + * We have several special cases to consider during REQ/ACK handshaking : + * 1. We were in MSGOUT phase, and we are on the last byte of the + * message. ATN must be dropped as ACK is dropped. * - * 2. We are in a MSGIN phase, and we are on the last byte of the - * message. We must exit with ACK asserted, so that the calling - * code may raise ATN before dropping ACK to reject the message. + * 2. We are in a MSGIN phase, and we are on the last byte of the + * message. We must exit with ACK asserted, so that the calling + * code may raise ATN before dropping ACK to reject the message. * * 3. ACK and ATN are clear and the target may proceed as normal. */ @@ -1597,12 +1387,16 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase } } while (--c); - dprintk(NDEBUG_PIO, "scsi%d : residual %d\n", instance->host_no, c); + dsprintk(NDEBUG_PIO, instance, "residual %d\n", c); *count = c; *data = d; tmp = NCR5380_read(STATUS_REG); - if (tmp & SR_REQ) + /* The phase read from the bus is valid if either REQ is (already) + * asserted or if ACK hasn't been released yet. The latter applies if + * we're in MSG IN, DATA IN or STATUS and all bytes have been received. + */ + if ((tmp & SR_REQ) || ((tmp & SR_IO) && c == 0)) *phase = tmp & PHASE_MASK; else *phase = PHASE_UNKNOWN; @@ -1614,79 +1408,80 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase } /** - * do_reset - issue a reset command - * @host: adapter to reset + * do_reset - issue a reset command + * @instance: adapter to reset * - * Issue a reset sequence to the NCR5380 and try and get the bus - * back into sane shape. + * Issue a reset sequence to the NCR5380 and try and get the bus + * back into sane shape. * - * Locks: caller holds queue lock + * This clears the reset interrupt flag because there may be no handler for + * it. When the driver is initialized, the NCR5380_intr() handler has not yet + * been installed. And when in EH we may have released the ST DMA interrupt. */ - -static void do_reset(struct Scsi_Host *host) { - NCR5380_local_declare(); - NCR5380_setup(host); - NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(NCR5380_read(STATUS_REG) & PHASE_MASK)); +static void do_reset(struct Scsi_Host *instance) +{ + unsigned long flags; + + local_irq_save(flags); + NCR5380_write(TARGET_COMMAND_REG, + PHASE_SR_TO_TCR(NCR5380_read(STATUS_REG) & PHASE_MASK)); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_RST); - udelay(25); + udelay(50); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); + (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG); + local_irq_restore(flags); } -/* - * Function : do_abort (Scsi_Host *host) - * - * Purpose : abort the currently established nexus. 
Should only be - * called from a routine which can drop into a - * - * Returns : 0 on success, -1 on failure. - * - * Locks: queue lock held by caller - * FIXME: sort this out and get new_eh running +/** + * do_abort - abort the currently established nexus by going to + * MESSAGE OUT phase and sending an ABORT message. + * @instance: relevant scsi host instance + * + * Returns 0 on success, -1 on failure. */ -static int do_abort(struct Scsi_Host *host) { - NCR5380_local_declare(); +static int do_abort(struct Scsi_Host *instance) +{ unsigned char *msgptr, phase, tmp; int len; int rc; - NCR5380_setup(host); - /* Request message out phase */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); - /* - * Wait for the target to indicate a valid phase by asserting - * REQ. Once this happens, we'll have either a MSGOUT phase - * and can immediately send the ABORT message, or we'll have some + /* + * Wait for the target to indicate a valid phase by asserting + * REQ. Once this happens, we'll have either a MSGOUT phase + * and can immediately send the ABORT message, or we'll have some * other phase and will have to source/sink data. - * + * * We really don't care what value was on the bus or what value * the target sees, so we just handshake. */ - rc = NCR5380_poll_politely(host, STATUS_REG, SR_REQ, SR_REQ, 60 * HZ); - - if(rc < 0) - return -1; + rc = NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, SR_REQ, 10 * HZ); + if (rc < 0) + goto timeout; + + tmp = NCR5380_read(STATUS_REG) & PHASE_MASK; - tmp = (unsigned char)rc; - NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp)); - if ((tmp & PHASE_MASK) != PHASE_MSGOUT) { - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN | ICR_ASSERT_ACK); - rc = NCR5380_poll_politely(host, STATUS_REG, SR_REQ, 0, 3*HZ); + if (tmp != PHASE_MSGOUT) { + NCR5380_write(INITIATOR_COMMAND_REG, + ICR_BASE | ICR_ASSERT_ATN | ICR_ASSERT_ACK); + rc = NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, 0, 3 * HZ); + if (rc < 0) + goto timeout; NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); - if(rc == -1) - return -1; } + tmp = ABORT; msgptr = &tmp; len = 1; phase = PHASE_MSGOUT; - NCR5380_transfer_pio(host, &phase, &len, &msgptr); + NCR5380_transfer_pio(instance, &phase, &len, &msgptr); /* * If we got here, and the command completed successfully, @@ -1694,32 +1489,37 @@ static int do_abort(struct Scsi_Host *host) { */ return len ? -1 : 0; + +timeout: + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); + return -1; } #if defined(REAL_DMA) || defined(PSEUDO_DMA) || defined (REAL_DMA_POLL) -/* - * Function : int NCR5380_transfer_dma (struct Scsi_Host *instance, - * unsigned char *phase, int *count, unsigned char **data) +/* + * Function : int NCR5380_transfer_dma (struct Scsi_Host *instance, + * unsigned char *phase, int *count, unsigned char **data) * * Purpose : transfers data in given phase using either real - * or pseudo DMA. + * or pseudo DMA. * - * Inputs : instance - instance of driver, *phase - pointer to - * what phase is expected, *count - pointer to number of - * bytes to transfer, **data - pointer to data pointer. - * - * Returns : -1 when different phase is entered without transferring - * maximum number of bytes, 0 if all bytes or transferred or exit - * is in same phase. + * Inputs : instance - instance of driver, *phase - pointer to + * what phase is expected, *count - pointer to number of + * bytes to transfer, **data - pointer to data pointer. * - * Also, *phase, *count, *data are modified in place. 
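 *
 * The in-place update contract, sketched for a transfer that moved n
 * of the requested bytes (illustrative, using this function's names):
 *
 *	*count -= n;	// residual; reaches 0 on a complete transfer
 *	*data += n;	// advanced past the bytes already moved
 *	*phase = NCR5380_read(STATUS_REG) & PHASE_MASK;
 *
 * so a caller that sees a phase mismatch can retry with the same
 * arguments and no bookkeeping of its own.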
+ * Returns : -1 when different phase is entered without transferring + * maximum number of bytes, 0 if all bytes or transferred or exit + * is in same phase. * - * Locks: io_request lock held by caller + * Also, *phase, *count, *data are modified in place. */ -static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data) { - NCR5380_local_declare(); +static int NCR5380_transfer_dma(struct Scsi_Host *instance, + unsigned char *phase, int *count, + unsigned char **data) +{ + struct NCR5380_hostdata *hostdata = shost_priv(instance); register int c = *count; register unsigned char p = *phase; register unsigned char *d = *data; @@ -1730,54 +1530,47 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase unsigned char saved_data = 0, overrun = 0, residue; #endif - struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata; - - NCR5380_setup(instance); - if ((tmp = (NCR5380_read(STATUS_REG) & PHASE_MASK)) != p) { *phase = tmp; return -1; } #if defined(REAL_DMA) || defined(REAL_DMA_POLL) -#ifdef READ_OVERRUNS if (p & SR_IO) { - c -= 2; + if (!(hostdata->flags & FLAG_NO_DMA_FIXUPS)) + c -= 2; } -#endif - dprintk(NDEBUG_DMA, "scsi%d : initializing DMA channel %d for %s, %d bytes %s %0x\n", instance->host_no, instance->dma_channel, (p & SR_IO) ? "reading" : "writing", c, (p & SR_IO) ? "to" : "from", (unsigned) d); hostdata->dma_len = (p & SR_IO) ? NCR5380_dma_read_setup(instance, d, c) : NCR5380_dma_write_setup(instance, d, c); + + dsprintk(NDEBUG_DMA, instance, "initializing DMA %s: length %d, address %p\n", + (p & SR_IO) ? "receive" : "send", c, *data); #endif NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(p)); #ifdef REAL_DMA - NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE | MR_ENABLE_EOP_INTR | MR_MONITOR_BSY); + NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE | MR_MONITOR_BSY | + MR_ENABLE_EOP_INTR); #elif defined(REAL_DMA_POLL) - NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE); + NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE | MR_MONITOR_BSY); #else /* * Note : on my sample board, watch-dog timeouts occurred when interrupts - * were not disabled for the duration of a single DMA transfer, from + * were not disabled for the duration of a single DMA transfer, from * before the setting of DMA mode to after transfer of the last byte. */ -#if defined(PSEUDO_DMA) && defined(UNSAFE) - spin_unlock_irq(instance->host_lock); -#endif - /* KLL May need eop and parity in 53c400 */ - if (hostdata->flags & FLAG_NCR53C400) - NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE | - MR_ENABLE_PAR_CHECK | MR_ENABLE_PAR_INTR | - MR_ENABLE_EOP_INTR | MR_MONITOR_BSY); + if (hostdata->flags & FLAG_NO_DMA_FIXUP) + NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE | MR_MONITOR_BSY | + MR_ENABLE_EOP_INTR); else - NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE); + NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE | MR_MONITOR_BSY); #endif /* def REAL_DMA */ dprintk(NDEBUG_DMA, "scsi%d : mode reg = 0x%X\n", instance->host_no, NCR5380_read(MODE_REG)); - /* - * On the PAS16 at least I/O recovery delays are not needed here. - * Everyone else seems to want them. + /* + * On the PAS16 at least I/O recovery delays are not needed here. + * Everyone else seems to want them. 
*/ if (p & SR_IO) { @@ -1797,49 +1590,49 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase } while ((tmp & BASR_PHASE_MATCH) && !(tmp & (BASR_BUSY_ERROR | BASR_END_DMA_TRANSFER))); /* - At this point, either we've completed DMA, or we have a phase mismatch, - or we've unexpectedly lost BUSY (which is a real error). - - For write DMAs, we want to wait until the last byte has been - transferred out over the bus before we turn off DMA mode. Alas, there - seems to be no terribly good way of doing this on a 5380 under all - conditions. For non-scatter-gather operations, we can wait until REQ - and ACK both go false, or until a phase mismatch occurs. Gather-writes - are nastier, since the device will be expecting more data than we - are prepared to send it, and REQ will remain asserted. On a 53C8[01] we - could test LAST BIT SENT to assure transfer (I imagine this is precisely - why this signal was added to the newer chips) but on the older 538[01] - this signal does not exist. The workaround for this lack is a watchdog; - we bail out of the wait-loop after a modest amount of wait-time if - the usual exit conditions are not met. Not a terribly clean or - correct solution :-% - - Reads are equally tricky due to a nasty characteristic of the NCR5380. - If the chip is in DMA mode for an READ, it will respond to a target's - REQ by latching the SCSI data into the INPUT DATA register and asserting - ACK, even if it has _already_ been notified by the DMA controller that - the current DMA transfer has completed! If the NCR5380 is then taken - out of DMA mode, this already-acknowledged byte is lost. - - This is not a problem for "one DMA transfer per command" reads, because - the situation will never arise... either all of the data is DMA'ed - properly, or the target switches to MESSAGE IN phase to signal a - disconnection (either operation bringing the DMA to a clean halt). - However, in order to handle scatter-reads, we must work around the - problem. The chosen fix is to DMA N-2 bytes, then check for the - condition before taking the NCR5380 out of DMA mode. One or two extra - bytes are transferred via PIO as necessary to fill out the original - request. + * At this point, either we've completed DMA, or we have a phase mismatch, + * or we've unexpectedly lost BUSY (which is a real error). + * + * For DMA sends, we want to wait until the last byte has been + * transferred out over the bus before we turn off DMA mode. Alas, there + * seems to be no terribly good way of doing this on a 5380 under all + * conditions. For non-scatter-gather operations, we can wait until REQ + * and ACK both go false, or until a phase mismatch occurs. Gather-sends + * are nastier, since the device will be expecting more data than we + * are prepared to send it, and REQ will remain asserted. On a 53C8[01] we + * could test Last Byte Sent to assure transfer (I imagine this is precisely + * why this signal was added to the newer chips) but on the older 538[01] + * this signal does not exist. The workaround for this lack is a watchdog; + * we bail out of the wait-loop after a modest amount of wait-time if + * the usual exit conditions are not met. Not a terribly clean or + * correct solution :-% + * + * DMA receive is equally tricky due to a nasty characteristic of the NCR5380. 
+ * If the chip is in DMA receive mode, it will respond to a target's + * REQ by latching the SCSI data into the INPUT DATA register and asserting + * ACK, even if it has _already_ been notified by the DMA controller that + * the current DMA transfer has completed! If the NCR5380 is then taken + * out of DMA mode, this already-acknowledged byte is lost. This is + * not a problem for "one DMA transfer per READ command", because + * the situation will never arise... either all of the data is DMA'ed + * properly, or the target switches to MESSAGE IN phase to signal a + * disconnection (either operation bringing the DMA to a clean halt). + * However, in order to handle scatter-receive, we must work around the + * problem. The chosen fix is to DMA N-2 bytes, then check for the + * condition before taking the NCR5380 out of DMA mode. One or two extra + * bytes are transferred via PIO as necessary to fill out the original + * request. */ if (p & SR_IO) { -#ifdef READ_OVERRUNS - udelay(10); - if (((NCR5380_read(BUS_AND_STATUS_REG) & (BASR_PHASE_MATCH | BASR_ACK)) == (BASR_PHASE_MATCH | BASR_ACK))) { - saved_data = NCR5380_read(INPUT_DATA_REGISTER); - overrun = 1; + if (!(hostdata->flags & FLAG_NO_DMA_FIXUPS)) { + udelay(10); + if ((NCR5380_read(BUS_AND_STATUS_REG) & (BASR_PHASE_MATCH | BASR_ACK)) == + (BASR_PHASE_MATCH | BASR_ACK)) { + saved_data = NCR5380_read(INPUT_DATA_REGISTER); + overrun = 1; + } } -#endif } else { int limit = 100; while (((tmp = NCR5380_read(BUS_AND_STATUS_REG)) & BASR_ACK) || (NCR5380_read(STATUS_REG) & SR_REQ)) { @@ -1850,7 +1643,8 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase } } - dprintk(NDEBUG_DMA, "scsi%d : polled DMA transfer complete, basr 0x%X, sr 0x%X\n", instance->host_no, tmp, NCR5380_read(STATUS_REG)); + dsprintk(NDEBUG_DMA, "polled DMA transfer complete, basr 0x%02x, sr 0x%02x\n", + tmp, NCR5380_read(STATUS_REG)); NCR5380_write(MODE_REG, MR_BASE); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); @@ -1861,8 +1655,8 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase *data += c; *phase = NCR5380_read(STATUS_REG) & PHASE_MASK; -#ifdef READ_OVERRUNS - if (*phase == p && (p & SR_IO) && residue == 0) { + if (!(hostdata->flags & FLAG_NO_DMA_FIXUPS) && + *phase == p && (p & SR_IO) && residue == 0) { if (overrun) { dprintk(NDEBUG_DMA, "Got an input overrun, using saved byte\n"); **data = saved_data; @@ -1877,7 +1671,6 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase NCR5380_transfer_pio(instance, phase, &cnt, data); *count -= toPIO - cnt; } -#endif dprintk(NDEBUG_DMA, "Return with data ptr = 0x%X, count %d, last 0x%X, next 0x%X\n", *data, *count, *(*data + *count - 1), *(*data + *count)); return 0; @@ -1886,95 +1679,64 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase return 0; #else /* defined(REAL_DMA_POLL) */ if (p & SR_IO) { -#ifdef DMA_WORKS_RIGHT - foo = NCR5380_pread(instance, d, c); -#else - int diff = 1; - if (hostdata->flags & FLAG_NCR53C400) { - diff = 0; - } - if (!(foo = NCR5380_pread(instance, d, c - diff))) { + foo = NCR5380_pread(instance, d, + hostdata->flags & FLAG_NO_DMA_FIXUP ? 
c : c - 1); + if (!foo && !(hostdata->flags & FLAG_NO_DMA_FIXUP)) { /* - * We can't disable DMA mode after successfully transferring + * We can't disable DMA mode after successfully transferring * what we plan to be the last byte, since that would open up - * a race condition where if the target asserted REQ before + * a race condition where if the target asserted REQ before * we got the DMA mode reset, the NCR5380 would have latched * an additional byte into the INPUT DATA register and we'd * have dropped it. - * - * The workaround was to transfer one fewer bytes than we - * intended to with the pseudo-DMA read function, wait for + * + * The workaround was to transfer one fewer bytes than we + * intended to with the pseudo-DMA read function, wait for * the chip to latch the last byte, read it, and then disable * pseudo-DMA mode. - * + * * After REQ is asserted, the NCR5380 asserts DRQ and ACK. * REQ is deasserted when ACK is asserted, and not reasserted * until ACK goes false. Since the NCR5380 won't lower ACK * until DACK is asserted, which won't happen unless we twiddle - * the DMA port or we take the NCR5380 out of DMA mode, we - * can guarantee that we won't handshake another extra + * the DMA port or we take the NCR5380 out of DMA mode, we + * can guarantee that we won't handshake another extra * byte. */ - if (!(hostdata->flags & FLAG_NCR53C400)) { - while (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ)); - /* Wait for clean handshake */ - while (NCR5380_read(STATUS_REG) & SR_REQ); - d[c - 1] = NCR5380_read(INPUT_DATA_REG); + if (NCR5380_poll_politely(instance, BUS_AND_STATUS_REG, + BASR_DRQ, BASR_DRQ, HZ) < 0) { + foo = -1; + shost_printk(KERN_ERR, instance, "PDMA read: DRQ timeout\n"); } + if (NCR5380_poll_politely(instance, STATUS_REG, + SR_REQ, 0, HZ) < 0) { + foo = -1; + shost_printk(KERN_ERR, instance, "PDMA read: !REQ timeout\n"); + } + d[c - 1] = NCR5380_read(INPUT_DATA_REG); } -#endif } else { -#ifdef DMA_WORKS_RIGHT foo = NCR5380_pwrite(instance, d, c); -#else - int timeout; - dprintk(NDEBUG_C400_PWRITE, "About to pwrite %d bytes\n", c); - if (!(foo = NCR5380_pwrite(instance, d, c))) { + if (!foo && !(hostdata->flags & FLAG_NO_DMA_FIXUP)) { /* - * Wait for the last byte to be sent. If REQ is being asserted for - * the byte we're interested, we'll ACK it and it will go false. + * Wait for the last byte to be sent. If REQ is being asserted for + * the byte we're interested, we'll ACK it and it will go false. 
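 *
 * The receive-side counterpart, condensed from the code above (a
 * sketch; the real path also honours FLAG_NO_DMA_FIXUP and reports
 * timeouts):
 *
 *	if (!NCR5380_pread(instance, d, c - 1)) {	// all but the last byte
 *		NCR5380_poll_politely(instance, BUS_AND_STATUS_REG,
 *		                      BASR_DRQ, BASR_DRQ, HZ);	// byte latched
 *		NCR5380_poll_politely(instance, STATUS_REG,
 *		                      SR_REQ, 0, HZ);	// handshake done
 *		d[c - 1] = NCR5380_read(INPUT_DATA_REG);
 *	}
 *
 * which guarantees the 5380 never ACKs a byte we are not ready to
 * collect.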
*/ - if (!(hostdata->flags & FLAG_HAS_LAST_BYTE_SENT)) { - timeout = 20000; - while (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ) && (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)); - - if (!timeout) - dprintk(NDEBUG_LAST_BYTE_SENT, "scsi%d : timed out on last byte\n", instance->host_no); - - if (hostdata->flags & FLAG_CHECK_LAST_BYTE_SENT) { - hostdata->flags &= ~FLAG_CHECK_LAST_BYTE_SENT; - if (NCR5380_read(TARGET_COMMAND_REG) & TCR_LAST_BYTE_SENT) { - hostdata->flags |= FLAG_HAS_LAST_BYTE_SENT; - dprintk(NDEBUG_LAST_BYTE_SENT, "scsi%d : last byte sent works\n", instance->host_no); - } - } - } else { - dprintk(NDEBUG_C400_PWRITE, "Waiting for LASTBYTE\n"); - while (!(NCR5380_read(TARGET_COMMAND_REG) & TCR_LAST_BYTE_SENT)); - dprintk(NDEBUG_C400_PWRITE, "Got LASTBYTE\n"); + if (NCR5380_poll_politely2(instance, + BUS_AND_STATUS_REG, BASR_DRQ, BASR_DRQ, + BUS_AND_STATUS_REG, BASR_PHASE_MATCH, 0, HZ) < 0) { + foo = -1; + shost_printk(KERN_ERR, instance, "PDMA write: DRQ and phase timeout\n"); } } -#endif } NCR5380_write(MODE_REG, MR_BASE); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - - if ((!(p & SR_IO)) && (hostdata->flags & FLAG_NCR53C400)) { - dprintk(NDEBUG_C400_PWRITE, "53C400w: Checking for IRQ\n"); - if (NCR5380_read(BUS_AND_STATUS_REG) & BASR_IRQ) { - dprintk(NDEBUG_C400_PWRITE, "53C400w: got it, reading reset interrupt reg\n"); - NCR5380_read(RESET_PARITY_INTERRUPT_REG); - } else { - printk("53C400w: IRQ NOT THERE!\n"); - } - } + NCR5380_read(RESET_PARITY_INTERRUPT_REG); *data = d + c; *count = 0; *phase = NCR5380_read(STATUS_REG) & PHASE_MASK; -#if defined(PSEUDO_DMA) && defined(UNSAFE) - spin_lock_irq(instance->host_lock); -#endif /* defined(REAL_DMA_POLL) */ return foo; #endif /* def REAL_DMA */ } @@ -1983,40 +1745,34 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase /* * Function : NCR5380_information_transfer (struct Scsi_Host *instance) * - * Purpose : run through the various SCSI phases and do as the target - * directs us to. Operates on the currently connected command, - * instance->connected. + * Purpose : run through the various SCSI phases and do as the target + * directs us to. Operates on the currently connected command, + * instance->connected. * * Inputs : instance, instance for which we are doing commands * - * Side effects : SCSI things happen, the disconnected queue will be - * modified if a command disconnects, *instance->connected will - * change. + * Side effects : SCSI things happen, the disconnected queue will be + * modified if a command disconnects, *instance->connected will + * change. * - * XXX Note : we need to watch for bus free or a reset condition here - * to recover from an unexpected bus free condition. - * - * Locks: io_request_lock held by caller in IRQ mode + * XXX Note : we need to watch for bus free or a reset condition here + * to recover from an unexpected bus free condition. 
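 *
 * The shape of the loop, as a sketch (the details follow below):
 *
 *	while ((cmd = hostdata->connected)) {
 *		tmp = NCR5380_read(STATUS_REG);
 *		if (tmp & SR_REQ)
 *			switch (tmp & PHASE_MASK) {
 *			// DATAIN/DATAOUT, MSGIN, MSGOUT, CMDOUT, STATIN
 *			}
 *		else
 *			;	// drop the lock and poll politely for SR_REQ
 *	}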
*/ -static void NCR5380_information_transfer(struct Scsi_Host *instance) { - NCR5380_local_declare(); - struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *)instance->hostdata; +static void NCR5380_information_transfer(struct Scsi_Host *instance) +{ + struct NCR5380_hostdata *hostdata = shost_priv(instance); unsigned char msgout = NOP; int sink = 0; int len; -#if defined(PSEUDO_DMA) || defined(REAL_DMA_POLL) int transfersize; -#endif unsigned char *data; unsigned char phase, tmp, extended_msg[10], old_phase = 0xff; - struct scsi_cmnd *cmd = (struct scsi_cmnd *) hostdata->connected; - /* RvC: we need to set the end of the polling time */ - unsigned long poll_time = jiffies + USLEEP_POLL; + struct scsi_cmnd *cmd; - NCR5380_setup(instance); + while ((cmd = hostdata->connected)) { + struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd); - while (1) { tmp = NCR5380_read(STATUS_REG); /* We only have a valid SCSI phase when REQ is asserted */ if (tmp & SR_REQ) { @@ -2028,24 +1784,29 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) { if (sink && (phase != PHASE_MSGOUT)) { NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp)); - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN | ICR_ASSERT_ACK); - while (NCR5380_read(STATUS_REG) & SR_REQ); - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN | + ICR_ASSERT_ACK); + while (NCR5380_read(STATUS_REG) & SR_REQ) + ; + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | + ICR_ASSERT_ATN); sink = 0; continue; } + switch (phase) { - case PHASE_DATAIN: case PHASE_DATAOUT: #if (NDEBUG & NDEBUG_NO_DATAOUT) - printk("scsi%d : NDEBUG_NO_DATAOUT set, attempted DATAOUT aborted\n", instance->host_no); + shost_printk(KERN_DEBUG, instance, "NDEBUG_NO_DATAOUT set, attempted DATAOUT aborted\n"); sink = 1; do_abort(instance); cmd->result = DID_ERROR << 16; - cmd->scsi_done(cmd); + complete_cmd(instance, cmd); + hostdata->connected = NULL; return; #endif - /* + case PHASE_DATAIN: + /* * If there is no room left in the current buffer in the * scatter-gather list, move onto the next one. */ @@ -2055,10 +1816,13 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) { --cmd->SCp.buffers_residual; cmd->SCp.this_residual = cmd->SCp.buffer->length; cmd->SCp.ptr = sg_virt(cmd->SCp.buffer); - dprintk(NDEBUG_INFORMATION, "scsi%d : %d bytes and %d buffers left\n", instance->host_no, cmd->SCp.this_residual, cmd->SCp.buffers_residual); + dsprintk(NDEBUG_INFORMATION, instance, "%d bytes and %d buffers left\n", + cmd->SCp.this_residual, + cmd->SCp.buffers_residual); } + /* - * The preferred transfer method is going to be + * The preferred transfer method is going to be * PSEUDO-DMA for systems that are strictly PIO, * since we can let the hardware do the handshaking. * @@ -2068,51 +1832,43 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) { */ #if defined(PSEUDO_DMA) || defined(REAL_DMA_POLL) - /* KLL - * PSEUDO_DMA is defined here. If this is the g_NCR5380 - * driver then it will always be defined, so the - * FLAG_NO_PSEUDO_DMA is used to inhibit PDMA in the base - * NCR5380 case. I think this is a fairly clean solution. - * We supplement these 2 if's with the flag. 
- */ -#ifdef NCR5380_dma_xfer_len - if (!cmd->device->borken && !(hostdata->flags & FLAG_NO_PSEUDO_DMA) && (transfersize = NCR5380_dma_xfer_len(instance, cmd)) != 0) { -#else - transfersize = cmd->transfersize; - -#ifdef LIMIT_TRANSFERSIZE /* If we have problems with interrupt service */ - if (transfersize > 512) - transfersize = 512; -#endif /* LIMIT_TRANSFERSIZE */ - - if (!cmd->device->borken && transfersize && !(hostdata->flags & FLAG_NO_PSEUDO_DMA) && cmd->SCp.this_residual && !(cmd->SCp.this_residual % transfersize)) { - /* Limit transfers to 32K, for xx400 & xx406 - * pseudoDMA that transfers in 128 bytes blocks. */ - if (transfersize > 32 * 1024) - transfersize = 32 * 1024; -#endif + transfersize = 0; + if (!cmd->device->borken && + !(hostdata->flags & FLAG_NO_PSEUDO_DMA)) + transfersize = NCR5380_dma_xfer_len(instance, cmd, phase); + + if (transfersize) { len = transfersize; - if (NCR5380_transfer_dma(instance, &phase, &len, (unsigned char **) &cmd->SCp.ptr)) { + if (NCR5380_transfer_dma(instance, &phase, + &len, (unsigned char **)&cmd->SCp.ptr)) { /* - * If the watchdog timer fires, all future accesses to this - * device will use the polled-IO. + * If the watchdog timer fires, all future + * accesses to this device will use the + * polled-IO. */ scmd_printk(KERN_INFO, cmd, - "switching to slow handshake\n"); + "switching to slow handshake\n"); cmd->device->borken = 1; - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); sink = 1; do_abort(instance); cmd->result = DID_ERROR << 16; - cmd->scsi_done(cmd); /* XXX - need to source or sink data here, as appropriate */ } else cmd->SCp.this_residual -= transfersize - len; } else #endif /* defined(PSEUDO_DMA) || defined(REAL_DMA_POLL) */ - NCR5380_transfer_pio(instance, &phase, (int *) &cmd->SCp.this_residual, (unsigned char **) - &cmd->SCp.ptr); - break; + { + /* Break up transfer into 3 ms chunks, + * presuming 6 accesses per handshake. + */ + transfersize = min((unsigned long)cmd->SCp.this_residual, + hostdata->accesses_per_ms / 2); + len = transfersize; + NCR5380_transfer_pio(instance, &phase, &len, + (unsigned char **)&cmd->SCp.ptr); + cmd->SCp.this_residual -= transfersize - len; + } + return; case PHASE_MSGIN: len = 1; data = &tmp; @@ -2120,101 +1876,42 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) { cmd->SCp.Message = tmp; switch (tmp) { - /* - * Linking lets us reduce the time required to get the - * next command out to the device, hopefully this will - * mean we don't waste another revolution due to the delays - * required by ARBITRATION and another SELECTION. - * - * In the current implementation proposal, low level drivers - * merely have to start the next command, pointed to by - * next_link, done() is called as with unlinked commands. - */ -#ifdef LINKED - case LINKED_CMD_COMPLETE: - case LINKED_FLG_CMD_COMPLETE: - /* Accept message by clearing ACK */ - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - dprintk(NDEBUG_LINKED, "scsi%d : target %d lun %llu linked command complete.\n", instance->host_no, cmd->device->id, cmd->device->lun); - /* - * Sanity check : A linked command should only terminate with - * one of these messages if there are more linked commands - * available. 
- */ - if (!cmd->next_link) { - printk("scsi%d : target %d lun %llu linked command complete, no next_link\n" instance->host_no, cmd->device->id, cmd->device->lun); - sink = 1; - do_abort(instance); - return; - } - initialize_SCp(cmd->next_link); - /* The next command is still part of this process */ - cmd->next_link->tag = cmd->tag; - cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8); - dprintk(NDEBUG_LINKED, "scsi%d : target %d lun %llu linked request done, calling scsi_done().\n", instance->host_no, cmd->device->id, cmd->device->lun); - cmd->scsi_done(cmd); - cmd = hostdata->connected; - break; -#endif /* def LINKED */ case ABORT: case COMMAND_COMPLETE: /* Accept message by clearing ACK */ sink = 1; NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - hostdata->connected = NULL; - dprintk(NDEBUG_QUEUES, "scsi%d : command for target %d, lun %llu completed\n", instance->host_no, cmd->device->id, cmd->device->lun); - hostdata->busy[cmd->device->id] &= ~(1 << (cmd->device->lun & 0xFF)); - - /* - * I'm not sure what the correct thing to do here is : - * - * If the command that just executed is NOT a request - * sense, the obvious thing to do is to set the result - * code to the values of the stored parameters. - * - * If it was a REQUEST SENSE command, we need some way - * to differentiate between the failure code of the original - * and the failure code of the REQUEST sense - the obvious - * case is success, where we fall through and leave the result - * code unchanged. - * - * The non-obvious place is where the REQUEST SENSE failed - */ - - if (cmd->cmnd[0] != REQUEST_SENSE) - cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8); - else if (status_byte(cmd->SCp.Status) != GOOD) - cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16); + dsprintk(NDEBUG_QUEUES, instance, + "COMMAND COMPLETE %p target %d lun %llu\n", + cmd, scmd_id(cmd), cmd->device->lun); - if ((cmd->cmnd[0] == REQUEST_SENSE) && - hostdata->ses.cmd_len) { - scsi_eh_restore_cmnd(cmd, &hostdata->ses); - hostdata->ses.cmd_len = 0 ; - } - - if ((cmd->cmnd[0] != REQUEST_SENSE) && (status_byte(cmd->SCp.Status) == CHECK_CONDITION)) { - scsi_eh_prep_cmnd(cmd, &hostdata->ses, NULL, 0, ~0); - - dprintk(NDEBUG_AUTOSENSE, "scsi%d : performing request sense\n", instance->host_no); + hostdata->connected = NULL; - LIST(cmd, hostdata->issue_queue); - cmd->host_scribble = (unsigned char *) - hostdata->issue_queue; - hostdata->issue_queue = (struct scsi_cmnd *) cmd; - dprintk(NDEBUG_QUEUES, "scsi%d : REQUEST SENSE added to head of issue queue\n", instance->host_no); - } else { - cmd->scsi_done(cmd); + cmd->result &= ~0xffff; + cmd->result |= cmd->SCp.Status; + cmd->result |= cmd->SCp.Message << 8; + + if (cmd->cmnd[0] == REQUEST_SENSE) + complete_cmd(instance, cmd); + else { + if (cmd->SCp.Status == SAM_STAT_CHECK_CONDITION || + cmd->SCp.Status == SAM_STAT_COMMAND_TERMINATED) { + dsprintk(NDEBUG_QUEUES, instance, "autosense: adding cmd %p to tail of autosense queue\n", + cmd); + list_add_tail(&ncmd->list, + &hostdata->autosense); + } else + complete_cmd(instance, cmd); } - NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); - /* - * Restore phase bits to 0 so an interrupted selection, + /* + * Restore phase bits to 0 so an interrupted selection, * arbitration can resume. 
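 *
 * The COMMAND COMPLETE path above, in outline (a sketch; REQUEST SENSE
 * itself always completes directly rather than re-entering autosense):
 *
 *	cmd->result &= ~0xffff;
 *	cmd->result |= cmd->SCp.Status | (cmd->SCp.Message << 8);
 *	if (cmd->SCp.Status == SAM_STAT_CHECK_CONDITION ||
 *	    cmd->SCp.Status == SAM_STAT_COMMAND_TERMINATED)
 *		list_add_tail(&ncmd->list, &hostdata->autosense);
 *	else
 *		complete_cmd(instance, cmd);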
*/ NCR5380_write(TARGET_COMMAND_REG, 0); - while ((NCR5380_read(STATUS_REG) & SR_BSY) && !hostdata->connected) - barrier(); + /* Enable reselect interrupts */ + NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); return; case MESSAGE_REJECT: /* Accept message by clearing ACK */ @@ -2229,38 +1926,33 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) { default: break; } - case DISCONNECT:{ - /* Accept message by clearing ACK */ - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - cmd->device->disconnect = 1; - LIST(cmd, hostdata->disconnected_queue); - cmd->host_scribble = (unsigned char *) - hostdata->disconnected_queue; - hostdata->connected = NULL; - hostdata->disconnected_queue = cmd; - dprintk(NDEBUG_QUEUES, "scsi%d : command for target %d lun %llu was moved from connected to" " the disconnected_queue\n", instance->host_no, cmd->device->id, cmd->device->lun); - /* - * Restore phase bits to 0 so an interrupted selection, - * arbitration can resume. - */ - NCR5380_write(TARGET_COMMAND_REG, 0); - - /* Enable reselect interrupts */ - NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); - /* Wait for bus free to avoid nasty timeouts - FIXME timeout !*/ - /* NCR538_poll_politely(instance, STATUS_REG, SR_BSY, 0, 30 * HZ); */ - while ((NCR5380_read(STATUS_REG) & SR_BSY) && !hostdata->connected) - barrier(); - return; - } - /* + break; + case DISCONNECT: + /* Accept message by clearing ACK */ + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); + hostdata->connected = NULL; + list_add(&ncmd->list, &hostdata->disconnected); + dsprintk(NDEBUG_INFORMATION | NDEBUG_QUEUES, + instance, "connected command %p for target %d lun %llu moved to disconnected queue\n", + cmd, scmd_id(cmd), cmd->device->lun); + + /* + * Restore phase bits to 0 so an interrupted selection, + * arbitration can resume. + */ + NCR5380_write(TARGET_COMMAND_REG, 0); + + /* Enable reselect interrupts */ + NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); + return; + /* * The SCSI data pointer is *IMPLICITLY* saved on a disconnect - * operation, in violation of the SCSI spec so we can safely + * operation, in violation of the SCSI spec so we can safely * ignore SAVE/RESTORE pointers calls. * - * Unfortunately, some disks violate the SCSI spec and + * Unfortunately, some disks violate the SCSI spec and * don't issue the required SAVE_POINTERS message before - * disconnecting, and we have to break spec to remain + * disconnecting, and we have to break spec to remain * compatible. */ case SAVE_POINTERS: @@ -2269,31 +1961,28 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) { NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); break; case EXTENDED_MESSAGE: -/* - * Extended messages are sent in the following format : - * Byte - * 0 EXTENDED_MESSAGE == 1 - * 1 length (includes one byte for code, doesn't - * include first two bytes) - * 2 code - * 3..length+1 arguments - * - * Start the extended message buffer with the EXTENDED_MESSAGE - * byte, since spi_print_msg() wants the whole thing. - */ + /* + * Start the message buffer with the EXTENDED_MESSAGE + * byte, since spi_print_msg() wants the whole thing. 
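 *
 * The extended message wire format, kept here for reference:
 *
 *	byte 0		EXTENDED_MESSAGE (0x01)
 *	byte 1		length: counts the code byte and the arguments,
 *			but not these first two bytes
 *	byte 2		code, e.g. EXTENDED_SDTR
 *	bytes 3..len+1	arguments
 *
 * The length checks below reject 0 and anything too big for
 * extended_msg[].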
+ */ extended_msg[0] = EXTENDED_MESSAGE; /* Accept first byte by clearing ACK */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - dprintk(NDEBUG_EXTENDED, "scsi%d : receiving extended message\n", instance->host_no); + + spin_unlock_irq(&hostdata->lock); + + dsprintk(NDEBUG_EXTENDED, instance, "receiving extended message\n"); len = 2; data = extended_msg + 1; phase = PHASE_MSGIN; NCR5380_transfer_pio(instance, &phase, &len, &data); + dsprintk(NDEBUG_EXTENDED, instance, "length %d, code 0x%02x\n", + (int)extended_msg[1], + (int)extended_msg[2]); - dprintk(NDEBUG_EXTENDED, "scsi%d : length=%d, code=0x%02x\n", instance->host_no, (int) extended_msg[1], (int) extended_msg[2]); - - if (!len && extended_msg[1] <= (sizeof(extended_msg) - 1)) { + if (!len && extended_msg[1] > 0 && + extended_msg[1] <= sizeof(extended_msg) - 2) { /* Accept third byte by clearing ACK */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); len = extended_msg[1] - 1; @@ -2301,7 +1990,8 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) { phase = PHASE_MSGIN; NCR5380_transfer_pio(instance, &phase, &len, &data); - dprintk(NDEBUG_EXTENDED, "scsi%d : message received, residual %d\n", instance->host_no, len); + dsprintk(NDEBUG_EXTENDED, instance, "message received, residual %d\n", + len); switch (extended_msg[2]) { case EXTENDED_SDTR: @@ -2311,34 +2001,42 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) { tmp = 0; } } else if (len) { - printk("scsi%d: error receiving extended message\n", instance->host_no); + shost_printk(KERN_ERR, instance, "error receiving extended message\n"); tmp = 0; } else { - printk("scsi%d: extended message code %02x length %d is too long\n", instance->host_no, extended_msg[2], extended_msg[1]); + shost_printk(KERN_NOTICE, instance, "extended message code %02x length %d is too long\n", + extended_msg[2], extended_msg[1]); tmp = 0; } + + spin_lock_irq(&hostdata->lock); + if (!hostdata->connected) + return; + /* Fall through to reject message */ - /* - * If we get something weird that we aren't expecting, + /* + * If we get something weird that we aren't expecting, * reject it. 
*/ default: if (!tmp) { - printk("scsi%d: rejecting message ", instance->host_no); + shost_printk(KERN_ERR, instance, "rejecting message "); spi_print_msg(extended_msg); printk("\n"); } else if (tmp != EXTENDED_MESSAGE) scmd_printk(KERN_INFO, cmd, - "rejecting unknown message %02x\n",tmp); + "rejecting unknown message %02x\n", + tmp); else scmd_printk(KERN_INFO, cmd, - "rejecting unknown extended message code %02x, length %d\n", extended_msg[1], extended_msg[0]); + "rejecting unknown extended message code %02x, length %d\n", + extended_msg[1], extended_msg[0]); msgout = MESSAGE_REJECT; NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); break; - } /* switch (tmp) */ + } /* switch (tmp) */ break; case PHASE_MSGOUT: len = 1; @@ -2346,10 +2044,9 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) { hostdata->last_message = msgout; NCR5380_transfer_pio(instance, &phase, &len, &data); if (msgout == ABORT) { - hostdata->busy[cmd->device->id] &= ~(1 << (cmd->device->lun & 0xFF)); hostdata->connected = NULL; cmd->result = DID_ERROR << 16; - cmd->scsi_done(cmd); + complete_cmd(instance, cmd); NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); return; } @@ -2358,17 +2055,12 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) { case PHASE_CMDOUT: len = cmd->cmd_len; data = cmd->cmnd; - /* - * XXX for performance reasons, on machines with a - * PSEUDO-DMA architecture we should probably - * use the dma transfer function. + /* + * XXX for performance reasons, on machines with a + * PSEUDO-DMA architecture we should probably + * use the dma transfer function. */ NCR5380_transfer_pio(instance, &phase, &len, &data); - if (!cmd->device->disconnect && should_disconnect(cmd->cmnd[0])) { - NCR5380_set_timer(hostdata, USLEEP_SLEEP); - dprintk(NDEBUG_USLEEP, "scsi%d : issued command, sleeping until %lu\n", instance->host_no, hostdata->time_expires); - return; - } break; case PHASE_STATIN: len = 1; @@ -2377,46 +2069,37 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) { cmd->SCp.Status = tmp; break; default: - printk("scsi%d : unknown phase\n", instance->host_no); + shost_printk(KERN_ERR, instance, "unknown phase\n"); NCR5380_dprint(NDEBUG_ANY, instance); - } /* switch(phase) */ - } /* if (tmp * SR_REQ) */ - else { - /* RvC: go to sleep if polling time expired - */ - if (!cmd->device->disconnect && time_after_eq(jiffies, poll_time)) { - NCR5380_set_timer(hostdata, USLEEP_SLEEP); - dprintk(NDEBUG_USLEEP, "scsi%d : poll timed out, sleeping until %lu\n", instance->host_no, hostdata->time_expires); - return; - } + } /* switch(phase) */ + } else { + spin_unlock_irq(&hostdata->lock); + NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, SR_REQ, HZ); + spin_lock_irq(&hostdata->lock); } - } /* while (1) */ + } } /* * Function : void NCR5380_reselect (struct Scsi_Host *instance) * - * Purpose : does reselection, initializing the instance->connected - * field to point to the scsi_cmnd for which the I_T_L or I_T_L_Q - * nexus has been reestablished, - * - * Inputs : instance - this instance of the NCR5380. + * Purpose : does reselection, initializing the instance->connected + * field to point to the scsi_cmnd for which the I_T_L or I_T_L_Q + * nexus has been reestablished, * - * Locks: io_request_lock held by caller if IRQ driven + * Inputs : instance - this instance of the NCR5380. 
*/ -static void NCR5380_reselect(struct Scsi_Host *instance) { - NCR5380_local_declare(); - struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) - instance->hostdata; +static void NCR5380_reselect(struct Scsi_Host *instance) +{ + struct NCR5380_hostdata *hostdata = shost_priv(instance); unsigned char target_mask; unsigned char lun, phase; int len; unsigned char msg[3]; unsigned char *data; - struct scsi_cmnd *tmp = NULL, *prev; - int abort = 0; - NCR5380_setup(instance); + struct NCR5380_cmd *ncmd; + struct scsi_cmnd *tmp; /* * Disable arbitration, etc. since the host adapter obviously @@ -2424,12 +2107,12 @@ static void NCR5380_reselect(struct Scsi_Host *instance) { */ NCR5380_write(MODE_REG, MR_BASE); - hostdata->restart_select = 1; target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & ~(hostdata->id_mask); - dprintk(NDEBUG_SELECTION, "scsi%d : reselect\n", instance->host_no); - /* + dsprintk(NDEBUG_RESELECTION, instance, "reselect\n"); + + /* * At this point, we have detected that our SCSI ID is on the bus, * SEL is true and BSY was false for at least one bus settle delay * (400 ns). @@ -2439,103 +2122,110 @@ static void NCR5380_reselect(struct Scsi_Host *instance) { */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_BSY); - - /* FIXME: timeout too long, must fail to workqueue */ - if(NCR5380_poll_politely(instance, STATUS_REG, SR_SEL, 0, 2*HZ)<0) - abort = 1; - + if (NCR5380_poll_politely(instance, + STATUS_REG, SR_SEL, 0, 2 * HZ) < 0) { + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); + return; + } NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); /* * Wait for target to go into MSGIN. - * FIXME: timeout needed and fail to work queeu */ - if(NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, SR_REQ, 2*HZ)) - abort = 1; + if (NCR5380_poll_politely(instance, + STATUS_REG, SR_REQ, SR_REQ, 2 * HZ) < 0) { + do_abort(instance); + return; + } len = 1; data = msg; phase = PHASE_MSGIN; NCR5380_transfer_pio(instance, &phase, &len, &data); + if (len) { + do_abort(instance); + return; + } + if (!(msg[0] & 0x80)) { - printk(KERN_ERR "scsi%d : expecting IDENTIFY message, got ", instance->host_no); + shost_printk(KERN_ERR, instance, "expecting IDENTIFY message, got "); spi_print_msg(msg); - abort = 1; - } else { - /* Accept message by clearing ACK */ - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - lun = (msg[0] & 0x07); + printk("\n"); + do_abort(instance); + return; + } + lun = msg[0] & 0x07; - /* - * We need to add code for SCSI-II to track which devices have - * I_T_L_Q nexuses established, and which have simple I_T_L - * nexuses so we can chose to do additional data transfer. - */ + /* + * We need to add code for SCSI-II to track which devices have + * I_T_L_Q nexuses established, and which have simple I_T_L + * nexuses so we can choose to do additional data transfer. + */ - /* - * Find the command corresponding to the I_T_L or I_T_L_Q nexus we - * just reestablished, and remove it from the disconnected queue. - */ + /* + * Find the command corresponding to the I_T_L or I_T_L_Q nexus we + * just reestablished, and remove it from the disconnected queue.
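+ *
+ * (Illustrative note: the lookup below keys only on the target bit and
+ * LUN, i.e. an I_T_L nexus. Matching a tagged I_T_L_Q nexus would also
+ * have to compare the tag from a following SIMPLE_QUEUE_TAG message.)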
+ */ + tmp = NULL; + list_for_each_entry(ncmd, &hostdata->disconnected, list) { + struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd); - for (tmp = (struct scsi_cmnd *) hostdata->disconnected_queue, prev = NULL; tmp; prev = tmp, tmp = (struct scsi_cmnd *) tmp->host_scribble) - if ((target_mask == (1 << tmp->device->id)) && (lun == (u8)tmp->device->lun) - ) { - if (prev) { - REMOVE(prev, prev->host_scribble, tmp, tmp->host_scribble); - prev->host_scribble = tmp->host_scribble; - } else { - REMOVE(-1, hostdata->disconnected_queue, tmp, tmp->host_scribble); - hostdata->disconnected_queue = (struct scsi_cmnd *) tmp->host_scribble; - } - tmp->host_scribble = NULL; - break; - } - if (!tmp) { - printk(KERN_ERR "scsi%d : warning : target bitmask %02x lun %d not in disconnect_queue.\n", instance->host_no, target_mask, lun); - /* - * Since we have an established nexus that we can't do anything with, - * we must abort it. - */ - abort = 1; + if (target_mask == (1 << scmd_id(cmd)) && + lun == (u8)cmd->device->lun) { + list_del(&ncmd->list); + tmp = cmd; + break; } } - if (abort) { - do_abort(instance); + if (tmp) { + dsprintk(NDEBUG_RESELECTION | NDEBUG_QUEUES, instance, + "reselect: removed %p from disconnected queue\n", tmp); } else { - hostdata->connected = tmp; - dprintk(NDEBUG_RESELECTION, "scsi%d : nexus established, target = %d, lun = %llu, tag = %d\n", instance->host_no, tmp->device->id, tmp->device->lun, tmp->tag); + shost_printk(KERN_ERR, instance, "target bitmask 0x%02x lun %d not in disconnected queue.\n", + target_mask, lun); + /* + * Since we have an established nexus that we can't do anything + * with, we must abort it. + */ + do_abort(instance); + return; } + + /* Accept message by clearing ACK */ + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); + + hostdata->connected = tmp; + dsprintk(NDEBUG_RESELECTION, instance, "nexus established, target %d, lun %llu, tag %d\n", + scmd_id(tmp), tmp->device->lun, tmp->tag); } /* * Function : void NCR5380_dma_complete (struct Scsi_Host *instance) * * Purpose : called by interrupt handler when DMA finishes or a phase - * mismatch occurs (which would finish the DMA transfer). + * mismatch occurs (which would finish the DMA transfer). * * Inputs : instance - this instance of the NCR5380. * * Returns : pointer to the scsi_cmnd structure for which the I_T_L - * nexus has been reestablished, on failure NULL is returned. + * nexus has been reestablished, on failure NULL is returned. */ #ifdef REAL_DMA static void NCR5380_dma_complete(NCR5380_instance * instance) { - NCR5380_local_declare(); - struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata; + struct NCR5380_hostdata *hostdata = shost_priv(instance); int transferred; - NCR5380_setup(instance); /* * XXX this might not be right. * * Wait for final byte to transfer, ie wait for ACK to go false. 
* - * We should use the Last Byte Sent bit, unfortunately this is + * We should use the Last Byte Sent bit, unfortunately this is * not available on the 5380/5381 (only the various CMOS chips) * * FIXME: timeout, and need to handle long timeout/irq case @@ -2543,7 +2233,6 @@ static void NCR5380_dma_complete(NCR5380_instance * instance) { NCR5380_poll_politely(instance, BUS_AND_STATUS_REG, BASR_ACK, 0, 5*HZ); - NCR5380_write(MODE_REG, MR_BASE); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); /* @@ -2560,190 +2249,228 @@ } #endif /* def REAL_DMA */ -/* - * Function : int NCR5380_abort (struct scsi_cmnd *cmd) +/** + * list_find_cmd - test for presence of a command in a linked list + * @haystack: list of commands + * @needle: command to search for + */ + +static bool list_find_cmd(struct list_head *haystack, + struct scsi_cmnd *needle) +{ + struct NCR5380_cmd *ncmd; + + list_for_each_entry(ncmd, haystack, list) + if (NCR5380_to_scmd(ncmd) == needle) + return true; + return false; +} + +/** + * list_del_cmd - remove a command from linked list + * @haystack: list of commands + * @needle: command to remove + */ + +static bool list_del_cmd(struct list_head *haystack, + struct scsi_cmnd *needle) +{ + if (list_find_cmd(haystack, needle)) { + struct NCR5380_cmd *ncmd = scsi_cmd_priv(needle); + + list_del(&ncmd->list); + return true; + } + return false; +} + +/** + * NCR5380_abort - scsi host eh_abort_handler() method + * @cmd: the command to be aborted + * + * Try to abort a given command by removing it from queues and/or sending + * the target an abort message. This may not succeed in causing a target + * to abort the command. Nonetheless, the low-level driver must forget about + * the command because the mid-layer reclaims it and it may be re-issued. - * Purpose : abort a command + * The normal path taken by a command is as follows. For EH we trace this + * same path to locate and abort the command. - * Inputs : cmd - the scsi_cmnd to abort, code - code to set the - * host byte of the result field to, if zero DID_ABORTED is - * used. + * unissued -> selecting -> [unissued -> selecting ->]... connected -> + * [disconnected -> connected ->]... + * [autosense -> connected ->] done - * Returns : SUCCESS - success, FAILED on failure. + * If cmd was not found at all then presumably it has already been completed, + * in which case return SUCCESS to try to avoid further EH measures. - * XXX - there is no way to abort the command that is currently - * connected, you have to wait for it to complete. If this is - * a problem, we could implement longjmp() / setjmp(), setjmp() - * called where the loop started in NCR5380_main(). + * If the command has not completed yet, we must not fail to find it. + * We have no option but to forget the aborted command (even if it still + * lacks sense data). The mid-layer may re-issue a command that is in error + * recovery (see scsi_send_eh_cmnd), but the logic and data structures in + * this driver are such that a command can appear on one queue only. - * Locks: host lock taken by caller + * The lock protects driver data structures, but EH handlers also use it + * to serialize their own execution and prevent their own re-entry.
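+ *
+ * A minimal sketch of the resulting pattern (each queue is tried in
+ * turn under hostdata->lock, exactly as the handler below does):
+ *
+ *	if (list_del_cmd(&hostdata->unissued, cmd)) {
+ *		cmd->result = DID_ABORT << 16;
+ *		cmd->scsi_done(cmd);
+ *	}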
*/ static int NCR5380_abort(struct scsi_cmnd *cmd) { - NCR5380_local_declare(); struct Scsi_Host *instance = cmd->device->host; - struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata; - struct scsi_cmnd *tmp, **prev; + struct NCR5380_hostdata *hostdata = shost_priv(instance); + unsigned long flags; + int result = SUCCESS; - scmd_printk(KERN_WARNING, cmd, "aborting command\n"); + spin_lock_irqsave(&hostdata->lock, flags); - NCR5380_print_status(instance); +#if (NDEBUG & NDEBUG_ANY) + scmd_printk(KERN_INFO, cmd, __func__); +#endif + NCR5380_dprint(NDEBUG_ANY, instance); + NCR5380_dprint_phase(NDEBUG_ANY, instance); - NCR5380_setup(instance); + if (list_del_cmd(&hostdata->unissued, cmd)) { + dsprintk(NDEBUG_ABORT, instance, + "abort: removed %p from issue queue\n", cmd); + cmd->result = DID_ABORT << 16; + cmd->scsi_done(cmd); /* No tag or busy flag to worry about */ + goto out; + } - dprintk(NDEBUG_ABORT, "scsi%d : abort called\n", instance->host_no); - dprintk(NDEBUG_ABORT, " basr 0x%X, sr 0x%X\n", NCR5380_read(BUS_AND_STATUS_REG), NCR5380_read(STATUS_REG)); + if (hostdata->selecting == cmd) { + dsprintk(NDEBUG_ABORT, instance, + "abort: cmd %p == selecting\n", cmd); + hostdata->selecting = NULL; + cmd->result = DID_ABORT << 16; + complete_cmd(instance, cmd); + goto out; + } -#if 0 -/* - * Case 1 : If the command is the currently executing command, - * we'll set the aborted flag and return control so that - * information transfer routine can exit cleanly. - */ + if (list_del_cmd(&hostdata->disconnected, cmd)) { + dsprintk(NDEBUG_ABORT, instance, + "abort: removed %p from disconnected list\n", cmd); + /* Can't call NCR5380_select() and send ABORT because that + * means releasing the lock. Need a bus reset. + */ + set_host_byte(cmd, DID_ERROR); + complete_cmd(instance, cmd); + result = FAILED; + goto out; + } if (hostdata->connected == cmd) { - dprintk(NDEBUG_ABORT, "scsi%d : aborting connected command\n", instance->host_no); - hostdata->aborted = 1; -/* - * We should perform BSY checking, and make sure we haven't slipped - * into BUS FREE. - */ - - NCR5380_write(INITIATOR_COMMAND_REG, ICR_ASSERT_ATN); -/* - * Since we can't change phases until we've completed the current - * handshake, we have to source or sink a byte of data if the current - * phase is not MSGOUT. - */ - -/* - * Return control to the executing NCR drive so we can clear the - * aborted flag and get back into our main loop. - */ - - return SUCCESS; - } + dsprintk(NDEBUG_ABORT, instance, "abort: cmd %p is connected\n", cmd); + hostdata->connected = NULL; +#ifdef REAL_DMA + hostdata->dma_len = 0; #endif - -/* - * Case 2 : If the command hasn't been issued yet, we simply remove it - * from the issue queue. 
- */ - - dprintk(NDEBUG_ABORT, "scsi%d : abort going into loop.\n", instance->host_no); - for (prev = (struct scsi_cmnd **) &(hostdata->issue_queue), tmp = (struct scsi_cmnd *) hostdata->issue_queue; tmp; prev = (struct scsi_cmnd **) &(tmp->host_scribble), tmp = (struct scsi_cmnd *) tmp->host_scribble) - if (cmd == tmp) { - REMOVE(5, *prev, tmp, tmp->host_scribble); - (*prev) = (struct scsi_cmnd *) tmp->host_scribble; - tmp->host_scribble = NULL; - tmp->result = DID_ABORT << 16; - dprintk(NDEBUG_ABORT, "scsi%d : abort removed command from issue queue.\n", instance->host_no); - tmp->scsi_done(tmp); - return SUCCESS; + if (do_abort(instance)) { + set_host_byte(cmd, DID_ERROR); + complete_cmd(instance, cmd); + result = FAILED; + goto out; } -#if (NDEBUG & NDEBUG_ABORT) - /* KLL */ - else if (prev == tmp) - printk(KERN_ERR "scsi%d : LOOP\n", instance->host_no); -#endif - -/* - * Case 3 : If any commands are connected, we're going to fail the abort - * and let the high level SCSI driver retry at a later time or - * issue a reset. - * - * Timeouts, and therefore aborted commands, will be highly unlikely - * and handling them cleanly in this situation would make the common - * case of noresets less efficient, and would pollute our code. So, - * we fail. - */ - - if (hostdata->connected) { - dprintk(NDEBUG_ABORT, "scsi%d : abort failed, command connected.\n", instance->host_no); - return FAILED; + set_host_byte(cmd, DID_ABORT); + complete_cmd(instance, cmd); + goto out; } -/* - * Case 4: If the command is currently disconnected from the bus, and - * there are no connected commands, we reconnect the I_T_L or - * I_T_L_Q nexus associated with it, go into message out, and send - * an abort message. - * - * This case is especially ugly. In order to reestablish the nexus, we - * need to call NCR5380_select(). The easiest way to implement this - * function was to abort if the bus was busy, and let the interrupt - * handler triggered on the SEL for reselect take care of lost arbitrations - * where necessary, meaning interrupts need to be enabled. - * - * When interrupts are enabled, the queues may change - so we - * can't remove it from the disconnected queue before selecting it - * because that could cause a failure in hashing the nexus if that - * device reselected. - * - * Since the queues may change, we can't use the pointers from when we - * first locate it. - * - * So, we must first locate the command, and if NCR5380_select() - * succeeds, then issue the abort, relocate the command and remove - * it from the disconnected queue. 
- */ - for (tmp = (struct scsi_cmnd *) hostdata->disconnected_queue; tmp; tmp = (struct scsi_cmnd *) tmp->host_scribble) - if (cmd == tmp) { - dprintk(NDEBUG_ABORT, "scsi%d : aborting disconnected command.\n", instance->host_no); + if (list_del_cmd(&hostdata->autosense, cmd)) { + dsprintk(NDEBUG_ABORT, instance, + "abort: removed %p from sense queue\n", cmd); + set_host_byte(cmd, DID_ERROR); + complete_cmd(instance, cmd); + } - if (NCR5380_select(instance, cmd)) - return FAILED; - dprintk(NDEBUG_ABORT, "scsi%d : nexus reestablished.\n", instance->host_no); +out: + if (result == FAILED) + dsprintk(NDEBUG_ABORT, instance, "abort: failed to abort %p\n", cmd); + else + dsprintk(NDEBUG_ABORT, instance, "abort: successfully aborted %p\n", cmd); - do_abort(instance); + queue_work(hostdata->work_q, &hostdata->main_task); + spin_unlock_irqrestore(&hostdata->lock, flags); - for (prev = (struct scsi_cmnd **) &(hostdata->disconnected_queue), tmp = (struct scsi_cmnd *) hostdata->disconnected_queue; tmp; prev = (struct scsi_cmnd **) &(tmp->host_scribble), tmp = (struct scsi_cmnd *) tmp->host_scribble) - if (cmd == tmp) { - REMOVE(5, *prev, tmp, tmp->host_scribble); - *prev = (struct scsi_cmnd *) tmp->host_scribble; - tmp->host_scribble = NULL; - tmp->result = DID_ABORT << 16; - tmp->scsi_done(tmp); - return SUCCESS; - } - } -/* - * Case 5 : If we reached this point, the command was not found in any of - * the queues. - * - * We probably reached this point because of an unlikely race condition - * between the command completing successfully and the abortion code, - * so we won't panic, but we will notify the user in case something really - * broke. - */ - printk(KERN_WARNING "scsi%d : warning : SCSI command probably completed successfully\n" - " before abortion\n", instance->host_no); - return FAILED; + return result; } -/* - * Function : int NCR5380_bus_reset (struct scsi_cmnd *cmd) - * - * Purpose : reset the SCSI bus. - * - * Returns : SUCCESS +/** + * NCR5380_bus_reset - reset the SCSI bus + * @cmd: SCSI command undergoing EH * - * Locks: host lock taken by caller + * Returns SUCCESS */ static int NCR5380_bus_reset(struct scsi_cmnd *cmd) { struct Scsi_Host *instance = cmd->device->host; + struct NCR5380_hostdata *hostdata = shost_priv(instance); + int i; + unsigned long flags; + struct NCR5380_cmd *ncmd; + + spin_lock_irqsave(&hostdata->lock, flags); - NCR5380_local_declare(); - NCR5380_setup(instance); - NCR5380_print_status(instance); +#if (NDEBUG & NDEBUG_ANY) + scmd_printk(KERN_INFO, cmd, __func__); +#endif + NCR5380_dprint(NDEBUG_ANY, instance); + NCR5380_dprint_phase(NDEBUG_ANY, instance); - spin_lock_irq(instance->host_lock); do_reset(instance); - spin_unlock_irq(instance->host_lock); + + /* reset NCR registers */ + NCR5380_write(MODE_REG, MR_BASE); + NCR5380_write(TARGET_COMMAND_REG, 0); + NCR5380_write(SELECT_ENABLE_REG, 0); + + /* After the reset, there are no more connected or disconnected commands + * and no busy units; so clear the low-level status here to avoid + * conflicts when the mid-level code tries to wake up the affected + * commands! 
+ */ + + if (list_del_cmd(&hostdata->unissued, cmd)) { + cmd->result = DID_RESET << 16; + cmd->scsi_done(cmd); + } + + if (hostdata->selecting) { + hostdata->selecting->result = DID_RESET << 16; + complete_cmd(instance, hostdata->selecting); + hostdata->selecting = NULL; + } + + list_for_each_entry(ncmd, &hostdata->disconnected, list) { + struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd); + + set_host_byte(cmd, DID_RESET); + cmd->scsi_done(cmd); + } + INIT_LIST_HEAD(&hostdata->disconnected); + + list_for_each_entry(ncmd, &hostdata->autosense, list) { + struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd); + + set_host_byte(cmd, DID_RESET); + cmd->scsi_done(cmd); + } + INIT_LIST_HEAD(&hostdata->autosense); + + if (hostdata->connected) { + set_host_byte(hostdata->connected, DID_RESET); + complete_cmd(instance, hostdata->connected); + hostdata->connected = NULL; + } + + for (i = 0; i < 8; ++i) + hostdata->busy[i] = 0; +#ifdef REAL_DMA + hostdata->dma_len = 0; +#endif + + queue_work(hostdata->work_q, &hostdata->main_task); + spin_unlock_irqrestore(&hostdata->lock, flags); return SUCCESS; } diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h index 162112dd1bf8..a79288682a74 100644 --- a/drivers/scsi/NCR5380.h +++ b/drivers/scsi/NCR5380.h @@ -22,8 +22,13 @@ #ifndef NCR5380_H #define NCR5380_H +#include <linux/delay.h> #include <linux/interrupt.h> +#include <linux/list.h> +#include <linux/workqueue.h> +#include <scsi/scsi_dbg.h> #include <scsi/scsi_eh.h> +#include <scsi/scsi_transport_spi.h> #define NDEBUG_ARBITRATION 0x1 #define NDEBUG_AUTOSENSE 0x2 @@ -158,8 +163,7 @@ /* Write any value to this register to start an ini mode DMA receive */ #define START_DMA_INITIATOR_RECEIVE_REG 7 /* wo */ -#define C400_CONTROL_STATUS_REG NCR53C400_register_offset-8 /* rw */ - +/* NCR 53C400(A) Control Status Register bits: */ #define CSR_RESET 0x80 /* wo Resets 53c400 */ #define CSR_53C80_REG 0x80 /* ro 5380 registers busy */ #define CSR_TRANS_DIR 0x40 /* rw Data transfer direction */ @@ -176,16 +180,6 @@ #define CSR_BASE CSR_53C80_INTR #endif -/* Number of 128-byte blocks to be transferred */ -#define C400_BLOCK_COUNTER_REG NCR53C400_register_offset-7 /* rw */ - -/* Resume transfer after disconnect */ -#define C400_RESUME_TRANSFER_REG NCR53C400_register_offset-6 /* wo */ - -/* Access to host buffer stack */ -#define C400_HOST_BUFFER NCR53C400_register_offset-4 /* rw */ - - /* Note : PHASE_* macros are based on the values of the STATUS register */ #define PHASE_MASK (SR_MSG | SR_CD | SR_IO) @@ -205,16 +199,6 @@ #define PHASE_SR_TO_TCR(phase) ((phase) >> 2) -/* - * The internal should_disconnect() function returns these based on the - * expected length of a disconnect if a device supports disconnect/ - * reconnect. - */ - -#define DISCONNECT_NONE 0 -#define DISCONNECT_TIME_TO_DATA 1 -#define DISCONNECT_LONG 2 - /* * "Special" value for the (unsigned char) command tag, to indicate * I_T_L nexus instead of I_T_L_Q. 
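The busy[] array cleared at the end of NCR5380_bus_reset() above is a per-target bitmap of busy LUNs ("index = target, bit = lun" in the hostdata declaration in the next hunk). A minimal sketch of the convention, reusing the hostdata and cmd names this driver uses throughout:

	/* mark the LUN busy when a command is connected */
	hostdata->busy[scmd_id(cmd)] |= 1 << cmd->device->lun;

	/* ... and clear it again when the command completes */
	hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun);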
@@ -236,15 +220,11 @@ #define NO_IRQ 0 #endif -#define FLAG_HAS_LAST_BYTE_SENT 1 /* NCR53c81 or better */ -#define FLAG_CHECK_LAST_BYTE_SENT 2 /* Only test once */ -#define FLAG_NCR53C400 4 /* NCR53c400 */ +#define FLAG_NO_DMA_FIXUP 1 /* No DMA errata workarounds */ #define FLAG_NO_PSEUDO_DMA 8 /* Inhibit DMA */ -#define FLAG_DTC3181E 16 /* DTC3181E */ #define FLAG_LATE_DMA_SETUP 32 /* Setup NCR before DMA H/W */ #define FLAG_TAGGED_QUEUING 64 /* as X3T9.2 spelled it */ - -#ifndef ASM +#define FLAG_TOSHIBA_DELAY 128 /* Allow for borken CD-ROMs */ #ifdef SUPPORT_TAGS struct tag_alloc { @@ -258,33 +238,24 @@ struct NCR5380_hostdata { NCR5380_implementation_fields; /* implementation specific */ struct Scsi_Host *host; /* Host backpointer */ unsigned char id_mask, id_higher_mask; /* 1 << id, all bits greater */ - unsigned char targets_present; /* targets we have connected - to, so we can call a select - failure a retryable condition */ - volatile unsigned char busy[8]; /* index = target, bit = lun */ + unsigned char busy[8]; /* index = target, bit = lun */ #if defined(REAL_DMA) || defined(REAL_DMA_POLL) - volatile int dma_len; /* requested length of DMA */ + int dma_len; /* requested length of DMA */ #endif - volatile unsigned char last_message; /* last message OUT */ - volatile struct scsi_cmnd *connected; /* currently connected command */ - volatile struct scsi_cmnd *issue_queue; /* waiting to be issued */ - volatile struct scsi_cmnd *disconnected_queue; /* waiting for reconnect */ - volatile int restart_select; /* we have disconnected, - used to restart - NCR5380_select() */ - volatile unsigned aborted:1; /* flag, says aborted */ + unsigned char last_message; /* last message OUT */ + struct scsi_cmnd *connected; /* currently connected cmnd */ + struct scsi_cmnd *selecting; /* cmnd to be connected */ + struct list_head unissued; /* waiting to be issued */ + struct list_head autosense; /* priority issue queue */ + struct list_head disconnected; /* waiting for reconnect */ + spinlock_t lock; /* protects this struct */ int flags; - unsigned long time_expires; /* in jiffies, set prior to sleeping */ - int select_time; /* timer in select for target response */ - volatile struct scsi_cmnd *selecting; - struct delayed_work coroutine; /* our co-routine */ struct scsi_eh_save ses; + struct scsi_cmnd *sensing; char info[256]; int read_overruns; /* number of bytes to cut from a * transfer to handle chip overruns */ - int retain_dma_intr; struct work_struct main_task; - volatile int main_running; #ifdef SUPPORT_TAGS struct tag_alloc TagAlloc[8][8]; /* 8 targets and 8 LUNs */ #endif @@ -292,10 +263,23 @@ struct NCR5380_hostdata { unsigned spin_max_r; unsigned spin_max_w; #endif + struct workqueue_struct *work_q; + unsigned long accesses_per_ms; /* chip register accesses per ms */ }; #ifdef __KERNEL__ +struct NCR5380_cmd { + struct list_head list; +}; + +#define NCR5380_CMD_SIZE (sizeof(struct NCR5380_cmd)) + +static inline struct scsi_cmnd *NCR5380_to_scmd(struct NCR5380_cmd *ncmd_ptr) +{ + return ((struct scsi_cmnd *)ncmd_ptr) - 1; +} + #ifndef NDEBUG #define NDEBUG (0) #endif @@ -304,6 +288,11 @@ struct NCR5380_hostdata { do { if ((NDEBUG) & (flg)) \ printk(KERN_DEBUG fmt, ## __VA_ARGS__); } while (0) +#define dsprintk(flg, host, fmt, ...) 
\ + do { if ((NDEBUG) & (flg)) \ + shost_printk(KERN_DEBUG, host, fmt, ## __VA_ARGS__); \ + } while (0) + #if NDEBUG #define NCR5380_dprint(flg, arg) \ do { if ((NDEBUG) & (flg)) NCR5380_print(arg); } while (0) @@ -320,6 +309,7 @@ static void NCR5380_print(struct Scsi_Host *instance); static int NCR5380_probe_irq(struct Scsi_Host *instance, int possible); #endif static int NCR5380_init(struct Scsi_Host *instance, int flags); +static int NCR5380_maybe_reset_bus(struct Scsi_Host *); static void NCR5380_exit(struct Scsi_Host *instance); static void NCR5380_information_transfer(struct Scsi_Host *instance); #ifndef DONT_USE_INTR @@ -328,7 +318,7 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id); static void NCR5380_main(struct work_struct *work); static const char *NCR5380_info(struct Scsi_Host *instance); static void NCR5380_reselect(struct Scsi_Host *instance); -static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd); +static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *, struct scsi_cmnd *); #if defined(PSEUDO_DMA) || defined(REAL_DMA) || defined(REAL_DMA_POLL) static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data); #endif @@ -443,5 +433,4 @@ static __inline__ int NCR5380_pc_dma_residual(struct Scsi_Host *instance) #endif /* defined(i386) || defined(__alpha__) */ #endif /* defined(REAL_DMA) */ #endif /* __KERNEL__ */ -#endif /* ndef ASM */ #endif /* NCR5380_H */ diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index e4c243748a97..7dfd0fa27255 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c @@ -323,7 +323,6 @@ static inline int aac_valid_context(struct scsi_cmnd *scsicmd, if (unlikely(!scsicmd || !scsicmd->scsi_done)) { dprintk((KERN_WARNING "aac_valid_context: scsi command corrupt\n")); aac_fib_complete(fibptr); - aac_fib_free(fibptr); return 0; } scsicmd->SCp.phase = AAC_OWNER_MIDLEVEL; @@ -331,7 +330,6 @@ static inline int aac_valid_context(struct scsi_cmnd *scsicmd, if (unlikely(!device || !scsi_device_online(device))) { dprintk((KERN_WARNING "aac_valid_context: scsi device corrupt\n")); aac_fib_complete(fibptr); - aac_fib_free(fibptr); return 0; } return 1; @@ -541,7 +539,6 @@ static void get_container_name_callback(void *context, struct fib * fibptr) scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; aac_fib_complete(fibptr); - aac_fib_free(fibptr); scsicmd->scsi_done(scsicmd); } @@ -557,7 +554,8 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd) dev = (struct aac_dev *)scsicmd->device->host->hostdata; - if (!(cmd_fibcontext = aac_fib_alloc(dev))) + cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd); + if (!cmd_fibcontext) return -ENOMEM; aac_fib_init(cmd_fibcontext); @@ -586,7 +584,6 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd) printk(KERN_WARNING "aac_get_container_name: aac_fib_send failed with status: %d.\n", status); aac_fib_complete(cmd_fibcontext); - aac_fib_free(cmd_fibcontext); return -1; } @@ -1024,7 +1021,6 @@ static void get_container_serial_callback(void *context, struct fib * fibptr) scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; aac_fib_complete(fibptr); - aac_fib_free(fibptr); scsicmd->scsi_done(scsicmd); } @@ -1040,7 +1036,8 @@ static int aac_get_container_serial(struct scsi_cmnd * scsicmd) dev = (struct aac_dev *)scsicmd->device->host->hostdata; - if (!(cmd_fibcontext = aac_fib_alloc(dev))) + cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd); 
+ if (!cmd_fibcontext) return -ENOMEM; aac_fib_init(cmd_fibcontext); @@ -1068,7 +1065,6 @@ static int aac_get_container_serial(struct scsi_cmnd * scsicmd) printk(KERN_WARNING "aac_get_container_serial: aac_fib_send failed with status: %d.\n", status); aac_fib_complete(cmd_fibcontext); - aac_fib_free(cmd_fibcontext); return -1; } @@ -1869,7 +1865,6 @@ static void io_callback(void *context, struct fib * fibptr) break; } aac_fib_complete(fibptr); - aac_fib_free(fibptr); scsicmd->scsi_done(scsicmd); } @@ -1954,7 +1949,8 @@ static int aac_read(struct scsi_cmnd * scsicmd) /* * Alocate and initialize a Fib */ - if (!(cmd_fibcontext = aac_fib_alloc(dev))) { + cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd); + if (!cmd_fibcontext) { printk(KERN_WARNING "aac_read: fib allocation failed\n"); return -1; } @@ -2051,7 +2047,8 @@ static int aac_write(struct scsi_cmnd * scsicmd) /* * Allocate and initialize a Fib then setup a BlockWrite command */ - if (!(cmd_fibcontext = aac_fib_alloc(dev))) { + cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd); + if (!cmd_fibcontext) { /* FIB temporarily unavailable,not catastrophic failure */ /* scsicmd->result = DID_ERROR << 16; @@ -2285,7 +2282,7 @@ static int aac_start_stop(struct scsi_cmnd *scsicmd) /* * Allocate and initialize a Fib */ - cmd_fibcontext = aac_fib_alloc(aac); + cmd_fibcontext = aac_fib_alloc_tag(aac, scsicmd); if (!cmd_fibcontext) return SCSI_MLQUEUE_HOST_BUSY; @@ -3157,7 +3154,6 @@ static void aac_srb_callback(void *context, struct fib * fibptr) scsicmd->result |= le32_to_cpu(srbreply->scsi_status); aac_fib_complete(fibptr); - aac_fib_free(fibptr); scsicmd->scsi_done(scsicmd); } @@ -3187,9 +3183,10 @@ static int aac_send_srb_fib(struct scsi_cmnd* scsicmd) /* * Allocate and initialize a Fib then setup a BlockWrite command */ - if (!(cmd_fibcontext = aac_fib_alloc(dev))) { + cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd); + if (!cmd_fibcontext) return -1; - } + status = aac_adapter_scsi(cmd_fibcontext, scsicmd); /* diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index 074878b55a0b..efa493cf1bc6 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h @@ -62,7 +62,7 @@ enum { #define PMC_GLOBAL_INT_BIT0 0x00000001 #ifndef AAC_DRIVER_BUILD -# define AAC_DRIVER_BUILD 41010 +# define AAC_DRIVER_BUILD 41052 # define AAC_DRIVER_BRANCH "-ms" #endif #define MAXIMUM_NUM_CONTAINERS 32 @@ -94,6 +94,13 @@ enum { #define aac_phys_to_logical(x) ((x)+1) #define aac_logical_to_phys(x) ((x)?(x)-1:0) +/* + * These macros are for keeping track of + * character device state. 
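+ *
+ * (aac_cfg_major holds either a valid chrdev major number or one of
+ * these sentinels: AAC_CHARDEV_UNREGISTERED before any registration
+ * has succeeded, AAC_CHARDEV_NEEDS_REINIT once the last adapter has
+ * been removed, so that the next aac_probe_one() re-registers the
+ * "aac" device node -- see the linit.c hunks further down.)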
+ */ +#define AAC_CHARDEV_UNREGISTERED (-1) +#define AAC_CHARDEV_NEEDS_REINIT (-2) + /* #define AAC_DETAILED_STATUS_INFO */ struct diskparm @@ -944,6 +951,7 @@ struct fib { */ struct list_head fiblink; void *data; + u32 vector_no; struct hw_fib *hw_fib_va; /* Actual shared object */ dma_addr_t hw_fib_pa; /* physical address of hw_fib*/ }; @@ -1123,6 +1131,7 @@ struct aac_dev struct fib *free_fib; spinlock_t fib_lock; + struct mutex ioctl_mutex; struct aac_queue_block *queues; /* * The user API will use an IOCTL to register itself to receive @@ -1234,6 +1243,7 @@ struct aac_dev struct msix_entry msixentry[AAC_MAX_MSIX]; struct aac_msix_ctx aac_msix[AAC_MAX_MSIX]; /* context */ u8 adapter_shutdown; + u32 handle_pci_error; }; #define aac_adapter_interrupt(dev) \ @@ -2113,7 +2123,9 @@ static inline unsigned int cap_to_cyls(sector_t capacity, unsigned divisor) int aac_acquire_irq(struct aac_dev *dev); void aac_free_irq(struct aac_dev *dev); const char *aac_driverinfo(struct Scsi_Host *); +void aac_fib_vector_assign(struct aac_dev *dev); struct fib *aac_fib_alloc(struct aac_dev *dev); +struct fib *aac_fib_alloc_tag(struct aac_dev *dev, struct scsi_cmnd *scmd); int aac_fib_setup(struct aac_dev *dev); void aac_fib_map_free(struct aac_dev *dev); void aac_fib_free(struct fib * context); diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c index 54195a117f72..4b3bb52b5108 100644 --- a/drivers/scsi/aacraid/commctrl.c +++ b/drivers/scsi/aacraid/commctrl.c @@ -855,13 +855,20 @@ int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg) { int status; + mutex_lock(&dev->ioctl_mutex); + + if (dev->adapter_shutdown) { + status = -EACCES; + goto cleanup; + } + /* * HBA gets first crack */ status = aac_dev_ioctl(dev, cmd, arg); if (status != -ENOTTY) - return status; + goto cleanup; switch (cmd) { case FSACTL_MINIPORT_REV_CHECK: @@ -890,6 +897,10 @@ int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg) status = -ENOTTY; break; } + +cleanup: + mutex_unlock(&dev->ioctl_mutex); + return status; } diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c index 0e954e37f0b5..2b4e75380ae6 100644 --- a/drivers/scsi/aacraid/comminit.c +++ b/drivers/scsi/aacraid/comminit.c @@ -212,8 +212,11 @@ int aac_send_shutdown(struct aac_dev * dev) return -ENOMEM; aac_fib_init(fibctx); - cmd = (struct aac_close *) fib_data(fibctx); + mutex_lock(&dev->ioctl_mutex); + dev->adapter_shutdown = 1; + mutex_unlock(&dev->ioctl_mutex); + cmd = (struct aac_close *) fib_data(fibctx); cmd->command = cpu_to_le32(VM_CloseAll); cmd->cid = cpu_to_le32(0xfffffffe); @@ -229,7 +232,6 @@ int aac_send_shutdown(struct aac_dev * dev) /* FIB should be freed only after getting the response from the F/W */ if (status != -ERESTARTSYS) aac_fib_free(fibctx); - dev->adapter_shutdown = 1; if ((dev->pdev->device == PMC_DEVICE_S7 || dev->pdev->device == PMC_DEVICE_S8 || dev->pdev->device == PMC_DEVICE_S9) && diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index a1f90fe849c9..511bbc575062 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c @@ -83,13 +83,38 @@ static int fib_map_alloc(struct aac_dev *dev) void aac_fib_map_free(struct aac_dev *dev) { - pci_free_consistent(dev->pdev, - dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB), - dev->hw_fib_va, dev->hw_fib_pa); + if (dev->hw_fib_va && dev->max_fib_size) { + pci_free_consistent(dev->pdev, + (dev->max_fib_size * + (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB)), + 
dev->hw_fib_va, dev->hw_fib_pa); + } dev->hw_fib_va = NULL; dev->hw_fib_pa = 0; } +void aac_fib_vector_assign(struct aac_dev *dev) +{ + u32 i = 0; + u32 vector = 1; + struct fib *fibptr = NULL; + + for (i = 0, fibptr = &dev->fibs[i]; + i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); + i++, fibptr++) { + if ((dev->max_msix == 1) || + (i > ((dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1) + - dev->vector_cap))) { + fibptr->vector_no = 0; + } else { + fibptr->vector_no = vector; + vector++; + if (vector == dev->max_msix) + vector = 1; + } + } +} + /** * aac_fib_setup - setup the fibs * @dev: Adapter to set up @@ -137,6 +162,7 @@ int aac_fib_setup(struct aac_dev * dev) i++, fibptr++) { fibptr->flags = 0; + fibptr->size = sizeof(struct fib); fibptr->dev = dev; fibptr->hw_fib_va = hw_fib; fibptr->data = (void *) fibptr->hw_fib_va->data; @@ -151,18 +177,49 @@ int aac_fib_setup(struct aac_dev * dev) hw_fib_pa = hw_fib_pa + dev->max_fib_size + sizeof(struct aac_fib_xporthdr); } + + /* + * Assign vector numbers to fibs + */ + aac_fib_vector_assign(dev); + /* * Add the fib chain to the free list */ dev->fibs[dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1].next = NULL; /* - * Enable this to debug out of queue space - */ - dev->free_fib = &dev->fibs[0]; + * Set 8 fibs aside for management tools + */ + dev->free_fib = &dev->fibs[dev->scsi_host_ptr->can_queue]; return 0; } /** + * aac_fib_alloc_tag - allocate a fib using tags + * @dev: Adapter to allocate the fib for + * + * Allocate a fib from the adapter fib pool using tags + * from the blk layer. + */ + +struct fib *aac_fib_alloc_tag(struct aac_dev *dev, struct scsi_cmnd *scmd) +{ + struct fib *fibptr; + + fibptr = &dev->fibs[scmd->request->tag]; + /* + * Null out fields that depend on being zero at the start of + * each I/O + */ + fibptr->hw_fib_va->header.XferState = 0; + fibptr->type = FSAFS_NTC_FIB_CONTEXT; + fibptr->callback_data = NULL; + fibptr->callback = NULL; + + return fibptr; +} + +/** * aac_fib_alloc - allocate a fib * @dev: Adapter to allocate the fib for * diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c index da9d9936e995..d677b52860ae 100644 --- a/drivers/scsi/aacraid/dpcsup.c +++ b/drivers/scsi/aacraid/dpcsup.c @@ -394,7 +394,6 @@ unsigned int aac_intr_normal(struct aac_dev *dev, u32 index, fib->callback(fib->callback_data, fib); } else { aac_fib_complete(fib); - aac_fib_free(fib); } } else { unsigned long flagv; @@ -416,7 +415,6 @@ unsigned int aac_intr_normal(struct aac_dev *dev, u32 index, fib->done = 0; spin_unlock_irqrestore(&fib->event_lock, flagv); aac_fib_complete(fib); - aac_fib_free(fib); } } diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 3b6e5c67e853..21a67ed047e8 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -38,6 +38,7 @@ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/pci.h> +#include <linux/aer.h> #include <linux/pci-aspm.h> #include <linux/slab.h> #include <linux/mutex.h> @@ -79,7 +80,7 @@ MODULE_VERSION(AAC_DRIVER_FULL_VERSION); static DEFINE_MUTEX(aac_mutex); static LIST_HEAD(aac_devices); -static int aac_cfg_major = -1; +static int aac_cfg_major = AAC_CHARDEV_UNREGISTERED; char aac_driver_version[] = AAC_DRIVER_FULL_VERSION; /* @@ -454,6 +455,8 @@ static int aac_slave_configure(struct scsi_device *sdev) } else scsi_change_queue_depth(sdev, 1); + sdev->tagged_supported = 1; + return 0; } @@ -700,23 +703,18 @@ static int aac_cfg_open(struct inode *inode, struct file *file) static long
aac_cfg_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { - int ret; - struct aac_dev *aac; - aac = (struct aac_dev *)file->private_data; - if (!capable(CAP_SYS_RAWIO) || aac->adapter_shutdown) + struct aac_dev *aac = (struct aac_dev *)file->private_data; + + if (!capable(CAP_SYS_RAWIO)) return -EPERM; - mutex_lock(&aac_mutex); - ret = aac_do_ioctl(file->private_data, cmd, (void __user *)arg); - mutex_unlock(&aac_mutex); - return ret; + return aac_do_ioctl(aac, cmd, (void __user *)arg); } #ifdef CONFIG_COMPAT static long aac_compat_do_ioctl(struct aac_dev *dev, unsigned cmd, unsigned long arg) { long ret; - mutex_lock(&aac_mutex); switch (cmd) { case FSACTL_MINIPORT_REV_CHECK: case FSACTL_SENDFIB: @@ -750,7 +748,6 @@ static long aac_compat_do_ioctl(struct aac_dev *dev, unsigned cmd, unsigned long ret = -ENOIOCTLCMD; break; } - mutex_unlock(&aac_mutex); return ret; } @@ -1075,6 +1072,8 @@ static void __aac_shutdown(struct aac_dev * aac) int i; int cpu; + aac_send_shutdown(aac); + if (aac->aif_thread) { int i; /* Clear out events first */ @@ -1086,7 +1085,6 @@ static void __aac_shutdown(struct aac_dev * aac) } kthread_stop(aac->thread); } - aac_send_shutdown(aac); aac_adapter_disable_int(aac); cpu = cpumask_first(cpu_online_mask); if (aac->pdev->device == PMC_DEVICE_S6 || @@ -1120,6 +1118,13 @@ static void __aac_shutdown(struct aac_dev * aac) else if (aac->max_msix > 1) pci_disable_msix(aac->pdev); } +static void aac_init_char(void) +{ + aac_cfg_major = register_chrdev(0, "aac", &aac_cfg_fops); + if (aac_cfg_major < 0) { + pr_err("aacraid: unable to register \"aac\" device.\n"); + } +} static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) { @@ -1132,6 +1137,12 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) u64 dmamask; extern int aac_sync_mode; + /* + * Only series 7 needs freset. + */ + if (pdev->device == PMC_DEVICE_S7) + pdev->needs_freset = 1; + list_for_each_entry(aac, &aac_devices, entry) { if (aac->id > unique_id) break; @@ -1171,6 +1182,9 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) shost->max_cmd_len = 16; shost->use_cmd_list = 1; + if (aac_cfg_major == AAC_CHARDEV_NEEDS_REINIT) + aac_init_char(); + aac = (struct aac_dev *)shost->hostdata; aac->base_start = pci_resource_start(pdev, 0); aac->scsi_host_ptr = shost; @@ -1185,6 +1199,7 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) goto out_free_host; spin_lock_init(&aac->fib_lock); + mutex_init(&aac->ioctl_mutex); /* * Map in the registers from the adapter. */ @@ -1296,6 +1311,9 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) goto out_deinit; scsi_scan_host(shost); + pci_enable_pcie_error_reporting(pdev); + pci_save_state(pdev); + return 0; out_deinit: @@ -1317,8 +1335,7 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) return error; } -#if (defined(CONFIG_PM)) -void aac_release_resources(struct aac_dev *aac) +static void aac_release_resources(struct aac_dev *aac) { int i; @@ -1404,14 +1421,26 @@ static int aac_acquire_resources(struct aac_dev *dev) aac_adapter_enable_int(dev); - if (!dev->sync_mode) + /* max msix may change after EEH * Re-assign vectors to fibs */ + aac_fib_vector_assign(dev); + + if (!dev->sync_mode) { + /* After EEH recovery or suspend resume, max_msix count + * may change, therefore updating in init as well.
+ */ aac_adapter_start(dev); + dev->init->Sa_MSIXVectors = cpu_to_le32(dev->max_msix); + } return 0; error_iounmap: return -1; } + +#if (defined(CONFIG_PM)) static int aac_suspend(struct pci_dev *pdev, pm_message_t state) { @@ -1495,10 +1524,142 @@ static void aac_remove_one(struct pci_dev *pdev) pci_disable_device(pdev); if (list_empty(&aac_devices)) { unregister_chrdev(aac_cfg_major, "aac"); - aac_cfg_major = -1; + aac_cfg_major = AAC_CHARDEV_NEEDS_REINIT; + } +} + +static void aac_flush_ios(struct aac_dev *aac) +{ + int i; + struct scsi_cmnd *cmd; + + for (i = 0; i < aac->scsi_host_ptr->can_queue; i++) { + cmd = (struct scsi_cmnd *)aac->fibs[i].callback_data; + if (cmd && (cmd->SCp.phase == AAC_OWNER_FIRMWARE)) { + scsi_dma_unmap(cmd); + + if (aac->handle_pci_error) + cmd->result = DID_NO_CONNECT << 16; + else + cmd->result = DID_RESET << 16; + + cmd->scsi_done(cmd); + } + } +} + +static pci_ers_result_t aac_pci_error_detected(struct pci_dev *pdev, + enum pci_channel_state error) +{ + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct aac_dev *aac = shost_priv(shost); + + dev_err(&pdev->dev, "aacraid: PCI error detected %x\n", error); + + switch (error) { + case pci_channel_io_normal: + return PCI_ERS_RESULT_CAN_RECOVER; + case pci_channel_io_frozen: + aac->handle_pci_error = 1; + + scsi_block_requests(aac->scsi_host_ptr); + aac_flush_ios(aac); + aac_release_resources(aac); + + pci_disable_pcie_error_reporting(pdev); + aac_adapter_ioremap(aac, 0); + + return PCI_ERS_RESULT_NEED_RESET; + case pci_channel_io_perm_failure: + aac->handle_pci_error = 1; + + aac_flush_ios(aac); + return PCI_ERS_RESULT_DISCONNECT; } + + return PCI_ERS_RESULT_NEED_RESET; +} + +static pci_ers_result_t aac_pci_mmio_enabled(struct pci_dev *pdev) +{ + dev_err(&pdev->dev, "aacraid: PCI error - mmio enabled\n"); + return PCI_ERS_RESULT_NEED_RESET; } +static pci_ers_result_t aac_pci_slot_reset(struct pci_dev *pdev) +{ + dev_err(&pdev->dev, "aacraid: PCI error - slot reset\n"); + pci_restore_state(pdev); + if (pci_enable_device(pdev)) { + dev_warn(&pdev->dev, + "aacraid: failed to enable slave\n"); + goto fail_device; + } + + pci_set_master(pdev); + + if (pci_enable_device_mem(pdev)) { + dev_err(&pdev->dev, "pci_enable_device_mem failed\n"); + goto fail_device; + } + + return PCI_ERS_RESULT_RECOVERED; + +fail_device: + dev_err(&pdev->dev, "aacraid: PCI error - slot reset failed\n"); + return PCI_ERS_RESULT_DISCONNECT; +} + + +static void aac_pci_resume(struct pci_dev *pdev) +{ + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct scsi_device *sdev = NULL; + struct aac_dev *aac = (struct aac_dev *)shost_priv(shost); + + pci_cleanup_aer_uncorrect_error_status(pdev); + + if (aac_adapter_ioremap(aac, aac->base_size)) { + + dev_err(&pdev->dev, "aacraid: ioremap failed\n"); + /* remap failed, go back ... 
*/ + aac->comm_interface = AAC_COMM_PRODUCER; + if (aac_adapter_ioremap(aac, AAC_MIN_FOOTPRINT_SIZE)) { + dev_warn(&pdev->dev, + "aacraid: unable to map adapter.\n"); + + return; + } + } + + msleep(10000); + + aac_acquire_resources(aac); + + /* + * reset this flag to unblock ioctl() as it was set + * at aac_send_shutdown() to block ioctls from upperlayer + */ + aac->adapter_shutdown = 0; + aac->handle_pci_error = 0; + + shost_for_each_device(sdev, shost) + if (sdev->sdev_state == SDEV_OFFLINE) + sdev->sdev_state = SDEV_RUNNING; + scsi_unblock_requests(aac->scsi_host_ptr); + scsi_scan_host(aac->scsi_host_ptr); + pci_save_state(pdev); + + dev_err(&pdev->dev, "aacraid: PCI error - resume\n"); +} + +static struct pci_error_handlers aac_pci_err_handler = { + .error_detected = aac_pci_error_detected, + .mmio_enabled = aac_pci_mmio_enabled, + .slot_reset = aac_pci_slot_reset, + .resume = aac_pci_resume, +}; + static struct pci_driver aac_pci_driver = { .name = AAC_DRIVERNAME, .id_table = aac_pci_tbl, @@ -1509,6 +1670,7 @@ static struct pci_driver aac_pci_driver = { .resume = aac_resume, #endif .shutdown = aac_shutdown, + .err_handler = &aac_pci_err_handler, }; static int __init aac_init(void) @@ -1522,11 +1684,8 @@ static int __init aac_init(void) if (error < 0) return error; - aac_cfg_major = register_chrdev( 0, "aac", &aac_cfg_fops); - if (aac_cfg_major < 0) { - printk(KERN_WARNING - "aacraid: unable to register \"aac\" device.\n"); - } + aac_init_char(); + return 0; } diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c index 2aa34ea8ceb1..bc0203f3d243 100644 --- a/drivers/scsi/aacraid/src.c +++ b/drivers/scsi/aacraid/src.c @@ -156,8 +156,8 @@ static irqreturn_t aac_src_intr_message(int irq, void *dev_id) break; if (dev->msi_enabled && dev->max_msix > 1) atomic_dec(&dev->rrq_outstanding[vector_no]); - aac_intr_normal(dev, handle-1, 0, isFastResponse, NULL); dev->host_rrq[index++] = 0; + aac_intr_normal(dev, handle-1, 0, isFastResponse, NULL); if (index == (vector_no + 1) * dev->vector_cap) index = vector_no * dev->vector_cap; dev->host_rrq_idx[vector_no] = index; @@ -452,36 +452,20 @@ static int aac_src_deliver_message(struct fib *fib) #endif u16 hdr_size = le16_to_cpu(fib->hw_fib_va->header.Size); + u16 vector_no; atomic_inc(&q->numpending); if (dev->msi_enabled && fib->hw_fib_va->header.Command != AifRequest && dev->max_msix > 1) { - u_int16_t vector_no, first_choice = 0xffff; - - vector_no = dev->fibs_pushed_no % dev->max_msix; - do { - vector_no += 1; - if (vector_no == dev->max_msix) - vector_no = 1; - if (atomic_read(&dev->rrq_outstanding[vector_no]) < - dev->vector_cap) - break; - if (0xffff == first_choice) - first_choice = vector_no; - else if (vector_no == first_choice) - break; - } while (1); - if (vector_no == first_choice) - vector_no = 0; - atomic_inc(&dev->rrq_outstanding[vector_no]); - if (dev->fibs_pushed_no == 0xffffffff) - dev->fibs_pushed_no = 0; - else - dev->fibs_pushed_no++; + vector_no = fib->vector_no; fib->hw_fib_va->header.Handle += (vector_no << 16); + } else { + vector_no = 0; } + atomic_inc(&dev->rrq_outstanding[vector_no]); + if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) { /* Calculate the amount to the fibsize bits */ fibsize = (hdr_size + 127) / 128 - 1; diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c index 5b8b2937a3fe..7db448ec8beb 100644 --- a/drivers/scsi/aha1542.c +++ b/drivers/scsi/aha1542.c @@ -403,6 +403,9 @@ static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd) cptr = kmalloc(sizeof(*cptr) * 
sg_count, GFP_KERNEL | GFP_DMA); if (!cptr) return SCSI_MLQUEUE_HOST_BUSY; + } else { + sg_count = 0; + cptr = NULL; } /* Use the outgoing mailboxes in a round-robin fashion, because this diff --git a/drivers/scsi/aic7xxx/aic79xx.h b/drivers/scsi/aic7xxx/aic79xx.h index df2e0e5367d2..d47b527b25dd 100644 --- a/drivers/scsi/aic7xxx/aic79xx.h +++ b/drivers/scsi/aic7xxx/aic79xx.h @@ -624,7 +624,7 @@ struct scb { }; TAILQ_HEAD(scb_tailq, scb); -LIST_HEAD(scb_list, scb); +BSD_LIST_HEAD(scb_list, scb); struct scb_data { /* @@ -1069,7 +1069,7 @@ struct ahd_softc { /* * SCBs that have been sent to the controller */ - LIST_HEAD(, scb) pending_scbs; + BSD_LIST_HEAD(, scb) pending_scbs; /* * Current register window mode information. diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.h b/drivers/scsi/aic7xxx/aic79xx_osm.h index c58fa33c6592..728193a42e6e 100644 --- a/drivers/scsi/aic7xxx/aic79xx_osm.h +++ b/drivers/scsi/aic7xxx/aic79xx_osm.h @@ -65,11 +65,6 @@ /* Core SCSI definitions */ #define AIC_LIB_PREFIX ahd -/* Name space conflict with BSD queue macros */ -#ifdef LIST_HEAD -#undef LIST_HEAD -#endif - #include "cam.h" #include "queue.h" #include "scsi_message.h" diff --git a/drivers/scsi/aic7xxx/aic7xxx.h b/drivers/scsi/aic7xxx/aic7xxx.h index f695774645c1..4ce4e903a759 100644 --- a/drivers/scsi/aic7xxx/aic7xxx.h +++ b/drivers/scsi/aic7xxx/aic7xxx.h @@ -916,7 +916,7 @@ struct ahc_softc { /* * SCBs that have been sent to the controller */ - LIST_HEAD(, scb) pending_scbs; + BSD_LIST_HEAD(, scb) pending_scbs; /* * Counting lock for deferring the release of additional diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c index b846a4683562..fc6a83188c1e 100644 --- a/drivers/scsi/aic7xxx/aic7xxx_osm.c +++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c @@ -1336,6 +1336,7 @@ ahc_platform_set_tags(struct ahc_softc *ahc, struct scsi_device *sdev, case AHC_DEV_Q_TAGGED: scsi_change_queue_depth(sdev, dev->openings + dev->active); + break; default: /* * We allow the OS to queue 2 untagged transactions to diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.h b/drivers/scsi/aic7xxx/aic7xxx_osm.h index bc4cca92ff04..54c702864103 100644 --- a/drivers/scsi/aic7xxx/aic7xxx_osm.h +++ b/drivers/scsi/aic7xxx/aic7xxx_osm.h @@ -82,11 +82,6 @@ /* Core SCSI definitions */ #define AIC_LIB_PREFIX ahc -/* Name space conflict with BSD queue macros */ -#ifdef LIST_HEAD -#undef LIST_HEAD -#endif - #include "cam.h" #include "queue.h" #include "scsi_message.h" diff --git a/drivers/scsi/aic7xxx/queue.h b/drivers/scsi/aic7xxx/queue.h index 8adf8003a164..ba602981f193 100644 --- a/drivers/scsi/aic7xxx/queue.h +++ b/drivers/scsi/aic7xxx/queue.h @@ -246,7 +246,7 @@ struct { \ /* * List declarations. 
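 *
 * (Renamed to BSD_LIST_HEAD below so the BSD queue macro no longer
 * collides with the Linux kernel's own LIST_HEAD from <linux/list.h>,
 * which also makes the old "#undef LIST_HEAD" workaround in the *_osm.h
 * headers above unnecessary. Usage is unchanged; for example,
 * BSD_LIST_HEAD(scb_list, scb) still declares struct scb_list whose
 * lh_first member points at the first struct scb.)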
*/ -#define LIST_HEAD(name, type) \ +#define BSD_LIST_HEAD(name, type) \ struct name { \ struct type *lh_first; /* first element */ \ } diff --git a/drivers/scsi/aic94xx/aic94xx_sas.h b/drivers/scsi/aic94xx/aic94xx_sas.h index 912e6b755f74..101072cab70f 100644 --- a/drivers/scsi/aic94xx/aic94xx_sas.h +++ b/drivers/scsi/aic94xx/aic94xx_sas.h @@ -327,46 +327,9 @@ struct scb_header { #define LUN_SIZE 8 -/* See SAS spec, task IU - */ -struct ssp_task_iu { - u8 lun[LUN_SIZE]; /* BE */ - u16 _r_a; - u8 tmf; - u8 _r_b; - __be16 tag; /* BE */ - u8 _r_c[14]; -} __attribute__ ((packed)); - -/* See SAS spec, command IU - */ -struct ssp_command_iu { - u8 lun[LUN_SIZE]; - u8 _r_a; - u8 efb_prio_attr; /* enable first burst, task prio & attr */ -#define EFB_MASK 0x80 -#define TASK_PRIO_MASK 0x78 -#define TASK_ATTR_MASK 0x07 - - u8 _r_b; - u8 add_cdb_len; /* in dwords, since bit 0,1 are reserved */ - union { - u8 cdb[16]; - struct { - __le64 long_cdb_addr; /* bus address, LE */ - __le32 long_cdb_size; /* LE */ - u8 _r_c[3]; - u8 eol_ds; /* eol:6,6, ds:5,4 */ - } long_cdb; /* sequencer extension */ - }; -} __attribute__ ((packed)); - -struct xfer_rdy_iu { - __be32 requested_offset; /* BE */ - __be32 write_data_len; /* BE */ - __be32 _r_a; -} __attribute__ ((packed)); - +#define EFB_MASK 0x80 +#define TASK_PRIO_MASK 0x78 +#define TASK_ATTR_MASK 0x07 /* ---------- SCB tasks ---------- */ /* This is both ssp_task and long_ssp_task @@ -511,7 +474,7 @@ struct abort_task { u8 proto_conn_rate; __le32 _r_a; struct ssp_frame_hdr ssp_frame; - struct ssp_task_iu ssp_task; + struct ssp_tmf_iu ssp_task; __le16 sister_scb; __le16 conn_handle; u8 flags; /* ovrd_itnl_timer:3,3, suspend_data_trans:2,2 */ @@ -549,7 +512,7 @@ struct clear_nexus { u8 _r_b[3]; u8 conn_mask; u8 _r_c[19]; - struct ssp_task_iu ssp_task; /* LUN and TAG */ + struct ssp_tmf_iu ssp_task; /* LUN and TAG */ __le16 _r_d; __le16 conn_handle; __le64 _r_e; @@ -562,7 +525,7 @@ struct initiate_ssp_tmf { u8 proto_conn_rate; __le32 _r_a; struct ssp_frame_hdr ssp_frame; - struct ssp_task_iu ssp_task; + struct ssp_tmf_iu ssp_task; __le16 sister_scb; __le16 conn_handle; u8 flags; /* itnl override and suspend data tx */ diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h index 3bcaaac0ae4b..cf99f8cf4cdd 100644 --- a/drivers/scsi/arcmsr/arcmsr.h +++ b/drivers/scsi/arcmsr/arcmsr.h @@ -52,7 +52,7 @@ struct device_attribute; #define ARCMSR_MAX_FREECCB_NUM 320 #define ARCMSR_MAX_OUTSTANDING_CMD 255 #endif -#define ARCMSR_DRIVER_VERSION "v1.30.00.04-20140919" +#define ARCMSR_DRIVER_VERSION "v1.30.00.22-20151126" #define ARCMSR_SCSI_INITIATOR_ID 255 #define ARCMSR_MAX_XFER_SECTORS 512 #define ARCMSR_MAX_XFER_SECTORS_B 4096 @@ -74,6 +74,9 @@ struct device_attribute; #ifndef PCI_DEVICE_ID_ARECA_1214 #define PCI_DEVICE_ID_ARECA_1214 0x1214 #endif +#ifndef PCI_DEVICE_ID_ARECA_1203 + #define PCI_DEVICE_ID_ARECA_1203 0x1203 +#endif /* ********************************************************************************** ** @@ -245,6 +248,12 @@ struct FIRMWARE_INFO /* window of "instruction flags" from iop to driver */ #define ARCMSR_IOP2DRV_DOORBELL 0x00020408 #define ARCMSR_IOP2DRV_DOORBELL_MASK 0x0002040C +/* window of "instruction flags" from iop to driver */ +#define ARCMSR_IOP2DRV_DOORBELL_1203 0x00021870 +#define ARCMSR_IOP2DRV_DOORBELL_MASK_1203 0x00021874 +/* window of "instruction flags" from driver to iop */ +#define ARCMSR_DRV2IOP_DOORBELL_1203 0x00021878 +#define ARCMSR_DRV2IOP_DOORBELL_MASK_1203 0x0002187C /* ARECA FLAG LANGUAGE */ /* ioctl 
transfer */ #define ARCMSR_IOP2DRV_DATA_WRITE_OK 0x00000001 @@ -288,6 +297,9 @@ struct FIRMWARE_INFO #define ARCMSR_MESSAGE_RBUFFER 0x0000ff00 /* iop message_rwbuffer for message command */ #define ARCMSR_MESSAGE_RWBUFFER 0x0000fa00 + +#define MEM_BASE0(x) (u32 __iomem *)((unsigned long)acb->mem_base0 + x) +#define MEM_BASE1(x) (u32 __iomem *)((unsigned long)acb->mem_base1 + x) /* ************************************************************************ ** SPEC. for Areca HBC adapter diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c index 333db5953607..7640498964a5 100644 --- a/drivers/scsi/arcmsr/arcmsr_hba.c +++ b/drivers/scsi/arcmsr/arcmsr_hba.c @@ -114,6 +114,7 @@ static void arcmsr_hardware_reset(struct AdapterControlBlock *acb); static const char *arcmsr_info(struct Scsi_Host *); static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb); static void arcmsr_free_irq(struct pci_dev *, struct AdapterControlBlock *); +static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb); static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, int queue_depth) { if (queue_depth > ARCMSR_MAX_CMD_PERLUN) @@ -157,6 +158,8 @@ static struct pci_device_id arcmsr_device_id_table[] = { .driver_data = ACB_ADAPTER_TYPE_B}, {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1202), .driver_data = ACB_ADAPTER_TYPE_B}, + {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1203), + .driver_data = ACB_ADAPTER_TYPE_B}, {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1210), .driver_data = ACB_ADAPTER_TYPE_A}, {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1214), @@ -495,6 +498,91 @@ static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb) } } +static bool arcmsr_alloc_io_queue(struct AdapterControlBlock *acb) +{ + bool rtn = true; + void *dma_coherent; + dma_addr_t dma_coherent_handle; + struct pci_dev *pdev = acb->pdev; + + switch (acb->adapter_type) { + case ACB_ADAPTER_TYPE_B: { + struct MessageUnit_B *reg; + acb->roundup_ccbsize = roundup(sizeof(struct MessageUnit_B), 32); + dma_coherent = dma_zalloc_coherent(&pdev->dev, acb->roundup_ccbsize, + &dma_coherent_handle, GFP_KERNEL); + if (!dma_coherent) { + pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no); + return false; + } + acb->dma_coherent_handle2 = dma_coherent_handle; + acb->dma_coherent2 = dma_coherent; + reg = (struct MessageUnit_B *)dma_coherent; + acb->pmuB = reg; + if (acb->pdev->device == PCI_DEVICE_ID_ARECA_1203) { + reg->drv2iop_doorbell = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_1203); + reg->drv2iop_doorbell_mask = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_MASK_1203); + reg->iop2drv_doorbell = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_1203); + reg->iop2drv_doorbell_mask = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_MASK_1203); + } else { + reg->drv2iop_doorbell = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL); + reg->drv2iop_doorbell_mask = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_MASK); + reg->iop2drv_doorbell = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL); + reg->iop2drv_doorbell_mask = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_MASK); + } + reg->message_wbuffer = MEM_BASE1(ARCMSR_MESSAGE_WBUFFER); + reg->message_rbuffer = MEM_BASE1(ARCMSR_MESSAGE_RBUFFER); + reg->message_rwbuffer = MEM_BASE1(ARCMSR_MESSAGE_RWBUFFER); + } + break; + case ACB_ADAPTER_TYPE_D: { + struct MessageUnit_D *reg; + + acb->roundup_ccbsize = roundup(sizeof(struct MessageUnit_D), 32); + dma_coherent = dma_zalloc_coherent(&pdev->dev, acb->roundup_ccbsize, + &dma_coherent_handle, GFP_KERNEL); + if (!dma_coherent) { + 
pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no); + return false; + } + acb->dma_coherent_handle2 = dma_coherent_handle; + acb->dma_coherent2 = dma_coherent; + reg = (struct MessageUnit_D *)dma_coherent; + acb->pmuD = reg; + reg->chip_id = MEM_BASE0(ARCMSR_ARC1214_CHIP_ID); + reg->cpu_mem_config = MEM_BASE0(ARCMSR_ARC1214_CPU_MEMORY_CONFIGURATION); + reg->i2o_host_interrupt_mask = MEM_BASE0(ARCMSR_ARC1214_I2_HOST_INTERRUPT_MASK); + reg->sample_at_reset = MEM_BASE0(ARCMSR_ARC1214_SAMPLE_RESET); + reg->reset_request = MEM_BASE0(ARCMSR_ARC1214_RESET_REQUEST); + reg->host_int_status = MEM_BASE0(ARCMSR_ARC1214_MAIN_INTERRUPT_STATUS); + reg->pcief0_int_enable = MEM_BASE0(ARCMSR_ARC1214_PCIE_F0_INTERRUPT_ENABLE); + reg->inbound_msgaddr0 = MEM_BASE0(ARCMSR_ARC1214_INBOUND_MESSAGE0); + reg->inbound_msgaddr1 = MEM_BASE0(ARCMSR_ARC1214_INBOUND_MESSAGE1); + reg->outbound_msgaddr0 = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_MESSAGE0); + reg->outbound_msgaddr1 = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_MESSAGE1); + reg->inbound_doorbell = MEM_BASE0(ARCMSR_ARC1214_INBOUND_DOORBELL); + reg->outbound_doorbell = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_DOORBELL); + reg->outbound_doorbell_enable = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_DOORBELL_ENABLE); + reg->inboundlist_base_low = MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_BASE_LOW); + reg->inboundlist_base_high = MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_BASE_HIGH); + reg->inboundlist_write_pointer = MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_WRITE_POINTER); + reg->outboundlist_base_low = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_BASE_LOW); + reg->outboundlist_base_high = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_BASE_HIGH); + reg->outboundlist_copy_pointer = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_COPY_POINTER); + reg->outboundlist_read_pointer = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_READ_POINTER); + reg->outboundlist_interrupt_cause = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_INTERRUPT_CAUSE); + reg->outboundlist_interrupt_enable = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_INTERRUPT_ENABLE); + reg->message_wbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_WBUFFER); + reg->message_rbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_RBUFFER); + reg->msgcode_rwbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_RWBUFFER); + } + break; + default: + break; + } + return rtn; +} + static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb) { struct pci_dev *pdev = acb->pdev; @@ -739,9 +827,12 @@ static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id) if(!error){ goto pci_release_regs; } + error = arcmsr_alloc_io_queue(acb); + if (!error) + goto unmap_pci_region; error = arcmsr_get_firmware_spec(acb); if(!error){ - goto unmap_pci_region; + goto free_hbb_mu; } error = arcmsr_alloc_ccb_pool(acb); if(error){ @@ -2622,9 +2713,6 @@ static bool arcmsr_hbaA_get_config(struct AdapterControlBlock *acb) static bool arcmsr_hbaB_get_config(struct AdapterControlBlock *acb) { struct MessageUnit_B *reg = acb->pmuB; - struct pci_dev *pdev = acb->pdev; - void *dma_coherent; - dma_addr_t dma_coherent_handle; char *acb_firm_model = acb->firm_model; char *acb_firm_version = acb->firm_version; char *acb_device_map = acb->device_map; @@ -2636,30 +2724,16 @@ static bool arcmsr_hbaB_get_config(struct AdapterControlBlock *acb) /*firm_version,21,84-99*/ int count; - acb->roundup_ccbsize = roundup(sizeof(struct MessageUnit_B), 32); - dma_coherent = dma_alloc_coherent(&pdev->dev, acb->roundup_ccbsize, - &dma_coherent_handle, GFP_KERNEL); - if (!dma_coherent){ - printk(KERN_NOTICE - "arcmsr%d: dma_alloc_coherent got error for hbb 
mu\n", - acb->host->host_no); - return false; - } - acb->dma_coherent_handle2 = dma_coherent_handle; - acb->dma_coherent2 = dma_coherent; - reg = (struct MessageUnit_B *)dma_coherent; - acb->pmuB = reg; - reg->drv2iop_doorbell= (uint32_t __iomem *)((unsigned long)acb->mem_base0 + ARCMSR_DRV2IOP_DOORBELL); - reg->drv2iop_doorbell_mask = (uint32_t __iomem *)((unsigned long)acb->mem_base0 + ARCMSR_DRV2IOP_DOORBELL_MASK); - reg->iop2drv_doorbell = (uint32_t __iomem *)((unsigned long)acb->mem_base0 + ARCMSR_IOP2DRV_DOORBELL); - reg->iop2drv_doorbell_mask = (uint32_t __iomem *)((unsigned long)acb->mem_base0 + ARCMSR_IOP2DRV_DOORBELL_MASK); - reg->message_wbuffer = (uint32_t __iomem *)((unsigned long)acb->mem_base1 + ARCMSR_MESSAGE_WBUFFER); - reg->message_rbuffer = (uint32_t __iomem *)((unsigned long)acb->mem_base1 + ARCMSR_MESSAGE_RBUFFER); - reg->message_rwbuffer = (uint32_t __iomem *)((unsigned long)acb->mem_base1 + ARCMSR_MESSAGE_RWBUFFER); iop_firm_model = (char __iomem *)(®->message_rwbuffer[15]); /*firm_model,15,60-67*/ iop_firm_version = (char __iomem *)(®->message_rwbuffer[17]); /*firm_version,17,68-83*/ iop_device_map = (char __iomem *)(®->message_rwbuffer[21]); /*firm_version,21,84-99*/ + arcmsr_wait_firmware_ready(acb); + writel(ARCMSR_MESSAGE_START_DRIVER_MODE, reg->drv2iop_doorbell); + if (!arcmsr_hbaB_wait_msgint_ready(acb)) { + printk(KERN_ERR "arcmsr%d: can't set driver mode.\n", acb->host->host_no); + return false; + } writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell); if (!arcmsr_hbaB_wait_msgint_ready(acb)) { printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \ @@ -2694,15 +2768,15 @@ static bool arcmsr_hbaB_get_config(struct AdapterControlBlock *acb) acb->firm_model, acb->firm_version); - acb->signature = readl(®->message_rwbuffer[1]); + acb->signature = readl(®->message_rwbuffer[0]); /*firm_signature,1,00-03*/ - acb->firm_request_len = readl(®->message_rwbuffer[2]); + acb->firm_request_len = readl(®->message_rwbuffer[1]); /*firm_request_len,1,04-07*/ - acb->firm_numbers_queue = readl(®->message_rwbuffer[3]); + acb->firm_numbers_queue = readl(®->message_rwbuffer[2]); /*firm_numbers_queue,2,08-11*/ - acb->firm_sdram_size = readl(®->message_rwbuffer[4]); + acb->firm_sdram_size = readl(®->message_rwbuffer[3]); /*firm_sdram_size,3,12-15*/ - acb->firm_hd_channels = readl(®->message_rwbuffer[5]); + acb->firm_hd_channels = readl(®->message_rwbuffer[4]); /*firm_ide_channels,4,16-19*/ acb->firm_cfg_version = readl(®->message_rwbuffer[25]); /*firm_cfg_version,25,100-103*/ /*firm_ide_channels,4,16-19*/ @@ -2777,70 +2851,8 @@ static bool arcmsr_hbaD_get_config(struct AdapterControlBlock *acb) char __iomem *iop_firm_version; char __iomem *iop_device_map; u32 count; - struct MessageUnit_D *reg; - void *dma_coherent2; - dma_addr_t dma_coherent_handle2; - struct pci_dev *pdev = acb->pdev; + struct MessageUnit_D *reg = acb->pmuD; - acb->roundup_ccbsize = roundup(sizeof(struct MessageUnit_D), 32); - dma_coherent2 = dma_alloc_coherent(&pdev->dev, acb->roundup_ccbsize, - &dma_coherent_handle2, GFP_KERNEL); - if (!dma_coherent2) { - pr_notice("DMA allocation failed...\n"); - return false; - } - memset(dma_coherent2, 0, acb->roundup_ccbsize); - acb->dma_coherent_handle2 = dma_coherent_handle2; - acb->dma_coherent2 = dma_coherent2; - reg = (struct MessageUnit_D *)dma_coherent2; - acb->pmuD = reg; - reg->chip_id = acb->mem_base0 + ARCMSR_ARC1214_CHIP_ID; - reg->cpu_mem_config = acb->mem_base0 + - ARCMSR_ARC1214_CPU_MEMORY_CONFIGURATION; - reg->i2o_host_interrupt_mask = acb->mem_base0 + 
- ARCMSR_ARC1214_I2_HOST_INTERRUPT_MASK; - reg->sample_at_reset = acb->mem_base0 + ARCMSR_ARC1214_SAMPLE_RESET; - reg->reset_request = acb->mem_base0 + ARCMSR_ARC1214_RESET_REQUEST; - reg->host_int_status = acb->mem_base0 + - ARCMSR_ARC1214_MAIN_INTERRUPT_STATUS; - reg->pcief0_int_enable = acb->mem_base0 + - ARCMSR_ARC1214_PCIE_F0_INTERRUPT_ENABLE; - reg->inbound_msgaddr0 = acb->mem_base0 + - ARCMSR_ARC1214_INBOUND_MESSAGE0; - reg->inbound_msgaddr1 = acb->mem_base0 + - ARCMSR_ARC1214_INBOUND_MESSAGE1; - reg->outbound_msgaddr0 = acb->mem_base0 + - ARCMSR_ARC1214_OUTBOUND_MESSAGE0; - reg->outbound_msgaddr1 = acb->mem_base0 + - ARCMSR_ARC1214_OUTBOUND_MESSAGE1; - reg->inbound_doorbell = acb->mem_base0 + - ARCMSR_ARC1214_INBOUND_DOORBELL; - reg->outbound_doorbell = acb->mem_base0 + - ARCMSR_ARC1214_OUTBOUND_DOORBELL; - reg->outbound_doorbell_enable = acb->mem_base0 + - ARCMSR_ARC1214_OUTBOUND_DOORBELL_ENABLE; - reg->inboundlist_base_low = acb->mem_base0 + - ARCMSR_ARC1214_INBOUND_LIST_BASE_LOW; - reg->inboundlist_base_high = acb->mem_base0 + - ARCMSR_ARC1214_INBOUND_LIST_BASE_HIGH; - reg->inboundlist_write_pointer = acb->mem_base0 + - ARCMSR_ARC1214_INBOUND_LIST_WRITE_POINTER; - reg->outboundlist_base_low = acb->mem_base0 + - ARCMSR_ARC1214_OUTBOUND_LIST_BASE_LOW; - reg->outboundlist_base_high = acb->mem_base0 + - ARCMSR_ARC1214_OUTBOUND_LIST_BASE_HIGH; - reg->outboundlist_copy_pointer = acb->mem_base0 + - ARCMSR_ARC1214_OUTBOUND_LIST_COPY_POINTER; - reg->outboundlist_read_pointer = acb->mem_base0 + - ARCMSR_ARC1214_OUTBOUND_LIST_READ_POINTER; - reg->outboundlist_interrupt_cause = acb->mem_base0 + - ARCMSR_ARC1214_OUTBOUND_INTERRUPT_CAUSE; - reg->outboundlist_interrupt_enable = acb->mem_base0 + - ARCMSR_ARC1214_OUTBOUND_INTERRUPT_ENABLE; - reg->message_wbuffer = acb->mem_base0 + ARCMSR_ARC1214_MESSAGE_WBUFFER; - reg->message_rbuffer = acb->mem_base0 + ARCMSR_ARC1214_MESSAGE_RBUFFER; - reg->msgcode_rwbuffer = acb->mem_base0 + - ARCMSR_ARC1214_MESSAGE_RWBUFFER; iop_firm_model = (char __iomem *)(®->msgcode_rwbuffer[15]); iop_firm_version = (char __iomem *)(®->msgcode_rwbuffer[17]); iop_device_map = (char __iomem *)(®->msgcode_rwbuffer[21]); @@ -2855,8 +2867,6 @@ static bool arcmsr_hbaD_get_config(struct AdapterControlBlock *acb) if (!arcmsr_hbaD_wait_msgint_ready(acb)) { pr_notice("arcmsr%d: wait get adapter firmware " "miscellaneous data timeout\n", acb->host->host_no); - dma_free_coherent(&acb->pdev->dev, acb->roundup_ccbsize, - acb->dma_coherent2, acb->dma_coherent_handle2); return false; } count = 8; @@ -2880,15 +2890,15 @@ static bool arcmsr_hbaD_get_config(struct AdapterControlBlock *acb) iop_device_map++; count--; } - acb->signature = readl(®->msgcode_rwbuffer[1]); + acb->signature = readl(®->msgcode_rwbuffer[0]); /*firm_signature,1,00-03*/ - acb->firm_request_len = readl(®->msgcode_rwbuffer[2]); + acb->firm_request_len = readl(®->msgcode_rwbuffer[1]); /*firm_request_len,1,04-07*/ - acb->firm_numbers_queue = readl(®->msgcode_rwbuffer[3]); + acb->firm_numbers_queue = readl(®->msgcode_rwbuffer[2]); /*firm_numbers_queue,2,08-11*/ - acb->firm_sdram_size = readl(®->msgcode_rwbuffer[4]); + acb->firm_sdram_size = readl(®->msgcode_rwbuffer[3]); /*firm_sdram_size,3,12-15*/ - acb->firm_hd_channels = readl(®->msgcode_rwbuffer[5]); + acb->firm_hd_channels = readl(®->msgcode_rwbuffer[4]); /*firm_hd_channels,4,16-19*/ acb->firm_cfg_version = readl(®->msgcode_rwbuffer[25]); pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n", @@ -3998,6 +4008,7 @@ static const char *arcmsr_info(struct Scsi_Host 
*host) case PCI_DEVICE_ID_ARECA_1160: case PCI_DEVICE_ID_ARECA_1170: case PCI_DEVICE_ID_ARECA_1201: + case PCI_DEVICE_ID_ARECA_1203: case PCI_DEVICE_ID_ARECA_1220: case PCI_DEVICE_ID_ARECA_1230: case PCI_DEVICE_ID_ARECA_1260: diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c index deaaf84989cd..12b88294d667 100644 --- a/drivers/scsi/arm/acornscsi.c +++ b/drivers/scsi/arm/acornscsi.c @@ -677,7 +677,8 @@ int round_period(unsigned int period) * Copyright: Copyright (c) 1996 John Shifflett, GeoLog Consulting */ static -unsigned char calc_sync_xfer(unsigned int period, unsigned int offset) +unsigned char __maybe_unused calc_sync_xfer(unsigned int period, + unsigned int offset) { return sync_xfer_table[round_period(period)].reg_value | ((offset < SDTR_SIZE) ? offset : SDTR_SIZE); diff --git a/drivers/scsi/arm/cumana_1.c b/drivers/scsi/arm/cumana_1.c index d28d6c0f18c0..221f18c5df93 100644 --- a/drivers/scsi/arm/cumana_1.c +++ b/drivers/scsi/arm/cumana_1.c @@ -4,9 +4,7 @@ * Copyright 1995-2002, Russell King */ #include <linux/module.h> -#include <linux/signal.h> #include <linux/ioport.h> -#include <linux/delay.h> #include <linux/blkdev.h> #include <linux/init.h> @@ -15,15 +13,14 @@ #include <scsi/scsi_host.h> -#include <scsi/scsicam.h> - #define PSEUDO_DMA #define priv(host) ((struct NCR5380_hostdata *)(host)->hostdata) -#define NCR5380_local_declare() struct Scsi_Host *_instance -#define NCR5380_setup(instance) _instance = instance -#define NCR5380_read(reg) cumanascsi_read(_instance, reg) -#define NCR5380_write(reg, value) cumanascsi_write(_instance, reg, value) +#define NCR5380_read(reg) cumanascsi_read(instance, reg) +#define NCR5380_write(reg, value) cumanascsi_write(instance, reg, value) + +#define NCR5380_dma_xfer_len(instance, cmd, phase) (cmd->transfersize) + #define NCR5380_intr cumanascsi_intr #define NCR5380_queue_command cumanascsi_queue_command #define NCR5380_info cumanascsi_info @@ -211,6 +208,8 @@ static struct scsi_host_template cumanascsi_template = { .cmd_per_lun = 2, .use_clustering = DISABLE_CLUSTERING, .proc_name = "CumanaSCSI-1", + .cmd_size = NCR5380_CMD_SIZE, + .max_sectors = 128, }; static int cumanascsi1_probe(struct expansion_card *ec, @@ -240,23 +239,21 @@ static int cumanascsi1_probe(struct expansion_card *ec, host->irq = ec->irq; - NCR5380_init(host, 0); + ret = NCR5380_init(host, 0); + if (ret) + goto out_unmap; + + NCR5380_maybe_reset_bus(host); priv(host)->ctrl = 0; writeb(0, priv(host)->base + CTRL); - host->n_io_port = 255; - if (!(request_region(host->io_port, host->n_io_port, "CumanaSCSI-1"))) { - ret = -EBUSY; - goto out_unmap; - } - ret = request_irq(host->irq, cumanascsi_intr, 0, "CumanaSCSI-1", host); if (ret) { printk("scsi%d: IRQ%d not free: %d\n", host->host_no, host->irq, ret); - goto out_unmap; + goto out_exit; } ret = scsi_add_host(host, &ec->dev); @@ -268,6 +265,8 @@ static int cumanascsi1_probe(struct expansion_card *ec, out_free_irq: free_irq(host->irq, host); + out_exit: + NCR5380_exit(host); out_unmap: iounmap(priv(host)->base); iounmap(priv(host)->dma); diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c index decdc71b6b86..24388795ee9a 100644 --- a/drivers/scsi/arm/fas216.c +++ b/drivers/scsi/arm/fas216.c @@ -98,6 +98,7 @@ static int level_mask = LOG_ERROR; module_param(level_mask, int, 0644); +#ifndef MODULE static int __init fas216_log_setup(char *str) { char *s; @@ -138,6 +139,7 @@ static int __init fas216_log_setup(char *str) } __setup("fas216_logging=", fas216_log_setup); +#endif static inline 
unsigned char fas216_readb(FAS216_Info *info, unsigned int reg) { diff --git a/drivers/scsi/arm/oak.c b/drivers/scsi/arm/oak.c index 7c6fa1479c9c..1fab1d1896b1 100644 --- a/drivers/scsi/arm/oak.c +++ b/drivers/scsi/arm/oak.c @@ -5,9 +5,7 @@ */ #include <linux/module.h> -#include <linux/signal.h> #include <linux/ioport.h> -#include <linux/delay.h> #include <linux/blkdev.h> #include <linux/init.h> @@ -20,14 +18,16 @@ #define DONT_USE_INTR #define priv(host) ((struct NCR5380_hostdata *)(host)->hostdata) -#define NCR5380_local_declare() void __iomem *_base -#define NCR5380_setup(host) _base = priv(host)->base -#define NCR5380_read(reg) readb(_base + ((reg) << 2)) -#define NCR5380_write(reg, value) writeb(value, _base + ((reg) << 2)) +#define NCR5380_read(reg) \ + readb(priv(instance)->base + ((reg) << 2)) +#define NCR5380_write(reg, value) \ + writeb(value, priv(instance)->base + ((reg) << 2)) + +#define NCR5380_dma_xfer_len(instance, cmd, phase) (cmd->transfersize) + #define NCR5380_queue_command oakscsi_queue_command #define NCR5380_info oakscsi_info -#define NCR5380_show_info oakscsi_show_info #define NCR5380_implementation_fields \ void __iomem *base @@ -103,7 +103,6 @@ printk("reading %p len %d\n", addr, len); static struct scsi_host_template oakscsi_template = { .module = THIS_MODULE, - .show_info = oakscsi_show_info, .name = "Oak 16-bit SCSI", .info = oakscsi_info, .queuecommand = oakscsi_queue_command, @@ -115,6 +114,8 @@ static struct scsi_host_template oakscsi_template = { .cmd_per_lun = 2, .use_clustering = DISABLE_CLUSTERING, .proc_name = "oakscsi", + .cmd_size = NCR5380_CMD_SIZE, + .max_sectors = 128, }; static int oakscsi_probe(struct expansion_card *ec, const struct ecard_id *id) @@ -142,15 +143,21 @@ static int oakscsi_probe(struct expansion_card *ec, const struct ecard_id *id) host->irq = NO_IRQ; host->n_io_port = 255; - NCR5380_init(host, 0); + ret = NCR5380_init(host, 0); + if (ret) + goto out_unmap; + + NCR5380_maybe_reset_bus(host); ret = scsi_add_host(host, &ec->dev); if (ret) - goto out_unmap; + goto out_exit; scsi_scan_host(host); goto out; + out_exit: + NCR5380_exit(host); out_unmap: iounmap(priv(host)->base); unreg: diff --git a/drivers/scsi/atari_NCR5380.c b/drivers/scsi/atari_NCR5380.c index db87ece6edb2..389825ba5d96 100644 --- a/drivers/scsi/atari_NCR5380.c +++ b/drivers/scsi/atari_NCR5380.c @@ -1,15 +1,15 @@ /* * NCR 5380 generic driver routines. These should make it *trivial* - * to implement 5380 SCSI drivers under Linux with a non-trantor - * architecture. + * to implement 5380 SCSI drivers under Linux with a non-trantor + * architecture. * - * Note that these routines also work with NR53c400 family chips. + * Note that these routines also work with NR53c400 family chips. * * Copyright 1993, Drew Eckhardt - * Visionary Computing - * (Unix and Linux consulting and custom programming) - * drew@colorado.edu - * +1 (303) 666-5836 + * Visionary Computing + * (Unix and Linux consulting and custom programming) + * drew@colorado.edu + * +1 (303) 666-5836 * * For more information, please consult * @@ -24,84 +24,10 @@ * 1+ (800) 334-5454 */ -/* - * ++roman: To port the 5380 driver to the Atari, I had to do some changes in - * this file, too: - * - * - Some of the debug statements were incorrect (undefined variables and the - * like). I fixed that. - * - * - In information_transfer(), I think a #ifdef was wrong. Looking at the - * possible DMA transfer size should also happen for REAL_DMA. I added this - * in the #if statement. 
- * - * - When using real DMA, information_transfer() should return in a DATAOUT - * phase after starting the DMA. It has nothing more to do. - * - * - The interrupt service routine should run main after end of DMA, too (not - * only after RESELECTION interrupts). Additionally, it should _not_ test - * for more interrupts after running main, since a DMA process may have - * been started and interrupts are turned on now. The new int could happen - * inside the execution of NCR5380_intr(), leading to recursive - * calls. - * - * - I've added a function merge_contiguous_buffers() that tries to - * merge scatter-gather buffers that are located at contiguous - * physical addresses and can be processed with the same DMA setup. - * Since most scatter-gather operations work on a page (4K) of - * 4 buffers (1K), in more than 90% of all cases three interrupts and - * DMA setup actions are saved. - * - * - I've deleted all the stuff for AUTOPROBE_IRQ, REAL_DMA_POLL, PSEUDO_DMA - * and USLEEP, because these were messing up readability and will never be - * needed for Atari SCSI. - * - * - I've revised the NCR5380_main() calling scheme (relax the 'main_running' - * stuff), and 'main' is executed in a bottom half if awoken by an - * interrupt. - * - * - The code was quite cluttered up by "#if (NDEBUG & NDEBUG_*) printk..." - * constructs. In my eyes, this made the source rather unreadable, so I - * finally replaced that by the *_PRINTK() macros. - * - */ - -/* - * Further development / testing that should be done : - * 1. Test linked command handling code after Eric is ready with - * the high level code. - */ +/* Ported to Atari by Roman Hodek and others. */ /* Adapted for the sun3 by Sam Creasey. */ -#include <scsi/scsi_dbg.h> -#include <scsi/scsi_transport_spi.h> - -#if (NDEBUG & NDEBUG_LISTS) -#define LIST(x, y) \ - do { \ - printk("LINE:%d Adding %p to %p\n", \ - __LINE__, (void*)(x), (void*)(y)); \ - if ((x) == (y)) \ - udelay(5); \ - } while (0) -#define REMOVE(w, x, y, z) \ - do { \ - printk("LINE:%d Removing: %p->%p %p->%p \n", \ - __LINE__, (void*)(w), (void*)(x), \ - (void*)(y), (void*)(z)); \ - if ((x) == (y)) \ - udelay(5); \ - } while (0) -#else -#define LIST(x,y) -#define REMOVE(w,x,y,z) -#endif - -#ifndef notyet -#undef LINKED -#endif - /* * Design * @@ -126,17 +52,7 @@ * piece of hardware that requires you to sit in a loop polling for * the REQ signal as long as you are connected. Some devices are * brain dead (ie, many TEXEL CD ROM drives) and won't disconnect - * while doing long seek operations. - * - * The workaround for this is to keep track of devices that have - * disconnected. If the device hasn't disconnected, for commands that - * should disconnect, we do something like - * - * while (!REQ is asserted) { sleep for N usecs; poll for M usecs } - * - * Some tweaking of N and M needs to be done. An algorithm based - * on "time to data" would give the best results as long as short time - * to datas (ie, on the same track) were considered, however these + * while doing long seek operations. [...] These * broken devices are the exception rather than the rule and I'd rather * spend my time optimizing for the normal case. * @@ -177,12 +93,10 @@ * * These macros control options : * AUTOSENSE - if defined, REQUEST SENSE will be performed automatically - * for commands that return with a CHECK CONDITION status. + * for commands that return with a CHECK CONDITION status. * * DIFFERENTIAL - if defined, NCR53c81 chips will use external differential - * transceivers. 
- * - * LINKED - if defined, linked commands are supported. + * transceivers. * * REAL_DMA - if defined, REAL DMA is used during the data transfer phases. * @@ -195,17 +109,17 @@ * NCR5380_write(register, value) - write to the specific register * * NCR5380_implementation_fields - additional fields needed for this - * specific implementation of the NCR5380 + * specific implementation of the NCR5380 * * Either real DMA *or* pseudo DMA may be implemented * REAL functions : * NCR5380_REAL_DMA should be defined if real DMA is to be used. * Note that the DMA setup functions should return the number of bytes - * that they were able to program the controller for. + * that they were able to program the controller for. * * Also note that generic i386/PC versions of these macros are - * available as NCR5380_i386_dma_write_setup, - * NCR5380_i386_dma_read_setup, and NCR5380_i386_dma_residual. + * available as NCR5380_i386_dma_write_setup, + * NCR5380_i386_dma_read_setup, and NCR5380_i386_dma_residual. * * NCR5380_dma_write_setup(instance, src, count) - initialize * NCR5380_dma_read_setup(instance, dst, count) - initialize @@ -221,18 +135,8 @@ * possible) function may be used. */ -/* Macros ease life... :-) */ -#define SETUP_HOSTDATA(in) \ - struct NCR5380_hostdata *hostdata = \ - (struct NCR5380_hostdata *)(in)->hostdata -#define HOSTDATA(in) ((struct NCR5380_hostdata *)(in)->hostdata) - -#define NEXT(cmd) ((struct scsi_cmnd *)(cmd)->host_scribble) -#define SET_NEXT(cmd,next) ((cmd)->host_scribble = (void *)(next)) -#define NEXTADDR(cmd) ((struct scsi_cmnd **)&(cmd)->host_scribble) - -#define HOSTNO instance->host_no -#define H_NO(cmd) (cmd)->device->host->host_no +static int do_abort(struct Scsi_Host *); +static void do_reset(struct Scsi_Host *); #ifdef SUPPORT_TAGS @@ -251,9 +155,7 @@ * cannot know it in advance :-( We just see a QUEUE_FULL status being * returned. So, in this case, the driver internal queue size assumption is * reduced to the number of active tags if QUEUE_FULL is returned by the - * target. The command is returned to the mid-level, but with status changed - * to BUSY, since --as I've seen-- the mid-level can't handle QUEUE_FULL - * correctly. + * target. * * We're also not allowed running tagged commands as long as an untagged * command is active. 
And REQUEST SENSE commands after a contingent allegiance @@ -304,7 +206,8 @@ static void __init init_tags(struct NCR5380_hostdata *hostdata) static int is_lun_busy(struct scsi_cmnd *cmd, int should_be_tagged) { u8 lun = cmd->device->lun; - SETUP_HOSTDATA(cmd->device->host); + struct Scsi_Host *instance = cmd->device->host; + struct NCR5380_hostdata *hostdata = shost_priv(instance); if (hostdata->busy[cmd->device->id] & (1 << lun)) return 1; @@ -314,8 +217,8 @@ static int is_lun_busy(struct scsi_cmnd *cmd, int should_be_tagged) return 0; if (hostdata->TagAlloc[scmd_id(cmd)][lun].nr_allocated >= hostdata->TagAlloc[scmd_id(cmd)][lun].queue_size) { - dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d: no free tags\n", - H_NO(cmd), cmd->device->id, lun); + dsprintk(NDEBUG_TAGS, instance, "target %d lun %d: no free tags\n", + scmd_id(cmd), lun); return 1; } return 0; @@ -330,7 +233,8 @@ static int is_lun_busy(struct scsi_cmnd *cmd, int should_be_tagged) static void cmd_get_tag(struct scsi_cmnd *cmd, int should_be_tagged) { u8 lun = cmd->device->lun; - SETUP_HOSTDATA(cmd->device->host); + struct Scsi_Host *instance = cmd->device->host; + struct NCR5380_hostdata *hostdata = shost_priv(instance); /* If we or the target don't support tagged queuing, allocate the LUN for * an untagged command. @@ -340,18 +244,16 @@ static void cmd_get_tag(struct scsi_cmnd *cmd, int should_be_tagged) !cmd->device->tagged_supported) { cmd->tag = TAG_NONE; hostdata->busy[cmd->device->id] |= (1 << lun); - dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d now allocated by untagged " - "command\n", H_NO(cmd), cmd->device->id, lun); + dsprintk(NDEBUG_TAGS, instance, "target %d lun %d now allocated by untagged command\n", + scmd_id(cmd), lun); } else { struct tag_alloc *ta = &hostdata->TagAlloc[scmd_id(cmd)][lun]; cmd->tag = find_first_zero_bit(ta->allocated, MAX_TAGS); set_bit(cmd->tag, ta->allocated); ta->nr_allocated++; - dprintk(NDEBUG_TAGS, "scsi%d: using tag %d for target %d lun %d " - "(now %d tags in use)\n", - H_NO(cmd), cmd->tag, cmd->device->id, - lun, ta->nr_allocated); + dsprintk(NDEBUG_TAGS, instance, "using tag %d for target %d lun %d (%d tags allocated)\n", + cmd->tag, scmd_id(cmd), lun, ta->nr_allocated); } } @@ -363,21 +265,22 @@ static void cmd_get_tag(struct scsi_cmnd *cmd, int should_be_tagged) static void cmd_free_tag(struct scsi_cmnd *cmd) { u8 lun = cmd->device->lun; - SETUP_HOSTDATA(cmd->device->host); + struct Scsi_Host *instance = cmd->device->host; + struct NCR5380_hostdata *hostdata = shost_priv(instance); if (cmd->tag == TAG_NONE) { hostdata->busy[cmd->device->id] &= ~(1 << lun); - dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d untagged cmd finished\n", - H_NO(cmd), cmd->device->id, lun); + dsprintk(NDEBUG_TAGS, instance, "target %d lun %d untagged cmd freed\n", + scmd_id(cmd), lun); } else if (cmd->tag >= MAX_TAGS) { - printk(KERN_NOTICE "scsi%d: trying to free bad tag %d!\n", - H_NO(cmd), cmd->tag); + shost_printk(KERN_NOTICE, instance, + "trying to free bad tag %d!\n", cmd->tag); } else { struct tag_alloc *ta = &hostdata->TagAlloc[scmd_id(cmd)][lun]; clear_bit(cmd->tag, ta->allocated); ta->nr_allocated--; - dprintk(NDEBUG_TAGS, "scsi%d: freed tag %d for target %d lun %d\n", - H_NO(cmd), cmd->tag, cmd->device->id, lun); + dsprintk(NDEBUG_TAGS, instance, "freed tag %d for target %d lun %d\n", + cmd->tag, scmd_id(cmd), lun); } } @@ -401,17 +304,15 @@ static void free_all_tags(struct NCR5380_hostdata *hostdata) #endif /* SUPPORT_TAGS */ - -/* - * Function: void merge_contiguous_buffers( struct scsi_cmnd 
*cmd ) - * - * Purpose: Try to merge several scatter-gather requests into one DMA - * transfer. This is possible if the scatter buffers lie on - * physical contiguous addresses. - * - * Parameters: struct scsi_cmnd *cmd - * The command to work on. The first scatter buffer's data are - * assumed to be already transferred into ptr/this_residual. +/** + * merge_contiguous_buffers - coalesce scatter-gather list entries + * @cmd: command requesting IO + * + * Try to merge several scatter-gather buffers into one DMA transfer. + * This is possible if the scatter buffers lie on physically + * contiguous addresses. The first scatter-gather buffer's data are + * assumed to be already transferred into cmd->SCp.this_residual. + * Every buffer merged avoids an interrupt and a DMA setup operation. */ static void merge_contiguous_buffers(struct scsi_cmnd *cmd) @@ -463,9 +364,7 @@ static inline void initialize_SCp(struct scsi_cmnd *cmd) cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1; cmd->SCp.ptr = sg_virt(cmd->SCp.buffer); cmd->SCp.this_residual = cmd->SCp.buffer->length; - /* ++roman: Try to merge some scatter-buffers if they are at - * contiguous physical addresses. - */ + merge_contiguous_buffers(cmd); } else { cmd->SCp.buffer = NULL; @@ -473,31 +372,110 @@ static inline void initialize_SCp(struct scsi_cmnd *cmd) cmd->SCp.ptr = NULL; cmd->SCp.this_residual = 0; } + + cmd->SCp.Status = 0; + cmd->SCp.Message = 0; } -#include <linux/delay.h> +/** + * NCR5380_poll_politely2 - wait for two chip register values + * @instance: controller to poll + * @reg1: 5380 register to poll + * @bit1: Bitmask to check + * @val1: Expected value + * @reg2: Second 5380 register to poll + * @bit2: Second bitmask to check + * @val2: Second expected value + * @wait: Time-out in jiffies + * + * Polls the chip in a reasonably efficient manner waiting for an + * event to occur. After a short quick poll we begin to yield the CPU + * (if possible). In irq contexts the time-out is arbitrarily limited. + * Callers may hold locks as long as they are held in irq mode. + * + * Returns 0 if either or both event(s) occurred otherwise -ETIMEDOUT. 
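+ *
+ * An illustrative use (editorial sketch, not part of this patch; it only
+ * assumes the register names already used in this file), e.g. waiting up
+ * to one second for the target to assert REQ:
+ *
+ *	if (NCR5380_poll_politely(instance, STATUS_REG,
+ *	                          SR_REQ, SR_REQ, HZ) < 0)
+ *		return -ETIMEDOUT;	/* bus wedged or target gone */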
+ */ + +static int NCR5380_poll_politely2(struct Scsi_Host *instance, + int reg1, int bit1, int val1, + int reg2, int bit2, int val2, int wait) +{ + struct NCR5380_hostdata *hostdata = shost_priv(instance); + unsigned long deadline = jiffies + wait; + unsigned long n; + + /* Busy-wait for up to 10 ms */ + n = min(10000U, jiffies_to_usecs(wait)); + n *= hostdata->accesses_per_ms; + n /= 2000; + do { + if ((NCR5380_read(reg1) & bit1) == val1) + return 0; + if ((NCR5380_read(reg2) & bit2) == val2) + return 0; + cpu_relax(); + } while (n--); + + if (irqs_disabled() || in_interrupt()) + return -ETIMEDOUT; + + /* Repeatedly sleep for 1 ms until deadline */ + while (time_is_after_jiffies(deadline)) { + schedule_timeout_uninterruptible(1); + if ((NCR5380_read(reg1) & bit1) == val1) + return 0; + if ((NCR5380_read(reg2) & bit2) == val2) + return 0; + } + + return -ETIMEDOUT; +} + +static inline int NCR5380_poll_politely(struct Scsi_Host *instance, + int reg, int bit, int val, int wait) +{ + return NCR5380_poll_politely2(instance, reg, bit, val, + reg, bit, val, wait); +} #if NDEBUG static struct { unsigned char mask; const char *name; } signals[] = { - { SR_DBP, "PARITY"}, { SR_RST, "RST" }, { SR_BSY, "BSY" }, - { SR_REQ, "REQ" }, { SR_MSG, "MSG" }, { SR_CD, "CD" }, { SR_IO, "IO" }, - { SR_SEL, "SEL" }, {0, NULL} -}, basrs[] = { - {BASR_ATN, "ATN"}, {BASR_ACK, "ACK"}, {0, NULL} -}, icrs[] = { - {ICR_ASSERT_RST, "ASSERT RST"},{ICR_ASSERT_ACK, "ASSERT ACK"}, - {ICR_ASSERT_BSY, "ASSERT BSY"}, {ICR_ASSERT_SEL, "ASSERT SEL"}, - {ICR_ASSERT_ATN, "ASSERT ATN"}, {ICR_ASSERT_DATA, "ASSERT DATA"}, + {SR_DBP, "PARITY"}, + {SR_RST, "RST"}, + {SR_BSY, "BSY"}, + {SR_REQ, "REQ"}, + {SR_MSG, "MSG"}, + {SR_CD, "CD"}, + {SR_IO, "IO"}, + {SR_SEL, "SEL"}, {0, NULL} -}, mrs[] = { - {MR_BLOCK_DMA_MODE, "MODE BLOCK DMA"}, {MR_TARGET, "MODE TARGET"}, - {MR_ENABLE_PAR_CHECK, "MODE PARITY CHECK"}, {MR_ENABLE_PAR_INTR, - "MODE PARITY INTR"}, {MR_ENABLE_EOP_INTR,"MODE EOP INTR"}, +}, +basrs[] = { + {BASR_ATN, "ATN"}, + {BASR_ACK, "ACK"}, + {0, NULL} +}, +icrs[] = { + {ICR_ASSERT_RST, "ASSERT RST"}, + {ICR_ASSERT_ACK, "ASSERT ACK"}, + {ICR_ASSERT_BSY, "ASSERT BSY"}, + {ICR_ASSERT_SEL, "ASSERT SEL"}, + {ICR_ASSERT_ATN, "ASSERT ATN"}, + {ICR_ASSERT_DATA, "ASSERT DATA"}, + {0, NULL} +}, +mrs[] = { + {MR_BLOCK_DMA_MODE, "MODE BLOCK DMA"}, + {MR_TARGET, "MODE TARGET"}, + {MR_ENABLE_PAR_CHECK, "MODE PARITY CHECK"}, + {MR_ENABLE_PAR_INTR, "MODE PARITY INTR"}, + {MR_ENABLE_EOP_INTR, "MODE EOP INTR"}, {MR_MONITOR_BSY, "MODE MONITOR BSY"}, - {MR_DMA_MODE, "MODE DMA"}, {MR_ARBITRATE, "MODE ARBITRATION"}, + {MR_DMA_MODE, "MODE DMA"}, + {MR_ARBITRATE, "MODE ARBITRATION"}, {0, NULL} }; @@ -511,15 +489,13 @@ static struct { static void NCR5380_print(struct Scsi_Host *instance) { unsigned char status, data, basr, mr, icr, i; - unsigned long flags; - local_irq_save(flags); data = NCR5380_read(CURRENT_SCSI_DATA_REG); status = NCR5380_read(STATUS_REG); mr = NCR5380_read(MODE_REG); icr = NCR5380_read(INITIATOR_COMMAND_REG); basr = NCR5380_read(BUS_AND_STATUS_REG); - local_irq_restore(flags); + printk("STATUS_REG: %02x ", status); for (i = 0; signals[i].mask; ++i) if (status & signals[i].mask) @@ -543,8 +519,12 @@ static struct { unsigned char value; const char *name; } phases[] = { - {PHASE_DATAOUT, "DATAOUT"}, {PHASE_DATAIN, "DATAIN"}, {PHASE_CMDOUT, "CMDOUT"}, - {PHASE_STATIN, "STATIN"}, {PHASE_MSGOUT, "MSGOUT"}, {PHASE_MSGIN, "MSGIN"}, + {PHASE_DATAOUT, "DATAOUT"}, + {PHASE_DATAIN, "DATAIN"}, + {PHASE_CMDOUT, "CMDOUT"}, + {PHASE_STATIN, 
"STATIN"}, + {PHASE_MSGOUT, "MSGOUT"}, + {PHASE_MSGIN, "MSGIN"}, {PHASE_UNKNOWN, "UNKNOWN"} }; @@ -553,8 +533,6 @@ static struct { * @instance: adapter to dump * * Print the current SCSI phase for debugging purposes - * - * Locks: none */ static void NCR5380_print_phase(struct Scsi_Host *instance) @@ -564,54 +542,21 @@ static void NCR5380_print_phase(struct Scsi_Host *instance) status = NCR5380_read(STATUS_REG); if (!(status & SR_REQ)) - printk(KERN_DEBUG "scsi%d: REQ not asserted, phase unknown.\n", HOSTNO); + shost_printk(KERN_DEBUG, instance, "REQ not asserted, phase unknown.\n"); else { for (i = 0; (phases[i].value != PHASE_UNKNOWN) && (phases[i].value != (status & PHASE_MASK)); ++i) ; - printk(KERN_DEBUG "scsi%d: phase %s\n", HOSTNO, phases[i].name); + shost_printk(KERN_DEBUG, instance, "phase %s\n", phases[i].name); } } - #endif -/* - * ++roman: New scheme of calling NCR5380_main() - * - * If we're not in an interrupt, we can call our main directly, it cannot be - * already running. Else, we queue it on a task queue, if not 'main_running' - * tells us that a lower level is already executing it. This way, - * 'main_running' needs not be protected in a special way. - * - * queue_main() is a utility function for putting our main onto the task - * queue, if main_running is false. It should be called only from a - * interrupt or bottom half. - */ - -#include <linux/gfp.h> -#include <linux/workqueue.h> -#include <linux/interrupt.h> - -static inline void queue_main(struct NCR5380_hostdata *hostdata) -{ - if (!hostdata->main_running) { - /* If in interrupt and NCR5380_main() not already running, - queue it on the 'immediate' task queue, to be processed - immediately after the current interrupt processing has - finished. */ - schedule_work(&hostdata->main_task); - } - /* else: nothing to do: the running NCR5380_main() will pick up - any newly queued command. */ -} - /** * NCR58380_info - report driver and host information * @instance: relevant scsi host instance * * For use as the host template info() handler. - * - * Locks: none */ static const char *NCR5380_info(struct Scsi_Host *instance) @@ -630,13 +575,14 @@ static void prepare_info(struct Scsi_Host *instance) "base 0x%lx, irq %d, " "can_queue %d, cmd_per_lun %d, " "sg_tablesize %d, this_id %d, " - "flags { %s}, " + "flags { %s%s}, " "options { %s} ", instance->hostt->name, instance->io_port, instance->n_io_port, instance->base, instance->irq, instance->can_queue, instance->cmd_per_lun, instance->sg_tablesize, instance->this_id, hostdata->flags & FLAG_TAGGED_QUEUING ? "TAGGED_QUEUING " : "", + hostdata->flags & FLAG_TOSHIBA_DELAY ? "TOSHIBA_DELAY " : "", #ifdef DIFFERENTIAL "DIFFERENTIAL " #endif @@ -653,102 +599,6 @@ static void prepare_info(struct Scsi_Host *instance) } /** - * NCR5380_print_status - dump controller info - * @instance: controller to dump - * - * Print commands in the various queues, called from NCR5380_abort - * to aid debugging. 
- */ - -static void lprint_Scsi_Cmnd(struct scsi_cmnd *cmd) -{ - int i, s; - unsigned char *command; - printk("scsi%d: destination target %d, lun %llu\n", - H_NO(cmd), cmd->device->id, cmd->device->lun); - printk(KERN_CONT " command = "); - command = cmd->cmnd; - printk(KERN_CONT "%2d (0x%02x)", command[0], command[0]); - for (i = 1, s = COMMAND_SIZE(command[0]); i < s; ++i) - printk(KERN_CONT " %02x", command[i]); - printk("\n"); -} - -static void NCR5380_print_status(struct Scsi_Host *instance) -{ - struct NCR5380_hostdata *hostdata; - struct scsi_cmnd *ptr; - unsigned long flags; - - NCR5380_dprint(NDEBUG_ANY, instance); - NCR5380_dprint_phase(NDEBUG_ANY, instance); - - hostdata = (struct NCR5380_hostdata *)instance->hostdata; - - local_irq_save(flags); - printk("NCR5380: coroutine is%s running.\n", - hostdata->main_running ? "" : "n't"); - if (!hostdata->connected) - printk("scsi%d: no currently connected command\n", HOSTNO); - else - lprint_Scsi_Cmnd((struct scsi_cmnd *) hostdata->connected); - printk("scsi%d: issue_queue\n", HOSTNO); - for (ptr = (struct scsi_cmnd *)hostdata->issue_queue; ptr; ptr = NEXT(ptr)) - lprint_Scsi_Cmnd(ptr); - - printk("scsi%d: disconnected_queue\n", HOSTNO); - for (ptr = (struct scsi_cmnd *) hostdata->disconnected_queue; ptr; - ptr = NEXT(ptr)) - lprint_Scsi_Cmnd(ptr); - - local_irq_restore(flags); - printk("\n"); -} - -static void show_Scsi_Cmnd(struct scsi_cmnd *cmd, struct seq_file *m) -{ - int i, s; - unsigned char *command; - seq_printf(m, "scsi%d: destination target %d, lun %llu\n", - H_NO(cmd), cmd->device->id, cmd->device->lun); - seq_puts(m, " command = "); - command = cmd->cmnd; - seq_printf(m, "%2d (0x%02x)", command[0], command[0]); - for (i = 1, s = COMMAND_SIZE(command[0]); i < s; ++i) - seq_printf(m, " %02x", command[i]); - seq_putc(m, '\n'); -} - -static int __maybe_unused NCR5380_show_info(struct seq_file *m, - struct Scsi_Host *instance) -{ - struct NCR5380_hostdata *hostdata; - struct scsi_cmnd *ptr; - unsigned long flags; - - hostdata = (struct NCR5380_hostdata *)instance->hostdata; - - local_irq_save(flags); - seq_printf(m, "NCR5380: coroutine is%s running.\n", - hostdata->main_running ? 
"" : "n't"); - if (!hostdata->connected) - seq_printf(m, "scsi%d: no currently connected command\n", HOSTNO); - else - show_Scsi_Cmnd((struct scsi_cmnd *) hostdata->connected, m); - seq_printf(m, "scsi%d: issue_queue\n", HOSTNO); - for (ptr = (struct scsi_cmnd *)hostdata->issue_queue; ptr; ptr = NEXT(ptr)) - show_Scsi_Cmnd(ptr, m); - - seq_printf(m, "scsi%d: disconnected_queue\n", HOSTNO); - for (ptr = (struct scsi_cmnd *) hostdata->disconnected_queue; ptr; - ptr = NEXT(ptr)) - show_Scsi_Cmnd(ptr, m); - - local_irq_restore(flags); - return 0; -} - -/** * NCR5380_init - initialise an NCR5380 * @instance: adapter to configure * @flags: control flags @@ -764,11 +614,11 @@ static int __maybe_unused NCR5380_show_info(struct seq_file *m, static int __init NCR5380_init(struct Scsi_Host *instance, int flags) { + struct NCR5380_hostdata *hostdata = shost_priv(instance); int i; - SETUP_HOSTDATA(instance); + unsigned long deadline; hostdata->host = instance; - hostdata->aborted = 0; hostdata->id_mask = 1 << instance->this_id; hostdata->id_higher_mask = 0; for (i = hostdata->id_mask; i <= 0x80; i <<= 1) @@ -782,13 +632,21 @@ static int __init NCR5380_init(struct Scsi_Host *instance, int flags) #if defined (REAL_DMA) hostdata->dma_len = 0; #endif - hostdata->targets_present = 0; + spin_lock_init(&hostdata->lock); hostdata->connected = NULL; - hostdata->issue_queue = NULL; - hostdata->disconnected_queue = NULL; + hostdata->sensing = NULL; + INIT_LIST_HEAD(&hostdata->autosense); + INIT_LIST_HEAD(&hostdata->unissued); + INIT_LIST_HEAD(&hostdata->disconnected); + hostdata->flags = flags; INIT_WORK(&hostdata->main_task, NCR5380_main); + hostdata->work_q = alloc_workqueue("ncr5380_%d", + WQ_UNBOUND | WQ_MEM_RECLAIM, + 1, instance->host_no); + if (!hostdata->work_q) + return -ENOMEM; prepare_info(instance); @@ -797,6 +655,72 @@ static int __init NCR5380_init(struct Scsi_Host *instance, int flags) NCR5380_write(TARGET_COMMAND_REG, 0); NCR5380_write(SELECT_ENABLE_REG, 0); + /* Calibrate register polling loop */ + i = 0; + deadline = jiffies + 1; + do { + cpu_relax(); + } while (time_is_after_jiffies(deadline)); + deadline += msecs_to_jiffies(256); + do { + NCR5380_read(STATUS_REG); + ++i; + cpu_relax(); + } while (time_is_after_jiffies(deadline)); + hostdata->accesses_per_ms = i / 256; + + return 0; +} + +/** + * NCR5380_maybe_reset_bus - Detect and correct bus wedge problems. + * @instance: adapter to check + * + * If the system crashed, it may have crashed with a connected target and + * the SCSI bus busy. Check for BUS FREE phase. If not, try to abort the + * currently established nexus, which we know nothing about. Failing that + * do a bus reset. + * + * Note that a bus reset will cause the chip to assert IRQ. + * + * Returns 0 if successful, otherwise -ENXIO. + */ + +static int NCR5380_maybe_reset_bus(struct Scsi_Host *instance) +{ + struct NCR5380_hostdata *hostdata = shost_priv(instance); + int pass; + + for (pass = 1; (NCR5380_read(STATUS_REG) & SR_BSY) && pass <= 6; ++pass) { + switch (pass) { + case 1: + case 3: + case 5: + shost_printk(KERN_ERR, instance, "SCSI bus busy, waiting up to five seconds\n"); + NCR5380_poll_politely(instance, + STATUS_REG, SR_BSY, 0, 5 * HZ); + break; + case 2: + shost_printk(KERN_ERR, instance, "bus busy, attempting abort\n"); + do_abort(instance); + break; + case 4: + shost_printk(KERN_ERR, instance, "bus busy, attempting reset\n"); + do_reset(instance); + /* Wait after a reset; the SCSI standard calls for + * 250ms, we wait 500ms to be on the safe side. 
+ * But some Toshiba CD-ROMs need ten times that. + */ + if (hostdata->flags & FLAG_TOSHIBA_DELAY) + msleep(2500); + else + msleep(500); + break; + case 6: + shost_printk(KERN_ERR, instance, "bus locked solid\n"); + return -ENXIO; + } + } return 0; } @@ -812,6 +736,38 @@ static void NCR5380_exit(struct Scsi_Host *instance) struct NCR5380_hostdata *hostdata = shost_priv(instance); cancel_work_sync(&hostdata->main_task); + destroy_workqueue(hostdata->work_q); +} + +/** + * complete_cmd - finish processing a command and return it to the SCSI ML + * @instance: the host instance + * @cmd: command to complete + */ + +static void complete_cmd(struct Scsi_Host *instance, + struct scsi_cmnd *cmd) +{ + struct NCR5380_hostdata *hostdata = shost_priv(instance); + + dsprintk(NDEBUG_QUEUES, instance, "complete_cmd: cmd %p\n", cmd); + + if (hostdata->sensing == cmd) { + /* Autosense processing ends here */ + if ((cmd->result & 0xff) != SAM_STAT_GOOD) { + scsi_eh_restore_cmnd(cmd, &hostdata->ses); + set_host_byte(cmd, DID_ERROR); + } else + scsi_eh_restore_cmnd(cmd, &hostdata->ses); + hostdata->sensing = NULL; + } + +#ifdef SUPPORT_TAGS + cmd_free_tag(cmd); +#else + hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun); +#endif + cmd->scsi_done(cmd); } /** @@ -819,7 +775,7 @@ static void NCR5380_exit(struct Scsi_Host *instance) * @instance: the relevant SCSI adapter * @cmd: SCSI command * - * cmd is added to the per instance issue_queue, with minor + * cmd is added to the per-instance issue queue, with minor * twiddling done to the host specific fields of cmd. If the * main coroutine is not running, it is restarted. */ @@ -828,44 +784,23 @@ static int NCR5380_queue_command(struct Scsi_Host *instance, struct scsi_cmnd *cmd) { struct NCR5380_hostdata *hostdata = shost_priv(instance); - struct scsi_cmnd *tmp; + struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd); unsigned long flags; #if (NDEBUG & NDEBUG_NO_WRITE) switch (cmd->cmnd[0]) { case WRITE_6: case WRITE_10: - printk(KERN_NOTICE "scsi%d: WRITE attempted with NO_WRITE debugging flag set\n", - H_NO(cmd)); + shost_printk(KERN_DEBUG, instance, "WRITE attempted with NDEBUG_NO_WRITE set\n"); cmd->result = (DID_ERROR << 16); cmd->scsi_done(cmd); return 0; } #endif /* (NDEBUG & NDEBUG_NO_WRITE) */ - /* - * We use the host_scribble field as a pointer to the next command - * in a queue - */ - - SET_NEXT(cmd, NULL); cmd->result = 0; /* - * Insert the cmd into the issue queue. Note that REQUEST SENSE - * commands are added to the head of the queue since any command will - * clear the contingent allegiance condition that exists and the - * sense data is only guaranteed to be valid while the condition exists. - */ - - /* ++guenther: now that the issue queue is being set up, we can lock ST-DMA. - * Otherwise a running NCR5380_main may steal the lock. - * Lock before actually inserting due to fairness reasons explained in - * atari_scsi.c. If we insert first, then it's impossible for this driver - * to release the lock. - * Stop timer for this command while waiting for the lock, or timeouts - * may happen (and they really do), and it's no good if the command doesn't - * appear in any of the queues. * ++roman: Just disabling the NCR interrupt isn't sufficient here, * because also a timer int can trigger an abort or reset, which would * alter queues and touch the lock. 
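Editorial aside, not part of the patch: the hunks above and below convert
this driver from chaining commands through scsi_cmnd.host_scribble to a
list_head embedded in per-command private data. A minimal sketch of that
pattern, assuming the NCR5380_cmd layout this series adds to the shared
header (scsi_cmd_priv() is the midlayer accessor for the private area):

	struct NCR5380_cmd {
		struct list_head list;	/* links the command into the unissued,
					 * autosense or disconnected list */
	};

	#define NCR5380_CMD_SIZE	(sizeof(struct NCR5380_cmd))

	/* The private area is laid out immediately after the scsi_cmnd,
	 * so stepping back one scsi_cmnd recovers the owning command. */
	static inline struct scsi_cmnd *NCR5380_to_scmd(struct NCR5380_cmd *ncmd_ptr)
	{
		return ((struct scsi_cmnd *)ncmd_ptr) - 1;
	}

With .cmd_size = NCR5380_CMD_SIZE in the host template, as the cumana_1 and
oak hunks above set, the midlayer allocates this area together with every
scsi_cmnd, so the driver needs no extra allocation and no queue locking
beyond hostdata->lock.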
@@ -873,7 +808,7 @@ static int NCR5380_queue_command(struct Scsi_Host *instance, if (!NCR5380_acquire_dma_irq(instance)) return SCSI_MLQUEUE_HOST_BUSY; - local_irq_save(flags); + spin_lock_irqsave(&hostdata->lock, flags); /* * Insert the cmd into the issue queue. Note that REQUEST SENSE @@ -882,33 +817,18 @@ static int NCR5380_queue_command(struct Scsi_Host *instance, * sense data is only guaranteed to be valid while the condition exists. */ - if (!(hostdata->issue_queue) || (cmd->cmnd[0] == REQUEST_SENSE)) { - LIST(cmd, hostdata->issue_queue); - SET_NEXT(cmd, hostdata->issue_queue); - hostdata->issue_queue = cmd; - } else { - for (tmp = (struct scsi_cmnd *)hostdata->issue_queue; - NEXT(tmp); tmp = NEXT(tmp)) - ; - LIST(cmd, tmp); - SET_NEXT(tmp, cmd); - } - local_irq_restore(flags); + if (cmd->cmnd[0] == REQUEST_SENSE) + list_add(&ncmd->list, &hostdata->unissued); + else + list_add_tail(&ncmd->list, &hostdata->unissued); - dprintk(NDEBUG_QUEUES, "scsi%d: command added to %s of queue\n", H_NO(cmd), - (cmd->cmnd[0] == REQUEST_SENSE) ? "head" : "tail"); + spin_unlock_irqrestore(&hostdata->lock, flags); - /* If queue_command() is called from an interrupt (real one or bottom - * half), we let queue_main() do the job of taking care about main. If it - * is already running, this is a no-op, else main will be queued. - * - * If we're not in an interrupt, we can call NCR5380_main() - * unconditionally, because it cannot be already running. - */ - if (in_interrupt() || irqs_disabled()) - queue_main(hostdata); - else - NCR5380_main(&hostdata->main_task); + dsprintk(NDEBUG_QUEUES, instance, "command %p added to %s of queue\n", + cmd, (cmd->cmnd[0] == REQUEST_SENSE) ? "head" : "tail"); + + /* Kick off command processing */ + queue_work(hostdata->work_q, &hostdata->main_task); return 0; } @@ -917,22 +837,85 @@ static inline void maybe_release_dma_irq(struct Scsi_Host *instance) struct NCR5380_hostdata *hostdata = shost_priv(instance); /* Caller does the locking needed to set & test these data atomically */ - if (!hostdata->disconnected_queue && - !hostdata->issue_queue && + if (list_empty(&hostdata->disconnected) && + list_empty(&hostdata->unissued) && + list_empty(&hostdata->autosense) && !hostdata->connected && - !hostdata->retain_dma_intr) + !hostdata->selecting) NCR5380_release_dma_irq(instance); } /** + * dequeue_next_cmd - dequeue a command for processing + * @instance: the scsi host instance + * + * Priority is given to commands on the autosense queue. These commands + * need autosense because of a CHECK CONDITION result. + * + * Returns a command pointer if a command is found for a target that is + * not already busy. Otherwise returns NULL. 
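+ *
+ * An editorial paraphrase of the caller's loop (see NCR5380_main() below;
+ * this is illustration, not additional patch code):
+ *
+ *	while (!hostdata->connected &&
+ *	       (cmd = dequeue_next_cmd(instance)))
+ *		if (NCR5380_select(instance, cmd))
+ *			requeue_cmd(instance, cmd);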
+ */ + +static struct scsi_cmnd *dequeue_next_cmd(struct Scsi_Host *instance) +{ + struct NCR5380_hostdata *hostdata = shost_priv(instance); + struct NCR5380_cmd *ncmd; + struct scsi_cmnd *cmd; + + if (hostdata->sensing || list_empty(&hostdata->autosense)) { + list_for_each_entry(ncmd, &hostdata->unissued, list) { + cmd = NCR5380_to_scmd(ncmd); + dsprintk(NDEBUG_QUEUES, instance, "dequeue: cmd=%p target=%d busy=0x%02x lun=%llu\n", + cmd, scmd_id(cmd), hostdata->busy[scmd_id(cmd)], cmd->device->lun); + + if ( +#ifdef SUPPORT_TAGS + !is_lun_busy(cmd, 1) +#else + !(hostdata->busy[scmd_id(cmd)] & (1 << cmd->device->lun)) +#endif + ) { + list_del(&ncmd->list); + dsprintk(NDEBUG_QUEUES, instance, + "dequeue: removed %p from issue queue\n", cmd); + return cmd; + } + } + } else { + /* Autosense processing begins here */ + ncmd = list_first_entry(&hostdata->autosense, + struct NCR5380_cmd, list); + list_del(&ncmd->list); + cmd = NCR5380_to_scmd(ncmd); + dsprintk(NDEBUG_QUEUES, instance, + "dequeue: removed %p from autosense queue\n", cmd); + scsi_eh_prep_cmnd(cmd, &hostdata->ses, NULL, 0, ~0); + hostdata->sensing = cmd; + return cmd; + } + return NULL; +} + +static void requeue_cmd(struct Scsi_Host *instance, struct scsi_cmnd *cmd) +{ + struct NCR5380_hostdata *hostdata = shost_priv(instance); + struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd); + + if (hostdata->sensing == cmd) { + scsi_eh_restore_cmnd(cmd, &hostdata->ses); + list_add(&ncmd->list, &hostdata->autosense); + hostdata->sensing = NULL; + } else + list_add(&ncmd->list, &hostdata->unissued); +} + +/** * NCR5380_main - NCR state machines * * NCR5380_main is a coroutine that runs as long as more work can * be done on the NCR5380 host adapters in a system. Both * NCR5380_queue_command() and NCR5380_intr() will try to start it * in case it is not running. - * - * Locks: called as its own thread with no locks held. */ static void NCR5380_main(struct work_struct *work) @@ -940,154 +923,70 @@ static void NCR5380_main(struct work_struct *work) struct NCR5380_hostdata *hostdata = container_of(work, struct NCR5380_hostdata, main_task); struct Scsi_Host *instance = hostdata->host; - struct scsi_cmnd *tmp, *prev; int done; - unsigned long flags; /* - * We run (with interrupts disabled) until we're sure that none of - * the host adapters have anything that can be done, at which point - * we set main_running to 0 and exit. - * - * Interrupts are enabled before doing various other internal - * instructions, after we've decided that we need to run through - * the loop again. - * - * this should prevent any race conditions. - * * ++roman: Just disabling the NCR interrupt isn't sufficient here, * because also a timer int can trigger an abort or reset, which can * alter queues and touch the Falcon lock. */ - /* Tell int handlers main() is now already executing. Note that - no races are possible here. If an int comes in before - 'main_running' is set here, and queues/executes main via the - task queue, it doesn't do any harm, just this instance of main - won't find any work left to do. */ - if (hostdata->main_running) - return; - hostdata->main_running = 1; - - local_save_flags(flags); do { - local_irq_disable(); /* Freeze request queues */ done = 1; - if (!hostdata->connected) { - dprintk(NDEBUG_MAIN, "scsi%d: not connected\n", HOSTNO); - /* - * Search through the issue_queue for a command destined - * for a target that's not busy. 
- */ -#if (NDEBUG & NDEBUG_LISTS) - for (tmp = (struct scsi_cmnd *) hostdata->issue_queue, prev = NULL; - tmp && (tmp != prev); prev = tmp, tmp = NEXT(tmp)) - ; - /*printk("%p ", tmp);*/ - if ((tmp == prev) && tmp) - printk(" LOOP\n"); - /* else printk("\n"); */ -#endif - for (tmp = (struct scsi_cmnd *) hostdata->issue_queue, - prev = NULL; tmp; prev = tmp, tmp = NEXT(tmp)) { - u8 lun = tmp->device->lun; - - dprintk(NDEBUG_LISTS, - "MAIN tmp=%p target=%d busy=%d lun=%d\n", - tmp, scmd_id(tmp), hostdata->busy[scmd_id(tmp)], - lun); - /* When we find one, remove it from the issue queue. */ - /* ++guenther: possible race with Falcon locking */ - if ( -#ifdef SUPPORT_TAGS - !is_lun_busy( tmp, tmp->cmnd[0] != REQUEST_SENSE) -#else - !(hostdata->busy[tmp->device->id] & (1 << lun)) -#endif - ) { - /* ++guenther: just to be sure, this must be atomic */ - local_irq_disable(); - if (prev) { - REMOVE(prev, NEXT(prev), tmp, NEXT(tmp)); - SET_NEXT(prev, NEXT(tmp)); - } else { - REMOVE(-1, hostdata->issue_queue, tmp, NEXT(tmp)); - hostdata->issue_queue = NEXT(tmp); - } - SET_NEXT(tmp, NULL); - hostdata->retain_dma_intr++; + spin_lock_irq(&hostdata->lock); + while (!hostdata->connected && !hostdata->selecting) { + struct scsi_cmnd *cmd = dequeue_next_cmd(instance); - /* reenable interrupts after finding one */ - local_irq_restore(flags); + if (!cmd) + break; - /* - * Attempt to establish an I_T_L nexus here. - * On success, instance->hostdata->connected is set. - * On failure, we must add the command back to the - * issue queue so we can keep trying. - */ - dprintk(NDEBUG_MAIN, "scsi%d: main(): command for target %d " - "lun %d removed from issue_queue\n", - HOSTNO, tmp->device->id, lun); - /* - * REQUEST SENSE commands are issued without tagged - * queueing, even on SCSI-II devices because the - * contingent allegiance condition exists for the - * entire unit. - */ - /* ++roman: ...and the standard also requires that - * REQUEST SENSE command are untagged. - */ + dsprintk(NDEBUG_MAIN, instance, "main: dequeued %p\n", cmd); + + /* + * Attempt to establish an I_T_L nexus here. + * On success, instance->hostdata->connected is set. + * On failure, we must add the command back to the + * issue queue so we can keep trying. + */ + /* + * REQUEST SENSE commands are issued without tagged + * queueing, even on SCSI-II devices because the + * contingent allegiance condition exists for the + * entire unit. + */ + /* ++roman: ...and the standard also requires that + * REQUEST SENSE command are untagged. + */ #ifdef SUPPORT_TAGS - cmd_get_tag(tmp, tmp->cmnd[0] != REQUEST_SENSE); + cmd_get_tag(cmd, cmd->cmnd[0] != REQUEST_SENSE); #endif - if (!NCR5380_select(instance, tmp)) { - local_irq_disable(); - hostdata->retain_dma_intr--; - /* release if target did not response! 
*/ - maybe_release_dma_irq(instance); - local_irq_restore(flags); - break; - } else { - local_irq_disable(); - LIST(tmp, hostdata->issue_queue); - SET_NEXT(tmp, hostdata->issue_queue); - hostdata->issue_queue = tmp; + if (!NCR5380_select(instance, cmd)) { + dsprintk(NDEBUG_MAIN, instance, "main: select complete\n"); + maybe_release_dma_irq(instance); + } else { + dsprintk(NDEBUG_MAIN | NDEBUG_QUEUES, instance, + "main: select failed, returning %p to queue\n", cmd); + requeue_cmd(instance, cmd); #ifdef SUPPORT_TAGS - cmd_free_tag(tmp); + cmd_free_tag(cmd); #endif - hostdata->retain_dma_intr--; - local_irq_restore(flags); - dprintk(NDEBUG_MAIN, "scsi%d: main(): select() failed, " - "returned to issue_queue\n", HOSTNO); - if (hostdata->connected) - break; - } - } /* if target/lun/target queue is not busy */ - } /* for issue_queue */ - } /* if (!hostdata->connected) */ - + } + } if (hostdata->connected #ifdef REAL_DMA && !hostdata->dma_len #endif ) { - local_irq_restore(flags); - dprintk(NDEBUG_MAIN, "scsi%d: main: performing information transfer\n", - HOSTNO); + dsprintk(NDEBUG_MAIN, instance, "main: performing information transfer\n"); NCR5380_information_transfer(instance); - dprintk(NDEBUG_MAIN, "scsi%d: main: done set false\n", HOSTNO); done = 0; } + spin_unlock_irq(&hostdata->lock); + if (!done) + cond_resched(); } while (!done); - - /* Better allow ints _after_ 'main_running' has been cleared, else - an interrupt could believe we'll pick up the work it left for - us, but we won't see it anymore here... */ - hostdata->main_running = 0; - local_irq_restore(flags); } @@ -1096,27 +995,20 @@ static void NCR5380_main(struct work_struct *work) * Function : void NCR5380_dma_complete (struct Scsi_Host *instance) * * Purpose : Called by interrupt handler when DMA finishes or a phase - * mismatch occurs (which would finish the DMA transfer). + * mismatch occurs (which would finish the DMA transfer). * * Inputs : instance - this instance of the NCR5380. 
- *
 */
static void NCR5380_dma_complete(struct Scsi_Host *instance)
{
- SETUP_HOSTDATA(instance);
+ struct NCR5380_hostdata *hostdata = shost_priv(instance);
 int transferred;
 unsigned char **data;
- volatile int *count;
+ int *count;
 int saved_data = 0, overrun = 0;
 unsigned char p;
- if (!hostdata->connected) {
- printk(KERN_WARNING "scsi%d: received end of DMA interrupt with "
- "no connected cmd\n", HOSTNO);
- return;
- }
-
 if (hostdata->read_overruns) {
 p = hostdata->connected->SCp.phase;
 if (p & SR_IO) {
@@ -1126,15 +1018,11 @@ static void NCR5380_dma_complete(struct Scsi_Host *instance)
 (BASR_PHASE_MATCH|BASR_ACK)) {
 saved_data = NCR5380_read(INPUT_DATA_REG);
 overrun = 1;
- dprintk(NDEBUG_DMA, "scsi%d: read overrun handled\n", HOSTNO);
+ dsprintk(NDEBUG_DMA, instance, "read overrun handled\n");
 }
 }
 }
- dprintk(NDEBUG_DMA, "scsi%d: real DMA transfer complete, basr 0x%X, sr 0x%X\n",
- HOSTNO, NCR5380_read(BUS_AND_STATUS_REG),
- NCR5380_read(STATUS_REG));
-
#if defined(CONFIG_SUN3)
 if ((sun3scsi_dma_finish(rq_data_dir(hostdata->connected->request)))) {
 pr_err("scsi%d: overrun in UDC counter -- not prepared to deal with this!\n",
@@ -1153,9 +1041,9 @@ static void NCR5380_dma_complete(struct Scsi_Host *instance)
 }
#endif
- (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
 NCR5380_write(MODE_REG, MR_BASE);
 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ NCR5380_read(RESET_PARITY_INTERRUPT_REG);
 transferred = hostdata->dma_len - NCR5380_dma_residual(instance);
 hostdata->dma_len = 0;
@@ -1194,140 +1082,160 @@ static void NCR5380_dma_complete(struct Scsi_Host *instance)
 * Handle interrupts, reestablishing I_T_L or I_T_L_Q nexuses
 * from the disconnected queue, and restarting NCR5380_main()
 * as required.
+ *
+ * The chip can assert IRQ in any of six different conditions. The IRQ flag
+ * is then cleared by reading the Reset Parity/Interrupt Register (RPIR).
+ * Three of these six conditions are latched in the Bus and Status Register:
+ * - End of DMA (cleared by ending DMA Mode)
+ * - Parity error (cleared by reading RPIR)
+ * - Loss of BSY (cleared by reading RPIR)
+ * Two conditions have flag bits that are not latched:
+ * - Bus phase mismatch (non-maskable in DMA Mode, cleared by ending DMA Mode)
+ * - Bus reset (non-maskable)
+ * The remaining condition has no flag bit at all:
+ * - Selection/reselection
+ *
+ * Hence, establishing the cause(s) of any interrupt is partly guesswork.
+ * In "The DP8490 and DP5380 Comparison Guide", National Semiconductor
+ * claimed that "the design of the [DP8490] interrupt logic ensures
+ * interrupts will not be lost (they can be on the DP5380)."
+ * The L5380/53C80 datasheet from LOGIC Devices has more details.
+ *
+ * Checking for bus reset by reading RST is futile because of interrupt
+ * latency, but a bus reset will reset chip logic. Checking for parity error
+ * is unnecessary because that interrupt is never enabled. A Loss of BSY
+ * condition will clear DMA Mode. We can tell when this occurs because the
+ * Busy Monitor interrupt is enabled together with DMA Mode.
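+ *
+ * Editorial note, inferred from the handler below rather than stated in
+ * the patch: for the DMA-related conditions the ack convention is to end
+ * DMA Mode first and only then clear the IRQ flag, i.e.
+ *
+ *	NCR5380_write(MODE_REG, MR_BASE);
+ *	NCR5380_read(RESET_PARITY_INTERRUPT_REG);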
*/ static irqreturn_t NCR5380_intr(int irq, void *dev_id) { struct Scsi_Host *instance = dev_id; - int done = 1, handled = 0; + struct NCR5380_hostdata *hostdata = shost_priv(instance); + int handled = 0; unsigned char basr; + unsigned long flags; - dprintk(NDEBUG_INTR, "scsi%d: NCR5380 irq triggered\n", HOSTNO); + spin_lock_irqsave(&hostdata->lock, flags); - /* Look for pending interrupts */ basr = NCR5380_read(BUS_AND_STATUS_REG); - dprintk(NDEBUG_INTR, "scsi%d: BASR=%02x\n", HOSTNO, basr); - /* dispatch to appropriate routine if found and done=0 */ if (basr & BASR_IRQ) { - NCR5380_dprint(NDEBUG_INTR, instance); - if ((NCR5380_read(STATUS_REG) & (SR_SEL|SR_IO)) == (SR_SEL|SR_IO)) { - done = 0; - dprintk(NDEBUG_INTR, "scsi%d: SEL interrupt\n", HOSTNO); - NCR5380_reselect(instance); - (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG); - } else if (basr & BASR_PARITY_ERROR) { - dprintk(NDEBUG_INTR, "scsi%d: PARITY interrupt\n", HOSTNO); - (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG); - } else if ((NCR5380_read(STATUS_REG) & SR_RST) == SR_RST) { - dprintk(NDEBUG_INTR, "scsi%d: RESET interrupt\n", HOSTNO); - (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG); - } else { - /* - * The rest of the interrupt conditions can occur only during a - * DMA transfer - */ + unsigned char mr = NCR5380_read(MODE_REG); + unsigned char sr = NCR5380_read(STATUS_REG); + + dsprintk(NDEBUG_INTR, instance, "IRQ %d, BASR 0x%02x, SR 0x%02x, MR 0x%02x\n", + irq, basr, sr, mr); #if defined(REAL_DMA) - /* - * We should only get PHASE MISMATCH and EOP interrupts if we have - * DMA enabled, so do a sanity check based on the current setting - * of the MODE register. + if ((mr & MR_DMA_MODE) || (mr & MR_MONITOR_BSY)) { + /* Probably End of DMA, Phase Mismatch or Loss of BSY. + * We ack IRQ after clearing Mode Register. Workarounds + * for End of DMA errata need to happen in DMA Mode. 
*/ - if ((NCR5380_read(MODE_REG) & MR_DMA_MODE) && - ((basr & BASR_END_DMA_TRANSFER) || - !(basr & BASR_PHASE_MATCH))) { + dsprintk(NDEBUG_INTR, instance, "interrupt in DMA mode\n"); - dprintk(NDEBUG_INTR, "scsi%d: PHASE MISM or EOP interrupt\n", HOSTNO); - NCR5380_dma_complete( instance ); - done = 0; - } else + if (hostdata->connected) { + NCR5380_dma_complete(instance); + queue_work(hostdata->work_q, &hostdata->main_task); + } else { + NCR5380_write(MODE_REG, MR_BASE); + NCR5380_read(RESET_PARITY_INTERRUPT_REG); + } + } else #endif /* REAL_DMA */ - { -/* MS: Ignore unknown phase mismatch interrupts (caused by EOP interrupt) */ - if (basr & BASR_PHASE_MATCH) - dprintk(NDEBUG_INTR, "scsi%d: unknown interrupt, " - "BASR 0x%x, MR 0x%x, SR 0x%x\n", - HOSTNO, basr, NCR5380_read(MODE_REG), - NCR5380_read(STATUS_REG)); - (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG); + if ((NCR5380_read(CURRENT_SCSI_DATA_REG) & hostdata->id_mask) && + (sr & (SR_SEL | SR_IO | SR_BSY | SR_RST)) == (SR_SEL | SR_IO)) { + /* Probably reselected */ + NCR5380_write(SELECT_ENABLE_REG, 0); + NCR5380_read(RESET_PARITY_INTERRUPT_REG); + + dsprintk(NDEBUG_INTR, instance, "interrupt with SEL and IO\n"); + + if (!hostdata->connected) { + NCR5380_reselect(instance); + queue_work(hostdata->work_q, &hostdata->main_task); + } + if (!hostdata->connected) + NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); + } else { + /* Probably Bus Reset */ + NCR5380_read(RESET_PARITY_INTERRUPT_REG); + + dsprintk(NDEBUG_INTR, instance, "unknown interrupt\n"); #ifdef SUN3_SCSI_VME - dregs->csr |= CSR_DMA_ENABLE; + dregs->csr |= CSR_DMA_ENABLE; #endif - } - } /* if !(SELECTION || PARITY) */ + } handled = 1; - } /* BASR & IRQ */ else { - printk(KERN_NOTICE "scsi%d: interrupt without IRQ bit set in BASR, " - "BASR 0x%X, MR 0x%X, SR 0x%x\n", HOSTNO, basr, - NCR5380_read(MODE_REG), NCR5380_read(STATUS_REG)); - (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG); + } else { + shost_printk(KERN_NOTICE, instance, "interrupt without IRQ bit\n"); #ifdef SUN3_SCSI_VME dregs->csr |= CSR_DMA_ENABLE; #endif } - if (!done) { - dprintk(NDEBUG_INTR, "scsi%d: in int routine, calling main\n", HOSTNO); - /* Put a call to NCR5380_main() on the queue... */ - queue_main(shost_priv(instance)); - } + spin_unlock_irqrestore(&hostdata->lock, flags); + return IRQ_RETVAL(handled); } /* * Function : int NCR5380_select(struct Scsi_Host *instance, - * struct scsi_cmnd *cmd) + * struct scsi_cmnd *cmd) * * Purpose : establishes I_T_L or I_T_L_Q nexus for new or existing command, - * including ARBITRATION, SELECTION, and initial message out for - * IDENTIFY and queue messages. + * including ARBITRATION, SELECTION, and initial message out for + * IDENTIFY and queue messages. * * Inputs : instance - instantiation of the 5380 driver on which this - * target lives, cmd - SCSI command to execute. + * target lives, cmd - SCSI command to execute. * - * Returns : -1 if selection could not execute for some reason, - * 0 if selection succeeded or failed because the target - * did not respond. + * Returns cmd if selection failed but should be retried, + * NULL if selection failed and should not be retried, or + * NULL if selection succeeded (hostdata->connected == cmd). * * Side effects : - * If bus busy, arbitration failed, etc, NCR5380_select() will exit - * with registers as they should have been on entry - ie - * SELECT_ENABLE will be set appropriately, the NCR5380 - * will cease to drive any SCSI bus signals. 
+ * If bus busy, arbitration failed, etc, NCR5380_select() will exit + * with registers as they should have been on entry - ie + * SELECT_ENABLE will be set appropriately, the NCR5380 + * will cease to drive any SCSI bus signals. * - * If successful : I_T_L or I_T_L_Q nexus will be established, - * instance->connected will be set to cmd. - * SELECT interrupt will be disabled. + * If successful : I_T_L or I_T_L_Q nexus will be established, + * instance->connected will be set to cmd. + * SELECT interrupt will be disabled. * - * If failed (no target) : cmd->scsi_done() will be called, and the - * cmd->result host byte set to DID_BAD_TARGET. + * If failed (no target) : cmd->scsi_done() will be called, and the + * cmd->result host byte set to DID_BAD_TARGET. */ -static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd) +static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance, + struct scsi_cmnd *cmd) { - SETUP_HOSTDATA(instance); + struct NCR5380_hostdata *hostdata = shost_priv(instance); unsigned char tmp[3], phase; unsigned char *data; int len; - unsigned long timeout; - unsigned long flags; + int err; - hostdata->restart_select = 0; NCR5380_dprint(NDEBUG_ARBITRATION, instance); - dprintk(NDEBUG_ARBITRATION, "scsi%d: starting arbitration, id = %d\n", HOSTNO, - instance->this_id); + dsprintk(NDEBUG_ARBITRATION, instance, "starting arbitration, id = %d\n", + instance->this_id); + + /* + * Arbitration and selection phases are slow and involve dropping the + * lock, so we have to watch out for EH. An exception handler may + * change 'selecting' to NULL. This function will then return NULL + * so that the caller will forget about 'cmd'. (During information + * transfer phases, EH may change 'connected' to NULL.) + */ + hostdata->selecting = cmd; /* * Set the phase bits to 0, otherwise the NCR5380 won't drive the * data bus during SELECTION. */ - local_irq_save(flags); - if (hostdata->connected) { - local_irq_restore(flags); - return -1; - } NCR5380_write(TARGET_COMMAND_REG, 0); /* @@ -1337,96 +1245,82 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd) NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask); NCR5380_write(MODE_REG, MR_ARBITRATE); - local_irq_restore(flags); - - /* Wait for arbitration logic to complete */ -#if defined(NCR_TIMEOUT) - { - unsigned long timeout = jiffies + 2*NCR_TIMEOUT; + /* The chip now waits for BUS FREE phase. Then after the 800 ns + * Bus Free Delay, arbitration will begin. 
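+	 * We poll for ICR_ARBITRATION_PROGRESS (or for the loss of
+	 * MR_ARBITRATE) with the lock dropped. If MR_ARBITRATE has already
+	 * been cleared when the poll returns, a reselection interrupt has
+	 * claimed the bus, and if 'selecting' has become NULL, an exception
+	 * handler has aborted the command.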
+ */ - while (!(NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_PROGRESS) && - time_before(jiffies, timeout) && !hostdata->connected) - ; - if (time_after_eq(jiffies, timeout)) { - printk("scsi : arbitration timeout at %d\n", __LINE__); - NCR5380_write(MODE_REG, MR_BASE); - NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); - return -1; - } + spin_unlock_irq(&hostdata->lock); + err = NCR5380_poll_politely2(instance, MODE_REG, MR_ARBITRATE, 0, + INITIATOR_COMMAND_REG, ICR_ARBITRATION_PROGRESS, + ICR_ARBITRATION_PROGRESS, HZ); + spin_lock_irq(&hostdata->lock); + if (!(NCR5380_read(MODE_REG) & MR_ARBITRATE)) { + /* Reselection interrupt */ + goto out; } -#else /* NCR_TIMEOUT */ - while (!(NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_PROGRESS) && - !hostdata->connected) - ; -#endif - - dprintk(NDEBUG_ARBITRATION, "scsi%d: arbitration complete\n", HOSTNO); - - if (hostdata->connected) { + if (!hostdata->selecting) { + /* Command was aborted */ NCR5380_write(MODE_REG, MR_BASE); - return -1; + goto out; } - /* - * The arbitration delay is 2.2us, but this is a minimum and there is - * no maximum so we can safely sleep for ceil(2.2) usecs to accommodate - * the integral nature of udelay(). - * - */ + if (err < 0) { + NCR5380_write(MODE_REG, MR_BASE); + shost_printk(KERN_ERR, instance, + "select: arbitration timeout\n"); + goto out; + } + spin_unlock_irq(&hostdata->lock); + /* The SCSI-2 arbitration delay is 2.4 us */ udelay(3); /* Check for lost arbitration */ if ((NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) || (NCR5380_read(CURRENT_SCSI_DATA_REG) & hostdata->id_higher_mask) || - (NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) || - hostdata->connected) { + (NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST)) { NCR5380_write(MODE_REG, MR_BASE); - dprintk(NDEBUG_ARBITRATION, "scsi%d: lost arbitration, deasserting MR_ARBITRATE\n", - HOSTNO); - return -1; + dsprintk(NDEBUG_ARBITRATION, instance, "lost arbitration, deasserting MR_ARBITRATE\n"); + spin_lock_irq(&hostdata->lock); + goto out; } - /* after/during arbitration, BSY should be asserted. - IBM DPES-31080 Version S31Q works now */ - /* Tnx to Thomas_Roesch@m2.maus.de for finding this! (Roman) */ + /* After/during arbitration, BSY should be asserted. + * IBM DPES-31080 Version S31Q works now + * Tnx to Thomas_Roesch@m2.maus.de for finding this! (Roman) + */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_SEL | ICR_ASSERT_BSY); - if ((NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) || - hostdata->connected) { - NCR5380_write(MODE_REG, MR_BASE); - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - dprintk(NDEBUG_ARBITRATION, "scsi%d: lost arbitration, deasserting ICR_ASSERT_SEL\n", - HOSTNO); - return -1; - } - /* * Again, bus clear + bus settle time is 1.2us, however, this is * a minimum so we'll udelay ceil(1.2) */ -#ifdef CONFIG_ATARI_SCSI_TOSHIBA_DELAY - /* ++roman: But some targets (see above :-) seem to need a bit more... 
*/ - udelay(15); -#else - udelay(2); -#endif + if (hostdata->flags & FLAG_TOSHIBA_DELAY) + udelay(15); + else + udelay(2); - if (hostdata->connected) { + spin_lock_irq(&hostdata->lock); + + /* NCR5380_reselect() clears MODE_REG after a reselection interrupt */ + if (!(NCR5380_read(MODE_REG) & MR_ARBITRATE)) + goto out; + + if (!hostdata->selecting) { NCR5380_write(MODE_REG, MR_BASE); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - return -1; + goto out; } - dprintk(NDEBUG_ARBITRATION, "scsi%d: won arbitration\n", HOSTNO); + dsprintk(NDEBUG_ARBITRATION, instance, "won arbitration\n"); /* * Now that we have won arbitration, start Selection process, asserting * the host and target ID's on the SCSI bus. */ - NCR5380_write(OUTPUT_DATA_REG, (hostdata->id_mask | (1 << cmd->device->id))); + NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask | (1 << scmd_id(cmd))); /* * Raise ATN while SEL is true before BSY goes false from arbitration, @@ -1434,22 +1328,18 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd) * phase immediately after selection. */ - NCR5380_write(INITIATOR_COMMAND_REG, (ICR_BASE | ICR_ASSERT_BSY | - ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_SEL )); + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_BSY | + ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_SEL); NCR5380_write(MODE_REG, MR_BASE); /* * Reselect interrupts must be turned off prior to the dropping of BSY, * otherwise we will trigger an interrupt. */ - - if (hostdata->connected) { - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - return -1; - } - NCR5380_write(SELECT_ENABLE_REG, 0); + spin_unlock_irq(&hostdata->lock); + /* * The initiator shall then wait at least two deskew delays and release * the BSY signal. @@ -1457,8 +1347,8 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd) udelay(1); /* wingel -- wait two bus deskew delay >2*45ns */ /* Reset BSY */ - NCR5380_write(INITIATOR_COMMAND_REG, (ICR_BASE | ICR_ASSERT_DATA | - ICR_ASSERT_ATN | ICR_ASSERT_SEL)); + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA | + ICR_ASSERT_ATN | ICR_ASSERT_SEL); /* * Something weird happens when we cease to drive BSY - looks @@ -1479,45 +1369,39 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd) udelay(1); - dprintk(NDEBUG_SELECTION, "scsi%d: selecting target %d\n", HOSTNO, cmd->device->id); + dsprintk(NDEBUG_SELECTION, instance, "selecting target %d\n", scmd_id(cmd)); /* * The SCSI specification calls for a 250 ms timeout for the actual * selection. */ - timeout = jiffies + msecs_to_jiffies(250); - - /* - * XXX very interesting - we're seeing a bounce where the BSY we - * asserted is being reflected / still asserted (propagation delay?) - * and it's detecting as true. Sigh. - */ - -#if 0 - /* ++roman: If a target conformed to the SCSI standard, it wouldn't assert - * IO while SEL is true. But again, there are some disks out the in the - * world that do that nevertheless. (Somebody claimed that this announces - * reselection capability of the target.) So we better skip that test and - * only wait for BSY... 
(Famous german words: Der Klügere gibt nach :-)
-	 */
-
-	while (time_before(jiffies, timeout) &&
-	       !(NCR5380_read(STATUS_REG) & (SR_BSY | SR_IO)))
-		;
+	err = NCR5380_poll_politely(instance, STATUS_REG, SR_BSY, SR_BSY,
+	                            msecs_to_jiffies(250));

 	if ((NCR5380_read(STATUS_REG) & (SR_SEL | SR_IO)) == (SR_SEL | SR_IO)) {
+		spin_lock_irq(&hostdata->lock);
 		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
 		NCR5380_reselect(instance);
-		printk(KERN_ERR "scsi%d: reselection after won arbitration?\n",
-		       HOSTNO);
+		if (!hostdata->connected)
+			NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+		shost_printk(KERN_ERR, instance, "reselection after won arbitration?\n");
+		goto out;
+	}
+
+	if (err < 0) {
+		spin_lock_irq(&hostdata->lock);
+		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
 		NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
-		return -1;
+		/* Can't touch cmd if it has been reclaimed by the scsi ML */
+		if (hostdata->selecting) {
+			cmd->result = DID_BAD_TARGET << 16;
+			complete_cmd(instance, cmd);
+			dsprintk(NDEBUG_SELECTION, instance, "target did not respond within 250ms\n");
+			cmd = NULL;
+		}
+		goto out;
 	}
-#else
-	while (time_before(jiffies, timeout) && !(NCR5380_read(STATUS_REG) & SR_BSY))
-		;
-#endif

 	/*
 	 * No less than two deskew delays after the initiator detects the
@@ -1529,29 +1413,6 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd)

 	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);

-	if (!(NCR5380_read(STATUS_REG) & SR_BSY)) {
-		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
-		if (hostdata->targets_present & (1 << cmd->device->id)) {
-			printk(KERN_ERR "scsi%d: weirdness\n", HOSTNO);
-			if (hostdata->restart_select)
-				printk(KERN_NOTICE "\trestart select\n");
-			NCR5380_dprint(NDEBUG_ANY, instance);
-			NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
-			return -1;
-		}
-		cmd->result = DID_BAD_TARGET << 16;
-#ifdef SUPPORT_TAGS
-		cmd_free_tag(cmd);
-#endif
-		cmd->scsi_done(cmd);
-		NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
-		dprintk(NDEBUG_SELECTION, "scsi%d: target did not respond within 250ms\n", HOSTNO);
-		NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
-		return 0;
-	}
-
-	hostdata->targets_present |= (1 << cmd->device->id);
-
 	/*
 	 * Since we followed the SCSI spec, and raised ATN while SEL
 	 * was true but before BSY was false during selection, the information
@@ -1563,16 +1424,27 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
 	 * until it wraps back to 0.
 	 *
 	 * XXX - it turns out that there are some broken SCSI-II devices,
-	 * which claim to support tagged queuing but fail when more than
-	 * some number of commands are issued at once.
+	 * which claim to support tagged queuing but fail when more than
+	 * some number of commands are issued at once.
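+	 * When such a target returns QUEUE FULL, the COMMAND COMPLETE
+	 * handling in NCR5380_information_transfer() shrinks ta->queue_size
+	 * to the number of tags that were actually accepted.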
*/ /* Wait for start of REQ/ACK handshake */ - while (!(NCR5380_read(STATUS_REG) & SR_REQ)) - ; - dprintk(NDEBUG_SELECTION, "scsi%d: target %d selected, going into MESSAGE OUT phase.\n", - HOSTNO, cmd->device->id); + err = NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, SR_REQ, HZ); + spin_lock_irq(&hostdata->lock); + if (err < 0) { + shost_printk(KERN_ERR, instance, "select: REQ timeout\n"); + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); + NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); + goto out; + } + if (!hostdata->selecting) { + do_abort(instance); + goto out; + } + + dsprintk(NDEBUG_SELECTION, instance, "target %d selected, going into MESSAGE OUT phase.\n", + scmd_id(cmd)); tmp[0] = IDENTIFY(1, cmd->device->lun); #ifdef SUPPORT_TAGS @@ -1591,11 +1463,12 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd) data = tmp; phase = PHASE_MSGOUT; NCR5380_transfer_pio(instance, &phase, &len, &data); - dprintk(NDEBUG_SELECTION, "scsi%d: nexus established.\n", HOSTNO); + dsprintk(NDEBUG_SELECTION, instance, "nexus established.\n"); /* XXX need to handle errors here */ + hostdata->connected = cmd; #ifndef SUPPORT_TAGS - hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun); + hostdata->busy[cmd->device->id] |= 1 << cmd->device->lun; #endif #ifdef SUN3_SCSI_VME dregs->csr |= CSR_INTR; @@ -1603,24 +1476,30 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd) initialize_SCp(cmd); - return 0; + cmd = NULL; + +out: + if (!hostdata->selecting) + return NULL; + hostdata->selecting = NULL; + return cmd; } /* * Function : int NCR5380_transfer_pio (struct Scsi_Host *instance, - * unsigned char *phase, int *count, unsigned char **data) + * unsigned char *phase, int *count, unsigned char **data) * * Purpose : transfers data in given phase using polled I/O * * Inputs : instance - instance of driver, *phase - pointer to - * what phase is expected, *count - pointer to number of - * bytes to transfer, **data - pointer to data pointer. + * what phase is expected, *count - pointer to number of + * bytes to transfer, **data - pointer to data pointer. * * Returns : -1 when different phase is entered without transferring - * maximum number of bytes, 0 if all bytes are transferred or exit - * is in same phase. + * maximum number of bytes, 0 if all bytes are transferred or exit + * is in same phase. * - * Also, *phase, *count, *data are modified in place. + * Also, *phase, *count, *data are modified in place. * * XXX Note : handling for bus free may be useful. 
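+ *
+ * Each REQ/ACK wait now uses NCR5380_poll_politely() with a timeout, so
+ * a wedged bus makes the transfer loop bail out instead of hanging the
+ * machine.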
*/ @@ -1635,9 +1514,9 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data) { - register unsigned char p = *phase, tmp; - register int c = *count; - register unsigned char *d = *data; + unsigned char p = *phase, tmp; + int c = *count; + unsigned char *d = *data; /* * The NCR5380 chip will only drive the SCSI bus when the @@ -1652,14 +1531,15 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance, * Wait for assertion of REQ, after which the phase bits will be * valid */ - while (!((tmp = NCR5380_read(STATUS_REG)) & SR_REQ)) - ; - dprintk(NDEBUG_HANDSHAKE, "scsi%d: REQ detected\n", HOSTNO); + if (NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, SR_REQ, HZ) < 0) + break; + + dsprintk(NDEBUG_HANDSHAKE, instance, "REQ asserted\n"); /* Check for phase mismatch */ - if ((tmp & PHASE_MASK) != p) { - dprintk(NDEBUG_PIO, "scsi%d: phase mismatch\n", HOSTNO); + if ((NCR5380_read(STATUS_REG) & PHASE_MASK) != p) { + dsprintk(NDEBUG_PIO, instance, "phase mismatch\n"); NCR5380_dprint_phase(NDEBUG_PIO, instance); break; } @@ -1684,35 +1564,36 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance, NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA); NCR5380_dprint(NDEBUG_PIO, instance); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | - ICR_ASSERT_DATA | ICR_ASSERT_ACK); + ICR_ASSERT_DATA | ICR_ASSERT_ACK); } else { NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | - ICR_ASSERT_DATA | ICR_ASSERT_ATN); + ICR_ASSERT_DATA | ICR_ASSERT_ATN); NCR5380_dprint(NDEBUG_PIO, instance); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | - ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_ACK); + ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_ACK); } } else { NCR5380_dprint(NDEBUG_PIO, instance); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK); } - while (NCR5380_read(STATUS_REG) & SR_REQ) - ; + if (NCR5380_poll_politely(instance, + STATUS_REG, SR_REQ, 0, 5 * HZ) < 0) + break; - dprintk(NDEBUG_HANDSHAKE, "scsi%d: req false, handshake complete\n", HOSTNO); + dsprintk(NDEBUG_HANDSHAKE, instance, "REQ negated, handshake complete\n"); - /* - * We have several special cases to consider during REQ/ACK handshaking : - * 1. We were in MSGOUT phase, and we are on the last byte of the - * message. ATN must be dropped as ACK is dropped. - * - * 2. We are in a MSGIN phase, and we are on the last byte of the - * message. We must exit with ACK asserted, so that the calling - * code may raise ATN before dropping ACK to reject the message. - * - * 3. ACK and ATN are clear and the target may proceed as normal. - */ +/* + * We have several special cases to consider during REQ/ACK handshaking : + * 1. We were in MSGOUT phase, and we are on the last byte of the + * message. ATN must be dropped as ACK is dropped. + * + * 2. We are in a MSGIN phase, and we are on the last byte of the + * message. We must exit with ACK asserted, so that the calling + * code may raise ATN before dropping ACK to reject the message. + * + * 3. ACK and ATN are clear and the target may proceed as normal. 
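+ *
+ * Case 2 is why ACK is left asserted below when the current byte is the
+ * last one of a MESSAGE IN phase, i.e. when p == PHASE_MSGIN and c == 1.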
+ */ if (!(p == PHASE_MSGIN && c == 1)) { if (p == PHASE_MSGOUT && c > 1) NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); @@ -1721,16 +1602,16 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance, } } while (--c); - dprintk(NDEBUG_PIO, "scsi%d: residual %d\n", HOSTNO, c); + dsprintk(NDEBUG_PIO, instance, "residual %d\n", c); *count = c; *data = d; tmp = NCR5380_read(STATUS_REG); /* The phase read from the bus is valid if either REQ is (already) - * asserted or if ACK hasn't been released yet. The latter is the case if - * we're in MSGIN and all wanted bytes have been received. + * asserted or if ACK hasn't been released yet. The latter applies if + * we're in MSG IN, DATA IN or STATUS and all bytes have been received. */ - if ((tmp & SR_REQ) || (p == PHASE_MSGIN && c == 0)) + if ((tmp & SR_REQ) || ((tmp & SR_IO) && c == 0)) *phase = tmp & PHASE_MASK; else *phase = PHASE_UNKNOWN; @@ -1741,19 +1622,45 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance, return -1; } -/* - * Function : do_abort (Scsi_Host *host) +/** + * do_reset - issue a reset command + * @instance: adapter to reset + * + * Issue a reset sequence to the NCR5380 and try and get the bus + * back into sane shape. * - * Purpose : abort the currently established nexus. Should only be - * called from a routine which can drop into a + * This clears the reset interrupt flag because there may be no handler for + * it. When the driver is initialized, the NCR5380_intr() handler has not yet + * been installed. And when in EH we may have released the ST DMA interrupt. + */ + +static void do_reset(struct Scsi_Host *instance) +{ + unsigned long flags; + + local_irq_save(flags); + NCR5380_write(TARGET_COMMAND_REG, + PHASE_SR_TO_TCR(NCR5380_read(STATUS_REG) & PHASE_MASK)); + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_RST); + udelay(50); + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); + (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG); + local_irq_restore(flags); +} + +/** + * do_abort - abort the currently established nexus by going to + * MESSAGE OUT phase and sending an ABORT message. + * @instance: relevant scsi host instance * - * Returns : 0 on success, -1 on failure. + * Returns 0 on success, -1 on failure. */ static int do_abort(struct Scsi_Host *instance) { - unsigned char tmp, *msgptr, phase; + unsigned char *msgptr, phase, tmp; int len; + int rc; /* Request message out phase */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); @@ -1768,16 +1675,20 @@ static int do_abort(struct Scsi_Host *instance) * the target sees, so we just handshake. */ - while (!((tmp = NCR5380_read(STATUS_REG)) & SR_REQ)) - ; + rc = NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, SR_REQ, 10 * HZ); + if (rc < 0) + goto timeout; + + tmp = NCR5380_read(STATUS_REG) & PHASE_MASK; NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp)); - if ((tmp & PHASE_MASK) != PHASE_MSGOUT) { - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN | - ICR_ASSERT_ACK); - while (NCR5380_read(STATUS_REG) & SR_REQ) - ; + if (tmp != PHASE_MSGOUT) { + NCR5380_write(INITIATOR_COMMAND_REG, + ICR_BASE | ICR_ASSERT_ATN | ICR_ASSERT_ACK); + rc = NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, 0, 3 * HZ); + if (rc < 0) + goto timeout; NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); } @@ -1793,26 +1704,29 @@ static int do_abort(struct Scsi_Host *instance) */ return len ? 
-1 : 0;
+
+timeout:
+	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+	return -1;
 }

 #if defined(REAL_DMA)
 /*
  * Function : int NCR5380_transfer_dma (struct Scsi_Host *instance,
- *	unsigned char *phase, int *count, unsigned char **data)
+ *	unsigned char *phase, int *count, unsigned char **data)
  *
  * Purpose : transfers data in given phase using either real
- *	or pseudo DMA.
+ *	or pseudo DMA.
  *
  * Inputs : instance - instance of driver, *phase - pointer to
- *	what phase is expected, *count - pointer to number of
- *	bytes to transfer, **data - pointer to data pointer.
+ *	what phase is expected, *count - pointer to number of
+ *	bytes to transfer, **data - pointer to data pointer.
  *
  * Returns : -1 when different phase is entered without transferring
- *	maximum number of bytes, 0 if all bytes or transferred or exit
- *	is in same phase.
- *
- * Also, *phase, *count, *data are modified in place.
+ *	maximum number of bytes, 0 if all bytes are transferred or exit
+ *	is in same phase.
  *
+ * Also, *phase, *count, *data are modified in place.
  */
@@ -1820,10 +1734,9 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
 				unsigned char *phase, int *count,
 				unsigned char **data)
 {
-	SETUP_HOSTDATA(instance);
+	struct NCR5380_hostdata *hostdata = shost_priv(instance);
 	register int c = *count;
 	register unsigned char p = *phase;
-	unsigned long flags;

 #if defined(CONFIG_SUN3)
 	/* sanity check */
@@ -1834,29 +1747,22 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
 	}
 	hostdata->dma_len = c;

-	dprintk(NDEBUG_DMA, "scsi%d: initializing DMA for %s, %d bytes %s %p\n",
-		instance->host_no, (p & SR_IO) ? "reading" : "writing",
-		c, (p & SR_IO) ? "to" : "from", *data);
+	dsprintk(NDEBUG_DMA, instance, "initializing DMA %s: length %d, address %p\n",
+	         (p & SR_IO) ? "receive" : "send", c, *data);

 	/* netbsd turns off ints here, why not be safe and do it too */
-	local_irq_save(flags);

 	/* send start chain */
 	sun3scsi_dma_start(c, *data);

+	NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(p));
+	NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE | MR_MONITOR_BSY |
+	              MR_ENABLE_EOP_INTR);
 	if (p & SR_IO) {
-		NCR5380_write(TARGET_COMMAND_REG, 1);
-		NCR5380_read(RESET_PARITY_INTERRUPT_REG);
 		NCR5380_write(INITIATOR_COMMAND_REG, 0);
-		NCR5380_write(MODE_REG,
-			      (NCR5380_read(MODE_REG) | MR_DMA_MODE | MR_ENABLE_EOP_INTR));
 		NCR5380_write(START_DMA_INITIATOR_RECEIVE_REG, 0);
 	} else {
-		NCR5380_write(TARGET_COMMAND_REG, 0);
-		NCR5380_read(RESET_PARITY_INTERRUPT_REG);
 		NCR5380_write(INITIATOR_COMMAND_REG, ICR_ASSERT_DATA);
-		NCR5380_write(MODE_REG,
-			      (NCR5380_read(MODE_REG) | MR_DMA_MODE | MR_ENABLE_EOP_INTR));
 		NCR5380_write(START_DMA_SEND_REG, 0);
 	}

@@ -1864,8 +1770,6 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
 	dregs->csr |= CSR_DMA_ENABLE;
 #endif

-	local_irq_restore(flags);
-
 	sun3_dma_active = 1;

 #else /* !defined(CONFIG_SUN3) */
@@ -1880,25 +1784,20 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
 	if (hostdata->read_overruns && (p & SR_IO))
 		c -= hostdata->read_overruns;

-	dprintk(NDEBUG_DMA, "scsi%d: initializing DMA for %s, %d bytes %s %p\n",
-		HOSTNO, (p & SR_IO) ? "reading" : "writing",
-		c, (p & SR_IO) ? "to" : "from", d);
+	dsprintk(NDEBUG_DMA, instance, "initializing DMA %s: length %d, address %p\n",
+	         (p & SR_IO) ?
"receive" : "send", c, d); NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(p)); - -#ifdef REAL_DMA - NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE | MR_ENABLE_EOP_INTR | MR_MONITOR_BSY); -#endif /* def REAL_DMA */ + NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE | MR_MONITOR_BSY | + MR_ENABLE_EOP_INTR); if (!(hostdata->flags & FLAG_LATE_DMA_SETUP)) { /* On the Medusa, it is a must to initialize the DMA before * starting the NCR. This is also the cleaner way for the TT. */ - local_irq_save(flags); hostdata->dma_len = (p & SR_IO) ? NCR5380_dma_read_setup(instance, d, c) : NCR5380_dma_write_setup(instance, d, c); - local_irq_restore(flags); } if (p & SR_IO) @@ -1912,11 +1811,9 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, /* On the Falcon, the DMA setup must be done after the last */ /* NCR access, else the DMA setup gets trashed! */ - local_irq_save(flags); hostdata->dma_len = (p & SR_IO) ? NCR5380_dma_read_setup(instance, d, c) : NCR5380_dma_write_setup(instance, d, c); - local_irq_restore(flags); } #endif /* !defined(CONFIG_SUN3) */ @@ -1928,38 +1825,37 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, * Function : NCR5380_information_transfer (struct Scsi_Host *instance) * * Purpose : run through the various SCSI phases and do as the target - * directs us to. Operates on the currently connected command, - * instance->connected. + * directs us to. Operates on the currently connected command, + * instance->connected. * * Inputs : instance, instance for which we are doing commands * * Side effects : SCSI things happen, the disconnected queue will be - * modified if a command disconnects, *instance->connected will - * change. + * modified if a command disconnects, *instance->connected will + * change. * * XXX Note : we need to watch for bus free or a reset condition here - * to recover from an unexpected bus free condition. + * to recover from an unexpected bus free condition. */ static void NCR5380_information_transfer(struct Scsi_Host *instance) { - SETUP_HOSTDATA(instance); - unsigned long flags; + struct NCR5380_hostdata *hostdata = shost_priv(instance); unsigned char msgout = NOP; int sink = 0; int len; -#if defined(REAL_DMA) int transfersize; -#endif unsigned char *data; unsigned char phase, tmp, extended_msg[10], old_phase = 0xff; - struct scsi_cmnd *cmd = (struct scsi_cmnd *) hostdata->connected; + struct scsi_cmnd *cmd; #ifdef SUN3_SCSI_VME dregs->csr |= CSR_INTR; #endif - while (1) { + while ((cmd = hostdata->connected)) { + struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd); + tmp = NCR5380_read(STATUS_REG); /* We only have a valid SCSI phase when REQ is asserted */ if (tmp & SR_REQ) { @@ -1984,7 +1880,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) /* this command setup for dma yet? 
*/ if ((count >= DMA_MIN_SIZE) && (sun3_dma_setup_done != cmd)) { if (cmd->request->cmd_type == REQ_TYPE_FS) { - sun3scsi_dma_setup(d, count, + sun3scsi_dma_setup(instance, d, count, rq_data_dir(cmd->request)); sun3_dma_setup_done = cmd; } @@ -2000,11 +1896,11 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp)); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN | - ICR_ASSERT_ACK); + ICR_ASSERT_ACK); while (NCR5380_read(STATUS_REG) & SR_REQ) ; NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | - ICR_ASSERT_ATN); + ICR_ASSERT_ATN); sink = 0; continue; } @@ -2012,12 +1908,12 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) switch (phase) { case PHASE_DATAOUT: #if (NDEBUG & NDEBUG_NO_DATAOUT) - printk("scsi%d: NDEBUG_NO_DATAOUT set, attempted DATAOUT " - "aborted\n", HOSTNO); + shost_printk(KERN_DEBUG, instance, "NDEBUG_NO_DATAOUT set, attempted DATAOUT aborted\n"); sink = 1; do_abort(instance); cmd->result = DID_ERROR << 16; - cmd->scsi_done(cmd); + complete_cmd(instance, cmd); + hostdata->connected = NULL; return; #endif case PHASE_DATAIN: @@ -2031,13 +1927,10 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) --cmd->SCp.buffers_residual; cmd->SCp.this_residual = cmd->SCp.buffer->length; cmd->SCp.ptr = sg_virt(cmd->SCp.buffer); - /* ++roman: Try to merge some scatter-buffers if - * they are at contiguous physical addresses. - */ merge_contiguous_buffers(cmd); - dprintk(NDEBUG_INFORMATION, "scsi%d: %d bytes and %d buffers left\n", - HOSTNO, cmd->SCp.this_residual, - cmd->SCp.buffers_residual); + dsprintk(NDEBUG_INFORMATION, instance, "%d bytes and %d buffers left\n", + cmd->SCp.this_residual, + cmd->SCp.buffers_residual); } /* @@ -2051,16 +1944,18 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) */ /* ++roman: I suggest, this should be - * #if def(REAL_DMA) + * #if def(REAL_DMA) * instead of leaving REAL_DMA out. */ #if defined(REAL_DMA) - if ( #if !defined(CONFIG_SUN3) - !cmd->device->borken && + transfersize = 0; + if (!cmd->device->borken) #endif - (transfersize = NCR5380_dma_xfer_len(instance, cmd, phase)) >= DMA_MIN_SIZE) { + transfersize = NCR5380_dma_xfer_len(instance, cmd, phase); + + if (transfersize >= DMA_MIN_SIZE) { len = transfersize; cmd->SCp.phase = phase; if (NCR5380_transfer_dma(instance, &phase, @@ -2068,16 +1963,14 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) /* * If the watchdog timer fires, all future * accesses to this device will use the - * polled-IO. */ + * polled-IO. + */ scmd_printk(KERN_INFO, cmd, "switching to slow handshake\n"); cmd->device->borken = 1; - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | - ICR_ASSERT_ATN); sink = 1; do_abort(instance); cmd->result = DID_ERROR << 16; - cmd->scsi_done(cmd); /* XXX - need to source or sink data here, as appropriate */ } else { #ifdef REAL_DMA @@ -2093,174 +1986,84 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) } } else #endif /* defined(REAL_DMA) */ - NCR5380_transfer_pio(instance, &phase, - (int *)&cmd->SCp.this_residual, - (unsigned char **)&cmd->SCp.ptr); + { + /* Break up transfer into 3 ms chunks, + * presuming 6 accesses per handshake. 
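+				 * For example, accesses_per_ms / 2 bytes
+				 * need about 6 * accesses_per_ms / 2 =
+				 * 3 * accesses_per_ms register accesses,
+				 * which take roughly 3 ms.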
+ */ + transfersize = min((unsigned long)cmd->SCp.this_residual, + hostdata->accesses_per_ms / 2); + len = transfersize; + NCR5380_transfer_pio(instance, &phase, &len, + (unsigned char **)&cmd->SCp.ptr); + cmd->SCp.this_residual -= transfersize - len; + } #if defined(CONFIG_SUN3) && defined(REAL_DMA) /* if we had intended to dma that command clear it */ if (sun3_dma_setup_done == cmd) sun3_dma_setup_done = NULL; #endif - break; + return; case PHASE_MSGIN: len = 1; data = &tmp; - NCR5380_write(SELECT_ENABLE_REG, 0); /* disable reselects */ NCR5380_transfer_pio(instance, &phase, &len, &data); cmd->SCp.Message = tmp; switch (tmp) { - /* - * Linking lets us reduce the time required to get the - * next command out to the device, hopefully this will - * mean we don't waste another revolution due to the delays - * required by ARBITRATION and another SELECTION. - * - * In the current implementation proposal, low level drivers - * merely have to start the next command, pointed to by - * next_link, done() is called as with unlinked commands. - */ -#ifdef LINKED - case LINKED_CMD_COMPLETE: - case LINKED_FLG_CMD_COMPLETE: - /* Accept message by clearing ACK */ - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - - dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %llu linked command " - "complete.\n", HOSTNO, cmd->device->id, cmd->device->lun); - - /* Enable reselect interrupts */ - NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); - /* - * Sanity check : A linked command should only terminate - * with one of these messages if there are more linked - * commands available. - */ - - if (!cmd->next_link) { - printk(KERN_NOTICE "scsi%d: target %d lun %llu " - "linked command complete, no next_link\n", - HOSTNO, cmd->device->id, cmd->device->lun); - sink = 1; - do_abort(instance); - return; - } - - initialize_SCp(cmd->next_link); - /* The next command is still part of this process; copy it - * and don't free it! */ - cmd->next_link->tag = cmd->tag; - cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8); - dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %llu linked request " - "done, calling scsi_done().\n", - HOSTNO, cmd->device->id, cmd->device->lun); - cmd->scsi_done(cmd); - cmd = hostdata->connected; - break; -#endif /* def LINKED */ case ABORT: case COMMAND_COMPLETE: /* Accept message by clearing ACK */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d, lun %llu " - "completed\n", HOSTNO, cmd->device->id, cmd->device->lun); + dsprintk(NDEBUG_QUEUES, instance, + "COMMAND COMPLETE %p target %d lun %llu\n", + cmd, scmd_id(cmd), cmd->device->lun); - local_irq_save(flags); - hostdata->retain_dma_intr++; hostdata->connected = NULL; #ifdef SUPPORT_TAGS cmd_free_tag(cmd); if (status_byte(cmd->SCp.Status) == QUEUE_FULL) { - /* Turn a QUEUE FULL status into BUSY, I think the - * mid level cannot handle QUEUE FULL :-( (The - * command is retried after BUSY). Also update our - * queue size to the number of currently issued - * commands now. - */ - /* ++Andreas: the mid level code knows about - QUEUE_FULL now. 
*/ - struct tag_alloc *ta = &hostdata->TagAlloc[scmd_id(cmd)][cmd->device->lun]; - dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %llu returned " - "QUEUE_FULL after %d commands\n", - HOSTNO, cmd->device->id, cmd->device->lun, - ta->nr_allocated); + u8 lun = cmd->device->lun; + struct tag_alloc *ta = &hostdata->TagAlloc[scmd_id(cmd)][lun]; + + dsprintk(NDEBUG_TAGS, instance, + "QUEUE_FULL %p target %d lun %d nr_allocated %d\n", + cmd, scmd_id(cmd), lun, ta->nr_allocated); if (ta->queue_size > ta->nr_allocated) - ta->nr_allocated = ta->queue_size; + ta->queue_size = ta->nr_allocated; } -#else - hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); #endif - /* Enable reselect interrupts */ - NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); - /* - * I'm not sure what the correct thing to do here is : - * - * If the command that just executed is NOT a request - * sense, the obvious thing to do is to set the result - * code to the values of the stored parameters. - * - * If it was a REQUEST SENSE command, we need some way to - * differentiate between the failure code of the original - * and the failure code of the REQUEST sense - the obvious - * case is success, where we fall through and leave the - * result code unchanged. - * - * The non-obvious place is where the REQUEST SENSE failed - */ - - if (cmd->cmnd[0] != REQUEST_SENSE) - cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8); - else if (status_byte(cmd->SCp.Status) != GOOD) - cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16); - - if ((cmd->cmnd[0] == REQUEST_SENSE) && - hostdata->ses.cmd_len) { - scsi_eh_restore_cmnd(cmd, &hostdata->ses); - hostdata->ses.cmd_len = 0 ; + cmd->result &= ~0xffff; + cmd->result |= cmd->SCp.Status; + cmd->result |= cmd->SCp.Message << 8; + + if (cmd->cmnd[0] == REQUEST_SENSE) + complete_cmd(instance, cmd); + else { + if (cmd->SCp.Status == SAM_STAT_CHECK_CONDITION || + cmd->SCp.Status == SAM_STAT_COMMAND_TERMINATED) { + dsprintk(NDEBUG_QUEUES, instance, "autosense: adding cmd %p to tail of autosense queue\n", + cmd); + list_add_tail(&ncmd->list, + &hostdata->autosense); + } else + complete_cmd(instance, cmd); } - if ((cmd->cmnd[0] != REQUEST_SENSE) && - (status_byte(cmd->SCp.Status) == CHECK_CONDITION)) { - scsi_eh_prep_cmnd(cmd, &hostdata->ses, NULL, 0, ~0); - - dprintk(NDEBUG_AUTOSENSE, "scsi%d: performing request sense\n", HOSTNO); - - LIST(cmd,hostdata->issue_queue); - SET_NEXT(cmd, hostdata->issue_queue); - hostdata->issue_queue = (struct scsi_cmnd *) cmd; - dprintk(NDEBUG_QUEUES, "scsi%d: REQUEST SENSE added to head of " - "issue queue\n", H_NO(cmd)); - } else { - cmd->scsi_done(cmd); - } - - local_irq_restore(flags); - - NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); /* * Restore phase bits to 0 so an interrupted selection, * arbitration can resume. */ NCR5380_write(TARGET_COMMAND_REG, 0); - while ((NCR5380_read(STATUS_REG) & SR_BSY) && !hostdata->connected) - barrier(); + /* Enable reselect interrupts */ + NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); - local_irq_save(flags); - hostdata->retain_dma_intr--; - /* ++roman: For Falcon SCSI, release the lock on the - * ST-DMA here if no other commands are waiting on the - * disconnected queue. 
- */ maybe_release_dma_irq(instance); - local_irq_restore(flags); return; case MESSAGE_REJECT: /* Accept message by clearing ACK */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - /* Enable reselect interrupts */ - NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); switch (hostdata->last_message) { case HEAD_OF_QUEUE_TAG: case ORDERED_QUEUE_TAG: @@ -2274,27 +2077,20 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) cmd->device->tagged_supported = 0; hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun); cmd->tag = TAG_NONE; - dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %llu rejected " - "QUEUE_TAG message; tagged queuing " - "disabled\n", - HOSTNO, cmd->device->id, cmd->device->lun); + dsprintk(NDEBUG_TAGS, instance, "target %d lun %llu rejected QUEUE_TAG message; tagged queuing disabled\n", + scmd_id(cmd), cmd->device->lun); break; } break; case DISCONNECT: /* Accept message by clearing ACK */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - local_irq_save(flags); - cmd->device->disconnect = 1; - LIST(cmd,hostdata->disconnected_queue); - SET_NEXT(cmd, hostdata->disconnected_queue); hostdata->connected = NULL; - hostdata->disconnected_queue = cmd; - local_irq_restore(flags); - dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d lun %llu was " - "moved from connected to the " - "disconnected_queue\n", HOSTNO, - cmd->device->id, cmd->device->lun); + list_add(&ncmd->list, &hostdata->disconnected); + dsprintk(NDEBUG_INFORMATION | NDEBUG_QUEUES, + instance, "connected command %p for target %d lun %llu moved to disconnected queue\n", + cmd, scmd_id(cmd), cmd->device->lun); + /* * Restore phase bits to 0 so an interrupted selection, * arbitration can resume. @@ -2303,9 +2099,6 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) /* Enable reselect interrupts */ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); - /* Wait for bus free to avoid nasty timeouts */ - while ((NCR5380_read(STATUS_REG) & SR_BSY) && !hostdata->connected) - barrier(); #ifdef SUN3_SCSI_VME dregs->csr |= CSR_DMA_ENABLE; #endif @@ -2324,37 +2117,30 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) case RESTORE_POINTERS: /* Accept message by clearing ACK */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - /* Enable reselect interrupts */ - NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); break; case EXTENDED_MESSAGE: /* - * Extended messages are sent in the following format : - * Byte - * 0 EXTENDED_MESSAGE == 1 - * 1 length (includes one byte for code, doesn't - * include first two bytes) - * 2 code - * 3..length+1 arguments - * - * Start the extended message buffer with the EXTENDED_MESSAGE + * Start the message buffer with the EXTENDED_MESSAGE * byte, since spi_print_msg() wants the whole thing. 
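+				 * The length check below allows for the
+				 * EXTENDED_MESSAGE and length bytes
+				 * themselves, so the arguments cannot
+				 * overrun extended_msg[].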
*/ extended_msg[0] = EXTENDED_MESSAGE; /* Accept first byte by clearing ACK */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - dprintk(NDEBUG_EXTENDED, "scsi%d: receiving extended message\n", HOSTNO); + spin_unlock_irq(&hostdata->lock); + + dsprintk(NDEBUG_EXTENDED, instance, "receiving extended message\n"); len = 2; data = extended_msg + 1; phase = PHASE_MSGIN; NCR5380_transfer_pio(instance, &phase, &len, &data); - dprintk(NDEBUG_EXTENDED, "scsi%d: length=%d, code=0x%02x\n", HOSTNO, - (int)extended_msg[1], (int)extended_msg[2]); + dsprintk(NDEBUG_EXTENDED, instance, "length %d, code 0x%02x\n", + (int)extended_msg[1], + (int)extended_msg[2]); - if (!len && extended_msg[1] <= - (sizeof(extended_msg) - 1)) { + if (!len && extended_msg[1] > 0 && + extended_msg[1] <= sizeof(extended_msg) - 2) { /* Accept third byte by clearing ACK */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); len = extended_msg[1] - 1; @@ -2362,8 +2148,8 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) phase = PHASE_MSGIN; NCR5380_transfer_pio(instance, &phase, &len, &data); - dprintk(NDEBUG_EXTENDED, "scsi%d: message received, residual %d\n", - HOSTNO, len); + dsprintk(NDEBUG_EXTENDED, instance, "message received, residual %d\n", + len); switch (extended_msg[2]) { case EXTENDED_SDTR: @@ -2373,15 +2159,18 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) tmp = 0; } } else if (len) { - printk(KERN_NOTICE "scsi%d: error receiving " - "extended message\n", HOSTNO); + shost_printk(KERN_ERR, instance, "error receiving extended message\n"); tmp = 0; } else { - printk(KERN_NOTICE "scsi%d: extended message " - "code %02x length %d is too long\n", - HOSTNO, extended_msg[2], extended_msg[1]); + shost_printk(KERN_NOTICE, instance, "extended message code %02x length %d is too long\n", + extended_msg[2], extended_msg[1]); tmp = 0; } + + spin_lock_irq(&hostdata->lock); + if (!hostdata->connected) + return; + /* Fall through to reject message */ /* @@ -2390,8 +2179,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) */ default: if (!tmp) { - printk(KERN_INFO "scsi%d: rejecting message ", - instance->host_no); + shost_printk(KERN_ERR, instance, "rejecting message "); spi_print_msg(extended_msg); printk("\n"); } else if (tmp != EXTENDED_MESSAGE) @@ -2414,18 +2202,11 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) hostdata->last_message = msgout; NCR5380_transfer_pio(instance, &phase, &len, &data); if (msgout == ABORT) { - local_irq_save(flags); -#ifdef SUPPORT_TAGS - cmd_free_tag(cmd); -#else - hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); -#endif hostdata->connected = NULL; cmd->result = DID_ERROR << 16; - NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); + complete_cmd(instance, cmd); maybe_release_dma_irq(instance); - local_irq_restore(flags); - cmd->scsi_done(cmd); + NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); return; } msgout = NOP; @@ -2447,22 +2228,25 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) cmd->SCp.Status = tmp; break; default: - printk("scsi%d: unknown phase\n", HOSTNO); + shost_printk(KERN_ERR, instance, "unknown phase\n"); NCR5380_dprint(NDEBUG_ANY, instance); } /* switch(phase) */ - } /* if (tmp * SR_REQ) */ - } /* while (1) */ + } else { + spin_unlock_irq(&hostdata->lock); + NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, SR_REQ, HZ); + spin_lock_irq(&hostdata->lock); + } + } } /* * Function : void NCR5380_reselect (struct Scsi_Host *instance) * * Purpose : does 
reselection, initializing the instance->connected - * field to point to the scsi_cmnd for which the I_T_L or I_T_L_Q - * nexus has been reestablished, + * field to point to the scsi_cmnd for which the I_T_L or I_T_L_Q + * nexus has been reestablished, * * Inputs : instance - this instance of the NCR5380. - * */ @@ -2471,7 +2255,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) static void NCR5380_reselect(struct Scsi_Host *instance) { - SETUP_HOSTDATA(instance); + struct NCR5380_hostdata *hostdata = shost_priv(instance); unsigned char target_mask; unsigned char lun; #ifdef SUPPORT_TAGS @@ -2480,7 +2264,8 @@ static void NCR5380_reselect(struct Scsi_Host *instance) unsigned char msg[3]; int __maybe_unused len; unsigned char __maybe_unused *data, __maybe_unused phase; - struct scsi_cmnd *tmp = NULL, *prev; + struct NCR5380_cmd *ncmd; + struct scsi_cmnd *tmp; /* * Disable arbitration, etc. since the host adapter obviously @@ -2488,11 +2273,10 @@ static void NCR5380_reselect(struct Scsi_Host *instance) */ NCR5380_write(MODE_REG, MR_BASE); - hostdata->restart_select = 1; target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & ~(hostdata->id_mask); - dprintk(NDEBUG_RESELECTION, "scsi%d: reselect\n", HOSTNO); + dsprintk(NDEBUG_RESELECTION, instance, "reselect\n"); /* * At this point, we have detected that our SCSI ID is on the bus, @@ -2504,17 +2288,22 @@ static void NCR5380_reselect(struct Scsi_Host *instance) */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_BSY); - - while (NCR5380_read(STATUS_REG) & SR_SEL) - ; + if (NCR5380_poll_politely(instance, + STATUS_REG, SR_SEL, 0, 2 * HZ) < 0) { + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); + return; + } NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); /* * Wait for target to go into MSGIN. */ - while (!(NCR5380_read(STATUS_REG) & SR_REQ)) - ; + if (NCR5380_poll_politely(instance, + STATUS_REG, SR_REQ, SR_REQ, 2 * HZ) < 0) { + do_abort(instance); + return; + } #if defined(CONFIG_SUN3) && defined(REAL_DMA) /* acknowledge toggle to MSGIN */ @@ -2527,15 +2316,21 @@ static void NCR5380_reselect(struct Scsi_Host *instance) data = msg; phase = PHASE_MSGIN; NCR5380_transfer_pio(instance, &phase, &len, &data); + + if (len) { + do_abort(instance); + return; + } #endif if (!(msg[0] & 0x80)) { - printk(KERN_DEBUG "scsi%d: expecting IDENTIFY message, got ", HOSTNO); + shost_printk(KERN_ERR, instance, "expecting IDENTIFY message, got "); spi_print_msg(msg); + printk("\n"); do_abort(instance); return; } - lun = (msg[0] & 0x07); + lun = msg[0] & 0x07; #if defined(SUPPORT_TAGS) && !defined(CONFIG_SUN3) /* If the phase is still MSGIN, the target wants to send some more @@ -2551,8 +2346,8 @@ static void NCR5380_reselect(struct Scsi_Host *instance) if (!NCR5380_transfer_pio(instance, &phase, &len, &data) && msg[1] == SIMPLE_QUEUE_TAG) tag = msg[2]; - dprintk(NDEBUG_TAGS, "scsi%d: target mask %02x, lun %d sent tag %d at " - "reselection\n", HOSTNO, target_mask, lun, tag); + dsprintk(NDEBUG_TAGS, instance, "reselect: target mask %02x, lun %d sent tag %d\n", + target_mask, lun, tag); } #endif @@ -2561,36 +2356,34 @@ static void NCR5380_reselect(struct Scsi_Host *instance) * just reestablished, and remove it from the disconnected queue. 
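+	 * A match must have the same target bit, the same LUN and (when
+	 * SUPPORT_TAGS is enabled) the same tag as the reselecting nexus.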
 */
-	for (tmp = (struct scsi_cmnd *) hostdata->disconnected_queue, prev = NULL;
-	     tmp; prev = tmp, tmp = NEXT(tmp)) {
-		if ((target_mask == (1 << tmp->device->id)) && (lun == tmp->device->lun)
+	tmp = NULL;
+	list_for_each_entry(ncmd, &hostdata->disconnected, list) {
+		struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd);
+
+		if (target_mask == (1 << scmd_id(cmd)) &&
+		    lun == (u8)cmd->device->lun
 #ifdef SUPPORT_TAGS
-		    && (tag == tmp->tag)
+		    && (tag == cmd->tag)
 #endif
 		    ) {
-			if (prev) {
-				REMOVE(prev, NEXT(prev), tmp, NEXT(tmp));
-				SET_NEXT(prev, NEXT(tmp));
-			} else {
-				REMOVE(-1, hostdata->disconnected_queue, tmp, NEXT(tmp));
-				hostdata->disconnected_queue = NEXT(tmp);
-			}
-			SET_NEXT(tmp, NULL);
+			list_del(&ncmd->list);
+			tmp = cmd;
 			break;
 		}
 	}

-	if (!tmp) {
-		printk(KERN_WARNING "scsi%d: warning: target bitmask %02x lun %d "
-#ifdef SUPPORT_TAGS
-		       "tag %d "
-#endif
-		       "not in disconnected_queue.\n",
-		       HOSTNO, target_mask, lun
+	if (tmp) {
+		dsprintk(NDEBUG_RESELECTION | NDEBUG_QUEUES, instance,
+		         "reselect: removed %p from disconnected queue\n", tmp);
+	} else {
+
 #ifdef SUPPORT_TAGS
-		       , tag
+		shost_printk(KERN_ERR, instance, "target bitmask 0x%02x lun %d tag %d not in disconnected queue.\n",
+		             target_mask, lun, tag);
+#else
+		shost_printk(KERN_ERR, instance, "target bitmask 0x%02x lun %d not in disconnected queue.\n",
+		             target_mask, lun);
 #endif
-		       );
 		/*
 		 * Since we have an established nexus that we can't do anything
 		 * with, we must abort it.
@@ -2614,7 +2407,8 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
 		}
 		/* setup this command for dma if not already */
 		if ((count >= DMA_MIN_SIZE) && (sun3_dma_setup_done != tmp)) {
-			sun3scsi_dma_setup(d, count, rq_data_dir(tmp->request));
+			sun3scsi_dma_setup(instance, d, count,
+			                   rq_data_dir(tmp->request));
 			sun3_dma_setup_done = tmp;
 		}
 	}
@@ -2639,235 +2433,168 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
 		if (!NCR5380_transfer_pio(instance, &phase, &len, &data) &&
 		    msg[1] == SIMPLE_QUEUE_TAG)
 			tag = msg[2];
-		dprintk(NDEBUG_TAGS, "scsi%d: target mask %02x, lun %d sent tag %d at reselection\n"
-			HOSTNO, target_mask, lun, tag);
+		dsprintk(NDEBUG_TAGS, instance, "reselect: target mask %02x, lun %d sent tag %d\n",
+		         target_mask, lun, tag);
 	}
 #endif

 	hostdata->connected = tmp;
-	dprintk(NDEBUG_RESELECTION, "scsi%d: nexus established, target = %d, lun = %llu, tag = %d\n",
-		HOSTNO, tmp->device->id, tmp->device->lun, tmp->tag);
+	dsprintk(NDEBUG_RESELECTION, instance, "nexus established, target %d, lun %llu, tag %d\n",
+	         scmd_id(tmp), tmp->device->lun, tmp->tag);
 }

-/*
- * Function : int NCR5380_abort (struct scsi_cmnd *cmd)
- *
- * Purpose : abort a command
- *
- * Inputs : cmd - the scsi_cmnd to abort, code - code to set the
- *	host byte of the result field to, if zero DID_ABORTED is
- *	used.
- *
- * Returns : SUCCESS - success, FAILED on failure.
- *
- * XXX - there is no way to abort the command that is currently
- *	connected, you have to wait for it to complete. If this is
- *	a problem, we could implement longjmp() / setjmp(), setjmp()
- *	called where the loop started in NCR5380_main().
+/**
+ * list_find_cmd - test for presence of a command in a linked list
+ * @haystack: list of commands
+ * @needle: command to search for
 */

-static
-int NCR5380_abort(struct scsi_cmnd *cmd)
+static bool list_find_cmd(struct list_head *haystack,
+                          struct scsi_cmnd *needle)
 {
-	struct Scsi_Host *instance = cmd->device->host;
-	SETUP_HOSTDATA(instance);
-	struct scsi_cmnd *tmp, **prev;
-	unsigned long flags;
-
-	scmd_printk(KERN_NOTICE, cmd, "aborting command\n");
-
-	NCR5380_print_status(instance);
+	struct NCR5380_cmd *ncmd;

-	local_irq_save(flags);
+	list_for_each_entry(ncmd, haystack, list)
+		if (NCR5380_to_scmd(ncmd) == needle)
+			return true;
+	return false;
+}

-	dprintk(NDEBUG_ABORT, "scsi%d: abort called basr 0x%02x, sr 0x%02x\n", HOSTNO,
-		NCR5380_read(BUS_AND_STATUS_REG),
-		NCR5380_read(STATUS_REG));
+/**
+ * list_del_cmd - remove a command from linked list
+ * @haystack: list of commands
+ * @needle: command to remove
+ */

-#if 1
-	/*
-	 * Case 1 : If the command is the currently executing command,
-	 * we'll set the aborted flag and return control so that
-	 * information transfer routine can exit cleanly.
-	 */
+static bool list_del_cmd(struct list_head *haystack,
+                         struct scsi_cmnd *needle)
+{
+	if (list_find_cmd(haystack, needle)) {
+		struct NCR5380_cmd *ncmd = scsi_cmd_priv(needle);

-	if (hostdata->connected == cmd) {
+		list_del(&ncmd->list);
+		return true;
+	}
+	return false;
+}

-		dprintk(NDEBUG_ABORT, "scsi%d: aborting connected command\n", HOSTNO);
-		/*
-		 * We should perform BSY checking, and make sure we haven't slipped
-		 * into BUS FREE.
-		 */
+/**
+ * NCR5380_abort - scsi host eh_abort_handler() method
+ * @cmd: the command to be aborted
+ *
+ * Try to abort a given command by removing it from queues and/or sending
+ * the target an abort message. This may not succeed in causing a target
+ * to abort the command. Nonetheless, the low-level driver must forget about
+ * the command because the mid-layer reclaims it and it may be re-issued.
+ *
+ * The normal path taken by a command is as follows. For EH we trace this
+ * same path to locate and abort the command.
+ *
+ * unissued -> selecting -> [unissued -> selecting ->]... connected ->
+ * [disconnected -> connected ->]...
+ * [autosense -> connected ->] done
+ *
+ * If cmd was not found at all then presumably it has already been completed,
+ * in which case return SUCCESS to try to avoid further EH measures.
+ *
+ * If the command has not completed yet, we must not fail to find it.
+ * We have no option but to forget the aborted command (even if it still
+ * lacks sense data). The mid-layer may re-issue a command that is in error
+ * recovery (see scsi_send_eh_cmnd), but the logic and data structures in
+ * this driver are such that a command can appear on one queue only.
+ *
+ * The lock protects driver data structures, but EH handlers also use it
+ * to serialize their own execution and prevent their own re-entry.
+ */

-	/* NCR5380_write(INITIATOR_COMMAND_REG, ICR_ASSERT_ATN); */
-	/*
-	 * Since we can't change phases until we've completed the current
-	 * handshake, we have to source or sink a byte of data if the current
-	 * phase is not MSGOUT.
-	 */
+static int NCR5380_abort(struct scsi_cmnd *cmd)
+{
+	struct Scsi_Host *instance = cmd->device->host;
+	struct NCR5380_hostdata *hostdata = shost_priv(instance);
+	unsigned long flags;
+	int result = SUCCESS;

-	/*
-	 * Return control to the executing NCR drive so we can clear the
-	 * aborted flag and get back into our main loop.
- */ + spin_lock_irqsave(&hostdata->lock, flags); - if (do_abort(instance) == 0) { - hostdata->aborted = 1; - hostdata->connected = NULL; - cmd->result = DID_ABORT << 16; -#ifdef SUPPORT_TAGS - cmd_free_tag(cmd); -#else - hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); -#endif - maybe_release_dma_irq(instance); - local_irq_restore(flags); - cmd->scsi_done(cmd); - return SUCCESS; - } else { - local_irq_restore(flags); - printk("scsi%d: abort of connected command failed!\n", HOSTNO); - return FAILED; - } - } +#if (NDEBUG & NDEBUG_ANY) + scmd_printk(KERN_INFO, cmd, __func__); #endif + NCR5380_dprint(NDEBUG_ANY, instance); + NCR5380_dprint_phase(NDEBUG_ANY, instance); - /* - * Case 2 : If the command hasn't been issued yet, we simply remove it - * from the issue queue. - */ - for (prev = (struct scsi_cmnd **)&(hostdata->issue_queue), - tmp = (struct scsi_cmnd *)hostdata->issue_queue; - tmp; prev = NEXTADDR(tmp), tmp = NEXT(tmp)) { - if (cmd == tmp) { - REMOVE(5, *prev, tmp, NEXT(tmp)); - (*prev) = NEXT(tmp); - SET_NEXT(tmp, NULL); - tmp->result = DID_ABORT << 16; - maybe_release_dma_irq(instance); - local_irq_restore(flags); - dprintk(NDEBUG_ABORT, "scsi%d: abort removed command from issue queue.\n", - HOSTNO); - /* Tagged queuing note: no tag to free here, hasn't been assigned - * yet... */ - tmp->scsi_done(tmp); - return SUCCESS; - } + if (list_del_cmd(&hostdata->unissued, cmd)) { + dsprintk(NDEBUG_ABORT, instance, + "abort: removed %p from issue queue\n", cmd); + cmd->result = DID_ABORT << 16; + cmd->scsi_done(cmd); /* No tag or busy flag to worry about */ + goto out; } - /* - * Case 3 : If any commands are connected, we're going to fail the abort - * and let the high level SCSI driver retry at a later time or - * issue a reset. - * - * Timeouts, and therefore aborted commands, will be highly unlikely - * and handling them cleanly in this situation would make the common - * case of noresets less efficient, and would pollute our code. So, - * we fail. - */ - - if (hostdata->connected) { - local_irq_restore(flags); - dprintk(NDEBUG_ABORT, "scsi%d: abort failed, command connected.\n", HOSTNO); - return FAILED; + if (hostdata->selecting == cmd) { + dsprintk(NDEBUG_ABORT, instance, + "abort: cmd %p == selecting\n", cmd); + hostdata->selecting = NULL; + cmd->result = DID_ABORT << 16; + complete_cmd(instance, cmd); + goto out; } - /* - * Case 4: If the command is currently disconnected from the bus, and - * there are no connected commands, we reconnect the I_T_L or - * I_T_L_Q nexus associated with it, go into message out, and send - * an abort message. - * - * This case is especially ugly. In order to reestablish the nexus, we - * need to call NCR5380_select(). The easiest way to implement this - * function was to abort if the bus was busy, and let the interrupt - * handler triggered on the SEL for reselect take care of lost arbitrations - * where necessary, meaning interrupts need to be enabled. - * - * When interrupts are enabled, the queues may change - so we - * can't remove it from the disconnected queue before selecting it - * because that could cause a failure in hashing the nexus if that - * device reselected. - * - * Since the queues may change, we can't use the pointers from when we - * first locate it. - * - * So, we must first locate the command, and if NCR5380_select() - * succeeds, then issue the abort, relocate the command and remove - * it from the disconnected queue. 
- */ - - for (tmp = (struct scsi_cmnd *) hostdata->disconnected_queue; tmp; - tmp = NEXT(tmp)) { - if (cmd == tmp) { - local_irq_restore(flags); - dprintk(NDEBUG_ABORT, "scsi%d: aborting disconnected command.\n", HOSTNO); - - if (NCR5380_select(instance, cmd)) - return FAILED; - - dprintk(NDEBUG_ABORT, "scsi%d: nexus reestablished.\n", HOSTNO); - - do_abort(instance); + if (list_del_cmd(&hostdata->disconnected, cmd)) { + dsprintk(NDEBUG_ABORT, instance, + "abort: removed %p from disconnected list\n", cmd); + /* Can't call NCR5380_select() and send ABORT because that + * means releasing the lock. Need a bus reset. + */ + set_host_byte(cmd, DID_ERROR); + complete_cmd(instance, cmd); + result = FAILED; + goto out; + } - local_irq_save(flags); - for (prev = (struct scsi_cmnd **)&(hostdata->disconnected_queue), - tmp = (struct scsi_cmnd *)hostdata->disconnected_queue; - tmp; prev = NEXTADDR(tmp), tmp = NEXT(tmp)) { - if (cmd == tmp) { - REMOVE(5, *prev, tmp, NEXT(tmp)); - *prev = NEXT(tmp); - SET_NEXT(tmp, NULL); - tmp->result = DID_ABORT << 16; - /* We must unlock the tag/LUN immediately here, since the - * target goes to BUS FREE and doesn't send us another - * message (COMMAND_COMPLETE or the like) - */ -#ifdef SUPPORT_TAGS - cmd_free_tag(tmp); -#else - hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); + if (hostdata->connected == cmd) { + dsprintk(NDEBUG_ABORT, instance, "abort: cmd %p is connected\n", cmd); + hostdata->connected = NULL; +#ifdef REAL_DMA + hostdata->dma_len = 0; #endif - maybe_release_dma_irq(instance); - local_irq_restore(flags); - tmp->scsi_done(tmp); - return SUCCESS; - } - } + if (do_abort(instance)) { + set_host_byte(cmd, DID_ERROR); + complete_cmd(instance, cmd); + result = FAILED; + goto out; } + set_host_byte(cmd, DID_ABORT); + complete_cmd(instance, cmd); + goto out; } - /* Maybe it is sufficient just to release the ST-DMA lock... (if - * possible at all) At least, we should check if the lock could be - * released after the abort, in case it is kept due to some bug. - */ - maybe_release_dma_irq(instance); - local_irq_restore(flags); + if (list_del_cmd(&hostdata->autosense, cmd)) { + dsprintk(NDEBUG_ABORT, instance, + "abort: removed %p from sense queue\n", cmd); + set_host_byte(cmd, DID_ERROR); + complete_cmd(instance, cmd); + } - /* - * Case 5 : If we reached this point, the command was not found in any of - * the queues. - * - * We probably reached this point because of an unlikely race condition - * between the command completing successfully and the abortion code, - * so we won't panic, but we will notify the user in case something really - * broke. - */ +out: + if (result == FAILED) + dsprintk(NDEBUG_ABORT, instance, "abort: failed to abort %p\n", cmd); + else + dsprintk(NDEBUG_ABORT, instance, "abort: successfully aborted %p\n", cmd); - printk(KERN_INFO "scsi%d: warning : SCSI command probably completed successfully before abortion\n", HOSTNO); + queue_work(hostdata->work_q, &hostdata->main_task); + maybe_release_dma_irq(instance); + spin_unlock_irqrestore(&hostdata->lock, flags); - return FAILED; + return result; } -/* - * Function : int NCR5380_reset (struct scsi_cmnd *cmd) - * - * Purpose : reset the SCSI bus. 
- * - * Returns : SUCCESS or FAILURE +/** + * NCR5380_bus_reset - reset the SCSI bus + * @cmd: SCSI command undergoing EH * + * Returns SUCCESS */ static int NCR5380_bus_reset(struct scsi_cmnd *cmd) @@ -2876,23 +2603,22 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd) struct NCR5380_hostdata *hostdata = shost_priv(instance); int i; unsigned long flags; + struct NCR5380_cmd *ncmd; - NCR5380_print_status(instance); + spin_lock_irqsave(&hostdata->lock, flags); + +#if (NDEBUG & NDEBUG_ANY) + scmd_printk(KERN_INFO, cmd, __func__); +#endif + NCR5380_dprint(NDEBUG_ANY, instance); + NCR5380_dprint_phase(NDEBUG_ANY, instance); + + do_reset(instance); - /* get in phase */ - NCR5380_write(TARGET_COMMAND_REG, - PHASE_SR_TO_TCR(NCR5380_read(STATUS_REG))); - /* assert RST */ - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_RST); - udelay(40); /* reset NCR registers */ - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); NCR5380_write(MODE_REG, MR_BASE); NCR5380_write(TARGET_COMMAND_REG, 0); NCR5380_write(SELECT_ENABLE_REG, 0); - /* ++roman: reset interrupt condition! otherwise no interrupts don't get - * through anymore ... */ - (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG); /* After the reset, there are no more connected or disconnected commands * and no busy units; so clear the low-level status here to avoid @@ -2900,17 +2626,39 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd) * commands! */ - if (hostdata->issue_queue) - dprintk(NDEBUG_ABORT, "scsi%d: reset aborted issued command(s)\n", H_NO(cmd)); - if (hostdata->connected) - dprintk(NDEBUG_ABORT, "scsi%d: reset aborted a connected command\n", H_NO(cmd)); - if (hostdata->disconnected_queue) - dprintk(NDEBUG_ABORT, "scsi%d: reset aborted disconnected command(s)\n", H_NO(cmd)); + if (list_del_cmd(&hostdata->unissued, cmd)) { + cmd->result = DID_RESET << 16; + cmd->scsi_done(cmd); + } + + if (hostdata->selecting) { + hostdata->selecting->result = DID_RESET << 16; + complete_cmd(instance, hostdata->selecting); + hostdata->selecting = NULL; + } + + list_for_each_entry(ncmd, &hostdata->disconnected, list) { + struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd); + + set_host_byte(cmd, DID_RESET); + cmd->scsi_done(cmd); + } + INIT_LIST_HEAD(&hostdata->disconnected); + + list_for_each_entry(ncmd, &hostdata->autosense, list) { + struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd); + + set_host_byte(cmd, DID_RESET); + cmd->scsi_done(cmd); + } + INIT_LIST_HEAD(&hostdata->autosense); + + if (hostdata->connected) { + set_host_byte(hostdata->connected, DID_RESET); + complete_cmd(instance, hostdata->connected); + hostdata->connected = NULL; + } - local_irq_save(flags); - hostdata->issue_queue = NULL; - hostdata->connected = NULL; - hostdata->disconnected_queue = NULL; #ifdef SUPPORT_TAGS free_all_tags(hostdata); #endif @@ -2920,8 +2668,9 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd) hostdata->dma_len = 0; #endif + queue_work(hostdata->work_q, &hostdata->main_task); maybe_release_dma_irq(instance); - local_irq_restore(flags); + spin_unlock_irqrestore(&hostdata->lock, flags); return SUCCESS; } diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c index 5ede3daa93dc..78d1b2963f2c 100644 --- a/drivers/scsi/atari_scsi.c +++ b/drivers/scsi/atari_scsi.c @@ -66,7 +66,6 @@ #include <linux/module.h> #include <linux/types.h> -#include <linux/delay.h> #include <linux/blkdev.h> #include <linux/interrupt.h> #include <linux/init.h> @@ -98,7 +97,6 @@ #define NCR5380_queue_command atari_scsi_queue_command #define NCR5380_abort atari_scsi_abort 
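/*
 * Editor's note, not part of the patch: a minimal sketch of how the
 * list-based EH bookkeeping introduced above fits together. The names
 * are taken from the patch itself; the exact private-data layout and
 * the NCR5380_to_scmd() definition are assumptions for illustration.
 * Setting .cmd_size = NCR5380_CMD_SIZE in the host template makes the
 * mid-layer allocate a struct NCR5380_cmd next to every scsi_cmnd, so
 * each command carries its own list_head and can sit on exactly one of
 * the hostdata lists (unissued, disconnected, autosense) at a time:
 *
 *	struct NCR5380_cmd {
 *		struct list_head list;
 *	};
 *
 *	#define NCR5380_CMD_SIZE	(sizeof(struct NCR5380_cmd))
 *
 *	static inline struct scsi_cmnd *NCR5380_to_scmd(struct NCR5380_cmd *ncmd)
 *	{
 *		return ((struct scsi_cmnd *)ncmd) - 1;	// inverse of scsi_cmd_priv()
 *	}
 *
 * Queueing a command then reduces to a list operation under the new
 * hostdata->lock, for example:
 *
 *	struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd);
 *
 *	spin_lock_irqsave(&hostdata->lock, flags);
 *	list_add_tail(&ncmd->list, &hostdata->unissued);
 *	spin_unlock_irqrestore(&hostdata->lock, flags);
 *
 * which is what allows NCR5380_abort() and NCR5380_bus_reset() to locate
 * and retire commands with list_find_cmd()/list_del_cmd() instead of the
 * old hand-rolled NEXT()/NEXTADDR() pointer chains.
 */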
-#define NCR5380_show_info atari_scsi_show_info #define NCR5380_info atari_scsi_info #define NCR5380_dma_read_setup(instance, data, count) \ @@ -161,23 +159,10 @@ static inline unsigned long SCSI_DMA_GETADR(void) return adr; } -#define HOSTDATA_DMALEN (((struct NCR5380_hostdata *) \ - (atari_scsi_host->hostdata))->dma_len) - -/* Time (in jiffies) to wait after a reset; the SCSI standard calls for 250ms, - * we usually do 0.5s to be on the safe side. But Toshiba CD-ROMs once more - * need ten times the standard value... */ -#ifndef CONFIG_ATARI_SCSI_TOSHIBA_DELAY -#define AFTER_RESET_DELAY (HZ/2) -#else -#define AFTER_RESET_DELAY (5*HZ/2) -#endif - #ifdef REAL_DMA static void atari_scsi_fetch_restbytes(void); #endif -static struct Scsi_Host *atari_scsi_host; static unsigned char (*atari_scsi_reg_read)(unsigned char reg); static void (*atari_scsi_reg_write)(unsigned char reg, unsigned char value); @@ -208,12 +193,12 @@ static int setup_cmd_per_lun = -1; module_param(setup_cmd_per_lun, int, 0); static int setup_sg_tablesize = -1; module_param(setup_sg_tablesize, int, 0); -#ifdef SUPPORT_TAGS static int setup_use_tagged_queuing = -1; module_param(setup_use_tagged_queuing, int, 0); -#endif static int setup_hostid = -1; module_param(setup_hostid, int, 0); +static int setup_toshiba_delay = -1; +module_param(setup_toshiba_delay, int, 0); #if defined(REAL_DMA) @@ -273,15 +258,17 @@ static void scsi_dma_buserr(int irq, void *dummy) #endif -static irqreturn_t scsi_tt_intr(int irq, void *dummy) +static irqreturn_t scsi_tt_intr(int irq, void *dev) { #ifdef REAL_DMA + struct Scsi_Host *instance = dev; + struct NCR5380_hostdata *hostdata = shost_priv(instance); int dma_stat; dma_stat = tt_scsi_dma.dma_ctrl; - dprintk(NDEBUG_INTR, "scsi%d: NCR5380 interrupt, DMA status = %02x\n", - atari_scsi_host->host_no, dma_stat & 0xff); + dsprintk(NDEBUG_INTR, instance, "NCR5380 interrupt, DMA status = %02x\n", + dma_stat & 0xff); /* Look if it was the DMA that has interrupted: First possibility * is that a bus error occurred... @@ -304,7 +291,8 @@ static irqreturn_t scsi_tt_intr(int irq, void *dummy) * data reg! */ if ((dma_stat & 0x02) && !(dma_stat & 0x40)) { - atari_dma_residual = HOSTDATA_DMALEN - (SCSI_DMA_READ_P(dma_addr) - atari_dma_startaddr); + atari_dma_residual = hostdata->dma_len - + (SCSI_DMA_READ_P(dma_addr) - atari_dma_startaddr); dprintk(NDEBUG_DMA, "SCSI DMA: There are %ld residual bytes.\n", atari_dma_residual); @@ -356,15 +344,17 @@ static irqreturn_t scsi_tt_intr(int irq, void *dummy) #endif /* REAL_DMA */ - NCR5380_intr(irq, dummy); + NCR5380_intr(irq, dev); return IRQ_HANDLED; } -static irqreturn_t scsi_falcon_intr(int irq, void *dummy) +static irqreturn_t scsi_falcon_intr(int irq, void *dev) { #ifdef REAL_DMA + struct Scsi_Host *instance = dev; + struct NCR5380_hostdata *hostdata = shost_priv(instance); int dma_stat; /* Turn off DMA and select sector counter register before @@ -399,7 +389,7 @@ static irqreturn_t scsi_falcon_intr(int irq, void *dummy) printk(KERN_ERR "SCSI DMA error: %ld bytes lost in " "ST-DMA fifo\n", transferred & 15); - atari_dma_residual = HOSTDATA_DMALEN - transferred; + atari_dma_residual = hostdata->dma_len - transferred; dprintk(NDEBUG_DMA, "SCSI DMA: There are %ld residual bytes.\n", atari_dma_residual); } else @@ -411,13 +401,14 @@ static irqreturn_t scsi_falcon_intr(int irq, void *dummy) * data to the original destination address. 
*/ memcpy(atari_dma_orig_addr, phys_to_virt(atari_dma_startaddr), - HOSTDATA_DMALEN - atari_dma_residual); + hostdata->dma_len - atari_dma_residual); atari_dma_orig_addr = NULL; } #endif /* REAL_DMA */ - NCR5380_intr(irq, dummy); + NCR5380_intr(irq, dev); + return IRQ_HANDLED; } @@ -488,7 +479,7 @@ static int __init atari_scsi_setup(char *str) * Defaults depend on TT or Falcon, determined at run time. * Negative values mean don't change. */ - int ints[6]; + int ints[8]; get_options(str, ARRAY_SIZE(ints), ints); @@ -504,10 +495,11 @@ static int __init atari_scsi_setup(char *str) setup_sg_tablesize = ints[3]; if (ints[0] >= 4) setup_hostid = ints[4]; -#ifdef SUPPORT_TAGS if (ints[0] >= 5) setup_use_tagged_queuing = ints[5]; -#endif + /* ints[6] (use_pdma) is ignored */ + if (ints[0] >= 7) + setup_toshiba_delay = ints[7]; return 1; } @@ -516,38 +508,6 @@ __setup("atascsi=", atari_scsi_setup); #endif /* !MODULE */ -#ifdef CONFIG_ATARI_SCSI_RESET_BOOT -static void __init atari_scsi_reset_boot(void) -{ - unsigned long end; - - /* - * Do a SCSI reset to clean up the bus during initialization. No messing - * with the queues, interrupts, or locks necessary here. - */ - - printk("Atari SCSI: resetting the SCSI bus..."); - - /* get in phase */ - NCR5380_write(TARGET_COMMAND_REG, - PHASE_SR_TO_TCR(NCR5380_read(STATUS_REG))); - - /* assert RST */ - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_RST); - /* The min. reset hold time is 25us, so 40us should be enough */ - udelay(50); - /* reset RST and interrupt */ - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - NCR5380_read(RESET_PARITY_INTERRUPT_REG); - - end = jiffies + AFTER_RESET_DELAY; - while (time_before(jiffies, end)) - barrier(); - - printk(" done\n"); -} -#endif - #if defined(REAL_DMA) static unsigned long atari_scsi_dma_setup(struct Scsi_Host *instance, @@ -815,14 +775,14 @@ static int atari_scsi_bus_reset(struct scsi_cmnd *cmd) static struct scsi_host_template atari_scsi_template = { .module = THIS_MODULE, .proc_name = DRV_MODULE_NAME, - .show_info = atari_scsi_show_info, .name = "Atari native SCSI", .info = atari_scsi_info, .queuecommand = atari_scsi_queue_command, .eh_abort_handler = atari_scsi_abort, .eh_bus_reset_handler = atari_scsi_bus_reset, .this_id = 7, - .use_clustering = DISABLE_CLUSTERING + .use_clustering = DISABLE_CLUSTERING, + .cmd_size = NCR5380_CMD_SIZE, }; static int __init atari_scsi_probe(struct platform_device *pdev) @@ -880,7 +840,7 @@ static int __init atari_scsi_probe(struct platform_device *pdev) } else { /* Test if a host id is set in the NVRam */ if (ATARIHW_PRESENT(TT_CLK) && nvram_check_checksum()) { - unsigned char b = nvram_read_byte(14); + unsigned char b = nvram_read_byte(16); /* Arbitration enabled? (for TOS) * If yes, use configured host ID @@ -915,21 +875,18 @@ static int __init atari_scsi_probe(struct platform_device *pdev) error = -ENOMEM; goto fail_alloc; } - atari_scsi_host = instance; - -#ifdef CONFIG_ATARI_SCSI_RESET_BOOT - atari_scsi_reset_boot(); -#endif instance->irq = irq->start; host_flags |= IS_A_TT() ? 0 : FLAG_LATE_DMA_SETUP; - #ifdef SUPPORT_TAGS host_flags |= setup_use_tagged_queuing > 0 ? FLAG_TAGGED_QUEUING : 0; #endif + host_flags |= setup_toshiba_delay > 0 ? 
FLAG_TOSHIBA_DELAY : 0; - NCR5380_init(instance, host_flags); + error = NCR5380_init(instance, host_flags); + if (error) + goto fail_init; if (IS_A_TT()) { error = request_irq(instance->irq, scsi_tt_intr, 0, @@ -975,6 +932,8 @@ static int __init atari_scsi_probe(struct platform_device *pdev) #endif } + NCR5380_maybe_reset_bus(instance); + error = scsi_add_host(instance, NULL); if (error) goto fail_host; @@ -989,6 +948,7 @@ fail_host: free_irq(instance->irq, instance); fail_irq: NCR5380_exit(instance); +fail_init: scsi_host_put(instance); fail_alloc: if (atari_dma_buffer) diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c index 05301bc752ee..8b52a9dbb9cf 100644 --- a/drivers/scsi/atp870u.c +++ b/drivers/scsi/atp870u.c @@ -41,77 +41,128 @@ static struct scsi_host_template atp870u_template; static void send_s870(struct atp_unit *dev,unsigned char c); -static void is885(struct atp_unit *dev, unsigned int wkport,unsigned char c); -static void tscam_885(void); +static void atp_is(struct atp_unit *dev, unsigned char c, bool wide_chip, unsigned char lvdmode); + +static inline void atp_writeb_base(struct atp_unit *atp, u8 reg, u8 val) +{ + outb(val, atp->baseport + reg); +} + +static inline void atp_writew_base(struct atp_unit *atp, u8 reg, u16 val) +{ + outw(val, atp->baseport + reg); +} + +static inline void atp_writeb_io(struct atp_unit *atp, u8 channel, u8 reg, u8 val) +{ + outb(val, atp->ioport[channel] + reg); +} + +static inline void atp_writew_io(struct atp_unit *atp, u8 channel, u8 reg, u16 val) +{ + outw(val, atp->ioport[channel] + reg); +} + +static inline void atp_writeb_pci(struct atp_unit *atp, u8 channel, u8 reg, u8 val) +{ + outb(val, atp->pciport[channel] + reg); +} + +static inline void atp_writel_pci(struct atp_unit *atp, u8 channel, u8 reg, u32 val) +{ + outl(val, atp->pciport[channel] + reg); +} + +static inline u8 atp_readb_base(struct atp_unit *atp, u8 reg) +{ + return inb(atp->baseport + reg); +} + +static inline u16 atp_readw_base(struct atp_unit *atp, u8 reg) +{ + return inw(atp->baseport + reg); +} + +static inline u32 atp_readl_base(struct atp_unit *atp, u8 reg) +{ + return inl(atp->baseport + reg); +} + +static inline u8 atp_readb_io(struct atp_unit *atp, u8 channel, u8 reg) +{ + return inb(atp->ioport[channel] + reg); +} + +static inline u16 atp_readw_io(struct atp_unit *atp, u8 channel, u8 reg) +{ + return inw(atp->ioport[channel] + reg); +} + +static inline u8 atp_readb_pci(struct atp_unit *atp, u8 channel, u8 reg) +{ + return inb(atp->pciport[channel] + reg); +} + +static inline bool is880(struct atp_unit *atp) +{ + return atp->pdev->device == ATP880_DEVID1 || + atp->pdev->device == ATP880_DEVID2; +} + +static inline bool is885(struct atp_unit *atp) +{ + return atp->pdev->device == ATP885_DEVID; +} static irqreturn_t atp870u_intr_handle(int irq, void *dev_id) { unsigned long flags; - unsigned short int tmpcip, id; + unsigned short int id; unsigned char i, j, c, target_id, lun,cmdp; unsigned char *prd; struct scsi_cmnd *workreq; - unsigned int workport, tmport, tmport1; unsigned long adrcnt, k; #ifdef ED_DBGP unsigned long l; #endif - int errstus; struct Scsi_Host *host = dev_id; struct atp_unit *dev = (struct atp_unit *)&host->hostdata; for (c = 0; c < 2; c++) { - tmport = dev->ioport[c] + 0x1f; - j = inb(tmport); + j = atp_readb_io(dev, c, 0x1f); if ((j & 0x80) != 0) - { - goto ch_sel; - } + break; dev->in_int[c] = 0; } - return IRQ_NONE; -ch_sel: + if ((j & 0x80) == 0) + return IRQ_NONE; #ifdef ED_DBGP printk("atp870u_intr_handle enter\n"); #endif 
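/*
 * Editor's note, not part of the patch: an illustrative before/after of
 * the conversion pattern applied throughout this file (the offsets shown
 * are the ones used by this interrupt handler). The old code stepped a
 * scratch port variable with pointer arithmetic:
 *
 *	tmport = dev->ioport[c] + 0x1f;
 *	j = inb(tmport);
 *	tmport -= 0x08;		// silently lands on register 0x17
 *	i = inb(tmport);
 *
 * The accessors introduced above name the register offset at every
 * access and let the scratch variables disappear:
 *
 *	j = atp_readb_io(dev, c, 0x1f);
 *	i = atp_readb_io(dev, c, 0x17);
 *
 * Likewise, the is880()/is885() helpers replace the repeated device-ID
 * comparisons against ATP880_DEVID1/ATP880_DEVID2/ATP885_DEVID.
 */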
dev->in_int[c] = 1; - cmdp = inb(dev->ioport[c] + 0x10); - workport = dev->ioport[c]; + cmdp = atp_readb_io(dev, c, 0x10); if (dev->working[c] != 0) { - if (dev->dev_id == ATP885_DEVID) { - tmport1 = workport + 0x16; - if ((inb(tmport1) & 0x80) == 0) - outb((inb(tmport1) | 0x80), tmport1); + if (is885(dev)) { + if ((atp_readb_io(dev, c, 0x16) & 0x80) == 0) + atp_writeb_io(dev, c, 0x16, (atp_readb_io(dev, c, 0x16) | 0x80)); } - tmpcip = dev->pciport[c]; - if ((inb(tmpcip) & 0x08) != 0) + if ((atp_readb_pci(dev, c, 0x00) & 0x08) != 0) { - tmpcip += 0x2; for (k=0; k < 1000; k++) { - if ((inb(tmpcip) & 0x08) == 0) { - goto stop_dma; - } - if ((inb(tmpcip) & 0x01) == 0) { - goto stop_dma; - } + if ((atp_readb_pci(dev, c, 2) & 0x08) == 0) + break; + if ((atp_readb_pci(dev, c, 2) & 0x01) == 0) + break; } } -stop_dma: - tmpcip = dev->pciport[c]; - outb(0x00, tmpcip); - tmport -= 0x08; + atp_writeb_pci(dev, c, 0, 0x00); - i = inb(tmport); + i = atp_readb_io(dev, c, 0x17); - if (dev->dev_id == ATP885_DEVID) { - tmpcip += 2; - outb(0x06, tmpcip); - tmpcip -= 2; - } + if (is885(dev)) + atp_writeb_pci(dev, c, 2, 0x06); - tmport -= 0x02; - target_id = inb(tmport); - tmport += 0x02; + target_id = atp_readb_io(dev, c, 0x15); /* * Remap wide devices onto id numbers @@ -129,7 +180,7 @@ stop_dma: } dev->last_cmd[c] |= 0x40; } - if (dev->dev_id == ATP885_DEVID) + if (is885(dev)) dev->r1f[c][target_id] |= j; #ifdef ED_DBGP printk("atp870u_intr_handle status = %x\n",i); @@ -138,12 +189,11 @@ stop_dma: if ((dev->last_cmd[c] & 0xf0) != 0x40) { dev->last_cmd[c] = 0xff; } - if (dev->dev_id == ATP885_DEVID) { - tmport -= 0x05; + if (is885(dev)) { adrcnt = 0; - ((unsigned char *) &adrcnt)[2] = inb(tmport++); - ((unsigned char *) &adrcnt)[1] = inb(tmport++); - ((unsigned char *) &adrcnt)[0] = inb(tmport); + ((unsigned char *) &adrcnt)[2] = atp_readb_io(dev, c, 0x12); + ((unsigned char *) &adrcnt)[1] = atp_readb_io(dev, c, 0x13); + ((unsigned char *) &adrcnt)[0] = atp_readb_io(dev, c, 0x14); if (dev->id[c][target_id].last_len != adrcnt) { k = dev->id[c][target_id].last_len; @@ -152,7 +202,7 @@ stop_dma: dev->id[c][target_id].last_len = adrcnt; } #ifdef ED_DBGP - printk("tmport = %x dev->id[c][target_id].last_len = %d dev->id[c][target_id].tran_len = %d\n",tmport,dev->id[c][target_id].last_len,dev->id[c][target_id].tran_len); + printk("dev->id[c][target_id].last_len = %d dev->id[c][target_id].tran_len = %d\n",dev->id[c][target_id].last_len,dev->id[c][target_id].tran_len); #endif } @@ -160,11 +210,9 @@ stop_dma: * Flip wide */ if (dev->wide_id[c] != 0) { - tmport = workport + 0x1b; - outb(0x01, tmport); - while ((inb(tmport) & 0x01) != 0x01) { - outb(0x01, tmport); - } + atp_writeb_io(dev, c, 0x1b, 0x01); + while ((atp_readb_io(dev, c, 0x1b) & 0x01) != 0x01) + atp_writeb_io(dev, c, 0x1b, 0x01); } /* * Issue more commands @@ -185,37 +233,34 @@ stop_dma: #ifdef ED_DBGP printk("Status 0x85 return\n"); #endif - goto handled; + return IRQ_HANDLED; } if (i == 0x40) { dev->last_cmd[c] |= 0x40; dev->in_int[c] = 0; - goto handled; + return IRQ_HANDLED; } if (i == 0x21) { if ((dev->last_cmd[c] & 0xf0) != 0x40) { dev->last_cmd[c] = 0xff; } - tmport -= 0x05; adrcnt = 0; - ((unsigned char *) &adrcnt)[2] = inb(tmport++); - ((unsigned char *) &adrcnt)[1] = inb(tmport++); - ((unsigned char *) &adrcnt)[0] = inb(tmport); + ((unsigned char *) &adrcnt)[2] = atp_readb_io(dev, c, 0x12); + ((unsigned char *) &adrcnt)[1] = atp_readb_io(dev, c, 0x13); + ((unsigned char *) &adrcnt)[0] = atp_readb_io(dev, c, 0x14); k = 
dev->id[c][target_id].last_len; k -= adrcnt; dev->id[c][target_id].tran_len = k; dev->id[c][target_id].last_len = adrcnt; - tmport -= 0x04; - outb(0x41, tmport); - tmport += 0x08; - outb(0x08, tmport); + atp_writeb_io(dev, c, 0x10, 0x41); + atp_writeb_io(dev, c, 0x18, 0x08); dev->in_int[c] = 0; - goto handled; + return IRQ_HANDLED; } - if (dev->dev_id == ATP885_DEVID) { + if (is885(dev)) { if ((i == 0x4c) || (i == 0x4d) || (i == 0x8c) || (i == 0x8d)) { if ((i == 0x4c) || (i == 0x8c)) i=0x48; @@ -229,11 +274,9 @@ stop_dma: printk(KERN_DEBUG "Device reselect\n"); #endif lun = 0; - tmport -= 0x07; - if (cmdp == 0x44 || i==0x80) { - tmport += 0x0d; - lun = inb(tmport) & 0x07; - } else { + if (cmdp == 0x44 || i == 0x80) + lun = atp_readb_io(dev, c, 0x1d) & 0x07; + else { if ((dev->last_cmd[c] & 0xf0) != 0x40) { dev->last_cmd[c] = 0xff; } @@ -241,49 +284,41 @@ stop_dma: #ifdef ED_DBGP printk("cmdp = 0x41\n"); #endif - tmport += 0x02; adrcnt = 0; - ((unsigned char *) &adrcnt)[2] = inb(tmport++); - ((unsigned char *) &adrcnt)[1] = inb(tmport++); - ((unsigned char *) &adrcnt)[0] = inb(tmport); + ((unsigned char *) &adrcnt)[2] = atp_readb_io(dev, c, 0x12); + ((unsigned char *) &adrcnt)[1] = atp_readb_io(dev, c, 0x13); + ((unsigned char *) &adrcnt)[0] = atp_readb_io(dev, c, 0x14); k = dev->id[c][target_id].last_len; k -= adrcnt; dev->id[c][target_id].tran_len = k; dev->id[c][target_id].last_len = adrcnt; - tmport += 0x04; - outb(0x08, tmport); + atp_writeb_io(dev, c, 0x18, 0x08); dev->in_int[c] = 0; - goto handled; + return IRQ_HANDLED; } else { #ifdef ED_DBGP printk("cmdp != 0x41\n"); #endif - outb(0x46, tmport); + atp_writeb_io(dev, c, 0x10, 0x46); dev->id[c][target_id].dirct = 0x00; - tmport += 0x02; - outb(0x00, tmport++); - outb(0x00, tmport++); - outb(0x00, tmport++); - tmport += 0x03; - outb(0x08, tmport); + atp_writeb_io(dev, c, 0x12, 0x00); + atp_writeb_io(dev, c, 0x13, 0x00); + atp_writeb_io(dev, c, 0x14, 0x00); + atp_writeb_io(dev, c, 0x18, 0x08); dev->in_int[c] = 0; - goto handled; + return IRQ_HANDLED; } } if (dev->last_cmd[c] != 0xff) { dev->last_cmd[c] |= 0x40; } - if (dev->dev_id == ATP885_DEVID) { - j = inb(dev->baseport + 0x29) & 0xfe; - outb(j, dev->baseport + 0x29); - tmport = workport + 0x16; - } else { - tmport = workport + 0x10; - outb(0x45, tmport); - tmport += 0x06; - } - - target_id = inb(tmport); + if (is885(dev)) { + j = atp_readb_base(dev, 0x29) & 0xfe; + atp_writeb_base(dev, 0x29, j); + } else + atp_writeb_io(dev, c, 0x10, 0x45); + + target_id = atp_readb_io(dev, c, 0x16); /* * Remap wide identifiers */ @@ -292,10 +327,8 @@ stop_dma: } else { target_id &= 0x07; } - if (dev->dev_id == ATP885_DEVID) { - tmport = workport + 0x10; - outb(0x45, tmport); - } + if (is885(dev)) + atp_writeb_io(dev, c, 0x10, 0x45); workreq = dev->id[c][target_id].curr_req; #ifdef ED_DBGP scmd_printk(KERN_DEBUG, workreq, "CDB"); @@ -304,18 +337,16 @@ stop_dma: printk("\n"); #endif - tmport = workport + 0x0f; - outb(lun, tmport); - tmport += 0x02; - outb(dev->id[c][target_id].devsp, tmport++); + atp_writeb_io(dev, c, 0x0f, lun); + atp_writeb_io(dev, c, 0x11, dev->id[c][target_id].devsp); adrcnt = dev->id[c][target_id].tran_len; k = dev->id[c][target_id].last_len; - outb(((unsigned char *) &k)[2], tmport++); - outb(((unsigned char *) &k)[1], tmport++); - outb(((unsigned char *) &k)[0], tmport++); + atp_writeb_io(dev, c, 0x12, ((unsigned char *) &k)[2]); + atp_writeb_io(dev, c, 0x13, ((unsigned char *) &k)[1]); + atp_writeb_io(dev, c, 0x14, ((unsigned char *) &k)[0]); #ifdef ED_DBGP - printk("k %x, 
k[0] 0x%x k[1] 0x%x k[2] 0x%x\n", k, inb(tmport-1), inb(tmport-2), inb(tmport-3)); + printk("k %x, k[0] 0x%x k[1] 0x%x k[2] 0x%x\n", k, atp_readb_io(dev, c, 0x14), atp_readb_io(dev, c, 0x13), atp_readb_io(dev, c, 0x12)); #endif /* Remap wide */ j = target_id; @@ -324,35 +355,28 @@ stop_dma: } /* Add direction */ j |= dev->id[c][target_id].dirct; - outb(j, tmport++); - outb(0x80,tmport); + atp_writeb_io(dev, c, 0x15, j); + atp_writeb_io(dev, c, 0x16, 0x80); /* enable 32 bit fifo transfer */ - if (dev->dev_id == ATP885_DEVID) { - tmpcip = dev->pciport[c] + 1; - i=inb(tmpcip) & 0xf3; + if (is885(dev)) { + i = atp_readb_pci(dev, c, 1) & 0xf3; //j=workreq->cmnd[0]; if ((workreq->cmnd[0] == 0x08) || (workreq->cmnd[0] == 0x28) || (workreq->cmnd[0] == 0x0a) || (workreq->cmnd[0] == 0x2a)) { i |= 0x0c; } - outb(i,tmpcip); - } else if ((dev->dev_id == ATP880_DEVID1) || - (dev->dev_id == ATP880_DEVID2) ) { - tmport = workport - 0x05; - if ((workreq->cmnd[0] == 0x08) || (workreq->cmnd[0] == 0x28) || (workreq->cmnd[0] == 0x0a) || (workreq->cmnd[0] == 0x2a)) { - outb((unsigned char) ((inb(tmport) & 0x3f) | 0xc0), tmport); - } else { - outb((unsigned char) (inb(tmport) & 0x3f), tmport); - } + atp_writeb_pci(dev, c, 1, i); + } else if (is880(dev)) { + if ((workreq->cmnd[0] == 0x08) || (workreq->cmnd[0] == 0x28) || (workreq->cmnd[0] == 0x0a) || (workreq->cmnd[0] == 0x2a)) + atp_writeb_base(dev, 0x3b, (atp_readb_base(dev, 0x3b) & 0x3f) | 0xc0); + else + atp_writeb_base(dev, 0x3b, atp_readb_base(dev, 0x3b) & 0x3f); } else { - tmport = workport + 0x3a; - if ((workreq->cmnd[0] == 0x08) || (workreq->cmnd[0] == 0x28) || (workreq->cmnd[0] == 0x0a) || (workreq->cmnd[0] == 0x2a)) { - outb((unsigned char) ((inb(tmport) & 0xf3) | 0x08), tmport); - } else { - outb((unsigned char) (inb(tmport) & 0xf3), tmport); - } + if ((workreq->cmnd[0] == 0x08) || (workreq->cmnd[0] == 0x28) || (workreq->cmnd[0] == 0x0a) || (workreq->cmnd[0] == 0x2a)) + atp_writeb_base(dev, 0x3a, (atp_readb_base(dev, 0x3a) & 0xf3) | 0x08); + else + atp_writeb_base(dev, 0x3a, atp_readb_base(dev, 0x3a) & 0xf3); } - tmport = workport + 0x1b; j = 0; id = 1; id = id << target_id; @@ -362,18 +386,16 @@ stop_dma: if ((id & dev->wide_id[c]) != 0) { j |= 0x01; } - outb(j, tmport); - while ((inb(tmport) & 0x01) != j) { - outb(j,tmport); - } + atp_writeb_io(dev, c, 0x1b, j); + while ((atp_readb_io(dev, c, 0x1b) & 0x01) != j) + atp_writeb_io(dev, c, 0x1b, j); if (dev->id[c][target_id].last_len == 0) { - tmport = workport + 0x18; - outb(0x08, tmport); + atp_writeb_io(dev, c, 0x18, 0x08); dev->in_int[c] = 0; #ifdef ED_DBGP printk("dev->id[c][target_id].last_len = 0\n"); #endif - goto handled; + return IRQ_HANDLED; } #ifdef ED_DBGP printk("target_id = %d adrcnt = %d\n",target_id,adrcnt); @@ -401,39 +423,33 @@ stop_dma: } } } - tmpcip = dev->pciport[c] + 0x04; - outl(dev->id[c][target_id].prdaddr, tmpcip); + atp_writel_pci(dev, c, 0x04, dev->id[c][target_id].prdaddr); #ifdef ED_DBGP printk("dev->id[%d][%d].prdaddr 0x%8x\n", c, target_id, dev->id[c][target_id].prdaddr); #endif - if (dev->dev_id == ATP885_DEVID) { - tmpcip -= 0x04; - } else { - tmpcip -= 0x02; - outb(0x06, tmpcip); - outb(0x00, tmpcip); - tmpcip -= 0x02; + if (!is885(dev)) { + atp_writeb_pci(dev, c, 2, 0x06); + atp_writeb_pci(dev, c, 2, 0x00); } - tmport = workport + 0x18; /* * Check transfer direction */ if (dev->id[c][target_id].dirct != 0) { - outb(0x08, tmport); - outb(0x01, tmpcip); + atp_writeb_io(dev, c, 0x18, 0x08); + atp_writeb_pci(dev, c, 0, 0x01); dev->in_int[c] = 0; #ifdef ED_DBGP 
printk("status 0x80 return dirct != 0\n"); #endif - goto handled; + return IRQ_HANDLED; } - outb(0x08, tmport); - outb(0x09, tmpcip); + atp_writeb_io(dev, c, 0x18, 0x08); + atp_writeb_pci(dev, c, 0, 0x09); dev->in_int[c] = 0; #ifdef ED_DBGP printk("status 0x80 return dirct = 0\n"); #endif - goto handled; + return IRQ_HANDLED; } /* @@ -442,31 +458,22 @@ stop_dma: workreq = dev->id[c][target_id].curr_req; - if (i == 0x42) { - if ((dev->last_cmd[c] & 0xf0) != 0x40) - { - dev->last_cmd[c] = 0xff; - } - errstus = 0x02; - workreq->result = errstus; - goto go_42; - } - if (i == 0x16) { + if (i == 0x42 || i == 0x16) { if ((dev->last_cmd[c] & 0xf0) != 0x40) { dev->last_cmd[c] = 0xff; } - errstus = 0; - tmport -= 0x08; - errstus = inb(tmport); - if (((dev->r1f[c][target_id] & 0x10) != 0)&&(dev->dev_id==ATP885_DEVID)) { - printk(KERN_WARNING "AEC67162 CRC ERROR !\n"); - errstus = 0x02; - } - workreq->result = errstus; -go_42: - if (dev->dev_id == ATP885_DEVID) { - j = inb(dev->baseport + 0x29) | 0x01; - outb(j, dev->baseport + 0x29); + if (i == 0x16) { + workreq->result = atp_readb_io(dev, c, 0x0f); + if (((dev->r1f[c][target_id] & 0x10) != 0) && is885(dev)) { + printk(KERN_WARNING "AEC67162 CRC ERROR !\n"); + workreq->result = 0x02; + } + } else + workreq->result = 0x02; + + if (is885(dev)) { + j = atp_readb_base(dev, 0x29) | 0x01; + atp_writeb_base(dev, 0x29, j); } /* * Complete the command @@ -488,11 +495,9 @@ go_42: * Take it back wide */ if (dev->wide_id[c] != 0) { - tmport = workport + 0x1b; - outb(0x01, tmport); - while ((inb(tmport) & 0x01) != 0x01) { - outb(0x01, tmport); - } + atp_writeb_io(dev, c, 0x1b, 0x01); + while ((atp_readb_io(dev, c, 0x1b) & 0x01) != 0x01) + atp_writeb_io(dev, c, 0x1b, 0x01); } /* * If there is stuff to send and nothing going then send it @@ -507,7 +512,7 @@ go_42: } spin_unlock_irqrestore(dev->host->host_lock, flags); dev->in_int[c] = 0; - goto handled; + return IRQ_HANDLED; } if ((dev->last_cmd[c] & 0xf0) != 0x40) { dev->last_cmd[c] = 0xff; @@ -517,84 +522,54 @@ go_42: } i &= 0x0f; if (i == 0x09) { - tmpcip += 4; - outl(dev->id[c][target_id].prdaddr, tmpcip); - tmpcip = tmpcip - 2; - outb(0x06, tmpcip); - outb(0x00, tmpcip); - tmpcip = tmpcip - 2; - tmport = workport + 0x10; - outb(0x41, tmport); - if (dev->dev_id == ATP885_DEVID) { - tmport += 2; + atp_writel_pci(dev, c, 4, dev->id[c][target_id].prdaddr); + atp_writeb_pci(dev, c, 2, 0x06); + atp_writeb_pci(dev, c, 2, 0x00); + atp_writeb_io(dev, c, 0x10, 0x41); + if (is885(dev)) { k = dev->id[c][target_id].last_len; - outb((unsigned char) (((unsigned char *) (&k))[2]), tmport++); - outb((unsigned char) (((unsigned char *) (&k))[1]), tmport++); - outb((unsigned char) (((unsigned char *) (&k))[0]), tmport); + atp_writeb_io(dev, c, 0x12, ((unsigned char *) (&k))[2]); + atp_writeb_io(dev, c, 0x13, ((unsigned char *) (&k))[1]); + atp_writeb_io(dev, c, 0x14, ((unsigned char *) (&k))[0]); dev->id[c][target_id].dirct = 0x00; - tmport += 0x04; } else { dev->id[c][target_id].dirct = 0x00; - tmport += 0x08; } - outb(0x08, tmport); - outb(0x09, tmpcip); + atp_writeb_io(dev, c, 0x18, 0x08); + atp_writeb_pci(dev, c, 0, 0x09); dev->in_int[c] = 0; - goto handled; + return IRQ_HANDLED; } if (i == 0x08) { - tmpcip += 4; - outl(dev->id[c][target_id].prdaddr, tmpcip); - tmpcip = tmpcip - 2; - outb(0x06, tmpcip); - outb(0x00, tmpcip); - tmpcip = tmpcip - 2; - tmport = workport + 0x10; - outb(0x41, tmport); - if (dev->dev_id == ATP885_DEVID) { - tmport += 2; + atp_writel_pci(dev, c, 4, dev->id[c][target_id].prdaddr); + 
atp_writeb_pci(dev, c, 2, 0x06); + atp_writeb_pci(dev, c, 2, 0x00); + atp_writeb_io(dev, c, 0x10, 0x41); + if (is885(dev)) { k = dev->id[c][target_id].last_len; - outb((unsigned char) (((unsigned char *) (&k))[2]), tmport++); - outb((unsigned char) (((unsigned char *) (&k))[1]), tmport++); - outb((unsigned char) (((unsigned char *) (&k))[0]), tmport++); - } else { - tmport += 5; + atp_writeb_io(dev, c, 0x12, ((unsigned char *) (&k))[2]); + atp_writeb_io(dev, c, 0x13, ((unsigned char *) (&k))[1]); + atp_writeb_io(dev, c, 0x14, ((unsigned char *) (&k))[0]); } - outb((unsigned char) (inb(tmport) | 0x20), tmport); + atp_writeb_io(dev, c, 0x15, atp_readb_io(dev, c, 0x15) | 0x20); dev->id[c][target_id].dirct = 0x20; - tmport += 0x03; - outb(0x08, tmport); - outb(0x01, tmpcip); + atp_writeb_io(dev, c, 0x18, 0x08); + atp_writeb_pci(dev, c, 0, 0x01); dev->in_int[c] = 0; - goto handled; - } - tmport -= 0x07; - if (i == 0x0a) { - outb(0x30, tmport); - } else { - outb(0x46, tmport); + return IRQ_HANDLED; } + if (i == 0x0a) + atp_writeb_io(dev, c, 0x10, 0x30); + else + atp_writeb_io(dev, c, 0x10, 0x46); dev->id[c][target_id].dirct = 0x00; - tmport += 0x02; - outb(0x00, tmport++); - outb(0x00, tmport++); - outb(0x00, tmport++); - tmport += 0x03; - outb(0x08, tmport); - dev->in_int[c] = 0; - goto handled; - } else { -// tmport = workport + 0x17; -// inb(tmport); -// dev->working[c] = 0; - dev->in_int[c] = 0; - goto handled; + atp_writeb_io(dev, c, 0x12, 0x00); + atp_writeb_io(dev, c, 0x13, 0x00); + atp_writeb_io(dev, c, 0x14, 0x00); + atp_writeb_io(dev, c, 0x18, 0x08); } - -handled: -#ifdef ED_DBGP - printk("atp870u_intr_handle exit\n"); -#endif + dev->in_int[c] = 0; + return IRQ_HANDLED; } /** @@ -608,7 +583,7 @@ static int atp870u_queuecommand_lck(struct scsi_cmnd *req_p, void (*done) (struct scsi_cmnd *)) { unsigned char c; - unsigned int tmport,m; + unsigned int m; struct atp_unit *dev; struct Scsi_Host *host; @@ -677,11 +652,10 @@ static int atp870u_queuecommand_lck(struct scsi_cmnd *req_p, return 0; } dev->quereq[c][dev->quend[c]] = req_p; - tmport = dev->ioport[c] + 0x1c; #ifdef ED_DBGP - printk("dev->ioport[c] = %x inb(tmport) = %x dev->in_int[%d] = %d dev->in_snd[%d] = %d\n",dev->ioport[c],inb(tmport),c,dev->in_int[c],c,dev->in_snd[c]); + printk("dev->ioport[c] = %x atp_readb_io(dev, c, 0x1c) = %x dev->in_int[%d] = %d dev->in_snd[%d] = %d\n",dev->ioport[c],atp_readb_io(dev, c, 0x1c),c,dev->in_int[c],c,dev->in_snd[c]); #endif - if ((inb(tmport) == 0) && (dev->in_int[c] == 0) && (dev->in_snd[c] == 0)) { + if ((atp_readb_io(dev, c, 0x1c) == 0) && (dev->in_int[c] == 0) && (dev->in_snd[c] == 0)) { #ifdef ED_DBGP printk("Call sent_s870(atp870u_queuecommand)\n"); #endif @@ -706,14 +680,12 @@ static DEF_SCSI_QCMD(atp870u_queuecommand) */ static void send_s870(struct atp_unit *dev,unsigned char c) { - unsigned int tmport; - struct scsi_cmnd *workreq; + struct scsi_cmnd *workreq = NULL; unsigned int i;//,k; unsigned char j, target_id; unsigned char *prd; - unsigned short int tmpcip, w; + unsigned short int w; unsigned long l, bttl = 0; - unsigned int workport; unsigned long sg_count; if (dev->in_snd[c] != 0) { @@ -729,53 +701,42 @@ static void send_s870(struct atp_unit *dev,unsigned char c) if ((dev->last_cmd[c] != 0xff) && ((dev->last_cmd[c] & 0x40) != 0)) { dev->last_cmd[c] &= 0x0f; workreq = dev->id[c][dev->last_cmd[c]].curr_req; - if (workreq != NULL) { /* check NULL pointer */ - goto cmd_subp; - } - dev->last_cmd[c] = 0xff; - if (dev->quhd[c] == dev->quend[c]) { - dev->in_snd[c] = 0; - return ; + if 
(!workreq) {
+			dev->last_cmd[c] = 0xff;
+			if (dev->quhd[c] == dev->quend[c]) {
+				dev->in_snd[c] = 0;
+				return;
+			}
 		}
 	}
-	if ((dev->last_cmd[c] != 0xff) && (dev->working[c] != 0)) {
-		dev->in_snd[c] = 0;
-		return ;
-	}
-	dev->working[c]++;
-	j = dev->quhd[c];
-	dev->quhd[c]++;
-	if (dev->quhd[c] >= qcnt) {
-		dev->quhd[c] = 0;
-	}
-	workreq = dev->quereq[c][dev->quhd[c]];
-	if (dev->id[c][scmd_id(workreq)].curr_req == NULL) {
+	if (!workreq) {
+		if ((dev->last_cmd[c] != 0xff) && (dev->working[c] != 0)) {
+			dev->in_snd[c] = 0;
+			return;
+		}
+		dev->working[c]++;
+		j = dev->quhd[c];
+		dev->quhd[c]++;
+		if (dev->quhd[c] >= qcnt)
+			dev->quhd[c] = 0;
+		workreq = dev->quereq[c][dev->quhd[c]];
+		if (dev->id[c][scmd_id(workreq)].curr_req != NULL) {
+			dev->quhd[c] = j;
+			dev->working[c]--;
+			dev->in_snd[c] = 0;
+			return;
+		}
 		dev->id[c][scmd_id(workreq)].curr_req = workreq;
 		dev->last_cmd[c] = scmd_id(workreq);
-		goto cmd_subp;
-	}
-	dev->quhd[c] = j;
-	dev->working[c]--;
-	dev->in_snd[c] = 0;
-	return;
-cmd_subp:
-	workport = dev->ioport[c];
-	tmport = workport + 0x1f;
-	if ((inb(tmport) & 0xb0) != 0) {
-		goto abortsnd;
-	}
-	tmport = workport + 0x1c;
-	if (inb(tmport) == 0) {
-		goto oktosend;
 	}
-abortsnd:
+	if ((atp_readb_io(dev, c, 0x1f) & 0xb0) != 0 || atp_readb_io(dev, c, 0x1c) != 0) {
 #ifdef ED_DBGP
-	printk("Abort to Send\n");
+		printk("Abort to Send\n");
 #endif
-	dev->last_cmd[c] |= 0x40;
-	dev->in_snd[c] = 0;
-	return;
-oktosend:
+		dev->last_cmd[c] |= 0x40;
+		dev->in_snd[c] = 0;
+		return;
+	}
 #ifdef ED_DBGP
 	printk("OK to Send\n");
 	scmd_printk(KERN_DEBUG, workreq, "CDB");
@@ -786,9 +747,9 @@ oktosend:
 #endif
 	l = scsi_bufflen(workreq);
 
-	if (dev->dev_id == ATP885_DEVID) {
-		j = inb(dev->baseport + 0x29) & 0xfe;
-		outb(j, dev->baseport + 0x29);
+	if (is885(dev)) {
+		j = atp_readb_base(dev, 0x29) & 0xfe;
+		atp_writeb_base(dev, 0x29, j);
 		dev->r1f[c][scmd_id(workreq)] = 0;
 	}
@@ -800,7 +761,6 @@ oktosend:
 		l = 0;
 	}
 
-	tmport = workport + 0x1b;
 	j = 0;
 	target_id = scmd_id(workreq);
 
@@ -812,9 +772,9 @@ oktosend:
 	if ((w & dev->wide_id[c]) != 0) {
 		j |= 0x01;
 	}
-	outb(j, tmport);
-	while ((inb(tmport) & 0x01) != j) {
-		outb(j,tmport);
+	atp_writeb_io(dev, c, 0x1b, j);
+	while ((atp_readb_io(dev, c, 0x1b) & 0x01) != j) {
+		atp_writeb_io(dev, c, 0x1b, j);
 #ifdef ED_DBGP
 		printk("send_s870 while loop 1\n");
 #endif
@@ -823,24 +783,19 @@ oktosend:
 	 * Write the command
 	 */
 
-	tmport = workport;
-	outb(workreq->cmd_len, tmport++);
-	outb(0x2c, tmport++);
-	if (dev->dev_id == ATP885_DEVID) {
-		outb(0x7f, tmport++);
-	} else {
-		outb(0xcf, tmport++);
-	}
-	for (i = 0; i < workreq->cmd_len; i++) {
-		outb(workreq->cmnd[i], tmport++);
-	}
-	tmport = workport + 0x0f;
-	outb(workreq->device->lun, tmport);
-	tmport += 0x02;
+	atp_writeb_io(dev, c, 0x00, workreq->cmd_len);
+	atp_writeb_io(dev, c, 0x01, 0x2c);
+	if (is885(dev))
+		atp_writeb_io(dev, c, 0x02, 0x7f);
+	else
+		atp_writeb_io(dev, c, 0x02, 0xcf);
+	for (i = 0; i < workreq->cmd_len; i++)
+		atp_writeb_io(dev, c, 0x03 + i, workreq->cmnd[i]);
+	atp_writeb_io(dev, c, 0x0f, workreq->device->lun);
 	/*
 	 * Write the target
 	 */
-	outb(dev->id[c][target_id].devsp, tmport++);
+	atp_writeb_io(dev, c, 0x11, dev->id[c][target_id].devsp);
 #ifdef ED_DBGP
 	printk("dev->id[%d][%d].devsp = %2x\n",c,target_id,dev->id[c][target_id].devsp);
 #endif
@@ -849,9 +804,9 @@ oktosend:
 	/*
 	 * Write transfer size
 	 */
-	outb((unsigned char) (((unsigned char *) (&l))[2]), tmport++);
-	outb((unsigned char) (((unsigned char *) (&l))[1]), tmport++);
-	outb((unsigned char) (((unsigned char *) (&l))[0]), 
tmport++); + atp_writeb_io(dev, c, 0x12, ((unsigned char *) (&l))[2]); + atp_writeb_io(dev, c, 0x13, ((unsigned char *) (&l))[1]); + atp_writeb_io(dev, c, 0x14, ((unsigned char *) (&l))[0]); j = target_id; dev->id[c][j].last_len = l; dev->id[c][j].tran_len = 0; @@ -867,29 +822,24 @@ oktosend: /* * Check transfer direction */ - if (workreq->sc_data_direction == DMA_TO_DEVICE) { - outb((unsigned char) (j | 0x20), tmport++); - } else { - outb(j, tmport++); - } - outb((unsigned char) (inb(tmport) | 0x80), tmport); - outb(0x80, tmport); - tmport = workport + 0x1c; + if (workreq->sc_data_direction == DMA_TO_DEVICE) + atp_writeb_io(dev, c, 0x15, j | 0x20); + else + atp_writeb_io(dev, c, 0x15, j); + atp_writeb_io(dev, c, 0x16, atp_readb_io(dev, c, 0x16) | 0x80); + atp_writeb_io(dev, c, 0x16, 0x80); dev->id[c][target_id].dirct = 0; if (l == 0) { - if (inb(tmport) == 0) { - tmport = workport + 0x18; + if (atp_readb_io(dev, c, 0x1c) == 0) { #ifdef ED_DBGP printk("change SCSI_CMD_REG 0x08\n"); #endif - outb(0x08, tmport); - } else { + atp_writeb_io(dev, c, 0x18, 0x08); + } else dev->last_cmd[c] |= 0x40; - } dev->in_snd[c] = 0; return; } - tmpcip = dev->pciport[c]; prd = dev->id[c][target_id].prd_table; dev->id[c][target_id].prd_pos = prd; @@ -926,50 +876,37 @@ oktosend: printk("2. bttl %x, l %x\n",bttl, l); #endif } - tmpcip += 4; #ifdef ED_DBGP - printk("send_s870: prdaddr_2 0x%8x tmpcip %x target_id %d\n", dev->id[c][target_id].prdaddr,tmpcip,target_id); + printk("send_s870: prdaddr_2 0x%8x target_id %d\n", dev->id[c][target_id].prdaddr,target_id); #endif dev->id[c][target_id].prdaddr = dev->id[c][target_id].prd_bus; - outl(dev->id[c][target_id].prdaddr, tmpcip); - tmpcip = tmpcip - 2; - outb(0x06, tmpcip); - outb(0x00, tmpcip); - if (dev->dev_id == ATP885_DEVID) { - tmpcip--; - j=inb(tmpcip) & 0xf3; + atp_writel_pci(dev, c, 4, dev->id[c][target_id].prdaddr); + atp_writeb_pci(dev, c, 2, 0x06); + atp_writeb_pci(dev, c, 2, 0x00); + if (is885(dev)) { + j = atp_readb_pci(dev, c, 1) & 0xf3; if ((workreq->cmnd[0] == 0x08) || (workreq->cmnd[0] == 0x28) || (workreq->cmnd[0] == 0x0a) || (workreq->cmnd[0] == 0x2a)) { j |= 0x0c; } - outb(j,tmpcip); - tmpcip--; - } else if ((dev->dev_id == ATP880_DEVID1) || - (dev->dev_id == ATP880_DEVID2)) { - tmpcip =tmpcip -2; - tmport = workport - 0x05; - if ((workreq->cmnd[0] == 0x08) || (workreq->cmnd[0] == 0x28) || (workreq->cmnd[0] == 0x0a) || (workreq->cmnd[0] == 0x2a)) { - outb((unsigned char) ((inb(tmport) & 0x3f) | 0xc0), tmport); - } else { - outb((unsigned char) (inb(tmport) & 0x3f), tmport); - } + atp_writeb_pci(dev, c, 1, j); + } else if (is880(dev)) { + if ((workreq->cmnd[0] == 0x08) || (workreq->cmnd[0] == 0x28) || (workreq->cmnd[0] == 0x0a) || (workreq->cmnd[0] == 0x2a)) + atp_writeb_base(dev, 0x3b, (atp_readb_base(dev, 0x3b) & 0x3f) | 0xc0); + else + atp_writeb_base(dev, 0x3b, atp_readb_base(dev, 0x3b) & 0x3f); } else { - tmpcip =tmpcip -2; - tmport = workport + 0x3a; - if ((workreq->cmnd[0] == 0x08) || (workreq->cmnd[0] == 0x28) || (workreq->cmnd[0] == 0x0a) || (workreq->cmnd[0] == 0x2a)) { - outb((inb(tmport) & 0xf3) | 0x08, tmport); - } else { - outb(inb(tmport) & 0xf3, tmport); - } + if ((workreq->cmnd[0] == 0x08) || (workreq->cmnd[0] == 0x28) || (workreq->cmnd[0] == 0x0a) || (workreq->cmnd[0] == 0x2a)) + atp_writeb_base(dev, 0x3a, (atp_readb_base(dev, 0x3a) & 0xf3) | 0x08); + else + atp_writeb_base(dev, 0x3a, atp_readb_base(dev, 0x3a) & 0xf3); } - tmport = workport + 0x1c; if(workreq->sc_data_direction == DMA_TO_DEVICE) { dev->id[c][target_id].dirct = 
0x20; - if (inb(tmport) == 0) { - tmport = workport + 0x18; - outb(0x08, tmport); - outb(0x01, tmpcip); + if (atp_readb_io(dev, c, 0x1c) == 0) { + atp_writeb_io(dev, c, 0x18, 0x08); + atp_writeb_pci(dev, c, 0, 0x01); #ifdef ED_DBGP printk( "start DMA(to target)\n"); #endif @@ -979,10 +916,9 @@ oktosend: dev->in_snd[c] = 0; return; } - if (inb(tmport) == 0) { - tmport = workport + 0x18; - outb(0x08, tmport); - outb(0x09, tmpcip); + if (atp_readb_io(dev, c, 0x1c) == 0) { + atp_writeb_io(dev, c, 0x18, 0x08); + atp_writeb_pci(dev, c, 0, 0x09); #ifdef ED_DBGP printk( "start DMA(to host)\n"); #endif @@ -996,49 +932,40 @@ oktosend: static unsigned char fun_scam(struct atp_unit *dev, unsigned short int *val) { - unsigned int tmport; unsigned short int i, k; unsigned char j; - tmport = dev->ioport[0] + 0x1c; - outw(*val, tmport); -FUN_D7: + atp_writew_io(dev, 0, 0x1c, *val); for (i = 0; i < 10; i++) { /* stable >= bus settle delay(400 ns) */ - k = inw(tmport); + k = atp_readw_io(dev, 0, 0x1c); j = (unsigned char) (k >> 8); - if ((k & 0x8000) != 0) { /* DB7 all release? */ - goto FUN_D7; - } + if ((k & 0x8000) != 0) /* DB7 all release? */ + i = 0; } *val |= 0x4000; /* assert DB6 */ - outw(*val, tmport); + atp_writew_io(dev, 0, 0x1c, *val); *val &= 0xdfff; /* assert DB5 */ - outw(*val, tmport); -FUN_D5: + atp_writew_io(dev, 0, 0x1c, *val); for (i = 0; i < 10; i++) { /* stable >= bus settle delay(400 ns) */ - if ((inw(tmport) & 0x2000) != 0) { /* DB5 all release? */ - goto FUN_D5; - } + if ((atp_readw_io(dev, 0, 0x1c) & 0x2000) != 0) /* DB5 all release? */ + i = 0; } *val |= 0x8000; /* no DB4-0, assert DB7 */ *val &= 0xe0ff; - outw(*val, tmport); + atp_writew_io(dev, 0, 0x1c, *val); *val &= 0xbfff; /* release DB6 */ - outw(*val, tmport); -FUN_D6: + atp_writew_io(dev, 0, 0x1c, *val); for (i = 0; i < 10; i++) { /* stable >= bus settle delay(400 ns) */ - if ((inw(tmport) & 0x4000) != 0) { /* DB6 all release? */ - goto FUN_D6; - } + if ((atp_readw_io(dev, 0, 0x1c) & 0x4000) != 0) /* DB6 all release? 
*/ + i = 0; } return j; } -static void tscam(struct Scsi_Host *host) +static void tscam(struct Scsi_Host *host, bool wide_chip, u8 scam_on) { - unsigned int tmport; unsigned char i, j, k; unsigned long n; unsigned short int m, assignid_map, val; @@ -1055,31 +982,28 @@ static void tscam(struct Scsi_Host *host) } */ - tmport = dev->ioport[0] + 1; - outb(0x08, tmport++); - outb(0x7f, tmport); - tmport = dev->ioport[0] + 0x11; - outb(0x20, tmport); + atp_writeb_io(dev, 0, 1, 0x08); + atp_writeb_io(dev, 0, 2, 0x7f); + atp_writeb_io(dev, 0, 0x11, 0x20); - if ((dev->scam_on & 0x40) == 0) { + if ((scam_on & 0x40) == 0) { return; } m = 1; m <<= dev->host_id[0]; j = 16; - if (dev->chip_ver < 4) { + if (!wide_chip) { m |= 0xff00; j = 8; } assignid_map = m; - tmport = dev->ioport[0] + 0x02; - outb(0x02, tmport++); /* 2*2=4ms,3EH 2/32*3E=3.9ms */ - outb(0, tmport++); - outb(0, tmport++); - outb(0, tmport++); - outb(0, tmport++); - outb(0, tmport++); - outb(0, tmport++); + atp_writeb_io(dev, 0, 0x02, 0x02); /* 2*2=4ms,3EH 2/32*3E=3.9ms */ + atp_writeb_io(dev, 0, 0x03, 0); + atp_writeb_io(dev, 0, 0x04, 0); + atp_writeb_io(dev, 0, 0x05, 0); + atp_writeb_io(dev, 0, 0x06, 0); + atp_writeb_io(dev, 0, 0x07, 0); + atp_writeb_io(dev, 0, 0x08, 0); for (i = 0; i < j; i++) { m = 1; @@ -1087,92 +1011,73 @@ static void tscam(struct Scsi_Host *host) if ((m & assignid_map) != 0) { continue; } - tmport = dev->ioport[0] + 0x0f; - outb(0, tmport++); - tmport += 0x02; - outb(0, tmport++); - outb(0, tmport++); - outb(0, tmport++); + atp_writeb_io(dev, 0, 0x0f, 0); + atp_writeb_io(dev, 0, 0x12, 0); + atp_writeb_io(dev, 0, 0x13, 0); + atp_writeb_io(dev, 0, 0x14, 0); if (i > 7) { k = (i & 0x07) | 0x40; } else { k = i; } - outb(k, tmport++); - tmport = dev->ioport[0] + 0x1b; - if (dev->chip_ver == 4) { - outb(0x01, tmport); - } else { - outb(0x00, tmport); - } -wait_rdyok: - tmport = dev->ioport[0] + 0x18; - outb(0x09, tmport); - tmport += 0x07; - - while ((inb(tmport) & 0x80) == 0x00) - cpu_relax(); - tmport -= 0x08; - k = inb(tmport); - if (k != 0x16) { - if ((k == 0x85) || (k == 0x42)) { - continue; - } - tmport = dev->ioport[0] + 0x10; - outb(0x41, tmport); - goto wait_rdyok; - } + atp_writeb_io(dev, 0, 0x15, k); + if (wide_chip) + atp_writeb_io(dev, 0, 0x1b, 0x01); + else + atp_writeb_io(dev, 0, 0x1b, 0x00); + do { + atp_writeb_io(dev, 0, 0x18, 0x09); + + while ((atp_readb_io(dev, 0, 0x1f) & 0x80) == 0x00) + cpu_relax(); + k = atp_readb_io(dev, 0, 0x17); + if ((k == 0x85) || (k == 0x42)) + break; + if (k != 0x16) + atp_writeb_io(dev, 0, 0x10, 0x41); + } while (k != 0x16); + if ((k == 0x85) || (k == 0x42)) + continue; assignid_map |= m; } - tmport = dev->ioport[0] + 0x02; - outb(0x7f, tmport); - tmport = dev->ioport[0] + 0x1b; - outb(0x02, tmport); + atp_writeb_io(dev, 0, 0x02, 0x7f); + atp_writeb_io(dev, 0, 0x1b, 0x02); - outb(0, 0x80); + udelay(2); val = 0x0080; /* bsy */ - tmport = dev->ioport[0] + 0x1c; - outw(val, tmport); + atp_writew_io(dev, 0, 0x1c, val); val |= 0x0040; /* sel */ - outw(val, tmport); + atp_writew_io(dev, 0, 0x1c, val); val |= 0x0004; /* msg */ - outw(val, tmport); - inb(0x80); /* 2 deskew delay(45ns*2=90ns) */ + atp_writew_io(dev, 0, 0x1c, val); + udelay(2); /* 2 deskew delay(45ns*2=90ns) */ val &= 0x007f; /* no bsy */ - outw(val, tmport); + atp_writew_io(dev, 0, 0x1c, val); mdelay(128); val &= 0x00fb; /* after 1ms no msg */ - outw(val, tmport); -wait_nomsg: - if ((inb(tmport) & 0x04) != 0) { - goto wait_nomsg; - } - outb(1, 0x80); + atp_writew_io(dev, 0, 0x1c, val); + while ((atp_readb_io(dev, 0, 
0x1c) & 0x04) != 0)
+		;
+	udelay(2);
 	udelay(100);
-	for (n = 0; n < 0x30000; n++) {
-		if ((inb(tmport) & 0x80) != 0) {	/* bsy ? */
-			goto wait_io;
-		}
-	}
-	goto TCM_SYNC;
-wait_io:
-	for (n = 0; n < 0x30000; n++) {
-		if ((inb(tmport) & 0x81) == 0x0081) {
-			goto wait_io1;
-		}
-	}
-	goto TCM_SYNC;
-wait_io1:
-	inb(0x80);
-	val |= 0x8003;		/* io,cd,db7 */
-	outw(val, tmport);
-	inb(0x80);
-	val &= 0x00bf;		/* no sel */
-	outw(val, tmport);
-	outb(2, 0x80);
-TCM_SYNC:
+	for (n = 0; n < 0x30000; n++)
+		if ((atp_readb_io(dev, 0, 0x1c) & 0x80) != 0)	/* bsy ? */
+			break;
+	if (n < 0x30000)
+		for (n = 0; n < 0x30000; n++)
+			if ((atp_readb_io(dev, 0, 0x1c) & 0x81) == 0x0081) {
+				udelay(2);
+				val |= 0x8003;		/* io,cd,db7 */
+				atp_writew_io(dev, 0, 0x1c, val);
+				udelay(2);
+				val &= 0x00bf;		/* no sel */
+				atp_writew_io(dev, 0, 0x1c, val);
+				udelay(2);
+				break;
+			}
+
+	while (1) {
 	/*
	 * The funny division into multiple delays is to accommodate
	 * arches like ARM where udelay() multiplies its argument by
@@ -1183,55 +1088,48 @@ TCM_SYNC:
	 */
 	mdelay(2);
 	udelay(48);
-	if ((inb(tmport) & 0x80) == 0x00) {	/* bsy ? */
-		outw(0, tmport--);
-		outb(0, tmport);
-		tmport = dev->ioport[0] + 0x15;
-		outb(0, tmport);
-		tmport += 0x03;
-		outb(0x09, tmport);
-		tmport += 0x07;
-		while ((inb(tmport) & 0x80) == 0)
+	if ((atp_readb_io(dev, 0, 0x1c) & 0x80) == 0x00) {	/* bsy ? */
+		atp_writew_io(dev, 0, 0x1c, 0);
+		atp_writeb_io(dev, 0, 0x1b, 0);
+		atp_writeb_io(dev, 0, 0x15, 0);
+		atp_writeb_io(dev, 0, 0x18, 0x09);
+		while ((atp_readb_io(dev, 0, 0x1f) & 0x80) == 0)
 			cpu_relax();
-		tmport -= 0x08;
-		inb(tmport);
+		atp_readb_io(dev, 0, 0x17);
 		return;
 	}
 	val &= 0x00ff;		/* synchronization  */
 	val |= 0x3f00;
 	fun_scam(dev, &val);
-	outb(3, 0x80);
+	udelay(2);
 	val &= 0x00ff;		/* isolation        */
 	val |= 0x2000;
 	fun_scam(dev, &val);
-	outb(4, 0x80);
+	udelay(2);
 	i = 8;
 	j = 0;
-TCM_ID:
-	if ((inw(tmport) & 0x2000) == 0) {
-		goto TCM_ID;
-	}
-	outb(5, 0x80);
-	val &= 0x00ff;		/* get ID_STRING */
-	val |= 0x2000;
-	k = fun_scam(dev, &val);
-	if ((k & 0x03) == 0) {
-		goto TCM_5;
-	}
-	mbuf[j] <<= 0x01;
-	mbuf[j] &= 0xfe;
-	if ((k & 0x02) != 0) {
-		mbuf[j] |= 0x01;
-	}
-	i--;
-	if (i > 0) {
-		goto TCM_ID;
+
+	while (1) {
+		if ((atp_readw_io(dev, 0, 0x1c) & 0x2000) == 0)
+			continue;
+		udelay(2);
+		val &= 0x00ff;		/* get ID_STRING */
+		val |= 0x2000;
+		k = fun_scam(dev, &val);
+		if ((k & 0x03) == 0)
+			break;
+		mbuf[j] <<= 0x01;
+		mbuf[j] &= 0xfe;
+		if ((k & 0x02) != 0)
+			mbuf[j] |= 0x01;
+		i--;
+		if (i > 0)
+			continue;
+		j++;
+		i = 8;
 	}
-	j++;
-	i = 8;
-	goto TCM_ID;
-TCM_5:			/* isolation complete..  */
+	/* isolation complete..  */
 /*    mbuf[32]=0;
	printk(" \n%x %x %x %s\n ",assignid_map,mbuf[0],mbuf[1],&mbuf[2]); */
 	i = 15;
@@ -1239,33 +1137,33 @@ TCM_5:			/* isolation complete..  */
 	if ((j & 0x20) != 0) {	/* bit5=1:ID up to 7      */
 		i = 7;
 	}
-	if ((j & 0x06) == 0) {	/* IDvalid? */
-		goto G2Q5;
-	}
-	k = mbuf[1];
-small_id:
-	m = 1;
-	m <<= k;
-	if ((m & assignid_map) == 0) {
-		goto G2Q_QUIN;
-	}
-	if (k > 0) {
-		k--;
-		goto small_id;
-	}
-G2Q5:			/* srch from max acceptable ID#  */
-	k = i;			/* max acceptable ID#            */
-G2Q_LP:
-	m = 1;
-	m <<= k;
-	if ((m & assignid_map) == 0) {
-		goto G2Q_QUIN;
+	if ((j & 0x06) != 0) {	/* IDvalid? 
*/ + k = mbuf[1]; + while (1) { + m = 1; + m <<= k; + if ((m & assignid_map) == 0) + break; + if (k > 0) + k--; + else + break; + } } - if (k > 0) { - k--; - goto G2Q_LP; + if ((m & assignid_map) != 0) { /* srch from max acceptable ID# */ + k = i; /* max acceptable ID# */ + while (1) { + m = 1; + m <<= k; + if ((m & assignid_map) == 0) + break; + if (k > 0) + k--; + else + break; + } } -G2Q_QUIN: /* k=binID#, */ + /* k=binID#, */ assignid_map |= m; if (k < 8) { quintet[0] = 0x38; /* 1st dft ID<8 */ @@ -1284,1227 +1182,6 @@ G2Q_QUIN: /* k=binID#, */ val |= m; fun_scam(dev, &val); - goto TCM_SYNC; - -} - -static void is870(struct atp_unit *dev, unsigned int wkport) -{ - unsigned int tmport; - unsigned char i, j, k, rmb, n; - unsigned short int m; - static unsigned char mbuf[512]; - static unsigned char satn[9] = { 0, 0, 0, 0, 0, 0, 0, 6, 6 }; - static unsigned char inqd[9] = { 0x12, 0, 0, 0, 0x24, 0, 0, 0x24, 6 }; - static unsigned char synn[6] = { 0x80, 1, 3, 1, 0x19, 0x0e }; - static unsigned char synu[6] = { 0x80, 1, 3, 1, 0x0c, 0x0e }; - static unsigned char synw[6] = { 0x80, 1, 3, 1, 0x0c, 0x07 }; - static unsigned char wide[6] = { 0x80, 1, 2, 3, 1, 0 }; - - tmport = wkport + 0x3a; - outb((unsigned char) (inb(tmport) | 0x10), tmport); - - for (i = 0; i < 16; i++) { - if ((dev->chip_ver != 4) && (i > 7)) { - break; - } - m = 1; - m = m << i; - if ((m & dev->active_id[0]) != 0) { - continue; - } - if (i == dev->host_id[0]) { - printk(KERN_INFO " ID: %2d Host Adapter\n", dev->host_id[0]); - continue; - } - tmport = wkport + 0x1b; - if (dev->chip_ver == 4) { - outb(0x01, tmport); - } else { - outb(0x00, tmport); - } - tmport = wkport + 1; - outb(0x08, tmport++); - outb(0x7f, tmport++); - outb(satn[0], tmport++); - outb(satn[1], tmport++); - outb(satn[2], tmport++); - outb(satn[3], tmport++); - outb(satn[4], tmport++); - outb(satn[5], tmport++); - tmport += 0x06; - outb(0, tmport); - tmport += 0x02; - outb(dev->id[0][i].devsp, tmport++); - outb(0, tmport++); - outb(satn[6], tmport++); - outb(satn[7], tmport++); - j = i; - if ((j & 0x08) != 0) { - j = (j & 0x07) | 0x40; - } - outb(j, tmport); - tmport += 0x03; - outb(satn[8], tmport); - tmport += 0x07; - - while ((inb(tmport) & 0x80) == 0x00) - cpu_relax(); - - tmport -= 0x08; - if (inb(tmport) != 0x11 && inb(tmport) != 0x8e) - continue; - - while (inb(tmport) != 0x8e) - cpu_relax(); - - dev->active_id[0] |= m; - - tmport = wkport + 0x10; - outb(0x30, tmport); - tmport = wkport + 0x04; - outb(0x00, tmport); - -phase_cmd: - tmport = wkport + 0x18; - outb(0x08, tmport); - tmport += 0x07; - while ((inb(tmport) & 0x80) == 0x00) - cpu_relax(); - tmport -= 0x08; - j = inb(tmport); - if (j != 0x16) { - tmport = wkport + 0x10; - outb(0x41, tmport); - goto phase_cmd; - } -sel_ok: - tmport = wkport + 3; - outb(inqd[0], tmport++); - outb(inqd[1], tmport++); - outb(inqd[2], tmport++); - outb(inqd[3], tmport++); - outb(inqd[4], tmport++); - outb(inqd[5], tmport); - tmport += 0x07; - outb(0, tmport); - tmport += 0x02; - outb(dev->id[0][i].devsp, tmport++); - outb(0, tmport++); - outb(inqd[6], tmport++); - outb(inqd[7], tmport++); - tmport += 0x03; - outb(inqd[8], tmport); - tmport += 0x07; - - while ((inb(tmport) & 0x80) == 0x00) - cpu_relax(); - - tmport -= 0x08; - if (inb(tmport) != 0x11 && inb(tmport) != 0x8e) - continue; - - while (inb(tmport) != 0x8e) - cpu_relax(); - - tmport = wkport + 0x1b; - if (dev->chip_ver == 4) - outb(0x00, tmport); - - tmport = wkport + 0x18; - outb(0x08, tmport); - tmport += 0x07; - j = 0; -rd_inq_data: - k = inb(tmport); - 
if ((k & 0x01) != 0) { - tmport -= 0x06; - mbuf[j++] = inb(tmport); - tmport += 0x06; - goto rd_inq_data; - } - if ((k & 0x80) == 0) { - goto rd_inq_data; - } - tmport -= 0x08; - j = inb(tmport); - if (j == 0x16) { - goto inq_ok; - } - tmport = wkport + 0x10; - outb(0x46, tmport); - tmport += 0x02; - outb(0, tmport++); - outb(0, tmport++); - outb(0, tmport++); - tmport += 0x03; - outb(0x08, tmport); - tmport += 0x07; - - while ((inb(tmport) & 0x80) == 0x00) - cpu_relax(); - - tmport -= 0x08; - if (inb(tmport) != 0x16) { - goto sel_ok; - } -inq_ok: - mbuf[36] = 0; - printk(KERN_INFO " ID: %2d %s\n", i, &mbuf[8]); - dev->id[0][i].devtype = mbuf[0]; - rmb = mbuf[1]; - n = mbuf[7]; - if (dev->chip_ver != 4) { - goto not_wide; - } - if ((mbuf[7] & 0x60) == 0) { - goto not_wide; - } - if ((dev->global_map[0] & 0x20) == 0) { - goto not_wide; - } - tmport = wkport + 0x1b; - outb(0x01, tmport); - tmport = wkport + 3; - outb(satn[0], tmport++); - outb(satn[1], tmport++); - outb(satn[2], tmport++); - outb(satn[3], tmport++); - outb(satn[4], tmport++); - outb(satn[5], tmport++); - tmport += 0x06; - outb(0, tmport); - tmport += 0x02; - outb(dev->id[0][i].devsp, tmport++); - outb(0, tmport++); - outb(satn[6], tmport++); - outb(satn[7], tmport++); - tmport += 0x03; - outb(satn[8], tmport); - tmport += 0x07; - - while ((inb(tmport) & 0x80) == 0x00) - cpu_relax(); - - tmport -= 0x08; - if (inb(tmport) != 0x11 && inb(tmport) != 0x8e) - continue; - - while (inb(tmport) != 0x8e) - cpu_relax(); - -try_wide: - j = 0; - tmport = wkport + 0x14; - outb(0x05, tmport); - tmport += 0x04; - outb(0x20, tmport); - tmport += 0x07; - - while ((inb(tmport) & 0x80) == 0) { - if ((inb(tmport) & 0x01) != 0) { - tmport -= 0x06; - outb(wide[j++], tmport); - tmport += 0x06; - } - } - tmport -= 0x08; - - while ((inb(tmport) & 0x80) == 0x00) - cpu_relax(); - - j = inb(tmport) & 0x0f; - if (j == 0x0f) { - goto widep_in; - } - if (j == 0x0a) { - goto widep_cmd; - } - if (j == 0x0e) { - goto try_wide; - } - continue; -widep_out: - tmport = wkport + 0x18; - outb(0x20, tmport); - tmport += 0x07; - while ((inb(tmport) & 0x80) == 0) { - if ((inb(tmport) & 0x01) != 0) { - tmport -= 0x06; - outb(0, tmport); - tmport += 0x06; - } - } - tmport -= 0x08; - j = inb(tmport) & 0x0f; - if (j == 0x0f) { - goto widep_in; - } - if (j == 0x0a) { - goto widep_cmd; - } - if (j == 0x0e) { - goto widep_out; - } - continue; -widep_in: - tmport = wkport + 0x14; - outb(0xff, tmport); - tmport += 0x04; - outb(0x20, tmport); - tmport += 0x07; - k = 0; -widep_in1: - j = inb(tmport); - if ((j & 0x01) != 0) { - tmport -= 0x06; - mbuf[k++] = inb(tmport); - tmport += 0x06; - goto widep_in1; - } - if ((j & 0x80) == 0x00) { - goto widep_in1; - } - tmport -= 0x08; - j = inb(tmport) & 0x0f; - if (j == 0x0f) { - goto widep_in; - } - if (j == 0x0a) { - goto widep_cmd; - } - if (j == 0x0e) { - goto widep_out; - } - continue; -widep_cmd: - tmport = wkport + 0x10; - outb(0x30, tmport); - tmport = wkport + 0x14; - outb(0x00, tmport); - tmport += 0x04; - outb(0x08, tmport); - tmport += 0x07; - - while ((inb(tmport) & 0x80) == 0x00) - cpu_relax(); - - tmport -= 0x08; - j = inb(tmport); - if (j != 0x16) { - if (j == 0x4e) { - goto widep_out; - } - continue; - } - if (mbuf[0] != 0x01) { - goto not_wide; - } - if (mbuf[1] != 0x02) { - goto not_wide; - } - if (mbuf[2] != 0x03) { - goto not_wide; - } - if (mbuf[3] != 0x01) { - goto not_wide; - } - m = 1; - m = m << i; - dev->wide_id[0] |= m; -not_wide: - if ((dev->id[0][i].devtype == 0x00) || (dev->id[0][i].devtype == 0x07) || 
((dev->id[0][i].devtype == 0x05) && ((n & 0x10) != 0))) { - goto set_sync; - } - continue; -set_sync: - tmport = wkport + 0x1b; - j = 0; - if ((m & dev->wide_id[0]) != 0) { - j |= 0x01; - } - outb(j, tmport); - tmport = wkport + 3; - outb(satn[0], tmport++); - outb(satn[1], tmport++); - outb(satn[2], tmport++); - outb(satn[3], tmport++); - outb(satn[4], tmport++); - outb(satn[5], tmport++); - tmport += 0x06; - outb(0, tmport); - tmport += 0x02; - outb(dev->id[0][i].devsp, tmport++); - outb(0, tmport++); - outb(satn[6], tmport++); - outb(satn[7], tmport++); - tmport += 0x03; - outb(satn[8], tmport); - tmport += 0x07; - - while ((inb(tmport) & 0x80) == 0x00) - cpu_relax(); - - tmport -= 0x08; - if (inb(tmport) != 0x11 && inb(tmport) != 0x8e) - continue; - - while (inb(tmport) != 0x8e) - cpu_relax(); - -try_sync: - j = 0; - tmport = wkport + 0x14; - outb(0x06, tmport); - tmport += 0x04; - outb(0x20, tmport); - tmport += 0x07; - - while ((inb(tmport) & 0x80) == 0) { - if ((inb(tmport) & 0x01) != 0) { - tmport -= 0x06; - if ((m & dev->wide_id[0]) != 0) { - outb(synw[j++], tmport); - } else { - if ((m & dev->ultra_map[0]) != 0) { - outb(synu[j++], tmport); - } else { - outb(synn[j++], tmport); - } - } - tmport += 0x06; - } - } - tmport -= 0x08; - - while ((inb(tmport) & 0x80) == 0x00) - cpu_relax(); - - j = inb(tmport) & 0x0f; - if (j == 0x0f) { - goto phase_ins; - } - if (j == 0x0a) { - goto phase_cmds; - } - if (j == 0x0e) { - goto try_sync; - } - continue; -phase_outs: - tmport = wkport + 0x18; - outb(0x20, tmport); - tmport += 0x07; - while ((inb(tmport) & 0x80) == 0x00) { - if ((inb(tmport) & 0x01) != 0x00) { - tmport -= 0x06; - outb(0x00, tmport); - tmport += 0x06; - } - } - tmport -= 0x08; - j = inb(tmport); - if (j == 0x85) { - goto tar_dcons; - } - j &= 0x0f; - if (j == 0x0f) { - goto phase_ins; - } - if (j == 0x0a) { - goto phase_cmds; - } - if (j == 0x0e) { - goto phase_outs; - } - continue; -phase_ins: - tmport = wkport + 0x14; - outb(0xff, tmport); - tmport += 0x04; - outb(0x20, tmport); - tmport += 0x07; - k = 0; -phase_ins1: - j = inb(tmport); - if ((j & 0x01) != 0x00) { - tmport -= 0x06; - mbuf[k++] = inb(tmport); - tmport += 0x06; - goto phase_ins1; - } - if ((j & 0x80) == 0x00) { - goto phase_ins1; - } - tmport -= 0x08; - - while ((inb(tmport) & 0x80) == 0x00) - cpu_relax(); - - j = inb(tmport); - if (j == 0x85) { - goto tar_dcons; - } - j &= 0x0f; - if (j == 0x0f) { - goto phase_ins; - } - if (j == 0x0a) { - goto phase_cmds; - } - if (j == 0x0e) { - goto phase_outs; - } - continue; -phase_cmds: - tmport = wkport + 0x10; - outb(0x30, tmport); -tar_dcons: - tmport = wkport + 0x14; - outb(0x00, tmport); - tmport += 0x04; - outb(0x08, tmport); - tmport += 0x07; - - while ((inb(tmport) & 0x80) == 0x00) - cpu_relax(); - - tmport -= 0x08; - j = inb(tmport); - if (j != 0x16) { - continue; - } - if (mbuf[0] != 0x01) { - continue; - } - if (mbuf[1] != 0x03) { - continue; - } - if (mbuf[4] == 0x00) { - continue; - } - if (mbuf[3] > 0x64) { - continue; - } - if (mbuf[4] > 0x0c) { - mbuf[4] = 0x0c; - } - dev->id[0][i].devsp = mbuf[4]; - if ((mbuf[3] < 0x0d) && (rmb == 0)) { - j = 0xa0; - goto set_syn_ok; - } - if (mbuf[3] < 0x1a) { - j = 0x20; - goto set_syn_ok; - } - if (mbuf[3] < 0x33) { - j = 0x40; - goto set_syn_ok; - } - if (mbuf[3] < 0x4c) { - j = 0x50; - goto set_syn_ok; - } - j = 0x60; -set_syn_ok: - dev->id[0][i].devsp = (dev->id[0][i].devsp & 0x0f) | j; - } - tmport = wkport + 0x3a; - outb((unsigned char) (inb(tmport) & 0xef), tmport); -} - -static void is880(struct atp_unit 
*dev, unsigned int wkport) -{ - unsigned int tmport; - unsigned char i, j, k, rmb, n, lvdmode; - unsigned short int m; - static unsigned char mbuf[512]; - static unsigned char satn[9] = { 0, 0, 0, 0, 0, 0, 0, 6, 6 }; - static unsigned char inqd[9] = { 0x12, 0, 0, 0, 0x24, 0, 0, 0x24, 6 }; - static unsigned char synn[6] = { 0x80, 1, 3, 1, 0x19, 0x0e }; - unsigned char synu[6] = { 0x80, 1, 3, 1, 0x0a, 0x0e }; - static unsigned char synw[6] = { 0x80, 1, 3, 1, 0x19, 0x0e }; - unsigned char synuw[6] = { 0x80, 1, 3, 1, 0x0a, 0x0e }; - static unsigned char wide[6] = { 0x80, 1, 2, 3, 1, 0 }; - static unsigned char u3[9] = { 0x80, 1, 6, 4, 0x09, 00, 0x0e, 0x01, 0x02 }; - - lvdmode = inb(wkport + 0x3f) & 0x40; - - for (i = 0; i < 16; i++) { - m = 1; - m = m << i; - if ((m & dev->active_id[0]) != 0) { - continue; - } - if (i == dev->host_id[0]) { - printk(KERN_INFO " ID: %2d Host Adapter\n", dev->host_id[0]); - continue; - } - tmport = wkport + 0x5b; - outb(0x01, tmport); - tmport = wkport + 0x41; - outb(0x08, tmport++); - outb(0x7f, tmport++); - outb(satn[0], tmport++); - outb(satn[1], tmport++); - outb(satn[2], tmport++); - outb(satn[3], tmport++); - outb(satn[4], tmport++); - outb(satn[5], tmport++); - tmport += 0x06; - outb(0, tmport); - tmport += 0x02; - outb(dev->id[0][i].devsp, tmport++); - outb(0, tmport++); - outb(satn[6], tmport++); - outb(satn[7], tmport++); - j = i; - if ((j & 0x08) != 0) { - j = (j & 0x07) | 0x40; - } - outb(j, tmport); - tmport += 0x03; - outb(satn[8], tmport); - tmport += 0x07; - - while ((inb(tmport) & 0x80) == 0x00) - cpu_relax(); - - tmport -= 0x08; - if (inb(tmport) != 0x11 && inb(tmport) != 0x8e) - continue; - - while (inb(tmport) != 0x8e) - cpu_relax(); - - dev->active_id[0] |= m; - - tmport = wkport + 0x50; - outb(0x30, tmport); - tmport = wkport + 0x54; - outb(0x00, tmport); - -phase_cmd: - tmport = wkport + 0x58; - outb(0x08, tmport); - tmport += 0x07; - - while ((inb(tmport) & 0x80) == 0x00) - cpu_relax(); - - tmport -= 0x08; - j = inb(tmport); - if (j != 0x16) { - tmport = wkport + 0x50; - outb(0x41, tmport); - goto phase_cmd; - } -sel_ok: - tmport = wkport + 0x43; - outb(inqd[0], tmport++); - outb(inqd[1], tmport++); - outb(inqd[2], tmport++); - outb(inqd[3], tmport++); - outb(inqd[4], tmport++); - outb(inqd[5], tmport); - tmport += 0x07; - outb(0, tmport); - tmport += 0x02; - outb(dev->id[0][i].devsp, tmport++); - outb(0, tmport++); - outb(inqd[6], tmport++); - outb(inqd[7], tmport++); - tmport += 0x03; - outb(inqd[8], tmport); - tmport += 0x07; - - while ((inb(tmport) & 0x80) == 0x00) - cpu_relax(); - - tmport -= 0x08; - if (inb(tmport) != 0x11 && inb(tmport) != 0x8e) - continue; - - while (inb(tmport) != 0x8e) - cpu_relax(); - - tmport = wkport + 0x5b; - outb(0x00, tmport); - tmport = wkport + 0x58; - outb(0x08, tmport); - tmport += 0x07; - j = 0; -rd_inq_data: - k = inb(tmport); - if ((k & 0x01) != 0) { - tmport -= 0x06; - mbuf[j++] = inb(tmport); - tmport += 0x06; - goto rd_inq_data; - } - if ((k & 0x80) == 0) { - goto rd_inq_data; - } - tmport -= 0x08; - j = inb(tmport); - if (j == 0x16) { - goto inq_ok; - } - tmport = wkport + 0x50; - outb(0x46, tmport); - tmport += 0x02; - outb(0, tmport++); - outb(0, tmport++); - outb(0, tmport++); - tmport += 0x03; - outb(0x08, tmport); - tmport += 0x07; - while ((inb(tmport) & 0x80) == 0x00) - cpu_relax(); - - tmport -= 0x08; - if (inb(tmport) != 0x16) - goto sel_ok; - -inq_ok: - mbuf[36] = 0; - printk(KERN_INFO " ID: %2d %s\n", i, &mbuf[8]); - dev->id[0][i].devtype = mbuf[0]; - rmb = mbuf[1]; - n = mbuf[7]; - if 
((mbuf[7] & 0x60) == 0) { - goto not_wide; - } - if ((i < 8) && ((dev->global_map[0] & 0x20) == 0)) { - goto not_wide; - } - if (lvdmode == 0) { - goto chg_wide; - } - if (dev->sp[0][i] != 0x04) // force u2 - { - goto chg_wide; - } - - tmport = wkport + 0x5b; - outb(0x01, tmport); - tmport = wkport + 0x43; - outb(satn[0], tmport++); - outb(satn[1], tmport++); - outb(satn[2], tmport++); - outb(satn[3], tmport++); - outb(satn[4], tmport++); - outb(satn[5], tmport++); - tmport += 0x06; - outb(0, tmport); - tmport += 0x02; - outb(dev->id[0][i].devsp, tmport++); - outb(0, tmport++); - outb(satn[6], tmport++); - outb(satn[7], tmport++); - tmport += 0x03; - outb(satn[8], tmport); - tmport += 0x07; - - while ((inb(tmport) & 0x80) == 0x00) - cpu_relax(); - - tmport -= 0x08; - - if (inb(tmport) != 0x11 && inb(tmport) != 0x8e) - continue; - - while (inb(tmport) != 0x8e) - cpu_relax(); - -try_u3: - j = 0; - tmport = wkport + 0x54; - outb(0x09, tmport); - tmport += 0x04; - outb(0x20, tmport); - tmport += 0x07; - - while ((inb(tmport) & 0x80) == 0) { - if ((inb(tmport) & 0x01) != 0) { - tmport -= 0x06; - outb(u3[j++], tmport); - tmport += 0x06; - } - } - tmport -= 0x08; - - while ((inb(tmport) & 0x80) == 0x00) - cpu_relax(); - - j = inb(tmport) & 0x0f; - if (j == 0x0f) { - goto u3p_in; - } - if (j == 0x0a) { - goto u3p_cmd; - } - if (j == 0x0e) { - goto try_u3; - } - continue; -u3p_out: - tmport = wkport + 0x58; - outb(0x20, tmport); - tmport += 0x07; - while ((inb(tmport) & 0x80) == 0) { - if ((inb(tmport) & 0x01) != 0) { - tmport -= 0x06; - outb(0, tmport); - tmport += 0x06; - } - } - tmport -= 0x08; - j = inb(tmport) & 0x0f; - if (j == 0x0f) { - goto u3p_in; - } - if (j == 0x0a) { - goto u3p_cmd; - } - if (j == 0x0e) { - goto u3p_out; - } - continue; -u3p_in: - tmport = wkport + 0x54; - outb(0x09, tmport); - tmport += 0x04; - outb(0x20, tmport); - tmport += 0x07; - k = 0; -u3p_in1: - j = inb(tmport); - if ((j & 0x01) != 0) { - tmport -= 0x06; - mbuf[k++] = inb(tmport); - tmport += 0x06; - goto u3p_in1; - } - if ((j & 0x80) == 0x00) { - goto u3p_in1; - } - tmport -= 0x08; - j = inb(tmport) & 0x0f; - if (j == 0x0f) { - goto u3p_in; - } - if (j == 0x0a) { - goto u3p_cmd; - } - if (j == 0x0e) { - goto u3p_out; - } - continue; -u3p_cmd: - tmport = wkport + 0x50; - outb(0x30, tmport); - tmport = wkport + 0x54; - outb(0x00, tmport); - tmport += 0x04; - outb(0x08, tmport); - tmport += 0x07; - - while ((inb(tmport) & 0x80) == 0x00) - cpu_relax(); - - tmport -= 0x08; - j = inb(tmport); - if (j != 0x16) { - if (j == 0x4e) { - goto u3p_out; - } - continue; - } - if (mbuf[0] != 0x01) { - goto chg_wide; - } - if (mbuf[1] != 0x06) { - goto chg_wide; - } - if (mbuf[2] != 0x04) { - goto chg_wide; - } - if (mbuf[3] == 0x09) { - m = 1; - m = m << i; - dev->wide_id[0] |= m; - dev->id[0][i].devsp = 0xce; - continue; - } -chg_wide: - tmport = wkport + 0x5b; - outb(0x01, tmport); - tmport = wkport + 0x43; - outb(satn[0], tmport++); - outb(satn[1], tmport++); - outb(satn[2], tmport++); - outb(satn[3], tmport++); - outb(satn[4], tmport++); - outb(satn[5], tmport++); - tmport += 0x06; - outb(0, tmport); - tmport += 0x02; - outb(dev->id[0][i].devsp, tmport++); - outb(0, tmport++); - outb(satn[6], tmport++); - outb(satn[7], tmport++); - tmport += 0x03; - outb(satn[8], tmport); - tmport += 0x07; - - while ((inb(tmport) & 0x80) == 0x00) - cpu_relax(); - - tmport -= 0x08; - if (inb(tmport) != 0x11 && inb(tmport) != 0x8e) - continue; - - while (inb(tmport) != 0x8e) - cpu_relax(); - -try_wide: - j = 0; - tmport = wkport + 0x54; - 
outb(0x05, tmport); - tmport += 0x04; - outb(0x20, tmport); - tmport += 0x07; - - while ((inb(tmport) & 0x80) == 0) { - if ((inb(tmport) & 0x01) != 0) { - tmport -= 0x06; - outb(wide[j++], tmport); - tmport += 0x06; - } - } - tmport -= 0x08; - while ((inb(tmport) & 0x80) == 0x00) - cpu_relax(); - - j = inb(tmport) & 0x0f; - if (j == 0x0f) { - goto widep_in; - } - if (j == 0x0a) { - goto widep_cmd; - } - if (j == 0x0e) { - goto try_wide; - } - continue; -widep_out: - tmport = wkport + 0x58; - outb(0x20, tmport); - tmport += 0x07; - while ((inb(tmport) & 0x80) == 0) { - if ((inb(tmport) & 0x01) != 0) { - tmport -= 0x06; - outb(0, tmport); - tmport += 0x06; - } - } - tmport -= 0x08; - j = inb(tmport) & 0x0f; - if (j == 0x0f) { - goto widep_in; - } - if (j == 0x0a) { - goto widep_cmd; - } - if (j == 0x0e) { - goto widep_out; - } - continue; -widep_in: - tmport = wkport + 0x54; - outb(0xff, tmport); - tmport += 0x04; - outb(0x20, tmport); - tmport += 0x07; - k = 0; -widep_in1: - j = inb(tmport); - if ((j & 0x01) != 0) { - tmport -= 0x06; - mbuf[k++] = inb(tmport); - tmport += 0x06; - goto widep_in1; - } - if ((j & 0x80) == 0x00) { - goto widep_in1; - } - tmport -= 0x08; - j = inb(tmport) & 0x0f; - if (j == 0x0f) { - goto widep_in; - } - if (j == 0x0a) { - goto widep_cmd; - } - if (j == 0x0e) { - goto widep_out; - } - continue; -widep_cmd: - tmport = wkport + 0x50; - outb(0x30, tmport); - tmport = wkport + 0x54; - outb(0x00, tmport); - tmport += 0x04; - outb(0x08, tmport); - tmport += 0x07; - - while ((inb(tmport) & 0x80) == 0x00) - cpu_relax(); - - tmport -= 0x08; - j = inb(tmport); - if (j != 0x16) { - if (j == 0x4e) { - goto widep_out; - } - continue; - } - if (mbuf[0] != 0x01) { - goto not_wide; - } - if (mbuf[1] != 0x02) { - goto not_wide; - } - if (mbuf[2] != 0x03) { - goto not_wide; - } - if (mbuf[3] != 0x01) { - goto not_wide; - } - m = 1; - m = m << i; - dev->wide_id[0] |= m; -not_wide: - if ((dev->id[0][i].devtype == 0x00) || (dev->id[0][i].devtype == 0x07) || ((dev->id[0][i].devtype == 0x05) && ((n & 0x10) != 0))) { - m = 1; - m = m << i; - if ((dev->async[0] & m) != 0) { - goto set_sync; - } - } - continue; -set_sync: - if (dev->sp[0][i] == 0x02) { - synu[4] = 0x0c; - synuw[4] = 0x0c; - } else { - if (dev->sp[0][i] >= 0x03) { - synu[4] = 0x0a; - synuw[4] = 0x0a; - } - } - tmport = wkport + 0x5b; - j = 0; - if ((m & dev->wide_id[0]) != 0) { - j |= 0x01; - } - outb(j, tmport); - tmport = wkport + 0x43; - outb(satn[0], tmport++); - outb(satn[1], tmport++); - outb(satn[2], tmport++); - outb(satn[3], tmport++); - outb(satn[4], tmport++); - outb(satn[5], tmport++); - tmport += 0x06; - outb(0, tmport); - tmport += 0x02; - outb(dev->id[0][i].devsp, tmport++); - outb(0, tmport++); - outb(satn[6], tmport++); - outb(satn[7], tmport++); - tmport += 0x03; - outb(satn[8], tmport); - tmport += 0x07; - - while ((inb(tmport) & 0x80) == 0x00) - cpu_relax(); - - tmport -= 0x08; - if ((inb(tmport) != 0x11) && (inb(tmport) != 0x8e)) { - continue; - } - while (inb(tmport) != 0x8e) - cpu_relax(); - -try_sync: - j = 0; - tmport = wkport + 0x54; - outb(0x06, tmport); - tmport += 0x04; - outb(0x20, tmport); - tmport += 0x07; - - while ((inb(tmport) & 0x80) == 0) { - if ((inb(tmport) & 0x01) != 0) { - tmport -= 0x06; - if ((m & dev->wide_id[0]) != 0) { - if ((m & dev->ultra_map[0]) != 0) { - outb(synuw[j++], tmport); - } else { - outb(synw[j++], tmport); - } - } else { - if ((m & dev->ultra_map[0]) != 0) { - outb(synu[j++], tmport); - } else { - outb(synn[j++], tmport); - } - } - tmport += 0x06; - } - } - 
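/*
 * (Annotation, not part of the patch; the array name below is hypothetical.)
 * The synn/synu/synw/synuw buffers fed to the port in the loop above are
 * plain SCSI messages: an IDENTIFY byte followed by an extended message.
 * Decoding synn[] = { 0x80, 1, 3, 1, 0x19, 0x0e } as an example:
 */
static const unsigned char sdtr_decoded[6] = {
	0x80,	/* IDENTIFY, LUN 0, no disconnect privilege */
	0x01,	/* EXTENDED MESSAGE */
	0x03,	/* extended message length */
	0x01,	/* SDTR (synchronous data transfer request) */
	0x19,	/* transfer period factor: 0x19 * 4 ns = 100 ns, i.e. 10 MT/s */
	0x0e,	/* maximum REQ/ACK offset */
};
/*
 * wide[] uses the same framing with WDTR (code 0x03) and a transfer width
 * exponent of 1 (16-bit), and u3[] carries a 6-byte PPR (code 0x04) for
 * Ultra3 negotiation.
 */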
tmport -= 0x08; - - while ((inb(tmport) & 0x80) == 0x00) - cpu_relax(); - - j = inb(tmport) & 0x0f; - if (j == 0x0f) { - goto phase_ins; - } - if (j == 0x0a) { - goto phase_cmds; - } - if (j == 0x0e) { - goto try_sync; - } - continue; -phase_outs: - tmport = wkport + 0x58; - outb(0x20, tmport); - tmport += 0x07; - while ((inb(tmport) & 0x80) == 0x00) { - if ((inb(tmport) & 0x01) != 0x00) { - tmport -= 0x06; - outb(0x00, tmport); - tmport += 0x06; - } - } - tmport -= 0x08; - j = inb(tmport); - if (j == 0x85) { - goto tar_dcons; - } - j &= 0x0f; - if (j == 0x0f) { - goto phase_ins; - } - if (j == 0x0a) { - goto phase_cmds; - } - if (j == 0x0e) { - goto phase_outs; - } - continue; -phase_ins: - tmport = wkport + 0x54; - outb(0x06, tmport); - tmport += 0x04; - outb(0x20, tmport); - tmport += 0x07; - k = 0; -phase_ins1: - j = inb(tmport); - if ((j & 0x01) != 0x00) { - tmport -= 0x06; - mbuf[k++] = inb(tmport); - tmport += 0x06; - goto phase_ins1; - } - if ((j & 0x80) == 0x00) { - goto phase_ins1; - } - tmport -= 0x08; - - while ((inb(tmport) & 0x80) == 0x00) - cpu_relax(); - - j = inb(tmport); - if (j == 0x85) { - goto tar_dcons; - } - j &= 0x0f; - if (j == 0x0f) { - goto phase_ins; - } - if (j == 0x0a) { - goto phase_cmds; - } - if (j == 0x0e) { - goto phase_outs; - } - continue; -phase_cmds: - tmport = wkport + 0x50; - outb(0x30, tmport); -tar_dcons: - tmport = wkport + 0x54; - outb(0x00, tmport); - tmport += 0x04; - outb(0x08, tmport); - tmport += 0x07; - - while ((inb(tmport) & 0x80) == 0x00) - cpu_relax(); - - tmport -= 0x08; - j = inb(tmport); - if (j != 0x16) { - continue; - } - if (mbuf[0] != 0x01) { - continue; - } - if (mbuf[1] != 0x03) { - continue; - } - if (mbuf[4] == 0x00) { - continue; - } - if (mbuf[3] > 0x64) { - continue; - } - if (mbuf[4] > 0x0e) { - mbuf[4] = 0x0e; - } - dev->id[0][i].devsp = mbuf[4]; - if (mbuf[3] < 0x0c) { - j = 0xb0; - goto set_syn_ok; - } - if ((mbuf[3] < 0x0d) && (rmb == 0)) { - j = 0xa0; - goto set_syn_ok; - } - if (mbuf[3] < 0x1a) { - j = 0x20; - goto set_syn_ok; - } - if (mbuf[3] < 0x33) { - j = 0x40; - goto set_syn_ok; - } - if (mbuf[3] < 0x4c) { - j = 0x50; - goto set_syn_ok; - } - j = 0x60; -set_syn_ok: - dev->id[0][i].devsp = (dev->id[0][i].devsp & 0x0f) | j; } } @@ -2560,491 +1237,345 @@ static int atp870u_init_tables(struct Scsi_Host *host) return 0; } -/* return non-zero on detection */ -static int atp870u_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +static void atp_set_host_id(struct atp_unit *atp, u8 c, u8 host_id) { - unsigned char k, m, c; - unsigned long flags; - unsigned int base_io, tmport, error,n; - unsigned char host_id; - struct Scsi_Host *shpnt = NULL; - struct atp_unit *atpdev, *p; - unsigned char setupdata[2][16]; - int count = 0; + atp_writeb_io(atp, c, 0, host_id | 0x08); + atp_writeb_io(atp, c, 0x18, 0); + while ((atp_readb_io(atp, c, 0x1f) & 0x80) == 0) + mdelay(1); + atp_readb_io(atp, c, 0x17); + atp_writeb_io(atp, c, 1, 8); + atp_writeb_io(atp, c, 2, 0x7f); + atp_writeb_io(atp, c, 0x11, 0x20); +} - atpdev = kzalloc(sizeof(*atpdev), GFP_KERNEL); - if (!atpdev) - return -ENOMEM; +static void atp870_init(struct Scsi_Host *shpnt) +{ + struct atp_unit *atpdev = shost_priv(shpnt); + struct pci_dev *pdev = atpdev->pdev; + unsigned char k, host_id; + u8 scam_on; + bool wide_chip = + (pdev->device == PCI_DEVICE_ID_ARTOP_AEC7610 && + pdev->revision == 4) || + (pdev->device == PCI_DEVICE_ID_ARTOP_AEC7612UW) || + (pdev->device == PCI_DEVICE_ID_ARTOP_AEC7612SUW); + + pci_read_config_byte(pdev, 0x49, &host_id); + + 
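/*
 * (Illustrative sketch, not part of the patch.) The atp_readb/atp_writeb
 * calls used from here on replace the old pattern of stepping a "tmport"
 * cursor and calling inb()/outb() directly. Assuming the baseport/ioport/
 * pciport fields that struct atp_unit keeps (see the atp870u.h hunk near
 * the end), the helpers plausibly reduce to fixed offsets from a saved
 * port base:
 */
static inline void atp_writeb_base(struct atp_unit *atp, u8 reg, u8 val)
{
	outb(val, atp->baseport + reg);		/* card-global registers */
}

static inline u8 atp_readb_io(struct atp_unit *atp, int channel, u8 reg)
{
	return inb(atp->ioport[channel] + reg);	/* per-channel SCSI core */
}

static inline u8 atp_readb_pci(struct atp_unit *atp, int channel, u8 reg)
{
	return inb(atp->pciport[channel] + reg);	/* per-channel PCI/DMA block */
}
/* The word/long variants (atp_readw_base, atp_readl_base, ...) would follow
 * the same shape with inw/outw and inl/outl. */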
dev_info(&pdev->dev, "ACARD AEC-671X PCI Ultra/W SCSI-2/3 Host Adapter: IO:%lx, IRQ:%d.\n", + shpnt->io_port, shpnt->irq); + + atpdev->ioport[0] = shpnt->io_port; + atpdev->pciport[0] = shpnt->io_port + 0x20; + host_id &= 0x07; + atpdev->host_id[0] = host_id; + scam_on = atp_readb_pci(atpdev, 0, 2); + atpdev->global_map[0] = atp_readb_base(atpdev, 0x2d); + atpdev->ultra_map[0] = atp_readw_base(atpdev, 0x2e); + + if (atpdev->ultra_map[0] == 0) { + scam_on = 0x00; + atpdev->global_map[0] = 0x20; + atpdev->ultra_map[0] = 0xffff; + } - if (pci_enable_device(pdev)) - goto err_eio; + if (pdev->revision > 0x07) /* check if atp876 chip */ + atp_writeb_base(atpdev, 0x3e, 0x00); /* enable terminator */ + + k = (atp_readb_base(atpdev, 0x3a) & 0xf3) | 0x10; + atp_writeb_base(atpdev, 0x3a, k); + atp_writeb_base(atpdev, 0x3a, k & 0xdf); + mdelay(32); + atp_writeb_base(atpdev, 0x3a, k); + mdelay(32); + atp_set_host_id(atpdev, 0, host_id); + + tscam(shpnt, wide_chip, scam_on); + atp_writeb_base(atpdev, 0x3a, atp_readb_base(atpdev, 0x3a) | 0x10); + atp_is(atpdev, 0, wide_chip, 0); + atp_writeb_base(atpdev, 0x3a, atp_readb_base(atpdev, 0x3a) & 0xef); + atp_writeb_base(atpdev, 0x3b, atp_readb_base(atpdev, 0x3b) | 0x20); + shpnt->max_id = wide_chip ? 16 : 8; + shpnt->this_id = host_id; +} - if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { - printk(KERN_INFO "atp870u: use 32bit DMA mask.\n"); - } else { - printk(KERN_ERR "atp870u: DMA mask required but not available.\n"); - goto err_eio; - } +static void atp880_init(struct Scsi_Host *shpnt) +{ + struct atp_unit *atpdev = shost_priv(shpnt); + struct pci_dev *pdev = atpdev->pdev; + unsigned char k, m, host_id; + unsigned int n; - /* - * It's probably easier to weed out some revisions like - * this than via the PCI device table - */ - if (ent->device == PCI_DEVICE_ID_ARTOP_AEC7610) { - atpdev->chip_ver = pdev->revision; - if (atpdev->chip_ver < 2) - goto err_eio; - } + pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x80); - switch (ent->device) { - case PCI_DEVICE_ID_ARTOP_AEC7612UW: - case PCI_DEVICE_ID_ARTOP_AEC7612SUW: - case ATP880_DEVID1: - case ATP880_DEVID2: - case ATP885_DEVID: - atpdev->chip_ver = 0x04; - default: - break; - } - base_io = pci_resource_start(pdev, 0); - base_io &= 0xfffffff8; - - if ((ent->device == ATP880_DEVID1)||(ent->device == ATP880_DEVID2)) { - atpdev->chip_ver = pdev->revision; - pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x80);//JCC082803 - - host_id = inb(base_io + 0x39); - host_id >>= 0x04; - - printk(KERN_INFO " ACARD AEC-67160 PCI Ultra3 LVD Host Adapter: %d" - " IO:%x, IRQ:%d.\n", count, base_io, pdev->irq); - atpdev->ioport[0] = base_io + 0x40; - atpdev->pciport[0] = base_io + 0x28; - atpdev->dev_id = ent->device; - atpdev->host_id[0] = host_id; - - tmport = base_io + 0x22; - atpdev->scam_on = inb(tmport); - tmport += 0x13; - atpdev->global_map[0] = inb(tmport); - tmport += 0x07; - atpdev->ultra_map[0] = inw(tmport); - - n = 0x3f09; -next_fblk_880: - if (n >= 0x4000) - goto flash_ok_880; + atpdev->ioport[0] = shpnt->io_port + 0x40; + atpdev->pciport[0] = shpnt->io_port + 0x28; + + host_id = atp_readb_base(atpdev, 0x39) >> 4; + dev_info(&pdev->dev, "ACARD AEC-67160 PCI Ultra3 LVD Host Adapter: IO:%lx, IRQ:%d.\n", + shpnt->io_port, shpnt->irq); + atpdev->host_id[0] = host_id; + + atpdev->global_map[0] = atp_readb_base(atpdev, 0x35); + atpdev->ultra_map[0] = atp_readw_base(atpdev, 0x3c); + + n = 0x3f09; + while (n < 0x4000) { m = 0; - outw(n, base_io + 0x34); + atp_writew_base(atpdev, 0x34, n); n += 0x0002; - if 
(inb(base_io + 0x30) == 0xff) - goto flash_ok_880; - - atpdev->sp[0][m++] = inb(base_io + 0x30); - atpdev->sp[0][m++] = inb(base_io + 0x31); - atpdev->sp[0][m++] = inb(base_io + 0x32); - atpdev->sp[0][m++] = inb(base_io + 0x33); - outw(n, base_io + 0x34); + if (atp_readb_base(atpdev, 0x30) == 0xff) + break; + + atpdev->sp[0][m++] = atp_readb_base(atpdev, 0x30); + atpdev->sp[0][m++] = atp_readb_base(atpdev, 0x31); + atpdev->sp[0][m++] = atp_readb_base(atpdev, 0x32); + atpdev->sp[0][m++] = atp_readb_base(atpdev, 0x33); + atp_writew_base(atpdev, 0x34, n); n += 0x0002; - atpdev->sp[0][m++] = inb(base_io + 0x30); - atpdev->sp[0][m++] = inb(base_io + 0x31); - atpdev->sp[0][m++] = inb(base_io + 0x32); - atpdev->sp[0][m++] = inb(base_io + 0x33); - outw(n, base_io + 0x34); + atpdev->sp[0][m++] = atp_readb_base(atpdev, 0x30); + atpdev->sp[0][m++] = atp_readb_base(atpdev, 0x31); + atpdev->sp[0][m++] = atp_readb_base(atpdev, 0x32); + atpdev->sp[0][m++] = atp_readb_base(atpdev, 0x33); + atp_writew_base(atpdev, 0x34, n); n += 0x0002; - atpdev->sp[0][m++] = inb(base_io + 0x30); - atpdev->sp[0][m++] = inb(base_io + 0x31); - atpdev->sp[0][m++] = inb(base_io + 0x32); - atpdev->sp[0][m++] = inb(base_io + 0x33); - outw(n, base_io + 0x34); + atpdev->sp[0][m++] = atp_readb_base(atpdev, 0x30); + atpdev->sp[0][m++] = atp_readb_base(atpdev, 0x31); + atpdev->sp[0][m++] = atp_readb_base(atpdev, 0x32); + atpdev->sp[0][m++] = atp_readb_base(atpdev, 0x33); + atp_writew_base(atpdev, 0x34, n); n += 0x0002; - atpdev->sp[0][m++] = inb(base_io + 0x30); - atpdev->sp[0][m++] = inb(base_io + 0x31); - atpdev->sp[0][m++] = inb(base_io + 0x32); - atpdev->sp[0][m++] = inb(base_io + 0x33); + atpdev->sp[0][m++] = atp_readb_base(atpdev, 0x30); + atpdev->sp[0][m++] = atp_readb_base(atpdev, 0x31); + atpdev->sp[0][m++] = atp_readb_base(atpdev, 0x32); + atpdev->sp[0][m++] = atp_readb_base(atpdev, 0x33); n += 0x0018; - goto next_fblk_880; -flash_ok_880: - outw(0, base_io + 0x34); - atpdev->ultra_map[0] = 0; - atpdev->async[0] = 0; + } + atp_writew_base(atpdev, 0x34, 0); + atpdev->ultra_map[0] = 0; + atpdev->async[0] = 0; + for (k = 0; k < 16; k++) { + n = 1 << k; + if (atpdev->sp[0][k] > 1) + atpdev->ultra_map[0] |= n; + else + if (atpdev->sp[0][k] == 0) + atpdev->async[0] |= n; + } + atpdev->async[0] = ~(atpdev->async[0]); + atp_writeb_base(atpdev, 0x35, atpdev->global_map[0]); + + k = atp_readb_base(atpdev, 0x38) & 0x80; + atp_writeb_base(atpdev, 0x38, k); + atp_writeb_base(atpdev, 0x3b, 0x20); + mdelay(32); + atp_writeb_base(atpdev, 0x3b, 0); + mdelay(32); + atp_readb_io(atpdev, 0, 0x1b); + atp_readb_io(atpdev, 0, 0x17); + + atp_set_host_id(atpdev, 0, host_id); + + tscam(shpnt, true, atp_readb_base(atpdev, 0x22)); + atp_is(atpdev, 0, true, atp_readb_base(atpdev, 0x3f) & 0x40); + atp_writeb_base(atpdev, 0x38, 0xb0); + shpnt->max_id = 16; + shpnt->this_id = host_id; +} + +static void atp885_init(struct Scsi_Host *shpnt) +{ + struct atp_unit *atpdev = shost_priv(shpnt); + struct pci_dev *pdev = atpdev->pdev; + unsigned char k, m, c; + unsigned int n; + unsigned char setupdata[2][16]; + + dev_info(&pdev->dev, "ACARD AEC-67162 PCI Ultra3 LVD Host Adapter: IO:%lx, IRQ:%d.\n", + shpnt->io_port, shpnt->irq); + + atpdev->ioport[0] = shpnt->io_port + 0x80; + atpdev->ioport[1] = shpnt->io_port + 0xc0; + atpdev->pciport[0] = shpnt->io_port + 0x40; + atpdev->pciport[1] = shpnt->io_port + 0x50; + + c = atp_readb_base(atpdev, 0x29); + atp_writeb_base(atpdev, 0x29, c | 0x04); + + n = 0x1f80; + while (n < 0x2000) { + atp_writew_base(atpdev, 0x3c, n); + 
if (atp_readl_base(atpdev, 0x38) == 0xffffffff) + break; + for (m = 0; m < 2; m++) { + atpdev->global_map[m] = 0; + for (k = 0; k < 4; k++) { + atp_writew_base(atpdev, 0x3c, n++); + ((unsigned long *)&setupdata[m][0])[k] = atp_readl_base(atpdev, 0x38); + } + for (k = 0; k < 4; k++) { + atp_writew_base(atpdev, 0x3c, n++); + ((unsigned long *)&atpdev->sp[m][0])[k] = atp_readl_base(atpdev, 0x38); + } + n += 8; + } + } + c = atp_readb_base(atpdev, 0x29); + atp_writeb_base(atpdev, 0x29, c & 0xfb); + for (c = 0; c < 2; c++) { + atpdev->ultra_map[c] = 0; + atpdev->async[c] = 0; for (k = 0; k < 16; k++) { - n = 1; - n = n << k; - if (atpdev->sp[0][k] > 1) { - atpdev->ultra_map[0] |= n; - } else { - if (atpdev->sp[0][k] == 0) - atpdev->async[0] |= n; - } - } - atpdev->async[0] = ~(atpdev->async[0]); - outb(atpdev->global_map[0], base_io + 0x35); - - shpnt = scsi_host_alloc(&atp870u_template, sizeof(struct atp_unit)); - if (!shpnt) - goto err_nomem; - - p = (struct atp_unit *)&shpnt->hostdata; - - atpdev->host = shpnt; - atpdev->pdev = pdev; - pci_set_drvdata(pdev, p); - memcpy(p, atpdev, sizeof(*atpdev)); - if (atp870u_init_tables(shpnt) < 0) { - printk(KERN_ERR "Unable to allocate tables for Acard controller\n"); - goto unregister; - } - - if (request_irq(pdev->irq, atp870u_intr_handle, IRQF_SHARED, "atp880i", shpnt)) { - printk(KERN_ERR "Unable to allocate IRQ%d for Acard controller.\n", pdev->irq); - goto free_tables; - } - - spin_lock_irqsave(shpnt->host_lock, flags); - tmport = base_io + 0x38; - k = inb(tmport) & 0x80; - outb(k, tmport); - tmport += 0x03; - outb(0x20, tmport); - mdelay(32); - outb(0, tmport); - mdelay(32); - tmport = base_io + 0x5b; - inb(tmport); - tmport -= 0x04; - inb(tmport); - tmport = base_io + 0x40; - outb((host_id | 0x08), tmport); - tmport += 0x18; - outb(0, tmport); - tmport += 0x07; - while ((inb(tmport) & 0x80) == 0) - mdelay(1); - tmport -= 0x08; - inb(tmport); - tmport = base_io + 0x41; - outb(8, tmport++); - outb(0x7f, tmport); - tmport = base_io + 0x51; - outb(0x20, tmport); - - tscam(shpnt); - is880(p, base_io); - tmport = base_io + 0x38; - outb(0xb0, tmport); - shpnt->max_id = 16; - shpnt->this_id = host_id; - shpnt->unique_id = base_io; - shpnt->io_port = base_io; - shpnt->n_io_port = 0x60; /* Number of bytes of I/O space used */ - shpnt->irq = pdev->irq; - } else if (ent->device == ATP885_DEVID) { - printk(KERN_INFO " ACARD AEC-67162 PCI Ultra3 LVD Host Adapter: IO:%x, IRQ:%d.\n" - , base_io, pdev->irq); - - atpdev->pdev = pdev; - atpdev->dev_id = ent->device; - atpdev->baseport = base_io; - atpdev->ioport[0] = base_io + 0x80; - atpdev->ioport[1] = base_io + 0xc0; - atpdev->pciport[0] = base_io + 0x40; - atpdev->pciport[1] = base_io + 0x50; - - shpnt = scsi_host_alloc(&atp870u_template, sizeof(struct atp_unit)); - if (!shpnt) - goto err_nomem; - - p = (struct atp_unit *)&shpnt->hostdata; - - atpdev->host = shpnt; - atpdev->pdev = pdev; - pci_set_drvdata(pdev, p); - memcpy(p, atpdev, sizeof(struct atp_unit)); - if (atp870u_init_tables(shpnt) < 0) - goto unregister; - -#ifdef ED_DBGP - printk("request_irq() shpnt %p hostdata %p\n", shpnt, p); -#endif - if (request_irq(pdev->irq, atp870u_intr_handle, IRQF_SHARED, "atp870u", shpnt)) { - printk(KERN_ERR "Unable to allocate IRQ for Acard controller.\n"); - goto free_tables; + n = 1 << k; + if (atpdev->sp[c][k] > 1) + atpdev->ultra_map[c] |= n; + else + if (atpdev->sp[c][k] == 0) + atpdev->async[c] |= n; + } + atpdev->async[c] = ~(atpdev->async[c]); + + if (atpdev->global_map[c] == 0) { + k = setupdata[c][1]; + if 
((k & 0x40) != 0) + atpdev->global_map[c] |= 0x20; + k &= 0x07; + atpdev->global_map[c] |= k; + if ((setupdata[c][2] & 0x04) != 0) + atpdev->global_map[c] |= 0x08; + atpdev->host_id[c] = setupdata[c][0] & 0x07; } - - spin_lock_irqsave(shpnt->host_lock, flags); - - c=inb(base_io + 0x29); - outb((c | 0x04),base_io + 0x29); - - n=0x1f80; -next_fblk_885: - if (n >= 0x2000) { - goto flash_ok_885; - } - outw(n,base_io + 0x3c); - if (inl(base_io + 0x38) == 0xffffffff) { - goto flash_ok_885; - } - for (m=0; m < 2; m++) { - p->global_map[m]= 0; - for (k=0; k < 4; k++) { - outw(n++,base_io + 0x3c); - ((unsigned long *)&setupdata[m][0])[k]=inl(base_io + 0x38); - } - for (k=0; k < 4; k++) { - outw(n++,base_io + 0x3c); - ((unsigned long *)&p->sp[m][0])[k]=inl(base_io + 0x38); - } - n += 8; - } - goto next_fblk_885; -flash_ok_885: -#ifdef ED_DBGP - printk( "Flash Read OK\n"); -#endif - c=inb(base_io + 0x29); - outb((c & 0xfb),base_io + 0x29); - for (c=0;c < 2;c++) { - p->ultra_map[c]=0; - p->async[c] = 0; - for (k=0; k < 16; k++) { - n=1; - n = n << k; - if (p->sp[c][k] > 1) { - p->ultra_map[c] |= n; - } else { - if (p->sp[c][k] == 0) { - p->async[c] |= n; - } - } - } - p->async[c] = ~(p->async[c]); - - if (p->global_map[c] == 0) { - k=setupdata[c][1]; - if ((k & 0x40) != 0) - p->global_map[c] |= 0x20; - k &= 0x07; - p->global_map[c] |= k; - if ((setupdata[c][2] & 0x04) != 0) - p->global_map[c] |= 0x08; - p->host_id[c] = setupdata[c][0] & 0x07; - } - } - - k = inb(base_io + 0x28) & 0x8f; - k |= 0x10; - outb(k, base_io + 0x28); - outb(0x80, base_io + 0x41); - outb(0x80, base_io + 0x51); - mdelay(100); - outb(0, base_io + 0x41); - outb(0, base_io + 0x51); - mdelay(1000); - inb(base_io + 0x9b); - inb(base_io + 0x97); - inb(base_io + 0xdb); - inb(base_io + 0xd7); - tmport = base_io + 0x80; - k=p->host_id[0]; - if (k > 7) - k = (k & 0x07) | 0x40; - k |= 0x08; - outb(k, tmport); - tmport += 0x18; - outb(0, tmport); - tmport += 0x07; - - while ((inb(tmport) & 0x80) == 0) - cpu_relax(); - - tmport -= 0x08; - inb(tmport); - tmport = base_io + 0x81; - outb(8, tmport++); - outb(0x7f, tmport); - tmport = base_io + 0x91; - outb(0x20, tmport); - - tmport = base_io + 0xc0; - k=p->host_id[1]; - if (k > 7) - k = (k & 0x07) | 0x40; - k |= 0x08; - outb(k, tmport); - tmport += 0x18; - outb(0, tmport); - tmport += 0x07; - - while ((inb(tmport) & 0x80) == 0) - cpu_relax(); + } - tmport -= 0x08; - inb(tmport); - tmport = base_io + 0xc1; - outb(8, tmport++); - outb(0x7f, tmport); - tmport = base_io + 0xd1; - outb(0x20, tmport); - - tscam_885(); - printk(KERN_INFO " Scanning Channel A SCSI Device ...\n"); - is885(p, base_io + 0x80, 0); - printk(KERN_INFO " Scanning Channel B SCSI Device ...\n"); - is885(p, base_io + 0xc0, 1); - - k = inb(base_io + 0x28) & 0xcf; - k |= 0xc0; - outb(k, base_io + 0x28); - k = inb(base_io + 0x1f) | 0x80; - outb(k, base_io + 0x1f); - k = inb(base_io + 0x29) | 0x01; - outb(k, base_io + 0x29); -#ifdef ED_DBGP - //printk("atp885: atp_host[0] 0x%p\n", atp_host[0]); -#endif - shpnt->max_id = 16; - shpnt->max_lun = (p->global_map[0] & 0x07) + 1; - shpnt->max_channel = 1; - shpnt->this_id = p->host_id[0]; - shpnt->unique_id = base_io; - shpnt->io_port = base_io; - shpnt->n_io_port = 0xff; /* Number of bytes of I/O space used */ - shpnt->irq = pdev->irq; - - } else { - error = pci_read_config_byte(pdev, 0x49, &host_id); + k = atp_readb_base(atpdev, 0x28) & 0x8f; + k |= 0x10; + atp_writeb_base(atpdev, 0x28, k); + atp_writeb_pci(atpdev, 0, 1, 0x80); + atp_writeb_pci(atpdev, 1, 1, 0x80); + mdelay(100); + 
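/*
 * (Side note; the helper name below is hypothetical.) Both new init paths
 * get their per-adapter state via shost_priv() instead of casting
 * &shpnt->hostdata by hand: scsi_host_alloc() reserves the requested
 * private area directly behind struct Scsi_Host, and shost_priv() returns
 * it, so the probe code can do, in sketch form:
 */
static struct atp_unit *atp_example_alloc(void)
{
	struct Scsi_Host *shpnt = scsi_host_alloc(&atp870u_template,
						  sizeof(struct atp_unit));

	if (!shpnt)
		return NULL;
	return shost_priv(shpnt);	/* == (void *)shpnt->hostdata */
}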
atp_writeb_pci(atpdev, 0, 1, 0); + atp_writeb_pci(atpdev, 1, 1, 0); + mdelay(1000); + atp_readb_io(atpdev, 0, 0x1b); + atp_readb_io(atpdev, 0, 0x17); + atp_readb_io(atpdev, 1, 0x1b); + atp_readb_io(atpdev, 1, 0x17); + + k = atpdev->host_id[0]; + if (k > 7) + k = (k & 0x07) | 0x40; + atp_set_host_id(atpdev, 0, k); + + k = atpdev->host_id[1]; + if (k > 7) + k = (k & 0x07) | 0x40; + atp_set_host_id(atpdev, 1, k); + + mdelay(600); /* this delay used to be called tscam_885() */ + dev_info(&pdev->dev, "Scanning Channel A SCSI Device ...\n"); + atp_is(atpdev, 0, true, atp_readb_io(atpdev, 0, 0x1b) >> 7); + atp_writeb_io(atpdev, 0, 0x16, 0x80); + dev_info(&pdev->dev, "Scanning Channel B SCSI Device ...\n"); + atp_is(atpdev, 1, true, atp_readb_io(atpdev, 1, 0x1b) >> 7); + atp_writeb_io(atpdev, 1, 0x16, 0x80); + k = atp_readb_base(atpdev, 0x28) & 0xcf; + k |= 0xc0; + atp_writeb_base(atpdev, 0x28, k); + k = atp_readb_base(atpdev, 0x1f) | 0x80; + atp_writeb_base(atpdev, 0x1f, k); + k = atp_readb_base(atpdev, 0x29) | 0x01; + atp_writeb_base(atpdev, 0x29, k); + shpnt->max_id = 16; + shpnt->max_lun = (atpdev->global_map[0] & 0x07) + 1; + shpnt->max_channel = 1; + shpnt->this_id = atpdev->host_id[0]; +} - printk(KERN_INFO " ACARD AEC-671X PCI Ultra/W SCSI-2/3 Host Adapter: %d " - "IO:%x, IRQ:%d.\n", count, base_io, pdev->irq); +/* return non-zero on detection */ +static int atp870u_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct Scsi_Host *shpnt = NULL; + struct atp_unit *atpdev; + int err; - atpdev->ioport[0] = base_io; - atpdev->pciport[0] = base_io + 0x20; - atpdev->dev_id = ent->device; - host_id &= 0x07; - atpdev->host_id[0] = host_id; - tmport = base_io + 0x22; - atpdev->scam_on = inb(tmport); - tmport += 0x0b; - atpdev->global_map[0] = inb(tmport++); - atpdev->ultra_map[0] = inw(tmport); + if (ent->device == PCI_DEVICE_ID_ARTOP_AEC7610 && pdev->revision < 2) { + dev_err(&pdev->dev, "ATP850S chips (AEC6710L/F cards) are not supported.\n"); + return -ENODEV; + } - if (atpdev->ultra_map[0] == 0) { - atpdev->scam_on = 0x00; - atpdev->global_map[0] = 0x20; - atpdev->ultra_map[0] = 0xffff; - } + err = pci_enable_device(pdev); + if (err) + goto fail; - shpnt = scsi_host_alloc(&atp870u_template, sizeof(struct atp_unit)); - if (!shpnt) - goto err_nomem; + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { + printk(KERN_ERR "atp870u: DMA mask required but not available.\n"); + err = -EIO; + goto disable_device; + } - p = (struct atp_unit *)&shpnt->hostdata; - - atpdev->host = shpnt; - atpdev->pdev = pdev; - pci_set_drvdata(pdev, p); - memcpy(p, atpdev, sizeof(*atpdev)); - if (atp870u_init_tables(shpnt) < 0) - goto unregister; - - if (request_irq(pdev->irq, atp870u_intr_handle, IRQF_SHARED, "atp870i", shpnt)) { - printk(KERN_ERR "Unable to allocate IRQ%d for Acard controller.\n", pdev->irq); - goto free_tables; - } - - spin_lock_irqsave(shpnt->host_lock, flags); - if (atpdev->chip_ver > 0x07) { /* check if atp876 chip then enable terminator */ - tmport = base_io + 0x3e; - outb(0x00, tmport); - } - - tmport = base_io + 0x3a; - k = (inb(tmport) & 0xf3) | 0x10; - outb(k, tmport); - outb((k & 0xdf), tmport); - mdelay(32); - outb(k, tmport); - mdelay(32); - tmport = base_io; - outb((host_id | 0x08), tmport); - tmport += 0x18; - outb(0, tmport); - tmport += 0x07; - while ((inb(tmport) & 0x80) == 0) - mdelay(1); - - tmport -= 0x08; - inb(tmport); - tmport = base_io + 1; - outb(8, tmport++); - outb(0x7f, tmport); - tmport = base_io + 0x11; - outb(0x20, tmport); - - tscam(shpnt); - is870(p, 
base_io); - tmport = base_io + 0x3a; - outb((inb(tmport) & 0xef), tmport); - tmport++; - outb((inb(tmport) | 0x20), tmport); - if (atpdev->chip_ver == 4) - shpnt->max_id = 16; - else - shpnt->max_id = 8; - shpnt->this_id = host_id; - shpnt->unique_id = base_io; - shpnt->io_port = base_io; - shpnt->n_io_port = 0x40; /* Number of bytes of I/O space used */ - shpnt->irq = pdev->irq; - } - spin_unlock_irqrestore(shpnt->host_lock, flags); - if(ent->device==ATP885_DEVID) { - if(!request_region(base_io, 0xff, "atp870u")) /* Register the IO ports that we use */ - goto request_io_fail; - } else if((ent->device==ATP880_DEVID1)||(ent->device==ATP880_DEVID2)) { - if(!request_region(base_io, 0x60, "atp870u")) /* Register the IO ports that we use */ - goto request_io_fail; - } else { - if(!request_region(base_io, 0x40, "atp870u")) /* Register the IO ports that we use */ - goto request_io_fail; - } - count++; - if (scsi_add_host(shpnt, &pdev->dev)) - goto scsi_add_fail; - scsi_scan_host(shpnt); -#ifdef ED_DBGP - printk("atp870u_prob : exit\n"); -#endif - return 0; + err = pci_request_regions(pdev, "atp870u"); + if (err) + goto disable_device; + pci_set_master(pdev); + + err = -ENOMEM; + shpnt = scsi_host_alloc(&atp870u_template, sizeof(struct atp_unit)); + if (!shpnt) + goto release_region; + + atpdev = shost_priv(shpnt); + + atpdev->host = shpnt; + atpdev->pdev = pdev; + pci_set_drvdata(pdev, atpdev); + + shpnt->io_port = pci_resource_start(pdev, 0); + shpnt->io_port &= 0xfffffff8; + shpnt->n_io_port = pci_resource_len(pdev, 0); + atpdev->baseport = shpnt->io_port; + shpnt->unique_id = shpnt->io_port; + shpnt->irq = pdev->irq; + + err = atp870u_init_tables(shpnt); + if (err) { + dev_err(&pdev->dev, "Unable to allocate tables for Acard controller\n"); + goto unregister; + } -scsi_add_fail: - printk("atp870u_prob:scsi_add_fail\n"); - if(ent->device==ATP885_DEVID) { - release_region(base_io, 0xff); - } else if((ent->device==ATP880_DEVID1)||(ent->device==ATP880_DEVID2)) { - release_region(base_io, 0x60); - } else { - release_region(base_io, 0x40); + if (is880(atpdev)) + atp880_init(shpnt); + else if (is885(atpdev)) + atp885_init(shpnt); + else + atp870_init(shpnt); + + err = request_irq(shpnt->irq, atp870u_intr_handle, IRQF_SHARED, "atp870u", shpnt); + if (err) { + dev_err(&pdev->dev, "Unable to allocate IRQ %d.\n", shpnt->irq); + goto free_tables; } -request_io_fail: - printk("atp870u_prob:request_io_fail\n"); - free_irq(pdev->irq, shpnt); + + err = scsi_add_host(shpnt, &pdev->dev); + if (err) + goto scsi_add_fail; + scsi_scan_host(shpnt); + + return 0; + +scsi_add_fail: + free_irq(shpnt->irq, shpnt); free_tables: - printk("atp870u_prob:free_table\n"); atp870u_free_tables(shpnt); unregister: - printk("atp870u_prob:unregister\n"); scsi_host_put(shpnt); - return -1; -err_eio: - kfree(atpdev); - return -EIO; -err_nomem: - kfree(atpdev); - return -ENOMEM; +release_region: + pci_release_regions(pdev); +disable_device: + pci_disable_device(pdev); +fail: + return err; } /* The abort command does not leave the device in a clean state where @@ -3055,7 +1586,6 @@ static int atp870u_abort(struct scsi_cmnd * SCpnt) { unsigned char j, k, c; struct scsi_cmnd *workrequ; - unsigned int tmport; struct atp_unit *dev; struct Scsi_Host *host; host = SCpnt->device->host; @@ -3065,18 +1595,13 @@ static int atp870u_abort(struct scsi_cmnd * SCpnt) printk(" atp870u: abort Channel = %x \n", c); printk("working=%x last_cmd=%x ", dev->working[c], dev->last_cmd[c]); printk(" quhdu=%x quendu=%x ", dev->quhd[c], dev->quend[c]); - tmport 
= dev->ioport[c]; for (j = 0; j < 0x18; j++) { - printk(" r%2x=%2x", j, inb(tmport++)); + printk(" r%2x=%2x", j, atp_readb_io(dev, c, j)); } - tmport += 0x04; - printk(" r1c=%2x", inb(tmport)); - tmport += 0x03; - printk(" r1f=%2x in_snd=%2x ", inb(tmport), dev->in_snd[c]); - tmport= dev->pciport[c]; - printk(" d00=%2x", inb(tmport)); - tmport += 0x02; - printk(" d02=%2x", inb(tmport)); + printk(" r1c=%2x", atp_readb_io(dev, c, 0x1c)); + printk(" r1f=%2x in_snd=%2x ", atp_readb_io(dev, c, 0x1f), dev->in_snd[c]); + printk(" d00=%2x", atp_readb_pci(dev, c, 0x00)); + printk(" d02=%2x", atp_readb_pci(dev, c, 0x02)); for(j=0;j<16;j++) { if (dev->id[c][j].curr_req != NULL) { workrequ = dev->id[c][j].curr_req; @@ -3136,12 +1661,10 @@ static void atp870u_remove (struct pci_dev *pdev) scsi_remove_host(pshost); - printk(KERN_INFO "free_irq : %d\n",pshost->irq); free_irq(pshost->irq, pshost); - release_region(pshost->io_port, pshost->n_io_port); - printk(KERN_INFO "atp870u_free_tables : %p\n",pshost); + pci_release_regions(pdev); + pci_disable_device(pdev); atp870u_free_tables(pshost); - printk(KERN_INFO "scsi_host_put : %p\n",pshost); scsi_host_put(pshost); } MODULE_LICENSE("GPL"); @@ -3185,52 +1708,26 @@ static struct pci_driver atp870u_driver = { .remove = atp870u_remove, }; -static int __init atp870u_init(void) -{ -#ifdef ED_DBGP - printk("atp870u_init: Entry\n"); -#endif - return pci_register_driver(&atp870u_driver); -} - -static void __exit atp870u_exit(void) -{ -#ifdef ED_DBGP - printk("atp870u_exit: Entry\n"); -#endif - pci_unregister_driver(&atp870u_driver); -} - -static void tscam_885(void) -{ - unsigned char i; - - for (i = 0; i < 0x2; i++) { - mdelay(300); - } - return; -} - - +module_pci_driver(atp870u_driver); -static void is885(struct atp_unit *dev, unsigned int wkport,unsigned char c) +static void atp_is(struct atp_unit *dev, unsigned char c, bool wide_chip, unsigned char lvdmode) { - unsigned int tmport; - unsigned char i, j, k, rmb, n, lvdmode; + unsigned char i, j, k, rmb, n; unsigned short int m; static unsigned char mbuf[512]; - static unsigned char satn[9] = {0, 0, 0, 0, 0, 0, 0, 6, 6}; - static unsigned char inqd[9] = {0x12, 0, 0, 0, 0x24, 0, 0, 0x24, 6}; - static unsigned char synn[6] = {0x80, 1, 3, 1, 0x19, 0x0e}; - unsigned char synu[6] = {0x80, 1, 3, 1, 0x0a, 0x0e}; - static unsigned char synw[6] = {0x80, 1, 3, 1, 0x19, 0x0e}; - unsigned char synuw[6] = {0x80, 1, 3, 1, 0x0a, 0x0e}; - static unsigned char wide[6] = {0x80, 1, 2, 3, 1, 0}; - static unsigned char u3[9] = { 0x80,1,6,4,0x09,00,0x0e,0x01,0x02 }; - - lvdmode=inb(wkport + 0x1b) >> 7; + static unsigned char satn[9] = { 0, 0, 0, 0, 0, 0, 0, 6, 6 }; + static unsigned char inqd[9] = { 0x12, 0, 0, 0, 0x24, 0, 0, 0x24, 6 }; + static unsigned char synn[6] = { 0x80, 1, 3, 1, 0x19, 0x0e }; + unsigned char synu[6] = { 0x80, 1, 3, 1, 0x0a, 0x0e }; + static unsigned char synw[6] = { 0x80, 1, 3, 1, 0x19, 0x0e }; + static unsigned char synw_870[6] = { 0x80, 1, 3, 1, 0x0c, 0x07 }; + unsigned char synuw[6] = { 0x80, 1, 3, 1, 0x0a, 0x0e }; + static unsigned char wide[6] = { 0x80, 1, 2, 3, 1, 0 }; + static unsigned char u3[9] = { 0x80, 1, 6, 4, 0x09, 00, 0x0e, 0x01, 0x02 }; for (i = 0; i < 16; i++) { + if (!wide_chip && (i > 7)) + break; m = 1; m = m << i; if ((m & dev->active_id[c]) != 0) { @@ -3240,192 +1737,172 @@ static void is885(struct atp_unit *dev, unsigned int wkport,unsigned char c) printk(KERN_INFO " ID: %2d Host Adapter\n", dev->host_id[c]); continue; } - tmport = wkport + 0x1b; - outb(0x01, tmport); - tmport = wkport + 
0x01; - outb(0x08, tmport++); - outb(0x7f, tmport++); - outb(satn[0], tmport++); - outb(satn[1], tmport++); - outb(satn[2], tmport++); - outb(satn[3], tmport++); - outb(satn[4], tmport++); - outb(satn[5], tmport++); - tmport += 0x06; - outb(0, tmport); - tmport += 0x02; - outb(dev->id[c][i].devsp, tmport++); - - outb(0, tmport++); - outb(satn[6], tmport++); - outb(satn[7], tmport++); + atp_writeb_io(dev, c, 0x1b, wide_chip ? 0x01 : 0x00); + atp_writeb_io(dev, c, 1, 0x08); + atp_writeb_io(dev, c, 2, 0x7f); + atp_writeb_io(dev, c, 3, satn[0]); + atp_writeb_io(dev, c, 4, satn[1]); + atp_writeb_io(dev, c, 5, satn[2]); + atp_writeb_io(dev, c, 6, satn[3]); + atp_writeb_io(dev, c, 7, satn[4]); + atp_writeb_io(dev, c, 8, satn[5]); + atp_writeb_io(dev, c, 0x0f, 0); + atp_writeb_io(dev, c, 0x11, dev->id[c][i].devsp); + atp_writeb_io(dev, c, 0x12, 0); + atp_writeb_io(dev, c, 0x13, satn[6]); + atp_writeb_io(dev, c, 0x14, satn[7]); j = i; if ((j & 0x08) != 0) { j = (j & 0x07) | 0x40; } - outb(j, tmport); - tmport += 0x03; - outb(satn[8], tmport); - tmport += 0x07; + atp_writeb_io(dev, c, 0x15, j); + atp_writeb_io(dev, c, 0x18, satn[8]); - while ((inb(tmport) & 0x80) == 0x00) + while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0x00) cpu_relax(); - tmport -= 0x08; - if ((inb(tmport) != 0x11) && (inb(tmport) != 0x8e)) { + + if (atp_readb_io(dev, c, 0x17) != 0x11 && atp_readb_io(dev, c, 0x17) != 0x8e) continue; - } - while (inb(tmport) != 0x8e) + + while (atp_readb_io(dev, c, 0x17) != 0x8e) cpu_relax(); + dev->active_id[c] |= m; - tmport = wkport + 0x10; - outb(0x30, tmport); - tmport = wkport + 0x14; - outb(0x00, tmport); + atp_writeb_io(dev, c, 0x10, 0x30); + if (is885(dev) || is880(dev)) + atp_writeb_io(dev, c, 0x14, 0x00); + else /* result of is870() merge - is this a bug? 
*/ + atp_writeb_io(dev, c, 0x04, 0x00); phase_cmd: - tmport = wkport + 0x18; - outb(0x08, tmport); - tmport += 0x07; - while ((inb(tmport) & 0x80) == 0x00) + atp_writeb_io(dev, c, 0x18, 0x08); + + while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0x00) cpu_relax(); - tmport -= 0x08; - j = inb(tmport); + + j = atp_readb_io(dev, c, 0x17); if (j != 0x16) { - tmport = wkport + 0x10; - outb(0x41, tmport); + atp_writeb_io(dev, c, 0x10, 0x41); goto phase_cmd; } sel_ok: - tmport = wkport + 0x03; - outb(inqd[0], tmport++); - outb(inqd[1], tmport++); - outb(inqd[2], tmport++); - outb(inqd[3], tmport++); - outb(inqd[4], tmport++); - outb(inqd[5], tmport); - tmport += 0x07; - outb(0, tmport); - tmport += 0x02; - outb(dev->id[c][i].devsp, tmport++); - outb(0, tmport++); - outb(inqd[6], tmport++); - outb(inqd[7], tmport++); - tmport += 0x03; - outb(inqd[8], tmport); - tmport += 0x07; - while ((inb(tmport) & 0x80) == 0x00) + atp_writeb_io(dev, c, 3, inqd[0]); + atp_writeb_io(dev, c, 4, inqd[1]); + atp_writeb_io(dev, c, 5, inqd[2]); + atp_writeb_io(dev, c, 6, inqd[3]); + atp_writeb_io(dev, c, 7, inqd[4]); + atp_writeb_io(dev, c, 8, inqd[5]); + atp_writeb_io(dev, c, 0x0f, 0); + atp_writeb_io(dev, c, 0x11, dev->id[c][i].devsp); + atp_writeb_io(dev, c, 0x12, 0); + atp_writeb_io(dev, c, 0x13, inqd[6]); + atp_writeb_io(dev, c, 0x14, inqd[7]); + atp_writeb_io(dev, c, 0x18, inqd[8]); + + while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0x00) cpu_relax(); - tmport -= 0x08; - if ((inb(tmport) != 0x11) && (inb(tmport) != 0x8e)) { + + if (atp_readb_io(dev, c, 0x17) != 0x11 && atp_readb_io(dev, c, 0x17) != 0x8e) continue; - } - while (inb(tmport) != 0x8e) + + while (atp_readb_io(dev, c, 0x17) != 0x8e) cpu_relax(); - tmport = wkport + 0x1b; - outb(0x00, tmport); - tmport = wkport + 0x18; - outb(0x08, tmport); - tmport += 0x07; + + if (wide_chip) + atp_writeb_io(dev, c, 0x1b, 0x00); + + atp_writeb_io(dev, c, 0x18, 0x08); j = 0; rd_inq_data: - k = inb(tmport); + k = atp_readb_io(dev, c, 0x1f); if ((k & 0x01) != 0) { - tmport -= 0x06; - mbuf[j++] = inb(tmport); - tmport += 0x06; + mbuf[j++] = atp_readb_io(dev, c, 0x19); goto rd_inq_data; } if ((k & 0x80) == 0) { goto rd_inq_data; } - tmport -= 0x08; - j = inb(tmport); + j = atp_readb_io(dev, c, 0x17); if (j == 0x16) { goto inq_ok; } - tmport = wkport + 0x10; - outb(0x46, tmport); - tmport += 0x02; - outb(0, tmport++); - outb(0, tmport++); - outb(0, tmport++); - tmport += 0x03; - outb(0x08, tmport); - tmport += 0x07; - while ((inb(tmport) & 0x80) == 0x00) + atp_writeb_io(dev, c, 0x10, 0x46); + atp_writeb_io(dev, c, 0x12, 0); + atp_writeb_io(dev, c, 0x13, 0); + atp_writeb_io(dev, c, 0x14, 0); + atp_writeb_io(dev, c, 0x18, 0x08); + + while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0x00) cpu_relax(); - tmport -= 0x08; - if (inb(tmport) != 0x16) { + + if (atp_readb_io(dev, c, 0x17) != 0x16) goto sel_ok; - } + inq_ok: mbuf[36] = 0; - printk( KERN_INFO" ID: %2d %s\n", i, &mbuf[8]); + printk(KERN_INFO " ID: %2d %s\n", i, &mbuf[8]); dev->id[c][i].devtype = mbuf[0]; rmb = mbuf[1]; n = mbuf[7]; + if (!wide_chip) + goto not_wide; if ((mbuf[7] & 0x60) == 0) { goto not_wide; } - if ((i < 8) && ((dev->global_map[c] & 0x20) == 0)) { - goto not_wide; + if (is885(dev) || is880(dev)) { + if ((i < 8) && ((dev->global_map[c] & 0x20) == 0)) + goto not_wide; + } else { /* result of is870() merge - is this a bug? 
*/ + if ((dev->global_map[c] & 0x20) == 0) + goto not_wide; } if (lvdmode == 0) { - goto chg_wide; - } - if (dev->sp[c][i] != 0x04) { // force u2 - goto chg_wide; - } - - tmport = wkport + 0x1b; - outb(0x01, tmport); - tmport = wkport + 0x03; - outb(satn[0], tmport++); - outb(satn[1], tmport++); - outb(satn[2], tmport++); - outb(satn[3], tmport++); - outb(satn[4], tmport++); - outb(satn[5], tmport++); - tmport += 0x06; - outb(0, tmport); - tmport += 0x02; - outb(dev->id[c][i].devsp, tmport++); - outb(0, tmport++); - outb(satn[6], tmport++); - outb(satn[7], tmport++); - tmport += 0x03; - outb(satn[8], tmport); - tmport += 0x07; - - while ((inb(tmport) & 0x80) == 0x00) + goto chg_wide; + } + if (dev->sp[c][i] != 0x04) // force u2 + { + goto chg_wide; + } + + atp_writeb_io(dev, c, 0x1b, 0x01); + atp_writeb_io(dev, c, 3, satn[0]); + atp_writeb_io(dev, c, 4, satn[1]); + atp_writeb_io(dev, c, 5, satn[2]); + atp_writeb_io(dev, c, 6, satn[3]); + atp_writeb_io(dev, c, 7, satn[4]); + atp_writeb_io(dev, c, 8, satn[5]); + atp_writeb_io(dev, c, 0x0f, 0); + atp_writeb_io(dev, c, 0x11, dev->id[c][i].devsp); + atp_writeb_io(dev, c, 0x12, 0); + atp_writeb_io(dev, c, 0x13, satn[6]); + atp_writeb_io(dev, c, 0x14, satn[7]); + atp_writeb_io(dev, c, 0x18, satn[8]); + + while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0x00) cpu_relax(); - tmport -= 0x08; - if ((inb(tmport) != 0x11) && (inb(tmport) != 0x8e)) { + + if (atp_readb_io(dev, c, 0x17) != 0x11 && atp_readb_io(dev, c, 0x17) != 0x8e) continue; - } - while (inb(tmport) != 0x8e) + + while (atp_readb_io(dev, c, 0x17) != 0x8e) cpu_relax(); + try_u3: j = 0; - tmport = wkport + 0x14; - outb(0x09, tmport); - tmport += 0x04; - outb(0x20, tmport); - tmport += 0x07; - - while ((inb(tmport) & 0x80) == 0) { - if ((inb(tmport) & 0x01) != 0) { - tmport -= 0x06; - outb(u3[j++], tmport); - tmport += 0x06; - } + atp_writeb_io(dev, c, 0x14, 0x09); + atp_writeb_io(dev, c, 0x18, 0x20); + + while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0) { + if ((atp_readb_io(dev, c, 0x1f) & 0x01) != 0) + atp_writeb_io(dev, c, 0x19, u3[j++]); cpu_relax(); } - tmport -= 0x08; - while ((inb(tmport) & 0x80) == 0x00) + + while ((atp_readb_io(dev, c, 0x17) & 0x80) == 0x00) cpu_relax(); - j = inb(tmport) & 0x0f; + + j = atp_readb_io(dev, c, 0x17) & 0x0f; if (j == 0x0f) { goto u3p_in; } @@ -3437,19 +1914,13 @@ try_u3: } continue; u3p_out: - tmport = wkport + 0x18; - outb(0x20, tmport); - tmport += 0x07; - while ((inb(tmport) & 0x80) == 0) { - if ((inb(tmport) & 0x01) != 0) { - tmport -= 0x06; - outb(0, tmport); - tmport += 0x06; - } + atp_writeb_io(dev, c, 0x18, 0x20); + while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0) { + if ((atp_readb_io(dev, c, 0x1f) & 0x01) != 0) + atp_writeb_io(dev, c, 0x19, 0); cpu_relax(); } - tmport -= 0x08; - j = inb(tmport) & 0x0f; + j = atp_readb_io(dev, c, 0x17) & 0x0f; if (j == 0x0f) { goto u3p_in; } @@ -3461,25 +1932,19 @@ u3p_out: } continue; u3p_in: - tmport = wkport + 0x14; - outb(0x09, tmport); - tmport += 0x04; - outb(0x20, tmport); - tmport += 0x07; + atp_writeb_io(dev, c, 0x14, 0x09); + atp_writeb_io(dev, c, 0x18, 0x20); k = 0; u3p_in1: - j = inb(tmport); + j = atp_readb_io(dev, c, 0x1f); if ((j & 0x01) != 0) { - tmport -= 0x06; - mbuf[k++] = inb(tmport); - tmport += 0x06; + mbuf[k++] = atp_readb_io(dev, c, 0x19); goto u3p_in1; } if ((j & 0x80) == 0x00) { goto u3p_in1; } - tmport -= 0x08; - j = inb(tmport) & 0x0f; + j = atp_readb_io(dev, c, 0x17) & 0x0f; if (j == 0x0f) { goto u3p_in; } @@ -3491,16 +1956,13 @@ u3p_in1: } continue; u3p_cmd: - tmport = wkport + 0x10; 
- outb(0x30, tmport); - tmport = wkport + 0x14; - outb(0x00, tmport); - tmport += 0x04; - outb(0x08, tmport); - tmport += 0x07; - while ((inb(tmport) & 0x80) == 0x00); - tmport -= 0x08; - j = inb(tmport); + atp_writeb_io(dev, c, 0x10, 0x30); + atp_writeb_io(dev, c, 0x14, 0x00); + atp_writeb_io(dev, c, 0x18, 0x08); + + while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0x00); + + j = atp_readb_io(dev, c, 0x17); if (j != 0x16) { if (j == 0x4e) { goto u3p_out; @@ -3527,54 +1989,44 @@ u3p_cmd: continue; } chg_wide: - tmport = wkport + 0x1b; - outb(0x01, tmport); - tmport = wkport + 0x03; - outb(satn[0], tmport++); - outb(satn[1], tmport++); - outb(satn[2], tmport++); - outb(satn[3], tmport++); - outb(satn[4], tmport++); - outb(satn[5], tmport++); - tmport += 0x06; - outb(0, tmport); - tmport += 0x02; - outb(dev->id[c][i].devsp, tmport++); - outb(0, tmport++); - outb(satn[6], tmport++); - outb(satn[7], tmport++); - tmport += 0x03; - outb(satn[8], tmport); - tmport += 0x07; - - while ((inb(tmport) & 0x80) == 0x00) + atp_writeb_io(dev, c, 0x1b, 0x01); + atp_writeb_io(dev, c, 3, satn[0]); + atp_writeb_io(dev, c, 4, satn[1]); + atp_writeb_io(dev, c, 5, satn[2]); + atp_writeb_io(dev, c, 6, satn[3]); + atp_writeb_io(dev, c, 7, satn[4]); + atp_writeb_io(dev, c, 8, satn[5]); + atp_writeb_io(dev, c, 0x0f, 0); + atp_writeb_io(dev, c, 0x11, dev->id[c][i].devsp); + atp_writeb_io(dev, c, 0x12, 0); + atp_writeb_io(dev, c, 0x13, satn[6]); + atp_writeb_io(dev, c, 0x14, satn[7]); + atp_writeb_io(dev, c, 0x18, satn[8]); + + while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0x00) cpu_relax(); - tmport -= 0x08; - if ((inb(tmport) != 0x11) && (inb(tmport) != 0x8e)) { + + if (atp_readb_io(dev, c, 0x17) != 0x11 && atp_readb_io(dev, c, 0x17) != 0x8e) continue; - } - while (inb(tmport) != 0x8e) + + while (atp_readb_io(dev, c, 0x17) != 0x8e) cpu_relax(); + try_wide: j = 0; - tmport = wkport + 0x14; - outb(0x05, tmport); - tmport += 0x04; - outb(0x20, tmport); - tmport += 0x07; - - while ((inb(tmport) & 0x80) == 0) { - if ((inb(tmport) & 0x01) != 0) { - tmport -= 0x06; - outb(wide[j++], tmport); - tmport += 0x06; - } + atp_writeb_io(dev, c, 0x14, 0x05); + atp_writeb_io(dev, c, 0x18, 0x20); + + while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0) { + if ((atp_readb_io(dev, c, 0x1f) & 0x01) != 0) + atp_writeb_io(dev, c, 0x19, wide[j++]); cpu_relax(); } - tmport -= 0x08; - while ((inb(tmport) & 0x80) == 0x00) + + while ((atp_readb_io(dev, c, 0x17) & 0x80) == 0x00) cpu_relax(); - j = inb(tmport) & 0x0f; + + j = atp_readb_io(dev, c, 0x17) & 0x0f; if (j == 0x0f) { goto widep_in; } @@ -3586,19 +2038,13 @@ try_wide: } continue; widep_out: - tmport = wkport + 0x18; - outb(0x20, tmport); - tmport += 0x07; - while ((inb(tmport) & 0x80) == 0) { - if ((inb(tmport) & 0x01) != 0) { - tmport -= 0x06; - outb(0, tmport); - tmport += 0x06; - } + atp_writeb_io(dev, c, 0x18, 0x20); + while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0) { + if ((atp_readb_io(dev, c, 0x1f) & 0x01) != 0) + atp_writeb_io(dev, c, 0x19, 0); cpu_relax(); } - tmport -= 0x08; - j = inb(tmport) & 0x0f; + j = atp_readb_io(dev, c, 0x17) & 0x0f; if (j == 0x0f) { goto widep_in; } @@ -3610,25 +2056,19 @@ widep_out: } continue; widep_in: - tmport = wkport + 0x14; - outb(0xff, tmport); - tmport += 0x04; - outb(0x20, tmport); - tmport += 0x07; + atp_writeb_io(dev, c, 0x14, 0xff); + atp_writeb_io(dev, c, 0x18, 0x20); k = 0; widep_in1: - j = inb(tmport); + j = atp_readb_io(dev, c, 0x1f); if ((j & 0x01) != 0) { - tmport -= 0x06; - mbuf[k++] = inb(tmport); - tmport += 0x06; + mbuf[k++] = 
atp_readb_io(dev, c, 0x19); goto widep_in1; } if ((j & 0x80) == 0x00) { goto widep_in1; } - tmport -= 0x08; - j = inb(tmport) & 0x0f; + j = atp_readb_io(dev, c, 0x17) & 0x0f; if (j == 0x0f) { goto widep_in; } @@ -3640,17 +2080,14 @@ widep_in1: } continue; widep_cmd: - tmport = wkport + 0x10; - outb(0x30, tmport); - tmport = wkport + 0x14; - outb(0x00, tmport); - tmport += 0x04; - outb(0x08, tmport); - tmport += 0x07; - while ((inb(tmport) & 0x80) == 0x00) + atp_writeb_io(dev, c, 0x10, 0x30); + atp_writeb_io(dev, c, 0x14, 0x00); + atp_writeb_io(dev, c, 0x18, 0x08); + + while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0x00) cpu_relax(); - tmport -= 0x08; - j = inb(tmport); + + j = atp_readb_io(dev, c, 0x17); if (j != 0x16) { if (j == 0x4e) { goto widep_out; @@ -3673,88 +2110,81 @@ widep_cmd: m = m << i; dev->wide_id[c] |= m; not_wide: - if ((dev->id[c][i].devtype == 0x00) || (dev->id[c][i].devtype == 0x07) || - ((dev->id[c][i].devtype == 0x05) && ((n & 0x10) != 0))) { + if ((dev->id[c][i].devtype == 0x00) || (dev->id[c][i].devtype == 0x07) || ((dev->id[c][i].devtype == 0x05) && ((n & 0x10) != 0))) { m = 1; m = m << i; if ((dev->async[c] & m) != 0) { - goto set_sync; + goto set_sync; } } continue; set_sync: - if (dev->sp[c][i] == 0x02) { - synu[4]=0x0c; - synuw[4]=0x0c; + if ((!is885(dev) && !is880(dev)) || (dev->sp[c][i] == 0x02)) { + synu[4] = 0x0c; + synuw[4] = 0x0c; } else { - if (dev->sp[c][i] >= 0x03) { - synu[4]=0x0a; - synuw[4]=0x0a; - } + if (dev->sp[c][i] >= 0x03) { + synu[4] = 0x0a; + synuw[4] = 0x0a; + } } - tmport = wkport + 0x1b; j = 0; if ((m & dev->wide_id[c]) != 0) { j |= 0x01; } - outb(j, tmport); - tmport = wkport + 0x03; - outb(satn[0], tmport++); - outb(satn[1], tmport++); - outb(satn[2], tmport++); - outb(satn[3], tmport++); - outb(satn[4], tmport++); - outb(satn[5], tmport++); - tmport += 0x06; - outb(0, tmport); - tmport += 0x02; - outb(dev->id[c][i].devsp, tmport++); - outb(0, tmport++); - outb(satn[6], tmport++); - outb(satn[7], tmport++); - tmport += 0x03; - outb(satn[8], tmport); - tmport += 0x07; - - while ((inb(tmport) & 0x80) == 0x00) + atp_writeb_io(dev, c, 0x1b, j); + atp_writeb_io(dev, c, 3, satn[0]); + atp_writeb_io(dev, c, 4, satn[1]); + atp_writeb_io(dev, c, 5, satn[2]); + atp_writeb_io(dev, c, 6, satn[3]); + atp_writeb_io(dev, c, 7, satn[4]); + atp_writeb_io(dev, c, 8, satn[5]); + atp_writeb_io(dev, c, 0x0f, 0); + atp_writeb_io(dev, c, 0x11, dev->id[c][i].devsp); + atp_writeb_io(dev, c, 0x12, 0); + atp_writeb_io(dev, c, 0x13, satn[6]); + atp_writeb_io(dev, c, 0x14, satn[7]); + atp_writeb_io(dev, c, 0x18, satn[8]); + + while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0x00) cpu_relax(); - tmport -= 0x08; - if ((inb(tmport) != 0x11) && (inb(tmport) != 0x8e)) { + + if (atp_readb_io(dev, c, 0x17) != 0x11 && atp_readb_io(dev, c, 0x17) != 0x8e) continue; - } - while (inb(tmport) != 0x8e) + + while (atp_readb_io(dev, c, 0x17) != 0x8e) cpu_relax(); + try_sync: j = 0; - tmport = wkport + 0x14; - outb(0x06, tmport); - tmport += 0x04; - outb(0x20, tmport); - tmport += 0x07; - - while ((inb(tmport) & 0x80) == 0) { - if ((inb(tmport) & 0x01) != 0) { - tmport -= 0x06; + atp_writeb_io(dev, c, 0x14, 0x06); + atp_writeb_io(dev, c, 0x18, 0x20); + + while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0) { + if ((atp_readb_io(dev, c, 0x1f) & 0x01) != 0) { if ((m & dev->wide_id[c]) != 0) { - if ((m & dev->ultra_map[c]) != 0) { - outb(synuw[j++], tmport); - } else { - outb(synw[j++], tmport); - } + if (is885(dev) || is880(dev)) { + if ((m & dev->ultra_map[c]) != 0) { + atp_writeb_io(dev, 
c, 0x19, synuw[j++]); + } else { + atp_writeb_io(dev, c, 0x19, synw[j++]); + } + } else + atp_writeb_io(dev, c, 0x19, synw_870[j++]); } else { if ((m & dev->ultra_map[c]) != 0) { - outb(synu[j++], tmport); + atp_writeb_io(dev, c, 0x19, synu[j++]); } else { - outb(synn[j++], tmport); + atp_writeb_io(dev, c, 0x19, synn[j++]); } } - tmport += 0x06; } } - tmport -= 0x08; - while ((inb(tmport) & 0x80) == 0x00) + + while ((atp_readb_io(dev, c, 0x17) & 0x80) == 0x00) cpu_relax(); - j = inb(tmport) & 0x0f; + + j = atp_readb_io(dev, c, 0x17) & 0x0f; if (j == 0x0f) { goto phase_ins; } @@ -3766,19 +2196,13 @@ try_sync: } continue; phase_outs: - tmport = wkport + 0x18; - outb(0x20, tmport); - tmport += 0x07; - while ((inb(tmport) & 0x80) == 0x00) { - if ((inb(tmport) & 0x01) != 0x00) { - tmport -= 0x06; - outb(0x00, tmport); - tmport += 0x06; - } + atp_writeb_io(dev, c, 0x18, 0x20); + while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0x00) { + if ((atp_readb_io(dev, c, 0x1f) & 0x01) != 0x00) + atp_writeb_io(dev, c, 0x19, 0x00); cpu_relax(); } - tmport -= 0x08; - j = inb(tmport); + j = atp_readb_io(dev, c, 0x17); if (j == 0x85) { goto tar_dcons; } @@ -3794,26 +2218,25 @@ phase_outs: } continue; phase_ins: - tmport = wkport + 0x14; - outb(0x06, tmport); - tmport += 0x04; - outb(0x20, tmport); - tmport += 0x07; + if (is885(dev) || is880(dev)) + atp_writeb_io(dev, c, 0x14, 0x06); + else + atp_writeb_io(dev, c, 0x14, 0xff); + atp_writeb_io(dev, c, 0x18, 0x20); k = 0; phase_ins1: - j = inb(tmport); + j = atp_readb_io(dev, c, 0x1f); if ((j & 0x01) != 0x00) { - tmport -= 0x06; - mbuf[k++] = inb(tmport); - tmport += 0x06; + mbuf[k++] = atp_readb_io(dev, c, 0x19); goto phase_ins1; } if ((j & 0x80) == 0x00) { goto phase_ins1; } - tmport -= 0x08; - while ((inb(tmport) & 0x80) == 0x00); - j = inb(tmport); + + while ((atp_readb_io(dev, c, 0x17) & 0x80) == 0x00); + + j = atp_readb_io(dev, c, 0x17); if (j == 0x85) { goto tar_dcons; } @@ -3829,18 +2252,15 @@ phase_ins1: } continue; phase_cmds: - tmport = wkport + 0x10; - outb(0x30, tmport); + atp_writeb_io(dev, c, 0x10, 0x30); tar_dcons: - tmport = wkport + 0x14; - outb(0x00, tmport); - tmport += 0x04; - outb(0x08, tmport); - tmport += 0x07; - while ((inb(tmport) & 0x80) == 0x00) + atp_writeb_io(dev, c, 0x14, 0x00); + atp_writeb_io(dev, c, 0x18, 0x08); + + while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0x00) cpu_relax(); - tmport -= 0x08; - j = inb(tmport); + + j = atp_readb_io(dev, c, 0x17); if (j != 0x16) { continue; } @@ -3856,14 +2276,21 @@ tar_dcons: if (mbuf[3] > 0x64) { continue; } - if (mbuf[4] > 0x0e) { - mbuf[4] = 0x0e; + if (is885(dev) || is880(dev)) { + if (mbuf[4] > 0x0e) { + mbuf[4] = 0x0e; + } + } else { + if (mbuf[4] > 0x0c) { + mbuf[4] = 0x0c; + } } dev->id[c][i].devsp = mbuf[4]; - if (mbuf[3] < 0x0c){ - j = 0xb0; - goto set_syn_ok; - } + if (is885(dev) || is880(dev)) + if (mbuf[3] < 0x0c) { + j = 0xb0; + goto set_syn_ok; + } if ((mbuf[3] < 0x0d) && (rmb == 0)) { j = 0xa0; goto set_syn_ok; @@ -3881,16 +2308,10 @@ tar_dcons: goto set_syn_ok; } j = 0x60; - set_syn_ok: +set_syn_ok: dev->id[c][i].devsp = (dev->id[c][i].devsp & 0x0f) | j; -#ifdef ED_DBGP +#ifdef ED_DBGP printk("dev->id[%2d][%2d].devsp = %2x\n",c,i,dev->id[c][i].devsp); #endif } - tmport = wkport + 0x16; - outb(0x80, tmport); } - -module_init(atp870u_init); -module_exit(atp870u_exit); - diff --git a/drivers/scsi/atp870u.h b/drivers/scsi/atp870u.h index 5cf62566ad42..9b839b1e895a 100644 --- a/drivers/scsi/atp870u.h +++ b/drivers/scsi/atp870u.h @@ -26,22 +26,18 @@ struct atp_unit unsigned long 
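
The atp870u hunks above are a mechanical conversion: every fragile sequence of tmport = wkport + off arithmetic followed by outb()/inb(), with the running pointer incremented and decremented between accesses, becomes a single call that names the adapter, the channel and the register offset. The helper definitions live elsewhere in this patch; a minimal sketch of the pattern, assuming the per-channel ioport[] base kept in struct atp_unit:

	static inline void atp_writeb_io(struct atp_unit *atp, u8 channel,
					 u8 reg, u8 val)
	{
		/* replaces: tmport = wkport + reg; outb(val, tmport); */
		outb(val, atp->ioport[channel] + reg);
	}

	static inline u8 atp_readb_io(struct atp_unit *atp, u8 channel, u8 reg)
	{
		/* replaces: tmport = wkport + reg; j = inb(tmport); */
		return inb(atp->ioport[channel] + reg);
	}

Register offsets such as 0x17 and 0x1f then appear literally at each call site instead of having to be reconstructed from a chain of tmport += adjustments.
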
baseport; unsigned long ioport[2]; unsigned long pciport[2]; - unsigned long irq; unsigned char last_cmd[2]; unsigned char in_snd[2]; unsigned char in_int[2]; unsigned char quhd[2]; unsigned char quend[2]; unsigned char global_map[2]; - unsigned char chip_ver; - unsigned char scam_on; unsigned char host_id[2]; unsigned int working[2]; unsigned short wide_id[2]; unsigned short active_id[2]; unsigned short ultra_map[2]; unsigned short async[2]; - unsigned short dev_id; unsigned char sp[2][16]; unsigned char r1f[2][16]; struct scsi_cmnd *quereq[2][qcnt]; diff --git a/drivers/scsi/be2iscsi/Kconfig b/drivers/scsi/be2iscsi/Kconfig index 4e7cad272469..bad5f32e1f67 100644 --- a/drivers/scsi/be2iscsi/Kconfig +++ b/drivers/scsi/be2iscsi/Kconfig @@ -3,6 +3,7 @@ config BE2ISCSI depends on PCI && SCSI && NET select SCSI_ISCSI_ATTRS select ISCSI_BOOT_SYSFS + select IRQ_POLL help This driver implements the iSCSI functionality for Emulex diff --git a/drivers/scsi/be2iscsi/be.h b/drivers/scsi/be2iscsi/be.h index 77f992e74726..ee5ace873535 100644 --- a/drivers/scsi/be2iscsi/be.h +++ b/drivers/scsi/be2iscsi/be.h @@ -20,7 +20,7 @@ #include <linux/pci.h> #include <linux/if_vlan.h> -#include <linux/blk-iopoll.h> +#include <linux/irq_poll.h> #define FW_VER_LEN 32 #define MCC_Q_LEN 128 #define MCC_CQ_LEN 256 @@ -42,7 +42,7 @@ struct be_queue_info { u16 id; u16 tail, head; bool created; - atomic_t used; /* Number of valid elements in the queue */ + u16 used; /* Number of valid elements in the queue */ }; static inline u32 MODULO(u16 val, u16 limit) @@ -101,7 +101,7 @@ struct be_eq_obj { struct beiscsi_hba *phba; struct be_queue_info *cq; struct work_struct work_cqs; /* Work Item */ - struct blk_iopoll iopoll; + struct irq_poll iopoll; }; struct be_mcc_obj { @@ -110,10 +110,9 @@ struct be_mcc_obj { }; struct beiscsi_mcc_tag_state { -#define MCC_TAG_STATE_COMPLETED 0x00 -#define MCC_TAG_STATE_RUNNING 0x01 -#define MCC_TAG_STATE_TIMEOUT 0x02 - uint8_t tag_state; + unsigned long tag_state; +#define MCC_TAG_STATE_RUNNING 1 +#define MCC_TAG_STATE_TIMEOUT 2 struct be_dma_mem tag_mem_state; }; @@ -124,7 +123,7 @@ struct be_ctrl_info { struct pci_dev *pdev; /* Mbox used for cmd request/response */ - spinlock_t mbox_lock; /* For serializing mbox cmds to BE card */ + struct mutex mbox_lock; /* For serializing mbox cmds to BE card */ struct be_dma_mem mbox_mem; /* Mbox mem is adjusted to align to 16 bytes. 
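
The be.h hunk above follows the tree-wide rename of blk-iopoll to irq_poll (struct blk_iopoll becomes struct irq_poll, linux/blk-iopoll.h becomes linux/irq_poll.h). The driver-facing pattern is unchanged: the hard IRQ handler masks its event-queue interrupt and schedules the poller, and a softirq callback then drains completions against a budget, exactly as the be_main.c hunks later in this patch do. A condensed sketch of that pattern (the demo_* names are illustrative, not part of this patch):

	#include <linux/interrupt.h>
	#include <linux/irq_poll.h>

	struct demo_eq {
		struct irq_poll iopoll;
		/* ... event/completion queue state ... */
	};

	static int demo_iopoll(struct irq_poll *iop, int budget)
	{
		struct demo_eq *eq = container_of(iop, struct demo_eq, iopoll);
		int done = demo_process_cq(eq, budget);	/* driver-specific walk */

		if (done < budget) {
			/* queue drained: stop polling, re-enable the EQ IRQ */
			irq_poll_complete(iop);
			demo_rearm_eq(eq);
		}
		return done;
	}

	static irqreturn_t demo_isr(int irq, void *dev_id)
	{
		struct demo_eq *eq = dev_id;

		demo_mask_eq(eq);		/* keep the line quiet while polling */
		irq_poll_sched(&eq->iopoll);	/* demo_iopoll() runs from softirq */
		return IRQ_HANDLED;
	}

	/* at probe time: irq_poll_init(&eq->iopoll, DEMO_EQ_BUDGET, demo_iopoll); */

The same hunk also turns the queue occupancy counter into a plain u16 and mbox_lock into a mutex; both changes pay off later in the patch, where all MCC accounting moves under mcc_lock and the mailbox paths are allowed to sleep.
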
The allocated addr * is stored for freeing purpose */ @@ -133,11 +132,10 @@ struct be_ctrl_info { /* MCC Rings */ struct be_mcc_obj mcc_obj; spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */ - spinlock_t mcc_cq_lock; wait_queue_head_t mcc_wait[MAX_MCC_CMD + 1]; unsigned int mcc_tag[MAX_MCC_CMD]; - unsigned int mcc_numtag[MAX_MCC_CMD + 1]; + unsigned int mcc_tag_status[MAX_MCC_CMD + 1]; unsigned short mcc_alloc_index; unsigned short mcc_free_index; unsigned int mcc_tag_available; @@ -147,6 +145,12 @@ struct be_ctrl_info { #include "be_cmds.h" +/* WRB index mask for MCC_Q_LEN queue entries */ +#define MCC_Q_WRB_IDX_MASK CQE_STATUS_WRB_MASK +#define MCC_Q_WRB_IDX_SHIFT CQE_STATUS_WRB_SHIFT +/* TAG is from 1...MAX_MCC_CMD, MASK includes MAX_MCC_CMD */ +#define MCC_Q_CMD_TAG_MASK ((MAX_MCC_CMD << 1) - 1) + #define PAGE_SHIFT_4K 12 #define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K) #define mcc_timeout 120000 /* 12s timeout */ diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c index 2778089b01a5..a55eaeea37e7 100644 --- a/drivers/scsi/be2iscsi/be_cmds.c +++ b/drivers/scsi/be2iscsi/be_cmds.c @@ -104,24 +104,16 @@ int be_chk_reset_complete(struct beiscsi_hba *phba) return 0; } -void be_mcc_notify(struct beiscsi_hba *phba) -{ - struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q; - u32 val = 0; - - val |= mccq->id & DB_MCCQ_RING_ID_MASK; - val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT; - iowrite32(val, phba->db_va + DB_MCCQ_OFFSET); -} - unsigned int alloc_mcc_tag(struct beiscsi_hba *phba) { unsigned int tag = 0; + spin_lock(&phba->ctrl.mcc_lock); if (phba->ctrl.mcc_tag_available) { tag = phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index]; phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index] = 0; - phba->ctrl.mcc_numtag[tag] = 0; + phba->ctrl.mcc_tag_status[tag] = 0; + phba->ctrl.ptag_state[tag].tag_state = 0; } if (tag) { phba->ctrl.mcc_tag_available--; @@ -130,11 +122,89 @@ unsigned int alloc_mcc_tag(struct beiscsi_hba *phba) else phba->ctrl.mcc_alloc_index++; } + spin_unlock(&phba->ctrl.mcc_lock); return tag; } +struct be_mcc_wrb *alloc_mcc_wrb(struct beiscsi_hba *phba, + unsigned int *ref_tag) +{ + struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q; + struct be_mcc_wrb *wrb = NULL; + unsigned int tag; + + spin_lock_bh(&phba->ctrl.mcc_lock); + if (mccq->used == mccq->len) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT | + BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, + "BC_%d : MCC queue full: WRB used %u tag avail %u\n", + mccq->used, phba->ctrl.mcc_tag_available); + goto alloc_failed; + } + + if (!phba->ctrl.mcc_tag_available) + goto alloc_failed; + + tag = phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index]; + if (!tag) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT | + BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, + "BC_%d : MCC tag 0 allocated: tag avail %u alloc index %u\n", + phba->ctrl.mcc_tag_available, + phba->ctrl.mcc_alloc_index); + goto alloc_failed; + } + + /* return this tag for further reference */ + *ref_tag = tag; + phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index] = 0; + phba->ctrl.mcc_tag_status[tag] = 0; + phba->ctrl.ptag_state[tag].tag_state = 0; + phba->ctrl.mcc_tag_available--; + if (phba->ctrl.mcc_alloc_index == (MAX_MCC_CMD - 1)) + phba->ctrl.mcc_alloc_index = 0; + else + phba->ctrl.mcc_alloc_index++; + + wrb = queue_head_node(mccq); + memset(wrb, 0, sizeof(*wrb)); + wrb->tag0 = tag; + wrb->tag0 |= (mccq->head << MCC_Q_WRB_IDX_SHIFT) & MCC_Q_WRB_IDX_MASK; + queue_head_inc(mccq); + mccq->used++; + +alloc_failed: + spin_unlock_bh(&phba->ctrl.mcc_lock); + return wrb; +} + +void 
free_mcc_wrb(struct be_ctrl_info *ctrl, unsigned int tag)
+{
+	struct be_queue_info *mccq = &ctrl->mcc_obj.q;
+
+	spin_lock_bh(&ctrl->mcc_lock);
+	tag = tag & MCC_Q_CMD_TAG_MASK;
+	ctrl->mcc_tag[ctrl->mcc_free_index] = tag;
+	if (ctrl->mcc_free_index == (MAX_MCC_CMD - 1))
+		ctrl->mcc_free_index = 0;
+	else
+		ctrl->mcc_free_index++;
+	ctrl->mcc_tag_available++;
+	mccq->used--;
+	spin_unlock_bh(&ctrl->mcc_lock);
+}
+
+/**
+ * beiscsi_fail_session(): Closing session with appropriate error
+ * @cls_session: ptr to session
+ **/
+void beiscsi_fail_session(struct iscsi_cls_session *cls_session)
+{
+	iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
+}
+
 /*
- * beiscsi_mccq_compl()- Wait for completion of MBX
+ * beiscsi_mccq_compl_wait()- Process completion in MCC CQ
  * @phba: Driver private structure
  * @tag: Tag for the MBX Command
  * @wrb: the WRB used for the MBX Command
@@ -146,43 +216,40 @@ unsigned int alloc_mcc_tag(struct beiscsi_hba *phba)
  * Success: 0
  * Failure: Non-Zero
  **/
-int beiscsi_mccq_compl(struct beiscsi_hba *phba,
-		uint32_t tag, struct be_mcc_wrb **wrb,
-		struct be_dma_mem *mbx_cmd_mem)
+int beiscsi_mccq_compl_wait(struct beiscsi_hba *phba,
+			    uint32_t tag, struct be_mcc_wrb **wrb,
+			    struct be_dma_mem *mbx_cmd_mem)
 {
 	int rc = 0;
-	uint32_t mcc_tag_response;
+	uint32_t mcc_tag_status;
 	uint16_t status = 0, addl_status = 0, wrb_num = 0;
 	struct be_mcc_wrb *temp_wrb;
 	struct be_cmd_req_hdr *mbx_hdr;
 	struct be_cmd_resp_hdr *mbx_resp_hdr;
 	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
 
-	if (beiscsi_error(phba)) {
-		free_mcc_tag(&phba->ctrl, tag);
+	if (beiscsi_error(phba))
 		return -EPERM;
-	}
-
-	/* Set MBX Tag state to Active */
-	spin_lock(&phba->ctrl.mbox_lock);
-	phba->ctrl.ptag_state[tag].tag_state = MCC_TAG_STATE_RUNNING;
-	spin_unlock(&phba->ctrl.mbox_lock);
 
 	/* wait for the mccq completion */
 	rc = wait_event_interruptible_timeout(
 				phba->ctrl.mcc_wait[tag],
-				phba->ctrl.mcc_numtag[tag],
+				phba->ctrl.mcc_tag_status[tag],
 				msecs_to_jiffies(
 				BEISCSI_HOST_MBX_TIMEOUT));
-
+	/**
+	 * If the MBOX cmd timeout expires, the tag and resources allocated
+	 * for the cmd are not freed until FW returns the completion.
+	 */
 	if (rc <= 0) {
 		struct be_dma_mem *tag_mem;
-		/* Set MBX Tag state to timeout */
-		spin_lock(&phba->ctrl.mbox_lock);
-		phba->ctrl.ptag_state[tag].tag_state = MCC_TAG_STATE_TIMEOUT;
-		spin_unlock(&phba->ctrl.mbox_lock);
-		/* Store resource addr to be freed later */
+		/**
+		 * PCI/DMA memory allocated and posted in non-embedded mode
+		 * will have mbx_cmd_mem != NULL.
		 * Save virtual and bus addresses for the command so that it
		 * can be freed later.
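
Taken together, alloc_mcc_wrb()/free_mcc_wrb() and the tag_state bits replace the old enum-under-mbox_lock state tracking: tag and WRB accounting now happen together under mcc_lock, and per-tag lifetime becomes a set of atomic bit flags. Condensed to its essentials, the protocol these hunks establish looks like this (a sketch of the call sites in this patch, not new driver code):

	/* submit: alloc_mcc_wrb() reserves WRB + tag together, then
	 * be_mcc_notify() marks the tag live before ringing the doorbell */
	set_bit(MCC_TAG_STATE_RUNNING, &ctrl->ptag_state[tag].tag_state);

	/* waiter, on timeout: publish the DMA buffer to be freed, then
	 * flag the tag so the eventual completion cleans up instead of
	 * waking a waiter that has already given up */
	tag_mem->size = mbx_cmd_mem->size;	/* va and dma saved alongside */
	wmb();					/* tag_mem_state visible first */
	set_bit(MCC_TAG_STATE_TIMEOUT, &ctrl->ptag_state[tag].tag_state);

	/* completion path (beiscsi_process_mcc_compl(), later in this file):
	 * a late completion for a timed-out tag frees buffer, WRB and tag;
	 * a live tag has its status recorded and its waiter woken */
	if (test_bit(MCC_TAG_STATE_TIMEOUT, &ctrl->ptag_state[tag].tag_state)) {
		pci_free_consistent(ctrl->pdev, tag_mem->size,
				    tag_mem->va, tag_mem->dma);
		free_mcc_wrb(ctrl, tag);
	} else {
		clear_bit(MCC_TAG_STATE_RUNNING, &ctrl->ptag_state[tag].tag_state);
		wake_up_interruptible(&ctrl->mcc_wait[tag]);
	}
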
+ **/ tag_mem = &phba->ctrl.ptag_state[tag].tag_mem_state; if (mbx_cmd_mem) { tag_mem->size = mbx_cmd_mem->size; @@ -191,28 +258,28 @@ int beiscsi_mccq_compl(struct beiscsi_hba *phba, } else tag_mem->size = 0; + /* first make tag_mem_state visible to all */ + wmb(); + set_bit(MCC_TAG_STATE_TIMEOUT, + &phba->ctrl.ptag_state[tag].tag_state); + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT | BEISCSI_LOG_EH | BEISCSI_LOG_CONFIG, "BC_%d : MBX Cmd Completion timed out\n"); return -EBUSY; - } else { - rc = 0; - /* Set MBX Tag state to completed */ - spin_lock(&phba->ctrl.mbox_lock); - phba->ctrl.ptag_state[tag].tag_state = MCC_TAG_STATE_COMPLETED; - spin_unlock(&phba->ctrl.mbox_lock); } - mcc_tag_response = phba->ctrl.mcc_numtag[tag]; - status = (mcc_tag_response & CQE_STATUS_MASK); - addl_status = ((mcc_tag_response & CQE_STATUS_ADDL_MASK) >> + rc = 0; + mcc_tag_status = phba->ctrl.mcc_tag_status[tag]; + status = (mcc_tag_status & CQE_STATUS_MASK); + addl_status = ((mcc_tag_status & CQE_STATUS_ADDL_MASK) >> CQE_STATUS_ADDL_SHIFT); if (mbx_cmd_mem) { mbx_hdr = (struct be_cmd_req_hdr *)mbx_cmd_mem->va; } else { - wrb_num = (mcc_tag_response & CQE_STATUS_WRB_MASK) >> + wrb_num = (mcc_tag_status & CQE_STATUS_WRB_MASK) >> CQE_STATUS_WRB_SHIFT; temp_wrb = (struct be_mcc_wrb *)queue_get_wrb(mccq, wrb_num); mbx_hdr = embedded_payload(temp_wrb); @@ -231,7 +298,7 @@ int beiscsi_mccq_compl(struct beiscsi_hba *phba, mbx_hdr->subsystem, mbx_hdr->opcode, status, addl_status); - + rc = -EIO; if (status == MCC_STATUS_INSUFFICIENT_BUFFER) { mbx_resp_hdr = (struct be_cmd_resp_hdr *) mbx_hdr; beiscsi_log(phba, KERN_WARNING, @@ -241,70 +308,16 @@ int beiscsi_mccq_compl(struct beiscsi_hba *phba, "Resp_Len : %d Actual_Resp_Len : %d\n", mbx_resp_hdr->response_length, mbx_resp_hdr->actual_resp_len); - rc = -EAGAIN; - goto release_mcc_tag; } - rc = -EIO; } -release_mcc_tag: - /* Release the MCC entry */ - free_mcc_tag(&phba->ctrl, tag); - + free_mcc_wrb(&phba->ctrl, tag); return rc; } -void free_mcc_tag(struct be_ctrl_info *ctrl, unsigned int tag) -{ - spin_lock(&ctrl->mbox_lock); - tag = tag & 0x000000FF; - ctrl->mcc_tag[ctrl->mcc_free_index] = tag; - if (ctrl->mcc_free_index == (MAX_MCC_CMD - 1)) - ctrl->mcc_free_index = 0; - else - ctrl->mcc_free_index++; - ctrl->mcc_tag_available++; - spin_unlock(&ctrl->mbox_lock); -} - -bool is_link_state_evt(u32 trailer) -{ - return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) & - ASYNC_TRAILER_EVENT_CODE_MASK) == - ASYNC_EVENT_CODE_LINK_STATE); -} - -static bool is_iscsi_evt(u32 trailer) -{ - return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) & - ASYNC_TRAILER_EVENT_CODE_MASK) == - ASYNC_EVENT_CODE_ISCSI; -} - -static int iscsi_evt_type(u32 trailer) -{ - return (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) & - ASYNC_TRAILER_EVENT_TYPE_MASK; -} - -static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl) -{ - if (compl->flags != 0) { - compl->flags = le32_to_cpu(compl->flags); - WARN_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0); - return true; - } else - return false; -} - -static inline void be_mcc_compl_use(struct be_mcc_compl *compl) -{ - compl->flags = 0; -} - /* - * be_mcc_compl_process()- Check the MBX comapletion status + * beiscsi_process_mbox_compl()- Check the MBX completion status * @ctrl: Function specific MBX data structure * @compl: Completion status of MBX Command * @@ -314,8 +327,8 @@ static inline void be_mcc_compl_use(struct be_mcc_compl *compl) * Success: Zero * Failure: Non-Zero **/ -static int be_mcc_compl_process(struct be_ctrl_info *ctrl, - struct 
be_mcc_compl *compl) +static int beiscsi_process_mbox_compl(struct be_ctrl_info *ctrl, + struct be_mcc_compl *compl) { u16 compl_status, extd_status; struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); @@ -323,206 +336,228 @@ static int be_mcc_compl_process(struct be_ctrl_info *ctrl, struct be_cmd_req_hdr *hdr = embedded_payload(wrb); struct be_cmd_resp_hdr *resp_hdr; - be_dws_le_to_cpu(compl, 4); - - compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) & - CQE_STATUS_COMPL_MASK; - if (compl_status != MCC_STATUS_SUCCESS) { - extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) & - CQE_STATUS_EXTD_MASK; - + /** + * To check if valid bit is set, check the entire word as we don't know + * the endianness of the data (old entry is host endian while a new + * entry is little endian) + */ + if (!compl->flags) { beiscsi_log(phba, KERN_ERR, - BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, - "BC_%d : error in cmd completion: " - "Subsystem : %d Opcode : %d " - "status(compl/extd)=%d/%d\n", - hdr->subsystem, hdr->opcode, - compl_status, extd_status); - - if (compl_status == MCC_STATUS_INSUFFICIENT_BUFFER) { - resp_hdr = (struct be_cmd_resp_hdr *) hdr; - if (resp_hdr->response_length) - return 0; - } + BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, + "BC_%d : BMBX busy, no completion\n"); return -EBUSY; } - return 0; -} - -int be_mcc_compl_process_isr(struct be_ctrl_info *ctrl, - struct be_mcc_compl *compl) -{ - struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev); - u16 compl_status, extd_status; - unsigned short tag; + compl->flags = le32_to_cpu(compl->flags); + WARN_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0); + /** + * Just swap the status to host endian; + * mcc tag is opaquely copied from mcc_wrb. + */ be_dws_le_to_cpu(compl, 4); - compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) & - CQE_STATUS_COMPL_MASK; - /* The ctrl.mcc_numtag[tag] is filled with - * [31] = valid, [30:24] = Rsvd, [23:16] = wrb, [15:8] = extd_status, - * [7:0] = compl_status - */ - tag = (compl->tag0 & 0x000000FF); + CQE_STATUS_COMPL_MASK; extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) & - CQE_STATUS_EXTD_MASK; + CQE_STATUS_EXTD_MASK; + /* Need to reset the entire word that houses the valid bit */ + compl->flags = 0; - ctrl->mcc_numtag[tag] = 0x80000000; - ctrl->mcc_numtag[tag] |= (compl->tag0 & 0x00FF0000); - ctrl->mcc_numtag[tag] |= (extd_status & 0x000000FF) << 8; - ctrl->mcc_numtag[tag] |= (compl_status & 0x000000FF); + if (compl_status == MCC_STATUS_SUCCESS) + return 0; - if (ctrl->ptag_state[tag].tag_state == MCC_TAG_STATE_RUNNING) { - wake_up_interruptible(&ctrl->mcc_wait[tag]); - } else if (ctrl->ptag_state[tag].tag_state == MCC_TAG_STATE_TIMEOUT) { - struct be_dma_mem *tag_mem; - tag_mem = &ctrl->ptag_state[tag].tag_mem_state; + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, + "BC_%d : error in cmd completion: Subsystem : %d Opcode : %d status(compl/extd)=%d/%d\n", + hdr->subsystem, hdr->opcode, compl_status, extd_status); - beiscsi_log(phba, KERN_WARNING, - BEISCSI_LOG_MBOX | BEISCSI_LOG_INIT | - BEISCSI_LOG_CONFIG, - "BC_%d : MBX Completion for timeout Command " - "from FW\n"); - /* Check if memory needs to be freed */ - if (tag_mem->size) - pci_free_consistent(ctrl->pdev, tag_mem->size, - tag_mem->va, tag_mem->dma); - - /* Change tag state */ - spin_lock(&phba->ctrl.mbox_lock); - ctrl->ptag_state[tag].tag_state = MCC_TAG_STATE_COMPLETED; - spin_unlock(&phba->ctrl.mbox_lock); - - /* Free MCC Tag */ - free_mcc_tag(ctrl, tag); + if (compl_status == MCC_STATUS_INSUFFICIENT_BUFFER) { 
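
For reference, compl->status packs both codes into one 32-bit word; after the be_dws_le_to_cpu() swap above, a raw value decodes as in this worked example (the value is invented for illustration):

	/* compl->status == 0x001a0004
	 *   compl_status = (0x001a0004 >> CQE_STATUS_COMPL_SHIFT) & 0xFFFF
	 *                = 0x0004   MCC_STATUS_INSUFFICIENT_BUFFER
	 *   extd_status  = (0x001a0004 >> CQE_STATUS_EXTD_SHIFT) & 0xFFFF
	 *                = 0x001a   additional FW status code
	 */

which is exactly the INSUFFICIENT_BUFFER case handled next: the response header is consulted and, if the firmware did return a length, the nominal failure is treated as success.
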
+ /* if status is insufficient buffer, check the length */ + resp_hdr = (struct be_cmd_resp_hdr *) hdr; + if (resp_hdr->response_length) + return 0; } - - return 0; + return -EINVAL; } -static struct be_mcc_compl *be_mcc_compl_get(struct beiscsi_hba *phba) +static void beiscsi_process_async_link(struct beiscsi_hba *phba, + struct be_mcc_compl *compl) { - struct be_queue_info *mcc_cq = &phba->ctrl.mcc_obj.cq; - struct be_mcc_compl *compl = queue_tail_node(mcc_cq); + struct be_async_event_link_state *evt; + + evt = (struct be_async_event_link_state *)compl; - if (be_mcc_compl_is_new(compl)) { - queue_tail_inc(mcc_cq); - return compl; + phba->port_speed = evt->port_speed; + /** + * Check logical link status in ASYNC event. + * This has been newly introduced in SKH-R Firmware 10.0.338.45. + **/ + if (evt->port_link_status & BE_ASYNC_LINK_UP_MASK) { + phba->state = BE_ADAPTER_LINK_UP | BE_ADAPTER_CHECK_BOOT; + phba->get_boot = BE_GET_BOOT_RETRIES; + __beiscsi_log(phba, KERN_ERR, + "BC_%d : Link Up on Port %d tag 0x%x\n", + evt->physical_port, evt->event_tag); + } else { + phba->state = BE_ADAPTER_LINK_DOWN; + __beiscsi_log(phba, KERN_ERR, + "BC_%d : Link Down on Port %d tag 0x%x\n", + evt->physical_port, evt->event_tag); + iscsi_host_for_each_session(phba->shost, + beiscsi_fail_session); } - return NULL; } -/** - * be2iscsi_fail_session(): Closing session with appropriate error - * @cls_session: ptr to session - * - * Depending on adapter state appropriate error flag is passed. - **/ -void be2iscsi_fail_session(struct iscsi_cls_session *cls_session) +static char *beiscsi_port_misconf_event_msg[] = { + "Physical Link is functional.", + "Optics faulted/incorrectly installed/not installed - Reseat optics, if issue not resolved, replace.", + "Optics of two types installed - Remove one optic or install matching pair of optics.", + "Incompatible optics - Replace with compatible optics for card to function.", + "Unqualified optics - Replace with Avago optics for Warranty and Technical Support.", + "Uncertified optics - Replace with Avago Certified optics to enable link operation." +}; + +static void beiscsi_process_async_sli(struct beiscsi_hba *phba, + struct be_mcc_compl *compl) { - struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); - struct beiscsi_hba *phba = iscsi_host_priv(shost); - uint32_t iscsi_err_flag; + struct be_async_event_sli *async_sli; + u8 evt_type, state, old_state, le; + char *sev = KERN_WARNING; + char *msg = NULL; + + evt_type = compl->flags >> ASYNC_TRAILER_EVENT_TYPE_SHIFT; + evt_type &= ASYNC_TRAILER_EVENT_TYPE_MASK; + + /* processing only MISCONFIGURED physical port event */ + if (evt_type != ASYNC_SLI_EVENT_TYPE_MISCONFIGURED) + return; + + async_sli = (struct be_async_event_sli *)compl; + state = async_sli->event_data1 >> + (phba->fw_config.phys_port * 8) & 0xff; + le = async_sli->event_data2 >> + (phba->fw_config.phys_port * 8) & 0xff; + + old_state = phba->optic_state; + phba->optic_state = state; + + if (state >= ARRAY_SIZE(beiscsi_port_misconf_event_msg)) { + /* fw is reporting a state we don't know, log and return */ + __beiscsi_log(phba, KERN_ERR, + "BC_%d : Port %c: Unrecognized optic state 0x%x\n", + phba->port_name, async_sli->event_data1); + return; + } - if (phba->state & BE_ADAPTER_STATE_SHUTDOWN) - iscsi_err_flag = ISCSI_ERR_INVALID_HOST; - else - iscsi_err_flag = ISCSI_ERR_CONN_FAILED; + if (ASYNC_SLI_LINK_EFFECT_VALID(le)) { + /* log link effect for unqualified-4, uncertified-5 optics */ + if (state > 3) + msg = (ASYNC_SLI_LINK_EFFECT_STATE(le)) ? 
+ " Link is non-operational." : + " Link is operational."; + /* 1 - info */ + if (ASYNC_SLI_LINK_EFFECT_SEV(le) == 1) + sev = KERN_INFO; + /* 2 - error */ + if (ASYNC_SLI_LINK_EFFECT_SEV(le) == 2) + sev = KERN_ERR; + } - iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED); + if (old_state != phba->optic_state) + __beiscsi_log(phba, sev, "BC_%d : Port %c: %s%s\n", + phba->port_name, + beiscsi_port_misconf_event_msg[state], + !msg ? "" : msg); } -void beiscsi_async_link_state_process(struct beiscsi_hba *phba, - struct be_async_event_link_state *evt) +void beiscsi_process_async_event(struct beiscsi_hba *phba, + struct be_mcc_compl *compl) { - if ((evt->port_link_status == ASYNC_EVENT_LINK_DOWN) || - ((evt->port_link_status & ASYNC_EVENT_LOGICAL) && - (evt->port_fault != BEISCSI_PHY_LINK_FAULT_NONE))) { - phba->state = BE_ADAPTER_LINK_DOWN; - - beiscsi_log(phba, KERN_ERR, - BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT, - "BC_%d : Link Down on Port %d\n", - evt->physical_port); - - iscsi_host_for_each_session(phba->shost, - be2iscsi_fail_session); - } else if ((evt->port_link_status & ASYNC_EVENT_LINK_UP) || - ((evt->port_link_status & ASYNC_EVENT_LOGICAL) && - (evt->port_fault == BEISCSI_PHY_LINK_FAULT_NONE))) { - phba->state = BE_ADAPTER_LINK_UP | BE_ADAPTER_CHECK_BOOT; + char *sev = KERN_INFO; + u8 evt_code; + + /* interpret flags as an async trailer */ + evt_code = compl->flags >> ASYNC_TRAILER_EVENT_CODE_SHIFT; + evt_code &= ASYNC_TRAILER_EVENT_CODE_MASK; + switch (evt_code) { + case ASYNC_EVENT_CODE_LINK_STATE: + beiscsi_process_async_link(phba, compl); + break; + case ASYNC_EVENT_CODE_ISCSI: + phba->state |= BE_ADAPTER_CHECK_BOOT; phba->get_boot = BE_GET_BOOT_RETRIES; - - beiscsi_log(phba, KERN_ERR, - BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT, - "BC_%d : Link UP on Port %d\n", - evt->physical_port); + sev = KERN_ERR; + break; + case ASYNC_EVENT_CODE_SLI: + beiscsi_process_async_sli(phba, compl); + break; + default: + /* event not registered */ + sev = KERN_ERR; } + + beiscsi_log(phba, sev, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, + "BC_%d : ASYNC Event %x: status 0x%08x flags 0x%08x\n", + evt_code, compl->status, compl->flags); } -int beiscsi_process_mcc(struct beiscsi_hba *phba) +int beiscsi_process_mcc_compl(struct be_ctrl_info *ctrl, + struct be_mcc_compl *compl) { - struct be_mcc_compl *compl; - int num = 0, status = 0; - struct be_ctrl_info *ctrl = &phba->ctrl; + struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev); + u16 compl_status, extd_status; + struct be_dma_mem *tag_mem; + unsigned int tag, wrb_idx; - spin_lock_bh(&phba->ctrl.mcc_cq_lock); - while ((compl = be_mcc_compl_get(phba))) { - if (compl->flags & CQE_FLAGS_ASYNC_MASK) { - /* Interpret flags as an async trailer */ - if (is_link_state_evt(compl->flags)) - /* Interpret compl as a async link evt */ - beiscsi_async_link_state_process(phba, - (struct be_async_event_link_state *) compl); - else if (is_iscsi_evt(compl->flags)) { - switch (iscsi_evt_type(compl->flags)) { - case ASYNC_EVENT_NEW_ISCSI_TGT_DISC: - case ASYNC_EVENT_NEW_ISCSI_CONN: - case ASYNC_EVENT_NEW_TCP_CONN: - phba->state |= BE_ADAPTER_CHECK_BOOT; - phba->get_boot = BE_GET_BOOT_RETRIES; - beiscsi_log(phba, KERN_ERR, - BEISCSI_LOG_CONFIG | - BEISCSI_LOG_MBOX, - "BC_%d : Async iscsi Event," - " flags handled = 0x%08x\n", - compl->flags); - break; - default: - phba->state |= BE_ADAPTER_CHECK_BOOT; - phba->get_boot = BE_GET_BOOT_RETRIES; - beiscsi_log(phba, KERN_ERR, - BEISCSI_LOG_CONFIG | - BEISCSI_LOG_MBOX, - "BC_%d : Unsupported Async" - " Event, flags = 
0x%08x\n", - compl->flags); - } - } else - beiscsi_log(phba, KERN_ERR, - BEISCSI_LOG_CONFIG | - BEISCSI_LOG_MBOX, - "BC_%d : Unsupported Async Event, flags" - " = 0x%08x\n", compl->flags); - - } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) { - status = be_mcc_compl_process(ctrl, compl); - atomic_dec(&phba->ctrl.mcc_obj.q.used); - } - be_mcc_compl_use(compl); - num++; + be_dws_le_to_cpu(compl, 4); + tag = (compl->tag0 & MCC_Q_CMD_TAG_MASK); + wrb_idx = (compl->tag0 & CQE_STATUS_WRB_MASK) >> CQE_STATUS_WRB_SHIFT; + + if (!test_bit(MCC_TAG_STATE_RUNNING, + &ctrl->ptag_state[tag].tag_state)) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_MBOX | + BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG, + "BC_%d : MBX cmd completed but not posted\n"); + return 0; } - if (num) - hwi_ring_cq_db(phba, phba->ctrl.mcc_obj.cq.id, num, 1, 0); + if (test_bit(MCC_TAG_STATE_TIMEOUT, &ctrl->ptag_state[tag].tag_state)) { + beiscsi_log(phba, KERN_WARNING, + BEISCSI_LOG_MBOX | BEISCSI_LOG_INIT | + BEISCSI_LOG_CONFIG, + "BC_%d : MBX Completion for timeout Command from FW\n"); + /** + * Check for the size before freeing resource. + * Only for non-embedded cmd, PCI resource is allocated. + **/ + tag_mem = &ctrl->ptag_state[tag].tag_mem_state; + if (tag_mem->size) + pci_free_consistent(ctrl->pdev, tag_mem->size, + tag_mem->va, tag_mem->dma); + free_mcc_wrb(ctrl, tag); + return 0; + } - spin_unlock_bh(&phba->ctrl.mcc_cq_lock); - return status; + compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) & + CQE_STATUS_COMPL_MASK; + extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) & + CQE_STATUS_EXTD_MASK; + /* The ctrl.mcc_tag_status[tag] is filled with + * [31] = valid, [30:24] = Rsvd, [23:16] = wrb, [15:8] = extd_status, + * [7:0] = compl_status + */ + ctrl->mcc_tag_status[tag] = CQE_VALID_MASK; + ctrl->mcc_tag_status[tag] |= (wrb_idx << CQE_STATUS_WRB_SHIFT); + ctrl->mcc_tag_status[tag] |= (extd_status << CQE_STATUS_ADDL_SHIFT) & + CQE_STATUS_ADDL_MASK; + ctrl->mcc_tag_status[tag] |= (compl_status & CQE_STATUS_MASK); + + /* write ordering forced in wake_up_interruptible */ + clear_bit(MCC_TAG_STATE_RUNNING, &ctrl->ptag_state[tag].tag_state); + wake_up_interruptible(&ctrl->mcc_wait[tag]); + return 0; } /* - * be_mcc_wait_compl()- Wait for MBX completion + * be_mcc_compl_poll()- Wait for MBX completion * @phba: driver private structure * * Wait till no more pending mcc requests are present @@ -532,50 +567,57 @@ int beiscsi_process_mcc(struct beiscsi_hba *phba) * Failure: Non-Zero * **/ -static int be_mcc_wait_compl(struct beiscsi_hba *phba) +int be_mcc_compl_poll(struct beiscsi_hba *phba, unsigned int tag) { - int i, status; + struct be_ctrl_info *ctrl = &phba->ctrl; + int i; + + if (!test_bit(MCC_TAG_STATE_RUNNING, + &ctrl->ptag_state[tag].tag_state)) { + beiscsi_log(phba, KERN_ERR, + BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, + "BC_%d: tag %u state not running\n", tag); + return 0; + } for (i = 0; i < mcc_timeout; i++) { if (beiscsi_error(phba)) return -EIO; - status = beiscsi_process_mcc(phba); - if (status) - return status; - - if (atomic_read(&phba->ctrl.mcc_obj.q.used) == 0) + beiscsi_process_mcc_cq(phba); + /* after polling, wrb and tag need to be released */ + if (!test_bit(MCC_TAG_STATE_RUNNING, + &ctrl->ptag_state[tag].tag_state)) { + free_mcc_wrb(ctrl, tag); break; + } udelay(100); } - if (i == mcc_timeout) { - beiscsi_log(phba, KERN_ERR, - BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, - "BC_%d : FW Timed Out\n"); - phba->fw_timeout = true; - beiscsi_ue_detect(phba); - return -EBUSY; - } - return 0; + + if (i < mcc_timeout) 
+ return 0; + + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, + "BC_%d : FW Timed Out\n"); + phba->fw_timeout = true; + beiscsi_ue_detect(phba); + return -EBUSY; } -/* - * be_mcc_notify_wait()- Notify and wait for Compl - * @phba: driver private structure - * - * Notify MCC requests and wait for completion - * - * return - * Success: 0 - * Failure: Non-Zero - **/ -int be_mcc_notify_wait(struct beiscsi_hba *phba) +void be_mcc_notify(struct beiscsi_hba *phba, unsigned int tag) { - be_mcc_notify(phba); - return be_mcc_wait_compl(phba); + struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q; + u32 val = 0; + + set_bit(MCC_TAG_STATE_RUNNING, &phba->ctrl.ptag_state[tag].tag_state); + val |= mccq->id & DB_MCCQ_RING_ID_MASK; + val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT; + /* make request available for DMA */ + wmb(); + iowrite32(val, phba->db_va + DB_MCCQ_OFFSET); } /* - * be_mbox_db_ready_wait()- Check ready status + * be_mbox_db_ready_poll()- Check ready status * @ctrl: Function specific MBX data structure * * Check for the ready status of FW to send BMBX @@ -585,49 +627,45 @@ int be_mcc_notify_wait(struct beiscsi_hba *phba) * Success: 0 * Failure: Non-Zero **/ -static int be_mbox_db_ready_wait(struct be_ctrl_info *ctrl) +static int be_mbox_db_ready_poll(struct be_ctrl_info *ctrl) { -#define BEISCSI_MBX_RDY_BIT_TIMEOUT 4000 /* 4sec */ + /* wait 30s for generic non-flash MBOX operation */ +#define BEISCSI_MBX_RDY_BIT_TIMEOUT 30000 void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET; struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev); unsigned long timeout; - bool read_flag = false; - int ret = 0, i; u32 ready; - DECLARE_WAIT_QUEUE_HEAD_ONSTACK(rdybit_check_q); - if (beiscsi_error(phba)) - return -EIO; + /* + * This BMBX busy wait path is used during init only. + * For the commands executed during init, 5s should suffice. 
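
Two details in be_mcc_notify() above are easy to miss. The tag is flagged RUNNING before the doorbell write, because a completion can arrive as soon as the hardware sees the doorbell, and beiscsi_process_mcc_compl() discards completions for tags that are not marked RUNNING. The wmb() then orders the WRB payload ahead of the doorbell write that lets the firmware consume it. A caller strings the new primitives together like this (a hypothetical usage sketch; be_cmd_set_vlan() later in this file follows the allocate-and-notify half of this shape):

	struct be_mcc_wrb *wrb;
	unsigned int tag;
	int rc;

	wrb = alloc_mcc_wrb(phba, &tag);	/* WRB and tag reserved together */
	if (!wrb)
		return -EBUSY;
	/* ... build the command in embedded_payload(wrb) ... */
	be_mcc_notify(phba, tag);		/* sets RUNNING, rings doorbell */

	/* sleeping context: wait for the completion to post the status */
	rc = beiscsi_mccq_compl_wait(phba, tag, &wrb, NULL);
	/* or, where waiting on the queue is not possible, busy-poll: */
	rc = be_mcc_compl_poll(phba, tag);
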
+ */ + timeout = jiffies + msecs_to_jiffies(BEISCSI_MBX_RDY_BIT_TIMEOUT); + do { + if (beiscsi_error(phba)) + return -EIO; - timeout = jiffies + (HZ * 110); + ready = ioread32(db); + if (ready == 0xffffffff) + return -EIO; - do { - for (i = 0; i < BEISCSI_MBX_RDY_BIT_TIMEOUT; i++) { - ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK; - if (ready) { - read_flag = true; - break; - } - mdelay(1); - } + ready &= MPU_MAILBOX_DB_RDY_MASK; + if (ready) + return 0; - if (!read_flag) { - wait_event_timeout(rdybit_check_q, - (read_flag != true), - HZ * 5); - } - } while ((time_before(jiffies, timeout)) && !read_flag); + if (time_after(jiffies, timeout)) + break; + msleep(20); + } while (!ready); - if (!read_flag) { - beiscsi_log(phba, KERN_ERR, - BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, - "BC_%d : FW Timed Out\n"); - phba->fw_timeout = true; - beiscsi_ue_detect(phba); - ret = -EBUSY; - } + beiscsi_log(phba, KERN_ERR, + BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, + "BC_%d : FW Timed Out\n"); + + phba->fw_timeout = true; + beiscsi_ue_detect(phba); - return ret; + return -EBUSY; } /* @@ -648,10 +686,8 @@ int be_mbox_notify(struct be_ctrl_info *ctrl) void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET; struct be_dma_mem *mbox_mem = &ctrl->mbox_mem; struct be_mcc_mailbox *mbox = mbox_mem->va; - struct be_mcc_compl *compl = &mbox->compl; - struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev); - status = be_mbox_db_ready_wait(ctrl); + status = be_mbox_db_ready_poll(ctrl); if (status) return status; @@ -660,7 +696,7 @@ int be_mbox_notify(struct be_ctrl_info *ctrl) val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2; iowrite32(val, db); - status = be_mbox_db_ready_wait(ctrl); + status = be_mbox_db_ready_poll(ctrl); if (status) return status; @@ -670,81 +706,15 @@ int be_mbox_notify(struct be_ctrl_info *ctrl) val |= (u32) (mbox_mem->dma >> 4) << 2; iowrite32(val, db); - status = be_mbox_db_ready_wait(ctrl); + status = be_mbox_db_ready_poll(ctrl); if (status) return status; - if (be_mcc_compl_is_new(compl)) { - status = be_mcc_compl_process(ctrl, &mbox->compl); - be_mcc_compl_use(compl); - if (status) { - beiscsi_log(phba, KERN_ERR, - BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, - "BC_%d : After be_mcc_compl_process\n"); - - return status; - } - } else { - beiscsi_log(phba, KERN_ERR, - BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, - "BC_%d : Invalid Mailbox Completion\n"); - - return -EBUSY; - } - return 0; -} - -/* - * Insert the mailbox address into the doorbell in two steps - * Polls on the mbox doorbell till a command completion (or a timeout) occurs - */ -static int be_mbox_notify_wait(struct beiscsi_hba *phba) -{ - int status; - u32 val = 0; - void __iomem *db = phba->ctrl.db + MPU_MAILBOX_DB_OFFSET; - struct be_dma_mem *mbox_mem = &phba->ctrl.mbox_mem; - struct be_mcc_mailbox *mbox = mbox_mem->va; - struct be_mcc_compl *compl = &mbox->compl; - struct be_ctrl_info *ctrl = &phba->ctrl; - - status = be_mbox_db_ready_wait(ctrl); - if (status) - return status; - - val |= MPU_MAILBOX_DB_HI_MASK; - /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */ - val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2; - iowrite32(val, db); - - /* wait for ready to be set */ - status = be_mbox_db_ready_wait(ctrl); - if (status != 0) - return status; - - val = 0; - /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */ - val |= (u32)(mbox_mem->dma >> 4) << 2; - iowrite32(val, db); + /* RDY is set; small delay before CQE read. 
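
The rewritten ready poll above also changes how the driver waits: the old version mdelay()ed a millisecond at a time inside nested loops around an on-stack wait queue, while the new one may msleep() because every caller now takes mbox_lock as a mutex. Stripped to its skeleton, it is the standard jiffies-bounded poll (db and the mask as used in the function above):

	unsigned long timeout = jiffies +
			msecs_to_jiffies(BEISCSI_MBX_RDY_BIT_TIMEOUT);
	u32 ready;

	do {
		ready = ioread32(db);
		if (ready == 0xffffffff)	/* all ones: PCI read failed */
			return -EIO;
		if (ready & MPU_MAILBOX_DB_RDY_MASK)
			return 0;		/* firmware is ready */
		if (time_after(jiffies, timeout))
			break;			/* give up, report FW timeout */
		msleep(20);			/* sleeping is fine under a mutex */
	} while (1);
	return -EBUSY;
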
*/ + udelay(1); - status = be_mbox_db_ready_wait(ctrl); - if (status != 0) - return status; - - /* A cq entry has been made now */ - if (be_mcc_compl_is_new(compl)) { - status = be_mcc_compl_process(ctrl, &mbox->compl); - be_mcc_compl_use(compl); - if (status) - return status; - } else { - beiscsi_log(phba, KERN_ERR, - BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, - "BC_%d : invalid mailbox completion\n"); - - return -EBUSY; - } - return 0; + status = beiscsi_process_mbox_compl(ctrl, &mbox->compl); + return status; } void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len, @@ -809,21 +779,6 @@ struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem) return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb; } -struct be_mcc_wrb *wrb_from_mccq(struct beiscsi_hba *phba) -{ - struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q; - struct be_mcc_wrb *wrb; - - WARN_ON(atomic_read(&mccq->used) >= mccq->len); - wrb = queue_head_node(mccq); - memset(wrb, 0, sizeof(*wrb)); - wrb->tag0 = (mccq->head & 0x000000FF) << 16; - queue_head_inc(mccq); - atomic_inc(&mccq->used); - return wrb; -} - - int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl, struct be_queue_info *eq, int eq_delay) { @@ -833,7 +788,7 @@ int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem = &eq->dma_mem; int status; - spin_lock(&ctrl->mbox_lock); + mutex_lock(&ctrl->mbox_lock); memset(wrb, 0, sizeof(*wrb)); be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); @@ -860,7 +815,7 @@ int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl, eq->id = le16_to_cpu(resp->eq_id); eq->created = true; } - spin_unlock(&ctrl->mbox_lock); + mutex_unlock(&ctrl->mbox_lock); return status; } @@ -881,7 +836,7 @@ int be_cmd_fw_initialize(struct be_ctrl_info *ctrl) int status; u8 *endian_check; - spin_lock(&ctrl->mbox_lock); + mutex_lock(&ctrl->mbox_lock); memset(wrb, 0, sizeof(*wrb)); endian_check = (u8 *) wrb; @@ -900,7 +855,7 @@ int be_cmd_fw_initialize(struct be_ctrl_info *ctrl) beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BC_%d : be_cmd_fw_initialize Failed\n"); - spin_unlock(&ctrl->mbox_lock); + mutex_unlock(&ctrl->mbox_lock); return status; } @@ -921,7 +876,7 @@ int be_cmd_fw_uninit(struct be_ctrl_info *ctrl) int status; u8 *endian_check; - spin_lock(&ctrl->mbox_lock); + mutex_lock(&ctrl->mbox_lock); memset(wrb, 0, sizeof(*wrb)); endian_check = (u8 *) wrb; @@ -941,7 +896,7 @@ int be_cmd_fw_uninit(struct be_ctrl_info *ctrl) beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BC_%d : be_cmd_fw_uninit Failed\n"); - spin_unlock(&ctrl->mbox_lock); + mutex_unlock(&ctrl->mbox_lock); return status; } @@ -957,7 +912,7 @@ int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl, void *ctxt = &req->context; int status; - spin_lock(&ctrl->mbox_lock); + mutex_lock(&ctrl->mbox_lock); memset(wrb, 0, sizeof(*wrb)); be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); @@ -1007,7 +962,7 @@ int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl, "BC_%d : In be_cmd_cq_create, status=ox%08x\n", status); - spin_unlock(&ctrl->mbox_lock); + mutex_unlock(&ctrl->mbox_lock); return status; } @@ -1025,13 +980,13 @@ int beiscsi_cmd_mccq_create(struct beiscsi_hba *phba, struct be_queue_info *cq) { struct be_mcc_wrb *wrb; - struct be_cmd_req_mcc_create *req; + struct be_cmd_req_mcc_create_ext *req; struct be_dma_mem *q_mem = &mccq->dma_mem; struct be_ctrl_info *ctrl; void *ctxt; int status; - spin_lock(&phba->ctrl.mbox_lock); + mutex_lock(&phba->ctrl.mbox_lock); ctrl = &phba->ctrl; wrb = wrb_from_mbox(&ctrl->mbox_mem); memset(wrb, 0, sizeof(*wrb)); @@ -1041,9 +996,12 @@ int 
beiscsi_cmd_mccq_create(struct beiscsi_hba *phba, be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, - OPCODE_COMMON_MCC_CREATE, sizeof(*req)); + OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req)); req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); + req->async_evt_bitmap = 1 << ASYNC_EVENT_CODE_LINK_STATE; + req->async_evt_bitmap |= 1 << ASYNC_EVENT_CODE_ISCSI; + req->async_evt_bitmap |= 1 << ASYNC_EVENT_CODE_SLI; AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt, PCI_FUNC(phba->pcidev->devfn)); @@ -1056,13 +1014,13 @@ int beiscsi_cmd_mccq_create(struct beiscsi_hba *phba, be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); - status = be_mbox_notify_wait(phba); + status = be_mbox_notify(ctrl); if (!status) { struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb); mccq->id = le16_to_cpu(resp->id); mccq->created = true; } - spin_unlock(&phba->ctrl.mbox_lock); + mutex_unlock(&phba->ctrl.mbox_lock); return status; } @@ -1080,7 +1038,7 @@ int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q, "BC_%d : In beiscsi_cmd_q_destroy " "queue_type : %d\n", queue_type); - spin_lock(&ctrl->mbox_lock); + mutex_lock(&ctrl->mbox_lock); memset(wrb, 0, sizeof(*wrb)); be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); @@ -1110,7 +1068,7 @@ int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q, opcode = OPCODE_COMMON_ISCSI_CFG_REMOVE_SGL_PAGES; break; default: - spin_unlock(&ctrl->mbox_lock); + mutex_unlock(&ctrl->mbox_lock); BUG(); return -ENXIO; } @@ -1120,7 +1078,7 @@ int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q, status = be_mbox_notify(ctrl); - spin_unlock(&ctrl->mbox_lock); + mutex_unlock(&ctrl->mbox_lock); return status; } @@ -1155,7 +1113,7 @@ int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl, void *ctxt = &req->context; int status; - spin_lock(&ctrl->mbox_lock); + mutex_lock(&ctrl->mbox_lock); memset(wrb, 0, sizeof(*wrb)); be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); @@ -1227,7 +1185,7 @@ int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl, defq_ring->doorbell_offset = resp->doorbell_offset; } } - spin_unlock(&ctrl->mbox_lock); + mutex_unlock(&ctrl->mbox_lock); return status; } @@ -1255,7 +1213,7 @@ int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev); int status; - spin_lock(&ctrl->mbox_lock); + mutex_lock(&ctrl->mbox_lock); memset(wrb, 0, sizeof(*wrb)); be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); @@ -1286,7 +1244,7 @@ int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, pwrb_context->doorbell_offset = resp->doorbell_offset; } } - spin_unlock(&ctrl->mbox_lock); + mutex_unlock(&ctrl->mbox_lock); return status; } @@ -1297,7 +1255,7 @@ int be_cmd_iscsi_post_template_hdr(struct be_ctrl_info *ctrl, struct be_post_template_pages_req *req = embedded_payload(wrb); int status; - spin_lock(&ctrl->mbox_lock); + mutex_lock(&ctrl->mbox_lock); memset(wrb, 0, sizeof(*wrb)); be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); @@ -1310,7 +1268,7 @@ int be_cmd_iscsi_post_template_hdr(struct be_ctrl_info *ctrl, be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); status = be_mbox_notify(ctrl); - spin_unlock(&ctrl->mbox_lock); + mutex_unlock(&ctrl->mbox_lock); return status; } @@ -1320,7 +1278,7 @@ int be_cmd_iscsi_remove_template_hdr(struct be_ctrl_info *ctrl) struct be_remove_template_pages_req *req = embedded_payload(wrb); int status; - spin_lock(&ctrl->mbox_lock); + 
mutex_lock(&ctrl->mbox_lock); memset(wrb, 0, sizeof(*wrb)); be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); @@ -1331,7 +1289,7 @@ int be_cmd_iscsi_remove_template_hdr(struct be_ctrl_info *ctrl) req->type = BEISCSI_TEMPLATE_HDR_TYPE_ISCSI; status = be_mbox_notify(ctrl); - spin_unlock(&ctrl->mbox_lock); + mutex_unlock(&ctrl->mbox_lock); return status; } @@ -1350,7 +1308,7 @@ int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl, if (num_pages == 0xff) num_pages = 1; - spin_lock(&ctrl->mbox_lock); + mutex_lock(&ctrl->mbox_lock); do { memset(wrb, 0, sizeof(*wrb)); be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); @@ -1379,7 +1337,7 @@ int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl, } } while (num_pages > 0); error: - spin_unlock(&ctrl->mbox_lock); + mutex_unlock(&ctrl->mbox_lock); if (status != 0) beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL); return status; @@ -1392,15 +1350,15 @@ int beiscsi_cmd_reset_function(struct beiscsi_hba *phba) struct be_post_sgl_pages_req *req = embedded_payload(wrb); int status; - spin_lock(&ctrl->mbox_lock); + mutex_lock(&ctrl->mbox_lock); req = embedded_payload(wrb); be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_FUNCTION_RESET, sizeof(*req)); - status = be_mbox_notify_wait(phba); + status = be_mbox_notify(ctrl); - spin_unlock(&ctrl->mbox_lock); + mutex_unlock(&ctrl->mbox_lock); return status; } @@ -1417,21 +1375,20 @@ int beiscsi_cmd_reset_function(struct beiscsi_hba *phba) int be_cmd_set_vlan(struct beiscsi_hba *phba, uint16_t vlan_tag) { - unsigned int tag = 0; + unsigned int tag; struct be_mcc_wrb *wrb; struct be_cmd_set_vlan_req *req; struct be_ctrl_info *ctrl = &phba->ctrl; - spin_lock(&ctrl->mbox_lock); - tag = alloc_mcc_tag(phba); - if (!tag) { - spin_unlock(&ctrl->mbox_lock); - return tag; + if (mutex_lock_interruptible(&ctrl->mbox_lock)) + return 0; + wrb = alloc_mcc_wrb(phba, &tag); + if (!wrb) { + mutex_unlock(&ctrl->mbox_lock); + return 0; } - wrb = wrb_from_mccq(phba); req = embedded_payload(wrb); - wrb->tag0 |= tag; be_wrb_hdr_prepare(wrb, sizeof(*wrb), true, 0); be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, OPCODE_COMMON_ISCSI_NTWK_SET_VLAN, @@ -1440,8 +1397,8 @@ int be_cmd_set_vlan(struct beiscsi_hba *phba, req->interface_hndl = phba->interface_handle; req->vlan_priority = vlan_tag; - be_mcc_notify(phba); - spin_unlock(&ctrl->mbox_lock); + be_mcc_notify(phba, tag); + mutex_unlock(&ctrl->mbox_lock); return tag; } diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h index 4bfca355fbe4..deeb951e6874 100644 --- a/drivers/scsi/be2iscsi/be_cmds.h +++ b/drivers/scsi/be2iscsi/be_cmds.h @@ -58,15 +58,16 @@ struct be_mcc_wrb { #define MCC_STATUS_ILLEGAL_FIELD 0x3 #define MCC_STATUS_INSUFFICIENT_BUFFER 0x4 -#define CQE_STATUS_COMPL_MASK 0xFFFF -#define CQE_STATUS_COMPL_SHIFT 0 /* bits 0 - 15 */ -#define CQE_STATUS_EXTD_MASK 0xFFFF -#define CQE_STATUS_EXTD_SHIFT 16 /* bits 0 - 15 */ +#define CQE_STATUS_COMPL_MASK 0xFFFF +#define CQE_STATUS_COMPL_SHIFT 0 /* bits 0 - 15 */ +#define CQE_STATUS_EXTD_MASK 0xFFFF +#define CQE_STATUS_EXTD_SHIFT 16 /* bits 31 - 16 */ #define CQE_STATUS_ADDL_MASK 0xFF00 -#define CQE_STATUS_MASK 0xFF -#define CQE_STATUS_ADDL_SHIFT 0x08 +#define CQE_STATUS_ADDL_SHIFT 8 +#define CQE_STATUS_MASK 0xFF #define CQE_STATUS_WRB_MASK 0xFF0000 #define CQE_STATUS_WRB_SHIFT 16 + #define BEISCSI_HOST_MBX_TIMEOUT (110 * 1000) #define BEISCSI_FW_MBX_TIMEOUT 100 @@ -119,13 +120,22 @@ struct be_mcc_compl { #define ASYNC_TRAILER_EVENT_CODE_MASK 
0xFF #define ASYNC_EVENT_CODE_LINK_STATE 0x1 #define ASYNC_EVENT_CODE_ISCSI 0x4 +#define ASYNC_EVENT_CODE_SLI 0x11 #define ASYNC_TRAILER_EVENT_TYPE_SHIFT 16 /* bits 16 - 23 */ -#define ASYNC_TRAILER_EVENT_TYPE_MASK 0xF +#define ASYNC_TRAILER_EVENT_TYPE_MASK 0xFF + +/* iSCSI events */ #define ASYNC_EVENT_NEW_ISCSI_TGT_DISC 0x4 #define ASYNC_EVENT_NEW_ISCSI_CONN 0x5 #define ASYNC_EVENT_NEW_TCP_CONN 0x7 +/* SLI events */ +#define ASYNC_SLI_EVENT_TYPE_MISCONFIGURED 0x9 +#define ASYNC_SLI_LINK_EFFECT_VALID(le) (le & 0x80) +#define ASYNC_SLI_LINK_EFFECT_SEV(le) ((le >> 1) & 0x03) +#define ASYNC_SLI_LINK_EFFECT_STATE(le) (le & 0x01) + struct be_async_event_trailer { u32 code; }; @@ -133,7 +143,6 @@ struct be_async_event_trailer { enum { ASYNC_EVENT_LINK_DOWN = 0x0, ASYNC_EVENT_LINK_UP = 0x1, - ASYNC_EVENT_LOGICAL = 0x2 }; /** @@ -143,16 +152,39 @@ enum { struct be_async_event_link_state { u8 physical_port; u8 port_link_status; +/** + * ASYNC_EVENT_LINK_DOWN 0x0 + * ASYNC_EVENT_LINK_UP 0x1 + * ASYNC_EVENT_LINK_LOGICAL_DOWN 0x2 + * ASYNC_EVENT_LINK_LOGICAL_UP 0x3 + */ +#define BE_ASYNC_LINK_UP_MASK 0x01 u8 port_duplex; u8 port_speed; -#define BEISCSI_PHY_LINK_FAULT_NONE 0x00 -#define BEISCSI_PHY_LINK_FAULT_LOCAL 0x01 -#define BEISCSI_PHY_LINK_FAULT_REMOTE 0x02 +/* BE2ISCSI_LINK_SPEED_ZERO 0x00 - no link */ +#define BE2ISCSI_LINK_SPEED_10MBPS 0x01 +#define BE2ISCSI_LINK_SPEED_100MBPS 0x02 +#define BE2ISCSI_LINK_SPEED_1GBPS 0x03 +#define BE2ISCSI_LINK_SPEED_10GBPS 0x04 +#define BE2ISCSI_LINK_SPEED_25GBPS 0x06 +#define BE2ISCSI_LINK_SPEED_40GBPS 0x07 u8 port_fault; - u8 rsvd0[7]; + u8 event_reason; + u16 qos_link_speed; + u32 event_tag; struct be_async_event_trailer trailer; } __packed; +/** + * When async-trailer is SLI event, mcc_compl is interpreted as + */ +struct be_async_event_sli { + u32 event_data1; + u32 event_data2; + u32 reserved; + u32 trailer; +} __packed; + struct be_mcc_mailbox { struct be_mcc_wrb wrb; struct be_mcc_compl compl; @@ -172,6 +204,7 @@ struct be_mcc_mailbox { #define OPCODE_COMMON_CQ_CREATE 12 #define OPCODE_COMMON_EQ_CREATE 13 #define OPCODE_COMMON_MCC_CREATE 21 +#define OPCODE_COMMON_MCC_CREATE_EXT 90 #define OPCODE_COMMON_ADD_TEMPLATE_HEADER_BUFFERS 24 #define OPCODE_COMMON_REMOVE_TEMPLATE_HEADER_BUFFERS 25 #define OPCODE_COMMON_GET_CNTL_ATTRIBUTES 32 @@ -183,6 +216,7 @@ struct be_mcc_mailbox { #define OPCODE_COMMON_EQ_DESTROY 55 #define OPCODE_COMMON_QUERY_FIRMWARE_CONFIG 58 #define OPCODE_COMMON_FUNCTION_RESET 61 +#define OPCODE_COMMON_GET_PORT_NAME 77 /** * LIST of opcodes that are common between Initiator and Target @@ -587,10 +621,11 @@ struct amap_mcc_context { u8 rsvd2[32]; } __packed; -struct be_cmd_req_mcc_create { +struct be_cmd_req_mcc_create_ext { struct be_cmd_req_hdr hdr; u16 num_pages; u16 rsvd0; + u32 async_evt_bitmap; u8 context[sizeof(struct amap_mcc_context) / 8]; struct phys_addr pages[8]; } __packed; @@ -653,20 +688,6 @@ struct be_cmd_req_modify_eq_delay { /******************** Get MAC ADDR *******************/ -#define ETH_ALEN 6 - -struct be_cmd_get_nic_conf_req { - struct be_cmd_req_hdr hdr; - u32 nic_port_count; - u32 speed; - u32 max_speed; - u32 link_state; - u32 max_frame_size; - u16 size_of_structure; - u8 mac_address[ETH_ALEN]; - u32 rsvd[23]; -}; - struct be_cmd_get_nic_conf_resp { struct be_cmd_resp_hdr hdr; u32 nic_port_count; @@ -675,9 +696,8 @@ struct be_cmd_get_nic_conf_resp { u32 link_state; u32 max_frame_size; u16 size_of_structure; - u8 mac_address[6]; - u32 rsvd[23]; -}; + u8 mac_address[ETH_ALEN]; +} __packed; #define 
BEISCSI_ALIAS_LEN 32 @@ -689,29 +709,6 @@ struct be_cmd_hba_name { u8 initiator_alias[BEISCSI_ALIAS_LEN]; } __packed; -struct be_cmd_ntwk_link_status_req { - struct be_cmd_req_hdr hdr; - u32 rsvd0; -} __packed; - -/*** Port Speed Values ***/ -#define BE2ISCSI_LINK_SPEED_ZERO 0x00 -#define BE2ISCSI_LINK_SPEED_10MBPS 0x01 -#define BE2ISCSI_LINK_SPEED_100MBPS 0x02 -#define BE2ISCSI_LINK_SPEED_1GBPS 0x03 -#define BE2ISCSI_LINK_SPEED_10GBPS 0x04 -struct be_cmd_ntwk_link_status_resp { - struct be_cmd_resp_hdr hdr; - u8 phys_port; - u8 mac_duplex; - u8 mac_speed; - u8 mac_fault; - u8 mgmt_mac_duplex; - u8 mgmt_mac_speed; - u16 qos_link_speed; - u32 logical_link_speed; -} __packed; - int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl, struct be_queue_info *eq, int eq_delay); @@ -730,28 +727,28 @@ int be_poll_mcc(struct be_ctrl_info *ctrl); int mgmt_check_supported_fw(struct be_ctrl_info *ctrl, struct beiscsi_hba *phba); unsigned int be_cmd_get_initname(struct beiscsi_hba *phba); -unsigned int be_cmd_get_port_speed(struct beiscsi_hba *phba); -void free_mcc_tag(struct be_ctrl_info *ctrl, unsigned int tag); +void free_mcc_wrb(struct be_ctrl_info *ctrl, unsigned int tag); int be_cmd_modify_eq_delay(struct beiscsi_hba *phba, struct be_set_eqd *, int num); -int beiscsi_mccq_compl(struct beiscsi_hba *phba, - uint32_t tag, struct be_mcc_wrb **wrb, - struct be_dma_mem *mbx_cmd_mem); +int beiscsi_mccq_compl_wait(struct beiscsi_hba *phba, + uint32_t tag, struct be_mcc_wrb **wrb, + struct be_dma_mem *mbx_cmd_mem); /*ISCSI Functuions */ int be_cmd_fw_initialize(struct be_ctrl_info *ctrl); int be_cmd_fw_uninit(struct be_ctrl_info *ctrl); struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem); -struct be_mcc_wrb *wrb_from_mccq(struct beiscsi_hba *phba); -int be_mcc_notify_wait(struct beiscsi_hba *phba); -void be_mcc_notify(struct beiscsi_hba *phba); -unsigned int alloc_mcc_tag(struct beiscsi_hba *phba); -void beiscsi_async_link_state_process(struct beiscsi_hba *phba, - struct be_async_event_link_state *evt); -int be_mcc_compl_process_isr(struct be_ctrl_info *ctrl, - struct be_mcc_compl *compl); +int be_mcc_compl_poll(struct beiscsi_hba *phba, unsigned int tag); +void be_mcc_notify(struct beiscsi_hba *phba, unsigned int tag); +struct be_mcc_wrb *alloc_mcc_wrb(struct beiscsi_hba *phba, + unsigned int *ref_tag); +void beiscsi_process_async_event(struct beiscsi_hba *phba, + struct be_mcc_compl *compl); +int beiscsi_process_mcc_compl(struct be_ctrl_info *ctrl, + struct be_mcc_compl *compl); + int be_mbox_notify(struct be_ctrl_info *ctrl); @@ -777,8 +774,6 @@ int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem, struct hwi_wrb_context *pwrb_context, uint8_t ulp_num); -bool is_link_state_evt(u32 trailer); - /* Configuration Functions */ int be_cmd_set_vlan(struct beiscsi_hba *phba, uint16_t vlan_tag); @@ -1137,6 +1132,21 @@ struct be_cmd_get_all_if_id_req { u32 if_hndl_list[1]; } __packed; +struct be_cmd_get_port_name { + union { + struct be_cmd_req_hdr req_hdr; + struct be_cmd_resp_hdr resp_hdr; + } h; + union { + struct { + u32 reserved; + } req; + struct { + u32 port_names; + } resp; + } p; +} __packed; + #define ISCSI_OPCODE_SCSI_DATA_OUT 5 #define OPCODE_COMMON_NTWK_LINK_STATUS_QUERY 5 #define OPCODE_COMMON_MODIFY_EQ_DELAY 41 @@ -1367,5 +1377,5 @@ void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len, void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr, u8 subsystem, u8 opcode, int cmd_len); -void be2iscsi_fail_session(struct iscsi_cls_session *cls_session); +void 
beiscsi_fail_session(struct iscsi_cls_session *cls_session); #endif /* !BEISCSI_CMDS_H */ diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c index b7087ba69d8d..09f89a3eaa87 100644 --- a/drivers/scsi/be2iscsi/be_iscsi.c +++ b/drivers/scsi/be2iscsi/be_iscsi.c @@ -367,13 +367,14 @@ beiscsi_set_vlan_tag(struct Scsi_Host *shost, struct iscsi_iface_param_info *iface_param) { struct beiscsi_hba *phba = iscsi_host_priv(shost); - int ret = 0; + int ret; /* Get the Interface Handle */ - if (mgmt_get_all_if_id(phba)) { + ret = mgmt_get_all_if_id(phba); + if (ret) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, "BS_%d : Getting Interface Handle Failed\n"); - return -EIO; + return ret; } switch (iface_param->param) { @@ -465,6 +466,10 @@ beiscsi_set_ipv6(struct Scsi_Host *shost, ret = mgmt_set_ip(phba, iface_param, NULL, ISCSI_BOOTPROTO_STATIC); break; + case ISCSI_NET_PARAM_VLAN_ENABLED: + case ISCSI_NET_PARAM_VLAN_TAG: + ret = beiscsi_set_vlan_tag(shost, iface_param); + break; default: beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, "BS_%d : Param %d not supported\n", @@ -730,7 +735,7 @@ static int beiscsi_get_initname(char *buf, struct beiscsi_hba *phba) return -EBUSY; } - rc = beiscsi_mccq_compl(phba, tag, &wrb, NULL); + rc = beiscsi_mccq_compl_wait(phba, tag, &wrb, NULL); if (rc) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, @@ -753,7 +758,7 @@ static void beiscsi_get_port_state(struct Scsi_Host *shost) struct beiscsi_hba *phba = iscsi_host_priv(shost); struct iscsi_cls_host *ihost = shost->shost_data; - ihost->port_state = (phba->state == BE_ADAPTER_LINK_UP) ? + ihost->port_state = (phba->state & BE_ADAPTER_LINK_UP) ? ISCSI_PORT_STATE_UP : ISCSI_PORT_STATE_DOWN; } @@ -761,34 +766,13 @@ static void beiscsi_get_port_state(struct Scsi_Host *shost) * beiscsi_get_port_speed - Get the Port Speed from Adapter * @shost : pointer to scsi_host structure * - * returns Success/Failure */ -static int beiscsi_get_port_speed(struct Scsi_Host *shost) +static void beiscsi_get_port_speed(struct Scsi_Host *shost) { - int rc; - unsigned int tag; - struct be_mcc_wrb *wrb; - struct be_cmd_ntwk_link_status_resp *resp; struct beiscsi_hba *phba = iscsi_host_priv(shost); struct iscsi_cls_host *ihost = shost->shost_data; - tag = be_cmd_get_port_speed(phba); - if (!tag) { - beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, - "BS_%d : Getting Port Speed Failed\n"); - - return -EBUSY; - } - rc = beiscsi_mccq_compl(phba, tag, &wrb, NULL); - if (rc) { - beiscsi_log(phba, KERN_ERR, - BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, - "BS_%d : Port Speed MBX Failed\n"); - return rc; - } - resp = embedded_payload(wrb); - - switch (resp->mac_speed) { + switch (phba->port_speed) { case BE2ISCSI_LINK_SPEED_10MBPS: ihost->port_speed = ISCSI_PORT_SPEED_10MBPS; break; @@ -801,10 +785,15 @@ static int beiscsi_get_port_speed(struct Scsi_Host *shost) case BE2ISCSI_LINK_SPEED_10GBPS: ihost->port_speed = ISCSI_PORT_SPEED_10GBPS; break; + case BE2ISCSI_LINK_SPEED_25GBPS: + ihost->port_speed = ISCSI_PORT_SPEED_25GBPS; + break; + case BE2ISCSI_LINK_SPEED_40GBPS: + ihost->port_speed = ISCSI_PORT_SPEED_40GBPS; + break; default: ihost->port_speed = ISCSI_PORT_SPEED_UNKNOWN; } - return 0; } /** @@ -854,12 +843,7 @@ int beiscsi_get_host_param(struct Scsi_Host *shost, status = sprintf(buf, "%s\n", iscsi_get_port_state_name(shost)); break; case ISCSI_HOST_PARAM_PORT_SPEED: - status = beiscsi_get_port_speed(shost); - if (status) { - beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, - "BS_%d : Retreiving Port 
Speed Failed\n"); - return status; - } + beiscsi_get_port_speed(shost); status = sprintf(buf, "%s\n", iscsi_get_port_speed_name(shost)); break; default: @@ -1159,7 +1143,7 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep, return -EAGAIN; } - ret = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd); + ret = beiscsi_mccq_compl_wait(phba, tag, NULL, &nonemb_cmd); if (ret) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, @@ -1292,9 +1276,9 @@ static void beiscsi_flush_cq(struct beiscsi_hba *phba) for (i = 0; i < phba->num_cpus; i++) { pbe_eq = &phwi_context->be_eq[i]; - blk_iopoll_disable(&pbe_eq->iopoll); - beiscsi_process_cq(pbe_eq); - blk_iopoll_enable(&pbe_eq->iopoll); + irq_poll_disable(&pbe_eq->iopoll); + beiscsi_process_cq(pbe_eq, BE2_MAX_NUM_CQ_PROC); + irq_poll_enable(&pbe_eq->iopoll); } } @@ -1318,7 +1302,7 @@ static int beiscsi_close_conn(struct beiscsi_endpoint *beiscsi_ep, int flag) ret = -EAGAIN; } - ret = beiscsi_mccq_compl(phba, tag, NULL, NULL); + ret = beiscsi_mccq_compl_wait(phba, tag, NULL, NULL); /* Flush the CQ entries */ beiscsi_flush_cq(phba); @@ -1393,7 +1377,7 @@ void beiscsi_ep_disconnect(struct iscsi_endpoint *ep) beiscsi_ep->ep_cid); } - beiscsi_mccq_compl(phba, tag, NULL, NULL); + beiscsi_mccq_compl_wait(phba, tag, NULL, NULL); beiscsi_close_conn(beiscsi_ep, tcp_upload_flag); free_ep: msleep(BEISCSI_LOGOUT_SYNC_DELAY); diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index fe0c5143f8e6..f05e7737107d 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -29,6 +29,7 @@ #include <linux/iscsi_boot_sysfs.h> #include <linux/module.h> #include <linux/bsg-lib.h> +#include <linux/irq_poll.h> #include <scsi/libiscsi.h> #include <scsi/scsi_bsg_iscsi.h> @@ -285,7 +286,7 @@ static int beiscsi_eh_abort(struct scsi_cmnd *sc) return FAILED; } - rc = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd); + rc = beiscsi_mccq_compl_wait(phba, tag, NULL, &nonemb_cmd); if (rc != -EBUSY) pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, nonemb_cmd.va, nonemb_cmd.dma); @@ -366,7 +367,7 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc) return FAILED; } - rc = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd); + rc = beiscsi_mccq_compl_wait(phba, tag, NULL, &nonemb_cmd); if (rc != -EBUSY) pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, nonemb_cmd.va, nonemb_cmd.dma); @@ -727,9 +728,8 @@ static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev) mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16); mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16); memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox)); - spin_lock_init(&ctrl->mbox_lock); + mutex_init(&ctrl->mbox_lock); spin_lock_init(&phba->ctrl.mcc_lock); - spin_lock_init(&phba->ctrl.mcc_cq_lock); return status; } @@ -895,32 +895,17 @@ static irqreturn_t be_isr_mcc(int irq, void *dev_id) static irqreturn_t be_isr_msix(int irq, void *dev_id) { struct beiscsi_hba *phba; - struct be_eq_entry *eqe = NULL; struct be_queue_info *eq; - struct be_queue_info *cq; - unsigned int num_eq_processed; struct be_eq_obj *pbe_eq; pbe_eq = dev_id; eq = &pbe_eq->q; - cq = pbe_eq->cq; - eqe = queue_tail_node(eq); phba = pbe_eq->phba; - num_eq_processed = 0; - while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] - & EQE_VALID_MASK) { - if (!blk_iopoll_sched_prep(&pbe_eq->iopoll)) - blk_iopoll_sched(&pbe_eq->iopoll); - AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); - queue_tail_inc(eq); - eqe = queue_tail_node(eq); - 
num_eq_processed++; - } - - if (num_eq_processed) - hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 0, 1); + /* disable interrupt till iopoll completes */ + hwi_ring_eq_db(phba, eq->id, 1, 0, 0, 1); + irq_poll_sched(&pbe_eq->iopoll); return IRQ_HANDLED; } @@ -972,8 +957,7 @@ static irqreturn_t be_isr(int irq, void *dev_id) spin_unlock_irqrestore(&phba->isr_lock, flags); num_mcceq_processed++; } else { - if (!blk_iopoll_sched_prep(&pbe_eq->iopoll)) - blk_iopoll_sched(&pbe_eq->iopoll); + irq_poll_sched(&pbe_eq->iopoll); num_ioeq_processed++; } AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); @@ -998,6 +982,7 @@ static irqreturn_t be_isr(int irq, void *dev_id) return IRQ_NONE; } + static int beiscsi_init_irqs(struct beiscsi_hba *phba) { struct pci_dev *pcidev = phba->pcidev; @@ -1072,7 +1057,7 @@ free_msix_irqs: void hwi_ring_cq_db(struct beiscsi_hba *phba, unsigned int id, unsigned int num_processed, - unsigned char rearm, unsigned char event) + unsigned char rearm) { u32 val = 0; @@ -1147,6 +1132,7 @@ static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba) { struct sgl_handle *psgl_handle; + spin_lock_bh(&phba->io_sgl_lock); if (phba->io_sgl_hndl_avbl) { beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO, "BM_%d : In alloc_io_sgl_handle," @@ -1164,12 +1150,14 @@ static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba) phba->io_sgl_alloc_index++; } else psgl_handle = NULL; + spin_unlock_bh(&phba->io_sgl_lock); return psgl_handle; } static void free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle) { + spin_lock_bh(&phba->io_sgl_lock); beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO, "BM_%d : In free_,io_sgl_free_index=%d\n", phba->io_sgl_free_index); @@ -1184,6 +1172,7 @@ free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle) "value there=%p\n", phba->io_sgl_free_index, phba->io_sgl_hndl_base [phba->io_sgl_free_index]); + spin_unlock_bh(&phba->io_sgl_lock); return; } phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle; @@ -1192,6 +1181,25 @@ free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle) phba->io_sgl_free_index = 0; else phba->io_sgl_free_index++; + spin_unlock_bh(&phba->io_sgl_lock); +} + +static inline struct wrb_handle * +beiscsi_get_wrb_handle(struct hwi_wrb_context *pwrb_context, + unsigned int wrbs_per_cxn) +{ + struct wrb_handle *pwrb_handle; + + spin_lock_bh(&pwrb_context->wrb_lock); + pwrb_handle = pwrb_context->pwrb_handle_base[pwrb_context->alloc_index]; + pwrb_context->wrb_handles_available--; + if (pwrb_context->alloc_index == (wrbs_per_cxn - 1)) + pwrb_context->alloc_index = 0; + else + pwrb_context->alloc_index++; + spin_unlock_bh(&pwrb_context->wrb_lock); + + return pwrb_handle; } /** @@ -1203,30 +1211,32 @@ free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle) * This happens under session_lock until submission to chip */ struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid, - struct hwi_wrb_context **pcontext) + struct hwi_wrb_context **pcontext) { struct hwi_wrb_context *pwrb_context; struct hwi_controller *phwi_ctrlr; - struct wrb_handle *pwrb_handle; uint16_t cri_index = BE_GET_CRI_FROM_CID(cid); phwi_ctrlr = phba->phwi_ctrlr; pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; - if (pwrb_context->wrb_handles_available >= 2) { - pwrb_handle = pwrb_context->pwrb_handle_base[ - pwrb_context->alloc_index]; - pwrb_context->wrb_handles_available--; - if (pwrb_context->alloc_index == - (phba->params.wrbs_per_cxn - 1)) - 
pwrb_context->alloc_index = 0; - else - pwrb_context->alloc_index++; + /* return the context address */ + *pcontext = pwrb_context; + return beiscsi_get_wrb_handle(pwrb_context, phba->params.wrbs_per_cxn); +} - /* Return the context address */ - *pcontext = pwrb_context; - } else - pwrb_handle = NULL; - return pwrb_handle; +static inline void +beiscsi_put_wrb_handle(struct hwi_wrb_context *pwrb_context, + struct wrb_handle *pwrb_handle, + unsigned int wrbs_per_cxn) +{ + spin_lock_bh(&pwrb_context->wrb_lock); + pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle; + pwrb_context->wrb_handles_available++; + if (pwrb_context->free_index == (wrbs_per_cxn - 1)) + pwrb_context->free_index = 0; + else + pwrb_context->free_index++; + spin_unlock_bh(&pwrb_context->wrb_lock); } /** @@ -1241,13 +1251,9 @@ static void free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context, struct wrb_handle *pwrb_handle) { - pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle; - pwrb_context->wrb_handles_available++; - if (pwrb_context->free_index == (phba->params.wrbs_per_cxn - 1)) - pwrb_context->free_index = 0; - else - pwrb_context->free_index++; - + beiscsi_put_wrb_handle(pwrb_context, + pwrb_handle, + phba->params.wrbs_per_cxn); beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, "BM_%d : FREE WRB: pwrb_handle=%p free_index=0x%x" @@ -1260,6 +1266,7 @@ static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba) { struct sgl_handle *psgl_handle; + spin_lock_bh(&phba->mgmt_sgl_lock); if (phba->eh_sgl_hndl_avbl) { psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index]; phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL; @@ -1277,13 +1284,14 @@ static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba) phba->eh_sgl_alloc_index++; } else psgl_handle = NULL; + spin_unlock_bh(&phba->mgmt_sgl_lock); return psgl_handle; } void free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle) { - + spin_lock_bh(&phba->mgmt_sgl_lock); beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, "BM_%d : In free_mgmt_sgl_handle," "eh_sgl_free_index=%d\n", @@ -1298,6 +1306,7 @@ free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle) "BM_%d : Double Free in eh SGL ," "eh_sgl_free_index=%d\n", phba->eh_sgl_free_index); + spin_unlock_bh(&phba->mgmt_sgl_lock); return; } phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle; @@ -1307,6 +1316,7 @@ free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle) phba->eh_sgl_free_index = 0; else phba->eh_sgl_free_index++; + spin_unlock_bh(&phba->mgmt_sgl_lock); } static void @@ -2031,7 +2041,7 @@ static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn, phwi_ctrlr, cri_index)); } -static void beiscsi_process_mcc_isr(struct beiscsi_hba *phba) +void beiscsi_process_mcc_cq(struct beiscsi_hba *phba) { struct be_queue_info *mcc_cq; struct be_mcc_compl *mcc_compl; @@ -2041,31 +2051,15 @@ static void beiscsi_process_mcc_isr(struct beiscsi_hba *phba) mcc_compl = queue_tail_node(mcc_cq); mcc_compl->flags = le32_to_cpu(mcc_compl->flags); while (mcc_compl->flags & CQE_FLAGS_VALID_MASK) { - if (num_processed >= 32) { hwi_ring_cq_db(phba, mcc_cq->id, - num_processed, 0, 0); + num_processed, 0); num_processed = 0; } if (mcc_compl->flags & CQE_FLAGS_ASYNC_MASK) { - /* Interpret flags as an async trailer */ - if (is_link_state_evt(mcc_compl->flags)) - /* Interpret compl as a async link evt */ - 
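/*
 * The new beiscsi_get_wrb_handle()/beiscsi_put_wrb_handle() helpers
 * above guard a ring of pre-allocated handles with the per-context
 * wrb_lock instead of relying on the session lock.  A minimal sketch
 * of the same ring idiom (handle_ring and its fields are hypothetical;
 * only the spinlock calls are the real <linux/spinlock.h> API):
 */
#include <linux/spinlock.h>

struct handle_ring {
	spinlock_t lock;
	void **slot;			/* ring of pre-allocated handles */
	unsigned int size;		/* slot count, fixed at init */
	unsigned int alloc_index;	/* next handle to hand out */
	unsigned int free_index;	/* next slot for a returned handle */
	unsigned int available;
};

static void *ring_get(struct handle_ring *r)
{
	void *h = NULL;

	spin_lock_bh(&r->lock);
	if (r->available) {
		h = r->slot[r->alloc_index];
		r->available--;
		r->alloc_index = (r->alloc_index + 1) % r->size;
	}
	spin_unlock_bh(&r->lock);
	return h;
}

static void ring_put(struct handle_ring *r, void *h)
{
	spin_lock_bh(&r->lock);
	r->slot[r->free_index] = h;
	r->available++;
	r->free_index = (r->free_index + 1) % r->size;
	spin_unlock_bh(&r->lock);
}
/*
 * Gets advance alloc_index and puts advance free_index independently,
 * so a returned handle is reused only after 'size' further puts - the
 * spacing the CONTEXT_UPDATE put-back later in this patch relies on.
 */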
beiscsi_async_link_state_process(phba, - (struct be_async_event_link_state *) mcc_compl); - else { - beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_MBOX, - "BM_%d : Unsupported Async Event, flags" - " = 0x%08x\n", - mcc_compl->flags); - if (phba->state & BE_ADAPTER_LINK_UP) { - phba->state |= BE_ADAPTER_CHECK_BOOT; - phba->get_boot = BE_GET_BOOT_RETRIES; - } - } + beiscsi_process_async_event(phba, mcc_compl); } else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) { - be_mcc_compl_process_isr(&phba->ctrl, mcc_compl); - atomic_dec(&phba->ctrl.mcc_obj.q.used); + beiscsi_process_mcc_compl(&phba->ctrl, mcc_compl); } mcc_compl->flags = 0; @@ -2076,24 +2070,24 @@ static void beiscsi_process_mcc_isr(struct beiscsi_hba *phba) } if (num_processed > 0) - hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1, 0); - + hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1); } /** * beiscsi_process_cq()- Process the Completion Queue * @pbe_eq: Event Q on which the Completion has come + * @budget: Max number of events to processed * * return * Number of Completion Entries processed. **/ -unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq) +unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq, int budget) { struct be_queue_info *cq; struct sol_cqe *sol; struct dmsg_cqe *dmsg; + unsigned int total = 0; unsigned int num_processed = 0; - unsigned int tot_nump = 0; unsigned short code = 0, cid = 0; uint16_t cri_index = 0; struct beiscsi_conn *beiscsi_conn; @@ -2144,12 +2138,12 @@ unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq) beiscsi_ep = ep->dd_data; beiscsi_conn = beiscsi_ep->conn; - if (num_processed >= 32) { - hwi_ring_cq_db(phba, cq->id, - num_processed, 0, 0); - tot_nump += num_processed; + /* replenish cq */ + if (num_processed == 32) { + hwi_ring_cq_db(phba, cq->id, 32, 0); num_processed = 0; } + total++; switch (code) { case SOL_CMD_COMPLETE: @@ -2194,7 +2188,13 @@ unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq) "BM_%d : Ignoring %s[%d] on CID : %d\n", cqe_desc[code], code, cid); break; + case CXN_KILLED_HDR_DIGEST_ERR: case SOL_CMD_KILLED_DATA_DIGEST_ERR: + beiscsi_log(phba, KERN_ERR, + BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, + "BM_%d : Cmd Notification %s[%d] on CID : %d\n", + cqe_desc[code], code, cid); + break; case CMD_KILLED_INVALID_STATSN_RCVD: case CMD_KILLED_INVALID_R2T_RCVD: case CMD_CXN_KILLED_LUN_INVALID: @@ -2220,7 +2220,6 @@ unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq) case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL: case CXN_KILLED_BURST_LEN_MISMATCH: case CXN_KILLED_AHS_RCVD: - case CXN_KILLED_HDR_DIGEST_ERR: case CXN_KILLED_UNKNOWN_HDR: case CXN_KILLED_STALE_ITT_TTT_RCVD: case CXN_KILLED_INVALID_ITT_TTT_RCVD: @@ -2255,13 +2254,12 @@ proc_next_cqe: queue_tail_inc(cq); sol = queue_tail_node(cq); num_processed++; + if (total == budget) + break; } - if (num_processed > 0) { - tot_nump += num_processed; - hwi_ring_cq_db(phba, cq->id, num_processed, 1, 0); - } - return tot_nump; + hwi_ring_cq_db(phba, cq->id, num_processed, 1); + return total; } void beiscsi_process_all_cqs(struct work_struct *work) @@ -2281,36 +2279,52 @@ void beiscsi_process_all_cqs(struct work_struct *work) spin_lock_irqsave(&phba->isr_lock, flags); pbe_eq->todo_mcc_cq = false; spin_unlock_irqrestore(&phba->isr_lock, flags); - beiscsi_process_mcc_isr(phba); + beiscsi_process_mcc_cq(phba); } if (pbe_eq->todo_cq) { spin_lock_irqsave(&phba->isr_lock, flags); pbe_eq->todo_cq = false; spin_unlock_irqrestore(&phba->isr_lock, flags); - beiscsi_process_cq(pbe_eq); + beiscsi_process_cq(pbe_eq, 
BE2_MAX_NUM_CQ_PROC); } /* rearm EQ for further interrupts */ hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1); } -static int be_iopoll(struct blk_iopoll *iop, int budget) +static int be_iopoll(struct irq_poll *iop, int budget) { - unsigned int ret; + unsigned int ret, num_eq_processed; struct beiscsi_hba *phba; struct be_eq_obj *pbe_eq; + struct be_eq_entry *eqe = NULL; + struct be_queue_info *eq; + num_eq_processed = 0; pbe_eq = container_of(iop, struct be_eq_obj, iopoll); - ret = beiscsi_process_cq(pbe_eq); + phba = pbe_eq->phba; + eq = &pbe_eq->q; + eqe = queue_tail_node(eq); + + while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] & + EQE_VALID_MASK) { + AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); + queue_tail_inc(eq); + eqe = queue_tail_node(eq); + num_eq_processed++; + } + + hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 0, 1); + + ret = beiscsi_process_cq(pbe_eq, budget); pbe_eq->cq_count += ret; if (ret < budget) { - phba = pbe_eq->phba; - blk_iopoll_complete(iop); + irq_poll_complete(iop); beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, - "BM_%d : rearm pbe_eq->q.id =%d\n", - pbe_eq->q.id); + "BM_%d : rearm pbe_eq->q.id =%d ret %d\n", + pbe_eq->q.id, ret); hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1); } return ret; @@ -2504,7 +2518,7 @@ hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg, * @pwrb: ptr to the WRB entry * @task: iscsi task which is to be executed **/ -static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task) +static int hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task) { struct iscsi_sge *psgl; struct beiscsi_io_task *io_task = task->dd_data; @@ -2536,6 +2550,9 @@ static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task) task->data, task->data_count, PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(phba->pcidev, + io_task->mtask_addr)) + return -ENOMEM; io_task->mtask_data_count = task->data_count; } else io_task->mtask_addr = 0; @@ -2580,6 +2597,7 @@ static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task) AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106); } AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1); + return 0; } /** @@ -2708,8 +2726,10 @@ static int beiscsi_alloc_mem(struct beiscsi_hba *phba) phwi_ctrlr->wrb_context = kzalloc(sizeof(struct hwi_wrb_context) * phba->params.cxns_per_ctrl, GFP_KERNEL); - if (!phwi_ctrlr->wrb_context) + if (!phwi_ctrlr->wrb_context) { + kfree(phba->phwi_ctrlr); return -ENOMEM; + } phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr), GFP_KERNEL); @@ -2906,6 +2926,7 @@ static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba) } num_cxn_wrbh--; } + spin_lock_init(&pwrb_context->wrb_lock); } idx = 0; for (index = 0; index < phba->params.cxns_per_ctrl; index++) { @@ -3868,6 +3889,8 @@ static int hwi_init_port(struct beiscsi_hba *phba) phwi_context->min_eqd = 0; phwi_context->cur_eqd = 0; be_cmd_fw_initialize(&phba->ctrl); + /* set optic state to unknown */ + phba->optic_state = 0xff; status = beiscsi_create_eqs(phba, phwi_context); if (status != 0) { @@ -4386,7 +4409,7 @@ static int beiscsi_get_boot_info(struct beiscsi_hba *phba) goto boot_freemem; } - ret = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd); + ret = beiscsi_mccq_compl_wait(phba, tag, NULL, &nonemb_cmd); if (ret) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG, @@ -4470,6 +4493,7 @@ put_shost: scsi_host_put(phba->shost); free_kset: iscsi_boot_destroy_kset(phba->boot_kset); + phba->boot_kset = NULL; return 
-ENOMEM; } @@ -4609,11 +4633,9 @@ beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn, } if (io_task->psgl_handle) { - spin_lock_bh(&phba->mgmt_sgl_lock); free_mgmt_sgl_handle(phba, io_task->psgl_handle); io_task->psgl_handle = NULL; - spin_unlock_bh(&phba->mgmt_sgl_lock); } if (io_task->mtask_addr) { @@ -4659,9 +4681,7 @@ static void beiscsi_cleanup_task(struct iscsi_task *task) } if (io_task->psgl_handle) { - spin_lock(&phba->io_sgl_lock); free_io_sgl_handle(phba, io_task->psgl_handle); - spin_unlock(&phba->io_sgl_lock); io_task->psgl_handle = NULL; } @@ -4716,6 +4736,20 @@ beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn, doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; iowrite32(doorbell, phba->db_va + beiscsi_conn->doorbell_offset); + + /* + * There is no completion for CONTEXT_UPDATE. The completion of next + * WRB posted guarantees FW's processing and DMA'ing of it. + * Use beiscsi_put_wrb_handle to put it back in the pool which makes + * sure zero'ing or reuse of the WRB only after wrbs_per_cxn. + */ + beiscsi_put_wrb_handle(pwrb_context, pwrb_handle, + phba->params.wrbs_per_cxn); + beiscsi_log(phba, KERN_INFO, + BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, + "BM_%d : put CONTEXT_UPDATE pwrb_handle=%p free_index=0x%x wrb_handles_available=%d\n", + pwrb_handle, pwrb_context->free_index, + pwrb_context->wrb_handles_available); } static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt, @@ -4763,9 +4797,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode) io_task->pwrb_handle = NULL; if (task->sc) { - spin_lock(&phba->io_sgl_lock); io_task->psgl_handle = alloc_io_sgl_handle(phba); - spin_unlock(&phba->io_sgl_lock); if (!io_task->psgl_handle) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, @@ -4790,10 +4822,8 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode) if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) { beiscsi_conn->task = task; if (!beiscsi_conn->login_in_progress) { - spin_lock(&phba->mgmt_sgl_lock); io_task->psgl_handle = (struct sgl_handle *) alloc_mgmt_sgl_handle(phba); - spin_unlock(&phba->mgmt_sgl_lock); if (!io_task->psgl_handle) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_IO | @@ -4832,9 +4862,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode) beiscsi_conn->plogin_wrb_handle; } } else { - spin_lock(&phba->mgmt_sgl_lock); io_task->psgl_handle = alloc_mgmt_sgl_handle(phba); - spin_unlock(&phba->mgmt_sgl_lock); if (!io_task->psgl_handle) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_IO | @@ -4869,15 +4897,11 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode) return 0; free_io_hndls: - spin_lock(&phba->io_sgl_lock); free_io_sgl_handle(phba, io_task->psgl_handle); - spin_unlock(&phba->io_sgl_lock); goto free_hndls; free_mgmt_hndls: - spin_lock(&phba->mgmt_sgl_lock); free_mgmt_sgl_handle(phba, io_task->psgl_handle); io_task->psgl_handle = NULL; - spin_unlock(&phba->mgmt_sgl_lock); free_hndls: phwi_ctrlr = phba->phwi_ctrlr; cri_index = BE_GET_CRI_FROM_CID( @@ -4905,7 +4929,6 @@ int beiscsi_iotask_v2(struct iscsi_task *task, struct scatterlist *sg, pwrb = io_task->pwrb_handle->pwrb; - io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0; io_task->bhs_len = sizeof(struct be_cmd_bhs); if (writedir) { @@ -4966,7 +4989,6 @@ static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg, unsigned int doorbell = 0; pwrb = io_task->pwrb_handle->pwrb; - io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0; io_task->bhs_len = sizeof(struct be_cmd_bhs); if (writedir) { 
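/*
 * hwi_write_buffer() above now fails the task instead of posting a WRB
 * carrying an unchecked DMA address.  The underlying pattern with the
 * era-appropriate pci_* DMA API (map_payload and its arguments are
 * hypothetical):
 */
#include <linux/errno.h>
#include <linux/pci.h>

static int map_payload(struct pci_dev *pdev, void *data, size_t len,
		       dma_addr_t *busaddr)
{
	*busaddr = pci_map_single(pdev, data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(pdev, *busaddr))
		return -ENOMEM;		/* caller must not post the WRB */
	return 0;
}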
@@ -5025,6 +5047,7 @@ static int beiscsi_mtask(struct iscsi_task *task) unsigned int doorbell = 0; unsigned int cid; unsigned int pwrb_typeoffset = 0; + int ret = 0; cid = beiscsi_conn->beiscsi_conn_cid; pwrb = io_task->pwrb_handle->pwrb; @@ -5073,7 +5096,7 @@ static int beiscsi_mtask(struct iscsi_task *task) case ISCSI_OP_LOGIN: AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1); ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset); - hwi_write_buffer(pwrb, task); + ret = hwi_write_buffer(pwrb, task); break; case ISCSI_OP_NOOP_OUT: if (task->hdr->ttt != ISCSI_RESERVED_TAG) { @@ -5093,19 +5116,19 @@ static int beiscsi_mtask(struct iscsi_task *task) AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dmsg, pwrb, 0); } - hwi_write_buffer(pwrb, task); + ret = hwi_write_buffer(pwrb, task); break; case ISCSI_OP_TEXT: ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset); - hwi_write_buffer(pwrb, task); + ret = hwi_write_buffer(pwrb, task); break; case ISCSI_OP_SCSI_TMFUNC: ADAPTER_SET_WRB_TYPE(pwrb, INI_TMF_CMD, pwrb_typeoffset); - hwi_write_buffer(pwrb, task); + ret = hwi_write_buffer(pwrb, task); break; case ISCSI_OP_LOGOUT: ADAPTER_SET_WRB_TYPE(pwrb, HWH_TYPE_LOGOUT, pwrb_typeoffset); - hwi_write_buffer(pwrb, task); + ret = hwi_write_buffer(pwrb, task); break; default: @@ -5116,6 +5139,9 @@ static int beiscsi_mtask(struct iscsi_task *task) return -EINVAL; } + if (ret) + return ret; + /* Set the task type */ io_task->wrb_type = (is_chip_be2_be3r(phba)) ? AMAP_GET_BITS(struct amap_iscsi_wrb, type, pwrb) : @@ -5134,23 +5160,21 @@ static int beiscsi_task_xmit(struct iscsi_task *task) { struct beiscsi_io_task *io_task = task->dd_data; struct scsi_cmnd *sc = task->sc; - struct beiscsi_hba *phba = NULL; + struct beiscsi_hba *phba; struct scatterlist *sg; int num_sg; unsigned int writedir = 0, xferlen = 0; - phba = ((struct beiscsi_conn *)task->conn->dd_data)->phba; + if (!io_task->conn->login_in_progress) + task->hdr->exp_statsn = 0; if (!sc) return beiscsi_mtask(task); io_task->scsi_cmnd = sc; num_sg = scsi_dma_map(sc); + phba = io_task->conn->phba; if (num_sg < 0) { - struct iscsi_conn *conn = task->conn; - struct beiscsi_hba *phba = NULL; - - phba = ((struct beiscsi_conn *)conn->dd_data)->phba; beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_IO | BEISCSI_LOG_ISCSI, "BM_%d : scsi_dma_map Failed " @@ -5213,12 +5237,13 @@ static int beiscsi_bsg_request(struct bsg_job *job) rc = wait_event_interruptible_timeout( phba->ctrl.mcc_wait[tag], - phba->ctrl.mcc_numtag[tag], + phba->ctrl.mcc_tag_status[tag], msecs_to_jiffies( BEISCSI_HOST_MBX_TIMEOUT)); - extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8; - status = phba->ctrl.mcc_numtag[tag] & 0x000000FF; - free_mcc_tag(&phba->ctrl, tag); + extd_status = (phba->ctrl.mcc_tag_status[tag] & + CQE_STATUS_ADDL_MASK) >> CQE_STATUS_ADDL_SHIFT; + status = phba->ctrl.mcc_tag_status[tag] & CQE_STATUS_MASK; + free_mcc_wrb(&phba->ctrl, tag); resp = (struct be_cmd_resp_hdr *)nonemb_cmd.va; sg_copy_from_buffer(job->reply_payload.sg_list, job->reply_payload.sg_cnt, @@ -5279,21 +5304,18 @@ static void beiscsi_quiesce(struct beiscsi_hba *phba, if (phba->msix_enabled) { for (i = 0; i <= phba->num_cpus; i++) { msix_vec = phba->msix_entries[i].vector; - synchronize_irq(msix_vec); free_irq(msix_vec, &phwi_context->be_eq[i]); kfree(phba->msi_name[i]); } } else - if (phba->pcidev->irq) { - synchronize_irq(phba->pcidev->irq); + if (phba->pcidev->irq) free_irq(phba->pcidev->irq, phba); - } pci_disable_msix(phba->pcidev); cancel_delayed_work_sync(&phba->beiscsi_hw_check_task); 
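/*
 * The blk_iopoll to irq_poll conversion in this patch follows the
 * standard split: the ISR only masks the interrupt and schedules the
 * poller, and the poll callback consumes completions up to the given
 * budget, completing the poll and re-arming the interrupt when it
 * drains the queue early.  A sketch of that contract (my_eq and the
 * two helpers are hypothetical; the irq_poll_* calls are the real
 * <linux/irq_poll.h> interface):
 */
#include <linux/interrupt.h>
#include <linux/irq_poll.h>
#include <linux/kernel.h>

struct my_eq {
	struct irq_poll iopoll;
	/* ... event/completion queue state ... */
};

static int process_completions(struct my_eq *eq, int budget);	/* hypothetical */
static void rearm_interrupt(struct my_eq *eq);			/* hypothetical */

static int my_poll(struct irq_poll *iop, int budget)
{
	struct my_eq *eq = container_of(iop, struct my_eq, iopoll);
	int done = process_completions(eq, budget);

	if (done < budget) {
		/* queue ran dry: stop polling, let interrupts resume */
		irq_poll_complete(iop);
		rearm_interrupt(eq);
	}
	return done;
}

static irqreturn_t my_isr(int irq, void *dev_id)
{
	struct my_eq *eq = dev_id;

	/* interrupt stays masked until my_poll() falls below budget */
	irq_poll_sched(&eq->iopoll);
	return IRQ_HANDLED;
}
/*
 * Setup is irq_poll_init(&eq->iopoll, weight, my_poll); unlike the old
 * blk_iopoll_enable(), no separate enable call is needed after init,
 * which is why this patch deletes those calls in probe/resume and
 * keeps only irq_poll_disable() on the teardown side.
 */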
for (i = 0; i < phba->num_cpus; i++) { pbe_eq = &phwi_context->be_eq[i]; - blk_iopoll_disable(&pbe_eq->iopoll); + irq_poll_disable(&pbe_eq->iopoll); } if (unload_state == BEISCSI_CLEAN_UNLOAD) { @@ -5315,7 +5337,6 @@ static void beiscsi_quiesce(struct beiscsi_hba *phba, static void beiscsi_remove(struct pci_dev *pcidev) { - struct beiscsi_hba *phba = NULL; phba = pci_get_drvdata(pcidev); @@ -5325,9 +5346,9 @@ static void beiscsi_remove(struct pci_dev *pcidev) } beiscsi_destroy_def_ifaces(phba); - beiscsi_quiesce(phba, BEISCSI_CLEAN_UNLOAD); iscsi_boot_destroy_kset(phba->boot_kset); iscsi_host_remove(phba->shost); + beiscsi_quiesce(phba, BEISCSI_CLEAN_UNLOAD); pci_dev_put(phba->pcidev); iscsi_host_free(phba->shost); pci_disable_pcie_error_reporting(pcidev); @@ -5336,23 +5357,6 @@ static void beiscsi_remove(struct pci_dev *pcidev) pci_disable_device(pcidev); } -static void beiscsi_shutdown(struct pci_dev *pcidev) -{ - - struct beiscsi_hba *phba = NULL; - - phba = (struct beiscsi_hba *)pci_get_drvdata(pcidev); - if (!phba) { - dev_err(&pcidev->dev, "beiscsi_shutdown called with no phba\n"); - return; - } - - phba->state = BE_ADAPTER_STATE_SHUTDOWN; - iscsi_host_for_each_session(phba->shost, be2iscsi_fail_session); - beiscsi_quiesce(phba, BEISCSI_CLEAN_UNLOAD); - pci_disable_device(pcidev); -} - static void beiscsi_msix_enable(struct beiscsi_hba *phba) { int i, status; @@ -5415,7 +5419,7 @@ static void be_eqd_update(struct beiscsi_hba *phba) if (num) { tag = be_cmd_modify_eq_delay(phba, set_eqd, num); if (tag) - beiscsi_mccq_compl(phba, tag, NULL, NULL); + beiscsi_mccq_compl_wait(phba, tag, NULL, NULL); } } @@ -5566,11 +5570,17 @@ static void beiscsi_eeh_resume(struct pci_dev *pdev) phba->shost->max_id = phba->params.cxns_per_ctrl; phba->shost->can_queue = phba->params.ios_per_ctrl; ret = hwi_init_controller(phba); + if (ret) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : beiscsi_eeh_resume -" + "Failed to initialize beiscsi_hba.\n"); + goto ret_err; + } for (i = 0; i < MAX_MCC_CMD; i++) { init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]); phba->ctrl.mcc_tag[i] = i + 1; - phba->ctrl.mcc_numtag[i + 1] = 0; + phba->ctrl.mcc_tag_status[i + 1] = 0; phba->ctrl.mcc_tag_available++; } @@ -5579,9 +5589,8 @@ static void beiscsi_eeh_resume(struct pci_dev *pdev) for (i = 0; i < phba->num_cpus; i++) { pbe_eq = &phwi_context->be_eq[i]; - blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget, + irq_poll_init(&pbe_eq->iopoll, be_iopoll_budget, be_iopoll); - blk_iopoll_enable(&pbe_eq->iopoll); } i = (phba->msix_enabled) ? 
i : 0; @@ -5673,6 +5682,9 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev, goto hba_free; } + /* + * FUNCTION_RESET should clean up any stale info in FW for this fn + */ ret = beiscsi_cmd_reset_function(phba); if (ret) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, @@ -5696,6 +5708,8 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev, "BM_%d : Error getting fw config\n"); goto free_port; } + mgmt_get_port_name(&phba->ctrl, phba); + beiscsi_get_params(phba); if (enable_msix) find_num_cpus(phba); @@ -5713,7 +5727,6 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev, } phba->shost->max_id = phba->params.cxns_per_ctrl; - beiscsi_get_params(phba); phba->shost->can_queue = phba->params.ios_per_ctrl; ret = beiscsi_init_port(phba); if (ret < 0) { @@ -5726,7 +5739,7 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev, for (i = 0; i < MAX_MCC_CMD; i++) { init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]); phba->ctrl.mcc_tag[i] = i + 1; - phba->ctrl.mcc_numtag[i + 1] = 0; + phba->ctrl.mcc_tag_status[i + 1] = 0; phba->ctrl.mcc_tag_available++; memset(&phba->ctrl.ptag_state[i].tag_mem_state, 0, sizeof(struct be_dma_mem)); @@ -5752,9 +5765,8 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev, for (i = 0; i < phba->num_cpus; i++) { pbe_eq = &phwi_context->be_eq[i]; - blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget, + irq_poll_init(&pbe_eq->iopoll, be_iopoll_budget, be_iopoll); - blk_iopoll_enable(&pbe_eq->iopoll); } i = (phba->msix_enabled) ? i : 0; @@ -5795,7 +5807,7 @@ free_blkenbld: destroy_workqueue(phba->wq); for (i = 0; i < phba->num_cpus; i++) { pbe_eq = &phwi_context->be_eq[i]; - blk_iopoll_disable(&pbe_eq->iopoll); + irq_poll_disable(&pbe_eq->iopoll); } free_twq: beiscsi_clean_port(phba); @@ -5861,7 +5873,6 @@ static struct pci_driver beiscsi_pci_driver = { .name = DRV_NAME, .probe = beiscsi_dev_probe, .remove = beiscsi_remove, - .shutdown = beiscsi_shutdown, .id_table = beiscsi_pci_id_table, .err_handler = &beiscsi_eeh_handlers }; diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h index 5c67c0732241..30a4606d9a3b 100644 --- a/drivers/scsi/be2iscsi/be_main.h +++ b/drivers/scsi/be2iscsi/be_main.h @@ -36,7 +36,7 @@ #include <scsi/scsi_transport_iscsi.h> #define DRV_NAME "be2iscsi" -#define BUILD_STR "10.6.0.1" +#define BUILD_STR "11.0.0.0" #define BE_NAME "Emulex OneConnect" \ "Open-iSCSI Driver version" BUILD_STR #define DRV_DESC BE_NAME " " "Driver" @@ -63,6 +63,7 @@ #define BE2_SGE 32 #define BE2_DEFPDU_HDR_SZ 64 #define BE2_DEFPDU_DATA_SZ 8192 +#define BE2_MAX_NUM_CQ_PROC 512 #define MAX_CPUS 64 #define BEISCSI_MAX_NUM_CPUS 7 @@ -103,8 +104,7 @@ #define BE_ADAPTER_LINK_UP 0x001 #define BE_ADAPTER_LINK_DOWN 0x002 #define BE_ADAPTER_PCI_ERR 0x004 -#define BE_ADAPTER_STATE_SHUTDOWN 0x008 -#define BE_ADAPTER_CHECK_BOOT 0x010 +#define BE_ADAPTER_CHECK_BOOT 0x008 #define BEISCSI_CLEAN_UNLOAD 0x01 @@ -304,6 +304,7 @@ struct invalidate_command_table { #define BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, cri) \ (phwi_ctrlr->wrb_context[cri].ulp_num) struct hwi_wrb_context { + spinlock_t wrb_lock; struct list_head wrb_handle_list; struct list_head wrb_handle_drvr_list; struct wrb_handle **pwrb_handle_base; @@ -398,7 +399,9 @@ struct beiscsi_hba { * group together since they are used most frequently * for cid to cri conversion */ +#define BEISCSI_PHYS_PORT_MAX 4 unsigned int phys_port; + /* valid values of phys_port id are 0, 1, 2, 3 */ unsigned int eqid_count; unsigned int cqid_count; unsigned int iscsi_cid_start[BEISCSI_ULP_COUNT]; @@ -416,6 +419,7 @@ struct 
beiscsi_hba { } fw_config; unsigned int state; + u8 optic_state; int get_boot; bool fw_timeout; bool ue_detected; @@ -423,6 +427,8 @@ struct beiscsi_hba { bool mac_addr_set; u8 mac_address[ETH_ALEN]; + u8 port_name; + u8 port_speed; char fw_ver_str[BEISCSI_VER_STRLEN]; char wq_name[20]; struct workqueue_struct *wq; /* The actuak work queue */ @@ -845,9 +851,10 @@ void beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn, void hwi_ring_cq_db(struct beiscsi_hba *phba, unsigned int id, unsigned int num_processed, - unsigned char rearm, unsigned char event); + unsigned char rearm); -unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq); +unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq, int budget); +void beiscsi_process_mcc_cq(struct beiscsi_hba *phba); static inline bool beiscsi_error(struct beiscsi_hba *phba) { @@ -1074,12 +1081,14 @@ struct hwi_context_memory { #define BEISCSI_LOG_CONFIG 0x0020 /* CONFIG Code Path */ #define BEISCSI_LOG_ISCSI 0x0040 /* SCSI/iSCSI Protocol related Logs */ +#define __beiscsi_log(phba, level, fmt, arg...) \ + shost_printk(level, phba->shost, fmt, __LINE__, ##arg) + #define beiscsi_log(phba, level, mask, fmt, arg...) \ do { \ uint32_t log_value = phba->attr_log_enable; \ if (((mask) & log_value) || (level[1] <= '3')) \ - shost_printk(level, phba->shost, \ - fmt, __LINE__, ##arg); \ -} while (0) + __beiscsi_log(phba, level, fmt, ##arg); \ +} while (0); #endif diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c index aea3e6b9477d..83926e221f1e 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.c +++ b/drivers/scsi/be2iscsi/be_mgmt.c @@ -161,20 +161,17 @@ int be_cmd_modify_eq_delay(struct beiscsi_hba *phba, struct be_ctrl_info *ctrl = &phba->ctrl; struct be_mcc_wrb *wrb; struct be_cmd_req_modify_eq_delay *req; - unsigned int tag = 0; + unsigned int tag; int i; - spin_lock(&ctrl->mbox_lock); - tag = alloc_mcc_tag(phba); - if (!tag) { - spin_unlock(&ctrl->mbox_lock); - return tag; + mutex_lock(&ctrl->mbox_lock); + wrb = alloc_mcc_wrb(phba, &tag); + if (!wrb) { + mutex_unlock(&ctrl->mbox_lock); + return 0; } - wrb = wrb_from_mccq(phba); req = embedded_payload(wrb); - - wrb->tag0 |= tag; be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req)); @@ -187,8 +184,8 @@ int be_cmd_modify_eq_delay(struct beiscsi_hba *phba, cpu_to_le32(set_eqd[i].delay_multiplier); } - be_mcc_notify(phba); - spin_unlock(&ctrl->mbox_lock); + be_mcc_notify(phba, tag); + mutex_unlock(&ctrl->mbox_lock); return tag; } @@ -209,22 +206,20 @@ unsigned int mgmt_reopen_session(struct beiscsi_hba *phba, struct be_ctrl_info *ctrl = &phba->ctrl; struct be_mcc_wrb *wrb; struct be_cmd_reopen_session_req *req; - unsigned int tag = 0; + unsigned int tag; beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, "BG_%d : In bescsi_get_boot_target\n"); - spin_lock(&ctrl->mbox_lock); - tag = alloc_mcc_tag(phba); - if (!tag) { - spin_unlock(&ctrl->mbox_lock); - return tag; + mutex_lock(&ctrl->mbox_lock); + wrb = alloc_mcc_wrb(phba, &tag); + if (!wrb) { + mutex_unlock(&ctrl->mbox_lock); + return 0; } - wrb = wrb_from_mccq(phba); req = embedded_payload(wrb); - wrb->tag0 |= tag; be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI, OPCODE_ISCSI_INI_DRIVER_REOPEN_ALL_SESSIONS, @@ -234,8 +229,8 @@ unsigned int mgmt_reopen_session(struct beiscsi_hba *phba, req->reopen_type = reopen_type; req->session_handle = sess_handle; - 
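/*
 * All of the be_mgmt.c submission paths below are converted to one
 * shape: ctrl->mbox_lock becomes a mutex (these paths sleep waiting
 * for completions anyway), and alloc_mcc_wrb() replaces the separate
 * alloc_mcc_tag()/wrb_from_mccq()/tag0 dance.  In outline, using the
 * driver's own functions from this patch:
 */
static int submit_mcc_cmd_sketch(struct beiscsi_hba *phba)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct be_mcc_wrb *wrb;
	unsigned int tag;

	mutex_lock(&ctrl->mbox_lock);
	wrb = alloc_mcc_wrb(phba, &tag);	/* NULL when no WRB/tag is free */
	if (!wrb) {
		mutex_unlock(&ctrl->mbox_lock);
		return -EBUSY;
	}
	/* ... be_wrb_hdr_prepare(), be_cmd_hdr_prepare(), fill request ... */
	be_mcc_notify(phba, tag);	/* tag armed here, not via wrb->tag0 */
	mutex_unlock(&ctrl->mbox_lock);

	/* block until the MCC CQ posts the completion for this tag */
	return beiscsi_mccq_compl_wait(phba, tag, NULL, NULL);
}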
be_mcc_notify(phba); - spin_unlock(&ctrl->mbox_lock); + be_mcc_notify(phba, tag); + mutex_unlock(&ctrl->mbox_lock); return tag; } @@ -244,29 +239,27 @@ unsigned int mgmt_get_boot_target(struct beiscsi_hba *phba) struct be_ctrl_info *ctrl = &phba->ctrl; struct be_mcc_wrb *wrb; struct be_cmd_get_boot_target_req *req; - unsigned int tag = 0; + unsigned int tag; beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, "BG_%d : In bescsi_get_boot_target\n"); - spin_lock(&ctrl->mbox_lock); - tag = alloc_mcc_tag(phba); - if (!tag) { - spin_unlock(&ctrl->mbox_lock); - return tag; + mutex_lock(&ctrl->mbox_lock); + wrb = alloc_mcc_wrb(phba, &tag); + if (!wrb) { + mutex_unlock(&ctrl->mbox_lock); + return 0; } - wrb = wrb_from_mccq(phba); req = embedded_payload(wrb); - wrb->tag0 |= tag; be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI, OPCODE_ISCSI_INI_BOOT_GET_BOOT_TARGET, sizeof(struct be_cmd_get_boot_target_resp)); - be_mcc_notify(phba); - spin_unlock(&ctrl->mbox_lock); + be_mcc_notify(phba, tag); + mutex_unlock(&ctrl->mbox_lock); return tag; } @@ -276,7 +269,7 @@ unsigned int mgmt_get_session_info(struct beiscsi_hba *phba, { struct be_ctrl_info *ctrl = &phba->ctrl; struct be_mcc_wrb *wrb; - unsigned int tag = 0; + unsigned int tag; struct be_cmd_get_session_req *req; struct be_cmd_get_session_resp *resp; struct be_sge *sge; @@ -285,22 +278,17 @@ unsigned int mgmt_get_session_info(struct beiscsi_hba *phba, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, "BG_%d : In beiscsi_get_session_info\n"); - spin_lock(&ctrl->mbox_lock); - tag = alloc_mcc_tag(phba); - if (!tag) { - spin_unlock(&ctrl->mbox_lock); - return tag; + mutex_lock(&ctrl->mbox_lock); + wrb = alloc_mcc_wrb(phba, &tag); + if (!wrb) { + mutex_unlock(&ctrl->mbox_lock); + return 0; } nonemb_cmd->size = sizeof(*resp); req = nonemb_cmd->va; memset(req, 0, sizeof(*req)); - wrb = wrb_from_mccq(phba); sge = nonembedded_sgl(wrb); - wrb->tag0 |= tag; - - - wrb->tag0 |= tag; be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1); be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI, OPCODE_ISCSI_INI_SESSION_GET_A_SESSION, @@ -310,12 +298,54 @@ unsigned int mgmt_get_session_info(struct beiscsi_hba *phba, sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF); sge->len = cpu_to_le32(nonemb_cmd->size); - be_mcc_notify(phba); - spin_unlock(&ctrl->mbox_lock); + be_mcc_notify(phba, tag); + mutex_unlock(&ctrl->mbox_lock); return tag; } /** + * mgmt_get_port_name()- Get port name for the function + * @ctrl: ptr to Ctrl Info + * @phba: ptr to the dev priv structure + * + * Get the alphanumeric character for port + * + **/ +int mgmt_get_port_name(struct be_ctrl_info *ctrl, + struct beiscsi_hba *phba) +{ + int ret = 0; + struct be_mcc_wrb *wrb; + struct be_cmd_get_port_name *ioctl; + + mutex_lock(&ctrl->mbox_lock); + wrb = wrb_from_mbox(&ctrl->mbox_mem); + memset(wrb, 0, sizeof(*wrb)); + ioctl = embedded_payload(wrb); + + be_wrb_hdr_prepare(wrb, sizeof(*ioctl), true, 0); + be_cmd_hdr_prepare(&ioctl->h.req_hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_GET_PORT_NAME, + EMBED_MBX_MAX_PAYLOAD_SIZE); + ret = be_mbox_notify(ctrl); + phba->port_name = 0; + if (!ret) { + phba->port_name = ioctl->p.resp.port_names >> + (phba->fw_config.phys_port * 8) & 0xff; + } else { + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, + "BG_%d : GET_PORT_NAME ret 0x%x status 0x%x\n", + ret, ioctl->h.resp_hdr.status); + } + + if (phba->port_name == 0) + phba->port_name = '?'; + + mutex_unlock(&ctrl->mbox_lock); + return ret; +} + +/** * 
mgmt_get_fw_config()- Get the FW config for the function * @ctrl: ptr to Ctrl Info * @phba: ptr to the dev priv structure @@ -331,91 +361,147 @@ int mgmt_get_fw_config(struct be_ctrl_info *ctrl, struct beiscsi_hba *phba) { struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); - struct be_fw_cfg *req = embedded_payload(wrb); - int status = 0; + struct be_fw_cfg *pfw_cfg = embedded_payload(wrb); + uint32_t cid_count, icd_count; + int status = -EINVAL; + uint8_t ulp_num = 0; - spin_lock(&ctrl->mbox_lock); + mutex_lock(&ctrl->mbox_lock); memset(wrb, 0, sizeof(*wrb)); + be_wrb_hdr_prepare(wrb, sizeof(*pfw_cfg), true, 0); - be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); - - be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + be_cmd_hdr_prepare(&pfw_cfg->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, EMBED_MBX_MAX_PAYLOAD_SIZE); - status = be_mbox_notify(ctrl); - if (!status) { - uint8_t ulp_num = 0; - struct be_fw_cfg *pfw_cfg; - pfw_cfg = req; - if (!is_chip_be2_be3r(phba)) { - phba->fw_config.eqid_count = pfw_cfg->eqid_count; - phba->fw_config.cqid_count = pfw_cfg->cqid_count; + if (be_mbox_notify(ctrl)) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BG_%d : Failed in mgmt_get_fw_config\n"); + goto fail_init; + } - beiscsi_log(phba, KERN_INFO, - BEISCSI_LOG_INIT, - "BG_%d : EQ_Count : %d CQ_Count : %d\n", - phba->fw_config.eqid_count, + /* FW response formats depend on port id */ + phba->fw_config.phys_port = pfw_cfg->phys_port; + if (phba->fw_config.phys_port >= BEISCSI_PHYS_PORT_MAX) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BG_%d : invalid physical port id %d\n", + phba->fw_config.phys_port); + goto fail_init; + } + + /* populate and check FW config against min and max values */ + if (!is_chip_be2_be3r(phba)) { + phba->fw_config.eqid_count = pfw_cfg->eqid_count; + phba->fw_config.cqid_count = pfw_cfg->cqid_count; + if (phba->fw_config.eqid_count == 0 || + phba->fw_config.eqid_count > 2048) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BG_%d : invalid EQ count %d\n", + phba->fw_config.eqid_count); + goto fail_init; + } + if (phba->fw_config.cqid_count == 0 || + phba->fw_config.cqid_count > 4096) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BG_%d : invalid CQ count %d\n", phba->fw_config.cqid_count); + goto fail_init; } + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, + "BG_%d : EQ_Count : %d CQ_Count : %d\n", + phba->fw_config.eqid_count, + phba->fw_config.cqid_count); + } - for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) - if (pfw_cfg->ulp[ulp_num].ulp_mode & - BEISCSI_ULP_ISCSI_INI_MODE) - set_bit(ulp_num, - &phba->fw_config.ulp_supported); - - phba->fw_config.phys_port = pfw_cfg->phys_port; - for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { - if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { - - phba->fw_config.iscsi_cid_start[ulp_num] = - pfw_cfg->ulp[ulp_num].sq_base; - phba->fw_config.iscsi_cid_count[ulp_num] = - pfw_cfg->ulp[ulp_num].sq_count; - - phba->fw_config.iscsi_icd_start[ulp_num] = - pfw_cfg->ulp[ulp_num].icd_base; - phba->fw_config.iscsi_icd_count[ulp_num] = - pfw_cfg->ulp[ulp_num].icd_count; - - phba->fw_config.iscsi_chain_start[ulp_num] = - pfw_cfg->chain_icd[ulp_num].chain_base; - phba->fw_config.iscsi_chain_count[ulp_num] = - pfw_cfg->chain_icd[ulp_num].chain_count; - - beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, - "BG_%d : Function loaded on ULP : %d\n" - "\tiscsi_cid_count : %d\n" - "\tiscsi_cid_start : %d\n" - "\t iscsi_icd_count : %d\n" - "\t iscsi_icd_start : %d\n", - ulp_num, - 
phba->fw_config. - iscsi_cid_count[ulp_num], - phba->fw_config. - iscsi_cid_start[ulp_num], - phba->fw_config. - iscsi_icd_count[ulp_num], - phba->fw_config. - iscsi_icd_start[ulp_num]); - } + /** + * Check on which all ULP iSCSI Protocol is loaded. + * Set the Bit for those ULP. This set flag is used + * at all places in the code to check on which ULP + * iSCSi Protocol is loaded + **/ + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { + if (pfw_cfg->ulp[ulp_num].ulp_mode & + BEISCSI_ULP_ISCSI_INI_MODE) { + set_bit(ulp_num, &phba->fw_config.ulp_supported); + + /* Get the CID, ICD and Chain count for each ULP */ + phba->fw_config.iscsi_cid_start[ulp_num] = + pfw_cfg->ulp[ulp_num].sq_base; + phba->fw_config.iscsi_cid_count[ulp_num] = + pfw_cfg->ulp[ulp_num].sq_count; + + phba->fw_config.iscsi_icd_start[ulp_num] = + pfw_cfg->ulp[ulp_num].icd_base; + phba->fw_config.iscsi_icd_count[ulp_num] = + pfw_cfg->ulp[ulp_num].icd_count; + + phba->fw_config.iscsi_chain_start[ulp_num] = + pfw_cfg->chain_icd[ulp_num].chain_base; + phba->fw_config.iscsi_chain_count[ulp_num] = + pfw_cfg->chain_icd[ulp_num].chain_count; + + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, + "BG_%d : Function loaded on ULP : %d\n" + "\tiscsi_cid_count : %d\n" + "\tiscsi_cid_start : %d\n" + "\t iscsi_icd_count : %d\n" + "\t iscsi_icd_start : %d\n", + ulp_num, + phba->fw_config. + iscsi_cid_count[ulp_num], + phba->fw_config. + iscsi_cid_start[ulp_num], + phba->fw_config. + iscsi_icd_count[ulp_num], + phba->fw_config. + iscsi_icd_start[ulp_num]); } + } - phba->fw_config.dual_ulp_aware = (pfw_cfg->function_mode & - BEISCSI_FUNC_DUA_MODE); + if (phba->fw_config.ulp_supported == 0) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BG_%d : iSCSI initiator mode not set: ULP0 %x ULP1 %x\n", + pfw_cfg->ulp[BEISCSI_ULP0].ulp_mode, + pfw_cfg->ulp[BEISCSI_ULP1].ulp_mode); + goto fail_init; + } - beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, - "BG_%d : DUA Mode : 0x%x\n", - phba->fw_config.dual_ulp_aware); + /** + * ICD is shared among ULPs. Use icd_count of any one loaded ULP + **/ + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) + if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) + break; + icd_count = phba->fw_config.iscsi_icd_count[ulp_num]; + if (icd_count == 0 || icd_count > 65536) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BG_%d: invalid ICD count %d\n", icd_count); + goto fail_init; + } - } else { + cid_count = BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP0) + + BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP1); + if (cid_count == 0 || cid_count > 4096) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, - "BG_%d : Failed in mgmt_get_fw_config\n"); - status = -EINVAL; + "BG_%d: invalid CID count %d\n", cid_count); + goto fail_init; } - spin_unlock(&ctrl->mbox_lock); + /** + * Check FW is dual ULP aware i.e. can handle either + * of the protocols. 
+ */ + phba->fw_config.dual_ulp_aware = (pfw_cfg->function_mode & + BEISCSI_FUNC_DUA_MODE); + + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, + "BG_%d : DUA Mode : 0x%x\n", + phba->fw_config.dual_ulp_aware); + + /* all set, continue using this FW config */ + status = 0; +fail_init: + mutex_unlock(&ctrl->mbox_lock); return status; } @@ -440,7 +526,7 @@ int mgmt_check_supported_fw(struct be_ctrl_info *ctrl, nonemb_cmd.size = sizeof(struct be_mgmt_controller_attributes); req = nonemb_cmd.va; memset(req, 0, sizeof(*req)); - spin_lock(&ctrl->mbox_lock); + mutex_lock(&ctrl->mbox_lock); memset(wrb, 0, sizeof(*wrb)); be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1); be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, @@ -470,7 +556,7 @@ int mgmt_check_supported_fw(struct be_ctrl_info *ctrl, } else beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BG_%d : Failed in mgmt_check_supported_fw\n"); - spin_unlock(&ctrl->mbox_lock); + mutex_unlock(&ctrl->mbox_lock); if (nonemb_cmd.va) pci_free_consistent(ctrl->pdev, nonemb_cmd.size, nonemb_cmd.va, nonemb_cmd.dma); @@ -501,8 +587,9 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl, req->region = region; req->sector = sector; req->offset = offset; - spin_lock(&ctrl->mbox_lock); + if (mutex_lock_interruptible(&ctrl->mbox_lock)) + return 0; switch (bsg_req->rqst_data.h_vendor.vendor_cmd[0]) { case BEISCSI_WRITE_FLASH: offset = sector * sector_size + offset; @@ -521,28 +608,26 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl, "BG_%d : Unsupported cmd = 0x%x\n\n", bsg_req->rqst_data.h_vendor.vendor_cmd[0]); - spin_unlock(&ctrl->mbox_lock); + mutex_unlock(&ctrl->mbox_lock); return -ENOSYS; } - tag = alloc_mcc_tag(phba); - if (!tag) { - spin_unlock(&ctrl->mbox_lock); - return tag; + wrb = alloc_mcc_wrb(phba, &tag); + if (!wrb) { + mutex_unlock(&ctrl->mbox_lock); + return 0; } - wrb = wrb_from_mccq(phba); mcc_sge = nonembedded_sgl(wrb); be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, job->request_payload.sg_cnt); mcc_sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma)); mcc_sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF); mcc_sge->len = cpu_to_le32(nonemb_cmd->size); - wrb->tag0 |= tag; - be_mcc_notify(phba); + be_mcc_notify(phba, tag); - spin_unlock(&ctrl->mbox_lock); + mutex_unlock(&ctrl->mbox_lock); return tag; } @@ -558,12 +643,19 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl, int mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short ulp_num) { struct be_ctrl_info *ctrl = &phba->ctrl; - struct be_mcc_wrb *wrb = wrb_from_mccq(phba); - struct iscsi_cleanup_req *req = embedded_payload(wrb); - int status = 0; + struct be_mcc_wrb *wrb; + struct iscsi_cleanup_req *req; + unsigned int tag; + int status; - spin_lock(&ctrl->mbox_lock); + mutex_lock(&ctrl->mbox_lock); + wrb = alloc_mcc_wrb(phba, &tag); + if (!wrb) { + mutex_unlock(&ctrl->mbox_lock); + return -EBUSY; + } + req = embedded_payload(wrb); be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, OPCODE_COMMON_ISCSI_CLEANUP, sizeof(*req)); @@ -572,11 +664,12 @@ int mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short ulp_num) req->hdr_ring_id = cpu_to_le16(HWI_GET_DEF_HDRQ_ID(phba, ulp_num)); req->data_ring_id = cpu_to_le16(HWI_GET_DEF_BUFQ_ID(phba, ulp_num)); - status = be_mcc_notify_wait(phba); + be_mcc_notify(phba, tag); + status = be_mcc_compl_poll(phba, tag); if (status) beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, "BG_%d : mgmt_epfw_cleanup , FAILED\n"); - 
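/*
 * mgmt_get_fw_config() now sanity-checks everything the firmware
 * reports before the driver sizes its queues from it, and funnels
 * every failure through a single unlock-and-return label.  The
 * validation reduced to a standalone sketch (limits as in the patch;
 * the helper name is hypothetical):
 */
#include <linux/errno.h>

static int check_fw_limits(unsigned int eq_count, unsigned int cq_count,
			   unsigned int icd_count, unsigned int cid_count)
{
	int status = -EINVAL;

	if (eq_count == 0 || eq_count > 2048)
		goto fail_init;
	if (cq_count == 0 || cq_count > 4096)
		goto fail_init;
	if (icd_count == 0 || icd_count > 65536)
		goto fail_init;
	if (cid_count == 0 || cid_count > 4096)
		goto fail_init;
	status = 0;		/* all set, safe to use this FW config */
fail_init:
	return status;
}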
spin_unlock(&ctrl->mbox_lock); + mutex_unlock(&ctrl->mbox_lock); return status; } @@ -590,20 +683,18 @@ unsigned int mgmt_invalidate_icds(struct beiscsi_hba *phba, struct be_mcc_wrb *wrb; struct be_sge *sge; struct invalidate_commands_params_in *req; - unsigned int i, tag = 0; + unsigned int i, tag; - spin_lock(&ctrl->mbox_lock); - tag = alloc_mcc_tag(phba); - if (!tag) { - spin_unlock(&ctrl->mbox_lock); - return tag; + mutex_lock(&ctrl->mbox_lock); + wrb = alloc_mcc_wrb(phba, &tag); + if (!wrb) { + mutex_unlock(&ctrl->mbox_lock); + return 0; } req = nonemb_cmd->va; memset(req, 0, sizeof(*req)); - wrb = wrb_from_mccq(phba); sge = nonembedded_sgl(wrb); - wrb->tag0 |= tag; be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1); be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, @@ -621,8 +712,8 @@ unsigned int mgmt_invalidate_icds(struct beiscsi_hba *phba, sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF); sge->len = cpu_to_le32(nonemb_cmd->size); - be_mcc_notify(phba); - spin_unlock(&ctrl->mbox_lock); + be_mcc_notify(phba, tag); + mutex_unlock(&ctrl->mbox_lock); return tag; } @@ -637,16 +728,14 @@ unsigned int mgmt_invalidate_connection(struct beiscsi_hba *phba, struct iscsi_invalidate_connection_params_in *req; unsigned int tag = 0; - spin_lock(&ctrl->mbox_lock); - tag = alloc_mcc_tag(phba); - if (!tag) { - spin_unlock(&ctrl->mbox_lock); - return tag; + mutex_lock(&ctrl->mbox_lock); + wrb = alloc_mcc_wrb(phba, &tag); + if (!wrb) { + mutex_unlock(&ctrl->mbox_lock); + return 0; } - wrb = wrb_from_mccq(phba); - wrb->tag0 |= tag; - req = embedded_payload(wrb); + req = embedded_payload(wrb); be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI, OPCODE_ISCSI_INI_DRIVER_INVALIDATE_CONNECTION, @@ -658,8 +747,8 @@ unsigned int mgmt_invalidate_connection(struct beiscsi_hba *phba, else req->cleanup_type = CMD_ISCSI_CONNECTION_INVALIDATE; req->save_cfg = savecfg_flag; - be_mcc_notify(phba); - spin_unlock(&ctrl->mbox_lock); + be_mcc_notify(phba, tag); + mutex_unlock(&ctrl->mbox_lock); return tag; } @@ -669,25 +758,23 @@ unsigned int mgmt_upload_connection(struct beiscsi_hba *phba, struct be_ctrl_info *ctrl = &phba->ctrl; struct be_mcc_wrb *wrb; struct tcp_upload_params_in *req; - unsigned int tag = 0; + unsigned int tag; - spin_lock(&ctrl->mbox_lock); - tag = alloc_mcc_tag(phba); - if (!tag) { - spin_unlock(&ctrl->mbox_lock); - return tag; + mutex_lock(&ctrl->mbox_lock); + wrb = alloc_mcc_wrb(phba, &tag); + if (!wrb) { + mutex_unlock(&ctrl->mbox_lock); + return 0; } - wrb = wrb_from_mccq(phba); - req = embedded_payload(wrb); - wrb->tag0 |= tag; + req = embedded_payload(wrb); be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); be_cmd_hdr_prepare(&req->hdr, CMD_COMMON_TCP_UPLOAD, OPCODE_COMMON_TCP_UPLOAD, sizeof(*req)); req->id = (unsigned short)cid; req->upload_type = (unsigned char)upload_flag; - be_mcc_notify(phba); - spin_unlock(&ctrl->mbox_lock); + be_mcc_notify(phba, tag); + mutex_unlock(&ctrl->mbox_lock); return tag; } @@ -722,6 +809,13 @@ int mgmt_open_connection(struct beiscsi_hba *phba, unsigned short cid = beiscsi_ep->ep_cid; struct be_sge *sge; + if (dst_addr->sa_family != PF_INET && dst_addr->sa_family != PF_INET6) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, + "BG_%d : unknown addr family %d\n", + dst_addr->sa_family); + return -EINVAL; + } + phwi_ctrlr = phba->phwi_ctrlr; phwi_context = phwi_ctrlr->phwi_ctxt; @@ -732,18 +826,17 @@ int mgmt_open_connection(struct beiscsi_hba *phba, ptemplate_address = &template_address; 
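/*
 * mgmt_open_connection() (continuing just below) now rejects unknown
 * address families before any state is touched, so the failure path no
 * longer has to unlock and free a tag; the mailbox mutex is then taken
 * interruptibly so a signal can abort the wait.  The ordering in
 * outline (open_conn_sketch is hypothetical; the checks and return
 * values match the patch):
 */
#include <linux/mutex.h>
#include <linux/socket.h>

static int open_conn_sketch(struct mutex *mbox_lock,
			    struct sockaddr *dst_addr)
{
	/* cheap parameter checks first: nothing to undo on failure */
	if (dst_addr->sa_family != PF_INET && dst_addr->sa_family != PF_INET6)
		return -EINVAL;

	/* non-zero means a signal arrived while sleeping on the mutex */
	if (mutex_lock_interruptible(mbox_lock))
		return 0;	/* 0 = "no tag allocated" to the caller */
	/* ... allocate WRB, build the TCP-connect request, notify ... */
	mutex_unlock(mbox_lock);
	return 0;
}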
ISCSI_GET_PDU_TEMPLATE_ADDRESS(phba, ptemplate_address); - spin_lock(&ctrl->mbox_lock); - tag = alloc_mcc_tag(phba); - if (!tag) { - spin_unlock(&ctrl->mbox_lock); - return tag; + if (mutex_lock_interruptible(&ctrl->mbox_lock)) + return 0; + wrb = alloc_mcc_wrb(phba, &tag); + if (!wrb) { + mutex_unlock(&ctrl->mbox_lock); + return 0; } - wrb = wrb_from_mccq(phba); - sge = nonembedded_sgl(wrb); + sge = nonembedded_sgl(wrb); req = nonemb_cmd->va; memset(req, 0, sizeof(*req)); - wrb->tag0 |= tag; be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, 1); be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, @@ -760,7 +853,8 @@ int mgmt_open_connection(struct beiscsi_hba *phba, beiscsi_ep->dst_addr = daddr_in->sin_addr.s_addr; beiscsi_ep->dst_tcpport = ntohs(daddr_in->sin_port); beiscsi_ep->ip_type = BE2_IPV4; - } else if (dst_addr->sa_family == PF_INET6) { + } else { + /* else its PF_INET6 family */ req->ip_address.ip_type = BE2_IPV6; memcpy(&req->ip_address.addr, &daddr_in6->sin6_addr.in6_u.u6_addr8, 16); @@ -769,14 +863,6 @@ int mgmt_open_connection(struct beiscsi_hba *phba, memcpy(&beiscsi_ep->dst6_addr, &daddr_in6->sin6_addr.in6_u.u6_addr8, 16); beiscsi_ep->ip_type = BE2_IPV6; - } else{ - beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, - "BG_%d : unknown addr family %d\n", - dst_addr->sa_family); - spin_unlock(&ctrl->mbox_lock); - free_mcc_tag(&phba->ctrl, tag); - return -EINVAL; - } req->cid = cid; i = phba->nxt_cqid++; @@ -801,35 +887,45 @@ int mgmt_open_connection(struct beiscsi_hba *phba, req->tcp_window_scale_count = 2; } - be_mcc_notify(phba); - spin_unlock(&ctrl->mbox_lock); + be_mcc_notify(phba, tag); + mutex_unlock(&ctrl->mbox_lock); return tag; } unsigned int mgmt_get_all_if_id(struct beiscsi_hba *phba) { struct be_ctrl_info *ctrl = &phba->ctrl; - struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); - struct be_cmd_get_all_if_id_req *req = embedded_payload(wrb); - struct be_cmd_get_all_if_id_req *pbe_allid = req; + struct be_mcc_wrb *wrb; + struct be_cmd_get_all_if_id_req *req; + struct be_cmd_get_all_if_id_req *pbe_allid; + unsigned int tag; int status = 0; - memset(wrb, 0, sizeof(*wrb)); - - spin_lock(&ctrl->mbox_lock); + if (mutex_lock_interruptible(&ctrl->mbox_lock)) + return -EINTR; + wrb = alloc_mcc_wrb(phba, &tag); + if (!wrb) { + mutex_unlock(&ctrl->mbox_lock); + return -ENOMEM; + } + req = embedded_payload(wrb); be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, OPCODE_COMMON_ISCSI_NTWK_GET_ALL_IF_ID, sizeof(*req)); - status = be_mbox_notify(ctrl); - if (!status) - phba->interface_handle = pbe_allid->if_hndl_list[0]; - else { + be_mcc_notify(phba, tag); + mutex_unlock(&ctrl->mbox_lock); + + status = beiscsi_mccq_compl_wait(phba, tag, &wrb, NULL); + if (status) { beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, "BG_%d : Failed in mgmt_get_all_if_id\n"); + return -EBUSY; } - spin_unlock(&ctrl->mbox_lock); + + pbe_allid = embedded_payload(wrb); + phba->interface_handle = pbe_allid->if_hndl_list[0]; return status; } @@ -852,27 +948,24 @@ static int mgmt_exec_nonemb_cmd(struct beiscsi_hba *phba, unsigned int tag; int rc = 0; - spin_lock(&ctrl->mbox_lock); - tag = alloc_mcc_tag(phba); - if (!tag) { - spin_unlock(&ctrl->mbox_lock); + mutex_lock(&ctrl->mbox_lock); + wrb = alloc_mcc_wrb(phba, &tag); + if (!wrb) { + mutex_unlock(&ctrl->mbox_lock); rc = -ENOMEM; goto free_cmd; } - wrb = wrb_from_mccq(phba); - wrb->tag0 |= tag; sge = nonembedded_sgl(wrb); - be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, 1); sge->pa_hi = 
cpu_to_le32(upper_32_bits(nonemb_cmd->dma)); sge->pa_lo = cpu_to_le32(lower_32_bits(nonemb_cmd->dma)); sge->len = cpu_to_le32(nonemb_cmd->size); - be_mcc_notify(phba); - spin_unlock(&ctrl->mbox_lock); + be_mcc_notify(phba, tag); + mutex_unlock(&ctrl->mbox_lock); - rc = beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd); + rc = beiscsi_mccq_compl_wait(phba, tag, NULL, nonemb_cmd); if (resp_buf) memcpy(resp_buf, nonemb_cmd->va, resp_buf_len); @@ -1003,8 +1096,9 @@ int mgmt_set_ip(struct beiscsi_hba *phba, uint32_t ip_type; int rc; - if (mgmt_get_all_if_id(phba)) - return -EIO; + rc = mgmt_get_all_if_id(phba); + if (rc) + return rc; ip_type = (ip_param->param == ISCSI_NET_PARAM_IPV6_ADDR) ? BE2_IPV6 : BE2_IPV4 ; @@ -1173,8 +1267,9 @@ int mgmt_get_if_info(struct beiscsi_hba *phba, int ip_type, uint32_t ioctl_size = sizeof(struct be_cmd_get_if_info_resp); int rc; - if (mgmt_get_all_if_id(phba)) - return -EIO; + rc = mgmt_get_all_if_id(phba); + if (rc) + return rc; do { rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd, @@ -1245,55 +1340,27 @@ int mgmt_get_nic_conf(struct beiscsi_hba *phba, unsigned int be_cmd_get_initname(struct beiscsi_hba *phba) { - unsigned int tag = 0; + unsigned int tag; struct be_mcc_wrb *wrb; struct be_cmd_hba_name *req; struct be_ctrl_info *ctrl = &phba->ctrl; - spin_lock(&ctrl->mbox_lock); - tag = alloc_mcc_tag(phba); - if (!tag) { - spin_unlock(&ctrl->mbox_lock); - return tag; + if (mutex_lock_interruptible(&ctrl->mbox_lock)) + return 0; + wrb = alloc_mcc_wrb(phba, &tag); + if (!wrb) { + mutex_unlock(&ctrl->mbox_lock); + return 0; } - wrb = wrb_from_mccq(phba); req = embedded_payload(wrb); - wrb->tag0 |= tag; be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI, OPCODE_ISCSI_INI_CFG_GET_HBA_NAME, sizeof(*req)); - be_mcc_notify(phba); - spin_unlock(&ctrl->mbox_lock); - return tag; -} - -unsigned int be_cmd_get_port_speed(struct beiscsi_hba *phba) -{ - unsigned int tag = 0; - struct be_mcc_wrb *wrb; - struct be_cmd_ntwk_link_status_req *req; - struct be_ctrl_info *ctrl = &phba->ctrl; - - spin_lock(&ctrl->mbox_lock); - tag = alloc_mcc_tag(phba); - if (!tag) { - spin_unlock(&ctrl->mbox_lock); - return tag; - } - - wrb = wrb_from_mccq(phba); - req = embedded_payload(wrb); - wrb->tag0 |= tag; - be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); - be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, - OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, - sizeof(*req)); - - be_mcc_notify(phba); - spin_unlock(&ctrl->mbox_lock); + be_mcc_notify(phba, tag); + mutex_unlock(&ctrl->mbox_lock); return tag; } @@ -1330,7 +1397,7 @@ int be_mgmt_get_boot_shandle(struct beiscsi_hba *phba, return -EAGAIN; } - rc = beiscsi_mccq_compl(phba, tag, &wrb, NULL); + rc = beiscsi_mccq_compl_wait(phba, tag, &wrb, NULL); if (rc) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG, @@ -1364,7 +1431,7 @@ int be_mgmt_get_boot_shandle(struct beiscsi_hba *phba, return -EAGAIN; } - rc = beiscsi_mccq_compl(phba, tag, NULL, NULL); + rc = beiscsi_mccq_compl_wait(phba, tag, NULL, NULL); if (rc) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG, @@ -1406,7 +1473,7 @@ int mgmt_set_vlan(struct beiscsi_hba *phba, return -EBUSY; } - rc = beiscsi_mccq_compl(phba, tag, NULL, NULL); + rc = beiscsi_mccq_compl_wait(phba, tag, NULL, NULL); if (rc) { beiscsi_log(phba, KERN_ERR, (BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX), @@ -1749,19 +1816,17 @@ int beiscsi_logout_fw_sess(struct beiscsi_hba *phba, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, "BG_%d : In 
bescsi_logout_fwboot_sess\n"); - spin_lock(&ctrl->mbox_lock); - tag = alloc_mcc_tag(phba); - if (!tag) { - spin_unlock(&ctrl->mbox_lock); + mutex_lock(&ctrl->mbox_lock); + wrb = alloc_mcc_wrb(phba, &tag); + if (!wrb) { + mutex_unlock(&ctrl->mbox_lock); beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, "BG_%d : MBX Tag Failure\n"); return -EINVAL; } - wrb = wrb_from_mccq(phba); req = embedded_payload(wrb); - wrb->tag0 |= tag; be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI, OPCODE_ISCSI_INI_SESSION_LOGOUT_TARGET, @@ -1769,10 +1834,10 @@ int beiscsi_logout_fw_sess(struct beiscsi_hba *phba, /* Set the session handle */ req->session_handle = fw_sess_handle; - be_mcc_notify(phba); - spin_unlock(&ctrl->mbox_lock); + be_mcc_notify(phba, tag); + mutex_unlock(&ctrl->mbox_lock); - rc = beiscsi_mccq_compl(phba, tag, &wrb, NULL); + rc = beiscsi_mccq_compl_wait(phba, tag, &wrb, NULL); if (rc) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG, diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h index c1dbb690ee27..f3a48a04b2ca 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.h +++ b/drivers/scsi/be2iscsi/be_mgmt.h @@ -268,6 +268,8 @@ struct beiscsi_endpoint { int mgmt_get_fw_config(struct be_ctrl_info *ctrl, struct beiscsi_hba *phba); +int mgmt_get_port_name(struct be_ctrl_info *ctrl, + struct beiscsi_hba *phba); unsigned int mgmt_invalidate_connection(struct beiscsi_hba *phba, struct beiscsi_endpoint *beiscsi_ep, diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h index 4ad7e368bbc2..0e119d838e1b 100644 --- a/drivers/scsi/bfa/bfa.h +++ b/drivers/scsi/bfa/bfa.h @@ -1,9 +1,10 @@ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. * All rights reserved - * www.brocade.com + * www.qlogic.com * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c index e3f67b097a5c..7209afad82f7 100644 --- a/drivers/scsi/bfa/bfa_core.c +++ b/drivers/scsi/bfa/bfa_core.c @@ -1,9 +1,10 @@ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. * All rights reserved - * www.brocade.com + * www.qlogic.com * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. 
* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -90,6 +91,25 @@ static bfa_ioc_mbox_mcfunc_t bfa_mbox_isrs[BFI_MC_MAX] = { +void +__bfa_trc(struct bfa_trc_mod_s *trcm, int fileno, int line, u64 data) +{ + int tail = trcm->tail; + struct bfa_trc_s *trc = &trcm->trc[tail]; + + if (trcm->stopped) + return; + + trc->fileno = (u16) fileno; + trc->line = (u16) line; + trc->data.u64 = data; + trc->timestamp = BFA_TRC_TS(trcm); + + trcm->tail = (trcm->tail + 1) & (BFA_TRC_MAX - 1); + if (trcm->tail == trcm->head) + trcm->head = (trcm->head + 1) & (BFA_TRC_MAX - 1); +} + static void bfa_com_port_attach(struct bfa_s *bfa) { diff --git a/drivers/scsi/bfa/bfa_cs.h b/drivers/scsi/bfa/bfa_cs.h index 91a8aa394db5..df6760ca0911 100644 --- a/drivers/scsi/bfa/bfa_cs.h +++ b/drivers/scsi/bfa/bfa_cs.h @@ -1,9 +1,10 @@ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. * All rights reserved - * www.brocade.com + * www.qlogic.com * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -107,44 +108,11 @@ bfa_trc_stop(struct bfa_trc_mod_s *trcm) trcm->stopped = 1; } -static inline void -__bfa_trc(struct bfa_trc_mod_s *trcm, int fileno, int line, u64 data) -{ - int tail = trcm->tail; - struct bfa_trc_s *trc = &trcm->trc[tail]; - - if (trcm->stopped) - return; - - trc->fileno = (u16) fileno; - trc->line = (u16) line; - trc->data.u64 = data; - trc->timestamp = BFA_TRC_TS(trcm); - - trcm->tail = (trcm->tail + 1) & (BFA_TRC_MAX - 1); - if (trcm->tail == trcm->head) - trcm->head = (trcm->head + 1) & (BFA_TRC_MAX - 1); -} - +void +__bfa_trc(struct bfa_trc_mod_s *trcm, int fileno, int line, u64 data); -static inline void -__bfa_trc32(struct bfa_trc_mod_s *trcm, int fileno, int line, u32 data) -{ - int tail = trcm->tail; - struct bfa_trc_s *trc = &trcm->trc[tail]; - - if (trcm->stopped) - return; - - trc->fileno = (u16) fileno; - trc->line = (u16) line; - trc->data.u32.u32 = data; - trc->timestamp = BFA_TRC_TS(trcm); - - trcm->tail = (trcm->tail + 1) & (BFA_TRC_MAX - 1); - if (trcm->tail == trcm->head) - trcm->head = (trcm->head + 1) & (BFA_TRC_MAX - 1); -} +void +__bfa_trc32(struct bfa_trc_mod_s *trcm, int fileno, int line, u32 data); #define bfa_sm_fault(__mod, __event) do { \ bfa_trc(__mod, (((u32)0xDEAD << 16) | __event)); \ diff --git a/drivers/scsi/bfa/bfa_defs.h b/drivers/scsi/bfa/bfa_defs.h index 877b86dd2837..5dc3782d615b 100644 --- a/drivers/scsi/bfa/bfa_defs.h +++ b/drivers/scsi/bfa/bfa_defs.h @@ -1,9 +1,10 @@ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. * All rights reserved - * www.brocade.com + * www.qlogic.com * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. 
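The __bfa_trc() body that bfa_core.c gains here (bfa_cs.h loses the inline copy in a later hunk) is a textbook power-of-two ring buffer: BFA_TRC_MAX must be a power of two so wrap-around is a mask rather than a modulo, and when tail catches head the oldest record is overwritten. The same wrap logic as a self-contained, runnable sketch — TRC_MAX, trc_buf and trc_push are illustrative names, not BFA's:

#include <stdio.h>

#define TRC_MAX 16      /* power of two, so (x + 1) & (TRC_MAX - 1) wraps */

struct trc_buf {
        unsigned int head, tail;
        unsigned long long data[TRC_MAX];
};

static void trc_push(struct trc_buf *t, unsigned long long v)
{
        t->data[t->tail] = v;
        t->tail = (t->tail + 1) & (TRC_MAX - 1);
        if (t->tail == t->head)                 /* full: drop the oldest record */
                t->head = (t->head + 1) & (TRC_MAX - 1);
}

int main(void)
{
        struct trc_buf t = { 0, 0, { 0 } };
        unsigned int i;

        for (i = 0; i < 40; i++)
                trc_push(&t, i);
        /* after overflow the buffer keeps the newest TRC_MAX - 1 records: 25..39 */
        for (i = t.head; i != t.tail; i = (i + 1) & (TRC_MAX - 1))
                printf("%llu\n", t.data[i]);
        return 0;
}

Moving the body out of the header trades inlining for a single shared definition — presumably a code-size win for a cold tracing path.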
* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as diff --git a/drivers/scsi/bfa/bfa_defs_fcs.h b/drivers/scsi/bfa/bfa_defs_fcs.h index 06f0a163ca35..5815a904574d 100644 --- a/drivers/scsi/bfa/bfa_defs_fcs.h +++ b/drivers/scsi/bfa/bfa_defs_fcs.h @@ -1,9 +1,10 @@ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. * All rights reserved - * www.brocade.com + * www.qlogic.com * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as diff --git a/drivers/scsi/bfa/bfa_defs_svc.h b/drivers/scsi/bfa/bfa_defs_svc.h index 638f441ffc38..e81707f938cb 100644 --- a/drivers/scsi/bfa/bfa_defs_svc.h +++ b/drivers/scsi/bfa/bfa_defs_svc.h @@ -1,9 +1,10 @@ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. * All rights reserved - * www.brocade.com + * www.qlogic.com * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as diff --git a/drivers/scsi/bfa/bfa_fc.h b/drivers/scsi/bfa/bfa_fc.h index 64069a0a3d0d..18b7304d6b0b 100644 --- a/drivers/scsi/bfa/bfa_fc.h +++ b/drivers/scsi/bfa/bfa_fc.h @@ -1,9 +1,10 @@ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. * All rights reserved - * www.brocade.com + * www.qlogic.com * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as diff --git a/drivers/scsi/bfa/bfa_fcbuild.c b/drivers/scsi/bfa/bfa_fcbuild.c index dce787f6cca2..b8dadc9cc993 100644 --- a/drivers/scsi/bfa/bfa_fcbuild.c +++ b/drivers/scsi/bfa/bfa_fcbuild.c @@ -1,9 +1,10 @@ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. * All rights reserved - * www.brocade.com + * www.qlogic.com * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as diff --git a/drivers/scsi/bfa/bfa_fcbuild.h b/drivers/scsi/bfa/bfa_fcbuild.h index 03c753d1e548..b109a8813401 100644 --- a/drivers/scsi/bfa/bfa_fcbuild.h +++ b/drivers/scsi/bfa/bfa_fcbuild.h @@ -1,9 +1,10 @@ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. * All rights reserved - * www.brocade.com + * www.qlogic.com * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. 
+ * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c index d7385d1d9c5a..20982e7cdd81 100644 --- a/drivers/scsi/bfa/bfa_fcpim.c +++ b/drivers/scsi/bfa/bfa_fcpim.c @@ -1,9 +1,10 @@ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. * All rights reserved - * www.brocade.com + * www.qlogic.com * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h index e693af6e5930..e93921dec347 100644 --- a/drivers/scsi/bfa/bfa_fcpim.h +++ b/drivers/scsi/bfa/bfa_fcpim.h @@ -1,9 +1,10 @@ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. * All rights reserved - * www.brocade.com + * www.qlogic.com * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c index 0f19455951ec..1e7e139d71ea 100644 --- a/drivers/scsi/bfa/bfa_fcs.c +++ b/drivers/scsi/bfa/bfa_fcs.c @@ -1,9 +1,10 @@ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. * All rights reserved - * www.brocade.com + * www.qlogic.com * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as diff --git a/drivers/scsi/bfa/bfa_fcs.h b/drivers/scsi/bfa/bfa_fcs.h index 42bcb970445a..06dc215ea050 100644 --- a/drivers/scsi/bfa/bfa_fcs.h +++ b/drivers/scsi/bfa/bfa_fcs.h @@ -1,9 +1,10 @@ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. * All rights reserved - * www.brocade.com + * www.qlogic.com * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -633,7 +634,7 @@ void bfa_fcs_fcpim_uf_recv(struct bfa_fcs_itnim_s *itnim, /* * HBA Attribute Block : BFA internal representation. Note : Some variable - * sizes have been trimmed to suit BFA For Ex : Model will be "Brocade". Based + * sizes have been trimmed to suit BFA For Ex : Model will be "QLogic ". Based * on this the size has been reduced to 16 bytes from the standard's 64 bytes. 
*/ struct bfa_fcs_fdmi_hba_attr_s { diff --git a/drivers/scsi/bfa/bfa_fcs_fcpim.c b/drivers/scsi/bfa/bfa_fcs_fcpim.c index 6dc7926a3edd..4f089d76afb1 100644 --- a/drivers/scsi/bfa/bfa_fcs_fcpim.c +++ b/drivers/scsi/bfa/bfa_fcs_fcpim.c @@ -1,9 +1,10 @@ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. * All rights reserved - * www.brocade.com + * www.qlogic.com * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c index ff75ef891755..7733ad5305d4 100644 --- a/drivers/scsi/bfa/bfa_fcs_lport.c +++ b/drivers/scsi/bfa/bfa_fcs_lport.c @@ -1,9 +1,10 @@ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. * All rights reserved - * www.brocade.com + * www.qlogic.com * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -2653,7 +2654,7 @@ bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi, strncpy(hba_attr->node_sym_name.symname, port->port_cfg.node_sym_name.symname, BFA_SYMNAME_MAXLEN); - strcpy(hba_attr->vendor_info, "BROCADE"); + strcpy(hba_attr->vendor_info, "QLogic"); hba_attr->num_ports = cpu_to_be32(bfa_ioc_get_nports(&port->fcs->bfa->ioc)); hba_attr->fabric_name = port->fabric->lps->pr_nwwn; diff --git a/drivers/scsi/bfa/bfa_fcs_rport.c b/drivers/scsi/bfa/bfa_fcs_rport.c index 2035b0d64351..de50349a39ce 100644 --- a/drivers/scsi/bfa/bfa_fcs_rport.c +++ b/drivers/scsi/bfa/bfa_fcs_rport.c @@ -1,9 +1,10 @@ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. * All rights reserved - * www.brocade.com + * www.qlogic.com * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as diff --git a/drivers/scsi/bfa/bfa_hw_cb.c b/drivers/scsi/bfa/bfa_hw_cb.c index ea24d4c6e67a..c4a0c0eb88a5 100644 --- a/drivers/scsi/bfa/bfa_hw_cb.c +++ b/drivers/scsi/bfa/bfa_hw_cb.c @@ -1,9 +1,10 @@ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. * All rights reserved - * www.brocade.com + * www.qlogic.com * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. 
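Two substantive tweaks hide in the rename noise above: bfa_fcs_lport.c now reports "QLogic" as the FDMI vendor string, and the bfa_fcs.h comment records why the internal FDMI model field is 16 bytes rather than the standard's 64 — the driver only ever stores a short fixed name ("QLogic "). Restated as a pair of illustrative definitions (names hypothetical; the real layout is struct bfa_fcs_fdmi_hba_attr_s above):

/* 64 bytes on the wire, per the FDMI standard */
struct fdmi_model_std { char name[64]; };

/* 16 bytes in BFA's trimmed internal copy: "QLogic " always fits */
struct fdmi_model_bfa { char name[16]; };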
* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as diff --git a/drivers/scsi/bfa/bfa_hw_ct.c b/drivers/scsi/bfa/bfa_hw_ct.c index 637527f48b40..b0ff378dece2 100644 --- a/drivers/scsi/bfa/bfa_hw_ct.c +++ b/drivers/scsi/bfa/bfa_hw_ct.c @@ -1,9 +1,10 @@ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. * All rights reserved - * www.brocade.com + * www.qlogic.com * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c index 98f7e8cca52d..a1ada4a31c97 100644 --- a/drivers/scsi/bfa/bfa_ioc.c +++ b/drivers/scsi/bfa/bfa_ioc.c @@ -1,9 +1,10 @@ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. * All rights reserved - * www.brocade.com + * www.qlogic.com * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -2697,7 +2698,7 @@ bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc) bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_UNINIT); } -#define BFA_MFG_NAME "Brocade" +#define BFA_MFG_NAME "QLogic" void bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc, struct bfa_adapter_attr_s *ad_attr) @@ -2802,7 +2803,7 @@ void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer) { memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN); - memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN); + strncpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN); } void diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h index a38aafa030b3..713745da44c6 100644 --- a/drivers/scsi/bfa/bfa_ioc.h +++ b/drivers/scsi/bfa/bfa_ioc.h @@ -1,9 +1,10 @@ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. * All rights reserved - * www.brocade.com + * www.qlogic.com * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as diff --git a/drivers/scsi/bfa/bfa_ioc_cb.c b/drivers/scsi/bfa/bfa_ioc_cb.c index 453c2f5b5561..f1b80da298c8 100644 --- a/drivers/scsi/bfa/bfa_ioc_cb.c +++ b/drivers/scsi/bfa/bfa_ioc_cb.c @@ -1,9 +1,10 @@ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. * All rights reserved - * www.brocade.com + * www.qlogic.com * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. 
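One functional fix sits among the bfa_ioc.c renames above: BFA_MFG_NAME is now the 7-byte literal "QLogic", and the old memcpy() copied a full BFA_ADAPTER_MFG_NAME_LEN bytes out of that short string constant — an out-of-bounds read whenever the length constant exceeds the literal. strncpy() stops at the NUL, and the preceding memset() already guarantees the tail is zeroed. A condensed userspace illustration; MFG_NAME_LEN is a stand-in whose value is assumed:

#include <string.h>

#define MFG_NAME_LEN 16         /* stand-in for BFA_ADAPTER_MFG_NAME_LEN */

static void get_manufacturer(char *manufacturer)
{
        memset(manufacturer, 0, MFG_NAME_LEN);
        /*
         * memcpy(manufacturer, "QLogic", MFG_NAME_LEN) would read
         * MFG_NAME_LEN bytes from a 7-byte literal -- out of bounds.
         * strncpy() copies up to the NUL and zero-pads the remainder.
         */
        strncpy(manufacturer, "QLogic", MFG_NAME_LEN);
}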
* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as diff --git a/drivers/scsi/bfa/bfa_ioc_ct.c b/drivers/scsi/bfa/bfa_ioc_ct.c index bd53150e4ee0..651a8fb93037 100644 --- a/drivers/scsi/bfa/bfa_ioc_ct.c +++ b/drivers/scsi/bfa/bfa_ioc_ct.c @@ -1,9 +1,10 @@ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. * All rights reserved - * www.brocade.com + * www.qlogic.com * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h index a14c784ff3fc..53135f21fa0e 100644 --- a/drivers/scsi/bfa/bfa_modules.h +++ b/drivers/scsi/bfa/bfa_modules.h @@ -1,9 +1,10 @@ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. * All rights reserved - * www.brocade.com + * www.qlogic.com * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as diff --git a/drivers/scsi/bfa/bfa_plog.h b/drivers/scsi/bfa/bfa_plog.h index 1c9baa68339b..da570c0b8275 100644 --- a/drivers/scsi/bfa/bfa_plog.h +++ b/drivers/scsi/bfa/bfa_plog.h @@ -1,9 +1,10 @@ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. * All rights reserved - * www.brocade.com + * www.qlogic.com * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as diff --git a/drivers/scsi/bfa/bfa_port.c b/drivers/scsi/bfa/bfa_port.c index 8ea7697deb9b..da1721e0d167 100644 --- a/drivers/scsi/bfa/bfa_port.c +++ b/drivers/scsi/bfa/bfa_port.c @@ -1,9 +1,10 @@ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. * All rights reserved - * www.brocade.com + * www.qlogic.com * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as diff --git a/drivers/scsi/bfa/bfa_port.h b/drivers/scsi/bfa/bfa_port.h index 2fcab6bc6280..26dc1bf14c85 100644 --- a/drivers/scsi/bfa/bfa_port.h +++ b/drivers/scsi/bfa/bfa_port.h @@ -1,9 +1,10 @@ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. * All rights reserved - * www.brocade.com + * www.qlogic.com * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. 
+ * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c index 625225f31081..12de292175ef 100644 --- a/drivers/scsi/bfa/bfa_svc.c +++ b/drivers/scsi/bfa/bfa_svc.c @@ -1,9 +1,10 @@ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. * All rights reserved - * www.brocade.com + * www.qlogic.com * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as diff --git a/drivers/scsi/bfa/bfa_svc.h b/drivers/scsi/bfa/bfa_svc.h index ef07365991e7..ea2278bc78a8 100644 --- a/drivers/scsi/bfa/bfa_svc.h +++ b/drivers/scsi/bfa/bfa_svc.h @@ -1,9 +1,10 @@ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. * All rights reserved - * www.brocade.com + * www.qlogic.com * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c index cc3b9d3d6d40..9d253cb83ee7 100644 --- a/drivers/scsi/bfa/bfad.c +++ b/drivers/scsi/bfa/bfad.c @@ -1,9 +1,10 @@ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. * All rights reserved - * www.brocade.com + * www.qlogic.com * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -130,13 +131,9 @@ MODULE_PARM_DESC(bfa_linkup_delay, "Link up delay, default=30 secs for " "boot port. 
Otherwise 10 secs in RHEL4 & 0 for " "[RHEL5, SLES10, ESX40] Range[>0]"); module_param(msix_disable_cb, int, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(msix_disable_cb, "Disable Message Signaled Interrupts " - "for Brocade-415/425/815/825 cards, default=0, " - " Range[false:0|true:1]"); +MODULE_PARM_DESC(msix_disable_cb, "Disable Message Signaled Interrupts for QLogic-415/425/815/825 cards, default=0 Range[false:0|true:1]"); module_param(msix_disable_ct, int, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(msix_disable_ct, "Disable Message Signaled Interrupts " - "if possible for Brocade-1010/1020/804/1007/902/1741 " - "cards, default=0, Range[false:0|true:1]"); +MODULE_PARM_DESC(msix_disable_ct, "Disable Message Signaled Interrupts if possible for QLogic-1010/1020/804/1007/902/1741 cards, default=0, Range[false:0|true:1]"); module_param(fdmi_enable, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(fdmi_enable, "Enables fdmi registration, default=1, " "Range[false:0|true:1]"); @@ -838,8 +835,7 @@ bfad_drv_init(struct bfad_s *bfad) printk(KERN_WARNING "bfad%d bfad_hal_mem_alloc failure\n", bfad->inst_no); printk(KERN_WARNING - "Not enough memory to attach all Brocade HBA ports, %s", - "System may need more memory.\n"); + "Not enough memory to attach all QLogic BR-series HBA ports. System may need more memory.\n"); return BFA_STATUS_FAILED; } @@ -1710,7 +1706,7 @@ bfad_init(void) { int error = 0; - printk(KERN_INFO "Brocade BFA FC/FCOE SCSI driver - version: %s\n", + pr_info("QLogic BR-series BFA FC/FCOE SCSI driver - version: %s\n", BFAD_DRIVER_VERSION); if (num_sgpgs > 0) @@ -1817,6 +1813,6 @@ bfad_free_fwimg(void) module_init(bfad_init); module_exit(bfad_exit); MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("Brocade Fibre Channel HBA Driver" BFAD_PROTO_NAME); -MODULE_AUTHOR("Brocade Communications Systems, Inc."); +MODULE_DESCRIPTION("QLogic BR-series Fibre Channel HBA Driver" BFAD_PROTO_NAME); +MODULE_AUTHOR("QLogic Corporation"); MODULE_VERSION(BFAD_DRIVER_VERSION); diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c index 40be670a1cbc..13db3b7bc873 100644 --- a/drivers/scsi/bfa/bfad_attr.c +++ b/drivers/scsi/bfa/bfad_attr.c @@ -1,9 +1,10 @@ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. * All rights reserved - * www.brocade.com + * www.qlogic.com * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. 
* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -750,65 +751,65 @@ bfad_im_model_desc_show(struct device *dev, struct device_attribute *attr, bfa_get_adapter_model(&bfad->bfa, model); nports = bfa_get_nports(&bfad->bfa); - if (!strcmp(model, "Brocade-425")) + if (!strcmp(model, "QLogic-425")) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, - "Brocade 4Gbps PCIe dual port FC HBA"); - else if (!strcmp(model, "Brocade-825")) + "QLogic BR-series 4Gbps PCIe dual port FC HBA"); + else if (!strcmp(model, "QLogic-825")) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, - "Brocade 8Gbps PCIe dual port FC HBA"); - else if (!strcmp(model, "Brocade-42B")) + "QLogic BR-series 8Gbps PCIe dual port FC HBA"); + else if (!strcmp(model, "QLogic-42B")) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, - "Brocade 4Gbps PCIe dual port FC HBA for HP"); - else if (!strcmp(model, "Brocade-82B")) + "QLogic BR-series 4Gbps PCIe dual port FC HBA for HP"); + else if (!strcmp(model, "QLogic-82B")) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, - "Brocade 8Gbps PCIe dual port FC HBA for HP"); - else if (!strcmp(model, "Brocade-1010")) + "QLogic BR-series 8Gbps PCIe dual port FC HBA for HP"); + else if (!strcmp(model, "QLogic-1010")) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, - "Brocade 10Gbps single port CNA"); - else if (!strcmp(model, "Brocade-1020")) + "QLogic BR-series 10Gbps single port CNA"); + else if (!strcmp(model, "QLogic-1020")) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, - "Brocade 10Gbps dual port CNA"); - else if (!strcmp(model, "Brocade-1007")) + "QLogic BR-series 10Gbps dual port CNA"); + else if (!strcmp(model, "QLogic-1007")) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, - "Brocade 10Gbps CNA for IBM Blade Center"); - else if (!strcmp(model, "Brocade-415")) + "QLogic BR-series 10Gbps CNA for IBM Blade Center"); + else if (!strcmp(model, "QLogic-415")) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, - "Brocade 4Gbps PCIe single port FC HBA"); - else if (!strcmp(model, "Brocade-815")) + "QLogic BR-series 4Gbps PCIe single port FC HBA"); + else if (!strcmp(model, "QLogic-815")) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, - "Brocade 8Gbps PCIe single port FC HBA"); - else if (!strcmp(model, "Brocade-41B")) + "QLogic BR-series 8Gbps PCIe single port FC HBA"); + else if (!strcmp(model, "QLogic-41B")) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, - "Brocade 4Gbps PCIe single port FC HBA for HP"); - else if (!strcmp(model, "Brocade-81B")) + "QLogic BR-series 4Gbps PCIe single port FC HBA for HP"); + else if (!strcmp(model, "QLogic-81B")) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, - "Brocade 8Gbps PCIe single port FC HBA for HP"); - else if (!strcmp(model, "Brocade-804")) + "QLogic BR-series 8Gbps PCIe single port FC HBA for HP"); + else if (!strcmp(model, "QLogic-804")) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, - "Brocade 8Gbps FC HBA for HP Bladesystem C-class"); - else if (!strcmp(model, "Brocade-1741")) + "QLogic BR-series 8Gbps FC HBA for HP Bladesystem C-class"); + else if (!strcmp(model, "QLogic-1741")) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, - "Brocade 10Gbps CNA for Dell M-Series Blade Servers"); - else if (strstr(model, "Brocade-1860")) { + "QLogic BR-series 10Gbps CNA for Dell M-Series Blade Servers"); + else if (strstr(model, "QLogic-1860")) { if (nports == 1 && bfa_ioc_is_cna(&bfad->bfa.ioc)) snprintf(model_descr, 
BFA_ADAPTER_MODEL_DESCR_LEN, - "Brocade 10Gbps single port CNA"); + "QLogic BR-series 10Gbps single port CNA"); else if (nports == 1 && !bfa_ioc_is_cna(&bfad->bfa.ioc)) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, - "Brocade 16Gbps PCIe single port FC HBA"); + "QLogic BR-series 16Gbps PCIe single port FC HBA"); else if (nports == 2 && bfa_ioc_is_cna(&bfad->bfa.ioc)) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, - "Brocade 10Gbps dual port CNA"); + "QLogic BR-series 10Gbps dual port CNA"); else if (nports == 2 && !bfa_ioc_is_cna(&bfad->bfa.ioc)) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, - "Brocade 16Gbps PCIe dual port FC HBA"); - } else if (!strcmp(model, "Brocade-1867")) { + "QLogic BR-series 16Gbps PCIe dual port FC HBA"); + } else if (!strcmp(model, "QLogic-1867")) { if (nports == 1 && !bfa_ioc_is_cna(&bfad->bfa.ioc)) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, - "Brocade 16Gbps PCIe single port FC HBA for IBM"); + "QLogic BR-series 16Gbps PCIe single port FC HBA for IBM"); else if (nports == 2 && !bfa_ioc_is_cna(&bfad->bfa.ioc)) snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, - "Brocade 16Gbps PCIe dual port FC HBA for IBM"); + "QLogic BR-series 16Gbps PCIe dual port FC HBA for IBM"); } else snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "Invalid Model"); diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c index 023b9d42ad9a..d1ad0208dfe7 100644 --- a/drivers/scsi/bfa/bfad_bsg.c +++ b/drivers/scsi/bfa/bfad_bsg.c @@ -1,9 +1,10 @@ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. * All rights reserved - * www.brocade.com + * www.qlogic.com * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as diff --git a/drivers/scsi/bfa/bfad_bsg.h b/drivers/scsi/bfa/bfad_bsg.h index 90abef691585..917e140dfbcc 100644 --- a/drivers/scsi/bfa/bfad_bsg.h +++ b/drivers/scsi/bfa/bfad_bsg.h @@ -1,9 +1,10 @@ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. * All rights reserved - * www.brocade.com + * www.qlogic.com * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c index 74a307c0a240..8dcd8c70c7ee 100644 --- a/drivers/scsi/bfa/bfad_debugfs.c +++ b/drivers/scsi/bfa/bfad_debugfs.c @@ -1,9 +1,10 @@ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. * All rights reserved - * www.brocade.com + * www.qlogic.com * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. 
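The bfad_attr.c hunk above rewrites a long if/else chain string by string. The driver keeps the chain; purely as an editor's aside, the fixed-description entries could be table-driven, which keeps each model next to its description and makes the next rebranding a data edit. A sketch of that alternative (bfa_model_descr and model_to_descr are invented names):

#include <linux/kernel.h>
#include <linux/string.h>

static const struct {
        const char *model;
        const char *descr;
} bfa_model_descr[] = {
        { "QLogic-425", "QLogic BR-series 4Gbps PCIe dual port FC HBA" },
        { "QLogic-825", "QLogic BR-series 8Gbps PCIe dual port FC HBA" },
        /* ... the other fixed-description models from the chain above ... */
};

static const char *model_to_descr(const char *model)
{
        size_t i;

        for (i = 0; i < ARRAY_SIZE(bfa_model_descr); i++)
                if (!strcmp(model, bfa_model_descr[i].model))
                        return bfa_model_descr[i].descr;
        return "Invalid Model"; /* QLogic-1860/1867 still need the nports logic */
}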
* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h index 8b97877d42cf..f9e862093a25 100644 --- a/drivers/scsi/bfa/bfad_drv.h +++ b/drivers/scsi/bfa/bfad_drv.h @@ -1,9 +1,10 @@ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. * All rights reserved - * www.brocade.com + * www.qlogic.com * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -57,7 +58,7 @@ #ifdef BFA_DRIVER_VERSION #define BFAD_DRIVER_VERSION BFA_DRIVER_VERSION #else -#define BFAD_DRIVER_VERSION "3.2.23.0" +#define BFAD_DRIVER_VERSION "3.2.25.0" #endif #define BFAD_PROTO_NAME FCPI_NAME diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c index 299c6f80d460..6c805e13f8dd 100644 --- a/drivers/scsi/bfa/bfad_im.c +++ b/drivers/scsi/bfa/bfad_im.c @@ -1,9 +1,10 @@ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. * All rights reserved - * www.brocade.com + * www.qlogic.com * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -185,7 +186,7 @@ bfad_im_info(struct Scsi_Host *shost) memset(bfa_buf, 0, sizeof(bfa_buf)); snprintf(bfa_buf, sizeof(bfa_buf), - "Brocade FC/FCOE Adapter, " "hwpath: %s driver: %s", + "QLogic BR-series FC/FCOE Adapter, hwpath: %s driver: %s", bfad->pci_name, BFAD_DRIVER_VERSION); return bfa_buf; @@ -271,6 +272,19 @@ bfad_im_target_reset_send(struct bfad_s *bfad, struct scsi_cmnd *cmnd, cmnd->host_scribble = NULL; cmnd->SCp.Status = 0; bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim->fcs_itnim); + /* + * bfa_itnim can be NULL if the port gets disconnected and the bfa + * and fcs layers have cleaned up their nexus with the targets and + * the same has not been cleaned up by the shim + */ + if (bfa_itnim == NULL) { + bfa_tskim_free(tskim); + BFA_LOG(KERN_ERR, bfad, bfa_log_level, + "target reset, bfa_itnim is NULL\n"); + rc = BFA_STATUS_FAILED; + goto out; + } + memset(&scsilun, 0, sizeof(scsilun)); bfa_tskim_start(tskim, bfa_itnim, scsilun, FCP_TM_TARGET_RESET, BFAD_TARGET_RESET_TMO); @@ -326,6 +340,19 @@ bfad_im_reset_lun_handler(struct scsi_cmnd *cmnd) cmnd->SCp.ptr = (char *)&wq; cmnd->SCp.Status = 0; bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim->fcs_itnim); + /* + * bfa_itnim can be NULL if the port gets disconnected and the bfa + * and fcs layers have cleaned up their nexus with the targets and + * the same has not been cleaned up by the shim + */ + if (bfa_itnim == NULL) { + bfa_tskim_free(tskim); + BFA_LOG(KERN_ERR, bfad, bfa_log_level, + "lun reset, bfa_itnim is NULL\n"); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + rc = FAILED; + goto out; + } int_to_scsilun(cmnd->device->lun, &scsilun); bfa_tskim_start(tskim, bfa_itnim, scsilun, FCP_TM_LUN_RESET, BFAD_LUN_RESET_TMO); diff --git a/drivers/scsi/bfa/bfad_im.h 
b/drivers/scsi/bfa/bfad_im.h index f6c1023e502a..836fdc221edd 100644 --- a/drivers/scsi/bfa/bfad_im.h +++ b/drivers/scsi/bfa/bfad_im.h @@ -1,9 +1,10 @@ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. * All rights reserved - * www.brocade.com + * www.qlogic.com * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as diff --git a/drivers/scsi/bfa/bfi.h b/drivers/scsi/bfa/bfi.h index 9ef91f907dec..97600dcec649 100644 --- a/drivers/scsi/bfa/bfi.h +++ b/drivers/scsi/bfa/bfi.h @@ -1,9 +1,10 @@ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. * All rights reserved - * www.brocade.com + * www.qlogic.com * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as diff --git a/drivers/scsi/bfa/bfi_ms.h b/drivers/scsi/bfa/bfi_ms.h index 1a3fe5ad58fa..ae5bfe039fcc 100644 --- a/drivers/scsi/bfa/bfi_ms.h +++ b/drivers/scsi/bfa/bfi_ms.h @@ -1,9 +1,10 @@ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. * All rights reserved - * www.brocade.com + * www.qlogic.com * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as diff --git a/drivers/scsi/bfa/bfi_reg.h b/drivers/scsi/bfa/bfi_reg.h index 99133bcf53f9..fd5b87616e8b 100644 --- a/drivers/scsi/bfa/bfi_reg.h +++ b/drivers/scsi/bfa/bfi_reg.h @@ -1,9 +1,10 @@ /* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014- QLogic Corporation. * All rights reserved - * www.brocade.com + * www.qlogic.com * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. + * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. 
* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as @@ -16,7 +17,7 @@ */ /* - * bfi_reg.h ASIC register defines for all Brocade adapter ASICs + * bfi_reg.h ASIC register defines for all QLogic BR-series adapter ASICs */ #ifndef __BFI_REG_H__ diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c index 67405c628864..d7029ea5d319 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c @@ -97,6 +97,15 @@ static void __exit bnx2fc_mod_exit(void); unsigned int bnx2fc_debug_level; module_param_named(debug_logging, bnx2fc_debug_level, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(debug_logging, + "Option to enable extended logging,\n" + "\t\tDefault is 0 - no logging.\n" + "\t\t0x01 - SCSI cmd error, cleanup.\n" + "\t\t0x02 - Session setup, cleanup, etc.\n" + "\t\t0x04 - lport events, link, mtu, etc.\n" + "\t\t0x08 - ELS logs.\n" + "\t\t0x10 - fcoe L2 frame related logs.\n" + "\t\t0xff - LOG all messages."); static int bnx2fc_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu); diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c index 0002caf687dd..2230dab67ca5 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_io.c +++ b/drivers/scsi/bnx2fc/bnx2fc_io.c @@ -1104,8 +1104,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd) struct bnx2fc_cmd *io_req; struct fc_lport *lport; struct bnx2fc_rport *tgt; - int rc = FAILED; - + int rc; rc = fc_block_scsi_eh(sc_cmd); if (rc) @@ -1114,7 +1113,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd) lport = shost_priv(sc_cmd->device->host); if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) { printk(KERN_ERR PFX "eh_abort: link not ready\n"); - return rc; + return FAILED; } tgt = (struct bnx2fc_rport *)&rp[1]; diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c index 0e2bee937fe8..e22a268fd311 100644 --- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c +++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c @@ -57,7 +57,7 @@ MODULE_PARM_DESC(cxgb3i_snd_win, "TCP send window in bytes (default=128KB)"); static int cxgb3i_rx_credit_thres = 10 * 1024; module_param(cxgb3i_rx_credit_thres, int, 0644); -MODULE_PARM_DESC(rx_credit_thres, +MODULE_PARM_DESC(cxgb3i_rx_credit_thres, "RX credits return threshold in bytes (default=10KB)"); static unsigned int cxgb3i_max_connect = 8 * 1024; diff --git a/drivers/scsi/cxlflash/common.h b/drivers/scsi/cxlflash/common.h index c11cd193f896..a8ac4c0a1493 100644 --- a/drivers/scsi/cxlflash/common.h +++ b/drivers/scsi/cxlflash/common.h @@ -34,7 +34,6 @@ extern const struct file_operations cxlflash_cxl_fops; sectors */ -#define NUM_RRQ_ENTRY 16 /* for master issued cmds */ #define MAX_RHT_PER_CONTEXT (PAGE_SIZE / sizeof(struct sisl_rht_entry)) /* AFU command retry limit */ @@ -48,9 +47,12 @@ extern const struct file_operations cxlflash_cxl_fops; index derivation */ -#define CXLFLASH_MAX_CMDS 16 +#define CXLFLASH_MAX_CMDS 256 #define CXLFLASH_MAX_CMDS_PER_LUN CXLFLASH_MAX_CMDS +/* RRQ for master issued cmds */ +#define NUM_RRQ_ENTRY CXLFLASH_MAX_CMDS + static inline void check_sizes(void) { @@ -149,7 +151,7 @@ struct afu_cmd { struct afu { /* Stuff requiring alignment go first. */ - u64 rrq_entry[NUM_RRQ_ENTRY]; /* 128B RRQ */ + u64 rrq_entry[NUM_RRQ_ENTRY]; /* 2K RRQ */ /* * Command & data for AFU commands. 
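The cxlflash sizing above is simple arithmetic worth spelling out: the RRQ holds one u64 slot per outstanding command, so raising CXLFLASH_MAX_CMDS from 16 to 256 grows rrq_entry[] from 16 x 8 B = 128 B to 256 x 8 B = 2048 B, which is exactly the 128B -> 2K comment change. In the spirit of the driver's own check_sizes() helper, the relationship could be pinned at compile time — a sketch, not part of the patch:

#include <linux/bug.h>
#include <linux/types.h>

#define CXLFLASH_MAX_CMDS 256
#define NUM_RRQ_ENTRY CXLFLASH_MAX_CMDS

static inline void rrq_size_check(void)
{
        /* one u64 RRQ slot per command: 256 * 8 == 2048 bytes (2K) */
        BUILD_BUG_ON(NUM_RRQ_ENTRY * sizeof(u64) != 2048);
}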
*/ @@ -165,6 +167,8 @@ struct afu { struct sisl_host_map __iomem *host_map; /* MC host map */ struct sisl_ctrl_map __iomem *ctrl_map; /* MC control map */ + struct kref mapcount; + ctx_hndl_t ctx_hndl; /* master's context handle */ u64 *hrrq_start; u64 *hrrq_end; diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c index 1e5bf0ca81da..3879b46d79e1 100644 --- a/drivers/scsi/cxlflash/main.c +++ b/drivers/scsi/cxlflash/main.c @@ -368,6 +368,7 @@ out: no_room: afu->read_room = true; + kref_get(&cfg->afu->mapcount); schedule_work(&cfg->work_q); rc = SCSI_MLQUEUE_HOST_BUSY; goto out; @@ -473,6 +474,16 @@ out: return rc; } +static void afu_unmap(struct kref *ref) +{ + struct afu *afu = container_of(ref, struct afu, mapcount); + + if (likely(afu->afu_map)) { + cxl_psa_unmap((void __iomem *)afu->afu_map); + afu->afu_map = NULL; + } +} + /** * cxlflash_driver_info() - information handler for this host driver * @host: SCSI host associated with device. @@ -503,6 +514,7 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp) ulong lock_flags; short lflag = 0; int rc = 0; + int kref_got = 0; dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu " "cdb=(%08X-%08X-%08X-%08X)\n", @@ -547,6 +559,9 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp) goto out; } + kref_get(&cfg->afu->mapcount); + kref_got = 1; + cmd->rcb.ctx_id = afu->ctx_hndl; cmd->rcb.port_sel = port_sel; cmd->rcb.lun_id = lun_to_lunid(scp->device->lun); @@ -587,6 +602,8 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp) } out: + if (kref_got) + kref_put(&afu->mapcount, afu_unmap); pr_devel("%s: returning rc=%d\n", __func__, rc); return rc; } @@ -632,20 +649,36 @@ static void free_mem(struct cxlflash_cfg *cfg) * @cfg: Internal structure associated with the host. * * Safe to call with AFU in a partially allocated/initialized state. + * + * Cleans up all state associated with the command queue, and unmaps + * the MMIO space. 
+ * + * - complete() will take care of commands we initiated (they'll be checked + * in as part of the cleanup that occurs after the completion) + * + * - cmd_checkin() will take care of entries that we did not initiate and that + * have not (and will not) complete because they are sitting on a [now stale] + * hardware queue */ static void stop_afu(struct cxlflash_cfg *cfg) { int i; struct afu *afu = cfg->afu; + struct afu_cmd *cmd; if (likely(afu)) { - for (i = 0; i < CXLFLASH_NUM_CMDS; i++) - complete(&afu->cmd[i].cevent); + for (i = 0; i < CXLFLASH_NUM_CMDS; i++) { + cmd = &afu->cmd[i]; + complete(&cmd->cevent); + if (!atomic_read(&cmd->free)) + cmd_checkin(cmd); + } if (likely(afu->afu_map)) { cxl_psa_unmap((void __iomem *)afu->afu_map); afu->afu_map = NULL; } + kref_put(&afu->mapcount, afu_unmap); } } @@ -693,11 +726,11 @@ static void term_mc(struct cxlflash_cfg *cfg, enum undo_level level) */ static void term_afu(struct cxlflash_cfg *cfg) { - term_mc(cfg, UNDO_START); - if (cfg->afu) stop_afu(cfg); + term_mc(cfg, UNDO_START); + pr_debug("%s: returning\n", __func__); } @@ -731,10 +764,9 @@ static void cxlflash_remove(struct pci_dev *pdev) scsi_remove_host(cfg->host); /* fall through */ case INIT_STATE_AFU: - term_afu(cfg); cancel_work_sync(&cfg->work_q); + term_afu(cfg); case INIT_STATE_PCI: - pci_release_regions(cfg->dev); pci_disable_device(pdev); case INIT_STATE_NONE: free_mem(cfg); @@ -807,15 +839,6 @@ static int init_pci(struct cxlflash_cfg *cfg) struct pci_dev *pdev = cfg->dev; int rc = 0; - cfg->cxlflash_regs_pci = pci_resource_start(pdev, 0); - rc = pci_request_regions(pdev, CXLFLASH_NAME); - if (rc < 0) { - dev_err(&pdev->dev, - "%s: Couldn't register memory range of registers\n", - __func__); - goto out; - } - rc = pci_enable_device(pdev); if (rc || pci_channel_offline(pdev)) { if (pci_channel_offline(pdev)) { @@ -827,55 +850,13 @@ static int init_pci(struct cxlflash_cfg *cfg) dev_err(&pdev->dev, "%s: Cannot enable adapter\n", __func__); cxlflash_wait_for_pci_err_recovery(cfg); - goto out_release_regions; - } - } - - rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); - if (rc < 0) { - dev_dbg(&pdev->dev, "%s: Failed to set 64 bit PCI DMA mask\n", - __func__); - rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); - } - - if (rc < 0) { - dev_err(&pdev->dev, "%s: Failed to set PCI DMA mask\n", - __func__); - goto out_disable; - } - - pci_set_master(pdev); - - if (pci_channel_offline(pdev)) { - cxlflash_wait_for_pci_err_recovery(cfg); - if (pci_channel_offline(pdev)) { - rc = -EIO; - goto out_msi_disable; + goto out; } } - rc = pci_save_state(pdev); - - if (rc != PCIBIOS_SUCCESSFUL) { - dev_err(&pdev->dev, "%s: Failed to save PCI config space\n", - __func__); - rc = -EIO; - goto cleanup_nolog; - } - out: pr_debug("%s: returning rc=%d\n", __func__, rc); return rc; - -cleanup_nolog: -out_msi_disable: - cxlflash_wait_for_pci_err_recovery(cfg); -out_disable: - pci_disable_device(pdev); -out_release_regions: - pci_release_regions(pdev); - goto out; - } /** @@ -1108,7 +1089,7 @@ static const struct asyc_intr_info ainfo[] = { {SISL_ASTATUS_FC1_OTHER, "other error", 1, CLR_FC_ERROR | LINK_RESET}, {SISL_ASTATUS_FC1_LOGO, "target initiated LOGO", 1, 0}, {SISL_ASTATUS_FC1_CRC_T, "CRC threshold exceeded", 1, LINK_RESET}, - {SISL_ASTATUS_FC1_LOGI_R, "login timed out, retrying", 1, 0}, + {SISL_ASTATUS_FC1_LOGI_R, "login timed out, retrying", 1, LINK_RESET}, {SISL_ASTATUS_FC1_LOGI_F, "login failed", 1, CLR_FC_ERROR}, {SISL_ASTATUS_FC1_LOGI_S, "login succeeded", 1, SCAN_HOST}, {SISL_ASTATUS_FC1_LINK_DN, 
"link down", 1, 0}, @@ -1316,6 +1297,7 @@ static irqreturn_t cxlflash_async_err_irq(int irq, void *data) __func__, port); cfg->lr_state = LINK_RESET_REQUIRED; cfg->lr_port = port; + kref_get(&cfg->afu->mapcount); schedule_work(&cfg->work_q); } @@ -1336,6 +1318,7 @@ static irqreturn_t cxlflash_async_err_irq(int irq, void *data) if (info->action & SCAN_HOST) { atomic_inc(&cfg->scan_host_needed); + kref_get(&cfg->afu->mapcount); schedule_work(&cfg->work_q); } } @@ -1731,6 +1714,7 @@ static int init_afu(struct cxlflash_cfg *cfg) rc = -ENOMEM; goto err1; } + kref_init(&afu->mapcount); /* No byte reverse on reading afu_version or string will be backwards */ reg = readq(&afu->afu_map->global.regs.afu_version); @@ -1765,8 +1749,7 @@ out: return rc; err2: - cxl_psa_unmap((void __iomem *)afu->afu_map); - afu->afu_map = NULL; + kref_put(&afu->mapcount, afu_unmap); err1: term_mc(cfg, UNDO_START); goto out; @@ -2114,6 +2097,16 @@ static ssize_t lun_mode_store(struct device *dev, rc = kstrtouint(buf, 10, &lun_mode); if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) { afu->internal_lun = lun_mode; + + /* + * When configured for internal LUN, there is only one channel, + * channel number 0, else there will be 2 (default). + */ + if (afu->internal_lun) + shost->max_channel = 0; + else + shost->max_channel = NUM_FC_PORTS - 1; + afu_reset(cfg); scsi_scan_host(cfg->host); } @@ -2260,7 +2253,7 @@ static struct scsi_host_template driver_template = { .eh_device_reset_handler = cxlflash_eh_device_reset_handler, .eh_host_reset_handler = cxlflash_eh_host_reset_handler, .change_queue_depth = cxlflash_change_queue_depth, - .cmd_per_lun = 16, + .cmd_per_lun = CXLFLASH_MAX_CMDS_PER_LUN, .can_queue = CXLFLASH_MAX_CMDS, .this_id = -1, .sg_tablesize = SG_NONE, /* No scatter gather support */ @@ -2274,6 +2267,7 @@ static struct scsi_host_template driver_template = { * Device dependent values */ static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS }; +static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS }; /* * PCI device binding table @@ -2281,6 +2275,8 @@ static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS }; static struct pci_device_id cxlflash_pci_table[] = { {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals}, + {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_FLASH_GT, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_flash_gt_vals}, {} }; @@ -2339,6 +2335,7 @@ static void cxlflash_worker_thread(struct work_struct *work) if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0) scsi_scan_host(cfg->host); + kref_put(&afu->mapcount, afu_unmap); } /** @@ -2505,8 +2502,8 @@ static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev, if (unlikely(rc)) dev_err(dev, "%s: Failed to mark user contexts!(%d)\n", __func__, rc); - term_mc(cfg, UNDO_START); stop_afu(cfg); + term_mc(cfg, UNDO_START); return PCI_ERS_RESULT_NEED_RESET; case pci_channel_io_perm_failure: cfg->state = STATE_FAILTERM; @@ -2585,8 +2582,7 @@ static struct pci_driver cxlflash_driver = { */ static int __init init_cxlflash(void) { - pr_info("%s: IBM Power CXL Flash Adapter: %s\n", - __func__, CXLFLASH_DRIVER_DATE); + pr_info("%s: %s\n", __func__, CXLFLASH_ADAPTER_NAME); cxlflash_list_init(); diff --git a/drivers/scsi/cxlflash/main.h b/drivers/scsi/cxlflash/main.h index 60324566c14f..0faed422c7f4 100644 --- a/drivers/scsi/cxlflash/main.h +++ b/drivers/scsi/cxlflash/main.h @@ -22,10 +22,9 @@ #define CXLFLASH_NAME "cxlflash" 
#define CXLFLASH_ADAPTER_NAME "IBM POWER CXL Flash Adapter" -#define CXLFLASH_DRIVER_DATE "(August 13, 2015)" -#define PCI_DEVICE_ID_IBM_CORSA 0x04F0 -#define CXLFLASH_SUBS_DEV_ID 0x04F0 +#define PCI_DEVICE_ID_IBM_CORSA 0x04F0 +#define PCI_DEVICE_ID_IBM_FLASH_GT 0x0600 /* Since there is only one target, make it 0 */ #define CXLFLASH_TARGET 0 diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c index cac2e6a50efd..d8a5cb3cd2bd 100644 --- a/drivers/scsi/cxlflash/superpipe.c +++ b/drivers/scsi/cxlflash/superpipe.c @@ -709,27 +709,32 @@ int cxlflash_disk_release(struct scsi_device *sdev, * @cfg: Internal structure associated with the host. * @ctxi: Context to release. * - * Note that the rht_lun member of the context was cut from a single - * allocation when the context was created and therefore does not need - * to be explicitly freed. Also note that we conditionally check for the - * existence of the context control map before clearing the RHT registers - * and context capabilities because it is possible to destroy a context - * while the context is in the error state (previous mapping was removed - * [so we don't have to worry about clearing] and context is waiting for - * a new mapping). + * This routine is safe to be called with a non-initialized context + * and is tolerant of being called with the context's mutex held (it + * will be unlocked if necessary before freeing). Also note that the + * routine conditionally checks for the existence of the context control + * map before clearing the RHT registers and context capabilities because + * it is possible to destroy a context while the context is in the error + * state (previous mapping was removed [so there is no need to worry about + * clearing] and context is waiting for a new mapping). */ static void destroy_context(struct cxlflash_cfg *cfg, struct ctx_info *ctxi) { struct afu *afu = cfg->afu; - WARN_ON(!list_empty(&ctxi->luns)); + if (ctxi->initialized) { + WARN_ON(!list_empty(&ctxi->luns)); - /* Clear RHT registers and drop all capabilities for this context */ - if (afu->afu_map && ctxi->ctrl_map) { - writeq_be(0, &ctxi->ctrl_map->rht_start); - writeq_be(0, &ctxi->ctrl_map->rht_cnt_id); - writeq_be(0, &ctxi->ctrl_map->ctx_cap); + /* Clear RHT registers and drop all capabilities for context */ + if (afu->afu_map && ctxi->ctrl_map) { + writeq_be(0, &ctxi->ctrl_map->rht_start); + writeq_be(0, &ctxi->ctrl_map->rht_cnt_id); + writeq_be(0, &ctxi->ctrl_map->ctx_cap); + } + + if (mutex_is_locked(&ctxi->mutex)) + mutex_unlock(&ctxi->mutex); } /* Free memory associated with context */ @@ -742,23 +747,12 @@ static void destroy_context(struct cxlflash_cfg *cfg, /** * create_context() - allocates and initializes a context * @cfg: Internal structure associated with the host. - * @ctx: Previously obtained CXL context reference. - * @ctxid: Previously obtained process element associated with CXL context. - * @adap_fd: Previously obtained adapter fd associated with CXL context. - * @file: Previously obtained file associated with CXL context. - * @perms: User-specified permissions. - * - * The context's mutex is locked when an allocated context is returned. 
* * Return: Allocated context on success, NULL on failure */ -static struct ctx_info *create_context(struct cxlflash_cfg *cfg, - struct cxl_context *ctx, int ctxid, - int adap_fd, struct file *file, - u32 perms) +static struct ctx_info *create_context(struct cxlflash_cfg *cfg) { struct device *dev = &cfg->dev->dev; - struct afu *afu = cfg->afu; struct ctx_info *ctxi = NULL; struct llun_info **lli = NULL; u8 *ws = NULL; @@ -781,28 +775,49 @@ static struct ctx_info *create_context(struct cxlflash_cfg *cfg, ctxi->rht_lun = lli; ctxi->rht_needs_ws = ws; ctxi->rht_start = rhte; - ctxi->rht_perms = perms; +out: + return ctxi; + +err: + kfree(ws); + kfree(lli); + kfree(ctxi); + ctxi = NULL; + goto out; +} + +/** + * init_context() - initializes a previously allocated context + * @ctxi: Previously allocated context + * @cfg: Internal structure associated with the host. + * @ctx: Previously obtained CXL context reference. + * @ctxid: Previously obtained process element associated with CXL context. + * @adap_fd: Previously obtained adapter fd associated with CXL context. + * @file: Previously obtained file associated with CXL context. + * @perms: User-specified permissions. + * + * Upon return, the context is marked as initialized and the context's mutex + * is locked. + */ +static void init_context(struct ctx_info *ctxi, struct cxlflash_cfg *cfg, + struct cxl_context *ctx, int ctxid, int adap_fd, + struct file *file, u32 perms) +{ + struct afu *afu = cfg->afu; + ctxi->rht_perms = perms; ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl; ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid); ctxi->lfd = adap_fd; ctxi->pid = current->tgid; /* tgid = pid */ ctxi->ctx = ctx; ctxi->file = file; + ctxi->initialized = true; mutex_init(&ctxi->mutex); INIT_LIST_HEAD(&ctxi->luns); INIT_LIST_HEAD(&ctxi->list); /* initialize for list_empty() */ mutex_lock(&ctxi->mutex); -out: - return ctxi; - -err: - kfree(ws); - kfree(lli); - kfree(ctxi); - ctxi = NULL; - goto out; } /** @@ -1300,9 +1315,9 @@ static int cxlflash_disk_attach(struct scsi_device *sdev, u32 perms; int ctxid = -1; u64 rctxid = 0UL; - struct file *file; + struct file *file = NULL; - struct cxl_context *ctx; + struct cxl_context *ctx = NULL; int fd = -1; @@ -1356,7 +1371,7 @@ static int cxlflash_disk_attach(struct scsi_device *sdev, if (unlikely(!lun_access)) { dev_err(dev, "%s: Unable to allocate lun_access!\n", __func__); rc = -ENOMEM; - goto err0; + goto err; } lun_access->lli = lli; @@ -1371,53 +1386,56 @@ static int cxlflash_disk_attach(struct scsi_device *sdev, goto out_attach; } + ctxi = create_context(cfg); + if (unlikely(!ctxi)) { + dev_err(dev, "%s: Failed to create context! 
(%d)\n", + __func__, ctxid); + goto err; + } + ctx = cxl_dev_context_init(cfg->dev); - if (unlikely(IS_ERR_OR_NULL(ctx))) { + if (IS_ERR_OR_NULL(ctx)) { dev_err(dev, "%s: Could not initialize context %p\n", __func__, ctx); rc = -ENODEV; - goto err1; + goto err; + } + + work = &ctxi->work; + work->num_interrupts = attach->num_interrupts; + work->flags = CXL_START_WORK_NUM_IRQS; + + rc = cxl_start_work(ctx, work); + if (unlikely(rc)) { + dev_dbg(dev, "%s: Could not start context rc=%d\n", + __func__, rc); + goto err; } ctxid = cxl_process_element(ctx); - if (unlikely((ctxid > MAX_CONTEXT) || (ctxid < 0))) { + if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) { dev_err(dev, "%s: ctxid (%d) invalid!\n", __func__, ctxid); rc = -EPERM; - goto err2; + goto err; } file = cxl_get_fd(ctx, &cfg->cxl_fops, &fd); if (unlikely(fd < 0)) { rc = -ENODEV; dev_err(dev, "%s: Could not get file descriptor\n", __func__); - goto err2; + goto err; } /* Translate read/write O_* flags from fcntl.h to AFU permission bits */ perms = SISL_RHT_PERM(attach->hdr.flags + 1); - ctxi = create_context(cfg, ctx, ctxid, fd, file, perms); - if (unlikely(!ctxi)) { - dev_err(dev, "%s: Failed to create context! (%d)\n", - __func__, ctxid); - goto err3; - } - - work = &ctxi->work; - work->num_interrupts = attach->num_interrupts; - work->flags = CXL_START_WORK_NUM_IRQS; - - rc = cxl_start_work(ctx, work); - if (unlikely(rc)) { - dev_dbg(dev, "%s: Could not start context rc=%d\n", - __func__, rc); - goto err4; - } + /* Context mutex is locked upon return */ + init_context(ctxi, cfg, ctx, ctxid, fd, file, perms); rc = afu_attach(cfg, ctxi); if (unlikely(rc)) { dev_err(dev, "%s: Could not attach AFU rc %d\n", __func__, rc); - goto err5; + goto err; } /* @@ -1453,13 +1471,14 @@ out: __func__, ctxid, fd, attach->block_size, rc, attach->last_lba); return rc; -err5: - cxl_stop_context(ctx); -err4: - put_context(ctxi); - destroy_context(cfg, ctxi); - ctxi = NULL; -err3: +err: + /* Cleanup CXL context; okay to 'stop' even if it was not started */ + if (!IS_ERR_OR_NULL(ctx)) { + cxl_stop_context(ctx); + cxl_release_context(ctx); + ctx = NULL; + } + /* * Here, we're overriding the fops with a dummy all-NULL fops because * fput() calls the release fop, which will cause us to mistakenly @@ -1467,15 +1486,21 @@ err3: * to that routine (cxlflash_cxl_release) we should try to fix the * issue here. 
*/ - file->f_op = &null_fops; - fput(file); - put_unused_fd(fd); - fd = -1; -err2: - cxl_release_context(ctx); -err1: + if (fd > 0) { + file->f_op = &null_fops; + fput(file); + put_unused_fd(fd); + fd = -1; + file = NULL; + } + + /* Cleanup our context; safe to call even with mutex locked */ + if (ctxi) { + destroy_context(cfg, ctxi); + ctxi = NULL; + } + kfree(lun_access); -err0: scsi_device_put(sdev); goto out; } @@ -1500,31 +1525,31 @@ static int recover_context(struct cxlflash_cfg *cfg, struct ctx_info *ctxi) struct afu *afu = cfg->afu; ctx = cxl_dev_context_init(cfg->dev); - if (unlikely(IS_ERR_OR_NULL(ctx))) { + if (IS_ERR_OR_NULL(ctx)) { dev_err(dev, "%s: Could not initialize context %p\n", __func__, ctx); rc = -ENODEV; goto out; } + rc = cxl_start_work(ctx, &ctxi->work); + if (unlikely(rc)) { + dev_dbg(dev, "%s: Could not start context rc=%d\n", + __func__, rc); + goto err1; + } + ctxid = cxl_process_element(ctx); - if (unlikely((ctxid > MAX_CONTEXT) || (ctxid < 0))) { + if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) { dev_err(dev, "%s: ctxid (%d) invalid!\n", __func__, ctxid); rc = -EPERM; - goto err1; + goto err2; } file = cxl_get_fd(ctx, &cfg->cxl_fops, &fd); if (unlikely(fd < 0)) { rc = -ENODEV; dev_err(dev, "%s: Could not get file descriptor\n", __func__); - goto err1; - } - - rc = cxl_start_work(ctx, &ctxi->work); - if (unlikely(rc)) { - dev_dbg(dev, "%s: Could not start context rc=%d\n", - __func__, rc); goto err2; } @@ -1569,10 +1594,10 @@ out: return rc; err3: - cxl_stop_context(ctx); -err2: fput(file); put_unused_fd(fd); +err2: + cxl_stop_context(ctx); err1: cxl_release_context(ctx); goto out; diff --git a/drivers/scsi/cxlflash/superpipe.h b/drivers/scsi/cxlflash/superpipe.h index bede574bcd77..5f9a091fda95 100644 --- a/drivers/scsi/cxlflash/superpipe.h +++ b/drivers/scsi/cxlflash/superpipe.h @@ -102,6 +102,7 @@ struct ctx_info { u64 ctxid; int lfd; pid_t pid; + bool initialized; bool unavail; bool err_recovery_active; struct mutex mutex; /* Context protection */ diff --git a/drivers/scsi/cxlflash/vlun.c b/drivers/scsi/cxlflash/vlun.c index a53f583e2d7b..50f8e9300770 100644 --- a/drivers/scsi/cxlflash/vlun.c +++ b/drivers/scsi/cxlflash/vlun.c @@ -1008,6 +1008,8 @@ int cxlflash_disk_virtual_open(struct scsi_device *sdev, void *arg) virt->last_lba = last_lba; virt->rsrc_handle = rsrc_handle; + if (lli->port_sel == BOTH_PORTS) + virt->hdr.return_flags |= DK_CXLFLASH_ALL_PORTS_ACTIVE; out: if (likely(ctxi)) put_context(ctxi); diff --git a/drivers/scsi/device_handler/Kconfig b/drivers/scsi/device_handler/Kconfig index e5647d59224f..0b331c9c0a8f 100644 --- a/drivers/scsi/device_handler/Kconfig +++ b/drivers/scsi/device_handler/Kconfig @@ -13,13 +13,13 @@ menuconfig SCSI_DH config SCSI_DH_RDAC tristate "LSI RDAC Device Handler" - depends on SCSI_DH + depends on SCSI_DH && SCSI help If you have a LSI RDAC select y. Otherwise, say N. config SCSI_DH_HP_SW tristate "HP/COMPAQ MSA Device Handler" - depends on SCSI_DH + depends on SCSI_DH && SCSI help If you have a HP/COMPAQ MSA device that requires START_STOP to be sent to start it and cannot upgrade the firmware then select y. @@ -27,13 +27,13 @@ config SCSI_DH_HP_SW config SCSI_DH_EMC tristate "EMC CLARiiON Device Handler" - depends on SCSI_DH + depends on SCSI_DH && SCSI help If you have a EMC CLARiiON select y. Otherwise, say N. config SCSI_DH_ALUA tristate "SPC-3 ALUA Device Handler" - depends on SCSI_DH + depends on SCSI_DH && SCSI help SCSI Device handler for generic SPC-3 Asymmetric Logical Unit Access (ALUA). 
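The scsi_dh_alua rework in the next hunks replaces per-device ALUA bookkeeping with shared, reference-counted alua_port_group objects that are looked up by device identifier and group id on a global list. Below is a minimal standalone sketch of that lookup-or-allocate idiom, using hypothetical foo_* names rather than the driver's own symbols: lookups hand out a reference only via kref_get_unless_zero(), so an object whose last reference is already gone is never revived, and the allocation path re-checks the list under the lock so two racing allocators converge on a single shared object.

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static LIST_HEAD(foo_list);
static DEFINE_SPINLOCK(foo_lock);

struct foo {
	struct kref kref;
	struct list_head node;
	int id;
};

/* Caller must hold foo_lock; returns with a reference held, or NULL */
static struct foo *foo_find_get(int id)
{
	struct foo *f;

	list_for_each_entry(f, &foo_list, node) {
		if (f->id != id)
			continue;
		/* Skip objects whose refcount already dropped to zero */
		if (!kref_get_unless_zero(&f->kref))
			continue;
		return f;
	}
	return NULL;
}

static struct foo *foo_get_or_alloc(int id)
{
	struct foo *f, *tmp;

	/* Allocate optimistically, outside the lock */
	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return NULL;
	kref_init(&f->kref);
	f->id = id;

	spin_lock(&foo_lock);
	tmp = foo_find_get(id);	/* re-check: another thread may have won */
	if (tmp) {
		spin_unlock(&foo_lock);
		kfree(f);
		return tmp;
	}
	list_add(&f->node, &foo_list);
	spin_unlock(&foo_lock);
	return f;
}

/* Release callback for kref_put(&f->kref, foo_release) */
static void foo_release(struct kref *kref)
{
	struct foo *f = container_of(kref, struct foo, kref);

	spin_lock(&foo_lock);
	list_del(&f->node);
	spin_unlock(&foo_lock);
	kfree(f);
}

alua_find_get_pg() and alua_alloc_pg() in the diff below follow this shape, with the extra twist that release_port_group() defers the final free through kfree_rcu() because readers traverse the port group's dh_list under RCU.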
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c index cc2773b5de68..5bcdf8dd6fb0 100644 --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/drivers/scsi/device_handler/scsi_dh_alua.c @@ -22,20 +22,15 @@ #include <linux/slab.h> #include <linux/delay.h> #include <linux/module.h> +#include <asm/unaligned.h> #include <scsi/scsi.h> +#include <scsi/scsi_proto.h> +#include <scsi/scsi_dbg.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_dh.h> #define ALUA_DH_NAME "alua" -#define ALUA_DH_VER "1.3" - -#define TPGS_STATE_OPTIMIZED 0x0 -#define TPGS_STATE_NONOPTIMIZED 0x1 -#define TPGS_STATE_STANDBY 0x2 -#define TPGS_STATE_UNAVAILABLE 0x3 -#define TPGS_STATE_LBA_DEPENDENT 0x4 -#define TPGS_STATE_OFFLINE 0xe -#define TPGS_STATE_TRANSITIONING 0xf +#define ALUA_DH_VER "2.0" #define TPGS_SUPPORT_NONE 0x00 #define TPGS_SUPPORT_OPTIMIZED 0x01 @@ -54,27 +49,62 @@ #define TPGS_MODE_IMPLICIT 0x1 #define TPGS_MODE_EXPLICIT 0x2 -#define ALUA_INQUIRY_SIZE 36 +#define ALUA_RTPG_SIZE 128 #define ALUA_FAILOVER_TIMEOUT 60 #define ALUA_FAILOVER_RETRIES 5 +#define ALUA_RTPG_DELAY_MSECS 5 -/* flags passed from user level */ -#define ALUA_OPTIMIZE_STPG 1 +/* device handler flags */ +#define ALUA_OPTIMIZE_STPG 0x01 +#define ALUA_RTPG_EXT_HDR_UNSUPP 0x02 +#define ALUA_SYNC_STPG 0x04 +/* State machine flags */ +#define ALUA_PG_RUN_RTPG 0x10 +#define ALUA_PG_RUN_STPG 0x20 +#define ALUA_PG_RUNNING 0x40 -struct alua_dh_data { +static uint optimize_stpg; +module_param(optimize_stpg, uint, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(optimize_stpg, "Allow use of a non-optimized path, rather than sending a STPG, when implicit TPGS is supported (0=No,1=Yes). Default is 0."); + +static LIST_HEAD(port_group_list); +static DEFINE_SPINLOCK(port_group_lock); +static struct workqueue_struct *kaluad_wq; +static struct workqueue_struct *kaluad_sync_wq; + +struct alua_port_group { + struct kref kref; + struct rcu_head rcu; + struct list_head node; + struct list_head dh_list; + unsigned char device_id_str[256]; + int device_id_len; int group_id; - int rel_port; int tpgs; int state; int pref; unsigned flags; /* used for optimizing STPG */ - unsigned char inq[ALUA_INQUIRY_SIZE]; - unsigned char *buff; - int bufflen; unsigned char transition_tmo; - unsigned char sense[SCSI_SENSE_BUFFERSIZE]; - int senselen; + unsigned long expiry; + unsigned long interval; + struct delayed_work rtpg_work; + spinlock_t lock; + struct list_head rtpg_list; + struct scsi_device *rtpg_sdev; +}; + +struct alua_dh_data { + struct list_head node; + struct alua_port_group *pg; + int group_id; + spinlock_t pg_lock; struct scsi_device *sdev; + int init_error; + struct mutex init_mutex; +}; + +struct alua_queue_data { + struct list_head entry; activate_complete callback_fn; void *callback_data; }; @@ -82,231 +112,160 @@ struct alua_dh_data { #define ALUA_POLICY_SWITCH_CURRENT 0 #define ALUA_POLICY_SWITCH_ALL 1 -static char print_alua_state(int); -static int alua_check_sense(struct scsi_device *, struct scsi_sense_hdr *); - -static int realloc_buffer(struct alua_dh_data *h, unsigned len) -{ - if (h->buff && h->buff != h->inq) - kfree(h->buff); - - h->buff = kmalloc(len, GFP_NOIO); - if (!h->buff) { - h->buff = h->inq; - h->bufflen = ALUA_INQUIRY_SIZE; - return 1; - } - h->bufflen = len; - return 0; -} +static void alua_rtpg_work(struct work_struct *work); +static void alua_rtpg_queue(struct alua_port_group *pg, + struct scsi_device *sdev, + struct alua_queue_data *qdata, bool force); +static void alua_check(struct scsi_device 
*sdev, bool force); -static struct request *get_alua_req(struct scsi_device *sdev, - void *buffer, unsigned buflen, int rw) +static void release_port_group(struct kref *kref) { - struct request *rq; - struct request_queue *q = sdev->request_queue; - - rq = blk_get_request(q, rw, GFP_NOIO); - - if (IS_ERR(rq)) { - sdev_printk(KERN_INFO, sdev, - "%s: blk_get_request failed\n", __func__); - return NULL; - } - blk_rq_set_block_pc(rq); - - if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_NOIO)) { - blk_put_request(rq); - sdev_printk(KERN_INFO, sdev, - "%s: blk_rq_map_kern failed\n", __func__); - return NULL; - } - - rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | - REQ_FAILFAST_DRIVER; - rq->retries = ALUA_FAILOVER_RETRIES; - rq->timeout = ALUA_FAILOVER_TIMEOUT * HZ; - - return rq; + struct alua_port_group *pg; + + pg = container_of(kref, struct alua_port_group, kref); + if (pg->rtpg_sdev) + flush_delayed_work(&pg->rtpg_work); + spin_lock(&port_group_lock); + list_del(&pg->node); + spin_unlock(&port_group_lock); + kfree_rcu(pg, rcu); } /* - * submit_vpd_inquiry - Issue an INQUIRY VPD page 0x83 command + * submit_rtpg - Issue a REPORT TARGET GROUP STATES command * @sdev: sdev the command should be sent to */ -static int submit_vpd_inquiry(struct scsi_device *sdev, struct alua_dh_data *h) +static int submit_rtpg(struct scsi_device *sdev, unsigned char *buff, + int bufflen, struct scsi_sense_hdr *sshdr, int flags) { - struct request *rq; - int err = SCSI_DH_RES_TEMP_UNAVAIL; - - rq = get_alua_req(sdev, h->buff, h->bufflen, READ); - if (!rq) - goto done; + u8 cdb[COMMAND_SIZE(MAINTENANCE_IN)]; + int req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | + REQ_FAILFAST_DRIVER; /* Prepare the command. */ - rq->cmd[0] = INQUIRY; - rq->cmd[1] = 1; - rq->cmd[2] = 0x83; - rq->cmd[4] = h->bufflen; - rq->cmd_len = COMMAND_SIZE(INQUIRY); - - rq->sense = h->sense; - memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE); - rq->sense_len = h->senselen = 0; - - err = blk_execute_rq(rq->q, NULL, rq, 1); - if (err == -EIO) { - sdev_printk(KERN_INFO, sdev, - "%s: evpd inquiry failed with %x\n", - ALUA_DH_NAME, rq->errors); - h->senselen = rq->sense_len; - err = SCSI_DH_IO; - } - blk_put_request(rq); -done: - return err; + memset(cdb, 0x0, COMMAND_SIZE(MAINTENANCE_IN)); + cdb[0] = MAINTENANCE_IN; + if (!(flags & ALUA_RTPG_EXT_HDR_UNSUPP)) + cdb[1] = MI_REPORT_TARGET_PGS | MI_EXT_HDR_PARAM_FMT; + else + cdb[1] = MI_REPORT_TARGET_PGS; + put_unaligned_be32(bufflen, &cdb[6]); + + return scsi_execute_req_flags(sdev, cdb, DMA_FROM_DEVICE, + buff, bufflen, sshdr, + ALUA_FAILOVER_TIMEOUT * HZ, + ALUA_FAILOVER_RETRIES, NULL, req_flags); } /* - * submit_rtpg - Issue a REPORT TARGET GROUP STATES command - * @sdev: sdev the command should be sent to + * submit_stpg - Issue a SET TARGET PORT GROUP command + * + * Currently we're only setting the current target port group state + * to 'active/optimized' and let the array firmware figure out + * the states of the remaining groups. 
*/ -static unsigned submit_rtpg(struct scsi_device *sdev, struct alua_dh_data *h, - bool rtpg_ext_hdr_req) +static int submit_stpg(struct scsi_device *sdev, int group_id, + struct scsi_sense_hdr *sshdr) { - struct request *rq; - int err = SCSI_DH_RES_TEMP_UNAVAIL; + u8 cdb[COMMAND_SIZE(MAINTENANCE_OUT)]; + unsigned char stpg_data[8]; + int stpg_len = 8; + int req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | + REQ_FAILFAST_DRIVER; - rq = get_alua_req(sdev, h->buff, h->bufflen, READ); - if (!rq) - goto done; + /* Prepare the data buffer */ + memset(stpg_data, 0, stpg_len); + stpg_data[4] = SCSI_ACCESS_STATE_OPTIMAL; + put_unaligned_be16(group_id, &stpg_data[6]); /* Prepare the command. */ - rq->cmd[0] = MAINTENANCE_IN; - if (rtpg_ext_hdr_req) - rq->cmd[1] = MI_REPORT_TARGET_PGS | MI_EXT_HDR_PARAM_FMT; - else - rq->cmd[1] = MI_REPORT_TARGET_PGS; - rq->cmd[6] = (h->bufflen >> 24) & 0xff; - rq->cmd[7] = (h->bufflen >> 16) & 0xff; - rq->cmd[8] = (h->bufflen >> 8) & 0xff; - rq->cmd[9] = h->bufflen & 0xff; - rq->cmd_len = COMMAND_SIZE(MAINTENANCE_IN); - - rq->sense = h->sense; - memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE); - rq->sense_len = h->senselen = 0; - - err = blk_execute_rq(rq->q, NULL, rq, 1); - if (err == -EIO) { - sdev_printk(KERN_INFO, sdev, - "%s: rtpg failed with %x\n", - ALUA_DH_NAME, rq->errors); - h->senselen = rq->sense_len; - err = SCSI_DH_IO; - } - blk_put_request(rq); -done: - return err; + memset(cdb, 0x0, COMMAND_SIZE(MAINTENANCE_OUT)); + cdb[0] = MAINTENANCE_OUT; + cdb[1] = MO_SET_TARGET_PGS; + put_unaligned_be32(stpg_len, &cdb[6]); + + return scsi_execute_req_flags(sdev, cdb, DMA_TO_DEVICE, + stpg_data, stpg_len, + sshdr, ALUA_FAILOVER_TIMEOUT * HZ, + ALUA_FAILOVER_RETRIES, NULL, req_flags); } -/* - * alua_stpg - Evaluate SET TARGET GROUP STATES - * @sdev: the device to be evaluated - * @state: the new target group state - * - * Send a SET TARGET GROUP STATES command to the device. - * We only have to test here if we should resubmit the command; - * any other error is assumed as a failure. 
- */ -static void stpg_endio(struct request *req, int error) +struct alua_port_group *alua_find_get_pg(char *id_str, size_t id_size, + int group_id) { - struct alua_dh_data *h = req->end_io_data; - struct scsi_sense_hdr sense_hdr; - unsigned err = SCSI_DH_OK; - - if (host_byte(req->errors) != DID_OK || - msg_byte(req->errors) != COMMAND_COMPLETE) { - err = SCSI_DH_IO; - goto done; + struct alua_port_group *pg; + + list_for_each_entry(pg, &port_group_list, node) { + if (pg->group_id != group_id) + continue; + if (pg->device_id_len != id_size) + continue; + if (strncmp(pg->device_id_str, id_str, id_size)) + continue; + if (!kref_get_unless_zero(&pg->kref)) + continue; + return pg; } - if (req->sense_len > 0) { - err = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE, - &sense_hdr); - if (!err) { - err = SCSI_DH_IO; - goto done; - } - err = alua_check_sense(h->sdev, &sense_hdr); - if (err == ADD_TO_MLQUEUE) { - err = SCSI_DH_RETRY; - goto done; - } - sdev_printk(KERN_INFO, h->sdev, - "%s: stpg sense code: %02x/%02x/%02x\n", - ALUA_DH_NAME, sense_hdr.sense_key, - sense_hdr.asc, sense_hdr.ascq); - err = SCSI_DH_IO; - } else if (error) - err = SCSI_DH_IO; - - if (err == SCSI_DH_OK) { - h->state = TPGS_STATE_OPTIMIZED; - sdev_printk(KERN_INFO, h->sdev, - "%s: port group %02x switched to state %c\n", - ALUA_DH_NAME, h->group_id, - print_alua_state(h->state)); - } -done: - req->end_io_data = NULL; - __blk_put_request(req->q, req); - if (h->callback_fn) { - h->callback_fn(h->callback_data, err); - h->callback_fn = h->callback_data = NULL; - } - return; + return NULL; } /* - * submit_stpg - Issue a SET TARGET GROUP STATES command + * alua_alloc_pg - Allocate a new port_group structure + * @sdev: scsi device + * @group_id: port group id + * @tpgs: target port group settings * - * Currently we're only setting the current target port group state - * to 'active/optimized' and let the array firmware figure out - * the states of the remaining groups. + * Allocate a new port_group structure for a given + * device. */ -static unsigned submit_stpg(struct alua_dh_data *h) +struct alua_port_group *alua_alloc_pg(struct scsi_device *sdev, + int group_id, int tpgs) { - struct request *rq; - int stpg_len = 8; - struct scsi_device *sdev = h->sdev; + struct alua_port_group *pg, *tmp_pg; - /* Prepare the data buffer */ - memset(h->buff, 0, stpg_len); - h->buff[4] = TPGS_STATE_OPTIMIZED & 0x0f; - h->buff[6] = (h->group_id >> 8) & 0xff; - h->buff[7] = h->group_id & 0xff; + pg = kzalloc(sizeof(struct alua_port_group), GFP_KERNEL); + if (!pg) + return ERR_PTR(-ENOMEM); - rq = get_alua_req(sdev, h->buff, stpg_len, WRITE); - if (!rq) - return SCSI_DH_RES_TEMP_UNAVAIL; + pg->device_id_len = scsi_vpd_lun_id(sdev, pg->device_id_str, + sizeof(pg->device_id_str)); + if (pg->device_id_len <= 0) { + /* + * Internal error: TPGS supported but no device + * identification found. Disable ALUA support.
+ */ + kfree(pg); + sdev_printk(KERN_INFO, sdev, + "%s: No device descriptors found\n", + ALUA_DH_NAME); + return ERR_PTR(-ENXIO); + } + pg->group_id = group_id; + pg->tpgs = tpgs; + pg->state = SCSI_ACCESS_STATE_OPTIMAL; + if (optimize_stpg) + pg->flags |= ALUA_OPTIMIZE_STPG; + kref_init(&pg->kref); + INIT_DELAYED_WORK(&pg->rtpg_work, alua_rtpg_work); + INIT_LIST_HEAD(&pg->rtpg_list); + INIT_LIST_HEAD(&pg->node); + INIT_LIST_HEAD(&pg->dh_list); + spin_lock_init(&pg->lock); + + spin_lock(&port_group_lock); + tmp_pg = alua_find_get_pg(pg->device_id_str, pg->device_id_len, + group_id); + if (tmp_pg) { + spin_unlock(&port_group_lock); + kfree(pg); + return tmp_pg; + }  - /* Prepare the command. */ - rq->cmd[0] = MAINTENANCE_OUT; - rq->cmd[1] = MO_SET_TARGET_PGS; - rq->cmd[6] = (stpg_len >> 24) & 0xff; - rq->cmd[7] = (stpg_len >> 16) & 0xff; - rq->cmd[8] = (stpg_len >> 8) & 0xff; - rq->cmd[9] = stpg_len & 0xff; - rq->cmd_len = COMMAND_SIZE(MAINTENANCE_OUT); - - rq->sense = h->sense; - memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE); - rq->sense_len = h->senselen = 0; - rq->end_io_data = h; - - blk_execute_rq_nowait(rq->q, NULL, rq, 1, stpg_endio); - return SCSI_DH_OK; + list_add(&pg->node, &port_group_list); + spin_unlock(&port_group_lock); + + return pg; } /* @@ -316,12 +275,23 @@ static unsigned submit_stpg(struct alua_dh_data *h) * Examine the TPGS setting of the sdev to find out if ALUA * is supported. */ -static int alua_check_tpgs(struct scsi_device *sdev, struct alua_dh_data *h) +static int alua_check_tpgs(struct scsi_device *sdev) { - int err = SCSI_DH_OK; + int tpgs = TPGS_MODE_NONE; + + /* + * ALUA support for non-disk devices is fraught with + * difficulties, so disable it for now. + */ + if (sdev->type != TYPE_DISK) { + sdev_printk(KERN_INFO, sdev, + "%s: disable for non-disk devices\n", + ALUA_DH_NAME); + return tpgs; + }  - h->tpgs = scsi_device_tpgs(sdev); - switch (h->tpgs) { + tpgs = scsi_device_tpgs(sdev); + switch (tpgs) { case TPGS_MODE_EXPLICIT|TPGS_MODE_IMPLICIT: sdev_printk(KERN_INFO, sdev, "%s: supports implicit and explicit TPGS\n", @@ -335,71 +305,38 @@ static int alua_check_tpgs(struct scsi_device *sdev, struct alua_dh_data *h) sdev_printk(KERN_INFO, sdev, "%s: supports implicit TPGS\n", ALUA_DH_NAME); break; - default: - h->tpgs = TPGS_MODE_NONE; + case TPGS_MODE_NONE: sdev_printk(KERN_INFO, sdev, "%s: not supported\n", ALUA_DH_NAME); - err = SCSI_DH_DEV_UNSUPP; + break; + default: + sdev_printk(KERN_INFO, sdev, + "%s: unsupported TPGS setting %d\n", + ALUA_DH_NAME, tpgs); + tpgs = TPGS_MODE_NONE; break; } - return err; + return tpgs; } /* - * alua_vpd_inquiry - Evaluate INQUIRY vpd page 0x83 + * alua_check_vpd - Evaluate INQUIRY vpd page 0x83 * @sdev: device to be checked * * Extract the relative target port and the target port group * descriptor from the list of identifiers.
*/ -static int alua_vpd_inquiry(struct scsi_device *sdev, struct alua_dh_data *h) +static int alua_check_vpd(struct scsi_device *sdev, struct alua_dh_data *h, + int tpgs) { - int len; - unsigned err; - unsigned char *d; + int rel_port = -1, group_id; + struct alua_port_group *pg, *old_pg = NULL; + bool pg_updated = false; + unsigned long flags; - retry: - err = submit_vpd_inquiry(sdev, h); - - if (err != SCSI_DH_OK) - return err; - - /* Check if vpd page exceeds initial buffer */ - len = (h->buff[2] << 8) + h->buff[3] + 4; - if (len > h->bufflen) { - /* Resubmit with the correct length */ - if (realloc_buffer(h, len)) { - sdev_printk(KERN_WARNING, sdev, - "%s: kmalloc buffer failed\n", - ALUA_DH_NAME); - /* Temporary failure, bypass */ - return SCSI_DH_DEV_TEMP_BUSY; - } - goto retry; - } - - /* - * Now look for the correct descriptor. - */ - d = h->buff + 4; - while (d < h->buff + len) { - switch (d[1] & 0xf) { - case 0x4: - /* Relative target port */ - h->rel_port = (d[6] << 8) + d[7]; - break; - case 0x5: - /* Target port group */ - h->group_id = (d[6] << 8) + d[7]; - break; - default: - break; - } - d += d[3] + 4; - } - - if (h->group_id == -1) { + group_id = scsi_vpd_tpg_id(sdev, &rel_port); + if (group_id < 0) { /* * Internal error; TPGS supported but required * VPD identification descriptors not present. @@ -408,34 +345,65 @@ static int alua_vpd_inquiry(struct scsi_device *sdev, struct alua_dh_data *h) sdev_printk(KERN_INFO, sdev, "%s: No target port descriptors found\n", ALUA_DH_NAME); - h->state = TPGS_STATE_OPTIMIZED; - h->tpgs = TPGS_MODE_NONE; - err = SCSI_DH_DEV_UNSUPP; - } else { - sdev_printk(KERN_INFO, sdev, - "%s: port group %02x rel port %02x\n", - ALUA_DH_NAME, h->group_id, h->rel_port); + return SCSI_DH_DEV_UNSUPP; } - return err; + pg = alua_alloc_pg(sdev, group_id, tpgs); + if (IS_ERR(pg)) { + if (PTR_ERR(pg) == -ENOMEM) + return SCSI_DH_NOMEM; + return SCSI_DH_DEV_UNSUPP; + } + sdev_printk(KERN_INFO, sdev, + "%s: device %s port group %x rel port %x\n", + ALUA_DH_NAME, pg->device_id_str, group_id, rel_port); + + /* Check for existing port group references */ + spin_lock(&h->pg_lock); + old_pg = h->pg; + if (old_pg != pg) { + /* port group has changed.
Update to new port group */ + if (h->pg) { + spin_lock_irqsave(&old_pg->lock, flags); + list_del_rcu(&h->node); + spin_unlock_irqrestore(&old_pg->lock, flags); + } + rcu_assign_pointer(h->pg, pg); + pg_updated = true; + } + + spin_lock_irqsave(&pg->lock, flags); + if (sdev->synchronous_alua) + pg->flags |= ALUA_SYNC_STPG; + if (pg_updated) + list_add_rcu(&h->node, &pg->dh_list); + spin_unlock_irqrestore(&pg->lock, flags); + + alua_rtpg_queue(h->pg, sdev, NULL, true); + spin_unlock(&h->pg_lock); + + if (old_pg) + kref_put(&old_pg->kref, release_port_group); + + return SCSI_DH_OK; } -static char print_alua_state(int state) +static char print_alua_state(unsigned char state) { switch (state) { - case TPGS_STATE_OPTIMIZED: + case SCSI_ACCESS_STATE_OPTIMAL: return 'A'; - case TPGS_STATE_NONOPTIMIZED: + case SCSI_ACCESS_STATE_ACTIVE: return 'N'; - case TPGS_STATE_STANDBY: + case SCSI_ACCESS_STATE_STANDBY: return 'S'; - case TPGS_STATE_UNAVAILABLE: + case SCSI_ACCESS_STATE_UNAVAILABLE: return 'U'; - case TPGS_STATE_LBA_DEPENDENT: + case SCSI_ACCESS_STATE_LBA: return 'L'; - case TPGS_STATE_OFFLINE: + case SCSI_ACCESS_STATE_OFFLINE: return 'O'; - case TPGS_STATE_TRANSITIONING: + case SCSI_ACCESS_STATE_TRANSITIONING: return 'T'; default: return 'X'; @@ -447,40 +415,24 @@ static int alua_check_sense(struct scsi_device *sdev, { switch (sense_hdr->sense_key) { case NOT_READY: - if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0a) + if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0a) { /* * LUN Not Accessible - ALUA state transition */ - return ADD_TO_MLQUEUE; - if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0b) - /* - * LUN Not Accessible -- Target port in standby state - */ - return SUCCESS; - if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0c) - /* - * LUN Not Accessible -- Target port in unavailable state - */ - return SUCCESS; - if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x12) - /* - * LUN Not Ready -- Offline - */ - return SUCCESS; - if (sdev->allow_restart && - sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x02) - /* - * if the device is not started, we need to wake - * the error handler to start the motor - */ - return FAILED; + alua_check(sdev, false); + return NEEDS_RETRY; + } break; case UNIT_ATTENTION: - if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00) + if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00) { /* - * Power On, Reset, or Bus Device Reset, just retry. + * Power On, Reset, or Bus Device Reset. + * Might have obscured a state transition, + * so schedule a recheck. 
*/ + alua_check(sdev, true); return ADD_TO_MLQUEUE; + } if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x04) /* * Device internal reset @@ -491,16 +443,20 @@ static int alua_check_sense(struct scsi_device *sdev, * Mode Parameters Changed */ return ADD_TO_MLQUEUE; - if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x06) + if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x06) { /* * ALUA state changed */ + alua_check(sdev, true); return ADD_TO_MLQUEUE; - if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x07) + } + if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x07) { /* * Implicit ALUA state transition failed */ + alua_check(sdev, true); return ADD_TO_MLQUEUE; + } if (sense_hdr->asc == 0x3f && sense_hdr->ascq == 0x03) /* * Inquiry data has changed @@ -520,38 +476,74 @@ static int alua_check_sense(struct scsi_device *sdev, } /* + * alua_tur - Send a TEST UNIT READY + * @sdev: device to which the TEST UNIT READY command should be sent + * + * Send a TEST UNIT READY to @sdev to figure out the device state. + * Returns SCSI_DH_RETRY if the sense code is NOT READY/ALUA TRANSITIONING, + * SCSI_DH_OK if no error occurred, and SCSI_DH_IO otherwise. + */ +static int alua_tur(struct scsi_device *sdev) +{ + struct scsi_sense_hdr sense_hdr; + int retval; + + retval = scsi_test_unit_ready(sdev, ALUA_FAILOVER_TIMEOUT * HZ, + ALUA_FAILOVER_RETRIES, &sense_hdr); + if (sense_hdr.sense_key == NOT_READY && + sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a) + return SCSI_DH_RETRY; + else if (retval) + return SCSI_DH_IO; + else + return SCSI_DH_OK; +} + +/* * alua_rtpg - Evaluate REPORT TARGET GROUP STATES * @sdev: the device to be evaluated. - * @wait_for_transition: if nonzero, wait ALUA_FAILOVER_TIMEOUT seconds for device to exit transitioning state * * Evaluate the Target Port Group State. * Returns SCSI_DH_DEV_OFFLINED if the path is * found to be unusable.
*/ -static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h, int wait_for_transition) +static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg) { struct scsi_sense_hdr sense_hdr; - int len, k, off, valid_states = 0; - unsigned char *ucp; - unsigned err; - bool rtpg_ext_hdr_req = 1; - unsigned long expiry, interval = 0; + struct alua_port_group *tmp_pg; + int len, k, off, valid_states = 0, bufflen = ALUA_RTPG_SIZE; + unsigned char *desc, *buff; + unsigned err = SCSI_DH_OK, retval; unsigned int tpg_desc_tbl_off; unsigned char orig_transition_tmo; + unsigned long flags; - if (!h->transition_tmo) - expiry = round_jiffies_up(jiffies + ALUA_FAILOVER_TIMEOUT * HZ); - else - expiry = round_jiffies_up(jiffies + h->transition_tmo * HZ); + if (!pg->expiry) { + unsigned long transition_tmo = ALUA_FAILOVER_TIMEOUT * HZ; - retry: - err = submit_rtpg(sdev, h, rtpg_ext_hdr_req); + if (pg->transition_tmo) + transition_tmo = pg->transition_tmo * HZ; + + pg->expiry = round_jiffies_up(jiffies + transition_tmo); + } + + buff = kzalloc(bufflen, GFP_KERNEL); + if (!buff) + return SCSI_DH_DEV_TEMP_BUSY; - if (err == SCSI_DH_IO && h->senselen > 0) { - err = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE, - &sense_hdr); - if (!err) + retry: + retval = submit_rtpg(sdev, buff, bufflen, &sense_hdr, pg->flags); + + if (retval) { + if (!scsi_sense_valid(&sense_hdr)) { + sdev_printk(KERN_INFO, sdev, + "%s: rtpg failed, result %d\n", + ALUA_DH_NAME, retval); + kfree(buff); + if (driver_byte(retval) == DRIVER_ERROR) + return SCSI_DH_DEV_TEMP_BUSY; return SCSI_DH_IO; + } /* * submit_rtpg() has failed on existing arrays @@ -561,73 +553,111 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h, int wait_ * The retry without rtpg_ext_hdr_req set * handles this. */ - if (rtpg_ext_hdr_req == 1 && + if (!(pg->flags & ALUA_RTPG_EXT_HDR_UNSUPP) && sense_hdr.sense_key == ILLEGAL_REQUEST && sense_hdr.asc == 0x24 && sense_hdr.ascq == 0) { - rtpg_ext_hdr_req = 0; + pg->flags |= ALUA_RTPG_EXT_HDR_UNSUPP; goto retry; } - - err = alua_check_sense(sdev, &sense_hdr); - if (err == ADD_TO_MLQUEUE && time_before(jiffies, expiry)) - goto retry; - sdev_printk(KERN_INFO, sdev, - "%s: rtpg sense code %02x/%02x/%02x\n", - ALUA_DH_NAME, sense_hdr.sense_key, - sense_hdr.asc, sense_hdr.ascq); - err = SCSI_DH_IO; + /* + * Retry on ALUA state transition or if any + * UNIT ATTENTION occurred.
+ */ + if (sense_hdr.sense_key == NOT_READY && + sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a) + err = SCSI_DH_RETRY; + else if (sense_hdr.sense_key == UNIT_ATTENTION) + err = SCSI_DH_RETRY; + if (err == SCSI_DH_RETRY && + pg->expiry != 0 && time_before(jiffies, pg->expiry)) { + sdev_printk(KERN_ERR, sdev, "%s: rtpg retry\n", + ALUA_DH_NAME); + scsi_print_sense_hdr(sdev, ALUA_DH_NAME, &sense_hdr); + kfree(buff); + return err; + } + sdev_printk(KERN_ERR, sdev, "%s: rtpg failed\n", + ALUA_DH_NAME); + scsi_print_sense_hdr(sdev, ALUA_DH_NAME, &sense_hdr); + kfree(buff); + pg->expiry = 0; + return SCSI_DH_IO; } - if (err != SCSI_DH_OK) - return err; - len = (h->buff[0] << 24) + (h->buff[1] << 16) + - (h->buff[2] << 8) + h->buff[3] + 4; + len = get_unaligned_be32(&buff[0]) + 4; - if (len > h->bufflen) { + if (len > bufflen) { /* Resubmit with the correct length */ - if (realloc_buffer(h, len)) { + kfree(buff); + bufflen = len; + buff = kmalloc(bufflen, GFP_KERNEL); + if (!buff) { sdev_printk(KERN_WARNING, sdev, "%s: kmalloc buffer failed\n",__func__); /* Temporary failure, bypass */ + pg->expiry = 0; return SCSI_DH_DEV_TEMP_BUSY; } goto retry; } - orig_transition_tmo = h->transition_tmo; - if ((h->buff[4] & RTPG_FMT_MASK) == RTPG_FMT_EXT_HDR && h->buff[5] != 0) - h->transition_tmo = h->buff[5]; + orig_transition_tmo = pg->transition_tmo; + if ((buff[4] & RTPG_FMT_MASK) == RTPG_FMT_EXT_HDR && buff[5] != 0) + pg->transition_tmo = buff[5]; else - h->transition_tmo = ALUA_FAILOVER_TIMEOUT; + pg->transition_tmo = ALUA_FAILOVER_TIMEOUT; - if (wait_for_transition && (orig_transition_tmo != h->transition_tmo)) { + if (orig_transition_tmo != pg->transition_tmo) { sdev_printk(KERN_INFO, sdev, "%s: transition timeout set to %d seconds\n", - ALUA_DH_NAME, h->transition_tmo); - expiry = jiffies + h->transition_tmo * HZ; + ALUA_DH_NAME, pg->transition_tmo); + pg->expiry = jiffies + pg->transition_tmo * HZ; } - if ((h->buff[4] & RTPG_FMT_MASK) == RTPG_FMT_EXT_HDR) + if ((buff[4] & RTPG_FMT_MASK) == RTPG_FMT_EXT_HDR) tpg_desc_tbl_off = 8; else tpg_desc_tbl_off = 4; - for (k = tpg_desc_tbl_off, ucp = h->buff + tpg_desc_tbl_off; + for (k = tpg_desc_tbl_off, desc = buff + tpg_desc_tbl_off; k < len; - k += off, ucp += off) { - - if (h->group_id == (ucp[2] << 8) + ucp[3]) { - h->state = ucp[0] & 0x0f; - h->pref = ucp[0] >> 7; - valid_states = ucp[1]; + k += off, desc += off) { + u16 group_id = get_unaligned_be16(&desc[2]); + + spin_lock_irqsave(&port_group_lock, flags); + tmp_pg = alua_find_get_pg(pg->device_id_str, pg->device_id_len, + group_id); + spin_unlock_irqrestore(&port_group_lock, flags); + if (tmp_pg) { + if (spin_trylock_irqsave(&tmp_pg->lock, flags)) { + if ((tmp_pg == pg) || + !(tmp_pg->flags & ALUA_PG_RUNNING)) { + struct alua_dh_data *h; + + tmp_pg->state = desc[0] & 0x0f; + tmp_pg->pref = desc[0] >> 7; + rcu_read_lock(); + list_for_each_entry_rcu(h, + &tmp_pg->dh_list, node) { + /* h->sdev should always be valid */ + BUG_ON(!h->sdev); + h->sdev->access_state = desc[0]; + } + rcu_read_unlock(); + } + if (tmp_pg == pg) + valid_states = desc[1]; + spin_unlock_irqrestore(&tmp_pg->lock, flags); + } + kref_put(&tmp_pg->kref, release_port_group); } - off = 8 + (ucp[7] * 4); + off = 8 + (desc[7] * 4); } + spin_lock_irqsave(&pg->lock, flags); sdev_printk(KERN_INFO, sdev, "%s: port group %02x state %c %s supports %c%c%c%c%c%c%c\n", - ALUA_DH_NAME, h->group_id, print_alua_state(h->state), - h->pref ? "preferred" : "non-preferred", + ALUA_DH_NAME, pg->group_id, print_alua_state(pg->state), + pg->pref ?
"preferred" : "non-preferred", valid_states&TPGS_SUPPORT_TRANSITION?'T':'t', valid_states&TPGS_SUPPORT_OFFLINE?'O':'o', valid_states&TPGS_SUPPORT_LBA_DEPENDENT?'L':'l', @@ -636,36 +666,236 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h, int wait_ valid_states&TPGS_SUPPORT_NONOPTIMIZED?'N':'n', valid_states&TPGS_SUPPORT_OPTIMIZED?'A':'a'); - switch (h->state) { - case TPGS_STATE_TRANSITIONING: - if (wait_for_transition) { - if (time_before(jiffies, expiry)) { - /* State transition, retry */ - interval += 2000; - msleep(interval); - goto retry; - } + switch (pg->state) { + case SCSI_ACCESS_STATE_TRANSITIONING: + if (time_before(jiffies, pg->expiry)) { + /* State transition, retry */ + pg->interval = 2; err = SCSI_DH_RETRY; } else { - err = SCSI_DH_OK; - } + struct alua_dh_data *h; - /* Transitioning time exceeded, set port to standby */ - h->state = TPGS_STATE_STANDBY; + /* Transitioning time exceeded, set port to standby */ + err = SCSI_DH_IO; + pg->state = SCSI_ACCESS_STATE_STANDBY; + pg->expiry = 0; + rcu_read_lock(); + list_for_each_entry_rcu(h, &pg->dh_list, node) { + BUG_ON(!h->sdev); + h->sdev->access_state = + (pg->state & SCSI_ACCESS_STATE_MASK); + if (pg->pref) + h->sdev->access_state |= + SCSI_ACCESS_STATE_PREFERRED; + } + rcu_read_unlock(); + } break; - case TPGS_STATE_OFFLINE: + case SCSI_ACCESS_STATE_OFFLINE: /* Path unusable */ err = SCSI_DH_DEV_OFFLINED; + pg->expiry = 0; break; default: /* Usable path if active */ err = SCSI_DH_OK; + pg->expiry = 0; break; } + spin_unlock_irqrestore(&pg->lock, flags); + kfree(buff); return err; } /* + * alua_stpg - Issue a SET TARGET PORT GROUP command + * + * Issue a SET TARGET PORT GROUP command and evaluate the + * response. Returns SCSI_DH_RETRY by default to trigger + * a re-evaluation of the target group state or SCSI_DH_OK + * if no further action needs to be taken.
+ */ +static unsigned alua_stpg(struct scsi_device *sdev, struct alua_port_group *pg) +{ + int retval; + struct scsi_sense_hdr sense_hdr; + + if (!(pg->tpgs & TPGS_MODE_EXPLICIT)) { + /* Only implicit ALUA supported, retry */ + return SCSI_DH_RETRY; + } + switch (pg->state) { + case SCSI_ACCESS_STATE_OPTIMAL: + return SCSI_DH_OK; + case SCSI_ACCESS_STATE_ACTIVE: + if ((pg->flags & ALUA_OPTIMIZE_STPG) && + !pg->pref && + (pg->tpgs & TPGS_MODE_IMPLICIT)) + return SCSI_DH_OK; + break; + case SCSI_ACCESS_STATE_STANDBY: + case SCSI_ACCESS_STATE_UNAVAILABLE: + break; + case SCSI_ACCESS_STATE_OFFLINE: + return SCSI_DH_IO; + case SCSI_ACCESS_STATE_TRANSITIONING: + break; + default: + sdev_printk(KERN_INFO, sdev, + "%s: stpg failed, unhandled TPGS state %d", + ALUA_DH_NAME, pg->state); + return SCSI_DH_NOSYS; + } + retval = submit_stpg(sdev, pg->group_id, &sense_hdr); + + if (retval) { + if (!scsi_sense_valid(&sense_hdr)) { + sdev_printk(KERN_INFO, sdev, + "%s: stpg failed, result %d", + ALUA_DH_NAME, retval); + if (driver_byte(retval) == DRIVER_ERROR) + return SCSI_DH_DEV_TEMP_BUSY; + } else { + sdev_printk(KERN_INFO, sdev, "%s: stpg failed\n", + ALUA_DH_NAME); + scsi_print_sense_hdr(sdev, ALUA_DH_NAME, &sense_hdr); + } + } + /* Retry RTPG */ + return SCSI_DH_RETRY; +} + +static void alua_rtpg_work(struct work_struct *work) +{ + struct alua_port_group *pg = + container_of(work, struct alua_port_group, rtpg_work.work); + struct scsi_device *sdev; + LIST_HEAD(qdata_list); + int err = SCSI_DH_OK; + struct alua_queue_data *qdata, *tmp; + unsigned long flags; + struct workqueue_struct *alua_wq = kaluad_wq; + + spin_lock_irqsave(&pg->lock, flags); + sdev = pg->rtpg_sdev; + if (!sdev) { + WARN_ON(pg->flags & ALUA_PG_RUN_RTPG); + WARN_ON(pg->flags & ALUA_PG_RUN_STPG); + spin_unlock_irqrestore(&pg->lock, flags); + return; + } + if (pg->flags & ALUA_SYNC_STPG) + alua_wq = kaluad_sync_wq; + pg->flags |= ALUA_PG_RUNNING; + if (pg->flags & ALUA_PG_RUN_RTPG) { + int state = pg->state; + + pg->flags &= ~ALUA_PG_RUN_RTPG; + spin_unlock_irqrestore(&pg->lock, flags); + if (state == SCSI_ACCESS_STATE_TRANSITIONING) { + if (alua_tur(sdev) == SCSI_DH_RETRY) { + spin_lock_irqsave(&pg->lock, flags); + pg->flags &= ~ALUA_PG_RUNNING; + pg->flags |= ALUA_PG_RUN_RTPG; + spin_unlock_irqrestore(&pg->lock, flags); + queue_delayed_work(alua_wq, &pg->rtpg_work, + pg->interval * HZ); + return; + } + /* Send RTPG on failure or if TUR indicates SUCCESS */ + } + err = alua_rtpg(sdev, pg); + spin_lock_irqsave(&pg->lock, flags); + if (err == SCSI_DH_RETRY || pg->flags & ALUA_PG_RUN_RTPG) { + pg->flags &= ~ALUA_PG_RUNNING; + pg->flags |= ALUA_PG_RUN_RTPG; + spin_unlock_irqrestore(&pg->lock, flags); + queue_delayed_work(alua_wq, &pg->rtpg_work, + pg->interval * HZ); + return; + } + if (err != SCSI_DH_OK) + pg->flags &= ~ALUA_PG_RUN_STPG; + } + if (pg->flags & ALUA_PG_RUN_STPG) { + pg->flags &= ~ALUA_PG_RUN_STPG; + spin_unlock_irqrestore(&pg->lock, flags); + err = alua_stpg(sdev, pg); + spin_lock_irqsave(&pg->lock, flags); + if (err == SCSI_DH_RETRY || pg->flags & ALUA_PG_RUN_RTPG) { + pg->flags |= ALUA_PG_RUN_RTPG; + pg->interval = 0; + pg->flags &= ~ALUA_PG_RUNNING; + spin_unlock_irqrestore(&pg->lock, flags); + queue_delayed_work(alua_wq, &pg->rtpg_work, + pg->interval * HZ); + return; + } + } + + list_splice_init(&pg->rtpg_list, &qdata_list); + pg->rtpg_sdev = NULL; + spin_unlock_irqrestore(&pg->lock, flags); + + list_for_each_entry_safe(qdata, tmp, &qdata_list, entry) { + list_del(&qdata->entry); + if (qdata->callback_fn) + 
qdata->callback_fn(qdata->callback_data, err); + kfree(qdata); + } + spin_lock_irqsave(&pg->lock, flags); + pg->flags &= ~ALUA_PG_RUNNING; + spin_unlock_irqrestore(&pg->lock, flags); + scsi_device_put(sdev); + kref_put(&pg->kref, release_port_group); +} + +static void alua_rtpg_queue(struct alua_port_group *pg, + struct scsi_device *sdev, + struct alua_queue_data *qdata, bool force) +{ + int start_queue = 0; + unsigned long flags; + struct workqueue_struct *alua_wq = kaluad_wq; + + if (!pg) + return; + + spin_lock_irqsave(&pg->lock, flags); + if (qdata) { + list_add_tail(&qdata->entry, &pg->rtpg_list); + pg->flags |= ALUA_PG_RUN_STPG; + force = true; + } + if (pg->rtpg_sdev == NULL) { + pg->interval = 0; + pg->flags |= ALUA_PG_RUN_RTPG; + kref_get(&pg->kref); + pg->rtpg_sdev = sdev; + scsi_device_get(sdev); + start_queue = 1; + } else if (!(pg->flags & ALUA_PG_RUN_RTPG) && force) { + pg->flags |= ALUA_PG_RUN_RTPG; + /* Do not queue if the worker is already running */ + if (!(pg->flags & ALUA_PG_RUNNING)) { + kref_get(&pg->kref); + start_queue = 1; + } + } + + if (pg->flags & ALUA_SYNC_STPG) + alua_wq = kaluad_sync_wq; + spin_unlock_irqrestore(&pg->lock, flags); + + if (start_queue && + !queue_delayed_work(alua_wq, &pg->rtpg_work, + msecs_to_jiffies(ALUA_RTPG_DELAY_MSECS))) { + scsi_device_put(sdev); + kref_put(&pg->kref, release_port_group); + } +} + +/* * alua_initialize - Initialize ALUA state * @sdev: the device to be initialized * @@ -674,21 +904,14 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h, int wait_ */ static int alua_initialize(struct scsi_device *sdev, struct alua_dh_data *h) { - int err; - - err = alua_check_tpgs(sdev, h); - if (err != SCSI_DH_OK) - goto out; - - err = alua_vpd_inquiry(sdev, h); - if (err != SCSI_DH_OK) - goto out; - - err = alua_rtpg(sdev, h, 0); - if (err != SCSI_DH_OK) - goto out; - -out: + int err = SCSI_DH_DEV_UNSUPP, tpgs; + + mutex_lock(&h->init_mutex); + tpgs = alua_check_tpgs(sdev); + if (tpgs != TPGS_MODE_NONE) + err = alua_check_vpd(sdev, h, tpgs); + h->init_error = err; + mutex_unlock(&h->init_mutex); return err; } /* @@ -703,9 +926,11 @@ out: static int alua_set_params(struct scsi_device *sdev, const char *params) { struct alua_dh_data *h = sdev->handler_data; + struct alua_port_group __rcu *pg = NULL; unsigned int optimize = 0, argc; const char *p = params; int result = SCSI_DH_OK; + unsigned long flags; if ((sscanf(params, "%u", &argc) != 1) || (argc != 1)) return -EINVAL; @@ -715,18 +940,23 @@ static int alua_set_params(struct scsi_device *sdev, const char *params) if ((sscanf(p, "%u", &optimize) != 1) || (optimize > 1)) return -EINVAL; + rcu_read_lock(); + pg = rcu_dereference(h->pg); + if (!pg) { + rcu_read_unlock(); + return -ENXIO; + } + spin_lock_irqsave(&pg->lock, flags); if (optimize) - h->flags |= ALUA_OPTIMIZE_STPG; + pg->flags |= ALUA_OPTIMIZE_STPG; else - h->flags &= ~ALUA_OPTIMIZE_STPG; + pg->flags &= ~ALUA_OPTIMIZE_STPG; + spin_unlock_irqrestore(&pg->lock, flags); + rcu_read_unlock(); return result; } -static uint optimize_stpg; -module_param(optimize_stpg, uint, S_IRUGO|S_IWUSR); -MODULE_PARM_DESC(optimize_stpg, "Allow use of a non-optimized path, rather than sending a STPG, when implicit TPGS is supported (0=No,1=Yes). 
Default is 0."); - /* * alua_activate - activate a path * @sdev: device on the path to be activated @@ -742,48 +972,33 @@ static int alua_activate(struct scsi_device *sdev, { struct alua_dh_data *h = sdev->handler_data; int err = SCSI_DH_OK; - int stpg = 0; + struct alua_queue_data *qdata; + struct alua_port_group __rcu *pg; - err = alua_rtpg(sdev, h, 1); - if (err != SCSI_DH_OK) + qdata = kzalloc(sizeof(*qdata), GFP_KERNEL); + if (!qdata) { + err = SCSI_DH_RES_TEMP_UNAVAIL; goto out; - - if (optimize_stpg) - h->flags |= ALUA_OPTIMIZE_STPG; - - if (h->tpgs & TPGS_MODE_EXPLICIT) { - switch (h->state) { - case TPGS_STATE_NONOPTIMIZED: - stpg = 1; - if ((h->flags & ALUA_OPTIMIZE_STPG) && - (!h->pref) && - (h->tpgs & TPGS_MODE_IMPLICIT)) - stpg = 0; - break; - case TPGS_STATE_STANDBY: - case TPGS_STATE_UNAVAILABLE: - stpg = 1; - break; - case TPGS_STATE_OFFLINE: - err = SCSI_DH_IO; - break; - case TPGS_STATE_TRANSITIONING: - err = SCSI_DH_RETRY; - break; - default: - break; - } } - - if (stpg) { - h->callback_fn = fn; - h->callback_data = data; - err = submit_stpg(h); - if (err == SCSI_DH_OK) - return 0; - h->callback_fn = h->callback_data = NULL; + qdata->callback_fn = fn; + qdata->callback_data = data; + + mutex_lock(&h->init_mutex); + rcu_read_lock(); + pg = rcu_dereference(h->pg); + if (!pg || !kref_get_unless_zero(&pg->kref)) { + rcu_read_unlock(); + kfree(qdata); + err = h->init_error; + mutex_unlock(&h->init_mutex); + goto out; } + fn = NULL; + rcu_read_unlock(); + mutex_unlock(&h->init_mutex); + alua_rtpg_queue(pg, sdev, qdata, true); + kref_put(&pg->kref, release_port_group); out: if (fn) fn(data, err); @@ -791,6 +1006,29 @@ out: } /* + * alua_check - check path status + * @sdev: device on the path to be checked + * + * Check the device status + */ +static void alua_check(struct scsi_device *sdev, bool force) +{ + struct alua_dh_data *h = sdev->handler_data; + struct alua_port_group *pg; + + rcu_read_lock(); + pg = rcu_dereference(h->pg); + if (!pg || !kref_get_unless_zero(&pg->kref)) { + rcu_read_unlock(); + return; + } + rcu_read_unlock(); + + alua_rtpg_queue(pg, sdev, NULL, force); + kref_put(&pg->kref, release_port_group); +} + +/* * alua_prep_fn - request callback * * Fail I/O to all paths not in state @@ -799,13 +1037,20 @@ out: static int alua_prep_fn(struct scsi_device *sdev, struct request *req) { struct alua_dh_data *h = sdev->handler_data; + struct alua_port_group __rcu *pg; + unsigned char state = SCSI_ACCESS_STATE_OPTIMAL; int ret = BLKPREP_OK; - if (h->state == TPGS_STATE_TRANSITIONING) + rcu_read_lock(); + pg = rcu_dereference(h->pg); + if (pg) + state = pg->state; + rcu_read_unlock(); + if (state == SCSI_ACCESS_STATE_TRANSITIONING) ret = BLKPREP_DEFER; - else if (h->state != TPGS_STATE_OPTIMIZED && - h->state != TPGS_STATE_NONOPTIMIZED && - h->state != TPGS_STATE_LBA_DEPENDENT) { + else if (state != SCSI_ACCESS_STATE_OPTIMAL && + state != SCSI_ACCESS_STATE_ACTIVE && + state != SCSI_ACCESS_STATE_LBA) { ret = BLKPREP_KILL; req->cmd_flags |= REQ_QUIET; } @@ -813,6 +1058,13 @@ static int alua_prep_fn(struct scsi_device *sdev, struct request *req) } +static void alua_rescan(struct scsi_device *sdev) +{ + struct alua_dh_data *h = sdev->handler_data; + + alua_initialize(sdev, h); +} + /* * alua_bus_attach - Attach device handler * @sdev: device to be attached to @@ -820,20 +1072,21 @@ static int alua_prep_fn(struct scsi_device *sdev, struct request *req) static int alua_bus_attach(struct scsi_device *sdev) { struct alua_dh_data *h; - int err; + int err, ret = -EINVAL; h = 
kzalloc(sizeof(*h) , GFP_KERNEL); if (!h) return -ENOMEM; - h->tpgs = TPGS_MODE_UNINITIALIZED; - h->state = TPGS_STATE_OPTIMIZED; - h->group_id = -1; - h->rel_port = -1; - h->buff = h->inq; - h->bufflen = ALUA_INQUIRY_SIZE; + spin_lock_init(&h->pg_lock); + rcu_assign_pointer(h->pg, NULL); + h->init_error = SCSI_DH_OK; h->sdev = sdev; + INIT_LIST_HEAD(&h->node); + mutex_init(&h->init_mutex); err = alua_initialize(sdev, h); + if (err == SCSI_DH_NOMEM) + ret = -ENOMEM; if (err != SCSI_DH_OK && err != SCSI_DH_DEV_OFFLINED) goto failed; @@ -841,7 +1094,7 @@ static int alua_bus_attach(struct scsi_device *sdev) return 0; failed: kfree(h); - return -EINVAL; + return ret; } /* @@ -851,9 +1104,19 @@ failed: static void alua_bus_detach(struct scsi_device *sdev) { struct alua_dh_data *h = sdev->handler_data; - - if (h->buff && h->inq != h->buff) - kfree(h->buff); + struct alua_port_group *pg; + + spin_lock(&h->pg_lock); + pg = h->pg; + rcu_assign_pointer(h->pg, NULL); + h->sdev = NULL; + spin_unlock(&h->pg_lock); + if (pg) { + spin_lock(&pg->lock); + list_del_rcu(&h->node); + spin_unlock(&pg->lock); + kref_put(&pg->kref, release_port_group); + } sdev->handler_data = NULL; kfree(h); } @@ -866,6 +1129,7 @@ static struct scsi_device_handler alua_dh = { .prep_fn = alua_prep_fn, .check_sense = alua_check_sense, .activate = alua_activate, + .rescan = alua_rescan, .set_params = alua_set_params, }; @@ -873,16 +1137,31 @@ static int __init alua_init(void) { int r; + kaluad_wq = alloc_workqueue("kaluad", WQ_MEM_RECLAIM, 0); + if (!kaluad_wq) { + /* Temporary failure, bypass */ + return SCSI_DH_DEV_TEMP_BUSY; + } + kaluad_sync_wq = create_workqueue("kaluad_sync"); + if (!kaluad_sync_wq) { + destroy_workqueue(kaluad_wq); + return SCSI_DH_DEV_TEMP_BUSY; + } r = scsi_register_device_handler(&alua_dh); - if (r != 0) + if (r != 0) { printk(KERN_ERR "%s: Failed to register scsi device handler", ALUA_DH_NAME); + destroy_workqueue(kaluad_sync_wq); + destroy_workqueue(kaluad_wq); + } return r; } static void __exit alua_exit(void) { scsi_unregister_device_handler(&alua_dh); + destroy_workqueue(kaluad_sync_wq); + destroy_workqueue(kaluad_wq); } module_init(alua_init); diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c index e6fb97cb12f4..375d81850f15 100644 --- a/drivers/scsi/device_handler/scsi_dh_emc.c +++ b/drivers/scsi/device_handler/scsi_dh_emc.c @@ -199,7 +199,12 @@ static int parse_sp_info_reply(struct scsi_device *sdev, csdev->lun_state = csdev->buffer[4]; csdev->current_sp = csdev->buffer[8]; csdev->port = csdev->buffer[7]; - + if (csdev->lun_state == CLARIION_LUN_OWNED) + sdev->access_state = SCSI_ACCESS_STATE_OPTIMAL; + else + sdev->access_state = SCSI_ACCESS_STATE_STANDBY; + if (csdev->default_sp == csdev->current_sp) + sdev->access_state |= SCSI_ACCESS_STATE_PREFERRED; out: return err; } diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c index 361358134315..06fbd0b0c68a 100644 --- a/drivers/scsi/device_handler/scsi_dh_rdac.c +++ b/drivers/scsi/device_handler/scsi_dh_rdac.c @@ -165,6 +165,7 @@ struct rdac_controller { struct work_struct ms_work; struct scsi_device *ms_sdev; struct list_head ms_head; + struct list_head dh_list; }; struct c2_inquiry { @@ -181,7 +182,9 @@ struct c2_inquiry { }; struct rdac_dh_data { + struct list_head node; struct rdac_controller *ctlr; + struct scsi_device *sdev; #define UNINITIALIZED_LUN (1 << 8) unsigned lun; @@ -392,6 +395,7 @@ static struct rdac_controller *get_controller(int 
index, char *array_name, INIT_WORK(&ctlr->ms_work, send_mode_select); INIT_LIST_HEAD(&ctlr->ms_head); list_add(&ctlr->node, &ctlr_list); + INIT_LIST_HEAD(&ctlr->dh_list); return ctlr; } @@ -455,7 +459,8 @@ static int get_lun_info(struct scsi_device *sdev, struct rdac_dh_data *h, static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h) { - int err; + int err, access_state; + struct rdac_dh_data *tmp; struct c9_inquiry *inqp; h->state = RDAC_STATE_ACTIVE; @@ -471,19 +476,31 @@ static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h) h->mode = RDAC_MODE; /* LUN in RDAC mode */ /* Update ownership */ - if (inqp->avte_cvp & 0x1) + if (inqp->avte_cvp & 0x1) { h->lun_state = RDAC_LUN_OWNED; - else { + access_state = SCSI_ACCESS_STATE_OPTIMAL; + } else { h->lun_state = RDAC_LUN_UNOWNED; - if (h->mode == RDAC_MODE) + if (h->mode == RDAC_MODE) { h->state = RDAC_STATE_PASSIVE; + access_state = SCSI_ACCESS_STATE_STANDBY; + } else + access_state = SCSI_ACCESS_STATE_ACTIVE; } /* Update path prio*/ - if (inqp->path_prio & 0x1) + if (inqp->path_prio & 0x1) { h->preferred = RDAC_PREFERRED; - else + access_state |= SCSI_ACCESS_STATE_PREFERRED; + } else h->preferred = RDAC_NON_PREFERRED; + rcu_read_lock(); + list_for_each_entry_rcu(tmp, &h->ctlr->dh_list, node) { + /* h->sdev should always be valid */ + BUG_ON(!tmp->sdev); + tmp->sdev->access_state = access_state; + } + rcu_read_unlock(); } return err; @@ -508,6 +525,10 @@ static int initialize_controller(struct scsi_device *sdev, h->ctlr = get_controller(index, array_name, array_id, sdev); if (!h->ctlr) err = SCSI_DH_RES_TEMP_UNAVAIL; + else { + list_add_rcu(&h->node, &h->ctlr->dh_list); + h->sdev = sdev; + } spin_unlock(&list_lock); } return err; @@ -562,7 +583,7 @@ static int mode_select_handle_sense(struct scsi_device *sdev, /* * Command Lock contention */ - err = SCSI_DH_RETRY; + err = SCSI_DH_IMM_RETRY; break; default: break; @@ -612,6 +633,8 @@ retry: err = mode_select_handle_sense(sdev, h->sense); if (err == SCSI_DH_RETRY && retry_cnt--) goto retry; + if (err == SCSI_DH_IMM_RETRY) + goto retry; } if (err == SCSI_DH_OK) { h->state = RDAC_STATE_ACTIVE; @@ -827,8 +850,11 @@ static void rdac_bus_detach( struct scsi_device *sdev ) flush_workqueue(kmpath_rdacd); spin_lock(&list_lock); - if (h->ctlr) + if (h->ctlr) { + list_del_rcu(&h->node); + h->sdev = NULL; kref_put(&h->ctlr->kref, release_controller); + } spin_unlock(&list_lock); sdev->handler_data = NULL; kfree(h); diff --git a/drivers/scsi/dmx3191d.c b/drivers/scsi/dmx3191d.c index 3e088125a8be..6c14e68b9e1a 100644 --- a/drivers/scsi/dmx3191d.c +++ b/drivers/scsi/dmx3191d.c @@ -36,17 +36,10 @@ #define DONT_USE_INTR -#define NCR5380_read(reg) inb(port + reg) -#define NCR5380_write(reg, value) outb(value, port + reg) +#define NCR5380_read(reg) inb(instance->io_port + reg) +#define NCR5380_write(reg, value) outb(value, instance->io_port + reg) #define NCR5380_implementation_fields /* none */ -#define NCR5380_local_declare() unsigned int port -#define NCR5380_setup(instance) port = instance->io_port - -/* - * Includes needed for NCR5380.[ch] (XXX: Move them to NCR5380.h) - */ -#include <linux/delay.h> #include "NCR5380.h" #include "NCR5380.c" @@ -56,6 +49,7 @@ static struct scsi_host_template dmx3191d_driver_template = { + .module = THIS_MODULE, .proc_name = DMX3191D_DRIVER_NAME, .name = "Domex DMX3191D", .info = NCR5380_info, @@ -67,6 +61,8 @@ static struct scsi_host_template dmx3191d_driver_template = { .sg_tablesize = SG_ALL, .cmd_per_lun = 2, .use_clustering = 
DISABLE_CLUSTERING, + .cmd_size = NCR5380_CMD_SIZE, + .max_sectors = 128, }; static int dmx3191d_probe_one(struct pci_dev *pdev, @@ -97,17 +93,25 @@ static int dmx3191d_probe_one(struct pci_dev *pdev, */ shost->irq = NO_IRQ; - NCR5380_init(shost, FLAG_NO_PSEUDO_DMA | FLAG_DTC3181E); + error = NCR5380_init(shost, FLAG_NO_PSEUDO_DMA); + if (error) + goto out_host_put; + + NCR5380_maybe_reset_bus(shost); pci_set_drvdata(pdev, shost); error = scsi_add_host(shost, &pdev->dev); if (error) - goto out_release_region; + goto out_exit; scsi_scan_host(shost); return 0; +out_exit: + NCR5380_exit(shost); +out_host_put: + scsi_host_put(shost); out_release_region: release_region(io, DMX3191D_REGION_LEN); out_disable_device: @@ -119,15 +123,14 @@ static int dmx3191d_probe_one(struct pci_dev *pdev, static void dmx3191d_remove_one(struct pci_dev *pdev) { struct Scsi_Host *shost = pci_get_drvdata(pdev); + unsigned long io = shost->io_port; scsi_remove_host(shost); NCR5380_exit(shost); - - release_region(shost->io_port, DMX3191D_REGION_LEN); - pci_disable_device(pdev); - scsi_host_put(shost); + release_region(io, DMX3191D_REGION_LEN); + pci_disable_device(pdev); } static struct pci_device_id dmx3191d_pci_tbl[] = { diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c index d4cda5e9600e..21c8d210c456 100644 --- a/drivers/scsi/dpt_i2o.c +++ b/drivers/scsi/dpt_i2o.c @@ -180,11 +180,14 @@ static u8 adpt_read_blink_led(adpt_hba* host) *============================================================================ */ +#ifdef MODULE static struct pci_device_id dptids[] = { { PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, { PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, { 0, } }; +#endif + MODULE_DEVICE_TABLE(pci,dptids); static int adpt_detect(struct scsi_host_template* sht) diff --git a/drivers/scsi/dtc.c b/drivers/scsi/dtc.c index 4c74c7ba2dff..6c736b071cf4 100644 --- a/drivers/scsi/dtc.c +++ b/drivers/scsi/dtc.c @@ -1,9 +1,5 @@ - #define PSEUDO_DMA #define DONT_USE_INTR -#define UNSAFE /* Leave interrupts enabled during pseudo-dma I/O */ -#define DMA_WORKS_RIGHT - /* * DTC 3180/3280 driver, by @@ -50,15 +46,13 @@ #include <linux/module.h> -#include <linux/signal.h> #include <linux/blkdev.h> -#include <linux/delay.h> -#include <linux/stat.h> #include <linux/string.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <scsi/scsi_host.h> + #include "dtc.h" #define AUTOPROBE_IRQ #include "NCR5380.h" @@ -150,7 +144,7 @@ static const struct signature { static int __init dtc_setup(char *str) { - static int commandline_current = 0; + static int commandline_current; int i; int ints[10]; @@ -188,7 +182,7 @@ __setup("dtc=", dtc_setup); static int __init dtc_detect(struct scsi_host_template * tpnt) { - static int current_override = 0, current_base = 0; + static int current_override, current_base; struct Scsi_Host *instance; unsigned int addr; void __iomem *base; @@ -205,9 +199,8 @@ static int __init dtc_detect(struct scsi_host_template * tpnt) addr = 0; } else for (; !addr && (current_base < NO_BASES); ++current_base) { -#if (DTCDEBUG & DTCDEBUG_INIT) - printk(KERN_DEBUG "scsi-dtc : probing address %08x\n", bases[current_base].address); -#endif + dprintk(NDEBUG_INIT, "dtc: probing address 0x%08x\n", + (unsigned int)bases[current_base].address); if (bases[current_base].noauto) continue; base = ioremap(bases[current_base].address, 0x2000); @@ -216,18 +209,14 @@ static int __init dtc_detect(struct scsi_host_template * tpnt) for (sig = 0; sig < 
NO_SIGNATURES; ++sig) { if (check_signature(base + signatures[sig].offset, signatures[sig].string, strlen(signatures[sig].string))) { addr = bases[current_base].address; -#if (DTCDEBUG & DTCDEBUG_INIT) - printk(KERN_DEBUG "scsi-dtc : detected board.\n"); -#endif + dprintk(NDEBUG_INIT, "dtc: detected board\n"); goto found; } } iounmap(base); } -#if defined(DTCDEBUG) && (DTCDEBUG & DTCDEBUG_INIT) - printk(KERN_DEBUG "scsi-dtc : base = %08x\n", addr); -#endif + dprintk(NDEBUG_INIT, "dtc: addr = 0x%08x\n", addr); if (!addr) break; @@ -235,12 +224,15 @@ static int __init dtc_detect(struct scsi_host_template * tpnt) found: instance = scsi_register(tpnt, sizeof(struct NCR5380_hostdata)); if (instance == NULL) - break; + goto out_unmap; instance->base = addr; ((struct NCR5380_hostdata *)(instance)->hostdata)->base = base; - NCR5380_init(instance, 0); + if (NCR5380_init(instance, FLAG_NO_DMA_FIXUP)) + goto out_unregister; + + NCR5380_maybe_reset_bus(instance); NCR5380_write(DTC_CONTROL_REG, CSR_5380_INTR); /* Enable int's */ if (overrides[current_override].irq != IRQ_AUTO) @@ -271,14 +263,19 @@ found: printk(KERN_WARNING "scsi%d : interrupts not used. Might as well not jumper it.\n", instance->host_no); instance->irq = NO_IRQ; #endif -#if defined(DTCDEBUG) && (DTCDEBUG & DTCDEBUG_INIT) - printk("scsi%d : irq = %d\n", instance->host_no, instance->irq); -#endif + dprintk(NDEBUG_INIT, "scsi%d : irq = %d\n", + instance->host_no, instance->irq); ++current_override; ++count; } return count; + +out_unregister: + scsi_unregister(instance); +out_unmap: + iounmap(base); + return count; } /* @@ -331,12 +328,8 @@ static inline int NCR5380_pread(struct Scsi_Host *instance, unsigned char *dst, unsigned char *d = dst; int i; /* For counting time spent in the poll-loop */ struct NCR5380_hostdata *hostdata = shost_priv(instance); - NCR5380_local_declare(); - NCR5380_setup(instance); i = 0; - NCR5380_read(RESET_PARITY_INTERRUPT_REG); - NCR5380_write(MODE_REG, MR_ENABLE_EOP_INTR | MR_DMA_MODE); if (instance->irq == NO_IRQ) NCR5380_write(DTC_CONTROL_REG, CSR_DIR_READ); else @@ -348,7 +341,7 @@ static inline int NCR5380_pread(struct Scsi_Host *instance, unsigned char *dst, while (NCR5380_read(DTC_CONTROL_REG) & CSR_HOST_BUF_NOT_RDY) ++i; rtrc(3); - memcpy_fromio(d, base + DTC_DATA_BUF, 128); + memcpy_fromio(d, hostdata->base + DTC_DATA_BUF, 128); d += 128; len -= 128; rtrc(7); @@ -358,9 +351,7 @@ static inline int NCR5380_pread(struct Scsi_Host *instance, unsigned char *dst, rtrc(4); while (!(NCR5380_read(DTC_CONTROL_REG) & D_CR_ACCESS)) ++i; - NCR5380_write(MODE_REG, 0); /* Clear the operating mode */ rtrc(0); - NCR5380_read(RESET_PARITY_INTERRUPT_REG); if (i > hostdata->spin_max_r) hostdata->spin_max_r = i; return (0); @@ -383,12 +374,7 @@ static inline int NCR5380_pwrite(struct Scsi_Host *instance, unsigned char *src, { int i; struct NCR5380_hostdata *hostdata = shost_priv(instance); - NCR5380_local_declare(); - NCR5380_setup(instance); - NCR5380_read(RESET_PARITY_INTERRUPT_REG); - NCR5380_write(MODE_REG, MR_ENABLE_EOP_INTR | MR_DMA_MODE); - /* set direction (write) */ if (instance->irq == NO_IRQ) NCR5380_write(DTC_CONTROL_REG, 0); else @@ -400,7 +386,7 @@ static inline int NCR5380_pwrite(struct Scsi_Host *instance, unsigned char *src, while (NCR5380_read(DTC_CONTROL_REG) & CSR_HOST_BUF_NOT_RDY) ++i; rtrc(3); - memcpy_toio(base + DTC_DATA_BUF, src, 128); + memcpy_toio(hostdata->base + DTC_DATA_BUF, src, 128); src += 128; len -= 128; } @@ -413,47 +399,60 @@ static inline int NCR5380_pwrite(struct Scsi_Host *instance, 
unsigned char *src, ++i; rtrc(7); /* Check for parity error here. fixme. */ - NCR5380_write(MODE_REG, 0); /* Clear the operating mode */ rtrc(0); if (i > hostdata->spin_max_w) hostdata->spin_max_w = i; return (0); } +static int dtc_dma_xfer_len(struct scsi_cmnd *cmd) +{ + int transfersize = cmd->transfersize; + + /* Limit transfers to 32K, for xx400 & xx406 + * pseudoDMA that transfers in 128 bytes blocks. + */ + if (transfersize > 32 * 1024 && cmd->SCp.this_residual && + !(cmd->SCp.this_residual % transfersize)) + transfersize = 32 * 1024; + + return transfersize; +} + MODULE_LICENSE("GPL"); #include "NCR5380.c" static int dtc_release(struct Scsi_Host *shost) { - NCR5380_local_declare(); - NCR5380_setup(shost); + struct NCR5380_hostdata *hostdata = shost_priv(shost); + if (shost->irq != NO_IRQ) free_irq(shost->irq, shost); NCR5380_exit(shost); - if (shost->io_port && shost->n_io_port) - release_region(shost->io_port, shost->n_io_port); scsi_unregister(shost); - iounmap(base); + iounmap(hostdata->base); return 0; } static struct scsi_host_template driver_template = { - .name = "DTC 3180/3280 ", - .detect = dtc_detect, - .release = dtc_release, - .proc_name = "dtc3x80", - .show_info = dtc_show_info, - .write_info = dtc_write_info, - .info = dtc_info, - .queuecommand = dtc_queue_command, - .eh_abort_handler = dtc_abort, - .eh_bus_reset_handler = dtc_bus_reset, - .bios_param = dtc_biosparam, - .can_queue = CAN_QUEUE, - .this_id = 7, - .sg_tablesize = SG_ALL, - .cmd_per_lun = CMD_PER_LUN, - .use_clustering = DISABLE_CLUSTERING, + .name = "DTC 3180/3280", + .detect = dtc_detect, + .release = dtc_release, + .proc_name = "dtc3x80", + .show_info = dtc_show_info, + .write_info = dtc_write_info, + .info = dtc_info, + .queuecommand = dtc_queue_command, + .eh_abort_handler = dtc_abort, + .eh_bus_reset_handler = dtc_bus_reset, + .bios_param = dtc_biosparam, + .can_queue = 32, + .this_id = 7, + .sg_tablesize = SG_ALL, + .cmd_per_lun = 2, + .use_clustering = DISABLE_CLUSTERING, + .cmd_size = NCR5380_CMD_SIZE, + .max_sectors = 128, }; #include "scsi_module.c" diff --git a/drivers/scsi/dtc.h b/drivers/scsi/dtc.h index 78a2332e9064..56732cba8aba 100644 --- a/drivers/scsi/dtc.h +++ b/drivers/scsi/dtc.h @@ -10,54 +10,17 @@ #ifndef DTC3280_H #define DTC3280_H -#define DTCDEBUG 0 -#define DTCDEBUG_INIT 0x1 -#define DTCDEBUG_TRANSFER 0x2 - -#ifndef CMD_PER_LUN -#define CMD_PER_LUN 2 -#endif - -#ifndef CAN_QUEUE -#define CAN_QUEUE 32 -#endif - #define NCR5380_implementation_fields \ void __iomem *base -#define NCR5380_local_declare() \ - void __iomem *base - -#define NCR5380_setup(instance) \ - base = ((struct NCR5380_hostdata *)(instance)->hostdata)->base +#define DTC_address(reg) \ + (((struct NCR5380_hostdata *)shost_priv(instance))->base + DTC_5380_OFFSET + reg) -#define DTC_address(reg) (base + DTC_5380_OFFSET + reg) - -#define dbNCR5380_read(reg) \ - (rval=readb(DTC_address(reg)), \ - (((unsigned char) printk("DTC : read register %d at addr %p is: %02x\n"\ - , (reg), DTC_address(reg), rval)), rval ) ) - -#define dbNCR5380_write(reg, value) do { \ - printk("DTC : write %02x to register %d at address %p\n", \ - (value), (reg), DTC_address(reg)); \ - writeb(value, DTC_address(reg));} while(0) - - -#if !(DTCDEBUG & DTCDEBUG_TRANSFER) #define NCR5380_read(reg) (readb(DTC_address(reg))) #define NCR5380_write(reg, value) (writeb(value, DTC_address(reg))) -#else -#define NCR5380_read(reg) (readb(DTC_address(reg))) -#define xNCR5380_read(reg) \ - (((unsigned char) printk("DTC : read register %d at address %p\n"\ - , 
(reg), DTC_address(reg))), readb(DTC_address(reg))) -#define NCR5380_write(reg, value) do { \ - printk("DTC : write %02x to register %d at address %p\n", \ - (value), (reg), DTC_address(reg)); \ - writeb(value, DTC_address(reg));} while(0) -#endif +#define NCR5380_dma_xfer_len(instance, cmd, phase) \ + dtc_dma_xfer_len(cmd) #define NCR5380_intr dtc_intr #define NCR5380_queue_command dtc_queue_command diff --git a/drivers/scsi/esas2r/esas2r_ioctl.c b/drivers/scsi/esas2r/esas2r_ioctl.c index baf913047b48..3e8483410f61 100644 --- a/drivers/scsi/esas2r/esas2r_ioctl.c +++ b/drivers/scsi/esas2r/esas2r_ioctl.c @@ -1360,14 +1360,15 @@ int esas2r_ioctl_handler(void *hostdata, int cmd, void __user *arg) if (ioctl->header.channel == 0xFF) { a = (struct esas2r_adapter *)hostdata; } else { - a = esas2r_adapters[ioctl->header.channel]; - if (ioctl->header.channel >= MAX_ADAPTERS || (a == NULL)) { + if (ioctl->header.channel >= MAX_ADAPTERS || + esas2r_adapters[ioctl->header.channel] == NULL) { ioctl->header.return_code = IOCTL_BAD_CHANNEL; esas2r_log(ESAS2R_LOG_WARN, "bad channel value"); kfree(ioctl); return -ENOTSUPP; } + a = esas2r_adapters[ioctl->header.channel]; } switch (cmd) { diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c index 34a1b1f333b4..3e83d485f743 100644 --- a/drivers/scsi/fcoe/fcoe_ctlr.c +++ b/drivers/scsi/fcoe/fcoe_ctlr.c @@ -1118,7 +1118,8 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb) * If this is the first validated FCF, note the time and * set a timer to trigger selection. */ - if (mtu_valid && !fip->sel_fcf && fcoe_ctlr_fcf_usable(fcf)) { + if (mtu_valid && !fip->sel_fcf && !fip->sel_time && + fcoe_ctlr_fcf_usable(fcf)) { fip->sel_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY); if (!timer_pending(&fip->timer) || diff --git a/drivers/scsi/fdomain.c b/drivers/scsi/fdomain.c index eefe14d453db..b87ab38a4530 100644 --- a/drivers/scsi/fdomain.c +++ b/drivers/scsi/fdomain.c @@ -1768,7 +1768,7 @@ struct scsi_host_template fdomain_driver_template = { }; #ifndef PCMCIA -#ifdef CONFIG_PCI +#if defined(CONFIG_PCI) && defined(MODULE) static struct pci_device_id fdomain_pci_tbl[] = { { PCI_VENDOR_ID_FD, PCI_DEVICE_ID_FD_36C70, diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c index f8d2478b11cc..90091e693020 100644 --- a/drivers/scsi/g_NCR5380.c +++ b/drivers/scsi/g_NCR5380.c @@ -56,40 +56,31 @@ * */ -/* settings for DTC3181E card with only Mustek scanner attached */ -#define USLEEP_POLL msecs_to_jiffies(10) -#define USLEEP_SLEEP msecs_to_jiffies(200) -#define USLEEP_WAITLONG msecs_to_jiffies(5000) - #define AUTOPROBE_IRQ #ifdef CONFIG_SCSI_GENERIC_NCR53C400 -#define NCR53C400_PSEUDO_DMA 1 #define PSEUDO_DMA -#define NCR53C400 #endif #include <asm/io.h> -#include <linux/signal.h> #include <linux/blkdev.h> +#include <linux/module.h> #include <scsi/scsi_host.h> #include "g_NCR5380.h" #include "NCR5380.h" -#include <linux/stat.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/isapnp.h> -#include <linux/delay.h> #include <linux/interrupt.h> -#define NCR_NOT_SET 0 -static int ncr_irq = NCR_NOT_SET; -static int ncr_dma = NCR_NOT_SET; -static int ncr_addr = NCR_NOT_SET; -static int ncr_5380 = NCR_NOT_SET; -static int ncr_53c400 = NCR_NOT_SET; -static int ncr_53c400a = NCR_NOT_SET; -static int dtc_3181e = NCR_NOT_SET; +static int ncr_irq; +static int ncr_dma; +static int ncr_addr; +static int ncr_5380; +static int ncr_53c400; +static int ncr_53c400a; +static int dtc_3181e; +static int hp_c2502; 
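Dropping the NCR_NOT_SET sentinel above relies on a C guarantee: file-scope statics are zero-initialized, so zero itself can mean "not supplied" and any nonzero module parameter doubles as its own was-set flag. A minimal sketch of the pattern, with hypothetical names rather than the driver's:

#include <stdio.h>

/* File-scope statics live in .bss and start out as zero, so no
 * explicit "not set" marker is needed; zero already means that.
 * Caveat: this only works while zero is never a meaningful value.
 */
static int demo_irq;			/* set nonzero by a module parameter */

static void apply_override(int *irq)
{
	if (demo_irq)			/* user supplied a value */
		*irq = demo_irq;
}

int main(void)
{
	int irq = 5;			/* built-in default */

	apply_override(&irq);
	printf("irq = %d\n", irq);	/* prints 5 unless demo_irq is set */
	return 0;
}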
static struct override { NCR5380_map_type NCR5380_map_name; @@ -121,7 +112,7 @@ static struct override { static void __init internal_setup(int board, char *str, int *ints) { - static int commandline_current = 0; + static int commandline_current; switch (board) { case BOARD_NCR5380: if (ints[0] != 2 && ints[0] != 3) { @@ -235,6 +226,30 @@ static int __init do_DTC3181E_setup(char *str) #endif +#ifndef SCSI_G_NCR5380_MEM +/* + * Configure I/O address of 53C400A or DTC436 by writing magic numbers + * to ports 0x779 and 0x379. + */ +static void magic_configure(int idx, u8 irq, u8 magic[]) +{ + u8 cfg = 0; + + outb(magic[0], 0x779); + outb(magic[1], 0x379); + outb(magic[2], 0x379); + outb(magic[3], 0x379); + outb(magic[4], 0x379); + + /* allowed IRQs for HP C2502 */ + if (irq != 2 && irq != 3 && irq != 4 && irq != 5 && irq != 7) + irq = 0; + if (idx >= 0 && idx <= 7) + cfg = 0x80 | idx | (irq << 4); + outb(cfg, 0x379); +} +#endif + /** * generic_NCR5380_detect - look for NCR5380 controllers * @tpnt: the scsi template @@ -243,19 +258,18 @@ static int __init do_DTC3181E_setup(char *str) * and DTC436(ISAPnP) controllers. If overrides have been set we use * them. * - * The caller supplied NCR5380_init function is invoked from here, before - * the interrupt line is taken. - * * Locks: none */ static int __init generic_NCR5380_detect(struct scsi_host_template *tpnt) { - static int current_override = 0; + static int current_override; int count; unsigned int *ports; + u8 *magic = NULL; #ifndef SCSI_G_NCR5380_MEM int i; + int port_idx = -1; unsigned long region_size = 16; #endif static unsigned int __initdata ncr_53c400a_ports[] = { @@ -264,27 +278,36 @@ static int __init generic_NCR5380_detect(struct scsi_host_template *tpnt) static unsigned int __initdata dtc_3181e_ports[] = { 0x220, 0x240, 0x280, 0x2a0, 0x2c0, 0x300, 0x320, 0x340, 0 }; - int flags = 0; + static u8 ncr_53c400a_magic[] __initdata = { /* 53C400A & DTC436 */ + 0x59, 0xb9, 0xc5, 0xae, 0xa6 + }; + static u8 hp_c2502_magic[] __initdata = { /* HP C2502 */ + 0x0f, 0x22, 0xf0, 0x20, 0x80 + }; + int flags; struct Scsi_Host *instance; + struct NCR5380_hostdata *hostdata; #ifdef SCSI_G_NCR5380_MEM unsigned long base; void __iomem *iomem; #endif - if (ncr_irq != NCR_NOT_SET) + if (ncr_irq) overrides[0].irq = ncr_irq; - if (ncr_dma != NCR_NOT_SET) + if (ncr_dma) overrides[0].dma = ncr_dma; - if (ncr_addr != NCR_NOT_SET) + if (ncr_addr) overrides[0].NCR5380_map_name = (NCR5380_map_type) ncr_addr; - if (ncr_5380 != NCR_NOT_SET) + if (ncr_5380) overrides[0].board = BOARD_NCR5380; - else if (ncr_53c400 != NCR_NOT_SET) + else if (ncr_53c400) overrides[0].board = BOARD_NCR53C400; - else if (ncr_53c400a != NCR_NOT_SET) + else if (ncr_53c400a) overrides[0].board = BOARD_NCR53C400A; - else if (dtc_3181e != NCR_NOT_SET) + else if (dtc_3181e) overrides[0].board = BOARD_DTC3181E; + else if (hp_c2502) + overrides[0].board = BOARD_HP_C2502; #ifndef SCSI_G_NCR5380_MEM if (!current_override && isapnp_present()) { struct pnp_dev *dev = NULL; @@ -318,41 +341,45 @@ static int __init generic_NCR5380_detect(struct scsi_host_template *tpnt) } } #endif - tpnt->proc_name = "g_NCR5380"; for (count = 0; current_override < NO_OVERRIDES; ++current_override) { if (!(overrides[current_override].NCR5380_map_name)) continue; ports = NULL; + flags = 0; switch (overrides[current_override].board) { case BOARD_NCR5380: flags = FLAG_NO_PSEUDO_DMA; break; case BOARD_NCR53C400: - flags = FLAG_NCR53C400; +#ifdef PSEUDO_DMA + flags = FLAG_NO_DMA_FIXUP; +#endif break; case BOARD_NCR53C400A: 
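/* For the NCR53C400A, HP C2502 and DTC3181E handled below, pseudo-DMA
 * now stays enabled: FLAG_NO_PSEUDO_DMA is replaced by
 * FLAG_NO_DMA_FIXUP, which tells the NCR5380 core to skip its
 * end-of-DMA errata workarounds, transfers being staged through the
 * 53C400 host buffer rather than the bare 53C80 DMA port.
 */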
- flags = FLAG_NO_PSEUDO_DMA; + flags = FLAG_NO_DMA_FIXUP; + ports = ncr_53c400a_ports; + magic = ncr_53c400a_magic; + break; + case BOARD_HP_C2502: + flags = FLAG_NO_DMA_FIXUP; ports = ncr_53c400a_ports; + magic = hp_c2502_magic; break; case BOARD_DTC3181E: - flags = FLAG_NO_PSEUDO_DMA | FLAG_DTC3181E; + flags = FLAG_NO_DMA_FIXUP; ports = dtc_3181e_ports; + magic = ncr_53c400a_magic; break; } #ifndef SCSI_G_NCR5380_MEM - if (ports) { + if (ports && magic) { /* wakeup sequence for the NCR53C400A and DTC3181E */ /* Disable the adapter and look for a free io port */ - outb(0x59, 0x779); - outb(0xb9, 0x379); - outb(0xc5, 0x379); - outb(0xae, 0x379); - outb(0xa6, 0x379); - outb(0x00, 0x379); + magic_configure(-1, 0, magic); if (overrides[current_override].NCR5380_map_name != PORT_AUTO) for (i = 0; ports[i]; i++) { @@ -371,17 +398,12 @@ static int __init generic_NCR5380_detect(struct scsi_host_template *tpnt) } if (ports[i]) { /* At this point we have our region reserved */ - outb(0x59, 0x779); - outb(0xb9, 0x379); - outb(0xc5, 0x379); - outb(0xae, 0x379); - outb(0xa6, 0x379); - outb(0x80 | i, 0x379); /* set io port to be used */ + magic_configure(i, 0, magic); /* no IRQ yet */ outb(0xc0, ports[i] + 9); if (inb(ports[i] + 9) != 0x80) continue; - else - overrides[current_override].NCR5380_map_name = ports[i]; + overrides[current_override].NCR5380_map_name = ports[i]; + port_idx = i; } else continue; } @@ -403,24 +425,65 @@ static int __init generic_NCR5380_detect(struct scsi_host_template *tpnt) } #endif instance = scsi_register(tpnt, sizeof(struct NCR5380_hostdata)); - if (instance == NULL) { -#ifndef SCSI_G_NCR5380_MEM - release_region(overrides[current_override].NCR5380_map_name, region_size); -#else - iounmap(iomem); - release_mem_region(base, NCR5380_region_size); -#endif - continue; - } + if (instance == NULL) + goto out_release; + hostdata = shost_priv(instance); - instance->NCR5380_instance_name = overrides[current_override].NCR5380_map_name; #ifndef SCSI_G_NCR5380_MEM + instance->io_port = overrides[current_override].NCR5380_map_name; instance->n_io_port = region_size; + hostdata->io_width = 1; /* 8-bit PDMA by default */ + + /* + * On NCR53C400 boards, NCR5380 registers are mapped 8 past + * the base address. 
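 * The 53C400-specific registers are likewise per-board: control/status,
 * block counter and host buffer sit at offsets 0, 1 and 4 on the
 * NCR53C400, and at 9, 10 and 8 on the NCR53C400A, HP C2502 and
 * DTC3181E, as assigned in the switch below.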
+ */ + switch (overrides[current_override].board) { + case BOARD_NCR53C400: + instance->io_port += 8; + hostdata->c400_ctl_status = 0; + hostdata->c400_blk_cnt = 1; + hostdata->c400_host_buf = 4; + break; + case BOARD_DTC3181E: + hostdata->io_width = 2; /* 16-bit PDMA */ + /* fall through */ + case BOARD_NCR53C400A: + case BOARD_HP_C2502: + hostdata->c400_ctl_status = 9; + hostdata->c400_blk_cnt = 10; + hostdata->c400_host_buf = 8; + break; + } #else - ((struct NCR5380_hostdata *)instance->hostdata)->iomem = iomem; + instance->base = overrides[current_override].NCR5380_map_name; + hostdata->iomem = iomem; + switch (overrides[current_override].board) { + case BOARD_NCR53C400: + hostdata->c400_ctl_status = 0x100; + hostdata->c400_blk_cnt = 0x101; + hostdata->c400_host_buf = 0x104; + break; + case BOARD_DTC3181E: + case BOARD_NCR53C400A: + case BOARD_HP_C2502: + pr_err(DRV_MODULE_NAME ": unknown register offsets\n"); + goto out_unregister; + } #endif - NCR5380_init(instance, flags); + if (NCR5380_init(instance, flags)) + goto out_unregister; + + switch (overrides[current_override].board) { + case BOARD_NCR53C400: + case BOARD_DTC3181E: + case BOARD_NCR53C400A: + case BOARD_HP_C2502: + NCR5380_write(hostdata->c400_ctl_status, CSR_BASE); + } + + NCR5380_maybe_reset_bus(instance); if (overrides[current_override].irq != IRQ_AUTO) instance->irq = overrides[current_override].irq; @@ -431,12 +494,18 @@ static int __init generic_NCR5380_detect(struct scsi_host_template *tpnt) if (instance->irq == 255) instance->irq = NO_IRQ; - if (instance->irq != NO_IRQ) + if (instance->irq != NO_IRQ) { +#ifndef SCSI_G_NCR5380_MEM + /* set IRQ for HP C2502 */ + if (overrides[current_override].board == BOARD_HP_C2502) + magic_configure(port_idx, instance->irq, magic); +#endif if (request_irq(instance->irq, generic_NCR5380_intr, 0, "NCR5380", instance)) { printk(KERN_WARNING "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq); instance->irq = NO_IRQ; } + } if (instance->irq == NO_IRQ) { printk(KERN_INFO "scsi%d : interrupts not enabled. 
for better interactive performance,\n", instance->host_no); @@ -447,6 +516,17 @@ static int __init generic_NCR5380_detect(struct scsi_host_template *tpnt) ++count; } return count; + +out_unregister: + scsi_unregister(instance); +out_release: +#ifndef SCSI_G_NCR5380_MEM + release_region(overrides[current_override].NCR5380_map_name, region_size); +#else + iounmap(iomem); + release_mem_region(base, NCR5380_region_size); +#endif + return count; } /** @@ -460,21 +540,15 @@ static int __init generic_NCR5380_detect(struct scsi_host_template *tpnt) static int generic_NCR5380_release_resources(struct Scsi_Host *instance) { - NCR5380_local_declare(); - NCR5380_setup(instance); - if (instance->irq != NO_IRQ) free_irq(instance->irq, instance); NCR5380_exit(instance); - #ifndef SCSI_G_NCR5380_MEM - release_region(instance->NCR5380_instance_name, instance->n_io_port); + release_region(instance->io_port, instance->n_io_port); #else iounmap(((struct NCR5380_hostdata *)instance->hostdata)->iomem); - release_mem_region(instance->NCR5380_instance_name, NCR5380_region_size); + release_mem_region(instance->base, NCR5380_region_size); #endif - - return 0; } @@ -507,7 +581,7 @@ generic_NCR5380_biosparam(struct scsi_device *sdev, struct block_device *bdev, } #endif -#ifdef NCR53C400_PSEUDO_DMA +#ifdef PSEUDO_DMA /** * NCR5380_pread - pseudo DMA read @@ -521,75 +595,68 @@ generic_NCR5380_biosparam(struct scsi_device *sdev, struct block_device *bdev, static inline int NCR5380_pread(struct Scsi_Host *instance, unsigned char *dst, int len) { + struct NCR5380_hostdata *hostdata = shost_priv(instance); int blocks = len / 128; int start = 0; - int bl; - - NCR5380_local_declare(); - NCR5380_setup(instance); - NCR5380_write(C400_CONTROL_STATUS_REG, CSR_BASE | CSR_TRANS_DIR); - NCR5380_write(C400_BLOCK_COUNTER_REG, blocks); + NCR5380_write(hostdata->c400_ctl_status, CSR_BASE | CSR_TRANS_DIR); + NCR5380_write(hostdata->c400_blk_cnt, blocks); while (1) { - if ((bl = NCR5380_read(C400_BLOCK_COUNTER_REG)) == 0) { + if (NCR5380_read(hostdata->c400_blk_cnt) == 0) break; - } - if (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_GATED_53C80_IRQ) { + if (NCR5380_read(hostdata->c400_ctl_status) & CSR_GATED_53C80_IRQ) { printk(KERN_ERR "53C400r: Got 53C80_IRQ start=%d, blocks=%d\n", start, blocks); return -1; } - while (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_HOST_BUF_NOT_RDY); + while (NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY) + ; /* FIXME - no timeout */ #ifndef SCSI_G_NCR5380_MEM - { - int i; - for (i = 0; i < 128; i++) - dst[start + i] = NCR5380_read(C400_HOST_BUFFER); - } + if (hostdata->io_width == 2) + insw(instance->io_port + hostdata->c400_host_buf, + dst + start, 64); + else + insb(instance->io_port + hostdata->c400_host_buf, + dst + start, 128); #else /* implies SCSI_G_NCR5380_MEM */ - memcpy_fromio(dst + start, iomem + NCR53C400_host_buffer, 128); + memcpy_fromio(dst + start, + hostdata->iomem + NCR53C400_host_buffer, 128); #endif start += 128; blocks--; } if (blocks) { - while (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_HOST_BUF_NOT_RDY) - { - // FIXME - no timeout - } + while (NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY) + ; /* FIXME - no timeout */ #ifndef SCSI_G_NCR5380_MEM - { - int i; - for (i = 0; i < 128; i++) - dst[start + i] = NCR5380_read(C400_HOST_BUFFER); - } + if (hostdata->io_width == 2) + insw(instance->io_port + hostdata->c400_host_buf, + dst + start, 64); + else + insb(instance->io_port + hostdata->c400_host_buf, + dst + start, 128); #else /* implies 
SCSI_G_NCR5380_MEM */ - memcpy_fromio(dst + start, iomem + NCR53C400_host_buffer, 128); + memcpy_fromio(dst + start, + hostdata->iomem + NCR53C400_host_buffer, 128); #endif start += 128; blocks--; } - if (!(NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_GATED_53C80_IRQ)) + if (!(NCR5380_read(hostdata->c400_ctl_status) & CSR_GATED_53C80_IRQ)) printk("53C400r: no 53C80 gated irq after transfer"); -#if 0 - /* - * DON'T DO THIS - THEY NEVER ARRIVE! - */ - printk("53C400r: Waiting for 53C80 registers\n"); - while (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_53C80_REG) + /* wait for 53C80 registers to be available */ + while (!(NCR5380_read(hostdata->c400_ctl_status) & CSR_53C80_REG)) ; -#endif + if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_END_DMA_TRANSFER)) printk(KERN_ERR "53C400r: no end dma signal\n"); - NCR5380_write(MODE_REG, MR_BASE); - NCR5380_read(RESET_PARITY_INTERRUPT_REG); return 0; } @@ -605,89 +672,91 @@ static inline int NCR5380_pread(struct Scsi_Host *instance, unsigned char *dst, static inline int NCR5380_pwrite(struct Scsi_Host *instance, unsigned char *src, int len) { + struct NCR5380_hostdata *hostdata = shost_priv(instance); int blocks = len / 128; int start = 0; - int bl; - int i; - NCR5380_local_declare(); - NCR5380_setup(instance); - - NCR5380_write(C400_CONTROL_STATUS_REG, CSR_BASE); - NCR5380_write(C400_BLOCK_COUNTER_REG, blocks); + NCR5380_write(hostdata->c400_ctl_status, CSR_BASE); + NCR5380_write(hostdata->c400_blk_cnt, blocks); while (1) { - if (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_GATED_53C80_IRQ) { + if (NCR5380_read(hostdata->c400_ctl_status) & CSR_GATED_53C80_IRQ) { printk(KERN_ERR "53C400w: Got 53C80_IRQ start=%d, blocks=%d\n", start, blocks); return -1; } - if ((bl = NCR5380_read(C400_BLOCK_COUNTER_REG)) == 0) { + if (NCR5380_read(hostdata->c400_blk_cnt) == 0) break; - } - while (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_HOST_BUF_NOT_RDY) + while (NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY) ; // FIXME - timeout #ifndef SCSI_G_NCR5380_MEM - { - for (i = 0; i < 128; i++) - NCR5380_write(C400_HOST_BUFFER, src[start + i]); - } + if (hostdata->io_width == 2) + outsw(instance->io_port + hostdata->c400_host_buf, + src + start, 64); + else + outsb(instance->io_port + hostdata->c400_host_buf, + src + start, 128); #else /* implies SCSI_G_NCR5380_MEM */ - memcpy_toio(iomem + NCR53C400_host_buffer, src + start, 128); + memcpy_toio(hostdata->iomem + NCR53C400_host_buffer, + src + start, 128); #endif start += 128; blocks--; } if (blocks) { - while (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_HOST_BUF_NOT_RDY) + while (NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY) ; // FIXME - no timeout #ifndef SCSI_G_NCR5380_MEM - { - for (i = 0; i < 128; i++) - NCR5380_write(C400_HOST_BUFFER, src[start + i]); - } + if (hostdata->io_width == 2) + outsw(instance->io_port + hostdata->c400_host_buf, + src + start, 64); + else + outsb(instance->io_port + hostdata->c400_host_buf, + src + start, 128); #else /* implies SCSI_G_NCR5380_MEM */ - memcpy_toio(iomem + NCR53C400_host_buffer, src + start, 128); + memcpy_toio(hostdata->iomem + NCR53C400_host_buffer, + src + start, 128); #endif start += 128; blocks--; } -#if 0 - printk("53C400w: waiting for registers to be available\n"); - THEY NEVER DO ! while (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_53C80_REG); - printk("53C400w: Got em\n"); -#endif - - /* Let's wait for this instead - could be ugly */ - /* All documentation says to check for this. Maybe my hardware is too - * fast. 
Waiting for it seems to work fine! KLL - */ - while (!(i = NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_GATED_53C80_IRQ)) - ; // FIXME - no timeout - - /* - * I know. i is certainly != 0 here but the loop is new. See previous - * comment. - */ - if (i) { - if (!((i = NCR5380_read(BUS_AND_STATUS_REG)) & BASR_END_DMA_TRANSFER)) - printk(KERN_ERR "53C400w: No END OF DMA bit - WHOOPS! BASR=%0x\n", i); - } else - printk(KERN_ERR "53C400w: no 53C80 gated irq after transfer (last block)\n"); + /* wait for 53C80 registers to be available */ + while (!(NCR5380_read(hostdata->c400_ctl_status) & CSR_53C80_REG)) { + udelay(4); /* DTC436 chip hangs without this */ + /* FIXME - no timeout */ + } -#if 0 if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_END_DMA_TRANSFER)) { printk(KERN_ERR "53C400w: no end dma signal\n"); } -#endif + while (!(NCR5380_read(TARGET_COMMAND_REG) & TCR_LAST_BYTE_SENT)) ; // TIMEOUT return 0; } -#endif /* PSEUDO_DMA */ + +static int generic_NCR5380_dma_xfer_len(struct scsi_cmnd *cmd) +{ + int transfersize = cmd->transfersize; + + /* Limit transfers to 32K, for xx400 & xx406 + * pseudoDMA that transfers in 128 bytes blocks. + */ + if (transfersize > 32 * 1024 && cmd->SCp.this_residual && + !(cmd->SCp.this_residual % transfersize)) + transfersize = 32 * 1024; + + /* 53C400 datasheet: non-modulo-128-byte transfers should use PIO */ + if (transfersize % 128) + transfersize = 0; + + return transfersize; +} + +#endif /* PSEUDO_DMA */ /* * Include the NCR5380 core code that we build our driver around @@ -696,22 +765,24 @@ static inline int NCR5380_pwrite(struct Scsi_Host *instance, unsigned char *src, #include "NCR5380.c" static struct scsi_host_template driver_template = { - .show_info = generic_NCR5380_show_info, - .name = "Generic NCR5380/NCR53C400 SCSI", - .detect = generic_NCR5380_detect, - .release = generic_NCR5380_release_resources, - .info = generic_NCR5380_info, - .queuecommand = generic_NCR5380_queue_command, + .proc_name = DRV_MODULE_NAME, + .name = "Generic NCR5380/NCR53C400 SCSI", + .detect = generic_NCR5380_detect, + .release = generic_NCR5380_release_resources, + .info = generic_NCR5380_info, + .queuecommand = generic_NCR5380_queue_command, .eh_abort_handler = generic_NCR5380_abort, .eh_bus_reset_handler = generic_NCR5380_bus_reset, - .bios_param = NCR5380_BIOSPARAM, - .can_queue = CAN_QUEUE, - .this_id = 7, - .sg_tablesize = SG_ALL, - .cmd_per_lun = CMD_PER_LUN, - .use_clustering = DISABLE_CLUSTERING, + .bios_param = NCR5380_BIOSPARAM, + .can_queue = 16, + .this_id = 7, + .sg_tablesize = SG_ALL, + .cmd_per_lun = 2, + .use_clustering = DISABLE_CLUSTERING, + .cmd_size = NCR5380_CMD_SIZE, + .max_sectors = 128, }; -#include <linux/module.h> + #include "scsi_module.c" module_param(ncr_irq, int, 0); @@ -721,6 +792,7 @@ module_param(ncr_5380, int, 0); module_param(ncr_53c400, int, 0); module_param(ncr_53c400a, int, 0); module_param(dtc_3181e, int, 0); +module_param(hp_c2502, int, 0); MODULE_LICENSE("GPL"); #if !defined(SCSI_G_NCR5380_MEM) && defined(MODULE) diff --git a/drivers/scsi/g_NCR5380.h b/drivers/scsi/g_NCR5380.h index bea1a3b9b862..6f3d2ac4f185 100644 --- a/drivers/scsi/g_NCR5380.h +++ b/drivers/scsi/g_NCR5380.h @@ -14,81 +14,67 @@ #ifndef GENERIC_NCR5380_H #define GENERIC_NCR5380_H -#ifdef NCR53C400 +#ifdef CONFIG_SCSI_GENERIC_NCR53C400 #define BIOSPARAM #define NCR5380_BIOSPARAM generic_NCR5380_biosparam #else #define NCR5380_BIOSPARAM NULL #endif -#ifndef ASM - -#ifndef CMD_PER_LUN -#define CMD_PER_LUN 2 -#endif - -#ifndef CAN_QUEUE -#define CAN_QUEUE 16 -#endif - 
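The generic_NCR5380_dma_xfer_len() added above concentrates the pseudo-DMA length policy in one place: requests are capped at 32K, and any length that is not a multiple of the 128-byte host-buffer block returns 0, meaning "use PIO instead". (The removed CMD_PER_LUN and CAN_QUEUE fallbacks likewise move into the host template as cmd_per_lun = 2 and can_queue = 16.) A self-contained sketch of that policy, with an assumed helper name rather than the driver's:

#include <assert.h>

/* Pseudo-DMA on the 53C400 family moves data in 128-byte blocks, so
 * a length that is not a multiple of 128 cannot use it; returning 0
 * requests PIO. Large block-aligned transfers are capped at 32K.
 */
static int pdma_xfer_len(int transfersize, int this_residual)
{
	if (transfersize > 32 * 1024 && this_residual &&
	    !(this_residual % transfersize))
		transfersize = 32 * 1024;

	if (transfersize % 128)
		transfersize = 0;	/* fall back to PIO */

	return transfersize;
}

int main(void)
{
	assert(pdma_xfer_len(512, 4096) == 512);	/* aligned: PDMA */
	assert(pdma_xfer_len(100, 100) == 0);		/* odd size: PIO */
	assert(pdma_xfer_len(64 * 1024, 64 * 1024) == 32 * 1024);
	return 0;
}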
#define __STRVAL(x) #x #define STRVAL(x) __STRVAL(x) #ifndef SCSI_G_NCR5380_MEM +#define DRV_MODULE_NAME "g_NCR5380" -#define NCR5380_map_config port #define NCR5380_map_type int #define NCR5380_map_name port -#define NCR5380_instance_name io_port -#define NCR53C400_register_offset 0 -#define NCR53C400_address_adjust 8 -#ifdef NCR53C400 +#ifdef CONFIG_SCSI_GENERIC_NCR53C400 #define NCR5380_region_size 16 #else #define NCR5380_region_size 8 #endif -#define NCR5380_read(reg) (inb(NCR5380_map_name + (reg))) -#define NCR5380_write(reg, value) (outb((value), (NCR5380_map_name + (reg)))) +#define NCR5380_read(reg) \ + inb(instance->io_port + (reg)) +#define NCR5380_write(reg, value) \ + outb(value, instance->io_port + (reg)) #define NCR5380_implementation_fields \ - NCR5380_map_type NCR5380_map_name - -#define NCR5380_local_declare() \ - register NCR5380_implementation_fields - -#define NCR5380_setup(instance) \ - NCR5380_map_name = (NCR5380_map_type)((instance)->NCR5380_instance_name) + int c400_ctl_status; \ + int c400_blk_cnt; \ + int c400_host_buf; \ + int io_width; #else /* therefore SCSI_G_NCR5380_MEM */ +#define DRV_MODULE_NAME "g_NCR5380_mmio" -#define NCR5380_map_config memory #define NCR5380_map_type unsigned long #define NCR5380_map_name base -#define NCR5380_instance_name base -#define NCR53C400_register_offset 0x108 -#define NCR53C400_address_adjust 0 #define NCR53C400_mem_base 0x3880 #define NCR53C400_host_buffer 0x3900 #define NCR5380_region_size 0x3a00 -#define NCR5380_read(reg) readb(iomem + NCR53C400_mem_base + (reg)) -#define NCR5380_write(reg, value) writeb(value, iomem + NCR53C400_mem_base + (reg)) +#define NCR5380_read(reg) \ + readb(((struct NCR5380_hostdata *)shost_priv(instance))->iomem + \ + NCR53C400_mem_base + (reg)) +#define NCR5380_write(reg, value) \ + writeb(value, ((struct NCR5380_hostdata *)shost_priv(instance))->iomem + \ + NCR53C400_mem_base + (reg)) #define NCR5380_implementation_fields \ - NCR5380_map_type NCR5380_map_name; \ - void __iomem *iomem; - -#define NCR5380_local_declare() \ - register void __iomem *iomem - -#define NCR5380_setup(instance) \ - iomem = (((struct NCR5380_hostdata *)(instance)->hostdata)->iomem) + void __iomem *iomem; \ + int c400_ctl_status; \ + int c400_blk_cnt; \ + int c400_host_buf; #endif +#define NCR5380_dma_xfer_len(instance, cmd, phase) \ + generic_NCR5380_dma_xfer_len(cmd) + #define NCR5380_intr generic_NCR5380_intr #define NCR5380_queue_command generic_NCR5380_queue_command #define NCR5380_abort generic_NCR5380_abort @@ -102,7 +88,7 @@ #define BOARD_NCR53C400 1 #define BOARD_NCR53C400A 2 #define BOARD_DTC3181E 3 +#define BOARD_HP_C2502 4 -#endif /* ndef ASM */ #endif /* GENERIC_NCR5380_H */ diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c index 71e138044379..0a767740bf02 100644 --- a/drivers/scsi/gdth.c +++ b/drivers/scsi/gdth.c @@ -2838,7 +2838,6 @@ static gdth_evt_str *gdth_store_event(gdth_ha_str *ha, u16 source, u16 idx, gdth_evt_data *evt) { gdth_evt_str *e; - struct timeval tv; /* no GDTH_LOCK_HA() ! */ TRACE2(("gdth_store_event() source %d idx %d\n", source, idx)); @@ -2854,8 +2853,7 @@ static gdth_evt_str *gdth_store_event(gdth_ha_str *ha, u16 source, !strcmp((char *)&ebuffer[elastidx].event_data.event_string, (char *)&evt->event_string)))) { e = &ebuffer[elastidx]; - do_gettimeofday(&tv); - e->last_stamp = tv.tv_sec; + e->last_stamp = (u32)ktime_get_real_seconds(); ++e->same_count; } else { if (ebuffer[elastidx].event_source != 0) { /* entry not free ? 
*/ @@ -2871,8 +2869,7 @@ static gdth_evt_str *gdth_store_event(gdth_ha_str *ha, u16 source, e = &ebuffer[elastidx]; e->event_source = source; e->event_idx = idx; - do_gettimeofday(&tv); - e->first_stamp = e->last_stamp = tv.tv_sec; + e->first_stamp = e->last_stamp = (u32)ktime_get_real_seconds(); e->same_count = 1; e->event_data = *evt; e->application = 0; diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c index e66e997992e3..be609db66807 100644 --- a/drivers/scsi/gdth_proc.c +++ b/drivers/scsi/gdth_proc.c @@ -148,7 +148,6 @@ int gdth_show_info(struct seq_file *m, struct Scsi_Host *host) gdth_cmd_str *gdtcmd; gdth_evt_str *estr; char hrec[161]; - struct timeval tv; char *buf; gdth_dskstat_str *pds; @@ -540,8 +539,14 @@ int gdth_show_info(struct seq_file *m, struct Scsi_Host *host) if (estr->event_data.eu.driver.ionode == ha->hanum && estr->event_source == ES_ASYNC) { gdth_log_event(&estr->event_data, hrec); - do_gettimeofday(&tv); - sec = (int)(tv.tv_sec - estr->first_stamp); + + /* + * Elapsed seconds subtraction with unsigned operands is + * safe from wrap around in year 2106. Executes as: + * operand a + (2's complement operand b) + 1 + */ + + sec = (int)((u32)ktime_get_real_seconds() - estr->first_stamp); if (sec < 0) sec = 0; seq_printf(m," date- %02d:%02d:%02d\t%s\n", sec/3600, sec%3600/60, sec%60, hrec); diff --git a/drivers/scsi/hisi_sas/Kconfig b/drivers/scsi/hisi_sas/Kconfig new file mode 100644 index 000000000000..d1dd1616f983 --- /dev/null +++ b/drivers/scsi/hisi_sas/Kconfig @@ -0,0 +1,8 @@ +config SCSI_HISI_SAS + tristate "HiSilicon SAS" + depends on HAS_DMA && HAS_IOMEM + depends on ARM64 || COMPILE_TEST + select SCSI_SAS_LIBSAS + select BLK_DEV_INTEGRITY + help + This driver supports HiSilicon's SAS HBA diff --git a/drivers/scsi/hisi_sas/Makefile b/drivers/scsi/hisi_sas/Makefile new file mode 100644 index 000000000000..c6d3a1b5fcb9 --- /dev/null +++ b/drivers/scsi/hisi_sas/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_SCSI_HISI_SAS) += hisi_sas_main.o +obj-$(CONFIG_SCSI_HISI_SAS) += hisi_sas_v1_hw.o hisi_sas_v2_hw.o diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h new file mode 100644 index 000000000000..29e89f340b64 --- /dev/null +++ b/drivers/scsi/hisi_sas/hisi_sas.h @@ -0,0 +1,332 @@ +/* + * Copyright (c) 2015 Linaro Ltd. + * Copyright (c) 2015 Hisilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + */ + +#ifndef _HISI_SAS_H_ +#define _HISI_SAS_H_ + +#include <linux/acpi.h> +#include <linux/dmapool.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/platform_device.h> +#include <linux/property.h> +#include <linux/regmap.h> +#include <scsi/sas_ata.h> +#include <scsi/libsas.h> + +#define DRV_VERSION "v1.3" + +#define HISI_SAS_MAX_PHYS 9 +#define HISI_SAS_MAX_QUEUES 32 +#define HISI_SAS_QUEUE_SLOTS 512 +#define HISI_SAS_MAX_ITCT_ENTRIES 2048 +#define HISI_SAS_MAX_DEVICES HISI_SAS_MAX_ITCT_ENTRIES + +#define HISI_SAS_STATUS_BUF_SZ \ + (sizeof(struct hisi_sas_err_record) + 1024) +#define HISI_SAS_COMMAND_TABLE_SZ \ + (((sizeof(union hisi_sas_command_table)+3)/4)*4) + +#define HISI_SAS_MAX_SSP_RESP_SZ (sizeof(struct ssp_frame_hdr) + 1024) +#define HISI_SAS_MAX_SMP_RESP_SZ 1028 +#define HISI_SAS_MAX_STP_RESP_SZ 28 + +#define DEV_IS_EXPANDER(type) \ + ((type == SAS_EDGE_EXPANDER_DEVICE) || \ + (type == SAS_FANOUT_EXPANDER_DEVICE)) + +struct hisi_hba; + +enum { + PORT_TYPE_SAS = (1U << 1), + PORT_TYPE_SATA = (1U << 0), +}; + +enum dev_status { + HISI_SAS_DEV_NORMAL, + HISI_SAS_DEV_EH, +}; + +enum hisi_sas_dev_type { + HISI_SAS_DEV_TYPE_STP = 0, + HISI_SAS_DEV_TYPE_SSP, + HISI_SAS_DEV_TYPE_SATA, +}; + +struct hisi_sas_phy { + struct hisi_hba *hisi_hba; + struct hisi_sas_port *port; + struct asd_sas_phy sas_phy; + struct sas_identify identify; + struct timer_list timer; + struct work_struct phyup_ws; + u64 port_id; /* from hw */ + u64 dev_sas_addr; + u64 phy_type; + u64 frame_rcvd_size; + u8 frame_rcvd[32]; + u8 phy_attached; + u8 reserved[3]; + enum sas_linkrate minimum_linkrate; + enum sas_linkrate maximum_linkrate; +}; + +struct hisi_sas_port { + struct asd_sas_port sas_port; + u8 port_attached; + u8 id; /* from hw */ + struct list_head list; +}; + +struct hisi_sas_cq { + struct hisi_hba *hisi_hba; + int id; +}; + +struct hisi_sas_device { + enum sas_device_type dev_type; + struct hisi_hba *hisi_hba; + struct domain_device *sas_device; + u64 attached_phy; + u64 device_id; + u64 running_req; + u8 dev_status; +}; + +struct hisi_sas_slot { + struct list_head entry; + struct sas_task *task; + struct hisi_sas_port *port; + u64 n_elem; + int dlvry_queue; + int dlvry_queue_slot; + int cmplt_queue; + int cmplt_queue_slot; + int idx; + int abort; + void *cmd_hdr; + dma_addr_t cmd_hdr_dma; + void *status_buffer; + dma_addr_t status_buffer_dma; + void *command_table; + dma_addr_t command_table_dma; + struct hisi_sas_sge_page *sge_page; + dma_addr_t sge_page_dma; + struct work_struct abort_slot; +}; + +struct hisi_sas_tmf_task { + u8 tmf; + u16 tag_of_task_to_be_managed; +}; + +struct hisi_sas_hw { + int (*hw_init)(struct hisi_hba *hisi_hba); + void (*setup_itct)(struct hisi_hba *hisi_hba, + struct hisi_sas_device *device); + void (*sl_notify)(struct hisi_hba *hisi_hba, int phy_no); + int (*get_free_slot)(struct hisi_hba *hisi_hba, int *q, int *s); + void (*start_delivery)(struct hisi_hba *hisi_hba); + int (*prep_ssp)(struct hisi_hba *hisi_hba, + struct hisi_sas_slot *slot, int is_tmf, + struct hisi_sas_tmf_task *tmf); + int (*prep_smp)(struct hisi_hba *hisi_hba, + struct hisi_sas_slot *slot); + int (*prep_stp)(struct hisi_hba *hisi_hba, + struct hisi_sas_slot *slot); + int (*slot_complete)(struct hisi_hba *hisi_hba, + struct hisi_sas_slot *slot, int abort); + void (*phy_enable)(struct hisi_hba *hisi_hba, int phy_no); + void (*phy_disable)(struct hisi_hba *hisi_hba, int phy_no); + void (*phy_hard_reset)(struct hisi_hba *hisi_hba, int phy_no); 
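	/* Together with free_device and get_wideport_bitmap below, these
	 * ops form the low-level hardware interface: hisi_sas_main.c
	 * implements the libsas-facing logic and calls through this
	 * table, while hisi_sas_v1_hw.c and hisi_sas_v2_hw.c supply the
	 * register-level implementations for each controller revision.
	 */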
+ void (*free_device)(struct hisi_hba *hisi_hba, + struct hisi_sas_device *dev); + int (*get_wideport_bitmap)(struct hisi_hba *hisi_hba, int port_id); + int max_command_entries; + int complete_hdr_size; +}; + +struct hisi_hba { + /* This must be the first element, used by SHOST_TO_SAS_HA */ + struct sas_ha_struct *p; + + struct platform_device *pdev; + void __iomem *regs; + struct regmap *ctrl; + u32 ctrl_reset_reg; + u32 ctrl_reset_sts_reg; + u32 ctrl_clock_ena_reg; + u8 sas_addr[SAS_ADDR_SIZE]; + + int n_phy; + int scan_finished; + spinlock_t lock; + + struct timer_list timer; + struct workqueue_struct *wq; + + int slot_index_count; + unsigned long *slot_index_tags; + + /* SCSI/SAS glue */ + struct sas_ha_struct sha; + struct Scsi_Host *shost; + + struct hisi_sas_cq cq[HISI_SAS_MAX_QUEUES]; + struct hisi_sas_phy phy[HISI_SAS_MAX_PHYS]; + struct hisi_sas_port port[HISI_SAS_MAX_PHYS]; + + int queue_count; + int queue; + struct hisi_sas_slot *slot_prep; + + struct dma_pool *sge_page_pool; + struct hisi_sas_device devices[HISI_SAS_MAX_DEVICES]; + struct dma_pool *command_table_pool; + struct dma_pool *status_buffer_pool; + struct hisi_sas_cmd_hdr *cmd_hdr[HISI_SAS_MAX_QUEUES]; + dma_addr_t cmd_hdr_dma[HISI_SAS_MAX_QUEUES]; + void *complete_hdr[HISI_SAS_MAX_QUEUES]; + dma_addr_t complete_hdr_dma[HISI_SAS_MAX_QUEUES]; + struct hisi_sas_initial_fis *initial_fis; + dma_addr_t initial_fis_dma; + struct hisi_sas_itct *itct; + dma_addr_t itct_dma; + struct hisi_sas_iost *iost; + dma_addr_t iost_dma; + struct hisi_sas_breakpoint *breakpoint; + dma_addr_t breakpoint_dma; + struct hisi_sas_breakpoint *sata_breakpoint; + dma_addr_t sata_breakpoint_dma; + struct hisi_sas_slot *slot_info; + const struct hisi_sas_hw *hw; /* Low level hw interface */ +}; + +/* Generic HW DMA host memory structures */ +/* Delivery queue header */ +struct hisi_sas_cmd_hdr { + /* dw0 */ + __le32 dw0; + + /* dw1 */ + __le32 dw1; + + /* dw2 */ + __le32 dw2; + + /* dw3 */ + __le32 transfer_tags; + + /* dw4 */ + __le32 data_transfer_len; + + /* dw5 */ + __le32 first_burst_num; + + /* dw6 */ + __le32 sg_len; + + /* dw7 */ + __le32 dw7; + + /* dw8-9 */ + __le64 cmd_table_addr; + + /* dw10-11 */ + __le64 sts_buffer_addr; + + /* dw12-13 */ + __le64 prd_table_addr; + + /* dw14-15 */ + __le64 dif_prd_table_addr; +}; + +struct hisi_sas_itct { + __le64 qw0; + __le64 sas_addr; + __le64 qw2; + __le64 qw3; + __le64 qw4_15[12]; +}; + +struct hisi_sas_iost { + __le64 qw0; + __le64 qw1; + __le64 qw2; + __le64 qw3; +}; + +struct hisi_sas_err_record { + u32 data[4]; +}; + +struct hisi_sas_initial_fis { + struct hisi_sas_err_record err_record; + struct dev_to_host_fis fis; + u32 rsvd[3]; +}; + +struct hisi_sas_breakpoint { + u8 data[128]; /*io128 byte*/ +}; + +struct hisi_sas_sge { + __le64 addr; + __le32 page_ctrl_0; + __le32 page_ctrl_1; + __le32 data_len; + __le32 data_off; +}; + +struct hisi_sas_command_table_smp { + u8 bytes[44]; +}; + +struct hisi_sas_command_table_stp { + struct host_to_dev_fis command_fis; + u8 dummy[12]; + u8 atapi_cdb[ATAPI_CDB_LEN]; +}; + +#define HISI_SAS_SGE_PAGE_CNT SCSI_MAX_SG_SEGMENTS +struct hisi_sas_sge_page { + struct hisi_sas_sge sge[HISI_SAS_SGE_PAGE_CNT]; +}; + +struct hisi_sas_command_table_ssp { + struct ssp_frame_hdr hdr; + union { + struct { + struct ssp_command_iu task; + u32 prot[6]; + }; + struct ssp_tmf_iu ssp_task; + struct xfer_rdy_iu xfer_rdy; + struct ssp_response_iu ssp_res; + } u; +}; + +union hisi_sas_command_table { + struct hisi_sas_command_table_ssp ssp; + struct hisi_sas_command_table_smp 
smp; + struct hisi_sas_command_table_stp stp; +}; +extern int hisi_sas_probe(struct platform_device *pdev, + const struct hisi_sas_hw *ops); +extern int hisi_sas_remove(struct platform_device *pdev); + +extern void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy); +extern void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, + struct sas_task *task, + struct hisi_sas_slot *slot); +#endif diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c new file mode 100644 index 000000000000..097ab4f27a6b --- /dev/null +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -0,0 +1,1418 @@ +/* + * Copyright (c) 2015 Linaro Ltd. + * Copyright (c) 2015 Hisilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + */ + +#include "hisi_sas.h" +#define DRV_NAME "hisi_sas" + +#define DEV_IS_GONE(dev) \ + ((!dev) || (dev->dev_type == SAS_PHY_UNUSED)) + +static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device, + u8 *lun, struct hisi_sas_tmf_task *tmf); + +static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device) +{ + return device->port->ha->lldd_ha; +} + +static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx) +{ + void *bitmap = hisi_hba->slot_index_tags; + + clear_bit(slot_idx, bitmap); +} + +static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx) +{ + hisi_sas_slot_index_clear(hisi_hba, slot_idx); +} + +static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx) +{ + void *bitmap = hisi_hba->slot_index_tags; + + set_bit(slot_idx, bitmap); +} + +static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, int *slot_idx) +{ + unsigned int index; + void *bitmap = hisi_hba->slot_index_tags; + + index = find_first_zero_bit(bitmap, hisi_hba->slot_index_count); + if (index >= hisi_hba->slot_index_count) + return -SAS_QUEUE_FULL; + hisi_sas_slot_index_set(hisi_hba, index); + *slot_idx = index; + return 0; +} + +static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba) +{ + int i; + + for (i = 0; i < hisi_hba->slot_index_count; ++i) + hisi_sas_slot_index_clear(hisi_hba, i); +} + +void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task, + struct hisi_sas_slot *slot) +{ + struct device *dev = &hisi_hba->pdev->dev; + + if (!slot->task) + return; + + if (!sas_protocol_ata(task->task_proto)) + if (slot->n_elem) + dma_unmap_sg(dev, task->scatter, slot->n_elem, + task->data_dir); + + if (slot->command_table) + dma_pool_free(hisi_hba->command_table_pool, + slot->command_table, slot->command_table_dma); + + if (slot->status_buffer) + dma_pool_free(hisi_hba->status_buffer_pool, + slot->status_buffer, slot->status_buffer_dma); + + if (slot->sge_page) + dma_pool_free(hisi_hba->sge_page_pool, slot->sge_page, + slot->sge_page_dma); + + list_del_init(&slot->entry); + task->lldd_task = NULL; + slot->task = NULL; + slot->port = NULL; + hisi_sas_slot_index_free(hisi_hba, slot->idx); + memset(slot, 0, sizeof(*slot)); +} +EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free); + +static int hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba, + struct hisi_sas_slot *slot) +{ + return hisi_hba->hw->prep_smp(hisi_hba, slot); +} + +static int hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba, + struct hisi_sas_slot *slot, int is_tmf, + struct hisi_sas_tmf_task *tmf) +{ + 
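	/* Like hisi_sas_task_prep_smp() above and _prep_ata() below, a
	 * thin wrapper that delegates protocol-specific command-table
	 * setup to the versioned hardware layer.
	 */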
return hisi_hba->hw->prep_ssp(hisi_hba, slot, is_tmf, tmf); +} + +static int hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba, + struct hisi_sas_slot *slot) +{ + return hisi_hba->hw->prep_stp(hisi_hba, slot); +} + +/* + * This function will issue an abort TMF regardless of whether the + * task is in the sdev or not. Then it will do the task complete + * cleanup and callbacks. + */ +static void hisi_sas_slot_abort(struct work_struct *work) +{ + struct hisi_sas_slot *abort_slot = + container_of(work, struct hisi_sas_slot, abort_slot); + struct sas_task *task = abort_slot->task; + struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev); + struct scsi_cmnd *cmnd = task->uldd_task; + struct hisi_sas_tmf_task tmf_task; + struct domain_device *device = task->dev; + struct hisi_sas_device *sas_dev = device->lldd_dev; + struct scsi_lun lun; + struct device *dev = &hisi_hba->pdev->dev; + int tag = abort_slot->idx; + + if (!(task->task_proto & SAS_PROTOCOL_SSP)) { + dev_err(dev, "cannot abort slot for non-ssp task\n"); + goto out; + } + + int_to_scsilun(cmnd->device->lun, &lun); + tmf_task.tmf = TMF_ABORT_TASK; + tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag); + + hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun, &tmf_task); +out: + /* Do cleanup for this task */ + hisi_sas_slot_task_free(hisi_hba, task, abort_slot); + if (task->task_done) + task->task_done(task); + if (sas_dev && sas_dev->running_req) + sas_dev->running_req--; +} + +static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba, + int is_tmf, struct hisi_sas_tmf_task *tmf, + int *pass) +{ + struct domain_device *device = task->dev; + struct hisi_sas_device *sas_dev = device->lldd_dev; + struct hisi_sas_port *port; + struct hisi_sas_slot *slot; + struct hisi_sas_cmd_hdr *cmd_hdr_base; + struct device *dev = &hisi_hba->pdev->dev; + int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx; + + if (!device->port) { + struct task_status_struct *ts = &task->task_status; + + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_PHY_DOWN; + /* + * libsas will use dev->port, should + * not call task_done for sata + */ + if (device->dev_type != SAS_SATA_DEV) + task->task_done(task); + return 0; + } + + if (DEV_IS_GONE(sas_dev)) { + if (sas_dev) + dev_info(dev, "task prep: device %llu not ready\n", + sas_dev->device_id); + else + dev_info(dev, "task prep: device %016llx not ready\n", + SAS_ADDR(device->sas_addr)); + + rc = SAS_PHY_DOWN; + return rc; + } + port = device->port->lldd_port; + if (port && !port->port_attached && !tmf) { + if (sas_protocol_ata(task->task_proto)) { + struct task_status_struct *ts = &task->task_status; + + dev_info(dev, + "task prep: SATA/STP port%d not attach device\n", + device->port->id); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_PHY_DOWN; + task->task_done(task); + } else { + struct task_status_struct *ts = &task->task_status; + + dev_info(dev, + "task prep: SAS port%d does not attach device\n", + device->port->id); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_PHY_DOWN; + task->task_done(task); + } + return 0; + } + + if (!sas_protocol_ata(task->task_proto)) { + if (task->num_scatter) { + n_elem = dma_map_sg(dev, task->scatter, + task->num_scatter, task->data_dir); + if (!n_elem) { + rc = -ENOMEM; + goto prep_out; + } + } + } else + n_elem = task->num_scatter; + + rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx); + if (rc) + goto err_out; + rc = hisi_hba->hw->get_free_slot(hisi_hba, &dlvry_queue, + &dlvry_queue_slot); + if (rc) + goto err_out_tag; + + slot = 
&hisi_hba->slot_info[slot_idx]; + memset(slot, 0, sizeof(struct hisi_sas_slot)); + + slot->idx = slot_idx; + slot->n_elem = n_elem; + slot->dlvry_queue = dlvry_queue; + slot->dlvry_queue_slot = dlvry_queue_slot; + cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue]; + slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot]; + slot->task = task; + slot->port = port; + task->lldd_task = slot; + INIT_WORK(&slot->abort_slot, hisi_sas_slot_abort); + + slot->status_buffer = dma_pool_alloc(hisi_hba->status_buffer_pool, + GFP_ATOMIC, + &slot->status_buffer_dma); + if (!slot->status_buffer) { + rc = -ENOMEM; + goto err_out_slot_buf; + } + memset(slot->status_buffer, 0, HISI_SAS_STATUS_BUF_SZ); + + slot->command_table = dma_pool_alloc(hisi_hba->command_table_pool, + GFP_ATOMIC, + &slot->command_table_dma); + if (!slot->command_table) { + rc = -ENOMEM; + goto err_out_status_buf; + } + memset(slot->command_table, 0, HISI_SAS_COMMAND_TABLE_SZ); + memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr)); + + switch (task->task_proto) { + case SAS_PROTOCOL_SMP: + rc = hisi_sas_task_prep_smp(hisi_hba, slot); + break; + case SAS_PROTOCOL_SSP: + rc = hisi_sas_task_prep_ssp(hisi_hba, slot, is_tmf, tmf); + break; + case SAS_PROTOCOL_SATA: + case SAS_PROTOCOL_STP: + case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: + rc = hisi_sas_task_prep_ata(hisi_hba, slot); + break; + default: + dev_err(dev, "task prep: unknown/unsupported proto (0x%x)\n", + task->task_proto); + rc = -EINVAL; + break; + } + + if (rc) { + dev_err(dev, "task prep: rc = 0x%x\n", rc); + if (slot->sge_page) + goto err_out_sge; + goto err_out_command_table; + } + + list_add_tail(&slot->entry, &port->list); + spin_lock(&task->task_state_lock); + task->task_state_flags |= SAS_TASK_AT_INITIATOR; + spin_unlock(&task->task_state_lock); + + hisi_hba->slot_prep = slot; + + sas_dev->running_req++; + ++(*pass); + + return 0; + +err_out_sge: + dma_pool_free(hisi_hba->sge_page_pool, slot->sge_page, + slot->sge_page_dma); +err_out_command_table: + dma_pool_free(hisi_hba->command_table_pool, slot->command_table, + slot->command_table_dma); +err_out_status_buf: + dma_pool_free(hisi_hba->status_buffer_pool, slot->status_buffer, + slot->status_buffer_dma); +err_out_slot_buf: + /* Nothing to be done */ +err_out_tag: + hisi_sas_slot_index_free(hisi_hba, slot_idx); +err_out: + dev_err(dev, "task prep: failed[%d]!\n", rc); + if (!sas_protocol_ata(task->task_proto)) + if (n_elem) + dma_unmap_sg(dev, task->scatter, n_elem, + task->data_dir); +prep_out: + return rc; +} + +static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags, + int is_tmf, struct hisi_sas_tmf_task *tmf) +{ + u32 rc; + u32 pass = 0; + unsigned long flags; + struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev); + struct device *dev = &hisi_hba->pdev->dev; + + /* protect task_prep and start_delivery sequence */ + spin_lock_irqsave(&hisi_hba->lock, flags); + rc = hisi_sas_task_prep(task, hisi_hba, is_tmf, tmf, &pass); + if (rc) + dev_err(dev, "task exec: failed[%d]!\n", rc); + + if (likely(pass)) + hisi_hba->hw->start_delivery(hisi_hba); + spin_unlock_irqrestore(&hisi_hba->lock, flags); + + return rc; +} + +static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no) +{ + struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + struct sas_ha_struct *sas_ha; + + if (!phy->phy_attached) + return; + + sas_ha = &hisi_hba->sha; + sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE); + + if (sas_phy->phy) { + struct sas_phy *sphy = sas_phy->phy; + + 
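		/* Mirror the negotiated and minimum/maximum link rates
		 * into the transport-class struct sas_phy, which is what
		 * the SAS transport attributes expose to user space.
		 */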
sphy->negotiated_linkrate = sas_phy->linkrate; + sphy->minimum_linkrate = phy->minimum_linkrate; + sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS; + sphy->maximum_linkrate = phy->maximum_linkrate; + } + + if (phy->phy_type & PORT_TYPE_SAS) { + struct sas_identify_frame *id; + + id = (struct sas_identify_frame *)phy->frame_rcvd; + id->dev_type = phy->identify.device_type; + id->initiator_bits = SAS_PROTOCOL_ALL; + id->target_bits = phy->identify.target_port_protocols; + } else if (phy->phy_type & PORT_TYPE_SATA) { + /*Nothing*/ + } + + sas_phy->frame_rcvd_size = phy->frame_rcvd_size; + sas_ha->notify_port_event(sas_phy, PORTE_BYTES_DMAED); +} + +static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device) +{ + struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); + struct hisi_sas_device *sas_dev = NULL; + int i; + + spin_lock(&hisi_hba->lock); + for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { + if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) { + hisi_hba->devices[i].device_id = i; + sas_dev = &hisi_hba->devices[i]; + sas_dev->dev_status = HISI_SAS_DEV_NORMAL; + sas_dev->dev_type = device->dev_type; + sas_dev->hisi_hba = hisi_hba; + sas_dev->sas_device = device; + break; + } + } + spin_unlock(&hisi_hba->lock); + + return sas_dev; +} + +static int hisi_sas_dev_found(struct domain_device *device) +{ + struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); + struct domain_device *parent_dev = device->parent; + struct hisi_sas_device *sas_dev; + struct device *dev = &hisi_hba->pdev->dev; + + sas_dev = hisi_sas_alloc_dev(device); + if (!sas_dev) { + dev_err(dev, "fail alloc dev: max support %d devices\n", + HISI_SAS_MAX_DEVICES); + return -EINVAL; + } + + device->lldd_dev = sas_dev; + hisi_hba->hw->setup_itct(hisi_hba, sas_dev); + + if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) { + int phy_no; + u8 phy_num = parent_dev->ex_dev.num_phys; + struct ex_phy *phy; + + for (phy_no = 0; phy_no < phy_num; phy_no++) { + phy = &parent_dev->ex_dev.ex_phy[phy_no]; + if (SAS_ADDR(phy->attached_sas_addr) == + SAS_ADDR(device->sas_addr)) { + sas_dev->attached_phy = phy_no; + break; + } + } + + if (phy_no == phy_num) { + dev_info(dev, "dev found: no attached " + "dev:%016llx at ex:%016llx\n", + SAS_ADDR(device->sas_addr), + SAS_ADDR(parent_dev->sas_addr)); + return -EINVAL; + } + } + + return 0; +} + +static int hisi_sas_slave_configure(struct scsi_device *sdev) +{ + struct domain_device *dev = sdev_to_domain_dev(sdev); + int ret = sas_slave_configure(sdev); + + if (ret) + return ret; + if (!dev_is_sata(dev)) + sas_change_queue_depth(sdev, 64); + + return 0; +} + +static void hisi_sas_scan_start(struct Scsi_Host *shost) +{ + struct hisi_hba *hisi_hba = shost_priv(shost); + int i; + + for (i = 0; i < hisi_hba->n_phy; ++i) + hisi_sas_bytes_dmaed(hisi_hba, i); + + hisi_hba->scan_finished = 1; +} + +static int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time) +{ + struct hisi_hba *hisi_hba = shost_priv(shost); + struct sas_ha_struct *sha = &hisi_hba->sha; + + if (hisi_hba->scan_finished == 0) + return 0; + + sas_drain_work(sha); + return 1; +} + +static void hisi_sas_phyup_work(struct work_struct *work) +{ + struct hisi_sas_phy *phy = + container_of(work, struct hisi_sas_phy, phyup_ws); + struct hisi_hba *hisi_hba = phy->hisi_hba; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + int phy_no = sas_phy->id; + + hisi_hba->hw->sl_notify(hisi_hba, phy_no); /* This requires a sleep */ + hisi_sas_bytes_dmaed(hisi_hba, phy_no); +} + +static void hisi_sas_phy_init(struct 
hisi_hba *hisi_hba, int phy_no) +{ + struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + + phy->hisi_hba = hisi_hba; + phy->port = NULL; + init_timer(&phy->timer); + sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0; + sas_phy->class = SAS; + sas_phy->iproto = SAS_PROTOCOL_ALL; + sas_phy->tproto = 0; + sas_phy->type = PHY_TYPE_PHYSICAL; + sas_phy->role = PHY_ROLE_INITIATOR; + sas_phy->oob_mode = OOB_NOT_CONNECTED; + sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN; + sas_phy->id = phy_no; + sas_phy->sas_addr = &hisi_hba->sas_addr[0]; + sas_phy->frame_rcvd = &phy->frame_rcvd[0]; + sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata; + sas_phy->lldd_phy = phy; + + INIT_WORK(&phy->phyup_ws, hisi_sas_phyup_work); +} + +static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy) +{ + struct sas_ha_struct *sas_ha = sas_phy->ha; + struct hisi_hba *hisi_hba = sas_ha->lldd_ha; + struct hisi_sas_phy *phy = sas_phy->lldd_phy; + struct asd_sas_port *sas_port = sas_phy->port; + struct hisi_sas_port *port = &hisi_hba->port[sas_phy->id]; + unsigned long flags; + + if (!sas_port) + return; + + spin_lock_irqsave(&hisi_hba->lock, flags); + port->port_attached = 1; + port->id = phy->port_id; + phy->port = port; + sas_port->lldd_port = port; + spin_unlock_irqrestore(&hisi_hba->lock, flags); +} + +static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, int phy_no, + struct domain_device *device) +{ + struct hisi_sas_phy *phy; + struct hisi_sas_port *port; + struct hisi_sas_slot *slot, *slot2; + struct device *dev = &hisi_hba->pdev->dev; + + phy = &hisi_hba->phy[phy_no]; + port = phy->port; + if (!port) + return; + + list_for_each_entry_safe(slot, slot2, &port->list, entry) { + struct sas_task *task; + + task = slot->task; + if (device && task->dev != device) + continue; + + dev_info(dev, "Release slot [%d:%d], task [%p]:\n", + slot->dlvry_queue, slot->dlvry_queue_slot, task); + hisi_hba->hw->slot_complete(hisi_hba, slot, 1); + } +} + +static void hisi_sas_port_notify_deformed(struct asd_sas_phy *sas_phy) +{ + struct domain_device *device; + struct hisi_sas_phy *phy = sas_phy->lldd_phy; + struct asd_sas_port *sas_port = sas_phy->port; + + list_for_each_entry(device, &sas_port->dev_list, dev_list_node) + hisi_sas_do_release_task(phy->hisi_hba, sas_phy->id, device); +} + +static void hisi_sas_release_task(struct hisi_hba *hisi_hba, + struct domain_device *device) +{ + struct asd_sas_port *port = device->port; + struct asd_sas_phy *sas_phy; + + list_for_each_entry(sas_phy, &port->phy_list, port_phy_el) + hisi_sas_do_release_task(hisi_hba, sas_phy->id, device); +} + +static void hisi_sas_dev_gone(struct domain_device *device) +{ + struct hisi_sas_device *sas_dev = device->lldd_dev; + struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); + struct device *dev = &hisi_hba->pdev->dev; + u64 dev_id = sas_dev->device_id; + + dev_info(dev, "found dev[%lld:%x] is gone\n", + sas_dev->device_id, sas_dev->dev_type); + + hisi_hba->hw->free_device(hisi_hba, sas_dev); + device->lldd_dev = NULL; + memset(sas_dev, 0, sizeof(*sas_dev)); + sas_dev->device_id = dev_id; + sas_dev->dev_type = SAS_PHY_UNUSED; + sas_dev->dev_status = HISI_SAS_DEV_NORMAL; +} + +static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags) +{ + return hisi_sas_task_exec(task, gfp_flags, 0, NULL); +} + +static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func, + void *funcdata) +{ + struct sas_ha_struct *sas_ha = sas_phy->ha; + struct hisi_hba 
*hisi_hba = sas_ha->lldd_ha; + int phy_no = sas_phy->id; + + switch (func) { + case PHY_FUNC_HARD_RESET: + hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no); + break; + + case PHY_FUNC_LINK_RESET: + hisi_hba->hw->phy_enable(hisi_hba, phy_no); + hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no); + break; + + case PHY_FUNC_DISABLE: + hisi_hba->hw->phy_disable(hisi_hba, phy_no); + break; + + case PHY_FUNC_SET_LINK_RATE: + case PHY_FUNC_RELEASE_SPINUP_HOLD: + default: + return -EOPNOTSUPP; + } + return 0; +} + +static void hisi_sas_task_done(struct sas_task *task) +{ + if (!del_timer(&task->slow_task->timer)) + return; + complete(&task->slow_task->completion); +} + +static void hisi_sas_tmf_timedout(unsigned long data) +{ + struct sas_task *task = (struct sas_task *)data; + + task->task_state_flags |= SAS_TASK_STATE_ABORTED; + complete(&task->slow_task->completion); +} + +#define TASK_TIMEOUT 20 +#define TASK_RETRY 3 +static int hisi_sas_exec_internal_tmf_task(struct domain_device *device, + void *parameter, u32 para_len, + struct hisi_sas_tmf_task *tmf) +{ + struct hisi_sas_device *sas_dev = device->lldd_dev; + struct hisi_hba *hisi_hba = sas_dev->hisi_hba; + struct device *dev = &hisi_hba->pdev->dev; + struct sas_task *task; + int res, retry; + + for (retry = 0; retry < TASK_RETRY; retry++) { + task = sas_alloc_slow_task(GFP_KERNEL); + if (!task) + return -ENOMEM; + + task->dev = device; + task->task_proto = device->tproto; + + memcpy(&task->ssp_task, parameter, para_len); + task->task_done = hisi_sas_task_done; + + task->slow_task->timer.data = (unsigned long) task; + task->slow_task->timer.function = hisi_sas_tmf_timedout; + task->slow_task->timer.expires = jiffies + TASK_TIMEOUT*HZ; + add_timer(&task->slow_task->timer); + + res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf); + + if (res) { + del_timer(&task->slow_task->timer); + dev_err(dev, "abort tmf: executing internal task failed: %d\n", + res); + goto ex_err; + } + + wait_for_completion(&task->slow_task->completion); + res = TMF_RESP_FUNC_FAILED; + /* Even TMF timed out, return direct. 
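	 * That is: if the TMF was aborted by its timer without ever
	 * completing, log the timeout, free the slot and give up rather
	 * than retrying.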
+		 */
+		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
+			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
+				dev_err(dev, "abort tmf: TMF task[%d] timeout\n",
+					tmf->tag_of_task_to_be_managed);
+				if (task->lldd_task) {
+					struct hisi_sas_slot *slot =
+						task->lldd_task;
+
+					hisi_sas_slot_task_free(hisi_hba,
+								task, slot);
+				}
+
+				goto ex_err;
+			}
+		}
+
+		if (task->task_status.resp == SAS_TASK_COMPLETE &&
+		    task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
+			res = TMF_RESP_FUNC_COMPLETE;
+			break;
+		}
+
+		if (task->task_status.resp == SAS_TASK_COMPLETE &&
+		    task->task_status.stat == SAS_DATA_UNDERRUN) {
+			/* no error, but return the number of bytes of
+			 * underrun
+			 */
+			dev_warn(dev, "abort tmf: task to dev %016llx "
+				 "resp: 0x%x sts 0x%x underrun\n",
+				 SAS_ADDR(device->sas_addr),
+				 task->task_status.resp,
+				 task->task_status.stat);
+			res = task->task_status.residual;
+			break;
+		}
+
+		if (task->task_status.resp == SAS_TASK_COMPLETE &&
+		    task->task_status.stat == SAS_DATA_OVERRUN) {
+			dev_warn(dev, "abort tmf: blocked task error\n");
+			res = -EMSGSIZE;
+			break;
+		}
+
+		dev_warn(dev, "abort tmf: task to dev "
+			 "%016llx resp: 0x%x status 0x%x\n",
+			 SAS_ADDR(device->sas_addr), task->task_status.resp,
+			 task->task_status.stat);
+		sas_free_task(task);
+		task = NULL;
+	}
+ex_err:
+	WARN_ON(retry == TASK_RETRY);
+	sas_free_task(task);
+	return res;
+}
+
+static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
+				u8 *lun, struct hisi_sas_tmf_task *tmf)
+{
+	struct sas_ssp_task ssp_task;
+
+	if (!(device->tproto & SAS_PROTOCOL_SSP))
+		return TMF_RESP_FUNC_ESUPP;
+
+	memcpy(ssp_task.LUN, lun, 8);
+
+	return hisi_sas_exec_internal_tmf_task(device, &ssp_task,
+					       sizeof(ssp_task), tmf);
+}
+
+static int hisi_sas_abort_task(struct sas_task *task)
+{
+	struct scsi_lun lun;
+	struct hisi_sas_tmf_task tmf_task;
+	struct domain_device *device = task->dev;
+	struct hisi_sas_device *sas_dev = device->lldd_dev;
+	struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
+	struct device *dev = &hisi_hba->pdev->dev;
+	int rc = TMF_RESP_FUNC_FAILED;
+	unsigned long flags;
+
+	if (!sas_dev) {
+		dev_warn(dev, "Device has been removed\n");
+		return TMF_RESP_FUNC_FAILED;
+	}
+
+	spin_lock_irqsave(&task->task_state_lock, flags);
+	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
+		spin_unlock_irqrestore(&task->task_state_lock, flags);
+		rc = TMF_RESP_FUNC_COMPLETE;
+		goto out;
+	}
+
+	spin_unlock_irqrestore(&task->task_state_lock, flags);
+	sas_dev->dev_status = HISI_SAS_DEV_EH;
+	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
+		struct scsi_cmnd *cmnd = task->uldd_task;
+		struct hisi_sas_slot *slot = task->lldd_task;
+		u32 tag = slot->idx;
+
+		int_to_scsilun(cmnd->device->lun, &lun);
+		tmf_task.tmf = TMF_ABORT_TASK;
+		tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
+
+		rc = hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun,
+						  &tmf_task);
+
+		/* If successful, complete the slot so the task's done
+		 * callback is run. */
+		if (rc == TMF_RESP_FUNC_COMPLETE) {
+			if (task->lldd_task) {
+				struct hisi_sas_slot *slot;
+
+				slot = &hisi_hba->slot_info
+					[tmf_task.tag_of_task_to_be_managed];
+				spin_lock_irqsave(&hisi_hba->lock, flags);
+				hisi_hba->hw->slot_complete(hisi_hba, slot, 1);
+				spin_unlock_irqrestore(&hisi_hba->lock, flags);
+			}
+		}
+
+	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
+		   task->task_proto & SAS_PROTOCOL_STP) {
+		if (task->dev->dev_type == SAS_SATA_DEV) {
+			struct hisi_sas_slot *slot = task->lldd_task;
+
+			dev_notice(dev, "abort task: hba=%p task=%p slot=%p\n",
+				   hisi_hba,
task, slot); + task->task_state_flags |= SAS_TASK_STATE_ABORTED; + rc = TMF_RESP_FUNC_COMPLETE; + goto out; + } + + } + +out: + if (rc != TMF_RESP_FUNC_COMPLETE) + dev_notice(dev, "abort task: rc=%d\n", rc); + return rc; +} + +static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun) +{ + struct hisi_sas_tmf_task tmf_task; + int rc = TMF_RESP_FUNC_FAILED; + + tmf_task.tmf = TMF_ABORT_TASK_SET; + rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task); + + return rc; +} + +static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun) +{ + int rc = TMF_RESP_FUNC_FAILED; + struct hisi_sas_tmf_task tmf_task; + + tmf_task.tmf = TMF_CLEAR_ACA; + rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task); + + return rc; +} + +static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device) +{ + struct sas_phy *phy = sas_get_local_phy(device); + int rc, reset_type = (device->dev_type == SAS_SATA_DEV || + (device->tproto & SAS_PROTOCOL_STP)) ? 0 : 1; + rc = sas_phy_reset(phy, reset_type); + sas_put_local_phy(phy); + msleep(2000); + return rc; +} + +static int hisi_sas_I_T_nexus_reset(struct domain_device *device) +{ + struct hisi_sas_device *sas_dev = device->lldd_dev; + struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); + unsigned long flags; + int rc = TMF_RESP_FUNC_FAILED; + + if (sas_dev->dev_status != HISI_SAS_DEV_EH) + return TMF_RESP_FUNC_FAILED; + sas_dev->dev_status = HISI_SAS_DEV_NORMAL; + + rc = hisi_sas_debug_I_T_nexus_reset(device); + + spin_lock_irqsave(&hisi_hba->lock, flags); + hisi_sas_release_task(hisi_hba, device); + spin_unlock_irqrestore(&hisi_hba->lock, flags); + + return 0; +} + +static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun) +{ + struct hisi_sas_tmf_task tmf_task; + struct hisi_sas_device *sas_dev = device->lldd_dev; + struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); + struct device *dev = &hisi_hba->pdev->dev; + unsigned long flags; + int rc = TMF_RESP_FUNC_FAILED; + + tmf_task.tmf = TMF_LU_RESET; + sas_dev->dev_status = HISI_SAS_DEV_EH; + rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task); + if (rc == TMF_RESP_FUNC_COMPLETE) { + spin_lock_irqsave(&hisi_hba->lock, flags); + hisi_sas_release_task(hisi_hba, device); + spin_unlock_irqrestore(&hisi_hba->lock, flags); + } + + /* If failed, fall-through I_T_Nexus reset */ + dev_err(dev, "lu_reset: for device[%llx]:rc= %d\n", + sas_dev->device_id, rc); + return rc; +} + +static int hisi_sas_query_task(struct sas_task *task) +{ + struct scsi_lun lun; + struct hisi_sas_tmf_task tmf_task; + int rc = TMF_RESP_FUNC_FAILED; + + if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) { + struct scsi_cmnd *cmnd = task->uldd_task; + struct domain_device *device = task->dev; + struct hisi_sas_slot *slot = task->lldd_task; + u32 tag = slot->idx; + + int_to_scsilun(cmnd->device->lun, &lun); + tmf_task.tmf = TMF_QUERY_TASK; + tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag); + + rc = hisi_sas_debug_issue_ssp_tmf(device, + lun.scsi_lun, + &tmf_task); + switch (rc) { + /* The task is still in Lun, release it then */ + case TMF_RESP_FUNC_SUCC: + /* The task is not in Lun or failed, reset the phy */ + case TMF_RESP_FUNC_FAILED: + case TMF_RESP_FUNC_COMPLETE: + break; + } + } + return rc; +} + +static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy) +{ + hisi_sas_port_notify_formed(sas_phy); +} + +static void hisi_sas_port_deformed(struct asd_sas_phy *sas_phy) +{ + hisi_sas_port_notify_deformed(sas_phy); +} + +static void hisi_sas_phy_disconnected(struct hisi_sas_phy 
*phy) +{ + phy->phy_attached = 0; + phy->phy_type = 0; + phy->port = NULL; +} + +void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy) +{ + struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + struct sas_ha_struct *sas_ha = &hisi_hba->sha; + + if (rdy) { + /* Phy down but ready */ + hisi_sas_bytes_dmaed(hisi_hba, phy_no); + hisi_sas_port_notify_formed(sas_phy); + } else { + struct hisi_sas_port *port = phy->port; + + /* Phy down and not ready */ + sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL); + sas_phy_disconnected(sas_phy); + + if (port) { + if (phy->phy_type & PORT_TYPE_SAS) { + int port_id = port->id; + + if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba, + port_id)) + port->port_attached = 0; + } else if (phy->phy_type & PORT_TYPE_SATA) + port->port_attached = 0; + } + hisi_sas_phy_disconnected(phy); + } +} +EXPORT_SYMBOL_GPL(hisi_sas_phy_down); + +static struct scsi_transport_template *hisi_sas_stt; + +static struct scsi_host_template hisi_sas_sht = { + .module = THIS_MODULE, + .name = DRV_NAME, + .queuecommand = sas_queuecommand, + .target_alloc = sas_target_alloc, + .slave_configure = hisi_sas_slave_configure, + .scan_finished = hisi_sas_scan_finished, + .scan_start = hisi_sas_scan_start, + .change_queue_depth = sas_change_queue_depth, + .bios_param = sas_bios_param, + .can_queue = 1, + .this_id = -1, + .sg_tablesize = SG_ALL, + .max_sectors = SCSI_DEFAULT_MAX_SECTORS, + .use_clustering = ENABLE_CLUSTERING, + .eh_device_reset_handler = sas_eh_device_reset_handler, + .eh_bus_reset_handler = sas_eh_bus_reset_handler, + .target_destroy = sas_target_destroy, + .ioctl = sas_ioctl, +}; + +static struct sas_domain_function_template hisi_sas_transport_ops = { + .lldd_dev_found = hisi_sas_dev_found, + .lldd_dev_gone = hisi_sas_dev_gone, + .lldd_execute_task = hisi_sas_queue_command, + .lldd_control_phy = hisi_sas_control_phy, + .lldd_abort_task = hisi_sas_abort_task, + .lldd_abort_task_set = hisi_sas_abort_task_set, + .lldd_clear_aca = hisi_sas_clear_aca, + .lldd_I_T_nexus_reset = hisi_sas_I_T_nexus_reset, + .lldd_lu_reset = hisi_sas_lu_reset, + .lldd_query_task = hisi_sas_query_task, + .lldd_port_formed = hisi_sas_port_formed, + .lldd_port_deformed = hisi_sas_port_deformed, +}; + +static int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost) +{ + struct platform_device *pdev = hisi_hba->pdev; + struct device *dev = &pdev->dev; + int i, s, max_command_entries = hisi_hba->hw->max_command_entries; + + spin_lock_init(&hisi_hba->lock); + for (i = 0; i < hisi_hba->n_phy; i++) { + hisi_sas_phy_init(hisi_hba, i); + hisi_hba->port[i].port_attached = 0; + hisi_hba->port[i].id = -1; + INIT_LIST_HEAD(&hisi_hba->port[i].list); + } + + for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { + hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED; + hisi_hba->devices[i].device_id = i; + hisi_hba->devices[i].dev_status = HISI_SAS_DEV_NORMAL; + } + + for (i = 0; i < hisi_hba->queue_count; i++) { + struct hisi_sas_cq *cq = &hisi_hba->cq[i]; + + /* Completion queue structure */ + cq->id = i; + cq->hisi_hba = hisi_hba; + + /* Delivery queue */ + s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS; + hisi_hba->cmd_hdr[i] = dma_alloc_coherent(dev, s, + &hisi_hba->cmd_hdr_dma[i], GFP_KERNEL); + if (!hisi_hba->cmd_hdr[i]) + goto err_out; + memset(hisi_hba->cmd_hdr[i], 0, s); + + /* Completion queue */ + s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS; + hisi_hba->complete_hdr[i] = dma_alloc_coherent(dev, s, + 
&hisi_hba->complete_hdr_dma[i], GFP_KERNEL); + if (!hisi_hba->complete_hdr[i]) + goto err_out; + memset(hisi_hba->complete_hdr[i], 0, s); + } + + s = HISI_SAS_STATUS_BUF_SZ; + hisi_hba->status_buffer_pool = dma_pool_create("status_buffer", + dev, s, 16, 0); + if (!hisi_hba->status_buffer_pool) + goto err_out; + + s = HISI_SAS_COMMAND_TABLE_SZ; + hisi_hba->command_table_pool = dma_pool_create("command_table", + dev, s, 16, 0); + if (!hisi_hba->command_table_pool) + goto err_out; + + s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct); + hisi_hba->itct = dma_alloc_coherent(dev, s, &hisi_hba->itct_dma, + GFP_KERNEL); + if (!hisi_hba->itct) + goto err_out; + + memset(hisi_hba->itct, 0, s); + + hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries, + sizeof(struct hisi_sas_slot), + GFP_KERNEL); + if (!hisi_hba->slot_info) + goto err_out; + + s = max_command_entries * sizeof(struct hisi_sas_iost); + hisi_hba->iost = dma_alloc_coherent(dev, s, &hisi_hba->iost_dma, + GFP_KERNEL); + if (!hisi_hba->iost) + goto err_out; + + memset(hisi_hba->iost, 0, s); + + s = max_command_entries * sizeof(struct hisi_sas_breakpoint); + hisi_hba->breakpoint = dma_alloc_coherent(dev, s, + &hisi_hba->breakpoint_dma, GFP_KERNEL); + if (!hisi_hba->breakpoint) + goto err_out; + + memset(hisi_hba->breakpoint, 0, s); + + hisi_hba->slot_index_count = max_command_entries; + s = hisi_hba->slot_index_count / sizeof(unsigned long); + hisi_hba->slot_index_tags = devm_kzalloc(dev, s, GFP_KERNEL); + if (!hisi_hba->slot_index_tags) + goto err_out; + + hisi_hba->sge_page_pool = dma_pool_create("status_sge", dev, + sizeof(struct hisi_sas_sge_page), 16, 0); + if (!hisi_hba->sge_page_pool) + goto err_out; + + s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS; + hisi_hba->initial_fis = dma_alloc_coherent(dev, s, + &hisi_hba->initial_fis_dma, GFP_KERNEL); + if (!hisi_hba->initial_fis) + goto err_out; + memset(hisi_hba->initial_fis, 0, s); + + s = max_command_entries * sizeof(struct hisi_sas_breakpoint) * 2; + hisi_hba->sata_breakpoint = dma_alloc_coherent(dev, s, + &hisi_hba->sata_breakpoint_dma, GFP_KERNEL); + if (!hisi_hba->sata_breakpoint) + goto err_out; + memset(hisi_hba->sata_breakpoint, 0, s); + + hisi_sas_slot_index_init(hisi_hba); + + hisi_hba->wq = create_singlethread_workqueue(dev_name(dev)); + if (!hisi_hba->wq) { + dev_err(dev, "sas_alloc: failed to create workqueue\n"); + goto err_out; + } + + return 0; +err_out: + return -ENOMEM; +} + +static void hisi_sas_free(struct hisi_hba *hisi_hba) +{ + struct device *dev = &hisi_hba->pdev->dev; + int i, s, max_command_entries = hisi_hba->hw->max_command_entries; + + for (i = 0; i < hisi_hba->queue_count; i++) { + s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS; + if (hisi_hba->cmd_hdr[i]) + dma_free_coherent(dev, s, + hisi_hba->cmd_hdr[i], + hisi_hba->cmd_hdr_dma[i]); + + s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS; + if (hisi_hba->complete_hdr[i]) + dma_free_coherent(dev, s, + hisi_hba->complete_hdr[i], + hisi_hba->complete_hdr_dma[i]); + } + + dma_pool_destroy(hisi_hba->status_buffer_pool); + dma_pool_destroy(hisi_hba->command_table_pool); + dma_pool_destroy(hisi_hba->sge_page_pool); + + s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct); + if (hisi_hba->itct) + dma_free_coherent(dev, s, + hisi_hba->itct, hisi_hba->itct_dma); + + s = max_command_entries * sizeof(struct hisi_sas_iost); + if (hisi_hba->iost) + dma_free_coherent(dev, s, + hisi_hba->iost, hisi_hba->iost_dma); + + s = max_command_entries * sizeof(struct 
hisi_sas_breakpoint); + if (hisi_hba->breakpoint) + dma_free_coherent(dev, s, + hisi_hba->breakpoint, + hisi_hba->breakpoint_dma); + + + s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS; + if (hisi_hba->initial_fis) + dma_free_coherent(dev, s, + hisi_hba->initial_fis, + hisi_hba->initial_fis_dma); + + s = max_command_entries * sizeof(struct hisi_sas_breakpoint) * 2; + if (hisi_hba->sata_breakpoint) + dma_free_coherent(dev, s, + hisi_hba->sata_breakpoint, + hisi_hba->sata_breakpoint_dma); + + if (hisi_hba->wq) + destroy_workqueue(hisi_hba->wq); +} + +static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev, + const struct hisi_sas_hw *hw) +{ + struct resource *res; + struct Scsi_Host *shost; + struct hisi_hba *hisi_hba; + struct device *dev = &pdev->dev; + struct device_node *np = pdev->dev.of_node; + + shost = scsi_host_alloc(&hisi_sas_sht, sizeof(*hisi_hba)); + if (!shost) + goto err_out; + hisi_hba = shost_priv(shost); + + hisi_hba->hw = hw; + hisi_hba->pdev = pdev; + hisi_hba->shost = shost; + SHOST_TO_SAS_HA(shost) = &hisi_hba->sha; + + init_timer(&hisi_hba->timer); + + if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr, + SAS_ADDR_SIZE)) + goto err_out; + + if (np) { + hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np, + "hisilicon,sas-syscon"); + if (IS_ERR(hisi_hba->ctrl)) + goto err_out; + + if (device_property_read_u32(dev, "ctrl-reset-reg", + &hisi_hba->ctrl_reset_reg)) + goto err_out; + + if (device_property_read_u32(dev, "ctrl-reset-sts-reg", + &hisi_hba->ctrl_reset_sts_reg)) + goto err_out; + + if (device_property_read_u32(dev, "ctrl-clock-ena-reg", + &hisi_hba->ctrl_clock_ena_reg)) + goto err_out; + } + + if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) + goto err_out; + + if (device_property_read_u32(dev, "queue-count", + &hisi_hba->queue_count)) + goto err_out; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + hisi_hba->regs = devm_ioremap_resource(dev, res); + if (IS_ERR(hisi_hba->regs)) + goto err_out; + + if (hisi_sas_alloc(hisi_hba, shost)) { + hisi_sas_free(hisi_hba); + goto err_out; + } + + return shost; +err_out: + dev_err(dev, "shost alloc failed\n"); + return NULL; +} + +static void hisi_sas_init_add(struct hisi_hba *hisi_hba) +{ + int i; + + for (i = 0; i < hisi_hba->n_phy; i++) + memcpy(&hisi_hba->phy[i].dev_sas_addr, + hisi_hba->sas_addr, + SAS_ADDR_SIZE); +} + +int hisi_sas_probe(struct platform_device *pdev, + const struct hisi_sas_hw *hw) +{ + struct Scsi_Host *shost; + struct hisi_hba *hisi_hba; + struct device *dev = &pdev->dev; + struct asd_sas_phy **arr_phy; + struct asd_sas_port **arr_port; + struct sas_ha_struct *sha; + int rc, phy_nr, port_nr, i; + + shost = hisi_sas_shost_alloc(pdev, hw); + if (!shost) { + rc = -ENOMEM; + goto err_out_ha; + } + + sha = SHOST_TO_SAS_HA(shost); + hisi_hba = shost_priv(shost); + platform_set_drvdata(pdev, sha); + + if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) && + dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) { + dev_err(dev, "No usable DMA addressing method\n"); + rc = -EIO; + goto err_out_ha; + } + + phy_nr = port_nr = hisi_hba->n_phy; + + arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL); + arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL); + if (!arr_phy || !arr_port) + return -ENOMEM; + + sha->sas_phy = arr_phy; + sha->sas_port = arr_port; + sha->core.shost = shost; + sha->lldd_ha = hisi_hba; + + shost->transportt = hisi_sas_stt; + shost->max_id = HISI_SAS_MAX_DEVICES; + shost->max_lun = ~0; + 
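+	/*
+	 * Midlayer limits: ~0 for max_lun means the LLDD imposes no LUN
+	 * limit, and can_queue/cmd_per_lun below both come from the
+	 * hw-specific max_command_entries (8192 for v1 hw).
+	 */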
shost->max_channel = 1; + shost->max_cmd_len = 16; + shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT); + shost->can_queue = hisi_hba->hw->max_command_entries; + shost->cmd_per_lun = hisi_hba->hw->max_command_entries; + + sha->sas_ha_name = DRV_NAME; + sha->dev = &hisi_hba->pdev->dev; + sha->lldd_module = THIS_MODULE; + sha->sas_addr = &hisi_hba->sas_addr[0]; + sha->num_phys = hisi_hba->n_phy; + sha->core.shost = hisi_hba->shost; + + for (i = 0; i < hisi_hba->n_phy; i++) { + sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy; + sha->sas_port[i] = &hisi_hba->port[i].sas_port; + } + + hisi_sas_init_add(hisi_hba); + + rc = hisi_hba->hw->hw_init(hisi_hba); + if (rc) + goto err_out_ha; + + rc = scsi_add_host(shost, &pdev->dev); + if (rc) + goto err_out_ha; + + rc = sas_register_ha(sha); + if (rc) + goto err_out_register_ha; + + scsi_scan_host(shost); + + return 0; + +err_out_register_ha: + scsi_remove_host(shost); +err_out_ha: + kfree(shost); + return rc; +} +EXPORT_SYMBOL_GPL(hisi_sas_probe); + +int hisi_sas_remove(struct platform_device *pdev) +{ + struct sas_ha_struct *sha = platform_get_drvdata(pdev); + struct hisi_hba *hisi_hba = sha->lldd_ha; + + scsi_remove_host(sha->core.shost); + sas_unregister_ha(sha); + sas_remove_host(sha->core.shost); + + hisi_sas_free(hisi_hba); + return 0; +} +EXPORT_SYMBOL_GPL(hisi_sas_remove); + +static __init int hisi_sas_init(void) +{ + pr_info("hisi_sas: driver version %s\n", DRV_VERSION); + + hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops); + if (!hisi_sas_stt) + return -ENOMEM; + + return 0; +} + +static __exit void hisi_sas_exit(void) +{ + sas_release_transport(hisi_sas_stt); +} + +module_init(hisi_sas_init); +module_exit(hisi_sas_exit); + +MODULE_VERSION(DRV_VERSION); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("John Garry <john.garry@huawei.com>"); +MODULE_DESCRIPTION("HISILICON SAS controller driver"); +MODULE_ALIAS("platform:" DRV_NAME); diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c new file mode 100644 index 000000000000..1abbc2e162df --- /dev/null +++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c @@ -0,0 +1,1880 @@ +/* + * Copyright (c) 2015 Linaro Ltd. + * Copyright (c) 2015 Hisilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + */ + +#include "hisi_sas.h" +#define DRV_NAME "hisi_sas_v1_hw" + +/* global registers need init*/ +#define DLVRY_QUEUE_ENABLE 0x0 +#define IOST_BASE_ADDR_LO 0x8 +#define IOST_BASE_ADDR_HI 0xc +#define ITCT_BASE_ADDR_LO 0x10 +#define ITCT_BASE_ADDR_HI 0x14 +#define BROKEN_MSG_ADDR_LO 0x18 +#define BROKEN_MSG_ADDR_HI 0x1c +#define PHY_CONTEXT 0x20 +#define PHY_STATE 0x24 +#define PHY_PORT_NUM_MA 0x28 +#define PORT_STATE 0x2c +#define PHY_CONN_RATE 0x30 +#define HGC_TRANS_TASK_CNT_LIMIT 0x38 +#define AXI_AHB_CLK_CFG 0x3c +#define HGC_SAS_TXFAIL_RETRY_CTRL 0x84 +#define HGC_GET_ITV_TIME 0x90 +#define DEVICE_MSG_WORK_MODE 0x94 +#define I_T_NEXUS_LOSS_TIME 0xa0 +#define BUS_INACTIVE_LIMIT_TIME 0xa8 +#define REJECT_TO_OPEN_LIMIT_TIME 0xac +#define CFG_AGING_TIME 0xbc +#define CFG_AGING_TIME_ITCT_REL_OFF 0 +#define CFG_AGING_TIME_ITCT_REL_MSK (0x1 << CFG_AGING_TIME_ITCT_REL_OFF) +#define HGC_DFX_CFG2 0xc0 +#define FIS_LIST_BADDR_L 0xc4 +#define CFG_1US_TIMER_TRSH 0xcc +#define CFG_SAS_CONFIG 0xd4 +#define HGC_IOST_ECC_ADDR 0x140 +#define HGC_IOST_ECC_ADDR_BAD_OFF 16 +#define HGC_IOST_ECC_ADDR_BAD_MSK (0x3ff << HGC_IOST_ECC_ADDR_BAD_OFF) +#define HGC_DQ_ECC_ADDR 0x144 +#define HGC_DQ_ECC_ADDR_BAD_OFF 16 +#define HGC_DQ_ECC_ADDR_BAD_MSK (0xfff << HGC_DQ_ECC_ADDR_BAD_OFF) +#define HGC_INVLD_DQE_INFO 0x148 +#define HGC_INVLD_DQE_INFO_DQ_OFF 0 +#define HGC_INVLD_DQE_INFO_DQ_MSK (0xffff << HGC_INVLD_DQE_INFO_DQ_OFF) +#define HGC_INVLD_DQE_INFO_TYPE_OFF 16 +#define HGC_INVLD_DQE_INFO_TYPE_MSK (0x1 << HGC_INVLD_DQE_INFO_TYPE_OFF) +#define HGC_INVLD_DQE_INFO_FORCE_OFF 17 +#define HGC_INVLD_DQE_INFO_FORCE_MSK (0x1 << HGC_INVLD_DQE_INFO_FORCE_OFF) +#define HGC_INVLD_DQE_INFO_PHY_OFF 18 +#define HGC_INVLD_DQE_INFO_PHY_MSK (0x1 << HGC_INVLD_DQE_INFO_PHY_OFF) +#define HGC_INVLD_DQE_INFO_ABORT_OFF 19 +#define HGC_INVLD_DQE_INFO_ABORT_MSK (0x1 << HGC_INVLD_DQE_INFO_ABORT_OFF) +#define HGC_INVLD_DQE_INFO_IPTT_OF_OFF 20 +#define HGC_INVLD_DQE_INFO_IPTT_OF_MSK (0x1 << HGC_INVLD_DQE_INFO_IPTT_OF_OFF) +#define HGC_INVLD_DQE_INFO_SSP_ERR_OFF 21 +#define HGC_INVLD_DQE_INFO_SSP_ERR_MSK (0x1 << HGC_INVLD_DQE_INFO_SSP_ERR_OFF) +#define HGC_INVLD_DQE_INFO_OFL_OFF 22 +#define HGC_INVLD_DQE_INFO_OFL_MSK (0x1 << HGC_INVLD_DQE_INFO_OFL_OFF) +#define HGC_ITCT_ECC_ADDR 0x150 +#define HGC_ITCT_ECC_ADDR_BAD_OFF 16 +#define HGC_ITCT_ECC_ADDR_BAD_MSK (0x3ff << HGC_ITCT_ECC_ADDR_BAD_OFF) +#define HGC_AXI_FIFO_ERR_INFO 0x154 +#define INT_COAL_EN 0x1bc +#define OQ_INT_COAL_TIME 0x1c0 +#define OQ_INT_COAL_CNT 0x1c4 +#define ENT_INT_COAL_TIME 0x1c8 +#define ENT_INT_COAL_CNT 0x1cc +#define OQ_INT_SRC 0x1d0 +#define OQ_INT_SRC_MSK 0x1d4 +#define ENT_INT_SRC1 0x1d8 +#define ENT_INT_SRC2 0x1dc +#define ENT_INT_SRC2_DQ_CFG_ERR_OFF 25 +#define ENT_INT_SRC2_DQ_CFG_ERR_MSK (0x1 << ENT_INT_SRC2_DQ_CFG_ERR_OFF) +#define ENT_INT_SRC2_CQ_CFG_ERR_OFF 27 +#define ENT_INT_SRC2_CQ_CFG_ERR_MSK (0x1 << ENT_INT_SRC2_CQ_CFG_ERR_OFF) +#define ENT_INT_SRC2_AXI_WRONG_INT_OFF 28 +#define ENT_INT_SRC2_AXI_WRONG_INT_MSK (0x1 << ENT_INT_SRC2_AXI_WRONG_INT_OFF) +#define ENT_INT_SRC2_AXI_OVERLF_INT_OFF 29 +#define ENT_INT_SRC2_AXI_OVERLF_INT_MSK (0x1 << ENT_INT_SRC2_AXI_OVERLF_INT_OFF) +#define ENT_INT_SRC_MSK1 0x1e0 +#define ENT_INT_SRC_MSK2 0x1e4 +#define SAS_ECC_INTR 0x1e8 +#define SAS_ECC_INTR_DQ_ECC1B_OFF 0 +#define SAS_ECC_INTR_DQ_ECC1B_MSK (0x1 << SAS_ECC_INTR_DQ_ECC1B_OFF) +#define SAS_ECC_INTR_DQ_ECCBAD_OFF 1 +#define SAS_ECC_INTR_DQ_ECCBAD_MSK (0x1 << SAS_ECC_INTR_DQ_ECCBAD_OFF) +#define SAS_ECC_INTR_IOST_ECC1B_OFF 2 +#define 
SAS_ECC_INTR_IOST_ECC1B_MSK (0x1 << SAS_ECC_INTR_IOST_ECC1B_OFF) +#define SAS_ECC_INTR_IOST_ECCBAD_OFF 3 +#define SAS_ECC_INTR_IOST_ECCBAD_MSK (0x1 << SAS_ECC_INTR_IOST_ECCBAD_OFF) +#define SAS_ECC_INTR_ITCT_ECC1B_OFF 4 +#define SAS_ECC_INTR_ITCT_ECC1B_MSK (0x1 << SAS_ECC_INTR_ITCT_ECC1B_OFF) +#define SAS_ECC_INTR_ITCT_ECCBAD_OFF 5 +#define SAS_ECC_INTR_ITCT_ECCBAD_MSK (0x1 << SAS_ECC_INTR_ITCT_ECCBAD_OFF) +#define SAS_ECC_INTR_MSK 0x1ec +#define HGC_ERR_STAT_EN 0x238 +#define DLVRY_Q_0_BASE_ADDR_LO 0x260 +#define DLVRY_Q_0_BASE_ADDR_HI 0x264 +#define DLVRY_Q_0_DEPTH 0x268 +#define DLVRY_Q_0_WR_PTR 0x26c +#define DLVRY_Q_0_RD_PTR 0x270 +#define COMPL_Q_0_BASE_ADDR_LO 0x4e0 +#define COMPL_Q_0_BASE_ADDR_HI 0x4e4 +#define COMPL_Q_0_DEPTH 0x4e8 +#define COMPL_Q_0_WR_PTR 0x4ec +#define COMPL_Q_0_RD_PTR 0x4f0 +#define HGC_ECC_ERR 0x7d0 + +/* phy registers need init */ +#define PORT_BASE (0x800) + +#define PHY_CFG (PORT_BASE + 0x0) +#define PHY_CFG_ENA_OFF 0 +#define PHY_CFG_ENA_MSK (0x1 << PHY_CFG_ENA_OFF) +#define PHY_CFG_DC_OPT_OFF 2 +#define PHY_CFG_DC_OPT_MSK (0x1 << PHY_CFG_DC_OPT_OFF) +#define PROG_PHY_LINK_RATE (PORT_BASE + 0xc) +#define PROG_PHY_LINK_RATE_MAX_OFF 0 +#define PROG_PHY_LINK_RATE_MAX_MSK (0xf << PROG_PHY_LINK_RATE_MAX_OFF) +#define PROG_PHY_LINK_RATE_MIN_OFF 4 +#define PROG_PHY_LINK_RATE_MIN_MSK (0xf << PROG_PHY_LINK_RATE_MIN_OFF) +#define PROG_PHY_LINK_RATE_OOB_OFF 8 +#define PROG_PHY_LINK_RATE_OOB_MSK (0xf << PROG_PHY_LINK_RATE_OOB_OFF) +#define PHY_CTRL (PORT_BASE + 0x14) +#define PHY_CTRL_RESET_OFF 0 +#define PHY_CTRL_RESET_MSK (0x1 << PHY_CTRL_RESET_OFF) +#define PHY_RATE_NEGO (PORT_BASE + 0x30) +#define PHY_PCN (PORT_BASE + 0x44) +#define SL_TOUT_CFG (PORT_BASE + 0x8c) +#define SL_CONTROL (PORT_BASE + 0x94) +#define SL_CONTROL_NOTIFY_EN_OFF 0 +#define SL_CONTROL_NOTIFY_EN_MSK (0x1 << SL_CONTROL_NOTIFY_EN_OFF) +#define TX_ID_DWORD0 (PORT_BASE + 0x9c) +#define TX_ID_DWORD1 (PORT_BASE + 0xa0) +#define TX_ID_DWORD2 (PORT_BASE + 0xa4) +#define TX_ID_DWORD3 (PORT_BASE + 0xa8) +#define TX_ID_DWORD4 (PORT_BASE + 0xaC) +#define TX_ID_DWORD5 (PORT_BASE + 0xb0) +#define TX_ID_DWORD6 (PORT_BASE + 0xb4) +#define RX_IDAF_DWORD0 (PORT_BASE + 0xc4) +#define RX_IDAF_DWORD1 (PORT_BASE + 0xc8) +#define RX_IDAF_DWORD2 (PORT_BASE + 0xcc) +#define RX_IDAF_DWORD3 (PORT_BASE + 0xd0) +#define RX_IDAF_DWORD4 (PORT_BASE + 0xd4) +#define RX_IDAF_DWORD5 (PORT_BASE + 0xd8) +#define RX_IDAF_DWORD6 (PORT_BASE + 0xdc) +#define RXOP_CHECK_CFG_H (PORT_BASE + 0xfc) +#define DONE_RECEIVED_TIME (PORT_BASE + 0x12c) +#define CON_CFG_DRIVER (PORT_BASE + 0x130) +#define PHY_CONFIG2 (PORT_BASE + 0x1a8) +#define PHY_CONFIG2_FORCE_TXDEEMPH_OFF 3 +#define PHY_CONFIG2_FORCE_TXDEEMPH_MSK (0x1 << PHY_CONFIG2_FORCE_TXDEEMPH_OFF) +#define PHY_CONFIG2_TX_TRAIN_COMP_OFF 24 +#define PHY_CONFIG2_TX_TRAIN_COMP_MSK (0x1 << PHY_CONFIG2_TX_TRAIN_COMP_OFF) +#define CHL_INT0 (PORT_BASE + 0x1b0) +#define CHL_INT0_PHYCTRL_NOTRDY_OFF 0 +#define CHL_INT0_PHYCTRL_NOTRDY_MSK (0x1 << CHL_INT0_PHYCTRL_NOTRDY_OFF) +#define CHL_INT0_SN_FAIL_NGR_OFF 2 +#define CHL_INT0_SN_FAIL_NGR_MSK (0x1 << CHL_INT0_SN_FAIL_NGR_OFF) +#define CHL_INT0_DWS_LOST_OFF 4 +#define CHL_INT0_DWS_LOST_MSK (0x1 << CHL_INT0_DWS_LOST_OFF) +#define CHL_INT0_SL_IDAF_FAIL_OFF 10 +#define CHL_INT0_SL_IDAF_FAIL_MSK (0x1 << CHL_INT0_SL_IDAF_FAIL_OFF) +#define CHL_INT0_ID_TIMEOUT_OFF 11 +#define CHL_INT0_ID_TIMEOUT_MSK (0x1 << CHL_INT0_ID_TIMEOUT_OFF) +#define CHL_INT0_SL_OPAF_FAIL_OFF 12 +#define CHL_INT0_SL_OPAF_FAIL_MSK (0x1 << CHL_INT0_SL_OPAF_FAIL_OFF) +#define 
CHL_INT0_SL_PS_FAIL_OFF 21 +#define CHL_INT0_SL_PS_FAIL_MSK (0x1 << CHL_INT0_SL_PS_FAIL_OFF) +#define CHL_INT1 (PORT_BASE + 0x1b4) +#define CHL_INT2 (PORT_BASE + 0x1b8) +#define CHL_INT2_SL_RX_BC_ACK_OFF 2 +#define CHL_INT2_SL_RX_BC_ACK_MSK (0x1 << CHL_INT2_SL_RX_BC_ACK_OFF) +#define CHL_INT2_SL_PHY_ENA_OFF 6 +#define CHL_INT2_SL_PHY_ENA_MSK (0x1 << CHL_INT2_SL_PHY_ENA_OFF) +#define CHL_INT0_MSK (PORT_BASE + 0x1bc) +#define CHL_INT0_MSK_PHYCTRL_NOTRDY_OFF 0 +#define CHL_INT0_MSK_PHYCTRL_NOTRDY_MSK (0x1 << CHL_INT0_MSK_PHYCTRL_NOTRDY_OFF) +#define CHL_INT1_MSK (PORT_BASE + 0x1c0) +#define CHL_INT2_MSK (PORT_BASE + 0x1c4) +#define CHL_INT_COAL_EN (PORT_BASE + 0x1d0) +#define DMA_TX_STATUS (PORT_BASE + 0x2d0) +#define DMA_TX_STATUS_BUSY_OFF 0 +#define DMA_TX_STATUS_BUSY_MSK (0x1 << DMA_TX_STATUS_BUSY_OFF) +#define DMA_RX_STATUS (PORT_BASE + 0x2e8) +#define DMA_RX_STATUS_BUSY_OFF 0 +#define DMA_RX_STATUS_BUSY_MSK (0x1 << DMA_RX_STATUS_BUSY_OFF) + +#define AXI_CFG 0x5100 +#define RESET_VALUE 0x7ffff + +/* HW dma structures */ +/* Delivery queue header */ +/* dw0 */ +#define CMD_HDR_RESP_REPORT_OFF 5 +#define CMD_HDR_RESP_REPORT_MSK 0x20 +#define CMD_HDR_TLR_CTRL_OFF 6 +#define CMD_HDR_TLR_CTRL_MSK 0xc0 +#define CMD_HDR_PORT_OFF 17 +#define CMD_HDR_PORT_MSK 0xe0000 +#define CMD_HDR_PRIORITY_OFF 27 +#define CMD_HDR_PRIORITY_MSK 0x8000000 +#define CMD_HDR_MODE_OFF 28 +#define CMD_HDR_MODE_MSK 0x10000000 +#define CMD_HDR_CMD_OFF 29 +#define CMD_HDR_CMD_MSK 0xe0000000 +/* dw1 */ +#define CMD_HDR_VERIFY_DTL_OFF 10 +#define CMD_HDR_VERIFY_DTL_MSK 0x400 +#define CMD_HDR_SSP_FRAME_TYPE_OFF 13 +#define CMD_HDR_SSP_FRAME_TYPE_MSK 0xe000 +#define CMD_HDR_DEVICE_ID_OFF 16 +#define CMD_HDR_DEVICE_ID_MSK 0xffff0000 +/* dw2 */ +#define CMD_HDR_CFL_OFF 0 +#define CMD_HDR_CFL_MSK 0x1ff +#define CMD_HDR_MRFL_OFF 15 +#define CMD_HDR_MRFL_MSK 0xff8000 +#define CMD_HDR_FIRST_BURST_OFF 25 +#define CMD_HDR_FIRST_BURST_MSK 0x2000000 +/* dw3 */ +#define CMD_HDR_IPTT_OFF 0 +#define CMD_HDR_IPTT_MSK 0xffff +/* dw6 */ +#define CMD_HDR_DATA_SGL_LEN_OFF 16 +#define CMD_HDR_DATA_SGL_LEN_MSK 0xffff0000 + +/* Completion header */ +#define CMPLT_HDR_IPTT_OFF 0 +#define CMPLT_HDR_IPTT_MSK (0xffff << CMPLT_HDR_IPTT_OFF) +#define CMPLT_HDR_CMD_CMPLT_OFF 17 +#define CMPLT_HDR_CMD_CMPLT_MSK (0x1 << CMPLT_HDR_CMD_CMPLT_OFF) +#define CMPLT_HDR_ERR_RCRD_XFRD_OFF 18 +#define CMPLT_HDR_ERR_RCRD_XFRD_MSK (0x1 << CMPLT_HDR_ERR_RCRD_XFRD_OFF) +#define CMPLT_HDR_RSPNS_XFRD_OFF 19 +#define CMPLT_HDR_RSPNS_XFRD_MSK (0x1 << CMPLT_HDR_RSPNS_XFRD_OFF) +#define CMPLT_HDR_IO_CFG_ERR_OFF 27 +#define CMPLT_HDR_IO_CFG_ERR_MSK (0x1 << CMPLT_HDR_IO_CFG_ERR_OFF) + +/* ITCT header */ +/* qw0 */ +#define ITCT_HDR_DEV_TYPE_OFF 0 +#define ITCT_HDR_DEV_TYPE_MSK (0x3ULL << ITCT_HDR_DEV_TYPE_OFF) +#define ITCT_HDR_VALID_OFF 2 +#define ITCT_HDR_VALID_MSK (0x1ULL << ITCT_HDR_VALID_OFF) +#define ITCT_HDR_AWT_CONTROL_OFF 4 +#define ITCT_HDR_AWT_CONTROL_MSK (0x1ULL << ITCT_HDR_AWT_CONTROL_OFF) +#define ITCT_HDR_MAX_CONN_RATE_OFF 5 +#define ITCT_HDR_MAX_CONN_RATE_MSK (0xfULL << ITCT_HDR_MAX_CONN_RATE_OFF) +#define ITCT_HDR_VALID_LINK_NUM_OFF 9 +#define ITCT_HDR_VALID_LINK_NUM_MSK (0xfULL << ITCT_HDR_VALID_LINK_NUM_OFF) +#define ITCT_HDR_PORT_ID_OFF 13 +#define ITCT_HDR_PORT_ID_MSK (0x7ULL << ITCT_HDR_PORT_ID_OFF) +#define ITCT_HDR_SMP_TIMEOUT_OFF 16 +#define ITCT_HDR_SMP_TIMEOUT_MSK (0xffffULL << ITCT_HDR_SMP_TIMEOUT_OFF) +/* qw1 */ +#define ITCT_HDR_MAX_SAS_ADDR_OFF 0 +#define ITCT_HDR_MAX_SAS_ADDR_MSK (0xffffffffffffffff << \ + ITCT_HDR_MAX_SAS_ADDR_OFF) +/* qw2 */ 
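+/*
+ * qw2 of the ITCT packs four 16-bit timeout limits; setup_itct_v1_hw()
+ * below programs them as, e.g.,
+ *	(500ULL << ITCT_HDR_IT_NEXUS_LOSS_TL_OFF)
+ * for the I_T nexus loss timer and 0xff00 for the other three limits.
+ */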
+#define ITCT_HDR_IT_NEXUS_LOSS_TL_OFF 0 +#define ITCT_HDR_IT_NEXUS_LOSS_TL_MSK (0xffffULL << \ + ITCT_HDR_IT_NEXUS_LOSS_TL_OFF) +#define ITCT_HDR_BUS_INACTIVE_TL_OFF 16 +#define ITCT_HDR_BUS_INACTIVE_TL_MSK (0xffffULL << \ + ITCT_HDR_BUS_INACTIVE_TL_OFF) +#define ITCT_HDR_MAX_CONN_TL_OFF 32 +#define ITCT_HDR_MAX_CONN_TL_MSK (0xffffULL << \ + ITCT_HDR_MAX_CONN_TL_OFF) +#define ITCT_HDR_REJ_OPEN_TL_OFF 48 +#define ITCT_HDR_REJ_OPEN_TL_MSK (0xffffULL << \ + ITCT_HDR_REJ_OPEN_TL_OFF) + +/* Err record header */ +#define ERR_HDR_DMA_TX_ERR_TYPE_OFF 0 +#define ERR_HDR_DMA_TX_ERR_TYPE_MSK (0xffff << ERR_HDR_DMA_TX_ERR_TYPE_OFF) +#define ERR_HDR_DMA_RX_ERR_TYPE_OFF 16 +#define ERR_HDR_DMA_RX_ERR_TYPE_MSK (0xffff << ERR_HDR_DMA_RX_ERR_TYPE_OFF) + +struct hisi_sas_complete_v1_hdr { + __le32 data; +}; + +struct hisi_sas_err_record_v1 { + /* dw0 */ + __le32 dma_err_type; + + /* dw1 */ + __le32 trans_tx_fail_type; + + /* dw2 */ + __le32 trans_rx_fail_type; + + /* dw3 */ + u32 rsvd; +}; + +enum { + HISI_SAS_PHY_BCAST_ACK = 0, + HISI_SAS_PHY_SL_PHY_ENABLED, + HISI_SAS_PHY_INT_ABNORMAL, + HISI_SAS_PHY_INT_NR +}; + +enum { + DMA_TX_ERR_BASE = 0x0, + DMA_RX_ERR_BASE = 0x100, + TRANS_TX_FAIL_BASE = 0x200, + TRANS_RX_FAIL_BASE = 0x300, + + /* dma tx */ + DMA_TX_DIF_CRC_ERR = DMA_TX_ERR_BASE, /* 0x0 */ + DMA_TX_DIF_APP_ERR, /* 0x1 */ + DMA_TX_DIF_RPP_ERR, /* 0x2 */ + DMA_TX_AXI_BUS_ERR, /* 0x3 */ + DMA_TX_DATA_SGL_OVERFLOW_ERR, /* 0x4 */ + DMA_TX_DIF_SGL_OVERFLOW_ERR, /* 0x5 */ + DMA_TX_UNEXP_XFER_RDY_ERR, /* 0x6 */ + DMA_TX_XFER_RDY_OFFSET_ERR, /* 0x7 */ + DMA_TX_DATA_UNDERFLOW_ERR, /* 0x8 */ + DMA_TX_XFER_RDY_LENGTH_OVERFLOW_ERR, /* 0x9 */ + + /* dma rx */ + DMA_RX_BUFFER_ECC_ERR = DMA_RX_ERR_BASE, /* 0x100 */ + DMA_RX_DIF_CRC_ERR, /* 0x101 */ + DMA_RX_DIF_APP_ERR, /* 0x102 */ + DMA_RX_DIF_RPP_ERR, /* 0x103 */ + DMA_RX_RESP_BUFFER_OVERFLOW_ERR, /* 0x104 */ + DMA_RX_AXI_BUS_ERR, /* 0x105 */ + DMA_RX_DATA_SGL_OVERFLOW_ERR, /* 0x106 */ + DMA_RX_DIF_SGL_OVERFLOW_ERR, /* 0x107 */ + DMA_RX_DATA_OFFSET_ERR, /* 0x108 */ + DMA_RX_UNEXP_RX_DATA_ERR, /* 0x109 */ + DMA_RX_DATA_OVERFLOW_ERR, /* 0x10a */ + DMA_RX_DATA_UNDERFLOW_ERR, /* 0x10b */ + DMA_RX_UNEXP_RETRANS_RESP_ERR, /* 0x10c */ + + /* trans tx */ + TRANS_TX_RSVD0_ERR = TRANS_TX_FAIL_BASE, /* 0x200 */ + TRANS_TX_PHY_NOT_ENABLE_ERR, /* 0x201 */ + TRANS_TX_OPEN_REJCT_WRONG_DEST_ERR, /* 0x202 */ + TRANS_TX_OPEN_REJCT_ZONE_VIOLATION_ERR, /* 0x203 */ + TRANS_TX_OPEN_REJCT_BY_OTHER_ERR, /* 0x204 */ + TRANS_TX_RSVD1_ERR, /* 0x205 */ + TRANS_TX_OPEN_REJCT_AIP_TIMEOUT_ERR, /* 0x206 */ + TRANS_TX_OPEN_REJCT_STP_BUSY_ERR, /* 0x207 */ + TRANS_TX_OPEN_REJCT_PROTOCOL_NOT_SUPPORT_ERR, /* 0x208 */ + TRANS_TX_OPEN_REJCT_RATE_NOT_SUPPORT_ERR, /* 0x209 */ + TRANS_TX_OPEN_REJCT_BAD_DEST_ERR, /* 0x20a */ + TRANS_TX_OPEN_BREAK_RECEIVE_ERR, /* 0x20b */ + TRANS_TX_LOW_PHY_POWER_ERR, /* 0x20c */ + TRANS_TX_OPEN_REJCT_PATHWAY_BLOCKED_ERR, /* 0x20d */ + TRANS_TX_OPEN_TIMEOUT_ERR, /* 0x20e */ + TRANS_TX_OPEN_REJCT_NO_DEST_ERR, /* 0x20f */ + TRANS_TX_OPEN_RETRY_ERR, /* 0x210 */ + TRANS_TX_RSVD2_ERR, /* 0x211 */ + TRANS_TX_BREAK_TIMEOUT_ERR, /* 0x212 */ + TRANS_TX_BREAK_REQUEST_ERR, /* 0x213 */ + TRANS_TX_BREAK_RECEIVE_ERR, /* 0x214 */ + TRANS_TX_CLOSE_TIMEOUT_ERR, /* 0x215 */ + TRANS_TX_CLOSE_NORMAL_ERR, /* 0x216 */ + TRANS_TX_CLOSE_PHYRESET_ERR, /* 0x217 */ + TRANS_TX_WITH_CLOSE_DWS_TIMEOUT_ERR, /* 0x218 */ + TRANS_TX_WITH_CLOSE_COMINIT_ERR, /* 0x219 */ + TRANS_TX_NAK_RECEIVE_ERR, /* 0x21a */ + TRANS_TX_ACK_NAK_TIMEOUT_ERR, /* 0x21b */ + TRANS_TX_CREDIT_TIMEOUT_ERR, /* 0x21c */ + 
TRANS_TX_IPTT_CONFLICT_ERR, /* 0x21d */ + TRANS_TX_TXFRM_TYPE_ERR, /* 0x21e */ + TRANS_TX_TXSMP_LENGTH_ERR, /* 0x21f */ + + /* trans rx */ + TRANS_RX_FRAME_CRC_ERR = TRANS_RX_FAIL_BASE, /* 0x300 */ + TRANS_RX_FRAME_DONE_ERR, /* 0x301 */ + TRANS_RX_FRAME_ERRPRM_ERR, /* 0x302 */ + TRANS_RX_FRAME_NO_CREDIT_ERR, /* 0x303 */ + TRANS_RX_RSVD0_ERR, /* 0x304 */ + TRANS_RX_FRAME_OVERRUN_ERR, /* 0x305 */ + TRANS_RX_FRAME_NO_EOF_ERR, /* 0x306 */ + TRANS_RX_LINK_BUF_OVERRUN_ERR, /* 0x307 */ + TRANS_RX_BREAK_TIMEOUT_ERR, /* 0x308 */ + TRANS_RX_BREAK_REQUEST_ERR, /* 0x309 */ + TRANS_RX_BREAK_RECEIVE_ERR, /* 0x30a */ + TRANS_RX_CLOSE_TIMEOUT_ERR, /* 0x30b */ + TRANS_RX_CLOSE_NORMAL_ERR, /* 0x30c */ + TRANS_RX_CLOSE_PHYRESET_ERR, /* 0x30d */ + TRANS_RX_WITH_CLOSE_DWS_TIMEOUT_ERR, /* 0x30e */ + TRANS_RX_WITH_CLOSE_COMINIT_ERR, /* 0x30f */ + TRANS_RX_DATA_LENGTH0_ERR, /* 0x310 */ + TRANS_RX_BAD_HASH_ERR, /* 0x311 */ + TRANS_RX_XRDY_ZERO_ERR, /* 0x312 */ + TRANS_RX_SSP_FRAME_LEN_ERR, /* 0x313 */ + TRANS_RX_TRANS_RX_RSVD1_ERR, /* 0x314 */ + TRANS_RX_NO_BALANCE_ERR, /* 0x315 */ + TRANS_RX_TRANS_RX_RSVD2_ERR, /* 0x316 */ + TRANS_RX_TRANS_RX_RSVD3_ERR, /* 0x317 */ + TRANS_RX_BAD_FRAME_TYPE_ERR, /* 0x318 */ + TRANS_RX_SMP_FRAME_LEN_ERR, /* 0x319 */ + TRANS_RX_SMP_RESP_TIMEOUT_ERR, /* 0x31a */ +}; + +#define HISI_SAS_COMMAND_ENTRIES_V1_HW 8192 + +#define HISI_SAS_PHY_MAX_INT_NR (HISI_SAS_PHY_INT_NR * HISI_SAS_MAX_PHYS) +#define HISI_SAS_CQ_MAX_INT_NR (HISI_SAS_MAX_QUEUES) +#define HISI_SAS_FATAL_INT_NR (2) + +#define HISI_SAS_MAX_INT_NR \ + (HISI_SAS_PHY_MAX_INT_NR + HISI_SAS_CQ_MAX_INT_NR +\ + HISI_SAS_FATAL_INT_NR) + +static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off) +{ + void __iomem *regs = hisi_hba->regs + off; + + return readl(regs); +} + +static u32 hisi_sas_read32_relaxed(struct hisi_hba *hisi_hba, u32 off) +{ + void __iomem *regs = hisi_hba->regs + off; + + return readl_relaxed(regs); +} + +static void hisi_sas_write32(struct hisi_hba *hisi_hba, + u32 off, u32 val) +{ + void __iomem *regs = hisi_hba->regs + off; + + writel(val, regs); +} + +static void hisi_sas_phy_write32(struct hisi_hba *hisi_hba, + int phy_no, u32 off, u32 val) +{ + void __iomem *regs = hisi_hba->regs + (0x400 * phy_no) + off; + + writel(val, regs); +} + +static u32 hisi_sas_phy_read32(struct hisi_hba *hisi_hba, + int phy_no, u32 off) +{ + void __iomem *regs = hisi_hba->regs + (0x400 * phy_no) + off; + + return readl(regs); +} + +static void config_phy_opt_mode_v1_hw(struct hisi_hba *hisi_hba, int phy_no) +{ + u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG); + + cfg &= ~PHY_CFG_DC_OPT_MSK; + cfg |= 1 << PHY_CFG_DC_OPT_OFF; + hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg); +} + +static void config_tx_tfe_autoneg_v1_hw(struct hisi_hba *hisi_hba, int phy_no) +{ + u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CONFIG2); + + cfg &= ~PHY_CONFIG2_FORCE_TXDEEMPH_MSK; + hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CONFIG2, cfg); +} + +static void config_id_frame_v1_hw(struct hisi_hba *hisi_hba, int phy_no) +{ + struct sas_identify_frame identify_frame; + u32 *identify_buffer; + + memset(&identify_frame, 0, sizeof(identify_frame)); + identify_frame.dev_type = SAS_END_DEVICE; + identify_frame.frame_type = 0; + identify_frame._un1 = 1; + identify_frame.initiator_bits = SAS_PROTOCOL_ALL; + identify_frame.target_bits = SAS_PROTOCOL_NONE; + memcpy(&identify_frame._un4_11[0], hisi_hba->sas_addr, SAS_ADDR_SIZE); + memcpy(&identify_frame.sas_addr[0], hisi_hba->sas_addr, SAS_ADDR_SIZE); + identify_frame.phy_id = phy_no; + 
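+	/*
+	 * The IDENTIFY address frame built above is serialized into the
+	 * TX_ID_DWORD0-5 registers below; note that only dwords 0 and 5
+	 * are byte-swapped, while the middle dwords are written unswapped
+	 * and pairwise reordered, presumably matching the byte order the
+	 * hardware expects.
+	 */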
+	identify_buffer = (u32 *)(&identify_frame);
+
+	hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD0,
+			__swab32(identify_buffer[0]));
+	hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD1,
+			identify_buffer[2]);
+	hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD2,
+			identify_buffer[1]);
+	hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD3,
+			identify_buffer[4]);
+	hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD4,
+			identify_buffer[3]);
+	hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD5,
+			__swab32(identify_buffer[5]));
+}
+
+static void init_id_frame_v1_hw(struct hisi_hba *hisi_hba)
+{
+	int i;
+
+	for (i = 0; i < hisi_hba->n_phy; i++)
+		config_id_frame_v1_hw(hisi_hba, i);
+}
+
+static void setup_itct_v1_hw(struct hisi_hba *hisi_hba,
+			     struct hisi_sas_device *sas_dev)
+{
+	struct domain_device *device = sas_dev->sas_device;
+	struct device *dev = &hisi_hba->pdev->dev;
+	u64 qw0, device_id = sas_dev->device_id;
+	struct hisi_sas_itct *itct = &hisi_hba->itct[device_id];
+
+	memset(itct, 0, sizeof(*itct));
+
+	/* qw0 */
+	qw0 = 0;
+	switch (sas_dev->dev_type) {
+	case SAS_END_DEVICE:
+	case SAS_EDGE_EXPANDER_DEVICE:
+	case SAS_FANOUT_EXPANDER_DEVICE:
+		qw0 = HISI_SAS_DEV_TYPE_SSP << ITCT_HDR_DEV_TYPE_OFF;
+		break;
+	default:
+		dev_warn(dev, "setup itct: unsupported dev type (%d)\n",
+			 sas_dev->dev_type);
+	}
+
+	qw0 |= ((1 << ITCT_HDR_VALID_OFF) |
+		(1 << ITCT_HDR_AWT_CONTROL_OFF) |
+		(device->max_linkrate << ITCT_HDR_MAX_CONN_RATE_OFF) |
+		(1 << ITCT_HDR_VALID_LINK_NUM_OFF) |
+		(device->port->id << ITCT_HDR_PORT_ID_OFF));
+	itct->qw0 = cpu_to_le64(qw0);
+
+	/* qw1 */
+	memcpy(&itct->sas_addr, device->sas_addr, SAS_ADDR_SIZE);
+	itct->sas_addr = __swab64(itct->sas_addr);
+
+	/* qw2 */
+	itct->qw2 = cpu_to_le64((500ULL << ITCT_HDR_IT_NEXUS_LOSS_TL_OFF) |
+				(0xff00ULL << ITCT_HDR_BUS_INACTIVE_TL_OFF) |
+				(0xff00ULL << ITCT_HDR_MAX_CONN_TL_OFF) |
+				(0xff00ULL << ITCT_HDR_REJ_OPEN_TL_OFF));
+}
+
+static void free_device_v1_hw(struct hisi_hba *hisi_hba,
+			      struct hisi_sas_device *sas_dev)
+{
+	u64 dev_id = sas_dev->device_id;
+	struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id];
+	u64 qw0;
+	u32 reg_val = hisi_sas_read32(hisi_hba, CFG_AGING_TIME);
+
+	reg_val |= CFG_AGING_TIME_ITCT_REL_MSK;
+	hisi_sas_write32(hisi_hba, CFG_AGING_TIME, reg_val);
+
+	/* free itct */
+	udelay(1);
+	reg_val = hisi_sas_read32(hisi_hba, CFG_AGING_TIME);
+	reg_val &= ~CFG_AGING_TIME_ITCT_REL_MSK;
+	hisi_sas_write32(hisi_hba, CFG_AGING_TIME, reg_val);
+
+	qw0 = le64_to_cpu(itct->qw0);
+	qw0 &= ~ITCT_HDR_VALID_MSK;
+	itct->qw0 = cpu_to_le64(qw0);
+}
+
+static int reset_hw_v1_hw(struct hisi_hba *hisi_hba)
+{
+	int i;
+	unsigned long end_time;
+	u32 val;
+	struct device *dev = &hisi_hba->pdev->dev;
+
+	for (i = 0; i < hisi_hba->n_phy; i++) {
+		u32 phy_ctrl = hisi_sas_phy_read32(hisi_hba, i, PHY_CTRL);
+
+		phy_ctrl |= PHY_CTRL_RESET_MSK;
+		hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL, phy_ctrl);
+	}
+	msleep(1); /* 50us is all the hw needs; msleep(1) is comfortably longer */
+
+	/* Ensure DMA tx & rx idle */
+	for (i = 0; i < hisi_hba->n_phy; i++) {
+		u32 dma_tx_status, dma_rx_status;
+
+		end_time = jiffies + msecs_to_jiffies(1000);
+
+		while (1) {
+			dma_tx_status = hisi_sas_phy_read32(hisi_hba, i,
+							    DMA_TX_STATUS);
+			dma_rx_status = hisi_sas_phy_read32(hisi_hba, i,
+							    DMA_RX_STATUS);
+
+			if (!(dma_tx_status & DMA_TX_STATUS_BUSY_MSK) &&
+			    !(dma_rx_status & DMA_RX_STATUS_BUSY_MSK))
+				break;
+
+			msleep(20);
+			if (time_after(jiffies, end_time))
+				return -EIO;
+		}
+	}
+
+	/* Ensure axi bus idle */
+	end_time = jiffies + msecs_to_jiffies(1000);
+	while
(1) { + u32 axi_status = + hisi_sas_read32(hisi_hba, AXI_CFG); + + if (axi_status == 0) + break; + + msleep(20); + if (time_after(jiffies, end_time)) + return -EIO; + } + + if (ACPI_HANDLE(dev)) { + acpi_status s; + + s = acpi_evaluate_object(ACPI_HANDLE(dev), "_RST", NULL, NULL); + if (ACPI_FAILURE(s)) { + dev_err(dev, "Reset failed\n"); + return -EIO; + } + } else if (hisi_hba->ctrl) { + /* Apply reset and disable clock */ + /* clk disable reg is offset by +4 bytes from clk enable reg */ + regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_reset_reg, + RESET_VALUE); + regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_clock_ena_reg + 4, + RESET_VALUE); + msleep(1); + regmap_read(hisi_hba->ctrl, hisi_hba->ctrl_reset_sts_reg, &val); + if (RESET_VALUE != (val & RESET_VALUE)) { + dev_err(dev, "Reset failed\n"); + return -EIO; + } + + /* De-reset and enable clock */ + /* deassert rst reg is offset by +4 bytes from assert reg */ + regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_reset_reg + 4, + RESET_VALUE); + regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_clock_ena_reg, + RESET_VALUE); + msleep(1); + regmap_read(hisi_hba->ctrl, hisi_hba->ctrl_reset_sts_reg, &val); + if (val & RESET_VALUE) { + dev_err(dev, "De-reset failed\n"); + return -EIO; + } + } else + dev_warn(dev, "no reset method\n"); + + return 0; +} + +static void init_reg_v1_hw(struct hisi_hba *hisi_hba) +{ + int i; + + /* Global registers init*/ + hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, + (u32)((1ULL << hisi_hba->queue_count) - 1)); + hisi_sas_write32(hisi_hba, HGC_TRANS_TASK_CNT_LIMIT, 0x11); + hisi_sas_write32(hisi_hba, DEVICE_MSG_WORK_MODE, 0x1); + hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x1ff); + hisi_sas_write32(hisi_hba, HGC_ERR_STAT_EN, 0x401); + hisi_sas_write32(hisi_hba, CFG_1US_TIMER_TRSH, 0x64); + hisi_sas_write32(hisi_hba, HGC_GET_ITV_TIME, 0x1); + hisi_sas_write32(hisi_hba, I_T_NEXUS_LOSS_TIME, 0x64); + hisi_sas_write32(hisi_hba, BUS_INACTIVE_LIMIT_TIME, 0x2710); + hisi_sas_write32(hisi_hba, REJECT_TO_OPEN_LIMIT_TIME, 0x1); + hisi_sas_write32(hisi_hba, CFG_AGING_TIME, 0x7a12); + hisi_sas_write32(hisi_hba, HGC_DFX_CFG2, 0x9c40); + hisi_sas_write32(hisi_hba, FIS_LIST_BADDR_L, 0x2); + hisi_sas_write32(hisi_hba, INT_COAL_EN, 0xc); + hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x186a0); + hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 1); + hisi_sas_write32(hisi_hba, ENT_INT_COAL_TIME, 0x1); + hisi_sas_write32(hisi_hba, ENT_INT_COAL_CNT, 0x1); + hisi_sas_write32(hisi_hba, OQ_INT_SRC, 0xffffffff); + hisi_sas_write32(hisi_hba, OQ_INT_SRC_MSK, 0); + hisi_sas_write32(hisi_hba, ENT_INT_SRC1, 0xffffffff); + hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0); + hisi_sas_write32(hisi_hba, ENT_INT_SRC2, 0xffffffff); + hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0); + hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0); + hisi_sas_write32(hisi_hba, AXI_AHB_CLK_CFG, 0x2); + hisi_sas_write32(hisi_hba, CFG_SAS_CONFIG, 0x22000000); + + for (i = 0; i < hisi_hba->n_phy; i++) { + hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE, 0x88a); + hisi_sas_phy_write32(hisi_hba, i, PHY_CONFIG2, 0x7c080); + hisi_sas_phy_write32(hisi_hba, i, PHY_RATE_NEGO, 0x415ee00); + hisi_sas_phy_write32(hisi_hba, i, PHY_PCN, 0x80a80000); + hisi_sas_phy_write32(hisi_hba, i, SL_TOUT_CFG, 0x7d7d7d7d); + hisi_sas_phy_write32(hisi_hba, i, DONE_RECEIVED_TIME, 0x0); + hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000); + hisi_sas_phy_write32(hisi_hba, i, DONE_RECEIVED_TIME, 0); + hisi_sas_phy_write32(hisi_hba, i, CON_CFG_DRIVER, 0x13f0a); + 
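+		/*
+		 * Fixed per-phy init values; e.g. 0x88a written to
+		 * PROG_PHY_LINK_RATE above appears to encode OOB/min rate
+		 * 1.5 Gbps (0x8) and max rate 6.0 Gbps (0xa) in the
+		 * PROG_PHY_LINK_RATE_{OOB,MIN,MAX} fields.
+		 */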
hisi_sas_phy_write32(hisi_hba, i, CHL_INT_COAL_EN, 3); + hisi_sas_phy_write32(hisi_hba, i, DONE_RECEIVED_TIME, 8); + } + + for (i = 0; i < hisi_hba->queue_count; i++) { + /* Delivery queue */ + hisi_sas_write32(hisi_hba, + DLVRY_Q_0_BASE_ADDR_HI + (i * 0x14), + upper_32_bits(hisi_hba->cmd_hdr_dma[i])); + + hisi_sas_write32(hisi_hba, + DLVRY_Q_0_BASE_ADDR_LO + (i * 0x14), + lower_32_bits(hisi_hba->cmd_hdr_dma[i])); + + hisi_sas_write32(hisi_hba, + DLVRY_Q_0_DEPTH + (i * 0x14), + HISI_SAS_QUEUE_SLOTS); + + /* Completion queue */ + hisi_sas_write32(hisi_hba, + COMPL_Q_0_BASE_ADDR_HI + (i * 0x14), + upper_32_bits(hisi_hba->complete_hdr_dma[i])); + + hisi_sas_write32(hisi_hba, + COMPL_Q_0_BASE_ADDR_LO + (i * 0x14), + lower_32_bits(hisi_hba->complete_hdr_dma[i])); + + hisi_sas_write32(hisi_hba, COMPL_Q_0_DEPTH + (i * 0x14), + HISI_SAS_QUEUE_SLOTS); + } + + /* itct */ + hisi_sas_write32(hisi_hba, ITCT_BASE_ADDR_LO, + lower_32_bits(hisi_hba->itct_dma)); + + hisi_sas_write32(hisi_hba, ITCT_BASE_ADDR_HI, + upper_32_bits(hisi_hba->itct_dma)); + + /* iost */ + hisi_sas_write32(hisi_hba, IOST_BASE_ADDR_LO, + lower_32_bits(hisi_hba->iost_dma)); + + hisi_sas_write32(hisi_hba, IOST_BASE_ADDR_HI, + upper_32_bits(hisi_hba->iost_dma)); + + /* breakpoint */ + hisi_sas_write32(hisi_hba, BROKEN_MSG_ADDR_LO, + lower_32_bits(hisi_hba->breakpoint_dma)); + + hisi_sas_write32(hisi_hba, BROKEN_MSG_ADDR_HI, + upper_32_bits(hisi_hba->breakpoint_dma)); +} + +static int hw_init_v1_hw(struct hisi_hba *hisi_hba) +{ + struct device *dev = &hisi_hba->pdev->dev; + int rc; + + rc = reset_hw_v1_hw(hisi_hba); + if (rc) { + dev_err(dev, "hisi_sas_reset_hw failed, rc=%d", rc); + return rc; + } + + msleep(100); + init_reg_v1_hw(hisi_hba); + + init_id_frame_v1_hw(hisi_hba); + + return 0; +} + +static void enable_phy_v1_hw(struct hisi_hba *hisi_hba, int phy_no) +{ + u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG); + + cfg |= PHY_CFG_ENA_MSK; + hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg); +} + +static void disable_phy_v1_hw(struct hisi_hba *hisi_hba, int phy_no) +{ + u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG); + + cfg &= ~PHY_CFG_ENA_MSK; + hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg); +} + +static void start_phy_v1_hw(struct hisi_hba *hisi_hba, int phy_no) +{ + config_id_frame_v1_hw(hisi_hba, phy_no); + config_phy_opt_mode_v1_hw(hisi_hba, phy_no); + config_tx_tfe_autoneg_v1_hw(hisi_hba, phy_no); + enable_phy_v1_hw(hisi_hba, phy_no); +} + +static void stop_phy_v1_hw(struct hisi_hba *hisi_hba, int phy_no) +{ + disable_phy_v1_hw(hisi_hba, phy_no); +} + +static void phy_hard_reset_v1_hw(struct hisi_hba *hisi_hba, int phy_no) +{ + stop_phy_v1_hw(hisi_hba, phy_no); + msleep(100); + start_phy_v1_hw(hisi_hba, phy_no); +} + +static void start_phys_v1_hw(unsigned long data) +{ + struct hisi_hba *hisi_hba = (struct hisi_hba *)data; + int i; + + for (i = 0; i < hisi_hba->n_phy; i++) { + hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x12a); + start_phy_v1_hw(hisi_hba, i); + } +} + +static void phys_init_v1_hw(struct hisi_hba *hisi_hba) +{ + int i; + struct timer_list *timer = &hisi_hba->timer; + + for (i = 0; i < hisi_hba->n_phy; i++) { + hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x6a); + hisi_sas_phy_read32(hisi_hba, i, CHL_INT2_MSK); + } + + setup_timer(timer, start_phys_v1_hw, (unsigned long)hisi_hba); + mod_timer(timer, jiffies + HZ); +} + +static void sl_notify_v1_hw(struct hisi_hba *hisi_hba, int phy_no) +{ + u32 sl_control; + + sl_control = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL); + 
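+	/*
+	 * Pulse the notify-enable bit: set it, hold it for ~1 ms, then
+	 * clear it again, presumably so the SL layer transmits a single
+	 * NOTIFY primitive sequence.
+	 */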
+	sl_control |= SL_CONTROL_NOTIFY_EN_MSK;
+	hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control);
+	msleep(1);
+	sl_control = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
+	sl_control &= ~SL_CONTROL_NOTIFY_EN_MSK;
+	hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control);
+}
+
+static int get_wideport_bitmap_v1_hw(struct hisi_hba *hisi_hba, int port_id)
+{
+	int i, bitmap = 0;
+	u32 phy_port_num_ma = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA);
+
+	for (i = 0; i < hisi_hba->n_phy; i++)
+		if (((phy_port_num_ma >> (i * 4)) & 0xf) == port_id)
+			bitmap |= 1 << i;
+
+	return bitmap;
+}
+
+/*
+ * This function allocates across all queues to load balance.
+ * Slots are allocated from queues in a round-robin fashion.
+ *
+ * The callpath to this function and up to writing the write
+ * queue pointer should be safe from interruption.
+ */
+static int get_free_slot_v1_hw(struct hisi_hba *hisi_hba, int *q, int *s)
+{
+	struct device *dev = &hisi_hba->pdev->dev;
+	u32 r, w;
+	int queue = hisi_hba->queue;
+
+	while (1) {
+		w = hisi_sas_read32_relaxed(hisi_hba,
+				DLVRY_Q_0_WR_PTR + (queue * 0x14));
+		r = hisi_sas_read32_relaxed(hisi_hba,
+				DLVRY_Q_0_RD_PTR + (queue * 0x14));
+		if (r == (w + 1) % HISI_SAS_QUEUE_SLOTS) {
+			queue = (queue + 1) % hisi_hba->queue_count;
+			if (queue == hisi_hba->queue) {
+				dev_warn(dev, "could not find free slot\n");
+				return -EAGAIN;
+			}
+			continue;
+		}
+		break;
+	}
+	hisi_hba->queue = (queue + 1) % hisi_hba->queue_count;
+	*q = queue;
+	*s = w;
+	return 0;
+}
+
+static void start_delivery_v1_hw(struct hisi_hba *hisi_hba)
+{
+	int dlvry_queue = hisi_hba->slot_prep->dlvry_queue;
+	int dlvry_queue_slot = hisi_hba->slot_prep->dlvry_queue_slot;
+
+	hisi_sas_write32(hisi_hba,
+			 DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14),
+			 ++dlvry_queue_slot % HISI_SAS_QUEUE_SLOTS);
+}
+
+static int prep_prd_sge_v1_hw(struct hisi_hba *hisi_hba,
+			      struct hisi_sas_slot *slot,
+			      struct hisi_sas_cmd_hdr *hdr,
+			      struct scatterlist *scatter,
+			      int n_elem)
+{
+	struct device *dev = &hisi_hba->pdev->dev;
+	struct scatterlist *sg;
+	int i;
+
+	if (n_elem > HISI_SAS_SGE_PAGE_CNT) {
+		dev_err(dev, "prd err: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT",
+			n_elem);
+		return -EINVAL;
+	}
+
+	slot->sge_page = dma_pool_alloc(hisi_hba->sge_page_pool, GFP_ATOMIC,
+					&slot->sge_page_dma);
+	if (!slot->sge_page)
+		return -ENOMEM;
+
+	for_each_sg(scatter, sg, n_elem, i) {
+		struct hisi_sas_sge *entry = &slot->sge_page->sge[i];
+
+		entry->addr = cpu_to_le64(sg_dma_address(sg));
+		entry->page_ctrl_0 = entry->page_ctrl_1 = 0;
+		entry->data_len = cpu_to_le32(sg_dma_len(sg));
+		entry->data_off = 0;
+	}
+
+	hdr->prd_table_addr = cpu_to_le64(slot->sge_page_dma);
+
+	hdr->sg_len = cpu_to_le32(n_elem << CMD_HDR_DATA_SGL_LEN_OFF);
+
+	return 0;
+}
+
+static int prep_smp_v1_hw(struct hisi_hba *hisi_hba,
+			  struct hisi_sas_slot *slot)
+{
+	struct sas_task *task = slot->task;
+	struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
+	struct domain_device *device = task->dev;
+	struct device *dev = &hisi_hba->pdev->dev;
+	struct hisi_sas_port *port = slot->port;
+	struct scatterlist *sg_req, *sg_resp;
+	struct hisi_sas_device *sas_dev = device->lldd_dev;
+	dma_addr_t req_dma_addr;
+	unsigned int req_len, resp_len;
+	int elem, rc;
+
+	/*
+	 * DMA-map SMP request, response buffers
+	 */
+	/* req */
+	sg_req = &task->smp_task.smp_req;
+	elem = dma_map_sg(dev, sg_req, 1, DMA_TO_DEVICE);
+	if (!elem)
+		return -ENOMEM;
+	req_len = sg_dma_len(sg_req);
+	req_dma_addr = sg_dma_address(sg_req);
+
+	/* resp */
+	sg_resp = &task->smp_task.smp_resp;
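+	/*
+	 * Note that both mappings are single-entry, and the request and
+	 * response lengths must be dword-aligned, which is checked once
+	 * both mappings have succeeded.
+	 */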
+ elem = dma_map_sg(dev, sg_resp, 1, DMA_FROM_DEVICE); + if (!elem) { + rc = -ENOMEM; + goto err_out_req; + } + resp_len = sg_dma_len(sg_resp); + if ((req_len & 0x3) || (resp_len & 0x3)) { + rc = -EINVAL; + goto err_out_resp; + } + + /* create header */ + /* dw0 */ + hdr->dw0 = cpu_to_le32((port->id << CMD_HDR_PORT_OFF) | + (1 << CMD_HDR_PRIORITY_OFF) | /* high pri */ + (1 << CMD_HDR_MODE_OFF) | /* ini mode */ + (2 << CMD_HDR_CMD_OFF)); /* smp */ + + /* map itct entry */ + hdr->dw1 = cpu_to_le32(sas_dev->device_id << CMD_HDR_DEVICE_ID_OFF); + + /* dw2 */ + hdr->dw2 = cpu_to_le32((((req_len-4)/4) << CMD_HDR_CFL_OFF) | + (HISI_SAS_MAX_SMP_RESP_SZ/4 << + CMD_HDR_MRFL_OFF)); + + hdr->transfer_tags = cpu_to_le32(slot->idx << CMD_HDR_IPTT_OFF); + + hdr->cmd_table_addr = cpu_to_le64(req_dma_addr); + hdr->sts_buffer_addr = cpu_to_le64(slot->status_buffer_dma); + + return 0; + +err_out_resp: + dma_unmap_sg(dev, &slot->task->smp_task.smp_resp, 1, + DMA_FROM_DEVICE); +err_out_req: + dma_unmap_sg(dev, &slot->task->smp_task.smp_req, 1, + DMA_TO_DEVICE); + return rc; +} + +static int prep_ssp_v1_hw(struct hisi_hba *hisi_hba, + struct hisi_sas_slot *slot, int is_tmf, + struct hisi_sas_tmf_task *tmf) +{ + struct sas_task *task = slot->task; + struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr; + struct domain_device *device = task->dev; + struct hisi_sas_device *sas_dev = device->lldd_dev; + struct hisi_sas_port *port = slot->port; + struct sas_ssp_task *ssp_task = &task->ssp_task; + struct scsi_cmnd *scsi_cmnd = ssp_task->cmd; + int has_data = 0, rc, priority = is_tmf; + u8 *buf_cmd, fburst = 0; + u32 dw1, dw2; + + /* create header */ + hdr->dw0 = cpu_to_le32((1 << CMD_HDR_RESP_REPORT_OFF) | + (0x2 << CMD_HDR_TLR_CTRL_OFF) | + (port->id << CMD_HDR_PORT_OFF) | + (priority << CMD_HDR_PRIORITY_OFF) | + (1 << CMD_HDR_MODE_OFF) | /* ini mode */ + (1 << CMD_HDR_CMD_OFF)); /* ssp */ + + dw1 = 1 << CMD_HDR_VERIFY_DTL_OFF; + + if (is_tmf) { + dw1 |= 3 << CMD_HDR_SSP_FRAME_TYPE_OFF; + } else { + switch (scsi_cmnd->sc_data_direction) { + case DMA_TO_DEVICE: + dw1 |= 2 << CMD_HDR_SSP_FRAME_TYPE_OFF; + has_data = 1; + break; + case DMA_FROM_DEVICE: + dw1 |= 1 << CMD_HDR_SSP_FRAME_TYPE_OFF; + has_data = 1; + break; + default: + dw1 |= 0 << CMD_HDR_SSP_FRAME_TYPE_OFF; + } + } + + /* map itct entry */ + dw1 |= sas_dev->device_id << CMD_HDR_DEVICE_ID_OFF; + hdr->dw1 = cpu_to_le32(dw1); + + if (is_tmf) { + dw2 = ((sizeof(struct ssp_tmf_iu) + + sizeof(struct ssp_frame_hdr)+3)/4) << + CMD_HDR_CFL_OFF; + } else { + dw2 = ((sizeof(struct ssp_command_iu) + + sizeof(struct ssp_frame_hdr)+3)/4) << + CMD_HDR_CFL_OFF; + } + + dw2 |= (HISI_SAS_MAX_SSP_RESP_SZ/4) << CMD_HDR_MRFL_OFF; + + hdr->transfer_tags = cpu_to_le32(slot->idx << CMD_HDR_IPTT_OFF); + + if (has_data) { + rc = prep_prd_sge_v1_hw(hisi_hba, slot, hdr, task->scatter, + slot->n_elem); + if (rc) + return rc; + } + + hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len); + hdr->cmd_table_addr = cpu_to_le64(slot->command_table_dma); + hdr->sts_buffer_addr = cpu_to_le64(slot->status_buffer_dma); + + buf_cmd = slot->command_table + sizeof(struct ssp_frame_hdr); + if (task->ssp_task.enable_first_burst) { + fburst = (1 << 7); + dw2 |= 1 << CMD_HDR_FIRST_BURST_OFF; + } + hdr->dw2 = cpu_to_le32(dw2); + + memcpy(buf_cmd, &task->ssp_task.LUN, 8); + if (!is_tmf) { + buf_cmd[9] = fburst | task->ssp_task.task_attr | + (task->ssp_task.task_prio << 3); + memcpy(buf_cmd + 12, task->ssp_task.cmd->cmnd, + task->ssp_task.cmd->cmd_len); + } else { + buf_cmd[10] = tmf->tmf; + switch 
(tmf->tmf) { + case TMF_ABORT_TASK: + case TMF_QUERY_TASK: + buf_cmd[12] = + (tmf->tag_of_task_to_be_managed >> 8) & 0xff; + buf_cmd[13] = + tmf->tag_of_task_to_be_managed & 0xff; + break; + default: + break; + } + } + + return 0; +} + +/* by default, task resp is complete */ +static void slot_err_v1_hw(struct hisi_hba *hisi_hba, + struct sas_task *task, + struct hisi_sas_slot *slot) +{ + struct task_status_struct *ts = &task->task_status; + struct hisi_sas_err_record_v1 *err_record = slot->status_buffer; + struct device *dev = &hisi_hba->pdev->dev; + + switch (task->task_proto) { + case SAS_PROTOCOL_SSP: + { + int error = -1; + u32 dma_err_type = cpu_to_le32(err_record->dma_err_type); + u32 dma_tx_err_type = ((dma_err_type & + ERR_HDR_DMA_TX_ERR_TYPE_MSK)) >> + ERR_HDR_DMA_TX_ERR_TYPE_OFF; + u32 dma_rx_err_type = ((dma_err_type & + ERR_HDR_DMA_RX_ERR_TYPE_MSK)) >> + ERR_HDR_DMA_RX_ERR_TYPE_OFF; + u32 trans_tx_fail_type = + cpu_to_le32(err_record->trans_tx_fail_type); + u32 trans_rx_fail_type = + cpu_to_le32(err_record->trans_rx_fail_type); + + if (dma_tx_err_type) { + /* dma tx err */ + error = ffs(dma_tx_err_type) + - 1 + DMA_TX_ERR_BASE; + } else if (dma_rx_err_type) { + /* dma rx err */ + error = ffs(dma_rx_err_type) + - 1 + DMA_RX_ERR_BASE; + } else if (trans_tx_fail_type) { + /* trans tx err */ + error = ffs(trans_tx_fail_type) + - 1 + TRANS_TX_FAIL_BASE; + } else if (trans_rx_fail_type) { + /* trans rx err */ + error = ffs(trans_rx_fail_type) + - 1 + TRANS_RX_FAIL_BASE; + } + + switch (error) { + case DMA_TX_DATA_UNDERFLOW_ERR: + case DMA_RX_DATA_UNDERFLOW_ERR: + { + ts->residual = 0; + ts->stat = SAS_DATA_UNDERRUN; + break; + } + case DMA_TX_DATA_SGL_OVERFLOW_ERR: + case DMA_TX_DIF_SGL_OVERFLOW_ERR: + case DMA_TX_XFER_RDY_LENGTH_OVERFLOW_ERR: + case DMA_RX_DATA_OVERFLOW_ERR: + case TRANS_RX_FRAME_OVERRUN_ERR: + case TRANS_RX_LINK_BUF_OVERRUN_ERR: + { + ts->stat = SAS_DATA_OVERRUN; + ts->residual = 0; + break; + } + case TRANS_TX_PHY_NOT_ENABLE_ERR: + { + ts->stat = SAS_PHY_DOWN; + break; + } + case TRANS_TX_OPEN_REJCT_WRONG_DEST_ERR: + case TRANS_TX_OPEN_REJCT_ZONE_VIOLATION_ERR: + case TRANS_TX_OPEN_REJCT_BY_OTHER_ERR: + case TRANS_TX_OPEN_REJCT_AIP_TIMEOUT_ERR: + case TRANS_TX_OPEN_REJCT_STP_BUSY_ERR: + case TRANS_TX_OPEN_REJCT_PROTOCOL_NOT_SUPPORT_ERR: + case TRANS_TX_OPEN_REJCT_RATE_NOT_SUPPORT_ERR: + case TRANS_TX_OPEN_REJCT_BAD_DEST_ERR: + case TRANS_TX_OPEN_BREAK_RECEIVE_ERR: + case TRANS_TX_OPEN_REJCT_PATHWAY_BLOCKED_ERR: + case TRANS_TX_OPEN_REJCT_NO_DEST_ERR: + case TRANS_TX_OPEN_RETRY_ERR: + { + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + break; + } + case TRANS_TX_OPEN_TIMEOUT_ERR: + { + ts->stat = SAS_OPEN_TO; + break; + } + case TRANS_TX_NAK_RECEIVE_ERR: + case TRANS_TX_ACK_NAK_TIMEOUT_ERR: + { + ts->stat = SAS_NAK_R_ERR; + break; + } + case TRANS_TX_CREDIT_TIMEOUT_ERR: + case TRANS_TX_CLOSE_NORMAL_ERR: + { + /* This will request a retry */ + ts->stat = SAS_QUEUE_FULL; + slot->abort = 1; + break; + } + default: + { + ts->stat = SAM_STAT_CHECK_CONDITION; + break; + } + } + } + break; + case SAS_PROTOCOL_SMP: + ts->stat = SAM_STAT_CHECK_CONDITION; + break; + + case SAS_PROTOCOL_SATA: + case SAS_PROTOCOL_STP: + case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: + { + dev_err(dev, "slot err: SATA/STP not supported"); + } + break; + default: + break; + } + +} + +static int slot_complete_v1_hw(struct hisi_hba *hisi_hba, + struct hisi_sas_slot *slot, int abort) +{ + struct sas_task *task = slot->task; + struct hisi_sas_device *sas_dev; + struct device 
*dev = &hisi_hba->pdev->dev; + struct task_status_struct *ts; + struct domain_device *device; + enum exec_status sts; + struct hisi_sas_complete_v1_hdr *complete_queue = + hisi_hba->complete_hdr[slot->cmplt_queue]; + struct hisi_sas_complete_v1_hdr *complete_hdr; + u32 cmplt_hdr_data; + + complete_hdr = &complete_queue[slot->cmplt_queue_slot]; + cmplt_hdr_data = le32_to_cpu(complete_hdr->data); + + if (unlikely(!task || !task->lldd_task || !task->dev)) + return -EINVAL; + + ts = &task->task_status; + device = task->dev; + sas_dev = device->lldd_dev; + + task->task_state_flags &= + ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR); + task->task_state_flags |= SAS_TASK_STATE_DONE; + + memset(ts, 0, sizeof(*ts)); + ts->resp = SAS_TASK_COMPLETE; + + if (unlikely(!sas_dev || abort)) { + if (!sas_dev) + dev_dbg(dev, "slot complete: port has no device\n"); + ts->stat = SAS_PHY_DOWN; + goto out; + } + + if (cmplt_hdr_data & CMPLT_HDR_IO_CFG_ERR_MSK) { + u32 info_reg = hisi_sas_read32(hisi_hba, HGC_INVLD_DQE_INFO); + + if (info_reg & HGC_INVLD_DQE_INFO_DQ_MSK) + dev_err(dev, "slot complete: [%d:%d] has dq IPTT err", + slot->cmplt_queue, slot->cmplt_queue_slot); + + if (info_reg & HGC_INVLD_DQE_INFO_TYPE_MSK) + dev_err(dev, "slot complete: [%d:%d] has dq type err", + slot->cmplt_queue, slot->cmplt_queue_slot); + + if (info_reg & HGC_INVLD_DQE_INFO_FORCE_MSK) + dev_err(dev, "slot complete: [%d:%d] has dq force phy err", + slot->cmplt_queue, slot->cmplt_queue_slot); + + if (info_reg & HGC_INVLD_DQE_INFO_PHY_MSK) + dev_err(dev, "slot complete: [%d:%d] has dq phy id err", + slot->cmplt_queue, slot->cmplt_queue_slot); + + if (info_reg & HGC_INVLD_DQE_INFO_ABORT_MSK) + dev_err(dev, "slot complete: [%d:%d] has dq abort flag err", + slot->cmplt_queue, slot->cmplt_queue_slot); + + if (info_reg & HGC_INVLD_DQE_INFO_IPTT_OF_MSK) + dev_err(dev, "slot complete: [%d:%d] has dq IPTT or ICT err", + slot->cmplt_queue, slot->cmplt_queue_slot); + + if (info_reg & HGC_INVLD_DQE_INFO_SSP_ERR_MSK) + dev_err(dev, "slot complete: [%d:%d] has dq SSP frame type err", + slot->cmplt_queue, slot->cmplt_queue_slot); + + if (info_reg & HGC_INVLD_DQE_INFO_OFL_MSK) + dev_err(dev, "slot complete: [%d:%d] has dq order frame len err", + slot->cmplt_queue, slot->cmplt_queue_slot); + + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + goto out; + } + + if (cmplt_hdr_data & CMPLT_HDR_ERR_RCRD_XFRD_MSK && + !(cmplt_hdr_data & CMPLT_HDR_RSPNS_XFRD_MSK)) { + + slot_err_v1_hw(hisi_hba, task, slot); + if (unlikely(slot->abort)) { + queue_work(hisi_hba->wq, &slot->abort_slot); + /* immediately return and do not complete */ + return ts->stat; + } + goto out; + } + + switch (task->task_proto) { + case SAS_PROTOCOL_SSP: + { + struct ssp_response_iu *iu = slot->status_buffer + + sizeof(struct hisi_sas_err_record); + sas_ssp_task_response(dev, task, iu); + break; + } + case SAS_PROTOCOL_SMP: + { + void *to; + struct scatterlist *sg_resp = &task->smp_task.smp_resp; + + ts->stat = SAM_STAT_GOOD; + to = kmap_atomic(sg_page(sg_resp)); + + dma_unmap_sg(dev, &task->smp_task.smp_resp, 1, + DMA_FROM_DEVICE); + dma_unmap_sg(dev, &task->smp_task.smp_req, 1, + DMA_TO_DEVICE); + memcpy(to + sg_resp->offset, + slot->status_buffer + + sizeof(struct hisi_sas_err_record), + sg_dma_len(sg_resp)); + kunmap_atomic(to); + break; + } + case SAS_PROTOCOL_SATA: + case SAS_PROTOCOL_STP: + case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: + dev_err(dev, "slot complete: SATA/STP not supported"); + break; + + default: + ts->stat = SAM_STAT_CHECK_CONDITION; +
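/* A task whose transport protocol is not handled by v1 hw is + * completed as a check condition, so the midlayer sees a hard + * error rather than a silently dropped completion. */ +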
break; + } + + if (!slot->port->port_attached) { + dev_err(dev, "slot complete: port %d has been removed\n", + slot->port->sas_port.id); + ts->stat = SAS_PHY_DOWN; + } + +out: + if (sas_dev && sas_dev->running_req) + sas_dev->running_req--; + + hisi_sas_slot_task_free(hisi_hba, task, slot); + sts = ts->stat; + + if (task->task_done) + task->task_done(task); + + return sts; +} + +/* Interrupts */ +static irqreturn_t int_phyup_v1_hw(int irq_no, void *p) +{ + struct hisi_sas_phy *phy = p; + struct hisi_hba *hisi_hba = phy->hisi_hba; + struct device *dev = &hisi_hba->pdev->dev; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + int i, phy_no = sas_phy->id; + u32 irq_value, context, port_id, link_rate; + u32 *frame_rcvd = (u32 *)sas_phy->frame_rcvd; + struct sas_identify_frame *id = (struct sas_identify_frame *)frame_rcvd; + irqreturn_t res = IRQ_HANDLED; + + irq_value = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2); + if (!(irq_value & CHL_INT2_SL_PHY_ENA_MSK)) { + dev_dbg(dev, "phyup: irq_value = %x, enable bit not set\n", + irq_value); + res = IRQ_NONE; + goto end; + } + + context = hisi_sas_read32(hisi_hba, PHY_CONTEXT); + if (context & 1 << phy_no) { + dev_err(dev, "phyup: phy%d has SATA equipment attached\n", + phy_no); + goto end; + } + + port_id = (hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA) >> (4 * phy_no)) + & 0xf; + if (port_id == 0xf) { + dev_err(dev, "phyup: phy%d invalid portid\n", phy_no); + res = IRQ_NONE; + goto end; + } + + for (i = 0; i < 6; i++) { + u32 idaf = hisi_sas_phy_read32(hisi_hba, phy_no, + RX_IDAF_DWORD0 + (i * 4)); + frame_rcvd[i] = __swab32(idaf); + } + + /* Get the linkrate */ + link_rate = hisi_sas_read32(hisi_hba, PHY_CONN_RATE); + link_rate = (link_rate >> (phy_no * 4)) & 0xf; + sas_phy->linkrate = link_rate; + sas_phy->oob_mode = SAS_OOB_MODE; + memcpy(sas_phy->attached_sas_addr, + &id->sas_addr, SAS_ADDR_SIZE); + dev_info(dev, "phyup: phy%d link_rate=%d\n", + phy_no, link_rate); + phy->port_id = port_id; + phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA); + phy->phy_type |= PORT_TYPE_SAS; + phy->phy_attached = 1; + phy->identify.device_type = id->dev_type; + phy->frame_rcvd_size = sizeof(struct sas_identify_frame); + if (phy->identify.device_type == SAS_END_DEVICE) + phy->identify.target_port_protocols = + SAS_PROTOCOL_SSP; + else if (phy->identify.device_type != SAS_PHY_UNUSED) + phy->identify.target_port_protocols = + SAS_PROTOCOL_SMP; + queue_work(hisi_hba->wq, &phy->phyup_ws); + +end: + hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2, + CHL_INT2_SL_PHY_ENA_MSK); + + if (irq_value & CHL_INT2_SL_PHY_ENA_MSK) { + u32 chl_int0 = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT0); + + chl_int0 &= ~CHL_INT0_PHYCTRL_NOTRDY_MSK; + hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, chl_int0); + hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0_MSK, 0x3ce3ee); + } + + return res; +} + +static irqreturn_t int_bcast_v1_hw(int irq, void *p) +{ + struct hisi_sas_phy *phy = p; + struct hisi_hba *hisi_hba = phy->hisi_hba; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + struct sas_ha_struct *sha = &hisi_hba->sha; + struct device *dev = &hisi_hba->pdev->dev; + int phy_no = sas_phy->id; + u32 irq_value; + irqreturn_t res = IRQ_HANDLED; + + irq_value = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2); + + if (!(irq_value & CHL_INT2_SL_RX_BC_ACK_MSK)) { + dev_err(dev, "bcast: irq_value = %x, enable bit not set\n", + irq_value); + res = IRQ_NONE; + goto end; + } + + sha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD); + +end: + hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2, +
CHL_INT2_SL_RX_BC_ACK_MSK); + + return res; +} + +static irqreturn_t int_abnormal_v1_hw(int irq, void *p) +{ + struct hisi_sas_phy *phy = p; + struct hisi_hba *hisi_hba = phy->hisi_hba; + struct device *dev = &hisi_hba->pdev->dev; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + u32 irq_value, irq_mask_old; + int phy_no = sas_phy->id; + + /* mask_int0 */ + irq_mask_old = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT0_MSK); + hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0_MSK, 0x3fffff); + + /* read int0 */ + irq_value = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT0); + + if (irq_value & CHL_INT0_PHYCTRL_NOTRDY_MSK) { + u32 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE); + + hisi_sas_phy_down(hisi_hba, phy_no, + (phy_state & 1 << phy_no) ? 1 : 0); + } + + if (irq_value & CHL_INT0_ID_TIMEOUT_MSK) + dev_dbg(dev, "abnormal: ID_TIMEOUT phy%d identify timeout\n", + phy_no); + + if (irq_value & CHL_INT0_DWS_LOST_MSK) + dev_dbg(dev, "abnormal: DWS_LOST phy%d dws lost\n", phy_no); + + if (irq_value & CHL_INT0_SN_FAIL_NGR_MSK) + dev_dbg(dev, "abnormal: SN_FAIL_NGR phy%d sn fail ngr\n", + phy_no); + + if (irq_value & CHL_INT0_SL_IDAF_FAIL_MSK || + irq_value & CHL_INT0_SL_OPAF_FAIL_MSK) + dev_dbg(dev, "abnormal: SL_ID/OPAF_FAIL phy%d check adr frm err\n", + phy_no); + + if (irq_value & CHL_INT0_SL_PS_FAIL_OFF) + dev_dbg(dev, "abnormal: SL_PS_FAIL phy%d fail\n", phy_no); + + /* write to zero */ + hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, irq_value); + + if (irq_value & CHL_INT0_PHYCTRL_NOTRDY_MSK) + hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0_MSK, + 0x3fffff & ~CHL_INT0_MSK_PHYCTRL_NOTRDY_MSK); + else + hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0_MSK, + irq_mask_old); + + return IRQ_HANDLED; +} + +static irqreturn_t cq_interrupt_v1_hw(int irq, void *p) +{ + struct hisi_sas_cq *cq = p; + struct hisi_hba *hisi_hba = cq->hisi_hba; + struct hisi_sas_slot *slot; + int queue = cq->id; + struct hisi_sas_complete_v1_hdr *complete_queue = + (struct hisi_sas_complete_v1_hdr *) + hisi_hba->complete_hdr[queue]; + u32 irq_value, rd_point, wr_point; + + irq_value = hisi_sas_read32(hisi_hba, OQ_INT_SRC); + + hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue); + + rd_point = hisi_sas_read32(hisi_hba, + COMPL_Q_0_RD_PTR + (0x14 * queue)); + wr_point = hisi_sas_read32(hisi_hba, + COMPL_Q_0_WR_PTR + (0x14 * queue)); + + while (rd_point != wr_point) { + struct hisi_sas_complete_v1_hdr *complete_hdr; + int idx; + u32 cmplt_hdr_data; + + complete_hdr = &complete_queue[rd_point]; + cmplt_hdr_data = cpu_to_le32(complete_hdr->data); + idx = (cmplt_hdr_data & CMPLT_HDR_IPTT_MSK) >> + CMPLT_HDR_IPTT_OFF; + slot = &hisi_hba->slot_info[idx]; + + /* The completion queue and queue slot index are not + * necessarily the same as the delivery queue and + * queue slot index. 
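+ * The IPTT field pulled from the completion header is what maps + * the entry back to its slot in hisi_hba->slot_info[], so the + * completion queue coordinates are recorded on the slot before + * it is completed.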
+ */ + slot->cmplt_queue_slot = rd_point; + slot->cmplt_queue = queue; + slot_complete_v1_hw(hisi_hba, slot, 0); + + if (++rd_point >= HISI_SAS_QUEUE_SLOTS) + rd_point = 0; + } + + /* update rd_point */ + hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point); + + return IRQ_HANDLED; +} + +static irqreturn_t fatal_ecc_int_v1_hw(int irq, void *p) +{ + struct hisi_hba *hisi_hba = p; + struct device *dev = &hisi_hba->pdev->dev; + u32 ecc_int = hisi_sas_read32(hisi_hba, SAS_ECC_INTR); + + if (ecc_int & SAS_ECC_INTR_DQ_ECC1B_MSK) { + u32 ecc_err = hisi_sas_read32(hisi_hba, HGC_ECC_ERR); + + panic("%s: Fatal DQ 1b ECC interrupt (0x%x)\n", + dev_name(dev), ecc_err); + } + + if (ecc_int & SAS_ECC_INTR_DQ_ECCBAD_MSK) { + u32 addr = (hisi_sas_read32(hisi_hba, HGC_DQ_ECC_ADDR) & + HGC_DQ_ECC_ADDR_BAD_MSK) >> + HGC_DQ_ECC_ADDR_BAD_OFF; + + panic("%s: Fatal DQ RAM ECC interrupt @ 0x%08x\n", + dev_name(dev), addr); + } + + if (ecc_int & SAS_ECC_INTR_IOST_ECC1B_MSK) { + u32 ecc_err = hisi_sas_read32(hisi_hba, HGC_ECC_ERR); + + panic("%s: Fatal IOST 1b ECC interrupt (0x%x)\n", + dev_name(dev), ecc_err); + } + + if (ecc_int & SAS_ECC_INTR_IOST_ECCBAD_MSK) { + u32 addr = (hisi_sas_read32(hisi_hba, HGC_IOST_ECC_ADDR) & + HGC_IOST_ECC_ADDR_BAD_MSK) >> + HGC_IOST_ECC_ADDR_BAD_OFF; + + panic("%s: Fatal IOST RAM ECC interrupt @ 0x%08x\n", + dev_name(dev), addr); + } + + if (ecc_int & SAS_ECC_INTR_ITCT_ECCBAD_MSK) { + u32 addr = (hisi_sas_read32(hisi_hba, HGC_ITCT_ECC_ADDR) & + HGC_ITCT_ECC_ADDR_BAD_MSK) >> + HGC_ITCT_ECC_ADDR_BAD_OFF; + + panic("%s: Fatal ITCT RAM ECC interrupt @ 0x%08x\n", + dev_name(dev), addr); + } + + if (ecc_int & SAS_ECC_INTR_ITCT_ECC1B_MSK) { + u32 ecc_err = hisi_sas_read32(hisi_hba, HGC_ECC_ERR); + + panic("%s: Fatal ITCT 1b ECC interrupt (0x%x)\n", + dev_name(dev), ecc_err); + } + + hisi_sas_write32(hisi_hba, SAS_ECC_INTR, ecc_int | 0x3f); + + return IRQ_HANDLED; +} + +static irqreturn_t fatal_axi_int_v1_hw(int irq, void *p) +{ + struct hisi_hba *hisi_hba = p; + struct device *dev = &hisi_hba->pdev->dev; + u32 axi_int = hisi_sas_read32(hisi_hba, ENT_INT_SRC2); + u32 axi_info = hisi_sas_read32(hisi_hba, HGC_AXI_FIFO_ERR_INFO); + + if (axi_int & ENT_INT_SRC2_DQ_CFG_ERR_MSK) + panic("%s: Fatal DQ_CFG_ERR interrupt (0x%x)\n", + dev_name(dev), axi_info); + + if (axi_int & ENT_INT_SRC2_CQ_CFG_ERR_MSK) + panic("%s: Fatal CQ_CFG_ERR interrupt (0x%x)\n", + dev_name(dev), axi_info); + + if (axi_int & ENT_INT_SRC2_AXI_WRONG_INT_MSK) + panic("%s: Fatal AXI_WRONG_INT interrupt (0x%x)\n", + dev_name(dev), axi_info); + + if (axi_int & ENT_INT_SRC2_AXI_OVERLF_INT_MSK) + panic("%s: Fatal AXI_OVERLF_INT interrupt (0x%x)\n", + dev_name(dev), axi_info); + + hisi_sas_write32(hisi_hba, ENT_INT_SRC2, axi_int | 0x30000000); + + return IRQ_HANDLED; +} + +static irq_handler_t phy_interrupts[HISI_SAS_PHY_INT_NR] = { + int_bcast_v1_hw, + int_phyup_v1_hw, + int_abnormal_v1_hw +}; + +static irq_handler_t fatal_interrupts[HISI_SAS_FATAL_INT_NR] = { + fatal_ecc_int_v1_hw, + fatal_axi_int_v1_hw +}; + +static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba) +{ + struct platform_device *pdev = hisi_hba->pdev; + struct device *dev = &pdev->dev; + int i, j, irq, rc, idx; + + for (i = 0; i < hisi_hba->n_phy; i++) { + struct hisi_sas_phy *phy = &hisi_hba->phy[i]; + + idx = i * HISI_SAS_PHY_INT_NR; + for (j = 0; j < HISI_SAS_PHY_INT_NR; j++, idx++) { + irq = platform_get_irq(pdev, idx); + if (irq < 0) { + dev_err(dev, + "irq init: failed to map phy interrupt %d\n", + idx); + return -ENOENT; + } + + rc = devm_request_irq(dev, irq, phy_interrupts[j], 0, + DRV_NAME " phy", phy); + if (rc) { + dev_err(dev, "irq init: could not request phy interrupt %d, rc=%d\n", + irq, rc); + return -ENOENT; + } + } + } + + idx = hisi_hba->n_phy * HISI_SAS_PHY_INT_NR; + for (i = 0; i < hisi_hba->queue_count; i++, idx++) { + irq = platform_get_irq(pdev, idx); + if (irq < 0) { + dev_err(dev, "irq init: could not map cq interrupt %d\n", + idx); + return -ENOENT; + } + + rc = devm_request_irq(dev, irq, cq_interrupt_v1_hw, 0, + DRV_NAME " cq", &hisi_hba->cq[i]); + if (rc) { + dev_err(dev, "irq init: could not request cq interrupt %d, rc=%d\n", + irq, rc); + return -ENOENT; + } + } + + idx = (hisi_hba->n_phy * HISI_SAS_PHY_INT_NR) + hisi_hba->queue_count; + for (i = 0; i < HISI_SAS_FATAL_INT_NR; i++, idx++) { + irq = platform_get_irq(pdev, idx); + if (irq < 0) { + dev_err(dev, "irq init: could not map fatal interrupt %d\n", + idx); + return -ENOENT; + } + + rc = devm_request_irq(dev, irq, fatal_interrupts[i], 0, + DRV_NAME " fatal", hisi_hba); + if (rc) { + dev_err(dev, + "irq init: could not request fatal interrupt %d, rc=%d\n", + irq, rc); + return -ENOENT; + } + } + + return 0; +} + +static int interrupt_openall_v1_hw(struct hisi_hba *hisi_hba) +{ + int i; + u32 val; + + for (i = 0; i < hisi_hba->n_phy; i++) { + /* Clear interrupt status */ + val = hisi_sas_phy_read32(hisi_hba, i, CHL_INT0); + hisi_sas_phy_write32(hisi_hba, i, CHL_INT0, val); + val = hisi_sas_phy_read32(hisi_hba, i, CHL_INT1); + hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, val); + val = hisi_sas_phy_read32(hisi_hba, i, CHL_INT2); + hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, val); + + /* Unmask interrupt */ + hisi_sas_phy_write32(hisi_hba, i, CHL_INT0_MSK, 0x3ce3ee); + hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0x17fff); + hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x8000012a); + + /* bypass chip bug: mask abnormal intr */ + hisi_sas_phy_write32(hisi_hba, i, CHL_INT0_MSK, + 0x3fffff & ~CHL_INT0_MSK_PHYCTRL_NOTRDY_MSK); + } + + return 0; +} + +static int hisi_sas_v1_init(struct hisi_hba *hisi_hba) +{ + int rc; + + rc = hw_init_v1_hw(hisi_hba); + if (rc) + return rc; + + rc = interrupt_init_v1_hw(hisi_hba); + if (rc) + return rc; + + rc = interrupt_openall_v1_hw(hisi_hba); + if (rc) + return rc; + + phys_init_v1_hw(hisi_hba); + + return 0; +} + +static const struct hisi_sas_hw hisi_sas_v1_hw = { + .hw_init = hisi_sas_v1_init, + .setup_itct = setup_itct_v1_hw, + .sl_notify = sl_notify_v1_hw, + .free_device = free_device_v1_hw, + .prep_smp = prep_smp_v1_hw, + .prep_ssp = prep_ssp_v1_hw, + .get_free_slot = get_free_slot_v1_hw, + .start_delivery = start_delivery_v1_hw, + .slot_complete = slot_complete_v1_hw, + .phy_enable = enable_phy_v1_hw, + .phy_disable = disable_phy_v1_hw, + .phy_hard_reset = phy_hard_reset_v1_hw, + .get_wideport_bitmap = get_wideport_bitmap_v1_hw, + .max_command_entries = HISI_SAS_COMMAND_ENTRIES_V1_HW, + .complete_hdr_size = sizeof(struct hisi_sas_complete_v1_hdr), +}; + +static int hisi_sas_v1_probe(struct platform_device *pdev) +{ + return hisi_sas_probe(pdev, &hisi_sas_v1_hw); +} + +static int hisi_sas_v1_remove(struct platform_device *pdev) +{ + return hisi_sas_remove(pdev); +} + +static const struct of_device_id sas_v1_of_match[] = { + { .compatible = "hisilicon,hip05-sas-v1",}, + {}, +}; +MODULE_DEVICE_TABLE(of, sas_v1_of_match); + +static const struct acpi_device_id sas_v1_acpi_match[] = { + { "HISI0161", 0 }, + { } +}; + +MODULE_DEVICE_TABLE(acpi, sas_v1_acpi_match); + +static struct platform_driver 
hisi_sas_v1_driver = { + .probe = hisi_sas_v1_probe, + .remove = hisi_sas_v1_remove, + .driver = { + .name = DRV_NAME, + .of_match_table = sas_v1_of_match, + .acpi_match_table = ACPI_PTR(sas_v1_acpi_match), + }, +}; + +module_platform_driver(hisi_sas_v1_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("John Garry <john.garry@huawei.com>"); +MODULE_DESCRIPTION("HISILICON SAS controller v1 hw driver"); +MODULE_ALIAS("platform:" DRV_NAME); diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c new file mode 100644 index 000000000000..b7337476454b --- /dev/null +++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c @@ -0,0 +1,2214 @@ +/* + * Copyright (c) 2016 Linaro Ltd. + * Copyright (c) 2016 Hisilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + */ + +#include "hisi_sas.h" +#define DRV_NAME "hisi_sas_v2_hw" + +/* global registers need init*/ +#define DLVRY_QUEUE_ENABLE 0x0 +#define IOST_BASE_ADDR_LO 0x8 +#define IOST_BASE_ADDR_HI 0xc +#define ITCT_BASE_ADDR_LO 0x10 +#define ITCT_BASE_ADDR_HI 0x14 +#define IO_BROKEN_MSG_ADDR_LO 0x18 +#define IO_BROKEN_MSG_ADDR_HI 0x1c +#define PHY_CONTEXT 0x20 +#define PHY_STATE 0x24 +#define PHY_PORT_NUM_MA 0x28 +#define PORT_STATE 0x2c +#define PORT_STATE_PHY8_PORT_NUM_OFF 16 +#define PORT_STATE_PHY8_PORT_NUM_MSK (0xf << PORT_STATE_PHY8_PORT_NUM_OFF) +#define PORT_STATE_PHY8_CONN_RATE_OFF 20 +#define PORT_STATE_PHY8_CONN_RATE_MSK (0xf << PORT_STATE_PHY8_CONN_RATE_OFF) +#define PHY_CONN_RATE 0x30 +#define HGC_TRANS_TASK_CNT_LIMIT 0x38 +#define AXI_AHB_CLK_CFG 0x3c +#define ITCT_CLR 0x44 +#define ITCT_CLR_EN_OFF 16 +#define ITCT_CLR_EN_MSK (0x1 << ITCT_CLR_EN_OFF) +#define ITCT_DEV_OFF 0 +#define ITCT_DEV_MSK (0x7ff << ITCT_DEV_OFF) +#define AXI_USER1 0x48 +#define AXI_USER2 0x4c +#define IO_SATA_BROKEN_MSG_ADDR_LO 0x58 +#define IO_SATA_BROKEN_MSG_ADDR_HI 0x5c +#define SATA_INITI_D2H_STORE_ADDR_LO 0x60 +#define SATA_INITI_D2H_STORE_ADDR_HI 0x64 +#define HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL 0x84 +#define HGC_SAS_TXFAIL_RETRY_CTRL 0x88 +#define HGC_GET_ITV_TIME 0x90 +#define DEVICE_MSG_WORK_MODE 0x94 +#define OPENA_WT_CONTI_TIME 0x9c +#define I_T_NEXUS_LOSS_TIME 0xa0 +#define MAX_CON_TIME_LIMIT_TIME 0xa4 +#define BUS_INACTIVE_LIMIT_TIME 0xa8 +#define REJECT_TO_OPEN_LIMIT_TIME 0xac +#define CFG_AGING_TIME 0xbc +#define HGC_DFX_CFG2 0xc0 +#define HGC_IOMB_PROC1_STATUS 0x104 +#define CFG_1US_TIMER_TRSH 0xcc +#define HGC_INVLD_DQE_INFO 0x148 +#define HGC_INVLD_DQE_INFO_FB_CH0_OFF 9 +#define HGC_INVLD_DQE_INFO_FB_CH0_MSK (0x1 << HGC_INVLD_DQE_INFO_FB_CH0_OFF) +#define HGC_INVLD_DQE_INFO_FB_CH3_OFF 18 +#define INT_COAL_EN 0x19c +#define OQ_INT_COAL_TIME 0x1a0 +#define OQ_INT_COAL_CNT 0x1a4 +#define ENT_INT_COAL_TIME 0x1a8 +#define ENT_INT_COAL_CNT 0x1ac +#define OQ_INT_SRC 0x1b0 +#define OQ_INT_SRC_MSK 0x1b4 +#define ENT_INT_SRC1 0x1b8 +#define ENT_INT_SRC1_D2H_FIS_CH0_OFF 0 +#define ENT_INT_SRC1_D2H_FIS_CH0_MSK (0x1 << ENT_INT_SRC1_D2H_FIS_CH0_OFF) +#define ENT_INT_SRC1_D2H_FIS_CH1_OFF 8 +#define ENT_INT_SRC1_D2H_FIS_CH1_MSK (0x1 << ENT_INT_SRC1_D2H_FIS_CH1_OFF) +#define ENT_INT_SRC2 0x1bc +#define ENT_INT_SRC3 0x1c0 +#define ENT_INT_SRC3_ITC_INT_OFF 15 +#define ENT_INT_SRC3_ITC_INT_MSK (0x1 << ENT_INT_SRC3_ITC_INT_OFF) +#define ENT_INT_SRC_MSK1 0x1c4 +#define ENT_INT_SRC_MSK2 0x1c8 +#define ENT_INT_SRC_MSK3 0x1cc 
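+/* As with the v1 register list, each field below is described by an _OFF + * (bit offset) and an _MSK (mask pre-shifted to that offset) pair, so a + * field is extracted as (reg & FOO_MSK) >> FOO_OFF, FOO being a + * placeholder name. */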
+#define ENT_INT_SRC_MSK3_ENT95_MSK_OFF 31 +#define ENT_INT_SRC_MSK3_ENT95_MSK_MSK (0x1 << ENT_INT_SRC_MSK3_ENT95_MSK_OFF) +#define SAS_ECC_INTR_MSK 0x1ec +#define HGC_ERR_STAT_EN 0x238 +#define DLVRY_Q_0_BASE_ADDR_LO 0x260 +#define DLVRY_Q_0_BASE_ADDR_HI 0x264 +#define DLVRY_Q_0_DEPTH 0x268 +#define DLVRY_Q_0_WR_PTR 0x26c +#define DLVRY_Q_0_RD_PTR 0x270 +#define HYPER_STREAM_ID_EN_CFG 0xc80 +#define OQ0_INT_SRC_MSK 0xc90 +#define COMPL_Q_0_BASE_ADDR_LO 0x4e0 +#define COMPL_Q_0_BASE_ADDR_HI 0x4e4 +#define COMPL_Q_0_DEPTH 0x4e8 +#define COMPL_Q_0_WR_PTR 0x4ec +#define COMPL_Q_0_RD_PTR 0x4f0 + +/* phy registers need init */ +#define PORT_BASE (0x2000) + +#define PHY_CFG (PORT_BASE + 0x0) +#define HARD_PHY_LINKRATE (PORT_BASE + 0x4) +#define PHY_CFG_ENA_OFF 0 +#define PHY_CFG_ENA_MSK (0x1 << PHY_CFG_ENA_OFF) +#define PHY_CFG_DC_OPT_OFF 2 +#define PHY_CFG_DC_OPT_MSK (0x1 << PHY_CFG_DC_OPT_OFF) +#define PROG_PHY_LINK_RATE (PORT_BASE + 0x8) +#define PROG_PHY_LINK_RATE_MAX_OFF 0 +#define PROG_PHY_LINK_RATE_MAX_MSK (0xff << PROG_PHY_LINK_RATE_MAX_OFF) +#define PHY_CTRL (PORT_BASE + 0x14) +#define PHY_CTRL_RESET_OFF 0 +#define PHY_CTRL_RESET_MSK (0x1 << PHY_CTRL_RESET_OFF) +#define SAS_PHY_CTRL (PORT_BASE + 0x20) +#define SL_CFG (PORT_BASE + 0x84) +#define PHY_PCN (PORT_BASE + 0x44) +#define SL_TOUT_CFG (PORT_BASE + 0x8c) +#define SL_CONTROL (PORT_BASE + 0x94) +#define SL_CONTROL_NOTIFY_EN_OFF 0 +#define SL_CONTROL_NOTIFY_EN_MSK (0x1 << SL_CONTROL_NOTIFY_EN_OFF) +#define TX_ID_DWORD0 (PORT_BASE + 0x9c) +#define TX_ID_DWORD1 (PORT_BASE + 0xa0) +#define TX_ID_DWORD2 (PORT_BASE + 0xa4) +#define TX_ID_DWORD3 (PORT_BASE + 0xa8) +#define TX_ID_DWORD4 (PORT_BASE + 0xaC) +#define TX_ID_DWORD5 (PORT_BASE + 0xb0) +#define TX_ID_DWORD6 (PORT_BASE + 0xb4) +#define RX_IDAF_DWORD0 (PORT_BASE + 0xc4) +#define RX_IDAF_DWORD1 (PORT_BASE + 0xc8) +#define RX_IDAF_DWORD2 (PORT_BASE + 0xcc) +#define RX_IDAF_DWORD3 (PORT_BASE + 0xd0) +#define RX_IDAF_DWORD4 (PORT_BASE + 0xd4) +#define RX_IDAF_DWORD5 (PORT_BASE + 0xd8) +#define RX_IDAF_DWORD6 (PORT_BASE + 0xdc) +#define RXOP_CHECK_CFG_H (PORT_BASE + 0xfc) +#define DONE_RECEIVED_TIME (PORT_BASE + 0x11c) +#define CHL_INT0 (PORT_BASE + 0x1b4) +#define CHL_INT0_HOTPLUG_TOUT_OFF 0 +#define CHL_INT0_HOTPLUG_TOUT_MSK (0x1 << CHL_INT0_HOTPLUG_TOUT_OFF) +#define CHL_INT0_SL_RX_BCST_ACK_OFF 1 +#define CHL_INT0_SL_RX_BCST_ACK_MSK (0x1 << CHL_INT0_SL_RX_BCST_ACK_OFF) +#define CHL_INT0_SL_PHY_ENABLE_OFF 2 +#define CHL_INT0_SL_PHY_ENABLE_MSK (0x1 << CHL_INT0_SL_PHY_ENABLE_OFF) +#define CHL_INT0_NOT_RDY_OFF 4 +#define CHL_INT0_NOT_RDY_MSK (0x1 << CHL_INT0_NOT_RDY_OFF) +#define CHL_INT0_PHY_RDY_OFF 5 +#define CHL_INT0_PHY_RDY_MSK (0x1 << CHL_INT0_PHY_RDY_OFF) +#define CHL_INT1 (PORT_BASE + 0x1b8) +#define CHL_INT1_DMAC_TX_ECC_ERR_OFF 15 +#define CHL_INT1_DMAC_TX_ECC_ERR_MSK (0x1 << CHL_INT1_DMAC_TX_ECC_ERR_OFF) +#define CHL_INT1_DMAC_RX_ECC_ERR_OFF 17 +#define CHL_INT1_DMAC_RX_ECC_ERR_MSK (0x1 << CHL_INT1_DMAC_RX_ECC_ERR_OFF) +#define CHL_INT2 (PORT_BASE + 0x1bc) +#define CHL_INT0_MSK (PORT_BASE + 0x1c0) +#define CHL_INT1_MSK (PORT_BASE + 0x1c4) +#define CHL_INT2_MSK (PORT_BASE + 0x1c8) +#define CHL_INT_COAL_EN (PORT_BASE + 0x1d0) +#define PHY_CTRL_RDY_MSK (PORT_BASE + 0x2b0) +#define PHYCTRL_NOT_RDY_MSK (PORT_BASE + 0x2b4) +#define PHYCTRL_DWS_RESET_MSK (PORT_BASE + 0x2b8) +#define PHYCTRL_PHY_ENA_MSK (PORT_BASE + 0x2bc) +#define SL_RX_BCAST_CHK_MSK (PORT_BASE + 0x2c0) +#define PHYCTRL_OOB_RESTART_MSK (PORT_BASE + 0x2c4) +#define DMA_TX_STATUS (PORT_BASE + 0x2d0) +#define 
DMA_TX_STATUS_BUSY_OFF 0 +#define DMA_TX_STATUS_BUSY_MSK (0x1 << DMA_TX_STATUS_BUSY_OFF) +#define DMA_RX_STATUS (PORT_BASE + 0x2e8) +#define DMA_RX_STATUS_BUSY_OFF 0 +#define DMA_RX_STATUS_BUSY_MSK (0x1 << DMA_RX_STATUS_BUSY_OFF) + +#define AXI_CFG (0x5100) +#define AM_CFG_MAX_TRANS (0x5010) +#define AM_CFG_SINGLE_PORT_MAX_TRANS (0x5014) + +/* HW dma structures */ +/* Delivery queue header */ +/* dw0 */ +#define CMD_HDR_RESP_REPORT_OFF 5 +#define CMD_HDR_RESP_REPORT_MSK (0x1 << CMD_HDR_RESP_REPORT_OFF) +#define CMD_HDR_TLR_CTRL_OFF 6 +#define CMD_HDR_TLR_CTRL_MSK (0x3 << CMD_HDR_TLR_CTRL_OFF) +#define CMD_HDR_PORT_OFF 18 +#define CMD_HDR_PORT_MSK (0xf << CMD_HDR_PORT_OFF) +#define CMD_HDR_PRIORITY_OFF 27 +#define CMD_HDR_PRIORITY_MSK (0x1 << CMD_HDR_PRIORITY_OFF) +#define CMD_HDR_CMD_OFF 29 +#define CMD_HDR_CMD_MSK (0x7 << CMD_HDR_CMD_OFF) +/* dw1 */ +#define CMD_HDR_DIR_OFF 5 +#define CMD_HDR_DIR_MSK (0x3 << CMD_HDR_DIR_OFF) +#define CMD_HDR_RESET_OFF 7 +#define CMD_HDR_RESET_MSK (0x1 << CMD_HDR_RESET_OFF) +#define CMD_HDR_VDTL_OFF 10 +#define CMD_HDR_VDTL_MSK (0x1 << CMD_HDR_VDTL_OFF) +#define CMD_HDR_FRAME_TYPE_OFF 11 +#define CMD_HDR_FRAME_TYPE_MSK (0x1f << CMD_HDR_FRAME_TYPE_OFF) +#define CMD_HDR_DEV_ID_OFF 16 +#define CMD_HDR_DEV_ID_MSK (0xffff << CMD_HDR_DEV_ID_OFF) +/* dw2 */ +#define CMD_HDR_CFL_OFF 0 +#define CMD_HDR_CFL_MSK (0x1ff << CMD_HDR_CFL_OFF) +#define CMD_HDR_NCQ_TAG_OFF 10 +#define CMD_HDR_NCQ_TAG_MSK (0x1f << CMD_HDR_NCQ_TAG_OFF) +#define CMD_HDR_MRFL_OFF 15 +#define CMD_HDR_MRFL_MSK (0x1ff << CMD_HDR_MRFL_OFF) +#define CMD_HDR_SG_MOD_OFF 24 +#define CMD_HDR_SG_MOD_MSK (0x3 << CMD_HDR_SG_MOD_OFF) +#define CMD_HDR_FIRST_BURST_OFF 26 +#define CMD_HDR_FIRST_BURST_MSK (0x1 << CMD_HDR_SG_MOD_OFF) +/* dw3 */ +#define CMD_HDR_IPTT_OFF 0 +#define CMD_HDR_IPTT_MSK (0xffff << CMD_HDR_IPTT_OFF) +/* dw6 */ +#define CMD_HDR_DIF_SGL_LEN_OFF 0 +#define CMD_HDR_DIF_SGL_LEN_MSK (0xffff << CMD_HDR_DIF_SGL_LEN_OFF) +#define CMD_HDR_DATA_SGL_LEN_OFF 16 +#define CMD_HDR_DATA_SGL_LEN_MSK (0xffff << CMD_HDR_DATA_SGL_LEN_OFF) + +/* Completion header */ +/* dw0 */ +#define CMPLT_HDR_RSPNS_XFRD_OFF 10 +#define CMPLT_HDR_RSPNS_XFRD_MSK (0x1 << CMPLT_HDR_RSPNS_XFRD_OFF) +#define CMPLT_HDR_ERX_OFF 12 +#define CMPLT_HDR_ERX_MSK (0x1 << CMPLT_HDR_ERX_OFF) +/* dw1 */ +#define CMPLT_HDR_IPTT_OFF 0 +#define CMPLT_HDR_IPTT_MSK (0xffff << CMPLT_HDR_IPTT_OFF) +#define CMPLT_HDR_DEV_ID_OFF 16 +#define CMPLT_HDR_DEV_ID_MSK (0xffff << CMPLT_HDR_DEV_ID_OFF) + +/* ITCT header */ +/* qw0 */ +#define ITCT_HDR_DEV_TYPE_OFF 0 +#define ITCT_HDR_DEV_TYPE_MSK (0x3 << ITCT_HDR_DEV_TYPE_OFF) +#define ITCT_HDR_VALID_OFF 2 +#define ITCT_HDR_VALID_MSK (0x1 << ITCT_HDR_VALID_OFF) +#define ITCT_HDR_MCR_OFF 5 +#define ITCT_HDR_MCR_MSK (0xf << ITCT_HDR_MCR_OFF) +#define ITCT_HDR_VLN_OFF 9 +#define ITCT_HDR_VLN_MSK (0xf << ITCT_HDR_VLN_OFF) +#define ITCT_HDR_PORT_ID_OFF 28 +#define ITCT_HDR_PORT_ID_MSK (0xf << ITCT_HDR_PORT_ID_OFF) +/* qw2 */ +#define ITCT_HDR_INLT_OFF 0 +#define ITCT_HDR_INLT_MSK (0xffffULL << ITCT_HDR_INLT_OFF) +#define ITCT_HDR_BITLT_OFF 16 +#define ITCT_HDR_BITLT_MSK (0xffffULL << ITCT_HDR_BITLT_OFF) +#define ITCT_HDR_MCTLT_OFF 32 +#define ITCT_HDR_MCTLT_MSK (0xffffULL << ITCT_HDR_MCTLT_OFF) +#define ITCT_HDR_RTOLT_OFF 48 +#define ITCT_HDR_RTOLT_MSK (0xffffULL << ITCT_HDR_RTOLT_OFF) + +struct hisi_sas_complete_v2_hdr { + __le32 dw0; + __le32 dw1; + __le32 act; + __le32 dw3; +}; + +struct hisi_sas_err_record_v2 { + /* dw0 */ + __le32 trans_tx_fail_type; + + /* dw1 */ + __le32 trans_rx_fail_type; + + /* dw2 
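(split into two 16-bit halves: dma tx error type in bits + * 15:0 and sipc rx error type in bits 31:16, matching the + * DMA_TX_ERR_BASE and SIPC_RX_ERR_BASE comments below) 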
*/ + __le16 dma_tx_err_type; + __le16 sipc_rx_err_type; + + /* dw3 */ + __le32 dma_rx_err_type; +}; + +enum { + HISI_SAS_PHY_PHY_UPDOWN, + HISI_SAS_PHY_CHNL_INT, + HISI_SAS_PHY_INT_NR +}; + +enum { + TRANS_TX_FAIL_BASE = 0x0, /* dw0 */ + TRANS_RX_FAIL_BASE = 0x100, /* dw1 */ + DMA_TX_ERR_BASE = 0x200, /* dw2 bit 15-0 */ + SIPC_RX_ERR_BASE = 0x300, /* dw2 bit 31-16*/ + DMA_RX_ERR_BASE = 0x400, /* dw3 */ + + /* trans tx*/ + TRANS_TX_OPEN_FAIL_WITH_IT_NEXUS_LOSS = TRANS_TX_FAIL_BASE, /* 0x0 */ + TRANS_TX_ERR_PHY_NOT_ENABLE, /* 0x1 */ + TRANS_TX_OPEN_CNX_ERR_WRONG_DESTINATION, /* 0x2 */ + TRANS_TX_OPEN_CNX_ERR_ZONE_VIOLATION, /* 0x3 */ + TRANS_TX_OPEN_CNX_ERR_BY_OTHER, /* 0x4 */ + RESERVED0, /* 0x5 */ + TRANS_TX_OPEN_CNX_ERR_AIP_TIMEOUT, /* 0x6 */ + TRANS_TX_OPEN_CNX_ERR_STP_RESOURCES_BUSY, /* 0x7 */ + TRANS_TX_OPEN_CNX_ERR_PROTOCOL_NOT_SUPPORTED, /* 0x8 */ + TRANS_TX_OPEN_CNX_ERR_CONNECTION_RATE_NOT_SUPPORTED, /* 0x9 */ + TRANS_TX_OPEN_CNX_ERR_BAD_DESTINATION, /* 0xa */ + TRANS_TX_OPEN_CNX_ERR_BREAK_RCVD, /* 0xb */ + TRANS_TX_OPEN_CNX_ERR_LOW_PHY_POWER, /* 0xc */ + TRANS_TX_OPEN_CNX_ERR_PATHWAY_BLOCKED, /* 0xd */ + TRANS_TX_OPEN_CNX_ERR_OPEN_TIMEOUT, /* 0xe */ + TRANS_TX_OPEN_CNX_ERR_NO_DESTINATION, /* 0xf */ + TRANS_TX_OPEN_RETRY_ERR_THRESHOLD_REACHED, /* 0x10 */ + TRANS_TX_ERR_FRAME_TXED, /* 0x11 */ + TRANS_TX_ERR_WITH_BREAK_TIMEOUT, /* 0x12 */ + TRANS_TX_ERR_WITH_BREAK_REQUEST, /* 0x13 */ + TRANS_TX_ERR_WITH_BREAK_RECEVIED, /* 0x14 */ + TRANS_TX_ERR_WITH_CLOSE_TIMEOUT, /* 0x15 */ + TRANS_TX_ERR_WITH_CLOSE_NORMAL, /* 0x16 for ssp*/ + TRANS_TX_ERR_WITH_CLOSE_PHYDISALE, /* 0x17 */ + TRANS_TX_ERR_WITH_CLOSE_DWS_TIMEOUT, /* 0x18 */ + TRANS_TX_ERR_WITH_CLOSE_COMINIT, /* 0x19 */ + TRANS_TX_ERR_WITH_NAK_RECEVIED, /* 0x1a for ssp*/ + TRANS_TX_ERR_WITH_ACK_NAK_TIMEOUT, /* 0x1b for ssp*/ + /*IO_TX_ERR_WITH_R_ERR_RECEVIED, [> 0x1b for sata/stp<] */ + TRANS_TX_ERR_WITH_CREDIT_TIMEOUT, /* 0x1c for ssp */ + /*IO_RX_ERR_WITH_SATA_DEVICE_LOST 0x1c for sata/stp */ + TRANS_TX_ERR_WITH_IPTT_CONFLICT, /* 0x1d for ssp/smp */ + TRANS_TX_ERR_WITH_OPEN_BY_DES_OR_OTHERS, /* 0x1e */ + /*IO_TX_ERR_WITH_SYNC_RXD, [> 0x1e <] for sata/stp */ + TRANS_TX_ERR_WITH_WAIT_RECV_TIMEOUT, /* 0x1f for sata/stp */ + + /* trans rx */ + TRANS_RX_ERR_WITH_RXFRAME_CRC_ERR = TRANS_RX_FAIL_BASE, /* 0x100 */ + TRANS_RX_ERR_WITH_RXFIS_8B10B_DISP_ERR, /* 0x101 for sata/stp */ + TRANS_RX_ERR_WITH_RXFRAME_HAVE_ERRPRM, /* 0x102 for ssp/smp */ + /*IO_ERR_WITH_RXFIS_8B10B_CODE_ERR, [> 0x102 <] for sata/stp */ + TRANS_RX_ERR_WITH_RXFIS_DECODE_ERROR, /* 0x103 for sata/stp */ + TRANS_RX_ERR_WITH_RXFIS_CRC_ERR, /* 0x104 for sata/stp */ + TRANS_RX_ERR_WITH_RXFRAME_LENGTH_OVERRUN, /* 0x105 for smp */ + /*IO_ERR_WITH_RXFIS_TX SYNCP, [> 0x105 <] for sata/stp */ + TRANS_RX_ERR_WITH_RXFIS_RX_SYNCP, /* 0x106 for sata/stp*/ + TRANS_RX_ERR_WITH_LINK_BUF_OVERRUN, /* 0x107 */ + TRANS_RX_ERR_WITH_BREAK_TIMEOUT, /* 0x108 */ + TRANS_RX_ERR_WITH_BREAK_REQUEST, /* 0x109 */ + TRANS_RX_ERR_WITH_BREAK_RECEVIED, /* 0x10a */ + RESERVED1, /* 0x10b */ + TRANS_RX_ERR_WITH_CLOSE_NORMAL, /* 0x10c */ + TRANS_RX_ERR_WITH_CLOSE_PHY_DISABLE, /* 0x10d */ + TRANS_RX_ERR_WITH_CLOSE_DWS_TIMEOUT, /* 0x10e */ + TRANS_RX_ERR_WITH_CLOSE_COMINIT, /* 0x10f */ + TRANS_RX_ERR_WITH_DATA_LEN0, /* 0x110 for ssp/smp */ + TRANS_RX_ERR_WITH_BAD_HASH, /* 0x111 for ssp */ + /*IO_RX_ERR_WITH_FIS_TOO_SHORT, [> 0x111 <] for sata/stp */ + TRANS_RX_XRDY_WLEN_ZERO_ERR, /* 0x112 for ssp*/ + /*IO_RX_ERR_WITH_FIS_TOO_LONG, [> 0x112 <] for sata/stp */ + TRANS_RX_SSP_FRM_LEN_ERR, /* 0x113 for ssp */ + 
/*IO_RX_ERR_WITH_SATA_DEVICE_LOST, [> 0x113 <] for sata */ + RESERVED2, /* 0x114 */ + RESERVED3, /* 0x115 */ + RESERVED4, /* 0x116 */ + RESERVED5, /* 0x117 */ + TRANS_RX_ERR_WITH_BAD_FRM_TYPE, /* 0x118 */ + TRANS_RX_SMP_FRM_LEN_ERR, /* 0x119 */ + TRANS_RX_SMP_RESP_TIMEOUT_ERR, /* 0x11a */ + RESERVED6, /* 0x11b */ + RESERVED7, /* 0x11c */ + RESERVED8, /* 0x11d */ + RESERVED9, /* 0x11e */ + TRANS_RX_R_ERR, /* 0x11f */ + + /* dma tx */ + DMA_TX_DIF_CRC_ERR = DMA_TX_ERR_BASE, /* 0x200 */ + DMA_TX_DIF_APP_ERR, /* 0x201 */ + DMA_TX_DIF_RPP_ERR, /* 0x202 */ + DMA_TX_DATA_SGL_OVERFLOW, /* 0x203 */ + DMA_TX_DIF_SGL_OVERFLOW, /* 0x204 */ + DMA_TX_UNEXP_XFER_ERR, /* 0x205 */ + DMA_TX_UNEXP_RETRANS_ERR, /* 0x206 */ + DMA_TX_XFER_LEN_OVERFLOW, /* 0x207 */ + DMA_TX_XFER_OFFSET_ERR, /* 0x208 */ + DMA_TX_RAM_ECC_ERR, /* 0x209 */ + DMA_TX_DIF_LEN_ALIGN_ERR, /* 0x20a */ + + /* sipc rx */ + SIPC_RX_FIS_STATUS_ERR_BIT_VLD = SIPC_RX_ERR_BASE, /* 0x300 */ + SIPC_RX_PIO_WRSETUP_STATUS_DRQ_ERR, /* 0x301 */ + SIPC_RX_FIS_STATUS_BSY_BIT_ERR, /* 0x302 */ + SIPC_RX_WRSETUP_LEN_ODD_ERR, /* 0x303 */ + SIPC_RX_WRSETUP_LEN_ZERO_ERR, /* 0x304 */ + SIPC_RX_WRDATA_LEN_NOT_MATCH_ERR, /* 0x305 */ + SIPC_RX_NCQ_WRSETUP_OFFSET_ERR, /* 0x306 */ + SIPC_RX_NCQ_WRSETUP_AUTO_ACTIVE_ERR, /* 0x307 */ + SIPC_RX_SATA_UNEXP_FIS_ERR, /* 0x308 */ + SIPC_RX_WRSETUP_ESTATUS_ERR, /* 0x309 */ + SIPC_RX_DATA_UNDERFLOW_ERR, /* 0x30a */ + + /* dma rx */ + DMA_RX_DIF_CRC_ERR = DMA_RX_ERR_BASE, /* 0x400 */ + DMA_RX_DIF_APP_ERR, /* 0x401 */ + DMA_RX_DIF_RPP_ERR, /* 0x402 */ + DMA_RX_DATA_SGL_OVERFLOW, /* 0x403 */ + DMA_RX_DIF_SGL_OVERFLOW, /* 0x404 */ + DMA_RX_DATA_LEN_OVERFLOW, /* 0x405 */ + DMA_RX_DATA_LEN_UNDERFLOW, /* 0x406 */ + DMA_RX_DATA_OFFSET_ERR, /* 0x407 */ + RESERVED10, /* 0x408 */ + DMA_RX_SATA_FRAME_TYPE_ERR, /* 0x409 */ + DMA_RX_RESP_BUF_OVERFLOW, /* 0x40a */ + DMA_RX_UNEXP_RETRANS_RESP_ERR, /* 0x40b */ + DMA_RX_UNEXP_NORM_RESP_ERR, /* 0x40c */ + DMA_RX_UNEXP_RDFRAME_ERR, /* 0x40d */ + DMA_RX_PIO_DATA_LEN_ERR, /* 0x40e */ + DMA_RX_RDSETUP_STATUS_ERR, /* 0x40f */ + DMA_RX_RDSETUP_STATUS_DRQ_ERR, /* 0x410 */ + DMA_RX_RDSETUP_STATUS_BSY_ERR, /* 0x411 */ + DMA_RX_RDSETUP_LEN_ODD_ERR, /* 0x412 */ + DMA_RX_RDSETUP_LEN_ZERO_ERR, /* 0x413 */ + DMA_RX_RDSETUP_LEN_OVER_ERR, /* 0x414 */ + DMA_RX_RDSETUP_OFFSET_ERR, /* 0x415 */ + DMA_RX_RDSETUP_ACTIVE_ERR, /* 0x416 */ + DMA_RX_RDSETUP_ESTATUS_ERR, /* 0x417 */ + DMA_RX_RAM_ECC_ERR, /* 0x418 */ + DMA_RX_UNKNOWN_FRM_ERR, /* 0x419 */ +}; + +#define HISI_SAS_COMMAND_ENTRIES_V2_HW 4096 + +#define DIR_NO_DATA 0 +#define DIR_TO_INI 1 +#define DIR_TO_DEVICE 2 +#define DIR_RESERVED 3 + +#define SATA_PROTOCOL_NONDATA 0x1 +#define SATA_PROTOCOL_PIO 0x2 +#define SATA_PROTOCOL_DMA 0x4 +#define SATA_PROTOCOL_FPDMA 0x8 +#define SATA_PROTOCOL_ATAPI 0x10 + +static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off) +{ + void __iomem *regs = hisi_hba->regs + off; + + return readl(regs); +} + +static u32 hisi_sas_read32_relaxed(struct hisi_hba *hisi_hba, u32 off) +{ + void __iomem *regs = hisi_hba->regs + off; + + return readl_relaxed(regs); +} + +static void hisi_sas_write32(struct hisi_hba *hisi_hba, u32 off, u32 val) +{ + void __iomem *regs = hisi_hba->regs + off; + + writel(val, regs); +} + +static void hisi_sas_phy_write32(struct hisi_hba *hisi_hba, int phy_no, + u32 off, u32 val) +{ + void __iomem *regs = hisi_hba->regs + (0x400 * phy_no) + off; + + writel(val, regs); +} + +static u32 hisi_sas_phy_read32(struct hisi_hba *hisi_hba, + int phy_no, u32 off) +{ + void __iomem *regs = hisi_hba->regs + (0x400 * 
phy_no) + off; + + return readl(regs); +} + +static void config_phy_opt_mode_v2_hw(struct hisi_hba *hisi_hba, int phy_no) +{ + u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG); + + cfg &= ~PHY_CFG_DC_OPT_MSK; + cfg |= 1 << PHY_CFG_DC_OPT_OFF; + hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg); +} + +static void config_id_frame_v2_hw(struct hisi_hba *hisi_hba, int phy_no) +{ + struct sas_identify_frame identify_frame; + u32 *identify_buffer; + + memset(&identify_frame, 0, sizeof(identify_frame)); + identify_frame.dev_type = SAS_END_DEVICE; + identify_frame.frame_type = 0; + identify_frame._un1 = 1; + identify_frame.initiator_bits = SAS_PROTOCOL_ALL; + identify_frame.target_bits = SAS_PROTOCOL_NONE; + memcpy(&identify_frame._un4_11[0], hisi_hba->sas_addr, SAS_ADDR_SIZE); + memcpy(&identify_frame.sas_addr[0], hisi_hba->sas_addr, SAS_ADDR_SIZE); + identify_frame.phy_id = phy_no; + identify_buffer = (u32 *)(&identify_frame); + + hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD0, + __swab32(identify_buffer[0])); + hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD1, + identify_buffer[2]); + hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD2, + identify_buffer[1]); + hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD3, + identify_buffer[4]); + hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD4, + identify_buffer[3]); + hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD5, + __swab32(identify_buffer[5])); +} + +static void init_id_frame_v2_hw(struct hisi_hba *hisi_hba) +{ + int i; + + for (i = 0; i < hisi_hba->n_phy; i++) + config_id_frame_v2_hw(hisi_hba, i); +} + +static void setup_itct_v2_hw(struct hisi_hba *hisi_hba, + struct hisi_sas_device *sas_dev) +{ + struct domain_device *device = sas_dev->sas_device; + struct device *dev = &hisi_hba->pdev->dev; + u64 qw0, device_id = sas_dev->device_id; + struct hisi_sas_itct *itct = &hisi_hba->itct[device_id]; + struct domain_device *parent_dev = device->parent; + struct hisi_sas_port *port = device->port->lldd_port; + + memset(itct, 0, sizeof(*itct)); + + /* qw0 */ + qw0 = 0; + switch (sas_dev->dev_type) { + case SAS_END_DEVICE: + case SAS_EDGE_EXPANDER_DEVICE: + case SAS_FANOUT_EXPANDER_DEVICE: + qw0 = HISI_SAS_DEV_TYPE_SSP << ITCT_HDR_DEV_TYPE_OFF; + break; + case SAS_SATA_DEV: + if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) + qw0 = HISI_SAS_DEV_TYPE_STP << ITCT_HDR_DEV_TYPE_OFF; + else + qw0 = HISI_SAS_DEV_TYPE_SATA << ITCT_HDR_DEV_TYPE_OFF; + break; + default: + dev_warn(dev, "setup itct: unsupported dev type (%d)\n", + sas_dev->dev_type); + } + + qw0 |= ((1 << ITCT_HDR_VALID_OFF) | + (device->max_linkrate << ITCT_HDR_MCR_OFF) | + (1 << ITCT_HDR_VLN_OFF) | + (port->id << ITCT_HDR_PORT_ID_OFF)); + itct->qw0 = cpu_to_le64(qw0); + + /* qw1 */ + memcpy(&itct->sas_addr, device->sas_addr, SAS_ADDR_SIZE); + itct->sas_addr = __swab64(itct->sas_addr); + + /* qw2 */ + itct->qw2 = cpu_to_le64((500ULL << ITCT_HDR_INLT_OFF) | + (0xff00ULL << ITCT_HDR_BITLT_OFF) | + (0xff00ULL << ITCT_HDR_MCTLT_OFF) | + (0xff00ULL << ITCT_HDR_RTOLT_OFF)); +} + +static void free_device_v2_hw(struct hisi_hba *hisi_hba, + struct hisi_sas_device *sas_dev) +{ + u64 qw0, dev_id = sas_dev->device_id; + struct device *dev = &hisi_hba->pdev->dev; + struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id]; + u32 reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3); + int i; + + /* clear the itct interrupt state */ + if (ENT_INT_SRC3_ITC_INT_MSK & reg_val) + hisi_sas_write32(hisi_hba, ENT_INT_SRC3, + ENT_INT_SRC3_ITC_INT_MSK); + + /* clear the itct int*/ + for (i = 0; i 
< 2; i++) { + /* clear the itct table*/ + reg_val = hisi_sas_read32(hisi_hba, ITCT_CLR); + reg_val |= ITCT_CLR_EN_MSK | (dev_id & ITCT_DEV_MSK); + hisi_sas_write32(hisi_hba, ITCT_CLR, reg_val); + + udelay(10); + reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3); + if (ENT_INT_SRC3_ITC_INT_MSK & reg_val) { + dev_dbg(dev, "got clear ITCT done interrupt\n"); + + /* invalid the itct state*/ + qw0 = cpu_to_le64(itct->qw0); + qw0 &= ~(1 << ITCT_HDR_VALID_OFF); + hisi_sas_write32(hisi_hba, ENT_INT_SRC3, + ENT_INT_SRC3_ITC_INT_MSK); + hisi_hba->devices[dev_id].dev_type = SAS_PHY_UNUSED; + hisi_hba->devices[dev_id].dev_status = HISI_SAS_DEV_NORMAL; + + /* clear the itct */ + hisi_sas_write32(hisi_hba, ITCT_CLR, 0); + dev_dbg(dev, "clear ITCT ok\n"); + break; + } + } +} + +static int reset_hw_v2_hw(struct hisi_hba *hisi_hba) +{ + int i, reset_val; + u32 val; + unsigned long end_time; + struct device *dev = &hisi_hba->pdev->dev; + + /* The mask needs to be set depending on the number of phys */ + if (hisi_hba->n_phy == 9) + reset_val = 0x1fffff; + else + reset_val = 0x7ffff; + + /* Disable all of the DQ */ + for (i = 0; i < HISI_SAS_MAX_QUEUES; i++) + hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0); + + /* Disable all of the PHYs */ + for (i = 0; i < hisi_hba->n_phy; i++) { + u32 phy_cfg = hisi_sas_phy_read32(hisi_hba, i, PHY_CFG); + + phy_cfg &= ~PHY_CTRL_RESET_MSK; + hisi_sas_phy_write32(hisi_hba, i, PHY_CFG, phy_cfg); + } + udelay(50); + + /* Ensure DMA tx & rx idle */ + for (i = 0; i < hisi_hba->n_phy; i++) { + u32 dma_tx_status, dma_rx_status; + + end_time = jiffies + msecs_to_jiffies(1000); + + while (1) { + dma_tx_status = hisi_sas_phy_read32(hisi_hba, i, + DMA_TX_STATUS); + dma_rx_status = hisi_sas_phy_read32(hisi_hba, i, + DMA_RX_STATUS); + + if (!(dma_tx_status & DMA_TX_STATUS_BUSY_MSK) && + !(dma_rx_status & DMA_RX_STATUS_BUSY_MSK)) + break; + + msleep(20); + if (time_after(jiffies, end_time)) + return -EIO; + } + } + + /* Ensure axi bus idle */ + end_time = jiffies + msecs_to_jiffies(1000); + while (1) { + u32 axi_status = + hisi_sas_read32(hisi_hba, AXI_CFG); + + if (axi_status == 0) + break; + + msleep(20); + if (time_after(jiffies, end_time)) + return -EIO; + } + + /* reset and disable clock*/ + regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_reset_reg, + reset_val); + regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_clock_ena_reg + 4, + reset_val); + msleep(1); + regmap_read(hisi_hba->ctrl, hisi_hba->ctrl_reset_sts_reg, &val); + if (reset_val != (val & reset_val)) { + dev_err(dev, "SAS reset fail.\n"); + return -EIO; + } + + /* De-reset and enable clock*/ + regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_reset_reg + 4, + reset_val); + regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_clock_ena_reg, + reset_val); + msleep(1); + regmap_read(hisi_hba->ctrl, hisi_hba->ctrl_reset_sts_reg, + &val); + if (val & reset_val) { + dev_err(dev, "SAS de-reset fail.\n"); + return -EIO; + } + + return 0; +} + +static void init_reg_v2_hw(struct hisi_hba *hisi_hba) +{ + struct device *dev = &hisi_hba->pdev->dev; + struct device_node *np = dev->of_node; + int i; + + /* Global registers init */ + + /* Deal with am-max-transmissions quirk */ + if (of_get_property(np, "hip06-sas-v2-quirk-amt", NULL)) { + hisi_sas_write32(hisi_hba, AM_CFG_MAX_TRANS, 0x2020); + hisi_sas_write32(hisi_hba, AM_CFG_SINGLE_PORT_MAX_TRANS, + 0x2020); + } /* Else, use defaults -> do nothing */ + + hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, + (u32)((1ULL << hisi_hba->queue_count) - 1)); + hisi_sas_write32(hisi_hba, AXI_USER1, 0xc0000000); + 
hisi_sas_write32(hisi_hba, AXI_USER2, 0x10000); + hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x108); + hisi_sas_write32(hisi_hba, HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL, 0x7FF); + hisi_sas_write32(hisi_hba, OPENA_WT_CONTI_TIME, 0x1); + hisi_sas_write32(hisi_hba, I_T_NEXUS_LOSS_TIME, 0x1F4); + hisi_sas_write32(hisi_hba, MAX_CON_TIME_LIMIT_TIME, 0x4E20); + hisi_sas_write32(hisi_hba, BUS_INACTIVE_LIMIT_TIME, 0x1); + hisi_sas_write32(hisi_hba, CFG_AGING_TIME, 0x1); + hisi_sas_write32(hisi_hba, HGC_ERR_STAT_EN, 0x1); + hisi_sas_write32(hisi_hba, HGC_GET_ITV_TIME, 0x1); + hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1); + hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1); + hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1); + hisi_sas_write32(hisi_hba, ENT_INT_COAL_TIME, 0x1); + hisi_sas_write32(hisi_hba, ENT_INT_COAL_CNT, 0x1); + hisi_sas_write32(hisi_hba, OQ_INT_SRC, 0x0); + hisi_sas_write32(hisi_hba, ENT_INT_SRC1, 0xffffffff); + hisi_sas_write32(hisi_hba, ENT_INT_SRC2, 0xffffffff); + hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 0xffffffff); + hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0x7efefefe); + hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0x7efefefe); + hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0x7ffffffe); + hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xfffff3c0); + for (i = 0; i < hisi_hba->queue_count; i++) + hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK+0x4*i, 0); + + hisi_sas_write32(hisi_hba, AXI_AHB_CLK_CFG, 1); + hisi_sas_write32(hisi_hba, HYPER_STREAM_ID_EN_CFG, 1); + + for (i = 0; i < hisi_hba->n_phy; i++) { + hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE, 0x855); + hisi_sas_phy_write32(hisi_hba, i, SAS_PHY_CTRL, 0x30b9908); + hisi_sas_phy_write32(hisi_hba, i, SL_TOUT_CFG, 0x7d7d7d7d); + hisi_sas_phy_write32(hisi_hba, i, DONE_RECEIVED_TIME, 0x10); + hisi_sas_phy_write32(hisi_hba, i, CHL_INT0, 0xffffffff); + hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff); + hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xffffffff); + hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000); + hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xffffffff); + hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x8ffffbff); + hisi_sas_phy_write32(hisi_hba, i, SL_CFG, 0x23f801fc); + hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0); + hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0); + hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_DWS_RESET_MSK, 0x0); + hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_PHY_ENA_MSK, 0x0); + hisi_sas_phy_write32(hisi_hba, i, SL_RX_BCAST_CHK_MSK, 0x0); + hisi_sas_phy_write32(hisi_hba, i, CHL_INT_COAL_EN, 0x0); + hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x0); + hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL, 0x199B694); + } + + for (i = 0; i < hisi_hba->queue_count; i++) { + /* Delivery queue */ + hisi_sas_write32(hisi_hba, + DLVRY_Q_0_BASE_ADDR_HI + (i * 0x14), + upper_32_bits(hisi_hba->cmd_hdr_dma[i])); + + hisi_sas_write32(hisi_hba, DLVRY_Q_0_BASE_ADDR_LO + (i * 0x14), + lower_32_bits(hisi_hba->cmd_hdr_dma[i])); + + hisi_sas_write32(hisi_hba, DLVRY_Q_0_DEPTH + (i * 0x14), + HISI_SAS_QUEUE_SLOTS); + + /* Completion queue */ + hisi_sas_write32(hisi_hba, COMPL_Q_0_BASE_ADDR_HI + (i * 0x14), + upper_32_bits(hisi_hba->complete_hdr_dma[i])); + + hisi_sas_write32(hisi_hba, COMPL_Q_0_BASE_ADDR_LO + (i * 0x14), + lower_32_bits(hisi_hba->complete_hdr_dma[i])); + + hisi_sas_write32(hisi_hba, COMPL_Q_0_DEPTH + (i * 0x14), + HISI_SAS_QUEUE_SLOTS); + } + + /* itct */ + hisi_sas_write32(hisi_hba, ITCT_BASE_ADDR_LO, + lower_32_bits(hisi_hba->itct_dma)); 
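+ /* Each 64-bit table address here is programmed as a LO/HI register + * pair: itct, iost, breakpoint, SATA breakpoint and the initial fis + * buffer all follow this pattern. */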
+ + hisi_sas_write32(hisi_hba, ITCT_BASE_ADDR_HI, + upper_32_bits(hisi_hba->itct_dma)); + + /* iost */ + hisi_sas_write32(hisi_hba, IOST_BASE_ADDR_LO, + lower_32_bits(hisi_hba->iost_dma)); + + hisi_sas_write32(hisi_hba, IOST_BASE_ADDR_HI, + upper_32_bits(hisi_hba->iost_dma)); + + /* breakpoint */ + hisi_sas_write32(hisi_hba, IO_BROKEN_MSG_ADDR_LO, + lower_32_bits(hisi_hba->breakpoint_dma)); + + hisi_sas_write32(hisi_hba, IO_BROKEN_MSG_ADDR_HI, + upper_32_bits(hisi_hba->breakpoint_dma)); + + /* SATA broken msg */ + hisi_sas_write32(hisi_hba, IO_SATA_BROKEN_MSG_ADDR_LO, + lower_32_bits(hisi_hba->sata_breakpoint_dma)); + + hisi_sas_write32(hisi_hba, IO_SATA_BROKEN_MSG_ADDR_HI, + upper_32_bits(hisi_hba->sata_breakpoint_dma)); + + /* SATA initial fis */ + hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_LO, + lower_32_bits(hisi_hba->initial_fis_dma)); + + hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_HI, + upper_32_bits(hisi_hba->initial_fis_dma)); +} + +static int hw_init_v2_hw(struct hisi_hba *hisi_hba) +{ + struct device *dev = &hisi_hba->pdev->dev; + int rc; + + rc = reset_hw_v2_hw(hisi_hba); + if (rc) { + dev_err(dev, "hisi_sas_reset_hw failed, rc=%d", rc); + return rc; + } + + msleep(100); + init_reg_v2_hw(hisi_hba); + + init_id_frame_v2_hw(hisi_hba); + + return 0; +} + +static void enable_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no) +{ + u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG); + + cfg |= PHY_CFG_ENA_MSK; + hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg); +} + +static void disable_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no) +{ + u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG); + + cfg &= ~PHY_CFG_ENA_MSK; + hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg); +} + +static void start_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no) +{ + config_id_frame_v2_hw(hisi_hba, phy_no); + config_phy_opt_mode_v2_hw(hisi_hba, phy_no); + enable_phy_v2_hw(hisi_hba, phy_no); +} + +static void stop_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no) +{ + disable_phy_v2_hw(hisi_hba, phy_no); +} + +static void phy_hard_reset_v2_hw(struct hisi_hba *hisi_hba, int phy_no) +{ + stop_phy_v2_hw(hisi_hba, phy_no); + msleep(100); + start_phy_v2_hw(hisi_hba, phy_no); +} + +static void start_phys_v2_hw(unsigned long data) +{ + struct hisi_hba *hisi_hba = (struct hisi_hba *)data; + int i; + + for (i = 0; i < hisi_hba->n_phy; i++) + start_phy_v2_hw(hisi_hba, i); +} + +static void phys_init_v2_hw(struct hisi_hba *hisi_hba) +{ + int i; + struct timer_list *timer = &hisi_hba->timer; + + for (i = 0; i < hisi_hba->n_phy; i++) { + hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x6a); + hisi_sas_phy_read32(hisi_hba, i, CHL_INT2_MSK); + } + + setup_timer(timer, start_phys_v2_hw, (unsigned long)hisi_hba); + mod_timer(timer, jiffies + HZ); +} + +static void sl_notify_v2_hw(struct hisi_hba *hisi_hba, int phy_no) +{ + u32 sl_control; + + sl_control = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL); + sl_control |= SL_CONTROL_NOTIFY_EN_MSK; + hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control); + msleep(1); + sl_control = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL); + sl_control &= ~SL_CONTROL_NOTIFY_EN_MSK; + hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control); +} + +static int get_wideport_bitmap_v2_hw(struct hisi_hba *hisi_hba, int port_id) +{ + int i, bitmap = 0; + u32 phy_port_num_ma = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA); + u32 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE); + + for (i = 0; i < (hisi_hba->n_phy < 9 ? 
hisi_hba->n_phy : 8); i++) + if (phy_state & 1 << i) + if (((phy_port_num_ma >> (i * 4)) & 0xf) == port_id) + bitmap |= 1 << i; + + if (hisi_hba->n_phy == 9) { + u32 port_state = hisi_sas_read32(hisi_hba, PORT_STATE); + + if (phy_state & 1 << 8) + if (((port_state & PORT_STATE_PHY8_PORT_NUM_MSK) >> + PORT_STATE_PHY8_PORT_NUM_OFF) == port_id) + bitmap |= 1 << 9; + } + + return bitmap; +} + +/** + * This function allocates across all queues to load balance. + * Slots are allocated from queues in a round-robin fashion. + * + * The callpath to this function and upto writing the write + * queue pointer should be safe from interruption. + */ +static int get_free_slot_v2_hw(struct hisi_hba *hisi_hba, int *q, int *s) +{ + struct device *dev = &hisi_hba->pdev->dev; + u32 r, w; + int queue = hisi_hba->queue; + + while (1) { + w = hisi_sas_read32_relaxed(hisi_hba, + DLVRY_Q_0_WR_PTR + (queue * 0x14)); + r = hisi_sas_read32_relaxed(hisi_hba, + DLVRY_Q_0_RD_PTR + (queue * 0x14)); + if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) { + queue = (queue + 1) % hisi_hba->queue_count; + if (queue == hisi_hba->queue) { + dev_warn(dev, "could not find free slot\n"); + return -EAGAIN; + } + continue; + } + break; + } + hisi_hba->queue = (queue + 1) % hisi_hba->queue_count; + *q = queue; + *s = w; + return 0; +} + +static void start_delivery_v2_hw(struct hisi_hba *hisi_hba) +{ + int dlvry_queue = hisi_hba->slot_prep->dlvry_queue; + int dlvry_queue_slot = hisi_hba->slot_prep->dlvry_queue_slot; + + hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14), + ++dlvry_queue_slot % HISI_SAS_QUEUE_SLOTS); +} + +static int prep_prd_sge_v2_hw(struct hisi_hba *hisi_hba, + struct hisi_sas_slot *slot, + struct hisi_sas_cmd_hdr *hdr, + struct scatterlist *scatter, + int n_elem) +{ + struct device *dev = &hisi_hba->pdev->dev; + struct scatterlist *sg; + int i; + + if (n_elem > HISI_SAS_SGE_PAGE_CNT) { + dev_err(dev, "prd err: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT", + n_elem); + return -EINVAL; + } + + slot->sge_page = dma_pool_alloc(hisi_hba->sge_page_pool, GFP_ATOMIC, + &slot->sge_page_dma); + if (!slot->sge_page) + return -ENOMEM; + + for_each_sg(scatter, sg, n_elem, i) { + struct hisi_sas_sge *entry = &slot->sge_page->sge[i]; + + entry->addr = cpu_to_le64(sg_dma_address(sg)); + entry->page_ctrl_0 = entry->page_ctrl_1 = 0; + entry->data_len = cpu_to_le32(sg_dma_len(sg)); + entry->data_off = 0; + } + + hdr->prd_table_addr = cpu_to_le64(slot->sge_page_dma); + + hdr->sg_len = cpu_to_le32(n_elem << CMD_HDR_DATA_SGL_LEN_OFF); + + return 0; +} + +static int prep_smp_v2_hw(struct hisi_hba *hisi_hba, + struct hisi_sas_slot *slot) +{ + struct sas_task *task = slot->task; + struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr; + struct domain_device *device = task->dev; + struct device *dev = &hisi_hba->pdev->dev; + struct hisi_sas_port *port = slot->port; + struct scatterlist *sg_req, *sg_resp; + struct hisi_sas_device *sas_dev = device->lldd_dev; + dma_addr_t req_dma_addr; + unsigned int req_len, resp_len; + int elem, rc; + + /* + * DMA-map SMP request, response buffers + */ + /* req */ + sg_req = &task->smp_task.smp_req; + elem = dma_map_sg(dev, sg_req, 1, DMA_TO_DEVICE); + if (!elem) + return -ENOMEM; + req_len = sg_dma_len(sg_req); + req_dma_addr = sg_dma_address(sg_req); + + /* resp */ + sg_resp = &task->smp_task.smp_resp; + elem = dma_map_sg(dev, sg_resp, 1, DMA_FROM_DEVICE); + if (!elem) { + rc = -ENOMEM; + goto err_out_req; + } + resp_len = sg_dma_len(sg_resp); + if ((req_len & 0x3) || (resp_len & 0x3)) { + rc = -EINVAL; + goto 
err_out_resp; + } + + /* create header */ + /* dw0 */ + hdr->dw0 = cpu_to_le32((port->id << CMD_HDR_PORT_OFF) | + (1 << CMD_HDR_PRIORITY_OFF) | /* high pri */ + (2 << CMD_HDR_CMD_OFF)); /* smp */ + + /* map itct entry */ + hdr->dw1 = cpu_to_le32((sas_dev->device_id << CMD_HDR_DEV_ID_OFF) | + (1 << CMD_HDR_FRAME_TYPE_OFF) | + (DIR_NO_DATA << CMD_HDR_DIR_OFF)); + + /* dw2 */ + hdr->dw2 = cpu_to_le32((((req_len - 4) / 4) << CMD_HDR_CFL_OFF) | + (HISI_SAS_MAX_SMP_RESP_SZ / 4 << + CMD_HDR_MRFL_OFF)); + + hdr->transfer_tags = cpu_to_le32(slot->idx << CMD_HDR_IPTT_OFF); + + hdr->cmd_table_addr = cpu_to_le64(req_dma_addr); + hdr->sts_buffer_addr = cpu_to_le64(slot->status_buffer_dma); + + return 0; + +err_out_resp: + dma_unmap_sg(dev, &slot->task->smp_task.smp_resp, 1, + DMA_FROM_DEVICE); +err_out_req: + dma_unmap_sg(dev, &slot->task->smp_task.smp_req, 1, + DMA_TO_DEVICE); + return rc; +} + +static int prep_ssp_v2_hw(struct hisi_hba *hisi_hba, + struct hisi_sas_slot *slot, int is_tmf, + struct hisi_sas_tmf_task *tmf) +{ + struct sas_task *task = slot->task; + struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr; + struct domain_device *device = task->dev; + struct hisi_sas_device *sas_dev = device->lldd_dev; + struct hisi_sas_port *port = slot->port; + struct sas_ssp_task *ssp_task = &task->ssp_task; + struct scsi_cmnd *scsi_cmnd = ssp_task->cmd; + int has_data = 0, rc, priority = is_tmf; + u8 *buf_cmd; + u32 dw1 = 0, dw2 = 0; + + hdr->dw0 = cpu_to_le32((1 << CMD_HDR_RESP_REPORT_OFF) | + (2 << CMD_HDR_TLR_CTRL_OFF) | + (port->id << CMD_HDR_PORT_OFF) | + (priority << CMD_HDR_PRIORITY_OFF) | + (1 << CMD_HDR_CMD_OFF)); /* ssp */ + + dw1 = 1 << CMD_HDR_VDTL_OFF; + if (is_tmf) { + dw1 |= 2 << CMD_HDR_FRAME_TYPE_OFF; + dw1 |= DIR_NO_DATA << CMD_HDR_DIR_OFF; + } else { + dw1 |= 1 << CMD_HDR_FRAME_TYPE_OFF; + switch (scsi_cmnd->sc_data_direction) { + case DMA_TO_DEVICE: + has_data = 1; + dw1 |= DIR_TO_DEVICE << CMD_HDR_DIR_OFF; + break; + case DMA_FROM_DEVICE: + has_data = 1; + dw1 |= DIR_TO_INI << CMD_HDR_DIR_OFF; + break; + default: + dw1 &= ~CMD_HDR_DIR_MSK; + } + } + + /* map itct entry */ + dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF; + hdr->dw1 = cpu_to_le32(dw1); + + dw2 = (((sizeof(struct ssp_command_iu) + sizeof(struct ssp_frame_hdr) + + 3) / 4) << CMD_HDR_CFL_OFF) | + ((HISI_SAS_MAX_SSP_RESP_SZ / 4) << CMD_HDR_MRFL_OFF) | + (2 << CMD_HDR_SG_MOD_OFF); + hdr->dw2 = cpu_to_le32(dw2); + + hdr->transfer_tags = cpu_to_le32(slot->idx); + + if (has_data) { + rc = prep_prd_sge_v2_hw(hisi_hba, slot, hdr, task->scatter, + slot->n_elem); + if (rc) + return rc; + } + + hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len); + hdr->cmd_table_addr = cpu_to_le64(slot->command_table_dma); + hdr->sts_buffer_addr = cpu_to_le64(slot->status_buffer_dma); + + buf_cmd = slot->command_table + sizeof(struct ssp_frame_hdr); + + memcpy(buf_cmd, &task->ssp_task.LUN, 8); + if (!is_tmf) { + buf_cmd[9] = task->ssp_task.task_attr | + (task->ssp_task.task_prio << 3); + memcpy(buf_cmd + 12, task->ssp_task.cmd->cmnd, + task->ssp_task.cmd->cmd_len); + } else { + buf_cmd[10] = tmf->tmf; + switch (tmf->tmf) { + case TMF_ABORT_TASK: + case TMF_QUERY_TASK: + buf_cmd[12] = + (tmf->tag_of_task_to_be_managed >> 8) & 0xff; + buf_cmd[13] = + tmf->tag_of_task_to_be_managed & 0xff; + break; + default: + break; + } + } + + return 0; +} + +static void sata_done_v2_hw(struct hisi_hba *hisi_hba, struct sas_task *task, + struct hisi_sas_slot *slot) +{ + struct task_status_struct *ts = &task->task_status; + struct ata_task_resp *resp = (struct 
ata_task_resp *)ts->buf; + struct dev_to_host_fis *d2h = slot->status_buffer + + sizeof(struct hisi_sas_err_record); + + resp->frame_len = sizeof(struct dev_to_host_fis); + memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis)); + + ts->buf_valid_size = sizeof(*resp); +} + +/* by default, task resp is complete */ +static void slot_err_v2_hw(struct hisi_hba *hisi_hba, + struct sas_task *task, + struct hisi_sas_slot *slot) +{ + struct task_status_struct *ts = &task->task_status; + struct hisi_sas_err_record_v2 *err_record = slot->status_buffer; + u32 trans_tx_fail_type = cpu_to_le32(err_record->trans_tx_fail_type); + u32 trans_rx_fail_type = cpu_to_le32(err_record->trans_rx_fail_type); + u16 dma_tx_err_type = cpu_to_le16(err_record->dma_tx_err_type); + u16 sipc_rx_err_type = cpu_to_le16(err_record->sipc_rx_err_type); + u32 dma_rx_err_type = cpu_to_le32(err_record->dma_rx_err_type); + int error = -1; + + if (dma_rx_err_type) { + error = ffs(dma_rx_err_type) + - 1 + DMA_RX_ERR_BASE; + } else if (sipc_rx_err_type) { + error = ffs(sipc_rx_err_type) + - 1 + SIPC_RX_ERR_BASE; + } else if (dma_tx_err_type) { + error = ffs(dma_tx_err_type) + - 1 + DMA_TX_ERR_BASE; + } else if (trans_rx_fail_type) { + error = ffs(trans_rx_fail_type) + - 1 + TRANS_RX_FAIL_BASE; + } else if (trans_tx_fail_type) { + error = ffs(trans_tx_fail_type) + - 1 + TRANS_TX_FAIL_BASE; + } + + switch (task->task_proto) { + case SAS_PROTOCOL_SSP: + { + switch (error) { + case TRANS_TX_OPEN_CNX_ERR_NO_DESTINATION: + { + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_NO_DEST; + break; + } + case TRANS_TX_OPEN_CNX_ERR_PATHWAY_BLOCKED: + { + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_PATH_BLOCKED; + break; + } + case TRANS_TX_OPEN_CNX_ERR_PROTOCOL_NOT_SUPPORTED: + { + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_EPROTO; + break; + } + case TRANS_TX_OPEN_CNX_ERR_CONNECTION_RATE_NOT_SUPPORTED: + { + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_CONN_RATE; + break; + } + case TRANS_TX_OPEN_CNX_ERR_BAD_DESTINATION: + { + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_BAD_DEST; + break; + } + case TRANS_TX_OPEN_CNX_ERR_BREAK_RCVD: + { + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + } + case TRANS_TX_OPEN_CNX_ERR_WRONG_DESTINATION: + { + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_WRONG_DEST; + break; + } + case TRANS_TX_OPEN_CNX_ERR_ZONE_VIOLATION: + { + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + break; + } + case TRANS_TX_OPEN_CNX_ERR_LOW_PHY_POWER: + { + /* not sure */ + ts->stat = SAS_DEV_NO_RESPONSE; + break; + } + case TRANS_RX_ERR_WITH_CLOSE_PHY_DISABLE: + { + ts->stat = SAS_PHY_DOWN; + break; + } + case TRANS_TX_OPEN_CNX_ERR_OPEN_TIMEOUT: + { + ts->stat = SAS_OPEN_TO; + break; + } + case DMA_RX_DATA_LEN_OVERFLOW: + { + ts->stat = SAS_DATA_OVERRUN; + ts->residual = 0; + break; + } + case DMA_RX_DATA_LEN_UNDERFLOW: + case SIPC_RX_DATA_UNDERFLOW_ERR: + { + ts->residual = trans_tx_fail_type; + ts->stat = SAS_DATA_UNDERRUN; + break; + } + case TRANS_TX_ERR_FRAME_TXED: + { + /* This will request a retry */ + ts->stat = SAS_QUEUE_FULL; + slot->abort = 1; + break; + } + case TRANS_TX_OPEN_FAIL_WITH_IT_NEXUS_LOSS: + case TRANS_TX_ERR_PHY_NOT_ENABLE: + case TRANS_TX_OPEN_CNX_ERR_BY_OTHER: + case TRANS_TX_OPEN_CNX_ERR_AIP_TIMEOUT: + case TRANS_TX_OPEN_RETRY_ERR_THRESHOLD_REACHED: + case TRANS_TX_ERR_WITH_BREAK_TIMEOUT: + case TRANS_TX_ERR_WITH_BREAK_REQUEST: + case 
TRANS_TX_ERR_WITH_BREAK_RECEVIED: + case TRANS_TX_ERR_WITH_CLOSE_TIMEOUT: + case TRANS_TX_ERR_WITH_CLOSE_NORMAL: + case TRANS_TX_ERR_WITH_CLOSE_DWS_TIMEOUT: + case TRANS_TX_ERR_WITH_CLOSE_COMINIT: + case TRANS_TX_ERR_WITH_NAK_RECEVIED: + case TRANS_TX_ERR_WITH_ACK_NAK_TIMEOUT: + case TRANS_TX_ERR_WITH_IPTT_CONFLICT: + case TRANS_TX_ERR_WITH_CREDIT_TIMEOUT: + case TRANS_RX_ERR_WITH_RXFRAME_CRC_ERR: + case TRANS_RX_ERR_WITH_RXFIS_8B10B_DISP_ERR: + case TRANS_RX_ERR_WITH_RXFRAME_HAVE_ERRPRM: + case TRANS_RX_ERR_WITH_BREAK_TIMEOUT: + case TRANS_RX_ERR_WITH_BREAK_REQUEST: + case TRANS_RX_ERR_WITH_BREAK_RECEVIED: + case TRANS_RX_ERR_WITH_CLOSE_NORMAL: + case TRANS_RX_ERR_WITH_CLOSE_DWS_TIMEOUT: + case TRANS_RX_ERR_WITH_CLOSE_COMINIT: + case TRANS_RX_ERR_WITH_DATA_LEN0: + case TRANS_RX_ERR_WITH_BAD_HASH: + case TRANS_RX_XRDY_WLEN_ZERO_ERR: + case TRANS_RX_SSP_FRM_LEN_ERR: + case TRANS_RX_ERR_WITH_BAD_FRM_TYPE: + case DMA_TX_UNEXP_XFER_ERR: + case DMA_TX_UNEXP_RETRANS_ERR: + case DMA_TX_XFER_LEN_OVERFLOW: + case DMA_TX_XFER_OFFSET_ERR: + case DMA_RX_DATA_OFFSET_ERR: + case DMA_RX_UNEXP_NORM_RESP_ERR: + case DMA_RX_UNEXP_RDFRAME_ERR: + case DMA_RX_UNKNOWN_FRM_ERR: + { + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + break; + } + default: + break; + } + } + break; + case SAS_PROTOCOL_SMP: + ts->stat = SAM_STAT_CHECK_CONDITION; + break; + + case SAS_PROTOCOL_SATA: + case SAS_PROTOCOL_STP: + case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: + { + switch (error) { + case TRANS_TX_OPEN_CNX_ERR_LOW_PHY_POWER: + case TRANS_TX_OPEN_CNX_ERR_PATHWAY_BLOCKED: + case TRANS_TX_OPEN_CNX_ERR_NO_DESTINATION: + { + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_DEV_NO_RESPONSE; + break; + } + case TRANS_TX_OPEN_CNX_ERR_PROTOCOL_NOT_SUPPORTED: + case TRANS_TX_OPEN_CNX_ERR_CONNECTION_RATE_NOT_SUPPORTED: + case TRANS_TX_OPEN_CNX_ERR_BAD_DESTINATION: + case TRANS_TX_OPEN_CNX_ERR_BREAK_RCVD: + case TRANS_TX_OPEN_CNX_ERR_WRONG_DESTINATION: + case TRANS_TX_OPEN_CNX_ERR_ZONE_VIOLATION: + case TRANS_TX_OPEN_CNX_ERR_STP_RESOURCES_BUSY: + { + ts->stat = SAS_OPEN_REJECT; + break; + } + case TRANS_TX_OPEN_CNX_ERR_OPEN_TIMEOUT: + { + ts->stat = SAS_OPEN_TO; + break; + } + case DMA_RX_DATA_LEN_OVERFLOW: + { + ts->stat = SAS_DATA_OVERRUN; + break; + } + case TRANS_TX_OPEN_FAIL_WITH_IT_NEXUS_LOSS: + case TRANS_TX_ERR_PHY_NOT_ENABLE: + case TRANS_TX_OPEN_CNX_ERR_BY_OTHER: + case TRANS_TX_OPEN_CNX_ERR_AIP_TIMEOUT: + case TRANS_TX_OPEN_RETRY_ERR_THRESHOLD_REACHED: + case TRANS_TX_ERR_WITH_BREAK_TIMEOUT: + case TRANS_TX_ERR_WITH_BREAK_REQUEST: + case TRANS_TX_ERR_WITH_BREAK_RECEVIED: + case TRANS_TX_ERR_WITH_CLOSE_TIMEOUT: + case TRANS_TX_ERR_WITH_CLOSE_NORMAL: + case TRANS_TX_ERR_WITH_CLOSE_DWS_TIMEOUT: + case TRANS_TX_ERR_WITH_CLOSE_COMINIT: + case TRANS_TX_ERR_WITH_NAK_RECEVIED: + case TRANS_TX_ERR_WITH_ACK_NAK_TIMEOUT: + case TRANS_TX_ERR_WITH_CREDIT_TIMEOUT: + case TRANS_TX_ERR_WITH_WAIT_RECV_TIMEOUT: + case TRANS_RX_ERR_WITH_RXFIS_8B10B_DISP_ERR: + case TRANS_RX_ERR_WITH_RXFRAME_HAVE_ERRPRM: + case TRANS_RX_ERR_WITH_RXFIS_DECODE_ERROR: + case TRANS_RX_ERR_WITH_RXFIS_CRC_ERR: + case TRANS_RX_ERR_WITH_RXFRAME_LENGTH_OVERRUN: + case TRANS_RX_ERR_WITH_RXFIS_RX_SYNCP: + case TRANS_RX_ERR_WITH_CLOSE_NORMAL: + case TRANS_RX_ERR_WITH_CLOSE_PHY_DISABLE: + case TRANS_RX_ERR_WITH_CLOSE_DWS_TIMEOUT: + case TRANS_RX_ERR_WITH_CLOSE_COMINIT: + case TRANS_RX_ERR_WITH_DATA_LEN0: + case TRANS_RX_ERR_WITH_BAD_HASH: + case TRANS_RX_XRDY_WLEN_ZERO_ERR: + case TRANS_RX_SSP_FRM_LEN_ERR: + case SIPC_RX_FIS_STATUS_ERR_BIT_VLD: + case 
SIPC_RX_PIO_WRSETUP_STATUS_DRQ_ERR: + case SIPC_RX_FIS_STATUS_BSY_BIT_ERR: + case SIPC_RX_WRSETUP_LEN_ODD_ERR: + case SIPC_RX_WRSETUP_LEN_ZERO_ERR: + case SIPC_RX_WRDATA_LEN_NOT_MATCH_ERR: + case SIPC_RX_SATA_UNEXP_FIS_ERR: + case DMA_RX_SATA_FRAME_TYPE_ERR: + case DMA_RX_UNEXP_RDFRAME_ERR: + case DMA_RX_PIO_DATA_LEN_ERR: + case DMA_RX_RDSETUP_STATUS_ERR: + case DMA_RX_RDSETUP_STATUS_DRQ_ERR: + case DMA_RX_RDSETUP_STATUS_BSY_ERR: + case DMA_RX_RDSETUP_LEN_ODD_ERR: + case DMA_RX_RDSETUP_LEN_ZERO_ERR: + case DMA_RX_RDSETUP_LEN_OVER_ERR: + case DMA_RX_RDSETUP_OFFSET_ERR: + case DMA_RX_RDSETUP_ACTIVE_ERR: + case DMA_RX_RDSETUP_ESTATUS_ERR: + case DMA_RX_UNKNOWN_FRM_ERR: + { + ts->stat = SAS_OPEN_REJECT; + break; + } + default: + { + ts->stat = SAS_PROTO_RESPONSE; + break; + } + } + sata_done_v2_hw(hisi_hba, task, slot); + } + break; + default: + break; + } +} + +static int +slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot, + int abort) +{ + struct sas_task *task = slot->task; + struct hisi_sas_device *sas_dev; + struct device *dev = &hisi_hba->pdev->dev; + struct task_status_struct *ts; + struct domain_device *device; + enum exec_status sts; + struct hisi_sas_complete_v2_hdr *complete_queue = + hisi_hba->complete_hdr[slot->cmplt_queue]; + struct hisi_sas_complete_v2_hdr *complete_hdr = + &complete_queue[slot->cmplt_queue_slot]; + + if (unlikely(!task || !task->lldd_task || !task->dev)) + return -EINVAL; + + ts = &task->task_status; + device = task->dev; + sas_dev = device->lldd_dev; + + task->task_state_flags &= + ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR); + task->task_state_flags |= SAS_TASK_STATE_DONE; + + memset(ts, 0, sizeof(*ts)); + ts->resp = SAS_TASK_COMPLETE; + + if (unlikely(!sas_dev || abort)) { + if (!sas_dev) + dev_dbg(dev, "slot complete: port has not device\n"); + ts->stat = SAS_PHY_DOWN; + goto out; + } + + if ((complete_hdr->dw0 & CMPLT_HDR_ERX_MSK) && + (!(complete_hdr->dw0 & CMPLT_HDR_RSPNS_XFRD_MSK))) { + + slot_err_v2_hw(hisi_hba, task, slot); + if (unlikely(slot->abort)) { + queue_work(hisi_hba->wq, &slot->abort_slot); + /* immediately return and do not complete */ + return ts->stat; + } + goto out; + } + + switch (task->task_proto) { + case SAS_PROTOCOL_SSP: + { + struct ssp_response_iu *iu = slot->status_buffer + + sizeof(struct hisi_sas_err_record); + + sas_ssp_task_response(dev, task, iu); + break; + } + case SAS_PROTOCOL_SMP: + { + struct scatterlist *sg_resp = &task->smp_task.smp_resp; + void *to; + + ts->stat = SAM_STAT_GOOD; + to = kmap_atomic(sg_page(sg_resp)); + + dma_unmap_sg(dev, &task->smp_task.smp_resp, 1, + DMA_FROM_DEVICE); + dma_unmap_sg(dev, &task->smp_task.smp_req, 1, + DMA_TO_DEVICE); + memcpy(to + sg_resp->offset, + slot->status_buffer + + sizeof(struct hisi_sas_err_record), + sg_dma_len(sg_resp)); + kunmap_atomic(to); + break; + } + case SAS_PROTOCOL_SATA: + case SAS_PROTOCOL_STP: + case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: + { + ts->stat = SAM_STAT_GOOD; + sata_done_v2_hw(hisi_hba, task, slot); + break; + } + default: + ts->stat = SAM_STAT_CHECK_CONDITION; + break; + } + + if (!slot->port->port_attached) { + dev_err(dev, "slot complete: port %d has removed\n", + slot->port->sas_port.id); + ts->stat = SAS_PHY_DOWN; + } + +out: + if (sas_dev && sas_dev->running_req) + sas_dev->running_req--; + + hisi_sas_slot_task_free(hisi_hba, task, slot); + sts = ts->stat; + + if (task->task_done) + task->task_done(task); + + return sts; +} + +static u8 get_ata_protocol(u8 cmd, int direction) +{ + switch (cmd) { + case 
ATA_CMD_FPDMA_WRITE: + case ATA_CMD_FPDMA_READ: + return SATA_PROTOCOL_FPDMA; + + case ATA_CMD_ID_ATA: + case ATA_CMD_PMP_READ: + case ATA_CMD_READ_LOG_EXT: + case ATA_CMD_PIO_READ: + case ATA_CMD_PIO_READ_EXT: + case ATA_CMD_PMP_WRITE: + case ATA_CMD_WRITE_LOG_EXT: + case ATA_CMD_PIO_WRITE: + case ATA_CMD_PIO_WRITE_EXT: + return SATA_PROTOCOL_PIO; + + case ATA_CMD_READ: + case ATA_CMD_READ_EXT: + case ATA_CMD_READ_LOG_DMA_EXT: + case ATA_CMD_WRITE: + case ATA_CMD_WRITE_EXT: + case ATA_CMD_WRITE_QUEUED: + case ATA_CMD_WRITE_LOG_DMA_EXT: + return SATA_PROTOCOL_DMA; + + case ATA_CMD_DOWNLOAD_MICRO: + case ATA_CMD_DEV_RESET: + case ATA_CMD_CHK_POWER: + case ATA_CMD_FLUSH: + case ATA_CMD_FLUSH_EXT: + case ATA_CMD_VERIFY: + case ATA_CMD_VERIFY_EXT: + case ATA_CMD_SET_FEATURES: + case ATA_CMD_STANDBY: + case ATA_CMD_STANDBYNOW1: + return SATA_PROTOCOL_NONDATA; + default: + if (direction == DMA_NONE) + return SATA_PROTOCOL_NONDATA; + return SATA_PROTOCOL_PIO; + } +} + +static int get_ncq_tag_v2_hw(struct sas_task *task, u32 *tag) +{ + struct ata_queued_cmd *qc = task->uldd_task; + + if (qc) { + if (qc->tf.command == ATA_CMD_FPDMA_WRITE || + qc->tf.command == ATA_CMD_FPDMA_READ) { + *tag = qc->tag; + return 1; + } + } + return 0; +} + +static int prep_ata_v2_hw(struct hisi_hba *hisi_hba, + struct hisi_sas_slot *slot) +{ + struct sas_task *task = slot->task; + struct domain_device *device = task->dev; + struct domain_device *parent_dev = device->parent; + struct hisi_sas_device *sas_dev = device->lldd_dev; + struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr; + struct hisi_sas_port *port = device->port->lldd_port; + u8 *buf_cmd; + int has_data = 0, rc = 0, hdr_tag = 0; + u32 dw1 = 0, dw2 = 0; + + /* create header */ + /* dw0 */ + hdr->dw0 = cpu_to_le32(port->id << CMD_HDR_PORT_OFF); + if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) + hdr->dw0 |= cpu_to_le32(3 << CMD_HDR_CMD_OFF); + else + hdr->dw0 |= cpu_to_le32(4 << CMD_HDR_CMD_OFF); + + /* dw1 */ + switch (task->data_dir) { + case DMA_TO_DEVICE: + has_data = 1; + dw1 |= DIR_TO_DEVICE << CMD_HDR_DIR_OFF; + break; + case DMA_FROM_DEVICE: + has_data = 1; + dw1 |= DIR_TO_INI << CMD_HDR_DIR_OFF; + break; + default: + dw1 &= ~CMD_HDR_DIR_MSK; + } + + if (0 == task->ata_task.fis.command) + dw1 |= 1 << CMD_HDR_RESET_OFF; + + dw1 |= (get_ata_protocol(task->ata_task.fis.command, task->data_dir)) + << CMD_HDR_FRAME_TYPE_OFF; + dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF; + hdr->dw1 = cpu_to_le32(dw1); + + /* dw2 */ + if (task->ata_task.use_ncq && get_ncq_tag_v2_hw(task, &hdr_tag)) { + task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3); + dw2 |= hdr_tag << CMD_HDR_NCQ_TAG_OFF; + } + + dw2 |= (HISI_SAS_MAX_STP_RESP_SZ / 4) << CMD_HDR_CFL_OFF | + 2 << CMD_HDR_SG_MOD_OFF; + hdr->dw2 = cpu_to_le32(dw2); + + /* dw3 */ + hdr->transfer_tags = cpu_to_le32(slot->idx); + + if (has_data) { + rc = prep_prd_sge_v2_hw(hisi_hba, slot, hdr, task->scatter, + slot->n_elem); + if (rc) + return rc; + } + + + hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len); + hdr->cmd_table_addr = cpu_to_le64(slot->command_table_dma); + hdr->sts_buffer_addr = cpu_to_le64(slot->status_buffer_dma); + + buf_cmd = slot->command_table; + + if (likely(!task->ata_task.device_control_reg_update)) + task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */ + /* fill in command FIS */ + memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis)); + + return 0; +} + +static int phy_up_v2_hw(int phy_no, struct hisi_hba *hisi_hba) +{ + int i, res = 0; + u32 context, 
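
get_ncq_tag_v2_hw() and prep_ata_v2_hw() above show the two places an NCQ queue tag has to land: bits 7:3 of the FIS sector-count byte (hence the tag << 3) and the NCQ-tag field of command-header dw2, so the hardware can match the eventual completion back to the slot. A small sketch; the dw2 offset is a stand-in, not the real CMD_HDR_NCQ_TAG_OFF:

#include <stdio.h>

int main(void)
{
        unsigned char sector_count = 0;
        unsigned int dw2 = 0;
        unsigned int tag = 17;                  /* NCQ tags are 0..31 */
        const unsigned int ncq_tag_off = 10;    /* hypothetical */

        sector_count |= (unsigned char)(tag << 3);      /* FIS bits 7:3 */
        dw2 |= tag << ncq_tag_off;                      /* header copy */

        printf("sector_count=0x%02x dw2=0x%08x\n", sector_count, dw2);
        return 0;
}
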
port_id, link_rate, hard_phy_linkrate; + struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + struct device *dev = &hisi_hba->pdev->dev; + u32 *frame_rcvd = (u32 *)sas_phy->frame_rcvd; + struct sas_identify_frame *id = (struct sas_identify_frame *)frame_rcvd; + + hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 1); + + /* Check for SATA dev */ + context = hisi_sas_read32(hisi_hba, PHY_CONTEXT); + if (context & (1 << phy_no)) + goto end; + + if (phy_no == 8) { + u32 port_state = hisi_sas_read32(hisi_hba, PORT_STATE); + + port_id = (port_state & PORT_STATE_PHY8_PORT_NUM_MSK) >> + PORT_STATE_PHY8_PORT_NUM_OFF; + link_rate = (port_state & PORT_STATE_PHY8_CONN_RATE_MSK) >> + PORT_STATE_PHY8_CONN_RATE_OFF; + } else { + port_id = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA); + port_id = (port_id >> (4 * phy_no)) & 0xf; + link_rate = hisi_sas_read32(hisi_hba, PHY_CONN_RATE); + link_rate = (link_rate >> (phy_no * 4)) & 0xf; + } + + if (port_id == 0xf) { + dev_err(dev, "phyup: phy%d invalid portid\n", phy_no); + res = IRQ_NONE; + goto end; + } + + for (i = 0; i < 6; i++) { + u32 idaf = hisi_sas_phy_read32(hisi_hba, phy_no, + RX_IDAF_DWORD0 + (i * 4)); + frame_rcvd[i] = __swab32(idaf); + } + + /* Get the linkrates */ + link_rate = hisi_sas_read32(hisi_hba, PHY_CONN_RATE); + link_rate = (link_rate >> (phy_no * 4)) & 0xf; + sas_phy->linkrate = link_rate; + hard_phy_linkrate = hisi_sas_phy_read32(hisi_hba, phy_no, + HARD_PHY_LINKRATE); + phy->maximum_linkrate = hard_phy_linkrate & 0xf; + phy->minimum_linkrate = (hard_phy_linkrate >> 4) & 0xf; + + sas_phy->oob_mode = SAS_OOB_MODE; + memcpy(sas_phy->attached_sas_addr, &id->sas_addr, SAS_ADDR_SIZE); + dev_info(dev, "phyup: phy%d link_rate=%d\n", phy_no, link_rate); + phy->port_id = port_id; + phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA); + phy->phy_type |= PORT_TYPE_SAS; + phy->phy_attached = 1; + phy->identify.device_type = id->dev_type; + phy->frame_rcvd_size = sizeof(struct sas_identify_frame); + if (phy->identify.device_type == SAS_END_DEVICE) + phy->identify.target_port_protocols = + SAS_PROTOCOL_SSP; + else if (phy->identify.device_type != SAS_PHY_UNUSED) + phy->identify.target_port_protocols = + SAS_PROTOCOL_SMP; + queue_work(hisi_hba->wq, &phy->phyup_ws); + +end: + hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, + CHL_INT0_SL_PHY_ENABLE_MSK); + hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 0); + + return res; +} + +static int phy_down_v2_hw(int phy_no, struct hisi_hba *hisi_hba) +{ + int res = 0; + u32 phy_cfg, phy_state; + + hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1); + + phy_cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG); + + phy_state = hisi_sas_read32(hisi_hba, PHY_STATE); + + hisi_sas_phy_down(hisi_hba, phy_no, (phy_state & 1 << phy_no) ? 
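
phy_up_v2_hw() above (and sata_int_v2_hw() further down) decode two packed registers, PHY_PORT_NUM_MA and PHY_CONN_RATE, each of which holds one 4-bit value per phy; a port field of 0xf means the phy is not yet assigned to a port. A compilable sketch of the extraction, with a made-up register value:

#include <stdio.h>

/* phy N's field sits at bit offset 4*N, as in phy_up_v2_hw() */
static unsigned int phy_field(unsigned int reg, int phy_no)
{
        return (reg >> (4 * phy_no)) & 0xf;
}

int main(void)
{
        unsigned int phy_port_num_ma = 0x00f21ff0;      /* invented value */
        int phy;

        for (phy = 0; phy < 6; phy++)
                printf("phy%d -> port 0x%x\n",
                       phy, phy_field(phy_port_num_ma, phy));
        return 0;
}
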
1 : 0); + + hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, CHL_INT0_NOT_RDY_MSK); + hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 0); + + return res; +} + +static irqreturn_t int_phy_updown_v2_hw(int irq_no, void *p) +{ + struct hisi_hba *hisi_hba = p; + u32 irq_msk; + int phy_no = 0; + irqreturn_t res = IRQ_HANDLED; + + irq_msk = (hisi_sas_read32(hisi_hba, HGC_INVLD_DQE_INFO) + >> HGC_INVLD_DQE_INFO_FB_CH0_OFF) & 0x1ff; + while (irq_msk) { + if (irq_msk & 1) { + u32 irq_value = hisi_sas_phy_read32(hisi_hba, phy_no, + CHL_INT0); + + if (irq_value & CHL_INT0_SL_PHY_ENABLE_MSK) + /* phy up */ + if (phy_up_v2_hw(phy_no, hisi_hba)) { + res = IRQ_NONE; + goto end; + } + + if (irq_value & CHL_INT0_NOT_RDY_MSK) + /* phy down */ + if (phy_down_v2_hw(phy_no, hisi_hba)) { + res = IRQ_NONE; + goto end; + } + } + irq_msk >>= 1; + phy_no++; + } + +end: + return res; +} + +static void phy_bcast_v2_hw(int phy_no, struct hisi_hba *hisi_hba) +{ + struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + struct sas_ha_struct *sas_ha = &hisi_hba->sha; + unsigned long flags; + + hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1); + + spin_lock_irqsave(&hisi_hba->lock, flags); + sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD); + spin_unlock_irqrestore(&hisi_hba->lock, flags); + + hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, + CHL_INT0_SL_RX_BCST_ACK_MSK); + hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0); +} + +static irqreturn_t int_chnl_int_v2_hw(int irq_no, void *p) +{ + struct hisi_hba *hisi_hba = p; + struct device *dev = &hisi_hba->pdev->dev; + u32 ent_msk, ent_tmp, irq_msk; + int phy_no = 0; + + ent_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK3); + ent_tmp = ent_msk; + ent_msk |= ENT_INT_SRC_MSK3_ENT95_MSK_MSK; + hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, ent_msk); + + irq_msk = (hisi_sas_read32(hisi_hba, HGC_INVLD_DQE_INFO) >> + HGC_INVLD_DQE_INFO_FB_CH3_OFF) & 0x1ff; + + while (irq_msk) { + if (irq_msk & (1 << phy_no)) { + u32 irq_value0 = hisi_sas_phy_read32(hisi_hba, phy_no, + CHL_INT0); + u32 irq_value1 = hisi_sas_phy_read32(hisi_hba, phy_no, + CHL_INT1); + u32 irq_value2 = hisi_sas_phy_read32(hisi_hba, phy_no, + CHL_INT2); + + if (irq_value1) { + if (irq_value1 & (CHL_INT1_DMAC_RX_ECC_ERR_MSK | + CHL_INT1_DMAC_TX_ECC_ERR_MSK)) + panic("%s: DMAC RX/TX ecc bad error! 
(0x%x)", + dev_name(dev), irq_value1); + + hisi_sas_phy_write32(hisi_hba, phy_no, + CHL_INT1, irq_value1); + } + + if (irq_value2) + hisi_sas_phy_write32(hisi_hba, phy_no, + CHL_INT2, irq_value2); + + + if (irq_value0) { + if (irq_value0 & CHL_INT0_SL_RX_BCST_ACK_MSK) + phy_bcast_v2_hw(phy_no, hisi_hba); + + hisi_sas_phy_write32(hisi_hba, phy_no, + CHL_INT0, irq_value0 + & (~CHL_INT0_HOTPLUG_TOUT_MSK) + & (~CHL_INT0_SL_PHY_ENABLE_MSK) + & (~CHL_INT0_NOT_RDY_MSK)); + } + } + irq_msk &= ~(1 << phy_no); + phy_no++; + } + + hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, ent_tmp); + + return IRQ_HANDLED; +} + +static irqreturn_t cq_interrupt_v2_hw(int irq_no, void *p) +{ + struct hisi_sas_cq *cq = p; + struct hisi_hba *hisi_hba = cq->hisi_hba; + struct hisi_sas_slot *slot; + struct hisi_sas_itct *itct; + struct hisi_sas_complete_v2_hdr *complete_queue; + u32 irq_value, rd_point, wr_point, dev_id; + int queue = cq->id; + + complete_queue = hisi_hba->complete_hdr[queue]; + irq_value = hisi_sas_read32(hisi_hba, OQ_INT_SRC); + + hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue); + + rd_point = hisi_sas_read32(hisi_hba, COMPL_Q_0_RD_PTR + + (0x14 * queue)); + wr_point = hisi_sas_read32(hisi_hba, COMPL_Q_0_WR_PTR + + (0x14 * queue)); + + while (rd_point != wr_point) { + struct hisi_sas_complete_v2_hdr *complete_hdr; + int iptt; + + complete_hdr = &complete_queue[rd_point]; + + /* Check for NCQ completion */ + if (complete_hdr->act) { + u32 act_tmp = complete_hdr->act; + int ncq_tag_count = ffs(act_tmp); + + dev_id = (complete_hdr->dw1 & CMPLT_HDR_DEV_ID_MSK) >> + CMPLT_HDR_DEV_ID_OFF; + itct = &hisi_hba->itct[dev_id]; + + /* The NCQ tags are held in the itct header */ + while (ncq_tag_count) { + __le64 *ncq_tag = &itct->qw4_15[0]; + + ncq_tag_count -= 1; + iptt = (ncq_tag[ncq_tag_count / 5] + >> (ncq_tag_count % 5) * 12) & 0xfff; + + slot = &hisi_hba->slot_info[iptt]; + slot->cmplt_queue_slot = rd_point; + slot->cmplt_queue = queue; + slot_complete_v2_hw(hisi_hba, slot, 0); + + act_tmp &= ~(1 << ncq_tag_count); + ncq_tag_count = ffs(act_tmp); + } + } else { + iptt = (complete_hdr->dw1) & CMPLT_HDR_IPTT_MSK; + slot = &hisi_hba->slot_info[iptt]; + slot->cmplt_queue_slot = rd_point; + slot->cmplt_queue = queue; + slot_complete_v2_hw(hisi_hba, slot, 0); + } + + if (++rd_point >= HISI_SAS_QUEUE_SLOTS) + rd_point = 0; + } + + /* update rd_point */ + hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point); + return IRQ_HANDLED; +} + +static irqreturn_t sata_int_v2_hw(int irq_no, void *p) +{ + struct hisi_sas_phy *phy = p; + struct hisi_hba *hisi_hba = phy->hisi_hba; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + struct device *dev = &hisi_hba->pdev->dev; + struct hisi_sas_initial_fis *initial_fis; + struct dev_to_host_fis *fis; + u32 ent_tmp, ent_msk, ent_int, port_id, link_rate, hard_phy_linkrate; + irqreturn_t res = IRQ_HANDLED; + u8 attached_sas_addr[SAS_ADDR_SIZE] = {0}; + int phy_no; + + phy_no = sas_phy->id; + initial_fis = &hisi_hba->initial_fis[phy_no]; + fis = &initial_fis->fis; + + ent_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK1); + hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, ent_msk | 1 << phy_no); + + ent_int = hisi_sas_read32(hisi_hba, ENT_INT_SRC1); + ent_tmp = ent_int; + ent_int >>= ENT_INT_SRC1_D2H_FIS_CH1_OFF * (phy_no % 4); + if ((ent_int & ENT_INT_SRC1_D2H_FIS_CH0_MSK) == 0) { + dev_warn(dev, "sata int: phy%d did not receive FIS\n", phy_no); + hisi_sas_write32(hisi_hba, ENT_INT_SRC1, ent_tmp); + hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, ent_msk); + res = 
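
The NCQ branch of cq_interrupt_v2_hw() above recovers IPTTs from the device's ITCT entry, whose qw4_15 area packs five 12-bit tags into each 64-bit qword; slot N therefore lives in qword N/5 at bit offset (N%5)*12. A compilable sketch of that packing and its inverse:

#include <stdint.h>
#include <stdio.h>

/* same indexing as the iptt extraction in cq_interrupt_v2_hw() */
static unsigned int itct_iptt(const uint64_t *qw, int ncq_slot)
{
        return (qw[ncq_slot / 5] >> ((ncq_slot % 5) * 12)) & 0xfff;
}

int main(void)
{
        uint64_t qw[12] = { 0 };
        int slot = 7;

        /* store tag 0x2ab in NCQ slot 7 (qword 1, bits 35:24) */
        qw[slot / 5] |= (uint64_t)0x2ab << ((slot % 5) * 12);
        printf("slot %d -> iptt 0x%03x\n", slot, itct_iptt(qw, slot));
        return 0;
}
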
IRQ_NONE; + goto end; + } + + if (unlikely(phy_no == 8)) { + u32 port_state = hisi_sas_read32(hisi_hba, PORT_STATE); + + port_id = (port_state & PORT_STATE_PHY8_PORT_NUM_MSK) >> + PORT_STATE_PHY8_PORT_NUM_OFF; + link_rate = (port_state & PORT_STATE_PHY8_CONN_RATE_MSK) >> + PORT_STATE_PHY8_CONN_RATE_OFF; + } else { + port_id = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA); + port_id = (port_id >> (4 * phy_no)) & 0xf; + link_rate = hisi_sas_read32(hisi_hba, PHY_CONN_RATE); + link_rate = (link_rate >> (phy_no * 4)) & 0xf; + } + + if (port_id == 0xf) { + dev_err(dev, "sata int: phy%d invalid portid\n", phy_no); + res = IRQ_NONE; + goto end; + } + + sas_phy->linkrate = link_rate; + hard_phy_linkrate = hisi_sas_phy_read32(hisi_hba, phy_no, + HARD_PHY_LINKRATE); + phy->maximum_linkrate = hard_phy_linkrate & 0xf; + phy->minimum_linkrate = (hard_phy_linkrate >> 4) & 0xf; + + sas_phy->oob_mode = SATA_OOB_MODE; + /* Make up some unique SAS address */ + attached_sas_addr[0] = 0x50; + attached_sas_addr[7] = phy_no; + memcpy(sas_phy->attached_sas_addr, attached_sas_addr, SAS_ADDR_SIZE); + memcpy(sas_phy->frame_rcvd, fis, sizeof(struct dev_to_host_fis)); + dev_info(dev, "sata int phyup: phy%d link_rate=%d\n", phy_no, link_rate); + phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA); + phy->port_id = port_id; + phy->phy_type |= PORT_TYPE_SATA; + phy->phy_attached = 1; + phy->identify.device_type = SAS_SATA_DEV; + phy->frame_rcvd_size = sizeof(struct dev_to_host_fis); + phy->identify.target_port_protocols = SAS_PROTOCOL_SATA; + queue_work(hisi_hba->wq, &phy->phyup_ws); + +end: + hisi_sas_write32(hisi_hba, ENT_INT_SRC1, ent_tmp); + hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, ent_msk); + + return res; +} + +static irq_handler_t phy_interrupts[HISI_SAS_PHY_INT_NR] = { + int_phy_updown_v2_hw, + int_chnl_int_v2_hw, +}; + +/** + * There is a limitation in the hip06 chipset that we need + * to map in all mbigen interrupts, even if they are not used. 
+ */ +static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba) +{ + struct platform_device *pdev = hisi_hba->pdev; + struct device *dev = &pdev->dev; + int i, irq, rc, irq_map[128]; + + + for (i = 0; i < 128; i++) + irq_map[i] = platform_get_irq(pdev, i); + + for (i = 0; i < HISI_SAS_PHY_INT_NR; i++) { + int idx = i; + + irq = irq_map[idx + 1]; /* Phy up/down is irq1 */ + if (!irq) { + dev_err(dev, "irq init: fail map phy interrupt %d\n", + idx); + return -ENOENT; + } + + rc = devm_request_irq(dev, irq, phy_interrupts[i], 0, + DRV_NAME " phy", hisi_hba); + if (rc) { + dev_err(dev, "irq init: could not request " + "phy interrupt %d, rc=%d\n", + irq, rc); + return -ENOENT; + } + } + + for (i = 0; i < hisi_hba->n_phy; i++) { + struct hisi_sas_phy *phy = &hisi_hba->phy[i]; + int idx = i + 72; /* First SATA interrupt is irq72 */ + + irq = irq_map[idx]; + if (!irq) { + dev_err(dev, "irq init: fail map phy interrupt %d\n", + idx); + return -ENOENT; + } + + rc = devm_request_irq(dev, irq, sata_int_v2_hw, 0, + DRV_NAME " sata", phy); + if (rc) { + dev_err(dev, "irq init: could not request " + "sata interrupt %d, rc=%d\n", + irq, rc); + return -ENOENT; + } + } + + for (i = 0; i < hisi_hba->queue_count; i++) { + int idx = i + 96; /* First cq interrupt is irq96 */ + + irq = irq_map[idx]; + if (!irq) { + dev_err(dev, + "irq init: could not map cq interrupt %d\n", + idx); + return -ENOENT; + } + rc = devm_request_irq(dev, irq, cq_interrupt_v2_hw, 0, + DRV_NAME " cq", &hisi_hba->cq[i]); + if (rc) { + dev_err(dev, + "irq init: could not request cq interrupt %d, rc=%d\n", + irq, rc); + return -ENOENT; + } + } + + return 0; +} + +static int hisi_sas_v2_init(struct hisi_hba *hisi_hba) +{ + int rc; + + rc = hw_init_v2_hw(hisi_hba); + if (rc) + return rc; + + rc = interrupt_init_v2_hw(hisi_hba); + if (rc) + return rc; + + phys_init_v2_hw(hisi_hba); + + return 0; +} + +static const struct hisi_sas_hw hisi_sas_v2_hw = { + .hw_init = hisi_sas_v2_init, + .setup_itct = setup_itct_v2_hw, + .sl_notify = sl_notify_v2_hw, + .get_wideport_bitmap = get_wideport_bitmap_v2_hw, + .free_device = free_device_v2_hw, + .prep_smp = prep_smp_v2_hw, + .prep_ssp = prep_ssp_v2_hw, + .prep_stp = prep_ata_v2_hw, + .get_free_slot = get_free_slot_v2_hw, + .start_delivery = start_delivery_v2_hw, + .slot_complete = slot_complete_v2_hw, + .phy_enable = enable_phy_v2_hw, + .phy_disable = disable_phy_v2_hw, + .phy_hard_reset = phy_hard_reset_v2_hw, + .max_command_entries = HISI_SAS_COMMAND_ENTRIES_V2_HW, + .complete_hdr_size = sizeof(struct hisi_sas_complete_v2_hdr), +}; + +static int hisi_sas_v2_probe(struct platform_device *pdev) +{ + return hisi_sas_probe(pdev, &hisi_sas_v2_hw); +} + +static int hisi_sas_v2_remove(struct platform_device *pdev) +{ + return hisi_sas_remove(pdev); +} + +static const struct of_device_id sas_v2_of_match[] = { + { .compatible = "hisilicon,hip06-sas-v2",}, + {}, +}; +MODULE_DEVICE_TABLE(of, sas_v2_of_match); + +static struct platform_driver hisi_sas_v2_driver = { + .probe = hisi_sas_v2_probe, + .remove = hisi_sas_v2_remove, + .driver = { + .name = DRV_NAME, + .of_match_table = sas_v2_of_match, + }, +}; + +module_platform_driver(hisi_sas_v2_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("John Garry <john.garry@huawei.com>"); +MODULE_DESCRIPTION("HISILICON SAS controller v2 hw driver"); +MODULE_ALIAS("platform:" DRV_NAME); diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c index 82ac1cd818ac..94025c5cf797 100644 --- a/drivers/scsi/hosts.c +++ b/drivers/scsi/hosts.c @@ -33,7 +33,7 @@ #include 
<linux/transport_class.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> - +#include <linux/idr.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_transport.h> @@ -42,7 +42,7 @@ #include "scsi_logging.h" -static atomic_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */ +static DEFINE_IDA(host_index_ida); static void scsi_host_cls_release(struct device *dev) @@ -355,6 +355,8 @@ static void scsi_host_dev_release(struct device *dev) kfree(shost->shost_data); + ida_simple_remove(&host_index_ida, shost->host_no); + if (parent) put_device(parent); kfree(shost); @@ -388,6 +390,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize) { struct Scsi_Host *shost; gfp_t gfp_mask = GFP_KERNEL; + int index; if (sht->unchecked_isa_dma && privsize) gfp_mask |= __GFP_DMA; @@ -406,11 +409,11 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize) init_waitqueue_head(&shost->host_wait); mutex_init(&shost->scan_mutex); - /* - * subtract one because we increment first then return, but we need to - * know what the next host number was before increment - */ - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1; + index = ida_simple_get(&host_index_ida, 0, 0, GFP_KERNEL); + if (index < 0) + goto fail_kfree; + shost->host_no = index; + shost->dma_channel = 0xff; /* These three are default values which can be overridden */ @@ -495,7 +498,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize) shost_printk(KERN_WARNING, shost, "error handler thread failed to spawn, error = %ld\n", PTR_ERR(shost->ehandler)); - goto fail_kfree; + goto fail_index_remove; } shost->tmf_work_q = alloc_workqueue("scsi_tmf_%d", @@ -511,6 +514,8 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize) fail_kthread: kthread_stop(shost->ehandler); + fail_index_remove: + ida_simple_remove(&host_index_ida, shost->host_no); fail_kfree: kfree(shost); return NULL; @@ -606,6 +611,7 @@ int scsi_init_hosts(void) void scsi_exit_hosts(void) { class_unregister(&shost_class); + ida_destroy(&host_index_ida); } int scsi_is_host_device(const struct device *dev) diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index a3860367b568..5be944c8b71c 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -1,5 +1,6 @@ /* * Disk Array driver for HP Smart Array SAS controllers + * Copyright 2016 Microsemi Corporation * Copyright 2014-2015 PMC-Sierra, Inc. * Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P. * @@ -12,7 +13,7 @@ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more details. 
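
The hosts.c change above swaps the ever-growing atomic counter for an IDA, so a released host's number goes back to the pool in scsi_host_dev_release() instead of being lost forever. A minimal sketch of the same pairing, assuming the ida_simple_* API of this kernel generation:

#include <linux/idr.h>

static DEFINE_IDA(example_index_ida);

/* smallest free index >= 0, or a negative errno */
static int example_alloc_index(void)
{
        return ida_simple_get(&example_index_ida, 0, 0, GFP_KERNEL);
}

/* must mirror every successful example_alloc_index() */
static void example_free_index(int index)
{
        ida_simple_remove(&example_index_ida, index);
}

Note the reworked failure paths in scsi_host_alloc(): once the index is taken, every later error has to go through the new fail_index_remove label or the number would leak.
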
* - * Questions/Comments/Bugfixes to storagedev@pmcs.com + * Questions/Comments/Bugfixes to esc.storagedev@microsemi.com * */ @@ -750,7 +751,6 @@ static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev, } #define MAX_PATHS 8 - static ssize_t path_info_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -792,10 +792,8 @@ static ssize_t path_info_show(struct device *dev, hdev->bus, hdev->target, hdev->lun, scsi_device_type(hdev->devtype)); - if (hdev->external || - hdev->devtype == TYPE_RAID || - is_logical_device(hdev)) { - output_len += snprintf(buf + output_len, + if (hdev->devtype == TYPE_RAID || is_logical_device(hdev)) { + output_len += scnprintf(buf + output_len, PAGE_SIZE - output_len, "%s\n", active); continue; @@ -808,29 +806,29 @@ static ssize_t path_info_show(struct device *dev, phys_connector[0] = '0'; if (phys_connector[1] < '0') phys_connector[1] = '0'; - if (hdev->phys_connector[i] > 0) - output_len += snprintf(buf + output_len, + output_len += scnprintf(buf + output_len, PAGE_SIZE - output_len, "PORT: %.2s ", phys_connector); - if (hdev->devtype == TYPE_DISK && hdev->expose_device) { + if ((hdev->devtype == TYPE_DISK || hdev->devtype == TYPE_ZBC) && + hdev->expose_device) { if (box == 0 || box == 0xFF) { - output_len += snprintf(buf + output_len, + output_len += scnprintf(buf + output_len, PAGE_SIZE - output_len, "BAY: %hhu %s\n", bay, active); } else { - output_len += snprintf(buf + output_len, + output_len += scnprintf(buf + output_len, PAGE_SIZE - output_len, "BOX: %hhu BAY: %hhu %s\n", box, bay, active); } } else if (box != 0 && box != 0xFF) { - output_len += snprintf(buf + output_len, + output_len += scnprintf(buf + output_len, PAGE_SIZE - output_len, "BOX: %hhu %s\n", box, active); } else - output_len += snprintf(buf + output_len, + output_len += scnprintf(buf + output_len, PAGE_SIZE - output_len, "%s\n", active); } @@ -1170,6 +1168,7 @@ static void hpsa_show_dev_msg(const char *level, struct ctlr_info *h, snprintf(label, LABEL_SIZE, "enclosure"); break; case TYPE_DISK: + case TYPE_ZBC: if (dev->external) snprintf(label, LABEL_SIZE, "external"); else if (!is_logical_dev_addr_mode(dev->scsi3addr)) @@ -1640,6 +1639,8 @@ static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h, continue; if (dev[j]->devtype != TYPE_DISK) continue; + if (dev[j]->devtype != TYPE_ZBC) + continue; if (is_logical_device(dev[j])) continue; if (dev[j]->ioaccel_handle != dd[i].ioaccel_handle) @@ -1685,6 +1686,8 @@ static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h, continue; if (dev[i]->devtype != TYPE_DISK) continue; + if (dev[i]->devtype != TYPE_ZBC) + continue; if (!is_logical_device(dev[i])) continue; @@ -3191,6 +3194,89 @@ out: return rc; } +/* + * get enclosure information + * struct ReportExtendedLUNdata *rlep - Used for BMIC drive number + * struct hpsa_scsi_dev_t *encl_dev - device entry for enclosure + * Uses id_physical_device to determine the box_index. 
+ */ +static void hpsa_get_enclosure_info(struct ctlr_info *h, + unsigned char *scsi3addr, + struct ReportExtendedLUNdata *rlep, int rle_index, + struct hpsa_scsi_dev_t *encl_dev) +{ + int rc = -1; + struct CommandList *c = NULL; + struct ErrorInfo *ei = NULL; + struct bmic_sense_storage_box_params *bssbp = NULL; + struct bmic_identify_physical_device *id_phys = NULL; + struct ext_report_lun_entry *rle = &rlep->LUN[rle_index]; + u16 bmic_device_index = 0; + + bmic_device_index = GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]); + + if (bmic_device_index == 0xFF00 || MASKED_DEVICE(&rle->lunid[0])) { + rc = IO_OK; + goto out; + } + + bssbp = kzalloc(sizeof(*bssbp), GFP_KERNEL); + if (!bssbp) + goto out; + + id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL); + if (!id_phys) + goto out; + + rc = hpsa_bmic_id_physical_device(h, scsi3addr, bmic_device_index, + id_phys, sizeof(*id_phys)); + if (rc) { + dev_warn(&h->pdev->dev, "%s: id_phys failed %d bdi[0x%x]\n", + __func__, encl_dev->external, bmic_device_index); + goto out; + } + + c = cmd_alloc(h); + + rc = fill_cmd(c, BMIC_SENSE_STORAGE_BOX_PARAMS, h, bssbp, + sizeof(*bssbp), 0, RAID_CTLR_LUNID, TYPE_CMD); + + if (rc) + goto out; + + if (id_phys->phys_connector[1] == 'E') + c->Request.CDB[5] = id_phys->box_index; + else + c->Request.CDB[5] = 0; + + rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE, + NO_TIMEOUT); + if (rc) + goto out; + + ei = c->err_info; + if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { + rc = -1; + goto out; + } + + encl_dev->box[id_phys->active_path_number] = bssbp->phys_box_on_port; + memcpy(&encl_dev->phys_connector[id_phys->active_path_number], + bssbp->phys_connector, sizeof(bssbp->phys_connector)); + + rc = IO_OK; +out: + kfree(bssbp); + kfree(id_phys); + + if (c) + cmd_free(h, c); + + if (rc != IO_OK) + hpsa_show_dev_msg(KERN_INFO, h, encl_dev, + "Error, could not get enclosure information\n"); +} + static u64 hpsa_get_sas_address_from_report_physical(struct ctlr_info *h, unsigned char *scsi3addr) { @@ -3580,18 +3666,6 @@ static int hpsa_device_supports_aborts(struct ctlr_info *h, return rc; } -static void sanitize_inquiry_string(unsigned char *s, int len) -{ - bool terminated = false; - - for (; len > 0; (--len, ++s)) { - if (*s == 0) - terminated = true; - if (terminated || *s < 0x20 || *s > 0x7e) - *s = ' '; - } -} - static int hpsa_update_device_info(struct ctlr_info *h, unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device, unsigned char *is_OBDR_device) @@ -3622,8 +3696,8 @@ static int hpsa_update_device_info(struct ctlr_info *h, goto bail_out; } - sanitize_inquiry_string(&inq_buff[8], 8); - sanitize_inquiry_string(&inq_buff[16], 16); + scsi_sanitize_inquiry_string(&inq_buff[8], 8); + scsi_sanitize_inquiry_string(&inq_buff[16], 16); this_device->devtype = (inq_buff[0] & 0x1f); memcpy(this_device->scsi3addr, scsi3addr, 8); @@ -3636,7 +3710,8 @@ static int hpsa_update_device_info(struct ctlr_info *h, hpsa_get_device_id(h, scsi3addr, this_device->device_id, 8, sizeof(this_device->device_id)); - if (this_device->devtype == TYPE_DISK && + if ((this_device->devtype == TYPE_DISK || + this_device->devtype == TYPE_ZBC) && is_logical_dev_addr_mode(scsi3addr)) { int volume_offline; @@ -4032,7 +4107,8 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h) /* skip masked non-disk devices */ if (MASKED_DEVICE(lunaddrbytes) && physical_device && - (physdev_list->LUN[phys_dev_index].device_flags & 0x01)) + (physdev_list->LUN[phys_dev_index].device_type != 0x06) && + 
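
The hpsa hunks above retire the driver-private sanitize_inquiry_string() in favour of the scsi core's scsi_sanitize_inquiry_string(). A userspace mirror of the removed logic, showing what the helper does to a fixed-width INQUIRY field:

#include <stdbool.h>
#include <stdio.h>

/* once a NUL or non-printable byte is seen, blank the rest */
static void sanitize_inquiry_string(unsigned char *s, int len)
{
        bool terminated = false;

        for (; len > 0; (--len, ++s)) {
                if (*s == 0)
                        terminated = true;
                if (terminated || *s < 0x20 || *s > 0x7e)
                        *s = ' ';
        }
}

int main(void)
{
        unsigned char vendor[9] = "HP\0junk\x01";

        sanitize_inquiry_string(vendor, 8);
        printf("[%.8s]\n", vendor);     /* prints "[HP      ]" */
        return 0;
}
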
(physdev_list->LUN[phys_dev_index].device_flags & 0x01)) continue; /* Get device type, vendor, model, device id */ @@ -4103,6 +4179,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h) ncurrent++; break; case TYPE_DISK: + case TYPE_ZBC: if (this_device->physical_device) { /* The disk is in HBA mode. */ /* Never use RAID mapper in HBA mode. */ @@ -4116,7 +4193,13 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h) break; case TYPE_TAPE: case TYPE_MEDIUM_CHANGER: + ncurrent++; + break; case TYPE_ENCLOSURE: + if (!this_device->external) + hpsa_get_enclosure_info(h, lunaddrbytes, + physdev_list, phys_dev_index, + this_device); ncurrent++; break; case TYPE_RAID: @@ -4887,6 +4970,8 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, return IO_ACCEL_INELIGIBLE; c->phys_disk = dev->phys_disk[map_index]; + if (!c->phys_disk) + return IO_ACCEL_INELIGIBLE; disk_handle = dd[map_index].ioaccel_handle; disk_block = le64_to_cpu(map->disk_starting_blk) + @@ -5752,7 +5837,7 @@ static int hpsa_send_abort_ioaccel2(struct ctlr_info *h, } static int hpsa_send_abort_both_ways(struct ctlr_info *h, - unsigned char *scsi3addr, struct CommandList *abort, int reply_queue) + struct hpsa_scsi_dev_t *dev, struct CommandList *abort, int reply_queue) { /* * ioccelerator mode 2 commands should be aborted via the @@ -5761,14 +5846,16 @@ static int hpsa_send_abort_both_ways(struct ctlr_info *h, * Change abort to physical device reset when abort TMF is unsupported. */ if (abort->cmd_type == CMD_IOACCEL2) { - if (HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags) + if ((HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags) || + dev->physical_device) return hpsa_send_abort_ioaccel2(h, abort, reply_queue); else - return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr, + return hpsa_send_reset_as_abort_ioaccel2(h, + dev->scsi3addr, abort, reply_queue); } - return hpsa_send_abort(h, scsi3addr, abort, reply_queue); + return hpsa_send_abort(h, dev->scsi3addr, abort, reply_queue); } /* Find out which reply queue a command was meant to return on */ @@ -5906,7 +5993,7 @@ static int hpsa_eh_abort_handler(struct scsi_cmnd *sc) cmd_free(h, abort); return FAILED; } - rc = hpsa_send_abort_both_ways(h, dev->scsi3addr, abort, reply_queue); + rc = hpsa_send_abort_both_ways(h, dev, abort, reply_queue); atomic_inc(&h->abort_cmds_available); wake_up_all(&h->abort_cmd_wait_queue); if (rc != 0) { @@ -6629,6 +6716,16 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, c->Request.CDB[7] = (size >> 16) & 0xFF; c->Request.CDB[8] = (size >> 8) & 0XFF; break; + case BMIC_SENSE_STORAGE_BOX_PARAMS: + c->Request.CDBLen = 10; + c->Request.type_attr_dir = + TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); + c->Request.Timeout = 0; + c->Request.CDB[0] = BMIC_READ; + c->Request.CDB[6] = BMIC_SENSE_STORAGE_BOX_PARAMS; + c->Request.CDB[7] = (size >> 16) & 0xFF; + c->Request.CDB[8] = (size >> 8) & 0XFF; + break; case BMIC_IDENTIFY_CONTROLLER: c->Request.CDBLen = 10; c->Request.type_attr_dir = diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h index ae5beda1bdb5..d06bb7417e36 100644 --- a/drivers/scsi/hpsa.h +++ b/drivers/scsi/hpsa.h @@ -1,5 +1,6 @@ /* * Disk Array driver for HP Smart Array SAS controllers + * Copyright 2016 Microsemi Corporation * Copyright 2014-2015 PMC-Sierra, Inc. * Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P. * @@ -12,7 +13,7 @@ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more details. 
* - * Questions/Comments/Bugfixes to storagedev@pmcs.com + * Questions/Comments/Bugfixes to esc.storagedev@microsemi.com * */ #ifndef HPSA_H @@ -400,7 +401,7 @@ struct offline_device_entry { #define HPSA_PHYSICAL_DEVICE_BUS 0 #define HPSA_RAID_VOLUME_BUS 1 #define HPSA_EXTERNAL_RAID_VOLUME_BUS 2 -#define HPSA_HBA_BUS 3 +#define HPSA_HBA_BUS 0 /* Send the command to the hardware diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h index d92ef0d352b5..a5be153d92d4 100644 --- a/drivers/scsi/hpsa_cmd.h +++ b/drivers/scsi/hpsa_cmd.h @@ -1,5 +1,6 @@ /* * Disk Array driver for HP Smart Array SAS controllers + * Copyright 2016 Microsemi Corporation * Copyright 2014-2015 PMC-Sierra, Inc. * Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P. * @@ -12,7 +13,7 @@ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more details. * - * Questions/Comments/Bugfixes to storagedev@pmcs.com + * Questions/Comments/Bugfixes to esc.storagedev@microsemi.com * */ #ifndef HPSA_CMD_H @@ -289,8 +290,9 @@ struct SenseSubsystem_info { #define BMIC_IDENTIFY_CONTROLLER 0x11 #define BMIC_SET_DIAG_OPTIONS 0xF4 #define BMIC_SENSE_DIAG_OPTIONS 0xF5 -#define HPSA_DIAG_OPTS_DISABLE_RLD_CACHING 0x40000000 +#define HPSA_DIAG_OPTS_DISABLE_RLD_CACHING 0x80000000 #define BMIC_SENSE_SUBSYSTEM_INFORMATION 0x66 +#define BMIC_SENSE_STORAGE_BOX_PARAMS 0x65 /* Command List Structure */ union SCSI3Addr { @@ -842,5 +844,17 @@ struct bmic_sense_subsystem_info { u8 pad[332]; }; +struct bmic_sense_storage_box_params { + u8 reserved[36]; + u8 inquiry_valid; + u8 reserved_1[68]; + u8 phys_box_on_port; + u8 reserved_2[22]; + u16 connection_info; + u8 reserver_3[84]; + u8 phys_connector[2]; + u8 reserved_4[296]; +}; + #pragma pack() #endif /* HPSA_CMD_H */ diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index 6aa317c303e2..fc523c3e5019 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -2636,7 +2636,8 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq, struct ibmvfc_target *tgt; ibmvfc_log(vhost, desc->log_level, "%s event received. 
scsi_id: %llx, wwpn: %llx," - " node_name: %llx%s\n", desc->desc, crq->scsi_id, crq->wwpn, crq->node_name, + " node_name: %llx%s\n", desc->desc, be64_to_cpu(crq->scsi_id), + be64_to_cpu(crq->wwpn), be64_to_cpu(crq->node_name), ibmvfc_get_link_state(crq->link_state)); switch (be64_to_cpu(crq->event)) { diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index adfef9db6f1e..d9534ee6ef52 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c @@ -182,7 +182,7 @@ static struct viosrp_crq *crq_queue_next_crq(struct crq_queue *queue) spin_lock_irqsave(&queue->lock, flags); crq = &queue->msgs[queue->cur]; - if (crq->valid & 0x80) { + if (crq->valid != VIOSRP_CRQ_FREE) { if (++queue->cur == queue->size) queue->cur = 0; @@ -231,7 +231,7 @@ static void ibmvscsi_task(void *data) /* Pull all the valid messages off the CRQ */ while ((crq = crq_queue_next_crq(&hostdata->queue)) != NULL) { ibmvscsi_handle_crq(crq, hostdata); - crq->valid = 0x00; + crq->valid = VIOSRP_CRQ_FREE; } vio_enable_interrupts(vdev); @@ -239,7 +239,7 @@ static void ibmvscsi_task(void *data) if (crq != NULL) { vio_disable_interrupts(vdev); ibmvscsi_handle_crq(crq, hostdata); - crq->valid = 0x00; + crq->valid = VIOSRP_CRQ_FREE; } else { done = 1; } @@ -248,25 +248,23 @@ static void ibmvscsi_task(void *data) static void gather_partition_info(void) { - struct device_node *rootdn; - const char *ppartition_name; const __be32 *p_number_ptr; /* Retrieve information about this partition */ - rootdn = of_find_node_by_path("/"); - if (!rootdn) { + if (!of_root) return; - } - ppartition_name = of_get_property(rootdn, "ibm,partition-name", NULL); + of_node_get(of_root); + + ppartition_name = of_get_property(of_root, "ibm,partition-name", NULL); if (ppartition_name) strncpy(partition_name, ppartition_name, sizeof(partition_name)); - p_number_ptr = of_get_property(rootdn, "ibm,partition-no", NULL); + p_number_ptr = of_get_property(of_root, "ibm,partition-no", NULL); if (p_number_ptr) partition_number = of_read_number(p_number_ptr, 1); - of_node_put(rootdn); + of_node_put(of_root); } static void set_adapter_info(struct ibmvscsi_host_data *hostdata) @@ -283,8 +281,8 @@ static void set_adapter_info(struct ibmvscsi_host_data *hostdata) hostdata->madapter_info.partition_number = cpu_to_be32(partition_number); - hostdata->madapter_info.mad_version = cpu_to_be32(1); - hostdata->madapter_info.os_type = cpu_to_be32(2); + hostdata->madapter_info.mad_version = cpu_to_be32(SRP_MAD_VERSION_1); + hostdata->madapter_info.os_type = cpu_to_be32(SRP_MAD_OS_LINUX); } /** @@ -316,7 +314,7 @@ static int ibmvscsi_reset_crq_queue(struct crq_queue *queue, rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address, queue->msg_token, PAGE_SIZE); - if (rc == 2) { + if (rc == H_CLOSED) { /* Adapter is good, but other end is not ready */ dev_warn(hostdata->dev, "Partner adapter not ready\n"); } else if (rc != 0) { @@ -366,7 +364,7 @@ static int ibmvscsi_init_crq_queue(struct crq_queue *queue, rc = ibmvscsi_reset_crq_queue(queue, hostdata); - if (rc == 2) { + if (rc == H_CLOSED) { /* Adapter is good, but other end is not ready */ dev_warn(hostdata->dev, "Partner adapter not ready\n"); retrc = 0; @@ -474,7 +472,7 @@ static int initialize_event_pool(struct event_pool *pool, struct srp_event_struct *evt = &pool->events[i]; memset(&evt->crq, 0x00, sizeof(evt->crq)); atomic_set(&evt->free, 1); - evt->crq.valid = 0x80; + evt->crq.valid = VIOSRP_CRQ_CMD_RSP; evt->crq.IU_length = cpu_to_be16(sizeof(*evt->xfer_iu)); 
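
The ibmvscsi hunks above replace raw CRQ header bytes (0x00, 0x80, 0xC0, 0xFF) with named constants; the enum itself arrives in the viosrp.h hunk later in this diff. A short sketch of the resulting dispatch, with the enum copied from that hunk:

enum viosrp_crq_headers {
        VIOSRP_CRQ_FREE         = 0x00,
        VIOSRP_CRQ_CMD_RSP      = 0x80,
        VIOSRP_CRQ_INIT_RSP     = 0xC0,
        VIOSRP_CRQ_XPORT_EVENT  = 0xFF
};

/* same shape as the switch in ibmvscsi_handle_crq() */
static const char *crq_kind(unsigned char valid)
{
        switch (valid) {
        case VIOSRP_CRQ_FREE:           return "free slot";
        case VIOSRP_CRQ_INIT_RSP:       return "initialization";
        case VIOSRP_CRQ_XPORT_EVENT:    return "transport event";
        case VIOSRP_CRQ_CMD_RSP:        return "command response";
        default:                        return "invalid message type";
        }
}
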
evt->crq.IU_data_ptr = cpu_to_be64(pool->iu_token + sizeof(*evt->xfer_iu) * i); @@ -1398,7 +1396,7 @@ static void adapter_info_rsp(struct srp_event_struct *evt_struct) hostdata->host->max_sectors = be32_to_cpu(hostdata->madapter_info.port_max_txu[0]) >> 9; - if (be32_to_cpu(hostdata->madapter_info.os_type) == 3 && + if (be32_to_cpu(hostdata->madapter_info.os_type) == SRP_MAD_OS_AIX && strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) { dev_err(hostdata->dev, "host (Ver. %s) doesn't support large transfers\n", hostdata->madapter_info.srp_version); @@ -1407,7 +1405,7 @@ static void adapter_info_rsp(struct srp_event_struct *evt_struct) hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS; } - if (be32_to_cpu(hostdata->madapter_info.os_type) == 3) { + if (be32_to_cpu(hostdata->madapter_info.os_type) == SRP_MAD_OS_AIX) { enable_fast_fail(hostdata); return; } @@ -1767,9 +1765,9 @@ static void ibmvscsi_handle_crq(struct viosrp_crq *crq, struct srp_event_struct *evt_struct = (__force struct srp_event_struct *)crq->IU_data_ptr; switch (crq->valid) { - case 0xC0: /* initialization */ + case VIOSRP_CRQ_INIT_RSP: /* initialization */ switch (crq->format) { - case 0x01: /* Initialization message */ + case VIOSRP_CRQ_INIT: /* Initialization message */ dev_info(hostdata->dev, "partner initialized\n"); /* Send back a response */ rc = ibmvscsi_send_crq(hostdata, 0xC002000000000000LL, 0); @@ -1781,7 +1779,7 @@ static void ibmvscsi_handle_crq(struct viosrp_crq *crq, } break; - case 0x02: /* Initialization response */ + case VIOSRP_CRQ_INIT_COMPLETE: /* Initialization response */ dev_info(hostdata->dev, "partner initialization complete\n"); /* Now login */ @@ -1791,7 +1789,7 @@ static void ibmvscsi_handle_crq(struct viosrp_crq *crq, dev_err(hostdata->dev, "unknown crq message type: %d\n", crq->format); } return; - case 0xFF: /* Hypervisor telling us the connection is closed */ + case VIOSRP_CRQ_XPORT_EVENT: /* Hypervisor telling us the connection is closed */ scsi_block_requests(hostdata->host); atomic_set(&hostdata->request_limit, 0); if (crq->format == 0x06) { @@ -1807,7 +1805,7 @@ static void ibmvscsi_handle_crq(struct viosrp_crq *crq, ibmvscsi_reset_host(hostdata); } return; - case 0x80: /* real payload */ + case VIOSRP_CRQ_CMD_RSP: /* real payload */ break; default: dev_err(hostdata->dev, "got an invalid message type 0x%02x\n", @@ -1855,62 +1853,6 @@ static void ibmvscsi_handle_crq(struct viosrp_crq *crq, } /** - * ibmvscsi_get_host_config: Send the command to the server to get host - * configuration data. The data is opaque to us. 
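
The sysfs fixes just above are endianness corrections: the madapter_info fields are wire-order __be32, so each show routine has to convert before printing or little-endian hosts report byte-swapped numbers. A minimal kernel-style sketch with illustrative names:

#include <linux/kernel.h>
#include <linux/types.h>

struct example_info {
        __be32 partition_number;        /* as received from the server */
};

static ssize_t example_show(const struct example_info *info, char *buf)
{
        /* be32_to_cpu() is a no-op on big-endian, a swab on LE */
        return snprintf(buf, PAGE_SIZE, "%d\n",
                        be32_to_cpu(info->partition_number));
}
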
- */ -static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata, - unsigned char *buffer, int length) -{ - struct viosrp_host_config *host_config; - struct srp_event_struct *evt_struct; - unsigned long flags; - dma_addr_t addr; - int rc; - - evt_struct = get_event_struct(&hostdata->pool); - if (!evt_struct) { - dev_err(hostdata->dev, "couldn't allocate event for HOST_CONFIG!\n"); - return -1; - } - - init_event_struct(evt_struct, - sync_completion, - VIOSRP_MAD_FORMAT, - info_timeout); - - host_config = &evt_struct->iu.mad.host_config; - - /* The transport length field is only 16-bit */ - length = min(0xffff, length); - - /* Set up a lun reset SRP command */ - memset(host_config, 0x00, sizeof(*host_config)); - host_config->common.type = cpu_to_be32(VIOSRP_HOST_CONFIG_TYPE); - host_config->common.length = cpu_to_be16(length); - addr = dma_map_single(hostdata->dev, buffer, length, DMA_BIDIRECTIONAL); - - if (dma_mapping_error(hostdata->dev, addr)) { - if (!firmware_has_feature(FW_FEATURE_CMO)) - dev_err(hostdata->dev, - "dma_mapping error getting host config\n"); - free_event_struct(&hostdata->pool, evt_struct); - return -1; - } - - host_config->buffer = cpu_to_be64(addr); - - init_completion(&evt_struct->comp); - spin_lock_irqsave(hostdata->host->host_lock, flags); - rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2); - spin_unlock_irqrestore(hostdata->host->host_lock, flags); - if (rc == 0) - wait_for_completion(&evt_struct->comp); - dma_unmap_single(hostdata->dev, addr, length, DMA_BIDIRECTIONAL); - - return rc; -} - -/** * ibmvscsi_slave_configure: Set the "allow_restart" flag for each disk. * @sdev: struct scsi_device device to configure * @@ -2041,7 +1983,7 @@ static ssize_t show_host_partition_number(struct device *dev, int len; len = snprintf(buf, PAGE_SIZE, "%d\n", - hostdata->madapter_info.partition_number); + be32_to_cpu(hostdata->madapter_info.partition_number)); return len; } @@ -2061,7 +2003,7 @@ static ssize_t show_host_mad_version(struct device *dev, int len; len = snprintf(buf, PAGE_SIZE, "%d\n", - hostdata->madapter_info.mad_version); + be32_to_cpu(hostdata->madapter_info.mad_version)); return len; } @@ -2080,7 +2022,8 @@ static ssize_t show_host_os_type(struct device *dev, struct ibmvscsi_host_data *hostdata = shost_priv(shost); int len; - len = snprintf(buf, PAGE_SIZE, "%d\n", hostdata->madapter_info.os_type); + len = snprintf(buf, PAGE_SIZE, "%d\n", + be32_to_cpu(hostdata->madapter_info.os_type)); return len; } @@ -2095,21 +2038,14 @@ static struct device_attribute ibmvscsi_host_os_type = { static ssize_t show_host_config(struct device *dev, struct device_attribute *attr, char *buf) { - struct Scsi_Host *shost = class_to_shost(dev); - struct ibmvscsi_host_data *hostdata = shost_priv(shost); - - /* returns null-terminated host config data */ - if (ibmvscsi_do_host_config(hostdata, buf, PAGE_SIZE) == 0) - return strlen(buf); - else - return 0; + return 0; } static struct device_attribute ibmvscsi_host_config = { .attr = { - .name = "config", - .mode = S_IRUGO, - }, + .name = "config", + .mode = S_IRUGO, + }, .show = show_host_config, }; diff --git a/drivers/scsi/ibmvscsi/viosrp.h b/drivers/scsi/ibmvscsi/viosrp.h index 116243087622..c1ab8a4c3161 100644 --- a/drivers/scsi/ibmvscsi/viosrp.h +++ b/drivers/scsi/ibmvscsi/viosrp.h @@ -51,13 +51,25 @@ union srp_iu { u8 reserved[SRP_MAX_IU_LEN]; }; +enum viosrp_crq_headers { + VIOSRP_CRQ_FREE = 0x00, + VIOSRP_CRQ_CMD_RSP = 0x80, + VIOSRP_CRQ_INIT_RSP = 0xC0, + VIOSRP_CRQ_XPORT_EVENT = 0xFF +}; + +enum 
viosrp_crq_init_formats { + VIOSRP_CRQ_INIT = 0x01, + VIOSRP_CRQ_INIT_COMPLETE = 0x02 +}; + enum viosrp_crq_formats { VIOSRP_SRP_FORMAT = 0x01, VIOSRP_MAD_FORMAT = 0x02, VIOSRP_OS400_FORMAT = 0x03, VIOSRP_AIX_FORMAT = 0x04, - VIOSRP_LINUX_FORMAT = 0x06, - VIOSRP_INLINE_FORMAT = 0x07 + VIOSRP_LINUX_FORMAT = 0x05, + VIOSRP_INLINE_FORMAT = 0x06 }; enum viosrp_crq_status { @@ -87,7 +99,6 @@ enum viosrp_mad_types { VIOSRP_EMPTY_IU_TYPE = 0x01, VIOSRP_ERROR_LOG_TYPE = 0x02, VIOSRP_ADAPTER_INFO_TYPE = 0x03, - VIOSRP_HOST_CONFIG_TYPE = 0x04, VIOSRP_CAPABILITIES_TYPE = 0x05, VIOSRP_ENABLE_FAST_FAIL = 0x08, }; @@ -153,11 +164,6 @@ struct viosrp_adapter_info { __be64 buffer; }; -struct viosrp_host_config { - struct mad_common common; - __be64 buffer; -}; - struct viosrp_fast_fail { struct mad_common common; }; @@ -195,7 +201,6 @@ union mad_iu { struct viosrp_empty_iu empty_iu; struct viosrp_error_log error_log; struct viosrp_adapter_info adapter_info; - struct viosrp_host_config host_config; struct viosrp_fast_fail fast_fail; struct viosrp_capabilities capabilities; }; @@ -209,7 +214,10 @@ struct mad_adapter_info_data { char srp_version[8]; char partition_name[96]; __be32 partition_number; +#define SRP_MAD_VERSION_1 1 __be32 mad_version; +#define SRP_MAD_OS_LINUX 2 +#define SRP_MAD_OS_AIX 3 __be32 os_type; __be32 port_max_txu[8]; /* per-port maximum transfer */ }; diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c index 4e1a632ccf16..9164ce1249c1 100644 --- a/drivers/scsi/imm.c +++ b/drivers/scsi/imm.c @@ -43,6 +43,7 @@ typedef struct { unsigned dp:1; /* Data phase present */ unsigned rd:1; /* Read data in data phase */ unsigned wanted:1; /* Parport sharing busy flag */ + unsigned int dev_no; /* Device number */ wait_queue_head_t *waiting; struct Scsi_Host *host; struct list_head list; @@ -76,9 +77,10 @@ static void imm_wakeup(void *ref) spin_lock_irqsave(&arbitration_lock, flags); if (dev->wanted) { - parport_claim(dev->dev); - got_it(dev); - dev->wanted = 0; + if (parport_claim(dev->dev) == 0) { + got_it(dev); + dev->wanted = 0; + } } spin_unlock_irqrestore(&arbitration_lock, flags); } @@ -1120,15 +1122,40 @@ static struct scsi_host_template imm_template = { static LIST_HEAD(imm_hosts); +/* + * Finds the first available device number that can be alloted to the + * new imm device and returns the address of the previous node so that + * we can add to the tail and have a list in the ascending order. 
+ */ + +static inline imm_struct *find_parent(void) +{ + imm_struct *dev, *par = NULL; + unsigned int cnt = 0; + + if (list_empty(&imm_hosts)) + return NULL; + + list_for_each_entry(dev, &imm_hosts, list) { + if (dev->dev_no != cnt) + return par; + cnt++; + par = dev; + } + + return par; +} + static int __imm_attach(struct parport *pb) { struct Scsi_Host *host; - imm_struct *dev; + imm_struct *dev, *temp; DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waiting); DEFINE_WAIT(wait); int ports; int modes, ppb; int err = -ENOMEM; + struct pardev_cb imm_cb; init_waitqueue_head(&waiting); @@ -1141,9 +1168,15 @@ static int __imm_attach(struct parport *pb) dev->mode = IMM_AUTODETECT; INIT_LIST_HEAD(&dev->list); - dev->dev = parport_register_device(pb, "imm", NULL, imm_wakeup, - NULL, 0, dev); + temp = find_parent(); + if (temp) + dev->dev_no = temp->dev_no + 1; + memset(&imm_cb, 0, sizeof(imm_cb)); + imm_cb.private = dev; + imm_cb.wakeup = imm_wakeup; + + dev->dev = parport_register_dev_model(pb, "imm", &imm_cb, dev->dev_no); if (!dev->dev) goto out; @@ -1207,7 +1240,10 @@ static int __imm_attach(struct parport *pb) host->unique_id = pb->number; *(imm_struct **)&host->hostdata = dev; dev->host = host; - list_add_tail(&dev->list, &imm_hosts); + if (!temp) + list_add_tail(&dev->list, &imm_hosts); + else + list_add_tail(&dev->list, &temp->list); err = scsi_add_host(host, NULL); if (err) goto out2; @@ -1245,9 +1281,10 @@ static void imm_detach(struct parport *pb) } static struct parport_driver imm_driver = { - .name = "imm", - .attach = imm_attach, - .detach = imm_detach, + .name = "imm", + .match_port = imm_attach, + .detach = imm_detach, + .devmodel = true, }; static int __init imm_driver_init(void) diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c index 6a926bae76b2..7a91cf3ff173 100644 --- a/drivers/scsi/initio.c +++ b/drivers/scsi/initio.c @@ -110,11 +110,6 @@ #define i91u_MAXQUEUE 2 #define i91u_REVID "Initio INI-9X00U/UW SCSI device driver; Revision: 1.04a" -#define I950_DEVICE_ID 0x9500 /* Initio's inic-950 product ID */ -#define I940_DEVICE_ID 0x9400 /* Initio's inic-940 product ID */ -#define I935_DEVICE_ID 0x9401 /* Initio's inic-935 product ID */ -#define I920_DEVICE_ID 0x0002 /* Initio's other product ID */ - #ifdef DEBUG_i91u static unsigned int i91u_debug = DEBUG_DEFAULT; #endif @@ -127,17 +122,6 @@ static int setup_debug = 0; static void i91uSCBPost(u8 * pHcb, u8 * pScb); -/* PCI Devices supported by this driver */ -static struct pci_device_id i91u_pci_devices[] = { - { PCI_VENDOR_ID_INIT, I950_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, - { PCI_VENDOR_ID_INIT, I940_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, - { PCI_VENDOR_ID_INIT, I935_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, - { PCI_VENDOR_ID_INIT, I920_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, - { PCI_VENDOR_ID_DOMEX, I920_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, - { } -}; -MODULE_DEVICE_TABLE(pci, i91u_pci_devices); - #define DEBUG_INTERRUPT 0 #define DEBUG_QUEUE 0 #define DEBUG_STATE 0 diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 536cd5a80422..d6a691e27d33 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -3638,7 +3638,7 @@ static struct device_attribute ipr_ioa_reset_attr = { .store = ipr_store_reset_adapter }; -static int ipr_iopoll(struct blk_iopoll *iop, int budget); +static int ipr_iopoll(struct irq_poll *iop, int budget); /** * ipr_show_iopoll_weight - Show ipr polling mode * @dev: class device struct @@ -3681,34 +3681,33 @@ static ssize_t ipr_store_iopoll_weight(struct device *dev, int i; if 
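
imm's new find_parent() above keeps the host list sorted by dev_no and stops at the first gap, so the number freed by a detached adapter is reused by the next attach. A compilable userspace mirror of the gap scan:

#include <stdio.h>

/* first index where sorted[i] != i is the lowest free number */
static int first_free_dev_no(const int *sorted, int n)
{
        int cnt;

        for (cnt = 0; cnt < n; cnt++)
                if (sorted[cnt] != cnt)
                        break;
        return cnt;
}

int main(void)
{
        int in_use[] = { 0, 1, 3, 4 };  /* dev_no 2 was detached */

        printf("next dev_no = %d\n",
               first_free_dev_no(in_use, sizeof(in_use) / sizeof(in_use[0])));
        return 0;
}
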
(!ioa_cfg->sis64) { - dev_info(&ioa_cfg->pdev->dev, "blk-iopoll not supported on this adapter\n"); + dev_info(&ioa_cfg->pdev->dev, "irq_poll not supported on this adapter\n"); return -EINVAL; } if (kstrtoul(buf, 10, &user_iopoll_weight)) return -EINVAL; if (user_iopoll_weight > 256) { - dev_info(&ioa_cfg->pdev->dev, "Invalid blk-iopoll weight. It must be less than 256\n"); + dev_info(&ioa_cfg->pdev->dev, "Invalid irq_poll weight. It must be less than 256\n"); return -EINVAL; } if (user_iopoll_weight == ioa_cfg->iopoll_weight) { - dev_info(&ioa_cfg->pdev->dev, "Current blk-iopoll weight has the same weight\n"); + dev_info(&ioa_cfg->pdev->dev, "Current irq_poll weight has the same weight\n"); return strlen(buf); } if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { for (i = 1; i < ioa_cfg->hrrq_num; i++) - blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll); + irq_poll_disable(&ioa_cfg->hrrq[i].iopoll); } spin_lock_irqsave(shost->host_lock, lock_flags); ioa_cfg->iopoll_weight = user_iopoll_weight; if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { for (i = 1; i < ioa_cfg->hrrq_num; i++) { - blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll, + irq_poll_init(&ioa_cfg->hrrq[i].iopoll, ioa_cfg->iopoll_weight, ipr_iopoll); - blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll); } } spin_unlock_irqrestore(shost->host_lock, lock_flags); @@ -4003,13 +4002,17 @@ static ssize_t ipr_store_update_fw(struct device *dev, struct ipr_sglist *sglist; char fname[100]; char *src; - int len, result, dnld_size; + char *endline; + int result, dnld_size; if (!capable(CAP_SYS_ADMIN)) return -EACCES; - len = snprintf(fname, 99, "%s", buf); - fname[len-1] = '\0'; + snprintf(fname, sizeof(fname), "%s", buf); + + endline = strchr(fname, '\n'); + if (endline) + *endline = '\0'; if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) { dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname); @@ -5569,7 +5572,7 @@ static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget, return num_hrrq; } -static int ipr_iopoll(struct blk_iopoll *iop, int budget) +static int ipr_iopoll(struct irq_poll *iop, int budget) { struct ipr_ioa_cfg *ioa_cfg; struct ipr_hrr_queue *hrrq; @@ -5585,7 +5588,7 @@ static int ipr_iopoll(struct blk_iopoll *iop, int budget) completed_ops = ipr_process_hrrq(hrrq, budget, &doneq); if (completed_ops < budget) - blk_iopoll_complete(iop); + irq_poll_complete(iop); spin_unlock_irqrestore(hrrq->lock, hrrq_flags); list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) { @@ -5693,8 +5696,7 @@ static irqreturn_t ipr_isr_mhrrq(int irq, void *devp) if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) == hrrq->toggle_bit) { - if (!blk_iopoll_sched_prep(&hrrq->iopoll)) - blk_iopoll_sched(&hrrq->iopoll); + irq_poll_sched(&hrrq->iopoll); spin_unlock_irqrestore(hrrq->lock, hrrq_flags); return IRQ_HANDLED; } @@ -10405,9 +10407,8 @@ static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id) if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { for (i = 1; i < ioa_cfg->hrrq_num; i++) { - blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll, + irq_poll_init(&ioa_cfg->hrrq[i].iopoll, ioa_cfg->iopoll_weight, ipr_iopoll); - blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll); } } @@ -10436,7 +10437,7 @@ static void ipr_shutdown(struct pci_dev *pdev) if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { ioa_cfg->iopoll_weight = 0; for (i = 1; i < ioa_cfg->hrrq_num; 
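
The ipr conversion above is mostly a rename, but two behavioural points explain the deleted lines: an irq_poll instance is live as soon as irq_poll_init() returns (hence the dropped blk_iopoll_enable() calls), and irq_poll_sched() no longer needs a sched_prep test. A minimal sketch of the lifecycle, with illustrative hook names and budget:

#include <linux/irq_poll.h>
#include <linux/interrupt.h>

static struct irq_poll example_iop;

static int example_poll(struct irq_poll *iop, int budget)
{
        int done = 0;

        /* ... reap up to 'budget' completions, counting them in 'done' ... */
        if (done < budget)
                irq_poll_complete(iop);         /* drained: re-arm */
        return done;
}

static irqreturn_t example_isr(int irq, void *data)
{
        irq_poll_sched(&example_iop);           /* defer the heavy work */
        return IRQ_HANDLED;
}

static void example_setup(void)
{
        irq_poll_init(&example_iop, 64, example_poll);  /* usable at once */
}
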
i++) - blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll); + irq_poll_disable(&ioa_cfg->hrrq[i].iopoll); } while (ioa_cfg->in_reset_reload) { diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h index a34c7a5a995e..56c57068300a 100644 --- a/drivers/scsi/ipr.h +++ b/drivers/scsi/ipr.h @@ -32,7 +32,7 @@ #include <linux/libata.h> #include <linux/list.h> #include <linux/kref.h> -#include <linux/blk-iopoll.h> +#include <linux/irq_poll.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> @@ -517,7 +517,7 @@ struct ipr_hrr_queue { u8 allow_cmds:1; u8 removing_ioa:1; - struct blk_iopoll iopoll; + struct irq_poll iopoll; }; /* Command packet structure */ diff --git a/drivers/scsi/iscsi_boot_sysfs.c b/drivers/scsi/iscsi_boot_sysfs.c index 680bf6f0ce76..8f0ea97cf31f 100644 --- a/drivers/scsi/iscsi_boot_sysfs.c +++ b/drivers/scsi/iscsi_boot_sysfs.c @@ -166,6 +166,7 @@ static struct attribute_group iscsi_boot_target_attr_group = { iscsi_boot_rd_attr(eth_index, index, ISCSI_BOOT_ETH_INDEX); iscsi_boot_rd_attr(eth_flags, flags, ISCSI_BOOT_ETH_FLAGS); iscsi_boot_rd_attr(eth_ip, ip-addr, ISCSI_BOOT_ETH_IP_ADDR); +iscsi_boot_rd_attr(eth_prefix, prefix-len, ISCSI_BOOT_ETH_PREFIX_LEN); iscsi_boot_rd_attr(eth_subnet, subnet-mask, ISCSI_BOOT_ETH_SUBNET_MASK); iscsi_boot_rd_attr(eth_origin, origin, ISCSI_BOOT_ETH_ORIGIN); iscsi_boot_rd_attr(eth_gateway, gateway, ISCSI_BOOT_ETH_GATEWAY); @@ -181,6 +182,7 @@ static struct attribute *ethernet_attrs[] = { &iscsi_boot_attr_eth_index.attr, &iscsi_boot_attr_eth_flags.attr, &iscsi_boot_attr_eth_ip.attr, + &iscsi_boot_attr_eth_prefix.attr, &iscsi_boot_attr_eth_subnet.attr, &iscsi_boot_attr_eth_origin.attr, &iscsi_boot_attr_eth_gateway.attr, @@ -208,6 +210,9 @@ static umode_t iscsi_boot_eth_attr_is_visible(struct kobject *kobj, else if (attr == &iscsi_boot_attr_eth_ip.attr) return boot_kobj->is_visible(boot_kobj->data, ISCSI_BOOT_ETH_IP_ADDR); + else if (attr == &iscsi_boot_attr_eth_prefix.attr) + return boot_kobj->is_visible(boot_kobj->data, + ISCSI_BOOT_ETH_PREFIX_LEN); else if (attr == &iscsi_boot_attr_eth_subnet.attr) return boot_kobj->is_visible(boot_kobj->data, ISCSI_BOOT_ETH_SUBNET_MASK); diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index 0b8af186e707..2e4c82f8329c 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c @@ -26,12 +26,12 @@ * Zhenyu Wang */ +#include <crypto/hash.h> #include <linux/types.h> #include <linux/inet.h> #include <linux/slab.h> #include <linux/file.h> #include <linux/blkdev.h> -#include <linux/crypto.h> #include <linux/delay.h> #include <linux/kfifo.h> #include <linux/scatterlist.h> @@ -428,7 +428,7 @@ static void iscsi_sw_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr, * sufficient room. 
*/ if (conn->hdrdgst_en) { - iscsi_tcp_dgst_header(&tcp_sw_conn->tx_hash, hdr, hdrlen, + iscsi_tcp_dgst_header(tcp_sw_conn->tx_hash, hdr, hdrlen, hdr + hdrlen); hdrlen += ISCSI_DIGEST_SIZE; } @@ -454,7 +454,7 @@ iscsi_sw_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg, { struct iscsi_tcp_conn *tcp_conn = conn->dd_data; struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; - struct hash_desc *tx_hash = NULL; + struct ahash_request *tx_hash = NULL; unsigned int hdr_spec_len; ISCSI_SW_TCP_DBG(conn, "offset=%d, datalen=%d %s\n", offset, len, @@ -467,7 +467,7 @@ iscsi_sw_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg, WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len)); if (conn->datadgst_en) - tx_hash = &tcp_sw_conn->tx_hash; + tx_hash = tcp_sw_conn->tx_hash; return iscsi_segment_seek_sg(&tcp_sw_conn->out.data_segment, sg, count, offset, len, @@ -480,7 +480,7 @@ iscsi_sw_tcp_send_linear_data_prep(struct iscsi_conn *conn, void *data, { struct iscsi_tcp_conn *tcp_conn = conn->dd_data; struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; - struct hash_desc *tx_hash = NULL; + struct ahash_request *tx_hash = NULL; unsigned int hdr_spec_len; ISCSI_SW_TCP_DBG(conn, "datalen=%zd %s\n", len, conn->datadgst_en ? @@ -492,7 +492,7 @@ iscsi_sw_tcp_send_linear_data_prep(struct iscsi_conn *conn, void *data, WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len)); if (conn->datadgst_en) - tx_hash = &tcp_sw_conn->tx_hash; + tx_hash = tcp_sw_conn->tx_hash; iscsi_segment_init_linear(&tcp_sw_conn->out.data_segment, data, len, NULL, tx_hash); @@ -543,6 +543,7 @@ iscsi_sw_tcp_conn_create(struct iscsi_cls_session *cls_session, struct iscsi_cls_conn *cls_conn; struct iscsi_tcp_conn *tcp_conn; struct iscsi_sw_tcp_conn *tcp_sw_conn; + struct crypto_ahash *tfm; cls_conn = iscsi_tcp_conn_setup(cls_session, sizeof(*tcp_sw_conn), conn_idx); @@ -552,23 +553,28 @@ iscsi_sw_tcp_conn_create(struct iscsi_cls_session *cls_session, tcp_conn = conn->dd_data; tcp_sw_conn = tcp_conn->dd_data; - tcp_sw_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0, - CRYPTO_ALG_ASYNC); - tcp_sw_conn->tx_hash.flags = 0; - if (IS_ERR(tcp_sw_conn->tx_hash.tfm)) + tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(tfm)) goto free_conn; - tcp_sw_conn->rx_hash.tfm = crypto_alloc_hash("crc32c", 0, - CRYPTO_ALG_ASYNC); - tcp_sw_conn->rx_hash.flags = 0; - if (IS_ERR(tcp_sw_conn->rx_hash.tfm)) - goto free_tx_tfm; - tcp_conn->rx_hash = &tcp_sw_conn->rx_hash; + tcp_sw_conn->tx_hash = ahash_request_alloc(tfm, GFP_KERNEL); + if (!tcp_sw_conn->tx_hash) + goto free_tfm; + ahash_request_set_callback(tcp_sw_conn->tx_hash, 0, NULL, NULL); + + tcp_sw_conn->rx_hash = ahash_request_alloc(tfm, GFP_KERNEL); + if (!tcp_sw_conn->rx_hash) + goto free_tx_hash; + ahash_request_set_callback(tcp_sw_conn->rx_hash, 0, NULL, NULL); + + tcp_conn->rx_hash = tcp_sw_conn->rx_hash; return cls_conn; -free_tx_tfm: - crypto_free_hash(tcp_sw_conn->tx_hash.tfm); +free_tx_hash: + ahash_request_free(tcp_sw_conn->tx_hash); +free_tfm: + crypto_free_ahash(tfm); free_conn: iscsi_conn_printk(KERN_ERR, conn, "Could not create connection due to crc32c " @@ -607,10 +613,14 @@ static void iscsi_sw_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn) iscsi_sw_tcp_release_conn(conn); - if (tcp_sw_conn->tx_hash.tfm) - crypto_free_hash(tcp_sw_conn->tx_hash.tfm); - if (tcp_sw_conn->rx_hash.tfm) - crypto_free_hash(tcp_sw_conn->rx_hash.tfm); + ahash_request_free(tcp_sw_conn->rx_hash); + if (tcp_sw_conn->tx_hash) { + struct crypto_ahash 
*tfm; + + tfm = crypto_ahash_reqtfm(tcp_sw_conn->tx_hash); + ahash_request_free(tcp_sw_conn->tx_hash); + crypto_free_ahash(tfm); + } iscsi_tcp_conn_teardown(cls_conn); } diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h index f42ecb238af5..06d42d00a323 100644 --- a/drivers/scsi/iscsi_tcp.h +++ b/drivers/scsi/iscsi_tcp.h @@ -45,8 +45,8 @@ struct iscsi_sw_tcp_conn { void (*old_write_space)(struct sock *); /* data and header digests */ - struct hash_desc tx_hash; /* CRC32C (Tx) */ - struct hash_desc rx_hash; /* CRC32C (Rx) */ + struct ahash_request *tx_hash; /* CRC32C (Tx) */ + struct ahash_request *rx_hash; /* CRC32C (Rx) */ /* MIB custom statistics */ uint32_t sendpage_failures_cnt; diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c index 60cb6dc3c6f0..63a1d69ff515 100644 --- a/drivers/scsi/libiscsi_tcp.c +++ b/drivers/scsi/libiscsi_tcp.c @@ -26,13 +26,13 @@ * Zhenyu Wang */ +#include <crypto/hash.h> #include <linux/types.h> #include <linux/list.h> #include <linux/inet.h> #include <linux/slab.h> #include <linux/file.h> #include <linux/blkdev.h> -#include <linux/crypto.h> #include <linux/delay.h> #include <linux/kfifo.h> #include <linux/scatterlist.h> @@ -214,7 +214,8 @@ int iscsi_tcp_segment_done(struct iscsi_tcp_conn *tcp_conn, } else sg_init_one(&sg, segment->data + segment->copied, copied); - crypto_hash_update(segment->hash, &sg, copied); + ahash_request_set_crypt(segment->hash, &sg, NULL, copied); + crypto_ahash_update(segment->hash); } segment->copied += copied; @@ -260,7 +261,9 @@ int iscsi_tcp_segment_done(struct iscsi_tcp_conn *tcp_conn, * is completely handled in hdr done function. */ if (segment->hash) { - crypto_hash_final(segment->hash, segment->digest); + ahash_request_set_crypt(segment->hash, NULL, + segment->digest, 0); + crypto_ahash_final(segment->hash); iscsi_tcp_segment_splice_digest(segment, recv ? 
segment->recv_digest : segment->digest); return 0; @@ -310,13 +313,14 @@ iscsi_tcp_segment_recv(struct iscsi_tcp_conn *tcp_conn, } inline void -iscsi_tcp_dgst_header(struct hash_desc *hash, const void *hdr, size_t hdrlen, - unsigned char digest[ISCSI_DIGEST_SIZE]) +iscsi_tcp_dgst_header(struct ahash_request *hash, const void *hdr, + size_t hdrlen, unsigned char digest[ISCSI_DIGEST_SIZE]) { struct scatterlist sg; sg_init_one(&sg, hdr, hdrlen); - crypto_hash_digest(hash, &sg, hdrlen, digest); + ahash_request_set_crypt(hash, &sg, digest, hdrlen); + crypto_ahash_digest(hash); } EXPORT_SYMBOL_GPL(iscsi_tcp_dgst_header); @@ -341,7 +345,7 @@ iscsi_tcp_dgst_verify(struct iscsi_tcp_conn *tcp_conn, */ static inline void __iscsi_segment_init(struct iscsi_segment *segment, size_t size, - iscsi_segment_done_fn_t *done, struct hash_desc *hash) + iscsi_segment_done_fn_t *done, struct ahash_request *hash) { memset(segment, 0, sizeof(*segment)); segment->total_size = size; @@ -349,14 +353,14 @@ __iscsi_segment_init(struct iscsi_segment *segment, size_t size, if (hash) { segment->hash = hash; - crypto_hash_init(hash); + crypto_ahash_init(hash); } } inline void iscsi_segment_init_linear(struct iscsi_segment *segment, void *data, size_t size, iscsi_segment_done_fn_t *done, - struct hash_desc *hash) + struct ahash_request *hash) { __iscsi_segment_init(segment, size, done, hash); segment->data = data; @@ -368,7 +372,8 @@ inline int iscsi_segment_seek_sg(struct iscsi_segment *segment, struct scatterlist *sg_list, unsigned int sg_count, unsigned int offset, size_t size, - iscsi_segment_done_fn_t *done, struct hash_desc *hash) + iscsi_segment_done_fn_t *done, + struct ahash_request *hash) { struct scatterlist *sg; unsigned int i; @@ -431,7 +436,7 @@ static void iscsi_tcp_data_recv_prep(struct iscsi_tcp_conn *tcp_conn) { struct iscsi_conn *conn = tcp_conn->iscsi_conn; - struct hash_desc *rx_hash = NULL; + struct ahash_request *rx_hash = NULL; if (conn->datadgst_en && !(conn->session->tt->caps & CAP_DIGEST_OFFLOAD)) @@ -686,7 +691,7 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr) if (tcp_conn->in.datalen) { struct iscsi_tcp_task *tcp_task = task->dd_data; - struct hash_desc *rx_hash = NULL; + struct ahash_request *rx_hash = NULL; struct scsi_data_buffer *sdb = scsi_in(task->sc); /* diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index ceee9a3fd9e5..90a3ca5a4dbd 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -386,7 +386,6 @@ struct lpfc_vport { uint32_t work_port_events; /* Timeout to be handled */ #define WORKER_DISC_TMO 0x1 /* vport: Discovery timeout */ #define WORKER_ELS_TMO 0x2 /* vport: ELS timeout */ -#define WORKER_FDMI_TMO 0x4 /* vport: FDMI timeout */ #define WORKER_DELAYED_DISC_TMO 0x8 /* vport: delayed discovery */ #define WORKER_MBOX_TMO 0x100 /* hba: MBOX timeout */ @@ -396,7 +395,6 @@ struct lpfc_vport { #define WORKER_RAMP_UP_QUEUE 0x1000 /* hba: Increase Q depth */ #define WORKER_SERVICE_TXQ 0x2000 /* hba: IOCBs on the txq */ - struct timer_list fc_fdmitmo; struct timer_list els_tmofunc; struct timer_list delayed_disc_tmo; @@ -405,6 +403,7 @@ struct lpfc_vport { uint8_t load_flag; #define FC_LOADING 0x1 /* HBA in process of loading drvr */ #define FC_UNLOADING 0x2 /* HBA in process of unloading drvr */ +#define FC_ALLOW_FDMI 0x4 /* port is ready for FDMI requests */ /* Vport Config Parameters */ uint32_t cfg_scan_down; uint32_t cfg_lun_queue_depth; @@ -414,10 +413,6 @@ struct lpfc_vport { uint32_t cfg_peer_port_login; uint32_t 
cfg_fcp_class; uint32_t cfg_use_adisc; - uint32_t cfg_fdmi_on; -#define LPFC_FDMI_SUPPORT 1 /* bit 0 - FDMI supported? */ -#define LPFC_FDMI_REG_DELAY 2 /* bit 1 - 60 sec registration delay */ -#define LPFC_FDMI_ALL_ATTRIB 4 /* bit 2 - register ALL attributes? */ uint32_t cfg_discovery_threads; uint32_t cfg_log_verbose; uint32_t cfg_max_luns; @@ -443,6 +438,10 @@ struct lpfc_vport { unsigned long rcv_buffer_time_stamp; uint32_t vport_flag; #define STATIC_VPORT 1 + + uint16_t fdmi_num_disc; + uint32_t fdmi_hba_mask; + uint32_t fdmi_port_mask; }; struct hbq_s { @@ -755,6 +754,11 @@ struct lpfc_hba { #define LPFC_DELAY_INIT_LINK 1 /* layered driver hold off */ #define LPFC_DELAY_INIT_LINK_INDEFINITELY 2 /* wait, manual intervention */ uint32_t cfg_enable_dss; + uint32_t cfg_fdmi_on; +#define LPFC_FDMI_NO_SUPPORT 0 /* FDMI not supported */ +#define LPFC_FDMI_SUPPORT 1 /* FDMI supported? */ +#define LPFC_FDMI_SMART_SAN 2 /* SmartSAN supported */ + uint32_t cfg_enable_SmartSAN; lpfc_vpd_t vpd; /* vital product data */ struct pci_dev *pcidev; diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index f6446d759d7f..343ae9482891 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -4572,19 +4572,27 @@ LPFC_ATTR_R(multi_ring_type, FC_TYPE_IP, 1, 255, "Identifies TYPE for additional ring configuration"); /* -# lpfc_fdmi_on: controls FDMI support. -# Set NOT Set -# bit 0 = FDMI support no FDMI support -# LPFC_FDMI_SUPPORT just turns basic support on/off -# bit 1 = Register delay no register delay (60 seconds) -# LPFC_FDMI_REG_DELAY 60 sec registration delay after FDMI login -# bit 2 = All attributes Use a attribute subset -# LPFC_FDMI_ALL_ATTRIB applies to both port and HBA attributes -# Port attrutes subset: 1 thru 6 OR all: 1 thru 0xd 0x101 0x102 0x103 -# HBA attributes subset: 1 thru 0xb OR all: 1 thru 0xc -# Value range [0,7]. Default value is 0. +# lpfc_enable_SmartSAN: Sets up FDMI support for SmartSAN +# 0 = SmartSAN functionality disabled (default) +# 1 = SmartSAN functionality enabled +# This parameter will override the value of lpfc_fdmi_on module parameter. +# Value range is [0,1]. Default value is 0. */ -LPFC_VPORT_ATTR_RW(fdmi_on, 0, 0, 7, "Enable FDMI support"); +LPFC_ATTR_R(enable_SmartSAN, 0, 0, 1, "Enable SmartSAN functionality"); + +/* +# lpfc_fdmi_on: Controls FDMI support. +# 0 No FDMI support (default) +# 1 Traditional FDMI support +# 2 Smart SAN support +# If lpfc_enable_SmartSAN is set 1, the driver sets lpfc_fdmi_on to value 2 +# overwriting the current value. If lpfc_enable_SmartSAN is set 0, the +# driver uses the current value of lpfc_fdmi_on provided it has value 0 or 1. +# A value of 2 with lpfc_enable_SmartSAN set to 0 causes the driver to +# set lpfc_fdmi_on back to 1. +# Value range [0,2]. Default value is 0. 
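+#
+# Net effect of the two parameters, as reconciled in lpfc_get_cfgparam()
+# (see the hunk below; this table only restates the rules above):
+#   lpfc_enable_SmartSAN=0  lpfc_fdmi_on=0  ->  FDMI disabled
+#   lpfc_enable_SmartSAN=0  lpfc_fdmi_on=1  ->  traditional FDMI
+#   lpfc_enable_SmartSAN=0  lpfc_fdmi_on=2  ->  demoted to 1 (traditional FDMI)
+#   lpfc_enable_SmartSAN=1  lpfc_fdmi_on=*  ->  forced to 2 (Smart SAN)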
+*/ +LPFC_ATTR_R(fdmi_on, 0, 0, 2, "Enable FDMI support"); /* # Specifies the maximum number of ELS cmds we can have outstanding (for @@ -4815,6 +4823,7 @@ struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_lpfc_multi_ring_rctl, &dev_attr_lpfc_multi_ring_type, &dev_attr_lpfc_fdmi_on, + &dev_attr_lpfc_enable_SmartSAN, &dev_attr_lpfc_max_luns, &dev_attr_lpfc_enable_npiv, &dev_attr_lpfc_fcf_failover_policy, @@ -4887,7 +4896,6 @@ struct device_attribute *lpfc_vport_attrs[] = { &dev_attr_lpfc_fcp_class, &dev_attr_lpfc_use_adisc, &dev_attr_lpfc_first_burst_size, - &dev_attr_lpfc_fdmi_on, &dev_attr_lpfc_max_luns, &dev_attr_nport_evt_cnt, &dev_attr_npiv_info, @@ -5247,7 +5255,7 @@ lpfc_get_host_speed(struct Scsi_Host *shost) spin_lock_irq(shost->host_lock); - if (lpfc_is_link_up(phba)) { + if ((lpfc_is_link_up(phba)) && (!(phba->hba_flag & HBA_FCOE_MODE))) { switch(phba->fc_linkspeed) { case LPFC_LINK_SPEED_1GHZ: fc_host_speed(shost) = FC_PORTSPEED_1GBIT; @@ -5826,6 +5834,8 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) lpfc_enable_npiv_init(phba, lpfc_enable_npiv); lpfc_fcf_failover_policy_init(phba, lpfc_fcf_failover_policy); lpfc_enable_rrq_init(phba, lpfc_enable_rrq); + lpfc_fdmi_on_init(phba, lpfc_fdmi_on); + lpfc_enable_SmartSAN_init(phba, lpfc_enable_SmartSAN); lpfc_use_msi_init(phba, lpfc_use_msi); lpfc_fcp_imax_init(phba, lpfc_fcp_imax); lpfc_fcp_cpu_map_init(phba, lpfc_fcp_cpu_map); @@ -5846,6 +5856,15 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) phba->cfg_poll = 0; else phba->cfg_poll = lpfc_poll; + + /* Ensure fdmi_on and enable_SmartSAN don't conflict */ + if (phba->cfg_enable_SmartSAN) { + phba->cfg_fdmi_on = LPFC_FDMI_SMART_SAN; + } else { + if (phba->cfg_fdmi_on == LPFC_FDMI_SMART_SAN) + phba->cfg_fdmi_on = LPFC_FDMI_SUPPORT; + } + phba->cfg_soft_wwnn = 0L; phba->cfg_soft_wwpn = 0L; lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt); @@ -5879,7 +5898,6 @@ lpfc_get_vport_cfgparam(struct lpfc_vport *vport) lpfc_use_adisc_init(vport, lpfc_use_adisc); lpfc_first_burst_size_init(vport, lpfc_first_burst_size); lpfc_max_scsicmpl_time_init(vport, lpfc_max_scsicmpl_time); - lpfc_fdmi_on_init(vport, lpfc_fdmi_on); lpfc_discovery_threads_init(vport, lpfc_discovery_threads); lpfc_max_luns_init(vport, lpfc_max_luns); lpfc_scan_down_init(vport, lpfc_scan_down); diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index b0e6fe46448d..4e55b35180a4 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -72,6 +72,7 @@ void lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *); void lpfc_retry_pport_discovery(struct lpfc_hba *); void lpfc_release_rpi(struct lpfc_hba *, struct lpfc_vport *, uint16_t); +void lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); @@ -167,9 +168,8 @@ void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *, struct lpfc_iocbq *); int lpfc_ct_handle_unsol_abort(struct lpfc_hba *, struct hbq_dmabuf *); int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t); -int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int); -void lpfc_fdmi_tmo(unsigned long); -void lpfc_fdmi_timeout_handler(struct lpfc_vport *); +int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int, uint32_t); +void lpfc_fdmi_num_disc_check(struct lpfc_vport *); void lpfc_delayed_disc_tmo(unsigned long); void 
lpfc_delayed_disc_timeout_handler(struct lpfc_vport *); diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index 8fded1f7605f..79e261d2a0c8 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -48,15 +48,26 @@ #include "lpfc_vport.h" #include "lpfc_debugfs.h" -/* FDMI Port Speed definitions */ -#define HBA_PORTSPEED_1GBIT 0x0001 /* 1 GBit/sec */ -#define HBA_PORTSPEED_2GBIT 0x0002 /* 2 GBit/sec */ -#define HBA_PORTSPEED_4GBIT 0x0008 /* 4 GBit/sec */ -#define HBA_PORTSPEED_10GBIT 0x0004 /* 10 GBit/sec */ -#define HBA_PORTSPEED_8GBIT 0x0010 /* 8 GBit/sec */ -#define HBA_PORTSPEED_16GBIT 0x0020 /* 16 GBit/sec */ -#define HBA_PORTSPEED_32GBIT 0x0040 /* 32 GBit/sec */ -#define HBA_PORTSPEED_UNKNOWN 0x0800 /* Unknown */ +/* FDMI Port Speed definitions - FC-GS-7 */ +#define HBA_PORTSPEED_1GFC 0x00000001 /* 1G FC */ +#define HBA_PORTSPEED_2GFC 0x00000002 /* 2G FC */ +#define HBA_PORTSPEED_4GFC 0x00000008 /* 4G FC */ +#define HBA_PORTSPEED_10GFC 0x00000004 /* 10G FC */ +#define HBA_PORTSPEED_8GFC 0x00000010 /* 8G FC */ +#define HBA_PORTSPEED_16GFC 0x00000020 /* 16G FC */ +#define HBA_PORTSPEED_32GFC 0x00000040 /* 32G FC */ +#define HBA_PORTSPEED_20GFC 0x00000080 /* 20G FC */ +#define HBA_PORTSPEED_40GFC 0x00000100 /* 40G FC */ +#define HBA_PORTSPEED_128GFC 0x00000200 /* 128G FC */ +#define HBA_PORTSPEED_64GFC 0x00000400 /* 64G FC */ +#define HBA_PORTSPEED_256GFC 0x00000800 /* 256G FC */ +#define HBA_PORTSPEED_UNKNOWN 0x00008000 /* Unknown */ +#define HBA_PORTSPEED_10GE 0x00010000 /* 10G E */ +#define HBA_PORTSPEED_40GE 0x00020000 /* 40G E */ +#define HBA_PORTSPEED_100GE 0x00040000 /* 100G E */ +#define HBA_PORTSPEED_25GE 0x00080000 /* 25G E */ +#define HBA_PORTSPEED_50GE 0x00100000 /* 50G E */ +#define HBA_PORTSPEED_400GE 0x00200000 /* 400G E */ #define FOURBYTES 4 @@ -287,6 +298,17 @@ lpfc_ct_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *ctiocb) return 0; } +/** + * lpfc_gen_req - Build and issue a GEN_REQUEST command to the SLI Layer + * @vport: pointer to a host virtual N_Port data structure. + * @bmp: Pointer to BPL for SLI command + * @inp: Pointer to data buffer for response data. + * @outp: Pointer to data buffer that holds the CT command. + * @cmpl: completion routine to call when command completes + * @ndlp: Destination NPort nodelist entry + * + * This function is the final part of issuing a CT command. + */ static int lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp, struct lpfc_dmabuf *inp, struct lpfc_dmabuf *outp, @@ -311,7 +333,7 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp, icmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys); icmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys); icmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64; - icmd->un.genreq64.bdl.bdeSize = (num_entry * sizeof (struct ulp_bde64)); + icmd->un.genreq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64)); if (usr_flg) geniocb->context3 = NULL; @@ -370,6 +392,16 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp, return 0; } +/** + * lpfc_ct_cmd - Build and issue a CT command + * @vport: pointer to a host virtual N_Port data structure. + * @inmp: Pointer to data buffer for response data. + * @bmp: Pointer to BPL for SLI command + * @ndlp: Destination NPort nodelist entry + * @cmpl: completion routine to call when command completes + * + * This function is called for issuing a CT command.
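+ *
+ * Hedged usage sketch, following this file's callers such as lpfc_ns_cmd()
+ * (the trailing rsp_size and retry arguments are assumed from those call
+ * sites): mp carries the CT payload, bmp the BPL describing it, and the
+ * completion eventually releases both buffers via lpfc_ct_free_iocb():
+ *
+ *	CtReq = (struct lpfc_sli_ct_request *)mp->virt;
+ *	memset(CtReq, 0, sizeof(struct lpfc_sli_ct_request));
+ *	(fill in RevisionId, FsType, FsSubType and the command payload)
+ *	if (lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, rsp_size, retry))
+ *		(free mp and bmp and return failure)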
+ */ static int lpfc_ct_cmd(struct lpfc_vport *vport, struct lpfc_dmabuf *inmp, struct lpfc_dmabuf *bmp, struct lpfc_nodelist *ndlp, @@ -453,7 +485,7 @@ lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint32_t Size) Cnt -= 16; /* subtract length of CT header */ /* Loop through entire NameServer list of DIDs */ - while (Cnt >= sizeof (uint32_t)) { + while (Cnt >= sizeof(uint32_t)) { /* Get next DID from NameServer List */ CTentry = *ctptr++; Did = ((be32_to_cpu(CTentry)) & Mask_DID); @@ -558,7 +590,7 @@ lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint32_t Size) } if (CTentry & (cpu_to_be32(SLI_CT_LAST_ENTRY))) goto nsout1; - Cnt -= sizeof (uint32_t); + Cnt -= sizeof(uint32_t); } ctptr = NULL; @@ -1146,7 +1178,7 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode, /* fill in BDEs for command */ /* Allocate buffer for command payload */ - mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); + mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); if (!mp) { rc=2; goto ns_cmd_exit; @@ -1160,7 +1192,7 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode, } /* Allocate buffer for Buffer ptr list */ - bmp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); + bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); if (!bmp) { rc=4; goto ns_cmd_free_mpvirt; @@ -1204,7 +1236,7 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode, bpl->tus.w = le32_to_cpu(bpl->tus.w); CtReq = (struct lpfc_sli_ct_request *) mp->virt; - memset(CtReq, 0, sizeof (struct lpfc_sli_ct_request)); + memset(CtReq, 0, sizeof(struct lpfc_sli_ct_request)); CtReq->RevisionId.bits.Revision = SLI_CT_REVISION; CtReq->RevisionId.bits.InId = 0; CtReq->FsType = SLI_CT_DIRECTORY_SERVICE; @@ -1244,7 +1276,7 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode, cpu_to_be16(SLI_CTNS_RNN_ID); CtReq->un.rnn.PortId = cpu_to_be32(vport->fc_myDID); memcpy(CtReq->un.rnn.wwnn, &vport->fc_nodename, - sizeof (struct lpfc_name)); + sizeof(struct lpfc_name)); cmpl = lpfc_cmpl_ct_cmd_rnn_id; break; @@ -1264,7 +1296,7 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode, CtReq->CommandResponse.bits.CmdRsp = cpu_to_be16(SLI_CTNS_RSNN_NN); memcpy(CtReq->un.rsnn.wwnn, &vport->fc_nodename, - sizeof (struct lpfc_name)); + sizeof(struct lpfc_name)); size = sizeof(CtReq->un.rsnn.symbname); CtReq->un.rsnn.len = lpfc_vport_symbolic_node_name(vport, @@ -1319,20 +1351,29 @@ ns_cmd_exit: return 1; } +/** + * lpfc_cmpl_ct_disc_fdmi - Handle a discovery FDMI completion + * @phba: Pointer to HBA context object. + * @cmdiocb: Pointer to the command IOCBQ. + * @rspiocb: Pointer to the response IOCBQ. + * + * This function handles the completion of a driver-initiated FDMI + * CT command issued during discovery.
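+ *
+ * Three outcomes are handled: a retryable local reject (abort, timeout,
+ * no resources, ...) re-issues the same iocb up to LPFC_FDMI_MAX_RETRY
+ * times; an FS_RJT response walks the FDMI-2 -> FDMI-1 downgrade ladder;
+ * and success advances to the next step of the registration cycle
+ * (DHBA -> DPRT -> RHBA -> RPA for the physical port, DPRT -> RPRT for
+ * vports).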
+ */ static void -lpfc_cmpl_ct_cmd_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, - struct lpfc_iocbq * rspiocb) +lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) { + struct lpfc_vport *vport = cmdiocb->vport; struct lpfc_dmabuf *inp = cmdiocb->context1; struct lpfc_dmabuf *outp = cmdiocb->context2; - struct lpfc_sli_ct_request *CTrsp = outp->virt; struct lpfc_sli_ct_request *CTcmd = inp->virt; - struct lpfc_nodelist *ndlp; + struct lpfc_sli_ct_request *CTrsp = outp->virt; uint16_t fdmi_cmd = CTcmd->CommandResponse.bits.CmdRsp; uint16_t fdmi_rsp = CTrsp->CommandResponse.bits.CmdRsp; - struct lpfc_vport *vport = cmdiocb->vport; IOCB_t *irsp = &rspiocb->iocb; - uint32_t latt; + struct lpfc_nodelist *ndlp; + uint32_t latt, cmd, err; latt = lpfc_els_chk_latt(vport); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, @@ -1340,91 +1381,1115 @@ lpfc_cmpl_ct_cmd_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, irsp->ulpStatus, irsp->un.ulpWord[4], latt); if (latt || irsp->ulpStatus) { + + /* Look for a retryable error */ + if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) { + switch ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK)) { + case IOERR_SLI_ABORTED: + case IOERR_ABORT_IN_PROGRESS: + case IOERR_SEQUENCE_TIMEOUT: + case IOERR_ILLEGAL_FRAME: + case IOERR_NO_RESOURCES: + case IOERR_ILLEGAL_COMMAND: + cmdiocb->retry++; + if (cmdiocb->retry >= LPFC_FDMI_MAX_RETRY) + break; + + /* Retry the same FDMI command */ + err = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, + cmdiocb, 0); + if (err == IOCB_ERROR) + break; + return; + default: + break; + } + } + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0229 FDMI cmd %04x failed, latt = %d " "ulpStatus: x%x, rid x%x\n", be16_to_cpu(fdmi_cmd), latt, irsp->ulpStatus, irsp->un.ulpWord[4]); - goto fail_out; } + lpfc_ct_free_iocb(phba, cmdiocb); ndlp = lpfc_findnode_did(vport, FDMI_DID); if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) - goto fail_out; + return; + /* Check for a CT LS_RJT response */ + cmd = be16_to_cpu(fdmi_cmd); if (fdmi_rsp == cpu_to_be16(SLI_CT_RESPONSE_FS_RJT)) { /* FDMI rsp failed */ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, - "0220 FDMI rsp failed Data: x%x\n", - be16_to_cpu(fdmi_cmd)); + "0220 FDMI cmd failed FS_RJT Data: x%x", cmd); + + /* Should we fallback to FDMI-2 / FDMI-1 ? 
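+ * Spelled out, per the switch below: RHBA rejected at FDMI-2 drops both
+ * masks to FDMI-1 and restarts at DHBA; RPRT or RPA rejected at the
+ * SmartSAN level drops the port mask to FDMI-2 and retries the same
+ * command; RPRT rejected at FDMI-2 drops the port mask to FDMI-1 and
+ * retries; RPA rejected at FDMI-2 drops both masks to FDMI-1 and
+ * restarts at DHBA.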
*/ + switch (cmd) { + case SLI_MGMT_RHBA: + if (vport->fdmi_hba_mask == LPFC_FDMI2_HBA_ATTR) { + /* Fallback to FDMI-1 */ + vport->fdmi_hba_mask = LPFC_FDMI1_HBA_ATTR; + vport->fdmi_port_mask = LPFC_FDMI1_PORT_ATTR; + /* Start over */ + lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0); + } + return; + + case SLI_MGMT_RPRT: + if (vport->fdmi_port_mask == LPFC_FDMI2_PORT_ATTR) { + /* Fallback to FDMI-1 */ + vport->fdmi_port_mask = LPFC_FDMI1_PORT_ATTR; + /* Start over */ + lpfc_fdmi_cmd(vport, ndlp, cmd, 0); + } + if (vport->fdmi_port_mask == LPFC_FDMI2_SMART_ATTR) { + vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR; + /* Retry the same command */ + lpfc_fdmi_cmd(vport, ndlp, cmd, 0); + } + return; + + case SLI_MGMT_RPA: + if (vport->fdmi_port_mask == LPFC_FDMI2_PORT_ATTR) { + /* Fallback to FDMI-1 */ + vport->fdmi_hba_mask = LPFC_FDMI1_HBA_ATTR; + vport->fdmi_port_mask = LPFC_FDMI1_PORT_ATTR; + /* Start over */ + lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0); + } + if (vport->fdmi_port_mask == LPFC_FDMI2_SMART_ATTR) { + vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR; + /* Retry the same command */ + lpfc_fdmi_cmd(vport, ndlp, cmd, 0); + } + return; + } } -fail_out: - lpfc_ct_free_iocb(phba, cmdiocb); + /* + * On success, need to cycle thru FDMI registration for discovery + * DHBA -> DPRT -> RHBA -> RPA (physical port) + * DPRT -> RPRT (vports) + */ + switch (cmd) { + case SLI_MGMT_RHBA: + lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPA, 0); + break; + + case SLI_MGMT_DHBA: + lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT, 0); + break; + + case SLI_MGMT_DPRT: + if (vport->port_type == LPFC_PHYSICAL_PORT) + lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RHBA, 0); + else + lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPRT, 0); + break; + } + return; } -static void -lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, - struct lpfc_iocbq *rspiocb) + +/** + * lpfc_fdmi_num_disc_check - Check how many mapped NPorts we are connected to + * @vport: pointer to a host virtual N_Port data structure. + * + * Called from hbeat timeout routine to check if the number of discovered + * ports has changed. If so, re-register that port Attribute.
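+ *
+ * Only the "number of discovered ports" attribute is refreshed here
+ * (RPA for the physical port, RPRT for vports, with new_mask set to
+ * LPFC_FDMI_PORT_ATTR_num_disc); the full registration cycle above is
+ * not re-run.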
+ */ +void +lpfc_fdmi_num_disc_check(struct lpfc_vport *vport) { - struct lpfc_vport *vport = cmdiocb->vport; - struct lpfc_dmabuf *inp = cmdiocb->context1; - struct lpfc_sli_ct_request *CTcmd = inp->virt; - uint16_t fdmi_cmd = CTcmd->CommandResponse.bits.CmdRsp; + struct lpfc_hba *phba = vport->phba; struct lpfc_nodelist *ndlp; + uint16_t cnt; + + if (!lpfc_is_link_up(phba)) + return; + + if (!(vport->fdmi_port_mask & LPFC_FDMI_PORT_ATTR_num_disc)) + return; - lpfc_cmpl_ct_cmd_fdmi(phba, cmdiocb, rspiocb); + cnt = lpfc_find_map_node(vport); + if (cnt == vport->fdmi_num_disc) + return; ndlp = lpfc_findnode_did(vport, FDMI_DID); if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) return; - /* - * Need to cycle thru FDMI registration for discovery - * DHBA -> DPRT -> RHBA -> RPA - */ - switch (be16_to_cpu(fdmi_cmd)) { - case SLI_MGMT_RHBA: - lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPA); - break; + if (vport->port_type == LPFC_PHYSICAL_PORT) { + lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPA, + LPFC_FDMI_PORT_ATTR_num_disc); + } else { + lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPRT, + LPFC_FDMI_PORT_ATTR_num_disc); + } +} - case SLI_MGMT_DHBA: - lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT); - break; +/* Routines for all individual HBA attributes */ +int +lpfc_fdmi_hba_attr_wwnn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) +{ + struct lpfc_fdmi_attr_entry *ae; + uint32_t size; - case SLI_MGMT_DPRT: - lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RHBA); - break; + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + memset(ae, 0, sizeof(struct lpfc_name)); + + memcpy(&ae->un.AttrWWN, &vport->fc_sparam.nodeName, + sizeof(struct lpfc_name)); + size = FOURBYTES + sizeof(struct lpfc_name); + ad->AttrLen = cpu_to_be16(size); + ad->AttrType = cpu_to_be16(RHBA_NODENAME); + return size; +} +int +lpfc_fdmi_hba_attr_manufacturer(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_def *ad) +{ + struct lpfc_fdmi_attr_entry *ae; + uint32_t len, size; + + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + memset(ae, 0, 256); + + strncpy(ae->un.AttrString, + "Emulex Corporation", + sizeof(ae->un.AttrString)); + len = strnlen(ae->un.AttrString, + sizeof(ae->un.AttrString)); + len += (len & 3) ? (4 - (len & 3)) : 4; + size = FOURBYTES + len; + ad->AttrLen = cpu_to_be16(size); + ad->AttrType = cpu_to_be16(RHBA_MANUFACTURER); + return size; +} + +int +lpfc_fdmi_hba_attr_sn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_fdmi_attr_entry *ae; + uint32_t len, size; + + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + memset(ae, 0, 256); + + strncpy(ae->un.AttrString, phba->SerialNumber, + sizeof(ae->un.AttrString)); + len = strnlen(ae->un.AttrString, + sizeof(ae->un.AttrString)); + len += (len & 3) ? (4 - (len & 3)) : 4; + size = FOURBYTES + len; + ad->AttrLen = cpu_to_be16(size); + ad->AttrType = cpu_to_be16(RHBA_SERIAL_NUMBER); + return size; +} + +int +lpfc_fdmi_hba_attr_model(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_def *ad) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_fdmi_attr_entry *ae; + uint32_t len, size; + + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + memset(ae, 0, 256); + + strncpy(ae->un.AttrString, phba->ModelName, + sizeof(ae->un.AttrString)); + len = strnlen(ae->un.AttrString, sizeof(ae->un.AttrString)); + len += (len & 3) ? 
(4 - (len & 3)) : 4; + size = FOURBYTES + len; + ad->AttrLen = cpu_to_be16(size); + ad->AttrType = cpu_to_be16(RHBA_MODEL); + return size; +} + +int +lpfc_fdmi_hba_attr_description(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_def *ad) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_fdmi_attr_entry *ae; + uint32_t len, size; + + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + memset(ae, 0, 256); + + strncpy(ae->un.AttrString, phba->ModelDesc, + sizeof(ae->un.AttrString)); + len = strnlen(ae->un.AttrString, + sizeof(ae->un.AttrString)); + len += (len & 3) ? (4 - (len & 3)) : 4; + size = FOURBYTES + len; + ad->AttrLen = cpu_to_be16(size); + ad->AttrType = cpu_to_be16(RHBA_MODEL_DESCRIPTION); + return size; +} + +int +lpfc_fdmi_hba_attr_hdw_ver(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_def *ad) +{ + struct lpfc_hba *phba = vport->phba; + lpfc_vpd_t *vp = &phba->vpd; + struct lpfc_fdmi_attr_entry *ae; + uint32_t i, j, incr, size; + + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + memset(ae, 0, 256); + + /* Convert JEDEC ID to ascii for hardware version */ + incr = vp->rev.biuRev; + for (i = 0; i < 8; i++) { + j = (incr & 0xf); + if (j <= 9) + ae->un.AttrString[7 - i] = + (char)((uint8_t) 0x30 + + (uint8_t) j); + else + ae->un.AttrString[7 - i] = + (char)((uint8_t) 0x61 + + (uint8_t) (j - 10)); + incr = (incr >> 4); } + size = FOURBYTES + 8; + ad->AttrLen = cpu_to_be16(size); + ad->AttrType = cpu_to_be16(RHBA_HARDWARE_VERSION); + return size; +} + +int +lpfc_fdmi_hba_attr_drvr_ver(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_def *ad) +{ + struct lpfc_fdmi_attr_entry *ae; + uint32_t len, size; + + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + memset(ae, 0, 256); + + strncpy(ae->un.AttrString, lpfc_release_version, + sizeof(ae->un.AttrString)); + len = strnlen(ae->un.AttrString, + sizeof(ae->un.AttrString)); + len += (len & 3) ? (4 - (len & 3)) : 4; + size = FOURBYTES + len; + ad->AttrLen = cpu_to_be16(size); + ad->AttrType = cpu_to_be16(RHBA_DRIVER_VERSION); + return size; +} + +int +lpfc_fdmi_hba_attr_rom_ver(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_def *ad) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_fdmi_attr_entry *ae; + uint32_t len, size; + + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + memset(ae, 0, 256); + + if (phba->sli_rev == LPFC_SLI_REV4) + lpfc_decode_firmware_rev(phba, ae->un.AttrString, 1); + else + strncpy(ae->un.AttrString, phba->OptionROMVersion, + sizeof(ae->un.AttrString)); + len = strnlen(ae->un.AttrString, + sizeof(ae->un.AttrString)); + len += (len & 3) ? (4 - (len & 3)) : 4; + size = FOURBYTES + len; + ad->AttrLen = cpu_to_be16(size); + ad->AttrType = cpu_to_be16(RHBA_OPTION_ROM_VERSION); + return size; +} + +int +lpfc_fdmi_hba_attr_fmw_ver(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_def *ad) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_fdmi_attr_entry *ae; + uint32_t len, size; + + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + memset(ae, 0, 256); + + lpfc_decode_firmware_rev(phba, ae->un.AttrString, 1); + len = strnlen(ae->un.AttrString, + sizeof(ae->un.AttrString)); + len += (len & 3) ? 
(4 - (len & 3)) : 4; + size = FOURBYTES + len; + ad->AttrLen = cpu_to_be16(size); + ad->AttrType = cpu_to_be16(RHBA_FIRMWARE_VERSION); + return size; +} + +int +lpfc_fdmi_hba_attr_os_ver(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_def *ad) +{ + struct lpfc_fdmi_attr_entry *ae; + uint32_t len, size; + + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + memset(ae, 0, 256); + + snprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "%s %s %s", + init_utsname()->sysname, + init_utsname()->release, + init_utsname()->version); + + len = strnlen(ae->un.AttrString, sizeof(ae->un.AttrString)); + len += (len & 3) ? (4 - (len & 3)) : 4; + size = FOURBYTES + len; + ad->AttrLen = cpu_to_be16(size); + ad->AttrType = cpu_to_be16(RHBA_OS_NAME_VERSION); + return size; +} + +int +lpfc_fdmi_hba_attr_ct_len(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_def *ad) +{ + struct lpfc_fdmi_attr_entry *ae; + uint32_t size; + + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + + ae->un.AttrInt = cpu_to_be32(LPFC_MAX_CT_SIZE); + size = FOURBYTES + sizeof(uint32_t); + ad->AttrLen = cpu_to_be16(size); + ad->AttrType = cpu_to_be16(RHBA_MAX_CT_PAYLOAD_LEN); + return size; +} + +int +lpfc_fdmi_hba_attr_symbolic_name(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_def *ad) +{ + struct lpfc_fdmi_attr_entry *ae; + uint32_t len, size; + + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + memset(ae, 0, 256); + + len = lpfc_vport_symbolic_node_name(vport, + ae->un.AttrString, 256); + len += (len & 3) ? (4 - (len & 3)) : 4; + size = FOURBYTES + len; + ad->AttrLen = cpu_to_be16(size); + ad->AttrType = cpu_to_be16(RHBA_SYM_NODENAME); + return size; +} + +int +lpfc_fdmi_hba_attr_vendor_info(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_def *ad) +{ + struct lpfc_fdmi_attr_entry *ae; + uint32_t size; + + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + + /* Nothing is defined for this currently */ + ae->un.AttrInt = cpu_to_be32(0); + size = FOURBYTES + sizeof(uint32_t); + ad->AttrLen = cpu_to_be16(size); + ad->AttrType = cpu_to_be16(RHBA_VENDOR_INFO); + return size; } +int +lpfc_fdmi_hba_attr_num_ports(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_def *ad) +{ + struct lpfc_fdmi_attr_entry *ae; + uint32_t size; + + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + + /* Each driver instance corresponds to a single port */ + ae->un.AttrInt = cpu_to_be32(1); + size = FOURBYTES + sizeof(uint32_t); + ad->AttrLen = cpu_to_be16(size); + ad->AttrType = cpu_to_be16(RHBA_NUM_PORTS); + return size; +} + +int +lpfc_fdmi_hba_attr_fabric_wwnn(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_def *ad) +{ + struct lpfc_fdmi_attr_entry *ae; + uint32_t size; + + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + memset(ae, 0, sizeof(struct lpfc_name)); + + memcpy(&ae->un.AttrWWN, &vport->fabric_nodename, + sizeof(struct lpfc_name)); + size = FOURBYTES + sizeof(struct lpfc_name); + ad->AttrLen = cpu_to_be16(size); + ad->AttrType = cpu_to_be16(RHBA_FABRIC_WWNN); + return size; +} int -lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode) +lpfc_fdmi_hba_attr_bios_ver(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_def *ad) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_fdmi_attr_entry *ae; + uint32_t len, size; + + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + memset(ae, 0, 256); + + lpfc_decode_firmware_rev(phba, ae->un.AttrString, 1); + len = strnlen(ae->un.AttrString, + sizeof(ae->un.AttrString)); + len += (len & 3) ? 
(4 - (len & 3)) : 4; + size = FOURBYTES + len; + ad->AttrLen = cpu_to_be16(size); + ad->AttrType = cpu_to_be16(RHBA_BIOS_VERSION); + return size; +} + +int +lpfc_fdmi_hba_attr_bios_state(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_def *ad) +{ + struct lpfc_fdmi_attr_entry *ae; + uint32_t size; + + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + + /* Driver doesn't have access to this information */ + ae->un.AttrInt = cpu_to_be32(0); + size = FOURBYTES + sizeof(uint32_t); + ad->AttrLen = cpu_to_be16(size); + ad->AttrType = cpu_to_be16(RHBA_BIOS_STATE); + return size; +} + +int +lpfc_fdmi_hba_attr_vendor_id(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_def *ad) +{ + struct lpfc_fdmi_attr_entry *ae; + uint32_t len, size; + + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + memset(ae, 0, 256); + + strncpy(ae->un.AttrString, "EMULEX", + sizeof(ae->un.AttrString)); + len = strnlen(ae->un.AttrString, + sizeof(ae->un.AttrString)); + len += (len & 3) ? (4 - (len & 3)) : 4; + size = FOURBYTES + len; + ad->AttrLen = cpu_to_be16(size); + ad->AttrType = cpu_to_be16(RHBA_VENDOR_ID); + return size; +} + +/* Routines for all individual PORT attributes */ +int +lpfc_fdmi_port_attr_fc4type(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_def *ad) +{ + struct lpfc_fdmi_attr_entry *ae; + uint32_t size; + + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + memset(ae, 0, 32); + + ae->un.AttrTypes[3] = 0x02; /* Type 1 - ELS */ + ae->un.AttrTypes[2] = 0x01; /* Type 8 - FCP */ + ae->un.AttrTypes[7] = 0x01; /* Type 32 - CT */ + size = FOURBYTES + 32; + ad->AttrLen = cpu_to_be16(size); + ad->AttrType = cpu_to_be16(RPRT_SUPPORTED_FC4_TYPES); + return size; +} + +int +lpfc_fdmi_port_attr_support_speed(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_def *ad) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_fdmi_attr_entry *ae; + uint32_t size; + + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + + ae->un.AttrInt = 0; + if (!(phba->hba_flag & HBA_FCOE_MODE)) { + if (phba->lmt & LMT_32Gb) + ae->un.AttrInt |= HBA_PORTSPEED_32GFC; + if (phba->lmt & LMT_16Gb) + ae->un.AttrInt |= HBA_PORTSPEED_16GFC; + if (phba->lmt & LMT_10Gb) + ae->un.AttrInt |= HBA_PORTSPEED_10GFC; + if (phba->lmt & LMT_8Gb) + ae->un.AttrInt |= HBA_PORTSPEED_8GFC; + if (phba->lmt & LMT_4Gb) + ae->un.AttrInt |= HBA_PORTSPEED_4GFC; + if (phba->lmt & LMT_2Gb) + ae->un.AttrInt |= HBA_PORTSPEED_2GFC; + if (phba->lmt & LMT_1Gb) + ae->un.AttrInt |= HBA_PORTSPEED_1GFC; + } else { + /* FCoE links support only one speed */ + switch (phba->fc_linkspeed) { + case LPFC_ASYNC_LINK_SPEED_10GBPS: + ae->un.AttrInt = HBA_PORTSPEED_10GE; + break; + case LPFC_ASYNC_LINK_SPEED_25GBPS: + ae->un.AttrInt = HBA_PORTSPEED_25GE; + break; + case LPFC_ASYNC_LINK_SPEED_40GBPS: + ae->un.AttrInt = HBA_PORTSPEED_40GE; + break; + case LPFC_ASYNC_LINK_SPEED_100GBPS: + ae->un.AttrInt = HBA_PORTSPEED_100GE; + break; + } + } + ae->un.AttrInt = cpu_to_be32(ae->un.AttrInt); + size = FOURBYTES + sizeof(uint32_t); + ad->AttrLen = cpu_to_be16(size); + ad->AttrType = cpu_to_be16(RPRT_SUPPORTED_SPEED); + return size; +} + +int +lpfc_fdmi_port_attr_speed(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_def *ad) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_fdmi_attr_entry *ae; + uint32_t size; + + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + + if (!(phba->hba_flag & HBA_FCOE_MODE)) { + switch (phba->fc_linkspeed) { + case LPFC_LINK_SPEED_1GHZ: + ae->un.AttrInt = HBA_PORTSPEED_1GFC; + break; + case LPFC_LINK_SPEED_2GHZ: + ae->un.AttrInt = 
HBA_PORTSPEED_2GFC; + break; + case LPFC_LINK_SPEED_4GHZ: + ae->un.AttrInt = HBA_PORTSPEED_4GFC; + break; + case LPFC_LINK_SPEED_8GHZ: + ae->un.AttrInt = HBA_PORTSPEED_8GFC; + break; + case LPFC_LINK_SPEED_10GHZ: + ae->un.AttrInt = HBA_PORTSPEED_10GFC; + break; + case LPFC_LINK_SPEED_16GHZ: + ae->un.AttrInt = HBA_PORTSPEED_16GFC; + break; + case LPFC_LINK_SPEED_32GHZ: + ae->un.AttrInt = HBA_PORTSPEED_32GFC; + break; + default: + ae->un.AttrInt = HBA_PORTSPEED_UNKNOWN; + break; + } + } else { + switch (phba->fc_linkspeed) { + case LPFC_ASYNC_LINK_SPEED_10GBPS: + ae->un.AttrInt = HBA_PORTSPEED_10GE; + break; + case LPFC_ASYNC_LINK_SPEED_25GBPS: + ae->un.AttrInt = HBA_PORTSPEED_25GE; + break; + case LPFC_ASYNC_LINK_SPEED_40GBPS: + ae->un.AttrInt = HBA_PORTSPEED_40GE; + break; + case LPFC_ASYNC_LINK_SPEED_100GBPS: + ae->un.AttrInt = HBA_PORTSPEED_100GE; + break; + default: + ae->un.AttrInt = HBA_PORTSPEED_UNKNOWN; + break; + } + } + + ae->un.AttrInt = cpu_to_be32(ae->un.AttrInt); + size = FOURBYTES + sizeof(uint32_t); + ad->AttrLen = cpu_to_be16(size); + ad->AttrType = cpu_to_be16(RPRT_PORT_SPEED); + return size; +} + +int +lpfc_fdmi_port_attr_max_frame(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_def *ad) +{ + struct serv_parm *hsp; + struct lpfc_fdmi_attr_entry *ae; + uint32_t size; + + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + + hsp = (struct serv_parm *)&vport->fc_sparam; + ae->un.AttrInt = (((uint32_t) hsp->cmn.bbRcvSizeMsb) << 8) | + (uint32_t) hsp->cmn.bbRcvSizeLsb; + ae->un.AttrInt = cpu_to_be32(ae->un.AttrInt); + size = FOURBYTES + sizeof(uint32_t); + ad->AttrLen = cpu_to_be16(size); + ad->AttrType = cpu_to_be16(RPRT_MAX_FRAME_SIZE); + return size; +} + +int +lpfc_fdmi_port_attr_os_devname(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_def *ad) +{ + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_fdmi_attr_entry *ae; + uint32_t len, size; + + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + memset(ae, 0, 256); + + snprintf(ae->un.AttrString, sizeof(ae->un.AttrString), + "/sys/class/scsi_host/host%d", shost->host_no); + len = strnlen((char *)ae->un.AttrString, + sizeof(ae->un.AttrString)); + len += (len & 3) ? (4 - (len & 3)) : 4; + size = FOURBYTES + len; + ad->AttrLen = cpu_to_be16(size); + ad->AttrType = cpu_to_be16(RPRT_OS_DEVICE_NAME); + return size; +} + +int +lpfc_fdmi_port_attr_host_name(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_def *ad) +{ + struct lpfc_fdmi_attr_entry *ae; + uint32_t len, size; + + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + memset(ae, 0, 256); + + snprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "%s", + init_utsname()->nodename); + + len = strnlen(ae->un.AttrString, sizeof(ae->un.AttrString)); + len += (len & 3) ? 
(4 - (len & 3)) : 4; + size = FOURBYTES + len; + ad->AttrLen = cpu_to_be16(size); + ad->AttrType = cpu_to_be16(RPRT_HOST_NAME); + return size; +} + +int +lpfc_fdmi_port_attr_wwnn(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_def *ad) +{ + struct lpfc_fdmi_attr_entry *ae; + uint32_t size; + + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + memset(ae, 0, sizeof(struct lpfc_name)); + + memcpy(&ae->un.AttrWWN, &vport->fc_sparam.nodeName, + sizeof(struct lpfc_name)); + size = FOURBYTES + sizeof(struct lpfc_name); + ad->AttrLen = cpu_to_be16(size); + ad->AttrType = cpu_to_be16(RPRT_NODENAME); + return size; +} + +int +lpfc_fdmi_port_attr_wwpn(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_def *ad) +{ + struct lpfc_fdmi_attr_entry *ae; + uint32_t size; + + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + memset(ae, 0, sizeof(struct lpfc_name)); + + memcpy(&ae->un.AttrWWN, &vport->fc_sparam.portName, + sizeof(struct lpfc_name)); + size = FOURBYTES + sizeof(struct lpfc_name); + ad->AttrLen = cpu_to_be16(size); + ad->AttrType = cpu_to_be16(RPRT_PORTNAME); + return size; +} + +int +lpfc_fdmi_port_attr_symbolic_name(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_def *ad) +{ + struct lpfc_fdmi_attr_entry *ae; + uint32_t len, size; + + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + memset(ae, 0, 256); + + len = lpfc_vport_symbolic_port_name(vport, ae->un.AttrString, 256); + len += (len & 3) ? (4 - (len & 3)) : 4; + size = FOURBYTES + len; + ad->AttrLen = cpu_to_be16(size); + ad->AttrType = cpu_to_be16(RPRT_SYM_PORTNAME); + return size; +} + +int +lpfc_fdmi_port_attr_port_type(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_def *ad) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_fdmi_attr_entry *ae; + uint32_t size; + + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) + ae->un.AttrInt = cpu_to_be32(LPFC_FDMI_PORTTYPE_NLPORT); + else + ae->un.AttrInt = cpu_to_be32(LPFC_FDMI_PORTTYPE_NPORT); + size = FOURBYTES + sizeof(uint32_t); + ad->AttrLen = cpu_to_be16(size); + ad->AttrType = cpu_to_be16(RPRT_PORT_TYPE); + return size; +} + +int +lpfc_fdmi_port_attr_class(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_def *ad) +{ + struct lpfc_fdmi_attr_entry *ae; + uint32_t size; + + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ae->un.AttrInt = cpu_to_be32(FC_COS_CLASS2 | FC_COS_CLASS3); + size = FOURBYTES + sizeof(uint32_t); + ad->AttrLen = cpu_to_be16(size); + ad->AttrType = cpu_to_be16(RPRT_SUPPORTED_CLASS); + return size; +} + +int +lpfc_fdmi_port_attr_fabric_wwpn(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_def *ad) +{ + struct lpfc_fdmi_attr_entry *ae; + uint32_t size; + + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + memset(ae, 0, sizeof(struct lpfc_name)); + + memcpy(&ae->un.AttrWWN, &vport->fabric_portname, + sizeof(struct lpfc_name)); + size = FOURBYTES + sizeof(struct lpfc_name); + ad->AttrLen = cpu_to_be16(size); + ad->AttrType = cpu_to_be16(RPRT_FABRICNAME); + return size; +} + +int +lpfc_fdmi_port_attr_active_fc4type(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_def *ad) +{ + struct lpfc_fdmi_attr_entry *ae; + uint32_t size; + + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + memset(ae, 0, 32); + + ae->un.AttrTypes[3] = 0x02; /* Type 1 - ELS */ + ae->un.AttrTypes[2] = 0x01; /* Type 8 - FCP */ + ae->un.AttrTypes[7] = 0x01; /* Type 32 - CT */ + size = FOURBYTES + 32; + ad->AttrLen = cpu_to_be16(size); + ad->AttrType = cpu_to_be16(RPRT_ACTIVE_FC4_TYPES); + return size; +} + +int 
+lpfc_fdmi_port_attr_port_state(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_def *ad) +{ + struct lpfc_fdmi_attr_entry *ae; + uint32_t size; + + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + /* Link Up - operational */ + ae->un.AttrInt = cpu_to_be32(LPFC_FDMI_PORTSTATE_ONLINE); + size = FOURBYTES + sizeof(uint32_t); + ad->AttrLen = cpu_to_be16(size); + ad->AttrType = cpu_to_be16(RPRT_PORT_STATE); + return size; +} + +int +lpfc_fdmi_port_attr_num_disc(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_def *ad) +{ + struct lpfc_fdmi_attr_entry *ae; + uint32_t size; + + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + vport->fdmi_num_disc = lpfc_find_map_node(vport); + ae->un.AttrInt = cpu_to_be32(vport->fdmi_num_disc); + size = FOURBYTES + sizeof(uint32_t); + ad->AttrLen = cpu_to_be16(size); + ad->AttrType = cpu_to_be16(RPRT_DISC_PORT); + return size; +} + +int +lpfc_fdmi_port_attr_nportid(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_def *ad) +{ + struct lpfc_fdmi_attr_entry *ae; + uint32_t size; + + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ae->un.AttrInt = cpu_to_be32(vport->fc_myDID); + size = FOURBYTES + sizeof(uint32_t); + ad->AttrLen = cpu_to_be16(size); + ad->AttrType = cpu_to_be16(RPRT_PORT_ID); + return size; +} + +int +lpfc_fdmi_smart_attr_service(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_def *ad) +{ + struct lpfc_fdmi_attr_entry *ae; + uint32_t len, size; + + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + memset(ae, 0, 256); + + strncpy(ae->un.AttrString, "Smart SAN Initiator", + sizeof(ae->un.AttrString)); + len = strnlen(ae->un.AttrString, + sizeof(ae->un.AttrString)); + len += (len & 3) ? (4 - (len & 3)) : 4; + size = FOURBYTES + len; + ad->AttrLen = cpu_to_be16(size); + ad->AttrType = cpu_to_be16(RPRT_SMART_SERVICE); + return size; +} + +int +lpfc_fdmi_smart_attr_guid(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_def *ad) +{ + struct lpfc_fdmi_attr_entry *ae; + uint32_t size; + + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + memset(ae, 0, 256); + + memcpy(&ae->un.AttrString, &vport->fc_sparam.nodeName, + sizeof(struct lpfc_name)); + memcpy((((uint8_t *)&ae->un.AttrString) + + sizeof(struct lpfc_name)), + &vport->fc_sparam.portName, sizeof(struct lpfc_name)); + size = FOURBYTES + (2 * sizeof(struct lpfc_name)); + ad->AttrLen = cpu_to_be16(size); + ad->AttrType = cpu_to_be16(RPRT_SMART_GUID); + return size; +} + +int +lpfc_fdmi_smart_attr_version(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_def *ad) +{ + struct lpfc_fdmi_attr_entry *ae; + uint32_t len, size; + + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + memset(ae, 0, 256); + + strncpy(ae->un.AttrString, "Smart SAN Version 1.0", + sizeof(ae->un.AttrString)); + len = strnlen(ae->un.AttrString, + sizeof(ae->un.AttrString)); + len += (len & 3) ? (4 - (len & 3)) : 4; + size = FOURBYTES + len; + ad->AttrLen = cpu_to_be16(size); + ad->AttrType = cpu_to_be16(RPRT_SMART_VERSION); + return size; +} + +int +lpfc_fdmi_smart_attr_model(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_def *ad) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_fdmi_attr_entry *ae; + uint32_t len, size; + + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + memset(ae, 0, 256); + + strncpy(ae->un.AttrString, phba->ModelName, + sizeof(ae->un.AttrString)); + len = strnlen(ae->un.AttrString, sizeof(ae->un.AttrString)); + len += (len & 3) ? 
(4 - (len & 3)) : 4; + size = FOURBYTES + len; + ad->AttrLen = cpu_to_be16(size); + ad->AttrType = cpu_to_be16(RPRT_SMART_MODEL); + return size; +} + +int +lpfc_fdmi_smart_attr_port_info(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_def *ad) +{ + struct lpfc_fdmi_attr_entry *ae; + uint32_t size; + + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + + /* SRIOV (type 3) is not supported */ + if (vport->vpi) + ae->un.AttrInt = cpu_to_be32(2); /* NPIV */ + else + ae->un.AttrInt = cpu_to_be32(1); /* Physical */ + size = FOURBYTES + sizeof(uint32_t); + ad->AttrLen = cpu_to_be16(size); + ad->AttrType = cpu_to_be16(RPRT_SMART_PORT_INFO); + return size; +} + +int +lpfc_fdmi_smart_attr_qos(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_def *ad) +{ + struct lpfc_fdmi_attr_entry *ae; + uint32_t size; + + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ae->un.AttrInt = cpu_to_be32(0); + size = FOURBYTES + sizeof(uint32_t); + ad->AttrLen = cpu_to_be16(size); + ad->AttrType = cpu_to_be16(RPRT_SMART_QOS); + return size; +} + +int +lpfc_fdmi_smart_attr_security(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_def *ad) +{ + struct lpfc_fdmi_attr_entry *ae; + uint32_t size; + + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ae->un.AttrInt = cpu_to_be32(0); + size = FOURBYTES + sizeof(uint32_t); + ad->AttrLen = cpu_to_be16(size); + ad->AttrType = cpu_to_be16(RPRT_SMART_SECURITY); + return size; +} + +/* RHBA attribute jump table */ +int (*lpfc_fdmi_hba_action[]) + (struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) = { + /* Action routine Mask bit Attribute type */ + lpfc_fdmi_hba_attr_wwnn, /* bit0 RHBA_NODENAME */ + lpfc_fdmi_hba_attr_manufacturer, /* bit1 RHBA_MANUFACTURER */ + lpfc_fdmi_hba_attr_sn, /* bit2 RHBA_SERIAL_NUMBER */ + lpfc_fdmi_hba_attr_model, /* bit3 RHBA_MODEL */ + lpfc_fdmi_hba_attr_description, /* bit4 RHBA_MODEL_DESCRIPTION */ + lpfc_fdmi_hba_attr_hdw_ver, /* bit5 RHBA_HARDWARE_VERSION */ + lpfc_fdmi_hba_attr_drvr_ver, /* bit6 RHBA_DRIVER_VERSION */ + lpfc_fdmi_hba_attr_rom_ver, /* bit7 RHBA_OPTION_ROM_VERSION */ + lpfc_fdmi_hba_attr_fmw_ver, /* bit8 RHBA_FIRMWARE_VERSION */ + lpfc_fdmi_hba_attr_os_ver, /* bit9 RHBA_OS_NAME_VERSION */ + lpfc_fdmi_hba_attr_ct_len, /* bit10 RHBA_MAX_CT_PAYLOAD_LEN */ + lpfc_fdmi_hba_attr_symbolic_name, /* bit11 RHBA_SYM_NODENAME */ + lpfc_fdmi_hba_attr_vendor_info, /* bit12 RHBA_VENDOR_INFO */ + lpfc_fdmi_hba_attr_num_ports, /* bit13 RHBA_NUM_PORTS */ + lpfc_fdmi_hba_attr_fabric_wwnn, /* bit14 RHBA_FABRIC_WWNN */ + lpfc_fdmi_hba_attr_bios_ver, /* bit15 RHBA_BIOS_VERSION */ + lpfc_fdmi_hba_attr_bios_state, /* bit16 RHBA_BIOS_STATE */ + lpfc_fdmi_hba_attr_vendor_id, /* bit17 RHBA_VENDOR_ID */ +}; + +/* RPA / RPRT attribute jump table */ +int (*lpfc_fdmi_port_action[]) + (struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) = { + /* Action routine Mask bit Attribute type */ + lpfc_fdmi_port_attr_fc4type, /* bit0 RPRT_SUPPORT_FC4_TYPES */ + lpfc_fdmi_port_attr_support_speed, /* bit1 RPRT_SUPPORTED_SPEED */ + lpfc_fdmi_port_attr_speed, /* bit2 RPRT_PORT_SPEED */ + lpfc_fdmi_port_attr_max_frame, /* bit3 RPRT_MAX_FRAME_SIZE */ + lpfc_fdmi_port_attr_os_devname, /* bit4 RPRT_OS_DEVICE_NAME */ + lpfc_fdmi_port_attr_host_name, /* bit5 RPRT_HOST_NAME */ + lpfc_fdmi_port_attr_wwnn, /* bit6 RPRT_NODENAME */ + lpfc_fdmi_port_attr_wwpn, /* bit7 RPRT_PORTNAME */ + lpfc_fdmi_port_attr_symbolic_name, /* bit8 RPRT_SYM_PORTNAME */ + lpfc_fdmi_port_attr_port_type, /* bit9 RPRT_PORT_TYPE */ + lpfc_fdmi_port_attr_class, /* bit10 
RPRT_SUPPORTED_CLASS */ + lpfc_fdmi_port_attr_fabric_wwpn, /* bit11 RPRT_FABRICNAME */ + lpfc_fdmi_port_attr_active_fc4type, /* bit12 RPRT_ACTIVE_FC4_TYPES */ + lpfc_fdmi_port_attr_port_state, /* bit13 RPRT_PORT_STATE */ + lpfc_fdmi_port_attr_num_disc, /* bit14 RPRT_DISC_PORT */ + lpfc_fdmi_port_attr_nportid, /* bit15 RPRT_PORT_ID */ + lpfc_fdmi_smart_attr_service, /* bit16 RPRT_SMART_SERVICE */ + lpfc_fdmi_smart_attr_guid, /* bit17 RPRT_SMART_GUID */ + lpfc_fdmi_smart_attr_version, /* bit18 RPRT_SMART_VERSION */ + lpfc_fdmi_smart_attr_model, /* bit19 RPRT_SMART_MODEL */ + lpfc_fdmi_smart_attr_port_info, /* bit20 RPRT_SMART_PORT_INFO */ + lpfc_fdmi_smart_attr_qos, /* bit21 RPRT_SMART_QOS */ + lpfc_fdmi_smart_attr_security, /* bit22 RPRT_SMART_SECURITY */ +}; + +/** + * lpfc_fdmi_cmd - Build and send a FDMI cmd to the specified NPort + * @vport: pointer to a host virtual N_Port data structure. + * @ndlp: ndlp to send FDMI cmd to (if NULL use FDMI_DID) + * cmdcode: FDMI command to send + * mask: Mask of HBA or PORT Attributes to send + * + * Builds and sends a FDMI command using the CT subsystem. + */ +int +lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + int cmdcode, uint32_t new_mask) { struct lpfc_hba *phba = vport->phba; struct lpfc_dmabuf *mp, *bmp; struct lpfc_sli_ct_request *CtReq; struct ulp_bde64 *bpl; + uint32_t bit_pos; uint32_t size; uint32_t rsp_size; + uint32_t mask; struct lpfc_fdmi_reg_hba *rh; struct lpfc_fdmi_port_entry *pe; struct lpfc_fdmi_reg_portattr *pab = NULL; struct lpfc_fdmi_attr_block *ab = NULL; - struct lpfc_fdmi_attr_entry *ae; - struct lpfc_fdmi_attr_def *ad; - void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, - struct lpfc_iocbq *); + int (*func)(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad); + void (*cmpl)(struct lpfc_hba *, struct lpfc_iocbq *, + struct lpfc_iocbq *); - if (ndlp == NULL) { - ndlp = lpfc_findnode_did(vport, FDMI_DID); - if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) - return 0; - cmpl = lpfc_cmpl_ct_cmd_fdmi; /* cmd interface */ - } else { - cmpl = lpfc_cmpl_ct_disc_fdmi; /* called from discovery */ - } + if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) + return 0; + + cmpl = lpfc_cmpl_ct_disc_fdmi; /* called from discovery */ /* fill in BDEs for command */ /* Allocate buffer for command payload */ @@ -1470,573 +2535,99 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode) switch (cmdcode) { case SLI_MGMT_RHAT: case SLI_MGMT_RHBA: - { - lpfc_vpd_t *vp = &phba->vpd; - uint32_t i, j, incr; - int len = 0; + rh = (struct lpfc_fdmi_reg_hba *)&CtReq->un.PortID; + /* HBA Identifier */ + memcpy(&rh->hi.PortName, &phba->pport->fc_sparam.portName, + sizeof(struct lpfc_name)); - rh = (struct lpfc_fdmi_reg_hba *)&CtReq->un.PortID; - /* HBA Identifier */ - memcpy(&rh->hi.PortName, &vport->fc_sparam.portName, + if (cmdcode == SLI_MGMT_RHBA) { + /* Registered Port List */ + /* One entry (port) per adapter */ + rh->rpl.EntryCnt = cpu_to_be32(1); + memcpy(&rh->rpl.pe, &phba->pport->fc_sparam.portName, sizeof(struct lpfc_name)); - if (cmdcode == SLI_MGMT_RHBA) { - /* Registered Port List */ - /* One entry (port) per adapter */ - rh->rpl.EntryCnt = cpu_to_be32(1); - memcpy(&rh->rpl.pe, &vport->fc_sparam.portName, - sizeof(struct lpfc_name)); - - /* point to the HBA attribute block */ - size = 2 * sizeof(struct lpfc_name) + - FOURBYTES; - } else { - size = sizeof(struct lpfc_name); - } - ab = (struct lpfc_fdmi_attr_block *) - ((uint8_t *)rh + size); - ab->EntryCnt = 0; - size += FOURBYTES; - - /* - * Point to 
beginning of first HBA attribute entry - */ - /* #1 HBA attribute entry */ - ad = (struct lpfc_fdmi_attr_def *) - ((uint8_t *)rh + size); - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, sizeof(struct lpfc_name)); - ad->AttrType = cpu_to_be16(RHBA_NODENAME); - ad->AttrLen = cpu_to_be16(FOURBYTES - + sizeof(struct lpfc_name)); - memcpy(&ae->un.NodeName, &vport->fc_sparam.nodeName, - sizeof(struct lpfc_name)); - ab->EntryCnt++; - size += FOURBYTES + sizeof(struct lpfc_name); - if ((size + LPFC_FDMI_MAX_AE_SIZE) > - (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE)) - goto hba_out; - - /* #2 HBA attribute entry */ - ad = (struct lpfc_fdmi_attr_def *) - ((uint8_t *)rh + size); - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, sizeof(ae->un.Manufacturer)); - ad->AttrType = cpu_to_be16(RHBA_MANUFACTURER); - strncpy(ae->un.Manufacturer, "Emulex Corporation", - sizeof(ae->un.Manufacturer)); - len = strnlen(ae->un.Manufacturer, - sizeof(ae->un.Manufacturer)); - len += (len & 3) ? (4 - (len & 3)) : 4; - ad->AttrLen = cpu_to_be16(FOURBYTES + len); - ab->EntryCnt++; - size += FOURBYTES + len; - if ((size + LPFC_FDMI_MAX_AE_SIZE) > - (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE)) - goto hba_out; - - /* #3 HBA attribute entry */ - ad = (struct lpfc_fdmi_attr_def *) - ((uint8_t *)rh + size); - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, sizeof(ae->un.SerialNumber)); - ad->AttrType = cpu_to_be16(RHBA_SERIAL_NUMBER); - strncpy(ae->un.SerialNumber, phba->SerialNumber, - sizeof(ae->un.SerialNumber)); - len = strnlen(ae->un.SerialNumber, - sizeof(ae->un.SerialNumber)); - len += (len & 3) ? (4 - (len & 3)) : 4; - ad->AttrLen = cpu_to_be16(FOURBYTES + len); - ab->EntryCnt++; - size += FOURBYTES + len; - if ((size + LPFC_FDMI_MAX_AE_SIZE) > - (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE)) - goto hba_out; - - /* #4 HBA attribute entry */ - ad = (struct lpfc_fdmi_attr_def *) - ((uint8_t *)rh + size); - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, sizeof(ae->un.Model)); - ad->AttrType = cpu_to_be16(RHBA_MODEL); - strncpy(ae->un.Model, phba->ModelName, - sizeof(ae->un.Model)); - len = strnlen(ae->un.Model, sizeof(ae->un.Model)); - len += (len & 3) ? (4 - (len & 3)) : 4; - ad->AttrLen = cpu_to_be16(FOURBYTES + len); - ab->EntryCnt++; - size += FOURBYTES + len; - if ((size + LPFC_FDMI_MAX_AE_SIZE) > - (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE)) - goto hba_out; - - /* #5 HBA attribute entry */ - ad = (struct lpfc_fdmi_attr_def *) - ((uint8_t *)rh + size); - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, sizeof(ae->un.ModelDescription)); - ad->AttrType = cpu_to_be16(RHBA_MODEL_DESCRIPTION); - strncpy(ae->un.ModelDescription, phba->ModelDesc, - sizeof(ae->un.ModelDescription)); - len = strnlen(ae->un.ModelDescription, - sizeof(ae->un.ModelDescription)); - len += (len & 3) ? 
(4 - (len & 3)) : 4; - ad->AttrLen = cpu_to_be16(FOURBYTES + len); - ab->EntryCnt++; - size += FOURBYTES + len; - if ((size + 8) > (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE)) - goto hba_out; - - /* #6 HBA attribute entry */ - ad = (struct lpfc_fdmi_attr_def *) - ((uint8_t *)rh + size); - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 8); - ad->AttrType = cpu_to_be16(RHBA_HARDWARE_VERSION); - ad->AttrLen = cpu_to_be16(FOURBYTES + 8); - /* Convert JEDEC ID to ascii for hardware version */ - incr = vp->rev.biuRev; - for (i = 0; i < 8; i++) { - j = (incr & 0xf); - if (j <= 9) - ae->un.HardwareVersion[7 - i] = - (char)((uint8_t)0x30 + - (uint8_t)j); - else - ae->un.HardwareVersion[7 - i] = - (char)((uint8_t)0x61 + - (uint8_t)(j - 10)); - incr = (incr >> 4); + /* point to the HBA attribute block */ + size = 2 * sizeof(struct lpfc_name) + + FOURBYTES; + } else { + size = sizeof(struct lpfc_name); + } + ab = (struct lpfc_fdmi_attr_block *)((uint8_t *)rh + size); + ab->EntryCnt = 0; + size += FOURBYTES; + bit_pos = 0; + if (new_mask) + mask = new_mask; + else + mask = vport->fdmi_hba_mask; + + /* Mask will dictate what attributes to build in the request */ + while (mask) { + if (mask & 0x1) { + func = lpfc_fdmi_hba_action[bit_pos]; + size += func(vport, + (struct lpfc_fdmi_attr_def *) + ((uint8_t *)rh + size)); + ab->EntryCnt++; + if ((size + 256) > + (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE)) + goto hba_out; } - ab->EntryCnt++; - size += FOURBYTES + 8; - if ((size + LPFC_FDMI_MAX_AE_SIZE) > - (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE)) - goto hba_out; - - /* #7 HBA attribute entry */ - ad = (struct lpfc_fdmi_attr_def *) - ((uint8_t *)rh + size); - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, sizeof(ae->un.DriverVersion)); - ad->AttrType = cpu_to_be16(RHBA_DRIVER_VERSION); - strncpy(ae->un.DriverVersion, lpfc_release_version, - sizeof(ae->un.DriverVersion)); - len = strnlen(ae->un.DriverVersion, - sizeof(ae->un.DriverVersion)); - len += (len & 3) ? (4 - (len & 3)) : 4; - ad->AttrLen = cpu_to_be16(FOURBYTES + len); - ab->EntryCnt++; - size += FOURBYTES + len; - if ((size + LPFC_FDMI_MAX_AE_SIZE) > - (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE)) - goto hba_out; - - /* #8 HBA attribute entry */ - ad = (struct lpfc_fdmi_attr_def *) - ((uint8_t *)rh + size); - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, sizeof(ae->un.OptionROMVersion)); - ad->AttrType = cpu_to_be16(RHBA_OPTION_ROM_VERSION); - strncpy(ae->un.OptionROMVersion, phba->OptionROMVersion, - sizeof(ae->un.OptionROMVersion)); - len = strnlen(ae->un.OptionROMVersion, - sizeof(ae->un.OptionROMVersion)); - len += (len & 3) ? (4 - (len & 3)) : 4; - ad->AttrLen = cpu_to_be16(FOURBYTES + len); - ab->EntryCnt++; - size += FOURBYTES + len; - if ((size + LPFC_FDMI_MAX_AE_SIZE) > - (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE)) - goto hba_out; - - /* #9 HBA attribute entry */ - ad = (struct lpfc_fdmi_attr_def *) - ((uint8_t *)rh + size); - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, sizeof(ae->un.FirmwareVersion)); - ad->AttrType = cpu_to_be16(RHBA_FIRMWARE_VERSION); - lpfc_decode_firmware_rev(phba, ae->un.FirmwareVersion, - 1); - len = strnlen(ae->un.FirmwareVersion, - sizeof(ae->un.FirmwareVersion)); - len += (len & 3) ? 
(4 - (len & 3)) : 4; - ad->AttrLen = cpu_to_be16(FOURBYTES + len); - ab->EntryCnt++; - size += FOURBYTES + len; - if ((size + LPFC_FDMI_MAX_AE_SIZE) > - (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE)) - goto hba_out; - - /* #10 HBA attribute entry */ - ad = (struct lpfc_fdmi_attr_def *) - ((uint8_t *)rh + size); - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, sizeof(ae->un.OsNameVersion)); - ad->AttrType = cpu_to_be16(RHBA_OS_NAME_VERSION); - snprintf(ae->un.OsNameVersion, - sizeof(ae->un.OsNameVersion), - "%s %s %s", - init_utsname()->sysname, - init_utsname()->release, - init_utsname()->version); - len = strnlen(ae->un.OsNameVersion, - sizeof(ae->un.OsNameVersion)); - len += (len & 3) ? (4 - (len & 3)) : 4; - ad->AttrLen = cpu_to_be16(FOURBYTES + len); - ab->EntryCnt++; - size += FOURBYTES + len; - if ((size + 4) > (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE)) - goto hba_out; - - /* #11 HBA attribute entry */ - ad = (struct lpfc_fdmi_attr_def *) - ((uint8_t *)rh + size); - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - ad->AttrType = - cpu_to_be16(RHBA_MAX_CT_PAYLOAD_LEN); - ad->AttrLen = cpu_to_be16(FOURBYTES + 4); - ae->un.MaxCTPayloadLen = cpu_to_be32(LPFC_MAX_CT_SIZE); - ab->EntryCnt++; - size += FOURBYTES + 4; - if ((size + LPFC_FDMI_MAX_AE_SIZE) > - (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE)) - goto hba_out; - - /* - * Currently switches don't seem to support the - * following extended HBA attributes. - */ - if (!(vport->cfg_fdmi_on & LPFC_FDMI_ALL_ATTRIB)) - goto hba_out; - - /* #12 HBA attribute entry */ - ad = (struct lpfc_fdmi_attr_def *) - ((uint8_t *)rh + size); - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, sizeof(ae->un.NodeSymName)); - ad->AttrType = cpu_to_be16(RHBA_SYM_NODENAME); - len = lpfc_vport_symbolic_node_name(vport, - ae->un.NodeSymName, sizeof(ae->un.NodeSymName)); - len += (len & 3) ? 
(4 - (len & 3)) : 4; - ad->AttrLen = cpu_to_be16(FOURBYTES + len); - ab->EntryCnt++; - size += FOURBYTES + len; -hba_out: - ab->EntryCnt = cpu_to_be32(ab->EntryCnt); - /* Total size */ - size = GID_REQUEST_SZ - 4 + size; + mask = mask >> 1; + bit_pos++; } +hba_out: + ab->EntryCnt = cpu_to_be32(ab->EntryCnt); + /* Total size */ + size = GID_REQUEST_SZ - 4 + size; break; case SLI_MGMT_RPRT: case SLI_MGMT_RPA: - { - struct serv_parm *hsp; - int len = 0; - - if (cmdcode == SLI_MGMT_RPRT) { - rh = (struct lpfc_fdmi_reg_hba *) - &CtReq->un.PortID; - /* HBA Identifier */ - memcpy(&rh->hi.PortName, - &vport->fc_sparam.portName, - sizeof(struct lpfc_name)); - pab = (struct lpfc_fdmi_reg_portattr *) - &rh->rpl.EntryCnt; - } else - pab = (struct lpfc_fdmi_reg_portattr *) - &CtReq->un.PortID; - size = sizeof(struct lpfc_name) + FOURBYTES; - memcpy((uint8_t *)&pab->PortName, - (uint8_t *)&vport->fc_sparam.portName, + pab = (struct lpfc_fdmi_reg_portattr *)&CtReq->un.PortID; + if (cmdcode == SLI_MGMT_RPRT) { + rh = (struct lpfc_fdmi_reg_hba *)pab; + /* HBA Identifier */ + memcpy(&rh->hi.PortName, + &phba->pport->fc_sparam.portName, sizeof(struct lpfc_name)); - pab->ab.EntryCnt = 0; - - /* #1 Port attribute entry */ - ad = (struct lpfc_fdmi_attr_def *) - ((uint8_t *)pab + size); - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, sizeof(ae->un.FC4Types)); - ad->AttrType = - cpu_to_be16(RPRT_SUPPORTED_FC4_TYPES); - ad->AttrLen = cpu_to_be16(FOURBYTES + 32); - ae->un.FC4Types[0] = 0x40; /* Type 1 - ELS */ - ae->un.FC4Types[1] = 0x80; /* Type 8 - FCP */ - ae->un.FC4Types[4] = 0x80; /* Type 32 - CT */ - pab->ab.EntryCnt++; - size += FOURBYTES + 32; - - /* #2 Port attribute entry */ - ad = (struct lpfc_fdmi_attr_def *) - ((uint8_t *)pab + size); - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - ad->AttrType = cpu_to_be16(RPRT_SUPPORTED_SPEED); - ad->AttrLen = cpu_to_be16(FOURBYTES + 4); - ae->un.SupportSpeed = 0; - if (phba->lmt & LMT_32Gb) - ae->un.SupportSpeed |= HBA_PORTSPEED_32GBIT; - if (phba->lmt & LMT_16Gb) - ae->un.SupportSpeed |= HBA_PORTSPEED_16GBIT; - if (phba->lmt & LMT_10Gb) - ae->un.SupportSpeed |= HBA_PORTSPEED_10GBIT; - if (phba->lmt & LMT_8Gb) - ae->un.SupportSpeed |= HBA_PORTSPEED_8GBIT; - if (phba->lmt & LMT_4Gb) - ae->un.SupportSpeed |= HBA_PORTSPEED_4GBIT; - if (phba->lmt & LMT_2Gb) - ae->un.SupportSpeed |= HBA_PORTSPEED_2GBIT; - if (phba->lmt & LMT_1Gb) - ae->un.SupportSpeed |= HBA_PORTSPEED_1GBIT; - ae->un.SupportSpeed = - cpu_to_be32(ae->un.SupportSpeed); - - pab->ab.EntryCnt++; - size += FOURBYTES + 4; - - /* #3 Port attribute entry */ - ad = (struct lpfc_fdmi_attr_def *) - ((uint8_t *)pab + size); - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - ad->AttrType = cpu_to_be16(RPRT_PORT_SPEED); - ad->AttrLen = cpu_to_be16(FOURBYTES + 4); - switch (phba->fc_linkspeed) { - case LPFC_LINK_SPEED_1GHZ: - ae->un.PortSpeed = HBA_PORTSPEED_1GBIT; - break; - case LPFC_LINK_SPEED_2GHZ: - ae->un.PortSpeed = HBA_PORTSPEED_2GBIT; - break; - case LPFC_LINK_SPEED_4GHZ: - ae->un.PortSpeed = HBA_PORTSPEED_4GBIT; - break; - case LPFC_LINK_SPEED_8GHZ: - ae->un.PortSpeed = HBA_PORTSPEED_8GBIT; - break; - case LPFC_LINK_SPEED_10GHZ: - ae->un.PortSpeed = HBA_PORTSPEED_10GBIT; - break; - case LPFC_LINK_SPEED_16GHZ: - ae->un.PortSpeed = HBA_PORTSPEED_16GBIT; - break; - case LPFC_LINK_SPEED_32GHZ: - ae->un.PortSpeed = HBA_PORTSPEED_32GBIT; - break; - default: - ae->un.PortSpeed = HBA_PORTSPEED_UNKNOWN; - break; - } - ae->un.PortSpeed = cpu_to_be32(ae->un.PortSpeed); - 
pab->ab.EntryCnt++; - size += FOURBYTES + 4; - - /* #4 Port attribute entry */ - ad = (struct lpfc_fdmi_attr_def *) - ((uint8_t *)pab + size); - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - ad->AttrType = cpu_to_be16(RPRT_MAX_FRAME_SIZE); - ad->AttrLen = cpu_to_be16(FOURBYTES + 4); - hsp = (struct serv_parm *)&vport->fc_sparam; - ae->un.MaxFrameSize = - (((uint32_t)hsp->cmn. - bbRcvSizeMsb) << 8) | (uint32_t)hsp->cmn. - bbRcvSizeLsb; - ae->un.MaxFrameSize = - cpu_to_be32(ae->un.MaxFrameSize); - pab->ab.EntryCnt++; - size += FOURBYTES + 4; - if ((size + LPFC_FDMI_MAX_AE_SIZE) > - (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE)) - goto port_out; - - /* #5 Port attribute entry */ - ad = (struct lpfc_fdmi_attr_def *) - ((uint8_t *)pab + size); - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, sizeof(ae->un.OsDeviceName)); - ad->AttrType = cpu_to_be16(RPRT_OS_DEVICE_NAME); - strncpy((char *)ae->un.OsDeviceName, LPFC_DRIVER_NAME, - sizeof(ae->un.OsDeviceName)); - len = strnlen((char *)ae->un.OsDeviceName, - sizeof(ae->un.OsDeviceName)); - len += (len & 3) ? (4 - (len & 3)) : 4; - ad->AttrLen = cpu_to_be16(FOURBYTES + len); - pab->ab.EntryCnt++; - size += FOURBYTES + len; - if ((size + LPFC_FDMI_MAX_AE_SIZE) > - (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE)) - goto port_out; - - /* #6 Port attribute entry */ - ad = (struct lpfc_fdmi_attr_def *) - ((uint8_t *)pab + size); - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, sizeof(ae->un.HostName)); - snprintf(ae->un.HostName, sizeof(ae->un.HostName), "%s", - init_utsname()->nodename); - ad->AttrType = cpu_to_be16(RPRT_HOST_NAME); - len = strnlen(ae->un.HostName, - sizeof(ae->un.HostName)); - len += (len & 3) ? (4 - (len & 3)) : 4; - ad->AttrLen = - cpu_to_be16(FOURBYTES + len); - pab->ab.EntryCnt++; - size += FOURBYTES + len; - if ((size + sizeof(struct lpfc_name)) > - (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE)) - goto port_out; + pab = (struct lpfc_fdmi_reg_portattr *) + ((uint8_t *)pab + sizeof(struct lpfc_name)); + } - /* - * Currently switches don't seem to support the - * following extended Port attributes. 
- */ - if (!(vport->cfg_fdmi_on & LPFC_FDMI_ALL_ATTRIB)) - goto port_out; - - /* #7 Port attribute entry */ - ad = (struct lpfc_fdmi_attr_def *) - ((uint8_t *)pab + size); - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, sizeof(struct lpfc_name)); - ad->AttrType = cpu_to_be16(RPRT_NODENAME); - ad->AttrLen = cpu_to_be16(FOURBYTES - + sizeof(struct lpfc_name)); - memcpy(&ae->un.NodeName, &vport->fc_sparam.nodeName, - sizeof(struct lpfc_name)); - pab->ab.EntryCnt++; - size += FOURBYTES + sizeof(struct lpfc_name); - if ((size + sizeof(struct lpfc_name)) > - (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE)) - goto port_out; - - /* #8 Port attribute entry */ - ad = (struct lpfc_fdmi_attr_def *) - ((uint8_t *)pab + size); - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, sizeof(struct lpfc_name)); - ad->AttrType = cpu_to_be16(RPRT_PORTNAME); - ad->AttrLen = cpu_to_be16(FOURBYTES - + sizeof(struct lpfc_name)); - memcpy(&ae->un.PortName, &vport->fc_sparam.portName, - sizeof(struct lpfc_name)); - pab->ab.EntryCnt++; - size += FOURBYTES + sizeof(struct lpfc_name); - if ((size + LPFC_FDMI_MAX_AE_SIZE) > - (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE)) - goto port_out; - - /* #9 Port attribute entry */ - ad = (struct lpfc_fdmi_attr_def *) - ((uint8_t *)pab + size); - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, sizeof(ae->un.NodeSymName)); - ad->AttrType = cpu_to_be16(RPRT_SYM_PORTNAME); - len = lpfc_vport_symbolic_port_name(vport, - ae->un.NodeSymName, sizeof(ae->un.NodeSymName)); - len += (len & 3) ? (4 - (len & 3)) : 4; - ad->AttrLen = cpu_to_be16(FOURBYTES + len); - pab->ab.EntryCnt++; - size += FOURBYTES + len; - if ((size + 4) > (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE)) - goto port_out; - - /* #10 Port attribute entry */ - ad = (struct lpfc_fdmi_attr_def *) - ((uint8_t *)pab + size); - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - ad->AttrType = cpu_to_be16(RPRT_PORT_TYPE); - ae->un.PortState = 0; - ad->AttrLen = cpu_to_be16(FOURBYTES + 4); - pab->ab.EntryCnt++; - size += FOURBYTES + 4; - if ((size + 4) > (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE)) - goto port_out; - - /* #11 Port attribute entry */ - ad = (struct lpfc_fdmi_attr_def *) - ((uint8_t *)pab + size); - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - ad->AttrType = cpu_to_be16(RPRT_SUPPORTED_CLASS); - ae->un.SupportClass = - cpu_to_be32(FC_COS_CLASS2 | FC_COS_CLASS3); - ad->AttrLen = cpu_to_be16(FOURBYTES + 4); - pab->ab.EntryCnt++; - size += FOURBYTES + 4; - if ((size + sizeof(struct lpfc_name)) > - (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE)) - goto port_out; - - /* #12 Port attribute entry */ - ad = (struct lpfc_fdmi_attr_def *) - ((uint8_t *)pab + size); - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, sizeof(struct lpfc_name)); - ad->AttrType = cpu_to_be16(RPRT_FABRICNAME); - ad->AttrLen = cpu_to_be16(FOURBYTES - + sizeof(struct lpfc_name)); - memcpy(&ae->un.FabricName, &vport->fabric_nodename, - sizeof(struct lpfc_name)); - pab->ab.EntryCnt++; - size += FOURBYTES + sizeof(struct lpfc_name); - if ((size + LPFC_FDMI_MAX_AE_SIZE) > - (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE)) - goto port_out; - - /* #13 Port attribute entry */ - ad = (struct lpfc_fdmi_attr_def *) - ((uint8_t *)pab + size); - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, sizeof(ae->un.FC4Types)); - ad->AttrType = - cpu_to_be16(RPRT_ACTIVE_FC4_TYPES); - ad->AttrLen = cpu_to_be16(FOURBYTES + 32); - ae->un.FC4Types[0] = 0x40; /* Type 1 - ELS */ - ae->un.FC4Types[1] = 0x80; /* Type 8 - FCP */ - 
ae->un.FC4Types[4] = 0x80; /* Type 32 - CT */ - pab->ab.EntryCnt++; - size += FOURBYTES + 32; - if ((size + 4) > (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE)) - goto port_out; - - /* #257 Port attribute entry */ - ad = (struct lpfc_fdmi_attr_def *) - ((uint8_t *)pab + size); - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - ad->AttrType = cpu_to_be16(RPRT_PORT_STATE); - ae->un.PortState = 0; - ad->AttrLen = cpu_to_be16(FOURBYTES + 4); - pab->ab.EntryCnt++; - size += FOURBYTES + 4; - if ((size + 4) > (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE)) - goto port_out; - - /* #258 Port attribute entry */ - ad = (struct lpfc_fdmi_attr_def *) - ((uint8_t *)pab + size); - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - ad->AttrType = cpu_to_be16(RPRT_DISC_PORT); - ae->un.PortState = lpfc_find_map_node(vport); - ae->un.PortState = cpu_to_be32(ae->un.PortState); - ad->AttrLen = cpu_to_be16(FOURBYTES + 4); - pab->ab.EntryCnt++; - size += FOURBYTES + 4; - if ((size + 4) > (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE)) - goto port_out; - - /* #259 Port attribute entry */ - ad = (struct lpfc_fdmi_attr_def *) - ((uint8_t *)pab + size); - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - ad->AttrType = cpu_to_be16(RPRT_PORT_ID); - ae->un.PortId = cpu_to_be32(vport->fc_myDID); - ad->AttrLen = cpu_to_be16(FOURBYTES + 4); - pab->ab.EntryCnt++; - size += FOURBYTES + 4; -port_out: - pab->ab.EntryCnt = cpu_to_be32(pab->ab.EntryCnt); - /* Total size */ - size = GID_REQUEST_SZ - 4 + size; + memcpy((uint8_t *)&pab->PortName, + (uint8_t *)&vport->fc_sparam.portName, + sizeof(struct lpfc_name)); + size += sizeof(struct lpfc_name) + FOURBYTES; + pab->ab.EntryCnt = 0; + bit_pos = 0; + if (new_mask) + mask = new_mask; + else + mask = vport->fdmi_port_mask; + + /* Mask will dictate what attributes to build in the request */ + while (mask) { + if (mask & 0x1) { + func = lpfc_fdmi_port_action[bit_pos]; + size += func(vport, + (struct lpfc_fdmi_attr_def *) + ((uint8_t *)pab + size)); + pab->ab.EntryCnt++; + if ((size + 256) > + (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE)) + goto port_out; + } + mask = mask >> 1; + bit_pos++; } +port_out: + pab->ab.EntryCnt = cpu_to_be32(pab->ab.EntryCnt); + /* Total size */ + if (cmdcode == SLI_MGMT_RPRT) + size += sizeof(struct lpfc_name); + size = GID_REQUEST_SZ - 4 + size; break; case SLI_MGMT_GHAT: @@ -2158,41 +2749,6 @@ lpfc_delayed_disc_timeout_handler(struct lpfc_vport *vport) } void -lpfc_fdmi_tmo(unsigned long ptr) -{ - struct lpfc_vport *vport = (struct lpfc_vport *)ptr; - struct lpfc_hba *phba = vport->phba; - uint32_t tmo_posted; - unsigned long iflag; - - spin_lock_irqsave(&vport->work_port_lock, iflag); - tmo_posted = vport->work_port_events & WORKER_FDMI_TMO; - if (!tmo_posted) - vport->work_port_events |= WORKER_FDMI_TMO; - spin_unlock_irqrestore(&vport->work_port_lock, iflag); - - if (!tmo_posted) - lpfc_worker_wake_up(phba); - return; -} - -void -lpfc_fdmi_timeout_handler(struct lpfc_vport *vport) -{ - struct lpfc_nodelist *ndlp; - - ndlp = lpfc_findnode_did(vport, FDMI_DID); - if (ndlp && NLP_CHK_NODE_ACT(ndlp)) { - if (init_utsname()->nodename[0] != '\0') - lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA); - else - mod_timer(&vport->fc_fdmitmo, jiffies + - msecs_to_jiffies(1000 * 60)); - } - return; -} - -void lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag) { struct lpfc_sli *psli = &phba->sli; diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index 25aa9b98d53a..a63542bac153 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ 
b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -1054,11 +1054,11 @@ lpfc_debugfs_dif_err_write(struct file *file, const char __user *buf, { struct dentry *dent = file->f_path.dentry; struct lpfc_hba *phba = file->private_data; - char dstbuf[32]; + char dstbuf[33]; uint64_t tmp = 0; int size; - memset(dstbuf, 0, 32); + memset(dstbuf, 0, 33); size = (nbytes < 32) ? nbytes : 32; if (copy_from_user(dstbuf, buf, size)) return 0; diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index b6fa257ea3e0..7f5abb8f52bc 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -455,9 +455,9 @@ int lpfc_issue_reg_vfi(struct lpfc_vport *vport) { struct lpfc_hba *phba = vport->phba; - LPFC_MBOXQ_t *mboxq; + LPFC_MBOXQ_t *mboxq = NULL; struct lpfc_nodelist *ndlp; - struct lpfc_dmabuf *dmabuf; + struct lpfc_dmabuf *dmabuf = NULL; int rc = 0; /* move forward in case of SLI4 FC port loopback test and pt2pt mode */ @@ -471,25 +471,33 @@ lpfc_issue_reg_vfi(struct lpfc_vport *vport) } } - dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); - if (!dmabuf) { + mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mboxq) { rc = -ENOMEM; goto fail; } - dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys); - if (!dmabuf->virt) { - rc = -ENOMEM; - goto fail_free_dmabuf; - } - mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); - if (!mboxq) { - rc = -ENOMEM; - goto fail_free_coherent; + /* Supply CSP's only if we are fabric connect or pt-to-pt connect */ + if ((vport->fc_flag & FC_FABRIC) || (vport->fc_flag & FC_PT2PT)) { + dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); + if (!dmabuf) { + rc = -ENOMEM; + goto fail; + } + dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys); + if (!dmabuf->virt) { + rc = -ENOMEM; + goto fail; + } + memcpy(dmabuf->virt, &phba->fc_fabparam, + sizeof(struct serv_parm)); } + vport->port_state = LPFC_FABRIC_CFG_LINK; - memcpy(dmabuf->virt, &phba->fc_fabparam, sizeof(vport->fc_sparam)); - lpfc_reg_vfi(mboxq, vport, dmabuf->phys); + if (dmabuf) + lpfc_reg_vfi(mboxq, vport, dmabuf->phys); + else + lpfc_reg_vfi(mboxq, vport, 0); mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi; mboxq->vport = vport; @@ -497,17 +505,19 @@ lpfc_issue_reg_vfi(struct lpfc_vport *vport) rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) { rc = -ENXIO; - goto fail_free_mbox; + goto fail; } return 0; -fail_free_mbox: - mempool_free(mboxq, phba->mbox_mem_pool); -fail_free_coherent: - lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys); -fail_free_dmabuf: - kfree(dmabuf); fail: + if (mboxq) + mempool_free(mboxq, phba->mbox_mem_pool); + if (dmabuf) { + if (dmabuf->virt) + lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys); + kfree(dmabuf); + } + lpfc_vport_set_state(vport, FC_VPORT_FAILED); lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, "0289 Issue Register VFI failed: Err %d\n", rc); @@ -678,6 +688,21 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, sp->cmn.bbRcvSizeLsb; fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp); + if (fabric_param_changed) { + /* Reset FDMI attribute masks based on config parameter */ + if (phba->cfg_fdmi_on == LPFC_FDMI_NO_SUPPORT) { + vport->fdmi_hba_mask = 0; + vport->fdmi_port_mask = 0; + } else { + /* Setup appropriate attribute masks */ + vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR; + if (phba->cfg_fdmi_on == LPFC_FDMI_SMART_SAN) + vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR; + else + vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR; + } + + } 
memcpy(&vport->fabric_portname, &sp->portName, sizeof(struct lpfc_name)); memcpy(&vport->fabric_nodename, &sp->nodeName, @@ -711,9 +736,10 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, * For FC we need to do some special processing because of the SLI * Port's default settings of the Common Service Parameters. */ - if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC) { + if ((phba->sli_rev == LPFC_SLI_REV4) && + (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)) { /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */ - if ((phba->sli_rev == LPFC_SLI_REV4) && fabric_param_changed) + if (fabric_param_changed) lpfc_unregister_fcf_prep(phba); /* This should just update the VFI CSPs*/ @@ -824,13 +850,21 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, spin_lock_irq(shost->host_lock); vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); + vport->fc_flag |= FC_PT2PT; spin_unlock_irq(shost->host_lock); - phba->fc_edtov = FF_DEF_EDTOV; - phba->fc_ratov = FF_DEF_RATOV; + /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */ + if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) { + lpfc_unregister_fcf_prep(phba); + + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~FC_VFI_REGISTERED; + spin_unlock_irq(shost->host_lock); + phba->fc_topology_changed = 0; + } + rc = memcmp(&vport->fc_portname, &sp->portName, sizeof(vport->fc_portname)); - memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm)); if (rc >= 0) { /* This side will initiate the PLOGI */ @@ -839,38 +873,14 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, spin_unlock_irq(shost->host_lock); /* - * N_Port ID cannot be 0, set our to LocalID the other - * side will be RemoteID. + * N_Port ID cannot be 0, set our Id to LocalID + * the other side will be RemoteID. */ /* not equal */ if (rc) vport->fc_myDID = PT2PT_LocalID; - mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); - if (!mbox) - goto fail; - - lpfc_config_link(phba, mbox); - - mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; - mbox->vport = vport; - rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); - if (rc == MBX_NOT_FINISHED) { - mempool_free(mbox, phba->mbox_mem_pool); - goto fail; - } - - /* - * For SLI4, the VFI/VPI are registered AFTER the - * Nport with the higher WWPN sends the PLOGI with - * an assigned NPortId. - */ - - /* not equal */ - if ((phba->sli_rev == LPFC_SLI_REV4) && rc) - lpfc_issue_reg_vfi(vport); - /* Decrement ndlp reference count indicating that ndlp can be * safely released when other references to it are done. */ @@ -912,29 +922,20 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* If we are pt2pt with another NPort, force NPIV off! */ phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED; - spin_lock_irq(shost->host_lock); - vport->fc_flag |= FC_PT2PT; - spin_unlock_irq(shost->host_lock); - /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */ - if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) { - lpfc_unregister_fcf_prep(phba); + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) + goto fail; - /* The FC_VFI_REGISTERED flag will get clear in the cmpl - * handler for unreg_vfi, but if we don't force the - * FC_VFI_REGISTERED flag then the reg_vfi mailbox could be - * built with the update bit set instead of just the vp bit to - * change the Nport ID. We need to have the vp set and the - * Upd cleared on topology changes. 
- */ - spin_lock_irq(shost->host_lock); - vport->fc_flag &= ~FC_VFI_REGISTERED; - spin_unlock_irq(shost->host_lock); - phba->fc_topology_changed = 0; - lpfc_issue_reg_vfi(vport); + lpfc_config_link(phba, mbox); + + mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link; + mbox->vport = vport; + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) { + mempool_free(mbox, phba->mbox_mem_pool); + goto fail; } - /* Start discovery - this should just do CLEAR_LA */ - lpfc_disc_start(vport); return 0; fail: return -ENXIO; @@ -1157,6 +1158,7 @@ flogifail: spin_lock_irq(&phba->hbalock); phba->fcf.fcf_flag &= ~FCF_DISCOVERY; spin_unlock_irq(&phba->hbalock); + lpfc_nlp_put(ndlp); if (!lpfc_error_lost_link(irsp)) { @@ -3792,14 +3794,17 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE); } + + ndlp->nlp_flag |= NLP_REG_LOGIN_SEND; if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) != MBX_NOT_FINISHED) goto out; - else - /* Decrement the ndlp reference count we - * set for this failed mailbox command. - */ - lpfc_nlp_put(ndlp); + + /* Decrement the ndlp reference count we + * set for this failed mailbox command. + */ + lpfc_nlp_put(ndlp); + ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; /* ELS rsp: Cannot issue reg_login for <NPortid> */ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, @@ -3856,6 +3861,7 @@ out: * the routine lpfc_els_free_iocb. */ cmdiocb->context1 = NULL; + } lpfc_els_free_iocb(phba, cmdiocb); @@ -3898,6 +3904,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, IOCB_t *oldcmd; struct lpfc_iocbq *elsiocb; uint8_t *pcmd; + struct serv_parm *sp; uint16_t cmdsize; int rc; ELS_PKT *els_pkt_ptr; @@ -3927,6 +3934,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, "Issue ACC: did:x%x flg:x%x", ndlp->nlp_DID, ndlp->nlp_flag, 0); break; + case ELS_CMD_FLOGI: case ELS_CMD_PLOGI: cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t)); elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, @@ -3944,10 +3952,34 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, *((uint32_t *) (pcmd)) = ELS_CMD_ACC; pcmd += sizeof(uint32_t); - memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm)); + sp = (struct serv_parm *)pcmd; + + if (flag == ELS_CMD_FLOGI) { + /* Copy the received service parameters back */ + memcpy(sp, &phba->fc_fabparam, + sizeof(struct serv_parm)); + + /* Clear the F_Port bit */ + sp->cmn.fPort = 0; + + /* Mark all class service parameters as invalid */ + sp->cls1.classValid = 0; + sp->cls2.classValid = 0; + sp->cls3.classValid = 0; + sp->cls4.classValid = 0; + + /* Copy our worldwide names */ + memcpy(&sp->portName, &vport->fc_sparam.portName, + sizeof(struct lpfc_name)); + memcpy(&sp->nodeName, &vport->fc_sparam.nodeName, + sizeof(struct lpfc_name)); + } else { + memcpy(pcmd, &vport->fc_sparam, + sizeof(struct serv_parm)); + } lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, - "Issue ACC PLOGI: did:x%x flg:x%x", + "Issue ACC FLOGI/PLOGI: did:x%x flg:x%x", ndlp->nlp_DID, ndlp->nlp_flag, 0); break; case ELS_CMD_PRLO: @@ -4673,6 +4705,23 @@ lpfc_rdp_res_link_error(struct fc_rdp_link_error_status_desc *desc, desc->length = cpu_to_be32(sizeof(desc->info)); } +int +lpfc_rdp_res_fec_desc(struct fc_fec_rdp_desc *desc, READ_LNK_VAR *stat) +{ + if (bf_get(lpfc_read_link_stat_gec2, stat) == 0) + return 0; + desc->tag = cpu_to_be32(RDP_FEC_DESC_TAG); + + desc->info.CorrectedBlocks = + cpu_to_be32(stat->fecCorrBlkCount); + desc->info.UncorrectableBlocks = + 
cpu_to_be32(stat->fecUncorrBlkCount); + + desc->length = cpu_to_be32(sizeof(desc->info)); + + return sizeof(struct fc_fec_rdp_desc); +} + void lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba) { @@ -4681,26 +4730,26 @@ lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba) desc->tag = cpu_to_be32(RDP_PORT_SPEED_DESC_TAG); - switch (phba->sli4_hba.link_state.speed) { - case LPFC_FC_LA_SPEED_1G: + switch (phba->fc_linkspeed) { + case LPFC_LINK_SPEED_1GHZ: rdp_speed = RDP_PS_1GB; break; - case LPFC_FC_LA_SPEED_2G: + case LPFC_LINK_SPEED_2GHZ: rdp_speed = RDP_PS_2GB; break; - case LPFC_FC_LA_SPEED_4G: + case LPFC_LINK_SPEED_4GHZ: rdp_speed = RDP_PS_4GB; break; - case LPFC_FC_LA_SPEED_8G: + case LPFC_LINK_SPEED_8GHZ: rdp_speed = RDP_PS_8GB; break; - case LPFC_FC_LA_SPEED_10G: + case LPFC_LINK_SPEED_10GHZ: rdp_speed = RDP_PS_10GB; break; - case LPFC_FC_LA_SPEED_16G: + case LPFC_LINK_SPEED_16GHZ: rdp_speed = RDP_PS_16GB; break; - case LPFC_FC_LA_SPEED_32G: + case LPFC_LINK_SPEED_32GHZ: rdp_speed = RDP_PS_32GB; break; default: @@ -4778,15 +4827,18 @@ lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context, struct lpfc_nodelist *ndlp = rdp_context->ndlp; struct lpfc_vport *vport = ndlp->vport; struct lpfc_iocbq *elsiocb; + struct ulp_bde64 *bpl; IOCB_t *icmd; uint8_t *pcmd; struct ls_rjt *stat; struct fc_rdp_res_frame *rdp_res; uint32_t cmdsize; - int rc; + int rc, fec_size; if (status != SUCCESS) goto error; + + /* This will change once we know the true size of the RDP payload */ cmdsize = sizeof(struct fc_rdp_res_frame); elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, @@ -4823,10 +4875,18 @@ lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context, lpfc_rdp_res_diag_port_names(&rdp_res->diag_port_names_desc, phba); lpfc_rdp_res_attach_port_names(&rdp_res->attached_port_names_desc, vport, ndlp); - rdp_res->length = cpu_to_be32(RDP_DESC_PAYLOAD_SIZE); - + fec_size = lpfc_rdp_res_fec_desc(&rdp_res->fec_desc, + &rdp_context->link_stat); + rdp_res->length = cpu_to_be32(fec_size + RDP_DESC_PAYLOAD_SIZE); elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; + /* Now that we know the true size of the payload, update the BPL */ + bpl = (struct ulp_bde64 *) + (((struct lpfc_dmabuf *)(elsiocb->context3))->virt); + bpl->tus.f.bdeSize = (fec_size + RDP_DESC_PAYLOAD_SIZE + 8); + bpl->tus.f.bdeFlags = 0; + bpl->tus.w = le32_to_cpu(bpl->tus.w); + phba->fc_stat.elsXmitACC++; rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); if (rc == IOCB_ERROR) @@ -4956,13 +5016,12 @@ lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, if (RDP_NPORT_ID_SIZE != be32_to_cpu(rdp_req->nport_id_desc.length)) goto rjt_logerr; - rdp_context = kmalloc(sizeof(struct lpfc_rdp_context), GFP_KERNEL); + rdp_context = kzalloc(sizeof(struct lpfc_rdp_context), GFP_KERNEL); if (!rdp_context) { rjt_err = LSRJT_UNABLE_TPC; goto error; } - memset(rdp_context, 0, sizeof(struct lpfc_rdp_context)); cmd = &cmdiocb->iocb; rdp_context->ndlp = lpfc_nlp_get(ndlp); rdp_context->ox_id = cmd->unsli3.rcvsli3.ox_id; @@ -5739,7 +5798,6 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, IOCB_t *icmd = &cmdiocb->iocb; struct serv_parm *sp; LPFC_MBOXQ_t *mbox; - struct ls_rjt stat; uint32_t cmd, did; int rc; uint32_t fc_flag = 0; @@ -5765,135 +5823,92 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, return 1; } - if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1))) { - /* For a FLOGI we accept, then if our 
portname is greater - * then the remote portname we initiate Nport login. - */ + (void) lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1); - rc = memcmp(&vport->fc_portname, &sp->portName, - sizeof(struct lpfc_name)); - if (!rc) { - if (phba->sli_rev < LPFC_SLI_REV4) { - mbox = mempool_alloc(phba->mbox_mem_pool, - GFP_KERNEL); - if (!mbox) - return 1; - lpfc_linkdown(phba); - lpfc_init_link(phba, mbox, - phba->cfg_topology, - phba->cfg_link_speed); - mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0; - mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; - mbox->vport = vport; - rc = lpfc_sli_issue_mbox(phba, mbox, - MBX_NOWAIT); - lpfc_set_loopback_flag(phba); - if (rc == MBX_NOT_FINISHED) - mempool_free(mbox, phba->mbox_mem_pool); - return 1; - } else { - /* abort the flogi coming back to ourselves - * due to external loopback on the port. - */ - lpfc_els_abort_flogi(phba); - return 0; - } - } else if (rc > 0) { /* greater than */ - spin_lock_irq(shost->host_lock); - vport->fc_flag |= FC_PT2PT_PLOGI; - spin_unlock_irq(shost->host_lock); + /* + * If our portname is greater than the remote portname, + * then we initiate Nport login. + */ - /* If we have the high WWPN we can assign our own - * myDID; otherwise, we have to WAIT for a PLOGI - * from the remote NPort to find out what it - * will be. - */ - vport->fc_myDID = PT2PT_LocalID; - } else - vport->fc_myDID = PT2PT_RemoteID; + rc = memcmp(&vport->fc_portname, &sp->portName, + sizeof(struct lpfc_name)); - /* - * The vport state should go to LPFC_FLOGI only - * AFTER we issue a FLOGI, not receive one. + if (!rc) { + if (phba->sli_rev < LPFC_SLI_REV4) { + mbox = mempool_alloc(phba->mbox_mem_pool, + GFP_KERNEL); + if (!mbox) + return 1; + lpfc_linkdown(phba); + lpfc_init_link(phba, mbox, + phba->cfg_topology, + phba->cfg_link_speed); + mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0; + mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + mbox->vport = vport; + rc = lpfc_sli_issue_mbox(phba, mbox, + MBX_NOWAIT); + lpfc_set_loopback_flag(phba); + if (rc == MBX_NOT_FINISHED) + mempool_free(mbox, phba->mbox_mem_pool); + return 1; + } + + /* abort the flogi coming back to ourselves + * due to external loopback on the port. */ + lpfc_els_abort_flogi(phba); + return 0; + + } else if (rc > 0) { /* greater than */ spin_lock_irq(shost->host_lock); - fc_flag = vport->fc_flag; - port_state = vport->port_state; - vport->fc_flag |= FC_PT2PT; - vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); + vport->fc_flag |= FC_PT2PT_PLOGI; spin_unlock_irq(shost->host_lock); - lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, - "3311 Rcv Flogi PS x%x new PS x%x " - "fc_flag x%x new fc_flag x%x\n", - port_state, vport->port_state, - fc_flag, vport->fc_flag); - /* - * We temporarily set fc_myDID to make it look like we are - * a Fabric. This is done just so we end up with the right - * did / sid on the FLOGI ACC rsp. + /* If we have the high WWPN we can assign our own + * myDID; otherwise, we have to WAIT for a PLOGI + * from the remote NPort to find out what it + * will be. */ - did = vport->fc_myDID; - vport->fc_myDID = Fabric_DID; - + vport->fc_myDID = PT2PT_LocalID; } else { - /* Reject this request because invalid parameters */ - stat.un.b.lsRjtRsvd0 = 0; - stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; - stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS; - stat.un.b.vendorUnique = 0; - - /* - * We temporarily set fc_myDID to make it look like we are - * a Fabric. This is done just so we end up with the right - * did / sid on the FLOGI LS_RJT rsp. 
- */ - did = vport->fc_myDID; - vport->fc_myDID = Fabric_DID; - - lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, - NULL); + vport->fc_myDID = PT2PT_RemoteID; + } - /* Now lets put fc_myDID back to what its supposed to be */ - vport->fc_myDID = did; + /* + * The vport state should go to LPFC_FLOGI only + * AFTER we issue a FLOGI, not receive one. + */ + spin_lock_irq(shost->host_lock); + fc_flag = vport->fc_flag; + port_state = vport->port_state; + vport->fc_flag |= FC_PT2PT; + vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); + spin_unlock_irq(shost->host_lock); + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "3311 Rcv Flogi PS x%x new PS x%x " + "fc_flag x%x new fc_flag x%x\n", + port_state, vport->port_state, + fc_flag, vport->fc_flag); - return 1; - } + /* + * We temporarily set fc_myDID to make it look like we are + * a Fabric. This is done just so we end up with the right + * did / sid on the FLOGI ACC rsp. + */ + did = vport->fc_myDID; + vport->fc_myDID = Fabric_DID; - /* send our FLOGI first */ - if (vport->port_state < LPFC_FLOGI) { - vport->fc_myDID = 0; - lpfc_initial_flogi(vport); - vport->fc_myDID = Fabric_DID; - } + memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm)); /* Send back ACC */ - lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL); + lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, cmdiocb, ndlp, NULL); /* Now lets put fc_myDID back to what its supposed to be */ vport->fc_myDID = did; - if (!(vport->fc_flag & FC_PT2PT_PLOGI)) { - - mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); - if (!mbox) - goto fail; - - lpfc_config_link(phba, mbox); - - mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; - mbox->vport = vport; - rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); - if (rc == MBX_NOT_FINISHED) { - mempool_free(mbox, phba->mbox_mem_pool); - goto fail; - } - } - return 0; -fail: - return 1; } /** @@ -7345,7 +7360,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, /* reject till our FLOGI completes */ if ((vport->port_state < LPFC_FABRIC_CFG_LINK) && - (cmd != ELS_CMD_FLOGI)) { + (cmd != ELS_CMD_FLOGI)) { rjt_err = LSRJT_UNABLE_TPC; rjt_exp = LSEXP_NOTHING_MORE; goto lsrjt; @@ -7381,6 +7396,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, rjt_exp = LSEXP_NOTHING_MORE; break; } + if (vport->port_state < LPFC_DISC_AUTH) { if (!(phba->pport->fc_flag & FC_PT2PT) || (phba->pport->fc_flag & FC_PT2PT_PLOGI)) { @@ -7730,6 +7746,35 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, } } +void +lpfc_start_fdmi(struct lpfc_vport *vport) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_nodelist *ndlp; + + /* If this is the first time, allocate an ndlp and initialize + * it. Otherwise, make sure the node is enabled and then do the + * login. + */ + ndlp = lpfc_findnode_did(vport, FDMI_DID); + if (!ndlp) { + ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); + if (ndlp) { + lpfc_nlp_init(vport, ndlp, FDMI_DID); + ndlp->nlp_type |= NLP_FABRIC; + } else { + return; + } + } + if (!NLP_CHK_NODE_ACT(ndlp)) + ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE); + + if (ndlp) { + lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); + lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); + } +} + /** * lpfc_do_scr_ns_plogi - Issue a plogi to the name server for scr * @phba: pointer to lpfc hba data structure. 
@@ -7746,7 +7791,7 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, void lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport) { - struct lpfc_nodelist *ndlp, *ndlp_fdmi; + struct lpfc_nodelist *ndlp; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); /* @@ -7804,32 +7849,9 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport) return; } - if (vport->cfg_fdmi_on & LPFC_FDMI_SUPPORT) { - /* If this is the first time, allocate an ndlp and initialize - * it. Otherwise, make sure the node is enabled and then do the - * login. - */ - ndlp_fdmi = lpfc_findnode_did(vport, FDMI_DID); - if (!ndlp_fdmi) { - ndlp_fdmi = mempool_alloc(phba->nlp_mem_pool, - GFP_KERNEL); - if (ndlp_fdmi) { - lpfc_nlp_init(vport, ndlp_fdmi, FDMI_DID); - ndlp_fdmi->nlp_type |= NLP_FABRIC; - } else - return; - } - if (!NLP_CHK_NODE_ACT(ndlp_fdmi)) - ndlp_fdmi = lpfc_enable_node(vport, - ndlp_fdmi, - NLP_STE_NPR_NODE); - - if (ndlp_fdmi) { - lpfc_nlp_set_state(vport, ndlp_fdmi, - NLP_STE_PLOGI_ISSUE); - lpfc_issue_els_plogi(vport, ndlp_fdmi->nlp_DID, 0); - } - } + if ((phba->cfg_fdmi_on > LPFC_FDMI_NO_SUPPORT) && + (vport->load_flag & FC_ALLOW_FDMI)) + lpfc_start_fdmi(vport); } /** diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index bfc2442dd74a..25b5dcd1a5c8 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -25,6 +25,7 @@ #include <linux/pci.h> #include <linux/kthread.h> #include <linux/interrupt.h> +#include <linux/lockdep.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> @@ -674,8 +675,6 @@ lpfc_work_done(struct lpfc_hba *phba) lpfc_mbox_timeout_handler(phba); if (work_port_events & WORKER_FABRIC_BLOCK_TMO) lpfc_unblock_fabric_iocbs(phba); - if (work_port_events & WORKER_FDMI_TMO) - lpfc_fdmi_timeout_handler(vport); if (work_port_events & WORKER_RAMP_DOWN_QUEUE) lpfc_ramp_down_queue_handler(phba); if (work_port_events & WORKER_DELAYED_DISC_TMO) @@ -1083,7 +1082,7 @@ out: } -static void +void lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { struct lpfc_vport *vport = pmb->vport; @@ -1113,8 +1112,10 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) /* Start discovery by sending a FLOGI. 
port_state is identically * LPFC_FLOGI while waiting for FLOGI cmpl */ - if (vport->port_state != LPFC_FLOGI || vport->fc_flag & FC_PT2PT_PLOGI) + if (vport->port_state != LPFC_FLOGI) lpfc_initial_flogi(vport); + else if (vport->fc_flag & FC_PT2PT) + lpfc_disc_start(vport); return; out: @@ -1314,6 +1315,8 @@ __lpfc_update_fcf_record_pri(struct lpfc_hba *phba, uint16_t fcf_index, { struct lpfc_fcf_pri *fcf_pri; + lockdep_assert_held(&phba->hbalock); + fcf_pri = &phba->fcf.fcf_pri[fcf_index]; fcf_pri->fcf_rec.fcf_index = fcf_index; /* FCF record priority */ @@ -1398,6 +1401,8 @@ __lpfc_update_fcf_record(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec, struct fcf_record *new_fcf_record, uint32_t addr_mode, uint16_t vlan_id, uint32_t flag) { + lockdep_assert_held(&phba->hbalock); + /* Copy the fields from the HBA's FCF record */ lpfc_copy_fcf_record(fcf_rec, new_fcf_record); /* Update other fields of driver FCF record */ @@ -2963,8 +2968,10 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) out_free_mem: mempool_free(mboxq, phba->mbox_mem_pool); - lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys); - kfree(dmabuf); + if (dmabuf) { + lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys); + kfree(dmabuf); + } return; } @@ -3035,19 +3042,22 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la) uint32_t fc_flags = 0; spin_lock_irq(&phba->hbalock); - switch (bf_get(lpfc_mbx_read_top_link_spd, la)) { - case LPFC_LINK_SPEED_1GHZ: - case LPFC_LINK_SPEED_2GHZ: - case LPFC_LINK_SPEED_4GHZ: - case LPFC_LINK_SPEED_8GHZ: - case LPFC_LINK_SPEED_10GHZ: - case LPFC_LINK_SPEED_16GHZ: - case LPFC_LINK_SPEED_32GHZ: - phba->fc_linkspeed = bf_get(lpfc_mbx_read_top_link_spd, la); - break; - default: - phba->fc_linkspeed = LPFC_LINK_SPEED_UNKNOWN; - break; + phba->fc_linkspeed = bf_get(lpfc_mbx_read_top_link_spd, la); + + if (!(phba->hba_flag & HBA_FCOE_MODE)) { + switch (bf_get(lpfc_mbx_read_top_link_spd, la)) { + case LPFC_LINK_SPEED_1GHZ: + case LPFC_LINK_SPEED_2GHZ: + case LPFC_LINK_SPEED_4GHZ: + case LPFC_LINK_SPEED_8GHZ: + case LPFC_LINK_SPEED_10GHZ: + case LPFC_LINK_SPEED_16GHZ: + case LPFC_LINK_SPEED_32GHZ: + break; + default: + phba->fc_linkspeed = LPFC_LINK_SPEED_UNKNOWN; + break; + } } if (phba->fc_topology && @@ -3448,10 +3458,10 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) spin_lock_irq(shost->host_lock); ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; spin_unlock_irq(shost->host_lock); - } else - /* Good status, call state machine */ - lpfc_disc_state_machine(vport, ndlp, pmb, - NLP_EVT_CMPL_REG_LOGIN); + } + + /* Call state machine */ + lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN); lpfc_mbuf_free(phba, mp->virt, mp->phys); kfree(mp); @@ -5550,15 +5560,15 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) ndlp->nlp_usg_map, ndlp); /* * Start issuing Fabric-Device Management Interface (FDMI) command to - * 0xfffffa (FDMI well known port) or Delay issuing FDMI command if - * fdmi-on=2 (supporting RPA/hostnmae) + * 0xfffffa (FDMI well known port). + * DHBA -> DPRT -> RHBA -> RPA (physical port) + * DPRT -> RPRT (vports) */ - - if (vport->cfg_fdmi_on & LPFC_FDMI_REG_DELAY) - mod_timer(&vport->fc_fdmitmo, - jiffies + msecs_to_jiffies(1000 * 60)); + if (vport->port_type == LPFC_PHYSICAL_PORT) + lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0); else - lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA); + lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT, 0); + /* decrement the node reference count held for this callback * function. 
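[Editor's note: one idiom recurs throughout the attribute code above, old and new: string attribute lengths are rounded up with len += (len & 3) ? (4 - (len & 3)) : 4, padding to the next 32-bit boundary and adding a full four pad bytes when the length is already aligned. A self-contained restatement with a couple of checks; fdmi_pad_len is an illustrative helper name, not a driver symbol.]

#include <assert.h>
#include <string.h>

/* Round a string attribute length up to a 32-bit boundary; an
 * already-aligned length still grows by 4, matching the
 * "(len & 3) ? (4 - (len & 3)) : 4" expression used in the CT code. */
static int fdmi_pad_len(int len)
{
	return len + ((len & 3) ? (4 - (len & 3)) : 4);
}

int main(void)
{
	assert(fdmi_pad_len((int)strlen("Emulex Corporation")) == 20); /* 18 -> 20 */
	assert(fdmi_pad_len(16) == 20);	/* aligned lengths still gain 4 */
	return 0;
}

[The attribute's AttrLen is then encoded as FOURBYTES plus this padded length, which keeps the on-wire TLVs 4-byte aligned.]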
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index 2cce88e967ce..dd20412c7e4c 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h @@ -1097,6 +1097,18 @@ struct fc_rdp_port_name_desc { }; +struct fc_rdp_fec_info { + uint32_t CorrectedBlocks; + uint32_t UncorrectableBlocks; +}; + +#define RDP_FEC_DESC_TAG 0x00010005 +struct fc_fec_rdp_desc { + uint32_t tag; + uint32_t length; + struct fc_rdp_fec_info info; +}; + struct fc_rdp_link_error_status_payload_info { struct fc_link_status link_status; /* 24 bytes */ uint32_t port_type; /* bits 31-30 only */ @@ -1196,14 +1208,15 @@ struct fc_rdp_res_frame { struct fc_rdp_link_error_status_desc link_error_desc; /* Word 13-21 */ struct fc_rdp_port_name_desc diag_port_names_desc; /* Word 22-27 */ struct fc_rdp_port_name_desc attached_port_names_desc;/* Word 28-33 */ + struct fc_fec_rdp_desc fec_desc; /* FC Word 34 - 37 */ }; #define RDP_DESC_PAYLOAD_SIZE (sizeof(struct fc_rdp_link_service_desc) \ - + sizeof(struct fc_rdp_sfp_desc) \ - + sizeof(struct fc_rdp_port_speed_desc) \ - + sizeof(struct fc_rdp_link_error_status_desc) \ - + (sizeof(struct fc_rdp_port_name_desc) * 2)) + + sizeof(struct fc_rdp_sfp_desc) \ + + sizeof(struct fc_rdp_port_speed_desc) \ + + sizeof(struct fc_rdp_link_error_status_desc) \ + + (sizeof(struct fc_rdp_port_name_desc) * 2)) /******** FDMI ********/ @@ -1233,31 +1246,10 @@ struct lpfc_fdmi_attr_def { /* Defined in TLV format */ /* Attribute Entry */ struct lpfc_fdmi_attr_entry { union { - uint32_t VendorSpecific; - uint32_t SupportClass; - uint32_t SupportSpeed; - uint32_t PortSpeed; - uint32_t MaxFrameSize; - uint32_t MaxCTPayloadLen; - uint32_t PortState; - uint32_t PortId; - struct lpfc_name NodeName; - struct lpfc_name PortName; - struct lpfc_name FabricName; - uint8_t FC4Types[32]; - uint8_t Manufacturer[64]; - uint8_t SerialNumber[64]; - uint8_t Model[256]; - uint8_t ModelDescription[256]; - uint8_t HardwareVersion[256]; - uint8_t DriverVersion[256]; - uint8_t OptionROMVersion[256]; - uint8_t FirmwareVersion[256]; - uint8_t OsHostName[256]; - uint8_t NodeSymName[256]; - uint8_t OsDeviceName[256]; - uint8_t OsNameVersion[256]; - uint8_t HostName[256]; + uint32_t AttrInt; + uint8_t AttrTypes[32]; + uint8_t AttrString[256]; + struct lpfc_name AttrWWN; } un; }; @@ -1327,6 +1319,8 @@ struct lpfc_fdmi_reg_portattr { #define SLI_MGMT_DPRT 0x310 /* De-register Port */ #define SLI_MGMT_DPA 0x311 /* De-register Port attributes */ +#define LPFC_FDMI_MAX_RETRY 3 /* Max retries for a FDMI command */ + /* * HBA Attribute Types */ @@ -1342,6 +1336,39 @@ struct lpfc_fdmi_reg_portattr { #define RHBA_OS_NAME_VERSION 0xa /* 4 to 256 byte ASCII string */ #define RHBA_MAX_CT_PAYLOAD_LEN 0xb /* 32-bit unsigned int */ #define RHBA_SYM_NODENAME 0xc /* 4 to 256 byte ASCII string */ +#define RHBA_VENDOR_INFO 0xd /* 32-bit unsigned int */ +#define RHBA_NUM_PORTS 0xe /* 32-bit unsigned int */ +#define RHBA_FABRIC_WWNN 0xf /* 8 byte WWNN */ +#define RHBA_BIOS_VERSION 0x10 /* 4 to 256 byte ASCII string */ +#define RHBA_BIOS_STATE 0x11 /* 32-bit unsigned int */ +#define RHBA_VENDOR_ID 0xe0 /* 8 byte ASCII string */ + +/* Bit mask for all individual HBA attributes */ +#define LPFC_FDMI_HBA_ATTR_wwnn 0x00000001 +#define LPFC_FDMI_HBA_ATTR_manufacturer 0x00000002 +#define LPFC_FDMI_HBA_ATTR_sn 0x00000004 +#define LPFC_FDMI_HBA_ATTR_model 0x00000008 +#define LPFC_FDMI_HBA_ATTR_description 0x00000010 +#define LPFC_FDMI_HBA_ATTR_hdw_ver 0x00000020 +#define LPFC_FDMI_HBA_ATTR_drvr_ver 0x00000040 +#define 
LPFC_FDMI_HBA_ATTR_rom_ver 0x00000080 +#define LPFC_FDMI_HBA_ATTR_fmw_ver 0x00000100 +#define LPFC_FDMI_HBA_ATTR_os_ver 0x00000200 +#define LPFC_FDMI_HBA_ATTR_ct_len 0x00000400 +#define LPFC_FDMI_HBA_ATTR_symbolic_name 0x00000800 +#define LPFC_FDMI_HBA_ATTR_vendor_info 0x00001000 /* Not used */ +#define LPFC_FDMI_HBA_ATTR_num_ports 0x00002000 +#define LPFC_FDMI_HBA_ATTR_fabric_wwnn 0x00004000 +#define LPFC_FDMI_HBA_ATTR_bios_ver 0x00008000 +#define LPFC_FDMI_HBA_ATTR_bios_state 0x00010000 /* Not used */ +#define LPFC_FDMI_HBA_ATTR_vendor_id 0x00020000 + +/* Bit mask for FDMI-1 defined HBA attributes */ +#define LPFC_FDMI1_HBA_ATTR 0x000007ff + +/* Bit mask for FDMI-2 defined HBA attributes */ +/* Skip vendor_info and bios_state */ +#define LPFC_FDMI2_HBA_ATTR 0x0002efff /* * Port Attrubute Types @@ -1353,15 +1380,65 @@ struct lpfc_fdmi_reg_portattr { #define RPRT_OS_DEVICE_NAME 0x5 /* 4 to 256 byte ASCII string */ #define RPRT_HOST_NAME 0x6 /* 4 to 256 byte ASCII string */ #define RPRT_NODENAME 0x7 /* 8 byte WWNN */ -#define RPRT_PORTNAME 0x8 /* 8 byte WWNN */ +#define RPRT_PORTNAME 0x8 /* 8 byte WWPN */ #define RPRT_SYM_PORTNAME 0x9 /* 4 to 256 byte ASCII string */ #define RPRT_PORT_TYPE 0xa /* 32-bit unsigned int */ #define RPRT_SUPPORTED_CLASS 0xb /* 32-bit unsigned int */ -#define RPRT_FABRICNAME 0xc /* 8 byte Fabric WWNN */ +#define RPRT_FABRICNAME 0xc /* 8 byte Fabric WWPN */ #define RPRT_ACTIVE_FC4_TYPES 0xd /* 32 byte binary array */ #define RPRT_PORT_STATE 0x101 /* 32-bit unsigned int */ #define RPRT_DISC_PORT 0x102 /* 32-bit unsigned int */ #define RPRT_PORT_ID 0x103 /* 32-bit unsigned int */ +#define RPRT_SMART_SERVICE 0xf100 /* 4 to 256 byte ASCII string */ +#define RPRT_SMART_GUID 0xf101 /* 8 byte WWNN + 8 byte WWPN */ +#define RPRT_SMART_VERSION 0xf102 /* 4 to 256 byte ASCII string */ +#define RPRT_SMART_MODEL 0xf103 /* 4 to 256 byte ASCII string */ +#define RPRT_SMART_PORT_INFO 0xf104 /* 32-bit unsigned int */ +#define RPRT_SMART_QOS 0xf105 /* 32-bit unsigned int */ +#define RPRT_SMART_SECURITY 0xf106 /* 32-bit unsigned int */ + +/* Bit mask for all individual PORT attributes */ +#define LPFC_FDMI_PORT_ATTR_fc4type 0x00000001 +#define LPFC_FDMI_PORT_ATTR_support_speed 0x00000002 +#define LPFC_FDMI_PORT_ATTR_speed 0x00000004 +#define LPFC_FDMI_PORT_ATTR_max_frame 0x00000008 +#define LPFC_FDMI_PORT_ATTR_os_devname 0x00000010 +#define LPFC_FDMI_PORT_ATTR_host_name 0x00000020 +#define LPFC_FDMI_PORT_ATTR_wwnn 0x00000040 +#define LPFC_FDMI_PORT_ATTR_wwpn 0x00000080 +#define LPFC_FDMI_PORT_ATTR_symbolic_name 0x00000100 +#define LPFC_FDMI_PORT_ATTR_port_type 0x00000200 +#define LPFC_FDMI_PORT_ATTR_class 0x00000400 +#define LPFC_FDMI_PORT_ATTR_fabric_wwpn 0x00000800 +#define LPFC_FDMI_PORT_ATTR_port_state 0x00001000 +#define LPFC_FDMI_PORT_ATTR_active_fc4type 0x00002000 +#define LPFC_FDMI_PORT_ATTR_num_disc 0x00004000 +#define LPFC_FDMI_PORT_ATTR_nportid 0x00008000 +#define LPFC_FDMI_SMART_ATTR_service 0x00010000 /* Vendor specific */ +#define LPFC_FDMI_SMART_ATTR_guid 0x00020000 /* Vendor specific */ +#define LPFC_FDMI_SMART_ATTR_version 0x00040000 /* Vendor specific */ +#define LPFC_FDMI_SMART_ATTR_model 0x00080000 /* Vendor specific */ +#define LPFC_FDMI_SMART_ATTR_port_info 0x00100000 /* Vendor specific */ +#define LPFC_FDMI_SMART_ATTR_qos 0x00200000 /* Vendor specific */ +#define LPFC_FDMI_SMART_ATTR_security 0x00400000 /* Vendor specific */ + +/* Bit mask for FDMI-1 defined PORT attributes */ +#define LPFC_FDMI1_PORT_ATTR 0x0000003f + +/* Bit mask for FDMI-2 defined PORT 
attributes */ +#define LPFC_FDMI2_PORT_ATTR 0x0000ffff + +/* Bit mask for Smart SAN defined PORT attributes */ +#define LPFC_FDMI2_SMART_ATTR 0x007fffff + +/* Defines for PORT port state attribute */ +#define LPFC_FDMI_PORTSTATE_UNKNOWN 1 +#define LPFC_FDMI_PORTSTATE_ONLINE 2 + +/* Defines for PORT port type attribute */ +#define LPFC_FDMI_PORTTYPE_UNKNOWN 0 +#define LPFC_FDMI_PORTTYPE_NPORT 1 +#define LPFC_FDMI_PORTTYPE_NLPORT 2 /* * Begin HBA configuration parameters. @@ -2498,10 +2575,38 @@ typedef struct { /* Structure for MB Command READ_LINK_STAT (18) */ typedef struct { - uint32_t rsvd1; + uint32_t word0; + +#define lpfc_read_link_stat_rec_SHIFT 0 +#define lpfc_read_link_stat_rec_MASK 0x1 +#define lpfc_read_link_stat_rec_WORD word0 + +#define lpfc_read_link_stat_gec_SHIFT 1 +#define lpfc_read_link_stat_gec_MASK 0x1 +#define lpfc_read_link_stat_gec_WORD word0 + +#define lpfc_read_link_stat_w02oftow23of_SHIFT 2 +#define lpfc_read_link_stat_w02oftow23of_MASK 0x3FFFFF +#define lpfc_read_link_stat_w02oftow23of_WORD word0 + +#define lpfc_read_link_stat_rsvd_SHIFT 24 +#define lpfc_read_link_stat_rsvd_MASK 0x1F +#define lpfc_read_link_stat_rsvd_WORD word0 + +#define lpfc_read_link_stat_gec2_SHIFT 29 +#define lpfc_read_link_stat_gec2_MASK 0x1 +#define lpfc_read_link_stat_gec2_WORD word0 + +#define lpfc_read_link_stat_clrc_SHIFT 30 +#define lpfc_read_link_stat_clrc_MASK 0x1 +#define lpfc_read_link_stat_clrc_WORD word0 + +#define lpfc_read_link_stat_clof_SHIFT 31 +#define lpfc_read_link_stat_clof_MASK 0x1 +#define lpfc_read_link_stat_clof_WORD word0 + uint32_t linkFailureCnt; uint32_t lossSyncCnt; - uint32_t lossSignalCnt; uint32_t primSeqErrCnt; uint32_t invalidXmitWord; @@ -2509,6 +2614,19 @@ typedef struct { uint32_t primSeqTimeout; uint32_t elasticOverrun; uint32_t arbTimeout; + uint32_t advRecBufCredit; + uint32_t curRecBufCredit; + uint32_t advTransBufCredit; + uint32_t curTransBufCredit; + uint32_t recEofCount; + uint32_t recEofdtiCount; + uint32_t recEofniCount; + uint32_t recSofcount; + uint32_t rsvd1; + uint32_t rsvd2; + uint32_t recDrpXriCount; + uint32_t fecCorrBlkCount; + uint32_t fecUncorrBlkCount; } READ_LNK_VAR; /* Structure for MB Command REG_LOGIN (19) */ diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index 33ec4fa39ccb..608f9415fb08 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -3317,6 +3317,7 @@ struct lpfc_acqe_link { #define LPFC_ASYNC_LINK_SPEED_20GBPS 0x5 #define LPFC_ASYNC_LINK_SPEED_25GBPS 0x6 #define LPFC_ASYNC_LINK_SPEED_40GBPS 0x7 +#define LPFC_ASYNC_LINK_SPEED_100GBPS 0x8 #define lpfc_acqe_link_duplex_SHIFT 16 #define lpfc_acqe_link_duplex_MASK 0x000000FF #define lpfc_acqe_link_duplex_WORD word0 @@ -3447,23 +3448,50 @@ struct lpfc_acqe_fc_la { struct lpfc_acqe_misconfigured_event { struct { uint32_t word0; -#define lpfc_sli_misconfigured_port0_SHIFT 0 -#define lpfc_sli_misconfigured_port0_MASK 0x000000FF -#define lpfc_sli_misconfigured_port0_WORD word0 -#define lpfc_sli_misconfigured_port1_SHIFT 8 -#define lpfc_sli_misconfigured_port1_MASK 0x000000FF -#define lpfc_sli_misconfigured_port1_WORD word0 -#define lpfc_sli_misconfigured_port2_SHIFT 16 -#define lpfc_sli_misconfigured_port2_MASK 0x000000FF -#define lpfc_sli_misconfigured_port2_WORD word0 -#define lpfc_sli_misconfigured_port3_SHIFT 24 -#define lpfc_sli_misconfigured_port3_MASK 0x000000FF -#define lpfc_sli_misconfigured_port3_WORD word0 +#define lpfc_sli_misconfigured_port0_state_SHIFT 0 +#define lpfc_sli_misconfigured_port0_state_MASK 0x000000FF 
+#define lpfc_sli_misconfigured_port0_state_WORD word0 +#define lpfc_sli_misconfigured_port1_state_SHIFT 8 +#define lpfc_sli_misconfigured_port1_state_MASK 0x000000FF +#define lpfc_sli_misconfigured_port1_state_WORD word0 +#define lpfc_sli_misconfigured_port2_state_SHIFT 16 +#define lpfc_sli_misconfigured_port2_state_MASK 0x000000FF +#define lpfc_sli_misconfigured_port2_state_WORD word0 +#define lpfc_sli_misconfigured_port3_state_SHIFT 24 +#define lpfc_sli_misconfigured_port3_state_MASK 0x000000FF +#define lpfc_sli_misconfigured_port3_state_WORD word0 + uint32_t word1; +#define lpfc_sli_misconfigured_port0_op_SHIFT 0 +#define lpfc_sli_misconfigured_port0_op_MASK 0x00000001 +#define lpfc_sli_misconfigured_port0_op_WORD word1 +#define lpfc_sli_misconfigured_port0_severity_SHIFT 1 +#define lpfc_sli_misconfigured_port0_severity_MASK 0x00000003 +#define lpfc_sli_misconfigured_port0_severity_WORD word1 +#define lpfc_sli_misconfigured_port1_op_SHIFT 8 +#define lpfc_sli_misconfigured_port1_op_MASK 0x00000001 +#define lpfc_sli_misconfigured_port1_op_WORD word1 +#define lpfc_sli_misconfigured_port1_severity_SHIFT 9 +#define lpfc_sli_misconfigured_port1_severity_MASK 0x00000003 +#define lpfc_sli_misconfigured_port1_severity_WORD word1 +#define lpfc_sli_misconfigured_port2_op_SHIFT 16 +#define lpfc_sli_misconfigured_port2_op_MASK 0x00000001 +#define lpfc_sli_misconfigured_port2_op_WORD word1 +#define lpfc_sli_misconfigured_port2_severity_SHIFT 17 +#define lpfc_sli_misconfigured_port2_severity_MASK 0x00000003 +#define lpfc_sli_misconfigured_port2_severity_WORD word1 +#define lpfc_sli_misconfigured_port3_op_SHIFT 24 +#define lpfc_sli_misconfigured_port3_op_MASK 0x00000001 +#define lpfc_sli_misconfigured_port3_op_WORD word1 +#define lpfc_sli_misconfigured_port3_severity_SHIFT 25 +#define lpfc_sli_misconfigured_port3_severity_MASK 0x00000003 +#define lpfc_sli_misconfigured_port3_severity_WORD word1 } theEvent; #define LPFC_SLI_EVENT_STATUS_VALID 0x00 #define LPFC_SLI_EVENT_STATUS_NOT_PRESENT 0x01 #define LPFC_SLI_EVENT_STATUS_WRONG_TYPE 0x02 #define LPFC_SLI_EVENT_STATUS_UNSUPPORTED 0x03 +#define LPFC_SLI_EVENT_STATUS_UNQUALIFIED 0x04 +#define LPFC_SLI_EVENT_STATUS_UNCERTIFIED 0x05 }; struct lpfc_acqe_sli { diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index db9446c612da..a544366a367e 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -1184,8 +1184,10 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba) vports = lpfc_create_vport_work_array(phba); if (vports != NULL) - for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { lpfc_rcv_seq_check_edtov(vports[i]); + lpfc_fdmi_num_disc_check(vports[i]); + } lpfc_destroy_vport_work_array(phba, vports); if ((phba->link_state == LPFC_HBA_ERROR) || @@ -1290,6 +1292,10 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba) jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT)); } + } else { + mod_timer(&phba->hb_tmofunc, + jiffies + + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); } } @@ -2621,7 +2627,6 @@ void lpfc_stop_vport_timers(struct lpfc_vport *vport) { del_timer_sync(&vport->els_tmofunc); - del_timer_sync(&vport->fc_fdmitmo); del_timer_sync(&vport->delayed_disc_tmo); lpfc_can_disctmo(vport); return; @@ -3340,10 +3345,6 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) vport->fc_disctmo.function = lpfc_disc_timeout; vport->fc_disctmo.data = (unsigned long)vport; - init_timer(&vport->fc_fdmitmo); - 
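The timer changes in this region use the old (pre-timer_setup) kernel timer API: init_timer() with an explicit .function/.data pair, and mod_timer() from inside the handler to re-arm the next interval, which is what the heartbeat handler's new else branch does above. A minimal sketch of that re-arm pattern, with hypothetical names:

#include <linux/timer.h>
#include <linux/jiffies.h>

#define DEMO_INTERVAL_SECS 5

static struct timer_list demo_tmo;

/* Old-style handler: receives the .data cookie as an unsigned long. */
static void demo_timeout(unsigned long data)
{
    /* ... periodic work ... */

    /* re-arm for the next interval, as the heartbeat handler does */
    mod_timer(&demo_tmo,
              jiffies + msecs_to_jiffies(1000 * DEMO_INTERVAL_SECS));
}

static void demo_start(unsigned long cookie)
{
    init_timer(&demo_tmo);
    demo_tmo.function = demo_timeout;
    demo_tmo.data = cookie;
    mod_timer(&demo_tmo,
              jiffies + msecs_to_jiffies(1000 * DEMO_INTERVAL_SECS));
}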
vport->fc_fdmitmo.function = lpfc_fdmi_tmo;
-	vport->fc_fdmitmo.data = (unsigned long)vport;
-
 	init_timer(&vport->els_tmofunc);
 	vport->els_tmofunc.function = lpfc_els_timeout;
 	vport->els_tmofunc.data = (unsigned long)vport;
@@ -3709,49 +3710,6 @@ lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
 }

 /**
- * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed
- * @phba: pointer to lpfc hba data structure.
- * @acqe_link: pointer to the async link completion queue entry.
- *
- * This routine is to parse the SLI4 link-attention link speed and translate
- * it into the base driver's link-attention link speed coding.
- *
- * Return: Link-attention link speed in terms of base driver's coding.
- **/
-static uint8_t
-lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba,
-				struct lpfc_acqe_link *acqe_link)
-{
-	uint8_t link_speed;
-
-	switch (bf_get(lpfc_acqe_link_speed, acqe_link)) {
-	case LPFC_ASYNC_LINK_SPEED_ZERO:
-	case LPFC_ASYNC_LINK_SPEED_10MBPS:
-	case LPFC_ASYNC_LINK_SPEED_100MBPS:
-		link_speed = LPFC_LINK_SPEED_UNKNOWN;
-		break;
-	case LPFC_ASYNC_LINK_SPEED_1GBPS:
-		link_speed = LPFC_LINK_SPEED_1GHZ;
-		break;
-	case LPFC_ASYNC_LINK_SPEED_10GBPS:
-		link_speed = LPFC_LINK_SPEED_10GHZ;
-		break;
-	case LPFC_ASYNC_LINK_SPEED_20GBPS:
-	case LPFC_ASYNC_LINK_SPEED_25GBPS:
-	case LPFC_ASYNC_LINK_SPEED_40GBPS:
-		link_speed = LPFC_LINK_SPEED_UNKNOWN;
-		break;
-	default:
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"0483 Invalid link-attention link speed: x%x\n",
-				bf_get(lpfc_acqe_link_speed, acqe_link));
-		link_speed = LPFC_LINK_SPEED_UNKNOWN;
-		break;
-	}
-	return link_speed;
-}
-
-/**
 * lpfc_sli_port_speed_get - Get sli3 link speed code to link speed
 * @phba: pointer to lpfc hba data structure.
 *
@@ -3767,27 +3725,35 @@ lpfc_sli_port_speed_get(struct lpfc_hba *phba)
 	if (!lpfc_is_link_up(phba))
 		return 0;

-	switch (phba->fc_linkspeed) {
-	case LPFC_LINK_SPEED_1GHZ:
-		link_speed = 1000;
-		break;
-	case LPFC_LINK_SPEED_2GHZ:
-		link_speed = 2000;
-		break;
-	case LPFC_LINK_SPEED_4GHZ:
-		link_speed = 4000;
-		break;
-	case LPFC_LINK_SPEED_8GHZ:
-		link_speed = 8000;
-		break;
-	case LPFC_LINK_SPEED_10GHZ:
-		link_speed = 10000;
-		break;
-	case LPFC_LINK_SPEED_16GHZ:
-		link_speed = 16000;
-		break;
-	default:
-		link_speed = 0;
+	if (phba->sli_rev <= LPFC_SLI_REV3) {
+		switch (phba->fc_linkspeed) {
+		case LPFC_LINK_SPEED_1GHZ:
+			link_speed = 1000;
+			break;
+		case LPFC_LINK_SPEED_2GHZ:
+			link_speed = 2000;
+			break;
+		case LPFC_LINK_SPEED_4GHZ:
+			link_speed = 4000;
+			break;
+		case LPFC_LINK_SPEED_8GHZ:
+			link_speed = 8000;
+			break;
+		case LPFC_LINK_SPEED_10GHZ:
+			link_speed = 10000;
+			break;
+		case LPFC_LINK_SPEED_16GHZ:
+			link_speed = 16000;
+			break;
+		default:
+			link_speed = 0;
+		}
+	} else {
+		if (phba->sli4_hba.link_state.logical_speed)
+			link_speed =
+				phba->sli4_hba.link_state.logical_speed;
+		else
+			link_speed = phba->sli4_hba.link_state.speed;
 	}
 	return link_speed;
 }
@@ -3983,7 +3949,7 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
 	la->eventTag = acqe_link->event_tag;
 	bf_set(lpfc_mbx_read_top_att_type, la, att_type);
 	bf_set(lpfc_mbx_read_top_link_spd, la,
-	       lpfc_sli4_parse_latt_link_speed(phba, acqe_link));
+	       (bf_get(lpfc_acqe_link_speed, acqe_link)));

 	/* Fake the following irrelevant fields */
 	bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
@@ -4113,22 +4079,18 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
 	char message[128];
 	uint8_t status;
 	uint8_t evt_type;
+	uint8_t operational = 0;
 	struct temp_event temp_event_data;
 	struct
lpfc_acqe_misconfigured_event *misconfigured; struct Scsi_Host *shost; evt_type = bf_get(lpfc_trailer_type, acqe_sli); - /* Special case Lancer */ - if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != - LPFC_SLI_INTF_IF_TYPE_2) { - lpfc_printf_log(phba, KERN_INFO, LOG_SLI, - "2901 Async SLI event - Event Data1:x%08x Event Data2:" - "x%08x SLI Event Type:%d\n", - acqe_sli->event_data1, acqe_sli->event_data2, - evt_type); - return; - } + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "2901 Async SLI event - Event Data1:x%08x Event Data2:" + "x%08x SLI Event Type:%d\n", + acqe_sli->event_data1, acqe_sli->event_data2, + evt_type); port_name = phba->Port[0]; if (port_name == 0x00) @@ -4174,29 +4136,46 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli) /* fetch the status for this port */ switch (phba->sli4_hba.lnk_info.lnk_no) { case LPFC_LINK_NUMBER_0: - status = bf_get(lpfc_sli_misconfigured_port0, + status = bf_get(lpfc_sli_misconfigured_port0_state, + &misconfigured->theEvent); + operational = bf_get(lpfc_sli_misconfigured_port0_op, &misconfigured->theEvent); break; case LPFC_LINK_NUMBER_1: - status = bf_get(lpfc_sli_misconfigured_port1, + status = bf_get(lpfc_sli_misconfigured_port1_state, + &misconfigured->theEvent); + operational = bf_get(lpfc_sli_misconfigured_port1_op, &misconfigured->theEvent); break; case LPFC_LINK_NUMBER_2: - status = bf_get(lpfc_sli_misconfigured_port2, + status = bf_get(lpfc_sli_misconfigured_port2_state, + &misconfigured->theEvent); + operational = bf_get(lpfc_sli_misconfigured_port2_op, &misconfigured->theEvent); break; case LPFC_LINK_NUMBER_3: - status = bf_get(lpfc_sli_misconfigured_port3, + status = bf_get(lpfc_sli_misconfigured_port3_state, + &misconfigured->theEvent); + operational = bf_get(lpfc_sli_misconfigured_port3_op, &misconfigured->theEvent); break; default: - status = ~LPFC_SLI_EVENT_STATUS_VALID; - break; + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "3296 " + "LPFC_SLI_EVENT_TYPE_MISCONFIGURED " + "event: Invalid link %d", + phba->sli4_hba.lnk_info.lnk_no); + return; } + /* Skip if optic state unchanged */ + if (phba->sli4_hba.lnk_info.optic_state == status) + return; + switch (status) { case LPFC_SLI_EVENT_STATUS_VALID: - return; /* no message if the sfp is okay */ + sprintf(message, "Physical Link is functional"); + break; case LPFC_SLI_EVENT_STATUS_NOT_PRESENT: sprintf(message, "Optics faulted/incorrectly " "installed/not installed - Reseat optics, " @@ -4211,15 +4190,26 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli) sprintf(message, "Incompatible optics - Replace with " "compatible optics for card to function."); break; + case LPFC_SLI_EVENT_STATUS_UNQUALIFIED: + sprintf(message, "Unqualified optics - Replace with " + "Avago optics for Warranty and Technical " + "Support - Link is%s operational", + (operational) ? "" : " not"); + break; + case LPFC_SLI_EVENT_STATUS_UNCERTIFIED: + sprintf(message, "Uncertified optics - Replace with " + "Avago-certified optics to enable link " + "operation - Link is%s operational", + (operational) ? 
"" : " not"); + break; default: /* firmware is reporting a status we don't know about */ sprintf(message, "Unknown event status x%02x", status); break; } - + phba->sli4_hba.lnk_info.optic_state = status; lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "3176 Misconfigured Physical Port - " - "Port Name %c %s\n", port_name, message); + "3176 Port Name %c %s\n", port_name, message); break; case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT: lpfc_printf_log(phba, KERN_INFO, LOG_SLI, @@ -5293,6 +5283,9 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list); INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list); + /* initialize optic_state to 0xFF */ + phba->sli4_hba.lnk_info.optic_state = 0xff; + /* Initialize the driver internal SLI layer lists. */ lpfc_sli_setup(phba); lpfc_sli_queue_setup(phba); @@ -6159,6 +6152,20 @@ lpfc_create_shost(struct lpfc_hba *phba) /* Put reference to SCSI host to driver's device private data */ pci_set_drvdata(phba->pcidev, shost); + /* + * At this point we are fully registered with PSA. In addition, + * any initial discovery should be completed. + */ + vport->load_flag |= FC_ALLOW_FDMI; + if (phba->cfg_fdmi_on > LPFC_FDMI_NO_SUPPORT) { + + /* Setup appropriate attribute masks */ + vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR; + if (phba->cfg_fdmi_on == LPFC_FDMI_SMART_SAN) + vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR; + else + vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR; + } return 0; } @@ -8833,9 +8840,12 @@ found: * already mapped to this phys_id. */ if (cpup->irq != LPFC_VECTOR_MAP_EMPTY) { - chann[saved_chann] = - cpup->channel_id; - saved_chann++; + if (saved_chann <= + LPFC_FCP_IO_CHAN_MAX) { + chann[saved_chann] = + cpup->channel_id; + saved_chann++; + } goto out; } diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c index 3fa65338d3f5..4fb3581d4614 100644 --- a/drivers/scsi/lpfc/lpfc_mem.c +++ b/drivers/scsi/lpfc/lpfc_mem.c @@ -231,15 +231,13 @@ lpfc_mem_free(struct lpfc_hba *phba) if (phba->lpfc_hbq_pool) pci_pool_destroy(phba->lpfc_hbq_pool); phba->lpfc_hbq_pool = NULL; - - if (phba->rrq_pool) - mempool_destroy(phba->rrq_pool); + mempool_destroy(phba->rrq_pool); phba->rrq_pool = NULL; /* Free NLP memory pool */ mempool_destroy(phba->nlp_mem_pool); phba->nlp_mem_pool = NULL; - if (phba->sli_rev == LPFC_SLI_REV4 && phba->active_rrq_pool) { + if (phba->sli_rev == LPFC_SLI_REV4) { mempool_destroy(phba->active_rrq_pool); phba->active_rrq_pool = NULL; } diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index ed9a2c80c4aa..193733e8c823 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -280,38 +280,12 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, uint32_t *lp; IOCB_t *icmd; struct serv_parm *sp; + uint32_t ed_tov; LPFC_MBOXQ_t *mbox; struct ls_rjt stat; int rc; memset(&stat, 0, sizeof (struct ls_rjt)); - if (vport->port_state <= LPFC_FDISC) { - /* Before responding to PLOGI, check for pt2pt mode. - * If we are pt2pt, with an outstanding FLOGI, abort - * the FLOGI and resend it first. - */ - if (vport->fc_flag & FC_PT2PT) { - lpfc_els_abort_flogi(phba); - if (!(vport->fc_flag & FC_PT2PT_PLOGI)) { - /* If the other side is supposed to initiate - * the PLOGI anyway, just ACC it now and - * move on with discovery. 
- */
-		phba->fc_edtov = FF_DEF_EDTOV;
-		phba->fc_ratov = FF_DEF_RATOV;
-		/* Start discovery - this should just do
-		   CLEAR_LA */
-		lpfc_disc_start(vport);
-	} else
-		lpfc_initial_flogi(vport);
-	} else {
-		stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
-		stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
-		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
-			ndlp, NULL);
-		return 0;
-	}
-	}
 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
 	lp = (uint32_t *) pcmd->virt;
 	sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
@@ -404,30 +378,46 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	/* Check for Nport to NPort pt2pt protocol */
 	if ((vport->fc_flag & FC_PT2PT) &&
 	    !(vport->fc_flag & FC_PT2PT_PLOGI)) {
-
 		/* rcv'ed PLOGI decides what our NPortId will be */
 		vport->fc_myDID = icmd->un.rcvels.parmRo;
-		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
-		if (mbox == NULL)
-			goto out;
-		lpfc_config_link(phba, mbox);
-		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
-		mbox->vport = vport;
-		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
-		if (rc == MBX_NOT_FINISHED) {
-			mempool_free(mbox, phba->mbox_mem_pool);
-			goto out;
+
+		ed_tov = be32_to_cpu(sp->cmn.e_d_tov);
+		if (sp->cmn.edtovResolution) {
+			/* E_D_TOV ticks are in nanoseconds */
+			ed_tov = (ed_tov + 999999) / 1000000;
 		}
+
 		/*
-		 * For SLI4, the VFI/VPI are registered AFTER the
-		 * Nport with the higher WWPN sends us a PLOGI with
-		 * our assigned NPortId.
+		 * For pt-to-pt, use the larger EDTOV
+		 * RATOV = 2 * EDTOV
 		 */
+		if (ed_tov > phba->fc_edtov)
+			phba->fc_edtov = ed_tov;
+		phba->fc_ratov = (2 * phba->fc_edtov) / 1000;
+
+		memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
+
+		/* Issue config_link / reg_vfi to account for updated TOV's */
+		if (phba->sli_rev == LPFC_SLI_REV4)
 			lpfc_issue_reg_vfi(vport);
+		else {
+			mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+			if (mbox == NULL)
+				goto out;
+			lpfc_config_link(phba, mbox);
+			mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+			mbox->vport = vport;
+			rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+			if (rc == MBX_NOT_FINISHED) {
+				mempool_free(mbox, phba->mbox_mem_pool);
+				goto out;
+			}
+		}
 		lpfc_can_disctmo(vport);
 	}
+
 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 	if (!mbox)
 		goto out;
@@ -1038,7 +1028,9 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
 	uint32_t *lp;
 	IOCB_t *irsp;
 	struct serv_parm *sp;
+	uint32_t ed_tov;
 	LPFC_MBOXQ_t *mbox;
+	int rc;

 	cmdiocb = (struct lpfc_iocbq *) arg;
 	rspiocb = cmdiocb->context_un.rsp_iocb;
@@ -1094,18 +1086,63 @@
 	ndlp->nlp_maxframe =
 		((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;

+	if ((vport->fc_flag & FC_PT2PT) &&
+	    (vport->fc_flag & FC_PT2PT_PLOGI)) {
+		ed_tov = be32_to_cpu(sp->cmn.e_d_tov);
+		if (sp->cmn.edtovResolution) {
+			/* E_D_TOV ticks are in nanoseconds */
+			ed_tov = (ed_tov + 999999) / 1000000;
+		}
+
+		/*
+		 * Use the larger EDTOV
+		 * RATOV = 2 * EDTOV for pt-to-pt
+		 */
+		if (ed_tov > phba->fc_edtov)
+			phba->fc_edtov = ed_tov;
+		phba->fc_ratov = (2 * phba->fc_edtov) / 1000;
+
+		memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
+
+		/* Issue config_link / reg_vfi to account for updated TOV's */
+		if (phba->sli_rev == LPFC_SLI_REV4) {
+			lpfc_issue_reg_vfi(vport);
+		} else {
+			mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+			if (!mbox) {
+				lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+						 "0133 PLOGI: no memory "
+						 "for config_link "
+						 "Data: x%x x%x x%x x%x\n",
+						 ndlp->nlp_DID, ndlp->nlp_state,
+
ndlp->nlp_flag, ndlp->nlp_rpi); + goto out; + } + + lpfc_config_link(phba, mbox); + + mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + mbox->vport = vport; + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) { + mempool_free(mbox, phba->mbox_mem_pool); + goto out; + } + } + } + + lpfc_unreg_rpi(vport, ndlp); + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) { lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, - "0133 PLOGI: no memory for reg_login " - "Data: x%x x%x x%x x%x\n", - ndlp->nlp_DID, ndlp->nlp_state, - ndlp->nlp_flag, ndlp->nlp_rpi); + "0018 PLOGI: no memory for reg_login " + "Data: x%x x%x x%x x%x\n", + ndlp->nlp_DID, ndlp->nlp_state, + ndlp->nlp_flag, ndlp->nlp_rpi); goto out; } - lpfc_unreg_rpi(vport, ndlp); - if (lpfc_reg_rpi(phba, vport->vpi, irsp->un.elsreq64.remoteID, (uint8_t *) sp, mbox, ndlp->nlp_rpi) == 0) { switch (ndlp->nlp_DID) { @@ -2299,6 +2336,9 @@ lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport, if (vport->phba->sli_rev < LPFC_SLI_REV4) ndlp->nlp_rpi = mb->un.varWords[0]; ndlp->nlp_flag |= NLP_RPI_REGISTERED; + if (ndlp->nlp_flag & NLP_LOGO_ACC) { + lpfc_unreg_rpi(vport, ndlp); + } } else { if (ndlp->nlp_flag & NLP_NODEV_REMOVE) { lpfc_drop_node(vport, ndlp); diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 4679ed4444a7..3bd0be6277b3 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -3676,6 +3676,7 @@ static void lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) { + struct lpfc_hba *phba = vport->phba; struct scsi_cmnd *cmnd = lpfc_cmd->pCmd; struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd; struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp; @@ -3685,6 +3686,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, uint32_t *lp; uint32_t host_status = DID_OK; uint32_t rsplen = 0; + uint32_t fcpDl; uint32_t logit = LOG_FCP | LOG_FCP_ERROR; @@ -3755,13 +3757,14 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, fcprsp->rspInfo3); scsi_set_resid(cmnd, 0); + fcpDl = be32_to_cpu(fcpcmd->fcpDl); if (resp_info & RESID_UNDER) { scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId)); lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_UNDER, "9025 FCP Read Underrun, expected %d, " "residual %d Data: x%x x%x x%x\n", - be32_to_cpu(fcpcmd->fcpDl), + fcpDl, scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0], cmnd->underflow); @@ -3777,7 +3780,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, LOG_FCP | LOG_FCP_ERROR, "9026 FCP Read Check Error " "and Underrun Data: x%x x%x x%x x%x\n", - be32_to_cpu(fcpcmd->fcpDl), + fcpDl, scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0]); scsi_set_resid(cmnd, scsi_bufflen(cmnd)); @@ -3812,13 +3815,25 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, * Check SLI validation that all the transfer was actually done * (fcpi_parm should be zero). Apply check only to reads. */ - } else if (fcpi_parm && (cmnd->sc_data_direction == DMA_FROM_DEVICE)) { + } else if (fcpi_parm) { lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR, - "9029 FCP Read Check Error Data: " + "9029 FCP %s Check Error xri x%x Data: " "x%x x%x x%x x%x x%x\n", - be32_to_cpu(fcpcmd->fcpDl), - be32_to_cpu(fcprsp->rspResId), + ((cmnd->sc_data_direction == DMA_FROM_DEVICE) ? + "Read" : "Write"), + ((phba->sli_rev == LPFC_SLI_REV4) ? 
+ lpfc_cmd->cur_iocbq.sli4_xritag : + rsp_iocb->iocb.ulpContext), + fcpDl, be32_to_cpu(fcprsp->rspResId), fcpi_parm, cmnd->cmnd[0], scsi_status); + + /* There is some issue with the LPe12000 that causes it + * to miscalculate the fcpi_parm and falsely trip this + * recovery logic. Detect this case and don't error when true. + */ + if (fcpi_parm > fcpDl) + goto out; + switch (scsi_status) { case SAM_STAT_GOOD: case SAM_STAT_CHECK_CONDITION: @@ -3908,9 +3923,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, uint32_t logit = LOG_FCP; /* Sanity check on return of outstanding command */ - if (!(lpfc_cmd->pCmd)) - return; cmd = lpfc_cmd->pCmd; + if (!cmd) + return; shost = cmd->device->host; lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK); @@ -4124,23 +4139,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, /* The sdev is not guaranteed to be valid post scsi_done upcall. */ cmd->scsi_done(cmd); - if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { - spin_lock_irqsave(&phba->hbalock, flags); - lpfc_cmd->pCmd = NULL; - spin_unlock_irqrestore(&phba->hbalock, flags); - - /* - * If there is a thread waiting for command completion - * wake up the thread. - */ - spin_lock_irqsave(shost->host_lock, flags); - if (lpfc_cmd->waitq) - wake_up(lpfc_cmd->waitq); - spin_unlock_irqrestore(shost->host_lock, flags); - lpfc_release_scsi_buf(phba, lpfc_cmd); - return; - } - spin_lock_irqsave(&phba->hbalock, flags); lpfc_cmd->pCmd = NULL; spin_unlock_irqrestore(&phba->hbalock, flags); @@ -4446,15 +4444,7 @@ lpfc_info(struct Scsi_Host *host) phba->Port); } len = strlen(lpfcinfobuf); - if (phba->sli_rev <= LPFC_SLI_REV3) { - link_speed = lpfc_sli_port_speed_get(phba); - } else { - if (phba->sli4_hba.link_state.logical_speed) - link_speed = - phba->sli4_hba.link_state.logical_speed; - else - link_speed = phba->sli4_hba.link_state.speed; - } + link_speed = lpfc_sli_port_speed_get(phba); if (link_speed != 0) snprintf(lpfcinfobuf + len, 384-len, " Logical Link Speed: %d Mbps", link_speed); diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index f9585cdd8933..2207726b88ee 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -24,6 +24,7 @@ #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/slab.h> +#include <linux/lockdep.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> @@ -576,6 +577,8 @@ __lpfc_sli_get_iocbq(struct lpfc_hba *phba) struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list; struct lpfc_iocbq * iocbq = NULL; + lockdep_assert_held(&phba->hbalock); + list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list); if (iocbq) phba->iocb_cnt++; @@ -797,6 +800,7 @@ int lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, uint16_t xritag) { + lockdep_assert_held(&phba->hbalock); if (!ndlp) return 0; if (!ndlp->active_rrqs_xri_bitmap) @@ -914,6 +918,8 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq) struct lpfc_nodelist *ndlp; int found = 0; + lockdep_assert_held(&phba->hbalock); + if (piocbq->iocb_flag & LPFC_IO_FCP) { lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1; ndlp = lpfc_cmd->rdata->pnode; @@ -1003,6 +1009,8 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) unsigned long iflag = 0; struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; + lockdep_assert_held(&phba->hbalock); + if (iocbq->sli4_xritag == NO_XRI) sglq = NULL; else @@ -1058,6 +1066,7 @@ 
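The lockdep_assert_held() calls added throughout lpfc_sli.c in the hunks that follow make the locking contract of the double-underscore helpers checkable: each must be entered with phba->hbalock held, and with CONFIG_PROVE_LOCKING enabled a violation produces a lockdep splat at the offending call site. A minimal sketch of the convention, with hypothetical names:

#include <linux/lockdep.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);
static int demo_count;

/* Locked-context helper: caller must hold demo_lock. */
static void __demo_update(void)
{
    lockdep_assert_held(&demo_lock);    /* checked under PROVE_LOCKING */
    demo_count++;
}

/* Public wrapper acquires the lock, then calls the locked helper. */
static void demo_update(void)
{
    unsigned long flags;

    spin_lock_irqsave(&demo_lock, flags);
    __demo_update();
    spin_unlock_irqrestore(&demo_lock, flags);
}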
__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) { size_t start_clean = offsetof(struct lpfc_iocbq, iocb); + lockdep_assert_held(&phba->hbalock); /* * Clean all volatile data fields, preserve iotag and node struct. @@ -1080,6 +1089,8 @@ __lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) static void __lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) { + lockdep_assert_held(&phba->hbalock); + phba->__lpfc_sli_release_iocbq(phba, iocbq); phba->iocb_cnt--; } @@ -1310,6 +1321,8 @@ static int lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, struct lpfc_iocbq *piocb) { + lockdep_assert_held(&phba->hbalock); + list_add_tail(&piocb->list, &pring->txcmplq); piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ; @@ -1344,6 +1357,8 @@ lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) { struct lpfc_iocbq *cmd_iocb; + lockdep_assert_held(&phba->hbalock); + list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list); return cmd_iocb; } @@ -1367,6 +1382,9 @@ lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring) { struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; uint32_t max_cmd_idx = pring->sli.sli3.numCiocb; + + lockdep_assert_held(&phba->hbalock); + if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) && (++pring->sli.sli3.next_cmdidx >= max_cmd_idx)) pring->sli.sli3.next_cmdidx = 0; @@ -1497,6 +1515,7 @@ static void lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, IOCB_t *iocb, struct lpfc_iocbq *nextiocb) { + lockdep_assert_held(&phba->hbalock); /* * Set up an iotag */ @@ -1606,6 +1625,8 @@ lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) IOCB_t *iocb; struct lpfc_iocbq *nextiocb; + lockdep_assert_held(&phba->hbalock); + /* * Check to see if: * (a) there is anything on the txq to send @@ -1647,6 +1668,8 @@ lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno) { struct hbq_s *hbqp = &phba->hbqs[hbqno]; + lockdep_assert_held(&phba->hbalock); + if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx && ++hbqp->next_hbqPutIdx >= hbqp->entry_count) hbqp->next_hbqPutIdx = 0; @@ -1747,6 +1770,7 @@ static int lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno, struct hbq_dmabuf *hbq_buf) { + lockdep_assert_held(&phba->hbalock); return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf); } @@ -1768,6 +1792,7 @@ lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno, struct lpfc_hbq_entry *hbqe; dma_addr_t physaddr = hbq_buf->dbuf.phys; + lockdep_assert_held(&phba->hbalock); /* Get next HBQ entry slot to use */ hbqe = lpfc_sli_next_hbq_slot(phba, hbqno); if (hbqe) { @@ -1808,6 +1833,7 @@ lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno, struct lpfc_rqe hrqe; struct lpfc_rqe drqe; + lockdep_assert_held(&phba->hbalock); hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys); hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys); drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys); @@ -1986,6 +2012,8 @@ lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag) struct hbq_dmabuf *hbq_buf; uint32_t hbqno; + lockdep_assert_held(&phba->hbalock); + hbqno = tag >> 16; if (hbqno >= LPFC_MAX_HBQS) return NULL; @@ -2647,6 +2675,7 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba *phba, { struct lpfc_iocbq *cmd_iocb = NULL; uint16_t iotag; + lockdep_assert_held(&phba->hbalock); iotag = prspiocb->iocb.ulpIoTag; @@ -2685,6 +2714,7 @@ lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba, { struct lpfc_iocbq 
*cmd_iocb; + lockdep_assert_held(&phba->hbalock); if (iotag != 0 && iotag <= phba->sli.last_iotag) { cmd_iocb = phba->sli.iocbq_lookup[iotag]; if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) { @@ -3799,6 +3829,8 @@ void lpfc_reset_barrier(struct lpfc_hba *phba) int i; uint8_t hdrtype; + lockdep_assert_held(&phba->hbalock); + pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype); if (hdrtype != 0x80 || (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID && @@ -7861,6 +7893,7 @@ void __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, struct lpfc_iocbq *piocb) { + lockdep_assert_held(&phba->hbalock); /* Insert the caller's iocb in the txq tail for later processing. */ list_add_tail(&piocb->list, &pring->txq); } @@ -7888,6 +7921,8 @@ lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, { struct lpfc_iocbq * nextiocb; + lockdep_assert_held(&phba->hbalock); + nextiocb = lpfc_sli_ringtx_get(phba, pring); if (!nextiocb) { nextiocb = *piocb; @@ -7927,6 +7962,8 @@ __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number, IOCB_t *iocb; struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number]; + lockdep_assert_held(&phba->hbalock); + if (piocb->iocb_cmpl && (!piocb->vport) && (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) { @@ -8642,6 +8679,8 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, struct lpfc_queue *wq; struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number]; + lockdep_assert_held(&phba->hbalock); + if (piocb->sli4_xritag == NO_XRI) { if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN) @@ -9752,6 +9791,8 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int retval; unsigned long iflags; + lockdep_assert_held(&phba->hbalock); + /* * There are certain command types we don't want to abort. And we * don't want to abort commands that are already in the process of @@ -9854,6 +9895,8 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int retval = IOCB_ERROR; IOCB_t *icmd = NULL; + lockdep_assert_held(&phba->hbalock); + /* * There are certain command types we don't want to abort. 
And we
	 * don't want to abort commands that are already in the process of
@@ -14842,10 +14885,12 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
 	struct lpfc_dmabuf *h_buf;
 	struct hbq_dmabuf *seq_dmabuf = NULL;
 	struct hbq_dmabuf *temp_dmabuf = NULL;
+	uint8_t	found = 0;

 	INIT_LIST_HEAD(&dmabuf->dbuf.list);
 	dmabuf->time_stamp = jiffies;
 	new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
+
 	/* Use the hdr_buf to find the sequence that this frame belongs to */
 	list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
 		temp_hdr = (struct fc_frame_header *)h_buf->virt;
@@ -14885,7 +14930,8 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
 		return seq_dmabuf;
 	}
 	/* find the correct place in the sequence to insert this frame */
-	list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) {
+	d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
+	while (!found) {
 		temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
 		temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
 		/*
@@ -14895,9 +14941,17 @@
 		if (be16_to_cpu(new_hdr->fh_seq_cnt) >
 			be16_to_cpu(temp_hdr->fh_seq_cnt)) {
 			list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
-			return seq_dmabuf;
+			found = 1;
+			break;
 		}
+
+		if (&d_buf->list == &seq_dmabuf->dbuf.list)
+			break;
+		d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
 	}
+
+	if (found)
+		return seq_dmabuf;
 	return NULL;
 }
@@ -16173,7 +16227,7 @@ fail_fcf_read:
 }

 /**
- * lpfc_check_next_fcf_pri
+ * lpfc_check_next_fcf_pri_level
 * phba pointer to the lpfc_hba struct for this port.
 * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
 * routine when the rr_bmask is empty. The FCF indices are put into the
@@ -16329,8 +16383,12 @@ next_priority:
 	if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
 		phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
-		LPFC_FCF_FLOGI_FAILED)
+		LPFC_FCF_FLOGI_FAILED) {
+		if (list_is_singular(&phba->fcf.fcf_pri_list))
+			return LPFC_FCOE_FCF_NEXT_NONE;
+
 		goto next_priority;
+	}

 	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
 			"2845 Get next roundrobin failover FCF (x%x)\n",
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 1e916e16ce98..cd780c29495a 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -442,6 +442,7 @@ struct lpfc_sli4_lnk_info {
 #define LPFC_LNK_GE 0x0 /* FCoE */
 #define LPFC_LNK_FC 0x1 /* FC */
 	uint8_t lnk_no;
+	uint8_t optic_state;
 };

 #define LPFC_SLI4_HANDLER_CNT (LPFC_FCP_IO_CHAN_MAX+ \
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index ea53aa664759..4dc22562aaf1 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
 * included with this package. *
 *******************************************************************/

-#define LPFC_DRIVER_VERSION "11.0.0.0."
+#define LPFC_DRIVER_VERSION "11.0.0.10."
 #define LPFC_DRIVER_NAME "lpfc"

 /* Used for SLI 2/3 */
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 769012663a8f..b3f85def18cc 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -393,6 +393,14 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
 	*(struct lpfc_vport **)fc_vport->dd_data = vport;
 	vport->fc_vport = fc_vport;
+	/* At this point we are fully registered with SCSI Layer.
*/ + vport->load_flag |= FC_ALLOW_FDMI; + if (phba->cfg_fdmi_on > LPFC_FDMI_NO_SUPPORT) { + /* Setup appropriate attribute masks */ + vport->fdmi_hba_mask = phba->pport->fdmi_hba_mask; + vport->fdmi_port_mask = phba->pport->fdmi_port_mask; + } + /* * In SLI4, the vpi must be activated before it can be used * by the port. diff --git a/drivers/scsi/mac53c94.c b/drivers/scsi/mac53c94.c index 141226631429..a6682c508c4c 100644 --- a/drivers/scsi/mac53c94.c +++ b/drivers/scsi/mac53c94.c @@ -18,11 +18,11 @@ #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/module.h> +#include <linux/pci.h> #include <asm/dbdma.h> #include <asm/io.h> #include <asm/pgtable.h> #include <asm/prom.h> -#include <asm/pci-bridge.h> #include <asm/macio.h> #include <scsi/scsi.h> diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c index d64a769b8155..bb2381314a2b 100644 --- a/drivers/scsi/mac_scsi.c +++ b/drivers/scsi/mac_scsi.c @@ -12,7 +12,6 @@ */ #include <linux/types.h> -#include <linux/delay.h> #include <linux/module.h> #include <linux/ioport.h> #include <linux/init.h> @@ -32,14 +31,13 @@ #define PSEUDO_DMA #define NCR5380_implementation_fields unsigned char *pdma_base -#define NCR5380_local_declare() struct Scsi_Host *_instance -#define NCR5380_setup(instance) _instance = instance -#define NCR5380_read(reg) macscsi_read(_instance, reg) -#define NCR5380_write(reg, value) macscsi_write(_instance, reg, value) +#define NCR5380_read(reg) macscsi_read(instance, reg) +#define NCR5380_write(reg, value) macscsi_write(instance, reg, value) #define NCR5380_pread macscsi_pread #define NCR5380_pwrite macscsi_pwrite +#define NCR5380_dma_xfer_len(instance, cmd, phase) (cmd->transfersize) #define NCR5380_intr macscsi_intr #define NCR5380_queue_command macscsi_queue_command @@ -51,8 +49,6 @@ #include "NCR5380.h" -#define RESET_BOOT - static int setup_can_queue = -1; module_param(setup_can_queue, int, 0); static int setup_cmd_per_lun = -1; @@ -65,17 +61,8 @@ static int setup_use_tagged_queuing = -1; module_param(setup_use_tagged_queuing, int, 0); static int setup_hostid = -1; module_param(setup_hostid, int, 0); - -/* Time (in jiffies) to wait after a reset; the SCSI standard calls for 250ms, - * we usually do 0.5s to be on the safe side. But Toshiba CD-ROMs once more - * need ten times the standard value... 
*/ -#define TOSHIBA_DELAY - -#ifdef TOSHIBA_DELAY -#define AFTER_RESET_DELAY (5*HZ/2) -#else -#define AFTER_RESET_DELAY (HZ/2) -#endif +static int setup_toshiba_delay = -1; +module_param(setup_toshiba_delay, int, 0); /* * NCR 5380 register access functions @@ -94,12 +81,12 @@ static inline void macscsi_write(struct Scsi_Host *instance, int reg, int value) #ifndef MODULE static int __init mac_scsi_setup(char *str) { - int ints[7]; + int ints[8]; (void)get_options(str, ARRAY_SIZE(ints), ints); - if (ints[0] < 1 || ints[0] > 6) { - pr_err("Usage: mac5380=<can_queue>[,<cmd_per_lun>[,<sg_tablesize>[,<hostid>[,<use_tags>[,<use_pdma>]]]]]\n"); + if (ints[0] < 1) { + pr_err("Usage: mac5380=<can_queue>[,<cmd_per_lun>[,<sg_tablesize>[,<hostid>[,<use_tags>[,<use_pdma>[,<toshiba_delay>]]]]]]\n"); return 0; } if (ints[0] >= 1) @@ -114,50 +101,14 @@ static int __init mac_scsi_setup(char *str) setup_use_tagged_queuing = ints[5]; if (ints[0] >= 6) setup_use_pdma = ints[6]; + if (ints[0] >= 7) + setup_toshiba_delay = ints[7]; return 1; } __setup("mac5380=", mac_scsi_setup); #endif /* !MODULE */ -#ifdef RESET_BOOT -/* - * Our 'bus reset on boot' function - */ - -static void mac_scsi_reset_boot(struct Scsi_Host *instance) -{ - unsigned long end; - - NCR5380_local_declare(); - NCR5380_setup(instance); - - /* - * Do a SCSI reset to clean up the bus during initialization. No messing - * with the queues, interrupts, or locks necessary here. - */ - - printk(KERN_INFO "Macintosh SCSI: resetting the SCSI bus..." ); - - /* get in phase */ - NCR5380_write( TARGET_COMMAND_REG, - PHASE_SR_TO_TCR( NCR5380_read(STATUS_REG) )); - - /* assert RST */ - NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_RST ); - /* The min. reset hold time is 25us, so 40us should be enough */ - udelay( 50 ); - /* reset RST and interrupt */ - NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE ); - NCR5380_read( RESET_PARITY_INTERRUPT_REG ); - - for( end = jiffies + AFTER_RESET_DELAY; time_before(jiffies, end); ) - barrier(); - - printk(KERN_INFO " done\n" ); -} -#endif - #ifdef PSEUDO_DMA /* Pseudo-DMA: (Ove Edlund) @@ -235,9 +186,6 @@ static int macscsi_pread(struct Scsi_Host *instance, unsigned char *d; unsigned char *s; - NCR5380_local_declare(); - NCR5380_setup(instance); - s = hostdata->pdma_base + (INPUT_DATA_REG << 4); d = dst; @@ -329,9 +277,6 @@ static int macscsi_pwrite(struct Scsi_Host *instance, unsigned char *s; unsigned char *d; - NCR5380_local_declare(); - NCR5380_setup(instance); - s = src; d = hostdata->pdma_base + (OUTPUT_DATA_REG << 4); @@ -364,20 +309,22 @@ static int macscsi_pwrite(struct Scsi_Host *instance, #define PFX DRV_MODULE_NAME ": " static struct scsi_host_template mac_scsi_template = { - .module = THIS_MODULE, - .proc_name = DRV_MODULE_NAME, - .show_info = macscsi_show_info, - .write_info = macscsi_write_info, - .name = "Macintosh NCR5380 SCSI", - .info = macscsi_info, - .queuecommand = macscsi_queue_command, - .eh_abort_handler = macscsi_abort, - .eh_bus_reset_handler = macscsi_bus_reset, - .can_queue = 16, - .this_id = 7, - .sg_tablesize = SG_ALL, - .cmd_per_lun = 2, - .use_clustering = DISABLE_CLUSTERING + .module = THIS_MODULE, + .proc_name = DRV_MODULE_NAME, + .show_info = macscsi_show_info, + .write_info = macscsi_write_info, + .name = "Macintosh NCR5380 SCSI", + .info = macscsi_info, + .queuecommand = macscsi_queue_command, + .eh_abort_handler = macscsi_abort, + .eh_bus_reset_handler = macscsi_bus_reset, + .can_queue = 16, + .this_id = 7, + .sg_tablesize = SG_ALL, + .cmd_per_lun = 2, + 
.use_clustering = DISABLE_CLUSTERING, + .cmd_size = NCR5380_CMD_SIZE, + .max_sectors = 128, }; static int __init mac_scsi_probe(struct platform_device *pdev) @@ -432,15 +379,14 @@ static int __init mac_scsi_probe(struct platform_device *pdev) } else host_flags |= FLAG_NO_PSEUDO_DMA; -#ifdef RESET_BOOT - mac_scsi_reset_boot(instance); -#endif - #ifdef SUPPORT_TAGS host_flags |= setup_use_tagged_queuing > 0 ? FLAG_TAGGED_QUEUING : 0; #endif + host_flags |= setup_toshiba_delay > 0 ? FLAG_TOSHIBA_DELAY : 0; - NCR5380_init(instance, host_flags); + error = NCR5380_init(instance, host_flags); + if (error) + goto fail_init; if (instance->irq != NO_IRQ) { error = request_irq(instance->irq, macscsi_intr, IRQF_SHARED, @@ -449,6 +395,8 @@ static int __init mac_scsi_probe(struct platform_device *pdev) goto fail_irq; } + NCR5380_maybe_reset_bus(instance); + error = scsi_add_host(instance, NULL); if (error) goto fail_host; @@ -463,6 +411,7 @@ fail_host: free_irq(instance->irq, instance); fail_irq: NCR5380_exit(instance); +fail_init: scsi_host_put(instance); return error; } diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c index a70692779a16..4cf9ed96414f 100644 --- a/drivers/scsi/megaraid/megaraid_mm.c +++ b/drivers/scsi/megaraid/megaraid_mm.c @@ -179,8 +179,12 @@ mraid_mm_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) /* * The following call will block till a kioc is available + * or return NULL if the list head is empty for the pointer + * of type mraid_mmapt passed to mraid_mm_alloc_kioc */ kioc = mraid_mm_alloc_kioc(adp); + if (!kioc) + return -ENXIO; /* * User sent the old mimd_t ioctl packet. Convert it to uioc_t. diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index c0f7c8ce54aa..4484e63033a5 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -35,8 +35,8 @@ /* * MegaRAID SAS Driver meta data */ -#define MEGASAS_VERSION "06.808.16.00-rc1" -#define MEGASAS_RELDATE "Oct. 8, 2015" +#define MEGASAS_VERSION "06.810.09.00-rc1" +#define MEGASAS_RELDATE "Jan. 
28, 2016" /* * Device IDs @@ -152,6 +152,7 @@ #define MFI_RESET_FLAGS MFI_INIT_READY| \ MFI_INIT_MFIMODE| \ MFI_INIT_ABORT +#define MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE (0x01) /* * MFI frame flags @@ -170,6 +171,7 @@ /* Driver internal */ #define DRV_DCMD_POLLED_MODE 0x1 +#define DRV_DCMD_SKIP_REFIRE 0x2 /* * Definition for cmd_status @@ -214,6 +216,7 @@ #define MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS 0x01190100 #define MR_DRIVER_SET_APP_CRASHDUMP_MODE (0xF0010000 | 0x0600) +#define MR_DCMD_PD_GET_INFO 0x02020000 /* * Global functions @@ -390,6 +393,7 @@ enum MR_EVT_ARGS { #define SGE_BUFFER_SIZE 4096 +#define MEGASAS_CLUSTER_ID_SIZE 16 /* * define constants for device list query options */ @@ -434,6 +438,257 @@ enum MR_PD_STATE { MR_PD_STATE_SYSTEM = 0x40, }; +union MR_PD_REF { + struct { + u16 deviceId; + u16 seqNum; + } mrPdRef; + u32 ref; +}; + +/* + * define the DDF Type bit structure + */ +union MR_PD_DDF_TYPE { + struct { + union { + struct { +#ifndef __BIG_ENDIAN_BITFIELD + u16 forcedPDGUID:1; + u16 inVD:1; + u16 isGlobalSpare:1; + u16 isSpare:1; + u16 isForeign:1; + u16 reserved:7; + u16 intf:4; +#else + u16 intf:4; + u16 reserved:7; + u16 isForeign:1; + u16 isSpare:1; + u16 isGlobalSpare:1; + u16 inVD:1; + u16 forcedPDGUID:1; +#endif + } pdType; + u16 type; + }; + u16 reserved; + } ddf; + struct { + u32 reserved; + } nonDisk; + u32 type; +} __packed; + +/* + * defines the progress structure + */ +union MR_PROGRESS { + struct { + u16 progress; + union { + u16 elapsedSecs; + u16 elapsedSecsForLastPercent; + }; + } mrProgress; + u32 w; +} __packed; + +/* + * defines the physical drive progress structure + */ +struct MR_PD_PROGRESS { + struct { +#ifndef MFI_BIG_ENDIAN + u32 rbld:1; + u32 patrol:1; + u32 clear:1; + u32 copyBack:1; + u32 erase:1; + u32 locate:1; + u32 reserved:26; +#else + u32 reserved:26; + u32 locate:1; + u32 erase:1; + u32 copyBack:1; + u32 clear:1; + u32 patrol:1; + u32 rbld:1; +#endif + } active; + union MR_PROGRESS rbld; + union MR_PROGRESS patrol; + union { + union MR_PROGRESS clear; + union MR_PROGRESS erase; + }; + + struct { +#ifndef MFI_BIG_ENDIAN + u32 rbld:1; + u32 patrol:1; + u32 clear:1; + u32 copyBack:1; + u32 erase:1; + u32 reserved:27; +#else + u32 reserved:27; + u32 erase:1; + u32 copyBack:1; + u32 clear:1; + u32 patrol:1; + u32 rbld:1; +#endif + } pause; + + union MR_PROGRESS reserved[3]; +} __packed; + +struct MR_PD_INFO { + union MR_PD_REF ref; + u8 inquiryData[96]; + u8 vpdPage83[64]; + u8 notSupported; + u8 scsiDevType; + + union { + u8 connectedPortBitmap; + u8 connectedPortNumbers; + }; + + u8 deviceSpeed; + u32 mediaErrCount; + u32 otherErrCount; + u32 predFailCount; + u32 lastPredFailEventSeqNum; + + u16 fwState; + u8 disabledForRemoval; + u8 linkSpeed; + union MR_PD_DDF_TYPE state; + + struct { + u8 count; +#ifndef __BIG_ENDIAN_BITFIELD + u8 isPathBroken:4; + u8 reserved3:3; + u8 widePortCapable:1; +#else + u8 widePortCapable:1; + u8 reserved3:3; + u8 isPathBroken:4; +#endif + + u8 connectorIndex[2]; + u8 reserved[4]; + u64 sasAddr[2]; + u8 reserved2[16]; + } pathInfo; + + u64 rawSize; + u64 nonCoercedSize; + u64 coercedSize; + u16 enclDeviceId; + u8 enclIndex; + + union { + u8 slotNumber; + u8 enclConnectorIndex; + }; + + struct MR_PD_PROGRESS progInfo; + u8 badBlockTableFull; + u8 unusableInCurrentConfig; + u8 vpdPage83Ext[64]; + u8 powerState; + u8 enclPosition; + u32 allowedOps; + u16 copyBackPartnerId; + u16 enclPartnerDeviceId; + struct { +#ifndef __BIG_ENDIAN_BITFIELD + u16 fdeCapable:1; + u16 fdeEnabled:1; + u16 secured:1; + u16 
locked:1; + u16 foreign:1; + u16 needsEKM:1; + u16 reserved:10; +#else + u16 reserved:10; + u16 needsEKM:1; + u16 foreign:1; + u16 locked:1; + u16 secured:1; + u16 fdeEnabled:1; + u16 fdeCapable:1; +#endif + } security; + u8 mediaType; + u8 notCertified; + u8 bridgeVendor[8]; + u8 bridgeProductIdentification[16]; + u8 bridgeProductRevisionLevel[4]; + u8 satBridgeExists; + + u8 interfaceType; + u8 temperature; + u8 emulatedBlockSize; + u16 userDataBlockSize; + u16 reserved2; + + struct { +#ifndef __BIG_ENDIAN_BITFIELD + u32 piType:3; + u32 piFormatted:1; + u32 piEligible:1; + u32 NCQ:1; + u32 WCE:1; + u32 commissionedSpare:1; + u32 emergencySpare:1; + u32 ineligibleForSSCD:1; + u32 ineligibleForLd:1; + u32 useSSEraseType:1; + u32 wceUnchanged:1; + u32 supportScsiUnmap:1; + u32 reserved:18; +#else + u32 reserved:18; + u32 supportScsiUnmap:1; + u32 wceUnchanged:1; + u32 useSSEraseType:1; + u32 ineligibleForLd:1; + u32 ineligibleForSSCD:1; + u32 emergencySpare:1; + u32 commissionedSpare:1; + u32 WCE:1; + u32 NCQ:1; + u32 piEligible:1; + u32 piFormatted:1; + u32 piType:3; +#endif + } properties; + + u64 shieldDiagCompletionTime; + u8 shieldCounter; + + u8 linkSpeedOther; + u8 reserved4[2]; + + struct { +#ifndef __BIG_ENDIAN_BITFIELD + u32 bbmErrCountSupported:1; + u32 bbmErrCount:31; +#else + u32 bbmErrCount:31; + u32 bbmErrCountSupported:1; +#endif + } bbmErr; + + u8 reserved1[512-428]; +} __packed; /* * defines the physical drive address structure @@ -473,6 +728,7 @@ struct megasas_pd_list { u16 tid; u8 driveType; u8 driveState; + u8 interface; } __packed; /* @@ -972,7 +1228,8 @@ struct megasas_ctrl_info { */ struct { #if defined(__BIG_ENDIAN_BITFIELD) - u32 reserved:26; + u32 reserved:25; + u32 passive:1; u32 premiumFeatureMismatch:1; u32 ctrlPropIncompatible:1; u32 fwVersionMismatch:1; @@ -986,11 +1243,12 @@ struct megasas_ctrl_info { u32 fwVersionMismatch:1; u32 ctrlPropIncompatible:1; u32 premiumFeatureMismatch:1; - u32 reserved:26; + u32 passive:1; + u32 reserved:25; #endif } cluster; - char clusterId[16]; /*7D4h */ + char clusterId[MEGASAS_CLUSTER_ID_SIZE]; /*0x7D4 */ struct { u8 maxVFsSupported; /*0x7E4*/ u8 numVFsEnabled; /*0x7E5*/ @@ -1083,6 +1341,8 @@ struct megasas_ctrl_info { #define VD_EXT_DEBUG 0 +#define SCAN_PD_CHANNEL 0x1 +#define SCAN_VD_CHANNEL 0x2 enum MR_SCSI_CMD_TYPE { READ_WRITE_LDIO = 0, @@ -1091,6 +1351,17 @@ enum MR_SCSI_CMD_TYPE { NON_READ_WRITE_SYSPDIO = 3, }; +enum DCMD_TIMEOUT_ACTION { + INITIATE_OCR = 0, + KILL_ADAPTER = 1, + IGNORE_TIMEOUT = 2, +}; + +enum FW_BOOT_CONTEXT { + PROBE_CONTEXT = 0, + OCR_CONTEXT = 1, +}; + /* Frame Type */ #define IO_FRAME 0 #define PTHRU_FRAME 1 @@ -1137,6 +1408,7 @@ enum MR_SCSI_CMD_TYPE { #define MFI_OB_INTR_STATUS_MASK 0x00000002 #define MFI_POLL_TIMEOUT_SECS 60 +#define MFI_IO_TIMEOUT_SECS 180 #define MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF (5 * HZ) #define MEGASAS_OCR_SETTLE_TIME_VF (1000 * 30) #define MEGASAS_ROUTINE_WAIT_TIME_VF 300 @@ -1154,6 +1426,7 @@ enum MR_SCSI_CMD_TYPE { #define MR_MAX_REPLY_QUEUES_EXT_OFFSET 0X003FC000 #define MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT 14 #define MR_MAX_MSIX_REG_ARRAY 16 +#define MR_RDPQ_MODE_OFFSET 0X00800000 /* * register set for both 1068 and 1078 controllers * structure extended for 1078 registers @@ -1193,8 +1466,9 @@ struct megasas_register_set { u32 outbound_scratch_pad ; /*00B0h*/ u32 outbound_scratch_pad_2; /*00B4h*/ + u32 outbound_scratch_pad_3; /*00B8h*/ - u32 reserved_4[2]; /*00B8h*/ + u32 reserved_4; /*00BCh*/ u32 inbound_low_queue_port ; /*00C0h*/ @@ -1266,7 +1540,10 @@ union 
megasas_sgl_frame { typedef union _MFI_CAPABILITIES { struct { #if defined(__BIG_ENDIAN_BITFIELD) - u32 reserved:23; + u32 reserved:20; + u32 support_qd_throttling:1; + u32 support_fp_rlbypass:1; + u32 support_vfid_in_ioframe:1; u32 support_ext_io_size:1; u32 support_ext_queue_depth:1; u32 security_protocol_cmds_fw:1; @@ -1286,7 +1563,10 @@ typedef union _MFI_CAPABILITIES { u32 security_protocol_cmds_fw:1; u32 support_ext_queue_depth:1; u32 support_ext_io_size:1; - u32 reserved:23; + u32 support_vfid_in_ioframe:1; + u32 support_fp_rlbypass:1; + u32 support_qd_throttling:1; + u32 reserved:20; #endif } mfi_capabilities; __le32 reg; @@ -1511,6 +1791,15 @@ union megasas_frame { u8 raw_bytes[64]; }; +/** + * struct MR_PRIV_DEVICE - sdev private hostdata + * @is_tm_capable: firmware managed tm_capable flag + * @tm_busy: TM request is in progress + */ +struct MR_PRIV_DEVICE { + bool is_tm_capable; + bool tm_busy; +}; struct megasas_cmd; union megasas_evt_class_locale { @@ -1700,6 +1989,19 @@ struct MR_DRV_SYSTEM_INFO { u8 reserved[1980]; }; +enum MR_PD_TYPE { + UNKNOWN_DRIVE = 0, + PARALLEL_SCSI = 1, + SAS_PD = 2, + SATA_PD = 3, + FC_PD = 4, +}; + +/* JBOD Queue depth definitions */ +#define MEGASAS_SATA_QD 32 +#define MEGASAS_SAS_QD 64 +#define MEGASAS_DEFAULT_PD_QD 64 + struct megasas_instance { __le32 *producer; @@ -1714,6 +2016,8 @@ struct megasas_instance { dma_addr_t vf_affiliation_111_h; struct MR_CTRL_HB_HOST_MEM *hb_host_mem; dma_addr_t hb_host_mem_h; + struct MR_PD_INFO *pd_info; + dma_addr_t pd_info_h; __le32 *reply_queue; dma_addr_t reply_queue_h; @@ -1745,6 +2049,8 @@ struct megasas_instance { u16 max_fw_cmds; u16 max_mfi_cmds; u16 max_scsi_cmds; + u16 ldio_threshold; + u16 cur_can_queue; u32 max_sectors_per_req; struct megasas_aen_event *ev; @@ -1762,7 +2068,7 @@ struct megasas_instance { struct megasas_evt_detail *evt_detail; dma_addr_t evt_detail_h; struct megasas_cmd *aen_cmd; - struct mutex aen_mutex; + struct mutex hba_mutex; struct semaphore ioctl_sem; struct Scsi_Host *host; @@ -1775,6 +2081,7 @@ struct megasas_instance { u32 fw_support_ieee; atomic_t fw_outstanding; + atomic_t ldio_outstanding; atomic_t fw_reset_no_pci_access; struct megasas_instance_template *instancet; @@ -1797,7 +2104,7 @@ struct megasas_instance { u16 drv_supported_vd_count; u16 drv_supported_pd_count; - u8 adprecovery; + atomic_t adprecovery; unsigned long last_time; u32 mfiStatus; u32 last_seq_num; @@ -1822,11 +2129,14 @@ struct megasas_instance { char skip_heartbeat_timer_del; u8 requestorId; char PlasmaFW111; - char mpio; + char clusterId[MEGASAS_CLUSTER_ID_SIZE]; + u8 peerIsPresent; + u8 passive; u16 throttlequeuedepth; u8 mask_interrupts; u16 max_chain_frame_sz; u8 is_imr; + u8 is_rdpq; bool dev_handle; }; struct MR_LD_VF_MAP { @@ -1916,7 +2226,7 @@ struct megasas_instance_template { u32 (*init_adapter)(struct megasas_instance *); u32 (*build_and_issue_cmd) (struct megasas_instance *, struct scsi_cmnd *); - void (*issue_dcmd) (struct megasas_instance *instance, + int (*issue_dcmd)(struct megasas_instance *instance, struct megasas_cmd *cmd); }; @@ -2014,6 +2324,19 @@ struct megasas_mgmt_info { int max_index; }; +enum MEGASAS_OCR_CAUSE { + FW_FAULT_OCR = 0, + SCSIIO_TIMEOUT_OCR = 1, + MFI_IO_TIMEOUT_OCR = 2, +}; + +enum DCMD_RETURN_STATUS { + DCMD_SUCCESS = 0, + DCMD_TIMEOUT = 1, + DCMD_FAILED = 2, + DCMD_NOT_FIRED = 3, +}; + u8 MR_BuildRaidContext(struct megasas_instance *instance, struct IO_REQUEST_INFO *io_info, @@ -2051,4 +2374,8 @@ void megasas_return_mfi_mpt_pthr(struct megasas_instance 
*instance,
 	int megasas_cmd_type(struct scsi_cmnd *cmd);
 void megasas_setup_jbod_map(struct megasas_instance *instance);
+void megasas_update_sdev_properties(struct scsi_device *sdev);
+int megasas_reset_fusion(struct Scsi_Host *shost, int reason);
+int megasas_task_abort_fusion(struct scsi_cmnd *scmd);
+int megasas_reset_target_fusion(struct scsi_cmnd *scmd);
 #endif /*LSI_MEGARAID_SAS_H */
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 97a1c1c33b05..5c08568ccfbf 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -83,7 +83,7 @@
 module_param(throttlequeuedepth, int, S_IRUGO);
 MODULE_PARM_DESC(throttlequeuedepth,
 	"Adapter queue depth when throttled due to I/O timeout. Default: 16");

-int resetwaittime = MEGASAS_RESET_WAIT_TIME;
+unsigned int resetwaittime = MEGASAS_RESET_WAIT_TIME;
 module_param(resetwaittime, int, S_IRUGO);
 MODULE_PARM_DESC(resetwaittime, "Wait time in seconds after I/O timeout "
 	"before resetting adapter. Default: 180");
@@ -92,6 +92,18 @@
 int smp_affinity_enable = 1;
 module_param(smp_affinity_enable, int, S_IRUGO);
 MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");

+int rdpq_enable = 1;
+module_param(rdpq_enable, int, S_IRUGO);
+MODULE_PARM_DESC(rdpq_enable, " Allocate reply queue in chunks for large queue depth enable/disable Default: disable(0)");
+
+unsigned int dual_qdepth_disable;
+module_param(dual_qdepth_disable, int, S_IRUGO);
+MODULE_PARM_DESC(dual_qdepth_disable, "Disable dual queue depth feature. Default: 0");
+
+unsigned int scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
+module_param(scmd_timeout, int, S_IRUGO);
+MODULE_PARM_DESC(scmd_timeout, "scsi command timeout (10-90s), default 90s.
See megasas_reset_timer."); + MODULE_LICENSE("GPL"); MODULE_VERSION(MEGASAS_VERSION); MODULE_AUTHOR("megaraidlinux.pdl@avagotech.com"); @@ -104,6 +116,8 @@ static int megasas_ld_list_query(struct megasas_instance *instance, static int megasas_issue_init_mfi(struct megasas_instance *instance); static int megasas_register_aen(struct megasas_instance *instance, u32 seq_num, u32 class_locale_word); +static int +megasas_get_pd_info(struct megasas_instance *instance, u16 device_id); /* * PCI ID table for all supported controllers */ @@ -189,18 +203,18 @@ int wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd, int seconds); void megasas_reset_reply_desc(struct megasas_instance *instance); -int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout); void megasas_fusion_ocr_wq(struct work_struct *work); static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance, int initial); int megasas_check_mpio_paths(struct megasas_instance *instance, struct scsi_cmnd *scmd); -void +int megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd) { instance->instancet->fire_cmd(instance, cmd->frame_phys_addr, 0, instance->reg_set); + return 0; } /** @@ -473,7 +487,7 @@ static int megasas_check_reset_xscale(struct megasas_instance *instance, struct megasas_register_set __iomem *regs) { - if ((instance->adprecovery != MEGASAS_HBA_OPERATIONAL) && + if ((atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) && (le32_to_cpu(*instance->consumer) == MEGASAS_ADPRESET_INPROG_SIGN)) return 1; @@ -609,7 +623,7 @@ static int megasas_check_reset_ppc(struct megasas_instance *instance, struct megasas_register_set __iomem *regs) { - if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) + if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) return 1; return 0; @@ -735,6 +749,7 @@ megasas_fire_cmd_skinny(struct megasas_instance *instance, &(regs)->inbound_high_queue_port); writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1, &(regs)->inbound_low_queue_port); + mmiowb(); spin_unlock_irqrestore(&instance->hba_lock, flags); } @@ -746,7 +761,7 @@ static int megasas_check_reset_skinny(struct megasas_instance *instance, struct megasas_register_set __iomem *regs) { - if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) + if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) return 1; return 0; @@ -940,9 +955,8 @@ static int megasas_check_reset_gen2(struct megasas_instance *instance, struct megasas_register_set __iomem *regs) { - if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) { + if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) return 1; - } return 0; } @@ -983,25 +997,20 @@ extern struct megasas_instance_template megasas_instance_template_fusion; int megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd) { - int seconds; struct megasas_header *frame_hdr = &cmd->frame->hdr; - frame_hdr->cmd_status = MFI_CMD_STATUS_POLL_MODE; + frame_hdr->cmd_status = MFI_STAT_INVALID_STATUS; frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE); - /* - * Issue the frame using inbound queue port - */ - instance->instancet->issue_dcmd(instance, cmd); + if ((atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) || + (instance->instancet->issue_dcmd(instance, cmd))) { + dev_err(&instance->pdev->dev, "Failed from %s %d\n", + __func__, __LINE__); + return DCMD_NOT_FIRED; + } - /* - * Wait for cmd_status to change - */ - if (instance->requestorId) - seconds = 
MEGASAS_ROUTINE_WAIT_TIME_VF; - else - seconds = MFI_POLL_TIMEOUT_SECS; - return wait_and_poll(instance, cmd, seconds); + return wait_and_poll(instance, cmd, instance->requestorId ? + MEGASAS_ROUTINE_WAIT_TIME_VF : MFI_IO_TIMEOUT_SECS); } /** @@ -1019,21 +1028,29 @@ megasas_issue_blocked_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, int timeout) { int ret = 0; - cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS; - instance->instancet->issue_dcmd(instance, cmd); + if ((atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) || + (instance->instancet->issue_dcmd(instance, cmd))) { + dev_err(&instance->pdev->dev, "Failed from %s %d\n", + __func__, __LINE__); + return DCMD_NOT_FIRED; + } + if (timeout) { ret = wait_event_timeout(instance->int_cmd_wait_q, cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ); - if (!ret) - return 1; + if (!ret) { + dev_err(&instance->pdev->dev, "Failed from %s %d DCMD Timed out\n", + __func__, __LINE__); + return DCMD_TIMEOUT; + } } else wait_event(instance->int_cmd_wait_q, cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS); return (cmd->cmd_status_drv == MFI_STAT_OK) ? - 0 : 1; + DCMD_SUCCESS : DCMD_FAILED; } /** @@ -1077,15 +1094,20 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance, cmd->sync_cmd = 1; cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS; - instance->instancet->issue_dcmd(instance, cmd); + if ((atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) || + (instance->instancet->issue_dcmd(instance, cmd))) { + dev_err(&instance->pdev->dev, "Failed from %s %d\n", + __func__, __LINE__); + return DCMD_NOT_FIRED; + } if (timeout) { ret = wait_event_timeout(instance->abort_cmd_wait_q, cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ); if (!ret) { - dev_err(&instance->pdev->dev, "Command timedout" - "from %s\n", __func__); - return 1; + dev_err(&instance->pdev->dev, "Failed from %s %d Abort Timed out\n", + __func__, __LINE__); + return DCMD_TIMEOUT; } } else wait_event(instance->abort_cmd_wait_q, @@ -1094,7 +1116,8 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance, cmd->sync_cmd = 0; megasas_return_cmd(instance, cmd); - return 0; + return (cmd->cmd_status_drv == MFI_STAT_OK) ? 
+ DCMD_SUCCESS : DCMD_FAILED; } /** @@ -1621,7 +1644,7 @@ megasas_build_and_issue_cmd(struct megasas_instance *instance, return 0; out_return_cmd: megasas_return_cmd(instance, cmd); - return 1; + return SCSI_MLQUEUE_HOST_BUSY; } @@ -1634,7 +1657,7 @@ static int megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd) { struct megasas_instance *instance; - unsigned long flags; + struct MR_PRIV_DEVICE *mr_device_priv_data; instance = (struct megasas_instance *) scmd->device->host->hostdata; @@ -1648,35 +1671,38 @@ megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd) if (instance->issuepend_done == 0) return SCSI_MLQUEUE_HOST_BUSY; - spin_lock_irqsave(&instance->hba_lock, flags); /* Check for an mpio path and adjust behavior */ - if (instance->adprecovery == MEGASAS_ADPRESET_SM_INFAULT) { + if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) { if (megasas_check_mpio_paths(instance, scmd) == (DID_RESET << 16)) { - spin_unlock_irqrestore(&instance->hba_lock, flags); return SCSI_MLQUEUE_HOST_BUSY; } else { - spin_unlock_irqrestore(&instance->hba_lock, flags); scmd->result = DID_NO_CONNECT << 16; scmd->scsi_done(scmd); return 0; } } - if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) { - spin_unlock_irqrestore(&instance->hba_lock, flags); + if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { scmd->result = DID_NO_CONNECT << 16; scmd->scsi_done(scmd); return 0; } - if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) { - spin_unlock_irqrestore(&instance->hba_lock, flags); - return SCSI_MLQUEUE_HOST_BUSY; + mr_device_priv_data = scmd->device->hostdata; + if (!mr_device_priv_data) { + scmd->result = DID_NO_CONNECT << 16; + scmd->scsi_done(scmd); + return 0; } - spin_unlock_irqrestore(&instance->hba_lock, flags); + if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) + return SCSI_MLQUEUE_HOST_BUSY; + + if (mr_device_priv_data->tm_busy) + return SCSI_MLQUEUE_DEVICE_BUSY; + scmd->result = 0; @@ -1699,12 +1725,7 @@ megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd) break; } - if (instance->instancet->build_and_issue_cmd(instance, scmd)) { - dev_err(&instance->pdev->dev, "Err returned from build_and_issue_cmd\n"); - return SCSI_MLQUEUE_HOST_BUSY; - } - - return 0; + return instance->instancet->build_and_issue_cmd(instance, scmd); out_done: scmd->scsi_done(scmd); @@ -1726,27 +1747,39 @@ static struct megasas_instance *megasas_lookup_instance(u16 host_no) } /* -* megasas_set_dma_alignment - Set DMA alignment for PI enabled VD +* megasas_update_sdev_properties - Update sdev structure based on controller's FW capabilities * * @sdev: OS provided scsi device * * Returns void */ -static void megasas_set_dma_alignment(struct scsi_device *sdev) +void megasas_update_sdev_properties(struct scsi_device *sdev) { + u16 pd_index = 0; u32 device_id, ld; struct megasas_instance *instance; struct fusion_context *fusion; + struct MR_PRIV_DEVICE *mr_device_priv_data; + struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync; struct MR_LD_RAID *raid; struct MR_DRV_RAID_MAP_ALL *local_map_ptr; instance = megasas_lookup_instance(sdev->host->host_no); fusion = instance->ctrl_context; + mr_device_priv_data = sdev->hostdata; if (!fusion) return; - if (sdev->channel >= MEGASAS_MAX_PD_CHANNELS) { + if (sdev->channel < MEGASAS_MAX_PD_CHANNELS && + instance->use_seqnum_jbod_fp) { + pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + + sdev->id; + pd_sync = (void *)fusion->pd_seq_sync + [(instance->pd_seq_map_id - 1) & 1]; + 
mr_device_priv_data->is_tm_capable = + pd_sync->seq[pd_index].capability.tmCapable; + } else { device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id; local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)]; @@ -1754,10 +1787,51 @@ static void megasas_set_dma_alignment(struct scsi_device *sdev) raid = MR_LdRaidGet(ld, local_map_ptr); if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER) - blk_queue_update_dma_alignment(sdev->request_queue, 0x7); + blk_queue_update_dma_alignment(sdev->request_queue, 0x7); + mr_device_priv_data->is_tm_capable = + raid->capability.tmCapable; + } +} + +static void megasas_set_device_queue_depth(struct scsi_device *sdev) +{ + u16 pd_index = 0; + int ret = DCMD_FAILED; + struct megasas_instance *instance; + + instance = megasas_lookup_instance(sdev->host->host_no); + + if (sdev->channel < MEGASAS_MAX_PD_CHANNELS) { + pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id; + + if (instance->pd_info) { + mutex_lock(&instance->hba_mutex); + ret = megasas_get_pd_info(instance, pd_index); + mutex_unlock(&instance->hba_mutex); + } + + if (ret != DCMD_SUCCESS) + return; + + if (instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) { + + switch (instance->pd_list[pd_index].interface) { + case SAS_PD: + scsi_change_queue_depth(sdev, MEGASAS_SAS_QD); + break; + + case SATA_PD: + scsi_change_queue_depth(sdev, MEGASAS_SATA_QD); + break; + + default: + scsi_change_queue_depth(sdev, MEGASAS_DEFAULT_PD_QD); + } + } } } + static int megasas_slave_configure(struct scsi_device *sdev) { u16 pd_index = 0; @@ -1774,12 +1848,14 @@ static int megasas_slave_configure(struct scsi_device *sdev) return -ENXIO; } } - megasas_set_dma_alignment(sdev); + megasas_set_device_queue_depth(sdev); + megasas_update_sdev_properties(sdev); + /* * The RAID firmware may require extended timeouts. 
*/ blk_queue_rq_timeout(sdev->request_queue, - MEGASAS_DEFAULT_CMD_TIMEOUT * HZ); + scmd_timeout * HZ); return 0; } @@ -1788,6 +1864,7 @@ static int megasas_slave_alloc(struct scsi_device *sdev) { u16 pd_index = 0; struct megasas_instance *instance ; + struct MR_PRIV_DEVICE *mr_device_priv_data; instance = megasas_lookup_instance(sdev->host->host_no); if (sdev->channel < MEGASAS_MAX_PD_CHANNELS) { @@ -1799,13 +1876,26 @@ static int megasas_slave_alloc(struct scsi_device *sdev) sdev->id; if ((instance->allow_fw_scan || instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM)) { - return 0; + goto scan_target; } return -ENXIO; } + +scan_target: + mr_device_priv_data = kzalloc(sizeof(*mr_device_priv_data), + GFP_KERNEL); + if (!mr_device_priv_data) + return -ENOMEM; + sdev->hostdata = mr_device_priv_data; return 0; } +static void megasas_slave_destroy(struct scsi_device *sdev) +{ + kfree(sdev->hostdata); + sdev->hostdata = NULL; +} + /* * megasas_complete_outstanding_ioctls - Complete outstanding ioctls after a * kill adapter @@ -1845,7 +1935,7 @@ static void megasas_complete_outstanding_ioctls(struct megasas_instance *instanc void megaraid_sas_kill_hba(struct megasas_instance *instance) { /* Set critical error to block I/O & ioctls in case caller didn't */ - instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR; + atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR); /* Wait 1 second to ensure IO or ioctls in build have posted */ msleep(1000); if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || @@ -1854,7 +1944,7 @@ void megaraid_sas_kill_hba(struct megasas_instance *instance) writel(MFI_STOP_ADP, &instance->reg_set->doorbell); /* Flush */ readl(&instance->reg_set->doorbell); - if (instance->mpio && instance->requestorId) + if (instance->requestorId && instance->peerIsPresent) memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); } else { writel(MFI_STOP_ADP, @@ -1883,7 +1973,7 @@ megasas_check_and_restore_queue_depth(struct megasas_instance *instance) spin_lock_irqsave(instance->host->host_lock, flags); instance->flag &= ~MEGASAS_FW_BUSY; - instance->host->can_queue = instance->max_scsi_cmds; + instance->host->can_queue = instance->cur_can_queue; spin_unlock_irqrestore(instance->host->host_lock, flags); } } @@ -1905,7 +1995,7 @@ static void megasas_complete_cmd_dpc(unsigned long instance_addr) unsigned long flags; /* If we have already declared adapter dead, donot complete cmds */ - if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) + if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) return; spin_lock_irqsave(&instance->completion_lock, flags); @@ -1974,7 +2064,7 @@ void megasas_do_ocr(struct megasas_instance *instance) *instance->consumer = cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN); } instance->instancet->disable_intr(instance); - instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT; + atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT); instance->issuepend_done = 0; atomic_set(&instance->fw_outstanding, 0); @@ -2054,9 +2144,7 @@ static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance, dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for " "scsi%d\n", instance->host->host_no); - megasas_issue_blocked_cmd(instance, cmd, 0); - - if (dcmd->cmd_status) { + if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) { dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD" " failed with status 0x%x for scsi%d\n", dcmd->cmd_status, instance->host->host_no); @@ -2166,9 +2254,8 @@ static 
int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance, dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for " "scsi%d\n", instance->host->host_no); - megasas_issue_blocked_cmd(instance, cmd, 0); - if (dcmd->cmd_status) { + if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) { dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD" " failed with status 0x%x for scsi%d\n", dcmd->cmd_status, instance->host->host_no); @@ -2373,21 +2460,21 @@ void megasas_sriov_heartbeat_handler(unsigned long instance_addr) */ static int megasas_wait_for_outstanding(struct megasas_instance *instance) { - int i; + int i, sl, outstanding; u32 reset_index; u32 wait_time = MEGASAS_RESET_WAIT_TIME; - u8 adprecovery; unsigned long flags; struct list_head clist_local; struct megasas_cmd *reset_cmd; u32 fw_state; - u8 kill_adapter_flag; - spin_lock_irqsave(&instance->hba_lock, flags); - adprecovery = instance->adprecovery; - spin_unlock_irqrestore(&instance->hba_lock, flags); + if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { + dev_info(&instance->pdev->dev, "%s:%d HBA is killed.\n", + __func__, __LINE__); + return FAILED; + } - if (adprecovery != MEGASAS_HBA_OPERATIONAL) { + if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) { INIT_LIST_HEAD(&clist_local); spin_lock_irqsave(&instance->hba_lock, flags); @@ -2398,18 +2485,13 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance) dev_notice(&instance->pdev->dev, "HBA reset wait ...\n"); for (i = 0; i < wait_time; i++) { msleep(1000); - spin_lock_irqsave(&instance->hba_lock, flags); - adprecovery = instance->adprecovery; - spin_unlock_irqrestore(&instance->hba_lock, flags); - if (adprecovery == MEGASAS_HBA_OPERATIONAL) + if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) break; } - if (adprecovery != MEGASAS_HBA_OPERATIONAL) { + if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) { dev_notice(&instance->pdev->dev, "reset: Stopping HBA.\n"); - spin_lock_irqsave(&instance->hba_lock, flags); - instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR; - spin_unlock_irqrestore(&instance->hba_lock, flags); + atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR); return FAILED; } @@ -2447,7 +2529,7 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance) } for (i = 0; i < resetwaittime; i++) { - int outstanding = atomic_read(&instance->fw_outstanding); + outstanding = atomic_read(&instance->fw_outstanding); if (!outstanding) break; @@ -2466,67 +2548,60 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance) } i = 0; - kill_adapter_flag = 0; + outstanding = atomic_read(&instance->fw_outstanding); + fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK; + + if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL))) + goto no_outstanding; + + if (instance->disableOnlineCtrlReset) + goto kill_hba_and_failed; do { - fw_state = instance->instancet->read_fw_status_reg( - instance->reg_set) & MFI_STATE_MASK; - if ((fw_state == MFI_STATE_FAULT) && - (instance->disableOnlineCtrlReset == 0)) { - if (i == 3) { - kill_adapter_flag = 2; - break; - } + if ((fw_state == MFI_STATE_FAULT) || atomic_read(&instance->fw_outstanding)) { + dev_info(&instance->pdev->dev, + "%s:%d waiting_for_outstanding: before issue OCR. 
FW state = 0x%x, outstanding 0x%x\n", + __func__, __LINE__, fw_state, atomic_read(&instance->fw_outstanding)); if (i == 3) + goto kill_hba_and_failed; megasas_do_ocr(instance); - kill_adapter_flag = 1; - /* wait for 1 secs to let FW finish the pending cmds */ - msleep(1000); + if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { + dev_info(&instance->pdev->dev, "%s:%d OCR failed and HBA is killed.\n", + __func__, __LINE__); + return FAILED; + } + dev_info(&instance->pdev->dev, "%s:%d waiting_for_outstanding: after issue OCR.\n", + __func__, __LINE__); + + for (sl = 0; sl < 10; sl++) + msleep(500); + + outstanding = atomic_read(&instance->fw_outstanding); + + fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK; + if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL))) + goto no_outstanding; } i++; } while (i <= 3); - if (atomic_read(&instance->fw_outstanding) && !kill_adapter_flag) { - if (instance->disableOnlineCtrlReset == 0) { - megasas_do_ocr(instance); +no_outstanding: - /* wait for 5 secs to let FW finish the pending cmds */ - for (i = 0; i < wait_time; i++) { - int outstanding = - atomic_read(&instance->fw_outstanding); - if (!outstanding) - return SUCCESS; - msleep(1000); - } - } - } + dev_info(&instance->pdev->dev, "%s:%d no more pending commands remain after reset handling.\n", + __func__, __LINE__); + return SUCCESS; - if (atomic_read(&instance->fw_outstanding) || - (kill_adapter_flag == 2)) { - dev_notice(&instance->pdev->dev, "pending cmds after reset\n"); - /* - * Send signal to FW to stop processing any pending cmds. - * The controller will be taken offline by the OS now. - */ - if ((instance->pdev->device == - PCI_DEVICE_ID_LSI_SAS0073SKINNY) || - (instance->pdev->device == - PCI_DEVICE_ID_LSI_SAS0071SKINNY)) { - writel(MFI_STOP_ADP, - &instance->reg_set->doorbell); - } else { - writel(MFI_STOP_ADP, - &instance->reg_set->inbound_doorbell); - } - megasas_dump_pending_frames(instance); - spin_lock_irqsave(&instance->hba_lock, flags); - instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR; - spin_unlock_irqrestore(&instance->hba_lock, flags); - return FAILED; - } +kill_hba_and_failed: - dev_notice(&instance->pdev->dev, "no pending cmds after reset\n"); + /* Reset not supported, kill adapter */ + dev_info(&instance->pdev->dev, "%s:%d killing adapter scsi%d" + " disableOnlineCtrlReset %d fw_outstanding %d\n", + __func__, __LINE__, instance->host->host_no, instance->disableOnlineCtrlReset, + atomic_read(&instance->fw_outstanding)); + megasas_dump_pending_frames(instance); + megaraid_sas_kill_hba(instance); - return SUCCESS; + return FAILED; } /** @@ -2547,7 +2622,7 @@ static int megasas_generic_reset(struct scsi_cmnd *scmd) scmd_printk(KERN_NOTICE, scmd, "megasas: RESET cmd=%x retries=%x\n", scmd->cmnd[0], scmd->retries); - if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) { + if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { dev_err(&instance->pdev->dev, "cannot recover from previous reset failures\n"); return FAILED; } @@ -2575,7 +2650,7 @@ blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd) unsigned long flags; if (time_after(jiffies, scmd->jiffies_at_alloc + - (MEGASAS_DEFAULT_CMD_TIMEOUT * 2) * HZ)) { + (scmd_timeout * 2) * HZ)) { return BLK_EH_NOT_HANDLED; } @@ -2851,6 +2926,16 @@ megasas_page_size_show(struct device *cdev, return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)PAGE_SIZE - 1); } +static ssize_t +megasas_ldio_outstanding_show(struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata; + + return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->ldio_outstanding)); +} +
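The new ldio_outstanding attribute above follows the stock read-only sysfs pattern: an atomic_t sampled without any lock and formatted into the page-sized buffer sysfs provides. A stripped-down sketch of the same shape, with hypothetical my_* names standing in for the driver's symbols (the real attribute is wired up via megaraid_host_attrs just below):

#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/stat.h>

static atomic_t my_counter = ATOMIC_INIT(0);

/* Read-only show() method: atomic_read() needs no locking, and the
 * buffer sysfs hands in is always PAGE_SIZE bytes. */
static ssize_t my_counter_show(struct device *cdev,
			       struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&my_counter));
}
static DEVICE_ATTR(my_counter, S_IRUGO, my_counter_show, NULL);

Once a host template publishes such an attribute, userspace reads it from /sys/class/scsi_host/hostN/my_counter.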
static DEVICE_ATTR(fw_crash_buffer, S_IRUGO | S_IWUSR, megasas_fw_crash_buffer_show, megasas_fw_crash_buffer_store); static DEVICE_ATTR(fw_crash_buffer_size, S_IRUGO, @@ -2859,12 +2944,15 @@ static DEVICE_ATTR(fw_crash_state, S_IRUGO | S_IWUSR, megasas_fw_crash_state_show, megasas_fw_crash_state_store); static DEVICE_ATTR(page_size, S_IRUGO, megasas_page_size_show, NULL); +static DEVICE_ATTR(ldio_outstanding, S_IRUGO, + megasas_ldio_outstanding_show, NULL); struct device_attribute *megaraid_host_attrs[] = { &dev_attr_fw_crash_buffer_size, &dev_attr_fw_crash_buffer, &dev_attr_fw_crash_state, &dev_attr_page_size, + &dev_attr_ldio_outstanding, NULL, }; @@ -2878,6 +2966,7 @@ static struct scsi_host_template megasas_template = { .proc_name = "megaraid_sas", .slave_configure = megasas_slave_configure, .slave_alloc = megasas_slave_alloc, + .slave_destroy = megasas_slave_destroy, .queuecommand = megasas_queue_command, .eh_device_reset_handler = megasas_reset_device, .eh_bus_reset_handler = megasas_reset_bus_host, @@ -3277,13 +3366,13 @@ process_fw_state_change_wq(struct work_struct *work) u32 wait; unsigned long flags; - if (instance->adprecovery != MEGASAS_ADPRESET_SM_INFAULT) { + if (atomic_read(&instance->adprecovery) != MEGASAS_ADPRESET_SM_INFAULT) { dev_notice(&instance->pdev->dev, "error, recovery st %x\n", - instance->adprecovery); + atomic_read(&instance->adprecovery)); return ; } - if (instance->adprecovery == MEGASAS_ADPRESET_SM_INFAULT) { + if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) { dev_notice(&instance->pdev->dev, "FW detected to be in fault" "state, restarting it...\n"); @@ -3326,7 +3415,7 @@ process_fw_state_change_wq(struct work_struct *work) megasas_issue_init_mfi(instance); spin_lock_irqsave(&instance->hba_lock, flags); - instance->adprecovery = MEGASAS_HBA_OPERATIONAL; + atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL); spin_unlock_irqrestore(&instance->hba_lock, flags); instance->instancet->enable_intr(instance); @@ -3391,14 +3480,14 @@ megasas_deplete_reply_queue(struct megasas_instance *instance, instance->instancet->disable_intr(instance); - instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT; + atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT); instance->issuepend_done = 0; atomic_set(&instance->fw_outstanding, 0); megasas_internal_reset_defer_cmds(instance); dev_notice(&instance->pdev->dev, "fwState=%x, stage:%d\n", - fw_state, instance->adprecovery); + fw_state, atomic_read(&instance->adprecovery)); schedule_work(&instance->work_init); return IRQ_HANDLED;
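Everything from here on consumes the DCMD completion codes this patch introduces in megaraid_sas.h (DCMD_NOT_FIRED, DCMD_FAILED, DCMD_TIMEOUT, DCMD_SUCCESS; that header hunk is not shown in this excerpt). A hedged sketch of the caller-side contract the helpers below implement, not literal driver code; the key rule visible in every hunk is that only DCMD_TIMEOUT keeps ownership of the frame, because firmware may still complete it after an OCR refire:

/* Illustrative fragment; instance/cmd/ret come from the surrounding code. */
ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
switch (ret) {
case DCMD_SUCCESS:	/* payload in the DMA buffer is valid */
	break;
case DCMD_FAILED:	/* firmware completed the command with an error */
case DCMD_NOT_FIRED:	/* HBA dead or issue_dcmd() refused the frame */
	/* plain error handling; the frame may be returned */
	break;
case DCMD_TIMEOUT:	/* choose between OCR, kill adapter, or ignore */
	break;
}
if (ret != DCMD_TIMEOUT)
	megasas_return_cmd(instance, cmd);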
@@ -3852,6 +3941,92 @@ int megasas_alloc_cmds(struct megasas_instance *instance) } /* + * dcmd_timeout_ocr_possible - Check if OCR is possible based on Driver/FW state. + * @instance: Adapter soft state + * + * Return INITIATE_OCR only for Fusion adapters, when driver load/unload is not + * in progress and the FW is not already under OCR; MFI adapters get + * KILL_ADAPTER, and an in-progress reset or unload gets IGNORE_TIMEOUT. + */ +inline int +dcmd_timeout_ocr_possible(struct megasas_instance *instance) { + + if (!instance->ctrl_context) + return KILL_ADAPTER; + else if (instance->unload || + test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags)) + return IGNORE_TIMEOUT; + else + return INITIATE_OCR; +} + +static int +megasas_get_pd_info(struct megasas_instance *instance, u16 device_id) +{ + int ret; + struct megasas_cmd *cmd; + struct megasas_dcmd_frame *dcmd; + + cmd = megasas_get_cmd(instance); + + if (!cmd) { + dev_err(&instance->pdev->dev, "Failed to get cmd %s\n", __func__); + return -ENOMEM; + } + + dcmd = &cmd->frame->dcmd; + + memset(instance->pd_info, 0, sizeof(*instance->pd_info)); + memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); + + dcmd->mbox.s[0] = cpu_to_le16(device_id); + dcmd->cmd = MFI_CMD_DCMD; + dcmd->cmd_status = 0xFF; + dcmd->sge_count = 1; + dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); + dcmd->timeout = 0; + dcmd->pad_0 = 0; + dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_PD_INFO)); + dcmd->opcode = cpu_to_le32(MR_DCMD_PD_GET_INFO); + dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->pd_info_h); + dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_PD_INFO)); + + if (instance->ctrl_context && !instance->mask_interrupts) + ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); + else + ret = megasas_issue_polled(instance, cmd); + + switch (ret) { + case DCMD_SUCCESS: + instance->pd_list[device_id].interface = + instance->pd_info->state.ddf.pdType.intf; + break; + + case DCMD_TIMEOUT: + + switch (dcmd_timeout_ocr_possible(instance)) { + case INITIATE_OCR: + cmd->flags |= DRV_DCMD_SKIP_REFIRE; + megasas_reset_fusion(instance->host, + MFI_IO_TIMEOUT_OCR); + break; + case KILL_ADAPTER: + megaraid_sas_kill_hba(instance); + break; + case IGNORE_TIMEOUT: + dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", + __func__, __LINE__); + break; + } + + break; + } + + if (ret != DCMD_TIMEOUT) + megasas_return_cmd(instance, cmd); + + return ret; +} +/* * megasas_get_pd_list_info - Returns FW's pd_list structure * @instance: Adapter soft state * @pd_list: pd_list structure @@ -3906,42 +4081,72 @@ megasas_get_pd_list(struct megasas_instance *instance) if (instance->ctrl_context && !instance->mask_interrupts) ret = megasas_issue_blocked_cmd(instance, cmd, - MEGASAS_BLOCKED_CMD_TIMEOUT); + MFI_IO_TIMEOUT_SECS); else ret = megasas_issue_polled(instance, cmd); - /* - * the following function will get the instance PD LIST. - */ + switch (ret) { + case DCMD_FAILED: + megaraid_sas_kill_hba(instance); + break; + case DCMD_TIMEOUT: + + switch (dcmd_timeout_ocr_possible(instance)) { + case INITIATE_OCR: + cmd->flags |= DRV_DCMD_SKIP_REFIRE; + /* + * DCMD failed from AEN path. + * AEN path already holds reset_mutex to avoid PCI access + * while OCR is in progress.
+ */ + mutex_unlock(&instance->reset_mutex); + megasas_reset_fusion(instance->host, + MFI_IO_TIMEOUT_OCR); + mutex_lock(&instance->reset_mutex); + break; + case KILL_ADAPTER: + megaraid_sas_kill_hba(instance); + break; + case IGNORE_TIMEOUT: + dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", + __func__, __LINE__); + break; + } - pd_addr = ci->addr; + break; + + case DCMD_SUCCESS: + pd_addr = ci->addr; - if (ret == 0 && - (le32_to_cpu(ci->count) < - (MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL))) { + if ((le32_to_cpu(ci->count) > + (MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL))) + break; memset(instance->local_pd_list, 0, - MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)); + MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)); for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) { - instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].tid = - le16_to_cpu(pd_addr->deviceId); + le16_to_cpu(pd_addr->deviceId); instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveType = - pd_addr->scsiDevType; + pd_addr->scsiDevType; instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveState = - MR_PD_STATE_SYSTEM; + MR_PD_STATE_SYSTEM; pd_addr++; } + memcpy(instance->pd_list, instance->local_pd_list, sizeof(instance->pd_list)); + break; + } pci_free_consistent(instance->pdev, MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), ci, ci_h); - megasas_return_cmd(instance, cmd); + if (ret != DCMD_TIMEOUT) + megasas_return_cmd(instance, cmd); return ret; } @@ -4002,33 +4207,63 @@ megasas_get_ld_list(struct megasas_instance *instance) if (instance->ctrl_context && !instance->mask_interrupts) ret = megasas_issue_blocked_cmd(instance, cmd, - MEGASAS_BLOCKED_CMD_TIMEOUT); + MFI_IO_TIMEOUT_SECS); else ret = megasas_issue_polled(instance, cmd); - ld_count = le32_to_cpu(ci->ldCount); - /* the following function will get the instance PD LIST */ + switch (ret) { + case DCMD_FAILED: + megaraid_sas_kill_hba(instance); + break; + case DCMD_TIMEOUT: + + switch (dcmd_timeout_ocr_possible(instance)) { + case INITIATE_OCR: + cmd->flags |= DRV_DCMD_SKIP_REFIRE; + /* + * DCMD failed from AEN path. + * AEN path already holds reset_mutex to avoid PCI access + * while OCR is in progress.
+ */ + mutex_unlock(&instance->reset_mutex); + megasas_reset_fusion(instance->host, + MFI_IO_TIMEOUT_OCR); + mutex_lock(&instance->reset_mutex); + break; + case KILL_ADAPTER: + megaraid_sas_kill_hba(instance); + break; + case IGNORE_TIMEOUT: + dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", + __func__, __LINE__); + break; + } + + break; + + case DCMD_SUCCESS: + ld_count = le32_to_cpu(ci->ldCount); + + if (ld_count > instance->fw_supported_vd_count) + break; - if ((ret == 0) && (ld_count <= instance->fw_supported_vd_count)) { memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT); for (ld_index = 0; ld_index < ld_count; ld_index++) { if (ci->ldList[ld_index].state != 0) { ids = ci->ldList[ld_index].ref.targetId; - instance->ld_ids[ids] = - ci->ldList[ld_index].ref.targetId; + instance->ld_ids[ids] = ci->ldList[ld_index].ref.targetId; } } + + break; } - pci_free_consistent(instance->pdev, - sizeof(struct MR_LD_LIST), - ci, - ci_h); + pci_free_consistent(instance->pdev, sizeof(struct MR_LD_LIST), ci, ci_h); + + if (ret != DCMD_TIMEOUT) + megasas_return_cmd(instance, cmd); - megasas_return_cmd(instance, cmd); return ret; } @@ -4090,26 +4325,61 @@ megasas_ld_list_query(struct megasas_instance *instance, u8 query_type) dcmd->pad_0 = 0; if (instance->ctrl_context && !instance->mask_interrupts) - ret = megasas_issue_blocked_cmd(instance, cmd, - MEGASAS_BLOCKED_CMD_TIMEOUT); + ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); else ret = megasas_issue_polled(instance, cmd); - tgtid_count = le32_to_cpu(ci->count); + switch (ret) { + case DCMD_FAILED: + dev_info(&instance->pdev->dev, + "DCMD not supported by firmware - %s %d\n", + __func__, __LINE__); + ret = megasas_get_ld_list(instance); + break; + case DCMD_TIMEOUT: + switch (dcmd_timeout_ocr_possible(instance)) { + case INITIATE_OCR: + cmd->flags |= DRV_DCMD_SKIP_REFIRE; + /* + * DCMD failed from AEN path. + * AEN path already holds reset_mutex to avoid PCI access + * while OCR is in progress. + */ + mutex_unlock(&instance->reset_mutex); + megasas_reset_fusion(instance->host, + MFI_IO_TIMEOUT_OCR); + mutex_lock(&instance->reset_mutex); + break; + case KILL_ADAPTER: + megaraid_sas_kill_hba(instance); + break; + case IGNORE_TIMEOUT: + dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", + __func__, __LINE__); + break; + } + + break; + case DCMD_SUCCESS: + tgtid_count = le32_to_cpu(ci->count); + + if ((tgtid_count > (instance->fw_supported_vd_count))) + break; - if ((ret == 0) && (tgtid_count <= (instance->fw_supported_vd_count))) { memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); for (ld_index = 0; ld_index < tgtid_count; ld_index++) { ids = ci->targetId[ld_index]; instance->ld_ids[ids] = ci->targetId[ld_index]; } + break; } pci_free_consistent(instance->pdev, sizeof(struct MR_LD_TARGETID_LIST), - ci, ci_h); + ci, ci_h); - megasas_return_cmd(instance, cmd); + if (ret != DCMD_TIMEOUT) + megasas_return_cmd(instance, cmd); return ret; }
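The unlock/relock of reset_mutex around megasas_reset_fusion() in the hunks above exists because these DCMDs are issued from the AEN worker with reset_mutex already held, and megasas_reset_fusion() takes that mutex itself; keeping it held across the call would deadlock. A sketch of the implied caller-side shape, under that assumption:

/* Sketch of the AEN-path caller; the helper may drop and retake
 * reset_mutex internally when the DCMD times out, so the caller must
 * not rely on the lock being held continuously across the call. */
mutex_lock(&instance->reset_mutex);
ret = megasas_get_pd_list(instance);	/* may unlock/relock on timeout */
mutex_unlock(&instance->reset_mutex);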
@@ -4223,38 +4493,73 @@ megasas_get_ctrl_info(struct megasas_instance *instance) dcmd->mbox.b[0] = 1; if (instance->ctrl_context && !instance->mask_interrupts) - ret = megasas_issue_blocked_cmd(instance, cmd, - MEGASAS_BLOCKED_CMD_TIMEOUT); + ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); else ret = megasas_issue_polled(instance, cmd); - if (!ret) { + switch (ret) { + case DCMD_SUCCESS: memcpy(ctrl_info, ci, sizeof(struct megasas_ctrl_info)); + /* Save required controller information in + * CPU endianness format. + */ le32_to_cpus((u32 *)&ctrl_info->properties.OnOffProperties); le32_to_cpus((u32 *)&ctrl_info->adapterOperations2); le32_to_cpus((u32 *)&ctrl_info->adapterOperations3); + + /* Update the latest Ext VD info. + * From Init path, store current firmware details. + * From OCR path, detect any firmware property changes + * in case of firmware upgrade without system reboot. + */ megasas_update_ext_vd_details(instance); instance->use_seqnum_jbod_fp = ctrl_info->adapterOperations3.useSeqNumJbodFP; + + /* Check whether controller is iMR or MR */ instance->is_imr = (ctrl_info->memory_size ? 0 : 1); dev_info(&instance->pdev->dev, - "controller type\t: %s(%dMB)\n", - instance->is_imr ? "iMR" : "MR", - le16_to_cpu(ctrl_info->memory_size)); + "controller type\t: %s(%dMB)\n", + instance->is_imr ? "iMR" : "MR", + le16_to_cpu(ctrl_info->memory_size)); + instance->disableOnlineCtrlReset = ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset; - dev_info(&instance->pdev->dev, "Online Controller Reset(OCR)\t: %s\n", - instance->disableOnlineCtrlReset ? "Disabled" : "Enabled"); instance->secure_jbod_support = ctrl_info->adapterOperations3.supportSecurityonJBOD; + dev_info(&instance->pdev->dev, "Online Controller Reset(OCR)\t: %s\n", + instance->disableOnlineCtrlReset ? "Disabled" : "Enabled"); dev_info(&instance->pdev->dev, "Secure JBOD support\t: %s\n", instance->secure_jbod_support ? "Yes" : "No"); + break; + + case DCMD_TIMEOUT: + switch (dcmd_timeout_ocr_possible(instance)) { + case INITIATE_OCR: + cmd->flags |= DRV_DCMD_SKIP_REFIRE; + megasas_reset_fusion(instance->host, + MFI_IO_TIMEOUT_OCR); + break; + case KILL_ADAPTER: + megaraid_sas_kill_hba(instance); + break; + case IGNORE_TIMEOUT: + dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", + __func__, __LINE__); + break; + } + break; + case DCMD_FAILED: + megaraid_sas_kill_hba(instance); + break; + } pci_free_consistent(instance->pdev, sizeof(struct megasas_ctrl_info), ci, ci_h); megasas_return_cmd(instance, cmd); + + return ret; } @@ -4304,12 +4609,28 @@ int megasas_set_crash_dump_params(struct megasas_instance *instance, dcmd->sgl.sge32[0].length = cpu_to_le32(CRASH_DMA_BUF_SIZE); if (instance->ctrl_context && !instance->mask_interrupts) - ret = megasas_issue_blocked_cmd(instance, cmd, - MEGASAS_BLOCKED_CMD_TIMEOUT); + ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); else ret = megasas_issue_polled(instance, cmd); - megasas_return_cmd(instance, cmd); + if (ret == DCMD_TIMEOUT) { + switch (dcmd_timeout_ocr_possible(instance)) { + case INITIATE_OCR: + cmd->flags |= DRV_DCMD_SKIP_REFIRE; + megasas_reset_fusion(instance->host, + MFI_IO_TIMEOUT_OCR); + break; + case KILL_ADAPTER: + megaraid_sas_kill_hba(instance); + break; + case IGNORE_TIMEOUT: + dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", + __func__, __LINE__); + break; + } + } else + megasas_return_cmd(instance, cmd); + return ret; } @@ -4426,6 +4747,7 @@ megasas_init_adapter_mfi(struct megasas_instance *instance) sema_init(&instance->ioctl_sem, (MEGASAS_MFI_IOCTL_CMDS)); } + instance->cur_can_queue = instance->max_scsi_cmds; /* * Create a pool of commands */ @@ -4756,6 +5078,9 @@ static int megasas_init_fw(struct megasas_instance *instance) instance->msix_vectors = ((scratch_pad_2 & MR_MAX_REPLY_QUEUES_EXT_OFFSET) >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1; + if (rdpq_enable) + instance->is_rdpq = (scratch_pad_2 & MR_RDPQ_MODE_OFFSET) ?
+ 1 : 0; fw_msix_count = instance->msix_vectors; /* Save 1-15 reply post index address to local memory * Index 0 is already saved from reg offset @@ -4792,6 +5117,8 @@ static int megasas_init_fw(struct megasas_instance *instance) dev_info(&instance->pdev->dev, "current msix/online cpus\t: (%d/%d)\n", instance->msix_vectors, (unsigned int)num_online_cpus()); + dev_info(&instance->pdev->dev, + "RDPQ mode\t: (%s)\n", instance->is_rdpq ? "enabled" : "disabled"); tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet, (unsigned long)instance); @@ -4856,7 +5183,9 @@ static int megasas_init_fw(struct megasas_instance *instance) tmp_sectors = min_t(u32, max_sectors_1, max_sectors_2); - instance->mpio = ctrl_info->adapterOperations2.mpio; + instance->peerIsPresent = ctrl_info->cluster.peerIsPresent; + instance->passive = ctrl_info->cluster.passive; + memcpy(instance->clusterId, ctrl_info->clusterId, sizeof(instance->clusterId)); instance->UnevenSpanSupport = ctrl_info->adapterOperations2.supportUnevenSpans; if (instance->UnevenSpanSupport) { @@ -4932,6 +5261,11 @@ static int megasas_init_fw(struct megasas_instance *instance) instance->throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH; + if (resetwaittime > MEGASAS_RESET_WAIT_TIME) + resetwaittime = MEGASAS_RESET_WAIT_TIME; + + if ((scmd_timeout < 10) || (scmd_timeout > MEGASAS_DEFAULT_CMD_TIMEOUT)) + scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT; /* Launch SR-IOV heartbeat timer */ if (instance->requestorId) { @@ -5035,10 +5369,8 @@ megasas_get_seq_num(struct megasas_instance *instance, dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(el_info_h); dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_log_info)); - if (megasas_issue_blocked_cmd(instance, cmd, 30)) - dev_err(&instance->pdev->dev, "Command timedout" - "from %s\n", __func__); - else { + if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS) == + DCMD_SUCCESS) { /* * Copy the data back into callers buffer */ @@ -5047,7 +5379,9 @@ megasas_get_seq_num(struct megasas_instance *instance, eli->clear_seq_num = el_info->clear_seq_num; eli->shutdown_seq_num = el_info->shutdown_seq_num; eli->boot_seq_num = el_info->boot_seq_num; - } + } else + dev_err(&instance->pdev->dev, "DCMD failed " + "from %s\n", __func__); pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info), el_info, el_info_h); @@ -5262,6 +5596,8 @@ static int megasas_io_attach(struct megasas_instance *instance) if (instance->ctrl_context) { host->hostt->eh_device_reset_handler = NULL; host->hostt->eh_bus_reset_handler = NULL; + host->hostt->eh_target_reset_handler = megasas_reset_target_fusion; + host->hostt->eh_abort_handler = megasas_task_abort_fusion; } /* @@ -5447,7 +5783,7 @@ static int megasas_probe_one(struct pci_dev *pdev, instance->flag_ieee = 0; instance->ev = NULL; instance->issuepend_done = 1; - instance->adprecovery = MEGASAS_HBA_OPERATIONAL; + atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL); instance->is_imr = 0; instance->evt_detail = pci_alloc_consistent(pdev, @@ -5461,6 +5797,12 @@ static int megasas_probe_one(struct pci_dev *pdev, goto fail_alloc_dma_buf; } + instance->pd_info = pci_alloc_consistent(pdev, + sizeof(struct MR_PD_INFO), &instance->pd_info_h); + + if (!instance->pd_info) + dev_err(&instance->pdev->dev, "Failed to alloc mem for pd_info\n"); + /* * Initialize locks and queues */ @@ -5476,8 +5818,8 @@ static int megasas_probe_one(struct pci_dev *pdev, spin_lock_init(&instance->hba_lock); spin_lock_init(&instance->completion_lock); - 
mutex_init(&instance->aen_mutex); mutex_init(&instance->reset_mutex); + mutex_init(&instance->hba_mutex); /* * Initialize PCI related and misc parameters @@ -5592,6 +5934,10 @@ fail_alloc_dma_buf: instance->evt_detail, instance->evt_detail_h); + if (instance->pd_info) + pci_free_consistent(pdev, sizeof(struct MR_PD_INFO), + instance->pd_info, + instance->pd_info_h); if (instance->producer) pci_free_consistent(pdev, sizeof(u32), instance->producer, instance->producer_h); @@ -5616,7 +5962,7 @@ static void megasas_flush_cache(struct megasas_instance *instance) struct megasas_cmd *cmd; struct megasas_dcmd_frame *dcmd; - if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) + if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) return; cmd = megasas_get_cmd(instance); @@ -5638,9 +5984,12 @@ static void megasas_flush_cache(struct megasas_instance *instance) dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH); dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE; - if (megasas_issue_blocked_cmd(instance, cmd, 30)) - dev_err(&instance->pdev->dev, "Command timedout" - " from %s\n", __func__); + if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS) + != DCMD_SUCCESS) { + dev_err(&instance->pdev->dev, + "return from %s %d\n", __func__, __LINE__); + return; + } megasas_return_cmd(instance, cmd); } @@ -5656,7 +6005,7 @@ static void megasas_shutdown_controller(struct megasas_instance *instance, struct megasas_cmd *cmd; struct megasas_dcmd_frame *dcmd; - if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) + if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) return; cmd = megasas_get_cmd(instance); @@ -5666,13 +6015,13 @@ static void megasas_shutdown_controller(struct megasas_instance *instance, if (instance->aen_cmd) megasas_issue_blocked_abort_cmd(instance, - instance->aen_cmd, MEGASAS_BLOCKED_CMD_TIMEOUT); + instance->aen_cmd, MFI_IO_TIMEOUT_SECS); if (instance->map_update_cmd) megasas_issue_blocked_abort_cmd(instance, - instance->map_update_cmd, MEGASAS_BLOCKED_CMD_TIMEOUT); + instance->map_update_cmd, MFI_IO_TIMEOUT_SECS); if (instance->jbod_seq_cmd) megasas_issue_blocked_abort_cmd(instance, - instance->jbod_seq_cmd, MEGASAS_BLOCKED_CMD_TIMEOUT); + instance->jbod_seq_cmd, MFI_IO_TIMEOUT_SECS); dcmd = &cmd->frame->dcmd; @@ -5687,9 +6036,12 @@ static void megasas_shutdown_controller(struct megasas_instance *instance, dcmd->data_xfer_len = 0; dcmd->opcode = cpu_to_le32(opcode); - if (megasas_issue_blocked_cmd(instance, cmd, 30)) - dev_err(&instance->pdev->dev, "Command timedout" - "from %s\n", __func__); + if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS) + != DCMD_SUCCESS) { + dev_err(&instance->pdev->dev, + "return from %s %d\n", __func__, __LINE__); + return; + } megasas_return_cmd(instance, cmd); } @@ -5847,6 +6199,10 @@ fail_init_mfi: instance->evt_detail, instance->evt_detail_h); + if (instance->pd_info) + pci_free_consistent(pdev, sizeof(struct MR_PD_INFO), + instance->pd_info, + instance->pd_info_h); if (instance->producer) pci_free_consistent(pdev, sizeof(u32), instance->producer, instance->producer_h); @@ -5941,11 +6297,11 @@ static void megasas_detach_one(struct pci_dev *pdev) if (fusion->ld_drv_map[i]) free_pages((ulong)fusion->ld_drv_map[i], fusion->drv_map_pages); - if (fusion->pd_seq_sync) - dma_free_coherent(&instance->pdev->dev, - pd_seq_map_sz, - fusion->pd_seq_sync[i], - fusion->pd_seq_phys[i]); + if (fusion->pd_seq_sync[i]) + dma_free_coherent(&instance->pdev->dev, + pd_seq_map_sz, + fusion->pd_seq_sync[i], 
+ fusion->pd_seq_phys[i]); } free_pages((ulong)instance->ctrl_context, instance->ctrl_context_pages); @@ -5965,6 +6321,10 @@ static void megasas_detach_one(struct pci_dev *pdev) pci_free_consistent(pdev, sizeof(struct megasas_evt_detail), instance->evt_detail, instance->evt_detail_h); + if (instance->pd_info) + pci_free_consistent(pdev, sizeof(struct MR_PD_INFO), + instance->pd_info, + instance->pd_info_h); if (instance->vf_affiliation) pci_free_consistent(pdev, (MAX_LOGICAL_DRIVES + 1) * sizeof(struct MR_LD_VF_AFFILIATION), @@ -6090,7 +6450,7 @@ static int megasas_set_crash_dump_params_ioctl(struct megasas_cmd *cmd) for (i = 0; i < megasas_mgmt_info.max_index; i++) { local_instance = megasas_mgmt_info.instance[i]; if (local_instance && local_instance->crash_dump_drv_support) { - if ((local_instance->adprecovery == + if ((atomic_read(&local_instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) && !megasas_set_crash_dump_params(local_instance, crash_support)) { @@ -6227,7 +6587,15 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance, * cmd to the SCSI mid-layer */ cmd->sync_cmd = 1; - megasas_issue_blocked_cmd(instance, cmd, 0); + if (megasas_issue_blocked_cmd(instance, cmd, 0) == DCMD_NOT_FIRED) { + cmd->sync_cmd = 0; + dev_err(&instance->pdev->dev, + "return -EBUSY from %s %d opcode 0x%x cmd->cmd_status_drv 0x%x\n", + __func__, __LINE__, cmd->frame->dcmd.opcode, + cmd->cmd_status_drv); + return -EBUSY; + } + cmd->sync_cmd = 0; if (instance->unload == 1) { @@ -6330,7 +6698,7 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg) goto out_kfree_ioc; } - if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) { + if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { dev_err(&instance->pdev->dev, "Controller in crit error\n"); error = -ENODEV; goto out_kfree_ioc; @@ -6349,7 +6717,7 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg) for (i = 0; i < wait_time; i++) { spin_lock_irqsave(&instance->hba_lock, flags); - if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL) { + if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) { spin_unlock_irqrestore(&instance->hba_lock, flags); break; } @@ -6364,7 +6732,7 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg) } spin_lock_irqsave(&instance->hba_lock, flags); - if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) { + if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) { spin_unlock_irqrestore(&instance->hba_lock, flags); dev_err(&instance->pdev->dev, "timed out while" @@ -6406,7 +6774,7 @@ static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg) if (!instance) return -ENODEV; - if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) { + if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { return -ENODEV; } @@ -6417,7 +6785,7 @@ static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg) for (i = 0; i < wait_time; i++) { spin_lock_irqsave(&instance->hba_lock, flags); - if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL) { + if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) { spin_unlock_irqrestore(&instance->hba_lock, flags); break; @@ -6434,7 +6802,7 @@ static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg) } spin_lock_irqsave(&instance->hba_lock, flags); - if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) { + if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) { spin_unlock_irqrestore(&instance->hba_lock, flags); 
dev_err(&instance->pdev->dev, "timed out while waiting" "for HBA to recover\n"); @@ -6442,10 +6810,10 @@ static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg) } spin_unlock_irqrestore(&instance->hba_lock, flags); - mutex_lock(&instance->aen_mutex); + mutex_lock(&instance->reset_mutex); error = megasas_register_aen(instance, aen.seq_num, aen.class_locale_word); - mutex_unlock(&instance->aen_mutex); + mutex_unlock(&instance->reset_mutex); return error; } @@ -6476,9 +6844,9 @@ static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg) int i; int error = 0; compat_uptr_t ptr; - unsigned long local_raw_ptr; u32 local_sense_off; u32 local_sense_len; + u32 user_sense_off; if (clear_user(ioc, sizeof(*ioc))) return -EFAULT; @@ -6496,17 +6864,16 @@ static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg) * sense_len is not null, so prepare the 64bit value under * the same condition. */ - if (get_user(local_raw_ptr, ioc->frame.raw) || - get_user(local_sense_off, &ioc->sense_off) || - get_user(local_sense_len, &ioc->sense_len)) + if (get_user(local_sense_off, &ioc->sense_off) || + get_user(local_sense_len, &ioc->sense_len) || + get_user(user_sense_off, &cioc->sense_off)) return -EFAULT; - if (local_sense_len) { void __user **sense_ioc_ptr = - (void __user **)((u8*)local_raw_ptr + local_sense_off); + (void __user **)((u8 *)((unsigned long)&ioc->frame.raw) + local_sense_off); compat_uptr_t *sense_cioc_ptr = - (compat_uptr_t *)(cioc->frame.raw + cioc->sense_off); + (compat_uptr_t *)(((unsigned long)&cioc->frame.raw) + user_sense_off); if (get_user(ptr, sense_cioc_ptr) || put_user(compat_ptr(ptr), sense_ioc_ptr)) return -EFAULT; @@ -6647,6 +7014,7 @@ megasas_aen_polling(struct work_struct *work) int i, j, doscan = 0; u32 seq_num, wait_time = MEGASAS_RESET_WAIT_TIME; int error; + u8 dcmd_ret = DCMD_SUCCESS; if (!instance) { printk(KERN_ERR "invalid instance!\n"); @@ -6659,16 +7027,7 @@ megasas_aen_polling(struct work_struct *work) wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF; /* Don't run the event workqueue thread if OCR is running */ - for (i = 0; i < wait_time; i++) { - if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL) - break; - if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) { - dev_notice(&instance->pdev->dev, "%s waiting for " - "controller reset to finish for scsi%d\n", - __func__, instance->host->host_no); - } - msleep(1000); - } + mutex_lock(&instance->reset_mutex); instance->ev = NULL; host = instance->host; @@ -6676,212 +7035,127 @@ megasas_aen_polling(struct work_struct *work) megasas_decode_evt(instance); switch (le32_to_cpu(instance->evt_detail->code)) { - case MR_EVT_PD_INSERTED: - if (megasas_get_pd_list(instance) == 0) { - for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) { - for (j = 0; - j < MEGASAS_MAX_DEV_PER_CHANNEL; - j++) { - - pd_index = - (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; - - sdev1 = scsi_device_lookup(host, i, j, 0); - - if (instance->pd_list[pd_index].driveState - == MR_PD_STATE_SYSTEM) { - if (!sdev1) - scsi_add_device(host, i, j, 0); - - if (sdev1) - scsi_device_put(sdev1); - } - } - } - } - doscan = 0; - break; + case MR_EVT_PD_INSERTED: case MR_EVT_PD_REMOVED: - if (megasas_get_pd_list(instance) == 0) { - for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) { - for (j = 0; - j < MEGASAS_MAX_DEV_PER_CHANNEL; - j++) { - - pd_index = - (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; - - sdev1 = scsi_device_lookup(host, i, j, 0); - - if (instance->pd_list[pd_index].driveState - == MR_PD_STATE_SYSTEM) { - if (sdev1) - scsi_device_put(sdev1); 
- } else { - if (sdev1) { - scsi_remove_device(sdev1); - scsi_device_put(sdev1); - } - } - } - } - } - doscan = 0; + dcmd_ret = megasas_get_pd_list(instance); + if (dcmd_ret == DCMD_SUCCESS) + doscan = SCAN_PD_CHANNEL; break; case MR_EVT_LD_OFFLINE: case MR_EVT_CFG_CLEARED: case MR_EVT_LD_DELETED: - if (!instance->requestorId || - megasas_get_ld_vf_affiliation(instance, 0)) { - if (megasas_ld_list_query(instance, - MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) - megasas_get_ld_list(instance); - for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) { - for (j = 0; - j < MEGASAS_MAX_DEV_PER_CHANNEL; - j++) { - - ld_index = - (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; - - sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0); - - if (instance->ld_ids[ld_index] - != 0xff) { - if (sdev1) - scsi_device_put(sdev1); - } else { - if (sdev1) { - scsi_remove_device(sdev1); - scsi_device_put(sdev1); - } - } - } - } - doscan = 0; - } - break; case MR_EVT_LD_CREATED: if (!instance->requestorId || - megasas_get_ld_vf_affiliation(instance, 0)) { - if (megasas_ld_list_query(instance, - MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) - megasas_get_ld_list(instance); - for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) { - for (j = 0; - j < MEGASAS_MAX_DEV_PER_CHANNEL; - j++) { - ld_index = - (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; - - sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0); - - if (instance->ld_ids[ld_index] - != 0xff) { - if (!sdev1) - scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0); - } - if (sdev1) - scsi_device_put(sdev1); - } - } - doscan = 0; - } + (instance->requestorId && megasas_get_ld_vf_affiliation(instance, 0))) + dcmd_ret = megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST); + + if (dcmd_ret == DCMD_SUCCESS) + doscan = SCAN_VD_CHANNEL; + break; + case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED: case MR_EVT_FOREIGN_CFG_IMPORTED: case MR_EVT_LD_STATE_CHANGE: - doscan = 1; + dcmd_ret = megasas_get_pd_list(instance); + + if (dcmd_ret != DCMD_SUCCESS) + break; + + if (!instance->requestorId || + (instance->requestorId && megasas_get_ld_vf_affiliation(instance, 0))) + dcmd_ret = megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST); + + if (dcmd_ret != DCMD_SUCCESS) + break; + + doscan = SCAN_VD_CHANNEL | SCAN_PD_CHANNEL; + dev_info(&instance->pdev->dev, "scanning for scsi%d...\n", + instance->host->host_no); break; + case MR_EVT_CTRL_PROP_CHANGED: - megasas_get_ctrl_info(instance); - break; + dcmd_ret = megasas_get_ctrl_info(instance); + break; default: doscan = 0; break; } } else { dev_err(&instance->pdev->dev, "invalid evt_detail!\n"); + mutex_unlock(&instance->reset_mutex); kfree(ev); return; } - if (doscan) { - dev_info(&instance->pdev->dev, "scanning for scsi%d...\n", - instance->host->host_no); - if (megasas_get_pd_list(instance) == 0) { - for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) { - for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) { - pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j; - sdev1 = scsi_device_lookup(host, i, j, 0); - if (instance->pd_list[pd_index].driveState == - MR_PD_STATE_SYSTEM) { - if (!sdev1) { - scsi_add_device(host, i, j, 0); - } - if (sdev1) - scsi_device_put(sdev1); - } else { - if (sdev1) { - scsi_remove_device(sdev1); - scsi_device_put(sdev1); - } + mutex_unlock(&instance->reset_mutex); + + if (doscan & SCAN_PD_CHANNEL) { + for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) { + for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) { + pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j; + sdev1 = scsi_device_lookup(host, i, j, 0); + if 
(instance->pd_list[pd_index].driveState == + MR_PD_STATE_SYSTEM) { + if (!sdev1) + scsi_add_device(host, i, j, 0); + else + scsi_device_put(sdev1); + } else { + if (sdev1) { + scsi_remove_device(sdev1); + scsi_device_put(sdev1); } } } } + } - if (!instance->requestorId || - megasas_get_ld_vf_affiliation(instance, 0)) { - if (megasas_ld_list_query(instance, - MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) - megasas_get_ld_list(instance); - for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) { - for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; - j++) { - ld_index = - (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; - - sdev1 = scsi_device_lookup(host, - MEGASAS_MAX_PD_CHANNELS + i, j, 0); - if (instance->ld_ids[ld_index] - != 0xff) { - if (!sdev1) - scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0); - else - scsi_device_put(sdev1); - } else { - if (sdev1) { - scsi_remove_device(sdev1); - scsi_device_put(sdev1); - } + if (doscan & SCAN_VD_CHANNEL) { + for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) { + for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) { + ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; + sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0); + if (instance->ld_ids[ld_index] != 0xff) { + if (!sdev1) + scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0); + else + scsi_device_put(sdev1); + } else { + if (sdev1) { + scsi_remove_device(sdev1); + scsi_device_put(sdev1); } } } } } - if (instance->aen_cmd != NULL) { - kfree(ev); - return ; - } - - seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1; + if (dcmd_ret == DCMD_SUCCESS) + seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1; + else + seq_num = instance->last_seq_num; /* Register AEN with FW for latest sequence number plus 1 */ class_locale.members.reserved = 0; class_locale.members.locale = MR_EVT_LOCALE_ALL; class_locale.members.class = MR_EVT_CLASS_DEBUG; - mutex_lock(&instance->aen_mutex); + + if (instance->aen_cmd != NULL) { + kfree(ev); + return; + } + + mutex_lock(&instance->reset_mutex); error = megasas_register_aen(instance, seq_num, class_locale.word); - mutex_unlock(&instance->aen_mutex); - if (error) - dev_err(&instance->pdev->dev, "register aen failed error %x\n", error); + dev_err(&instance->pdev->dev, + "register aen failed error %x\n", error); + mutex_unlock(&instance->reset_mutex); kfree(ev); } diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c index 741509b35617..e413113c86ac 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fp.c +++ b/drivers/scsi/megaraid/megaraid_sas_fp.c @@ -1020,6 +1020,8 @@ MR_BuildRaidContext(struct megasas_instance *instance, /* assume this IO needs the full row - we'll adjust if not true */ regSize = stripSize; + io_info->do_fp_rlbypass = raid->capability.fpBypassRegionLock; + /* Check if we can send this I/O via FastPath */ if (raid->capability.fpCapable) { if (isRead) diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 8d630a552b07..98a848bdfdc2 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -91,7 +91,10 @@ void megasas_start_timer(struct megasas_instance *instance, struct timer_list *timer, void *fn, unsigned long interval); extern struct megasas_mgmt_info megasas_mgmt_info; -extern int resetwaittime; +extern unsigned int resetwaittime; +extern unsigned int dual_qdepth_disable; +static void megasas_free_rdpq_fusion(struct megasas_instance *instance); +static void megasas_free_reply_fusion(struct megasas_instance 
*instance); @@ -201,58 +204,72 @@ megasas_fire_cmd_fusion(struct megasas_instance *instance, &instance->reg_set->inbound_low_queue_port); writel(le32_to_cpu(req_desc->u.high), &instance->reg_set->inbound_high_queue_port); + mmiowb(); spin_unlock_irqrestore(&instance->hba_lock, flags); #endif } - /** - * megasas_teardown_frame_pool_fusion - Destroy the cmd frame DMA pool - * @instance: Adapter soft state + * megasas_fusion_update_can_queue - Do all Adapter Queue depth related calculations here + * @instance: Adapter soft state + * @fw_boot_context: Whether this function is called during probe or after OCR + * + * This function is only for fusion controllers. + * Update the host can_queue if firmware downgrades the max supported firmware commands. + * The firmware upgrade case will be skipped because the underlying firmware has + * more resources than are exposed to the OS. + * */ -static void megasas_teardown_frame_pool_fusion( - struct megasas_instance *instance) +static void +megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_context) { - int i; - struct fusion_context *fusion = instance->ctrl_context; - - u16 max_cmd = instance->max_fw_cmds; + u16 cur_max_fw_cmds = 0; + u16 ldio_threshold = 0; + struct megasas_register_set __iomem *reg_set; - struct megasas_cmd_fusion *cmd; + reg_set = instance->reg_set; - if (!fusion->sg_dma_pool || !fusion->sense_dma_pool) { - dev_err(&instance->pdev->dev, "dma pool is null. SG Pool %p, " - "sense pool : %p\n", fusion->sg_dma_pool, - fusion->sense_dma_pool); - return; - } + cur_max_fw_cmds = readl(&instance->reg_set->outbound_scratch_pad_3) & 0x00FFFF; - /* - * Return all frames to pool - */ - for (i = 0; i < max_cmd; i++) { + if (dual_qdepth_disable || !cur_max_fw_cmds) + cur_max_fw_cmds = instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF; + else + ldio_threshold = + (instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF) - MEGASAS_FUSION_IOCTL_CMDS; + + dev_info(&instance->pdev->dev, + "Current firmware maximum commands: %d\t LDIO threshold: %d\n", + cur_max_fw_cmds, ldio_threshold); + + if (fw_boot_context == OCR_CONTEXT) { + cur_max_fw_cmds = cur_max_fw_cmds - 1; + if (cur_max_fw_cmds <= instance->max_fw_cmds) { + instance->cur_can_queue = + cur_max_fw_cmds - (MEGASAS_FUSION_INTERNAL_CMDS + + MEGASAS_FUSION_IOCTL_CMDS); + instance->host->can_queue = instance->cur_can_queue; + instance->ldio_threshold = ldio_threshold; + } + } else { + instance->max_fw_cmds = cur_max_fw_cmds; + instance->ldio_threshold = ldio_threshold; - cmd = fusion->cmd_list[i]; + if (!instance->is_rdpq) + instance->max_fw_cmds = min_t(u16, instance->max_fw_cmds, 1024); - if (cmd->sg_frame) - pci_pool_free(fusion->sg_dma_pool, cmd->sg_frame, - cmd->sg_frame_phys_addr); + /* + * Reduce the max supported cmds by 1.
This is to ensure that the + * reply_q_sz (1 more than the max cmd that driver may send) + * does not exceed max cmds that the FW can support + */ + instance->max_fw_cmds = instance->max_fw_cmds-1; - if (cmd->sense) - pci_pool_free(fusion->sense_dma_pool, cmd->sense, - cmd->sense_phys_addr); + instance->max_scsi_cmds = instance->max_fw_cmds - + (MEGASAS_FUSION_INTERNAL_CMDS + + MEGASAS_FUSION_IOCTL_CMDS); + instance->cur_can_queue = instance->max_scsi_cmds; } - - /* - * Now destroy the pool itself - */ - pci_pool_destroy(fusion->sg_dma_pool); - pci_pool_destroy(fusion->sense_dma_pool); - - fusion->sg_dma_pool = NULL; - fusion->sense_dma_pool = NULL; } - /** * megasas_free_cmds_fusion - Free all the cmds in the free cmd pool * @instance: Adapter soft state @@ -262,55 +279,65 @@ megasas_free_cmds_fusion(struct megasas_instance *instance) { int i; struct fusion_context *fusion = instance->ctrl_context; + struct megasas_cmd_fusion *cmd; - u32 max_cmds, req_sz, reply_sz, io_frames_sz; + /* SG, Sense */ + for (i = 0; i < instance->max_fw_cmds; i++) { + cmd = fusion->cmd_list[i]; + if (cmd) { + if (cmd->sg_frame) + pci_pool_free(fusion->sg_dma_pool, cmd->sg_frame, + cmd->sg_frame_phys_addr); + if (cmd->sense) + pci_pool_free(fusion->sense_dma_pool, cmd->sense, + cmd->sense_phys_addr); + } + } + if (fusion->sg_dma_pool) { + pci_pool_destroy(fusion->sg_dma_pool); + fusion->sg_dma_pool = NULL; + } + if (fusion->sense_dma_pool) { + pci_pool_destroy(fusion->sense_dma_pool); + fusion->sense_dma_pool = NULL; + } - req_sz = fusion->request_alloc_sz; - reply_sz = fusion->reply_alloc_sz; - io_frames_sz = fusion->io_frames_alloc_sz; - max_cmds = instance->max_fw_cmds; + /* Reply Frame, Desc*/ + if (instance->is_rdpq) + megasas_free_rdpq_fusion(instance); + else + megasas_free_reply_fusion(instance); - /* Free descriptors and request Frames memory */ + /* Request Frame, Desc*/ if (fusion->req_frames_desc) - dma_free_coherent(&instance->pdev->dev, req_sz, - fusion->req_frames_desc, - fusion->req_frames_desc_phys); - - if (fusion->reply_frames_desc) { - pci_pool_free(fusion->reply_frames_desc_pool, - fusion->reply_frames_desc, - fusion->reply_frames_desc_phys); - pci_pool_destroy(fusion->reply_frames_desc_pool); - } - - if (fusion->io_request_frames) { + dma_free_coherent(&instance->pdev->dev, + fusion->request_alloc_sz, fusion->req_frames_desc, + fusion->req_frames_desc_phys); + if (fusion->io_request_frames) pci_pool_free(fusion->io_request_frames_pool, - fusion->io_request_frames, - fusion->io_request_frames_phys); + fusion->io_request_frames, + fusion->io_request_frames_phys); + if (fusion->io_request_frames_pool) { pci_pool_destroy(fusion->io_request_frames_pool); + fusion->io_request_frames_pool = NULL; } - /* Free the Fusion frame pool */ - megasas_teardown_frame_pool_fusion(instance); - /* Free all the commands in the cmd_list */ - for (i = 0; i < max_cmds; i++) + /* cmd_list */ + for (i = 0; i < instance->max_fw_cmds; i++) kfree(fusion->cmd_list[i]); - /* Free the cmd_list buffer itself */ kfree(fusion->cmd_list); - fusion->cmd_list = NULL; - } /** - * megasas_create_frame_pool_fusion - Creates DMA pool for cmd frames + * megasas_create_sg_sense_fusion - Creates DMA pool for cmd frames * @instance: Adapter soft state * */ -static int megasas_create_frame_pool_fusion(struct megasas_instance *instance) +static int megasas_create_sg_sense_fusion(struct megasas_instance *instance) { int i; u32 max_cmd; @@ -321,25 +348,17 @@ static int megasas_create_frame_pool_fusion(struct megasas_instance *instance) 
max_cmd = instance->max_fw_cmds; - /* - * Use DMA pool facility provided by PCI layer - */ - - fusion->sg_dma_pool = pci_pool_create("sg_pool_fusion", instance->pdev, - instance->max_chain_frame_sz, - 4, 0); - if (!fusion->sg_dma_pool) { - dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup request pool fusion\n"); - return -ENOMEM; - } - fusion->sense_dma_pool = pci_pool_create("sense pool fusion", - instance->pdev, - SCSI_SENSE_BUFFERSIZE, 64, 0); + fusion->sg_dma_pool = + pci_pool_create("mr_sg", instance->pdev, + instance->max_chain_frame_sz, 4, 0); + /* SCSI_SENSE_BUFFERSIZE = 96 bytes */ + fusion->sense_dma_pool = + pci_pool_create("mr_sense", instance->pdev, + SCSI_SENSE_BUFFERSIZE, 64, 0); - if (!fusion->sense_dma_pool) { - dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup sense pool fusion\n"); - pci_pool_destroy(fusion->sg_dma_pool); - fusion->sg_dma_pool = NULL; + if (!fusion->sense_dma_pool || !fusion->sg_dma_pool) { + dev_err(&instance->pdev->dev, + "Failed from %s %d\n", __func__, __LINE__); return -ENOMEM; } @@ -347,160 +366,280 @@ static int megasas_create_frame_pool_fusion(struct megasas_instance *instance) * Allocate and attach a frame to each of the commands in cmd_list */ for (i = 0; i < max_cmd; i++) { - cmd = fusion->cmd_list[i]; - cmd->sg_frame = pci_pool_alloc(fusion->sg_dma_pool, - GFP_KERNEL, - &cmd->sg_frame_phys_addr); + GFP_KERNEL, &cmd->sg_frame_phys_addr); cmd->sense = pci_pool_alloc(fusion->sense_dma_pool, - GFP_KERNEL, &cmd->sense_phys_addr); - /* - * megasas_teardown_frame_pool_fusion() takes care of freeing - * whatever has been allocated - */ + GFP_KERNEL, &cmd->sense_phys_addr); if (!cmd->sg_frame || !cmd->sense) { - dev_printk(KERN_DEBUG, &instance->pdev->dev, "pci_pool_alloc failed\n"); - megasas_teardown_frame_pool_fusion(instance); + dev_err(&instance->pdev->dev, + "Failed from %s %d\n", __func__, __LINE__); return -ENOMEM; } } return 0; } -/** - * megasas_alloc_cmds_fusion - Allocates the command packets - * @instance: Adapter soft state - * - * - * Each frame has a 32-bit field called context. This context is used to get - * back the megasas_cmd_fusion from the frame when a frame gets completed - * In this driver, the 32 bit values are the indices into an array cmd_list. - * This array is used only to look up the megasas_cmd_fusion given the context. - * The free commands themselves are maintained in a linked list called cmd_pool. - * - * cmds are formed in the io_request and sg_frame members of the - * megasas_cmd_fusion. The context field is used to get a request descriptor - * and is used as SMID of the cmd. - * SMID value range is from 1 to max_fw_cmds. - */ int -megasas_alloc_cmds_fusion(struct megasas_instance *instance) +megasas_alloc_cmdlist_fusion(struct megasas_instance *instance) { - int i, j, count; - u32 max_cmd, io_frames_sz; + u32 max_cmd, i; struct fusion_context *fusion; - struct megasas_cmd_fusion *cmd; - union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc; - u32 offset; - dma_addr_t io_req_base_phys; - u8 *io_req_base; fusion = instance->ctrl_context; max_cmd = instance->max_fw_cmds; + /* + * fusion->cmd_list is an array of struct megasas_cmd_fusion pointers. + * Allocate the dynamic array first and then allocate individual + * commands. 
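+ * A minimal sketch of the resulting layout (illustrative only, assuming
+ * max_fw_cmds == 4):
+ *
+ *   cmd_list --> [ p0 | p1 | p2 | p3 ]    one kzalloc'ed pointer array
+ *                  |    |    |    |
+ *                cmd0 cmd1 cmd2 cmd3      max_fw_cmds separate kzallocs
+ *
+ * kfree(NULL) is a no-op, so a partially populated list unwinds safely
+ * through megasas_free_cmds_fusion() on any allocation failure.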
+ */
+ fusion->cmd_list = kzalloc(sizeof(struct megasas_cmd_fusion *) * max_cmd,
+ GFP_KERNEL);
+ if (!fusion->cmd_list) {
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < max_cmd; i++) {
+ fusion->cmd_list[i] = kzalloc(sizeof(struct megasas_cmd_fusion),
+ GFP_KERNEL);
+ if (!fusion->cmd_list[i]) {
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+ }
+ return 0;
+}
+int
+megasas_alloc_request_fusion(struct megasas_instance *instance)
+{
+ struct fusion_context *fusion;
+
+ fusion = instance->ctrl_context;
+
+ fusion->req_frames_desc = dma_alloc_coherent(&instance->pdev->dev,
- fusion->request_alloc_sz,
- &fusion->req_frames_desc_phys, GFP_KERNEL);
-
+ fusion->request_alloc_sz,
+ &fusion->req_frames_desc_phys, GFP_KERNEL);
 if (!fusion->req_frames_desc) {
- dev_err(&instance->pdev->dev, "Could not allocate memory for "
- "request_frames\n");
- goto fail_req_desc;
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
 }
+ fusion->io_request_frames_pool =
+ pci_pool_create("mr_ioreq", instance->pdev,
+ fusion->io_frames_alloc_sz, 16, 0);
+
+ if (!fusion->io_request_frames_pool) {
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+
+ fusion->io_request_frames =
+ pci_pool_alloc(fusion->io_request_frames_pool,
+ GFP_KERNEL, &fusion->io_request_frames_phys);
+ if (!fusion->io_request_frames) {
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+int
+megasas_alloc_reply_fusion(struct megasas_instance *instance)
+{
+ int i, count;
+ struct fusion_context *fusion;
+ union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
+ fusion = instance->ctrl_context;
+
+ count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
 fusion->reply_frames_desc_pool =
- pci_pool_create("reply_frames pool", instance->pdev,
+ pci_pool_create("mr_reply", instance->pdev,
 fusion->reply_alloc_sz * count, 16, 0);
 if (!fusion->reply_frames_desc_pool) {
- dev_err(&instance->pdev->dev, "Could not allocate memory for "
- "reply_frame pool\n");
- goto fail_reply_desc;
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
 }
- fusion->reply_frames_desc =
- pci_pool_alloc(fusion->reply_frames_desc_pool, GFP_KERNEL,
- &fusion->reply_frames_desc_phys);
- if (!fusion->reply_frames_desc) {
- dev_err(&instance->pdev->dev, "Could not allocate memory for "
- "reply_frame pool\n");
- pci_pool_destroy(fusion->reply_frames_desc_pool);
- goto fail_reply_desc;
+ fusion->reply_frames_desc[0] =
+ pci_pool_alloc(fusion->reply_frames_desc_pool,
+ GFP_KERNEL, &fusion->reply_frames_desc_phys[0]);
+ if (!fusion->reply_frames_desc[0]) {
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
 }
-
- reply_desc = fusion->reply_frames_desc;
+ reply_desc = fusion->reply_frames_desc[0];
 for (i = 0; i < fusion->reply_q_depth * count; i++, reply_desc++)
 reply_desc->Words = cpu_to_le64(ULLONG_MAX);
- io_frames_sz = fusion->io_frames_alloc_sz;
+ /* This is not rdpq mode, but the driver still populates the
+ * reply_frames_desc array to use the same msix index in the ISR path.
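+ *
+ * A minimal sketch of the addressing this sets up (assuming, as above,
+ * that reply_alloc_sz is one vector's slice of the contiguous pool):
+ *
+ *   per_q = reply_alloc_sz / sizeof(union MPI2_REPLY_DESCRIPTORS_UNION);
+ *   reply_frames_desc[k] == reply_frames_desc[0] + k * per_q;
+ *
+ * so the ISR can index reply_frames_desc[MSIxIndex] the same way in both
+ * rdpq and non-rdpq modes.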
+ */ + for (i = 0; i < (count - 1); i++) + fusion->reply_frames_desc[i + 1] = + fusion->reply_frames_desc[i] + + (fusion->reply_alloc_sz)/sizeof(union MPI2_REPLY_DESCRIPTORS_UNION); + + return 0; +} - fusion->io_request_frames_pool = - pci_pool_create("io_request_frames pool", instance->pdev, - fusion->io_frames_alloc_sz, 16, 0); +int +megasas_alloc_rdpq_fusion(struct megasas_instance *instance) +{ + int i, j, count; + struct fusion_context *fusion; + union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc; - if (!fusion->io_request_frames_pool) { - dev_err(&instance->pdev->dev, "Could not allocate memory for " - "io_request_frame pool\n"); - goto fail_io_frames; + fusion = instance->ctrl_context; + + fusion->rdpq_virt = pci_alloc_consistent(instance->pdev, + sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * MAX_MSIX_QUEUES_FUSION, + &fusion->rdpq_phys); + if (!fusion->rdpq_virt) { + dev_err(&instance->pdev->dev, + "Failed from %s %d\n", __func__, __LINE__); + return -ENOMEM; } - fusion->io_request_frames = - pci_pool_alloc(fusion->io_request_frames_pool, GFP_KERNEL, - &fusion->io_request_frames_phys); - if (!fusion->io_request_frames) { - dev_err(&instance->pdev->dev, "Could not allocate memory for " - "io_request_frames frames\n"); - pci_pool_destroy(fusion->io_request_frames_pool); - goto fail_io_frames; + memset(fusion->rdpq_virt, 0, + sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * MAX_MSIX_QUEUES_FUSION); + count = instance->msix_vectors > 0 ? instance->msix_vectors : 1; + fusion->reply_frames_desc_pool = pci_pool_create("mr_rdpq", + instance->pdev, fusion->reply_alloc_sz, 16, 0); + + if (!fusion->reply_frames_desc_pool) { + dev_err(&instance->pdev->dev, + "Failed from %s %d\n", __func__, __LINE__); + return -ENOMEM; } - /* - * fusion->cmd_list is an array of struct megasas_cmd_fusion pointers. - * Allocate the dynamic array first and then allocate individual - * commands. - */ - fusion->cmd_list = kzalloc(sizeof(struct megasas_cmd_fusion *) - * max_cmd, GFP_KERNEL); + for (i = 0; i < count; i++) { + fusion->reply_frames_desc[i] = + pci_pool_alloc(fusion->reply_frames_desc_pool, + GFP_KERNEL, &fusion->reply_frames_desc_phys[i]); + if (!fusion->reply_frames_desc[i]) { + dev_err(&instance->pdev->dev, + "Failed from %s %d\n", __func__, __LINE__); + return -ENOMEM; + } - if (!fusion->cmd_list) { - dev_printk(KERN_DEBUG, &instance->pdev->dev, "out of memory. Could not alloc " - "memory for cmd_list_fusion\n"); - goto fail_cmd_list; + fusion->rdpq_virt[i].RDPQBaseAddress = + fusion->reply_frames_desc_phys[i]; + + reply_desc = fusion->reply_frames_desc[i]; + for (j = 0; j < fusion->reply_q_depth; j++, reply_desc++) + reply_desc->Words = cpu_to_le64(ULLONG_MAX); } + return 0; +} - max_cmd = instance->max_fw_cmds; - for (i = 0; i < max_cmd; i++) { - fusion->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd_fusion), - GFP_KERNEL); - if (!fusion->cmd_list[i]) { - dev_err(&instance->pdev->dev, "Could not alloc cmd list fusion\n"); +static void +megasas_free_rdpq_fusion(struct megasas_instance *instance) { - for (j = 0; j < i; j++) - kfree(fusion->cmd_list[j]); + int i; + struct fusion_context *fusion; - kfree(fusion->cmd_list); - fusion->cmd_list = NULL; - goto fail_cmd_list; - } + fusion = instance->ctrl_context; + + for (i = 0; i < MAX_MSIX_QUEUES_FUSION; i++) { + if (fusion->reply_frames_desc[i]) + pci_pool_free(fusion->reply_frames_desc_pool, + fusion->reply_frames_desc[i], + fusion->reply_frames_desc_phys[i]); } - /* The first 256 bytes (SMID 0) is not used. 
Don't add to cmd list */ - io_req_base = fusion->io_request_frames + - MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE; - io_req_base_phys = fusion->io_request_frames_phys + - MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE; + if (fusion->reply_frames_desc_pool) + pci_pool_destroy(fusion->reply_frames_desc_pool); + + if (fusion->rdpq_virt) + pci_free_consistent(instance->pdev, + sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * MAX_MSIX_QUEUES_FUSION, + fusion->rdpq_virt, fusion->rdpq_phys); +} + +static void +megasas_free_reply_fusion(struct megasas_instance *instance) { + + struct fusion_context *fusion; + + fusion = instance->ctrl_context; + + if (fusion->reply_frames_desc[0]) + pci_pool_free(fusion->reply_frames_desc_pool, + fusion->reply_frames_desc[0], + fusion->reply_frames_desc_phys[0]); + + if (fusion->reply_frames_desc_pool) + pci_pool_destroy(fusion->reply_frames_desc_pool); + +} + + +/** + * megasas_alloc_cmds_fusion - Allocates the command packets + * @instance: Adapter soft state + * + * + * Each frame has a 32-bit field called context. This context is used to get + * back the megasas_cmd_fusion from the frame when a frame gets completed + * In this driver, the 32 bit values are the indices into an array cmd_list. + * This array is used only to look up the megasas_cmd_fusion given the context. + * The free commands themselves are maintained in a linked list called cmd_pool. + * + * cmds are formed in the io_request and sg_frame members of the + * megasas_cmd_fusion. The context field is used to get a request descriptor + * and is used as SMID of the cmd. + * SMID value range is from 1 to max_fw_cmds. + */ +int +megasas_alloc_cmds_fusion(struct megasas_instance *instance) +{ + int i; + struct fusion_context *fusion; + struct megasas_cmd_fusion *cmd; + u32 offset; + dma_addr_t io_req_base_phys; + u8 *io_req_base; + + + fusion = instance->ctrl_context; + + if (megasas_alloc_cmdlist_fusion(instance)) + goto fail_exit; + + if (megasas_alloc_request_fusion(instance)) + goto fail_exit; + + if (instance->is_rdpq) { + if (megasas_alloc_rdpq_fusion(instance)) + goto fail_exit; + } else + if (megasas_alloc_reply_fusion(instance)) + goto fail_exit; + + + /* The first 256 bytes (SMID 0) is not used. Don't add to the cmd list */ + io_req_base = fusion->io_request_frames + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE; + io_req_base_phys = fusion->io_request_frames_phys + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE; /* * Add all the commands to command pool (fusion->cmd_pool) */ /* SMID 0 is reserved. 
Set SMID/index from 1 */ - for (i = 0; i < max_cmd; i++) { + for (i = 0; i < instance->max_fw_cmds; i++) { cmd = fusion->cmd_list[i]; offset = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i; memset(cmd, 0, sizeof(struct megasas_cmd_fusion)); @@ -518,35 +657,13 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance) cmd->io_request_phys_addr = io_req_base_phys + offset; } - /* - * Create a frame pool and assign one frame to each cmd - */ - if (megasas_create_frame_pool_fusion(instance)) { - dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n"); - megasas_free_cmds_fusion(instance); - goto fail_req_desc; - } + if (megasas_create_sg_sense_fusion(instance)) + goto fail_exit; return 0; -fail_cmd_list: - pci_pool_free(fusion->io_request_frames_pool, fusion->io_request_frames, - fusion->io_request_frames_phys); - pci_pool_destroy(fusion->io_request_frames_pool); -fail_io_frames: - dma_free_coherent(&instance->pdev->dev, fusion->request_alloc_sz, - fusion->reply_frames_desc, - fusion->reply_frames_desc_phys); - pci_pool_free(fusion->reply_frames_desc_pool, - fusion->reply_frames_desc, - fusion->reply_frames_desc_phys); - pci_pool_destroy(fusion->reply_frames_desc_pool); - -fail_reply_desc: - dma_free_coherent(&instance->pdev->dev, fusion->request_alloc_sz, - fusion->req_frames_desc, - fusion->req_frames_desc_phys); -fail_req_desc: +fail_exit: + megasas_free_cmds_fusion(instance); return -ENOMEM; } @@ -576,11 +693,12 @@ wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd, msleep(20); } - if (frame_hdr->cmd_status == 0xff) - return -ETIME; - - return (frame_hdr->cmd_status == MFI_STAT_OK) ? - 0 : 1; + if (frame_hdr->cmd_status == MFI_STAT_INVALID_STATUS) + return DCMD_TIMEOUT; + else if (frame_hdr->cmd_status == MFI_STAT_OK) + return DCMD_SUCCESS; + else + return DCMD_FAILED; } /** @@ -593,16 +711,17 @@ int megasas_ioc_init_fusion(struct megasas_instance *instance) { struct megasas_init_frame *init_frame; - struct MPI2_IOC_INIT_REQUEST *IOCInitMessage; + struct MPI2_IOC_INIT_REQUEST *IOCInitMessage = NULL; dma_addr_t ioc_init_handle; struct megasas_cmd *cmd; - u8 ret; + u8 ret, cur_rdpq_mode; struct fusion_context *fusion; union MEGASAS_REQUEST_DESCRIPTOR_UNION req_desc; int i; struct megasas_header *frame_hdr; const char *sys_info; MFI_CAPABILITIES *drv_ops; + u32 scratch_pad_2; fusion = instance->ctrl_context; @@ -614,6 +733,18 @@ megasas_ioc_init_fusion(struct megasas_instance *instance) goto fail_get_cmd; } + scratch_pad_2 = readl + (&instance->reg_set->outbound_scratch_pad_2); + + cur_rdpq_mode = (scratch_pad_2 & MR_RDPQ_MODE_OFFSET) ? 1 : 0; + + if (instance->is_rdpq && !cur_rdpq_mode) { + dev_err(&instance->pdev->dev, "Firmware downgrade *NOT SUPPORTED*" + " from RDPQ mode to non RDPQ mode\n"); + ret = 1; + goto fail_fw_init; + } + IOCInitMessage = dma_alloc_coherent(&instance->pdev->dev, sizeof(struct MPI2_IOC_INIT_REQUEST), @@ -635,7 +766,11 @@ megasas_ioc_init_fusion(struct megasas_instance *instance) IOCInitMessage->SystemRequestFrameSize = cpu_to_le16(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4); IOCInitMessage->ReplyDescriptorPostQueueDepth = cpu_to_le16(fusion->reply_q_depth); - IOCInitMessage->ReplyDescriptorPostQueueAddress = cpu_to_le64(fusion->reply_frames_desc_phys); + IOCInitMessage->ReplyDescriptorPostQueueAddress = instance->is_rdpq ? + cpu_to_le64(fusion->rdpq_phys) : + cpu_to_le64(fusion->reply_frames_desc_phys[0]); + IOCInitMessage->MsgFlags = instance->is_rdpq ? 
+ MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE : 0; IOCInitMessage->SystemRequestFrameBaseAddress = cpu_to_le64(fusion->io_request_frames_phys); IOCInitMessage->HostMSIxVectors = instance->msix_vectors; init_frame = (struct megasas_init_frame *)cmd->frame; @@ -665,6 +800,11 @@ megasas_ioc_init_fusion(struct megasas_instance *instance) if (instance->max_chain_frame_sz > MEGASAS_CHAIN_FRAME_SZ_MIN) drv_ops->mfi_capabilities.support_ext_io_size = 1; + drv_ops->mfi_capabilities.support_fp_rlbypass = 1; + if (!dual_qdepth_disable) + drv_ops->mfi_capabilities.support_ext_queue_depth = 1; + + drv_ops->mfi_capabilities.support_qd_throttling = 1; /* Convert capability to LE32 */ cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities); @@ -784,7 +924,8 @@ megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend) { /* Below code is only for non pended DCMD */ if (instance->ctrl_context && !instance->mask_interrupts) - ret = megasas_issue_blocked_cmd(instance, cmd, 60); + ret = megasas_issue_blocked_cmd(instance, cmd, + MFI_IO_TIMEOUT_SECS); else ret = megasas_issue_polled(instance, cmd); @@ -795,7 +936,10 @@ megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend) { ret = -EINVAL; } - if (!ret) + if (ret == DCMD_TIMEOUT && instance->ctrl_context) + megaraid_sas_kill_hba(instance); + + if (ret == DCMD_SUCCESS) instance->pd_seq_map_id++; megasas_return_cmd(instance, cmd); @@ -875,10 +1019,13 @@ megasas_get_ld_map_info(struct megasas_instance *instance) if (instance->ctrl_context && !instance->mask_interrupts) ret = megasas_issue_blocked_cmd(instance, cmd, - MEGASAS_BLOCKED_CMD_TIMEOUT); + MFI_IO_TIMEOUT_SECS); else ret = megasas_issue_polled(instance, cmd); + if (ret == DCMD_TIMEOUT && instance->ctrl_context) + megaraid_sas_kill_hba(instance); + megasas_return_cmd(instance, cmd); return ret; @@ -1072,12 +1219,7 @@ megasas_init_adapter_fusion(struct megasas_instance *instance) reg_set = instance->reg_set; - /* - * Get various operational parameters from status register - */ - instance->max_fw_cmds = - instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF; - instance->max_fw_cmds = min(instance->max_fw_cmds, (u16)1008); + megasas_fusion_update_can_queue(instance, PROBE_CONTEXT); /* * Reduce the max supported cmds by 1. 
This is to ensure that the @@ -1658,7 +1800,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, local_map_ptr, start_lba_lo); io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; cmd->request_desc->SCSIIO.RequestFlags = - (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY + (MPI2_REQ_DESCRIPT_FLAGS_FP_IO << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); if (fusion->adapter_type == INVADER_SERIES) { if (io_request->RaidContext.regLockFlags == @@ -1702,8 +1844,8 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, (MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); if (fusion->adapter_type == INVADER_SERIES) { - if (io_request->RaidContext.regLockFlags == - REGION_TYPE_UNUSED) + if (io_info.do_fp_rlbypass || + (io_request->RaidContext.regLockFlags == REGION_TYPE_UNUSED)) cmd->request_desc->SCSIIO.RequestFlags = (MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); @@ -1791,7 +1933,7 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance, /* build request descriptor */ cmd->request_desc->SCSIIO.RequestFlags = - (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY << + (MPI2_REQ_DESCRIPT_FLAGS_FP_IO << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); cmd->request_desc->SCSIIO.DevHandle = devHandle; @@ -1897,7 +2039,7 @@ megasas_build_syspd_fusion(struct megasas_instance *instance, cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH); } cmd->request_desc->SCSIIO.RequestFlags = - (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY << + (MPI2_REQ_DESCRIPT_FLAGS_FP_IO << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); } } @@ -2035,13 +2177,21 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance, fusion = instance->ctrl_context; + if ((megasas_cmd_type(scmd) == READ_WRITE_LDIO) && + instance->ldio_threshold && + (atomic_inc_return(&instance->ldio_outstanding) > + instance->ldio_threshold)) { + atomic_dec(&instance->ldio_outstanding); + return SCSI_MLQUEUE_DEVICE_BUSY; + } + cmd = megasas_get_cmd_fusion(instance, scmd->request->tag); index = cmd->index; req_desc = megasas_get_request_descriptor(instance, index-1); if (!req_desc) - return 1; + return SCSI_MLQUEUE_HOST_BUSY; req_desc->Words = 0; cmd->request_desc = req_desc; @@ -2050,7 +2200,7 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance, megasas_return_cmd_fusion(instance, cmd); dev_err(&instance->pdev->dev, "Error building command\n"); cmd->request_desc = NULL; - return 1; + return SCSI_MLQUEUE_HOST_BUSY; } req_desc = cmd->request_desc; @@ -2092,16 +2242,16 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex) struct LD_LOAD_BALANCE_INFO *lbinfo; int threshold_reply_count = 0; struct scsi_cmnd *scmd_local = NULL; + struct MR_TASK_MANAGE_REQUEST *mr_tm_req; + struct MPI2_SCSI_TASK_MANAGE_REQUEST *mpi_tm_req; fusion = instance->ctrl_context; - if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) + if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) return IRQ_HANDLED; - desc = fusion->reply_frames_desc; - desc += ((MSIxIndex * fusion->reply_alloc_sz)/ - sizeof(union MPI2_REPLY_DESCRIPTORS_UNION)) + - fusion->last_reply_idx[MSIxIndex]; + desc = fusion->reply_frames_desc[MSIxIndex] + + fusion->last_reply_idx[MSIxIndex]; reply_desc = (struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc; @@ -2133,6 +2283,16 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex) extStatus = scsi_io_req->RaidContext.exStatus; switch (scsi_io_req->Function) { + case MPI2_FUNCTION_SCSI_TASK_MGMT: + mr_tm_req = (struct 
MR_TASK_MANAGE_REQUEST *)
+ cmd_fusion->io_request;
+ mpi_tm_req = (struct MPI2_SCSI_TASK_MANAGE_REQUEST *)
+ &mr_tm_req->TmRequest;
+ dev_dbg(&instance->pdev->dev, "TM completion: "
+ "type: 0x%x TaskMID: 0x%x\n",
+ mpi_tm_req->TaskType, mpi_tm_req->TaskMID);
+ complete(&cmd_fusion->done);
+ break;
 case MPI2_FUNCTION_SCSI_IO_REQUEST: /*Fast Path IO.*/
 /* Update load balancing info */
 device_id = MEGASAS_DEV_INDEX(scmd_local);
@@ -2155,6 +2315,8 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
 map_cmd_status(cmd_fusion, status, extStatus);
 scsi_io_req->RaidContext.status = 0;
 scsi_io_req->RaidContext.exStatus = 0;
+ if (megasas_cmd_type(scmd_local) == READ_WRITE_LDIO)
+ atomic_dec(&instance->ldio_outstanding);
 megasas_return_cmd_fusion(instance, cmd_fusion);
 scsi_dma_unmap(scmd_local);
 scmd_local->scsi_done(scmd_local);
@@ -2186,9 +2348,7 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
 /* Get the next reply descriptor */
 if (!fusion->last_reply_idx[MSIxIndex])
- desc = fusion->reply_frames_desc +
- ((MSIxIndex * fusion->reply_alloc_sz)/
- sizeof(union MPI2_REPLY_DESCRIPTORS_UNION));
+ desc = fusion->reply_frames_desc[MSIxIndex];
 else
 desc++;
@@ -2254,7 +2414,7 @@ megasas_complete_cmd_dpc_fusion(unsigned long instance_addr)
 /* If we have already declared adapter dead, do not complete cmds */
 spin_lock_irqsave(&instance->hba_lock, flags);
- if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
+ if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
 spin_unlock_irqrestore(&instance->hba_lock, flags);
 return;
 }
@@ -2411,7 +2571,7 @@ build_mpt_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
 * @cmd: mfi cmd pointer
 *
 */
-void
+int
 megasas_issue_dcmd_fusion(struct megasas_instance *instance,
 struct megasas_cmd *cmd)
 {
@@ -2419,10 +2579,13 @@ megasas_issue_dcmd_fusion(struct megasas_instance *instance,
 req_desc = build_mpt_cmd(instance, cmd);
 if (!req_desc) {
- dev_err(&instance->pdev->dev, "Couldn't issue MFI pass thru cmd\n");
- return;
+ dev_info(&instance->pdev->dev, "Failed from %s %d\n",
+ __func__, __LINE__);
+ return DCMD_NOT_FIRED;
 }
+
 megasas_fire_cmd_fusion(instance, req_desc);
+ return DCMD_SUCCESS;
 }
 /**
@@ -2583,7 +2746,7 @@ megasas_check_reset_fusion(struct megasas_instance *instance,
 /* This function waits for outstanding commands on fusion to complete */
 int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
- int iotimeout, int *convert)
+ int reason, int *convert)
 {
 int i, outstanding, retval = 0, hb_seconds_missed = 0;
 u32 fw_state;
@@ -2599,14 +2762,22 @@ int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
 retval = 1;
 goto out;
 }
+
+ if (reason == MFI_IO_TIMEOUT_OCR) {
+ dev_info(&instance->pdev->dev,
+ "MFI IO timed out, initiating OCR\n");
+ retval = 1;
+ goto out;
+ }
+
 /* If SR-IOV VF mode & heartbeat timeout, don't wait */
- if (instance->requestorId && !iotimeout) {
+ if (instance->requestorId && !reason) {
 retval = 1;
 goto out;
 }
 /* If SR-IOV VF mode & I/O timeout, check for HB timeout */
- if (instance->requestorId && iotimeout) {
+ if (instance->requestorId && reason) {
 if (instance->hb_host_mem->HB.fwCounter !=
 instance->hb_host_mem->HB.driverCounter) {
 instance->hb_host_mem->HB.driverCounter =
@@ -2655,17 +2826,18 @@ out:
 void megasas_reset_reply_desc(struct megasas_instance *instance)
 {
- int i, count;
+ int i, j, count;
 struct fusion_context *fusion;
 union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
 fusion = instance->ctrl_context;
 count 
= instance->msix_vectors > 0 ? instance->msix_vectors : 1;
- for (i = 0 ; i < count ; i++)
+ for (i = 0 ; i < count ; i++) {
 fusion->last_reply_idx[i] = 0;
- reply_desc = fusion->reply_frames_desc;
- for (i = 0 ; i < fusion->reply_q_depth * count; i++, reply_desc++)
- reply_desc->Words = cpu_to_le64(ULLONG_MAX);
+ reply_desc = fusion->reply_frames_desc[i];
+ for (j = 0 ; j < fusion->reply_q_depth; j++, reply_desc++)
+ reply_desc->Words = cpu_to_le64(ULLONG_MAX);
+ }
 }
 /*
@@ -2680,6 +2852,7 @@ void megasas_refire_mgmt_cmd(struct megasas_instance *instance)
 struct megasas_cmd *cmd_mfi;
 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
 u16 smid;
+ bool refire_cmd = 0;
 fusion = instance->ctrl_context;
@@ -2695,42 +2868,500 @@ void megasas_refire_mgmt_cmd(struct megasas_instance *instance)
 continue;
 req_desc = megasas_get_request_descriptor
 (instance, smid - 1);
- if (req_desc && ((cmd_mfi->frame->dcmd.opcode !=
+ refire_cmd = req_desc && ((cmd_mfi->frame->dcmd.opcode !=
 cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO)) &&
 (cmd_mfi->frame->dcmd.opcode !=
- cpu_to_le32(MR_DCMD_SYSTEM_PD_MAP_GET_INFO))))
+ cpu_to_le32(MR_DCMD_SYSTEM_PD_MAP_GET_INFO)))
+ && !(cmd_mfi->flags & DRV_DCMD_SKIP_REFIRE);
+ if (refire_cmd)
 megasas_fire_cmd_fusion(instance, req_desc);
 else
 megasas_return_cmd(instance, cmd_mfi);
 }
 }
+/*
+ * megasas_track_scsiio : Track SCSI IOs outstanding to a SCSI device
+ * @instance: per adapter struct
+ * @channel: the channel assigned by the OS
+ * @id: the id assigned by the OS
+ *
+ * Returns SUCCESS if no IOs are pending to the SCSI device, else returns FAILED
+ */
+
+static int megasas_track_scsiio(struct megasas_instance *instance,
+ int id, int channel)
+{
+ int i, found = 0;
+ struct megasas_cmd_fusion *cmd_fusion;
+ struct fusion_context *fusion;
+ fusion = instance->ctrl_context;
+
+ for (i = 0 ; i < instance->max_scsi_cmds; i++) {
+ cmd_fusion = fusion->cmd_list[i];
+ if (cmd_fusion->scmd &&
+ (cmd_fusion->scmd->device->id == id &&
+ cmd_fusion->scmd->device->channel == channel)) {
+ dev_info(&instance->pdev->dev,
+ "SCSI commands pending to target "
+ "channel %d id %d \tSMID: 0x%x\n",
+ channel, id, cmd_fusion->index);
+ scsi_print_command(cmd_fusion->scmd);
+ found = 1;
+ break;
+ }
+ }
+
+ return found ? FAILED : SUCCESS;
+}
+
+/**
+ * megasas_tm_response_code - translation of device response code
+ * @instance: per adapter object
+ * @mpi_reply: MPI reply returned by firmware
+ *
+ * Return nothing. 
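+ *
+ * For example, a task management request the firmware completed
+ * successfully (MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED, 0x08) is decoded
+ * by the switch below and logged roughly as (illustrative output):
+ *
+ *   response_code(8): task management request succeeded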
+ */
+static void
+megasas_tm_response_code(struct megasas_instance *instance,
+ struct MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply)
+{
+ char *desc;
+
+ switch (mpi_reply->ResponseCode) {
+ case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
+ desc = "task management request completed";
+ break;
+ case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
+ desc = "invalid frame";
+ break;
+ case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
+ desc = "task management request not supported";
+ break;
+ case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
+ desc = "task management request failed";
+ break;
+ case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
+ desc = "task management request succeeded";
+ break;
+ case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
+ desc = "invalid lun";
+ break;
+ case MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG:
+ desc = "overlapped tag attempted";
+ break;
+ case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
+ desc = "task queued, however not sent to target";
+ break;
+ default:
+ desc = "unknown";
+ break;
+ }
+ dev_dbg(&instance->pdev->dev, "response_code(%01x): %s\n",
+ mpi_reply->ResponseCode, desc);
+ dev_dbg(&instance->pdev->dev,
+ "TerminationCount/DevHandle/Function/TaskType/IOCStat/IOCLoginfo"
+ " 0x%x/0x%x/0x%x/0x%x/0x%x/0x%x\n",
+ mpi_reply->TerminationCount, mpi_reply->DevHandle,
+ mpi_reply->Function, mpi_reply->TaskType,
+ mpi_reply->IOCStatus, mpi_reply->IOCLogInfo);
+}
+
+/**
+ * megasas_issue_tm - main routine for sending tm requests
+ * @instance: per adapter struct
+ * @device_handle: device handle
+ * @channel: the channel assigned by the OS
+ * @id: the id assigned by the OS
+ * @type: MPI2_SCSITASKMGMT_TASKTYPE_XXX (defined in megaraid_sas_fusion.h)
+ * @smid_task: smid assigned to the task
+ * Context: user
+ *
+ * MegaRaid uses the MPT interface for Task Management requests.
+ * A generic API for sending task management requests to firmware.
+ *
+ * Return SUCCESS or FAILED.
+ */
+static int
+megasas_issue_tm(struct megasas_instance *instance, u16 device_handle,
+ uint channel, uint id, u16 smid_task, u8 type)
+{
+ struct MR_TASK_MANAGE_REQUEST *mr_request;
+ struct MPI2_SCSI_TASK_MANAGE_REQUEST *mpi_request;
+ unsigned long timeleft;
+ struct megasas_cmd_fusion *cmd_fusion;
+ struct megasas_cmd *cmd_mfi;
+ union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
+ struct fusion_context *fusion;
+ struct megasas_cmd_fusion *scsi_lookup;
+ int rc;
+ struct MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply;
+
+ fusion = instance->ctrl_context;
+
+ cmd_mfi = megasas_get_cmd(instance);
+
+ if (!cmd_mfi) {
+ dev_err(&instance->pdev->dev, "Failed from %s %d\n",
+ __func__, __LINE__);
+ return -ENOMEM;
+ }
+
+ cmd_fusion = megasas_get_cmd_fusion(instance,
+ instance->max_scsi_cmds + cmd_mfi->index);
+
+ /* Save the smid. To be used for returning the cmd */
+ cmd_mfi->context.smid = cmd_fusion->index;
+
+ req_desc = megasas_get_request_descriptor(instance,
+ (cmd_fusion->index - 1));
+ if (!req_desc) {
+ dev_err(&instance->pdev->dev, "Failed from %s %d\n",
+ __func__, __LINE__);
+ megasas_return_cmd(instance, cmd_mfi);
+ return -ENOMEM;
+ }
+
+ cmd_fusion->request_desc = req_desc;
+ req_desc->Words = 0;
+
+ scsi_lookup = fusion->cmd_list[smid_task - 1];
+
+ mr_request = (struct MR_TASK_MANAGE_REQUEST *) cmd_fusion->io_request;
+ memset(mr_request, 0, sizeof(struct MR_TASK_MANAGE_REQUEST));
+ mpi_request = (struct MPI2_SCSI_TASK_MANAGE_REQUEST *) &mr_request->TmRequest;
+ mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
+ mpi_request->DevHandle = cpu_to_le16(device_handle);
+ mpi_request->TaskType = type;
+ mpi_request->TaskMID = cpu_to_le16(smid_task);
+ mpi_request->LUN[1] = 0;
+
+
+ req_desc = cmd_fusion->request_desc;
+ req_desc->HighPriority.SMID = cpu_to_le16(cmd_fusion->index);
+ req_desc->HighPriority.RequestFlags =
+ (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
+ MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ req_desc->HighPriority.MSIxIndex = 0;
+ req_desc->HighPriority.LMID = 0;
+ req_desc->HighPriority.Reserved1 = 0;
+
+ if (channel < MEGASAS_MAX_PD_CHANNELS)
+ mr_request->tmReqFlags.isTMForPD = 1;
+ else
+ mr_request->tmReqFlags.isTMForLD = 1;
+
+ init_completion(&cmd_fusion->done);
+ megasas_fire_cmd_fusion(instance, req_desc);
+
+ timeleft = wait_for_completion_timeout(&cmd_fusion->done, 50 * HZ);
+
+ if (!timeleft) {
+ dev_err(&instance->pdev->dev,
+ "task mgmt type 0x%x timed out\n", type);
+ cmd_mfi->flags |= DRV_DCMD_SKIP_REFIRE;
+ mutex_unlock(&instance->reset_mutex);
+ rc = megasas_reset_fusion(instance->host, MFI_IO_TIMEOUT_OCR);
+ mutex_lock(&instance->reset_mutex);
+ return rc;
+ }
+
+ mpi_reply = (struct MPI2_SCSI_TASK_MANAGE_REPLY *) &mr_request->TMReply;
+ megasas_tm_response_code(instance, mpi_reply);
+
+ megasas_return_cmd(instance, cmd_mfi);
+ rc = SUCCESS;
+ switch (type) {
+ case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
+ if (scsi_lookup->scmd == NULL)
+ break;
+ else {
+ instance->instancet->disable_intr(instance);
+ msleep(1000);
+ megasas_complete_cmd_dpc_fusion
+ ((unsigned long)instance);
+ instance->instancet->enable_intr(instance);
+ if (scsi_lookup->scmd == NULL)
+ break;
+ }
+ rc = FAILED;
+ break;
+
+ case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
+ if ((channel == 0xFFFFFFFF) && (id == 0xFFFFFFFF))
+ break;
+ instance->instancet->disable_intr(instance);
+ msleep(1000);
+ megasas_complete_cmd_dpc_fusion
+ ((unsigned long)instance);
+ rc = megasas_track_scsiio(instance, id, channel);
+ instance->instancet->enable_intr(instance);
+
+ break;
+ case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
+ case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
+ break;
+ default:
+ rc = FAILED;
+ break;
+ }
+
+ return rc;
+
+}
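+
+/*
+ * A sketch of the frame megasas_issue_tm() builds, per the
+ * MR_TASK_MANAGE_REQUEST definition in megaraid_sas_fusion.h:
+ *
+ *   [ TmRequest: 128 bytes, cast to struct MPI2_SCSI_TASK_MANAGE_REQUEST ]
+ *   [ union: tmReqFlags (isTMForLD/isTMForPD) on submission,
+ *            TMReply (struct MPI2_SCSI_TASK_MANAGE_REPLY) on completion ]
+ *
+ * The reply overlays the request-flags word, which is why the completion
+ * path above reads &mr_request->TMReply.
+ */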
+
+/*
+ * megasas_fusion_smid_lookup : Look for the fusion command corresponding to a SCSI cmd
+ * @scmd: pointer to scsi command object
+ *
+ * Returns a non-zero index if the SMID is found in outstanding commands
+ */
+static u16 megasas_fusion_smid_lookup(struct scsi_cmnd *scmd)
+{
+ int i, ret = 0;
+ struct megasas_instance *instance;
+ struct megasas_cmd_fusion *cmd_fusion;
+ struct fusion_context *fusion;
+
+ instance = (struct megasas_instance *)scmd->device->host->hostdata;
+
+ fusion = instance->ctrl_context;
+
+ for (i = 0; i < instance->max_scsi_cmds; i++) {
+ cmd_fusion = fusion->cmd_list[i];
+ if (cmd_fusion->scmd && (cmd_fusion->scmd == scmd)) {
+ scmd_printk(KERN_NOTICE, scmd, "Abort 
request is for"
+ " SMID: %d\n", cmd_fusion->index);
+ ret = cmd_fusion->index;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * megasas_get_tm_devhandle - Get devhandle for TM request
+ * @sdev: OS provided scsi device
+ *
+ * Returns: devhandle/targetID of SCSI device
+ */
+static u16 megasas_get_tm_devhandle(struct scsi_device *sdev)
+{
+ u16 pd_index = 0;
+ u32 device_id;
+ struct megasas_instance *instance;
+ struct fusion_context *fusion;
+ struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
+ u16 devhandle = (u16)ULONG_MAX;
+
+ instance = (struct megasas_instance *)sdev->host->hostdata;
+ fusion = instance->ctrl_context;
+
+ if (sdev->channel < MEGASAS_MAX_PD_CHANNELS) {
+ if (instance->use_seqnum_jbod_fp) {
+ pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
+ sdev->id;
+ pd_sync = (void *)fusion->pd_seq_sync
+ [(instance->pd_seq_map_id - 1) & 1];
+ devhandle = pd_sync->seq[pd_index].devHandle;
+ } else
+ sdev_printk(KERN_ERR, sdev, "Firmware exposes tmCapable"
+ " without JBOD MAP support from %s %d\n", __func__, __LINE__);
+ } else {
+ device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL)
+ + sdev->id;
+ devhandle = device_id;
+ }
+
+ return devhandle;
+}
+
+/*
+ * megasas_task_abort_fusion : SCSI task abort function for fusion adapters
+ * @scmd : pointer to scsi command object
+ *
+ * Returns SUCCESS if the command is aborted, else FAILED
+ */
+
+int megasas_task_abort_fusion(struct scsi_cmnd *scmd)
+{
+ struct megasas_instance *instance;
+ u16 smid, devhandle;
+ struct fusion_context *fusion;
+ int ret;
+ struct MR_PRIV_DEVICE *mr_device_priv_data;
+ mr_device_priv_data = scmd->device->hostdata;
+
+
+ instance = (struct megasas_instance *)scmd->device->host->hostdata;
+ fusion = instance->ctrl_context;
+
+ if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
+ dev_err(&instance->pdev->dev, "Controller is not OPERATIONAL, "
+ "SCSI host:%d\n", instance->host->host_no);
+ ret = FAILED;
+ return ret;
+ }
+
+ if (!mr_device_priv_data) {
+ sdev_printk(KERN_INFO, scmd->device, "device has been deleted! "
+ "scmd(%p)\n", scmd);
+ scmd->result = DID_NO_CONNECT << 16;
+ ret = SUCCESS;
+ goto out;
+ }
+
+
+ if (!mr_device_priv_data->is_tm_capable) {
+ ret = FAILED;
+ goto out;
+ }
+
+ mutex_lock(&instance->reset_mutex);
+
+ smid = megasas_fusion_smid_lookup(scmd);
+
+ if (!smid) {
+ ret = SUCCESS;
+ scmd_printk(KERN_NOTICE, scmd, "Command for which abort is"
+ " issued is not found in outstanding commands\n");
+ mutex_unlock(&instance->reset_mutex);
+ goto out;
+ }
+
+ devhandle = megasas_get_tm_devhandle(scmd->device);
+
+ if (devhandle == (u16)ULONG_MAX) {
+ ret = SUCCESS;
+ sdev_printk(KERN_INFO, scmd->device,
+ "task abort issued for invalid devhandle\n");
+ mutex_unlock(&instance->reset_mutex);
+ goto out;
+ }
+ sdev_printk(KERN_INFO, scmd->device,
+ "attempting task abort! scmd(%p) tm_dev_handle 0x%x\n",
+ scmd, devhandle);
+
+ mr_device_priv_data->tm_busy = 1;
+ ret = megasas_issue_tm(instance, devhandle,
+ scmd->device->channel, scmd->device->id, smid,
+ MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK);
+ mr_device_priv_data->tm_busy = 0;
+
+ mutex_unlock(&instance->reset_mutex);
+out:
+ sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n",
+ ((ret == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
+
+ return ret;
+}
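+
+/*
+ * How this handler is reached is outside this hunk; a minimal sketch of
+ * the assumed scsi_host_template wiring (illustrative only, possibly via
+ * wrappers in megaraid_sas_base.c):
+ *
+ *   .eh_abort_handler        = megasas_task_abort_fusion,
+ *   .eh_target_reset_handler = megasas_reset_target_fusion,
+ */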
"SUCCESS" : "FAILED"), scmd); + + return ret; +} + +/* + * megasas_reset_target_fusion : target reset function for fusion adapters + * scmd: SCSI command pointer + * + * Returns SUCCESS if all commands associated with target aborted else FAILED + */ + +int megasas_reset_target_fusion(struct scsi_cmnd *scmd) +{ + + struct megasas_instance *instance; + int ret = FAILED; + u16 devhandle; + struct fusion_context *fusion; + struct MR_PRIV_DEVICE *mr_device_priv_data; + mr_device_priv_data = scmd->device->hostdata; + + instance = (struct megasas_instance *)scmd->device->host->hostdata; + fusion = instance->ctrl_context; + + if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) { + dev_err(&instance->pdev->dev, "Controller is not OPERATIONAL," + "SCSI host:%d\n", instance->host->host_no); + ret = FAILED; + return ret; + } + + if (!mr_device_priv_data) { + sdev_printk(KERN_INFO, scmd->device, "device been deleted! " + "scmd(%p)\n", scmd); + scmd->result = DID_NO_CONNECT << 16; + ret = SUCCESS; + goto out; + } + + + if (!mr_device_priv_data->is_tm_capable) { + ret = FAILED; + goto out; + } + + mutex_lock(&instance->reset_mutex); + devhandle = megasas_get_tm_devhandle(scmd->device); + + if (devhandle == (u16)ULONG_MAX) { + ret = SUCCESS; + sdev_printk(KERN_INFO, scmd->device, + "target reset issued for invalid devhandle\n"); + mutex_unlock(&instance->reset_mutex); + goto out; + } + + sdev_printk(KERN_INFO, scmd->device, + "attempting target reset! scmd(%p) tm_dev_handle 0x%x\n", + scmd, devhandle); + mr_device_priv_data->tm_busy = 1; + ret = megasas_issue_tm(instance, devhandle, + scmd->device->channel, scmd->device->id, 0, + MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET); + mr_device_priv_data->tm_busy = 0; + mutex_unlock(&instance->reset_mutex); +out: + scmd_printk(KERN_NOTICE, scmd, "megasas: target reset %s!!\n", + (ret == SUCCESS) ? 
"SUCCESS" : "FAILED"); + + return ret; +} + +/*SRIOV get other instance in cluster if any*/ +struct megasas_instance *megasas_get_peer_instance(struct megasas_instance *instance) +{ + int i; + + for (i = 0; i < MAX_MGMT_ADAPTERS; i++) { + if (megasas_mgmt_info.instance[i] && + (megasas_mgmt_info.instance[i] != instance) && + megasas_mgmt_info.instance[i]->requestorId && + megasas_mgmt_info.instance[i]->peerIsPresent && + (memcmp((megasas_mgmt_info.instance[i]->clusterId), + instance->clusterId, MEGASAS_CLUSTER_ID_SIZE) == 0)) + return megasas_mgmt_info.instance[i]; + } + return NULL; +} + /* Check for a second path that is currently UP */ int megasas_check_mpio_paths(struct megasas_instance *instance, struct scsi_cmnd *scmd) { - int i, j, retval = (DID_RESET << 16); - - if (instance->mpio && instance->requestorId) { - for (i = 0 ; i < MAX_MGMT_ADAPTERS ; i++) - for (j = 0 ; j < MAX_LOGICAL_DRIVES; j++) - if (megasas_mgmt_info.instance[i] && - (megasas_mgmt_info.instance[i] != instance) && - megasas_mgmt_info.instance[i]->mpio && - megasas_mgmt_info.instance[i]->requestorId - && - (megasas_mgmt_info.instance[i]->ld_ids[j] - == scmd->device->id)) { - retval = (DID_NO_CONNECT << 16); - goto out; - } + struct megasas_instance *peer_instance = NULL; + int retval = (DID_RESET << 16); + + if (instance->peerIsPresent) { + peer_instance = megasas_get_peer_instance(instance); + if ((peer_instance) && + (atomic_read(&peer_instance->adprecovery) == + MEGASAS_HBA_OPERATIONAL)) + retval = (DID_NO_CONNECT << 16); } -out: return retval; } /* Core fusion reset function */ -int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout) +int megasas_reset_fusion(struct Scsi_Host *shost, int reason) { int retval = SUCCESS, i, convert = 0; struct megasas_instance *instance; @@ -2739,13 +3370,14 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout) u32 abs_state, status_reg, reset_adapter; u32 io_timeout_in_crash_mode = 0; struct scsi_cmnd *scmd_local = NULL; + struct scsi_device *sdev; instance = (struct megasas_instance *)shost->hostdata; fusion = instance->ctrl_context; mutex_lock(&instance->reset_mutex); - if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) { + if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { dev_warn(&instance->pdev->dev, "Hardware critical error, " "returning FAILED for scsi%d.\n", instance->host->host_no); @@ -2757,10 +3389,10 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout) /* IO timeout detected, forcibly put FW in FAULT state */ if (abs_state != MFI_STATE_FAULT && instance->crash_dump_buf && - instance->crash_dump_app_support && iotimeout) { - dev_info(&instance->pdev->dev, "IO timeout is detected, " + instance->crash_dump_app_support && reason) { + dev_info(&instance->pdev->dev, "IO/DCMD timeout is detected, " "forcibly FAULT Firmware\n"); - instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT; + atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT); status_reg = readl(&instance->reg_set->doorbell); writel(status_reg | MFI_STATE_FORCE_OCR, &instance->reg_set->doorbell); @@ -2772,10 +3404,10 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout) dev_dbg(&instance->pdev->dev, "waiting for [%d] " "seconds for crash dump collection and OCR " "to be done\n", (io_timeout_in_crash_mode * 3)); - } while ((instance->adprecovery != MEGASAS_HBA_OPERATIONAL) && + } while ((atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) && (io_timeout_in_crash_mode < 80)); - if (instance->adprecovery == 
MEGASAS_HBA_OPERATIONAL) { + if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) { dev_info(&instance->pdev->dev, "OCR done for IO " "timeout case\n"); retval = SUCCESS; @@ -2792,18 +3424,18 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout) if (instance->requestorId && !instance->skip_heartbeat_timer_del) del_timer_sync(&instance->sriov_heartbeat_timer); set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags); - instance->adprecovery = MEGASAS_ADPRESET_SM_POLLING; + atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_POLLING); instance->instancet->disable_intr(instance); msleep(1000); /* First try waiting for commands to complete */ - if (megasas_wait_for_outstanding_fusion(instance, iotimeout, + if (megasas_wait_for_outstanding_fusion(instance, reason, &convert)) { - instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT; + atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT); dev_warn(&instance->pdev->dev, "resetting fusion " "adapter scsi%d.\n", instance->host->host_no); if (convert) - iotimeout = 0; + reason = 0; /* Now return commands back to the OS */ for (i = 0 ; i < instance->max_scsi_cmds; i++) { @@ -2813,6 +3445,8 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout) scmd_local->result = megasas_check_mpio_paths(instance, scmd_local); + if (megasas_cmd_type(scmd_local) == READ_WRITE_LDIO) + atomic_dec(&instance->ldio_outstanding); megasas_return_cmd_fusion(instance, cmd_fusion); scsi_dma_unmap(scmd_local); scmd_local->scsi_done(scmd_local); @@ -2837,55 +3471,9 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout) } /* Let SR-IOV VF & PF sync up if there was a HB failure */ - if (instance->requestorId && !iotimeout) { + if (instance->requestorId && !reason) { msleep(MEGASAS_OCR_SETTLE_TIME_VF); - /* Look for a late HB update after VF settle time */ - if (abs_state == MFI_STATE_OPERATIONAL && - (instance->hb_host_mem->HB.fwCounter != - instance->hb_host_mem->HB.driverCounter)) { - instance->hb_host_mem->HB.driverCounter = - instance->hb_host_mem->HB.fwCounter; - dev_warn(&instance->pdev->dev, "SR-IOV:" - "Late FW heartbeat update for " - "scsi%d.\n", - instance->host->host_no); - } else { - /* In VF mode, first poll for FW ready */ - for (i = 0; - i < (MEGASAS_RESET_WAIT_TIME * 1000); - i += 20) { - status_reg = - instance->instancet-> - read_fw_status_reg( - instance->reg_set); - abs_state = status_reg & - MFI_STATE_MASK; - if (abs_state == MFI_STATE_READY) { - dev_warn(&instance->pdev->dev, - "SR-IOV: FW was found" - "to be in ready state " - "for scsi%d.\n", - instance->host->host_no); - break; - } - msleep(20); - } - if (abs_state != MFI_STATE_READY) { - dev_warn(&instance->pdev->dev, "SR-IOV: " - "FW not in ready state after %d" - " seconds for scsi%d, status_reg = " - "0x%x.\n", - MEGASAS_RESET_WAIT_TIME, - instance->host->host_no, - status_reg); - megaraid_sas_kill_hba(instance); - instance->skip_heartbeat_timer_del = 1; - instance->adprecovery = - MEGASAS_HW_CRITICAL_ERROR; - retval = FAILED; - goto out; - } - } + goto transition_to_ready; } /* Now try to reset the chip */ @@ -2894,23 +3482,28 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout) if (instance->instancet->adp_reset (instance, instance->reg_set)) continue; - +transition_to_ready: /* Wait for FW to become ready */ if (megasas_transition_to_ready(instance, 1)) { - dev_warn(&instance->pdev->dev, "Failed to " - "transition controller to ready " - "for scsi%d.\n", - instance->host->host_no); - continue; + 
dev_warn(&instance->pdev->dev, + "Failed to transition controller to ready for " + "scsi%d.\n", instance->host->host_no); + if (instance->requestorId && !reason) + goto fail_kill_adapter; + else + continue; } - megasas_reset_reply_desc(instance); + megasas_fusion_update_can_queue(instance, OCR_CONTEXT); + if (megasas_ioc_init_fusion(instance)) { dev_warn(&instance->pdev->dev, - "megasas_ioc_init_fusion() failed!" - " for scsi%d\n", - instance->host->host_no); - continue; + "megasas_ioc_init_fusion() failed! for " + "scsi%d\n", instance->host->host_no); + if (instance->requestorId && !reason) + goto fail_kill_adapter; + else + continue; } megasas_refire_mgmt_cmd(instance); @@ -2932,10 +3525,13 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout) megasas_setup_jbod_map(instance); + shost_for_each_device(sdev, shost) + megasas_update_sdev_properties(sdev); + clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags); instance->instancet->enable_intr(instance); - instance->adprecovery = MEGASAS_HBA_OPERATIONAL; + atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL); /* Restart SR-IOV heartbeat */ if (instance->requestorId) { @@ -2964,6 +3560,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout) retval = SUCCESS; goto out; } +fail_kill_adapter: /* Reset failed, kill the adapter */ dev_warn(&instance->pdev->dev, "Reset failed, killing " "adapter scsi%d.\n", instance->host->host_no); @@ -2980,7 +3577,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout) } clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags); instance->instancet->enable_intr(instance); - instance->adprecovery = MEGASAS_HBA_OPERATIONAL; + atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL); } out: clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags); diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h index 473005c99b44..80eaee22f5bc 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.h +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h @@ -176,7 +176,9 @@ enum REGION_TYPE { #define MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD (0x0100) #define MPI2_SCSIIO_EEDPFLAGS_INSERT_OP (0x0004) #define MPI2_FUNCTION_SCSI_IO_REQUEST (0x00) /* SCSI IO */ -#define MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY (0x06) +#define MPI2_FUNCTION_SCSI_TASK_MGMT (0x01) +#define MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY (0x03) +#define MPI2_REQ_DESCRIPT_FLAGS_FP_IO (0x06) #define MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO (0x00) #define MPI2_SGE_FLAGS_64_BIT_ADDRESSING (0x02) #define MPI2_SCSIIO_CONTROL_WRITE (0x01000000) @@ -277,6 +279,100 @@ union MPI2_SCSI_IO_CDB_UNION { struct MPI2_SGE_SIMPLE_UNION SGE; }; +/**************************************************************************** +* SCSI Task Management messages +****************************************************************************/ + +/*SCSI Task Management Request Message */ +struct MPI2_SCSI_TASK_MANAGE_REQUEST { + u16 DevHandle; /*0x00 */ + u8 ChainOffset; /*0x02 */ + u8 Function; /*0x03 */ + u8 Reserved1; /*0x04 */ + u8 TaskType; /*0x05 */ + u8 Reserved2; /*0x06 */ + u8 MsgFlags; /*0x07 */ + u8 VP_ID; /*0x08 */ + u8 VF_ID; /*0x09 */ + u16 Reserved3; /*0x0A */ + u8 LUN[8]; /*0x0C */ + u32 Reserved4[7]; /*0x14 */ + u16 TaskMID; /*0x30 */ + u16 Reserved5; /*0x32 */ +}; + + +/*SCSI Task Management Reply Message */ +struct MPI2_SCSI_TASK_MANAGE_REPLY { + u16 DevHandle; /*0x00 */ + u8 MsgLength; /*0x02 */ + u8 Function; /*0x03 */ + u8 ResponseCode; /*0x04 */ + u8 TaskType; /*0x05 */ + u8 Reserved1; 
/*0x06 */ + u8 MsgFlags; /*0x07 */ + u8 VP_ID; /*0x08 */ + u8 VF_ID; /*0x09 */ + u16 Reserved2; /*0x0A */ + u16 Reserved3; /*0x0C */ + u16 IOCStatus; /*0x0E */ + u32 IOCLogInfo; /*0x10 */ + u32 TerminationCount; /*0x14 */ + u32 ResponseInfo; /*0x18 */ +}; + +struct MR_TM_REQUEST { + char request[128]; +}; + +struct MR_TM_REPLY { + char reply[128]; +}; + +/* SCSI Task Management Request Message */ +struct MR_TASK_MANAGE_REQUEST { + /*To be type casted to struct MPI2_SCSI_TASK_MANAGE_REQUEST */ + struct MR_TM_REQUEST TmRequest; + union { + struct { +#if defined(__BIG_ENDIAN_BITFIELD) + u32 reserved1:30; + u32 isTMForPD:1; + u32 isTMForLD:1; +#else + u32 isTMForLD:1; + u32 isTMForPD:1; + u32 reserved1:30; +#endif + u32 reserved2; + } tmReqFlags; + struct MR_TM_REPLY TMReply; + }; +}; + +/* TaskType values */ + +#define MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK (0x01) +#define MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET (0x02) +#define MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET (0x03) +#define MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET (0x05) +#define MPI2_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET (0x06) +#define MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK (0x07) +#define MPI2_SCSITASKMGMT_TASKTYPE_CLR_ACA (0x08) +#define MPI2_SCSITASKMGMT_TASKTYPE_QRY_TASK_SET (0x09) +#define MPI2_SCSITASKMGMT_TASKTYPE_QRY_ASYNC_EVENT (0x0A) + +/* ResponseCode values */ + +#define MPI2_SCSITASKMGMT_RSP_TM_COMPLETE (0x00) +#define MPI2_SCSITASKMGMT_RSP_INVALID_FRAME (0x02) +#define MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED (0x04) +#define MPI2_SCSITASKMGMT_RSP_TM_FAILED (0x05) +#define MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED (0x08) +#define MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN (0x09) +#define MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG (0x0A) +#define MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC (0x80) + /* * RAID SCSI IO Request Message * Total SGE count will be one less than _MPI2_SCSI_IO_REQUEST @@ -547,7 +643,9 @@ struct MR_SPAN_BLOCK_INFO { struct MR_LD_RAID { struct { #if defined(__BIG_ENDIAN_BITFIELD) - u32 reserved4:7; + u32 reserved4:5; + u32 fpBypassRegionLock:1; + u32 tmCapable:1; u32 fpNonRWCapable:1; u32 fpReadAcrossStripe:1; u32 fpWriteAcrossStripe:1; @@ -569,7 +667,9 @@ struct MR_LD_RAID { u32 fpWriteAcrossStripe:1; u32 fpReadAcrossStripe:1; u32 fpNonRWCapable:1; - u32 reserved4:7; + u32 tmCapable:1; + u32 fpBypassRegionLock:1; + u32 reserved4:5; #endif } capability; __le32 reserved6; @@ -639,7 +739,7 @@ struct IO_REQUEST_INFO { u8 fpOkForIo; u8 IoforUnevenSpan; u8 start_span; - u8 reserved; + u8 do_fp_rlbypass; u64 start_row; u8 span_arm; /* span[7:5], arm[4:0] */ u8 pd_after_lb; @@ -694,6 +794,7 @@ struct megasas_cmd_fusion { u32 sync_cmd_idx; u32 index; u8 pd_r1_lb; + struct completion done; }; struct LD_LOAD_BALANCE_INFO { @@ -807,9 +908,18 @@ struct MR_FW_RAID_MAP_EXT { * * define MR_PD_CFG_SEQ structure for system PDs * */ struct MR_PD_CFG_SEQ { - __le16 seqNum; - __le16 devHandle; - u8 reserved[4]; + u16 seqNum; + u16 devHandle; + struct { +#if defined(__BIG_ENDIAN_BITFIELD) + u8 reserved:7; + u8 tmCapable:1; +#else + u8 tmCapable:1; + u8 reserved:7; +#endif + } capability; + u8 reserved[3]; } __packed; struct MR_PD_CFG_SEQ_NUM_SYNC { @@ -818,6 +928,12 @@ struct MR_PD_CFG_SEQ_NUM_SYNC { struct MR_PD_CFG_SEQ seq[1]; } __packed; +struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY { + u64 RDPQBaseAddress; + u32 Reserved1; + u32 Reserved2; +}; + struct fusion_context { struct megasas_cmd_fusion **cmd_list; dma_addr_t req_frames_desc_phys; @@ -830,8 +946,8 @@ struct fusion_context { struct dma_pool *sg_dma_pool; struct dma_pool 
*sense_dma_pool; - dma_addr_t reply_frames_desc_phys; - union MPI2_REPLY_DESCRIPTORS_UNION *reply_frames_desc; + dma_addr_t reply_frames_desc_phys[MAX_MSIX_QUEUES_FUSION]; + union MPI2_REPLY_DESCRIPTORS_UNION *reply_frames_desc[MAX_MSIX_QUEUES_FUSION]; struct dma_pool *reply_frames_desc_pool; u16 last_reply_idx[MAX_MSIX_QUEUES_FUSION]; @@ -841,6 +957,8 @@ struct fusion_context { u32 reply_alloc_sz; u32 io_frames_alloc_sz; + struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY *rdpq_virt; + dma_addr_t rdpq_phys; u16 max_sge_in_main_msg; u16 max_sge_in_chain; diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c index 555367f00228..1753e42826dd 100644 --- a/drivers/scsi/mesh.c +++ b/drivers/scsi/mesh.c @@ -29,6 +29,7 @@ #include <linux/interrupt.h> #include <linux/reboot.h> #include <linux/spinlock.h> +#include <linux/pci.h> #include <asm/dbdma.h> #include <asm/io.h> #include <asm/pgtable.h> @@ -38,7 +39,6 @@ #include <asm/processor.h> #include <asm/machdep.h> #include <asm/pmac_feature.h> -#include <asm/pci-bridge.h> #include <asm/macio.h> #include <scsi/scsi.h> diff --git a/drivers/scsi/mpt3sas/mpi/mpi2.h b/drivers/scsi/mpt3sas/mpi/mpi2.h index ec27ad2d186f..dfad5b8c1890 100644 --- a/drivers/scsi/mpt3sas/mpi/mpi2.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2014 LSI Corporation. + * Copyright 2000-2015 Avago Technologies. All rights reserved. * * * Name: mpi2.h @@ -8,7 +8,7 @@ * scatter/gather formats. * Creation Date: June 21, 2006 * - * mpi2.h Version: 02.00.35 + * mpi2.h Version: 02.00.39 * * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 * prefix are for use only on MPI v2.5 products, and must not be used @@ -92,6 +92,14 @@ * 12-05-13 02.00.33 Bumped MPI2_HEADER_VERSION_UNIT. * 01-08-14 02.00.34 Bumped MPI2_HEADER_VERSION_UNIT * 06-13-14 02.00.35 Bumped MPI2_HEADER_VERSION_UNIT. + * 11-18-14 02.00.36 Updated copyright information. + * Bumped MPI2_HEADER_VERSION_UNIT. + * 03-16-15 02.00.37 Bumped MPI2_HEADER_VERSION_UNIT. + * Added Scratchpad registers to + * MPI2_SYSTEM_INTERFACE_REGS. + * Added MPI2_DIAG_SBR_RELOAD. + * 03-19-15 02.00.38 Bumped MPI2_HEADER_VERSION_UNIT. + * 05-25-15 02.00.39 Bumped MPI2_HEADER_VERSION_UNIT. 
* -------------------------------------------------------------------------- */ @@ -124,8 +132,14 @@ MPI25_VERSION_MINOR) #define MPI2_VERSION_02_05 (0x0205) +/*minor version for MPI v2.6 compatible products */ +#define MPI26_VERSION_MINOR (0x06) +#define MPI26_VERSION ((MPI2_VERSION_MAJOR << MPI2_VERSION_MAJOR_SHIFT) | \ + MPI26_VERSION_MINOR) +#define MPI2_VERSION_02_06 (0x0206) + /*Unit and Dev versioning for this MPI header set */ -#define MPI2_HEADER_VERSION_UNIT (0x23) +#define MPI2_HEADER_VERSION_UNIT (0x27) #define MPI2_HEADER_VERSION_DEV (0x00) #define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00) #define MPI2_HEADER_VERSION_UNIT_SHIFT (8) @@ -179,10 +193,12 @@ typedef volatile struct _MPI2_SYSTEM_INTERFACE_REGS { U32 HCBSize; /*0x74 */ U32 HCBAddressLow; /*0x78 */ U32 HCBAddressHigh; /*0x7C */ - U32 Reserved6[16]; /*0x80 */ + U32 Reserved6[12]; /*0x80 */ + U32 Scratchpad[4]; /*0xB0 */ U32 RequestDescriptorPostLow; /*0xC0 */ U32 RequestDescriptorPostHigh; /*0xC4 */ - U32 Reserved7[14]; /*0xC8 */ + U32 AtomicRequestDescriptorPost;/*0xC8 */ + U32 Reserved7[13]; /*0xCC */ } MPI2_SYSTEM_INTERFACE_REGS, *PTR_MPI2_SYSTEM_INTERFACE_REGS, Mpi2SystemInterfaceRegs_t, @@ -224,6 +240,8 @@ typedef volatile struct _MPI2_SYSTEM_INTERFACE_REGS { */ #define MPI2_HOST_DIAGNOSTIC_OFFSET (0x00000008) +#define MPI2_DIAG_SBR_RELOAD (0x00002000) + #define MPI2_DIAG_BOOT_DEVICE_SELECT_MASK (0x00001800) #define MPI2_DIAG_BOOT_DEVICE_SELECT_DEFAULT (0x00000000) #define MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW (0x00000800) @@ -298,10 +316,19 @@ typedef volatile struct _MPI2_SYSTEM_INTERFACE_REGS { #define MPI2_HCB_ADDRESS_HIGH_OFFSET (0x0000007C) /* - *Offsets for the Request Queue + *Offsets for the Scratchpad registers + */ +#define MPI26_SCRATCHPAD0_OFFSET (0x000000B0) +#define MPI26_SCRATCHPAD1_OFFSET (0x000000B4) +#define MPI26_SCRATCHPAD2_OFFSET (0x000000B8) +#define MPI26_SCRATCHPAD3_OFFSET (0x000000BC) + +/* + *Offsets for the Request Descriptor Post Queue */ #define MPI2_REQUEST_DESCRIPTOR_POST_LOW_OFFSET (0x000000C0) #define MPI2_REQUEST_DESCRIPTOR_POST_HIGH_OFFSET (0x000000C4) +#define MPI26_ATOMIC_REQUEST_DESCRIPTOR_POST_OFFSET (0x000000C8) /*Hard Reset delay timings */ #define MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC (50000) @@ -329,7 +356,8 @@ typedef struct _MPI2_DEFAULT_REQUEST_DESCRIPTOR { *pMpi2DefaultRequestDescriptor_t; /*defines for the RequestFlags field */ -#define MPI2_REQ_DESCRIPT_FLAGS_TYPE_MASK (0x0E) +#define MPI2_REQ_DESCRIPT_FLAGS_TYPE_MASK (0x1E) +#define MPI2_REQ_DESCRIPT_FLAGS_TYPE_RSHIFT (1) #define MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO (0x00) #define MPI2_REQ_DESCRIPT_FLAGS_SCSI_TARGET (0x02) #define MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY (0x06) @@ -337,7 +365,7 @@ typedef struct _MPI2_DEFAULT_REQUEST_DESCRIPTOR { #define MPI2_REQ_DESCRIPT_FLAGS_RAID_ACCELERATOR (0x0A) #define MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO (0x0C) -#define MPI2_REQ_DESCRIPT_FLAGS_IOC_FIFO_MARKER (0x01) +#define MPI2_REQ_DESCRIPT_FLAGS_IOC_FIFO_MARKER (0x01) /*High Priority Request Descriptor */ typedef struct _MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR { @@ -408,6 +436,33 @@ typedef union _MPI2_REQUEST_DESCRIPTOR_UNION { Mpi2RequestDescriptorUnion_t, *pMpi2RequestDescriptorUnion_t; +/*Atomic Request Descriptors */ + +/* + * All Atomic Request Descriptors have the same format, so the following + * structure is used for all Atomic Request Descriptors: + * Atomic Default Request Descriptor + * Atomic High Priority Request Descriptor + * Atomic SCSI IO Request Descriptor + * Atomic SCSI Target Request Descriptor + * 
Atomic RAID Accelerator Request Descriptor + * Atomic Fast Path SCSI IO Request Descriptor + */ + +/*Atomic Request Descriptor */ +typedef struct _MPI26_ATOMIC_REQUEST_DESCRIPTOR { + U8 RequestFlags; /* 0x00 */ + U8 MSIxIndex; /* 0x01 */ + U16 SMID; /* 0x02 */ +} MPI26_ATOMIC_REQUEST_DESCRIPTOR, + *PTR_MPI26_ATOMIC_REQUEST_DESCRIPTOR, + Mpi26AtomicRequestDescriptor_t, + *pMpi26AtomicRequestDescriptor_t; + +/*for the RequestFlags field, use the same + *defines as MPI2_DEFAULT_REQUEST_DESCRIPTOR + */ + /*Reply Descriptors */ /*Default Reply Descriptor */ @@ -548,6 +603,7 @@ typedef union _MPI2_REPLY_DESCRIPTORS_UNION { #define MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR (0x18) #define MPI2_FUNCTION_SMP_PASSTHROUGH (0x1A) #define MPI2_FUNCTION_SAS_IO_UNIT_CONTROL (0x1B) +#define MPI2_FUNCTION_IO_UNIT_CONTROL (0x1B) #define MPI2_FUNCTION_SATA_PASSTHROUGH (0x1C) #define MPI2_FUNCTION_DIAG_BUFFER_POST (0x1D) #define MPI2_FUNCTION_DIAG_RELEASE (0x1E) @@ -587,6 +643,7 @@ typedef union _MPI2_REPLY_DESCRIPTORS_UNION { #define MPI2_IOCSTATUS_INVALID_FIELD (0x0007) #define MPI2_IOCSTATUS_INVALID_STATE (0x0008) #define MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED (0x0009) +#define MPI2_IOCSTATUS_INSUFFICIENT_POWER (0x000A) /**************************************************************************** * Config IOCStatus values @@ -1045,7 +1102,7 @@ typedef union _MPI2_IEEE_SGE_CHAIN_UNION { Mpi2IeeeSgeChainUnion_t, *pMpi2IeeeSgeChainUnion_t; -/*MPI25_IEEE_SGE_CHAIN64 is for MPI v2.5 products only */ +/*MPI25_IEEE_SGE_CHAIN64 is for MPI v2.5 and later */ typedef struct _MPI25_IEEE_SGE_CHAIN64 { U64 Address; U32 Length; @@ -1098,6 +1155,11 @@ typedef union _MPI25_SGE_IO_UNION { #define MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT (0x00) #define MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT (0x80) +/*Next Segment Format */ + +#define MPI26_IEEE_SGE_FLAGS_NSF_MASK (0x1C) +#define MPI26_IEEE_SGE_FLAGS_NSF_MPI_IEEE (0x00) + /*Data Location Address Space */ #define MPI2_IEEE_SGE_FLAGS_ADDR_MASK (0x03) @@ -1108,6 +1170,7 @@ typedef union _MPI25_SGE_IO_UNION { #define MPI2_IEEE_SGE_FLAGS_SYSTEMPLBPCI_ADDR (0x03) #define MPI2_IEEE_SGE_FLAGS_SYSTEMPLBCPI_ADDR \ (MPI2_IEEE_SGE_FLAGS_SYSTEMPLBPCI_ADDR) +#define MPI26_IEEE_SGE_FLAGS_IOCCTL_ADDR (0x02) /**************************************************************************** * IEEE SGE operation Macros @@ -1166,6 +1229,7 @@ typedef union _MPI2_SGE_IO_UNION { #define MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE (0x00) #define MPI2_SGLFLAGS_IOCDDR_ADDRESS_SPACE (0x04) #define MPI2_SGLFLAGS_IOCPLB_ADDRESS_SPACE (0x08) +#define MPI26_SGLFLAGS_IOCPLB_ADDRESS_SPACE (0x08) #define MPI2_SGLFLAGS_IOCPLBNTA_ADDRESS_SPACE (0x0C) /*values for SGL Type subfield */ #define MPI2_SGLFLAGS_SGL_TYPE_MASK (0x03) diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h index 581fdb375db5..9cf09bf7c4a8 100644 --- a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h @@ -1,12 +1,12 @@ /* - * Copyright (c) 2000-2014 LSI Corporation. + * Copyright 2000-2015 Avago Technologies. All rights reserved. * * * Name: mpi2_cnfg.h * Title: MPI Configuration messages and pages * Creation Date: November 10, 2006 * - * mpi2_cnfg.h Version: 02.00.29 + * mpi2_cnfg.h Version: 02.00.33 * * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 * prefix are for use only on MPI v2.5 products, and must not be used @@ -178,7 +178,14 @@ * 01-08-14 02.00.28 Added more defines for the BiosOptions field of * MPI2_CONFIG_PAGE_BIOS_1. 
* 06-13-14 02.00.29 Added SSUTimeout field to MPI2_CONFIG_PAGE_BIOS_1, and - * more defines for the BiosOptions field.. + * more defines for the BiosOptions field. + * 11-18-14 02.00.30 Updated copyright information. + * Added MPI2_BIOSPAGE1_OPTIONS_ADVANCED_CONFIG. + * Added AdapterOrderAux fields to BIOS Page 3. + * 03-16-15 02.00.31 Updated for MPI v2.6. + * Added new SAS Phy Event codes + * 05-25-15 02.00.33 Added more defines for the BiosOptions field of + * MPI2_CONFIG_PAGE_BIOS_1. * -------------------------------------------------------------------------- */ @@ -355,7 +362,6 @@ typedef union _MPI2_CONFIG_EXT_PAGE_HEADER_UNION { #define MPI2_ETHERNET_PGAD_IF_NUMBER_MASK (0x000000FF) - /**************************************************************************** * Configuration messages ****************************************************************************/ @@ -457,8 +463,17 @@ typedef struct _MPI2_CONFIG_REPLY { #define MPI25_MFGPAGE_DEVID_SAS3108_5 (0x0094) #define MPI25_MFGPAGE_DEVID_SAS3108_6 (0x0095) - - +/* MPI v2.6 SAS Products */ +#define MPI26_MFGPAGE_DEVID_SAS3216 (0x00C9) +#define MPI26_MFGPAGE_DEVID_SAS3224 (0x00C4) +#define MPI26_MFGPAGE_DEVID_SAS3316_1 (0x00C5) +#define MPI26_MFGPAGE_DEVID_SAS3316_2 (0x00C6) +#define MPI26_MFGPAGE_DEVID_SAS3316_3 (0x00C7) +#define MPI26_MFGPAGE_DEVID_SAS3316_4 (0x00C8) +#define MPI26_MFGPAGE_DEVID_SAS3324_1 (0x00C0) +#define MPI26_MFGPAGE_DEVID_SAS3324_2 (0x00C1) +#define MPI26_MFGPAGE_DEVID_SAS3324_3 (0x00C2) +#define MPI26_MFGPAGE_DEVID_SAS3324_4 (0x00C3) /*Manufacturing Page 0 */ @@ -941,8 +956,8 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 { U8 BoardTemperatureUnits; /*0x16 */ U8 Reserved3; /*0x17 */ - U32 Reserved4; /* 0x18 */ - U32 Reserved5; /* 0x1C */ + U32 BoardPowerRequirement; /*0x18 */ + U32 PCISlotPowerAllocation; /*0x1C */ U32 Reserved6; /* 0x20 */ U32 Reserved7; /* 0x24 */ } MPI2_CONFIG_PAGE_IO_UNIT_7, @@ -1151,6 +1166,62 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_10 { #define MPI2_IOUNITPAGE10_PAGEVERSION (0x01) +/* IO Unit Page 11 (for MPI v2.6 and later) */ + +typedef struct _MPI26_IOUNIT11_SPINUP_GROUP { + U8 MaxTargetSpinup; /* 0x00 */ + U8 SpinupDelay; /* 0x01 */ + U8 SpinupFlags; /* 0x02 */ + U8 Reserved1; /* 0x03 */ +} MPI26_IOUNIT11_SPINUP_GROUP, + *PTR_MPI26_IOUNIT11_SPINUP_GROUP, + Mpi26IOUnit11SpinupGroup_t, + *pMpi26IOUnit11SpinupGroup_t; + +/* defines for IO Unit Page 11 SpinupFlags */ +#define MPI26_IOUNITPAGE11_SPINUP_DISABLE_FLAG (0x01) + + +/* + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * four and check the value returned for NumPhys at runtime. 
+ */ +#ifndef MPI26_IOUNITPAGE11_PHY_MAX +#define MPI26_IOUNITPAGE11_PHY_MAX (4) +#endif + +typedef struct _MPI26_CONFIG_PAGE_IO_UNIT_11 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U32 Reserved1; /*0x04 */ + MPI26_IOUNIT11_SPINUP_GROUP SpinupGroupParameters[4]; /*0x08 */ + U32 Reserved2; /*0x18 */ + U32 Reserved3; /*0x1C */ + U32 Reserved4; /*0x20 */ + U8 BootDeviceWaitTime; /*0x24 */ + U8 Reserved5; /*0x25 */ + U16 Reserved6; /*0x26 */ + U8 NumPhys; /*0x28 */ + U8 PEInitialSpinupDelay; /*0x29 */ + U8 PEReplyDelay; /*0x2A */ + U8 Flags; /*0x2B */ + U8 PHY[MPI26_IOUNITPAGE11_PHY_MAX];/*0x2C */ +} MPI26_CONFIG_PAGE_IO_UNIT_11, + *PTR_MPI26_CONFIG_PAGE_IO_UNIT_11, + Mpi26IOUnitPage11_t, + *pMpi26IOUnitPage11_t; + +#define MPI26_IOUNITPAGE11_PAGEVERSION (0x00) + +/* defines for Flags field */ +#define MPI26_IOUNITPAGE11_FLAGS_AUTO_PORTENABLE (0x01) + +/* defines for PHY field */ +#define MPI26_IOUNITPAGE11_PHY_SPINUP_GROUP_MASK (0x03) + + + + + /**************************************************************************** * IOC Config Pages @@ -1343,6 +1414,10 @@ typedef struct _MPI2_CONFIG_PAGE_BIOS_1 { #define MPI2_BIOSPAGE1_PAGEVERSION (0x07) /*values for BIOS Page 1 BiosOptions field */ +#define MPI2_BIOSPAGE1_OPTIONS_BOOT_LIST_ADD_ALT_BOOT_DEVICE (0x00008000) +#define MPI2_BIOSPAGE1_OPTIONS_ADVANCED_CONFIG (0x00004000) + +#define MPI2_BIOSPAGE1_OPTIONS_PNS_MASK (0x00003800) #define MPI2_BIOSPAGE1_OPTIONS_PNS_MASK (0x00003800) #define MPI2_BIOSPAGE1_OPTIONS_PNS_PBDHL (0x00000000) #define MPI2_BIOSPAGE1_OPTIONS_PNS_ENCSLOSURE (0x00000800) @@ -1492,6 +1567,8 @@ typedef struct _MPI2_CONFIG_PAGE_BIOS_2 { /*BIOS Page 3 */ +#define MPI2_BIOSPAGE3_NUM_ADAPTER (4) + typedef struct _MPI2_ADAPTER_INFO { U8 PciBusNumber; /*0x00 */ U8 PciDeviceAndFunctionNumber; /*0x01 */ @@ -1502,17 +1579,26 @@ typedef struct _MPI2_ADAPTER_INFO { #define MPI2_ADAPTER_INFO_FLAGS_EMBEDDED (0x0001) #define MPI2_ADAPTER_INFO_FLAGS_INIT_STATUS (0x0002) +typedef struct _MPI2_ADAPTER_ORDER_AUX { + U64 WWID; /* 0x00 */ + U32 Reserved1; /* 0x08 */ + U32 Reserved2; /* 0x0C */ +} MPI2_ADAPTER_ORDER_AUX, *PTR_MPI2_ADAPTER_ORDER_AUX, + Mpi2AdapterOrderAux_t, *pMpi2AdapterOrderAux_t; + + typedef struct _MPI2_CONFIG_PAGE_BIOS_3 { MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ U32 GlobalFlags; /*0x04 */ U32 BiosVersion; /*0x08 */ - MPI2_ADAPTER_INFO AdapterOrder[4]; /*0x0C */ + MPI2_ADAPTER_INFO AdapterOrder[MPI2_BIOSPAGE3_NUM_ADAPTER]; U32 Reserved1; /*0x1C */ + MPI2_ADAPTER_ORDER_AUX AdapterOrderAux[MPI2_BIOSPAGE3_NUM_ADAPTER]; } MPI2_CONFIG_PAGE_BIOS_3, *PTR_MPI2_CONFIG_PAGE_BIOS_3, Mpi2BiosPage3_t, *pMpi2BiosPage3_t; -#define MPI2_BIOSPAGE3_PAGEVERSION (0x00) +#define MPI2_BIOSPAGE3_PAGEVERSION (0x01) /*values for BIOS Page 3 GlobalFlags */ #define MPI2_BIOSPAGE3_FLAGS_PAUSE_ON_ERROR (0x00000002) @@ -2006,6 +2092,8 @@ typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_0 { #define MPI2_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG (0x01) /*values for SAS IO Unit Page 0 PhyFlags */ +#define MPI2_SASIOUNIT0_PHYFLAGS_INIT_PERSIST_CONNECT (0x40) +#define MPI2_SASIOUNIT0_PHYFLAGS_TARG_PERSIST_CONNECT (0x20) #define MPI2_SASIOUNIT0_PHYFLAGS_ZONING_ENABLED (0x10) #define MPI2_SASIOUNIT0_PHYFLAGS_PHY_DISABLED (0x08) @@ -2108,6 +2196,7 @@ typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_1 { #define MPI2_SASIOUNIT1_CONTROL_CLEAR_AFFILIATION (0x0001) /*values for SAS IO Unit Page 1 AdditionalControlFlags */ +#define MPI2_SASIOUNIT1_ACONTROL_DA_PERSIST_CONNECT (0x0100) #define MPI2_SASIOUNIT1_ACONTROL_MULTI_PORT_DOMAIN_ILLEGAL (0x0080) #define 
MPI2_SASIOUNIT1_ACONTROL_SATA_ASYNCHROUNOUS_NOTIFICATION (0x0040) #define MPI2_SASIOUNIT1_ACONTROL_INVALID_TOPOLOGY_CORRECTION (0x0020) @@ -2125,6 +2214,8 @@ typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_1 { #define MPI2_SASIOUNIT1_PORT_FLAGS_AUTO_PORT_CONFIG (0x01) /*values for SAS IO Unit Page 1 PhyFlags */ +#define MPI2_SASIOUNIT1_PHYFLAGS_INIT_PERSIST_CONNECT (0x40) +#define MPI2_SASIOUNIT1_PHYFLAGS_TARG_PERSIST_CONNECT (0x20) #define MPI2_SASIOUNIT1_PHYFLAGS_ZONING_ENABLE (0x10) #define MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE (0x08) @@ -2144,7 +2235,7 @@ typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_1 { *SAS IO Unit Page 1 ControllerPhyDeviceInfo values */ -/*SAS IO Unit Page 4 */ +/*SAS IO Unit Page 4 (for MPI v2.5 and earlier) */ typedef struct _MPI2_SAS_IOUNIT4_SPINUP_GROUP { U8 MaxTargetSpinup; /*0x00 */ @@ -2715,6 +2806,7 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_DEV_0 { #define MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED (0x0020) #define MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED (0x0010) #define MPI2_SAS_DEVICE0_FLAGS_PORT_SELECTOR_ATTACH (0x0008) +#define MPI2_SAS_DEVICE0_FLAGS_PERSIST_CAPABLE (0x0004) #define MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID (0x0002) #define MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT (0x0001) @@ -2922,6 +3014,19 @@ typedef struct _MPI2_SASPHY3_PHY_EVENT_CONFIG { #define MPI2_SASPHY3_EVENT_CODE_MISALIGNED_MUX_PRIMITIVE (0xD1) #define MPI2_SASPHY3_EVENT_CODE_RX_AIP (0xD2) +/*Following codes are product specific and in MPI v2.6 and later */ +#define MPI2_SASPHY3_EVENT_CODE_LCARB_WAIT_TIME (0xD3) +#define MPI2_SASPHY3_EVENT_CODE_RCVD_CONN_RESP_WAIT_TIME (0xD4) +#define MPI2_SASPHY3_EVENT_CODE_LCCONN_TIME (0xD5) +#define MPI2_SASPHY3_EVENT_CODE_SSP_TX_START_TRANSMIT (0xD6) +#define MPI2_SASPHY3_EVENT_CODE_SATA_TX_START (0xD7) +#define MPI2_SASPHY3_EVENT_CODE_SMP_TX_START_TRANSMT (0xD8) +#define MPI2_SASPHY3_EVENT_CODE_TX_SMP_BREAK_CONN (0xD9) +#define MPI2_SASPHY3_EVENT_CODE_SSP_RX_START_RECEIVE (0xDA) +#define MPI2_SASPHY3_EVENT_CODE_SATA_RX_START_RECEIVE (0xDB) +#define MPI2_SASPHY3_EVENT_CODE_SMP_RX_START_RECEIVE (0xDC) + + /*values for the CounterType field */ #define MPI2_SASPHY3_COUNTER_TYPE_WRAPPING (0x00) #define MPI2_SASPHY3_COUNTER_TYPE_SATURATING (0x01) diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_init.h b/drivers/scsi/mpt3sas/mpi/mpi2_init.h index 068c98efd742..c38f624b859d 100644 --- a/drivers/scsi/mpt3sas/mpi/mpi2_init.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_init.h @@ -1,12 +1,12 @@ /* - * Copyright (c) 2000-2014 LSI Corporation. + * Copyright 2000-2015 Avago Technologies. All rights reserved. * * * Name: mpi2_init.h * Title: MPI SCSI initiator mode messages and structures * Creation Date: June 23, 2006 * - * mpi2_init.h Version: 02.00.15 + * mpi2_init.h Version: 02.00.17 * * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 * prefix are for use only on MPI v2.5 products, and must not be used @@ -46,6 +46,11 @@ * 07-10-12 02.00.14 Added MPI2_SCSIIO_CONTROL_SHIFT_DATADIRECTION. * 04-09-13 02.00.15 Added SCSIStatusQualifier field to MPI2_SCSI_IO_REPLY, * replacing the Reserved4 field. + * 11-18-14 02.00.16 Updated copyright information. + * 03-16-15 02.00.17 Updated for MPI v2.6. + * Added MPI26_SCSIIO_IOFLAGS_ESCAPE_PASSTHROUGH. + * Added MPI2_SEP_REQ_SLOTSTATUS_DEV_OFF and + * MPI2_SEP_REPLY_SLOTSTATUS_DEV_OFF. 
* -------------------------------------------------------------------------- */ @@ -128,6 +133,7 @@ typedef struct _MPI2_SCSI_IO_REQUEST { #define MPI2_SCSIIO_MSGFLAGS_IOCDDR_SENSE_ADDR (0x04) #define MPI2_SCSIIO_MSGFLAGS_IOCPLB_SENSE_ADDR (0x08) #define MPI2_SCSIIO_MSGFLAGS_IOCPLBNTA_SENSE_ADDR (0x0C) +#define MPI26_SCSIIO_MSGFLAGS_IOCCTL_SENSE_ADDR (0x08) /*SCSI IO SGLFlags bits */ @@ -228,7 +234,7 @@ typedef union _MPI25_SCSI_IO_CDB_UNION { } MPI25_SCSI_IO_CDB_UNION, *PTR_MPI25_SCSI_IO_CDB_UNION, Mpi25ScsiIoCdb_t, *pMpi25ScsiIoCdb_t; -/*MPI v2.5 SCSI IO Request Message */ +/*MPI v2.5/2.6 SCSI IO Request Message */ typedef struct _MPI25_SCSI_IO_REQUEST { U16 DevHandle; /*0x00 */ U8 ChainOffset; /*0x02 */ @@ -302,12 +308,14 @@ typedef struct _MPI25_SCSI_IO_REQUEST { #define MPI25_SCSIIO_NUM_SGLOFFSETS (4) /*defines for the IoFlags field */ -#define MPI25_SCSIIO_IOFLAGS_IO_PATH_MASK (0xC000) -#define MPI25_SCSIIO_IOFLAGS_NORMAL_PATH (0x0000) -#define MPI25_SCSIIO_IOFLAGS_FAST_PATH (0x4000) +#define MPI25_SCSIIO_IOFLAGS_IO_PATH_MASK (0xC000) +#define MPI25_SCSIIO_IOFLAGS_NORMAL_PATH (0x0000) +#define MPI25_SCSIIO_IOFLAGS_FAST_PATH (0x4000) +#define MPI26_SCSIIO_IOFLAGS_ESCAPE_PASSTHROUGH (0x2000) #define MPI25_SCSIIO_IOFLAGS_LARGE_CDB (0x1000) #define MPI25_SCSIIO_IOFLAGS_BIDIRECTIONAL (0x0800) +#define MPI26_SCSIIO_IOFLAGS_PORT_REQUEST (0x0400) #define MPI25_SCSIIO_IOFLAGS_CDBLENGTH_MASK (0x01FF) /*MPI v2.5 defines for the EEDPFlags bits */ @@ -512,6 +520,7 @@ typedef struct _MPI2_SEP_REQUEST { #define MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS (0x01) /*SlotStatus defines */ +#define MPI2_SEP_REQ_SLOTSTATUS_DEV_OFF (0x00080000) #define MPI2_SEP_REQ_SLOTSTATUS_REQUEST_REMOVE (0x00040000) #define MPI2_SEP_REQ_SLOTSTATUS_IDENTIFY_REQUEST (0x00020000) #define MPI2_SEP_REQ_SLOTSTATUS_REBUILD_STOPPED (0x00000200) @@ -547,6 +556,7 @@ typedef struct _MPI2_SEP_REPLY { Mpi2SepReply_t, *pMpi2SepReply_t; /*SlotStatus defines */ +#define MPI2_SEP_REPLY_SLOTSTATUS_DEV_OFF (0x00080000) #define MPI2_SEP_REPLY_SLOTSTATUS_REMOVE_READY (0x00040000) #define MPI2_SEP_REPLY_SLOTSTATUS_IDENTIFY_REQUEST (0x00020000) #define MPI2_SEP_REPLY_SLOTSTATUS_REBUILD_STOPPED (0x00000200) diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h index d7598cc4bb8e..cf510ed91924 100644 --- a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h @@ -1,12 +1,12 @@ /* - * Copyright (c) 2000-2014 LSI Corporation. + * Copyright 2000-2015 Avago Technologies. All rights reserved. * * * Name: mpi2_ioc.h * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages * Creation Date: October 11, 2006 * - * mpi2_ioc.h Version: 02.00.24 + * mpi2_ioc.h Version: 02.00.26 * * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 * prefix are for use only on MPI v2.5 products, and must not be used @@ -133,6 +133,10 @@ * Added MPI2_FW_DOWNLOAD_ITYPE_PUBLIC_KEY. * Added Encrypted Hash Extended Image. * 12-05-13 02.00.24 Added MPI25_HASH_IMAGE_TYPE_BIOS. + * 11-18-14 02.00.25 Updated copyright information. + * 03-16-15 02.00.26 Added MPI26_FW_HEADER_PID_FAMILY_3324_SAS and + * MPI26_FW_HEADER_PID_FAMILY_3516_SAS. + * Added MPI26_CTRL_OP_SHUTDOWN. 
* -------------------------------------------------------------------------- */ @@ -165,7 +169,7 @@ typedef struct _MPI2_IOC_INIT_REQUEST { U16 HeaderVersion; /*0x0E */ U32 Reserved5; /*0x10 */ U16 Reserved6; /*0x14 */ - U8 Reserved7; /*0x16 */ + U8 HostPageSize; /*0x16 */ U8 HostMSIxVectors; /*0x17 */ U16 Reserved8; /*0x18 */ U16 SystemRequestFrameSize; /*0x1A */ @@ -289,7 +293,8 @@ typedef struct _MPI2_IOC_FACTS_REPLY { U16 MaxDevHandle; /*0x38 */ U16 MaxPersistentEntries; /*0x3A */ U16 MinDevHandle; /*0x3C */ - U16 Reserved4; /*0x3E */ + U8 CurrentHostPageSize; /* 0x3E */ + U8 Reserved4; /* 0x3F */ } MPI2_IOC_FACTS_REPLY, *PTR_MPI2_IOC_FACTS_REPLY, Mpi2IOCFactsReply_t, *pMpi2IOCFactsReply_t; @@ -326,6 +331,7 @@ typedef struct _MPI2_IOC_FACTS_REPLY { /*ProductID field uses MPI2_FW_HEADER_PID_ */ /*IOCCapabilities */ +#define MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ (0x00080000) #define MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE (0x00040000) #define MPI25_IOCFACTS_CAPABILITY_FAST_PATH_CAPABLE (0x00020000) #define MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY (0x00010000) @@ -343,8 +349,8 @@ typedef struct _MPI2_IOC_FACTS_REPLY { #define MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING (0x00000004) /*ProtocolFlags */ -#define MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET (0x0001) #define MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR (0x0002) +#define MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET (0x0001) /**************************************************************************** * PortFacts message @@ -1247,6 +1253,7 @@ typedef struct _MPI2_FW_UPLOAD_REQUEST { #define MPI2_FW_UPLOAD_ITYPE_MEGARAID (0x09) #define MPI2_FW_UPLOAD_ITYPE_COMPLETE (0x0A) #define MPI2_FW_UPLOAD_ITYPE_COMMON_BOOT_BLOCK (0x0B) +#define MPI2_FW_UPLOAD_ITYPE_CBB_BACKUP (0x0D) /*MPI v2.0 FWUpload TransactionContext Element */ typedef struct _MPI2_FW_UPLOAD_TCSGE { @@ -1328,7 +1335,7 @@ typedef struct _MPI2_FW_IMAGE_HEADER { U32 Reserved54; /*0x54 */ U32 Reserved58; /*0x58 */ U32 Reserved5C; /*0x5C */ - U32 Reserved60; /*0x60 */ + U32 BootFlags; /*0x60 */ U32 FirmwareVersionNameWhat; /*0x64 */ U8 FirmwareVersionName[32]; /*0x68 */ U32 VendorNameWhat; /*0x88 */ @@ -1354,18 +1361,22 @@ typedef struct _MPI2_FW_IMAGE_HEADER { #define MPI2_FW_HEADER_SIGNATURE_OFFSET (0x00) #define MPI2_FW_HEADER_SIGNATURE_MASK (0xFF000000) #define MPI2_FW_HEADER_SIGNATURE (0xEA000000) +#define MPI26_FW_HEADER_SIGNATURE (0xEB000000) /*Signature0 field */ #define MPI2_FW_HEADER_SIGNATURE0_OFFSET (0x04) #define MPI2_FW_HEADER_SIGNATURE0 (0x5AFAA55A) +#define MPI26_FW_HEADER_SIGNATURE0 (0x5AEAA55A) /*Signature1 field */ #define MPI2_FW_HEADER_SIGNATURE1_OFFSET (0x08) #define MPI2_FW_HEADER_SIGNATURE1 (0xA55AFAA5) +#define MPI26_FW_HEADER_SIGNATURE1 (0xA55AEAA5) /*Signature2 field */ #define MPI2_FW_HEADER_SIGNATURE2_OFFSET (0x0C) #define MPI2_FW_HEADER_SIGNATURE2 (0x5AA55AFA) +#define MPI26_FW_HEADER_SIGNATURE2 (0x5AA55AEA) /*defines for using the ProductID field */ #define MPI2_FW_HEADER_PID_TYPE_MASK (0xF000) @@ -1381,6 +1392,8 @@ typedef struct _MPI2_FW_IMAGE_HEADER { #define MPI2_FW_HEADER_PID_FAMILY_2108_SAS (0x0013) #define MPI2_FW_HEADER_PID_FAMILY_2208_SAS (0x0014) #define MPI25_FW_HEADER_PID_FAMILY_3108_SAS (0x0021) +#define MPI26_FW_HEADER_PID_FAMILY_3324_SAS (0x0028) +#define MPI26_FW_HEADER_PID_FAMILY_3516_SAS (0x0031) /*use MPI2_IOCFACTS_PROTOCOL_ defines for ProtocolFlags field */ @@ -1388,6 +1401,7 @@ typedef struct _MPI2_FW_IMAGE_HEADER { #define MPI2_FW_HEADER_IMAGESIZE_OFFSET (0x2C) #define MPI2_FW_HEADER_NEXTIMAGE_OFFSET (0x30) +#define 
MPI26_FW_HEADER_BOOTFLAGS_OFFSET (0x60) #define MPI2_FW_HEADER_VERNMHWAT_OFFSET (0x64) #define MPI2_FW_HEADER_WHAT_SIGNATURE (0x29232840) @@ -1493,7 +1507,9 @@ typedef struct _MPI2_FLASH_LAYOUT_DATA { #define MPI2_FLASH_REGION_CONFIG_1 (0x07) #define MPI2_FLASH_REGION_CONFIG_2 (0x08) #define MPI2_FLASH_REGION_MEGARAID (0x09) -#define MPI2_FLASH_REGION_INIT (0x0A) +#define MPI2_FLASH_REGION_COMMON_BOOT_BLOCK (0x0A) +#define MPI2_FLASH_REGION_INIT (MPI2_FLASH_REGION_COMMON_BOOT_BLOCK) +#define MPI2_FLASH_REGION_CBB_BACKUP (0x0D) /*ImageRevision */ #define MPI2_FLASH_LAYOUT_IMAGE_REVISION (0x00) @@ -1619,7 +1635,6 @@ typedef struct _MPI25_ENCRYPTED_HASH_DATA { Mpi25EncryptedHashData_t, *pMpi25EncryptedHashData_t; - /**************************************************************************** * PowerManagementControl message ****************************************************************************/ @@ -1726,4 +1741,90 @@ typedef struct _MPI2_PWR_MGMT_CONTROL_REPLY { } MPI2_PWR_MGMT_CONTROL_REPLY, *PTR_MPI2_PWR_MGMT_CONTROL_REPLY, Mpi2PwrMgmtControlReply_t, *pMpi2PwrMgmtControlReply_t; +/**************************************************************************** +* IO Unit Control messages (MPI v2.6 and later only.) +****************************************************************************/ + +/* IO Unit Control Request Message */ +typedef struct _MPI26_IOUNIT_CONTROL_REQUEST { + U8 Operation; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 DevHandle; /* 0x04 */ + U8 IOCParameter; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved3; /* 0x0A */ + U16 Reserved4; /* 0x0C */ + U8 PhyNum; /* 0x0E */ + U8 PrimFlags; /* 0x0F */ + U32 Primitive; /* 0x10 */ + U8 LookupMethod; /* 0x14 */ + U8 Reserved5; /* 0x15 */ + U16 SlotNumber; /* 0x16 */ + U64 LookupAddress; /* 0x18 */ + U32 IOCParameterValue; /* 0x20 */ + U32 Reserved7; /* 0x24 */ + U32 Reserved8; /* 0x28 */ +} MPI26_IOUNIT_CONTROL_REQUEST, + *PTR_MPI26_IOUNIT_CONTROL_REQUEST, + Mpi26IoUnitControlRequest_t, + *pMpi26IoUnitControlRequest_t; + +/* values for the Operation field */ +#define MPI26_CTRL_OP_CLEAR_ALL_PERSISTENT (0x02) +#define MPI26_CTRL_OP_SAS_PHY_LINK_RESET (0x06) +#define MPI26_CTRL_OP_SAS_PHY_HARD_RESET (0x07) +#define MPI26_CTRL_OP_PHY_CLEAR_ERROR_LOG (0x08) +#define MPI26_CTRL_OP_SAS_SEND_PRIMITIVE (0x0A) +#define MPI26_CTRL_OP_FORCE_FULL_DISCOVERY (0x0B) +#define MPI26_CTRL_OP_REMOVE_DEVICE (0x0D) +#define MPI26_CTRL_OP_LOOKUP_MAPPING (0x0E) +#define MPI26_CTRL_OP_SET_IOC_PARAMETER (0x0F) +#define MPI26_CTRL_OP_ENABLE_FP_DEVICE (0x10) +#define MPI26_CTRL_OP_DISABLE_FP_DEVICE (0x11) +#define MPI26_CTRL_OP_ENABLE_FP_ALL (0x12) +#define MPI26_CTRL_OP_DISABLE_FP_ALL (0x13) +#define MPI26_CTRL_OP_DEV_ENABLE_NCQ (0x14) +#define MPI26_CTRL_OP_DEV_DISABLE_NCQ (0x15) +#define MPI26_CTRL_OP_SHUTDOWN (0x16) +#define MPI26_CTRL_OP_DEV_ENABLE_PERSIST_CONNECTION (0x17) +#define MPI26_CTRL_OP_DEV_DISABLE_PERSIST_CONNECTION (0x18) +#define MPI26_CTRL_OP_DEV_CLOSE_PERSIST_CONNECTION (0x19) +#define MPI26_CTRL_OP_PRODUCT_SPECIFIC_MIN (0x80) + +/* values for the PrimFlags field */ +#define MPI26_CTRL_PRIMFLAGS_SINGLE (0x08) +#define MPI26_CTRL_PRIMFLAGS_TRIPLE (0x02) +#define MPI26_CTRL_PRIMFLAGS_REDUNDANT (0x01) + +/* values for the LookupMethod field */ +#define MPI26_CTRL_LOOKUP_METHOD_WWID_ADDRESS (0x01) +#define MPI26_CTRL_LOOKUP_METHOD_ENCLOSURE_SLOT (0x02) +#define MPI26_CTRL_LOOKUP_METHOD_SAS_DEVICE_NAME (0x03) + + +/* IO Unit Control 
Reply Message */ +typedef struct _MPI26_IOUNIT_CONTROL_REPLY { + U8 Operation; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 MsgLength; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 DevHandle; /* 0x04 */ + U8 IOCParameter; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved3; /* 0x0A */ + U16 Reserved4; /* 0x0C */ + U16 IOCStatus; /* 0x0E */ + U32 IOCLogInfo; /* 0x10 */ +} MPI26_IOUNIT_CONTROL_REPLY, + *PTR_MPI26_IOUNIT_CONTROL_REPLY, + Mpi26IoUnitControlReply_t, + *pMpi26IoUnitControlReply_t; + + #endif diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_raid.h b/drivers/scsi/mpt3sas/mpi/mpi2_raid.h index 13d93ca029d5..1c0eeeeb5eaf 100644 --- a/drivers/scsi/mpt3sas/mpi/mpi2_raid.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_raid.h @@ -1,12 +1,12 @@ /* - * Copyright (c) 2000-2014 LSI Corporation. + * Copyright 2000-2014 Avago Technologies. All rights reserved. * * * Name: mpi2_raid.h * Title: MPI Integrated RAID messages and structures * Creation Date: April 26, 2007 * - * mpi2_raid.h Version: 02.00.10 + * mpi2_raid.h Version: 02.00.11 * * Version History * --------------- @@ -31,6 +31,7 @@ * 07-26-12 02.00.09 Added ElapsedSeconds field to MPI2_RAID_VOL_INDICATOR. * Added MPI2_RAID_VOL_FLAGS_ELAPSED_SECONDS_VALID define. * 04-17-13 02.00.10 Added MPI25_RAID_ACTION_ADATA_ALLOW_PI. + * 11-18-14 02.00.11 Updated copyright information. * -------------------------------------------------------------------------- */ diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_sas.h b/drivers/scsi/mpt3sas/mpi/mpi2_sas.h index 156e30543a2f..c10c2c02a945 100644 --- a/drivers/scsi/mpt3sas/mpi/mpi2_sas.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_sas.h @@ -1,12 +1,12 @@ /* - * Copyright (c) 2000-2014 LSI Corporation. + * Copyright 2000-2015 Avago Technologies. All rights reserved. * * * Name: mpi2_sas.h * Title: MPI Serial Attached SCSI structures and definitions * Creation Date: February 9, 2007 * - * mpi2_sas.h Version: 02.00.08 + * mpi2_sas.h Version: 02.00.10 * * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 * prefix are for use only on MPI v2.5 products, and must not be used @@ -32,6 +32,9 @@ * Passthrough Request message. * 08-19-13 02.00.08 Made MPI2_SAS_OP_TRANSMIT_PORT_SELECT_SIGNAL obsolete * for anything newer than MPI v2.0. + * 11-18-14 02.00.09 Updated copyright information. + * 03-16-15 02.00.10 Updated for MPI v2.6. + * Added MPI2_SATA_PT_REQ_PT_FLAGS_FPDMA. * -------------------------------------------------------------------------- */ @@ -183,6 +186,7 @@ typedef struct _MPI2_SATA_PASSTHROUGH_REQUEST { /*values for PassthroughFlags field */ #define MPI2_SATA_PT_REQ_PT_FLAGS_EXECUTE_DIAG (0x0100) +#define MPI2_SATA_PT_REQ_PT_FLAGS_FPDMA (0x0040) #define MPI2_SATA_PT_REQ_PT_FLAGS_DMA (0x0020) #define MPI2_SATA_PT_REQ_PT_FLAGS_PIO (0x0010) #define MPI2_SATA_PT_REQ_PT_FLAGS_UNSPECIFIED_VU (0x0004) @@ -216,6 +220,8 @@ typedef struct _MPI2_SATA_PASSTHROUGH_REPLY { /**************************************************************************** * SAS IO Unit Control messages +* (MPI v2.5 and earlier only. +* Replaced by IO Unit Control messages in MPI v2.6 and later.) ****************************************************************************/ /*SAS IO Unit Control Request Message */ diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_tool.h b/drivers/scsi/mpt3sas/mpi/mpi2_tool.h index 1629e5bce7e1..5f9289a1166f 100644 --- a/drivers/scsi/mpt3sas/mpi/mpi2_tool.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_tool.h @@ -1,12 +1,12 @@ /* - * Copyright (c) 2000-2014 LSI Corporation. 
+ * Copyright 2000-2014 Avago Technologies. All rights reserved. * * * Name: mpi2_tool.h * Title: MPI diagnostic tool structures and definitions * Creation Date: March 26, 2007 * - * mpi2_tool.h Version: 02.00.12 + * mpi2_tool.h Version: 02.00.13 * * Version History * --------------- @@ -34,6 +34,7 @@ * it uses MPI Chain SGE as well as MPI Simple SGE. * 08-19-13 02.00.11 Added MPI2_TOOLBOX_TEXT_DISPLAY_TOOL and related info. * 01-08-14 02.00.12 Added MPI2_TOOLBOX_CLEAN_BIT26_PRODUCT_SPECIFIC. + * 11-18-14 02.00.13 Updated copyright information. * -------------------------------------------------------------------------- */ diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_type.h b/drivers/scsi/mpt3sas/mpi/mpi2_type.h index 99ab093602e8..92a81abc2c31 100644 --- a/drivers/scsi/mpt3sas/mpi/mpi2_type.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_type.h @@ -1,12 +1,12 @@ /* - * Copyright (c) 2000-2014 LSI Corporation. + * Copyright 2000-2014 Avago Technologies. All rights reserved. * * * Name: mpi2_type.h * Title: MPI basic type definitions * Creation Date: August 16, 2006 * - * mpi2_type.h Version: 02.00.00 + * mpi2_type.h Version: 02.00.01 * * Version History * --------------- @@ -14,6 +14,7 @@ * Date Version Description * -------- -------- ------------------------------------------------------ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. + * 11-18-14 02.00.01 Updated copyright information. * -------------------------------------------------------------------------- */ diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 11393ebf1a68..e4db5fb3239a 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -83,6 +83,10 @@ static int msix_disable = -1; module_param(msix_disable, int, 0); MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)"); +static int smp_affinity_enable = 1; +module_param(smp_affinity_enable, int, S_IRUGO); +MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)"); + static int max_msix_vectors = -1; module_param(max_msix_vectors, int, 0); MODULE_PARM_DESC(max_msix_vectors, @@ -395,6 +399,9 @@ _base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply, case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES: desc = "insufficient resources"; break; + case MPI2_IOCSTATUS_INSUFFICIENT_POWER: + desc = "insufficient power"; + break; case MPI2_IOCSTATUS_INVALID_FIELD: desc = "invalid field"; break; @@ -772,7 +779,7 @@ mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK) - return 1; + return mpt3sas_check_for_pending_internal_cmds(ioc, smid); if (ioc->base_cmds.status == MPT3_CMD_NOT_USED) return 1; @@ -803,6 +810,7 @@ _base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply) Mpi2EventNotificationReply_t *mpi_reply; Mpi2EventAckRequest_t *ack_request; u16 smid; + struct _event_ack_list *delayed_event_ack; mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); if (!mpi_reply) @@ -816,8 +824,18 @@ _base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply) goto out; smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx); if (!smid) { - pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", - ioc->name, __func__); + delayed_event_ack = kzalloc(sizeof(*delayed_event_ack), + GFP_ATOMIC); + if (!delayed_event_ack) + goto out; + INIT_LIST_HEAD(&delayed_event_ack->list); +
delayed_event_ack->Event = mpi_reply->Event; + delayed_event_ack->EventContext = mpi_reply->EventContext; + list_add_tail(&delayed_event_ack->list, + &ioc->delayed_event_ack_list); + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "DELAYED: EVENT ACK: event (0x%04x)\n", + ioc->name, le16_to_cpu(mpi_reply->Event))); goto out; } @@ -1348,6 +1366,7 @@ _base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr) u8 sgl_flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR | MPI25_IEEE_SGE_FLAGS_END_OF_LIST); + _base_add_sg_single_ieee(paddr, sgl_flags, 0, 0, -1); } @@ -1797,9 +1816,10 @@ _base_free_irq(struct MPT3SAS_ADAPTER *ioc) list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) { list_del(&reply_q->list); - irq_set_affinity_hint(reply_q->vector, NULL); - free_cpumask_var(reply_q->affinity_hint); - synchronize_irq(reply_q->vector); + if (smp_affinity_enable) { + irq_set_affinity_hint(reply_q->vector, NULL); + free_cpumask_var(reply_q->affinity_hint); + } free_irq(reply_q->vector, reply_q); kfree(reply_q); } @@ -1829,9 +1849,12 @@ _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index, u32 vector) reply_q->msix_index = index; reply_q->vector = vector; - if (!alloc_cpumask_var(&reply_q->affinity_hint, GFP_KERNEL)) - return -ENOMEM; - cpumask_clear(reply_q->affinity_hint); + if (smp_affinity_enable) { + if (!zalloc_cpumask_var(&reply_q->affinity_hint, GFP_KERNEL)) { + kfree(reply_q); + return -ENOMEM; + } + } atomic_set(&reply_q->busy, 0); if (ioc->msix_enable) @@ -1845,6 +1868,7 @@ _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index, u32 vector) if (r) { pr_err(MPT3SAS_FMT "unable to allocate interrupt %d!\n", reply_q->name, vector); + free_cpumask_var(reply_q->affinity_hint); kfree(reply_q); return -EBUSY; } @@ -1894,16 +1918,17 @@ _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc) for (i = 0 ; i < group ; i++) { ioc->cpu_msix_table[cpu] = index; - cpumask_or(reply_q->affinity_hint, + if (smp_affinity_enable) + cpumask_or(reply_q->affinity_hint, reply_q->affinity_hint, get_cpu_mask(cpu)); cpu = cpumask_next(cpu, cpu_online_mask); } - - if (irq_set_affinity_hint(reply_q->vector, + if (smp_affinity_enable) + if (irq_set_affinity_hint(reply_q->vector, reply_q->affinity_hint)) - dinitprintk(ioc, pr_info(MPT3SAS_FMT - "error setting affinity hint for irq vector %d\n", - ioc->name, reply_q->vector)); + dinitprintk(ioc, pr_info(MPT3SAS_FMT + "Err setting affinity hint to irq vector %d\n", + ioc->name, reply_q->vector)); index++; } } @@ -1961,6 +1986,9 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc) } else if (max_msix_vectors == 0) goto try_ioapic; + if (ioc->msix_vector_count < ioc->cpu_count) + smp_affinity_enable = 0; + entries = kcalloc(ioc->reply_queue_count, sizeof(struct msix_entry), GFP_KERNEL); if (!entries) { @@ -2020,8 +2048,10 @@ mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc) _base_free_irq(ioc); _base_disable_msix(ioc); - if (ioc->msix96_vector) + if (ioc->msix96_vector) { kfree(ioc->replyPostRegisterIndex); + ioc->replyPostRegisterIndex = NULL; + } if (ioc->chip_phys) { iounmap(ioc->chip); @@ -2229,6 +2259,12 @@ mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, u32 phys_addr) return ioc->reply + (phys_addr - (u32)ioc->reply_dma); } +static inline u8 +_base_get_msix_index(struct MPT3SAS_ADAPTER *ioc) +{ + return ioc->cpu_msix_table[raw_smp_processor_id()]; +} + /** * mpt3sas_base_get_smid - obtain a free smid from internal queue * @ioc: per adapter object @@ -2289,6 +2325,7 @@ 
mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx, request->scmd = scmd; request->cb_idx = cb_idx; smid = request->smid; + request->msix_io = _base_get_msix_index(ioc); list_del(&request->tracker_list); spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); return smid; @@ -2411,12 +2448,6 @@ _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock) } #endif -static inline u8 -_base_get_msix_index(struct MPT3SAS_ADAPTER *ioc) -{ - return ioc->cpu_msix_table[raw_smp_processor_id()]; -} - /** * mpt3sas_base_put_smid_scsi_io - send SCSI_IO request to firmware * @ioc: per adapter object @@ -2470,18 +2501,19 @@ mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid, * mpt3sas_base_put_smid_hi_priority - send Task Management request to firmware * @ioc: per adapter object * @smid: system request message index - * + * @msix_task: msix_task will be same as the msix of the IO in case of task abort, else 0. * Return nothing. */ void -mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid) +mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid, + u16 msix_task) { Mpi2RequestDescriptorUnion_t descriptor; u64 *request = (u64 *)&descriptor; descriptor.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY; - descriptor.HighPriority.MSIxIndex = 0; + descriptor.HighPriority.MSIxIndex = msix_task; descriptor.HighPriority.SMID = cpu_to_le16(smid); descriptor.HighPriority.LMID = 0; descriptor.HighPriority.Reserved1 = 0; @@ -3183,20 +3215,35 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) } ioc->shost->sg_tablesize = sg_tablesize; - ioc->hi_priority_depth = facts->HighPriorityCredit; - ioc->internal_depth = ioc->hi_priority_depth + (5); + ioc->internal_depth = min_t(int, (facts->HighPriorityCredit + (5)), + (facts->RequestCredit / 4)); + if (ioc->internal_depth < INTERNAL_CMDS_COUNT) { + if (facts->RequestCredit <= (INTERNAL_CMDS_COUNT + + INTERNAL_SCSIIO_CMDS_COUNT)) { + pr_err(MPT3SAS_FMT "IOC doesn't have enough Request Credits, it has just %d credits\n", + ioc->name, facts->RequestCredit); + return -ENOMEM; + } + ioc->internal_depth = 10; + } + + ioc->hi_priority_depth = ioc->internal_depth - (5); /* command line tunables for max controller queue depth */ if (max_queue_depth != -1 && max_queue_depth != 0) { max_request_credit = min_t(u16, max_queue_depth + - ioc->hi_priority_depth + ioc->internal_depth, - facts->RequestCredit); + ioc->internal_depth, facts->RequestCredit); if (max_request_credit > MAX_HBA_QUEUE_DEPTH) max_request_credit = MAX_HBA_QUEUE_DEPTH; } else max_request_credit = min_t(u16, facts->RequestCredit, MAX_HBA_QUEUE_DEPTH); - ioc->hba_queue_depth = max_request_credit; + /* Firmware maintains additional facts->HighPriorityCredit number of + * credits for HiPriority Request messages, so hba queue depth will be + * sum of max_request_credit and high priority queue depth.
+ */ + ioc->hba_queue_depth = max_request_credit + ioc->hi_priority_depth; /* request frame size */ ioc->request_sz = facts->IOCRequestFrameSize * 4; @@ -3204,6 +3251,19 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) /* reply frame size */ ioc->reply_sz = facts->ReplyFrameSize * 4; + /* chain segment size */ + if (ioc->hba_mpi_version_belonged != MPI2_VERSION) { + if (facts->IOCMaxChainSegmentSize) + ioc->chain_segment_sz = + facts->IOCMaxChainSegmentSize * + MAX_CHAIN_ELEMT_SZ; + else + /* set to 128 bytes size if IOCMaxChainSegmentSize is zero */ + ioc->chain_segment_sz = DEFAULT_NUM_FWCHAIN_ELEMTS * + MAX_CHAIN_ELEMT_SZ; + } else + ioc->chain_segment_sz = ioc->request_sz; + /* calculate the max scatter element size */ sge_size = max_t(u16, ioc->sge_size, ioc->sge_size_ieee); @@ -3215,7 +3275,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) ioc->max_sges_in_main_message = max_sge_elements/sge_size; /* now do the same for a chain buffer */ - max_sge_elements = ioc->request_sz - sge_size; + max_sge_elements = ioc->chain_segment_sz - sge_size; ioc->max_sges_in_chain_message = max_sge_elements/sge_size; /* @@ -3243,7 +3303,6 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) ioc->reply_post_queue_depth += 16 - (ioc->reply_post_queue_depth % 16); - if (ioc->reply_post_queue_depth > facts->MaxReplyDescriptorPostQueueDepth) { ioc->reply_post_queue_depth = @@ -3325,7 +3384,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) /* set the scsi host can_queue depth * with some internal commands that could be outstanding */ - ioc->shost->can_queue = ioc->scsiio_depth; + ioc->shost->can_queue = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT; dinitprintk(ioc, pr_info(MPT3SAS_FMT "scsi host: can_queue depth (%d)\n", ioc->name, ioc->shost->can_queue)); @@ -3352,8 +3411,9 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) ioc->chains_needed_per_io, ioc->request_sz, sz/1024); if (ioc->scsiio_depth < MPT3SAS_SAS_QUEUE_DEPTH) goto out; - retry_sz += 64; - ioc->hba_queue_depth = max_request_credit - retry_sz; + retry_sz = 64; + ioc->hba_queue_depth -= retry_sz; + _base_release_memory_pools(ioc); goto retry_allocation; } @@ -3408,7 +3468,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) goto out; } ioc->chain_dma_pool = pci_pool_create("chain pool", ioc->pdev, - ioc->request_sz, 16, 0); + ioc->chain_segment_sz, 16, 0); if (!ioc->chain_dma_pool) { pr_err(MPT3SAS_FMT "chain_dma_pool: pci_pool_create failed\n", ioc->name); @@ -3422,13 +3482,13 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) ioc->chain_depth = i; goto chain_done; } - total_sz += ioc->request_sz; + total_sz += ioc->chain_segment_sz; } chain_done: dinitprintk(ioc, pr_info(MPT3SAS_FMT "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n", - ioc->name, ioc->chain_depth, ioc->request_sz, - ((ioc->chain_depth * ioc->request_sz))/1024)); + ioc->name, ioc->chain_depth, ioc->chain_segment_sz, + ((ioc->chain_depth * ioc->chain_segment_sz))/1024)); /* initialize hi-priority queue smid's */ ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth, @@ -4289,6 +4349,10 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word); facts->IOCRequestFrameSize = le16_to_cpu(mpi_reply.IOCRequestFrameSize); + if (ioc->hba_mpi_version_belonged != MPI2_VERSION) { + facts->IOCMaxChainSegmentSize = + 
le16_to_cpu(mpi_reply.IOCMaxChainSegmentSize); + } facts->MaxInitiators = le16_to_cpu(mpi_reply.MaxInitiators); facts->MaxTargets = le16_to_cpu(mpi_reply.MaxTargets); ioc->shost->max_id = -1; @@ -4971,6 +5035,8 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) u32 reply_address; u16 smid; struct _tr_list *delayed_tr, *delayed_tr_next; + struct _sc_list *delayed_sc, *delayed_sc_next; + struct _event_ack_list *delayed_event_ack, *delayed_event_ack_next; u8 hide_flag; struct adapter_reply_queue *reply_q; long reply_post_free; @@ -4993,6 +5059,18 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) kfree(delayed_tr); } + list_for_each_entry_safe(delayed_sc, delayed_sc_next, + &ioc->delayed_sc_list, list) { + list_del(&delayed_sc->list); + kfree(delayed_sc); + } + + list_for_each_entry_safe(delayed_event_ack, delayed_event_ack_next, + &ioc->delayed_event_ack_list, list) { + list_del(&delayed_event_ack->list); + kfree(delayed_event_ack); + } + /* initialize the scsi lookup free list */ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); INIT_LIST_HEAD(&ioc->free_list); @@ -5224,6 +5302,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) ioc->build_zero_len_sge = &_base_build_zero_len_sge; break; case MPI25_VERSION: + case MPI26_VERSION: /* * In SAS3.0, * SCSI_IO, SMP_PASSTHRU, SATA_PASSTHRU, Target Assist, and diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h index 5ad271efbd45..32580b514b18 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.h +++ b/drivers/scsi/mpt3sas/mpt3sas_base.h @@ -73,9 +73,9 @@ #define MPT3SAS_DRIVER_NAME "mpt3sas" #define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>" #define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver" -#define MPT3SAS_DRIVER_VERSION "09.102.00.00" -#define MPT3SAS_MAJOR_VERSION 9 -#define MPT3SAS_MINOR_VERSION 102 +#define MPT3SAS_DRIVER_VERSION "12.100.00.00" +#define MPT3SAS_MAJOR_VERSION 12 +#define MPT3SAS_MINOR_VERSION 100 #define MPT3SAS_BUILD_VERSION 0 #define MPT3SAS_RELEASE_VERSION 00 @@ -122,11 +122,16 @@ #define NO_SLEEP 0 #define INTERNAL_CMDS_COUNT 10 /* reserved cmds */ +/* reserved for issuing internally framed scsi io cmds */ +#define INTERNAL_SCSIIO_CMDS_COUNT 3 #define MPI3_HIM_MASK 0xFFFFFFFF /* mask every bit*/ #define MPT3SAS_INVALID_DEVICE_HANDLE 0xFFFF +#define MAX_CHAIN_ELEMT_SZ 16 +#define DEFAULT_NUM_FWCHAIN_ELEMTS 8 + /* * reset phases */ @@ -398,6 +403,7 @@ struct MPT3SAS_DEVICE { u8 configured_lun; u8 block; u8 tlr_snoop_check; + u8 ignore_delay_remove; }; #define MPT3_CMD_NOT_USED 0x8000 /* free */ @@ -643,6 +649,7 @@ struct chain_tracker { * @cb_idx: callback index * @direct_io: To indicate whether I/O is direct (WARPDRIVE) * @tracker_list: list of free request (ioc->free_list) + * @msix_io: IO's msix */ struct scsiio_tracker { u16 smid; @@ -651,6 +658,7 @@ struct scsiio_tracker { u8 direct_io; struct list_head chain_list; struct list_head tracker_list; + u16 msix_io; }; /** @@ -676,6 +684,25 @@ struct _tr_list { u16 state; }; +/** + * struct _sc_list - delayed SAS_IO_UNIT_CONTROL message list + * @handle: device handle + */ +struct _sc_list { + struct list_head list; + u16 handle; +}; + +/** + * struct _event_ack_list - delayed event acknowledgment list + * @Event: Event ID + * @EventContext: used to track the event uniquely + */ +struct _event_ack_list { + struct list_head list; + u16 Event; + u32 EventContext; +}; /** * struct adapter_reply_queue - the reply queue struct @@ -737,7 +764,7 @@ struct 
mpt3sas_facts { u32 IOCCapabilities; union mpi3_version_union FWVersion; u16 IOCRequestFrameSize; - u16 Reserved3; + u16 IOCMaxChainSegmentSize; u16 MaxInitiators; u16 MaxTargets; u16 MaxSasExpanders; @@ -884,6 +911,8 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc); * @max_sges_in_chain_message: number sg elements per chain * @chains_needed_per_io: max chains per io * @chain_depth: total chains allocated + * @chain_segment_sz: gives the max number of + * SGEs accommodated on a single chain buffer * @hi_priority_smid: * @hi_priority: * @hi_priority_dma: @@ -921,6 +950,8 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc); * @replyPostRegisterIndex: index of next position in Reply Desc Post Queue * @delayed_tr_list: target reset link list * @delayed_tr_volume_list: volume target reset link list + * @delayed_sc_list: + * @delayed_event_ack_list: * @temp_sensors_count: flag to carry the number of temperature sensors * @pci_access_mutex: Mutex to synchronize ioctl,sysfs show path and * pci resource handling. PCI resource freeing will lead to free @@ -1089,6 +1120,7 @@ struct MPT3SAS_ADAPTER { u16 max_sges_in_chain_message; u16 chains_needed_per_io; u32 chain_depth; + u16 chain_segment_sz; /* hi-priority queue */ u16 hi_priority_smid; @@ -1142,6 +1174,8 @@ struct MPT3SAS_ADAPTER { struct list_head delayed_tr_list; struct list_head delayed_tr_volume_list; + struct list_head delayed_sc_list; + struct list_head delayed_event_ack_list; u8 temp_sensors_count; struct mutex pci_access_mutex; @@ -1213,7 +1247,8 @@ void mpt3sas_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle); void mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle); -void mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid); +void mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, + u16 smid, u16 msix_task); void mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid); void mpt3sas_base_initialize_callback_handler(void); u8 mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func); @@ -1259,6 +1294,8 @@ void mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle); void mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address); void mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc, u64 sas_address); +u8 mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER *ioc, + u16 smid); struct _sas_node *mpt3sas_scsih_expander_find_by_handle( struct MPT3SAS_ADAPTER *ioc, u16 handle); diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c index d8366b056b70..7d00f09666b6 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c +++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c @@ -401,7 +401,8 @@ mpt3sas_ctl_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, Mpi2EventNotificationReply_t *mpi_reply; mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); - mpt3sas_ctl_add_to_event_log(ioc, mpi_reply); + if (mpi_reply) + mpt3sas_ctl_add_to_event_log(ioc, mpi_reply); return 1; } @@ -410,7 +411,7 @@ mpt3sas_ctl_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, * @ioc: per adapter object * @iocpp: The ioc pointer is returned in this. * @mpi_version: will be MPI2_VERSION for mpt2ctl ioctl device & - * MPI25_VERSION for mpt3ctl ioctl device. + * MPI25_VERSION | MPI26_VERSION for mpt3ctl ioctl device. * * Return (-1) means error, else ioc_number.
*/ @@ -419,6 +420,7 @@ _ctl_verify_adapter(int ioc_number, struct MPT3SAS_ADAPTER **iocpp, int mpi_version) { struct MPT3SAS_ADAPTER *ioc; + int version = 0; /* global ioc lock to protect controller on list operations */ spin_lock(&gioc_lock); list_for_each_entry(ioc, &mpt3sas_ioc_list, list) { @@ -427,8 +429,21 @@ _ctl_verify_adapter(int ioc_number, struct MPT3SAS_ADAPTER **iocpp, /* Check whether this ioctl command is from right * ioctl device or not, if not continue the search. */ - if (ioc->hba_mpi_version_belonged != mpi_version) - continue; + version = ioc->hba_mpi_version_belonged; + /* MPI25_VERSION and MPI26_VERSION uses same ioctl + * device. + */ + if (mpi_version == (MPI25_VERSION | MPI26_VERSION)) { + if ((version == MPI25_VERSION) || + (version == MPI26_VERSION)) + goto out; + else + continue; + } else { + if (version != mpi_version) + continue; + } +out: spin_unlock(&gioc_lock); *iocpp = ioc; return ioc_number; @@ -817,7 +832,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, tm_request->DevHandle)); ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz, data_in_dma, data_in_sz); - mpt3sas_base_put_smid_hi_priority(ioc, smid); + mpt3sas_base_put_smid_hi_priority(ioc, smid, 0); break; } case MPI2_FUNCTION_SMP_PASSTHROUGH: @@ -1053,6 +1068,7 @@ _ctl_getiocinfo(struct MPT3SAS_ADAPTER *ioc, void __user *arg) strcat(karg.driver_version, MPT2SAS_DRIVER_VERSION); break; case MPI25_VERSION: + case MPI26_VERSION: karg.adapter_type = MPT3_IOCTL_INTERFACE_SAS3; strcat(karg.driver_version, MPT3SAS_DRIVER_VERSION); break; @@ -2203,7 +2219,7 @@ _ctl_compat_mpt_command(struct MPT3SAS_ADAPTER *ioc, unsigned cmd, * @arg - user space data buffer * @compat - handles 32 bit applications in 64bit os * @mpi_version: will be MPI2_VERSION for mpt2ctl ioctl device & - * MPI25_VERSION for mpt3ctl ioctl device. + * MPI25_VERSION | MPI26_VERSION for mpt3ctl ioctl device. */ static long _ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg, @@ -2341,10 +2357,12 @@ _ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { long ret; - /* pass MPI25_VERSION value, to indicate that this ioctl cmd + /* pass MPI25_VERSION | MPI26_VERSION value, + * to indicate that this ioctl cmd * came from mpt3ctl ioctl device. 
*/ - ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 0, MPI25_VERSION); + ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 0, + MPI25_VERSION | MPI26_VERSION); return ret; } @@ -2379,7 +2397,8 @@ _ctl_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg) { long ret; - ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 1, MPI25_VERSION); + ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 1, + MPI25_VERSION | MPI26_VERSION); return ret; } diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index 9ab77b06434d..e0e4920d0fa6 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -1589,10 +1589,16 @@ scsih_get_resync(struct device *dev) percent_complete = 0; out: - if (ioc->hba_mpi_version_belonged == MPI2_VERSION) + + switch (ioc->hba_mpi_version_belonged) { + case MPI2_VERSION: raid_set_resync(mpt2sas_raid_template, dev, percent_complete); - if (ioc->hba_mpi_version_belonged == MPI25_VERSION) + break; + case MPI25_VERSION: + case MPI26_VERSION: raid_set_resync(mpt3sas_raid_template, dev, percent_complete); + break; + } } /** @@ -1650,10 +1656,15 @@ scsih_get_state(struct device *dev) break; } out: - if (ioc->hba_mpi_version_belonged == MPI2_VERSION) + switch (ioc->hba_mpi_version_belonged) { + case MPI2_VERSION: raid_set_state(mpt2sas_raid_template, dev, state); - if (ioc->hba_mpi_version_belonged == MPI25_VERSION) + break; + case MPI25_VERSION: + case MPI26_VERSION: raid_set_state(mpt3sas_raid_template, dev, state); + break; + } } /** @@ -1682,12 +1693,17 @@ _scsih_set_level(struct MPT3SAS_ADAPTER *ioc, break; } - if (ioc->hba_mpi_version_belonged == MPI2_VERSION) + switch (ioc->hba_mpi_version_belonged) { + case MPI2_VERSION: raid_set_level(mpt2sas_raid_template, - &sdev->sdev_gendev, level); - if (ioc->hba_mpi_version_belonged == MPI25_VERSION) + &sdev->sdev_gendev, level); + break; + case MPI25_VERSION: + case MPI26_VERSION: raid_set_level(mpt3sas_raid_template, - &sdev->sdev_gendev, level); + &sdev->sdev_gendev, level); + break; + } } @@ -1937,7 +1953,15 @@ scsih_slave_configure(struct scsi_device *sdev) if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) { qdepth = MPT3SAS_SAS_QUEUE_DEPTH; ssp_target = 1; - ds = "SSP"; + if (sas_device->device_info & + MPI2_SAS_DEVICE_INFO_SEP) { + sdev_printk(KERN_WARNING, sdev, + "set ignore_delay_remove for handle(0x%04x)\n", + sas_device_priv_data->sas_target->handle); + sas_device_priv_data->ignore_delay_remove = 1; + ds = "SES"; + } else + ds = "SSP"; } else { qdepth = MPT3SAS_SATA_QUEUE_DEPTH; if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) @@ -2193,6 +2217,7 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel, unsigned long timeleft; struct scsiio_tracker *scsi_lookup = NULL; int rc; + u16 msix_task = 0; if (m_type == TM_MUTEX_ON) mutex_lock(&ioc->tm_cmds.mutex); @@ -2256,7 +2281,12 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel, int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN); mpt3sas_scsih_set_tm_flag(ioc, handle); init_completion(&ioc->tm_cmds.done); - mpt3sas_base_put_smid_hi_priority(ioc, smid); + if ((type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK) && + (scsi_lookup->msix_io < ioc->reply_queue_count)) + msix_task = scsi_lookup->msix_io; + else + msix_task = 0; + mpt3sas_base_put_smid_hi_priority(ioc, smid, msix_task); timeleft = wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ); if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) { 
pr_err(MPT3SAS_FMT "%s: timeout\n", @@ -2383,7 +2413,7 @@ _scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd) (unsigned long long) sas_device->enclosure_logical_id, sas_device->slot); - if (sas_device->connector_name) + if (sas_device->connector_name[0] != '\0') starget_printk(KERN_INFO, starget, "enclosure level(0x%04x),connector name(%s)\n", sas_device->enclosure_level, @@ -2927,6 +2957,12 @@ _scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc) continue; if (sas_device_priv_data->block) continue; + if (sas_device_priv_data->ignore_delay_remove) { + sdev_printk(KERN_INFO, sdev, + "%s skip device_block for SES handle(0x%04x)\n", + __func__, sas_device_priv_data->sas_target->handle); + continue; + } _scsih_internal_device_block(sdev, sas_device_priv_data); } } @@ -2959,6 +2995,12 @@ _scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle) continue; if (sas_device->pend_sas_rphy_add) continue; + if (sas_device_priv_data->ignore_delay_remove) { + sdev_printk(KERN_INFO, sdev, + "%s skip device_block for SES handle(0x%04x)\n", + __func__, sas_device_priv_data->sas_target->handle); + continue; + } _scsih_internal_device_block(sdev, sas_device_priv_data); } @@ -3118,7 +3160,7 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle) " slot(%d)\n", ioc->name, (unsigned long long) sas_device->enclosure_logical_id, sas_device->slot)); - if (sas_device->connector_name) + if (sas_device->connector_name[0] != '\0') dewtprintk(ioc, pr_info(MPT3SAS_FMT "setting delete flag: enclosure level(0x%04x)," " connector name( %s)\n", ioc->name, @@ -3151,7 +3193,7 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle) mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; mpi_request->DevHandle = cpu_to_le16(handle); mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET; - mpt3sas_base_put_smid_hi_priority(ioc, smid); + mpt3sas_base_put_smid_hi_priority(ioc, smid, 0); mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL); out: @@ -3186,6 +3228,7 @@ _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, Mpi2SasIoUnitControlRequest_t *mpi_request; u16 smid_sas_ctrl; u32 ioc_state; + struct _sc_list *delayed_sc; if (ioc->remove_host) { dewtprintk(ioc, pr_info(MPT3SAS_FMT @@ -3228,9 +3271,16 @@ _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, smid_sas_ctrl = mpt3sas_base_get_smid(ioc, ioc->tm_sas_control_cb_idx); if (!smid_sas_ctrl) { - pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", - ioc->name, __func__); - return 1; + delayed_sc = kzalloc(sizeof(*delayed_sc), GFP_ATOMIC); + if (!delayed_sc) + return _scsih_check_for_pending_tm(ioc, smid); + INIT_LIST_HEAD(&delayed_sc->list); + delayed_sc->handle = mpi_request_tm->DevHandle; + list_add_tail(&delayed_sc->list, &ioc->delayed_sc_list); + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "DELAYED:sc:handle(0x%04x), (open)\n", + ioc->name, handle)); + return _scsih_check_for_pending_tm(ioc, smid); } dewtprintk(ioc, pr_info(MPT3SAS_FMT @@ -3281,7 +3331,7 @@ _scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); } - return 1; + return mpt3sas_check_for_pending_internal_cmds(ioc, smid); } /** @@ -3332,7 +3382,7 @@ _scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle) mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; mpi_request->DevHandle = cpu_to_le16(handle); mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET; - 
mpt3sas_base_put_smid_hi_priority(ioc, smid); + mpt3sas_base_put_smid_hi_priority(ioc, smid, 0); } /** @@ -3388,6 +3438,142 @@ _scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, return _scsih_check_for_pending_tm(ioc, smid); } +/** + * _scsih_issue_delayed_event_ack - issue delayed Event ACK messages + * @ioc: per adapter object + * @smid: system request message index + * @event: Event ID + * @event_context: used to track events uniquely + * + * Context - processed in interrupt context. + */ +void +_scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 event, + u32 event_context) +{ + Mpi2EventAckRequest_t *ack_request; + int i = smid - ioc->internal_smid; + unsigned long flags; + + /* Without releasing the smid just update the + * call back index and reuse the same smid for + * processing this delayed request + */ + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + ioc->internal_lookup[i].cb_idx = ioc->base_cb_idx; + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "EVENT ACK: event(0x%04x), smid(%d), cb(%d)\n", + ioc->name, le16_to_cpu(event), smid, + ioc->base_cb_idx)); + ack_request = mpt3sas_base_get_msg_frame(ioc, smid); + memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t)); + ack_request->Function = MPI2_FUNCTION_EVENT_ACK; + ack_request->Event = event; + ack_request->EventContext = event_context; + ack_request->VF_ID = 0; /* TODO */ + ack_request->VP_ID = 0; + mpt3sas_base_put_smid_default(ioc, smid); +} + +/** + * _scsih_issue_delayed_sas_io_unit_ctrl - issue delayed + * sas_io_unit_ctrl messages + * @ioc: per adapter object + * @smid: system request message index + * @handle: device handle + * + * Context - processed in interrupt context. + */ +void +_scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc, + u16 smid, u16 handle) + { + Mpi2SasIoUnitControlRequest_t *mpi_request; + u32 ioc_state; + int i = smid - ioc->internal_smid; + unsigned long flags; + + if (ioc->remove_host) { + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "%s: host has been removed\n", + __func__, ioc->name)); + return; + } else if (ioc->pci_error_recovery) { + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "%s: host in pci error recovery\n", + __func__, ioc->name)); + return; + } + ioc_state = mpt3sas_base_get_iocstate(ioc, 1); + if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "%s: host is not operational\n", + __func__, ioc->name)); + return; + } + + /* Without releasing the smid just update the + * call back index and reuse the same smid for + * processing this delayed request + */ + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + ioc->internal_lookup[i].cb_idx = ioc->tm_sas_control_cb_idx; + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n", + ioc->name, le16_to_cpu(handle), smid, + ioc->tm_sas_control_cb_idx)); + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t)); + mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL; + mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE; + mpi_request->DevHandle = handle; + mpt3sas_base_put_smid_default(ioc, smid); +} + +/** + * _scsih_check_for_pending_internal_cmds - check for pending internal messages + * @ioc: per adapter object + * @smid: system request message index + * + * Context: Executed in interrupt context + * + * This will check delayed internal messages list, and 
process the + * next request. + * + * Return 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. + */ +u8 +mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER *ioc, u16 smid) +{ + struct _sc_list *delayed_sc; + struct _event_ack_list *delayed_event_ack; + + if (!list_empty(&ioc->delayed_event_ack_list)) { + delayed_event_ack = list_entry(ioc->delayed_event_ack_list.next, + struct _event_ack_list, list); + _scsih_issue_delayed_event_ack(ioc, smid, + delayed_event_ack->Event, delayed_event_ack->EventContext); + list_del(&delayed_event_ack->list); + kfree(delayed_event_ack); + return 0; + } + + if (!list_empty(&ioc->delayed_sc_list)) { + delayed_sc = list_entry(ioc->delayed_sc_list.next, + struct _sc_list, list); + _scsih_issue_delayed_sas_io_unit_ctrl(ioc, smid, + delayed_sc->handle); + list_del(&delayed_sc->list); + kfree(delayed_sc); + return 0; + } + return 1; +} /** * _scsih_check_for_pending_tm - check for pending task management @@ -4084,6 +4270,9 @@ _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR: desc_ioc_state = "eedp app tag error"; break; + case MPI2_IOCSTATUS_INSUFFICIENT_POWER: + desc_ioc_state = "insufficient power"; + break; default: desc_ioc_state = "unknown"; break; @@ -4609,6 +4798,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) case MPI2_IOCSTATUS_INVALID_STATE: case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR: case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED: + case MPI2_IOCSTATUS_INSUFFICIENT_POWER: default: scmd->result = DID_SOFT_ERROR << 16; break; @@ -8391,7 +8581,8 @@ static struct raid_function_template mpt3sas_raid_functions = { * @pdev: PCI device struct * * return MPI2_VERSION for SAS 2.0 HBA devices, - * MPI25_VERSION for SAS 3.0 HBA devices. 
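mpt3sas_check_for_pending_internal_cmds() above drains two deferred-work lists in order -- delayed event ACKs first, then delayed SAS IO unit control (device removal) requests -- and reuses the just-completed smid for the next message rather than freeing it; the 0/1 return tells _base_interrupt() who owns the message frame. A minimal sketch of that dispatch shape, with plain singly linked lists standing in for the kernel's list_head machinery:

#include <stdlib.h>

struct delayed { struct delayed *next; unsigned short payload; };

/* returns 1 when the caller must free the smid, 0 when it was reused here */
static int check_for_pending(struct delayed **event_acks,
			     struct delayed **sas_ctrls,
			     void (*issue)(unsigned short smid,
					   unsigned short payload),
			     unsigned short smid)
{
	struct delayed **list = *event_acks ? event_acks :
				*sas_ctrls ? sas_ctrls : NULL;
	struct delayed *d;

	if (!list)
		return 1;	/* nothing pending: free the message frame */
	d = *list;
	issue(smid, d->payload); /* rewire cb_idx, resend on the same smid */
	*list = d->next;
	free(d);
	return 0;
}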
+ * MPI25_VERSION for SAS 3.0 HBA devices, and + * MPI26_VERSION for Cutlass & Intruder SAS 3.0 HBA devices */ u16 _scsih_determine_hba_mpi_version(struct pci_dev *pdev) @@ -8423,6 +8614,17 @@ _scsih_determine_hba_mpi_version(struct pci_dev *pdev) case MPI25_MFGPAGE_DEVID_SAS3108_5: case MPI25_MFGPAGE_DEVID_SAS3108_6: return MPI25_VERSION; + case MPI26_MFGPAGE_DEVID_SAS3216: + case MPI26_MFGPAGE_DEVID_SAS3224: + case MPI26_MFGPAGE_DEVID_SAS3316_1: + case MPI26_MFGPAGE_DEVID_SAS3316_2: + case MPI26_MFGPAGE_DEVID_SAS3316_3: + case MPI26_MFGPAGE_DEVID_SAS3316_4: + case MPI26_MFGPAGE_DEVID_SAS3324_1: + case MPI26_MFGPAGE_DEVID_SAS3324_2: + case MPI26_MFGPAGE_DEVID_SAS3324_3: + case MPI26_MFGPAGE_DEVID_SAS3324_4: + return MPI26_VERSION; } return 0; } @@ -8456,7 +8658,8 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) /* Enumerate only SAS 3.0 HBA's if hbas_to_enumerate is two, * for other generation HBA's return with -ENODEV */ - if ((hbas_to_enumerate == 2) && (hba_mpi_version != MPI25_VERSION)) + if ((hbas_to_enumerate == 2) && (!(hba_mpi_version == MPI25_VERSION + || hba_mpi_version == MPI26_VERSION))) return -ENODEV; switch (hba_mpi_version) { @@ -8478,6 +8681,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) ioc->mfg_pg10_hide_flag = MFG_PAGE10_EXPOSE_ALL_DISKS; break; case MPI25_VERSION: + case MPI26_VERSION: /* Use mpt3sas driver host template for SAS 3.0 HBA's */ shost = scsi_host_alloc(&mpt3sas_driver_template, sizeof(struct MPT3SAS_ADAPTER)); @@ -8488,7 +8692,9 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) ioc->hba_mpi_version_belonged = hba_mpi_version; ioc->id = mpt3_ids++; sprintf(ioc->driver_name, "%s", MPT3SAS_DRIVER_NAME); - if (pdev->revision >= SAS3_PCI_DEVICE_C0_REVISION) + if ((ioc->hba_mpi_version_belonged == MPI25_VERSION && + pdev->revision >= SAS3_PCI_DEVICE_C0_REVISION) || + (ioc->hba_mpi_version_belonged == MPI26_VERSION)) ioc->msix96_vector = 1; break; default: @@ -8533,6 +8739,8 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) INIT_LIST_HEAD(&ioc->raid_device_list); INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list); INIT_LIST_HEAD(&ioc->delayed_tr_list); + INIT_LIST_HEAD(&ioc->delayed_sc_list); + INIT_LIST_HEAD(&ioc->delayed_event_ack_list); INIT_LIST_HEAD(&ioc->delayed_tr_volume_list); INIT_LIST_HEAD(&ioc->reply_queue_list); @@ -8866,6 +9074,28 @@ static const struct pci_device_id mpt3sas_pci_table[] = { PCI_ANY_ID, PCI_ANY_ID }, { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_6, PCI_ANY_ID, PCI_ANY_ID }, + /* Cutlass ~ 3216 and 3224 */ + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3216, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3224, + PCI_ANY_ID, PCI_ANY_ID }, + /* Intruder ~ 3316 and 3324 */ + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_1, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_2, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_3, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_4, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_1, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_2, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_3, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_4, + PCI_ANY_ID, PCI_ANY_ID }, {0} /* Terminating entry */ }; 
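_scsih_probe() above gates enumeration on the hbas_to_enumerate module parameter; the hunk shows only the value-2 branch (SAS 3.0 only), now widened to accept MPI26 parts. A sketch of the whole predicate -- the value-1 branch is assumed by symmetry and is not shown in this diff, and the version constants are the same placeholders as in the earlier sketch:

#include <stdbool.h>

#define MPI2_VERSION 0x0200   /* placeholders, not the MPI headers' values */
#define MPI25_VERSION 0x0205
#define MPI26_VERSION 0x0206

static bool enumerate_this_hba(int hbas_to_enumerate, unsigned short ver)
{
	switch (hbas_to_enumerate) {
	case 1:		/* assumed: SAS 2.0 HBAs only (branch not in this hunk) */
		return ver == MPI2_VERSION;
	case 2:		/* SAS 3.0 HBAs only, per the comment in the hunk above */
		return ver == MPI25_VERSION || ver == MPI26_VERSION;
	default:	/* enumerate everything the PCI-ID mapping recognized */
		return ver != 0;
	}
}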
MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table); diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c index ca36d7ea0964..6a84b82d71bb 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_transport.c +++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c @@ -1418,7 +1418,6 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state; unsigned long timeleft; void *psge; - u32 sgl_flags; u8 issue_reset = 0; void *data_out = NULL; dma_addr_t data_out_dma; @@ -1507,24 +1506,10 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc, cpu_to_le16(sizeof(struct phy_error_log_request)); psge = &mpi_request->SGL; - /* WRITE sgel first */ - sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT | - MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC); - sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; - ioc->base_add_sg_single(psge, sgl_flags | - sizeof(struct phy_control_request), data_out_dma); - - /* incr sgel */ - psge += ioc->sge_size; - - /* READ sgel last */ - sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT | - MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER | - MPI2_SGE_FLAGS_END_OF_LIST); - sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; - ioc->base_add_sg_single(psge, sgl_flags | - sizeof(struct phy_control_reply), data_out_dma + - sizeof(struct phy_control_request)); + ioc->build_sg(ioc, psge, data_out_dma, + sizeof(struct phy_control_request), + data_out_dma + sizeof(struct phy_control_request), + sizeof(struct phy_control_reply)); dtransportprintk(ioc, pr_info(MPT3SAS_FMT "phy_control - send to sas_addr(0x%016llx), phy(%d), opcode(%d)\n", @@ -1615,7 +1600,7 @@ _transport_phy_reset(struct sas_phy *phy, int hard_reset) SMP_PHY_CONTROL_LINK_RESET); /* handle hba phys */ - memset(&mpi_request, 0, sizeof(Mpi2SasIoUnitControlReply_t)); + memset(&mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t)); mpi_request.Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL; mpi_request.Operation = hard_reset ? 
MPI2_SAS_OP_PHY_HARD_RESET : MPI2_SAS_OP_PHY_LINK_RESET; diff --git a/drivers/scsi/mvsas/mv_94xx.c b/drivers/scsi/mvsas/mv_94xx.c index 9270d15ff1a4..f6fc4a705924 100644 --- a/drivers/scsi/mvsas/mv_94xx.c +++ b/drivers/scsi/mvsas/mv_94xx.c @@ -330,6 +330,51 @@ static void mvs_94xx_phy_enable(struct mvs_info *mvi, u32 phy_id) mvs_write_port_vsr_data(mvi, phy_id, tmp & 0xfd7fffff); } +static void mvs_94xx_sgpio_init(struct mvs_info *mvi) +{ + void __iomem *regs = mvi->regs_ex - 0x10200; + u32 tmp; + + tmp = mr32(MVS_HST_CHIP_CONFIG); + tmp |= 0x100; + mw32(MVS_HST_CHIP_CONFIG, tmp); + + mw32(MVS_SGPIO_CTRL + MVS_SGPIO_HOST_OFFSET * mvi->id, + MVS_SGPIO_CTRL_SDOUT_AUTO << MVS_SGPIO_CTRL_SDOUT_SHIFT); + + mw32(MVS_SGPIO_CFG1 + MVS_SGPIO_HOST_OFFSET * mvi->id, + 8 << MVS_SGPIO_CFG1_LOWA_SHIFT | + 8 << MVS_SGPIO_CFG1_HIA_SHIFT | + 4 << MVS_SGPIO_CFG1_LOWB_SHIFT | + 4 << MVS_SGPIO_CFG1_HIB_SHIFT | + 2 << MVS_SGPIO_CFG1_MAXACTON_SHIFT | + 1 << MVS_SGPIO_CFG1_FORCEACTOFF_SHIFT + ); + + mw32(MVS_SGPIO_CFG2 + MVS_SGPIO_HOST_OFFSET * mvi->id, + (300000 / 100) << MVS_SGPIO_CFG2_CLK_SHIFT | /* 100kHz clock */ + 66 << MVS_SGPIO_CFG2_BLINK_SHIFT /* (66 * 0.121 Hz?)*/ + ); + + mw32(MVS_SGPIO_CFG0 + MVS_SGPIO_HOST_OFFSET * mvi->id, + MVS_SGPIO_CFG0_ENABLE | + MVS_SGPIO_CFG0_BLINKA | + MVS_SGPIO_CFG0_BLINKB | + /* 3*4 data bits / PDU */ + (12 - 1) << MVS_SGPIO_CFG0_AUT_BITLEN_SHIFT + ); + + mw32(MVS_SGPIO_DCTRL + MVS_SGPIO_HOST_OFFSET * mvi->id, + DEFAULT_SGPIO_BITS); + + mw32(MVS_SGPIO_DSRC + MVS_SGPIO_HOST_OFFSET * mvi->id, + ((mvi->id * 4) + 3) << (8 * 3) | + ((mvi->id * 4) + 2) << (8 * 2) | + ((mvi->id * 4) + 1) << (8 * 1) | + ((mvi->id * 4) + 0) << (8 * 0)); + +} + static int mvs_94xx_init(struct mvs_info *mvi) { void __iomem *regs = mvi->regs; @@ -533,6 +578,8 @@ static int mvs_94xx_init(struct mvs_info *mvi) /* Enable SRS interrupt */ mw32(MVS_INT_MASK_SRS_0, 0xFFFF); + mvs_94xx_sgpio_init(mvi); + return 0; } @@ -1005,6 +1052,92 @@ static void mvs_94xx_tune_interrupt(struct mvs_info *mvi, u32 time) } +static int mvs_94xx_gpio_write(struct mvs_prv_info *mvs_prv, + u8 reg_type, u8 reg_index, + u8 reg_count, u8 *write_data) +{ + int i; + + switch (reg_type) { + + case SAS_GPIO_REG_TX_GP: + if (reg_index == 0) + return -EINVAL; + + if (reg_count > 1) + return -EINVAL; + + if (reg_count == 0) + return 0; + + /* maximum supported bits = hosts * 4 drives * 3 bits */ + for (i = 0; i < mvs_prv->n_host * 4 * 3; i++) { + + /* select host */ + struct mvs_info *mvi = mvs_prv->mvi[i/(4*3)]; + + void __iomem *regs = mvi->regs_ex - 0x10200; + + int drive = (i/3) & (4-1); /* drive number on host */ + u32 block = mr32(MVS_SGPIO_DCTRL + + MVS_SGPIO_HOST_OFFSET * mvi->id); + + + /* + * if bit is set then create a mask with the first + * bit of the drive set in the mask ... + */ + u32 bit = (write_data[i/8] & (1 << (i&(8-1)))) ? + 1<<(24-drive*8) : 0; + + /* + * ... 
and then shift it to the right position based + * on the led type (activity/id/fail) + */ + switch (i%3) { + case 0: /* activity */ + block &= ~((0x7 << MVS_SGPIO_DCTRL_ACT_SHIFT) + << (24-drive*8)); + /* hardwire activity bit to SOF */ + block |= LED_BLINKA_SOF << ( + MVS_SGPIO_DCTRL_ACT_SHIFT + + (24-drive*8)); + break; + case 1: /* id */ + block &= ~((0x3 << MVS_SGPIO_DCTRL_LOC_SHIFT) + << (24-drive*8)); + block |= bit << MVS_SGPIO_DCTRL_LOC_SHIFT; + break; + case 2: /* fail */ + block &= ~((0x7 << MVS_SGPIO_DCTRL_ERR_SHIFT) + << (24-drive*8)); + block |= bit << MVS_SGPIO_DCTRL_ERR_SHIFT; + break; + } + + mw32(MVS_SGPIO_DCTRL + MVS_SGPIO_HOST_OFFSET * mvi->id, + block); + + } + + return reg_count; + + case SAS_GPIO_REG_TX: + if (reg_index + reg_count > mvs_prv->n_host) + return -EINVAL; + + for (i = 0; i < reg_count; i++) { + struct mvs_info *mvi = mvs_prv->mvi[i+reg_index]; + void __iomem *regs = mvi->regs_ex - 0x10200; + + mw32(MVS_SGPIO_DCTRL + MVS_SGPIO_HOST_OFFSET * mvi->id, + be32_to_cpu(((u32 *) write_data)[i])); + } + return reg_count; + } + return -ENOSYS; +} + const struct mvs_dispatch mvs_94xx_dispatch = { "mv94xx", mvs_94xx_init, @@ -1057,5 +1190,6 @@ const struct mvs_dispatch mvs_94xx_dispatch = { mvs_94xx_fix_dma, mvs_94xx_tune_interrupt, mvs_94xx_non_spec_ncq_error, + mvs_94xx_gpio_write, }; diff --git a/drivers/scsi/mvsas/mv_94xx.h b/drivers/scsi/mvsas/mv_94xx.h index 14e197497b46..578960803a00 100644 --- a/drivers/scsi/mvsas/mv_94xx.h +++ b/drivers/scsi/mvsas/mv_94xx.h @@ -38,6 +38,10 @@ enum VANIR_REVISION_ID { VANIR_C2_REV = 0xC2, }; +enum host_registers { + MVS_HST_CHIP_CONFIG = 0x10104, /* chip configuration */ +}; + enum hw_registers { MVS_GBL_CTL = 0x04, /* global control */ MVS_GBL_INT_STAT = 0x00, /* global irq status */ @@ -239,6 +243,73 @@ struct mvs_prd { __le32 im_len; } __attribute__ ((packed)); +enum sgpio_registers { + MVS_SGPIO_HOST_OFFSET = 0x100, /* offset between hosts */ + + MVS_SGPIO_CFG0 = 0xc200, + MVS_SGPIO_CFG0_ENABLE = (1 << 0), /* enable pins */ + MVS_SGPIO_CFG0_BLINKB = (1 << 1), /* blink generators */ + MVS_SGPIO_CFG0_BLINKA = (1 << 2), + MVS_SGPIO_CFG0_INVSCLK = (1 << 3), /* invert signal? */ + MVS_SGPIO_CFG0_INVSLOAD = (1 << 4), + MVS_SGPIO_CFG0_INVSDOUT = (1 << 5), + MVS_SGPIO_CFG0_SLOAD_FALLEDGE = (1 << 6), /* rise/fall edge? 
*/ + MVS_SGPIO_CFG0_SDOUT_FALLEDGE = (1 << 7), + MVS_SGPIO_CFG0_SDIN_RISEEDGE = (1 << 8), + MVS_SGPIO_CFG0_MAN_BITLEN_SHIFT = 18, /* bits/frame manual mode */ + MVS_SGPIO_CFG0_AUT_BITLEN_SHIFT = 24, /* bits/frame auto mode */ + + MVS_SGPIO_CFG1 = 0xc204, /* blink timing register */ + MVS_SGPIO_CFG1_LOWA_SHIFT = 0, /* A off time */ + MVS_SGPIO_CFG1_HIA_SHIFT = 4, /* A on time */ + MVS_SGPIO_CFG1_LOWB_SHIFT = 8, /* B off time */ + MVS_SGPIO_CFG1_HIB_SHIFT = 12, /* B on time */ + MVS_SGPIO_CFG1_MAXACTON_SHIFT = 16, /* max activity on time */ + + /* force activity off time */ + MVS_SGPIO_CFG1_FORCEACTOFF_SHIFT = 20, + /* stretch activity on time */ + MVS_SGPIO_CFG1_STRCHACTON_SHIFT = 24, + /* stretch activity off time */ + MVS_SGPIO_CFG1_STRCHACTOFF_SHIFT = 28, + + + MVS_SGPIO_CFG2 = 0xc208, /* clock speed register */ + MVS_SGPIO_CFG2_CLK_SHIFT = 0, + MVS_SGPIO_CFG2_BLINK_SHIFT = 20, + + MVS_SGPIO_CTRL = 0xc20c, /* SDOUT/SDIN mode control */ + MVS_SGPIO_CTRL_SDOUT_AUTO = 2, + MVS_SGPIO_CTRL_SDOUT_SHIFT = 2, + + MVS_SGPIO_DSRC = 0xc220, /* map ODn bits to drives */ + + MVS_SGPIO_DCTRL = 0xc238, + MVS_SGPIO_DCTRL_ERR_SHIFT = 0, + MVS_SGPIO_DCTRL_LOC_SHIFT = 3, + MVS_SGPIO_DCTRL_ACT_SHIFT = 5, +}; + +enum sgpio_led_status { + LED_OFF = 0, + LED_ON = 1, + LED_BLINKA = 2, + LED_BLINKA_INV = 3, + LED_BLINKA_SOF = 4, + LED_BLINKA_EOF = 5, + LED_BLINKB = 6, + LED_BLINKB_INV = 7, +}; + +#define DEFAULT_SGPIO_BITS ((LED_BLINKA_SOF << \ + MVS_SGPIO_DCTRL_ACT_SHIFT) << (8 * 3) | \ + (LED_BLINKA_SOF << \ + MVS_SGPIO_DCTRL_ACT_SHIFT) << (8 * 2) | \ + (LED_BLINKA_SOF << \ + MVS_SGPIO_DCTRL_ACT_SHIFT) << (8 * 1) | \ + (LED_BLINKA_SOF << \ + MVS_SGPIO_DCTRL_ACT_SHIFT) << (8 * 0)) + /* * these registers are accessed through port vendor * specific address/data registers diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c index 675e7fab0796..c7c250519c4b 100644 --- a/drivers/scsi/mvsas/mv_init.c +++ b/drivers/scsi/mvsas/mv_init.c @@ -84,6 +84,8 @@ static struct sas_domain_function_template mvs_transport_ops = { .lldd_port_formed = mvs_port_formed, .lldd_port_deformed = mvs_port_deformed, + .lldd_write_gpio = mvs_gpio_write, + }; static void mvs_phy_init(struct mvs_info *mvi, int phy_id) diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c index 9c780740fb82..83cd3ea2df41 100644 --- a/drivers/scsi/mvsas/mv_sas.c +++ b/drivers/scsi/mvsas/mv_sas.c @@ -737,8 +737,8 @@ static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf mv_dprintk("device %016llx not ready.\n", SAS_ADDR(dev->sas_addr)); - rc = SAS_PHY_DOWN; - return rc; + rc = SAS_PHY_DOWN; + return rc; } tei.port = dev->port->lldd_port; if (tei.port && !tei.port->port_attached && !tmf) { @@ -2105,3 +2105,16 @@ int mvs_int_rx(struct mvs_info *mvi, bool self_clear) return 0; } +int mvs_gpio_write(struct sas_ha_struct *sha, u8 reg_type, u8 reg_index, + u8 reg_count, u8 *write_data) +{ + struct mvs_prv_info *mvs_prv = sha->lldd_ha; + struct mvs_info *mvi = mvs_prv->mvi[0]; + + if (MVS_CHIP_DISP->gpio_write) { + return MVS_CHIP_DISP->gpio_write(mvs_prv, reg_type, + reg_index, reg_count, write_data); + } + + return -ENOSYS; +} diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h index dc409c04747a..f9afd4cdd4c4 100644 --- a/drivers/scsi/mvsas/mv_sas.h +++ b/drivers/scsi/mvsas/mv_sas.h @@ -103,6 +103,7 @@ enum dev_reset { }; struct mvs_info; +struct mvs_prv_info; struct mvs_dispatch { char *name; @@ -172,6 +173,8 @@ struct mvs_dispatch { int buf_len, int from, void *prd); void 
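mvs_94xx_gpio_write() above treats the 32-bit DCTRL word as four per-drive bytes, drive 0 in the most significant byte, with the error/locate/activity fields sitting at the *_SHIFT offsets inside each byte (field widths of 3, 2 and 3 bits, per the masks used there). A generalized reading of that read-modify-write as one helper; updating the locate field of drive 1, for example, would be set_led_field(dctrl, 1, 3 /* LOC_SHIFT */, 0x3, 1):

#include <stdint.h>

static uint32_t set_led_field(uint32_t dctrl, int drive,
			      unsigned int shift, uint32_t mask, uint32_t val)
{
	unsigned int byte = 24 - drive * 8;	  /* drive 0 lives in bits 31:24 */

	dctrl &= ~((mask << shift) << byte);	  /* clear the old field */
	dctrl |= ((val & mask) << shift) << byte; /* install the new value */
	return dctrl;
}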
(*tune_interrupt)(struct mvs_info *mvi, u32 time); void (*non_spec_ncq_error)(struct mvs_info *mvi); + int (*gpio_write)(struct mvs_prv_info *mvs_prv, u8 reg_type, + u8 reg_index, u8 reg_count, u8 *write_data); }; @@ -476,5 +479,7 @@ void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events); void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st); int mvs_int_rx(struct mvs_info *mvi, bool self_clear); struct mvs_device *mvs_find_dev_by_reg_set(struct mvs_info *mvi, u8 reg_set); +int mvs_gpio_write(struct sas_ha_struct *, u8 reg_type, u8 reg_index, + u8 reg_count, u8 *write_data); #endif diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c index 02360de6b7e0..39285070f3b5 100644 --- a/drivers/scsi/mvumi.c +++ b/drivers/scsi/mvumi.c @@ -2629,7 +2629,7 @@ static void mvumi_shutdown(struct pci_dev *pdev) mvumi_flush_cache(mhba); } -static int mvumi_suspend(struct pci_dev *pdev, pm_message_t state) +static int __maybe_unused mvumi_suspend(struct pci_dev *pdev, pm_message_t state) { struct mvumi_hba *mhba = NULL; @@ -2648,7 +2648,7 @@ static int mvumi_suspend(struct pci_dev *pdev, pm_message_t state) return 0; } -static int mvumi_resume(struct pci_dev *pdev) +static int __maybe_unused mvumi_resume(struct pci_dev *pdev) { int ret; struct mvumi_hba *mhba = NULL; diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c index 0cccd6033feb..3b11aad03752 100644 --- a/drivers/scsi/osd/osd_initiator.c +++ b/drivers/scsi/osd/osd_initiator.c @@ -170,10 +170,7 @@ static int _osd_get_print_system_info(struct osd_dev *od, /* FIXME: Where are the time utilities */ pFirst = get_attrs[a++].val_ptr; - OSD_INFO("CLOCK [0x%02x%02x%02x%02x%02x%02x]\n", - ((char *)pFirst)[0], ((char *)pFirst)[1], - ((char *)pFirst)[2], ((char *)pFirst)[3], - ((char *)pFirst)[4], ((char *)pFirst)[5]); + OSD_INFO("CLOCK [0x%6phN]\n", pFirst); if (a < nelem) { /* IBM-OSD-SIM bug, Might not have it */ unsigned len = get_attrs[a].len; @@ -2009,9 +2006,8 @@ EXPORT_SYMBOL(osd_sec_init_nosec_doall_caps); */ void osd_set_caps(struct osd_cdb *cdb, const void *caps) { - bool is_ver1 = true; /* NOTE: They start at same address */ - memcpy(&cdb->v1.caps, caps, is_ver1 ? 
OSDv1_CAP_LEN : OSD_CAP_LEN); + memcpy(&cdb->v1.caps, caps, OSDv1_CAP_LEN); } bool osd_is_sec_alldata(struct osd_security_parameters *sec_parms __unused) diff --git a/drivers/scsi/pas16.c b/drivers/scsi/pas16.c index e81eadd08afc..512037e27783 100644 --- a/drivers/scsi/pas16.c +++ b/drivers/scsi/pas16.c @@ -1,6 +1,4 @@ #define PSEUDO_DMA -#define UNSAFE /* Not unsafe for PAS16 -- use it */ -#define PDEBUG 0 /* * This driver adapted from Drew Eckhardt's Trantor T128 driver @@ -71,14 +69,10 @@ #include <linux/module.h> -#include <linux/signal.h> -#include <linux/proc_fs.h> #include <asm/io.h> #include <asm/dma.h> #include <linux/blkdev.h> -#include <linux/delay.h> #include <linux/interrupt.h> -#include <linux/stat.h> #include <linux/init.h> #include <scsi/scsi_host.h> @@ -87,8 +81,8 @@ #include "NCR5380.h" -static unsigned short pas16_addr = 0; -static int pas16_irq = 0; +static unsigned short pas16_addr; +static int pas16_irq; static const int scsi_irq_translate[] = @@ -146,22 +140,6 @@ static const unsigned short pas16_offset[ 8 ] = * START_DMA_INITIATOR_RECEIVE_REG wo */ }; -/*----------------------------------------------------------------*/ -/* the following will set the monitor border color (useful to find - where something crashed or gets stuck at */ -/* 1 = blue - 2 = green - 3 = cyan - 4 = red - 5 = magenta - 6 = yellow - 7 = white -*/ -#if 1 -#define rtrc(i) {inb(0x3da); outb(0x31, 0x3c0); outb((i), 0x3c0);} -#else -#define rtrc(i) {} -#endif /* @@ -205,7 +183,7 @@ static void __init outb( 0x01, io_port + P_TIMEOUT_STATUS_REG_OFFSET ); /* Reset TC */ outb( 0x01, io_port + WAIT_STATE ); /* 1 Wait state */ - NCR5380_read( RESET_PARITY_INTERRUPT_REG ); + inb(io_port + pas16_offset[RESET_PARITY_INTERRUPT_REG]); /* Set the SCSI interrupt pointer without mucking up the sound * interrupt pointer in the same byte. @@ -280,13 +258,13 @@ static int __init * put in an additional test to try to weed them out. */ - outb( 0x01, io_port + WAIT_STATE ); /* 1 Wait state */ - NCR5380_write( MODE_REG, 0x20 ); /* Is it really SCSI? */ - if( NCR5380_read( MODE_REG ) != 0x20 ) /* Write to a reg. */ - return 0; /* and try to read */ - NCR5380_write( MODE_REG, 0x00 ); /* it back. */ - if( NCR5380_read( MODE_REG ) != 0x00 ) - return 0; + outb(0x01, io_port + WAIT_STATE); /* 1 Wait state */ + outb(0x20, io_port + pas16_offset[MODE_REG]); /* Is it really SCSI? */ + if (inb(io_port + pas16_offset[MODE_REG]) != 0x20) /* Write to a reg. */ + return 0; /* and try to read */ + outb(0x00, io_port + pas16_offset[MODE_REG]); /* it back. 
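The detection logic here relies on a scratch test: write a pattern to the 5380's MODE_REG through the PAS16 port mapping and check that it reads back, twice, so that a sound-only card is weeded out. A userspace restatement of that heuristic, assuming x86 port I/O via <sys/io.h> (needs ioperm() and root; mode_reg_off stands in for pas16_offset[MODE_REG]):

#include <sys/io.h>

static int pas16_scratch_test(unsigned short io_port,
			      unsigned short mode_reg_off)
{
	outb(0x20, io_port + mode_reg_off);	  /* write a pattern ... */
	if (inb(io_port + mode_reg_off) != 0x20)  /* ... and read it back */
		return 0;
	outb(0x00, io_port + mode_reg_off);
	return inb(io_port + mode_reg_off) == 0x00;
}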
*/ + if (inb(io_port + pas16_offset[MODE_REG]) != 0x00) + return 0; return 1; } @@ -305,7 +283,7 @@ static int __init static int __init pas16_setup(char *str) { - static int commandline_current = 0; + static int commandline_current; int i; int ints[10]; @@ -344,8 +322,8 @@ __setup("pas16=", pas16_setup); static int __init pas16_detect(struct scsi_host_template *tpnt) { - static int current_override = 0; - static unsigned short current_base = 0; + static int current_override; + static unsigned short current_base; struct Scsi_Host *instance; unsigned short io_port; int count; @@ -377,34 +355,32 @@ static int __init pas16_detect(struct scsi_host_template *tpnt) } else for (; !io_port && (current_base < NO_BASES); ++current_base) { -#if (PDEBUG & PDEBUG_INIT) - printk("scsi-pas16 : probing io_port %04x\n", (unsigned int) bases[current_base].io_port); -#endif + dprintk(NDEBUG_INIT, "pas16: probing io_port 0x%04x\n", + (unsigned int)bases[current_base].io_port); if ( !bases[current_base].noauto && pas16_hw_detect( current_base ) ){ io_port = bases[current_base].io_port; init_board( io_port, default_irqs[ current_base ], 0 ); -#if (PDEBUG & PDEBUG_INIT) - printk("scsi-pas16 : detected board.\n"); -#endif + dprintk(NDEBUG_INIT, "pas16: detected board\n"); } } - -#if defined(PDEBUG) && (PDEBUG & PDEBUG_INIT) - printk("scsi-pas16 : io_port = %04x\n", (unsigned int) io_port); -#endif + dprintk(NDEBUG_INIT, "pas16: io_port = 0x%04x\n", + (unsigned int)io_port); if (!io_port) break; instance = scsi_register (tpnt, sizeof(struct NCR5380_hostdata)); if(instance == NULL) - break; + goto out; instance->io_port = io_port; - NCR5380_init(instance, 0); + if (NCR5380_init(instance, 0)) + goto out_unregister; + + NCR5380_maybe_reset_bus(instance); if (overrides[current_override].irq != IRQ_AUTO) instance->irq = overrides[current_override].irq; @@ -431,14 +407,18 @@ static int __init pas16_detect(struct scsi_host_template *tpnt) outb( (inb(io_port + IO_CONFIG_3) & 0x0f), io_port + IO_CONFIG_3 ); } -#if defined(PDEBUG) && (PDEBUG & PDEBUG_INIT) - printk("scsi%d : irq = %d\n", instance->host_no, instance->irq); -#endif + dprintk(NDEBUG_INIT, "scsi%d : irq = %d\n", + instance->host_no, instance->irq); ++current_override; ++count; } return count; + +out_unregister: + scsi_unregister(instance); +out: + return count; } /* @@ -561,29 +541,29 @@ static int pas16_release(struct Scsi_Host *shost) if (shost->irq != NO_IRQ) free_irq(shost->irq, shost); NCR5380_exit(shost); - if (shost->io_port && shost->n_io_port) - release_region(shost->io_port, shost->n_io_port); scsi_unregister(shost); return 0; } static struct scsi_host_template driver_template = { - .name = "Pro Audio Spectrum-16 SCSI", - .detect = pas16_detect, - .release = pas16_release, - .proc_name = "pas16", - .show_info = pas16_show_info, - .write_info = pas16_write_info, - .info = pas16_info, - .queuecommand = pas16_queue_command, - .eh_abort_handler = pas16_abort, - .eh_bus_reset_handler = pas16_bus_reset, - .bios_param = pas16_biosparam, - .can_queue = CAN_QUEUE, - .this_id = 7, - .sg_tablesize = SG_ALL, - .cmd_per_lun = CMD_PER_LUN, - .use_clustering = DISABLE_CLUSTERING, + .name = "Pro Audio Spectrum-16 SCSI", + .detect = pas16_detect, + .release = pas16_release, + .proc_name = "pas16", + .show_info = pas16_show_info, + .write_info = pas16_write_info, + .info = pas16_info, + .queuecommand = pas16_queue_command, + .eh_abort_handler = pas16_abort, + .eh_bus_reset_handler = pas16_bus_reset, + .bios_param = pas16_biosparam, + .can_queue = 32, + .this_id = 7, + 
.sg_tablesize = SG_ALL, + .cmd_per_lun = 2, + .use_clustering = DISABLE_CLUSTERING, + .cmd_size = NCR5380_CMD_SIZE, + .max_sectors = 128, }; #include "scsi_module.c" diff --git a/drivers/scsi/pas16.h b/drivers/scsi/pas16.h index c6109c80050b..d37527717225 100644 --- a/drivers/scsi/pas16.h +++ b/drivers/scsi/pas16.h @@ -24,9 +24,6 @@ #ifndef PAS16_H #define PAS16_H -#define PDEBUG_INIT 0x1 -#define PDEBUG_TRANSFER 0x2 - #define PAS16_DEFAULT_BASE_1 0x388 #define PAS16_DEFAULT_BASE_2 0x384 #define PAS16_DEFAULT_BASE_3 0x38c @@ -98,46 +95,16 @@ #define OPERATION_MODE_1 0xec03 #define IO_CONFIG_3 0xf002 +#define NCR5380_implementation_fields /* none */ -#ifndef ASM - -#ifndef CMD_PER_LUN -#define CMD_PER_LUN 2 -#endif - -#ifndef CAN_QUEUE -#define CAN_QUEUE 32 -#endif - -#define NCR5380_implementation_fields \ - volatile unsigned short io_port - -#define NCR5380_local_declare() \ - volatile unsigned short io_port +#define PAS16_io_port(reg) (instance->io_port + pas16_offset[(reg)]) -#define NCR5380_setup(instance) \ - io_port = (instance)->io_port - -#define PAS16_io_port(reg) ( io_port + pas16_offset[(reg)] ) - -#if !(PDEBUG & PDEBUG_TRANSFER) #define NCR5380_read(reg) ( inb(PAS16_io_port(reg)) ) #define NCR5380_write(reg, value) ( outb((value),PAS16_io_port(reg)) ) -#else -#define NCR5380_read(reg) \ - (((unsigned char) printk("scsi%d : read register %d at io_port %04x\n"\ - , instance->hostno, (reg), PAS16_io_port(reg))), inb( PAS16_io_port(reg)) ) - -#define NCR5380_write(reg, value) \ - (printk("scsi%d : write %02x to register %d at io_port %04x\n", \ - instance->hostno, (value), (reg), PAS16_io_port(reg)), \ - outb( (value),PAS16_io_port(reg) ) ) - -#endif +#define NCR5380_dma_xfer_len(instance, cmd, phase) (cmd->transfersize) #define NCR5380_intr pas16_intr -#define do_NCR5380_intr do_pas16_intr #define NCR5380_queue_command pas16_queue_command #define NCR5380_abort pas16_abort #define NCR5380_bus_reset pas16_bus_reset @@ -150,5 +117,4 @@ #define PAS16_IRQS 0xd4a8 -#endif /* ndef ASM */ #endif /* PAS16_H */ diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c index ee00e27ba396..f6ad579280d4 100644 --- a/drivers/scsi/ppa.c +++ b/drivers/scsi/ppa.c @@ -37,6 +37,7 @@ typedef struct { unsigned long recon_tmo; /* How many usecs to wait for reconnection (6th bit) */ unsigned int failed:1; /* Failure flag */ unsigned wanted:1; /* Parport sharing busy flag */ + unsigned int dev_no; /* Device number */ wait_queue_head_t *waiting; struct Scsi_Host *host; struct list_head list; @@ -985,15 +986,40 @@ static struct scsi_host_template ppa_template = { static LIST_HEAD(ppa_hosts); +/* + * Finds the first available device number that can be allotted to the + * new ppa device and returns the address of the previous node so that + * we can add to the tail and keep the list in ascending order. 
+ */ + +static inline ppa_struct *find_parent(void) +{ + ppa_struct *dev, *par = NULL; + unsigned int cnt = 0; + + if (list_empty(&ppa_hosts)) + return NULL; + + list_for_each_entry(dev, &ppa_hosts, list) { + if (dev->dev_no != cnt) + return par; + cnt++; + par = dev; + } + + return par; +} + static int __ppa_attach(struct parport *pb) { struct Scsi_Host *host; DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waiting); DEFINE_WAIT(wait); - ppa_struct *dev; + ppa_struct *dev, *temp; int ports; int modes, ppb, ppb_hi; int err = -ENOMEM; + struct pardev_cb ppa_cb; dev = kzalloc(sizeof(ppa_struct), GFP_KERNEL); if (!dev) @@ -1002,8 +1028,15 @@ static int __ppa_attach(struct parport *pb) dev->mode = PPA_AUTODETECT; dev->recon_tmo = PPA_RECON_TMO; init_waitqueue_head(&waiting); - dev->dev = parport_register_device(pb, "ppa", NULL, ppa_wakeup, - NULL, 0, dev); + temp = find_parent(); + if (temp) + dev->dev_no = temp->dev_no + 1; + + memset(&ppa_cb, 0, sizeof(ppa_cb)); + ppa_cb.private = dev; + ppa_cb.wakeup = ppa_wakeup; + + dev->dev = parport_register_dev_model(pb, "ppa", &ppa_cb, dev->dev_no); if (!dev->dev) goto out; @@ -1110,9 +1143,10 @@ static void ppa_detach(struct parport *pb) } static struct parport_driver ppa_driver = { - .name = "ppa", - .attach = ppa_attach, - .detach = ppa_detach, + .name = "ppa", + .match_port = ppa_attach, + .detach = ppa_detach, + .devmodel = true, }; static int __init ppa_driver_init(void) diff --git a/drivers/scsi/qla2xxx/Kconfig b/drivers/scsi/qla2xxx/Kconfig index a0f732b138e4..10aa18ba05fd 100644 --- a/drivers/scsi/qla2xxx/Kconfig +++ b/drivers/scsi/qla2xxx/Kconfig @@ -18,9 +18,6 @@ config SCSI_QLA_FC 2322, 6322 ql2322_fw.bin 24xx, 54xx ql2400_fw.bin 25xx ql2500_fw.bin - 2031 ql2600_fw.bin - 8031 ql8300_fw.bin - 27xx ql2700_fw.bin Upon request, the driver caches the firmware image until the driver is unloaded. 
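Two pieces of the ppa conversion above fit together: find_parent() scans the sorted host list for the first gap in the numbering, the new device takes par ? par->dev_no + 1 : 0 (so numbers are reused after detach and the list stays ordered), and that number is handed to parport_register_dev_model() through a zeroed struct pardev_cb carrying only the wakeup callback and the private cookie. A standalone sketch of the numbering scan, with plain pointers in place of the kernel's list_head:

#include <stddef.h>

struct ppa_node { struct ppa_node *next; unsigned int dev_no; };

/* returns the node to insert after, or NULL to insert at the head */
static struct ppa_node *find_first_gap(struct ppa_node *head)
{
	struct ppa_node *par = NULL;
	struct ppa_node *n;
	unsigned int cnt = 0;

	for (n = head; n; n = n->next) {
		if (n->dev_no != cnt)	/* first hole in the numbering */
			return par;
		cnt++;
		par = n;
	}
	return par;			/* no hole: append at the tail */
}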
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 6b942d9e5b74..4dc06a13cab8 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -272,8 +272,8 @@ qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj, iter = (uint32_t *)buf; chksum = 0; - for (cnt = 0; cnt < ((count >> 2) - 1); cnt++) - chksum += le32_to_cpu(*iter++); + for (cnt = 0; cnt < ((count >> 2) - 1); cnt++, iter++) + chksum += le32_to_cpu(*iter); chksum = ~chksum + 1; *iter = cpu_to_le32(chksum); } else { @@ -562,6 +562,7 @@ qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj, struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, struct device, kobj))); struct qla_hw_data *ha = vha->hw; + uint32_t faddr; if (unlikely(pci_channel_offline(ha->pdev))) return -EAGAIN; @@ -569,9 +570,16 @@ qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj, if (!capable(CAP_SYS_ADMIN)) return -EINVAL; - if (IS_NOCACHE_VPD_TYPE(ha)) - ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2, + if (IS_NOCACHE_VPD_TYPE(ha)) { + faddr = ha->flt_region_vpd << 2; + + if (IS_QLA27XX(ha) && + qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE) + faddr = ha->flt_region_vpd_sec << 2; + + ha->isp_ops->read_optrom(vha, ha->vpd, faddr, ha->vpd_size); + } return memory_read_from_buffer(buf, count, &off, ha->vpd, ha->vpd_size); } @@ -824,6 +832,41 @@ static struct bin_attribute sysfs_reset_attr = { }; static ssize_t +qla2x00_issue_logo(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, loff_t off, size_t count) +{ + struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, + struct device, kobj))); + int type; + int rval = 0; + port_id_t did; + + type = simple_strtol(buf, NULL, 10); + + did.b.domain = (type & 0x00ff0000) >> 16; + did.b.area = (type & 0x0000ff00) >> 8; + did.b.al_pa = (type & 0x000000ff); + + ql_log(ql_log_info, vha, 0x70e3, "portid=%02x%02x%02x done\n", + did.b.domain, did.b.area, did.b.al_pa); + + ql_log(ql_log_info, vha, 0x70e4, "%s: %d\n", __func__, type); + + rval = qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, did); + return count; +} + +static struct bin_attribute sysfs_issue_logo_attr = { + .attr = { + .name = "issue_logo", + .mode = S_IWUSR, + }, + .size = 0, + .write = qla2x00_issue_logo, +}; + +static ssize_t qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) @@ -937,6 +980,7 @@ static struct sysfs_entry { { "vpd", &sysfs_vpd_attr, 1 }, { "sfp", &sysfs_sfp_attr, 1 }, { "reset", &sysfs_reset_attr, }, + { "issue_logo", &sysfs_issue_logo_attr, }, { "xgmac_stats", &sysfs_xgmac_stats_attr, 3 }, { "dcbx_tlv", &sysfs_dcbx_tlv_attr, 3 }, { NULL }, @@ -1873,7 +1917,8 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost) if (qla2x00_reset_active(vha)) goto done; - stats = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &stats_dma); + stats = dma_alloc_coherent(&ha->pdev->dev, + sizeof(struct link_statistics), &stats_dma, GFP_KERNEL); if (stats == NULL) { ql_log(ql_log_warn, vha, 0x707d, "Failed to allocate memory for stats.\n"); @@ -1921,7 +1966,8 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost) do_div(pfc_host_stat->seconds_since_last_reset, HZ); done_free: - dma_pool_free(ha->s_dma_pool, stats, stats_dma); + dma_free_coherent(&ha->pdev->dev, sizeof(struct link_statistics), + stats, stats_dma); done: return pfc_host_stat; } diff --git a/drivers/scsi/qla2xxx/qla_bsg.c 
b/drivers/scsi/qla2xxx/qla_bsg.c index c26acde797f0..392c147d5793 100644 --- a/drivers/scsi/qla2xxx/qla_bsg.c +++ b/drivers/scsi/qla2xxx/qla_bsg.c @@ -2107,6 +2107,195 @@ qla8044_serdes_op(struct fc_bsg_job *bsg_job) } static int +qla27xx_get_flash_upd_cap(struct fc_bsg_job *bsg_job) +{ + struct Scsi_Host *host = bsg_job->shost; + scsi_qla_host_t *vha = shost_priv(host); + struct qla_hw_data *ha = vha->hw; + struct qla_flash_update_caps cap; + + if (!(IS_QLA27XX(ha))) + return -EPERM; + + memset(&cap, 0, sizeof(cap)); + cap.capabilities = (uint64_t)ha->fw_attributes_ext[1] << 48 | + (uint64_t)ha->fw_attributes_ext[0] << 32 | + (uint64_t)ha->fw_attributes_h << 16 | + (uint64_t)ha->fw_attributes; + + sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, &cap, sizeof(cap)); + bsg_job->reply->reply_payload_rcv_len = sizeof(cap); + + bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = + EXT_STATUS_OK; + + bsg_job->reply_len = sizeof(struct fc_bsg_reply); + bsg_job->reply->result = DID_OK << 16; + bsg_job->job_done(bsg_job); + return 0; +} + +static int +qla27xx_set_flash_upd_cap(struct fc_bsg_job *bsg_job) +{ + struct Scsi_Host *host = bsg_job->shost; + scsi_qla_host_t *vha = shost_priv(host); + struct qla_hw_data *ha = vha->hw; + uint64_t online_fw_attr = 0; + struct qla_flash_update_caps cap; + + if (!(IS_QLA27XX(ha))) + return -EPERM; + + memset(&cap, 0, sizeof(cap)); + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, &cap, sizeof(cap)); + + online_fw_attr = (uint64_t)ha->fw_attributes_ext[1] << 48 | + (uint64_t)ha->fw_attributes_ext[0] << 32 | + (uint64_t)ha->fw_attributes_h << 16 | + (uint64_t)ha->fw_attributes; + + if (online_fw_attr != cap.capabilities) { + bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = + EXT_STATUS_INVALID_PARAM; + return -EINVAL; + } + + if (cap.outage_duration < MAX_LOOP_TIMEOUT) { + bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = + EXT_STATUS_INVALID_PARAM; + return -EINVAL; + } + + bsg_job->reply->reply_payload_rcv_len = 0; + + bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = + EXT_STATUS_OK; + + bsg_job->reply_len = sizeof(struct fc_bsg_reply); + bsg_job->reply->result = DID_OK << 16; + bsg_job->job_done(bsg_job); + return 0; +} + +static int +qla27xx_get_bbcr_data(struct fc_bsg_job *bsg_job) +{ + struct Scsi_Host *host = bsg_job->shost; + scsi_qla_host_t *vha = shost_priv(host); + struct qla_hw_data *ha = vha->hw; + struct qla_bbcr_data bbcr; + uint16_t loop_id, topo, sw_cap; + uint8_t domain, area, al_pa, state; + int rval; + + if (!(IS_QLA27XX(ha))) + return -EPERM; + + memset(&bbcr, 0, sizeof(bbcr)); + + if (vha->flags.bbcr_enable) + bbcr.status = QLA_BBCR_STATUS_ENABLED; + else + bbcr.status = QLA_BBCR_STATUS_DISABLED; + + if (bbcr.status == QLA_BBCR_STATUS_ENABLED) { + rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa, + &area, &domain, &topo, &sw_cap); + if (rval != QLA_SUCCESS) { + bbcr.status = QLA_BBCR_STATUS_UNKNOWN; + bbcr.state = QLA_BBCR_STATE_OFFLINE; + bbcr.mbx1 = loop_id; + goto done; + } + + state = (vha->bbcr >> 12) & 0x1; + + if (state) { + bbcr.state = QLA_BBCR_STATE_OFFLINE; + bbcr.offline_reason_code = QLA_BBCR_REASON_LOGIN_REJECT; + } else { + bbcr.state = QLA_BBCR_STATE_ONLINE; + bbcr.negotiated_bbscn = (vha->bbcr >> 8) & 0xf; + } + + bbcr.configured_bbscn = vha->bbcr & 0xf; + } + +done: + sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, &bbcr, sizeof(bbcr)); + bsg_job->reply->reply_payload_rcv_len = 
sizeof(bbcr); + + bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK; + + bsg_job->reply_len = sizeof(struct fc_bsg_reply); + bsg_job->reply->result = DID_OK << 16; + bsg_job->job_done(bsg_job); + return 0; +} + +static int +qla2x00_get_priv_stats(struct fc_bsg_job *bsg_job) +{ + struct Scsi_Host *host = bsg_job->shost; + scsi_qla_host_t *vha = shost_priv(host); + struct qla_hw_data *ha = vha->hw; + struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); + struct link_statistics *stats = NULL; + dma_addr_t stats_dma; + int rval = QLA_FUNCTION_FAILED; + + if (test_bit(UNLOADING, &vha->dpc_flags)) + goto done; + + if (unlikely(pci_channel_offline(ha->pdev))) + goto done; + + if (qla2x00_reset_active(vha)) + goto done; + + if (!IS_FWI2_CAPABLE(ha)) + goto done; + + stats = dma_alloc_coherent(&ha->pdev->dev, + sizeof(struct link_statistics), &stats_dma, GFP_KERNEL); + if (!stats) { + ql_log(ql_log_warn, vha, 0x70e2, + "Failed to allocate memory for stats.\n"); + goto done; + } + + memset(stats, 0, sizeof(struct link_statistics)); + + rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma); + + if (rval != QLA_SUCCESS) + goto done_free; + + ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70e3, + (uint8_t *)stats, sizeof(struct link_statistics)); + + sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, stats, sizeof(struct link_statistics)); + bsg_job->reply->reply_payload_rcv_len = sizeof(struct link_statistics); + + bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK; + + bsg_job->reply_len = sizeof(struct fc_bsg_reply); + bsg_job->reply->result = DID_OK << 16; + bsg_job->job_done(bsg_job); + +done_free: + dma_free_coherent(&ha->pdev->dev, sizeof(struct link_statistics), + stats, stats_dma); +done: + return rval; +} + +static int qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job) { switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) { @@ -2161,6 +2350,18 @@ qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job) case QL_VND_SERDES_OP_EX: return qla8044_serdes_op(bsg_job); + case QL_VND_GET_FLASH_UPDATE_CAPS: + return qla27xx_get_flash_upd_cap(bsg_job); + + case QL_VND_SET_FLASH_UPDATE_CAPS: + return qla27xx_set_flash_upd_cap(bsg_job); + + case QL_VND_GET_BBCR_DATA: + return qla27xx_get_bbcr_data(bsg_job); + + case QL_VND_GET_PRIV_STATS: + return qla2x00_get_priv_stats(bsg_job); + default: return -ENOSYS; } diff --git a/drivers/scsi/qla2xxx/qla_bsg.h b/drivers/scsi/qla2xxx/qla_bsg.h index d38f9efa56fa..c80192d45536 100644 --- a/drivers/scsi/qla2xxx/qla_bsg.h +++ b/drivers/scsi/qla2xxx/qla_bsg.h @@ -25,6 +25,10 @@ #define QL_VND_FX00_MGMT_CMD 0x12 #define QL_VND_SERDES_OP 0x13 #define QL_VND_SERDES_OP_EX 0x14 +#define QL_VND_GET_FLASH_UPDATE_CAPS 0x15 +#define QL_VND_SET_FLASH_UPDATE_CAPS 0x16 +#define QL_VND_GET_BBCR_DATA 0x17 +#define QL_VND_GET_PRIV_STATS 0x18 /* BSG Vendor specific subcode returns */ #define EXT_STATUS_OK 0 @@ -232,4 +236,34 @@ struct qla_serdes_reg_ex { uint32_t val; } __packed; +struct qla_flash_update_caps { + uint64_t capabilities; + uint32_t outage_duration; + uint8_t reserved[20]; +} __packed; + +/* BB_CR Status */ +#define QLA_BBCR_STATUS_DISABLED 0 +#define QLA_BBCR_STATUS_ENABLED 1 +#define QLA_BBCR_STATUS_UNKNOWN 2 + +/* BB_CR State */ +#define QLA_BBCR_STATE_OFFLINE 0 +#define QLA_BBCR_STATE_ONLINE 1 + +/* BB_CR Offline Reason Code */ +#define QLA_BBCR_REASON_PORT_SPEED 1 +#define QLA_BBCR_REASON_PEER_PORT 2 +#define QLA_BBCR_REASON_SWITCH 3 +#define 
QLA_BBCR_REASON_LOGIN_REJECT 4 + +struct qla_bbcr_data { + uint8_t status; /* 1 - enabled, 0 - Disabled */ + uint8_t state; /* 1 - online, 0 - offline */ + uint8_t configured_bbscn; /* 0-15 */ + uint8_t negotiated_bbscn; /* 0-15 */ + uint8_t offline_reason_code; + uint16_t mbx1; /* Port state */ + uint8_t reserved[9]; +} __packed; #endif diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index 34dc9a35670b..b64c504ff12f 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c @@ -11,28 +11,28 @@ * ---------------------------------------------------------------------- * | Level | Last Value Used | Holes | * ---------------------------------------------------------------------- - * | Module Init and Probe | 0x017f | 0x0146 | + * | Module Init and Probe | 0x018f | 0x0146 | * | | | 0x015b-0x0160 | * | | | 0x016e-0x0170 | - * | Mailbox commands | 0x118d | 0x1115-0x1116 | - * | | | 0x111a-0x111b | + * | Mailbox commands | 0x1192 | | + * | | | | * | Device Discovery | 0x2016 | 0x2020-0x2022, | * | | | 0x2011-0x2012, | * | | | 0x2099-0x20a4 | - * | Queue Command and IO tracing | 0x3075 | 0x300b | + * | Queue Command and IO tracing | 0x3074 | 0x300b | * | | | 0x3027-0x3028 | * | | | 0x303d-0x3041 | * | | | 0x302d,0x3033 | * | | | 0x3036,0x3038 | * | | | 0x303a | * | DPC Thread | 0x4023 | 0x4002,0x4013 | - * | Async Events | 0x508a | 0x502b-0x502f | - * | | | 0x5047 | + * | Async Events | 0x5089 | 0x502b-0x502f | + * | | | 0x505e | * | | | 0x5084,0x5075 | * | | | 0x503d,0x5044 | * | | | 0x507b,0x505f | * | Timer Routines | 0x6012 | | - * | User Space Interactions | 0x70e2 | 0x7018,0x702e | + * | User Space Interactions | 0x70e3 | 0x7018,0x702e | * | | | 0x7020,0x7024 | * | | | 0x7039,0x7045 | * | | | 0x7073-0x7075 | @@ -60,15 +60,11 @@ * | | | 0xb13c-0xb140 | * | | | 0xb149 | * | MultiQ | 0xc00c | | - * | Misc | 0xd300 | 0xd016-0xd017 | - * | | | 0xd021,0xd024 | - * | | | 0xd025,0xd029 | - * | | | 0xd02a,0xd02e | - * | | | 0xd031-0xd0ff | + * | Misc | 0xd301 | 0xd031-0xd0ff | * | | | 0xd101-0xd1fe | * | | | 0xd214-0xd2fe | * | Target Mode | 0xe080 | | - * | Target Mode Management | 0xf096 | 0xf002 | + * | Target Mode Management | 0xf09b | 0xf002 | * | | | 0xf046-0xf049 | * | Target Mode Task Management | 0x1000d | | * ---------------------------------------------------------------------- @@ -298,8 +294,8 @@ qla24xx_read_window(struct device_reg_24xx __iomem *reg, uint32_t iobase, WRT_REG_DWORD(&reg->iobase_addr, iobase); dmp_reg = &reg->iobase_window; - while (count--) - *buf++ = htonl(RD_REG_DWORD(dmp_reg++)); + for ( ; count--; dmp_reg++) + *buf++ = htonl(RD_REG_DWORD(dmp_reg)); return buf; } @@ -461,8 +457,8 @@ qla2xxx_read_window(struct device_reg_2xxx __iomem *reg, uint32_t count, { uint16_t __iomem *dmp_reg = &reg->u.isp2300.fb_cmd; - while (count--) - *buf++ = htons(RD_REG_WORD(dmp_reg++)); + for ( ; count--; dmp_reg++) + *buf++ = htons(RD_REG_WORD(dmp_reg)); } static inline void * @@ -737,16 +733,18 @@ qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked) if (rval == QLA_SUCCESS) { dmp_reg = &reg->flash_address; - for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++) - fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg++)); + for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++, dmp_reg++) + fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg)); dmp_reg = &reg->u.isp2300.req_q_in; - for (cnt = 0; cnt < sizeof(fw->risc_host_reg) / 2; cnt++) - fw->risc_host_reg[cnt] = htons(RD_REG_WORD(dmp_reg++)); + for (cnt = 0; cnt < sizeof(fw->risc_host_reg) / 2; + cnt++, 
dmp_reg++) + fw->risc_host_reg[cnt] = htons(RD_REG_WORD(dmp_reg)); dmp_reg = &reg->u.isp2300.mailbox0; - for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++) - fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg++)); + for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; + cnt++, dmp_reg++) + fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg)); WRT_REG_WORD(&reg->ctrl_status, 0x40); qla2xxx_read_window(reg, 32, fw->resp_dma_reg); @@ -756,8 +754,9 @@ qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked) WRT_REG_WORD(&reg->ctrl_status, 0x00); dmp_reg = &reg->risc_hw; - for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; cnt++) - fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg++)); + for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; + cnt++, dmp_reg++) + fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg)); WRT_REG_WORD(&reg->pcr, 0x2000); qla2xxx_read_window(reg, 16, fw->risc_gp0_reg); @@ -900,25 +899,25 @@ qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked) } if (rval == QLA_SUCCESS) { dmp_reg = &reg->flash_address; - for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++) - fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg++)); + for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++, dmp_reg++) + fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg)); dmp_reg = &reg->u.isp2100.mailbox0; - for (cnt = 0; cnt < ha->mbx_count; cnt++) { + for (cnt = 0; cnt < ha->mbx_count; cnt++, dmp_reg++) { if (cnt == 8) dmp_reg = &reg->u_end.isp2200.mailbox8; - fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg++)); + fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg)); } dmp_reg = &reg->u.isp2100.unused_2[0]; - for (cnt = 0; cnt < sizeof(fw->dma_reg) / 2; cnt++) - fw->dma_reg[cnt] = htons(RD_REG_WORD(dmp_reg++)); + for (cnt = 0; cnt < sizeof(fw->dma_reg) / 2; cnt++, dmp_reg++) + fw->dma_reg[cnt] = htons(RD_REG_WORD(dmp_reg)); WRT_REG_WORD(&reg->ctrl_status, 0x00); dmp_reg = &reg->risc_hw; - for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; cnt++) - fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg++)); + for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; cnt++, dmp_reg++) + fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg)); WRT_REG_WORD(&reg->pcr, 0x2000); qla2xxx_read_window(reg, 16, fw->risc_gp0_reg); @@ -1100,8 +1099,8 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) /* Host interface registers. */ dmp_reg = &reg->flash_addr; - for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++) - fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++)); + for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++) + fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg)); /* Disable interrupts. */ WRT_REG_DWORD(&reg->ictrl, 0); @@ -1133,8 +1132,8 @@ /* Mailbox registers. */ mbx_reg = &reg->mailbox0; - for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++) - fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++)); + for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++) + fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg)); /* Transfer sequence registers. 
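One plausible motivation for mechanically hoisting the ++ out of the accessor argument in all of these dump hunks: if RD_REG_WORD()/RD_REG_DWORD() is ever built as a macro that expands its argument more than once (a tracing variant, say), a side-effecting argument gets incremented twice, and two unsequenced increments on the same pointer are undefined behavior in C. A contrived standalone illustration (RD_TWICE is hypothetical, not the driver's macro):

#include <stdio.h>

#define RD_TWICE(p) (*(p) | *(p))	/* expands its argument twice */

int main(void)
{
	unsigned short regs[4] = { 1, 2, 3, 4 };
	unsigned short *p = regs;
	unsigned short v;

	/* RD_TWICE(p++) would expand to (*(p++) | *(p++)):
	 * two unsequenced ++ on p -- undefined behavior.
	 * Keeping the argument side-effect free is always safe: */
	v = RD_TWICE(p);
	p++;
	printf("%u\n", v);
	return 0;
}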
*/ iter_reg = fw->xseq_gp_reg; @@ -1172,20 +1171,20 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) iter_reg = fw->req0_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg); dmp_reg = &reg->iobase_q; - for (cnt = 0; cnt < 7; cnt++) - *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); + for (cnt = 0; cnt < 7; cnt++, dmp_reg++) + *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg)); iter_reg = fw->resp0_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg); dmp_reg = &reg->iobase_q; - for (cnt = 0; cnt < 7; cnt++) - *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); + for (cnt = 0; cnt < 7; cnt++, dmp_reg++) + *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg)); iter_reg = fw->req1_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg); dmp_reg = &reg->iobase_q; - for (cnt = 0; cnt < 7; cnt++) - *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); + for (cnt = 0; cnt < 7; cnt++, dmp_reg++) + *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg)); /* Transmit DMA registers. */ iter_reg = fw->xmt0_dma_reg; @@ -1363,8 +1362,10 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) RD_REG_DWORD(&reg->iobase_addr); WRT_REG_DWORD(&reg->iobase_window, 0x01); dmp_reg = &reg->iobase_c4; - fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg++)); - fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg++)); + fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg)); + dmp_reg++; + fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg)); + dmp_reg++; fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg)); fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window)); @@ -1373,8 +1374,8 @@ /* Host interface registers. */ dmp_reg = &reg->flash_addr; - for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++) - fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++)); + for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++) + fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg)); /* Disable interrupts. */ WRT_REG_DWORD(&reg->ictrl, 0); @@ -1422,8 +1423,8 @@ /* Mailbox registers. */ mbx_reg = &reg->mailbox0; - for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++) - fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++)); + for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++) + fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg)); /* Transfer sequence registers. */ iter_reg = fw->xseq_gp_reg; @@ -1486,20 +1487,20 @@ iter_reg = fw->req0_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg); dmp_reg = &reg->iobase_q; - for (cnt = 0; cnt < 7; cnt++) - *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); + for (cnt = 0; cnt < 7; cnt++, dmp_reg++) + *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg)); iter_reg = fw->resp0_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg); dmp_reg = &reg->iobase_q; - for (cnt = 0; cnt < 7; cnt++) - *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); + for (cnt = 0; cnt < 7; cnt++, dmp_reg++) + *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg)); iter_reg = fw->req1_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg); dmp_reg = &reg->iobase_q; - for (cnt = 0; cnt < 7; cnt++) - *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); + for (cnt = 0; cnt < 7; cnt++, dmp_reg++) + *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg)); /* Transmit DMA registers. 
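The queue-DMA hunks above all lean on the same windowed-dump helper: qla24xx_read_window() (shown in an earlier hunk) writes a bank selector (0x7200, 0x7300, 0x7400, ...) to iobase_addr, after which the registers of that bank appear at consecutive offsets starting at iobase_window, which the caller walks to fill the dump buffer (the driver byte-swaps each value to big-endian with htonl). A sketch of the pattern with generic MMIO stand-ins for RD_REG_DWORD/WRT_REG_DWORD:

#include <stdint.h>

static inline uint32_t mmio_read32(volatile uint32_t *a) { return *a; }
static inline void mmio_write32(volatile uint32_t *a, uint32_t v) { *a = v; }

static uint32_t *read_window(volatile uint32_t *iobase_addr,
			     volatile uint32_t *window, uint32_t bank,
			     int count, uint32_t *buf)
{
	volatile uint32_t *p = window;

	mmio_write32(iobase_addr, bank);  /* select which bank the window maps */
	for ( ; count--; p++)
		*buf++ = mmio_read32(p);  /* consecutive registers of the bank */
	return buf;			  /* caller keeps filling the dump */
}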
*/ iter_reg = fw->xmt0_dma_reg; @@ -1684,8 +1685,10 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) RD_REG_DWORD(&reg->iobase_addr); WRT_REG_DWORD(&reg->iobase_window, 0x01); dmp_reg = &reg->iobase_c4; - fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg++)); - fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg++)); + fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg)); + dmp_reg++; + fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg)); + dmp_reg++; fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg)); fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window)); @@ -1694,8 +1697,8 @@ /* Host interface registers. */ dmp_reg = &reg->flash_addr; - for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++) - fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++)); + for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++) + fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg)); /* Disable interrupts. */ WRT_REG_DWORD(&reg->ictrl, 0); @@ -1743,8 +1746,8 @@ /* Mailbox registers. */ mbx_reg = &reg->mailbox0; - for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++) - fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++)); + for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++) + fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg)); /* Transfer sequence registers. */ iter_reg = fw->xseq_gp_reg; @@ -1807,20 +1810,20 @@ iter_reg = fw->req0_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg); dmp_reg = &reg->iobase_q; - for (cnt = 0; cnt < 7; cnt++) - *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); + for (cnt = 0; cnt < 7; cnt++, dmp_reg++) + *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg)); iter_reg = fw->resp0_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg); dmp_reg = &reg->iobase_q; - for (cnt = 0; cnt < 7; cnt++) - *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); + for (cnt = 0; cnt < 7; cnt++, dmp_reg++) + *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg)); iter_reg = fw->req1_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg); dmp_reg = &reg->iobase_q; - for (cnt = 0; cnt < 7; cnt++) - *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); + for (cnt = 0; cnt < 7; cnt++, dmp_reg++) + *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg)); /* Transmit DMA registers. */ iter_reg = fw->xmt0_dma_reg; @@ -2027,8 +2030,10 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) RD_REG_DWORD(&reg->iobase_addr); WRT_REG_DWORD(&reg->iobase_window, 0x01); dmp_reg = &reg->iobase_c4; - fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg++)); - fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg++)); + fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg)); + dmp_reg++; + fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg)); + dmp_reg++; fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg)); fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window)); @@ -2037,8 +2042,8 @@ /* Host interface registers. */ dmp_reg = &reg->flash_addr; - for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++) - fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++)); + for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++) + fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg)); /* Disable interrupts. */ WRT_REG_DWORD(&reg->ictrl, 0); @@ -2086,8 +2091,8 @@ /* Mailbox registers. 
*/ mbx_reg = &reg->mailbox0; - for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++) - fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++)); + for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++) + fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg)); /* Transfer sequence registers. */ iter_reg = fw->xseq_gp_reg; @@ -2182,20 +2187,20 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) iter_reg = fw->req0_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg); dmp_reg = &reg->iobase_q; - for (cnt = 0; cnt < 7; cnt++) - *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); + for (cnt = 0; cnt < 7; cnt++, dmp_reg++) + *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg)); iter_reg = fw->resp0_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg); dmp_reg = &reg->iobase_q; - for (cnt = 0; cnt < 7; cnt++) - *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); + for (cnt = 0; cnt < 7; cnt++, dmp_reg++) + *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg)); iter_reg = fw->req1_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg); dmp_reg = &reg->iobase_q; - for (cnt = 0; cnt < 7; cnt++) - *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++)); + for (cnt = 0; cnt < 7; cnt++, dmp_reg++) + *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg)); /* Transmit DMA registers. */ iter_reg = fw->xmt0_dma_reg; diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 388d79088b59..ceb452dd143c 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -259,7 +259,7 @@ #define LOOP_DOWN_TIME 255 /* 240 */ #define LOOP_DOWN_RESET (LOOP_DOWN_TIME - 30) -#define DEFAULT_OUTSTANDING_COMMANDS 1024 +#define DEFAULT_OUTSTANDING_COMMANDS 4096 #define MIN_OUTSTANDING_COMMANDS 128 /* ISP request and response entry counts (37-65535) */ @@ -267,11 +267,13 @@ #define REQUEST_ENTRY_CNT_2200 2048 /* Number of request entries. */ #define REQUEST_ENTRY_CNT_24XX 2048 /* Number of request entries. */ #define REQUEST_ENTRY_CNT_83XX 8192 /* Number of request entries. */ +#define RESPONSE_ENTRY_CNT_83XX 4096 /* Number of response entries.*/ #define RESPONSE_ENTRY_CNT_2100 64 /* Number of response entries.*/ #define RESPONSE_ENTRY_CNT_2300 512 /* Number of response entries.*/ #define RESPONSE_ENTRY_CNT_MQ 128 /* Number of response entries.*/ #define ATIO_ENTRY_CNT_24XX 4096 /* Number of ATIO entries. */ #define RESPONSE_ENTRY_CNT_FX00 256 /* Number of response entries.*/ +#define EXTENDED_EXCH_ENTRY_CNT 32768 /* Entries for offload case */ struct req_que; struct qla_tgt_sess; @@ -309,6 +311,14 @@ struct srb_cmd { /* To identify if a srb is of T10-CRC type. @sp => srb_t pointer */ #define IS_PROT_IO(sp) (sp->flags & SRB_CRC_CTX_DSD_VALID) +struct els_logo_payload { + uint8_t opcode; + uint8_t rsvd[3]; + uint8_t s_id[3]; + uint8_t rsvd1[1]; + uint8_t wwpn[WWN_SIZE]; +}; + /* * SRB extensions. */ @@ -322,6 +332,15 @@ struct srb_iocb { uint16_t data[2]; } logio; struct { +#define ELS_DCMD_TIMEOUT 20 +#define ELS_DCMD_LOGO 0x5 + uint32_t flags; + uint32_t els_cmd; + struct completion comp; + struct els_logo_payload *els_logo_pyld; + dma_addr_t els_logo_pyld_dma; + } els_logo; + struct { /* * Values for flags field below are as * defined in tsk_mgmt_entry struct @@ -382,7 +401,7 @@ struct srb_iocb { #define SRB_FXIOCB_DCMD 10 #define SRB_FXIOCB_BCMD 11 #define SRB_ABT_CMD 12 - +#define SRB_ELS_DCMD 13 typedef struct srb { atomic_t ref_count; @@ -891,6 +910,7 @@ struct mbx_cmd_32 { #define MBC_DISABLE_VI 0x24 /* Disable VI operation. */ #define MBC_ENABLE_VI 0x25 /* Enable VI operation. 
*/ #define MBC_GET_FIRMWARE_OPTION 0x28 /* Get Firmware Options. */ +#define MBC_GET_MEM_OFFLOAD_CNTRL_STAT 0x34 /* Memory Offload ctrl/Stat*/ #define MBC_SET_FIRMWARE_OPTION 0x38 /* Set Firmware Options. */ #define MBC_LOOP_PORT_BYPASS 0x40 /* Loop Port Bypass. */ #define MBC_LOOP_PORT_ENABLE 0x41 /* Loop Port Enable. */ @@ -1040,6 +1060,12 @@ struct mbx_cmd_32 { #define FSTATE_FATAL_ERROR 4 #define FSTATE_LOOP_BACK_CONN 5 +#define QLA27XX_IMG_STATUS_VER_MAJOR 0x01 +#define QLA27XX_IMG_STATUS_VER_MINOR 0x00 +#define QLA27XX_IMG_STATUS_SIGN 0xFACEFADE +#define QLA27XX_PRIMARY_IMAGE 1 +#define QLA27XX_SECONDARY_IMAGE 2 + /* * Port Database structure definition * Little endian except where noted. @@ -1228,13 +1254,41 @@ struct link_statistics { uint32_t inval_xmit_word_cnt; uint32_t inval_crc_cnt; uint32_t lip_cnt; - uint32_t unused1[0x1a]; + uint32_t link_up_cnt; + uint32_t link_down_loop_init_tmo; + uint32_t link_down_los; + uint32_t link_down_loss_rcv_clk; + uint32_t reserved0[5]; + uint32_t port_cfg_chg; + uint32_t reserved1[11]; + uint32_t rsp_q_full; + uint32_t atio_q_full; + uint32_t drop_ae; + uint32_t els_proto_err; + uint32_t reserved2; uint32_t tx_frames; uint32_t rx_frames; uint32_t discarded_frames; uint32_t dropped_frames; - uint32_t unused2[1]; + uint32_t reserved3; uint32_t nos_rcvd; + uint32_t reserved4[4]; + uint32_t tx_prjt; + uint32_t rcv_exfail; + uint32_t rcv_abts; + uint32_t seq_frm_miss; + uint32_t corr_err; + uint32_t mb_rqst; + uint32_t nport_full; + uint32_t eofa; + uint32_t reserved5; + uint32_t fpm_recv_word_cnt_lo; + uint32_t fpm_recv_word_cnt_hi; + uint32_t fpm_disc_word_cnt_lo; + uint32_t fpm_disc_word_cnt_hi; + uint32_t fpm_xmit_word_cnt_lo; + uint32_t fpm_xmit_word_cnt_hi; + uint32_t reserved6[70]; }; /* @@ -2695,11 +2749,16 @@ struct isp_operations { struct scsi_qla_host; + +#define QLA83XX_RSPQ_MSIX_ENTRY_NUMBER 1 /* refer to qla83xx_msix_entries */ + struct qla_msix_entry { int have_irq; uint32_t vector; uint16_t entry; struct rsp_que *rsp; + struct irq_affinity_notify irq_notify; + int cpuid; }; #define WATCH_INTERVAL 1 /* number of seconds */ @@ -2910,12 +2969,15 @@ struct qlt_hw_data { uint32_t num_qfull_cmds_dropped; spinlock_t q_full_lock; uint32_t leak_exchg_thresh_hold; + spinlock_t sess_lock; + int rspq_vector_cpuid; + spinlock_t atio_lock ____cacheline_aligned; }; #define MAX_QFULL_CMDS_ALLOC 8192 #define Q_FULL_THRESH_HOLD_PERCENT 90 #define Q_FULL_THRESH_HOLD(ha) \ - ((ha->fw_xcb_count/100) * Q_FULL_THRESH_HOLD_PERCENT) + ((ha->cur_fw_xcb_count/100) * Q_FULL_THRESH_HOLD_PERCENT) #define LEAK_EXCHG_THRESH_HOLD_PERCENT 75 /* 75 percent */ @@ -2962,10 +3024,12 @@ struct qla_hw_data { uint32_t isp82xx_no_md_cap:1; uint32_t host_shutting_down:1; uint32_t idc_compl_status:1; - uint32_t mr_reset_hdlr_active:1; uint32_t mr_intr_valid:1; + uint32_t fawwpn_enabled:1; + uint32_t exlogins_enabled:1; + uint32_t exchoffld_enabled:1; /* 35 bits */ } flags; @@ -3237,6 +3301,21 @@ struct qla_hw_data { void *async_pd; dma_addr_t async_pd_dma; +#define ENABLE_EXTENDED_LOGIN BIT_7 + + /* Extended Logins */ + void *exlogin_buf; + dma_addr_t exlogin_buf_dma; + int exlogin_size; + +#define ENABLE_EXCHANGE_OFFLD BIT_2 + + /* Exchange Offload */ + void *exchoffld_buf; + dma_addr_t exchoffld_buf_dma; + int exchoffld_size; + int exchoffld_count; + void *swl; /* These are used by mailbox operations. 
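*
* On the exlogin_buf/exchoffld_buf members added above: both are
* firmware-owned DMA regions that the driver allocates and then hands
* to the ISP with mailbox command 0x34 (MBC_GET_MEM_OFFLOAD_CNTRL_STAT,
* see qla_mbx.c further down). Lifetime sketch, with names matching
* this patch:
*
*	buf = dma_alloc_coherent(&ha->pdev->dev, sz, &dma, GFP_KERNEL);
*	qla_set_exlogin_mem_cfg(vha, dma);	// during qla2x00_setup_chip
*	dma_free_coherent(&ha->pdev->dev, sz, buf, dma);  // on remove_one
*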
*/ @@ -3279,8 +3358,14 @@ struct qla_hw_data { #define RISC_START_ADDRESS_2100 0x1000 #define RISC_START_ADDRESS_2300 0x800 #define RISC_START_ADDRESS_2400 0x100000 - uint16_t fw_xcb_count; - uint16_t fw_iocb_count; + + uint16_t orig_fw_tgt_xcb_count; + uint16_t cur_fw_tgt_xcb_count; + uint16_t orig_fw_xcb_count; + uint16_t cur_fw_xcb_count; + uint16_t orig_fw_iocb_count; + uint16_t cur_fw_iocb_count; + uint16_t fw_max_fcf_count; uint32_t fw_shared_ram_start; uint32_t fw_shared_ram_end; @@ -3323,6 +3408,9 @@ struct qla_hw_data { uint32_t chain_offset; struct dentry *dfs_dir; struct dentry *dfs_fce; + struct dentry *dfs_tgt_counters; + struct dentry *dfs_fw_resource_cnt; + dma_addr_t fce_dma; void *fce; uint32_t fce_bufs; @@ -3379,14 +3467,20 @@ struct qla_hw_data { uint32_t flt_region_flt; uint32_t flt_region_fdt; uint32_t flt_region_boot; + uint32_t flt_region_boot_sec; uint32_t flt_region_fw; + uint32_t flt_region_fw_sec; uint32_t flt_region_vpd_nvram; uint32_t flt_region_vpd; + uint32_t flt_region_vpd_sec; uint32_t flt_region_nvram; uint32_t flt_region_npiv_conf; uint32_t flt_region_gold_fw; uint32_t flt_region_fcp_prio; uint32_t flt_region_bootload; + uint32_t flt_region_img_status_pri; + uint32_t flt_region_img_status_sec; + uint8_t active_image; /* Needed for BEACON */ uint16_t beacon_blink_led; @@ -3480,6 +3574,18 @@ struct qla_hw_data { int allow_cna_fw_dump; }; +struct qla_tgt_counters { + uint64_t qla_core_sbt_cmd; + uint64_t core_qla_que_buf; + uint64_t qla_core_ret_ctio; + uint64_t core_qla_snd_status; + uint64_t qla_core_ret_sta_ctio; + uint64_t core_qla_free_cmd; + uint64_t num_q_full_sent; + uint64_t num_alloc_iocb_failed; + uint64_t num_term_xchg_sent; +}; + /* * Qlogic scsi host structure */ @@ -3505,6 +3611,7 @@ typedef struct scsi_qla_host { uint32_t delete_progress:1; uint32_t fw_tgt_reported:1; + uint32_t bbcr_enable:1; } flags; atomic_t loop_state; @@ -3595,6 +3702,10 @@ typedef struct scsi_qla_host { atomic_t generation_tick; /* Time when global fcport update has been scheduled */ int total_fcport_update_gen; + /* List of pending LOGOs, protected by tgt_mutex */ + struct list_head logo_list; + /* List of pending PLOGI acks, protected by hw lock */ + struct list_head plogi_ack_list; uint32_t vp_abort_cnt; @@ -3632,8 +3743,20 @@ typedef struct scsi_qla_host { atomic_t vref_count; struct qla8044_reset_template reset_tmplt; + struct qla_tgt_counters tgt_counters; + uint16_t bbcr; } scsi_qla_host_t; +struct qla27xx_image_status { + uint8_t image_status_mask; + uint16_t generation_number; + uint8_t reserved[3]; + uint8_t ver_minor; + uint8_t ver_major; + uint32_t checksum; + uint32_t signature; +} __packed; + #define SET_VP_IDX 1 #define SET_AL_PA 2 #define RESET_VP_IDX 3 diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c index 15cf074ffa3c..cd8b96a4b0dd 100644 --- a/drivers/scsi/qla2xxx/qla_dfs.c +++ b/drivers/scsi/qla2xxx/qla_dfs.c @@ -13,6 +13,85 @@ static struct dentry *qla2x00_dfs_root; static atomic_t qla2x00_dfs_root_count; static int +qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused) +{ + struct scsi_qla_host *vha = s->private; + struct qla_hw_data *ha = vha->hw; + + seq_puts(s, "FW Resource count\n\n"); + seq_printf(s, "Original TGT exchg count[%d]\n", + ha->orig_fw_tgt_xcb_count); + seq_printf(s, "current TGT exchg count[%d]\n", + ha->cur_fw_tgt_xcb_count); + seq_printf(s, "original Initiator Exchange count[%d]\n", + ha->orig_fw_xcb_count); + seq_printf(s, "Current Initiator Exchange count[%d]\n", + ha->cur_fw_xcb_count); + 
seq_printf(s, "Original IOCB count[%d]\n", ha->orig_fw_iocb_count); + seq_printf(s, "Current IOCB count[%d]\n", ha->cur_fw_iocb_count); + seq_printf(s, "MAX VP count[%d]\n", ha->max_npiv_vports); + seq_printf(s, "MAX FCF count[%d]\n", ha->fw_max_fcf_count); + + return 0; +} + +static int +qla_dfs_fw_resource_cnt_open(struct inode *inode, struct file *file) +{ + struct scsi_qla_host *vha = inode->i_private; + return single_open(file, qla_dfs_fw_resource_cnt_show, vha); +} + +static const struct file_operations dfs_fw_resource_cnt_ops = { + .open = qla_dfs_fw_resource_cnt_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int +qla_dfs_tgt_counters_show(struct seq_file *s, void *unused) +{ + struct scsi_qla_host *vha = s->private; + + seq_puts(s, "Target Counters\n"); + seq_printf(s, "qla_core_sbt_cmd = %lld\n", + vha->tgt_counters.qla_core_sbt_cmd); + seq_printf(s, "qla_core_ret_sta_ctio = %lld\n", + vha->tgt_counters.qla_core_ret_sta_ctio); + seq_printf(s, "qla_core_ret_ctio = %lld\n", + vha->tgt_counters.qla_core_ret_ctio); + seq_printf(s, "core_qla_que_buf = %lld\n", + vha->tgt_counters.core_qla_que_buf); + seq_printf(s, "core_qla_snd_status = %lld\n", + vha->tgt_counters.core_qla_snd_status); + seq_printf(s, "core_qla_free_cmd = %lld\n", + vha->tgt_counters.core_qla_free_cmd); + seq_printf(s, "num alloc iocb failed = %lld\n", + vha->tgt_counters.num_alloc_iocb_failed); + seq_printf(s, "num term exchange sent = %lld\n", + vha->tgt_counters.num_term_xchg_sent); + seq_printf(s, "num Q full sent = %lld\n", + vha->tgt_counters.num_q_full_sent); + + return 0; +} + +static int +qla_dfs_tgt_counters_open(struct inode *inode, struct file *file) +{ + struct scsi_qla_host *vha = inode->i_private; + return single_open(file, qla_dfs_tgt_counters_show, vha); +} + +static const struct file_operations dfs_tgt_counters_ops = { + .open = qla_dfs_tgt_counters_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int qla2x00_dfs_fce_show(struct seq_file *s, void *unused) { scsi_qla_host_t *vha = s->private; @@ -146,6 +225,22 @@ create_dir: atomic_inc(&qla2x00_dfs_root_count); create_nodes: + ha->dfs_fw_resource_cnt = debugfs_create_file("fw_resource_count", + S_IRUSR, ha->dfs_dir, vha, &dfs_fw_resource_cnt_ops); + if (!ha->dfs_fw_resource_cnt) { + ql_log(ql_log_warn, vha, 0x00fd, + "Unable to create debugFS fw_resource_count node.\n"); + goto out; + } + + ha->dfs_tgt_counters = debugfs_create_file("tgt_counters", S_IRUSR, + ha->dfs_dir, vha, &dfs_tgt_counters_ops); + if (!ha->dfs_tgt_counters) { + ql_log(ql_log_warn, vha, 0xd301, + "Unable to create debugFS tgt_counters node.\n"); + goto out; + } + ha->dfs_fce = debugfs_create_file("fce", S_IRUSR, ha->dfs_dir, vha, &dfs_fce_ops); if (!ha->dfs_fce) { @@ -161,6 +256,17 @@ int qla2x00_dfs_remove(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; + + if (ha->dfs_fw_resource_cnt) { + debugfs_remove(ha->dfs_fw_resource_cnt); + ha->dfs_fw_resource_cnt = NULL; + } + + if (ha->dfs_tgt_counters) { + debugfs_remove(ha->dfs_tgt_counters); + ha->dfs_tgt_counters = NULL; + } + if (ha->dfs_fce) { debugfs_remove(ha->dfs_fce); ha->dfs_fce = NULL; diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h index 42bb357bf56b..4c0f3a774799 100644 --- a/drivers/scsi/qla2xxx/qla_fw.h +++ b/drivers/scsi/qla2xxx/qla_fw.h @@ -1288,7 +1288,9 @@ struct vp_rpt_id_entry_24xx { uint8_t vp_idx_map[16]; - uint8_t reserved_4[32]; + uint8_t reserved_4[28]; + uint16_t bbcr; + uint8_t 
reserved_5[6]; }; #define VF_EVFP_IOCB_TYPE 0x26 /* Exchange Virtual Fabric Parameters entry. */ @@ -1393,6 +1395,16 @@ struct qla_flt_header { #define FLT_REG_FCOE_NVRAM_0 0xAA #define FLT_REG_FCOE_NVRAM_1 0xAC +/* 27xx */ +#define FLT_REG_IMG_PRI_27XX 0x95 +#define FLT_REG_IMG_SEC_27XX 0x96 +#define FLT_REG_FW_SEC_27XX 0x02 +#define FLT_REG_BOOTLOAD_SEC_27XX 0x9 +#define FLT_REG_VPD_SEC_27XX_0 0x50 +#define FLT_REG_VPD_SEC_27XX_1 0x52 +#define FLT_REG_VPD_SEC_27XX_2 0xD8 +#define FLT_REG_VPD_SEC_27XX_3 0xDA + struct qla_flt_region { uint32_t code; uint32_t size; diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index 7686bfe9a4a9..fe943772fe7b 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -44,6 +44,8 @@ extern int qla2x00_find_new_loop_id(scsi_qla_host_t *, fc_port_t *); extern int qla2x00_fabric_login(scsi_qla_host_t *, fc_port_t *, uint16_t *); extern int qla2x00_local_device_login(scsi_qla_host_t *, fc_port_t *); +extern int qla24xx_els_dcmd_iocb(scsi_qla_host_t *, int, port_id_t); + extern void qla2x00_update_fcports(scsi_qla_host_t *); extern int qla2x00_abort_isp(scsi_qla_host_t *); @@ -88,6 +90,7 @@ extern int qla2xxx_mctp_dump(scsi_qla_host_t *); extern int qla2x00_alloc_outstanding_cmds(struct qla_hw_data *, struct req_que *); extern int qla2x00_init_rings(scsi_qla_host_t *); +extern uint8_t qla27xx_find_valid_image(struct scsi_qla_host *); /* * Global Data in qla_os.c source file. @@ -117,6 +120,9 @@ extern int ql2xdontresethba; extern uint64_t ql2xmaxlun; extern int ql2xmdcapmask; extern int ql2xmdenable; +extern int ql2xexlogins; +extern int ql2xexchoffld; +extern int ql2xfwholdabts; extern int qla2x00_loop_reset(scsi_qla_host_t *); extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int); @@ -135,6 +141,10 @@ extern int qla2x00_post_async_adisc_work(struct scsi_qla_host *, fc_port_t *, uint16_t *); extern int qla2x00_post_async_adisc_done_work(struct scsi_qla_host *, fc_port_t *, uint16_t *); +extern int qla2x00_set_exlogins_buffer(struct scsi_qla_host *); +extern void qla2x00_free_exlogin_buffer(struct qla_hw_data *); +extern int qla2x00_set_exchoffld_buffer(struct scsi_qla_host *); +extern void qla2x00_free_exchoffld_buffer(struct qla_hw_data *); extern int qla81xx_restart_mpi_firmware(scsi_qla_host_t *); @@ -323,8 +333,7 @@ extern int qla2x00_get_id_list(scsi_qla_host_t *, void *, dma_addr_t, uint16_t *); extern int -qla2x00_get_resource_cnts(scsi_qla_host_t *, uint16_t *, uint16_t *, - uint16_t *, uint16_t *, uint16_t *, uint16_t *); +qla2x00_get_resource_cnts(scsi_qla_host_t *); extern int qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map); @@ -766,4 +775,11 @@ extern int qla8044_abort_isp(scsi_qla_host_t *); extern int qla8044_check_fw_alive(struct scsi_qla_host *); extern void qlt_host_reset_handler(struct qla_hw_data *ha); +extern int qla_get_exlogin_status(scsi_qla_host_t *, uint16_t *, + uint16_t *); +extern int qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr); +extern int qla_get_exchoffld_status(scsi_qla_host_t *, uint16_t *, uint16_t *); +extern int qla_set_exchoffld_mem_cfg(scsi_qla_host_t *, dma_addr_t); +extern void qlt_handle_abts_recv(struct scsi_qla_host *, response_t *); + #endif /* _QLA_GBL_H */ diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 16a1935cc9c1..c56cdb35f3ed 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -157,8 +157,12 @@ qla2x00_async_login(struct scsi_qla_host *vha, 
fc_port_t *fcport, if (data[1] & QLA_LOGIO_LOGIN_RETRIED) lio->u.logio.flags |= SRB_LOGIN_RETRIED; rval = qla2x00_start_sp(sp); - if (rval != QLA_SUCCESS) + if (rval != QLA_SUCCESS) { + fcport->flags &= ~FCF_ASYNC_SENT; + fcport->flags |= FCF_LOGIN_NEEDED; + set_bit(RELOGIN_NEEDED, &vha->dpc_flags); goto done_free_sp; + } ql_dbg(ql_dbg_disc, vha, 0x2072, "Async-login - hdl=%x, loopid=%x portid=%02x%02x%02x " @@ -1766,10 +1770,10 @@ qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha, struct req_que *req) (ql2xmultique_tag || ql2xmaxqueues > 1))) req->num_outstanding_cmds = DEFAULT_OUTSTANDING_COMMANDS; else { - if (ha->fw_xcb_count <= ha->fw_iocb_count) - req->num_outstanding_cmds = ha->fw_xcb_count; + if (ha->cur_fw_xcb_count <= ha->cur_fw_iocb_count) + req->num_outstanding_cmds = ha->cur_fw_xcb_count; else - req->num_outstanding_cmds = ha->fw_iocb_count; + req->num_outstanding_cmds = ha->cur_fw_iocb_count; } req->outstanding_cmds = kzalloc(sizeof(srb_t *) * @@ -1843,9 +1847,23 @@ qla2x00_setup_chip(scsi_qla_host_t *vha) ql_dbg(ql_dbg_init, vha, 0x00ca, "Starting firmware.\n"); + if (ql2xexlogins) + ha->flags.exlogins_enabled = 1; + + if (ql2xexchoffld) + ha->flags.exchoffld_enabled = 1; + rval = qla2x00_execute_fw(vha, srisc_address); /* Retrieve firmware information. */ if (rval == QLA_SUCCESS) { + rval = qla2x00_set_exlogins_buffer(vha); + if (rval != QLA_SUCCESS) + goto failed; + + rval = qla2x00_set_exchoffld_buffer(vha); + if (rval != QLA_SUCCESS) + goto failed; + enable_82xx_npiv: fw_major_version = ha->fw_major_version; if (IS_P3P_TYPE(ha)) @@ -1864,9 +1882,7 @@ enable_82xx_npiv: ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1; } - qla2x00_get_resource_cnts(vha, NULL, - &ha->fw_xcb_count, NULL, &ha->fw_iocb_count, - &ha->max_npiv_vports, NULL); + qla2x00_get_resource_cnts(vha); /* * Allocate the array of outstanding commands @@ -2050,6 +2066,10 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha) if (IS_P3P_TYPE(ha)) return; + /* Hold status IOCBs until ABTS response received. */ + if (ql2xfwholdabts) + ha->fw_options[3] |= BIT_12; + /* Update Serial Link options. */ if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0) return; @@ -2192,7 +2212,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha) /* Clear outstanding commands array. */ for (que = 0; que < ha->max_req_queues; que++) { req = ha->req_q_map[que]; - if (!req) + if (!req || !test_bit(que, ha->req_qid_map)) continue; req->out_ptr = (void *)(req->ring + req->length); *req->out_ptr = 0; @@ -2209,7 +2229,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha) for (que = 0; que < ha->max_rsp_queues; que++) { rsp = ha->rsp_q_map[que]; - if (!rsp) + if (!rsp || !test_bit(que, ha->rsp_qid_map)) continue; rsp->in_ptr = (void *)(rsp->ring + rsp->length); *rsp->in_ptr = 0; @@ -2248,7 +2268,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha) if (IS_FWI2_CAPABLE(ha)) { mid_init_cb->options = cpu_to_le16(BIT_1); mid_init_cb->init_cb.execution_throttle = - cpu_to_le16(ha->fw_xcb_count); + cpu_to_le16(ha->cur_fw_xcb_count); /* D-Port Status */ if (IS_DPORT_CAPABLE(ha)) mid_init_cb->init_cb.firmware_options_1 |= @@ -2832,7 +2852,6 @@ qla2x00_nvram_config(scsi_qla_host_t *vha) if (nv->login_timeout < 4) nv->login_timeout = 4; ha->login_timeout = nv->login_timeout; - icb->login_timeout = nv->login_timeout; /* Set minimum RATOV to 100 tenths of a second. 
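*
* On the qla2x00_alloc_outstanding_cmds() hunk above: with the firmware
* resource counts now cached in qla_hw_data, the request queue depth
* becomes the smaller of the firmware's exchange (XCB) and IOCB
* budgets. The if/else is equivalent to this one-liner, assuming both
* counters were populated by qla2x00_get_resource_cnts():
*
*	req->num_outstanding_cmds =
*		min(ha->cur_fw_xcb_count, ha->cur_fw_iocb_count);
*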
*/ ha->r_a_tov = 100; @@ -3053,6 +3072,26 @@ qla2x00_configure_loop(scsi_qla_host_t *vha) atomic_set(&vha->loop_state, LOOP_READY); ql_dbg(ql_dbg_disc, vha, 0x2069, "LOOP READY.\n"); + + /* + * Process any ATIO queue entries that came in + * while we weren't online. + */ + if (qla_tgt_mode_enabled(vha)) { + if (IS_QLA27XX(ha) || IS_QLA83XX(ha)) { + spin_lock_irqsave(&ha->tgt.atio_lock, + flags); + qlt_24xx_process_atio_queue(vha, 0); + spin_unlock_irqrestore( + &ha->tgt.atio_lock, flags); + } else { + spin_lock_irqsave(&ha->hardware_lock, + flags); + qlt_24xx_process_atio_queue(vha, 1); + spin_unlock_irqrestore( + &ha->hardware_lock, flags); + } + } } } @@ -4907,7 +4946,6 @@ qla2x00_restart_isp(scsi_qla_host_t *vha) struct qla_hw_data *ha = vha->hw; struct req_que *req = ha->req_q_map[0]; struct rsp_que *rsp = ha->rsp_q_map[0]; - unsigned long flags; /* If firmware needs to be loaded */ if (qla2x00_isp_firmware(vha)) { @@ -4929,17 +4967,6 @@ qla2x00_restart_isp(scsi_qla_host_t *vha) /* Issue a marker after FW becomes ready. */ qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL); - vha->flags.online = 1; - - /* - * Process any ATIO queue entries that came in - * while we weren't online. - */ - spin_lock_irqsave(&ha->hardware_lock, flags); - if (qla_tgt_mode_enabled(vha)) - qlt_24xx_process_atio_queue(vha); - spin_unlock_irqrestore(&ha->hardware_lock, flags); - set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); } @@ -4961,7 +4988,7 @@ qla25xx_init_queues(struct qla_hw_data *ha) for (i = 1; i < ha->max_rsp_queues; i++) { rsp = ha->rsp_q_map[i]; - if (rsp) { + if (rsp && test_bit(i, ha->rsp_qid_map)) { rsp->options &= ~BIT_0; ret = qla25xx_init_rsp_que(base_vha, rsp); if (ret != QLA_SUCCESS) @@ -4976,8 +5003,8 @@ qla25xx_init_queues(struct qla_hw_data *ha) } for (i = 1; i < ha->max_req_queues; i++) { req = ha->req_q_map[i]; - if (req) { - /* Clear outstanding commands array. */ + if (req && test_bit(i, ha->req_qid_map)) { + /* Clear outstanding commands array. */ req->options &= ~BIT_0; ret = qla25xx_init_req_que(base_vha, req); if (ret != QLA_SUCCESS) @@ -5102,8 +5129,8 @@ qla24xx_nvram_config(scsi_qla_host_t *vha) dptr = (uint32_t *)nv; ha->isp_ops->read_nvram(vha, (uint8_t *)dptr, ha->nvram_base, ha->nvram_size); - for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++) - chksum += le32_to_cpu(*dptr++); + for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++) + chksum += le32_to_cpu(*dptr); ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x006a, "Contents of NVRAM\n"); @@ -5254,7 +5281,6 @@ qla24xx_nvram_config(scsi_qla_host_t *vha) if (le16_to_cpu(nv->login_timeout) < 4) nv->login_timeout = cpu_to_le16(4); ha->login_timeout = le16_to_cpu(nv->login_timeout); - icb->login_timeout = nv->login_timeout; /* Set minimum RATOV to 100 tenths of a second. 
*/ ha->r_a_tov = 100; @@ -5326,6 +5352,93 @@ qla24xx_nvram_config(scsi_qla_host_t *vha) return (rval); } +uint8_t qla27xx_find_valid_image(struct scsi_qla_host *vha) +{ + struct qla27xx_image_status pri_image_status, sec_image_status; + uint8_t valid_pri_image, valid_sec_image; + uint32_t *wptr; + uint32_t cnt, chksum, size; + struct qla_hw_data *ha = vha->hw; + + valid_pri_image = valid_sec_image = 1; + ha->active_image = 0; + size = sizeof(struct qla27xx_image_status) / sizeof(uint32_t); + + if (!ha->flt_region_img_status_pri) { + valid_pri_image = 0; + goto check_sec_image; + } + + qla24xx_read_flash_data(vha, (uint32_t *)(&pri_image_status), + ha->flt_region_img_status_pri, size); + + if (pri_image_status.signature != QLA27XX_IMG_STATUS_SIGN) { + ql_dbg(ql_dbg_init, vha, 0x018b, + "Primary image signature (0x%x) not valid\n", + pri_image_status.signature); + valid_pri_image = 0; + goto check_sec_image; + } + + wptr = (uint32_t *)(&pri_image_status); + cnt = size; + + for (chksum = 0; cnt--; wptr++) + chksum += le32_to_cpu(*wptr); + if (chksum) { + ql_dbg(ql_dbg_init, vha, 0x018c, + "Checksum validation failed for primary image (0x%x)\n", + chksum); + valid_pri_image = 0; + } + +check_sec_image: + if (!ha->flt_region_img_status_sec) { + valid_sec_image = 0; + goto check_valid_image; + } + + qla24xx_read_flash_data(vha, (uint32_t *)(&sec_image_status), + ha->flt_region_img_status_sec, size); + + if (sec_image_status.signature != QLA27XX_IMG_STATUS_SIGN) { + ql_dbg(ql_dbg_init, vha, 0x018d, + "Secondary image signature(0x%x) not valid\n", + sec_image_status.signature); + valid_sec_image = 0; + goto check_valid_image; + } + + wptr = (uint32_t *)(&sec_image_status); + cnt = size; + for (chksum = 0; cnt--; wptr++) + chksum += le32_to_cpu(*wptr); + if (chksum) { + ql_dbg(ql_dbg_init, vha, 0x018e, + "Checksum validation failed for secondary image (0x%x)\n", + chksum); + valid_sec_image = 0; + } + +check_valid_image: + if (valid_pri_image && (pri_image_status.image_status_mask & 0x1)) + ha->active_image = QLA27XX_PRIMARY_IMAGE; + if (valid_sec_image && (sec_image_status.image_status_mask & 0x1)) { + if (!ha->active_image || + pri_image_status.generation_number < + sec_image_status.generation_number) + ha->active_image = QLA27XX_SECONDARY_IMAGE; + } + + ql_dbg(ql_dbg_init, vha, 0x018f, "%s image\n", + ha->active_image == 0 ? "default bootld and fw" : + ha->active_image == 1 ? "primary" : + ha->active_image == 2 ? "secondary" : + "Invalid"); + + return ha->active_image; +} + static int qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr, uint32_t faddr) @@ -5348,6 +5461,10 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr, dcode = (uint32_t *)req->ring; *srisc_addr = 0; + if (IS_QLA27XX(ha) && + qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE) + faddr = ha->flt_region_fw_sec; + /* Validate firmware image by checking version. 
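*
* On qla27xx_find_valid_image() above: each flash image-status region
* is validated by summing every little-endian dword of the structure,
* including the stored checksum field itself, so an intact region must
* sum to zero. A tool-side sketch for producing such a checksum
* (assumes the same struct layout; not part of the driver):
*
*	uint32_t i, sum = 0, *w = (uint32_t *)&img;
*	img.checksum = 0;
*	for (i = 0; i < sizeof(img) / 4; i++)
*		sum += le32_to_cpu(w[i]);
*	img.checksum = cpu_to_le32(0 - sum);	// total now sums to zero
*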
*/ qla24xx_read_flash_data(vha, dcode, faddr + 4, 4); for (i = 0; i < 4; i++) @@ -6048,8 +6165,8 @@ qla81xx_nvram_config(scsi_qla_host_t *vha) ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2, ha->nvram_size); dptr = (uint32_t *)nv; - for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++) - chksum += le32_to_cpu(*dptr++); + for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++) + chksum += le32_to_cpu(*dptr); ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0111, "Contents of NVRAM:\n"); @@ -6211,7 +6328,6 @@ qla81xx_nvram_config(scsi_qla_host_t *vha) if (le16_to_cpu(nv->login_timeout) < 4) nv->login_timeout = cpu_to_le16(4); ha->login_timeout = le16_to_cpu(nv->login_timeout); - icb->login_timeout = nv->login_timeout; /* Set minimum RATOV to 100 tenths of a second. */ ha->r_a_tov = 100; @@ -6393,12 +6509,17 @@ qla81xx_update_fw_options(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; + /* Hold status IOCBs until ABTS response received. */ + if (ql2xfwholdabts) + ha->fw_options[3] |= BIT_12; + if (!ql2xetsenable) - return; + goto out; /* Enable ETS Burst. */ memset(ha->fw_options, 0, sizeof(ha->fw_options)); ha->fw_options[2] |= BIT_9; +out: qla2x00_set_fw_options(vha, ha->fw_options); } diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h index fee9eb7c8a60..edc48f3b8230 100644 --- a/drivers/scsi/qla2xxx/qla_inline.h +++ b/drivers/scsi/qla2xxx/qla_inline.h @@ -87,8 +87,8 @@ host_to_adap(uint8_t *src, uint8_t *dst, uint32_t bsize) __le32 *odest = (__le32 *) dst; uint32_t iter = bsize >> 2; - for (; iter ; iter--) - *odest++ = cpu_to_le32(*isrc++); + for ( ; iter--; isrc++) + *odest++ = cpu_to_le32(*isrc); } static inline void @@ -258,6 +258,8 @@ qla2x00_init_timer(srb_t *sp, unsigned long tmo) if ((IS_QLAFX00(sp->fcport->vha->hw)) && (sp->type == SRB_FXIOCB_DCMD)) init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp); + if (sp->type == SRB_ELS_DCMD) + init_completion(&sp->u.iocb_cmd.u.els_logo.comp); } static inline int diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index c49df34e9b35..b41265a75ed5 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -1868,6 +1868,7 @@ skip_cmd_array: } queuing_error: + vha->tgt_counters.num_alloc_iocb_failed++; return pkt; } @@ -2010,6 +2011,190 @@ qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk) } static void +qla2x00_els_dcmd_sp_free(void *ptr, void *data) +{ + struct scsi_qla_host *vha = (scsi_qla_host_t *)ptr; + struct qla_hw_data *ha = vha->hw; + srb_t *sp = (srb_t *)data; + struct srb_iocb *elsio = &sp->u.iocb_cmd; + + kfree(sp->fcport); + + if (elsio->u.els_logo.els_logo_pyld) + dma_free_coherent(&ha->pdev->dev, DMA_POOL_SIZE, + elsio->u.els_logo.els_logo_pyld, + elsio->u.els_logo.els_logo_pyld_dma); + + del_timer(&elsio->timer); + qla2x00_rel_sp(vha, sp); +} + +static void +qla2x00_els_dcmd_iocb_timeout(void *data) +{ + srb_t *sp = (srb_t *)data; + struct srb_iocb *lio = &sp->u.iocb_cmd; + fc_port_t *fcport = sp->fcport; + struct scsi_qla_host *vha = fcport->vha; + struct qla_hw_data *ha = vha->hw; + unsigned long flags = 0; + + ql_dbg(ql_dbg_io, vha, 0x3069, + "%s Timeout, hdl=%x, portid=%02x%02x%02x\n", + sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area, + fcport->d_id.b.al_pa); + + /* Abort the exchange */ + spin_lock_irqsave(&ha->hardware_lock, flags); + if (ha->isp_ops->abort_command(sp)) { + ql_dbg(ql_dbg_io, vha, 0x3070, + "mbx abort_command failed.\n"); + } else { + ql_dbg(ql_dbg_io, 
vha, 0x3071, + "mbx abort_command success.\n"); + } + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + complete(&lio->u.els_logo.comp); +} + +static void +qla2x00_els_dcmd_sp_done(void *data, void *ptr, int res) +{ + srb_t *sp = (srb_t *)ptr; + fc_port_t *fcport = sp->fcport; + struct srb_iocb *lio = &sp->u.iocb_cmd; + struct scsi_qla_host *vha = fcport->vha; + + ql_dbg(ql_dbg_io, vha, 0x3072, + "%s hdl=%x, portid=%02x%02x%02x done\n", + sp->name, sp->handle, fcport->d_id.b.domain, + fcport->d_id.b.area, fcport->d_id.b.al_pa); + + complete(&lio->u.els_logo.comp); +} + +int +qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode, + port_id_t remote_did) +{ + srb_t *sp; + fc_port_t *fcport = NULL; + struct srb_iocb *elsio = NULL; + struct qla_hw_data *ha = vha->hw; + struct els_logo_payload logo_pyld; + int rval = QLA_SUCCESS; + + fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); + if (!fcport) { + ql_log(ql_log_info, vha, 0x70e5, "fcport allocation failed\n"); + return -ENOMEM; + } + + /* Alloc SRB structure */ + sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); + if (!sp) { + kfree(fcport); + ql_log(ql_log_info, vha, 0x70e6, + "SRB allocation failed\n"); + return -ENOMEM; + } + + elsio = &sp->u.iocb_cmd; + fcport->loop_id = 0xFFFF; + fcport->d_id.b.domain = remote_did.b.domain; + fcport->d_id.b.area = remote_did.b.area; + fcport->d_id.b.al_pa = remote_did.b.al_pa; + + ql_dbg(ql_dbg_io, vha, 0x3073, "portid=%02x%02x%02x done\n", + fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa); + + sp->type = SRB_ELS_DCMD; + sp->name = "ELS_DCMD"; + sp->fcport = fcport; + qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT); + elsio->timeout = qla2x00_els_dcmd_iocb_timeout; + sp->done = qla2x00_els_dcmd_sp_done; + sp->free = qla2x00_els_dcmd_sp_free; + + elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev, + DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma, + GFP_KERNEL); + + if (!elsio->u.els_logo.els_logo_pyld) { + sp->free(vha, sp); + return QLA_FUNCTION_FAILED; + } + + memset(&logo_pyld, 0, sizeof(struct els_logo_payload)); + + elsio->u.els_logo.els_cmd = els_opcode; + logo_pyld.opcode = els_opcode; + logo_pyld.s_id[0] = vha->d_id.b.al_pa; + logo_pyld.s_id[1] = vha->d_id.b.area; + logo_pyld.s_id[2] = vha->d_id.b.domain; + host_to_fcp_swap(logo_pyld.s_id, sizeof(uint32_t)); + memcpy(&logo_pyld.wwpn, vha->port_name, WWN_SIZE); + + memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld, + sizeof(struct els_logo_payload)); + + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) { + sp->free(vha, sp); + return QLA_FUNCTION_FAILED; + } + + ql_dbg(ql_dbg_io, vha, 0x3074, + "%s LOGO sent, hdl=%x, loopid=%x, portid=%02x%02x%02x.\n", + sp->name, sp->handle, fcport->loop_id, fcport->d_id.b.domain, + fcport->d_id.b.area, fcport->d_id.b.al_pa); + + wait_for_completion(&elsio->u.els_logo.comp); + + sp->free(vha, sp); + return rval; +} + +static void +qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) +{ + scsi_qla_host_t *vha = sp->fcport->vha; + struct srb_iocb *elsio = &sp->u.iocb_cmd; + + els_iocb->entry_type = ELS_IOCB_TYPE; + els_iocb->entry_count = 1; + els_iocb->sys_define = 0; + els_iocb->entry_status = 0; + els_iocb->handle = sp->handle; + els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); + els_iocb->tx_dsd_count = 1; + els_iocb->vp_index = vha->vp_idx; + els_iocb->sof_type = EST_SOFI3; + els_iocb->rx_dsd_count = 0; + els_iocb->opcode = elsio->u.els_logo.els_cmd; + + els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa; + els_iocb->port_id[1] = 
sp->fcport->d_id.b.area; + els_iocb->port_id[2] = sp->fcport->d_id.b.domain; + els_iocb->control_flags = 0; + + els_iocb->tx_byte_count = sizeof(struct els_logo_payload); + els_iocb->tx_address[0] = + cpu_to_le32(LSD(elsio->u.els_logo.els_logo_pyld_dma)); + els_iocb->tx_address[1] = + cpu_to_le32(MSD(elsio->u.els_logo.els_logo_pyld_dma)); + els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload)); + + els_iocb->rx_byte_count = 0; + els_iocb->rx_address[0] = 0; + els_iocb->rx_address[1] = 0; + els_iocb->rx_len = 0; + + sp->fcport->vha->qla_stats.control_requests++; +} + +static void qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) { struct fc_bsg_job *bsg_job = sp->u.bsg_job; @@ -2623,6 +2808,9 @@ qla2x00_start_sp(srb_t *sp) qlafx00_abort_iocb(sp, pkt) : qla24xx_abort_iocb(sp, pkt); break; + case SRB_ELS_DCMD: + qla24xx_els_logo_iocb(sp, pkt); + break; default: break; } diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index ccf6a7f99024..5649c200d37c 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -18,6 +18,10 @@ static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *); static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *); static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *, sts_entry_t *); +static void qla_irq_affinity_notify(struct irq_affinity_notify *, + const cpumask_t *); +static void qla_irq_affinity_release(struct kref *); + /** * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200. @@ -930,10 +934,6 @@ skip_rio: break; global_port_update: - /* Port unavailable. */ - ql_log(ql_log_warn, vha, 0x505e, - "Link is offline.\n"); - if (atomic_read(&vha->loop_state) != LOOP_DOWN) { atomic_set(&vha->loop_state, LOOP_DOWN); atomic_set(&vha->loop_down_timer, @@ -1418,6 +1418,12 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req, case SRB_CT_CMD: type = "ct pass-through"; break; + case SRB_ELS_DCMD: + type = "Driver ELS logo"; + ql_dbg(ql_dbg_user, vha, 0x5047, + "Completing %s: (%p) type=%d.\n", type, sp, sp->type); + sp->done(vha, sp, 0); + return; default: ql_dbg(ql_dbg_user, vha, 0x503e, "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type); @@ -2542,6 +2548,14 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha, if (!vha->flags.online) return; + if (rsp->msix->cpuid != smp_processor_id()) { + /* if kernel does not notify qla of IRQ's CPU change, + * then set it here. 
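*
* Why track the servicing CPU at all: with the new sess_lock/atio_lock
* split, target-mode code consults ha->tgt.rspq_vector_cpuid to decide
* whether it is already running on the response-queue vector's CPU.
* This branch is only a fallback; the primary update path is the
* irq_set_affinity_notifier() callback wired up in
* qla24xx_enable_msix(). Consumer-side sketch (an assumption, modeled
* on the qla_target.c usage; the helpers are hypothetical):
*
*	if (ha->tgt.rspq_vector_cpuid == smp_processor_id())
*		process_atio_inline(vha);	// hypothetical helper
*	else
*		queue_to_rspq_cpu(vha);		// hypothetical helper
*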
+ */ + rsp->msix->cpuid = smp_processor_id(); + ha->tgt.rspq_vector_cpuid = rsp->msix->cpuid; + } + while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) { pkt = (struct sts_entry_24xx *)rsp->ring_ptr; @@ -2587,8 +2601,14 @@ process_err: qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE); break; case ABTS_RECV_24XX: - /* ensure that the ATIO queue is empty */ - qlt_24xx_process_atio_queue(vha); + if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) { + /* ensure that the ATIO queue is empty */ + qlt_handle_abts_recv(vha, (response_t *)pkt); + break; + } else { + /* drop through */ + qlt_24xx_process_atio_queue(vha, 1); + } case ABTS_RESP_24XX: case CTIO_TYPE7: case NOTIFY_ACK_TYPE: @@ -2755,13 +2775,22 @@ qla24xx_intr_handler(int irq, void *dev_id) case INTR_RSP_QUE_UPDATE_83XX: qla24xx_process_response_queue(vha, rsp); break; - case INTR_ATIO_QUE_UPDATE: - qlt_24xx_process_atio_queue(vha); + case INTR_ATIO_QUE_UPDATE:{ + unsigned long flags2; + spin_lock_irqsave(&ha->tgt.atio_lock, flags2); + qlt_24xx_process_atio_queue(vha, 1); + spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2); break; - case INTR_ATIO_RSP_QUE_UPDATE: - qlt_24xx_process_atio_queue(vha); + } + case INTR_ATIO_RSP_QUE_UPDATE: { + unsigned long flags2; + spin_lock_irqsave(&ha->tgt.atio_lock, flags2); + qlt_24xx_process_atio_queue(vha, 1); + spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2); + qla24xx_process_response_queue(vha, rsp); break; + } default: ql_dbg(ql_dbg_async, vha, 0x504f, "Unrecognized interrupt type (%d).\n", stat * 0xff); @@ -2920,13 +2949,22 @@ qla24xx_msix_default(int irq, void *dev_id) case INTR_RSP_QUE_UPDATE_83XX: qla24xx_process_response_queue(vha, rsp); break; - case INTR_ATIO_QUE_UPDATE: - qlt_24xx_process_atio_queue(vha); + case INTR_ATIO_QUE_UPDATE:{ + unsigned long flags2; + spin_lock_irqsave(&ha->tgt.atio_lock, flags2); + qlt_24xx_process_atio_queue(vha, 1); + spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2); break; - case INTR_ATIO_RSP_QUE_UPDATE: - qlt_24xx_process_atio_queue(vha); + } + case INTR_ATIO_RSP_QUE_UPDATE: { + unsigned long flags2; + spin_lock_irqsave(&ha->tgt.atio_lock, flags2); + qlt_24xx_process_atio_queue(vha, 1); + spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2); + qla24xx_process_response_queue(vha, rsp); break; + } default: ql_dbg(ql_dbg_async, vha, 0x5051, "Unrecognized interrupt type (%d).\n", stat & 0xff); @@ -2973,8 +3011,11 @@ qla24xx_disable_msix(struct qla_hw_data *ha) for (i = 0; i < ha->msix_count; i++) { qentry = &ha->msix_entries[i]; - if (qentry->have_irq) + if (qentry->have_irq) { + /* un-register irq cpu affinity notification */ + irq_set_affinity_notifier(qentry->vector, NULL); free_irq(qentry->vector, qentry->rsp); + } } pci_disable_msix(ha->pdev); kfree(ha->msix_entries); @@ -3018,9 +3059,9 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) "MSI-X: Failed to enable support " "-- %d/%d\n Retry with %d vectors.\n", ha->msix_count, ret, ret); + ha->msix_count = ret; + ha->max_rsp_queues = ha->msix_count - 1; } - ha->msix_count = ret; - ha->max_rsp_queues = ha->msix_count - 1; ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) * ha->msix_count, GFP_KERNEL); if (!ha->msix_entries) { @@ -3037,6 +3078,9 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) qentry->entry = entries[i].entry; qentry->have_irq = 0; qentry->rsp = NULL; + qentry->irq_notify.notify = qla_irq_affinity_notify; + qentry->irq_notify.release = qla_irq_affinity_release; + qentry->cpuid = -1; } /* Enable MSI-X vectors for the base queue */ @@ -3055,6 +3099,18 
@@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) qentry->have_irq = 1; qentry->rsp = rsp; rsp->msix = qentry; + + /* Register for CPU affinity notification. */ + irq_set_affinity_notifier(qentry->vector, &qentry->irq_notify); + + /* Schedule work (ie. trigger a notification) to read cpu + * mask for this specific irq. + * kref_get is required because + * irq_affinity_notify() will do + * kref_put(). + */ + kref_get(&qentry->irq_notify.kref); + schedule_work(&qentry->irq_notify.work); } /* @@ -3234,3 +3290,47 @@ int qla25xx_request_irq(struct rsp_que *rsp) msix->rsp = rsp; return ret; } + + +/* irq_set_affinity/irqbalance will trigger notification of cpu mask update */ +static void qla_irq_affinity_notify(struct irq_affinity_notify *notify, + const cpumask_t *mask) +{ + struct qla_msix_entry *e = + container_of(notify, struct qla_msix_entry, irq_notify); + struct qla_hw_data *ha; + struct scsi_qla_host *base_vha; + + /* user is recommended to set mask to just 1 cpu */ + e->cpuid = cpumask_first(mask); + + ha = e->rsp->hw; + base_vha = pci_get_drvdata(ha->pdev); + + ql_dbg(ql_dbg_init, base_vha, 0xffff, + "%s: host %ld : vector %d cpu %d \n", __func__, + base_vha->host_no, e->vector, e->cpuid); + + if (e->have_irq) { + if ((IS_QLA83XX(ha) || IS_QLA27XX(ha)) && + (e->entry == QLA83XX_RSPQ_MSIX_ENTRY_NUMBER)) { + ha->tgt.rspq_vector_cpuid = e->cpuid; + ql_dbg(ql_dbg_init, base_vha, 0xffff, + "%s: host%ld: rspq vector %d cpu %d runtime change\n", + __func__, base_vha->host_no, e->vector, e->cpuid); + } + } +} + +static void qla_irq_affinity_release(struct kref *ref) +{ + struct irq_affinity_notify *notify = + container_of(ref, struct irq_affinity_notify, kref); + struct qla_msix_entry *e = + container_of(notify, struct qla_msix_entry, irq_notify); + struct scsi_qla_host *base_vha = pci_get_drvdata(e->rsp->hw->pdev); + + ql_dbg(ql_dbg_init, base_vha, 0xffff, + "%s: host%ld: vector %d cpu %d \n", __func__, + base_vha->host_no, e->vector, e->cpuid); +} diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index cb11e04be568..968b84613096 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -489,6 +489,13 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr) EXTENDED_BB_CREDITS); } else mcp->mb[4] = 0; + + if (ha->flags.exlogins_enabled) + mcp->mb[4] |= ENABLE_EXTENDED_LOGIN; + + if (ha->flags.exchoffld_enabled) + mcp->mb[4] |= ENABLE_EXCHANGE_OFFLD; + mcp->out_mb |= MBX_4|MBX_3|MBX_2|MBX_1; mcp->in_mb |= MBX_1; } else { @@ -521,6 +528,226 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr) } /* + * qla_get_exlogin_status + * Get extended login status + * uses the memory offload control/status Mailbox + * + * Input: + * ha: adapter state pointer. + * fwopt: firmware options + * + * Returns: + * qla2x00 local function status + * + * Context: + * Kernel context. 
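+ *
+ * The 0x34 (MBC_GET_MEM_OFFLOAD_CNTRL_STAT) helpers added in this file
+ * form a query/allocate/configure handshake. Caller-side sketch,
+ * mirroring qla2x00_set_exlogins_buffer() in qla_os.c (error handling
+ * elided):
+ *
+ *	uint16_t size, max_cnt = 0;
+ *	qla_get_exlogin_status(vha, &size, &max_cnt);
+ *	ha->exlogin_size = size * min_t(uint16_t, ql2xexlogins, max_cnt);
+ *	ha->exlogin_buf = dma_alloc_coherent(&ha->pdev->dev,
+ *	    ha->exlogin_size, &ha->exlogin_buf_dma, GFP_KERNEL);
+ *	qla_set_exlogin_mem_cfg(vha, ha->exlogin_buf_dma);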
+ */ +#define FETCH_XLOGINS_STAT 0x8 +int +qla_get_exlogin_status(scsi_qla_host_t *vha, uint16_t *buf_sz, + uint16_t *ex_logins_cnt) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118f, + "Entered %s\n", __func__); + + memset(mcp->mb, 0, sizeof(mcp->mb)); + mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT; + mcp->mb[1] = FETCH_XLOGINS_STAT; + mcp->out_mb = MBX_1|MBX_0; + mcp->in_mb = MBX_10|MBX_4|MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + + rval = qla2x00_mailbox_command(vha, mcp); + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x1115, "Failed=%x.\n", rval); + } else { + *buf_sz = mcp->mb[4]; + *ex_logins_cnt = mcp->mb[10]; + + ql_log(ql_log_info, vha, 0x1190, + "buffer size 0x%x, exchange login count=%d\n", + mcp->mb[4], mcp->mb[10]); + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1116, + "Done %s.\n", __func__); + } + + return rval; +} + +/* + * qla_set_exlogin_mem_cfg + * set extended login memory configuration + * Mbx needs to be issued before init_cb is set + * + * Input: + * ha: adapter state pointer. + * buffer: buffer pointer + * phys_addr: physical address of buffer + * size: size of buffer + * TARGET_QUEUE_LOCK must be released + * ADAPTER_STATE_LOCK must be released + * + * Returns: + * qla2x00 local function status code. + * + * Context: + * Kernel context. + */ +#define CONFIG_XLOGINS_MEM 0x3 +int +qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + struct qla_hw_data *ha = vha->hw; + int configured_count; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111a, + "Entered %s.\n", __func__); + + memset(mcp->mb, 0, sizeof(mcp->mb)); + mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT; + mcp->mb[1] = CONFIG_XLOGINS_MEM; + mcp->mb[2] = MSW(phys_addr); + mcp->mb[3] = LSW(phys_addr); + mcp->mb[6] = MSW(MSD(phys_addr)); + mcp->mb[7] = LSW(MSD(phys_addr)); + mcp->mb[8] = MSW(ha->exlogin_size); + mcp->mb[9] = LSW(ha->exlogin_size); + mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_11|MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + if (rval != QLA_SUCCESS) { + /*EMPTY*/ + ql_dbg(ql_dbg_mbx, vha, 0x111b, "Failed=%x.\n", rval); + } else { + configured_count = mcp->mb[11]; + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c, + "Done %s.\n", __func__); + } + + return rval; +} + +/* + * qla_get_exchoffld_status + * Get exchange offload status + * uses the memory offload control/status Mailbox + * + * Input: + * ha: adapter state pointer. + * fwopt: firmware options + * + * Returns: + * qla2x00 local function status + * + * Context: + * Kernel context. 
+ */ +#define FETCH_XCHOFFLD_STAT 0x2 +int +qla_get_exchoffld_status(scsi_qla_host_t *vha, uint16_t *buf_sz, + uint16_t *ex_logins_cnt) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1019, + "Entered %s\n", __func__); + + memset(mcp->mb, 0, sizeof(mcp->mb)); + mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT; + mcp->mb[1] = FETCH_XCHOFFLD_STAT; + mcp->out_mb = MBX_1|MBX_0; + mcp->in_mb = MBX_10|MBX_4|MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + + rval = qla2x00_mailbox_command(vha, mcp); + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x1155, "Failed=%x.\n", rval); + } else { + *buf_sz = mcp->mb[4]; + *ex_logins_cnt = mcp->mb[10]; + + ql_log(ql_log_info, vha, 0x118e, + "buffer size 0x%x, exchange offload count=%d\n", + mcp->mb[4], mcp->mb[10]); + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1156, + "Done %s.\n", __func__); + } + + return rval; +} + +/* + * qla_set_exchoffld_mem_cfg + * Set exchange offload memory configuration + * Mbx needs to be issued before init_cb is set + * + * Input: + * ha: adapter state pointer. + * buffer: buffer pointer + * phys_addr: physical address of buffer + * size: size of buffer + * TARGET_QUEUE_LOCK must be released + * ADAPTER_STATE_LOCK must be released + * + * Returns: + * qla2x00 local function status code. + * + * Context: + * Kernel context. + */ +#define CONFIG_XCHOFFLD_MEM 0x3 +int +qla_set_exchoffld_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + struct qla_hw_data *ha = vha->hw; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1157, + "Entered %s.\n", __func__); + + memset(mcp->mb, 0, sizeof(mcp->mb)); + mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT; + mcp->mb[1] = CONFIG_XCHOFFLD_MEM; + mcp->mb[2] = MSW(phys_addr); + mcp->mb[3] = LSW(phys_addr); + mcp->mb[6] = MSW(MSD(phys_addr)); + mcp->mb[7] = LSW(MSD(phys_addr)); + mcp->mb[8] = MSW(ha->exchoffld_size); + mcp->mb[9] = LSW(ha->exchoffld_size); + mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_11|MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + if (rval != QLA_SUCCESS) { + /*EMPTY*/ + ql_dbg(ql_dbg_mbx, vha, 0x1158, "Failed=%x.\n", rval); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1192, + "Done %s.\n", __func__); + } + + return rval; +} + +/* * qla2x00_get_fw_version * Get firmware version. 
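*
* Addressing note for the two *_mem_cfg helpers above: the 64-bit DMA
* address is split across four 16-bit mailboxes, most- to
* least-significant word in mb[6], mb[7], mb[2], mb[3]. Firmware-side
* reassembly is therefore:
*
*	phys = ((uint64_t)mb[6] << 48) | ((uint64_t)mb[7] << 32) |
*	       ((uint32_t)mb[2] << 16) | mb[3];
*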
* @@ -594,6 +821,16 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha) ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f, "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n", __func__, mcp->mb[17], mcp->mb[16]); + + if (ha->fw_attributes_h & 0x4) + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118d, + "%s: Firmware supports Extended Login 0x%x\n", + __func__, ha->fw_attributes_h); + + if (ha->fw_attributes_h & 0x8) + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1191, + "%s: Firmware supports Exchange Offload 0x%x\n", + __func__, ha->fw_attributes_h); } if (IS_QLA27XX(ha)) { @@ -1112,6 +1349,8 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa, mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10; if (IS_FWI2_CAPABLE(vha->hw)) mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16; + if (IS_QLA27XX(vha->hw)) + mcp->in_mb |= MBX_15; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); @@ -1163,6 +1402,9 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa, wwn_to_u64(vha->port_name)); } } + + if (IS_QLA27XX(vha->hw)) + vha->bbcr = mcp->mb[15]; } return rval; @@ -2383,10 +2625,9 @@ qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma, * Kernel context. */ int -qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt, - uint16_t *orig_xchg_cnt, uint16_t *cur_iocb_cnt, - uint16_t *orig_iocb_cnt, uint16_t *max_npiv_vports, uint16_t *max_fcfs) +qla2x00_get_resource_cnts(scsi_qla_host_t *vha) { + struct qla_hw_data *ha = vha->hw; int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; @@ -2414,19 +2655,16 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt, mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10], mcp->mb[11], mcp->mb[12]); - if (cur_xchg_cnt) - *cur_xchg_cnt = mcp->mb[3]; - if (orig_xchg_cnt) - *orig_xchg_cnt = mcp->mb[6]; - if (cur_iocb_cnt) - *cur_iocb_cnt = mcp->mb[7]; - if (orig_iocb_cnt) - *orig_iocb_cnt = mcp->mb[10]; - if (vha->hw->flags.npiv_supported && max_npiv_vports) - *max_npiv_vports = mcp->mb[11]; - if ((IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw) || - IS_QLA27XX(vha->hw)) && max_fcfs) - *max_fcfs = mcp->mb[12]; + ha->orig_fw_tgt_xcb_count = mcp->mb[1]; + ha->cur_fw_tgt_xcb_count = mcp->mb[2]; + ha->cur_fw_xcb_count = mcp->mb[3]; + ha->orig_fw_xcb_count = mcp->mb[6]; + ha->cur_fw_iocb_count = mcp->mb[7]; + ha->orig_fw_iocb_count = mcp->mb[10]; + if (ha->flags.npiv_supported) + ha->max_npiv_vports = mcp->mb[11]; + if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) + ha->fw_max_fcf_count = mcp->mb[12]; } return (rval); @@ -2521,7 +2759,7 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id, int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - uint32_t *siter, *diter, dwords; + uint32_t *iter, dwords; struct qla_hw_data *ha = vha->hw; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084, @@ -2561,10 +2799,11 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id, /* Copy over data -- firmware data is LE. */ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086, "Done %s.\n", __func__); - dwords = offsetof(struct link_statistics, unused1) / 4; - siter = diter = &stats->link_fail_cnt; - while (dwords--) - *diter++ = le32_to_cpu(*siter++); + dwords = offsetof(struct link_statistics, + link_up_cnt) / 4; + iter = &stats->link_fail_cnt; + for ( ; dwords--; iter++) + le32_to_cpus(iter); } } else { /* Failed. 
*/ @@ -2581,7 +2820,7 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats, int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - uint32_t *siter, *diter, dwords; + uint32_t *iter, dwords; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088, "Entered %s.\n", __func__); @@ -2610,9 +2849,9 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats, "Done %s.\n", __func__); /* Copy over data -- firmware data is LE. */ dwords = sizeof(struct link_statistics) / 4; - siter = diter = &stats->link_fail_cnt; - while (dwords--) - *diter++ = le32_to_cpu(*siter++); + iter = &stats->link_fail_cnt; + for ( ; dwords--; iter++) + le32_to_cpus(iter); } } else { /* Failed. */ @@ -3379,6 +3618,9 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, rptid_entry->port_id[2], rptid_entry->port_id[1], rptid_entry->port_id[0]); + /* buffer to buffer credit flag */ + vha->flags.bbcr_enable = (rptid_entry->bbcr & 0xf) != 0; + /* FA-WWN is only for physical port */ if (!vp_idx) { void *wwpn = ha->init_cb->port_name; diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index c5dd594f6c31..cf7ba52bae66 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c @@ -600,7 +600,7 @@ qla25xx_delete_queues(struct scsi_qla_host *vha) /* Delete request queues */ for (cnt = 1; cnt < ha->max_req_queues; cnt++) { req = ha->req_q_map[cnt]; - if (req) { + if (req && test_bit(cnt, ha->req_qid_map)) { ret = qla25xx_delete_req_que(vha, req); if (ret != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x00ea, @@ -614,7 +614,7 @@ qla25xx_delete_queues(struct scsi_qla_host *vha) /* Delete response queues */ for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) { rsp = ha->rsp_q_map[cnt]; - if (rsp) { + if (rsp && test_bit(cnt, ha->rsp_qid_map)) { ret = qla25xx_delete_rsp_que(vha, rsp); if (ret != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x00eb, diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index bfa9a64c316b..7c0b60ca158f 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -221,6 +221,25 @@ MODULE_PARM_DESC(ql2xmdenable, "0 - MiniDump disabled. " "1 (Default) - MiniDump enabled."); +int ql2xexlogins = 0; +module_param(ql2xexlogins, uint, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(ql2xexlogins, + "Number of extended Logins. " + "0 (Default)- Disabled."); + +int ql2xexchoffld = 0; +module_param(ql2xexchoffld, uint, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(ql2xexchoffld, + "Number of exchanges to offload. " + "0 (Default)- Disabled."); + +int ql2xfwholdabts = 0; +module_param(ql2xfwholdabts, int, S_IRUGO); +MODULE_PARM_DESC(ql2xfwholdabts, + "Allow FW to hold status IOCB until ABTS rsp received. " + "0 (Default) Do not set fw option. 
" + "1 - Set fw option to hold ABTS."); + /* * SCSI host template entry points */ @@ -397,6 +416,9 @@ static void qla2x00_free_queues(struct qla_hw_data *ha) int cnt; for (cnt = 0; cnt < ha->max_req_queues; cnt++) { + if (!test_bit(cnt, ha->req_qid_map)) + continue; + req = ha->req_q_map[cnt]; qla2x00_free_req_que(ha, req); } @@ -404,6 +426,9 @@ static void qla2x00_free_queues(struct qla_hw_data *ha) ha->req_q_map = NULL; for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) { + if (!test_bit(cnt, ha->rsp_qid_map)) + continue; + rsp = ha->rsp_q_map[cnt]; qla2x00_free_rsp_que(ha, rsp); } @@ -2198,6 +2223,7 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha) ha->device_type |= DT_ZIO_SUPPORTED; ha->device_type |= DT_FWI2; ha->device_type |= DT_IIDMA; + ha->device_type |= DT_T10_PI; ha->fw_srisc_address = RISC_START_ADDRESS_2400; break; case PCI_DEVICE_ID_QLOGIC_ISP2271: @@ -2205,6 +2231,7 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha) ha->device_type |= DT_ZIO_SUPPORTED; ha->device_type |= DT_FWI2; ha->device_type |= DT_IIDMA; + ha->device_type |= DT_T10_PI; ha->fw_srisc_address = RISC_START_ADDRESS_2400; break; case PCI_DEVICE_ID_QLOGIC_ISP2261: @@ -2212,6 +2239,7 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha) ha->device_type |= DT_ZIO_SUPPORTED; ha->device_type |= DT_FWI2; ha->device_type |= DT_IIDMA; + ha->device_type |= DT_T10_PI; ha->fw_srisc_address = RISC_START_ADDRESS_2400; break; } @@ -2324,6 +2352,9 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) ha->tgt.enable_class_2 = ql2xenableclass2; INIT_LIST_HEAD(&ha->tgt.q_full_list); spin_lock_init(&ha->tgt.q_full_lock); + spin_lock_init(&ha->tgt.sess_lock); + spin_lock_init(&ha->tgt.atio_lock); + /* Clear our data area */ ha->bars = bars; @@ -2468,7 +2499,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; ha->mbx_count = MAILBOX_REGISTER_COUNT; req_length = REQUEST_ENTRY_CNT_83XX; - rsp_length = RESPONSE_ENTRY_CNT_2300; + rsp_length = RESPONSE_ENTRY_CNT_83XX; ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; ha->max_loop_id = SNS_LAST_LOOP_ID_2300; ha->init_cb_size = sizeof(struct mid_init_cb_81xx); @@ -2498,8 +2529,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) ha->portnum = PCI_FUNC(ha->pdev->devfn); ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; ha->mbx_count = MAILBOX_REGISTER_COUNT; - req_length = REQUEST_ENTRY_CNT_24XX; - rsp_length = RESPONSE_ENTRY_CNT_2300; + req_length = REQUEST_ENTRY_CNT_83XX; + rsp_length = RESPONSE_ENTRY_CNT_83XX; ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; ha->max_loop_id = SNS_LAST_LOOP_ID_2300; ha->init_cb_size = sizeof(struct mid_init_cb_81xx); @@ -3128,6 +3159,14 @@ qla2x00_remove_one(struct pci_dev *pdev) base_vha->flags.online = 0; + /* free DMA memory */ + if (ha->exlogin_buf) + qla2x00_free_exlogin_buffer(ha); + + /* free DMA memory */ + if (ha->exchoffld_buf) + qla2x00_free_exchoffld_buffer(ha); + qla2x00_destroy_deferred_work(ha); qlt_remove_target(ha, base_vha); @@ -3587,6 +3626,140 @@ fail: return -ENOMEM; } +int +qla2x00_set_exlogins_buffer(scsi_qla_host_t *vha) +{ + int rval; + uint16_t size, max_cnt, temp; + struct qla_hw_data *ha = vha->hw; + + /* Return if we don't need to alloacate any extended logins */ + if (!ql2xexlogins) + return QLA_SUCCESS; + + ql_log(ql_log_info, vha, 0xd021, "EXLOGIN count: %d.\n", ql2xexlogins); + max_cnt = 0; + rval = qla_get_exlogin_status(vha, &size, &max_cnt); + if (rval != QLA_SUCCESS) { + ql_log_pci(ql_log_fatal, ha->pdev, 0xd029, + "Failed 
to get exlogin status.\n"); + return rval; + } + + temp = (ql2xexlogins > max_cnt) ? max_cnt : ql2xexlogins; + ha->exlogin_size = (size * temp); + ql_log(ql_log_info, vha, 0xd024, + "EXLOGIN: max_logins=%d, portdb=0x%x, total=%d.\n", + max_cnt, size, temp); + + ql_log(ql_log_info, vha, 0xd025, "EXLOGIN: requested size=0x%x\n", + ha->exlogin_size); + + /* Get consistent memory for extended logins */ + ha->exlogin_buf = dma_alloc_coherent(&ha->pdev->dev, + ha->exlogin_size, &ha->exlogin_buf_dma, GFP_KERNEL); + if (!ha->exlogin_buf) { + ql_log_pci(ql_log_fatal, ha->pdev, 0xd02a, + "Failed to allocate memory for exlogin_buf_dma.\n"); + return -ENOMEM; + } + + /* Now configure the dma buffer */ + rval = qla_set_exlogin_mem_cfg(vha, ha->exlogin_buf_dma); + if (rval) { + ql_log(ql_log_fatal, vha, 0x00cf, + "Setup extended login buffer ****FAILED****.\n"); + qla2x00_free_exlogin_buffer(ha); + } + + return rval; +} + +/* +* qla2x00_free_exlogin_buffer +* +* Input: +* ha = adapter block pointer +*/ +void +qla2x00_free_exlogin_buffer(struct qla_hw_data *ha) +{ + if (ha->exlogin_buf) { + dma_free_coherent(&ha->pdev->dev, ha->exlogin_size, + ha->exlogin_buf, ha->exlogin_buf_dma); + ha->exlogin_buf = NULL; + ha->exlogin_size = 0; + } +} + +int +qla2x00_set_exchoffld_buffer(scsi_qla_host_t *vha) +{ + int rval; + uint16_t size, max_cnt, temp; + struct qla_hw_data *ha = vha->hw; + + /* Return if we don't need to allocate any exchange offload buffers */ + if (!ql2xexchoffld) + return QLA_SUCCESS; + + ql_log(ql_log_info, vha, 0xd014, + "Exchange offload count: %d.\n", ql2xexchoffld); + + max_cnt = 0; + rval = qla_get_exchoffld_status(vha, &size, &max_cnt); + if (rval != QLA_SUCCESS) { + ql_log_pci(ql_log_fatal, ha->pdev, 0xd012, + "Failed to get exchange offload status.\n"); + return rval; + } + + temp = (ql2xexchoffld > max_cnt) ? max_cnt : ql2xexchoffld; + ha->exchoffld_size = (size * temp); + ql_log(ql_log_info, vha, 0xd016, + "Exchange offload: max_count=%d, buffers=0x%x, total=%d.\n", + max_cnt, size, temp); + + ql_log(ql_log_info, vha, 0xd017, + "Exchange Buffers requested size = 0x%x\n", ha->exchoffld_size); + + /* Get consistent memory for exchange offload */ + ha->exchoffld_buf = dma_alloc_coherent(&ha->pdev->dev, + ha->exchoffld_size, &ha->exchoffld_buf_dma, GFP_KERNEL); + if (!ha->exchoffld_buf) { + ql_log_pci(ql_log_fatal, ha->pdev, 0xd013, + "Failed to allocate memory for exchoffld_buf_dma.\n"); + return -ENOMEM; + } + + /* Now configure the dma buffer */ + rval = qla_set_exchoffld_mem_cfg(vha, ha->exchoffld_buf_dma); + if (rval) { + ql_log(ql_log_fatal, vha, 0xd02e, + "Setup exchange offload buffer ****FAILED****.\n"); + qla2x00_free_exchoffld_buffer(ha); + } + + return rval; +} + +/* +* qla2x00_free_exchoffld_buffer +* +* Input: +* ha = adapter block pointer +*/ +void +qla2x00_free_exchoffld_buffer(struct qla_hw_data *ha) +{ + if (ha->exchoffld_buf) { + dma_free_coherent(&ha->pdev->dev, ha->exchoffld_size, + ha->exchoffld_buf, ha->exchoffld_buf_dma); + ha->exchoffld_buf = NULL; + ha->exchoffld_size = 0; + } +} + /* * qla2x00_free_fw_dump * Frees fw dump stuff.
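Both new buffer helpers share one shape: query the firmware for its per-entry size and maximum count, clamp the ql2xexlogins/ql2xexchoffld module parameter against that maximum, allocate a single DMA-coherent region, hand its bus address to the firmware, and tear the region down again if that last handshake fails. A minimal sketch of the shape; the demo_* names are hypothetical stand-ins, not driver API:

    /*
     * Hedged sketch of the query/clamp/allocate/configure pattern used
     * by qla2x00_set_exlogins_buffer()/qla2x00_set_exchoffld_buffer().
     */
    #include <linux/dma-mapping.h>
    #include <linux/minmax.h>

    static int demo_setup_fw_buffer(struct device *dev, u16 requested,
                                    u16 fw_max, u16 entry_size,
                                    void **buf, dma_addr_t *buf_dma,
                                    size_t *buf_len)
    {
        u16 count = min(requested, fw_max);    /* clamp to fw limit */

        *buf_len = (size_t)entry_size * count;
        *buf = dma_alloc_coherent(dev, *buf_len, buf_dma, GFP_KERNEL);
        if (!*buf)
            return -ENOMEM;

        /*
         * A real driver would now hand *buf_dma to the firmware and,
         * if that step failed, dma_free_coherent() the region again,
         * exactly as the helpers above do.
         */
        return 0;
    }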
@@ -3766,6 +3939,8 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht, INIT_LIST_HEAD(&vha->list); INIT_LIST_HEAD(&vha->qla_cmd_list); INIT_LIST_HEAD(&vha->qla_sess_op_cmd_list); + INIT_LIST_HEAD(&vha->logo_list); + INIT_LIST_HEAD(&vha->plogi_ack_list); spin_lock_init(&vha->work_lock); spin_lock_init(&vha->cmd_list_lock); @@ -5843,6 +6018,3 @@ MODULE_FIRMWARE(FW_FILE_ISP2300); MODULE_FIRMWARE(FW_FILE_ISP2322); MODULE_FIRMWARE(FW_FILE_ISP24XX); MODULE_FIRMWARE(FW_FILE_ISP25XX); -MODULE_FIRMWARE(FW_FILE_ISP2031); -MODULE_FIRMWARE(FW_FILE_ISP8031); -MODULE_FIRMWARE(FW_FILE_ISP27XX); diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c index 3272ed5bbcc7..5e9392316425 100644 --- a/drivers/scsi/qla2xxx/qla_sup.c +++ b/drivers/scsi/qla2xxx/qla_sup.c @@ -610,8 +610,8 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start) wptr = (uint16_t *)req->ring; cnt = sizeof(struct qla_flt_location) >> 1; - for (chksum = 0; cnt; cnt--) - chksum += le16_to_cpu(*wptr++); + for (chksum = 0; cnt--; wptr++) + chksum += le16_to_cpu(*wptr); if (chksum) { ql_log(ql_log_fatal, vha, 0x0045, "Inconsistent FLTL detected: checksum=0x%x.\n", chksum); @@ -702,8 +702,8 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr) } cnt = (sizeof(struct qla_flt_header) + le16_to_cpu(flt->length)) >> 1; - for (chksum = 0; cnt; cnt--) - chksum += le16_to_cpu(*wptr++); + for (chksum = 0; cnt--; wptr++) + chksum += le16_to_cpu(*wptr); if (chksum) { ql_log(ql_log_fatal, vha, 0x0048, "Inconsistent FLT detected: version=0x%x length=0x%x checksum=0x%x.\n", @@ -846,6 +846,38 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr) if (ha->port_no == 1) ha->flt_region_nvram = start; break; + case FLT_REG_IMG_PRI_27XX: + if (IS_QLA27XX(ha)) + ha->flt_region_img_status_pri = start; + break; + case FLT_REG_IMG_SEC_27XX: + if (IS_QLA27XX(ha)) + ha->flt_region_img_status_sec = start; + break; + case FLT_REG_FW_SEC_27XX: + if (IS_QLA27XX(ha)) + ha->flt_region_fw_sec = start; + break; + case FLT_REG_BOOTLOAD_SEC_27XX: + if (IS_QLA27XX(ha)) + ha->flt_region_boot_sec = start; + break; + case FLT_REG_VPD_SEC_27XX_0: + if (IS_QLA27XX(ha)) + ha->flt_region_vpd_sec = start; + break; + case FLT_REG_VPD_SEC_27XX_1: + if (IS_QLA27XX(ha)) + ha->flt_region_vpd_sec = start; + break; + case FLT_REG_VPD_SEC_27XX_2: + if (IS_QLA27XX(ha)) + ha->flt_region_vpd_sec = start; + break; + case FLT_REG_VPD_SEC_27XX_3: + if (IS_QLA27XX(ha)) + ha->flt_region_vpd_sec = start; + break; } } goto done; @@ -898,9 +930,8 @@ qla2xxx_get_fdt_info(scsi_qla_host_t *vha) fdt->sig[3] != 'D') goto no_flash_data; - for (cnt = 0, chksum = 0; cnt < sizeof(struct qla_fdt_layout) >> 1; - cnt++) - chksum += le16_to_cpu(*wptr++); + for (cnt = 0, chksum = 0; cnt < sizeof(*fdt) >> 1; cnt++, wptr++) + chksum += le16_to_cpu(*wptr); if (chksum) { ql_dbg(ql_dbg_init, vha, 0x004c, "Inconsistent FDT detected:" @@ -995,7 +1026,8 @@ qla2xxx_get_idc_param(scsi_qla_host_t *vha) ha->fcoe_dev_init_timeout = QLA82XX_ROM_DEV_INIT_TIMEOUT; ha->fcoe_reset_timeout = QLA82XX_ROM_DRV_RESET_ACK_TIMEOUT; } else { - ha->fcoe_dev_init_timeout = le32_to_cpu(*wptr++); + ha->fcoe_dev_init_timeout = le32_to_cpu(*wptr); + wptr++; ha->fcoe_reset_timeout = le32_to_cpu(*wptr); } ql_dbg(ql_dbg_init, vha, 0x004e, @@ -1072,10 +1104,9 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha) ha->isp_ops->read_optrom(vha, (uint8_t *)data, ha->flt_region_npiv_conf << 2, NPIV_CONFIG_SIZE); - cnt = (sizeof(struct qla_npiv_header) + le16_to_cpu(hdr.entries) * - 
sizeof(struct qla_npiv_entry)) >> 1; - for (wptr = data, chksum = 0; cnt; cnt--) - chksum += le16_to_cpu(*wptr++); + cnt = (sizeof(hdr) + le16_to_cpu(hdr.entries) * sizeof(*entry)) >> 1; + for (wptr = data, chksum = 0; cnt--; wptr++) + chksum += le16_to_cpu(*wptr); if (chksum) { ql_dbg(ql_dbg_user, vha, 0x7092, "Inconsistent NPIV-Config " @@ -2989,6 +3020,9 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf) uint8_t code_type, last_image; int i; struct qla_hw_data *ha = vha->hw; + uint32_t faddr = 0; + + pcihdr = pcids = 0; if (IS_P3P_TYPE(ha)) return ret; @@ -3002,9 +3036,11 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf) memset(ha->fw_revision, 0, sizeof(ha->fw_revision)); dcode = mbuf; - - /* Begin with first PCI expansion ROM header. */ pcihdr = ha->flt_region_boot << 2; + if (IS_QLA27XX(ha) && + qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE) + pcihdr = ha->flt_region_boot_sec << 2; + last_image = 1; do { /* Verify PCI expansion ROM header. */ @@ -3077,8 +3113,12 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf) /* Read firmware image information. */ memset(ha->fw_revision, 0, sizeof(ha->fw_revision)); dcode = mbuf; + faddr = ha->flt_region_fw; + if (IS_QLA27XX(ha) && + qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE) + faddr = ha->flt_region_fw_sec; - qla24xx_read_flash_data(vha, dcode, ha->flt_region_fw + 4, 4); + qla24xx_read_flash_data(vha, dcode, faddr + 4, 4); for (i = 0; i < 4; i++) dcode[i] = be32_to_cpu(dcode[i]); diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 75514a15bea0..ee967becd257 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -100,12 +100,12 @@ enum fcp_resp_rsp_codes { */ /* Predefs for callbacks handed to qla2xxx LLD */ static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha, - struct atio_from_isp *pkt); + struct atio_from_isp *pkt, uint8_t); static void qlt_response_pkt(struct scsi_qla_host *ha, response_t *pkt); static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun, int fn, void *iocb, int flags); static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd - *cmd, struct atio_from_isp *atio, int ha_locked); + *cmd, struct atio_from_isp *atio, int ha_locked, int ul_abort); static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha, struct qla_tgt_srr_imm *imm, int ha_lock); static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, @@ -118,10 +118,13 @@ static void qlt_send_notify_ack(struct scsi_qla_host *vha, struct imm_ntfy_from_isp *ntfy, uint32_t add_flags, uint16_t resp_code, int resp_code_valid, uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan); +static void qlt_send_term_imm_notif(struct scsi_qla_host *vha, + struct imm_ntfy_from_isp *imm, int ha_locked); /* * Global Variables */ static struct kmem_cache *qla_tgt_mgmt_cmd_cachep; +static struct kmem_cache *qla_tgt_plogi_cachep; static mempool_t *qla_tgt_mgmt_cmd_mempool; static struct workqueue_struct *qla_tgt_wq; static DEFINE_MUTEX(qla_tgt_mutex); @@ -226,8 +229,8 @@ static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha) spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); } -static void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha, - struct atio_from_isp *atio) +static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha, + struct atio_from_isp *atio, uint8_t ha_locked) { ql_dbg(ql_dbg_tgt, vha, 0xe072, "%s: qla_target(%d): type %x ox_id %04x\n", @@ -248,7 +251,7 @@ static 
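The checksum-loop rewrites in qla_sup.c (qla2xxx_find_flt_start(), qla2xxx_get_flt_info(), and the FDT and NPIV readers) are behavior-preserving restylings: the pointer increment moves into the for-header, but the invariant is unchanged, namely that all little-endian 16-bit words of the flash structure, compensating word included, sum to zero modulo 2^16. A self-contained sketch of that check, with a hypothetical demo_ name:

    /*
     * Hedged sketch of the zero-sum convention used for the FLT, FDT
     * and NPIV structures: any non-zero result means corruption.
     */
    #include <linux/types.h>
    #include <asm/byteorder.h>

    static bool demo_flash_struct_ok(const __le16 *words, size_t nwords)
    {
        u16 sum = 0;

        while (nwords--)
            sum += le16_to_cpu(*words++);

        return sum == 0;
    }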
void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha, atio->u.isp24.fcp_hdr.d_id[2]); break; } - qlt_24xx_atio_pkt(host, atio); + qlt_24xx_atio_pkt(host, atio, ha_locked); break; } @@ -271,7 +274,7 @@ static void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha, break; } } - qlt_24xx_atio_pkt(host, atio); + qlt_24xx_atio_pkt(host, atio, ha_locked); break; } @@ -282,7 +285,7 @@ static void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha, break; } - return; + return false; } void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt) @@ -389,6 +392,131 @@ void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt) } +/* + * All qlt_plogi_ack_t operations are protected by hardware_lock + */ + +/* + * This is a zero-base ref-counting solution, since hardware_lock + * guarantees that ref_count is not modified concurrently. + * Upon successful return content of iocb is undefined + */ +static qlt_plogi_ack_t * +qlt_plogi_ack_find_add(struct scsi_qla_host *vha, port_id_t *id, + struct imm_ntfy_from_isp *iocb) +{ + qlt_plogi_ack_t *pla; + + list_for_each_entry(pla, &vha->plogi_ack_list, list) { + if (pla->id.b24 == id->b24) { + qlt_send_term_imm_notif(vha, &pla->iocb, 1); + pla->iocb = *iocb; + return pla; + } + } + + pla = kmem_cache_zalloc(qla_tgt_plogi_cachep, GFP_ATOMIC); + if (!pla) { + ql_dbg(ql_dbg_async, vha, 0x5088, + "qla_target(%d): Allocation of plogi_ack failed\n", + vha->vp_idx); + return NULL; + } + + pla->iocb = *iocb; + pla->id = *id; + list_add_tail(&pla->list, &vha->plogi_ack_list); + + return pla; +} + +static void qlt_plogi_ack_unref(struct scsi_qla_host *vha, qlt_plogi_ack_t *pla) +{ + BUG_ON(!pla->ref_count); + pla->ref_count--; + + if (pla->ref_count) + return; + + ql_dbg(ql_dbg_async, vha, 0x5089, + "Sending PLOGI ACK to wwn %8phC s_id %02x:%02x:%02x loop_id %#04x" + " exch %#x ox_id %#x\n", pla->iocb.u.isp24.port_name, + pla->iocb.u.isp24.port_id[2], pla->iocb.u.isp24.port_id[1], + pla->iocb.u.isp24.port_id[0], + le16_to_cpu(pla->iocb.u.isp24.nport_handle), + pla->iocb.u.isp24.exchange_address, pla->iocb.ox_id); + qlt_send_notify_ack(vha, &pla->iocb, 0, 0, 0, 0, 0, 0); + + list_del(&pla->list); + kmem_cache_free(qla_tgt_plogi_cachep, pla); +} + +static void +qlt_plogi_ack_link(struct scsi_qla_host *vha, qlt_plogi_ack_t *pla, + struct qla_tgt_sess *sess, qlt_plogi_link_t link) +{ + /* Inc ref_count first because link might already be pointing at pla */ + pla->ref_count++; + + if (sess->plogi_link[link]) + qlt_plogi_ack_unref(vha, sess->plogi_link[link]); + + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf097, + "Linking sess %p [%d] wwn %8phC with PLOGI ACK to wwn %8phC" + " s_id %02x:%02x:%02x, ref=%d\n", sess, link, sess->port_name, + pla->iocb.u.isp24.port_name, pla->iocb.u.isp24.port_id[2], + pla->iocb.u.isp24.port_id[1], pla->iocb.u.isp24.port_id[0], + pla->ref_count); + + sess->plogi_link[link] = pla; +} + +typedef struct { + /* These fields must be initialized by the caller */ + port_id_t id; + /* + * number of cmds dropped while we were waiting for + * initiator to ack LOGO initialize to 1 if LOGO is + * triggered by a command, otherwise, to 0 + */ + int cmd_count; + + /* These fields are used by callee */ + struct list_head list; +} qlt_port_logo_t; + +static void +qlt_send_first_logo(struct scsi_qla_host *vha, qlt_port_logo_t *logo) +{ + qlt_port_logo_t *tmp; + int res; + + mutex_lock(&vha->vha_tgt.tgt_mutex); + + list_for_each_entry(tmp, &vha->logo_list, list) { + if (tmp->id.b24 == logo->id.b24) { + tmp->cmd_count += logo->cmd_count; + 
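The qlt_plogi_ack_* helpers implement the "zero-base ref-counting" their comment describes: hardware_lock serializes every ref_count access, so no atomics are needed, and a node created by qlt_plogi_ack_find_add() may legally sit on the list at ref_count == 0 until qlt_plogi_ack_link() takes the first reference; the last unref sends the deferred ACK, unlinks, and frees. Reduced to its skeleton (demo_* names are illustrative, not the driver's):

    /* Hedged sketch of a zero-based ref count guarded by one lock. */
    #include <linux/bug.h>
    #include <linux/list.h>
    #include <linux/slab.h>

    struct demo_ack {
        struct list_head list;
        int ref_count;              /* guarded by the caller's lock */
    };

    static void demo_ack_get(struct demo_ack *a)    /* lock held */
    {
        a->ref_count++;
    }

    static void demo_ack_put(struct demo_ack *a)    /* lock held */
    {
        BUG_ON(!a->ref_count);
        if (--a->ref_count)
            return;
        list_del(&a->list);         /* last user: unlink and free */
        kfree(a);
    }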
mutex_unlock(&vha->vha_tgt.tgt_mutex); + return; + } + } + + list_add_tail(&logo->list, &vha->logo_list); + + mutex_unlock(&vha->vha_tgt.tgt_mutex); + + res = qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, logo->id); + + mutex_lock(&vha->vha_tgt.tgt_mutex); + list_del(&logo->list); + mutex_unlock(&vha->vha_tgt.tgt_mutex); + + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf098, + "Finished LOGO to %02x:%02x:%02x, dropped %d cmds, res = %#x\n", + logo->id.b.domain, logo->id.b.area, logo->id.b.al_pa, + logo->cmd_count, res); +} + static void qlt_free_session_done(struct work_struct *work) { struct qla_tgt_sess *sess = container_of(work, struct qla_tgt_sess, @@ -402,14 +530,21 @@ static void qlt_free_session_done(struct work_struct *work) ql_dbg(ql_dbg_tgt_mgt, vha, 0xf084, "%s: se_sess %p / sess %p from port %8phC loop_id %#04x" - " s_id %02x:%02x:%02x logout %d keep %d plogi %d\n", + " s_id %02x:%02x:%02x logout %d keep %d els_logo %d\n", __func__, sess->se_sess, sess, sess->port_name, sess->loop_id, sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa, sess->logout_on_delete, sess->keep_nport_handle, - sess->plogi_ack_needed); + sess->send_els_logo); BUG_ON(!tgt); + if (sess->send_els_logo) { + qlt_port_logo_t logo; + logo.id = sess->s_id; + logo.cmd_count = 0; + qlt_send_first_logo(vha, &logo); + } + if (sess->logout_on_delete) { int rc; @@ -455,9 +590,34 @@ static void qlt_free_session_done(struct work_struct *work) spin_lock_irqsave(&ha->hardware_lock, flags); - if (sess->plogi_ack_needed) - qlt_send_notify_ack(vha, &sess->tm_iocb, - 0, 0, 0, 0, 0, 0); + { + qlt_plogi_ack_t *own = + sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN]; + qlt_plogi_ack_t *con = + sess->plogi_link[QLT_PLOGI_LINK_CONFLICT]; + + if (con) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf099, + "se_sess %p / sess %p port %8phC is gone," + " %s (ref=%d), releasing PLOGI for %8phC (ref=%d)\n", + sess->se_sess, sess, sess->port_name, + own ? "releasing own PLOGI" : + "no own PLOGI pending", + own ? own->ref_count : -1, + con->iocb.u.isp24.port_name, con->ref_count); + qlt_plogi_ack_unref(vha, con); + } else { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09a, + "se_sess %p / sess %p port %8phC is gone, %s (ref=%d)\n", + sess->se_sess, sess, sess->port_name, + own ? "releasing own PLOGI" : + "no own PLOGI pending", + own ? 
own->ref_count : -1); + } + + if (own) + qlt_plogi_ack_unref(vha, own); + } list_del(&sess->sess_list_entry); @@ -476,7 +636,7 @@ static void qlt_free_session_done(struct work_struct *work) wake_up_all(&tgt->waitQ); } -/* ha->hardware_lock supposed to be held on entry */ +/* ha->tgt.sess_lock supposed to be held on entry */ void qlt_unreg_sess(struct qla_tgt_sess *sess) { struct scsi_qla_host *vha = sess->vha; @@ -492,7 +652,7 @@ void qlt_unreg_sess(struct qla_tgt_sess *sess) } EXPORT_SYMBOL(qlt_unreg_sess); -/* ha->hardware_lock supposed to be held on entry */ + static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd) { struct qla_hw_data *ha = vha->hw; @@ -502,12 +662,15 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd) int res = 0; struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb; struct atio_from_isp *a = (struct atio_from_isp *)iocb; + unsigned long flags; loop_id = le16_to_cpu(n->u.isp24.nport_handle); if (loop_id == 0xFFFF) { /* Global event */ atomic_inc(&vha->vha_tgt.qla_tgt->tgt_global_resets_count); + spin_lock_irqsave(&ha->tgt.sess_lock, flags); qlt_clear_tgt_db(vha->vha_tgt.qla_tgt); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); #if 0 /* FIXME: do we need to choose a session here? */ if (!list_empty(&ha->tgt.qla_tgt->sess_list)) { sess = list_entry(ha->tgt.qla_tgt->sess_list.next, @@ -534,7 +697,9 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd) sess = NULL; #endif } else { + spin_lock_irqsave(&ha->tgt.sess_lock, flags); sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); } ql_dbg(ql_dbg_tgt, vha, 0xe000, @@ -556,7 +721,7 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd) iocb, QLA24XX_MGMT_SEND_NACK); } -/* ha->hardware_lock supposed to be held on entry */ +/* ha->tgt.sess_lock supposed to be held on entry */ static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess, bool immediate) { @@ -600,7 +765,7 @@ static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess, sess->expires - jiffies); } -/* ha->hardware_lock supposed to be held on entry */ +/* ha->tgt.sess_lock supposed to be held on entry */ static void qlt_clear_tgt_db(struct qla_tgt *tgt) { struct qla_tgt_sess *sess; @@ -636,12 +801,12 @@ static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id, ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045, "qla_target(%d): get_id_list() failed: %x\n", vha->vp_idx, rc); - res = -1; + res = -EBUSY; goto out_free_id_list; } id_iter = (char *)gid_list; - res = -1; + res = -ENOENT; for (i = 0; i < entries; i++) { struct gid_list_info *gid = (struct gid_list_info *)id_iter; if ((gid->al_pa == s_id[2]) && @@ -660,7 +825,7 @@ out_free_id_list: return res; } -/* ha->hardware_lock supposed to be held on entry */ +/* ha->tgt.sess_lock supposed to be held on entry */ static void qlt_undelete_sess(struct qla_tgt_sess *sess) { BUG_ON(sess->deleted != QLA_SESS_DELETION_PENDING); @@ -678,7 +843,7 @@ static void qlt_del_sess_work_fn(struct delayed_work *work) struct qla_tgt_sess *sess; unsigned long flags, elapsed; - spin_lock_irqsave(&ha->hardware_lock, flags); + spin_lock_irqsave(&ha->tgt.sess_lock, flags); while (!list_empty(&tgt->del_sess_list)) { sess = list_entry(tgt->del_sess_list.next, typeof(*sess), del_list_entry); @@ -699,7 +864,7 @@ static void qlt_del_sess_work_fn(struct delayed_work *work) break; } } - spin_unlock_irqrestore(&ha->hardware_lock, flags); + 
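A recurring theme from here on is a lock split: session-list bookkeeping moves off the coarse ha->hardware_lock onto the new ha->tgt.sess_lock (initialized in qla2x00_probe_one() in the qla_os.c hunks above), so session management stops contending with the I/O fast path. The conversion pattern, sketched with illustrative demo_* types:

    /*
     * Hedged sketch of the lock split: the session list gets its own
     * spinlock so list maintenance no longer serializes against I/O
     * behind ha->hardware_lock.
     */
    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct demo_tgt {
        spinlock_t sess_lock;           /* guards sess_list only */
        struct list_head sess_list;
    };

    static void demo_add_sess(struct demo_tgt *tgt, struct list_head *sess)
    {
        unsigned long flags;

        spin_lock_irqsave(&tgt->sess_lock, flags);
        list_add_tail(sess, &tgt->sess_list);
        spin_unlock_irqrestore(&tgt->sess_lock, flags);
    }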
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); } /* @@ -717,7 +882,7 @@ static struct qla_tgt_sess *qlt_create_sess( unsigned char be_sid[3]; /* Check to avoid double sessions */ - spin_lock_irqsave(&ha->hardware_lock, flags); + spin_lock_irqsave(&ha->tgt.sess_lock, flags); list_for_each_entry(sess, &vha->vha_tgt.qla_tgt->sess_list, sess_list_entry) { if (!memcmp(sess->port_name, fcport->port_name, WWN_SIZE)) { @@ -732,7 +897,7 @@ static struct qla_tgt_sess *qlt_create_sess( /* Cannot undelete at this point */ if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { - spin_unlock_irqrestore(&ha->hardware_lock, + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); return NULL; } @@ -749,12 +914,12 @@ static struct qla_tgt_sess *qlt_create_sess( qlt_do_generation_tick(vha, &sess->generation); - spin_unlock_irqrestore(&ha->hardware_lock, flags); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); return sess; } } - spin_unlock_irqrestore(&ha->hardware_lock, flags); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); sess = kzalloc(sizeof(*sess), GFP_KERNEL); if (!sess) { @@ -799,7 +964,7 @@ static struct qla_tgt_sess *qlt_create_sess( } /* * Take an extra reference to ->sess_kref here to handle qla_tgt_sess - * access across ->hardware_lock reaquire. + * access across ->tgt.sess_lock reacquire. */ kref_get(&sess->se_sess->sess_kref); @@ -807,11 +972,11 @@ static struct qla_tgt_sess *qlt_create_sess( BUILD_BUG_ON(sizeof(sess->port_name) != sizeof(fcport->port_name)); memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name)); - spin_lock_irqsave(&ha->hardware_lock, flags); + spin_lock_irqsave(&ha->tgt.sess_lock, flags); list_add_tail(&sess->sess_list_entry, &vha->vha_tgt.qla_tgt->sess_list); vha->vha_tgt.qla_tgt->sess_count++; qlt_do_generation_tick(vha, &sess->generation); - spin_unlock_irqrestore(&ha->hardware_lock, flags); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b, "qla_target(%d): %ssession for wwn %8phC (loop_id %d, " @@ -842,23 +1007,23 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport) if (qla_ini_mode_enabled(vha)) return; - spin_lock_irqsave(&ha->hardware_lock, flags); + spin_lock_irqsave(&ha->tgt.sess_lock, flags); if (tgt->tgt_stop) { - spin_unlock_irqrestore(&ha->hardware_lock, flags); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); return; } sess = qlt_find_sess_by_port_name(tgt, fcport->port_name); if (!sess) { - spin_unlock_irqrestore(&ha->hardware_lock, flags); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); mutex_lock(&vha->vha_tgt.tgt_mutex); sess = qlt_create_sess(vha, fcport, false); mutex_unlock(&vha->vha_tgt.tgt_mutex); - spin_lock_irqsave(&ha->hardware_lock, flags); + spin_lock_irqsave(&ha->tgt.sess_lock, flags); } else if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { /* Point of no return */ - spin_unlock_irqrestore(&ha->hardware_lock, flags); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); return; } else { kref_get(&sess->se_sess->sess_kref); @@ -887,7 +1052,7 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport) sess->local = 0; } ha->tgt.tgt_ops->put_sess(sess); - spin_unlock_irqrestore(&ha->hardware_lock, flags); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); } /* @@ -899,6 +1064,7 @@ qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen) { struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; struct qla_tgt_sess *sess; + unsigned long flags; if (!vha->hw->tgt.tgt_ops) return; @@ -906,15 +1072,19 @@ qlt_fc_port_deleted(struct
scsi_qla_host *vha, fc_port_t *fcport, int max_gen) if (!tgt) return; + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); if (tgt->tgt_stop) { + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); return; } sess = qlt_find_sess_by_port_name(tgt, fcport->port_name); if (!sess) { + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); return; } if (max_gen - sess->generation < 0) { + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); ql_dbg(ql_dbg_tgt_mgt, vha, 0xf092, "Ignoring stale deletion request for se_sess %p / sess %p" " for port %8phC, req_gen %d, sess_gen %d\n", @@ -927,6 +1097,7 @@ qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen) sess->local = 1; qlt_schedule_sess_for_deletion(sess, false); + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); } static inline int test_tgt_sess_count(struct qla_tgt *tgt) @@ -984,10 +1155,10 @@ int qlt_stop_phase1(struct qla_tgt *tgt) * Lock is needed, because we still can get an incoming packet. */ mutex_lock(&vha->vha_tgt.tgt_mutex); - spin_lock_irqsave(&ha->hardware_lock, flags); + spin_lock_irqsave(&ha->tgt.sess_lock, flags); tgt->tgt_stop = 1; qlt_clear_tgt_db(tgt); - spin_unlock_irqrestore(&ha->hardware_lock, flags); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); mutex_unlock(&vha->vha_tgt.tgt_mutex); mutex_unlock(&qla_tgt_mutex); @@ -1040,7 +1211,7 @@ void qlt_stop_phase2(struct qla_tgt *tgt) mutex_lock(&vha->vha_tgt.tgt_mutex); spin_lock_irqsave(&ha->hardware_lock, flags); - while (tgt->irq_cmd_count != 0) { + while ((tgt->irq_cmd_count != 0) || (tgt->atio_irq_cmd_count != 0)) { spin_unlock_irqrestore(&ha->hardware_lock, flags); udelay(2); spin_lock_irqsave(&ha->hardware_lock, flags); @@ -1309,7 +1480,7 @@ static int abort_cmd_for_tag(struct scsi_qla_host *vha, uint32_t tag) list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) { if (tag == cmd->atio.u.isp24.exchange_addr) { - cmd->state = QLA_TGT_STATE_ABORTED; + cmd->aborted = 1; spin_unlock(&vha->cmd_list_lock); return 1; } @@ -1351,7 +1522,7 @@ static void abort_cmds_for_lun(struct scsi_qla_host *vha, cmd_lun = scsilun_to_int( (struct scsi_lun *)&cmd->atio.u.isp24.fcp_cmnd.lun); if (cmd_key == key && cmd_lun == lun) - cmd->state = QLA_TGT_STATE_ABORTED; + cmd->aborted = 1; } spin_unlock(&vha->cmd_list_lock); } @@ -1435,6 +1606,7 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha, uint32_t tag = abts->exchange_addr_to_abort; uint8_t s_id[3]; int rc; + unsigned long flags; if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053, @@ -1462,6 +1634,7 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha, s_id[1] = abts->fcp_hdr_le.s_id[1]; s_id[2] = abts->fcp_hdr_le.s_id[0]; + spin_lock_irqsave(&ha->tgt.sess_lock, flags); sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id); if (!sess) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012, @@ -1469,12 +1642,17 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha, vha->vp_idx); rc = qlt_sched_sess_work(vha->vha_tgt.qla_tgt, QLA_TGT_SESS_WORK_ABORT, abts, sizeof(*abts)); + + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + if (rc != 0) { qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false); } return; } + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false); @@ -1560,15 +1738,15 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd) spin_lock_irqsave(&ha->hardware_lock, flags); - if (qla2x00_reset_active(vha) || 
mcmd->reset_count != ha->chip_reset) { + if (!vha->flags.online || mcmd->reset_count != ha->chip_reset) { /* - * Either a chip reset is active or this request was from + * Either the port is not online or this request was from * previous life, just abort the processing. */ ql_dbg(ql_dbg_async, vha, 0xe100, - "RESET-TMR active/old-count/new-count = %d/%d/%d.\n", - qla2x00_reset_active(vha), mcmd->reset_count, - ha->chip_reset); + "RESET-TMR online/active/old-count/new-count = %d/%d/%d/%d.\n", + vha->flags.online, qla2x00_reset_active(vha), + mcmd->reset_count, ha->chip_reset); ha->tgt.tgt_ops->free_mcmd(mcmd); spin_unlock_irqrestore(&ha->hardware_lock, flags); return; @@ -1578,7 +1756,7 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd) qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0); else { - if (mcmd->se_cmd.se_tmr_req->function == TMR_ABORT_TASK) + if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX) qlt_24xx_send_abts_resp(vha, &mcmd->orig_iocb.abts, mcmd->fc_tm_rsp, false); else @@ -2487,7 +2665,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, /* no need to terminate. FW already freed exchange. */ qlt_abort_cmd_on_host_reset(cmd->vha, cmd); else - qlt_send_term_exchange(vha, cmd, &cmd->atio, 1); + qlt_send_term_exchange(vha, cmd, &cmd->atio, 1, 0); spin_unlock_irqrestore(&ha->hardware_lock, flags); return 0; } @@ -2510,17 +2688,22 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, spin_lock_irqsave(&ha->hardware_lock, flags); - if (qla2x00_reset_active(vha) || cmd->reset_count != ha->chip_reset) { + if (xmit_type == QLA_TGT_XMIT_STATUS) + vha->tgt_counters.core_qla_snd_status++; + else + vha->tgt_counters.core_qla_que_buf++; + + if (!vha->flags.online || cmd->reset_count != ha->chip_reset) { /* - * Either a chip reset is active or this request was from + * Either the port is not online or this request was from * previous life, just abort the processing. */ cmd->state = QLA_TGT_STATE_PROCESSED; qlt_abort_cmd_on_host_reset(cmd->vha, cmd); ql_dbg(ql_dbg_async, vha, 0xe101, - "RESET-RSP active/old-count/new-count = %d/%d/%d.\n", - qla2x00_reset_active(vha), cmd->reset_count, - ha->chip_reset); + "RESET-RSP online/active/old-count/new-count = %d/%d/%d/%d.\n", + vha->flags.online, qla2x00_reset_active(vha), + cmd->reset_count, ha->chip_reset); spin_unlock_irqrestore(&ha->hardware_lock, flags); return 0; } @@ -2651,18 +2834,18 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd) spin_lock_irqsave(&ha->hardware_lock, flags); - if (qla2x00_reset_active(vha) || (cmd->reset_count != ha->chip_reset) || + if (!vha->flags.online || (cmd->reset_count != ha->chip_reset) || (cmd->sess && cmd->sess->deleted == QLA_SESS_DELETION_IN_PROGRESS)) { /* - * Either a chip reset is active or this request was from + * Either the port is not online or this request was from * previous life, just abort the processing. 
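This guard, repeated in qlt_xmit_response() and qlt_rdy_to_xfer(), swaps the qla2x00_reset_active() test for vha->flags.online but keeps the generation comparison; the log line now prints both values. The underlying idea is to tag each command with the reset generation it was born in, and it reduces to (hypothetical demo_ name):

    /*
     * Hedged sketch of the generation check: a command remembers the
     * chip_reset value it was created under; a mismatch, or an offline
     * port, marks it as belonging to a previous life of the adapter,
     * to be retired without touching hardware.
     */
    #include <linux/types.h>

    static bool demo_cmd_is_stale(bool port_online, u32 cmd_generation,
                                  u32 cur_generation)
    {
        return !port_online || cmd_generation != cur_generation;
    }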
*/ cmd->state = QLA_TGT_STATE_NEED_DATA; qlt_abort_cmd_on_host_reset(cmd->vha, cmd); ql_dbg(ql_dbg_async, vha, 0xe102, - "RESET-XFR active/old-count/new-count = %d/%d/%d.\n", - qla2x00_reset_active(vha), cmd->reset_count, - ha->chip_reset); + "RESET-XFR online/active/old-count/new-count = %d/%d/%d/%d.\n", + vha->flags.online, qla2x00_reset_active(vha), + cmd->reset_count, ha->chip_reset); spin_unlock_irqrestore(&ha->hardware_lock, flags); return 0; } @@ -2957,12 +3140,13 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha, ret = 1; } + vha->tgt_counters.num_term_xchg_sent++; pkt->entry_count = 1; pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; ctio24 = (struct ctio7_to_24xx *)pkt; ctio24->entry_type = CTIO_TYPE7; - ctio24->nport_handle = cmd ? cmd->loop_id : CTIO7_NHANDLE_UNRECOGNIZED; + ctio24->nport_handle = CTIO7_NHANDLE_UNRECOGNIZED; ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); ctio24->vp_index = vha->vp_idx; ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; @@ -2989,7 +3173,8 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha, } static void qlt_send_term_exchange(struct scsi_qla_host *vha, - struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked) + struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked, + int ul_abort) { unsigned long flags = 0; int rc; @@ -3009,8 +3194,7 @@ static void qlt_send_term_exchange(struct scsi_qla_host *vha, qlt_alloc_qfull_cmd(vha, atio, 0, 0); done: - if (cmd && ((cmd->state != QLA_TGT_STATE_ABORTED) || - !cmd->cmd_sent_to_fw)) { + if (cmd && !ul_abort && !cmd->aborted) { if (cmd->sg_mapped) qlt_unmap_sg(vha, cmd); vha->hw->tgt.tgt_ops->free_cmd(cmd); @@ -3028,7 +3212,7 @@ static void qlt_init_term_exchange(struct scsi_qla_host *vha) struct qla_tgt_cmd *cmd, *tcmd; vha->hw->tgt.leak_exchg_thresh_hold = - (vha->hw->fw_xcb_count/100) * LEAK_EXCHG_THRESH_HOLD_PERCENT; + (vha->hw->cur_fw_xcb_count/100) * LEAK_EXCHG_THRESH_HOLD_PERCENT; cmd = tcmd = NULL; if (!list_empty(&vha->hw->tgt.q_full_list)) { @@ -3058,7 +3242,7 @@ static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha) ql_dbg(ql_dbg_tgt, vha, 0xe079, "Chip reset due to exchange starvation: %d/%d.\n", - total_leaked, vha->hw->fw_xcb_count); + total_leaked, vha->hw->cur_fw_xcb_count); if (IS_P3P_TYPE(vha->hw)) set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); @@ -3069,21 +3253,38 @@ static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha) } -void qlt_abort_cmd(struct qla_tgt_cmd *cmd) +int qlt_abort_cmd(struct qla_tgt_cmd *cmd) { struct qla_tgt *tgt = cmd->tgt; struct scsi_qla_host *vha = tgt->vha; struct se_cmd *se_cmd = &cmd->se_cmd; + unsigned long flags; ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014, "qla_target(%d): terminating exchange for aborted cmd=%p " "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd, se_cmd->tag); - cmd->state = QLA_TGT_STATE_ABORTED; + spin_lock_irqsave(&cmd->cmd_lock, flags); + if (cmd->aborted) { + spin_unlock_irqrestore(&cmd->cmd_lock, flags); + /* + * It's normal to see 2 calls in this path: + * 1) XFER Rdy completion + CMD_T_ABORT + * 2) TCM TMR - drain_state_list + */ + ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff, + "multiple abort. 
%p transport_state %x, t_state %x," + " se_cmd_flags %x \n", cmd, cmd->se_cmd.transport_state, + cmd->se_cmd.t_state,cmd->se_cmd.se_cmd_flags); + return EIO; + } + cmd->aborted = 1; cmd->cmd_flags |= BIT_6; + spin_unlock_irqrestore(&cmd->cmd_lock, flags); - qlt_send_term_exchange(vha, cmd, &cmd->atio, 0); + qlt_send_term_exchange(vha, cmd, &cmd->atio, 0, 1); + return 0; } EXPORT_SYMBOL(qlt_abort_cmd); @@ -3098,6 +3299,9 @@ void qlt_free_cmd(struct qla_tgt_cmd *cmd) BUG_ON(cmd->cmd_in_wq); + if (cmd->sg_mapped) + qlt_unmap_sg(cmd->vha, cmd); + if (!cmd->q_full) qlt_decr_num_pend_cmds(cmd->vha); @@ -3215,7 +3419,7 @@ static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio, term = 1; if (term) - qlt_send_term_exchange(vha, cmd, &cmd->atio, 1); + qlt_send_term_exchange(vha, cmd, &cmd->atio, 1, 0); return term; } @@ -3300,9 +3504,6 @@ qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd) ha->tgt.tgt_ops->handle_data(cmd); return; - } else if (cmd->state == QLA_TGT_STATE_ABORTED) { - ql_dbg(ql_dbg_io, vha, 0xff02, - "HOST-ABORT: handle=%d, state=ABORTED.\n", handle); } else { ql_dbg(ql_dbg_io, vha, 0xff03, "HOST-ABORT: handle=%d, state=BAD(%d).\n", handle, @@ -3398,13 +3599,27 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle, case CTIO_PORT_LOGGED_OUT: case CTIO_PORT_UNAVAILABLE: + { + int logged_out = + (status & 0xFFFF) == CTIO_PORT_LOGGED_OUT; + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059, - "qla_target(%d): CTIO with PORT LOGGED " - "OUT (29) or PORT UNAVAILABLE (28) status %x " + "qla_target(%d): CTIO with %s status %x " "received (state %x, se_cmd %p)\n", vha->vp_idx, + logged_out ? "PORT LOGGED OUT" : "PORT UNAVAILABLE", status, cmd->state, se_cmd); - break; + if (logged_out && cmd->sess) { + /* + * Session is already logged out, but we need + * to notify initiator, who's not aware of this + */ + cmd->sess->logout_on_delete = 0; + cmd->sess->send_els_logo = 1; + qlt_schedule_sess_for_deletion(cmd->sess, true); + } + break; + } case CTIO_SRR_RECEIVED: ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05a, "qla_target(%d): CTIO with SRR_RECEIVED" @@ -3454,14 +3669,14 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle, } - /* "cmd->state == QLA_TGT_STATE_ABORTED" means + /* "cmd->aborted" means * cmd is already aborted/terminated, we don't * need to terminate again. The exchange is already * cleaned up/freed at FW level. Just cleanup at driver * level. 
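The qlt_abort_cmd() rework above reflects that the function can legitimately run twice for one command, once from the XFER-ready completion path with CMD_T_ABORT set and once from the TCM TMR drain; the new cmd_lock plus the aborted flag turn the second call into a detected no-op instead of a double termination of the exchange. A sketch of the guard (demo_* names hypothetical; note the driver itself returns a positive EIO at this point):

    #include <linux/errno.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct demo_cmd {
        spinlock_t lock;
        bool aborted;
    };

    static int demo_abort_once(struct demo_cmd *cmd)
    {
        unsigned long flags;

        spin_lock_irqsave(&cmd->lock, flags);
        if (cmd->aborted) {
            spin_unlock_irqrestore(&cmd->lock, flags);
            return -EIO;        /* already aborted, do nothing */
        }
        cmd->aborted = true;
        spin_unlock_irqrestore(&cmd->lock, flags);

        /* terminate the exchange exactly once here */
        return 0;
    }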
*/ if ((cmd->state != QLA_TGT_STATE_NEED_DATA) && - (cmd->state != QLA_TGT_STATE_ABORTED)) { + (!cmd->aborted)) { cmd->cmd_flags |= BIT_13; if (qlt_term_ctio_exchange(vha, ctio, cmd, status)) return; @@ -3479,7 +3694,7 @@ skip_term: ha->tgt.tgt_ops->handle_data(cmd); return; - } else if (cmd->state == QLA_TGT_STATE_ABORTED) { + } else if (cmd->aborted) { cmd->cmd_flags |= BIT_18; ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e, "Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag); @@ -3491,7 +3706,7 @@ skip_term: } if (unlikely(status != CTIO_SUCCESS) && - (cmd->state != QLA_TGT_STATE_ABORTED)) { + !cmd->aborted) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n"); dump_stack(); } @@ -3553,13 +3768,14 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd) if (tgt->tgt_stop) goto out_term; - if (cmd->state == QLA_TGT_STATE_ABORTED) { + if (cmd->aborted) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf082, "cmd with tag %u is aborted\n", cmd->atio.u.isp24.exchange_addr); goto out_term; } + spin_lock_init(&cmd->cmd_lock); cdb = &atio->u.isp24.fcp_cmnd.cdb[0]; cmd->se_cmd.tag = atio->u.isp24.exchange_addr; cmd->unpacked_lun = scsilun_to_int( @@ -3589,9 +3805,9 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd) /* * Drop extra session reference from qla_tgt_handle_cmd_for_atio*( */ - spin_lock_irqsave(&ha->hardware_lock, flags); + spin_lock_irqsave(&ha->tgt.sess_lock, flags); ha->tgt.tgt_ops->put_sess(sess); - spin_unlock_irqrestore(&ha->hardware_lock, flags); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); return; out_term: @@ -3602,12 +3818,15 @@ out_term: */ cmd->cmd_flags |= BIT_2; spin_lock_irqsave(&ha->hardware_lock, flags); - qlt_send_term_exchange(vha, NULL, &cmd->atio, 1); + qlt_send_term_exchange(vha, NULL, &cmd->atio, 1, 0); qlt_decr_num_pend_cmds(vha); percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag); - ha->tgt.tgt_ops->put_sess(sess); spin_unlock_irqrestore(&ha->hardware_lock, flags); + + spin_lock_irqsave(&ha->tgt.sess_lock, flags); + ha->tgt.tgt_ops->put_sess(sess); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); } static void qlt_do_work(struct work_struct *work) @@ -3692,10 +3911,8 @@ static void qlt_create_sess_from_atio(struct work_struct *work) goto out_term; } - mutex_lock(&vha->vha_tgt.tgt_mutex); sess = qlt_make_local_sess(vha, s_id); /* sess has an extra creation ref. */ - mutex_unlock(&vha->vha_tgt.tgt_mutex); if (!sess) goto out_term; @@ -3723,7 +3940,7 @@ static void qlt_create_sess_from_atio(struct work_struct *work) out_term: spin_lock_irqsave(&ha->hardware_lock, flags); - qlt_send_term_exchange(vha, NULL, &op->atio, 1); + qlt_send_term_exchange(vha, NULL, &op->atio, 1, 0); spin_unlock_irqrestore(&ha->hardware_lock, flags); kfree(op); @@ -3787,13 +4004,24 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, cmd->cmd_in_wq = 1; cmd->cmd_flags |= BIT_0; + cmd->se_cmd.cpuid = ha->msix_count ? 
+ ha->tgt.rspq_vector_cpuid : WORK_CPU_UNBOUND; spin_lock(&vha->cmd_list_lock); list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list); spin_unlock(&vha->cmd_list_lock); INIT_WORK(&cmd->work, qlt_do_work); - queue_work(qla_tgt_wq, &cmd->work); + if (ha->msix_count) { + if (cmd->atio.u.isp24.fcp_cmnd.rddata) + queue_work_on(smp_processor_id(), qla_tgt_wq, + &cmd->work); + else + queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq, + &cmd->work); + } else { + queue_work(qla_tgt_wq, &cmd->work); + } return 0; } @@ -3917,13 +4145,18 @@ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb) struct qla_tgt_sess *sess; uint32_t lun, unpacked_lun; int fn; + unsigned long flags; tgt = vha->vha_tgt.qla_tgt; lun = a->u.isp24.fcp_cmnd.lun; fn = a->u.isp24.fcp_cmnd.task_mgmt_flags; + + spin_lock_irqsave(&ha->tgt.sess_lock, flags); sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, a->u.isp24.fcp_hdr.s_id); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun); if (!sess) { @@ -3987,10 +4220,14 @@ static int qlt_abort_task(struct scsi_qla_host *vha, struct qla_hw_data *ha = vha->hw; struct qla_tgt_sess *sess; int loop_id; + unsigned long flags; loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb); + spin_lock_irqsave(&ha->tgt.sess_lock, flags); sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + if (sess == NULL) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025, "qla_target(%d): task abort for unexisting " @@ -4022,15 +4259,6 @@ void qlt_logo_completion_handler(fc_port_t *fcport, int rc) } } -static void qlt_swap_imm_ntfy_iocb(struct imm_ntfy_from_isp *a, - struct imm_ntfy_from_isp *b) -{ - struct imm_ntfy_from_isp tmp; - memcpy(&tmp, a, sizeof(struct imm_ntfy_from_isp)); - memcpy(a, b, sizeof(struct imm_ntfy_from_isp)); - memcpy(b, &tmp, sizeof(struct imm_ntfy_from_isp)); -} - /* * ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) * @@ -4040,11 +4268,13 @@ static void qlt_swap_imm_ntfy_iocb(struct imm_ntfy_from_isp *a, */ static struct qla_tgt_sess * qlt_find_sess_invalidate_other(struct qla_tgt *tgt, uint64_t wwn, - port_id_t port_id, uint16_t loop_id) + port_id_t port_id, uint16_t loop_id, struct qla_tgt_sess **conflict_sess) { struct qla_tgt_sess *sess = NULL, *other_sess; uint64_t other_wwn; + *conflict_sess = NULL; + list_for_each_entry(other_sess, &tgt->sess_list, sess_list_entry) { other_wwn = wwn_to_u64(other_sess->port_name); @@ -4072,9 +4302,10 @@ qlt_find_sess_invalidate_other(struct qla_tgt *tgt, uint64_t wwn, } else { /* * Another wwn used to have our s_id/loop_id - * combo - kill the session, but don't log out + * kill the session, but don't free the loop_id */ - sess->logout_on_delete = 0; + other_sess->keep_nport_handle = 1; + *conflict_sess = other_sess; qlt_schedule_sess_for_deletion(other_sess, true); } @@ -4119,7 +4350,7 @@ static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id) list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) { uint32_t cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id); if (cmd_key == key) { - cmd->state = QLA_TGT_STATE_ABORTED; + cmd->aborted = 1; count++; } } @@ -4136,12 +4367,14 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, { struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; struct qla_hw_data *ha = vha->hw; - struct qla_tgt_sess *sess = NULL; + struct qla_tgt_sess *sess = NULL, *conflict_sess = NULL; uint64_t wwn; port_id_t port_id; uint16_t loop_id; uint16_t wd3_lo; int res = 0; + 
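The dispatch above steers command processing by CPU: with MSI-X present, read-data commands stay on the CPU that took the interrupt, everything else follows the response queue's vector CPU recorded in ha->tgt.rspq_vector_cpuid, and without MSI-X the work is left unbound. Sketched with hypothetical demo_ names:

    #include <linux/smp.h>
    #include <linux/workqueue.h>

    static void demo_queue_cmd(struct workqueue_struct *wq,
                               struct work_struct *work,
                               bool have_msix, bool is_read, int rspq_cpu)
    {
        if (!have_msix) {
            queue_work(wq, work);           /* no affinity available */
            return;
        }
        /* reads stay local; other work follows the MSI-X vector CPU */
        queue_work_on(is_read ? smp_processor_id() : rspq_cpu, wq, work);
    }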
qlt_plogi_ack_t *pla; + unsigned long flags; wwn = wwn_to_u64(iocb->u.isp24.port_name); @@ -4165,27 +4398,20 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, /* Mark all stale commands in qla_tgt_wq for deletion */ abort_cmds_for_s_id(vha, &port_id); - if (wwn) + if (wwn) { + spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags); sess = qlt_find_sess_invalidate_other(tgt, wwn, - port_id, loop_id); + port_id, loop_id, &conflict_sess); + spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags); + } - if (!sess || IS_SW_RESV_ADDR(sess->s_id)) { + if (IS_SW_RESV_ADDR(port_id) || (!sess && !conflict_sess)) { res = 1; break; } - if (sess->plogi_ack_needed) { - /* - * Initiator sent another PLOGI before last PLOGI could - * finish. Swap plogi iocbs and terminate old one - * without acking, new one will get acked when session - * deletion completes. - */ - ql_log(ql_log_warn, sess->vha, 0xf094, - "sess %p received double plogi.\n", sess); - - qlt_swap_imm_ntfy_iocb(iocb, &sess->tm_iocb); - + pla = qlt_plogi_ack_find_add(vha, &port_id, iocb); + if (!pla) { qlt_send_term_imm_notif(vha, iocb, 1); res = 0; @@ -4194,13 +4420,14 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, res = 0; - /* - * Save immediate Notif IOCB for Ack when sess is done - * and being deleted. - */ - memcpy(&sess->tm_iocb, iocb, sizeof(sess->tm_iocb)); - sess->plogi_ack_needed = 1; + if (conflict_sess) + qlt_plogi_ack_link(vha, pla, conflict_sess, + QLT_PLOGI_LINK_CONFLICT); + + if (!sess) + break; + qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN); /* * Under normal circumstances we want to release nport handle * during LOGO process to avoid nport handle leaks inside FW. @@ -4227,9 +4454,21 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, case ELS_PRLI: wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo); - if (wwn) + if (wwn) { + spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags); sess = qlt_find_sess_invalidate_other(tgt, wwn, port_id, - loop_id); + loop_id, &conflict_sess); + spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags); + } + + if (conflict_sess) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09b, + "PRLI with conflicting sess %p port %8phC\n", + conflict_sess, conflict_sess->port_name); + qlt_send_term_imm_notif(vha, iocb, 1); + res = 0; + break; + } if (sess != NULL) { if (sess->deleted) { @@ -4554,7 +4793,7 @@ out_reject: dump_stack(); } else { cmd->cmd_flags |= BIT_9; - qlt_send_term_exchange(vha, cmd, &cmd->atio, 1); + qlt_send_term_exchange(vha, cmd, &cmd->atio, 1, 0); } spin_unlock_irqrestore(&ha->hardware_lock, flags); } @@ -4733,7 +4972,7 @@ static void qlt_prepare_srr_imm(struct scsi_qla_host *vha, sctio, sctio->srr_id); list_del(&sctio->srr_list_entry); qlt_send_term_exchange(vha, sctio->cmd, - &sctio->cmd->atio, 1); + &sctio->cmd->atio, 1, 0); kfree(sctio); } } @@ -4899,11 +5138,14 @@ static int __qlt_send_busy(struct scsi_qla_host *vha, struct qla_hw_data *ha = vha->hw; request_t *pkt; struct qla_tgt_sess *sess = NULL; + unsigned long flags; + spin_lock_irqsave(&ha->tgt.sess_lock, flags); sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); if (!sess) { - qlt_send_term_exchange(vha, NULL, atio, 1); + qlt_send_term_exchange(vha, NULL, atio, 1, 0); return 0; } /* Sending marker isn't necessary, since we called from ISR */ @@ -4916,6 +5158,7 @@ static int __qlt_send_busy(struct scsi_qla_host *vha, return -ENOMEM; } + vha->tgt_counters.num_q_full_sent++; pkt->entry_count = 1; pkt->handle = 
QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; @@ -5129,11 +5372,12 @@ qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha, /* ha->hardware_lock supposed to be held on entry */ /* called via callback from qla2xxx */ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha, - struct atio_from_isp *atio) + struct atio_from_isp *atio, uint8_t ha_locked) { struct qla_hw_data *ha = vha->hw; struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; int rc; + unsigned long flags; if (unlikely(tgt == NULL)) { ql_dbg(ql_dbg_io, vha, 0x3064, @@ -5145,7 +5389,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha, * Otherwise, some commands can stuck. */ - tgt->irq_cmd_count++; + tgt->atio_irq_cmd_count++; switch (atio->u.raw.entry_type) { case ATIO_TYPE7: @@ -5155,7 +5399,11 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha, "qla_target(%d): ATIO_TYPE7 " "received with UNKNOWN exchange address, " "sending QUEUE_FULL\n", vha->vp_idx); + if (!ha_locked) + spin_lock_irqsave(&ha->hardware_lock, flags); qlt_send_busy(vha, atio, SAM_STAT_TASK_SET_FULL); + if (!ha_locked) + spin_unlock_irqrestore(&ha->hardware_lock, flags); break; } @@ -5164,7 +5412,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha, if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) { rc = qlt_chk_qfull_thresh_hold(vha, atio); if (rc != 0) { - tgt->irq_cmd_count--; + tgt->atio_irq_cmd_count--; return; } rc = qlt_handle_cmd_for_atio(vha, atio); @@ -5173,11 +5421,20 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha, } if (unlikely(rc != 0)) { if (rc == -ESRCH) { + if (!ha_locked) + spin_lock_irqsave + (&ha->hardware_lock, flags); + #if 1 /* With TERM EXCHANGE some FC cards refuse to boot */ qlt_send_busy(vha, atio, SAM_STAT_BUSY); #else - qlt_send_term_exchange(vha, NULL, atio, 1); + qlt_send_term_exchange(vha, NULL, atio, 1, 0); #endif + + if (!ha_locked) + spin_unlock_irqrestore + (&ha->hardware_lock, flags); + } else { if (tgt->tgt_stop) { ql_dbg(ql_dbg_tgt, vha, 0xe059, @@ -5189,7 +5446,13 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha, "qla_target(%d): Unable to send " "command to target, sending BUSY " "status.\n", vha->vp_idx); + if (!ha_locked) + spin_lock_irqsave( + &ha->hardware_lock, flags); qlt_send_busy(vha, atio, SAM_STAT_BUSY); + if (!ha_locked) + spin_unlock_irqrestore( + &ha->hardware_lock, flags); } } } @@ -5206,7 +5469,12 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha, break; } ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO"); + + if (!ha_locked) + spin_lock_irqsave(&ha->hardware_lock, flags); qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio); + if (!ha_locked) + spin_unlock_irqrestore(&ha->hardware_lock, flags); break; } @@ -5217,7 +5485,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha, break; } - tgt->irq_cmd_count--; + tgt->atio_irq_cmd_count--; } /* ha->hardware_lock supposed to be held on entry */ @@ -5277,7 +5545,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt) #if 1 /* With TERM EXCHANGE some FC cards refuse to boot */ qlt_send_busy(vha, atio, 0); #else - qlt_send_term_exchange(vha, NULL, atio, 1); + qlt_send_term_exchange(vha, NULL, atio, 1, 0); #endif } else { if (tgt->tgt_stop) { @@ -5286,7 +5554,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt) "command to target, sending TERM " "EXCHANGE for rsp\n"); qlt_send_term_exchange(vha, NULL, - atio, 1); + atio, 1, 0); } else { ql_dbg(ql_dbg_tgt, vha, 0xe060, "qla_target(%d): Unable to send " @@ -5534,12 +5802,16 @@ static 
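With ATIO processing moving under the dedicated ha->tgt.atio_lock (see qla83xx_msix_atio_q further down), qlt_24xx_atio_pkt() grows a ha_locked parameter and takes ha->hardware_lock itself only around the branches that still need it, unless the caller already holds it. The convention in miniature (demo_ name hypothetical):

    /*
     * Hedged sketch of the ha_locked convention: one body serves both
     * calling contexts by locking conditionally.
     */
    #include <linux/spinlock.h>

    static void demo_do_under_hw_lock(spinlock_t *hw_lock,
                                      bool caller_holds_lock,
                                      void (*fn)(void *), void *arg)
    {
        unsigned long flags = 0;

        if (!caller_holds_lock)
            spin_lock_irqsave(hw_lock, flags);
        fn(arg);
        if (!caller_holds_lock)
            spin_unlock_irqrestore(hw_lock, flags);
    }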
struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha, int rc, global_resets; uint16_t loop_id = 0; + mutex_lock(&vha->vha_tgt.tgt_mutex); + retry: global_resets = atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count); rc = qla24xx_get_loop_id(vha, s_id, &loop_id); if (rc != 0) { + mutex_unlock(&vha->vha_tgt.tgt_mutex); + if ((s_id[0] == 0xFF) && (s_id[1] == 0xFC)) { /* @@ -5550,17 +5822,27 @@ retry: "Unable to find initiator with S_ID %x:%x:%x", s_id[0], s_id[1], s_id[2]); } else - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf071, + ql_log(ql_log_info, vha, 0xf071, "qla_target(%d): Unable to find " "initiator with S_ID %x:%x:%x", vha->vp_idx, s_id[0], s_id[1], s_id[2]); + + if (rc == -ENOENT) { + qlt_port_logo_t logo; + sid_to_portid(s_id, &logo.id); + logo.cmd_count = 1; + qlt_send_first_logo(vha, &logo); + } + return NULL; } fcport = qlt_get_port_database(vha, loop_id); - if (!fcport) + if (!fcport) { + mutex_unlock(&vha->vha_tgt.tgt_mutex); return NULL; + } if (global_resets != atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) { @@ -5575,6 +5857,8 @@ retry: sess = qlt_create_sess(vha, fcport, true); + mutex_unlock(&vha->vha_tgt.tgt_mutex); + kfree(fcport); return sess; } @@ -5585,15 +5869,15 @@ static void qlt_abort_work(struct qla_tgt *tgt, struct scsi_qla_host *vha = tgt->vha; struct qla_hw_data *ha = vha->hw; struct qla_tgt_sess *sess = NULL; - unsigned long flags; + unsigned long flags = 0, flags2 = 0; uint32_t be_s_id; uint8_t s_id[3]; int rc; - spin_lock_irqsave(&ha->hardware_lock, flags); + spin_lock_irqsave(&ha->tgt.sess_lock, flags2); if (tgt->tgt_stop) - goto out_term; + goto out_term2; s_id[0] = prm->abts.fcp_hdr_le.s_id[2]; s_id[1] = prm->abts.fcp_hdr_le.s_id[1]; @@ -5602,41 +5886,47 @@ static void qlt_abort_work(struct qla_tgt *tgt, sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, (unsigned char *)&be_s_id); if (!sess) { - spin_unlock_irqrestore(&ha->hardware_lock, flags); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); - mutex_lock(&vha->vha_tgt.tgt_mutex); sess = qlt_make_local_sess(vha, s_id); /* sess has got an extra creation ref */ - mutex_unlock(&vha->vha_tgt.tgt_mutex); - spin_lock_irqsave(&ha->hardware_lock, flags); + spin_lock_irqsave(&ha->tgt.sess_lock, flags2); if (!sess) - goto out_term; + goto out_term2; } else { if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { sess = NULL; - goto out_term; + goto out_term2; } kref_get(&sess->se_sess->sess_kref); } + spin_lock_irqsave(&ha->hardware_lock, flags); + if (tgt->tgt_stop) goto out_term; rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess); if (rc != 0) goto out_term; + spin_unlock_irqrestore(&ha->hardware_lock, flags); ha->tgt.tgt_ops->put_sess(sess); - spin_unlock_irqrestore(&ha->hardware_lock, flags); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); return; +out_term2: + spin_lock_irqsave(&ha->hardware_lock, flags); + out_term: qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + if (sess) ha->tgt.tgt_ops->put_sess(sess); - spin_unlock_irqrestore(&ha->hardware_lock, flags); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); } static void qlt_tmr_work(struct qla_tgt *tgt, @@ -5653,7 +5943,7 @@ static void qlt_tmr_work(struct qla_tgt *tgt, int fn; void *iocb; - spin_lock_irqsave(&ha->hardware_lock, flags); + spin_lock_irqsave(&ha->tgt.sess_lock, flags); if (tgt->tgt_stop) goto out_term; @@ -5661,14 +5951,12 @@ static void qlt_tmr_work(struct qla_tgt *tgt, s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id; sess = 
ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id); if (!sess) { - spin_unlock_irqrestore(&ha->hardware_lock, flags); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); - mutex_lock(&vha->vha_tgt.tgt_mutex); sess = qlt_make_local_sess(vha, s_id); /* sess has got an extra creation ref */ - mutex_unlock(&vha->vha_tgt.tgt_mutex); - spin_lock_irqsave(&ha->hardware_lock, flags); + spin_lock_irqsave(&ha->tgt.sess_lock, flags); if (!sess) goto out_term; } else { @@ -5690,14 +5978,14 @@ static void qlt_tmr_work(struct qla_tgt *tgt, goto out_term; ha->tgt.tgt_ops->put_sess(sess); - spin_unlock_irqrestore(&ha->hardware_lock, flags); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); return; out_term: - qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1); + qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1, 0); if (sess) ha->tgt.tgt_ops->put_sess(sess); - spin_unlock_irqrestore(&ha->hardware_lock, flags); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); } static void qlt_sess_work_fn(struct work_struct *work) @@ -6002,6 +6290,7 @@ qlt_enable_vha(struct scsi_qla_host *vha) struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; unsigned long flags; scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); + int rspq_ent = QLA83XX_RSPQ_MSIX_ENTRY_NUMBER; if (!tgt) { ql_dbg(ql_dbg_tgt, vha, 0xe069, @@ -6020,6 +6309,17 @@ qlt_enable_vha(struct scsi_qla_host *vha) qla24xx_disable_vp(vha); qla24xx_enable_vp(vha); } else { + if (ha->msix_entries) { + ql_dbg(ql_dbg_tgt, vha, 0xffff, + "%s: host%ld : vector %d cpu %d\n", + __func__, vha->host_no, + ha->msix_entries[rspq_ent].vector, + ha->msix_entries[rspq_ent].cpuid); + + ha->tgt.rspq_vector_cpuid = + ha->msix_entries[rspq_ent].cpuid; + } + set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); qla2xxx_wake_dpc(base_vha); qla2x00_wait_for_hba_online(base_vha); @@ -6131,7 +6431,7 @@ qlt_init_atio_q_entries(struct scsi_qla_host *vha) * @ha: SCSI driver HA context */ void -qlt_24xx_process_atio_queue(struct scsi_qla_host *vha) +qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked) { struct qla_hw_data *ha = vha->hw; struct atio_from_isp *pkt; @@ -6144,7 +6444,8 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha) pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr; cnt = pkt->u.raw.entry_count; - qlt_24xx_atio_pkt_all_vps(vha, (struct atio_from_isp *)pkt); + qlt_24xx_atio_pkt_all_vps(vha, (struct atio_from_isp *)pkt, + ha_locked); for (i = 0; i < cnt; i++) { ha->tgt.atio_ring_index++; @@ -6265,10 +6566,21 @@ qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha, { struct qla_hw_data *ha = vha->hw; + if (!QLA_TGT_MODE_ENABLED()) + return; + if (ha->tgt.node_name_set) { memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE); icb->firmware_options_1 |= cpu_to_le32(BIT_14); } + + /* disable ZIO at start time. */ + if (!vha->flags.init_done) { + uint32_t tmp; + tmp = le32_to_cpu(icb->firmware_options_2); + tmp &= ~(BIT_3 | BIT_2 | BIT_1 | BIT_0); + icb->firmware_options_2 = cpu_to_le32(tmp); + } } void @@ -6359,6 +6671,15 @@ qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha, memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE); icb->firmware_options_1 |= cpu_to_le32(BIT_14); } + + /* disable ZIO at start time. 
*/ + if (!vha->flags.init_done) { + uint32_t tmp; + tmp = le32_to_cpu(icb->firmware_options_2); + tmp &= ~(BIT_3 | BIT_2 | BIT_1 | BIT_0); + icb->firmware_options_2 = cpu_to_le32(tmp); + } + } void @@ -6428,16 +6749,59 @@ qla83xx_msix_atio_q(int irq, void *dev_id) ha = rsp->hw; vha = pci_get_drvdata(ha->pdev); - spin_lock_irqsave(&ha->hardware_lock, flags); + spin_lock_irqsave(&ha->tgt.atio_lock, flags); - qlt_24xx_process_atio_queue(vha); - qla24xx_process_response_queue(vha, rsp); + qlt_24xx_process_atio_queue(vha, 0); - spin_unlock_irqrestore(&ha->hardware_lock, flags); + spin_unlock_irqrestore(&ha->tgt.atio_lock, flags); return IRQ_HANDLED; } +static void +qlt_handle_abts_recv_work(struct work_struct *work) +{ + struct qla_tgt_sess_op *op = container_of(work, + struct qla_tgt_sess_op, work); + scsi_qla_host_t *vha = op->vha; + struct qla_hw_data *ha = vha->hw; + unsigned long flags; + + if (qla2x00_reset_active(vha) || (op->chip_reset != ha->chip_reset)) + return; + + spin_lock_irqsave(&ha->tgt.atio_lock, flags); + qlt_24xx_process_atio_queue(vha, 0); + spin_unlock_irqrestore(&ha->tgt.atio_lock, flags); + + spin_lock_irqsave(&ha->hardware_lock, flags); + qlt_response_pkt_all_vps(vha, (response_t *)&op->atio); + spin_unlock_irqrestore(&ha->hardware_lock, flags); +} + +void +qlt_handle_abts_recv(struct scsi_qla_host *vha, response_t *pkt) +{ + struct qla_tgt_sess_op *op; + + op = kzalloc(sizeof(*op), GFP_ATOMIC); + + if (!op) { + /* do not reach for ATIO queue here. This is best effort err + * recovery at this point. + */ + qlt_response_pkt_all_vps(vha, pkt); + return; + } + + memcpy(&op->atio, pkt, sizeof(*pkt)); + op->vha = vha; + op->chip_reset = vha->hw->chip_reset; + INIT_WORK(&op->work, qlt_handle_abts_recv_work); + queue_work(qla_tgt_wq, &op->work); + return; +} + int qlt_mem_alloc(struct qla_hw_data *ha) { @@ -6532,13 +6896,25 @@ int __init qlt_init(void) return -ENOMEM; } + qla_tgt_plogi_cachep = kmem_cache_create("qla_tgt_plogi_cachep", + sizeof(qlt_plogi_ack_t), + __alignof__(qlt_plogi_ack_t), + 0, NULL); + + if (!qla_tgt_plogi_cachep) { + ql_log(ql_log_fatal, NULL, 0xe06d, + "kmem_cache_create for qla_tgt_plogi_cachep failed\n"); + ret = -ENOMEM; + goto out_mgmt_cmd_cachep; + } + qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab, mempool_free_slab, qla_tgt_mgmt_cmd_cachep); if (!qla_tgt_mgmt_cmd_mempool) { ql_log(ql_log_fatal, NULL, 0xe06e, "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n"); ret = -ENOMEM; - goto out_mgmt_cmd_cachep; + goto out_plogi_cachep; } qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0); @@ -6555,6 +6931,8 @@ int __init qlt_init(void) out_cmd_mempool: mempool_destroy(qla_tgt_mgmt_cmd_mempool); +out_plogi_cachep: + kmem_cache_destroy(qla_tgt_plogi_cachep); out_mgmt_cmd_cachep: kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep); return ret; @@ -6567,5 +6945,6 @@ void qlt_exit(void) destroy_workqueue(qla_tgt_wq); mempool_destroy(qla_tgt_mgmt_cmd_mempool); + kmem_cache_destroy(qla_tgt_plogi_cachep); kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep); } diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index bca584ae45b7..22a6a767fe07 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h @@ -787,7 +787,7 @@ int qla2x00_wait_for_hba_online(struct scsi_qla_host *); #define QLA_TGT_STATE_NEED_DATA 1 /* target needs data to continue */ #define QLA_TGT_STATE_DATA_IN 2 /* Data arrived + target processing */ #define QLA_TGT_STATE_PROCESSED 3 /* target done processing */ -#define 
QLA_TGT_STATE_ABORTED 4 /* Command aborted */ + /* Special handles */ #define QLA_TGT_NULL_HANDLE 0 @@ -835,6 +835,7 @@ struct qla_tgt { * HW lock. */ int irq_cmd_count; + int atio_irq_cmd_count; int datasegs_per_cmd, datasegs_per_cont, sg_tablesize; @@ -883,6 +884,7 @@ struct qla_tgt { struct qla_tgt_sess_op { struct scsi_qla_host *vha; + uint32_t chip_reset; struct atio_from_isp atio; struct work_struct work; struct list_head cmd_list; @@ -896,6 +898,19 @@ enum qla_sess_deletion { QLA_SESS_DELETION_IN_PROGRESS = 2, }; +typedef enum { + QLT_PLOGI_LINK_SAME_WWN, + QLT_PLOGI_LINK_CONFLICT, + QLT_PLOGI_LINK_MAX +} qlt_plogi_link_t; + +typedef struct { + struct list_head list; + struct imm_ntfy_from_isp iocb; + port_id_t id; + int ref_count; +} qlt_plogi_ack_t; + /* * Equivalent to IT Nexus (Initiator-Target) */ @@ -907,8 +922,8 @@ struct qla_tgt_sess { unsigned int deleted:2; unsigned int local:1; unsigned int logout_on_delete:1; - unsigned int plogi_ack_needed:1; unsigned int keep_nport_handle:1; + unsigned int send_els_logo:1; unsigned char logout_completed; @@ -925,11 +940,39 @@ struct qla_tgt_sess { uint8_t port_name[WWN_SIZE]; struct work_struct free_work; - union { - struct imm_ntfy_from_isp tm_iocb; - }; + qlt_plogi_ack_t *plogi_link[QLT_PLOGI_LINK_MAX]; }; +typedef enum { + /* + * BIT_0 - Atio Arrival / schedule to work + * BIT_1 - qlt_do_work + * BIT_2 - qlt_do work failed + * BIT_3 - xfer rdy/tcm_qla2xxx_write_pending + * BIT_4 - read respond/tcm_qla2xxx_queue_data_in + * BIT_5 - status respond / tcm_qla2xxx_queue_status + * BIT_6 - tcm request to abort/Term exchange. + * pre_xmit_response->qlt_send_term_exchange + * BIT_7 - SRR received (qlt_handle_srr->qlt_xmit_response) + * BIT_8 - SRR received (qlt_handle_srr->qlt_rdy_to_xfer) + * BIT_9 - SRR received (qlt_handle_srr->qlt_send_term_exchange) + * BIT_10 - Data in - handle_data->tcm_qla2xxx_handle_data + + * BIT_12 - good completion - qlt_ctio_do_completion -->free_cmd + * BIT_13 - Bad completion - + * qlt_ctio_do_completion --> qlt_term_ctio_exchange + * BIT_14 - Back end data received/sent. + * BIT_15 - SRR prepare ctio + * BIT_16 - complete free + * BIT_17 - flush - qlt_abort_cmd_on_host_reset + * BIT_18 - completion w/abort status + * BIT_19 - completion w/unknown status + * BIT_20 - tcm_qla2xxx_free_cmd + */ + CMD_FLAG_DATA_WORK = BIT_11, + CMD_FLAG_DATA_WORK_FREE = BIT_21, +} cmd_flags_t; + struct qla_tgt_cmd { struct se_cmd se_cmd; struct qla_tgt_sess *sess; @@ -939,6 +982,7 @@ struct qla_tgt_cmd { /* Sense buffer that will be mapped into outgoing status */ unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER]; + spinlock_t cmd_lock; /* to save extra sess dereferences */ unsigned int conf_compl_supported:1; unsigned int sg_mapped:1; @@ -949,6 +993,7 @@ struct qla_tgt_cmd { unsigned int term_exchg:1; unsigned int cmd_sent_to_fw:1; unsigned int cmd_in_wq:1; + unsigned int aborted:1; struct scatterlist *sg; /* cmd data buffer SG vector */ int sg_cnt; /* SG segments count */ @@ -972,30 +1017,8 @@ struct qla_tgt_cmd { uint64_t jiffies_at_alloc; uint64_t jiffies_at_free; - /* BIT_0 - Atio Arrival / schedule to work - * BIT_1 - qlt_do_work - * BIT_2 - qlt_do work failed - * BIT_3 - xfer rdy/tcm_qla2xxx_write_pending - * BIT_4 - read respond/tcm_qla2xx_queue_data_in - * BIT_5 - status respond / tcm_qla2xx_queue_status - * BIT_6 - tcm request to abort/Term exchange. 
- * pre_xmit_response->qlt_send_term_exchange - * BIT_7 - SRR received (qlt_handle_srr->qlt_xmit_response) - * BIT_8 - SRR received (qlt_handle_srr->qlt_rdy_to_xfer) - * BIT_9 - SRR received (qla_handle_srr->qlt_send_term_exchange) - * BIT_10 - Data in - hanlde_data->tcm_qla2xxx_handle_data - * BIT_11 - Data actually going to TCM : tcm_qla2xx_handle_data_work - * BIT_12 - good completion - qlt_ctio_do_completion -->free_cmd - * BIT_13 - Bad completion - - * qlt_ctio_do_completion --> qlt_term_ctio_exchange - * BIT_14 - Back end data received/sent. - * BIT_15 - SRR prepare ctio - * BIT_16 - complete free - * BIT_17 - flush - qlt_abort_cmd_on_host_reset - * BIT_18 - completion w/abort status - * BIT_19 - completion w/unknown status - */ - uint32_t cmd_flags; + + cmd_flags_t cmd_flags; }; struct qla_tgt_sess_work_param { @@ -1120,13 +1143,21 @@ static inline uint32_t sid_to_key(const uint8_t *s_id) return key; } +static inline void sid_to_portid(const uint8_t *s_id, port_id_t *p) +{ + memset(p, 0, sizeof(*p)); + p->b.domain = s_id[0]; + p->b.area = s_id[1]; + p->b.al_pa = s_id[2]; +} + /* * Exported symbols from qla_target.c LLD logic used by qla2xxx code.. */ extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *); extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *); extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t); -extern void qlt_abort_cmd(struct qla_tgt_cmd *); +extern int qlt_abort_cmd(struct qla_tgt_cmd *); extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *); extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *); extern void qlt_free_cmd(struct qla_tgt_cmd *cmd); @@ -1135,7 +1166,7 @@ extern void qlt_enable_vha(struct scsi_qla_host *); extern void qlt_vport_create(struct scsi_qla_host *, struct qla_hw_data *); extern void qlt_rff_id(struct scsi_qla_host *, struct ct_sns_req *); extern void qlt_init_atio_q_entries(struct scsi_qla_host *); -extern void qlt_24xx_process_atio_queue(struct scsi_qla_host *); +extern void qlt_24xx_process_atio_queue(struct scsi_qla_host *, uint8_t); extern void qlt_24xx_config_rings(struct scsi_qla_host *); extern void qlt_24xx_config_nvram_stage1(struct scsi_qla_host *, struct nvram_24xx *); diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c index ddbe2e7ac14d..c3e622524604 100644 --- a/drivers/scsi/qla2xxx/qla_tmpl.c +++ b/drivers/scsi/qla2xxx/qla_tmpl.c @@ -395,6 +395,10 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha, if (ent->t263.queue_type == T263_QUEUE_TYPE_REQ) { for (i = 0; i < vha->hw->max_req_queues; i++) { struct req_que *req = vha->hw->req_q_map[i]; + + if (!test_bit(i, vha->hw->req_qid_map)) + continue; + if (req || !buf) { length = req ? req->length : REQUEST_ENTRY_CNT_24XX; @@ -408,6 +412,10 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha, } else if (ent->t263.queue_type == T263_QUEUE_TYPE_RSP) { for (i = 0; i < vha->hw->max_rsp_queues; i++) { struct rsp_que *rsp = vha->hw->rsp_q_map[i]; + + if (!test_bit(i, vha->hw->rsp_qid_map)) + continue; + if (rsp || !buf) { length = rsp ? 
rsp->length : RESPONSE_ENTRY_CNT_MQ; @@ -634,6 +642,10 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha, if (ent->t274.queue_type == T274_QUEUE_TYPE_REQ_SHAD) { for (i = 0; i < vha->hw->max_req_queues; i++) { struct req_que *req = vha->hw->req_q_map[i]; + + if (!test_bit(i, vha->hw->req_qid_map)) + continue; + if (req || !buf) { qla27xx_insert16(i, buf, len); qla27xx_insert16(1, buf, len); @@ -645,6 +657,10 @@ } else if (ent->t274.queue_type == T274_QUEUE_TYPE_RSP_SHAD) { for (i = 0; i < vha->hw->max_rsp_queues; i++) { struct rsp_que *rsp = vha->hw->rsp_q_map[i]; + + if (!test_bit(i, vha->hw->rsp_qid_map)) + continue; + if (rsp || !buf) { qla27xx_insert16(i, buf, len); qla27xx_insert16(1, buf, len); diff --git a/drivers/scsi/qla2xxx/qla_version.h index 6d31faa8c57b..0bc93fa46dae 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h @@ -7,7 +7,7 @@ /* * Driver version */ -#define QLA2XXX_VERSION "8.07.00.26-k" +#define QLA2XXX_VERSION "8.07.00.33-k" #define QLA_DRIVER_MAJOR_VER 8 #define QLA_DRIVER_MINOR_VER 7 diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c index 81af294f15a7..1808a01cfb7e 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -284,6 +284,7 @@ static void tcm_qla2xxx_complete_free(struct work_struct *work) WARN_ON(cmd->cmd_flags & BIT_16); + cmd->vha->tgt_counters.qla_core_ret_sta_ctio++; cmd->cmd_flags |= BIT_16; transport_generic_free_cmd(&cmd->se_cmd, 0); } @@ -295,9 +296,14 @@ */ static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd) { + cmd->vha->tgt_counters.core_qla_free_cmd++; cmd->cmd_in_wq = 1; + + BUG_ON(cmd->cmd_flags & BIT_20); + cmd->cmd_flags |= BIT_20; + INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free); - queue_work(tcm_qla2xxx_free_wq, &cmd->work); + queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work); } /* @@ -342,9 +348,9 @@ static int tcm_qla2xxx_shutdown_session(struct se_session *se_sess) BUG_ON(!sess); vha = sess->vha; - spin_lock_irqsave(&vha->hw->hardware_lock, flags); + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); target_sess_cmd_list_set_waiting(se_sess); - spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); return 1; } @@ -358,9 +364,9 @@ static void tcm_qla2xxx_close_session(struct se_session *se_sess) BUG_ON(!sess); vha = sess->vha; - spin_lock_irqsave(&vha->hw->hardware_lock, flags); + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); qlt_unreg_sess(sess); - spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); } static u32 tcm_qla2xxx_sess_get_index(struct se_session *se_sess)
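The queue_work() to queue_work_on(smp_processor_id(), ...) conversions in tcm_qla2xxx_free_cmd here (and in tcm_qla2xxx_handle_data below) keep completion processing on the CPU that observed the completion, so a command's cachelines stay local across the submit/complete path. A minimal sketch of the pattern, with hypothetical names; note that smp_processor_id() is only meaningful where preemption is disabled (for example interrupt context, as at these call sites):

    #include <linux/workqueue.h>
    #include <linux/smp.h>

    struct my_cmd {
            struct work_struct work;
            /* ... per-command state ... */
    };

    static void my_complete_work(struct work_struct *work)
    {
            struct my_cmd *cmd = container_of(work, struct my_cmd, work);
            /* Runs on the CPU it was queued on (barring CPU hotplug). */
            (void)cmd;
    }

    static void my_queue_completion(struct workqueue_struct *wq,
                                    struct my_cmd *cmd)
    {
            INIT_WORK(&cmd->work, my_complete_work);
            /* queue_work() would pick any CPU; queue_work_on() keeps it local. */
            queue_work_on(smp_processor_id(), wq, &cmd->work);
    }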
@@ -372,6 +378,20 @@ static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd) { struct qla_tgt_cmd *cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd); + + if (cmd->aborted) { + /* Cmd can loop during Q-full. tcm_qla2xxx_aborted_task + * can get ahead of this cmd. tcm_qla2xxx_aborted_task + * has already kick-started the free. + */ + pr_debug("write_pending aborted cmd[%p] refcount %d " + "transport_state %x, t_state %x, se_cmd_flags %x\n", + cmd, cmd->se_cmd.cmd_kref.refcount.counter, + cmd->se_cmd.transport_state, + cmd->se_cmd.t_state, + cmd->se_cmd.se_cmd_flags); + return 0; + } cmd->cmd_flags |= BIT_3; cmd->bufflen = se_cmd->data_length; cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); @@ -403,7 +423,7 @@ static int tcm_qla2xxx_write_pending_status(struct se_cmd *se_cmd) se_cmd->t_state == TRANSPORT_COMPLETE_QF_WP) { spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); wait_for_completion_timeout(&se_cmd->t_transport_stop_comp, - 3 * HZ); + 50); return 0; } spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); @@ -442,6 +462,9 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd, if (bidi) flags |= TARGET_SCF_BIDI_OP; + if (se_cmd->cpuid != WORK_CPU_UNBOUND) + flags |= TARGET_SCF_USE_CPUID; + sess = cmd->sess; if (!sess) { pr_err("Unable to locate struct qla_tgt_sess from qla_tgt_cmd\n"); @@ -454,6 +477,7 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd, return -EINVAL; } + cmd->vha->tgt_counters.qla_core_sbt_cmd++; return target_submit_cmd(se_cmd, se_sess, cdb, &cmd->sense_buffer[0], cmd->unpacked_lun, data_length, fcp_task_attr, data_dir, flags); @@ -462,13 +486,26 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work) { struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); + unsigned long flags; /* * Ensure that the complete FCP WRITE payload has been received. * Otherwise return an exception via CHECK_CONDITION status. */ cmd->cmd_in_wq = 0; - cmd->cmd_flags |= BIT_11; + + spin_lock_irqsave(&cmd->cmd_lock, flags); + cmd->cmd_flags |= CMD_FLAG_DATA_WORK; + if (cmd->aborted) { + cmd->cmd_flags |= CMD_FLAG_DATA_WORK_FREE; + spin_unlock_irqrestore(&cmd->cmd_lock, flags); + + tcm_qla2xxx_free_cmd(cmd); + return; + } + spin_unlock_irqrestore(&cmd->cmd_lock, flags); + + cmd->vha->tgt_counters.qla_core_ret_ctio++; if (!cmd->write_data_transferred) { /* * Check if se_cmd has already been aborted via LUN_RESET, and @@ -500,7 +537,7 @@ static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd) cmd->cmd_flags |= BIT_10; cmd->cmd_in_wq = 1; INIT_WORK(&cmd->work, tcm_qla2xxx_handle_data_work); - queue_work(tcm_qla2xxx_free_wq, &cmd->work); + queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work); } static void tcm_qla2xxx_handle_dif_work(struct work_struct *work)
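tcm_qla2xxx_handle_data_work above and tcm_qla2xxx_aborted_task below can race, and exactly one of them must trigger the final free. The driver arbitrates with two flags taken under cmd_lock: CMD_FLAG_DATA_WORK records that the data work ran, CMD_FLAG_DATA_WORK_FREE records that the free has been claimed. A condensed, hypothetical sketch of the idea (the real code also folds in the QLA_TGT_STATE_NEW case):

    #include <linux/spinlock.h>
    #include <linux/types.h>

    #define FLAG_DATA_WORK          (1U << 11)  /* data work has executed */
    #define FLAG_DATA_WORK_FREE     (1U << 21)  /* final free already claimed */

    struct xcmd {
            spinlock_t lock;
            unsigned int flags;
            bool aborted;
    };

    /* Data-work side: free only if the abort already arrived. */
    static bool data_work_claims_free(struct xcmd *c)
    {
            unsigned long irqflags;
            bool claim;

            spin_lock_irqsave(&c->lock, irqflags);
            c->flags |= FLAG_DATA_WORK;
            claim = c->aborted;
            if (claim)
                    c->flags |= FLAG_DATA_WORK_FREE;
            spin_unlock_irqrestore(&c->lock, irqflags);
            return claim;
    }

    /* Abort side: free only if the data work ran and nobody freed yet. */
    static bool abort_claims_free(struct xcmd *c)
    {
            unsigned long irqflags;
            bool claim;

            spin_lock_irqsave(&c->lock, irqflags);
            claim = (c->flags & (FLAG_DATA_WORK | FLAG_DATA_WORK_FREE)) ==
                    FLAG_DATA_WORK;
            if (claim)
                    c->flags |= FLAG_DATA_WORK_FREE;
            spin_unlock_irqrestore(&c->lock, irqflags);
            return claim;
    }

Whichever caller gets true from its helper performs the free; the flags make the claim mutually exclusive even when both paths run back to back.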
@@ -542,6 +579,20 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd) struct qla_tgt_cmd *cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd); + if (cmd->aborted) { + /* Cmd can loop during Q-full. tcm_qla2xxx_aborted_task + * can get ahead of this cmd. tcm_qla2xxx_aborted_task + * has already kick-started the free. + */ + pr_debug("queue_data_in aborted cmd[%p] refcount %d " + "transport_state %x, t_state %x, se_cmd_flags %x\n", + cmd, cmd->se_cmd.cmd_kref.refcount.counter, + cmd->se_cmd.transport_state, + cmd->se_cmd.t_state, + cmd->se_cmd.se_cmd_flags); + return 0; + } + cmd->cmd_flags |= BIT_4; cmd->bufflen = se_cmd->data_length; cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); @@ -633,17 +684,40 @@ static void tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd) qlt_xmit_tm_rsp(mcmd); } + +#define DATA_WORK_NOT_FREE(_flags) \ + ((_flags & (CMD_FLAG_DATA_WORK|CMD_FLAG_DATA_WORK_FREE)) == \ + CMD_FLAG_DATA_WORK) static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd) { struct qla_tgt_cmd *cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd); - qlt_abort_cmd(cmd); + unsigned long flags; + + if (qlt_abort_cmd(cmd)) + return; + + spin_lock_irqsave(&cmd->cmd_lock, flags); + if ((cmd->state == QLA_TGT_STATE_NEW) || + ((cmd->state == QLA_TGT_STATE_DATA_IN) && + DATA_WORK_NOT_FREE(cmd->cmd_flags))) { + + cmd->cmd_flags |= CMD_FLAG_DATA_WORK_FREE; + spin_unlock_irqrestore(&cmd->cmd_lock, flags); + /* Cmd has not reached firmware. + * Use this trigger to free it. */ + tcm_qla2xxx_free_cmd(cmd); + return; + } + spin_unlock_irqrestore(&cmd->cmd_lock, flags); + return; + } static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *, struct tcm_qla2xxx_nacl *, struct qla_tgt_sess *); /* - * Expected to be called with struct qla_hw_data->hardware_lock held + * Expected to be called with struct qla_hw_data->tgt.sess_lock held */ static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct qla_tgt_sess *sess) { @@ -697,13 +771,13 @@ static void tcm_qla2xxx_put_sess(struct qla_tgt_sess *sess) if (!sess) return; - assert_spin_locked(&sess->vha->hw->hardware_lock); + assert_spin_locked(&sess->vha->hw->tgt.sess_lock); kref_put(&sess->se_sess->sess_kref, tcm_qla2xxx_release_session); } static void tcm_qla2xxx_shutdown_sess(struct qla_tgt_sess *sess) { - assert_spin_locked(&sess->vha->hw->hardware_lock); + assert_spin_locked(&sess->vha->hw->tgt.sess_lock); target_sess_cmd_list_set_waiting(sess->se_sess); } @@ -1077,7 +1151,7 @@ static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg( } /* - * Expected to be called with struct qla_hw_data->hardware_lock held + * Expected to be called with struct qla_hw_data->tgt.sess_lock held */ static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_s_id( scsi_qla_host_t *vha, @@ -1116,7 +1190,7 @@ static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_s_id( } /* - * Expected to be called with struct qla_hw_data->hardware_lock held + * Expected to be called with struct qla_hw_data->tgt.sess_lock held */ static void tcm_qla2xxx_set_sess_by_s_id( struct tcm_qla2xxx_lport *lport, @@ -1182,7 +1256,7 @@ static void tcm_qla2xxx_set_sess_by_s_id( } /* - * Expected to be called with struct qla_hw_data->hardware_lock held + * Expected to be called with struct qla_hw_data->tgt.sess_lock held */ static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_loop_id( scsi_qla_host_t *vha, @@ -1221,7 +1295,7 @@ static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_loop_id( } /* - * Expected to be called with struct qla_hw_data->hardware_lock held + * Expected to be called with struct qla_hw_data->tgt.sess_lock held */ static void tcm_qla2xxx_set_sess_by_loop_id( struct tcm_qla2xxx_lport *lport, @@ -1285,7 +1359,7 @@ static void tcm_qla2xxx_set_sess_by_loop_id( } /* - * Should always be called with qla_hw_data->hardware_lock held. 
+ * Should always be called with qla_hw_data->tgt.sess_lock held. */ static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *lport, struct tcm_qla2xxx_nacl *nacl, struct qla_tgt_sess *sess) @@ -1353,7 +1427,7 @@ static int tcm_qla2xxx_check_initiator_node_acl( struct qla_tgt_sess *sess = qla_tgt_sess; unsigned char port_name[36]; unsigned long flags; - int num_tags = (ha->fw_xcb_count) ? ha->fw_xcb_count : + int num_tags = (ha->cur_fw_xcb_count) ? ha->cur_fw_xcb_count : TCM_QLA2XXX_DEFAULT_TAGS; lport = vha->vha_tgt.target_lport_ptr; @@ -1401,12 +1475,12 @@ static int tcm_qla2xxx_check_initiator_node_acl( * And now setup the new se_nacl and session pointers into our HW lport * mappings for fabric S_ID and LOOP_ID. */ - spin_lock_irqsave(&ha->hardware_lock, flags); + spin_lock_irqsave(&ha->tgt.sess_lock, flags); tcm_qla2xxx_set_sess_by_s_id(lport, se_nacl, nacl, se_sess, qla_tgt_sess, s_id); tcm_qla2xxx_set_sess_by_loop_id(lport, se_nacl, nacl, se_sess, qla_tgt_sess, loop_id); - spin_unlock_irqrestore(&ha->hardware_lock, flags); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); /* * Finally register the new FC Nexus with TCM */ diff --git a/drivers/scsi/qla4xxx/ql4_83xx.c index 5d4f8e67fb25..638f72c5ab05 100644 --- a/drivers/scsi/qla4xxx/ql4_83xx.c +++ b/drivers/scsi/qla4xxx/ql4_83xx.c @@ -46,11 +46,13 @@ int qla4_83xx_rd_reg_indirect(struct scsi_qla_host *ha, uint32_t addr, ret_val = qla4_83xx_set_win_base(ha, addr); - if (ret_val == QLA_SUCCESS) + if (ret_val == QLA_SUCCESS) { *data = qla4_83xx_rd_reg(ha, QLA83XX_WILDCARD); - else + } else { + *data = 0xffffffff; ql4_printk(KERN_ERR, ha, "%s: failed read of addr 0x%x!\n", __func__, addr); + } return ret_val; } diff --git a/drivers/scsi/qlogicpti.c index 676385ff28ef..69bfc0a1aea3 100644 --- a/drivers/scsi/qlogicpti.c +++ b/drivers/scsi/qlogicpti.c @@ -710,7 +710,7 @@ static int qpti_map_regs(struct qlogicpti *qpti) "PTI Qlogic/ISP"); if (!qpti->qregs) { printk("PTI: Qlogic/ISP registers are unmappable\n"); - return -1; + return -ENODEV; } if (qpti->is_pti) { qpti->sreg = of_ioremap(&op->resource[0], (16 * 4096), @@ -718,7 +718,7 @@ static int qpti_map_regs(struct qlogicpti *qpti) "PTI Qlogic/ISP statreg"); if (!qpti->sreg) { printk("PTI: Qlogic/ISP status register is unmappable\n"); - return -1; + return -ENODEV; } } return 0; } diff --git a/drivers/scsi/scsi.c index d07fb653f5dc..b1bf42b93fcc 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -782,7 +782,7 @@ void scsi_attach_vpd(struct scsi_device *sdev) int vpd_len = SCSI_VPD_PG_LEN; int pg80_supported = 0; int pg83_supported = 0; - unsigned char *vpd_buf; + unsigned char __rcu *vpd_buf, *orig_vpd_buf = NULL; if (sdev->skip_vpd_pages) return; @@ -828,8 +828,16 @@ retry_pg80: kfree(vpd_buf); goto retry_pg80; } + mutex_lock(&sdev->inquiry_mutex); + orig_vpd_buf = sdev->vpd_pg80; sdev->vpd_pg80_len = result; - sdev->vpd_pg80 = vpd_buf; + rcu_assign_pointer(sdev->vpd_pg80, vpd_buf); + mutex_unlock(&sdev->inquiry_mutex); + synchronize_rcu(); + if (orig_vpd_buf) { + kfree(orig_vpd_buf); + orig_vpd_buf = NULL; + } vpd_len = SCSI_VPD_PG_LEN; } @@ -849,8 +857,14 @@ retry_pg83: kfree(vpd_buf); goto retry_pg83; } + mutex_lock(&sdev->inquiry_mutex); + orig_vpd_buf = sdev->vpd_pg83; sdev->vpd_pg83_len = result; - sdev->vpd_pg83 = vpd_buf; + rcu_assign_pointer(sdev->vpd_pg83, vpd_buf); + mutex_unlock(&sdev->inquiry_mutex); + synchronize_rcu(); + if (orig_vpd_buf) + kfree(orig_vpd_buf); } }
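scsi_attach_vpd above is a textbook RCU publish: assemble the replacement buffer off to the side, swap the pointer in with rcu_assign_pointer() while inquiry_mutex serializes concurrent updaters, wait out readers with synchronize_rcu(), then free the stale buffer. Readers only ever see the old or the new buffer, never a torn update. A hedged, self-contained sketch of both sides (hypothetical struct):

    #include <linux/mutex.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct vpd_holder {
            struct mutex update_lock;
            unsigned char __rcu *vpd;
            int vpd_len;
    };

    /* Update side: publish new_buf (kmalloc'd by the caller), reclaim old. */
    static void vpd_publish(struct vpd_holder *h, unsigned char *new_buf, int len)
    {
            unsigned char *old;

            mutex_lock(&h->update_lock);
            old = rcu_dereference_protected(h->vpd,
                                            lockdep_is_held(&h->update_lock));
            h->vpd_len = len;
            rcu_assign_pointer(h->vpd, new_buf);
            mutex_unlock(&h->update_lock);

            synchronize_rcu();      /* no reader can still hold 'old' after this */
            kfree(old);
    }

    /* Read side: cheap and lockless; must not sleep in the critical section. */
    static int vpd_read_byte(struct vpd_holder *h, int off)
    {
            unsigned char *p;
            int val = -1;

            rcu_read_lock();
            p = rcu_dereference(h->vpd);
            if (p && off < h->vpd_len)
                    val = p[off];
            rcu_read_unlock();
            return val;
    }

The same reader shape appears in the vpd_pg80/vpd_pg83 sysfs handlers in the scsi_sysfs.c hunk further down.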
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index d09d60293c27..f3d69a98c725 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -129,7 +129,7 @@ static const char *scsi_debug_version_date = "20141022"; #define DEF_NO_LUN_0 0 #define DEF_NUM_PARTS 0 #define DEF_OPTS 0 -#define DEF_OPT_BLKS 64 +#define DEF_OPT_BLKS 1024 #define DEF_PHYSBLK_EXP 0 #define DEF_PTYPE 0 #define DEF_REMOVABLE false @@ -679,7 +679,7 @@ static void *fake_store(unsigned long long lba) static struct sd_dif_tuple *dif_store(sector_t sector) { - sector = do_div(sector, sdebug_store_sectors); + sector = sector_div(sector, sdebug_store_sectors); return dif_storep + sector; } @@ -2781,7 +2781,7 @@ static unsigned long lba_to_map_index(sector_t lba) lba += scsi_debug_unmap_granularity - scsi_debug_unmap_alignment; } - do_div(lba, scsi_debug_unmap_granularity); + sector_div(lba, scsi_debug_unmap_granularity); return lba; } @@ -4140,7 +4140,7 @@ MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)"); MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))"); MODULE_PARM_DESC(num_parts, "number of partitions(def=0)"); MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)"); -MODULE_PARM_DESC(opt_blks, "optimal transfer length in block (def=64)"); +MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)"); MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)"); MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)"); MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])"); @@ -4847,10 +4847,10 @@ static int __init scsi_debug_init(void) /* play around with geometry, don't waste too much on track 0 */ sdebug_heads = 8; sdebug_sectors_per = 32; - if (scsi_debug_dev_size_mb >= 16) - sdebug_heads = 32; - else if (scsi_debug_dev_size_mb >= 256) + if (scsi_debug_dev_size_mb >= 256) sdebug_heads = 64; + else if (scsi_debug_dev_size_mb >= 16) + sdebug_heads = 32; sdebug_cylinders_per = (unsigned long)sdebug_capacity / (sdebug_sectors_per * sdebug_heads); if (sdebug_cylinders_per >= 1024) { diff --git a/drivers/scsi/scsi_devinfo.c index 2c1160c7ec92..3408578b08d6 100644 --- a/drivers/scsi/scsi_devinfo.c +++ b/drivers/scsi/scsi_devinfo.c @@ -205,6 +205,8 @@ static struct { {"Intel", "Multi-Flex", NULL, BLIST_NO_RSOC}, {"iRiver", "iFP Mass Driver", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36}, {"LASOUND", "CDX7405", "3.10", BLIST_MAX5LUN | BLIST_SINGLELUN}, + {"Marvell", "Console", NULL, BLIST_SKIP_VPD_PAGES}, + {"Marvell", "91xx Config", "1.01", BLIST_SKIP_VPD_PAGES}, {"MATSHITA", "PD-1", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, {"MATSHITA", "DMC-LC5", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36}, {"MATSHITA", "DMC-LC40", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36}, @@ -218,6 +220,8 @@ static struct { {"NAKAMICH", "MJ-5.16S", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, {"NEC", "PD-1 ODX654P", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, {"NEC", "iStorage", NULL, BLIST_REPORTLUN2}, + {"NETAPP", "LUN C-Mode", NULL, BLIST_SYNC_ALUA}, + {"NETAPP", "INF-01-00", NULL, BLIST_SYNC_ALUA}, {"NRC", "MBR-7", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, {"NRC", "MBR-7.4", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, {"PIONEER", "CD-ROM DRM-600", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, @@ -227,6 +231,7 @@ static struct { {"Promise", "VTrak E610f", NULL, BLIST_SPARSELUN | BLIST_NO_RSOC}, {"Promise", "", NULL, BLIST_SPARSELUN}, {"QNAP", "iSCSI Storage", NULL, BLIST_MAX_1024}, + 
{"SYNOLOGY", "iSCSI Storage", NULL, BLIST_MAX_1024}, {"QUANTUM", "XP34301", "1071", BLIST_NOTQ}, {"REGAL", "CDC-4X", NULL, BLIST_MAX5LUN | BLIST_SINGLELUN}, {"SanDisk", "ImageMate CF-SD1", NULL, BLIST_FORCELUN}, diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c index e7649ed3f667..54d446c9f56e 100644 --- a/drivers/scsi/scsi_dh.c +++ b/drivers/scsi/scsi_dh.c @@ -153,76 +153,11 @@ static void scsi_dh_handler_detach(struct scsi_device *sdev) module_put(sdev->handler->module); } -/* - * Functions for sysfs attribute 'dh_state' - */ -static ssize_t -store_dh_state(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct scsi_device *sdev = to_scsi_device(dev); - struct scsi_device_handler *scsi_dh; - int err = -EINVAL; - - if (sdev->sdev_state == SDEV_CANCEL || - sdev->sdev_state == SDEV_DEL) - return -ENODEV; - - if (!sdev->handler) { - /* - * Attach to a device handler - */ - scsi_dh = scsi_dh_lookup(buf); - if (!scsi_dh) - return err; - err = scsi_dh_handler_attach(sdev, scsi_dh); - } else { - if (!strncmp(buf, "detach", 6)) { - /* - * Detach from a device handler - */ - sdev_printk(KERN_WARNING, sdev, - "can't detach handler %s.\n", - sdev->handler->name); - err = -EINVAL; - } else if (!strncmp(buf, "activate", 8)) { - /* - * Activate a device handler - */ - if (sdev->handler->activate) - err = sdev->handler->activate(sdev, NULL, NULL); - else - err = 0; - } - } - - return err<0?err:count; -} - -static ssize_t -show_dh_state(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct scsi_device *sdev = to_scsi_device(dev); - - if (!sdev->handler) - return snprintf(buf, 20, "detached\n"); - - return snprintf(buf, 20, "%s\n", sdev->handler->name); -} - -static struct device_attribute scsi_dh_state_attr = - __ATTR(dh_state, S_IRUGO | S_IWUSR, show_dh_state, - store_dh_state); - int scsi_dh_add_device(struct scsi_device *sdev) { struct scsi_device_handler *devinfo = NULL; const char *drv; - int err; - - err = device_create_file(&sdev->sdev_gendev, &scsi_dh_state_attr); - if (err) - return err; + int err = 0; drv = scsi_dh_find_driver(sdev); if (drv) @@ -238,11 +173,6 @@ void scsi_dh_release_device(struct scsi_device *sdev) scsi_dh_handler_detach(sdev); } -void scsi_dh_remove_device(struct scsi_device *sdev) -{ - device_remove_file(&sdev->sdev_gendev, &scsi_dh_state_attr); -} - /* * scsi_register_device_handler - register a device handler personality * module. 
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index dd8ad2a44510..8106515d1df8 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -23,6 +23,7 @@ #include <linux/scatterlist.h> #include <linux/blk-mq.h> #include <linux/ratelimit.h> +#include <asm/unaligned.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> @@ -1343,6 +1344,7 @@ scsi_prep_return(struct request_queue *q, struct request *req, int ret) switch (ret) { case BLKPREP_KILL: + case BLKPREP_INVALID: req->errors = DID_NO_CONNECT << 16; /* release the command and kill it */ if (req->special) { @@ -2698,6 +2700,7 @@ static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt) envp[idx++] = "SDEV_MEDIA_CHANGE=1"; break; case SDEV_EVT_INQUIRY_CHANGE_REPORTED: + scsi_rescan_device(&sdev->sdev_gendev); envp[idx++] = "SDEV_UA=INQUIRY_DATA_HAS_CHANGED"; break; case SDEV_EVT_CAPACITY_CHANGE_REPORTED: @@ -3154,3 +3157,190 @@ void sdev_enable_disk_events(struct scsi_device *sdev) atomic_dec(&sdev->disk_events_disable_depth); } EXPORT_SYMBOL(sdev_enable_disk_events);
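The new BLKPREP_INVALID case above gives command-setup code a way to say "this request can never succeed on this device", distinct from the catch-all BLKPREP_KILL; sd.c later in this section returns it for discard and WRITE SAME setups the device does not support. A hedged sketch of a prep-style helper using the same convention (hypothetical function and flag):

    #include <linux/blkdev.h>
    #include <scsi/scsi_cmnd.h>

    /* Returns a BLKPREP_* code, as scsi_setup_cmnd-style helpers do. */
    static int example_setup_feature_cmnd(struct scsi_cmnd *cmd, bool supported)
    {
            if (!supported)
                    return BLKPREP_INVALID; /* permanently invalid for this LU */

            /* ... fill in cmd->cmnd[] here ... */
            return BLKPREP_OK;
    }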
+ +/** + * scsi_vpd_lun_id - return a unique device identification + * @sdev: SCSI device + * @id: buffer for the identification + * @id_len: length of the buffer + * + * Copies a unique device identification into @id based + * on the information in the VPD page 0x83 of the device. + * The string will be formatted as a SCSI name string. + * + * Returns the length of the identification or error on failure. + * If the identifier is longer than the supplied buffer the actual + * identifier length is returned and the buffer is not zero-padded. + */ +int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len) +{ + u8 cur_id_type = 0xff; + u8 cur_id_size = 0; + unsigned char *d, *cur_id_str; + unsigned char __rcu *vpd_pg83; + int id_size = -EINVAL; + + rcu_read_lock(); + vpd_pg83 = rcu_dereference(sdev->vpd_pg83); + if (!vpd_pg83) { + rcu_read_unlock(); + return -ENXIO; + } + + /* + * Look for the correct descriptor. + * Order of preference for lun descriptor: + * - SCSI name string + * - NAA IEEE Registered Extended + * - EUI-64 based 16-byte + * - EUI-64 based 12-byte + * - NAA IEEE Registered + * - NAA IEEE Extended + * as longer descriptors reduce the likelihood + * of identification clashes. + */ + + /* The id string must be at least 20 bytes + terminating NULL byte */ + if (id_len < 21) { + rcu_read_unlock(); + return -EINVAL; + } + + memset(id, 0, id_len); + d = vpd_pg83 + 4; + while (d < vpd_pg83 + sdev->vpd_pg83_len) { + /* Skip designators not referring to the LUN */ + if ((d[1] & 0x30) != 0x00) + goto next_desig; + + switch (d[1] & 0xf) { + case 0x2: + /* EUI-64 */ + if (cur_id_size > d[3]) + break; + /* Prefer NAA IEEE Registered Extended */ + if (cur_id_type == 0x3 && + cur_id_size == d[3]) + break; + cur_id_size = d[3]; + cur_id_str = d + 4; + cur_id_type = d[1] & 0xf; + switch (cur_id_size) { + case 8: + id_size = snprintf(id, id_len, + "eui.%8phN", + cur_id_str); + break; + case 12: + id_size = snprintf(id, id_len, + "eui.%12phN", + cur_id_str); + break; + case 16: + id_size = snprintf(id, id_len, + "eui.%16phN", + cur_id_str); + break; + default: + cur_id_size = 0; + break; + } + break; + case 0x3: + /* NAA */ + if (cur_id_size > d[3]) + break; + cur_id_size = d[3]; + cur_id_str = d + 4; + cur_id_type = d[1] & 0xf; + switch (cur_id_size) { + case 8: + id_size = snprintf(id, id_len, + "naa.%8phN", + cur_id_str); + break; + case 16: + id_size = snprintf(id, id_len, + "naa.%16phN", + cur_id_str); + break; + default: + cur_id_size = 0; + break; + } + break; + case 0x8: + /* SCSI name string */ + if (cur_id_size + 4 > d[3]) + break; + /* Prefer others for truncated descriptor */ + if (cur_id_size && d[3] > id_len) + break; + cur_id_size = id_size = d[3]; + cur_id_str = d + 4; + cur_id_type = d[1] & 0xf; + if (cur_id_size >= id_len) + cur_id_size = id_len - 1; + memcpy(id, cur_id_str, cur_id_size); + /* Decrease priority for truncated descriptor */ + if (cur_id_size != id_size) + cur_id_size = 6; + break; + default: + break; + } +next_desig: + d += d[3] + 4; + } + rcu_read_unlock(); + + return id_size; +} +EXPORT_SYMBOL(scsi_vpd_lun_id); + +/* + * scsi_vpd_tpg_id - return a target port group identifier + * @sdev: SCSI device + * + * Returns the Target Port Group identifier from the information + * in VPD page 0x83 of the device. + * + * Returns the identifier or error on failure. 
+ */ +int scsi_vpd_tpg_id(struct scsi_device *sdev, int *rel_id) +{ + unsigned char *d; + unsigned char __rcu *vpd_pg83; + int group_id = -EAGAIN, rel_port = -1; + + rcu_read_lock(); + vpd_pg83 = rcu_dereference(sdev->vpd_pg83); + if (!vpd_pg83) { + rcu_read_unlock(); + return -ENXIO; + } + + d = vpd_pg83 + 4; + while (d < vpd_pg83 + sdev->vpd_pg83_len) { + switch (d[1] & 0xf) { + case 0x4: + /* Relative target port */ + rel_port = get_unaligned_be16(&d[6]); + break; + case 0x5: + /* Target port group */ + group_id = get_unaligned_be16(&d[6]); + break; + default: + break; + } + d += d[3] + 4; + } + rcu_read_unlock(); + + if (group_id >= 0 && rel_id && rel_port != -1) + *rel_id = rel_port; + + return group_id; +} +EXPORT_SYMBOL(scsi_vpd_tpg_id); diff --git a/drivers/scsi/scsi_priv.h index 4d01cdb1b348..27b4d0a6a01d 100644 --- a/drivers/scsi/scsi_priv.h +++ b/drivers/scsi/scsi_priv.h @@ -174,12 +174,11 @@ extern struct async_domain scsi_sd_probe_domain; #ifdef CONFIG_SCSI_DH int scsi_dh_add_device(struct scsi_device *sdev); void scsi_dh_release_device(struct scsi_device *sdev); -void scsi_dh_remove_device(struct scsi_device *sdev); #else static inline int scsi_dh_add_device(struct scsi_device *sdev) { return 0; } static inline void scsi_dh_release_device(struct scsi_device *sdev) { } -static inline void scsi_dh_remove_device(struct scsi_device *sdev) { } #endif +static inline void scsi_dh_remove_device(struct scsi_device *sdev) { } /* * internal scsi timeout functions: for use by mid-layer and transport diff --git a/drivers/scsi/scsi_scan.c index 054923e3393c..97074c91e328 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -43,6 +43,7 @@ #include <scsi/scsi_devinfo.h> #include <scsi/scsi_host.h> #include <scsi/scsi_transport.h> +#include <scsi/scsi_dh.h> #include <scsi/scsi_eh.h> #include "scsi_priv.h" @@ -236,6 +237,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget, INIT_LIST_HEAD(&sdev->starved_entry); INIT_LIST_HEAD(&sdev->event_list); spin_lock_init(&sdev->list_lock); + mutex_init(&sdev->inquiry_mutex); INIT_WORK(&sdev->event_work, scsi_evt_thread); INIT_WORK(&sdev->requeue_work, scsi_requeue_run_queue); @@ -517,7 +519,8 @@ void scsi_target_reap(struct scsi_target *starget) } /** - * sanitize_inquiry_string - remove non-graphical chars from an INQUIRY result string + * scsi_sanitize_inquiry_string - remove non-graphical chars from an + * INQUIRY result string * @s: INQUIRY result string to sanitize * @len: length of the string * * Description: * The SCSI spec says that INQUIRY vendor, product, and revision * strings must consist entirely of graphic ASCII characters, * padded on the right with spaces. Since not all devices obey * this rule, we will replace non-graphic or non-ASCII characters * with spaces. Exception: a NUL character is interpreted as a * string terminator, so all the following characters are set to * spaces. 
**/ -static void sanitize_inquiry_string(unsigned char *s, int len) +void scsi_sanitize_inquiry_string(unsigned char *s, int len) { int terminated = 0; @@ -541,6 +544,7 @@ static void sanitize_inquiry_string(unsigned char *s, int len) *s = ' '; } } +EXPORT_SYMBOL(scsi_sanitize_inquiry_string); /** * scsi_probe_lun - probe a single LUN using a SCSI INQUIRY @@ -626,9 +630,9 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result, } if (result == 0) { - sanitize_inquiry_string(&inq_result[8], 8); - sanitize_inquiry_string(&inq_result[16], 16); - sanitize_inquiry_string(&inq_result[32], 4); + scsi_sanitize_inquiry_string(&inq_result[8], 8); + scsi_sanitize_inquiry_string(&inq_result[16], 16); + scsi_sanitize_inquiry_string(&inq_result[32], 4); response_len = inq_result[4] + 5; if (response_len > 255) @@ -961,6 +965,9 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result, if (*bflags & BLIST_NO_DIF) sdev->no_dif = 1; + if (*bflags & BLIST_SYNC_ALUA) + sdev->synchronous_alua = 1; + sdev->eh_timeout = SCSI_DEFAULT_EH_TIMEOUT; if (*bflags & BLIST_TRY_VPD_PAGES) @@ -1518,7 +1525,15 @@ EXPORT_SYMBOL(scsi_add_device); void scsi_rescan_device(struct device *dev) { + struct scsi_device *sdev = to_scsi_device(dev); + device_lock(dev); + + scsi_attach_vpd(sdev); + + if (sdev->handler && sdev->handler->rescan) + sdev->handler->rescan(sdev); + if (dev->driver && try_module_get(dev->driver->owner)) { struct scsi_driver *drv = to_scsi_driver(dev->driver); diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index 21930c9ac9cd..d16441961f3a 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -17,6 +17,7 @@ #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_tcq.h> +#include <scsi/scsi_dh.h> #include <scsi/scsi_transport.h> #include <scsi/scsi_driver.h> @@ -80,6 +81,33 @@ const char *scsi_host_state_name(enum scsi_host_state state) return name; } +static const struct { + unsigned char value; + char *name; +} sdev_access_states[] = { + { SCSI_ACCESS_STATE_OPTIMAL, "active/optimized" }, + { SCSI_ACCESS_STATE_ACTIVE, "active/non-optimized" }, + { SCSI_ACCESS_STATE_STANDBY, "standby" }, + { SCSI_ACCESS_STATE_UNAVAILABLE, "unavailable" }, + { SCSI_ACCESS_STATE_LBA, "lba-dependent" }, + { SCSI_ACCESS_STATE_OFFLINE, "offline" }, + { SCSI_ACCESS_STATE_TRANSITIONING, "transitioning" }, +}; + +const char *scsi_access_state_name(unsigned char state) +{ + int i; + char *name = NULL; + + for (i = 0; i < ARRAY_SIZE(sdev_access_states); i++) { + if (sdev_access_states[i].value == state) { + name = sdev_access_states[i].name; + break; + } + } + return name; +} + static int check_set(unsigned long long *val, char *src) { char *last; @@ -760,11 +788,15 @@ show_vpd_##_page(struct file *filp, struct kobject *kobj, \ { \ struct device *dev = container_of(kobj, struct device, kobj); \ struct scsi_device *sdev = to_scsi_device(dev); \ + int ret; \ if (!sdev->vpd_##_page) \ return -EINVAL; \ - return memory_read_from_buffer(buf, count, &off, \ - sdev->vpd_##_page, \ + rcu_read_lock(); \ + ret = memory_read_from_buffer(buf, count, &off, \ + rcu_dereference(sdev->vpd_##_page), \ sdev->vpd_##_page##_len); \ + rcu_read_unlock(); \ + return ret; \ } \ static struct bin_attribute dev_attr_vpd_##_page = { \ .attr = {.name = __stringify(vpd_##_page), .mode = S_IRUGO }, \ @@ -901,6 +933,113 @@ static DEVICE_ATTR(queue_depth, S_IRUGO | S_IWUSR, sdev_show_queue_depth, sdev_store_queue_depth); static ssize_t +sdev_show_wwid(struct 
device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + ssize_t count; + + count = scsi_vpd_lun_id(sdev, buf, PAGE_SIZE); + if (count > 0) { + buf[count] = '\n'; + count++; + } + return count; +} +static DEVICE_ATTR(wwid, S_IRUGO, sdev_show_wwid, NULL); + +#ifdef CONFIG_SCSI_DH +static ssize_t +sdev_show_dh_state(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + + if (!sdev->handler) + return snprintf(buf, 20, "detached\n"); + + return snprintf(buf, 20, "%s\n", sdev->handler->name); +} + +static ssize_t +sdev_store_dh_state(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + int err = -EINVAL; + + if (sdev->sdev_state == SDEV_CANCEL || + sdev->sdev_state == SDEV_DEL) + return -ENODEV; + + if (!sdev->handler) { + /* + * Attach to a device handler + */ + err = scsi_dh_attach(sdev->request_queue, buf); + } else if (!strncmp(buf, "activate", 8)) { + /* + * Activate a device handler + */ + if (sdev->handler->activate) + err = sdev->handler->activate(sdev, NULL, NULL); + else + err = 0; + } else if (!strncmp(buf, "detach", 6)) { + /* + * Detach from a device handler + */ + sdev_printk(KERN_WARNING, sdev, + "can't detach handler %s.\n", + sdev->handler->name); + err = -EINVAL; + } + + return err < 0 ? err : count; +} + +static DEVICE_ATTR(dh_state, S_IRUGO | S_IWUSR, sdev_show_dh_state, + sdev_store_dh_state); + +static ssize_t +sdev_show_access_state(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + unsigned char access_state; + const char *access_state_name; + + if (!sdev->handler) + return -EINVAL; + + access_state = (sdev->access_state & SCSI_ACCESS_STATE_MASK); + access_state_name = scsi_access_state_name(access_state); + + return sprintf(buf, "%s\n", + access_state_name ? access_state_name : "unknown"); +} +static DEVICE_ATTR(access_state, S_IRUGO, sdev_show_access_state, NULL); + +static ssize_t +sdev_show_preferred_path(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + + if (!sdev->handler) + return -EINVAL; + + if (sdev->access_state & SCSI_ACCESS_STATE_PREFERRED) + return sprintf(buf, "1\n"); + else + return sprintf(buf, "0\n"); +} +static DEVICE_ATTR(preferred_path, S_IRUGO, sdev_show_preferred_path, NULL); +#endif + +static ssize_t sdev_show_queue_ramp_up_period(struct device *dev, struct device_attribute *attr, char *buf) @@ -945,9 +1084,33 @@ static umode_t scsi_sdev_attr_is_visible(struct kobject *kobj, !sdev->host->hostt->change_queue_depth) return 0; +#ifdef CONFIG_SCSI_DH + if (attr == &dev_attr_access_state.attr && + !sdev->handler) + return 0; + if (attr == &dev_attr_preferred_path.attr && + !sdev->handler) + return 0; +#endif return attr->mode; } +static umode_t scsi_sdev_bin_attr_is_visible(struct kobject *kobj, + struct bin_attribute *attr, int i) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct scsi_device *sdev = to_scsi_device(dev); + + if (attr == &dev_attr_vpd_pg80 && !sdev->vpd_pg80) + return 0; + + if (attr == &dev_attr_vpd_pg83 && !sdev->vpd_pg83) + return 0; + + return S_IRUGO; +} + 
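sdev_show_wwid above is a thin wrapper over the new scsi_vpd_lun_id() helper added in scsi_lib.c; any in-kernel code holding a struct scsi_device can pull the same stable identifier (and the ALUA target port group via scsi_vpd_tpg_id()). A hedged usage sketch, hypothetical function:

    #include <scsi/scsi_device.h>

    static void example_log_identity(struct scsi_device *sdev)
    {
            char id[64];    /* scsi_vpd_lun_id() wants at least 21 bytes */
            int len, group, rel_port = 0;

            len = scsi_vpd_lun_id(sdev, id, sizeof(id));
            if (len > 0 && len < (int)sizeof(id))
                    sdev_printk(KERN_INFO, sdev, "wwid: %s\n", id);

            group = scsi_vpd_tpg_id(sdev, &rel_port);
            if (group >= 0)
                    sdev_printk(KERN_INFO, sdev,
                                "tpg: %d relative port: %d\n", group, rel_port);
    }

Note the length check: if the identifier does not fit, scsi_vpd_lun_id() returns the full identifier length without zero-padding the buffer.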
/* Default template for device attributes. May NOT be modified */ static struct attribute *scsi_sdev_attrs[] = { &dev_attr_device_blocked.attr, @@ -969,6 +1132,12 @@ static struct attribute *scsi_sdev_attrs[] = { &dev_attr_modalias.attr, &dev_attr_queue_depth.attr, &dev_attr_queue_type.attr, + &dev_attr_wwid.attr, +#ifdef CONFIG_SCSI_DH + &dev_attr_dh_state.attr, + &dev_attr_access_state.attr, + &dev_attr_preferred_path.attr, +#endif &dev_attr_queue_ramp_up_period.attr, REF_EVT(media_change), REF_EVT(inquiry_change_reported), @@ -989,6 +1158,7 @@ static struct attribute_group scsi_sdev_attr_group = { .attrs = scsi_sdev_attrs, .bin_attrs = scsi_sdev_bin_attrs, .is_visible = scsi_sdev_attr_is_visible, + .is_bin_visible = scsi_sdev_bin_attr_is_visible, }; static const struct attribute_group *scsi_sdev_attr_groups[] = { @@ -1050,17 +1220,19 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev) scsi_autopm_get_device(sdev); - error = device_add(&sdev->sdev_gendev); - if (error) { + error = scsi_dh_add_device(sdev); + if (error) + /* + * device_handler is optional, so any error can be ignored + */ sdev_printk(KERN_INFO, sdev, - "failed to add device: %d\n", error); - return error; - } + "failed to add device handler: %d\n", error); - error = scsi_dh_add_device(sdev); + error = device_add(&sdev->sdev_gendev); if (error) { sdev_printk(KERN_INFO, sdev, - "failed to add device handler: %d\n", error); + "failed to add device: %d\n", error); + scsi_dh_remove_device(sdev); return error; } @@ -1192,16 +1364,18 @@ static void __scsi_remove_target(struct scsi_target *starget) void scsi_remove_target(struct device *dev) { struct Scsi_Host *shost = dev_to_shost(dev->parent); - struct scsi_target *starget; + struct scsi_target *starget, *last_target = NULL; unsigned long flags; restart: spin_lock_irqsave(shost->host_lock, flags); list_for_each_entry(starget, &shost->__targets, siblings) { - if (starget->state == STARGET_DEL) + if (starget->state == STARGET_DEL || + starget == last_target) continue; if (starget->dev.parent == dev || &starget->dev == dev) { kref_get(&starget->reap_ref); + last_target = starget; spin_unlock_irqrestore(shost->host_lock, flags); __scsi_remove_target(starget); scsi_target_reap(starget); diff --git a/drivers/scsi/scsi_transport_fc.c index 24eaaf66af71..8a8822641b26 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c @@ -2586,7 +2586,7 @@ fc_rport_final_delete(struct work_struct *work) transport_remove_device(dev); device_del(dev); transport_destroy_device(dev); - put_device(&shost->shost_gendev); /* for fc_host->rport list */ + scsi_host_put(shost); /* for fc_host->rport list */ put_device(dev); /* for self-reference */ } @@ -2650,7 +2650,7 @@ fc_rport_create(struct Scsi_Host *shost, int channel, else rport->scsi_target_id = -1; list_add_tail(&rport->peers, &fc_host->rports); - get_device(&shost->shost_gendev); /* for fc_host->rport list */ + scsi_host_get(shost); /* for fc_host->rport list */ spin_unlock_irqrestore(shost->host_lock, flags); @@ -2685,7 +2685,7 @@ delete_rport: transport_destroy_device(dev); spin_lock_irqsave(shost->host_lock, flags); list_del(&rport->peers); - put_device(&shost->shost_gendev); /* for fc_host->rport list */ + scsi_host_put(shost); /* for fc_host->rport list */ spin_unlock_irqrestore(shost->host_lock, flags); put_device(dev->parent); kfree(rport); @@ -3383,7 +3383,7 @@ fc_vport_setup(struct Scsi_Host *shost, int channel, struct device *pdev, fc_host->npiv_vports_inuse++; vport->number = fc_host->next_vport_number++; 
list_add_tail(&vport->peers, &fc_host->vports); - get_device(&shost->shost_gendev); /* for fc_host->vport list */ + scsi_host_get(shost); /* for fc_host->vport list */ spin_unlock_irqrestore(shost->host_lock, flags); @@ -3441,7 +3441,7 @@ delete_vport: transport_destroy_device(dev); spin_lock_irqsave(shost->host_lock, flags); list_del(&vport->peers); - put_device(&shost->shost_gendev); /* for fc_host->vport list */ + scsi_host_put(shost); /* for fc_host->vport list */ fc_host->npiv_vports_inuse--; spin_unlock_irqrestore(shost->host_lock, flags); put_device(dev->parent); @@ -3504,7 +3504,7 @@ fc_vport_terminate(struct fc_vport *vport) vport->flags |= FC_VPORT_DELETED; list_del(&vport->peers); fc_host->npiv_vports_inuse--; - put_device(&shost->shost_gendev); /* for fc_host->vport list */ + scsi_host_put(shost); /* for fc_host->vport list */ } spin_unlock_irqrestore(shost->host_lock, flags); diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index e4b3d8f4fd85..441481623fb9 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -4308,6 +4308,8 @@ static const struct { {ISCSI_PORT_SPEED_100MBPS, "100 Mbps" }, {ISCSI_PORT_SPEED_1GBPS, "1 Gbps" }, {ISCSI_PORT_SPEED_10GBPS, "10 Gbps" }, + {ISCSI_PORT_SPEED_25GBPS, "25 Gbps" }, + {ISCSI_PORT_SPEED_40GBPS, "40 Gbps" }, }; char *iscsi_get_port_speed_name(struct Scsi_Host *shost) diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c index 30d26e345dcc..80520e2f0fa2 100644 --- a/drivers/scsi/scsi_transport_sas.c +++ b/drivers/scsi/scsi_transport_sas.c @@ -341,6 +341,22 @@ static int do_sas_phy_delete(struct device *dev, void *data) } /** + * is_sas_attached - check if device is SAS attached + * @sdev: scsi device to check + * + * returns true if the device is SAS attached + */ +int is_sas_attached(struct scsi_device *sdev) +{ + struct Scsi_Host *shost = sdev->host; + + return shost->transportt->host_attrs.ac.class == + &sas_host_class.class; +} +EXPORT_SYMBOL(is_sas_attached); + + +/** * sas_remove_children - tear down a devices SAS data structures * @dev: device belonging to the sas object * @@ -367,6 +383,20 @@ void sas_remove_host(struct Scsi_Host *shost) EXPORT_SYMBOL(sas_remove_host); /** + * sas_get_address - return the SAS address of the device + * @sdev: scsi device + * + * Returns the SAS address of the scsi device + */ +u64 sas_get_address(struct scsi_device *sdev) +{ + struct sas_end_device *rdev = sas_sdev_to_rdev(sdev); + + return rdev->rphy.identify.sas_address; +} +EXPORT_SYMBOL(sas_get_address); + +/** * sas_tlr_supported - checking TLR bit in vpd 0x90 * @sdev: scsi device struct * diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 4e08d1cd704d..5a5457ac9cdb 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -648,7 +648,7 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode) */ if (sdkp->lbprz) { q->limits.discard_alignment = 0; - q->limits.discard_granularity = 1; + q->limits.discard_granularity = logical_block_size; } else { q->limits.discard_alignment = sdkp->unmap_alignment * logical_block_size; @@ -761,7 +761,7 @@ static int sd_setup_discard_cmnd(struct scsi_cmnd *cmd) break; default: - ret = BLKPREP_KILL; + ret = BLKPREP_INVALID; goto out; } @@ -839,7 +839,7 @@ static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd) int ret; if (sdkp->device->no_write_same) - return BLKPREP_KILL; + return BLKPREP_INVALID; BUG_ON(bio_offset(bio) || bio_iovec(bio).bv_len != sdp->sector_size); @@ -2893,7 
+2893,7 @@ static int sd_revalidate_disk(struct gendisk *disk) sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS && sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_CACHE_SIZE) rw_max = q->limits.io_opt = - logical_to_sectors(sdp, sdkp->opt_xfer_blocks); + sdkp->opt_xfer_blocks * sdp->sector_size; else rw_max = BLK_DEF_MAX_SECTORS; @@ -3268,8 +3268,8 @@ static int sd_suspend_common(struct device *dev, bool ignore_stop_errors) struct scsi_disk *sdkp = dev_get_drvdata(dev); int ret = 0; - if (!sdkp) - return 0; /* this can happen */ + if (!sdkp) /* E.g.: runtime suspend following sd_remove() */ + return 0; if (sdkp->WCE && sdkp->media_present) { sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n"); @@ -3308,6 +3308,9 @@ static int sd_resume(struct device *dev) { struct scsi_disk *sdkp = dev_get_drvdata(dev); + if (!sdkp) /* E.g.: runtime resume at the start of sd_probe() */ + return 0; + if (!sdkp->device->manage_start_stop) return 0; diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c index 044d06410d4c..53ef1cb6418e 100644 --- a/drivers/scsi/ses.c +++ b/drivers/scsi/ses.c @@ -34,6 +34,8 @@ #include <scsi/scsi_driver.h> #include <scsi/scsi_host.h> +#include <scsi/scsi_transport_sas.h> + struct ses_device { unsigned char *page1; unsigned char *page1_types; @@ -579,31 +581,15 @@ static void ses_enclosure_data_process(struct enclosure_device *edev, static void ses_match_to_enclosure(struct enclosure_device *edev, struct scsi_device *sdev) { - unsigned char *desc; struct efd efd = { .addr = 0, }; ses_enclosure_data_process(edev, to_scsi_device(edev->edev.parent), 0); - if (!sdev->vpd_pg83_len) - return; - - desc = sdev->vpd_pg83 + 4; - while (desc < sdev->vpd_pg83 + sdev->vpd_pg83_len) { - enum scsi_protocol proto = desc[0] >> 4; - u8 code_set = desc[0] & 0x0f; - u8 piv = desc[1] & 0x80; - u8 assoc = (desc[1] & 0x30) >> 4; - u8 type = desc[1] & 0x0f; - u8 len = desc[3]; - - if (piv && code_set == 1 && assoc == 1 - && proto == SCSI_PROTOCOL_SAS && type == 3 && len == 8) - efd.addr = get_unaligned_be64(&desc[4]); + if (is_sas_attached(sdev)) + efd.addr = sas_get_address(sdev); - desc += len + 4; - } if (efd.addr) { efd.dev = &sdev->sdev_gendev; diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 503ab8b46c0b..ae7d9bdf409c 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -652,7 +652,8 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) else hp->dxfer_direction = (mxsize > 0) ? 
SG_DXFER_FROM_DEV : SG_DXFER_NONE; hp->dxfer_len = mxsize; - if (hp->dxfer_direction == SG_DXFER_TO_DEV) + if ((hp->dxfer_direction == SG_DXFER_TO_DEV) || + (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV)) hp->dxferp = (char __user *)buf + cmd_size; else hp->dxferp = NULL; @@ -1261,7 +1262,7 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma) } sfp->mmap_called = 1; - vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; + vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; vma->vm_private_data = sfp; vma->vm_ops = &sg_mmap_vm_ops; return 0; diff --git a/drivers/scsi/sim710.c b/drivers/scsi/sim710.c index 3b3b56f4a830..82ed99848378 100644 --- a/drivers/scsi/sim710.c +++ b/drivers/scsi/sim710.c @@ -176,8 +176,7 @@ static struct eisa_device_id sim710_eisa_ids[] = { }; MODULE_DEVICE_TABLE(eisa, sim710_eisa_ids); -static __init int -sim710_eisa_probe(struct device *dev) +static int sim710_eisa_probe(struct device *dev) { struct eisa_device *edev = to_eisa_device(dev); unsigned long io_addr = edev->base_addr; diff --git a/drivers/scsi/snic/snic_ctl.c b/drivers/scsi/snic/snic_ctl.c index aebe75320ed3..ab0e06b0b4ff 100644 --- a/drivers/scsi/snic/snic_ctl.c +++ b/drivers/scsi/snic/snic_ctl.c @@ -75,7 +75,7 @@ snic_ver_enc(const char *s) continue; } - if (i > 4 || !isdigit(c)) + if (i > 3 || !isdigit(c)) goto end; v[i] = v[i] * 10 + (c - '0'); diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index 8bd54a64efd6..64c867405ad4 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c @@ -144,6 +144,9 @@ static int sr_runtime_suspend(struct device *dev) { struct scsi_cd *cd = dev_get_drvdata(dev); + if (!cd) /* E.g.: runtime suspend following sr_remove() */ + return 0; + if (cd->media_present) return -EBUSY; else @@ -985,6 +988,7 @@ static int sr_remove(struct device *dev) scsi_autopm_get_device(cd->device); del_gendisk(cd->disk); + dev_set_drvdata(dev, NULL); mutex_lock(&sr_ref_mutex); kref_put(&cd->kref, sr_kref_release); diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index 2e522951b619..607b0a505844 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c @@ -9,7 +9,7 @@ Steve Hirsch, Andreas Koppenh"ofer, Michael Leodolter, Eyal Lebedinsky, Michael Schaefer, J"org Weule, and Eric Youngdale. - Copyright 1992 - 2010 Kai Makisara + Copyright 1992 - 2016 Kai Makisara email Kai.Makisara@kolumbus.fi Some small formal changes - aeb, 950809 @@ -17,7 +17,7 @@ Last modified: 18-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support */ -static const char *verstr = "20101219"; +static const char *verstr = "20160209"; #include <linux/module.h> @@ -3296,7 +3296,10 @@ static int switch_partition(struct scsi_tape *STp) #define PP_OFF_RESERVED 7 #define PP_BIT_IDP 0x20 +#define PP_BIT_FDP 0x80 #define PP_MSK_PSUM_MB 0x10 +#define PP_MSK_PSUM_UNITS 0x18 +#define PP_MSK_POFM 0x04 /* Get the number of partitions on the tape. As a side effect reads the mode page into the tape buffer. 
*/ @@ -3322,6 +3325,29 @@ static int nbr_partitions(struct scsi_tape *STp) } +static int format_medium(struct scsi_tape *STp, int format) +{ + int result = 0; + int timeout = STp->long_timeout; + unsigned char scmd[MAX_COMMAND_SIZE]; + struct st_request *SRpnt; + + memset(scmd, 0, MAX_COMMAND_SIZE); + scmd[0] = FORMAT_UNIT; + scmd[2] = format; + if (STp->immediate) { + scmd[1] |= 1; /* Don't wait for completion */ + timeout = STp->device->request_queue->rq_timeout; + } + DEBC_printk(STp, "Sending FORMAT MEDIUM\n"); + SRpnt = st_do_scsi(NULL, STp, scmd, 0, DMA_NONE, + timeout, MAX_RETRIES, 1); + if (!SRpnt) + result = STp->buffer->syscall_result; + return result; +} + + /* Partition the tape into two partitions if size > 0 or one partition if size == 0. @@ -3340,11 +3366,16 @@ static int nbr_partitions(struct scsi_tape *STp) and 10 when 1 partition is defined (information from Eric Lee Green). This is acceptable also to some other old drives and enforced if the first partition size field is used for the first additional partition size. + + For drives that advertise SCSI-3 or newer, use the SSC-3 methods. */ static int partition_tape(struct scsi_tape *STp, int size) { int result; + int target_partition; + bool scsi3 = STp->device->scsi_level >= SCSI_3, needs_format = false; int pgo, psd_cnt, psdo; + int psum = PP_MSK_PSUM_MB, units = 0; unsigned char *bp; result = read_mode_page(STp, PART_PAGE, 0); @@ -3352,6 +3383,12 @@ static int partition_tape(struct scsi_tape *STp, int size) DEBC_printk(STp, "Can't read partition mode page.\n"); return result; } + target_partition = 1; + if (size < 0) { + target_partition = 0; + size = -size; + } + /* The mode page is in the buffer. Let's modify it and write it. */ bp = (STp->buffer)->b_data; pgo = MODE_HEADER_LENGTH + bp[MH_OFF_BDESCS_LENGTH]; @@ -3359,9 +3396,52 @@ bp[pgo + MP_OFF_PAGE_LENGTH] + 2); psd_cnt = (bp[pgo + MP_OFF_PAGE_LENGTH] + 2 - PART_PAGE_FIXED_LENGTH) / 2; + + if (scsi3) { + needs_format = (bp[pgo + PP_OFF_FLAGS] & PP_MSK_POFM) != 0; + if (needs_format && size == 0) { + /* No need to write the mode page when clearing + * partitioning + */ + DEBC_printk(STp, "Formatting tape with one partition.\n"); + result = format_medium(STp, 0); + goto out; + } + if (needs_format) /* Leave the old value for HP DATs claiming SCSI_3 */ + psd_cnt = 2; + if ((bp[pgo + PP_OFF_FLAGS] & PP_MSK_PSUM_UNITS) == PP_MSK_PSUM_UNITS) { + /* Use units scaling for large partitions if the device + * suggests it and no precision is lost. Required for IBM + * TS1140/50 drives that don't support MB units. 
+ */ + if (size >= 1000 && (size % 1000) == 0) { + size /= 1000; + psum = PP_MSK_PSUM_UNITS; + units = 9; /* GB */ + } + } + /* Try it anyway if too large to specify in MB */ + if (psum == PP_MSK_PSUM_MB && size >= 65534) { + size /= 1000; + psum = PP_MSK_PSUM_UNITS; + units = 9; /* GB */ + } + } + + if (size >= 65535 || /* Does not fit into two bytes */ + (target_partition == 0 && psd_cnt < 2)) { + result = -EINVAL; + goto out; + } + psdo = pgo + PART_PAGE_FIXED_LENGTH; - if (psd_cnt > bp[pgo + PP_OFF_MAX_ADD_PARTS]) { - bp[psdo] = bp[psdo + 1] = 0xff; /* Rest of the tape */ + /* The second condition is for HP DDS which use only one partition size + * descriptor + */ + if (target_partition > 0 && + (psd_cnt > bp[pgo + PP_OFF_MAX_ADD_PARTS] || + bp[pgo + PP_OFF_MAX_ADD_PARTS] != 1)) { + bp[psdo] = bp[psdo + 1] = 0xff; /* Rest to partition 0 */ psdo += 2; } memset(bp + psdo, 0, bp[pgo + PP_OFF_NBR_ADD_PARTS] * 2); @@ -3370,7 +3450,7 @@ psd_cnt, bp[pgo + PP_OFF_MAX_ADD_PARTS], bp[pgo + PP_OFF_NBR_ADD_PARTS]); - if (size <= 0) { + if (size == 0) { bp[pgo + PP_OFF_NBR_ADD_PARTS] = 0; if (psd_cnt <= bp[pgo + PP_OFF_MAX_ADD_PARTS]) bp[pgo + MP_OFF_PAGE_LENGTH] = 6; @@ -3378,22 +3458,37 @@ } else { bp[psdo] = (size >> 8) & 0xff; bp[psdo + 1] = size & 0xff; + if (target_partition == 0) + bp[psdo + 2] = bp[psdo + 3] = 0xff; bp[pgo + 3] = 1; if (bp[pgo + MP_OFF_PAGE_LENGTH] < 8) bp[pgo + MP_OFF_PAGE_LENGTH] = 8; - DEBC_printk(STp, "Formatting tape with two partitions " - "(1 = %d MB).\n", size); + DEBC_printk(STp, + "Formatting tape with two partitions (%i = %d MB).\n", + target_partition, units > 0 ? size * 1000 : size); } bp[pgo + PP_OFF_PART_UNITS] = 0; bp[pgo + PP_OFF_RESERVED] = 0; - bp[pgo + PP_OFF_FLAGS] = PP_BIT_IDP | PP_MSK_PSUM_MB; + if (size != 1 || units != 0) { + bp[pgo + PP_OFF_FLAGS] = PP_BIT_IDP | psum | + (bp[pgo + PP_OFF_FLAGS] & 0x07); + bp[pgo + PP_OFF_PART_UNITS] = units; + } else + bp[pgo + PP_OFF_FLAGS] = PP_BIT_FDP | + (bp[pgo + PP_OFF_FLAGS] & 0x1f); + bp[pgo + MP_OFF_PAGE_LENGTH] = 6 + psd_cnt * 2; result = write_mode_page(STp, PART_PAGE, 1); + + if (!result && needs_format) + result = format_medium(STp, 1); + if (result) { st_printk(KERN_INFO, STp, "Partitioning of tape failed.\n"); result = (-EIO); } +out: return result; }
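The unit-selection logic above enforces one invariant: the partition size has to fit the mode page's 16-bit size field, so sizes stay in megabytes while they fit and fall back to gigabyte units (partition units exponent 9) when they do not, accepting rounding in the forced case. A standalone sketch of the same arithmetic with a hypothetical helper; note the driver additionally gates the lossless conversion on the drive advertising PSUM unit support:

    #include <stdio.h>

    /* Returns the scaled size; *exponent is 6 (MB, 10^6) or 9 (GB, 10^9). */
    static unsigned int scale_partition_size(unsigned int size_mb, int *exponent)
    {
            *exponent = 6;
            /* lossless switch to GB when the size divides evenly */
            if (size_mb >= 1000 && (size_mb % 1000) == 0) {
                    *exponent = 9;
                    return size_mb / 1000;
            }
            /* forced, lossy switch when the value cannot fit in two bytes */
            if (size_mb >= 65534) {
                    *exponent = 9;
                    return size_mb / 1000;  /* up to 999 MB rounded away */
            }
            return size_mb;
    }

    int main(void)
    {
            int e;
            unsigned int v = scale_partition_size(70000, &e);
            printf("70000 MB -> %u x 10^%d bytes\n", v, e);   /* 70 x 10^9 */
            return 0;
    }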
2 : 1; STps->drv_block = STps->drv_file = 0; retval = 0; goto out; diff --git a/drivers/scsi/st.h b/drivers/scsi/st.h index b6486b5d8681..8c732c8de015 100644 --- a/drivers/scsi/st.h +++ b/drivers/scsi/st.h @@ -148,8 +148,6 @@ struct scsi_tape { int tape_type; int long_timeout; /* timeout for commands known to take long time */ - unsigned long max_pfn; /* the maximum page number reachable by the HBA */ - /* Mode characteristics */ struct st_modedef modes[ST_NBR_MODES]; int current_mode; diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c index 2de28d7a0b04..5b23175a584c 100644 --- a/drivers/scsi/stex.c +++ b/drivers/scsi/stex.c @@ -1,7 +1,7 @@ /* * SuperTrak EX Series Storage Controller driver for Linux * - * Copyright (C) 2005-2009 Promise Technology Inc. + * Copyright (C) 2005-2015 Promise Technology Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -38,11 +38,11 @@ #include <scsi/scsi_eh.h> #define DRV_NAME "stex" -#define ST_DRIVER_VERSION "4.6.0000.4" -#define ST_VER_MAJOR 4 -#define ST_VER_MINOR 6 -#define ST_OEM 0 -#define ST_BUILD_VER 4 +#define ST_DRIVER_VERSION "5.00.0000.01" +#define ST_VER_MAJOR 5 +#define ST_VER_MINOR 00 +#define ST_OEM 0000 +#define ST_BUILD_VER 01 enum { /* MU register offset */ @@ -84,6 +84,8 @@ enum { MU_STATE_STARTED = 2, MU_STATE_RESETTING = 3, MU_STATE_FAILED = 4, + MU_STATE_STOP = 5, + MU_STATE_NOCONNECT = 6, MU_MAX_DELAY = 120, MU_HANDSHAKE_SIGNATURE = 0x55aaaa55, @@ -165,6 +167,14 @@ enum { ST_ADDITIONAL_MEM = 0x200000, ST_ADDITIONAL_MEM_MIN = 0x80000, + PMIC_SHUTDOWN = 0x0D, + PMIC_REUMSE = 0x10, + ST_IGNORED = -1, + ST_NOTHANDLED = 7, + ST_S3 = 3, + ST_S4 = 4, + ST_S5 = 5, + ST_S6 = 6, }; struct st_sgitem { @@ -328,6 +338,7 @@ struct st_hba { u16 rq_count; u16 rq_size; u16 sts_count; + u8 supports_pm; }; struct st_card_info { @@ -536,6 +547,27 @@ stex_ss_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag) readl(hba->mmio_base + YH2I_REQ); /* flush */ } +static void return_abnormal_state(struct st_hba *hba, int status) +{ + struct st_ccb *ccb; + unsigned long flags; + u16 tag; + + spin_lock_irqsave(hba->host->host_lock, flags); + for (tag = 0; tag < hba->host->can_queue; tag++) { + ccb = &hba->ccb[tag]; + if (ccb->req == NULL) + continue; + ccb->req = NULL; + if (ccb->cmd) { + scsi_dma_unmap(ccb->cmd); + ccb->cmd->result = status << 16; + ccb->cmd->scsi_done(ccb->cmd); + ccb->cmd = NULL; + } + } + spin_unlock_irqrestore(hba->host->host_lock, flags); +} static int stex_slave_config(struct scsi_device *sdev) { @@ -559,8 +591,12 @@ stex_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) id = cmd->device->id; lun = cmd->device->lun; hba = (struct st_hba *) &host->hostdata[0]; - - if (unlikely(hba->mu_status == MU_STATE_RESETTING)) + if (hba->mu_status == MU_STATE_NOCONNECT) { + cmd->result = DID_NO_CONNECT; + done(cmd); + return 0; + } + if (unlikely(hba->mu_status != MU_STATE_STARTED)) return SCSI_MLQUEUE_HOST_BUSY; switch (cmd->cmnd[0]) { @@ -1259,10 +1295,8 @@ static void stex_ss_reset(struct st_hba *hba) static int stex_do_reset(struct st_hba *hba) { - struct st_ccb *ccb; unsigned long flags; unsigned int mu_status = MU_STATE_RESETTING; - u16 tag; spin_lock_irqsave(hba->host->host_lock, flags); if (hba->mu_status == MU_STATE_STARTING) { @@ -1296,20 +1330,8 @@ static int stex_do_reset(struct st_hba *hba) else if (hba->cardtype == st_yel) stex_ss_reset(hba); - spin_lock_irqsave(hba->host->host_lock, flags); - for (tag = 0; tag < 
hba->host->can_queue; tag++) {
-		ccb = &hba->ccb[tag];
-		if (ccb->req == NULL)
-			continue;
-		ccb->req = NULL;
-		if (ccb->cmd) {
-			scsi_dma_unmap(ccb->cmd);
-			ccb->cmd->result = DID_RESET << 16;
-			ccb->cmd->scsi_done(ccb->cmd);
-			ccb->cmd = NULL;
-		}
-	}
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+	return_abnormal_state(hba, DID_RESET);
 
 	if (stex_handshake(hba) == 0)
 		return 0;
 
@@ -1560,6 +1582,25 @@ static int stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	hba->cardtype = (unsigned int) id->driver_data;
 	ci = &stex_card_info[hba->cardtype];
 
+	switch (id->subdevice) {
+	case 0x4221:
+	case 0x4222:
+	case 0x4223:
+	case 0x4224:
+	case 0x4225:
+	case 0x4226:
+	case 0x4227:
+	case 0x4261:
+	case 0x4262:
+	case 0x4263:
+	case 0x4264:
+	case 0x4265:
+		break;
+	default:
+		if (hba->cardtype == st_yel)
+			hba->supports_pm = 1;
+	}
+
 	sts_offset = scratch_offset = (ci->rq_count+1) * ci->rq_size;
 	if (hba->cardtype == st_yel)
 		sts_offset += (ci->sts_count+1) * sizeof(u32);
@@ -1685,7 +1726,7 @@ out_disable:
 	return err;
 }
 
-static void stex_hba_stop(struct st_hba *hba)
+static void stex_hba_stop(struct st_hba *hba, int st_sleep_mic)
 {
 	struct req_msg *req;
 	struct st_msg_header *msg_h;
@@ -1694,6 +1735,15 @@ static void stex_hba_stop(struct st_hba *hba)
 	u16 tag = 0;
 
 	spin_lock_irqsave(hba->host->host_lock, flags);
+
+	if (hba->cardtype == st_yel && hba->supports_pm == 1) {
+		if (st_sleep_mic == ST_NOTHANDLED) {
+			spin_unlock_irqrestore(hba->host->host_lock, flags);
+			return;
+		}
+	}
 	req = hba->alloc_rq(hba);
 	if (hba->cardtype == st_yel) {
 		msg_h = (struct st_msg_header *)req - 1;
@@ -1701,11 +1751,18 @@ static void stex_hba_stop(struct st_hba *hba)
 	} else
 		memset(req, 0, hba->rq_size);
 
-	if (hba->cardtype == st_yosemite || hba->cardtype == st_yel) {
+	if ((hba->cardtype == st_yosemite || hba->cardtype == st_yel)
+		&& st_sleep_mic == ST_IGNORED) {
 		req->cdb[0] = MGT_CMD;
 		req->cdb[1] = MGT_CMD_SIGNATURE;
 		req->cdb[2] = CTLR_CONFIG_CMD;
 		req->cdb[3] = CTLR_SHUTDOWN;
+	} else if (hba->cardtype == st_yel && st_sleep_mic != ST_IGNORED) {
+		req->cdb[0] = MGT_CMD;
+		req->cdb[1] = MGT_CMD_SIGNATURE;
+		req->cdb[2] = CTLR_CONFIG_CMD;
+		req->cdb[3] = PMIC_SHUTDOWN;
+		req->cdb[4] = st_sleep_mic;
 	} else {
 		req->cdb[0] = CONTROLLER_CMD;
 		req->cdb[1] = CTLR_POWER_STATE_CHANGE;
@@ -1725,10 +1782,12 @@ static void stex_hba_stop(struct st_hba *hba)
 	while (hba->ccb[tag].req_type & PASSTHRU_REQ_TYPE) {
 		if (time_after(jiffies, before + ST_INTERNAL_TIMEOUT * HZ)) {
 			hba->ccb[tag].req_type = 0;
+			hba->mu_status = MU_STATE_STOP;
 			return;
 		}
 		msleep(1);
 	}
+	hba->mu_status = MU_STATE_STOP;
 }
 
 static void stex_hba_free(struct st_hba *hba)
@@ -1751,9 +1810,11 @@ static void stex_remove(struct pci_dev *pdev)
 {
 	struct st_hba *hba = pci_get_drvdata(pdev);
 
+	hba->mu_status = MU_STATE_NOCONNECT;
+	return_abnormal_state(hba, DID_NO_CONNECT);
 	scsi_remove_host(hba->host);
 
-	stex_hba_stop(hba);
+	scsi_block_requests(hba->host);
 
 	stex_hba_free(hba);
 
@@ -1766,9 +1827,43 @@ static void stex_shutdown(struct pci_dev *pdev)
 {
 	struct st_hba *hba = pci_get_drvdata(pdev);
 
-	stex_hba_stop(hba);
+	if (hba->supports_pm == 0)
+		stex_hba_stop(hba, ST_IGNORED);
+	else
+		stex_hba_stop(hba, ST_S5);
+}
+
+static int stex_choice_sleep_mic(pm_message_t state)
+{
+	switch (state.event) {
+	case PM_EVENT_SUSPEND:
+		return ST_S3;
+	case PM_EVENT_HIBERNATE:
+		return ST_S4;
+	default:
+		return ST_NOTHANDLED;
+	}
+}
+
+static int stex_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct st_hba *hba = pci_get_drvdata(pdev);
+
+	if (hba->cardtype
== st_yel && hba->supports_pm == 1) + stex_hba_stop(hba, stex_choice_sleep_mic(state)); + else + stex_hba_stop(hba, ST_IGNORED); + return 0; } +static int stex_resume(struct pci_dev *pdev) +{ + struct st_hba *hba = pci_get_drvdata(pdev); + + hba->mu_status = MU_STATE_STARTING; + stex_handshake(hba); + return 0; +} MODULE_DEVICE_TABLE(pci, stex_pci_tbl); static struct pci_driver stex_pci_driver = { @@ -1777,6 +1872,8 @@ static struct pci_driver stex_pci_driver = { .probe = stex_probe, .remove = stex_remove, .shutdown = stex_shutdown, + .suspend = stex_suspend, + .resume = stex_resume, }; static int __init stex_init(void) diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index 3fba42ad9fb8..3ddcabb790a8 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -41,6 +41,8 @@ #include <scsi/scsi_eh.h> #include <scsi/scsi_devinfo.h> #include <scsi/scsi_dbg.h> +#include <scsi/scsi_transport_fc.h> +#include <scsi/scsi_transport.h> /* * All wire protocol details (storage protocol between the guest and the host) @@ -92,9 +94,8 @@ enum vstor_packet_operation { */ struct hv_fc_wwn_packet { - bool primary_active; - u8 reserved1; - u8 reserved2; + u8 primary_active; + u8 reserved1[3]; u8 primary_port_wwn[8]; u8 primary_node_wwn[8]; u8 secondary_port_wwn[8]; @@ -164,6 +165,26 @@ static int sense_buffer_size = PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE; */ static int vmstor_proto_version; +#define STORVSC_LOGGING_NONE 0 +#define STORVSC_LOGGING_ERROR 1 +#define STORVSC_LOGGING_WARN 2 + +static int logging_level = STORVSC_LOGGING_ERROR; +module_param(logging_level, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(logging_level, + "Logging level, 0 - None, 1 - Error (default), 2 - Warning."); + +static inline bool do_logging(int level) +{ + return logging_level >= level; +} + +#define storvsc_log(dev, level, fmt, ...) \ +do { \ + if (do_logging(level)) \ + dev_warn(&(dev)->device, fmt, ##__VA_ARGS__); \ +} while (0) + struct vmscsi_win8_extension { /* * The following were added in Windows 8 @@ -370,7 +391,7 @@ module_param(storvsc_ringbuffer_size, int, S_IRUGO); MODULE_PARM_DESC(storvsc_ringbuffer_size, "Ring buffer size (bytes)"); module_param(storvsc_vcpus_per_sub_channel, int, S_IRUGO); -MODULE_PARM_DESC(vcpus_per_sub_channel, "Ratio of VCPUs to subchannels"); +MODULE_PARM_DESC(storvsc_vcpus_per_sub_channel, "Ratio of VCPUs to subchannels"); /* * Timeout in seconds for all devices managed by this driver. */ @@ -378,6 +399,9 @@ static int storvsc_timeout = 180; static int msft_blist_flags = BLIST_TRY_VPD_PAGES; +#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) +static struct scsi_transport_template *fc_transport_template; +#endif static void storvsc_on_channel_callback(void *context); @@ -437,6 +461,11 @@ struct storvsc_device { /* Used for vsc/vsp channel reset process */ struct storvsc_cmd_request init_request; struct storvsc_cmd_request reset_request; + /* + * Currently active port and node names for FC devices. 
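+	 * These are filled by cache_wwn() from the host's FCHBA_DATA
+	 * reply and reported through the FC transport attributes.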
+ */ + u64 node_name; + u64 port_name; }; struct hv_host_device { @@ -449,19 +478,18 @@ struct hv_host_device { struct storvsc_scan_work { struct work_struct work; struct Scsi_Host *host; - uint lun; + u8 lun; + u8 tgt_id; }; static void storvsc_device_scan(struct work_struct *work) { struct storvsc_scan_work *wrk; - uint lun; struct scsi_device *sdev; wrk = container_of(work, struct storvsc_scan_work, work); - lun = wrk->lun; - sdev = scsi_device_lookup(wrk->host, 0, 0, lun); + sdev = scsi_device_lookup(wrk->host, 0, wrk->tgt_id, wrk->lun); if (!sdev) goto done; scsi_rescan_device(&sdev->sdev_gendev); @@ -512,7 +540,7 @@ static void storvsc_remove_lun(struct work_struct *work) if (!scsi_host_get(wrk->host)) goto done; - sdev = scsi_device_lookup(wrk->host, 0, 0, wrk->lun); + sdev = scsi_device_lookup(wrk->host, 0, wrk->tgt_id, wrk->lun); if (sdev) { scsi_remove_device(sdev); @@ -676,29 +704,36 @@ static void handle_multichannel_storage(struct hv_device *device, int max_chns) vmbus_are_subchannels_present(device->channel); } -static int storvsc_channel_init(struct hv_device *device) +static void cache_wwn(struct storvsc_device *stor_device, + struct vstor_packet *vstor_packet) { - struct storvsc_device *stor_device; - struct storvsc_cmd_request *request; - struct vstor_packet *vstor_packet; - int ret, t, i; - int max_chns; - bool process_sub_channels = false; + /* + * Cache the currently active port and node ww names. + */ + if (vstor_packet->wwn_packet.primary_active) { + stor_device->node_name = + wwn_to_u64(vstor_packet->wwn_packet.primary_node_wwn); + stor_device->port_name = + wwn_to_u64(vstor_packet->wwn_packet.primary_port_wwn); + } else { + stor_device->node_name = + wwn_to_u64(vstor_packet->wwn_packet.secondary_node_wwn); + stor_device->port_name = + wwn_to_u64(vstor_packet->wwn_packet.secondary_port_wwn); + } +} - stor_device = get_out_stor_device(device); - if (!stor_device) - return -ENODEV; - request = &stor_device->init_request; +static int storvsc_execute_vstor_op(struct hv_device *device, + struct storvsc_cmd_request *request, + bool status_check) +{ + struct vstor_packet *vstor_packet; + int ret, t; + vstor_packet = &request->vstor_packet; - /* - * Now, initiate the vsc/vsp initialization protocol on the open - * channel - */ - memset(request, 0, sizeof(struct storvsc_cmd_request)); init_completion(&request->wait_event); - vstor_packet->operation = VSTOR_OPERATION_BEGIN_INITIALIZATION; vstor_packet->flags = REQUEST_COMPLETION_FLAG; ret = vmbus_sendpacket(device->channel, vstor_packet, @@ -708,27 +743,56 @@ static int storvsc_channel_init(struct hv_device *device) VM_PKT_DATA_INBAND, VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); if (ret != 0) - goto cleanup; + return ret; t = wait_for_completion_timeout(&request->wait_event, 5*HZ); - if (t == 0) { - ret = -ETIMEDOUT; - goto cleanup; - } + if (t == 0) + return -ETIMEDOUT; + + if (!status_check) + return ret; if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO || - vstor_packet->status != 0) { - ret = -EINVAL; - goto cleanup; - } + vstor_packet->status != 0) + return -EINVAL; + + return ret; +} + +static int storvsc_channel_init(struct hv_device *device, bool is_fc) +{ + struct storvsc_device *stor_device; + struct storvsc_cmd_request *request; + struct vstor_packet *vstor_packet; + int ret, i; + int max_chns; + bool process_sub_channels = false; + stor_device = get_out_stor_device(device); + if (!stor_device) + return -ENODEV; + + request = &stor_device->init_request; + vstor_packet = &request->vstor_packet; + + /* + 
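+	 * The sequence from here is BEGIN_INITIALIZATION, a protocol
+	 * version query, QUERY_PROPERTIES, FCHBA_DATA for FC devices,
+	 * then END_INITIALIZATION, each issued through
+	 * storvsc_execute_vstor_op().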
* Now, initiate the vsc/vsp initialization protocol on the open + * channel + */ + memset(request, 0, sizeof(struct storvsc_cmd_request)); + vstor_packet->operation = VSTOR_OPERATION_BEGIN_INITIALIZATION; + ret = storvsc_execute_vstor_op(device, request, true); + if (ret) + return ret; + /* + * Query host supported protocol version. + */ for (i = 0; i < ARRAY_SIZE(vmstor_protocols); i++) { /* reuse the packet for version range supported */ memset(vstor_packet, 0, sizeof(struct vstor_packet)); vstor_packet->operation = VSTOR_OPERATION_QUERY_PROTOCOL_VERSION; - vstor_packet->flags = REQUEST_COMPLETION_FLAG; vstor_packet->version.major_minor = vmstor_protocols[i].protocol_version; @@ -737,26 +801,12 @@ static int storvsc_channel_init(struct hv_device *device) * The revision number is only used in Windows; set it to 0. */ vstor_packet->version.revision = 0; - - ret = vmbus_sendpacket(device->channel, vstor_packet, - (sizeof(struct vstor_packet) - - vmscsi_size_delta), - (unsigned long)request, - VM_PKT_DATA_INBAND, - VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); + ret = storvsc_execute_vstor_op(device, request, false); if (ret != 0) - goto cleanup; + return ret; - t = wait_for_completion_timeout(&request->wait_event, 5*HZ); - if (t == 0) { - ret = -ETIMEDOUT; - goto cleanup; - } - - if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO) { - ret = -EINVAL; - goto cleanup; - } + if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO) + return -EINVAL; if (vstor_packet->status == 0) { vmstor_proto_version = @@ -772,37 +822,15 @@ static int storvsc_channel_init(struct hv_device *device) } } - if (vstor_packet->status != 0) { - ret = -EINVAL; - goto cleanup; - } + if (vstor_packet->status != 0) + return -EINVAL; memset(vstor_packet, 0, sizeof(struct vstor_packet)); vstor_packet->operation = VSTOR_OPERATION_QUERY_PROPERTIES; - vstor_packet->flags = REQUEST_COMPLETION_FLAG; - - ret = vmbus_sendpacket(device->channel, vstor_packet, - (sizeof(struct vstor_packet) - - vmscsi_size_delta), - (unsigned long)request, - VM_PKT_DATA_INBAND, - VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); - + ret = storvsc_execute_vstor_op(device, request, true); if (ret != 0) - goto cleanup; - - t = wait_for_completion_timeout(&request->wait_event, 5*HZ); - if (t == 0) { - ret = -ETIMEDOUT; - goto cleanup; - } - - if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO || - vstor_packet->status != 0) { - ret = -EINVAL; - goto cleanup; - } + return ret; /* * Check to see if multi-channel support is there. @@ -818,37 +846,34 @@ static int storvsc_channel_init(struct hv_device *device) stor_device->max_transfer_bytes = vstor_packet->storage_channel_properties.max_transfer_bytes; - memset(vstor_packet, 0, sizeof(struct vstor_packet)); - vstor_packet->operation = VSTOR_OPERATION_END_INITIALIZATION; - vstor_packet->flags = REQUEST_COMPLETION_FLAG; - - ret = vmbus_sendpacket(device->channel, vstor_packet, - (sizeof(struct vstor_packet) - - vmscsi_size_delta), - (unsigned long)request, - VM_PKT_DATA_INBAND, - VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); + if (!is_fc) + goto done; + /* + * For FC devices retrieve FC HBA data. + */ + memset(vstor_packet, 0, sizeof(struct vstor_packet)); + vstor_packet->operation = VSTOR_OPERATION_FCHBA_DATA; + ret = storvsc_execute_vstor_op(device, request, true); if (ret != 0) - goto cleanup; + return ret; - t = wait_for_completion_timeout(&request->wait_event, 5*HZ); - if (t == 0) { - ret = -ETIMEDOUT; - goto cleanup; - } + /* + * Cache the currently active port and node ww names. 
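+	 * (wwn_to_u64() reads each 8-byte name as big-endian.)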
+ */ + cache_wwn(stor_device, vstor_packet); - if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO || - vstor_packet->status != 0) { - ret = -EINVAL; - goto cleanup; - } +done: + + memset(vstor_packet, 0, sizeof(struct vstor_packet)); + vstor_packet->operation = VSTOR_OPERATION_END_INITIALIZATION; + ret = storvsc_execute_vstor_op(device, request, true); + if (ret != 0) + return ret; if (process_sub_channels) handle_multichannel_storage(device, max_chns); - -cleanup: return ret; } @@ -889,8 +914,9 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb, do_work = true; process_err_fn = storvsc_remove_lun; break; - case (SRB_STATUS_ABORTED | SRB_STATUS_AUTOSENSE_VALID): - if ((asc == 0x2a) && (ascq == 0x9)) { + case SRB_STATUS_ABORTED: + if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID && + (asc == 0x2a) && (ascq == 0x9)) { do_work = true; process_err_fn = storvsc_device_scan; /* @@ -915,24 +941,22 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb, wrk->host = host; wrk->lun = vm_srb->lun; + wrk->tgt_id = vm_srb->target_id; INIT_WORK(&wrk->work, process_err_fn); schedule_work(&wrk->work); } -static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request) +static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request, + struct storvsc_device *stor_dev) { struct scsi_cmnd *scmnd = cmd_request->cmd; - struct hv_host_device *host_dev = shost_priv(scmnd->device->host); struct scsi_sense_hdr sense_hdr; struct vmscsi_request *vm_srb; struct Scsi_Host *host; - struct storvsc_device *stor_dev; - struct hv_device *dev = host_dev->dev; u32 payload_sz = cmd_request->payload_sz; void *payload = cmd_request->payload; - stor_dev = get_in_stor_device(dev); host = stor_dev->host; vm_srb = &cmd_request->vstor_packet.vm_srb; @@ -941,7 +965,8 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request) if (scmnd->result) { if (scsi_normalize_sense(scmnd->sense_buffer, - SCSI_SENSE_BUFFERSIZE, &sense_hdr)) + SCSI_SENSE_BUFFERSIZE, &sense_hdr) && + do_logging(STORVSC_LOGGING_ERROR)) scsi_print_sense_hdr(scmnd->device, "storvsc", &sense_hdr); } @@ -961,14 +986,13 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request) kfree(payload); } -static void storvsc_on_io_completion(struct hv_device *device, +static void storvsc_on_io_completion(struct storvsc_device *stor_device, struct vstor_packet *vstor_packet, struct storvsc_cmd_request *request) { - struct storvsc_device *stor_device; struct vstor_packet *stor_pkt; + struct hv_device *device = stor_device->device; - stor_device = hv_get_drvdata(device); stor_pkt = &request->vstor_packet; /* @@ -995,6 +1019,13 @@ static void storvsc_on_io_completion(struct hv_device *device, stor_pkt->vm_srb.sense_info_length = vstor_packet->vm_srb.sense_info_length; + if (vstor_packet->vm_srb.scsi_status != 0 || + vstor_packet->vm_srb.srb_status != SRB_STATUS_SUCCESS) + storvsc_log(device, STORVSC_LOGGING_WARN, + "cmd 0x%x scsi status 0x%x srb status 0x%x\n", + stor_pkt->vm_srb.cdb[0], + vstor_packet->vm_srb.scsi_status, + vstor_packet->vm_srb.srb_status); if ((vstor_packet->vm_srb.scsi_status & 0xFF) == 0x02) { /* CHECK_CONDITION */ @@ -1002,6 +1033,10 @@ static void storvsc_on_io_completion(struct hv_device *device, SRB_STATUS_AUTOSENSE_VALID) { /* autosense data available */ + storvsc_log(device, STORVSC_LOGGING_WARN, + "stor pkt %p autosense data valid - len %d\n", + request, vstor_packet->vm_srb.sense_info_length); + memcpy(request->cmd->sense_buffer, 
vstor_packet->vm_srb.sense_data, vstor_packet->vm_srb.sense_info_length); @@ -1012,7 +1047,7 @@ static void storvsc_on_io_completion(struct hv_device *device, stor_pkt->vm_srb.data_transfer_length = vstor_packet->vm_srb.data_transfer_length; - storvsc_command_completion(request); + storvsc_command_completion(request, stor_device); if (atomic_dec_and_test(&stor_device->num_outstanding_req) && stor_device->drain_notify) @@ -1021,21 +1056,19 @@ static void storvsc_on_io_completion(struct hv_device *device, } -static void storvsc_on_receive(struct hv_device *device, +static void storvsc_on_receive(struct storvsc_device *stor_device, struct vstor_packet *vstor_packet, struct storvsc_cmd_request *request) { struct storvsc_scan_work *work; - struct storvsc_device *stor_device; switch (vstor_packet->operation) { case VSTOR_OPERATION_COMPLETE_IO: - storvsc_on_io_completion(device, vstor_packet, request); + storvsc_on_io_completion(stor_device, vstor_packet, request); break; case VSTOR_OPERATION_REMOVE_DEVICE: case VSTOR_OPERATION_ENUMERATE_BUS: - stor_device = get_in_stor_device(device); work = kmalloc(sizeof(struct storvsc_scan_work), GFP_ATOMIC); if (!work) return; @@ -1045,6 +1078,13 @@ static void storvsc_on_receive(struct hv_device *device, schedule_work(&work->work); break; + case VSTOR_OPERATION_FCHBA_DATA: + cache_wwn(stor_device, vstor_packet); +#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) + fc_host_node_name(stor_device->host) = stor_device->node_name; + fc_host_port_name(stor_device->host) = stor_device->port_name; +#endif + break; default: break; } @@ -1088,7 +1128,7 @@ static void storvsc_on_channel_callback(void *context) vmscsi_size_delta)); complete(&request->wait_event); } else { - storvsc_on_receive(device, + storvsc_on_receive(stor_device, (struct vstor_packet *)packet, request); } @@ -1100,7 +1140,8 @@ static void storvsc_on_channel_callback(void *context) return; } -static int storvsc_connect_to_vsp(struct hv_device *device, u32 ring_size) +static int storvsc_connect_to_vsp(struct hv_device *device, u32 ring_size, + bool is_fc) { struct vmstorage_channel_properties props; int ret; @@ -1117,7 +1158,7 @@ static int storvsc_connect_to_vsp(struct hv_device *device, u32 ring_size) if (ret != 0) return ret; - ret = storvsc_channel_init(device); + ret = storvsc_channel_init(device, is_fc); return ret; } @@ -1542,6 +1583,7 @@ static int storvsc_probe(struct hv_device *device, struct Scsi_Host *host; struct hv_host_device *host_dev; bool dev_is_ide = ((dev_id->driver_data == IDE_GUID) ? true : false); + bool is_fc = ((dev_id->driver_data == SFC_GUID) ? 
true : false); int target = 0; struct storvsc_device *stor_device; int max_luns_per_target; @@ -1599,7 +1641,7 @@ static int storvsc_probe(struct hv_device *device, hv_set_drvdata(device, stor_device); stor_device->port_number = host->host_no; - ret = storvsc_connect_to_vsp(device, storvsc_ringbuffer_size); + ret = storvsc_connect_to_vsp(device, storvsc_ringbuffer_size, is_fc); if (ret) goto err_out1; @@ -1611,6 +1653,9 @@ static int storvsc_probe(struct hv_device *device, host->max_lun = STORVSC_FC_MAX_LUNS_PER_TARGET; host->max_id = STORVSC_FC_MAX_TARGETS; host->max_channel = STORVSC_FC_MAX_CHANNELS - 1; +#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) + host->transportt = fc_transport_template; +#endif break; case SCSI_GUID: @@ -1650,6 +1695,12 @@ static int storvsc_probe(struct hv_device *device, goto err_out2; } } +#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) + if (host->transportt == fc_transport_template) { + fc_host_node_name(host) = stor_device->node_name; + fc_host_port_name(host) = stor_device->port_name; + } +#endif return 0; err_out2: @@ -1675,6 +1726,10 @@ static int storvsc_remove(struct hv_device *dev) struct storvsc_device *stor_device = hv_get_drvdata(dev); struct Scsi_Host *host = stor_device->host; +#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) + if (host->transportt == fc_transport_template) + fc_remove_host(host); +#endif scsi_remove_host(host); storvsc_dev_remove(dev); scsi_host_put(host); @@ -1689,8 +1744,16 @@ static struct hv_driver storvsc_drv = { .remove = storvsc_remove, }; +#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) +static struct fc_function_template fc_transport_functions = { + .show_host_node_name = 1, + .show_host_port_name = 1, +}; +#endif + static int __init storvsc_drv_init(void) { + int ret; /* * Divide the ring buffer data size (which is 1 page less @@ -1705,12 +1768,33 @@ static int __init storvsc_drv_init(void) vmscsi_size_delta, sizeof(u64))); - return vmbus_driver_register(&storvsc_drv); +#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) + fc_transport_template = fc_attach_transport(&fc_transport_functions); + if (!fc_transport_template) + return -ENODEV; + + /* + * Install Hyper-V specific timeout handler. 
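+	 * (storvsc_eh_timed_out is expected to simply restart the block
+	 * layer timer, so host-side delays do not escalate into aborts.)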
+ */ + fc_transport_template->eh_timed_out = storvsc_eh_timed_out; +#endif + + ret = vmbus_driver_register(&storvsc_drv); + +#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) + if (ret) + fc_release_transport(fc_transport_template); +#endif + + return ret; } static void __exit storvsc_drv_exit(void) { vmbus_driver_unregister(&storvsc_drv); +#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) + fc_release_transport(fc_transport_template); +#endif } MODULE_LICENSE("GPL"); diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c index 22a42836d193..b9de487bbd31 100644 --- a/drivers/scsi/sun3_scsi.c +++ b/drivers/scsi/sun3_scsi.c @@ -53,13 +53,12 @@ #define NCR5380_queue_command sun3scsi_queue_command #define NCR5380_bus_reset sun3scsi_bus_reset #define NCR5380_abort sun3scsi_abort -#define NCR5380_show_info sun3scsi_show_info #define NCR5380_info sun3scsi_info #define NCR5380_dma_read_setup(instance, data, count) \ - sun3scsi_dma_setup(data, count, 0) + sun3scsi_dma_setup(instance, data, count, 0) #define NCR5380_dma_write_setup(instance, data, count) \ - sun3scsi_dma_setup(data, count, 1) + sun3scsi_dma_setup(instance, data, count, 1) #define NCR5380_dma_residual(instance) \ sun3scsi_dma_residual(instance) #define NCR5380_dma_xfer_len(instance, cmd, phase) \ @@ -86,10 +85,6 @@ module_param(setup_use_tagged_queuing, int, 0); static int setup_hostid = -1; module_param(setup_hostid, int, 0); -/* #define RESET_BOOT */ - -#define AFTER_RESET_DELAY (HZ/2) - /* ms to wait after hitting dma regs */ #define SUN3_DMA_DELAY 10 @@ -100,11 +95,10 @@ static struct scsi_cmnd *sun3_dma_setup_done; static unsigned char *sun3_scsi_regp; static volatile struct sun3_dma_regs *dregs; static struct sun3_udc_regs *udc_regs; -static unsigned char *sun3_dma_orig_addr = NULL; -static unsigned long sun3_dma_orig_count = 0; -static int sun3_dma_active = 0; -static unsigned long last_residual = 0; -static struct Scsi_Host *default_instance; +static unsigned char *sun3_dma_orig_addr; +static unsigned long sun3_dma_orig_count; +static int sun3_dma_active; +static unsigned long last_residual; /* * NCR 5380 register access functions @@ -144,50 +138,12 @@ static inline void sun3_udc_write(unsigned short val, unsigned char reg) } #endif -#ifdef RESET_BOOT -static void sun3_scsi_reset_boot(struct Scsi_Host *instance) -{ - unsigned long end; - - /* - * Do a SCSI reset to clean up the bus during initialization. No - * messing with the queues, interrupts, or locks necessary here. - */ - - printk( "Sun3 SCSI: resetting the SCSI bus..." ); - - /* switch off SCSI IRQ - catch an interrupt without IRQ bit set else */ -// sun3_disable_irq( IRQ_SUN3_SCSI ); - - /* get in phase */ - NCR5380_write( TARGET_COMMAND_REG, - PHASE_SR_TO_TCR( NCR5380_read(STATUS_REG) )); - - /* assert RST */ - NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_RST ); - - /* The min. 
reset hold time is 25us, so 40us should be enough */ - udelay( 50 ); - - /* reset RST and interrupt */ - NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE ); - NCR5380_read( RESET_PARITY_INTERRUPT_REG ); - - for( end = jiffies + AFTER_RESET_DELAY; time_before(jiffies, end); ) - barrier(); - - /* switch on SCSI IRQ again */ -// sun3_enable_irq( IRQ_SUN3_SCSI ); - - printk( " done\n" ); -} -#endif - // safe bits for the CSR #define CSR_GOOD 0x060f -static irqreturn_t scsi_sun3_intr(int irq, void *dummy) +static irqreturn_t scsi_sun3_intr(int irq, void *dev) { + struct Scsi_Host *instance = dev; unsigned short csr = dregs->csr; int handled = 0; @@ -196,46 +152,24 @@ static irqreturn_t scsi_sun3_intr(int irq, void *dummy) #endif if(csr & ~CSR_GOOD) { - if(csr & CSR_DMA_BUSERR) { - printk("scsi%d: bus error in dma\n", default_instance->host_no); - } - - if(csr & CSR_DMA_CONFLICT) { - printk("scsi%d: dma conflict\n", default_instance->host_no); - } + if (csr & CSR_DMA_BUSERR) + shost_printk(KERN_ERR, instance, "bus error in DMA\n"); + if (csr & CSR_DMA_CONFLICT) + shost_printk(KERN_ERR, instance, "DMA conflict\n"); handled = 1; } if(csr & (CSR_SDB_INT | CSR_DMA_INT)) { - NCR5380_intr(irq, dummy); + NCR5380_intr(irq, dev); handled = 1; } return IRQ_RETVAL(handled); } -/* - * Debug stuff - to be called on NMI, or sysrq key. Use at your own risk; - * reentering NCR5380_print_status seems to have ugly side effects - */ - -/* this doesn't seem to get used at all -- sam */ -#if 0 -void sun3_sun3_debug (void) -{ - unsigned long flags; - - if (default_instance) { - local_irq_save(flags); - NCR5380_print_status(default_instance); - local_irq_restore(flags); - } -} -#endif - - /* sun3scsi_dma_setup() -- initialize the dma controller for a read/write */ -static unsigned long sun3scsi_dma_setup(void *data, unsigned long count, int write_flag) +static unsigned long sun3scsi_dma_setup(struct Scsi_Host *instance, + void *data, unsigned long count, int write_flag) { void *addr; @@ -287,10 +221,9 @@ static unsigned long sun3scsi_dma_setup(void *data, unsigned long count, int wri dregs->csr |= CSR_FIFO; if(dregs->fifo_count != count) { - printk("scsi%d: fifo_mismatch %04x not %04x\n", - default_instance->host_no, dregs->fifo_count, - (unsigned int) count); - NCR5380_dprint(NDEBUG_DMA, default_instance); + shost_printk(KERN_ERR, instance, "FIFO mismatch %04x not %04x\n", + dregs->fifo_count, (unsigned int) count); + NCR5380_dprint(NDEBUG_DMA, instance); } /* setup udc */ @@ -325,21 +258,6 @@ static unsigned long sun3scsi_dma_setup(void *data, unsigned long count, int wri } -#ifndef SUN3_SCSI_VME -static inline unsigned long sun3scsi_dma_count(struct Scsi_Host *instance) -{ - unsigned short resid; - - dregs->udc_addr = 0x32; - udelay(SUN3_DMA_DELAY); - resid = dregs->udc_data; - udelay(SUN3_DMA_DELAY); - resid *= 2; - - return (unsigned long) resid; -} -#endif - static inline unsigned long sun3scsi_dma_residual(struct Scsi_Host *instance) { return last_residual; @@ -437,7 +355,10 @@ static int sun3scsi_dma_finish(int write_flag) } } - count = sun3scsi_dma_count(default_instance); + dregs->udc_addr = 0x32; + udelay(SUN3_DMA_DELAY); + count = 2 * dregs->udc_data; + udelay(SUN3_DMA_DELAY); fifo = dregs->fifo_count; last_residual = fifo; @@ -502,17 +423,17 @@ static int sun3scsi_dma_finish(int write_flag) static struct scsi_host_template sun3_scsi_template = { .module = THIS_MODULE, .proc_name = DRV_MODULE_NAME, - .show_info = sun3scsi_show_info, .name = SUN3_SCSI_NAME, .info = sun3scsi_info, .queuecommand = 
sun3scsi_queue_command, - .eh_abort_handler = sun3scsi_abort, - .eh_bus_reset_handler = sun3scsi_bus_reset, + .eh_abort_handler = sun3scsi_abort, + .eh_bus_reset_handler = sun3scsi_bus_reset, .can_queue = 16, .this_id = 7, .sg_tablesize = SG_NONE, .cmd_per_lun = 2, - .use_clustering = DISABLE_CLUSTERING + .use_clustering = DISABLE_CLUSTERING, + .cmd_size = NCR5380_CMD_SIZE, }; static int __init sun3_scsi_probe(struct platform_device *pdev) @@ -591,7 +512,6 @@ static int __init sun3_scsi_probe(struct platform_device *pdev) error = -ENOMEM; goto fail_alloc; } - default_instance = instance; instance->io_port = (unsigned long)ioaddr; instance->irq = irq->start; @@ -600,7 +520,9 @@ static int __init sun3_scsi_probe(struct platform_device *pdev) host_flags |= setup_use_tagged_queuing > 0 ? FLAG_TAGGED_QUEUING : 0; #endif - NCR5380_init(instance, host_flags); + error = NCR5380_init(instance, host_flags); + if (error) + goto fail_init; error = request_irq(instance->irq, scsi_sun3_intr, 0, "NCR5380", instance); @@ -631,9 +553,7 @@ static int __init sun3_scsi_probe(struct platform_device *pdev) dregs->ivect = VME_DATA24 | (instance->irq & 0xff); #endif -#ifdef RESET_BOOT - sun3_scsi_reset_boot(instance); -#endif + NCR5380_maybe_reset_bus(instance); error = scsi_add_host(instance, NULL); if (error) @@ -649,6 +569,7 @@ fail_host: free_irq(instance->irq, instance); fail_irq: NCR5380_exit(instance); +fail_init: scsi_host_put(instance); fail_alloc: if (udc_regs) diff --git a/drivers/scsi/t128.c b/drivers/scsi/t128.c index 87828acbf7c6..4615fda60dbd 100644 --- a/drivers/scsi/t128.c +++ b/drivers/scsi/t128.c @@ -68,14 +68,11 @@ * 15 9-11 */ -#include <linux/signal.h> #include <linux/io.h> #include <linux/blkdev.h> #include <linux/interrupt.h> -#include <linux/stat.h> #include <linux/init.h> #include <linux/module.h> -#include <linux/delay.h> #include <scsi/scsi_host.h> #include "t128.h" @@ -126,7 +123,7 @@ static struct signature { static int __init t128_setup(char *str) { - static int commandline_current = 0; + static int commandline_current; int i; int ints[10]; @@ -165,7 +162,7 @@ __setup("t128=", t128_setup); static int __init t128_detect(struct scsi_host_template *tpnt) { - static int current_override = 0, current_base = 0; + static int current_override, current_base; struct Scsi_Host *instance; unsigned long base; void __iomem *p; @@ -182,9 +179,8 @@ static int __init t128_detect(struct scsi_host_template *tpnt) base = 0; } else for (; !base && (current_base < NO_BASES); ++current_base) { -#if (TDEBUG & TDEBUG_INIT) - printk("scsi-t128 : probing address %08x\n", bases[current_base].address); -#endif + dprintk(NDEBUG_INIT, "t128: probing address 0x%08x\n", + bases[current_base].address); if (bases[current_base].noauto) continue; p = ioremap(bases[current_base].address, 0x2000); @@ -195,17 +191,13 @@ static int __init t128_detect(struct scsi_host_template *tpnt) signatures[sig].string, strlen(signatures[sig].string))) { base = bases[current_base].address; -#if (TDEBUG & TDEBUG_INIT) - printk("scsi-t128 : detected board.\n"); -#endif + dprintk(NDEBUG_INIT, "t128: detected board\n"); goto found; } iounmap(p); } -#if defined(TDEBUG) && (TDEBUG & TDEBUG_INIT) - printk("scsi-t128 : base = %08x\n", (unsigned int) base); -#endif + dprintk(NDEBUG_INIT, "t128: base = 0x%08x\n", (unsigned int)base); if (!base) break; @@ -213,12 +205,15 @@ static int __init t128_detect(struct scsi_host_template *tpnt) found: instance = scsi_register (tpnt, sizeof(struct NCR5380_hostdata)); if(instance == NULL) - break; - + goto 
out_unmap; + instance->base = base; ((struct NCR5380_hostdata *)instance->hostdata)->base = p; - NCR5380_init(instance, 0); + if (NCR5380_init(instance, 0)) + goto out_unregister; + + NCR5380_maybe_reset_bus(instance); if (overrides[current_override].irq != IRQ_AUTO) instance->irq = overrides[current_override].irq; @@ -242,27 +237,30 @@ found: printk("scsi%d : please jumper the board for a free IRQ.\n", instance->host_no); } -#if defined(TDEBUG) && (TDEBUG & TDEBUG_INIT) - printk("scsi%d : irq = %d\n", instance->host_no, instance->irq); -#endif + dprintk(NDEBUG_INIT, "scsi%d: irq = %d\n", + instance->host_no, instance->irq); ++current_override; ++count; } return count; + +out_unregister: + scsi_unregister(instance); +out_unmap: + iounmap(p); + return count; } static int t128_release(struct Scsi_Host *shost) { - NCR5380_local_declare(); - NCR5380_setup(shost); + struct NCR5380_hostdata *hostdata = shost_priv(shost); + if (shost->irq != NO_IRQ) free_irq(shost->irq, shost); NCR5380_exit(shost); - if (shost->io_port && shost->n_io_port) - release_region(shost->io_port, shost->n_io_port); scsi_unregister(shost); - iounmap(base); + iounmap(hostdata->base); return 0; } @@ -308,14 +306,14 @@ static int t128_biosparam(struct scsi_device *sdev, struct block_device *bdev, * timeout. */ -static inline int NCR5380_pread (struct Scsi_Host *instance, unsigned char *dst, - int len) { - NCR5380_local_declare(); - void __iomem *reg; +static inline int +NCR5380_pread(struct Scsi_Host *instance, unsigned char *dst, int len) +{ + struct NCR5380_hostdata *hostdata = shost_priv(instance); + void __iomem *reg, *base = hostdata->base; unsigned char *d = dst; register int i = len; - NCR5380_setup(instance); reg = base + T_DATA_REG_OFFSET; #if 0 @@ -354,14 +352,14 @@ static inline int NCR5380_pread (struct Scsi_Host *instance, unsigned char *dst, * timeout. 
*/ -static inline int NCR5380_pwrite (struct Scsi_Host *instance, unsigned char *src, - int len) { - NCR5380_local_declare(); - void __iomem *reg; +static inline int +NCR5380_pwrite(struct Scsi_Host *instance, unsigned char *src, int len) +{ + struct NCR5380_hostdata *hostdata = shost_priv(instance); + void __iomem *reg, *base = hostdata->base; unsigned char *s = src; register int i = len; - NCR5380_setup(instance); reg = base + T_DATA_REG_OFFSET; #if 0 @@ -392,21 +390,23 @@ MODULE_LICENSE("GPL"); #include "NCR5380.c" static struct scsi_host_template driver_template = { - .name = "Trantor T128/T128F/T228", - .detect = t128_detect, - .release = t128_release, - .proc_name = "t128", - .show_info = t128_show_info, - .write_info = t128_write_info, - .info = t128_info, - .queuecommand = t128_queue_command, - .eh_abort_handler = t128_abort, - .eh_bus_reset_handler = t128_bus_reset, - .bios_param = t128_biosparam, - .can_queue = CAN_QUEUE, - .this_id = 7, - .sg_tablesize = SG_ALL, - .cmd_per_lun = CMD_PER_LUN, - .use_clustering = DISABLE_CLUSTERING, + .name = "Trantor T128/T128F/T228", + .detect = t128_detect, + .release = t128_release, + .proc_name = "t128", + .show_info = t128_show_info, + .write_info = t128_write_info, + .info = t128_info, + .queuecommand = t128_queue_command, + .eh_abort_handler = t128_abort, + .eh_bus_reset_handler = t128_bus_reset, + .bios_param = t128_biosparam, + .can_queue = 32, + .this_id = 7, + .sg_tablesize = SG_ALL, + .cmd_per_lun = 2, + .use_clustering = DISABLE_CLUSTERING, + .cmd_size = NCR5380_CMD_SIZE, + .max_sectors = 128, }; #include "scsi_module.c" diff --git a/drivers/scsi/t128.h b/drivers/scsi/t128.h index 2c7371454dfd..dd16d85497e1 100644 --- a/drivers/scsi/t128.h +++ b/drivers/scsi/t128.h @@ -23,10 +23,6 @@ #ifndef T128_H #define T128_H -#define TDEBUG 0 -#define TDEBUG_INIT 0x1 -#define TDEBUG_TRANSFER 0x2 - /* * The trantor boards are memory mapped. They use an NCR5380 or * equivalent (my sample board had part second sourced from ZILOG). 
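Editorial aside: the t128.h hunk below folds the old NCR5380_local_declare()/NCR5380_setup() pattern into a T128_address() that reaches the mapped registers through shost_priv(). A minimal sketch of the resulting access pattern; the helper is hypothetical and not part of the patch, and STATUS_REG comes from NCR5380.h:

static u8 t128_read_status(struct Scsi_Host *instance)
{
	struct NCR5380_hostdata *hostdata = shost_priv(instance);

	/* 5380 registers sit behind T_5380_OFFSET, spaced 0x20 apart */
	return readb(hostdata->base + T_5380_OFFSET + STATUS_REG * 0x20);
}

NCR5380_read()/NCR5380_write() still expand to readb()/writeb() on T128_address(reg), so existing callers are unchanged.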
@@ -71,44 +67,18 @@
 
 #define T_DATA_REG_OFFSET	0x1e00	/* rw 512 bytes long */
 
-#ifndef ASM
-
-#ifndef CMD_PER_LUN
-#define CMD_PER_LUN 2
-#endif
-
-#ifndef CAN_QUEUE
-#define CAN_QUEUE 32
-#endif
-
 #define NCR5380_implementation_fields \
     void __iomem *base
 
-#define NCR5380_local_declare() \
-    void __iomem *base
-
-#define NCR5380_setup(instance) \
-    base = ((struct NCR5380_hostdata *)(instance->hostdata))->base
+#define T128_address(reg) \
+	(((struct NCR5380_hostdata *)shost_priv(instance))->base + T_5380_OFFSET + ((reg) * 0x20))
 
-#define T128_address(reg) (base + T_5380_OFFSET + ((reg) * 0x20))
-
-#if !(TDEBUG & TDEBUG_TRANSFER)
 #define NCR5380_read(reg) readb(T128_address(reg))
 #define NCR5380_write(reg, value) writeb((value),(T128_address(reg)))
-#else
-#define NCR5380_read(reg)						\
-	(((unsigned char) printk("scsi%d : read register %d at address %08x\n"\
-	, instance->hostno, (reg), T128_address(reg))), readb(T128_address(reg)))
-
-#define NCR5380_write(reg, value) {					\
-	printk("scsi%d : write %02x to register %d at address %08x\n",	\
-	instance->hostno, (value), (reg), T128_address(reg));		\
-	writeb((value), (T128_address(reg)));				\
-}
-#endif
+
+#define NCR5380_dma_xfer_len(instance, cmd, phase)	(cmd->transfersize)
 
 #define NCR5380_intr t128_intr
-#define do_NCR5380_intr do_t128_intr
 #define NCR5380_queue_command t128_queue_command
 #define NCR5380_abort t128_abort
 #define NCR5380_bus_reset t128_bus_reset
@@ -121,5 +91,4 @@
 
 #define T128_IRQS 0xc4a8
 
-#endif /* ndef ASM */
 #endif /* T128_H */
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index 42c459a9d3fe..54a16cef0367 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -170,7 +170,7 @@ enum ufs_desc_max_size {
 	 * of descriptor header.
 	 */
 	QUERY_DESC_STRING_MAX_SIZE	= 0xFE,
-	QUERY_DESC_GEOMETRY_MAZ_SIZE	= 0x44,
+	QUERY_DESC_GEOMETRY_MAX_SIZE	= 0x44,
 	QUERY_DESC_POWER_MAX_SIZE	= 0x62,
 	QUERY_DESC_RFU_MAX_SIZE		= 0x00,
 };
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
index 9714f2a8b329..d2a7b127b05c 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -333,7 +333,7 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
 
 	err = ufshcd_init(hba, mmio_base, irq);
 	if (err) {
-		dev_err(dev, "Intialization failed\n");
+		dev_err(dev, "Initialization failed\n");
 		goto out_disable_rpm;
 	}
 
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 85cd2564c157..9c1b94bef8f3 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -3,7 +3,7 @@
 *
 * This code is based on drivers/scsi/ufs/ufshcd.c
 * Copyright (C) 2011-2013 Samsung India Software Operations
- * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
 *
 * Authors:
 *	Santosh Yaraganavi <santosh.sy@samsung.com>
@@ -58,13 +58,25 @@
 #define QUERY_REQ_RETRIES 10
 /* Query request timeout */
 #define QUERY_REQ_TIMEOUT 30 /* msec */
+/*
+ * Query request timeout for fDeviceInit flag
+ * fDeviceInit query response time for some devices is so large that the
+ * default QUERY_REQ_TIMEOUT may not be enough for such devices.
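+ * For scale: QUERY_REQ_TIMEOUT above is 30 msec, so a device that needs
+ * a few hundred msec to finish fDeviceInit would time out on every
+ * attempt without the larger value below.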
+ */
+#define QUERY_FDEVICEINIT_REQ_TIMEOUT 600 /* msec */
 
 /* Task management command timeout */
 #define TM_CMD_TIMEOUT	100 /* msecs */
 
+/* maximum number of retries for a general UIC command */
+#define UFS_UIC_COMMAND_RETRIES 3
+
 /* maximum number of link-startup retries */
 #define DME_LINKSTARTUP_RETRIES 3
 
+/* Maximum retries for Hibern8 enter */
+#define UIC_HIBERN8_ENTER_RETRIES 3
+
 /* maximum number of reset retries before giving up */
 #define MAX_HOST_RESET_RETRIES 5
 
@@ -92,7 +104,7 @@ static u32 ufs_query_desc_max_size[] = {
 	QUERY_DESC_INTERCONNECT_MAX_SIZE,
 	QUERY_DESC_STRING_MAX_SIZE,
 	QUERY_DESC_RFU_MAX_SIZE,
-	QUERY_DESC_GEOMETRY_MAZ_SIZE,
+	QUERY_DESC_GEOMETRY_MAX_SIZE,
 	QUERY_DESC_POWER_MAX_SIZE,
 	QUERY_DESC_RFU_MAX_SIZE,
 };
@@ -190,6 +202,10 @@ static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
 		struct ufs_pa_layer_attr *desired_pwr_mode);
 static int ufshcd_change_power_mode(struct ufs_hba *hba,
 			     struct ufs_pa_layer_attr *pwr_mode);
+static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
+{
+	return tag >= 0 && tag < hba->nutrs;
+}
 
 static inline int ufshcd_enable_irq(struct ufs_hba *hba)
 {
@@ -360,6 +376,16 @@ static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
 }
 
 /**
+ * ufshcd_outstanding_req_clear - Clear a bit in outstanding request field
+ * @hba: per adapter instance
+ * @tag: position of the bit to be cleared
+ */
+static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
+{
+	__clear_bit(tag, &hba->outstanding_reqs);
+}
+
+/**
 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
 * @reg: Register value of host controller status
 *
@@ -374,11 +400,9 @@ static inline int ufshcd_get_lists_status(u32 reg)
 	 *  1		UTRLRDY
 	 *  2		UTMRLRDY
 	 *  3		UCRDY
-	 *  4		HEI
-	 *  5		DEI
-	 * 6-7		reserved
+	 * 4-7		reserved
 	 */
-	return (((reg) & (0xFF)) >> 1) ^ (0x07);
+	return ((reg & 0xFF) >> 1) ^ 0x07;
 }
 
 /**
@@ -582,6 +606,11 @@ int ufshcd_hold(struct ufs_hba *hba, bool async)
 	spin_lock_irqsave(hba->host->host_lock, flags);
 	hba->clk_gating.active_reqs++;
 
+	if (ufshcd_eh_in_progress(hba)) {
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		return 0;
+	}
+
 start:
 	switch (hba->clk_gating.state) {
 	case CLKS_ON:
@@ -697,7 +726,8 @@ static void __ufshcd_release(struct ufs_hba *hba)
 	if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
 		|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
 		|| hba->lrb_in_use || hba->outstanding_tasks
-		|| hba->active_uic_cmd || hba->uic_async_done)
+		|| hba->active_uic_cmd || hba->uic_async_done
+		|| ufshcd_eh_in_progress(hba))
 		return;
 
 	hba->clk_gating.state = REQ_CLKS_OFF;
@@ -953,13 +983,15 @@ ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
 * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
 * @hba: per adapter instance
 * @uic_cmd: UIC command
+ * @completion: initialize the completion only if this is set to true
 *
 * Identical to ufshcd_send_uic_cmd() except for the locking: it must be
 * called with the uic_cmd_mutex held and host_lock locked.
 * Returns 0 only on success.
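+ * Callers that complete through hba->uic_async_done (e.g.
+ * ufshcd_uic_pwr_ctrl()) pass completion == false, since nothing waits
+ * on uic_cmd->done for those commands.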
*/ static int -__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) +__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd, + bool completion) { if (!ufshcd_ready_for_uic_cmd(hba)) { dev_err(hba->dev, @@ -967,7 +999,8 @@ __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) return -EIO; } - init_completion(&uic_cmd->done); + if (completion) + init_completion(&uic_cmd->done); ufshcd_dispatch_uic_cmd(hba, uic_cmd); @@ -992,7 +1025,7 @@ ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) ufshcd_add_delay_before_dme_cmd(hba); spin_lock_irqsave(hba->host->host_lock, flags); - ret = __ufshcd_send_uic_cmd(hba, uic_cmd); + ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true); spin_unlock_irqrestore(hba->host->host_lock, flags); if (!ret) ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd); @@ -1035,6 +1068,7 @@ static int ufshcd_map_sg(struct ufshcd_lrb *lrbp) cpu_to_le32(lower_32_bits(sg->dma_address)); prd_table[i].upper_addr = cpu_to_le32(upper_32_bits(sg->dma_address)); + prd_table[i].reserved = 0; } } else { lrbp->utr_descriptor_ptr->prd_table_length = 0; @@ -1117,7 +1151,8 @@ static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp, /* Transfer request descriptor header fields */ req_desc->header.dword_0 = cpu_to_le32(dword_0); - + /* dword_1 is reserved, hence it is set to 0 */ + req_desc->header.dword_1 = 0; /* * assigning invalid value for command status. Controller * updates OCS on command completion, with the command @@ -1125,6 +1160,10 @@ static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp, */ req_desc->header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS); + /* dword_3 is reserved, hence it is set to 0 */ + req_desc->header.dword_3 = 0; + + req_desc->prd_table_length = 0; } /** @@ -1137,6 +1176,7 @@ static void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags) { struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr; + unsigned short cdb_len; /* command descriptor fields */ ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD( @@ -1151,8 +1191,11 @@ void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags) ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(lrbp->cmd->sdb.length); - memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, - (min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE))); + cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE); + memset(ucd_req_ptr->sc.cdb, 0, MAX_CDB_SIZE); + memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, cdb_len); + + memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp)); } /** @@ -1189,6 +1232,7 @@ static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba, if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC) memcpy(descp, query->descriptor, len); + memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp)); } static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp) @@ -1201,6 +1245,11 @@ static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp) ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD( UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag); + /* clear rest of the fields of basic header */ + ucd_req_ptr->header.dword_1 = 0; + ucd_req_ptr->header.dword_2 = 0; + + memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp)); } /** @@ -1293,6 +1342,12 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) hba = shost_priv(host); tag = cmd->request->tag; + if (!ufshcd_valid_tag(hba, tag)) { + dev_err(hba->dev, + "%s: invalid command tag %d: cmd=0x%p, 
cmd->request=0x%p",
+			__func__, tag, cmd, cmd->request);
+		BUG();
+	}
 
 	spin_lock_irqsave(hba->host->host_lock, flags);
 	switch (hba->ufshcd_state) {
@@ -1312,6 +1367,13 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 		cmd->scsi_done(cmd);
 		goto out_unlock;
 	}
+
+	/* if error handling is in progress, don't issue commands */
+	if (ufshcd_eh_in_progress(hba)) {
+		set_host_byte(cmd, DID_ERROR);
+		cmd->scsi_done(cmd);
+		goto out_unlock;
+	}
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
 	/* acquire the tag to make sure device cmds don't use it */
@@ -1475,9 +1537,17 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
 
 	if (!time_left) {
 		err = -ETIMEDOUT;
+		dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n",
+			__func__, lrbp->task_tag);
 		if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
-			/* sucessfully cleared the command, retry if needed */
+			/* successfully cleared the command, retry if needed */
 			err = -EAGAIN;
+		/*
+		 * in case of an error, after clearing the doorbell,
+		 * we also need to clear the outstanding_request
+		 * field in hba
+		 */
+		ufshcd_outstanding_req_clear(hba, lrbp->task_tag);
 	}
 
 	return err;
@@ -1555,6 +1625,8 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
 
 	hba->dev_cmd.complete = &wait;
 
+	/* Make sure descriptors are ready before ringing the doorbell */
+	wmb();
 	spin_lock_irqsave(hba->host->host_lock, flags);
 	ufshcd_send_command(hba, tag);
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -1591,6 +1663,29 @@ static inline void ufshcd_init_query(struct ufs_hba *hba,
 	(*request)->upiu_req.selector = selector;
 }
 
+static int ufshcd_query_flag_retry(struct ufs_hba *hba,
+	enum query_opcode opcode, enum flag_idn idn, bool *flag_res)
+{
+	int ret;
+	int retries;
+
+	for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
+		ret = ufshcd_query_flag(hba, opcode, idn, flag_res);
+		if (ret)
+			dev_dbg(hba->dev,
+				"%s: failed with error %d, retries %d\n",
+				__func__, ret, retries);
+		else
+			break;
+	}
+
+	if (ret)
+		dev_err(hba->dev,
+			"%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n",
+			__func__, opcode, idn, ret, retries);
+	return ret;
+}
+
 /**
 * ufshcd_query_flag() - API function for sending flag query requests
 * hba: per-adapter instance
@@ -1600,12 +1695,13 @@ static inline void ufshcd_init_query(struct ufs_hba *hba,
 *
 * Returns 0 for success, non-zero in case of failure
 */
-static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
+int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
 			enum flag_idn idn, bool *flag_res)
 {
 	struct ufs_query_req *request = NULL;
 	struct ufs_query_res *response = NULL;
 	int err, index = 0, selector = 0;
+	int timeout = QUERY_REQ_TIMEOUT;
 
 	BUG_ON(!hba);
 
@@ -1638,7 +1734,10 @@ static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
 		goto out_unlock;
 	}
 
-	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
+	if (idn == QUERY_FLAG_IDN_FDEVICEINIT)
+		timeout = QUERY_FDEVICEINIT_REQ_TIMEOUT;
+
+	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
 
 	if (err) {
 		dev_err(hba->dev,
@@ -1722,6 +1821,43 @@ out:
 }
 
 /**
+ * ufshcd_query_attr_retry() - API function for sending query
+ * attribute with retries
+ * @hba: per-adapter instance
+ * @opcode: attribute opcode
+ * @idn: attribute idn to access
+ * @index: index field
+ * @selector: selector field
+ * @attr_val: the attribute value after the query request
+ * completes
+ *
+ * Returns 0 for success, non-zero in case of failure
+ */
+static int
ufshcd_query_attr_retry(struct ufs_hba *hba,
+	enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
+	u32 *attr_val)
+{
+	int ret = 0;
+	u32 retries;
+
+	for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
+		ret = ufshcd_query_attr(hba, opcode, idn, index,
+						selector, attr_val);
+		if (ret)
+			dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
+				__func__, ret, retries);
+		else
+			break;
+	}
+
+	if (ret)
+		dev_err(hba->dev,
+			"%s: query attribute, idn %d, failed with error %d after %d retries\n",
+			__func__, idn, ret, QUERY_REQ_RETRIES);
+	return ret;
+}
+
+/**
 * ufshcd_query_descriptor - API function for sending descriptor requests
 * hba: per-adapter instance
 * opcode: attribute opcode
@@ -2128,6 +2264,7 @@ int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
 	};
 	const char *set = action[!!peer];
 	int ret;
+	int retries = UFS_UIC_COMMAND_RETRIES;
 
 	uic_cmd.command = peer ?
 		UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
@@ -2135,10 +2272,18 @@ int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
 	uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
 	uic_cmd.argument3 = mib_val;
 
-	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
-	if (ret)
-		dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
-			set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
+	do {
+		/* for peer attributes we retry upon failure */
+		ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
+		if (ret)
+			dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
+				set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
+	} while (ret && peer && --retries);
+
+	if (ret)
+		dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
+			set, UIC_GET_ATTR_ID(attr_sel), mib_val,
+			UFS_UIC_COMMAND_RETRIES - retries);
 
 	return ret;
 }
@@ -2163,6 +2308,7 @@ int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
 	};
 	const char *get = action[!!peer];
 	int ret;
+	int retries = UFS_UIC_COMMAND_RETRIES;
 	struct ufs_pa_layer_attr orig_pwr_info;
 	struct ufs_pa_layer_attr temp_pwr_info;
 	bool pwr_mode_change = false;
@@ -2193,14 +2339,19 @@ int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
 		UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
 	uic_cmd.argument1 = attr_sel;
 
-	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
-	if (ret) {
-		dev_err(hba->dev, "%s: attr-id 0x%x error code %d\n",
-			get, UIC_GET_ATTR_ID(attr_sel), ret);
-		goto out;
-	}
+	do {
+		/* for peer attributes we retry upon failure */
+		ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
+		if (ret)
+			dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
+				get, UIC_GET_ATTR_ID(attr_sel), ret);
+	} while (ret && peer && --retries);
 
-	if (mib_val)
+	if (ret)
+		dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
+			get, UIC_GET_ATTR_ID(attr_sel),
+			UFS_UIC_COMMAND_RETRIES - retries);
+
+	if (mib_val && !ret)
 		*mib_val = uic_cmd.argument3;
 
 	if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
@@ -2233,6 +2384,7 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
 	unsigned long flags;
 	u8 status;
 	int ret;
+	bool reenable_intr = false;
 
 	mutex_lock(&hba->uic_cmd_mutex);
 	init_completion(&uic_async_done);
@@ -2240,15 +2392,17 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
 
 	spin_lock_irqsave(hba->host->host_lock, flags);
 	hba->uic_async_done = &uic_async_done;
-	ret = __ufshcd_send_uic_cmd(hba, cmd);
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
-	if (ret) {
-		dev_err(hba->dev,
-			"pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
-			cmd->command, cmd->argument3, ret);
-		goto out;
+	if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
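+		/*
+		 * These power-mode commands complete through
+		 * hba->uic_async_done, so the generic UIC completion
+		 * interrupt can be masked here; it is restored on the
+		 * out: path when reenable_intr is set.
+		 */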
ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
+		/*
+		 * Make sure UIC command completion interrupt is disabled before
+		 * issuing UIC command.
+		 */
+		wmb();
+		reenable_intr = true;
 	}
-	ret = ufshcd_wait_for_uic_cmd(hba, cmd);
+	ret = __ufshcd_send_uic_cmd(hba, cmd, false);
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
 	if (ret) {
 		dev_err(hba->dev,
 			"pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
@@ -2274,7 +2428,10 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
 	}
 out:
 	spin_lock_irqsave(hba->host->host_lock, flags);
+	hba->active_uic_cmd = NULL;
 	hba->uic_async_done = NULL;
+	if (reenable_intr)
+		ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 	mutex_unlock(&hba->uic_cmd_mutex);
 
@@ -2315,13 +2472,65 @@ out:
 	return ret;
 }
 
-static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
+static int ufshcd_link_recovery(struct ufs_hba *hba)
 {
+	int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	hba->ufshcd_state = UFSHCD_STATE_RESET;
+	ufshcd_set_eh_in_progress(hba);
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+	ret = ufshcd_host_reset_and_restore(hba);
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	if (ret)
+		hba->ufshcd_state = UFSHCD_STATE_ERROR;
+	ufshcd_clear_eh_in_progress(hba);
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+	if (ret)
+		dev_err(hba->dev, "%s: link recovery failed, err %d\n",
+			__func__, ret);
+
+	return ret;
+}
+
+static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
+{
+	int ret;
 	struct uic_command uic_cmd = {0};
 
 	uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
+	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
+
+	if (ret) {
+		dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
+			__func__, ret);
+
+		/*
+		 * If link recovery fails then return an error so that the
+		 * caller doesn't retry the hibern8 enter again.
+		 */
+		if (ufshcd_link_recovery(hba))
+			ret = -ENOLINK;
+	}
 
-	return ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
+	return ret;
+}
+
+static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
+{
+	int ret = 0, retries;
+
+	for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) {
+		ret = __ufshcd_uic_hibern8_enter(hba);
+		if (!ret || ret == -ENOLINK)
+			goto out;
+	}
+out:
+	return ret;
 }
 
 static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
@@ -2332,8 +2541,9 @@ static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
 	uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
 	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
 	if (ret) {
-		ufshcd_set_link_off(hba);
-		ret = ufshcd_host_reset_and_restore(hba);
+		dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
+			__func__, ret);
+		ret = ufshcd_link_recovery(hba);
 	}
 
 	return ret;
@@ -2513,17 +2723,12 @@ static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
 */
 static int ufshcd_complete_dev_init(struct ufs_hba *hba)
 {
-	int i, retries, err = 0;
+	int i;
+	int err;
 	bool flag_res = 1;
 
-	for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
-		/* Set the fDeviceInit flag */
-		err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
-					QUERY_FLAG_IDN_FDEVICEINIT, NULL);
-		if (!err || err == -ETIMEDOUT)
-			break;
-		dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
-	}
+	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
+		QUERY_FLAG_IDN_FDEVICEINIT, NULL);
 	if (err) {
 		dev_err(hba->dev,
 			"%s setting fDeviceInit flag failed with error %d\n",
@@ -2531,18 +2736,11 @@ static int ufshcd_complete_dev_init(struct ufs_hba *hba)
 		goto out;
 	}
 
-	/* poll for max.
100 iterations for fDeviceInit flag to clear */ - for (i = 0; i < 100 && !err && flag_res; i++) { - for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) { - err = ufshcd_query_flag(hba, - UPIU_QUERY_OPCODE_READ_FLAG, - QUERY_FLAG_IDN_FDEVICEINIT, &flag_res); - if (!err || err == -ETIMEDOUT) - break; - dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, - err); - } - } + /* poll for max. 1000 iterations for fDeviceInit flag to clear */ + for (i = 0; i < 1000 && !err && flag_res; i++) + err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG, + QUERY_FLAG_IDN_FDEVICEINIT, &flag_res); + if (err) dev_err(hba->dev, "%s reading fDeviceInit flag failed with error %d\n", @@ -2563,7 +2761,7 @@ out: * To bring UFS host controller to operational state, * 1. Enable required interrupts * 2. Configure interrupt aggregation - * 3. Program UTRL and UTMRL base addres + * 3. Program UTRL and UTMRL base address * 4. Configure run-stop-registers * * Returns 0 on success, non-zero value on failure @@ -2593,8 +2791,13 @@ static int ufshcd_make_hba_operational(struct ufs_hba *hba) REG_UTP_TASK_REQ_LIST_BASE_H); /* + * Make sure base address and interrupt setup are updated before + * enabling the run/stop registers below. + */ + wmb(); + + /* * UCRDY, UTMRLDY and UTRLRDY bits must be 1 - * DEI, HEI bits must be 0 */ reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS); if (!(ufshcd_get_lists_status(reg))) { @@ -3090,7 +3293,20 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) scsi_status = result & MASK_SCSI_STATUS; result = ufshcd_scsi_cmd_status(lrbp, scsi_status); - if (ufshcd_is_exception_event(lrbp->ucd_rsp_ptr)) + /* + * Currently we only support BKOPs exception events, so we + * can ignore BKOPs exception events during the power + * management callbacks. A BKOPs exception event is not + * expected to be raised in the runtime suspend callback, + * as that path allows urgent BKOPs anyway. + * During system suspend we forcefully disable BKOPs, and + * if urgent BKOPs is needed it will be re-enabled on + * system resume. A long-term solution could be to abort + * the system suspend if the UFS device needs urgent + * BKOPs. 
+ */ + if (!hba->pm_op_in_progress && + ufshcd_is_exception_event(lrbp->ucd_rsp_ptr)) schedule_work(&hba->eeh_work); break; case UPIU_TRANSACTION_REJECT_UPIU: @@ -3222,7 +3438,7 @@ static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask) val = hba->ee_ctrl_mask & ~mask; val &= 0xFFFF; /* 2 bytes */ - err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, + err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val); if (!err) hba->ee_ctrl_mask &= ~mask; @@ -3250,7 +3466,7 @@ static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask) val = hba->ee_ctrl_mask | mask; val &= 0xFFFF; /* 2 bytes */ - err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, + err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val); if (!err) hba->ee_ctrl_mask |= mask; @@ -3276,7 +3492,7 @@ static int ufshcd_enable_auto_bkops(struct ufs_hba *hba) if (hba->auto_bkops_enabled) goto out; - err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG, + err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG, QUERY_FLAG_IDN_BKOPS_EN, NULL); if (err) { dev_err(hba->dev, "%s: failed to enable bkops %d\n", @@ -3325,7 +3541,7 @@ static int ufshcd_disable_auto_bkops(struct ufs_hba *hba) goto out; } - err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG, + err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG, QUERY_FLAG_IDN_BKOPS_EN, NULL); if (err) { dev_err(hba->dev, "%s: failed to disable bkops %d\n", @@ -3356,7 +3572,7 @@ static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba) static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status) { - return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, + return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status); } @@ -3419,7 +3635,7 @@ static int ufshcd_urgent_bkops(struct ufs_hba *hba) static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status) { - return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, + return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, QUERY_ATTR_IDN_EE_STATUS, 0, 0, status); } @@ -3645,16 +3861,20 @@ static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status) */ static irqreturn_t ufshcd_intr(int irq, void *__hba) { - u32 intr_status; + u32 intr_status, enabled_intr_status; irqreturn_t retval = IRQ_NONE; struct ufs_hba *hba = __hba; spin_lock(hba->host->host_lock); intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS); + enabled_intr_status = + intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE); - if (intr_status) { + if (intr_status) ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS); - ufshcd_sl_intr(hba, intr_status); + + if (enabled_intr_status) { + ufshcd_sl_intr(hba, enabled_intr_status); retval = IRQ_HANDLED; } spin_unlock(hba->host->host_lock); @@ -3740,6 +3960,10 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id, /* send command to the controller */ __set_bit(free_slot, &hba->outstanding_tasks); + + /* Make sure descriptors are ready before ringing the task doorbell */ + wmb(); + ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL); spin_unlock_irqrestore(host->host_lock, flags); @@ -3845,13 +4069,23 @@ static int ufshcd_abort(struct scsi_cmnd *cmd) host = cmd->device->host; hba = shost_priv(host); tag = cmd->request->tag; + if (!ufshcd_valid_tag(hba, tag)) { + dev_err(hba->dev, + "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p", + __func__, tag, cmd, cmd->request); + 
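/*
 * Editor's note: the ufshcd_intr() hunk above is the usual pattern for a
 * possibly shared interrupt line: acknowledge every raised bit, but service
 * only the sources actually enabled, and claim the interrupt only if one of
 * those fired. A minimal sketch, assuming a mapped register window `mmio`
 * passed as the dev_id cookie and reusing the driver's REG_INTERRUPT_*
 * offsets; none of this is the driver's actual handler.
 */
static irqreturn_t sketch_intr(int irq, void *dev_id)
{
	void __iomem *mmio = dev_id;	/* hypothetical cookie */
	u32 raw = readl(mmio + REG_INTERRUPT_STATUS);
	u32 enabled = raw & readl(mmio + REG_INTERRUPT_ENABLE);

	if (raw)
		writel(raw, mmio + REG_INTERRUPT_STATUS);	/* ack all */
	if (!enabled)
		return IRQ_NONE;	/* nothing we enabled: not ours */
	/* ... service only the `enabled` sources here ... */
	return IRQ_HANDLED;
}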
BUG(); + } ufshcd_hold(hba, false); + reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); /* If command is already aborted/completed, return SUCCESS */ - if (!(test_bit(tag, &hba->outstanding_reqs))) + if (!(test_bit(tag, &hba->outstanding_reqs))) { + dev_err(hba->dev, + "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n", + __func__, tag, hba->outstanding_reqs, reg); goto out; + } - reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); if (!(reg & (1 << tag))) { dev_err(hba->dev, "%s: cmd was completed, but without a notifying intr, tag = %d", @@ -3905,7 +4139,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd) scsi_dma_unmap(cmd); spin_lock_irqsave(host->host_lock, flags); - __clear_bit(tag, &hba->outstanding_reqs); + ufshcd_outstanding_req_clear(hba, tag); hba->lrb[tag].cmd = NULL; spin_unlock_irqrestore(host->host_lock, flags); @@ -4155,9 +4389,9 @@ static void ufshcd_init_icc_levels(struct ufs_hba *hba) dev_dbg(hba->dev, "%s: setting icc_level 0x%x", __func__, hba->init_prefetch_data.icc_level); - ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, - QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, - &hba->init_prefetch_data.icc_level); + ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, + QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, + &hba->init_prefetch_data.icc_level); if (ret) dev_err(hba->dev, @@ -4262,7 +4496,6 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) /* UFS device is also active now */ ufshcd_set_ufs_dev_active(hba); ufshcd_force_reset_auto_bkops(hba); - hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; hba->wlun_dev_clr_ua = true; if (ufshcd_get_max_pwr_mode(hba)) { @@ -4276,6 +4509,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) __func__, ret); } + /* set the state as operational after switching to desired gear */ + hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; /* * If we are in error handling context or in power management callbacks * context, no need to scan the host @@ -4285,8 +4520,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) /* clear any previous UFS device information */ memset(&hba->dev_info, 0, sizeof(hba->dev_info)); - if (!ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG, - QUERY_FLAG_IDN_PWR_ON_WPE, &flag)) + if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG, + QUERY_FLAG_IDN_PWR_ON_WPE, &flag)) hba->dev_info.f_power_on_wp_en = flag; if (!hba->is_init_prefetch) diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index 2570d9477b37..e3931d0c94eb 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -3,6 +3,7 @@ * * This code is based on drivers/scsi/ufs/ufshcd.h * Copyright (C) 2011-2013 Samsung India Software Operations + * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved. 
* * Authors: * Santosh Yaraganavi <santosh.sy@samsung.com> @@ -177,7 +178,7 @@ struct ufshcd_lrb { }; /** - * struct ufs_query - holds relevent data structures for query request + * struct ufs_query - holds relevant data structures for query request * @request: request upiu and function * @descriptor: buffer for sending/receiving descriptor * @response: response upiu and response @@ -681,6 +682,9 @@ static inline int ufshcd_dme_peer_get(struct ufs_hba *hba, return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER); } +/* Expose Query-Request API */ +int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, + enum flag_idn idn, bool *flag_res); int ufshcd_hold(struct ufs_hba *hba, bool async); void ufshcd_release(struct ufs_hba *hba); diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c index 0f133c1817de..6164634aff18 100644 --- a/drivers/scsi/vmw_pvscsi.c +++ b/drivers/scsi/vmw_pvscsi.c @@ -349,9 +349,9 @@ static void pvscsi_create_sg(struct pvscsi_ctx *ctx, * Map all data buffers for a command into PCI space and * setup the scatter/gather list if needed. */ -static void pvscsi_map_buffers(struct pvscsi_adapter *adapter, - struct pvscsi_ctx *ctx, struct scsi_cmnd *cmd, - struct PVSCSIRingReqDesc *e) +static int pvscsi_map_buffers(struct pvscsi_adapter *adapter, + struct pvscsi_ctx *ctx, struct scsi_cmnd *cmd, + struct PVSCSIRingReqDesc *e) { unsigned count; unsigned bufflen = scsi_bufflen(cmd); @@ -360,18 +360,30 @@ static void pvscsi_map_buffers(struct pvscsi_adapter *adapter, e->dataLen = bufflen; e->dataAddr = 0; if (bufflen == 0) - return; + return 0; sg = scsi_sglist(cmd); count = scsi_sg_count(cmd); if (count != 0) { int segs = scsi_dma_map(cmd); - if (segs > 1) { + + if (segs == -ENOMEM) { + scmd_printk(KERN_ERR, cmd, + "vmw_pvscsi: Failed to map cmd sglist for DMA.\n"); + return -ENOMEM; + } else if (segs > 1) { pvscsi_create_sg(ctx, sg, segs); e->flags |= PVSCSI_FLAG_CMD_WITH_SG_LIST; ctx->sglPA = pci_map_single(adapter->dev, ctx->sgl, SGL_SIZE, PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(adapter->dev, ctx->sglPA)) { + scmd_printk(KERN_ERR, cmd, + "vmw_pvscsi: Failed to map ctx sglist for DMA.\n"); + scsi_dma_unmap(cmd); + ctx->sglPA = 0; + return -ENOMEM; + } e->dataAddr = ctx->sglPA; } else e->dataAddr = sg_dma_address(sg); @@ -382,8 +394,15 @@ static void pvscsi_map_buffers(struct pvscsi_adapter *adapter, */ ctx->dataPA = pci_map_single(adapter->dev, sg, bufflen, cmd->sc_data_direction); + if (pci_dma_mapping_error(adapter->dev, ctx->dataPA)) { + scmd_printk(KERN_ERR, cmd, + "vmw_pvscsi: Failed to map direct data buffer for DMA.\n"); + return -ENOMEM; + } e->dataAddr = ctx->dataPA; } + + return 0; } static void pvscsi_unmap_buffers(const struct pvscsi_adapter *adapter, @@ -690,6 +709,12 @@ static int pvscsi_queue_ring(struct pvscsi_adapter *adapter, ctx->sensePA = pci_map_single(adapter->dev, cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(adapter->dev, ctx->sensePA)) { + scmd_printk(KERN_ERR, cmd, + "vmw_pvscsi: Failed to map sense buffer for DMA.\n"); + ctx->sensePA = 0; + return -ENOMEM; + } e->senseAddr = ctx->sensePA; e->senseLen = SCSI_SENSE_BUFFERSIZE; } else { @@ -711,7 +736,15 @@ static int pvscsi_queue_ring(struct pvscsi_adapter *adapter, else e->flags = 0; - pvscsi_map_buffers(adapter, ctx, cmd, e); + if (pvscsi_map_buffers(adapter, ctx, cmd, e) != 0) { + if (cmd->sense_buffer) { + pci_unmap_single(adapter->dev, ctx->sensePA, + SCSI_SENSE_BUFFERSIZE, + PCI_DMA_FROMDEVICE); + ctx->sensePA = 0; 
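/*
 * Editor's note: each pci_map_single() in the vmw_pvscsi hunks above is now
 * paired with a pci_dma_mapping_error() check, since the returned bus
 * address may be invalid and must never be handed to the device. A minimal
 * sketch of the check-and-fail pattern, with hypothetical pdev/buf/len
 * values standing in for the driver's contexts:
 */
static int sketch_map(struct pci_dev *pdev, void *buf, size_t len)
{
	dma_addr_t pa = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);

	if (pci_dma_mapping_error(pdev, pa))
		return -ENOMEM;	/* caller unwinds any earlier mappings */
	/* ... use `pa` for the transfer, then release the mapping ... */
	pci_unmap_single(pdev, pa, len, PCI_DMA_TODEVICE);
	return 0;
}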
+ } + return -ENOMEM; + } e->context = pvscsi_map_context(adapter, ctx); diff --git a/drivers/scsi/vmw_pvscsi.h b/drivers/scsi/vmw_pvscsi.h index ee16f0c5c47d..12712c92f37a 100644 --- a/drivers/scsi/vmw_pvscsi.h +++ b/drivers/scsi/vmw_pvscsi.h @@ -26,7 +26,7 @@ #include <linux/types.h> -#define PVSCSI_DRIVER_VERSION_STRING "1.0.5.0-k" +#define PVSCSI_DRIVER_VERSION_STRING "1.0.6.0-k" #define PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT 128
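A closing note on the pvscsi_queue_ring() unwind just above: when command setup maps buffers in stages (sense buffer first, data buffers second), a failure at a later stage must undo every earlier mapping before returning. A minimal sketch of the usual goto-based cleanup idiom, with hypothetical map_sense()/map_data()/unmap_sense() stages that are not part of the driver:

	static int sketch_setup(struct pvscsi_ctx *ctx)
	{
		int err;

		err = map_sense(ctx);		/* hypothetical stage one */
		if (err)
			return err;		/* nothing mapped yet */

		err = map_data(ctx);		/* hypothetical stage two */
		if (err)
			goto err_unmap_sense;	/* undo stage one */

		return 0;

	err_unmap_sense:
		unmap_sense(ctx);
		return err;
	}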